python_code | repo_name | file_path
---|---|---|
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import time
from os import listdir, unlink
from os.path import join as path_join
from unittest import main
from uuid import uuid4
from swiftclient import client
from swift.common import direct_client
from swift.common.exceptions import ClientException
from swift.common.utils import hash_path, readconf
from swift.obj.diskfile import write_metadata, read_metadata, get_data_dir
from test.probe.common import ReplProbeTest, ECProbeTest
RETRIES = 5
def get_data_file_path(obj_dir):
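# Return the path of the newest file in obj_dir: on-disk filenames begin
# with the timestamp, so a reverse sort puts the most recent file first.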
files = []
# We might need to try a few times if a request hasn't yet settled. For
# instance, a PUT can return success when just 2 of 3 nodes have completed.
for attempt in range(RETRIES + 1):
try:
files = sorted(listdir(obj_dir), reverse=True)
break
except Exception:
if attempt < RETRIES:
time.sleep(1)
else:
raise
for filename in files:
return path_join(obj_dir, filename)
class TestObjectFailures(ReplProbeTest):
def _setup_data_file(self, container, obj, data):
client.put_container(self.url, self.token, container,
headers={'X-Storage-Policy':
self.policy.name})
client.put_object(self.url, self.token, container, obj, data)
odata = client.get_object(self.url, self.token, container, obj)[-1]
self.assertEqual(odata, data)
opart, onodes = self.object_ring.get_nodes(
self.account, container, obj)
onode = onodes[0]
node_id = self.config_number(onode)
device = onode['device']
hash_str = hash_path(self.account, container, obj)
obj_server_conf = readconf(self.configs['object-server'][node_id])
devices = obj_server_conf['app:object-server']['devices']
obj_dir = '%s/%s/%s/%s/%s/%s/' % (devices, device,
get_data_dir(self.policy),
opart, hash_str[-3:], hash_str)
data_file = get_data_file_path(obj_dir)
return onode, opart, data_file
def run_quarantine(self):
container = 'container-%s' % uuid4()
obj = 'object-%s' % uuid4()
onode, opart, data_file = self._setup_data_file(container, obj,
b'VERIFY')
# Stash the on disk data for future comparison - this may not equal
# 'VERIFY' if for example the proxy has crypto enabled
backend_data = direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
metadata = read_metadata(data_file)
metadata['ETag'] = 'badetag'
write_metadata(data_file, metadata)
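# The object server only detects the bad ETag after streaming the whole
# body, so this first GET still returns the data; the failed check
# quarantines the file and the follow-up GET should then 404.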
odata = direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
self.assertEqual(odata, backend_data)
try:
direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
raise Exception("Did not quarantine object")
except ClientException as err:
self.assertEqual(err.http_status, 404)
def run_quarantine_range_etag(self):
container = 'container-range-%s' % uuid4()
obj = 'object-range-%s' % uuid4()
onode, opart, data_file = self._setup_data_file(container, obj,
b'RANGE')
# Stash the on disk data for future comparison - this may not equal
# 'RANGE' if for example the proxy has crypto enabled
backend_data = direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
metadata = read_metadata(data_file)
metadata['ETag'] = 'badetag'
write_metadata(data_file, metadata)
base_headers = {'X-Backend-Storage-Policy-Index': self.policy.idx}
for header, result in [({'Range': 'bytes=0-2'}, backend_data[0:3]),
({'Range': 'bytes=1-11'}, backend_data[1:]),
({'Range': 'bytes=0-11'}, backend_data)]:
req_headers = base_headers.copy()
req_headers.update(header)
odata = direct_client.direct_get_object(
onode, opart, self.account, container, obj,
headers=req_headers)[-1]
self.assertEqual(odata, result)
try:
direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
raise Exception("Did not quarantine object")
except ClientException as err:
self.assertEqual(err.http_status, 404)
def run_quarantine_zero_byte_get(self):
container = 'container-zbyte-%s' % uuid4()
obj = 'object-zbyte-%s' % uuid4()
onode, opart, data_file = self._setup_data_file(
container, obj, b'DATA')
metadata = read_metadata(data_file)
unlink(data_file)
with open(data_file, 'w') as fpointer:
write_metadata(fpointer, metadata)
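# The data file is now a zero-byte file that still carries the original
# metadata (including Content-Length), so the object server should spot
# the truncation, quarantine the file, and return a 404.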
try:
direct_client.direct_get_object(
onode, opart, self.account, container, obj, conn_timeout=1,
response_timeout=1, headers={'X-Backend-Storage-Policy-Index':
self.policy.idx})
raise Exception("Did not quarantine object")
except ClientException as err:
self.assertEqual(err.http_status, 404)
def run_quarantine_zero_byte_head(self):
container = 'container-zbyte-%s' % uuid4()
obj = 'object-zbyte-%s' % uuid4()
onode, opart, data_file = self._setup_data_file(
container, obj, b'DATA')
metadata = read_metadata(data_file)
unlink(data_file)
with open(data_file, 'w') as fpointer:
write_metadata(fpointer, metadata)
try:
direct_client.direct_head_object(
onode, opart, self.account, container, obj, conn_timeout=1,
response_timeout=1, headers={'X-Backend-Storage-Policy-Index':
self.policy.idx})
raise Exception("Did not quarantine object")
except ClientException as err:
self.assertEqual(err.http_status, 404)
def run_quarantine_zero_byte_post(self):
container = 'container-zbyte-%s' % uuid4()
obj = 'object-zbyte-%s' % uuid4()
onode, opart, data_file = self._setup_data_file(
container, obj, b'DATA')
metadata = read_metadata(data_file)
unlink(data_file)
with open(data_file, 'w') as fpointer:
write_metadata(fpointer, metadata)
try:
headers = {'X-Object-Meta-1': 'One', 'X-Object-Meta-Two': 'Two',
'X-Backend-Storage-Policy-Index': self.policy.idx}
direct_client.direct_post_object(
onode, opart, self.account,
container, obj,
headers=headers,
conn_timeout=1,
response_timeout=1)
raise Exception("Did not quarantine object")
except ClientException as err:
self.assertEqual(err.http_status, 404)
def test_runner(self):
self.run_quarantine()
self.run_quarantine_range_etag()
self.run_quarantine_zero_byte_get()
self.run_quarantine_zero_byte_head()
self.run_quarantine_zero_byte_post()
class TestECObjectFailures(ECProbeTest):
def test_ec_missing_all_durable_fragments(self):
# This test asserts that when the proxy has enough fragments to
# reconstruct the object but none of them are marked durable,
# the proxy returns a 404.
container_name = 'container-%s' % uuid4()
object_name = 'object-%s' % uuid4()
# create EC container
headers = {'X-Storage-Policy': self.policy.name}
client.put_container(self.url, self.token, container_name,
headers=headers)
# PUT object, should go to primary nodes
client.put_object(self.url, self.token, container_name,
object_name, contents=b'object contents')
# get our node lists
opart, onodes = self.object_ring.get_nodes(
self.account, container_name, object_name)
# sanity test
odata = client.get_object(self.url, self.token, container_name,
object_name)[-1]
self.assertEqual(b'object contents', odata)
# make all fragments non-durable
for node in onodes:
part_dir = self.storage_dir(node, part=opart)
for dirs, subdirs, files in os.walk(part_dir):
for fname in files:
if fname.endswith('.data'):
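# Durable EC fragments carry a '#d' marker in the .data filename;
# renaming the file without it leaves a valid but non-durable fragment.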
non_durable_fname = fname.replace('#d', '')
os.rename(os.path.join(dirs, fname),
os.path.join(dirs, non_durable_fname))
break
headers = direct_client.direct_head_object(
node, opart, self.account, container_name, object_name,
headers={
'X-Backend-Storage-Policy-Index': self.policy.idx,
'X-Backend-Fragment-Preferences': json.dumps([])})
self.assertNotIn('X-Backend-Durable-Timestamp', headers)
# Now a new GET should return *404* because all fragments
# are non-durable, even if they are reconstructable
try:
client.get_object(self.url, self.token, container_name,
object_name)
except client.ClientException as err:
self.assertEqual(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
if __name__ == '__main__':
main()
| swift-master | test/probe/test_object_failures.py |
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import main
from uuid import uuid4
from swiftclient import client, ClientException
from test.probe.common import kill_server, ReplProbeTest, start_server
from swift.common import direct_client, utils
from swift.common.manager import Manager
class TestDbUsyncReplicator(ReplProbeTest):
object_puts = 1 # Overridden in subclass to force rsync
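# The db replicator first tries per-row syncing (usync); once the number
# of differing rows exceeds per_diff it falls back to shipping the whole
# db via rsync, which the rsync subclass below forces.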
def test_metadata_sync(self):
# Create container
container = 'container-%s' % uuid4()
client.put_container(self.url, self.token, container,
headers={'X-Storage-Policy': self.policy.name,
'X-Container-Meta-A': '1',
'X-Container-Meta-B': '1',
'X-Container-Meta-C': '1'})
cpart, cnodes = self.container_ring.get_nodes(self.account, container)
cnode = cnodes.pop()
# 2 of 3 container servers are temporarily down
for node in cnodes:
kill_server((node['ip'], node['port']),
self.ipport2server)
# Put some meta on the lone server, to make sure it's merged properly
# This will 503 (since we don't have a quorum), but we don't care (!)
try:
client.post_container(self.url, self.token, container,
headers={'X-Container-Meta-A': '2',
'X-Container-Meta-B': '2',
'X-Container-Meta-D': '2'})
except ClientException:
pass
# object updates come to only one container server
for _ in range(self.object_puts):
obj = 'object-%s' % uuid4()
client.put_object(self.url, self.token, container, obj, 'VERIFY')
# The 2 container servers come back up
for node in cnodes:
start_server((node['ip'], node['port']),
self.ipport2server)
# But the container server that got the object updates is taken down
kill_server((cnode['ip'], cnode['port']),
self.ipport2server)
# Metadata update will be applied to 2 container servers
# (equal to quorum)
client.post_container(self.url, self.token, container,
headers={'X-Container-Meta-B': '3',
'X-Container-Meta-E': '3'})
# The container server that got the object updates comes back up
start_server((cnode['ip'], cnode['port']),
self.ipport2server)
# other nodes have no objects
for node in cnodes:
resp_headers = direct_client.direct_head_container(
node, cpart, self.account, container)
self.assertIn(resp_headers.get('x-container-object-count'),
(None, '0', 0))
# If the container-replicator on the node that got the object updates
# runs first, its db file may be replicated by rsync to the other
# nodes. In that case the db file would not carry the metadata
# updates, so metadata must be synced before replication
Manager(['container-replicator']).once(
number=self.config_number(cnode))
expected_meta = {
'x-container-meta-a': '2',
'x-container-meta-b': '3',
'x-container-meta-c': '1',
'x-container-meta-d': '2',
'x-container-meta-e': '3',
}
# node that got the object updates now has the meta
resp_headers = direct_client.direct_head_container(
cnode, cpart, self.account, container)
for header, value in expected_meta.items():
self.assertIn(header, resp_headers)
self.assertEqual(value, resp_headers[header])
self.assertNotIn(resp_headers.get('x-container-object-count'),
(None, '0', 0))
# other nodes still have the meta, as well as objects
for node in cnodes:
resp_headers = direct_client.direct_head_container(
node, cpart, self.account, container)
for header, value in expected_meta.items():
self.assertIn(header, resp_headers)
self.assertEqual(value, resp_headers[header])
self.assertNotIn(resp_headers.get('x-container-object-count'),
(None, '0', 0))
# and after full pass on remaining nodes
for node in cnodes:
Manager(['container-replicator']).once(
number=self.config_number(node))
# ... all is right
for node in cnodes + [cnode]:
resp_headers = direct_client.direct_head_container(
node, cpart, self.account, container)
for header, value in expected_meta.items():
self.assertIn(header, resp_headers)
self.assertEqual(value, resp_headers[header])
self.assertNotIn(resp_headers.get('x-container-object-count'),
(None, '0', 0))
class TestDbRsyncReplicator(TestDbUsyncReplicator):
def setUp(self):
super(TestDbRsyncReplicator, self).setUp()
cont_configs = [utils.readconf(p, 'container-replicator')
for p in self.configs['container-replicator'].values()]
# Do more than per_diff object PUTs, to force rsync instead of usync
self.object_puts = 1 + max(int(c.get('per_diff', '1000'))
for c in cont_configs)
if __name__ == '__main__':
main()
| swift-master | test/probe/test_db_replicator.py |
#!/usr/bin/python -u
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import BytesIO
from time import sleep
import uuid
import unittest
from swiftclient import client
from swift.account import reaper
from swift.common import utils
from swift.common.manager import Manager
from swift.common.direct_client import direct_delete_account, \
direct_get_object, direct_head_container, ClientException
from swift.common.request_helpers import get_reserved_name
from test.probe.common import ReplProbeTest, ENABLED_POLICIES
class TestAccountReaper(ReplProbeTest):
def setUp(self):
super(TestAccountReaper, self).setUp()
self.all_objects = []
int_client = self.make_internal_client()
# upload some containers
body = b'test-body'
for policy in ENABLED_POLICIES:
container = 'container-%s-%s' % (policy.name, uuid.uuid4())
client.put_container(self.url, self.token, container,
headers={'X-Storage-Policy': policy.name})
obj = 'object-%s' % uuid.uuid4()
client.put_object(self.url, self.token, container, obj, body)
self.all_objects.append((policy, container, obj))
# Also create some reserved names
container = get_reserved_name(
'reserved', policy.name, str(uuid.uuid4()))
int_client.create_container(
self.account, container,
headers={'X-Storage-Policy': policy.name})
obj = get_reserved_name('object', str(uuid.uuid4()))
int_client.upload_object(
BytesIO(body), self.account, container, obj)
self.all_objects.append((policy, container, obj))
policy.load_ring('/etc/swift')
Manager(['container-updater']).once()
headers = client.head_account(self.url, self.token)
self.assertEqual(int(headers['x-account-container-count']),
len(self.all_objects))
self.assertEqual(int(headers['x-account-object-count']),
len(self.all_objects))
self.assertEqual(int(headers['x-account-bytes-used']),
len(self.all_objects) * len(body))
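# Issue the account DELETE directly on every account replica; the
# account reaper will then walk these DBs and remove the containers
# and objects created above.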
part, nodes = self.account_ring.get_nodes(self.account)
for node in nodes:
direct_delete_account(node, part, self.account)
def _verify_account_reaped(self):
for policy, container, obj in self.all_objects:
# verify that any container deletes were at same timestamp
cpart, cnodes = self.container_ring.get_nodes(
self.account, container)
delete_times = set()
for cnode in cnodes:
try:
direct_head_container(cnode, cpart, self.account,
container)
except ClientException as err:
self.assertEqual(err.http_status, 404)
delete_time = err.http_headers.get(
'X-Backend-DELETE-Timestamp')
# 'X-Backend-DELETE-Timestamp' confirms it was deleted
self.assertTrue(delete_time)
delete_times.add(delete_time)
else:
# Container replicas may not yet be deleted if we have a
# policy with object replicas < container replicas, so
# ignore successful HEAD. We'll check for all replicas to
# be deleted again after running the replicators.
pass
self.assertEqual(1, len(delete_times), delete_times)
# verify that all object deletes were at same timestamp
part, nodes = policy.object_ring.get_nodes(self.account,
container, obj)
headers = {'X-Backend-Storage-Policy-Index': int(policy)}
delete_times = set()
for node in nodes:
try:
direct_get_object(node, part, self.account,
container, obj, headers=headers)
except ClientException as err:
self.assertEqual(err.http_status, 404)
delete_time = err.http_headers.get('X-Backend-Timestamp')
# 'X-Backend-Timestamp' confirms obj was deleted
self.assertTrue(delete_time)
delete_times.add(delete_time)
else:
self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
(self.account, container, obj, node, policy))
self.assertEqual(1, len(delete_times))
# run replicators and updaters
self.get_to_final_state()
for policy, container, obj in self.all_objects:
# verify that ALL container replicas are now deleted
cpart, cnodes = self.container_ring.get_nodes(
self.account, container)
delete_times = set()
for cnode in cnodes:
try:
direct_head_container(cnode, cpart, self.account,
container)
except ClientException as err:
self.assertEqual(err.http_status, 404)
delete_time = err.http_headers.get(
'X-Backend-DELETE-Timestamp')
# 'X-Backend-DELETE-Timestamp' confirms it was deleted
self.assertTrue(delete_time)
delete_times.add(delete_time)
else:
self.fail('Found un-reaped /%s/%s on %r' %
(self.account, container, cnode))
self.assertEqual(1, len(delete_times))
# sanity check that object state is still consistent...
part, nodes = policy.object_ring.get_nodes(self.account,
container, obj)
headers = {'X-Backend-Storage-Policy-Index': int(policy)}
delete_times = set()
for node in nodes:
try:
direct_get_object(node, part, self.account,
container, obj, headers=headers)
except ClientException as err:
self.assertEqual(err.http_status, 404)
delete_time = err.http_headers.get('X-Backend-Timestamp')
# 'X-Backend-Timestamp' confirms obj was deleted
self.assertTrue(delete_time)
delete_times.add(delete_time)
else:
self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
(self.account, container, obj, node, policy))
self.assertEqual(1, len(delete_times))
def test_reap(self):
# run the reaper
Manager(['account-reaper']).once()
self._verify_account_reaped()
def test_delayed_reap(self):
# Create reapers configured to delay reaping by 3 seconds
account_reapers = []
for conf_file in self.configs['account-server'].values():
conf = utils.readconf(conf_file, 'account-reaper')
conf['delay_reaping'] = '3'
account_reapers.append(reaper.AccountReaper(conf))
self.assertTrue(account_reapers)
# run reaper, and make sure that nothing is reaped
for account_reaper in account_reapers:
account_reaper.run_once()
for policy, container, obj in self.all_objects:
cpart, cnodes = self.container_ring.get_nodes(
self.account, container)
for cnode in cnodes:
try:
direct_head_container(cnode, cpart, self.account,
container)
except ClientException:
self.fail(
"Nothing should be reaped. Container should exist")
part, nodes = policy.object_ring.get_nodes(self.account,
container, obj)
headers = {'X-Backend-Storage-Policy-Index': int(policy)}
for node in nodes:
try:
direct_get_object(node, part, self.account,
container, obj, headers=headers)
except ClientException:
self.fail("Nothing should be reaped. Object should exist")
# wait 3 seconds, run reaper, and make sure that all is reaped
sleep(3)
for account_reaper in account_reapers:
account_reaper.run_once()
self._verify_account_reaped()
if __name__ == "__main__":
unittest.main()
| swift-master | test/probe/test_account_reaper.py |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import time
from unittest import main
from uuid import uuid4
from swiftclient import client
from swift.common import direct_client
from swift.obj.diskfile import get_data_dir
from swift.common.exceptions import ClientException
from test.probe.common import (
kill_server, ReplProbeTest, start_server, get_server_number)
from swift.common.utils import readconf
from swift.common.manager import Manager
class TestEmptyDevice(ReplProbeTest):
def _get_objects_dir(self, onode):
device = onode['device']
_, node_id = get_server_number((onode['ip'], onode['port']),
self.ipport2server)
obj_server_conf = readconf(self.configs['object-server'][node_id])
devices = obj_server_conf['app:object-server']['devices']
obj_dir = '%s/%s' % (devices, device)
return obj_dir
def test_main(self):
# Create container
container = 'container-%s' % uuid4()
client.put_container(self.url, self.token, container,
headers={'X-Storage-Policy':
self.policy.name})
cpart, cnodes = self.container_ring.get_nodes(self.account, container)
cnode = cnodes[0]
obj = 'object-%s' % uuid4()
opart, onodes = self.object_ring.get_nodes(
self.account, container, obj)
onode = onodes[0]
# Kill one container/obj primary server
kill_server((onode['ip'], onode['port']), self.ipport2server)
# Delete the default data directory for objects on the primary server
obj_dir = '%s/%s' % (self._get_objects_dir(onode),
get_data_dir(self.policy))
shutil.rmtree(obj_dir, True)
self.assertFalse(os.path.exists(obj_dir))
# Create container/obj (goes to two primary servers and one handoff)
client.put_object(self.url, self.token, container, obj, 'VERIFY')
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != b'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
# Stash the on disk data from a primary for future comparison with the
# handoff - this may not equal 'VERIFY' if for example the proxy has
# crypto enabled
direct_get_data = direct_client.direct_get_object(
onodes[1], opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
# Kill other two container/obj primary servers
# to ensure GET handoff works
for node in onodes[1:]:
kill_server((node['ip'], node['port']), self.ipport2server)
# Indirectly through proxy assert we can get container/obj
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != b'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
# Restart those other two container/obj primary servers
for node in onodes[1:]:
start_server((node['ip'], node['port']), self.ipport2server)
self.assertFalse(os.path.exists(obj_dir))
# We've indirectly verified the handoff node has the object, but
# let's directly verify it.
# Directly to handoff server assert we can get container/obj
another_onode = next(self.object_ring.get_more_nodes(opart))
odata = direct_client.direct_get_object(
another_onode, opart, self.account, container, obj,
headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
self.assertEqual(direct_get_data, odata)
# Assert container listing (via proxy and directly) has container/obj
objs = [o['name'] for o in
client.get_container(self.url, self.token, container)[1]]
if obj not in objs:
raise Exception('Container listing did not know about object')
timeout = time.time() + 5
found_objs_on_cnode = []
while time.time() < timeout:
for cnode in [c for c in cnodes if c not in
found_objs_on_cnode]:
objs = [o['name'] for o in
direct_client.direct_get_container(
cnode, cpart, self.account, container)[1]]
if obj in objs:
found_objs_on_cnode.append(cnode)
if len(found_objs_on_cnode) >= len(cnodes):
break
time.sleep(0.3)
if len(found_objs_on_cnode) < len(cnodes):
missing = ['%s:%s' % (cnode['ip'], cnode['port']) for cnode in
cnodes if cnode not in found_objs_on_cnode]
raise Exception('Container servers %r did not know about object' %
missing)
# Bring the first container/obj primary server back up
start_server((onode['ip'], onode['port']), self.ipport2server)
# Assert that it doesn't have container/obj yet
self.assertFalse(os.path.exists(obj_dir))
try:
direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err:
self.assertEqual(err.http_status, 404)
self.assertFalse(os.path.exists(obj_dir))
else:
self.fail("Expected ClientException but didn't get it")
# Run object replication for first container/obj primary server
_, num = get_server_number(
(onode['ip'], onode.get('replication_port', onode['port'])),
self.ipport2server)
Manager(['object-replicator']).once(number=num)
# Run object replication for handoff node
_, another_num = get_server_number(
(another_onode['ip'],
another_onode.get('replication_port', another_onode['port'])),
self.ipport2server)
Manager(['object-replicator']).once(number=another_num)
# Assert the first container/obj primary server now has container/obj
odata = direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
self.assertEqual(direct_get_data, odata)
# Assert the handoff server no longer has container/obj
try:
direct_client.direct_get_object(
another_onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err:
self.assertEqual(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
if __name__ == '__main__':
main()
| swift-master | test/probe/test_empty_device_handoff.py |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from errno import EEXIST
from shutil import copyfile, move
from tempfile import mkstemp
from time import time
from unittest import main
from uuid import uuid4
from swiftclient import client
from swift.cli.relinker import main as relinker_main
from swift.common.manager import Manager, Server
from swift.common.ring import RingBuilder
from swift.common.utils import replace_partition_in_path, readconf
from swift.obj.diskfile import get_data_dir
from test.probe.common import ECProbeTest, ProbeTest, ReplProbeTest
class TestPartPowerIncrease(ProbeTest):
def setUp(self):
super(TestPartPowerIncrease, self).setUp()
_, self.ring_file_backup = mkstemp()
_, self.builder_file_backup = mkstemp()
self.ring_file = self.object_ring.serialized_path
self.builder_file = self.ring_file.replace('ring.gz', 'builder')
copyfile(self.ring_file, self.ring_file_backup)
copyfile(self.builder_file, self.builder_file_backup)
# Bail out early if the test user is not allowed to write the ring and
# builder files
self.assertTrue(os.access('/etc/swift', os.W_OK))
self.assertTrue(os.access('/etc/swift/backups', os.W_OK))
self.assertTrue(os.access('/etc/swift/object.builder', os.W_OK))
self.assertTrue(os.access('/etc/swift/object.ring.gz', os.W_OK))
# Ensure the test object will be erasure coded
self.data = ' ' * getattr(self.policy, 'ec_segment_size', 1)
self.conf_files = Server('object').conf_files()
self.devices = [readconf(conf_file)['app:object-server']['devices']
for conf_file in self.conf_files]
def tearDown(self):
# Keep a backup copy of the modified .builder file
backup_dir = os.path.join(
os.path.dirname(self.builder_file), 'backups')
try:
os.mkdir(backup_dir)
except OSError as err:
if err.errno != EEXIST:
raise
backup_name = (os.path.join(
backup_dir,
'%d.probe.' % time() + os.path.basename(self.builder_file)))
copyfile(self.builder_file, backup_name)
# Restore original ring
move(self.ring_file_backup, self.ring_file)
move(self.builder_file_backup, self.builder_file)
def _find_objs_ondisk(self, container, obj):
locations = []
opart, onodes = self.object_ring.get_nodes(
self.account, container, obj)
for node in onodes:
start_dir = os.path.join(
self.device_dir(node),
get_data_dir(self.policy),
str(opart))
for root, dirs, files in os.walk(start_dir):
for filename in files:
if filename.endswith('.data'):
locations.append(os.path.join(root, filename))
return locations
def _test_main(self, cancel=False):
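# Exercises the full partition power increase workflow: prepare the
# ring, relink existing objects into the new partitions, apply (or
# cancel) the increase, then clean up the old locations, checking
# object availability at each step.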
container = 'container-%s' % uuid4()
obj = 'object-%s' % uuid4()
obj2 = 'object-%s' % uuid4()
# Create container
headers = {'X-Storage-Policy': self.policy.name}
client.put_container(self.url, self.token, container, headers=headers)
# Create a new object
client.put_object(self.url, self.token, container, obj, self.data)
client.head_object(self.url, self.token, container, obj)
# Prepare partition power increase
builder = RingBuilder.load(self.builder_file)
builder.prepare_increase_partition_power()
builder.save(self.builder_file)
ring_data = builder.get_ring()
ring_data.save(self.ring_file)
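# The saved ring now advertises the upcoming partition power; existing
# objects stay under their old partition dirs until the relinker
# hard-links them into the new ones.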
# Ensure the proxy uses the changed ring
Manager(['proxy']).restart()
# Ensure object is still accessible
client.head_object(self.url, self.token, container, obj)
# Relink existing objects
for conf in self.conf_files:
self.assertEqual(0, relinker_main(['relink', conf]))
# Create second object after relinking and ensure it is accessible
client.put_object(self.url, self.token, container, obj2, self.data)
client.head_object(self.url, self.token, container, obj2)
# Remember the original object locations
org_locations = self._find_objs_ondisk(container, obj)
org_locations += self._find_objs_ondisk(container, obj2)
# Remember the new object locations
new_locations = []
for loc in org_locations:
for dev_root in self.devices:
if loc.startswith(dev_root):
break
else:
self.fail('Unable to find device for %s' % loc)
new_locations.append(replace_partition_in_path(
dev_root, str(loc), self.object_ring.part_power + 1))
# Overwrite existing object - to ensure that older timestamp files
# will be cleaned up properly later
client.put_object(self.url, self.token, container, obj, self.data)
# Ensure objects are still accessible
client.head_object(self.url, self.token, container, obj)
client.head_object(self.url, self.token, container, obj2)
# Increase partition power
builder = RingBuilder.load(self.builder_file)
if not cancel:
builder.increase_partition_power()
else:
builder.cancel_increase_partition_power()
builder.save(self.builder_file)
ring_data = builder.get_ring()
ring_data.save(self.ring_file)
# Ensure the proxy uses the changed ring
Manager(['proxy']).restart()
# Ensure objects are still accessible
client.head_object(self.url, self.token, container, obj)
client.head_object(self.url, self.token, container, obj2)
# Overwrite existing object - to ensure that older timestamp files
# will be cleaned up properly later
client.put_object(self.url, self.token, container, obj, self.data)
# Cleanup old objects in the wrong location
for conf in self.conf_files:
self.assertEqual(0, relinker_main(['cleanup', conf]))
# Ensure objects are still accessible
client.head_object(self.url, self.token, container, obj)
client.head_object(self.url, self.token, container, obj2)
# Ensure data in old or relinked object locations is removed
if not cancel:
for fn in org_locations:
self.assertFalse(os.path.exists(fn))
else:
for fn in new_locations:
self.assertFalse(os.path.exists(fn))
class TestReplPartPowerIncrease(TestPartPowerIncrease, ReplProbeTest):
def test_main(self):
self._test_main()
def test_canceled(self):
self._test_main(cancel=True)
class TestECPartPowerIncrease(TestPartPowerIncrease, ECProbeTest):
def test_main(self):
self._test_main()
def test_canceled(self):
self._test_main(cancel=True)
if __name__ == '__main__':
main()
| swift-master | test/probe/test_object_partpower_increase.py |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from contextlib import contextmanager
import eventlet
import json
import os
import random
import shutil
import time
from uuid import uuid4
from six.moves import http_client as httplib
from six.moves.urllib.parse import urlparse
from swift.common.ring import Ring
from swift.common.manager import Manager
from test.probe import PROXY_BASE_URL
from test.probe.common import resetswift, ReplProbeTest, client
def putrequest(conn, method, path, headers):
conn.putrequest(method, path, skip_host=(headers and 'Host' in headers))
if headers:
for header, value in headers.items():
conn.putheader(header, str(value))
conn.endheaders()
def get_server_and_worker_pids(manager, old_workers=None):
# Gets all the server parent pids, as well as the set of all worker PIDs
# (i.e. any PID whose PPID is in the set of parent pids).
server_pid_set = {pid for server in manager.servers
for (_, pid) in server.iter_pid_files()}
children_pid_set = set()
old_worker_pid_set = set(old_workers or [])
all_pids = [int(f) for f in os.listdir('/proc') if f.isdigit()]
for pid in all_pids:
try:
with open('/proc/%d/status' % pid, 'r') as fh:
for line in fh:
if line.startswith('PPid:\t'):
ppid = int(line[6:])
if ppid in server_pid_set or pid in old_worker_pid_set:
children_pid_set.add(pid)
break
except Exception:
# No big deal, a process could have exited since we listed /proc,
# so we just ignore errors
pass
return {'server': server_pid_set, 'worker': children_pid_set}
def wait_for_pids(manager, callback, timeout=15, old_workers=None):
# Waits up to `timeout` seconds for the supplied callback to return True
# when passed in the manager's pid set.
start_time = time.time()
pid_sets = get_server_and_worker_pids(manager, old_workers=old_workers)
got = callback(pid_sets)
while not got and time.time() - start_time < timeout:
time.sleep(0.1)
pid_sets = get_server_and_worker_pids(manager, old_workers=old_workers)
got = callback(pid_sets)
if time.time() - start_time >= timeout:
raise AssertionError('timed out waiting for PID state; got %r' % (
pid_sets))
return pid_sets
class TestWSGIServerProcessHandling(ReplProbeTest):
# Subclasses need to define SERVER_NAME
HAS_INFO = False
PID_TIMEOUT = 25
def setUp(self):
super(TestWSGIServerProcessHandling, self).setUp()
self.container = 'container-%s' % uuid4()
client.put_container(self.url, self.token, self.container,
headers={'X-Storage-Policy':
self.policy.name})
self.manager = Manager([self.SERVER_NAME])
for server in self.manager.servers:
self.assertTrue(server.get_running_pids,
'No running PIDs for %s' % server.cmd)
self.starting_pids = get_server_and_worker_pids(self.manager)
def assert4xx(self, resp):
self.assertEqual(resp.status // 100, 4)
got_body = resp.read()
try:
self.assertIn('resource could not be found', got_body)
except AssertionError:
self.assertIn('Invalid path: blah', got_body)
def get_conn(self):
scheme, ip, port = self.get_scheme_ip_port()
if scheme == 'https':
return httplib.HTTPSConnection('%s:%s' % (ip, port))
return httplib.HTTPConnection('%s:%s' % (ip, port))
def _check_reload(self):
conn = self.get_conn()
self.addCleanup(conn.close)
# sanity request
self.start_write_req(conn, 'sanity')
resp = self.finish_write_req(conn)
self.check_write_resp(resp)
if self.HAS_INFO:
self.check_info_value(8192)
# Start another write request before reloading...
self.start_write_req(conn, 'across-reload')
if self.HAS_INFO:
self.swap_configs() # new server's max_header_size == 8191
self.do_reload()
wait_for_pids(self.manager, self.make_post_reload_pid_cb(),
old_workers=self.starting_pids['worker'],
timeout=self.PID_TIMEOUT)
# ... and make sure we can finish what we were doing
resp = self.finish_write_req(conn)
self.check_write_resp(resp)
# After this, we're in a funny spot. With eventlet 0.22.0, the
# connection's now closed, but with prior versions we could keep
# going indefinitely. See https://bugs.launchpad.net/swift/+bug/1792615
# Close our connections, to make sure old eventlet shuts down
conn.close()
# sanity
wait_for_pids(self.manager, self.make_post_close_pid_cb(),
old_workers=self.starting_pids['worker'],
timeout=self.PID_TIMEOUT)
if self.HAS_INFO:
self.check_info_value(8191)
class OldReloadMixin(object):
def make_post_reload_pid_cb(self):
def _cb(post_reload_pids):
# We expect all old server PIDs to be gone, a new server present,
# and for there to be exactly 1 old worker PID plus additional new
# worker PIDs.
old_servers_dead = not (self.starting_pids['server'] &
post_reload_pids['server'])
one_old_worker = 1 == len(self.starting_pids['worker'] &
post_reload_pids['worker'])
new_workers_present = (post_reload_pids['worker'] -
self.starting_pids['worker'])
return (post_reload_pids['server'] and old_servers_dead and
one_old_worker and new_workers_present)
return _cb
def make_post_close_pid_cb(self):
def _cb(post_close_pids):
# We expect all old server PIDs to be gone, a new server present,
# no old worker PIDs, and additional new worker PIDs.
old_servers_dead = not (self.starting_pids['server'] &
post_close_pids['server'])
old_workers_dead = not (self.starting_pids['worker'] &
post_close_pids['worker'])
new_workers_present = (post_close_pids['worker'] -
self.starting_pids['worker'])
return (post_close_pids['server'] and old_servers_dead and
old_workers_dead and new_workers_present)
return _cb
def do_reload(self):
self.manager.reload()
class SeamlessReloadMixin(object):
def make_post_reload_pid_cb(self):
def _cb(post_reload_pids):
# We expect all orig server PIDs to STILL BE PRESENT, no new server
# present, and for there to be exactly 1 old worker PID plus
# additional new worker PIDs.
same_servers = (self.starting_pids['server'] ==
post_reload_pids['server'])
one_old_worker = 1 == len(self.starting_pids['worker'] &
post_reload_pids['worker'])
new_workers_present = (post_reload_pids['worker'] -
self.starting_pids['worker'])
return (post_reload_pids['server'] and same_servers and
one_old_worker and new_workers_present)
return _cb
def make_post_close_pid_cb(self):
def _cb(post_close_pids):
# We expect all orig server PIDs to STILL BE PRESENT, no new server
# present, no old worker PIDs, and additional new worker PIDs.
same_servers = (self.starting_pids['server'] ==
post_close_pids['server'])
old_workers_dead = not (self.starting_pids['worker'] &
post_close_pids['worker'])
new_workers_present = (post_close_pids['worker'] -
self.starting_pids['worker'])
return (post_close_pids['server'] and same_servers and
old_workers_dead and new_workers_present)
return _cb
def do_reload(self):
self.manager.reload_seamless()
class ChildReloadMixin(object):
def make_post_reload_pid_cb(self):
def _cb(post_reload_pids):
# We expect all orig server PIDs to STILL BE PRESENT, no new server
# present, exactly 1 surviving old worker PID, and new worker PIDs
# for all but one of the workers.
num_workers = len(self.starting_pids['worker'])
same_servers = (self.starting_pids['server'] ==
post_reload_pids['server'])
one_old_worker = 1 == len(self.starting_pids['worker'] &
post_reload_pids['worker'])
new_workers_present = (post_reload_pids['worker'] -
self.starting_pids['worker'])
return (post_reload_pids['server'] and same_servers and
one_old_worker and
len(new_workers_present) == num_workers - 1)
return _cb
def make_post_close_pid_cb(self):
def _cb(post_close_pids):
# We expect all orig server PIDs to STILL BE PRESENT, no new server
# present, no old worker PIDs, and all new worker PIDs.
same_servers = (self.starting_pids['server'] ==
post_close_pids['server'])
old_workers_dead = not (self.starting_pids['worker'] &
post_close_pids['worker'])
new_workers_present = (post_close_pids['worker'] -
self.starting_pids['worker'])
return (post_close_pids['server'] and same_servers and
old_workers_dead and new_workers_present)
return _cb
def do_reload(self):
self.manager.kill_child_pids(seamless=True)
class TestObjectServerReloadBase(TestWSGIServerProcessHandling):
SERVER_NAME = 'object'
PID_TIMEOUT = 35
def get_scheme_ip_port(self):
self.policy.load_ring('/etc/swift')
self.ring_node = random.choice(
self.policy.object_ring.get_part_nodes(1))
return 'http', self.ring_node['ip'], self.ring_node['port']
def start_write_req(self, conn, suffix):
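# Write straight to the object server; the path is
# /<device>/<partition>/<account>/<container>/<object>, and the
# partition value here is arbitrary since the server just treats it as
# a directory name.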
putrequest(conn, 'PUT', '/%s/123/%s/%s/blah-%s' % (
self.ring_node['device'], self.account, self.container, suffix),
headers={'X-Timestamp': str(time.time()),
'Content-Type': 'application/octet-string',
'Content-Length': len(self.BODY),
'X-Backend-Storage-Policy-Index': str(self.policy.idx)})
def finish_write_req(self, conn):
conn.send(self.BODY)
return conn.getresponse()
def check_write_resp(self, resp):
got_body = resp.read()
self.assertEqual(resp.status // 100, 2, 'Got status %d; %r' %
(resp.status, got_body))
self.assertEqual(b'', got_body)
return resp
class TestObjectServerReload(OldReloadMixin, TestObjectServerReloadBase):
BODY = b'test-object' * 10
def test_object_reload(self):
self._check_reload()
class TestObjectServerReloadSeamless(SeamlessReloadMixin,
TestObjectServerReloadBase):
BODY = b'test-object' * 10
def test_object_reload_seamless(self):
self._check_reload()
class TestObjectServerReloadChild(ChildReloadMixin,
TestObjectServerReloadBase):
BODY = b'test-object' * 10
def test_object_reload_child(self):
self._check_reload()
class TestProxyServerReloadBase(TestWSGIServerProcessHandling):
SERVER_NAME = 'proxy-server'
HAS_INFO = True
def setUp(self):
super(TestProxyServerReloadBase, self).setUp()
self.swift_conf_path = '/etc/swift/swift.conf'
self.new_swift_conf_path = self.swift_conf_path + '.new'
self.saved_swift_conf_path = self.swift_conf_path + '.orig'
shutil.copy(self.swift_conf_path, self.saved_swift_conf_path)
with open(self.swift_conf_path, 'r') as rfh:
config = rfh.read()
section_header = '\n[swift-constraints]\n'
if section_header in config:
config = config.replace(
section_header,
section_header + 'max_header_size = 8191\n',
1)
else:
config += section_header + 'max_header_size = 8191\n'
with open(self.new_swift_conf_path, 'w') as wfh:
wfh.write(config)
wfh.flush()
def tearDown(self):
shutil.move(self.saved_swift_conf_path, self.swift_conf_path)
try:
os.unlink(self.new_swift_conf_path)
except OSError:
pass
super(TestProxyServerReloadBase, self).tearDown()
def swap_configs(self):
shutil.copy(self.new_swift_conf_path, self.swift_conf_path)
def get_scheme_ip_port(self):
parsed = urlparse(PROXY_BASE_URL)
host, port = parsed.netloc.partition(':')[::2]
if not port:
port = '443' if parsed.scheme == 'https' else '80'
return parsed.scheme, host, int(port)
def assertMaxHeaderSize(self, resp, exp_max_header_size):
self.assertEqual(resp.status // 100, 2)
info_dict = json.loads(resp.read())
self.assertEqual(exp_max_header_size,
info_dict['swift']['max_header_size'])
def check_info_value(self, expected_value):
# Query /info to see which server generation we're talking to, by
# checking the reported max_header_size constraint
conn2 = self.get_conn()
putrequest(conn2, 'GET', '/info',
headers={'Content-Length': '0',
'Accept': 'application/json'})
conn2.send('')
resp = conn2.getresponse()
self.assertMaxHeaderSize(resp, expected_value)
conn2.close()
def start_write_req(self, conn, suffix):
putrequest(conn, 'PUT', '/v1/%s/%s/blah-%s' % (
self.account, self.container, suffix),
headers={'X-Auth-Token': self.token,
'Content-Length': len(self.BODY)})
def finish_write_req(self, conn):
conn.send(self.BODY)
return conn.getresponse()
def check_write_resp(self, resp):
got_body = resp.read()
self.assertEqual(resp.status // 100, 2, 'Got status %d; %r' %
(resp.status, got_body))
self.assertEqual(b'', got_body)
return resp
class TestProxyServerReload(OldReloadMixin, TestProxyServerReloadBase):
BODY = b'proxy' * 10
def test_proxy_reload(self):
self._check_reload()
class TestProxyServerReloadSeamless(SeamlessReloadMixin,
TestProxyServerReloadBase):
BODY = b'proxy-seamless' * 10
def test_proxy_reload_seamless(self):
self._check_reload()
class TestProxyServerReloadChild(ChildReloadMixin,
TestProxyServerReloadBase):
BODY = b'proxy-seamless' * 10
# A bit of a lie, but the respawned child won't pick up the updated config
HAS_INFO = False
def test_proxy_reload_child(self):
self._check_reload()
@contextmanager
def spawn_services(ip_ports, timeout=10):
q = eventlet.Queue()
def service(sock):
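# Accept a single connection, report the peer address via the queue,
# then hold the connection open for `timeout` seconds to emulate a
# hung backend.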
try:
conn, address = sock.accept()
q.put(address)
eventlet.sleep(timeout)
conn.close()
finally:
sock.close()
pool = eventlet.GreenPool()
for ip, port in ip_ports:
sock = eventlet.listen((ip, port))
pool.spawn(service, sock)
try:
yield q
finally:
for gt in list(pool.coroutines_running):
gt.kill()
class TestHungDaemon(unittest.TestCase):
def setUp(self):
resetswift()
self.ip_ports = [
(dev['ip'], dev['port'])
for dev in Ring('/etc/swift', ring_name='account').devs
if dev
]
def test_main(self):
reconciler = Manager(['container-reconciler'])
with spawn_services(self.ip_ports) as q:
reconciler.start()
# wait for the reconciler to connect
q.get()
# once it's hung in our connection - send it sig term
print('Attempting to stop reconciler!')
reconciler.stop()
self.assertEqual(1, reconciler.status())
if __name__ == '__main__':
unittest.main()
| swift-master | test/probe/test_signals.py |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from unittest import main
from uuid import uuid4
import random
from collections import defaultdict
import os
import socket
import errno
from swiftclient import client
from swift.common import direct_client
from swift.common.exceptions import ClientException
from swift.common.manager import Manager
from swift.common.utils import md5
from test.probe.common import (
Body, get_server_number, kill_server, start_server,
ReplProbeTest, ECProbeTest)
class TestObjectHandoff(ReplProbeTest):
def test_main(self):
# Create container
container = 'container-%s' % uuid4()
client.put_container(self.url, self.token, container,
headers={'X-Storage-Policy':
self.policy.name})
# Kill one container/obj primary server
cpart, cnodes = self.container_ring.get_nodes(self.account, container)
cnode = cnodes[0]
obj = 'object-%s' % uuid4()
opart, onodes = self.object_ring.get_nodes(
self.account, container, obj)
onode = onodes[0]
kill_server((onode['ip'], onode['port']), self.ipport2server)
# Create container/obj (goes to two primary servers and one handoff)
client.put_object(self.url, self.token, container, obj, b'VERIFY')
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != b'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
# Stash the on disk data from a primary for future comparison with the
# handoff - this may not equal 'VERIFY' if for example the proxy has
# crypto enabled
direct_get_data = direct_client.direct_get_object(
onodes[1], opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
# Kill other two container/obj primary servers
# to ensure GET handoff works
for node in onodes[1:]:
kill_server((node['ip'], node['port']), self.ipport2server)
# Indirectly through proxy assert we can get container/obj
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != b'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
# Restart those other two container/obj primary servers
for node in onodes[1:]:
start_server((node['ip'], node['port']), self.ipport2server)
# We've indirectly verified the handoff node has the container/object,
# but let's directly verify it.
another_onode = next(self.object_ring.get_more_nodes(opart))
odata = direct_client.direct_get_object(
another_onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
self.assertEqual(direct_get_data, odata)
# drop a tempfile in the handoff's datadir, like it might have
# had if there was an rsync failure while it was previously a
# primary
handoff_device_path = self.device_dir(another_onode)
data_filename = None
for root, dirs, files in os.walk(handoff_device_path):
for filename in files:
if filename.endswith('.data'):
data_filename = filename
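# Mimic rsync's partial-transfer naming: a leading dot and a
# random-looking suffix wrapped around the original filename.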
temp_filename = '.%s.6MbL6r' % data_filename
temp_filepath = os.path.join(root, temp_filename)
if not data_filename:
self.fail('Did not find any data files on %r' %
handoff_device_path)
open(temp_filepath, 'w')
# Assert container listing (via proxy and directly) has container/obj
objs = [o['name'] for o in
client.get_container(self.url, self.token, container)[1]]
if obj not in objs:
raise Exception('Container listing did not know about object')
for cnode in cnodes:
objs = [o['name'] for o in
direct_client.direct_get_container(
cnode, cpart, self.account, container)[1]]
if obj not in objs:
raise Exception(
'Container server %s:%s did not know about object' %
(cnode['ip'], cnode['port']))
# Bring the first container/obj primary server back up
start_server((onode['ip'], onode['port']), self.ipport2server)
# Assert that it doesn't have container/obj yet
try:
direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err:
self.assertEqual(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
# Run object replication, ensuring we run the handoff node last so it
# will remove its extra handoff partition
for node in onodes:
_, node_id = get_server_number(
(node['ip'], node.get('replication_port', node['port'])),
self.ipport2server)
Manager(['object-replicator']).once(number=node_id)
another_port_num = another_onode.get(
'replication_port', another_onode['port'])
_, another_num = get_server_number(
(another_onode['ip'], another_port_num), self.ipport2server)
Manager(['object-replicator']).once(number=another_num)
# Assert the first container/obj primary server now has container/obj
odata = direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
self.assertEqual(direct_get_data, odata)
# and that it does *not* have a temporary rsync dropping!
found_data_filename = False
primary_device_path = self.device_dir(onode)
for root, dirs, files in os.walk(primary_device_path):
for filename in files:
if filename.endswith('.6MbL6r'):
self.fail('Found unexpected file %s' %
os.path.join(root, filename))
if filename == data_filename:
found_data_filename = True
self.assertTrue(found_data_filename,
'Did not find data file %r on %r' % (
data_filename, primary_device_path))
# Assert the handoff server no longer has container/obj
try:
direct_client.direct_get_object(
another_onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err:
self.assertEqual(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
# Kill the first container/obj primary server again (we have two
# primaries and the handoff up now)
kill_server((onode['ip'], onode['port']), self.ipport2server)
# Delete container/obj
try:
client.delete_object(self.url, self.token, container, obj)
except client.ClientException as err:
if self.object_ring.replica_count > 2:
raise
# Object DELETE returning 503 for (404, 204)
# remove this with fix for
# https://bugs.launchpad.net/swift/+bug/1318375
self.assertEqual(503, err.http_status)
# Assert we can't head container/obj
try:
client.head_object(self.url, self.token, container, obj)
except client.ClientException as err:
self.assertEqual(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
# Assert container/obj is not in the container listing, both indirectly
# and directly
objs = [o['name'] for o in
client.get_container(self.url, self.token, container)[1]]
if obj in objs:
raise Exception('Container listing still knew about object')
for cnode in cnodes:
objs = [o['name'] for o in
direct_client.direct_get_container(
cnode, cpart, self.account, container)[1]]
if obj in objs:
raise Exception(
'Container server %s:%s still knew about object' %
(cnode['ip'], cnode['port']))
# Restart the first container/obj primary server again
start_server((onode['ip'], onode['port']), self.ipport2server)
# Assert it still has container/obj
direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
# Run object replication, ensuring we run the handoff node last so it
# will remove its extra handoff partition
for node in onodes:
_, node_id = get_server_number(
(node['ip'], node.get('replication_port', node['port'])),
self.ipport2server)
Manager(['object-replicator']).once(number=node_id)
_, another_node_id = get_server_number(
(another_onode['ip'], another_port_num), self.ipport2server)
Manager(['object-replicator']).once(number=another_node_id)
# Assert primary node no longer has container/obj
try:
direct_client.direct_get_object(
another_onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err:
self.assertEqual(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
def test_stale_reads(self):
# Create container
container = 'container-%s' % uuid4()
client.put_container(self.url, self.token, container,
headers={'X-Storage-Policy':
self.policy.name})
# Kill one primary obj server
obj = 'object-%s' % uuid4()
opart, onodes = self.object_ring.get_nodes(
self.account, container, obj)
onode = onodes[0]
kill_server((onode['ip'], onode['port']), self.ipport2server)
# Create container/obj (goes to two primaries and one handoff)
client.put_object(self.url, self.token, container, obj, b'VERIFY')
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != b'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
# Stash the on disk data from a primary for future comparison with the
# handoff - this may not equal 'VERIFY' if for example the proxy has
# crypto enabled
direct_get_data = direct_client.direct_get_object(
onodes[1], opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
# Restart the first container/obj primary server again
start_server((onode['ip'], onode['port']), self.ipport2server)
# send a delete request to primaries
client.delete_object(self.url, self.token, container, obj)
# there should be .ts files in all primaries now
for node in onodes:
try:
direct_client.direct_get_object(
node, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err:
self.assertEqual(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
# verify that handoff still has the data, DELETEs should have gone
# only to primaries
another_onode = next(self.object_ring.get_more_nodes(opart))
handoff_data = direct_client.direct_get_object(
another_onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
self.assertEqual(handoff_data, direct_get_data)
        # Indirectly (i.e., through proxy) try to GET object; it should return
        # a 404. Before bug #1560574, the proxy would return the stale object
        # from the handoff
try:
client.get_object(self.url, self.token, container, obj)
except client.ClientException as err:
self.assertEqual(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
def test_missing_primaries(self):
# Create container
container = 'container-%s' % uuid4()
client.put_container(self.url, self.token, container,
headers={'X-Storage-Policy':
self.policy.name})
# Create container/obj (goes to all three primaries)
obj = 'object-%s' % uuid4()
client.put_object(self.url, self.token, container, obj, b'VERIFY')
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != b'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
        # Kill all primary obj servers
obj = 'object-%s' % uuid4()
opart, onodes = self.object_ring.get_nodes(
self.account, container, obj)
for onode in onodes:
kill_server((onode['ip'], onode['port']), self.ipport2server)
        # Indirectly (i.e., through proxy) try to GET object; it should return
        # a 503, since all primaries will Timeout and handoffs return a 404.
try:
client.get_object(self.url, self.token, container, obj)
except client.ClientException as err:
self.assertEqual(err.http_status, 503)
else:
self.fail("Expected ClientException but didn't get it")
# Restart the first container/obj primary server again
onode = onodes[0]
start_server((onode['ip'], onode['port']), self.ipport2server)
        # Send a delete that will reach the first primary and the handoff.
        # Sure, the DELETE will return a 404 since the handoff doesn't
        # have a .data file, but the object server will still write a
        # tombstone on the handoff node!
try:
client.delete_object(self.url, self.token, container, obj)
except client.ClientException as err:
self.assertEqual(err.http_status, 404)
# kill the first container/obj primary server again
kill_server((onode['ip'], onode['port']), self.ipport2server)
# a new GET should return a 404, since all primaries will Timeout
# and the handoff will return a 404 but this time with a tombstone
try:
client.get_object(self.url, self.token, container, obj)
except client.ClientException as err:
self.assertEqual(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
class TestECObjectHandoff(ECProbeTest):
def get_object(self, container_name, object_name):
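        # Helper: stream the object back in 64 KiB chunks and return the MD5
        # of the reassembled body, which the tests below compare against the
        # client-side etag of the data that was PUT.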
headers, body = client.get_object(self.url, self.token,
container_name,
object_name,
resp_chunk_size=64 * 2 ** 10)
resp_checksum = md5(usedforsecurity=False)
for chunk in body:
resp_checksum.update(chunk)
return resp_checksum.hexdigest()
def test_ec_handoff_overwrite(self):
container_name = 'container-%s' % uuid4()
object_name = 'object-%s' % uuid4()
# create EC container
headers = {'X-Storage-Policy': self.policy.name}
client.put_container(self.url, self.token, container_name,
headers=headers)
# PUT object
old_contents = Body()
client.put_object(self.url, self.token, container_name,
object_name, contents=old_contents)
# get our node lists
opart, onodes = self.object_ring.get_nodes(
self.account, container_name, object_name)
# shutdown one of the primary data nodes
failed_primary = random.choice(onodes)
failed_primary_device_path = self.device_dir(failed_primary)
# first read its ec etag value for future reference - this may not
# equal old_contents.etag if for example the proxy has crypto enabled
req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
headers = direct_client.direct_head_object(
failed_primary, opart, self.account, container_name,
object_name, headers=req_headers)
old_backend_etag = headers['X-Object-Sysmeta-EC-Etag']
self.kill_drive(failed_primary_device_path)
# overwrite our object with some new data
new_contents = Body()
client.put_object(self.url, self.token, container_name,
object_name, contents=new_contents)
self.assertNotEqual(new_contents.etag, old_contents.etag)
# restore failed primary device
self.revive_drive(failed_primary_device_path)
# sanity - failed node has old contents
req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
headers = direct_client.direct_head_object(
failed_primary, opart, self.account, container_name,
object_name, headers=req_headers)
self.assertEqual(headers['X-Object-Sysmeta-EC-Etag'],
old_backend_etag)
# we have 1 primary with wrong old etag, and we should have 5 with
# new etag plus a handoff with the new etag, so killing 2 other
# primaries forces proxy to try to GET from all primaries plus handoff.
other_nodes = [n for n in onodes if n != failed_primary]
random.shuffle(other_nodes)
# grab the value of the new content's ec etag for future reference
headers = direct_client.direct_head_object(
other_nodes[0], opart, self.account, container_name,
object_name, headers=req_headers)
new_backend_etag = headers['X-Object-Sysmeta-EC-Etag']
for node in other_nodes[:2]:
self.kill_drive(self.device_dir(node))
# sanity, after taking out two primaries we should be down to
# only four primaries, one of which has the old etag - but we
# also have a handoff with the new etag out there
found_frags = defaultdict(int)
req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
for node in onodes + list(self.object_ring.get_more_nodes(opart)):
try:
headers = direct_client.direct_head_object(
node, opart, self.account, container_name,
object_name, headers=req_headers)
except Exception:
continue
found_frags[headers['X-Object-Sysmeta-EC-Etag']] += 1
self.assertEqual(found_frags, {
new_backend_etag: 4, # this should be enough to rebuild!
old_backend_etag: 1,
})
# clear node error limiting
Manager(['proxy']).restart()
resp_etag = self.get_object(container_name, object_name)
self.assertEqual(resp_etag, new_contents.etag)
def _check_nodes(self, opart, onodes, container_name, object_name):
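        # Helper: HEAD every primary and handoff node and count how many
        # copies of each EC fragment index are currently reachable, skipping
        # nodes that are down (connection refused) or that return 404.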
found_frags = defaultdict(int)
req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
for node in onodes + list(self.object_ring.get_more_nodes(opart)):
try:
headers = direct_client.direct_head_object(
node, opart, self.account, container_name,
object_name, headers=req_headers)
except socket.error as e:
if e.errno != errno.ECONNREFUSED:
raise
except direct_client.DirectClientException as e:
if e.http_status != 404:
raise
else:
found_frags[headers['X-Object-Sysmeta-Ec-Frag-Index']] += 1
return found_frags
def test_ec_handoff_duplicate_available(self):
container_name = 'container-%s' % uuid4()
object_name = 'object-%s' % uuid4()
# create EC container
headers = {'X-Storage-Policy': self.policy.name}
client.put_container(self.url, self.token, container_name,
headers=headers)
# get our node lists
opart, onodes = self.object_ring.get_nodes(
self.account, container_name, object_name)
# find both primary servers that have both of their devices in
# the primary node list
group_nodes_by_config = defaultdict(list)
for n in onodes:
group_nodes_by_config[self.config_number(n)].append(n)
double_disk_primary = []
for config_number, node_list in group_nodes_by_config.items():
if len(node_list) > 1:
double_disk_primary.append((config_number, node_list))
# sanity, in a 4+2 with 8 disks two servers will be doubled
self.assertEqual(len(double_disk_primary), 2)
# shutdown the first double primary
primary0_config_number, primary0_node_list = double_disk_primary[0]
Manager(['object-server']).stop(number=primary0_config_number)
# PUT object
contents = Body()
client.put_object(self.url, self.token, container_name,
object_name, contents=contents)
# sanity fetch two frags on handoffs
handoff_frags = []
for node in self.object_ring.get_more_nodes(opart):
headers, data = direct_client.direct_get_object(
node, opart, self.account, container_name, object_name,
headers={'X-Backend-Storage-Policy-Index': int(self.policy)}
)
handoff_frags.append((node, headers, data))
# bring the first double primary back, and fail the other one
Manager(['object-server']).start(number=primary0_config_number)
primary1_config_number, primary1_node_list = double_disk_primary[1]
Manager(['object-server']).stop(number=primary1_config_number)
# we can still GET the object
resp_etag = self.get_object(container_name, object_name)
self.assertEqual(resp_etag, contents.etag)
# now start to "revert" the first handoff frag
node = primary0_node_list[0]
handoff_node, headers, data = handoff_frags[0]
# N.B. object server api returns quoted ETag
headers['ETag'] = headers['Etag'].strip('"')
headers['X-Backend-Storage-Policy-Index'] = int(self.policy)
direct_client.direct_put_object(
node, opart,
self.account, container_name, object_name,
contents=data, headers=headers)
# sanity - check available frags
frag2count = self._check_nodes(opart, onodes,
container_name, object_name)
# ... five frags total
self.assertEqual(sum(frag2count.values()), 5)
# ... only 4 unique indexes
self.assertEqual(len(frag2count), 4)
# we can still GET the object
resp_etag = self.get_object(container_name, object_name)
self.assertEqual(resp_etag, contents.etag)
        # ... but we need both handoffs or we get an error
for handoff_node, hdrs, data in handoff_frags:
Manager(['object-server']).stop(
number=self.config_number(handoff_node))
with self.assertRaises(Exception) as cm:
self.get_object(container_name, object_name)
self.assertIn(cm.exception.http_status, (404, 503))
Manager(['object-server']).start(
number=self.config_number(handoff_node))
# fix everything
Manager(['object-server']).start(number=primary1_config_number)
Manager(["object-reconstructor"]).once()
# sanity - check available frags
frag2count = self._check_nodes(opart, onodes,
container_name, object_name)
# ... six frags total
self.assertEqual(sum(frag2count.values()), 6)
# ... all six unique
self.assertEqual(len(frag2count), 6)
def test_ec_primary_timeout(self):
container_name = 'container-%s' % uuid4()
object_name = 'object-%s' % uuid4()
# create EC container
headers = {'X-Storage-Policy': self.policy.name}
client.put_container(self.url, self.token, container_name,
headers=headers)
# PUT object, should go to primary nodes
old_contents = Body()
client.put_object(self.url, self.token, container_name,
object_name, contents=old_contents)
# get our node lists
opart, onodes = self.object_ring.get_nodes(
self.account, container_name, object_name)
# shutdown three of the primary data nodes
for i in range(3):
failed_primary = onodes[i]
failed_primary_device_path = self.device_dir(failed_primary)
self.kill_drive(failed_primary_device_path)
        # Indirectly (i.e., through proxy) try to GET object; it should return
        # a 503, since all primaries will Timeout and handoffs return a 404.
try:
client.get_object(self.url, self.token, container_name,
object_name)
except client.ClientException as err:
self.assertEqual(err.http_status, 503)
else:
self.fail("Expected ClientException but didn't get it")
# Send a delete to write down tombstones in the handoff nodes
client.delete_object(self.url, self.token, container_name, object_name)
# Now a new GET should return 404 because the handoff nodes
# return a 404 with a Tombstone.
try:
client.get_object(self.url, self.token, container_name,
object_name)
except client.ClientException as err:
self.assertEqual(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
if __name__ == '__main__':
main()
| swift-master | test/probe/test_object_handoff.py |
#!/usr/bin/python -u
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random
import time
import uuid
import unittest
from swift.common.internal_client import InternalClient, UnexpectedResponse
from swift.common.manager import Manager
from swift.common.utils import Timestamp
from test.probe.common import ReplProbeTest, ENABLED_POLICIES
from test.probe.brain import BrainSplitter
from swiftclient import client
class TestObjectExpirer(ReplProbeTest):
def setUp(self):
self.expirer = Manager(['object-expirer'])
self.expirer.start()
err = self.expirer.stop()
if err:
raise unittest.SkipTest('Unable to verify object-expirer service')
conf_files = []
for server in self.expirer.servers:
conf_files.extend(server.conf_files())
conf_file = conf_files[0]
self.client = InternalClient(conf_file, 'probe-test', 3)
super(TestObjectExpirer, self).setUp()
self.container_name = 'container-%s' % uuid.uuid4()
self.object_name = 'object-%s' % uuid.uuid4()
self.brain = BrainSplitter(self.url, self.token, self.container_name,
self.object_name)
def _check_obj_in_container_listing(self):
for obj in self.client.iter_objects(self.account,
self.container_name):
if self.object_name == obj['name']:
return True
return False
@unittest.skipIf(len(ENABLED_POLICIES) < 2, "Need more than one policy")
def test_expirer_object_split_brain(self):
old_policy = random.choice(ENABLED_POLICIES)
wrong_policy = random.choice([p for p in ENABLED_POLICIES
if p != old_policy])
# create an expiring object and a container with the wrong policy
self.brain.stop_primary_half()
self.brain.put_container(int(old_policy))
self.brain.put_object(headers={'X-Delete-After': 2})
# get the object timestamp
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
create_timestamp = Timestamp(metadata['x-timestamp'])
self.brain.start_primary_half()
# get the expiring object updates in their queue, while we have all
# the servers up
Manager(['object-updater']).once()
self.brain.stop_handoff_half()
self.brain.put_container(int(wrong_policy))
# don't start handoff servers, only wrong policy is available
# make sure auto-created containers get in the account listing
Manager(['container-updater']).once()
# this guy should no-op since it's unable to expire the object
self.expirer.once()
self.brain.start_handoff_half()
self.get_to_final_state()
# validate object is expired
found_in_policy = None
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
self.assertIn('x-backend-timestamp', metadata)
self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
create_timestamp)
# but it is still in the listing
self.assertTrue(self._check_obj_in_container_listing(),
msg='Did not find listing for %s' % self.object_name)
# clear proxy cache
client.post_container(self.url, self.token, self.container_name, {})
# run the expirer again after replication
self.expirer.once()
# object is not in the listing
self.assertFalse(self._check_obj_in_container_listing(),
msg='Found listing for %s' % self.object_name)
# and validate object is tombstoned
found_in_policy = None
for policy in ENABLED_POLICIES:
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(policy)})
if 'x-backend-timestamp' in metadata:
if found_in_policy:
self.fail('found object in %s and also %s' %
(found_in_policy, policy))
found_in_policy = policy
self.assertIn('x-backend-timestamp', metadata)
self.assertGreater(Timestamp(metadata['x-backend-timestamp']),
create_timestamp)
def test_expirer_doesnt_make_async_pendings(self):
# The object expirer cleans up its own queue. The inner loop
# basically looks like this:
#
# for obj in stuff_to_delete:
# delete_the_object(obj)
# remove_the_queue_entry(obj)
#
# By default, upon receipt of a DELETE request for an expiring
# object, the object servers will create async_pending records to
# clean the expirer queue. Since the expirer cleans its own queue,
# this is unnecessary. The expirer can make requests in such a way
        # that the object server does not write out any async pendings; this
# test asserts that this is the case.
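        # (How it does so is an implementation detail we don't assert on
        # directly: the expirer is believed to send its DELETEs with an
        # X-Backend-Clean-Expiring-Object-Queue header telling the object
        # server to skip the queue-cleanup async_pending.)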
# Make an expiring object in each policy
for policy in ENABLED_POLICIES:
container_name = "expirer-test-%d" % policy.idx
container_headers = {'X-Storage-Policy': policy.name}
client.put_container(self.url, self.token, container_name,
headers=container_headers)
now = time.time()
delete_at = int(now + 2.0)
client.put_object(
self.url, self.token, container_name, "some-object",
headers={'X-Delete-At': str(delete_at),
'X-Timestamp': Timestamp(now).normal},
contents='dontcare')
time.sleep(2.0)
# make sure auto-created expirer-queue containers get in the account
# listing so the expirer can find them
Manager(['container-updater']).once()
# Make sure there's no async_pendings anywhere. Probe tests only run
# on single-node installs anyway, so this set should be small enough
# that an exhaustive check doesn't take too long.
all_obj_nodes = self.get_all_object_nodes()
pendings_before = self.gather_async_pendings(all_obj_nodes)
# expire the objects
Manager(['object-expirer']).once()
pendings_after = self.gather_async_pendings(all_obj_nodes)
self.assertEqual(pendings_after, pendings_before)
def test_expirer_object_should_not_be_expired(self):
        # The current object-expirer uses the x-if-delete-at header to check
        # that an object really may be deleted by the expirer. If an object
        # either has no x-delete-at metadata or has an x-delete-at value
        # different from the x-if-delete-at value, the expirer's delete will
        # fail with 412 Precondition Failed.
        # However, if some of the objects are on handoff nodes, the expirer
        # can put a tombstone with a timestamp equal to x-delete-at, and
        # object consistency will then be resolved in favour of the newer
        # timestamp (in particular, the overwritten case w/o x-delete-at).
        # This test asserts that in such a situation the overwritten object,
        # which has a larger timestamp than the original expiration date,
        # should at least be safe.
def put_object(headers):
# use internal client to PUT objects so that X-Timestamp in headers
# is effective
headers['Content-Length'] = '0'
path = self.client.make_path(
self.account, self.container_name, self.object_name)
try:
self.client.make_request('PUT', path, headers, (2,))
except UnexpectedResponse as e:
self.fail(
'Expected 201 for PUT object but got %s' % e.resp.status)
obj_brain = BrainSplitter(self.url, self.token, self.container_name,
self.object_name, 'object', self.policy)
# T(obj_created) < T(obj_deleted with x-delete-at) < T(obj_recreated)
# < T(expirer_executed)
# Recreated obj should be appeared in any split brain case
obj_brain.put_container()
# T(obj_deleted with x-delete-at)
# object-server accepts req only if X-Delete-At is later than 'now'
# so here, T(obj_created) < T(obj_deleted with x-delete-at)
now = time.time()
delete_at = int(now + 2.0)
recreate_at = delete_at + 1.0
put_object(headers={'X-Delete-At': str(delete_at),
'X-Timestamp': Timestamp(now).normal})
        # stop some object servers to create a situation where the
        # object-expirer can put a tombstone on the primary nodes.
obj_brain.stop_primary_half()
# increment the X-Timestamp explicitly
# (will be T(obj_deleted with x-delete-at) < T(obj_recreated))
put_object(headers={'X-Object-Meta-Expired': 'False',
'X-Timestamp': Timestamp(recreate_at).normal})
# make sure auto-created containers get in the account listing
Manager(['container-updater']).once()
# sanity, the newer object is still there
try:
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name)
except UnexpectedResponse as e:
self.fail(
'Expected 200 for HEAD object but got %s' % e.resp.status)
self.assertIn('x-object-meta-expired', metadata)
# some object servers recovered
obj_brain.start_primary_half()
        # sleep until after recreate_at
while time.time() <= recreate_at:
time.sleep(0.1)
# Now, expirer runs at the time after obj is recreated
self.expirer.once()
# verify that original object was deleted by expirer
obj_brain.stop_handoff_half()
try:
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,))
except UnexpectedResponse as e:
self.fail(
'Expected 404 for HEAD object but got %s' % e.resp.status)
obj_brain.start_handoff_half()
# and inconsistent state of objects is recovered by replicator
Manager(['object-replicator']).once()
# check if you can get recreated object
try:
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name)
except UnexpectedResponse as e:
self.fail(
'Expected 200 for HEAD object but got %s' % e.resp.status)
self.assertIn('x-object-meta-expired', metadata)
def _test_expirer_delete_outdated_object_version(self, object_exists):
# This test simulates a case where the expirer tries to delete
# an outdated version of an object.
        # One case is where the expirer gets a 404 because the newest version
        # of the object is offline.
# Another case is where the expirer gets a 412, since the old version
# of the object mismatches the expiration time sent by the expirer.
# In any of these cases, the expirer should retry deleting the object
# later, for as long as a reclaim age has not passed.
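        # Rough sequence exercised below:
        #   1. (optionally) PUT the object with no expiration
        #   2. stop one half of the object servers and PUT a newer version
        #      carrying X-Delete-At, so only the other half has it
        #   3. swap which half is running so that only the outdated version
        #      is visible, then run the expirer -> the delete fails
        #   4. bring everything back, replicate, and run the expirer again
        #      -> the delete now succeeds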
obj_brain = BrainSplitter(self.url, self.token, self.container_name,
self.object_name, 'object', self.policy)
obj_brain.put_container()
if object_exists:
obj_brain.put_object()
# currently, the object either doesn't exist, or does not have
# an expiration
# stop primary servers and put a newer version of the object, this
# time with an expiration. only the handoff servers will have
# the new version
obj_brain.stop_primary_half()
now = time.time()
delete_at = int(now + 2.0)
obj_brain.put_object({'X-Delete-At': str(delete_at)})
# make sure auto-created containers get in the account listing
Manager(['container-updater']).once()
# update object record in the container listing
Manager(['container-replicator']).once()
# take handoff servers down, and bring up the outdated primary servers
obj_brain.start_primary_half()
obj_brain.stop_handoff_half()
# wait until object expiration time
while time.time() <= delete_at:
time.sleep(0.1)
# run expirer against the outdated servers. it should fail since
# the outdated version does not match the expiration time
self.expirer.once()
# bring all servers up, and run replicator to update servers
obj_brain.start_handoff_half()
Manager(['object-replicator']).once()
# verify the deletion has failed by checking the container listing
self.assertTrue(self._check_obj_in_container_listing(),
msg='Did not find listing for %s' % self.object_name)
# run expirer again, delete should now succeed
self.expirer.once()
# verify the deletion by checking the container listing
self.assertFalse(self._check_obj_in_container_listing(),
msg='Found listing for %s' % self.object_name)
def test_expirer_delete_returns_outdated_404(self):
self._test_expirer_delete_outdated_object_version(object_exists=False)
def test_expirer_delete_returns_outdated_412(self):
self._test_expirer_delete_outdated_object_version(object_exists=True)
def test_slo_async_delete(self):
if not self.cluster_info.get('slo', {}).get('allow_async_delete'):
raise unittest.SkipTest('allow_async_delete not enabled')
segment_container = self.container_name + '_segments'
client.put_container(self.url, self.token, self.container_name, {})
client.put_container(self.url, self.token, segment_container, {})
client.put_object(self.url, self.token,
segment_container, 'segment_1', b'1234')
client.put_object(self.url, self.token,
segment_container, 'segment_2', b'5678')
client.put_object(
self.url, self.token, self.container_name, 'slo', json.dumps([
{'path': segment_container + '/segment_1'},
{'data': 'Cg=='},
{'path': segment_container + '/segment_2'},
]), query_string='multipart-manifest=put')
_, body = client.get_object(self.url, self.token,
self.container_name, 'slo')
self.assertEqual(body, b'1234\n5678')
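        # A DELETE with multipart-manifest=delete&async=true removes the
        # manifest immediately but only queues the segments for deletion;
        # the object-expirer is what actually cleans them up later, which is
        # what the rest of this test demonstrates.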
client.delete_object(
self.url, self.token, self.container_name, 'slo',
query_string='multipart-manifest=delete&async=true')
# Object's deleted
_, objects = client.get_container(self.url, self.token,
self.container_name)
self.assertEqual(objects, [])
with self.assertRaises(client.ClientException) as caught:
client.get_object(self.url, self.token, self.container_name, 'slo')
self.assertEqual(404, caught.exception.http_status)
# But segments are still around and accessible
_, objects = client.get_container(self.url, self.token,
segment_container)
self.assertEqual([o['name'] for o in objects],
['segment_1', 'segment_2'])
_, body = client.get_object(self.url, self.token,
segment_container, 'segment_1')
self.assertEqual(body, b'1234')
_, body = client.get_object(self.url, self.token,
segment_container, 'segment_2')
self.assertEqual(body, b'5678')
# make sure auto-created expirer-queue containers get in the account
# listing so the expirer can find them
Manager(['container-updater']).once()
self.expirer.once()
# Now the expirer has cleaned up the segments
_, objects = client.get_container(self.url, self.token,
segment_container)
self.assertEqual(objects, [])
with self.assertRaises(client.ClientException) as caught:
client.get_object(self.url, self.token,
segment_container, 'segment_1')
self.assertEqual(404, caught.exception.http_status)
with self.assertRaises(client.ClientException) as caught:
client.get_object(self.url, self.token,
segment_container, 'segment_2')
self.assertEqual(404, caught.exception.http_status)
if __name__ == "__main__":
unittest.main()
| swift-master | test/probe/test_object_expirer.py |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import unittest
import os
import uuid
import shutil
from datetime import datetime
from six.moves.configparser import ConfigParser
from test.probe.brain import BrainSplitter
from test.probe.common import ReplProbeTest
from swift.common import manager
from swift.common.storage_policy import get_policy_string
from swift.common.manager import Manager, Server
from swift.common.utils import readconf
CONF_SECTION = 'object-auditor:watcher:swift#dark_data'
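# setUp() below writes a trimmed object-auditor config for each node into a
# temporary SWIFT_DIR. The options this test cares about end up looking like
# this (other inherited options are omitted here for brevity):
#
#   [object-auditor]
#   watchers = swift#dark_data
#
#   [object-auditor:watcher:swift#dark_data]
#   action = delete
#   grace_age = 0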
class TestDarkDataDeletion(ReplProbeTest):
# NB: could be 'quarantine' in another test
action = 'delete'
def setUp(self):
"""
Reset all environment and start all servers.
"""
super(TestDarkDataDeletion, self).setUp()
self.conf_dest = \
os.path.join('/tmp/',
datetime.now().strftime('swift-%Y-%m-%d_%H-%M-%S-%f'))
os.mkdir(self.conf_dest)
object_server_dir = os.path.join(self.conf_dest, 'object-server')
os.mkdir(object_server_dir)
for conf_file in Server('object-auditor').conf_files():
config = readconf(conf_file)
if 'object-auditor' not in config:
continue # *somebody* should be set up to run the auditor
config['object-auditor'].update(
{'watchers': 'swift#dark_data'})
# Note that this setdefault business may mean the watcher doesn't
# pick up DEFAULT values, but that (probably?) won't matter.
# We set grace_age to 0 so that tests don't have to deal with time.
config.setdefault(CONF_SECTION, {}).update(
{'action': self.action,
'grace_age': "0"})
parser = ConfigParser()
for section in ('object-auditor', CONF_SECTION):
parser.add_section(section)
for option, value in config[section].items():
parser.set(section, option, value)
file_name = os.path.basename(conf_file)
if file_name.endswith('.d'):
# Work around conf.d setups (like you might see with VSAIO)
file_name = file_name[:-2]
with open(os.path.join(object_server_dir, file_name), 'w') as fp:
parser.write(fp)
self.container_name = 'container-%s' % uuid.uuid4()
self.object_name = 'object-%s' % uuid.uuid4()
self.brain = BrainSplitter(self.url, self.token, self.container_name,
self.object_name, 'object',
policy=self.policy)
def tearDown(self):
shutil.rmtree(self.conf_dest)
def gather_object_files_by_ext(self):
result = collections.defaultdict(set)
for node in self.brain.nodes:
for path, _, files in os.walk(os.path.join(
self.device_dir(node),
get_policy_string('objects', self.policy))):
for file in files:
if file in ('.lock', 'hashes.pkl', 'hashes.invalid',
'.lock-replication'):
continue
_, ext = os.path.splitext(file)
result[ext].add(os.path.join(path, file))
return result
def test_dark_data(self):
self.brain.put_container()
self.brain.put_object()
self.brain.stop_handoff_half()
self.brain.delete_object()
Manager(['object-updater']).once()
Manager(['container-replicator']).once()
# Sanity check:
# * all containers are empty
# * primaries that are still up have two .ts files
# * primary that's down has one .data file
for index, (headers, items) in self.direct_get_container(
container=self.container_name).items():
self.assertEqual(headers['X-Container-Object-Count'], '0')
self.assertEqual(items, [])
files = self.gather_object_files_by_ext()
self.assertLengthEqual(files, 2)
self.assertLengthEqual(files['.ts'], 2)
self.assertLengthEqual(files['.data'], 1)
# Simulate a reclaim_age passing,
# so the tombstones all got cleaned up
for file_path in files['.ts']:
os.unlink(file_path)
# Old node gets reintroduced to the cluster
self.brain.start_handoff_half()
        # ...so replication thinks it's got some work to do
Manager(['object-replicator']).once()
# Now we're back to *three* .data files
files = self.gather_object_files_by_ext()
self.assertLengthEqual(files, 1)
self.assertLengthEqual(files['.data'], 3)
# But that's OK, audit watchers to the rescue!
old_swift_dir = manager.SWIFT_DIR
manager.SWIFT_DIR = self.conf_dest
try:
Manager(['object-auditor']).once()
finally:
manager.SWIFT_DIR = old_swift_dir
# Verify that the policy was applied.
self.check_on_disk_files(files['.data'])
def check_on_disk_files(self, files):
for file_path in files:
# File's not there
self.assertFalse(os.path.exists(file_path))
            # And it's not quarantined, either!
self.assertPathDoesNotExist(os.path.join(
file_path[:file_path.index('objects')], 'quarantined'))
def assertPathExists(self, path):
msg = "Expected path %r to exist, but it doesn't" % path
self.assertTrue(os.path.exists(path), msg)
def assertPathDoesNotExist(self, path):
msg = "Expected path %r to not exist, but it does" % path
self.assertFalse(os.path.exists(path), msg)
class TestDarkDataQuarantining(TestDarkDataDeletion):
action = 'quarantine'
def check_on_disk_files(self, files):
for file_path in files:
# File's not there
self.assertPathDoesNotExist(file_path)
# Got quarantined
parts = file_path.split(os.path.sep)
policy_dir = get_policy_string('objects', self.policy)
quarantine_dir = parts[:parts.index(policy_dir)] + ['quarantined']
quarantine_path = os.path.sep.join(
quarantine_dir + [policy_dir] + parts[-2:])
self.assertPathExists(quarantine_path)
if __name__ == "__main__":
unittest.main()
| swift-master | test/probe/test_dark_data.py |
# Copyright (c) 2010-2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
eventlet.monkey_patch()
from test import get_config
from swift.common.utils import config_true_value
config = get_config('probe_test')
CHECK_SERVER_TIMEOUT = int(config.get('check_server_timeout', 30))
VALIDATE_RSYNC = config_true_value(config.get('validate_rsync', False))
PROXY_BASE_URL = config.get('proxy_base_url')
if PROXY_BASE_URL is None:
# TODO: find and load an "appropriate" proxy-server.conf(.d), piece
# something together from bind_ip, bind_port, and cert_file
PROXY_BASE_URL = 'http://127.0.0.1:8080'
| swift-master | test/probe/__init__.py |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import BytesIO
from unittest import main, SkipTest
from uuid import uuid4
from swiftclient import client
from swiftclient.exceptions import ClientException
from swift.common import direct_client
from swift.common.manager import Manager
from swift.common.swob import normalize_etag
from test.probe.common import kill_nonprimary_server, \
kill_server, ReplProbeTest, start_server, ECProbeTest
class TestObjectAsyncUpdate(ReplProbeTest):
def test_main(self):
# Create container
container = 'container-%s' % uuid4()
client.put_container(self.url, self.token, container)
# Kill container servers excepting two of the primaries
cpart, cnodes = self.container_ring.get_nodes(self.account, container)
cnode = cnodes[0]
kill_nonprimary_server(cnodes, self.ipport2server)
kill_server((cnode['ip'], cnode['port']), self.ipport2server)
# Create container/obj
obj = 'object-%s' % uuid4()
client.put_object(self.url, self.token, container, obj, '')
# Restart other primary server
start_server((cnode['ip'], cnode['port']), self.ipport2server)
# Assert it does not know about container/obj
self.assertFalse(direct_client.direct_get_container(
cnode, cpart, self.account, container)[1])
# Run the object-updaters
Manager(['object-updater']).once()
# Assert the other primary server now knows about container/obj
objs = [o['name'] for o in direct_client.direct_get_container(
cnode, cpart, self.account, container)[1]]
self.assertIn(obj, objs)
def test_missing_container(self):
        # In this test, we need to put the container at handoff devices, so
        # we need more container devices than the replica count
if len(self.container_ring.devs) <= self.container_ring.replica_count:
raise SkipTest("Need devices more that replica count")
container = 'container-%s' % uuid4()
cpart, cnodes = self.container_ring.get_nodes(self.account, container)
# Kill all primary container servers
for cnode in cnodes:
kill_server((cnode['ip'], cnode['port']), self.ipport2server)
# Create container, and all of its replicas are placed at handoff
# device
try:
client.put_container(self.url, self.token, container)
except ClientException as err:
            # if the cluster doesn't have enough devices, swift may return an
            # error (e.g. when we only have 4 devices in a 3-replica cluster).
self.assertEqual(err.http_status, 503)
# Assert handoff device has a container replica
another_cnode = next(self.container_ring.get_more_nodes(cpart))
direct_client.direct_get_container(
another_cnode, cpart, self.account, container)
# Restart all primary container servers
for cnode in cnodes:
start_server((cnode['ip'], cnode['port']), self.ipport2server)
# Create container/obj
obj = 'object-%s' % uuid4()
client.put_object(self.url, self.token, container, obj, '')
# Run the object-updater
Manager(['object-updater']).once()
# Run the container-replicator, and now, container replicas
# at handoff device get moved to primary servers
Manager(['container-replicator']).once()
# Assert container replicas in primary servers, just moved by
# replicator don't know about the object
for cnode in cnodes:
self.assertFalse(direct_client.direct_get_container(
cnode, cpart, self.account, container)[1])
# since the container is empty - we can delete it!
client.delete_container(self.url, self.token, container)
# Re-run the object-updaters and now container replicas in primary
# container servers should get updated
Manager(['object-updater']).once()
# Assert all primary container servers know about container/obj
for cnode in cnodes:
objs = [o['name'] for o in direct_client.direct_get_container(
cnode, cpart, self.account, container)[1]]
self.assertIn(obj, objs)
class TestUpdateOverrides(ReplProbeTest):
"""
Use an internal client to PUT an object to proxy server,
bypassing gatekeeper so that X-Object-Sysmeta- headers can be included.
Verify that the update override headers take effect and override
values propagate to the container server.
"""
def test_update_during_PUT(self):
# verify that update sent during a PUT has override values
int_client = self.make_internal_client()
headers = {
'Content-Type': 'text/plain',
'X-Object-Sysmeta-Container-Update-Override-Etag': 'override-etag',
'X-Object-Sysmeta-Container-Update-Override-Content-Type':
'override-type',
'X-Object-Sysmeta-Container-Update-Override-Size': '1999'
}
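        # These container-update override sysmeta headers are the same ones
        # middlewares such as SLO or encryption use so that container
        # listings can report different etag/content-type/size values than
        # the object's own metadata.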
client.put_container(self.url, self.token, 'c1',
headers={'X-Storage-Policy':
self.policy.name})
int_client.upload_object(
BytesIO(b'stuff'), self.account, 'c1', 'o1', headers)
# Run the object-updaters to be sure updates are done
Manager(['object-updater']).once()
meta = int_client.get_object_metadata(self.account, 'c1', 'o1')
self.assertEqual('text/plain', meta['content-type'])
self.assertEqual('c13d88cb4cb02003daedb8a84e5d272a', meta['etag'])
self.assertEqual('5', meta['content-length'])
obj_iter = int_client.iter_objects(self.account, 'c1')
for obj in obj_iter:
if obj['name'] == 'o1':
self.assertEqual('override-etag', obj['hash'])
self.assertEqual('override-type', obj['content_type'])
self.assertEqual(1999, obj['bytes'])
break
else:
self.fail('Failed to find object o1 in listing')
class TestUpdateOverridesEC(ECProbeTest):
# verify that the container update overrides used with EC policies make
# it to the container servers when container updates are sync or async
# and possibly re-ordered with respect to object PUT and POST requests.
def test_async_update_after_PUT(self):
cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
client.put_container(self.url, self.token, 'c1',
headers={'X-Storage-Policy':
self.policy.name})
# put an object while one container server is stopped so that we force
# an async update to it
kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
content = u'stuff'
client.put_object(self.url, self.token, 'c1', 'o1', contents=content,
content_type='test/ctype')
meta = client.head_object(self.url, self.token, 'c1', 'o1')
# re-start the container server and assert that it does not yet know
# about the object
start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
self.assertFalse(direct_client.direct_get_container(
cnodes[0], cpart, self.account, 'c1')[1])
# Run the object-updaters to be sure updates are done
Manager(['object-updater']).once()
# check the re-started container server got same update as others.
# we cannot assert the actual etag value because it may be encrypted
listing_etags = set()
for cnode in cnodes:
listing = direct_client.direct_get_container(
cnode, cpart, self.account, 'c1')[1]
self.assertEqual(1, len(listing))
self.assertEqual(len(content), listing[0]['bytes'])
self.assertEqual('test/ctype', listing[0]['content_type'])
listing_etags.add(listing[0]['hash'])
self.assertEqual(1, len(listing_etags))
# check that listing meta returned to client is consistent with object
# meta returned to client
hdrs, listing = client.get_container(self.url, self.token, 'c1')
self.assertEqual(1, len(listing))
self.assertEqual('o1', listing[0]['name'])
self.assertEqual(len(content), listing[0]['bytes'])
self.assertEqual(normalize_etag(meta['etag']), listing[0]['hash'])
self.assertEqual('test/ctype', listing[0]['content_type'])
def test_update_during_POST_only(self):
# verify correct update values when PUT update is missed but then a
# POST update succeeds *before* the PUT async pending update is sent
cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
client.put_container(self.url, self.token, 'c1',
headers={'X-Storage-Policy':
self.policy.name})
# put an object while one container server is stopped so that we force
# an async update to it
kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
content = u'stuff'
client.put_object(self.url, self.token, 'c1', 'o1', contents=content,
content_type='test/ctype')
meta = client.head_object(self.url, self.token, 'c1', 'o1')
# re-start the container server and assert that it does not yet know
# about the object
start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
self.assertFalse(direct_client.direct_get_container(
cnodes[0], cpart, self.account, 'c1')[1])
int_client = self.make_internal_client()
int_client.set_object_metadata(
self.account, 'c1', 'o1', {'X-Object-Meta-Fruit': 'Tomato'})
self.assertEqual(
'Tomato',
int_client.get_object_metadata(self.account, 'c1', 'o1')
['x-object-meta-fruit']) # sanity
# check the re-started container server got same update as others.
# we cannot assert the actual etag value because it may be encrypted
listing_etags = set()
for cnode in cnodes:
listing = direct_client.direct_get_container(
cnode, cpart, self.account, 'c1')[1]
self.assertEqual(1, len(listing))
self.assertEqual(len(content), listing[0]['bytes'])
self.assertEqual('test/ctype', listing[0]['content_type'])
listing_etags.add(listing[0]['hash'])
self.assertEqual(1, len(listing_etags))
# check that listing meta returned to client is consistent with object
# meta returned to client
hdrs, listing = client.get_container(self.url, self.token, 'c1')
self.assertEqual(1, len(listing))
self.assertEqual('o1', listing[0]['name'])
self.assertEqual(len(content), listing[0]['bytes'])
self.assertEqual(normalize_etag(meta['etag']), listing[0]['hash'])
self.assertEqual('test/ctype', listing[0]['content_type'])
# Run the object-updaters to send the async pending from the PUT
Manager(['object-updater']).once()
# check container listing metadata is still correct
for cnode in cnodes:
listing = direct_client.direct_get_container(
cnode, cpart, self.account, 'c1')[1]
self.assertEqual(1, len(listing))
self.assertEqual(len(content), listing[0]['bytes'])
self.assertEqual('test/ctype', listing[0]['content_type'])
listing_etags.add(listing[0]['hash'])
self.assertEqual(1, len(listing_etags))
def test_async_updates_after_PUT_and_POST(self):
# verify correct update values when PUT update and POST updates are
# missed but then async updates are sent
cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
client.put_container(self.url, self.token, 'c1',
headers={'X-Storage-Policy':
self.policy.name})
# PUT and POST to object while one container server is stopped so that
# we force async updates to it
kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
content = u'stuff'
client.put_object(self.url, self.token, 'c1', 'o1', contents=content,
content_type='test/ctype')
meta = client.head_object(self.url, self.token, 'c1', 'o1')
int_client = self.make_internal_client()
int_client.set_object_metadata(
self.account, 'c1', 'o1', {'X-Object-Meta-Fruit': 'Tomato'})
self.assertEqual(
'Tomato',
int_client.get_object_metadata(self.account, 'c1', 'o1')
['x-object-meta-fruit']) # sanity
# re-start the container server and assert that it does not yet know
# about the object
start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
self.assertFalse(direct_client.direct_get_container(
cnodes[0], cpart, self.account, 'c1')[1])
# Run the object-updaters to send the async pendings
Manager(['object-updater']).once()
# check the re-started container server got same update as others.
# we cannot assert the actual etag value because it may be encrypted
listing_etags = set()
for cnode in cnodes:
listing = direct_client.direct_get_container(
cnode, cpart, self.account, 'c1')[1]
self.assertEqual(1, len(listing))
self.assertEqual(len(content), listing[0]['bytes'])
self.assertEqual('test/ctype', listing[0]['content_type'])
listing_etags.add(listing[0]['hash'])
self.assertEqual(1, len(listing_etags))
# check that listing meta returned to client is consistent with object
# meta returned to client
hdrs, listing = client.get_container(self.url, self.token, 'c1')
self.assertEqual(1, len(listing))
self.assertEqual('o1', listing[0]['name'])
self.assertEqual(len(content), listing[0]['bytes'])
self.assertEqual(normalize_etag(meta['etag']), listing[0]['hash'])
self.assertEqual('test/ctype', listing[0]['content_type'])
if __name__ == '__main__':
main()
| swift-master | test/probe/test_object_async_update.py |
# Copyright (c) 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import subprocess
import unittest
import uuid
from unittest import SkipTest
import six
from six.moves.urllib.parse import quote
from swift.common import direct_client, utils
from swift.common.internal_client import UnexpectedResponse
from swift.common.manager import Manager
from swift.common.memcached import MemcacheRing
from swift.common.utils import ShardRange, parse_db_filename, get_db_files, \
quorum_size, config_true_value, Timestamp, md5
from swift.container.backend import ContainerBroker, UNSHARDED, SHARDING, \
SHARDED
from swift.container.sharder import CleavingContext, ContainerSharder
from swift.container.replicator import ContainerReplicator
from swiftclient import client, get_auth, ClientException
from swift.proxy.controllers.base import get_cache_key
from swift.proxy.controllers.obj import num_container_updates
from test import annotate_failure
from test.debug_logger import debug_logger
from test.probe import PROXY_BASE_URL
from test.probe.brain import BrainSplitter
from test.probe.common import ReplProbeTest, get_server_number, \
wait_for_server_to_hangup, ENABLED_POLICIES, exclude_nodes
import mock
try:
from swiftclient.requests_compat import requests as client_requests
except ImportError:
# legacy location
from swiftclient.client import requests as client_requests
MIN_SHARD_CONTAINER_THRESHOLD = 4
MAX_SHARD_CONTAINER_THRESHOLD = 100
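# Probe clusters are expected to tune shard_container_threshold down into
# this range (enforced by _maybe_skip_test below) so that sharding can be
# exercised with only a handful of objects per container.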
class ShardCollector(object):
"""
Returns map of node to tuples of (headers, shard ranges) returned from node
"""
def __init__(self):
self.ranges = {}
def __call__(self, cnode, cpart, account, container):
self.ranges[cnode['id']] = direct_client.direct_get_container(
cnode, cpart, account, container,
headers={'X-Backend-Record-Type': 'shard'})
class BaseTestContainerSharding(ReplProbeTest):
DELIM = '-'
def _maybe_skip_test(self):
try:
self.cont_configs = [
utils.readconf(p, 'container-sharder')
for p in self.configs['container-sharder'].values()]
except ValueError:
raise SkipTest('No [container-sharder] section found in '
'container-server configs')
self.max_shard_size = max(
int(c.get('shard_container_threshold', '1000000'))
for c in self.cont_configs)
skip_reasons = []
if not (MIN_SHARD_CONTAINER_THRESHOLD <= self.max_shard_size
<= MAX_SHARD_CONTAINER_THRESHOLD):
skip_reasons.append(
'shard_container_threshold %d must be between %d and %d' %
(self.max_shard_size, MIN_SHARD_CONTAINER_THRESHOLD,
MAX_SHARD_CONTAINER_THRESHOLD))
def skip_check(reason_list, option, required):
values = {int(c.get(option, required)) for c in self.cont_configs}
if values != {required}:
reason_list.append('%s must be %s' % (option, required))
skip_check(skip_reasons, 'shard_scanner_batch_size', 10)
skip_check(skip_reasons, 'shard_batch_size', 2)
if skip_reasons:
raise SkipTest(', '.join(skip_reasons))
def _load_rings_and_configs(self):
super(BaseTestContainerSharding, self)._load_rings_and_configs()
# perform checks for skipping test before starting services
self._maybe_skip_test()
def _make_object_names(self, number, start=0):
return ['obj%s%04d' % (self.DELIM, x)
for x in range(start, start + number)]
def _setup_container_name(self):
# Container where we're PUTting objects
self.container_name = 'container%s%s' % (self.DELIM, uuid.uuid4())
def setUp(self):
client.logger.setLevel(client.logging.WARNING)
client_requests.logging.getLogger().setLevel(
client_requests.logging.WARNING)
super(BaseTestContainerSharding, self).setUp()
_, self.admin_token = get_auth(
PROXY_BASE_URL + '/auth/v1.0', 'admin:admin', 'admin')
self._setup_container_name()
self.init_brain(self.container_name)
self.sharders = Manager(['container-sharder'])
self.internal_client = self.make_internal_client()
self.logger = debug_logger('sharder-test')
self.memcache = MemcacheRing(['127.0.0.1:11211'], logger=self.logger)
self.container_replicators = Manager(['container-replicator'])
def init_brain(self, container_name):
self.container_to_shard = container_name
self.brain = BrainSplitter(
self.url, self.token, self.container_to_shard,
None, 'container')
self.brain.put_container(policy_index=int(self.policy))
def stop_container_servers(self, node_numbers=None):
if node_numbers:
ipports = []
server2ipport = {v: k for k, v in self.ipport2server.items()}
for number in self.brain.node_numbers[node_numbers]:
self.brain.servers.stop(number=number)
server = 'container%d' % number
ipports.append(server2ipport[server])
else:
ipports = [k for k, v in self.ipport2server.items()
if v.startswith('container')]
self.brain.servers.stop()
for ipport in ipports:
wait_for_server_to_hangup(ipport)
def put_objects(self, obj_names, contents=None):
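        # Returns a list of (object name, version id) pairs; the version id
        # is None unless object versioning is enabled on the container.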
conn = client.Connection(preauthurl=self.url, preauthtoken=self.token)
results = []
for obj in obj_names:
rdict = {}
conn.put_object(self.container_name, obj,
contents=contents, response_dict=rdict)
results.append((obj, rdict['headers'].get('x-object-version-id')))
return results
def delete_objects(self, obj_names_and_versions):
conn = client.Connection(preauthurl=self.url, preauthtoken=self.token)
for obj in obj_names_and_versions:
if isinstance(obj, tuple):
obj, version = obj
conn.delete_object(self.container_name, obj,
query_string='version-id=%s' % version)
else:
conn.delete_object(self.container_name, obj)
def get_container_shard_ranges(self, account=None, container=None,
include_deleted=False):
account = account if account else self.account
container = container if container else self.container_to_shard
path = self.internal_client.make_path(account, container)
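        # X-Backend-Record-Type: shard asks the container server for its
        # shard range records rather than an object listing.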
headers = {'X-Backend-Record-Type': 'shard'}
if include_deleted:
headers['X-Backend-Include-Deleted'] = 'true'
resp = self.internal_client.make_request(
'GET', path + '?format=json', headers, [200])
return [ShardRange.from_dict(sr) for sr in json.loads(resp.body)]
def direct_get_container_shard_ranges(self, account=None, container=None,
expect_failure=False):
collector = ShardCollector()
self.direct_container_op(
collector, account, container, expect_failure)
return collector.ranges
def get_storage_dir(self, part, node, account=None, container=None):
account = account or self.brain.account
container = container or self.container_name
server_type, config_number = get_server_number(
(node['ip'], node['port']), self.ipport2server)
assert server_type == 'container'
repl_server = '%s-replicator' % server_type
conf = utils.readconf(self.configs[repl_server][config_number],
section_name=repl_server)
datadir = os.path.join(conf['devices'], node['device'], 'containers')
container_hash = utils.hash_path(account, container)
return (utils.storage_directory(datadir, part, container_hash),
container_hash)
def get_db_file(self, part, node, account=None, container=None):
container_dir, container_hash = self.get_storage_dir(
part, node, account=account, container=container)
db_file = os.path.join(container_dir, container_hash + '.db')
self.assertTrue(get_db_files(db_file)) # sanity check
return db_file
def get_broker(self, part, node, account=None, container=None):
return ContainerBroker(
self.get_db_file(part, node, account, container))
def get_shard_broker(self, shard_range, node_index=0):
shard_part, shard_nodes = self.brain.ring.get_nodes(
shard_range.account, shard_range.container)
return self.get_broker(
shard_part, shard_nodes[node_index], shard_range.account,
shard_range.container)
def categorize_container_dir_content(self, account=None, container=None):
account = account or self.brain.account
container = container or self.container_name
part, nodes = self.brain.ring.get_nodes(account, container)
storage_dirs = [
self.get_storage_dir(part, node, account=account,
container=container)[0]
for node in nodes]
result = {
'shard_dbs': [],
'normal_dbs': [],
'pendings': [],
'locks': [],
'other': [],
}
for storage_dir in storage_dirs:
for f in os.listdir(storage_dir):
path = os.path.join(storage_dir, f)
if path.endswith('.db'):
hash_, epoch, ext = parse_db_filename(path)
if epoch:
result['shard_dbs'].append(path)
else:
result['normal_dbs'].append(path)
elif path.endswith('.db.pending'):
result['pendings'].append(path)
elif path.endswith('/.lock'):
result['locks'].append(path)
else:
result['other'].append(path)
if result['other']:
self.fail('Found unexpected files in storage directory:\n %s' %
'\n '.join(result['other']))
return result
def assert_dict_contains(self, expected_items, actual_dict):
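        # Compare only the keys present in expected_items: extra keys in
        # actual_dict are ignored, but a missing or mismatched expected key
        # still fails the assertion.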
ignored = set(expected_items) ^ set(actual_dict)
filtered_actual = {k: actual_dict[k]
for k in actual_dict if k not in ignored}
self.assertEqual(expected_items, filtered_actual)
def assert_shard_ranges_contiguous(self, expected_number, shard_ranges,
first_lower='', last_upper=''):
if shard_ranges and isinstance(shard_ranges[0], ShardRange):
actual_shard_ranges = sorted(shard_ranges)
else:
actual_shard_ranges = sorted(ShardRange.from_dict(d)
for d in shard_ranges)
self.assertLengthEqual(actual_shard_ranges, expected_number)
if expected_number:
with annotate_failure('Ranges %s.' % actual_shard_ranges):
self.assertEqual(first_lower, actual_shard_ranges[0].lower_str)
for x, y in zip(actual_shard_ranges, actual_shard_ranges[1:]):
self.assertEqual(x.upper, y.lower)
self.assertEqual(last_upper, actual_shard_ranges[-1].upper_str)
def assert_shard_range_equal(self, expected, actual, excludes=None):
excludes = excludes or []
expected_dict = dict(expected)
actual_dict = dict(actual)
for k in excludes:
expected_dict.pop(k, None)
actual_dict.pop(k, None)
self.assertEqual(expected_dict, actual_dict)
def assert_shard_range_lists_equal(self, expected, actual, excludes=None):
self.assertEqual(len(expected), len(actual))
for expected, actual in zip(expected, actual):
self.assert_shard_range_equal(expected, actual, excludes=excludes)
def assert_shard_range_state(self, expected_state, shard_ranges):
if shard_ranges and not isinstance(shard_ranges[0], ShardRange):
shard_ranges = [ShardRange.from_dict(data)
for data in shard_ranges]
self.assertEqual([expected_state] * len(shard_ranges),
[sr.state for sr in shard_ranges])
def assert_total_object_count(self, expected_object_count, shard_ranges):
actual = sum(sr['object_count'] for sr in shard_ranges)
self.assertEqual(expected_object_count, actual)
def assert_container_listing(self, expected_listing, req_hdrs=None):
req_hdrs = req_hdrs if req_hdrs else {}
headers, actual_listing = client.get_container(
self.url, self.token, self.container_name, headers=req_hdrs)
self.assertIn('x-container-object-count', headers)
expected_obj_count = len(expected_listing)
self.assertEqual(expected_listing, [
x['name'].encode('utf-8') if six.PY2 else x['name']
for x in actual_listing])
self.assertEqual(str(expected_obj_count),
headers['x-container-object-count'])
return headers, actual_listing
def assert_container_object_count(self, expected_obj_count):
headers = client.head_container(
self.url, self.token, self.container_name)
self.assertIn('x-container-object-count', headers)
self.assertEqual(str(expected_obj_count),
headers['x-container-object-count'])
def assert_container_post_ok(self, meta_value):
key = 'X-Container-Meta-Assert-Post-Works'
headers = {key: meta_value}
client.post_container(
self.url, self.token, self.container_name, headers=headers)
resp_headers = client.head_container(
self.url, self.token, self.container_name)
self.assertEqual(meta_value, resp_headers.get(key.lower()))
def assert_container_post_fails(self, meta_value):
key = 'X-Container-Meta-Assert-Post-Works'
headers = {key: meta_value}
with self.assertRaises(ClientException) as cm:
client.post_container(
self.url, self.token, self.container_name, headers=headers)
self.assertEqual(404, cm.exception.http_status)
def assert_container_delete_fails(self):
with self.assertRaises(ClientException) as cm:
client.delete_container(self.url, self.token, self.container_name)
self.assertEqual(409, cm.exception.http_status)
def assert_container_not_found(self):
with self.assertRaises(ClientException) as cm:
client.get_container(self.url, self.token, self.container_name)
self.assertEqual(404, cm.exception.http_status)
# check for headers leaking out while deleted
resp_headers = cm.exception.http_response_headers
self.assertNotIn('X-Container-Object-Count', resp_headers)
self.assertNotIn('X-Container-Bytes-Used', resp_headers)
self.assertNotIn('X-Timestamp', resp_headers)
self.assertNotIn('X-PUT-Timestamp', resp_headers)
def assert_container_has_shard_sysmeta(self):
node_headers = self.direct_head_container()
for node_id, headers in node_headers.items():
with annotate_failure('%s in %s' % (node_id, node_headers.keys())):
for k, v in headers.items():
if k.lower().startswith('x-container-sysmeta-shard'):
break
else:
self.fail('No shard sysmeta found in %s' % headers)
def assert_container_state(self, node, expected_state, num_shard_ranges,
account=None, container=None, part=None,
override_deleted=False):
account = account or self.account
container = container or self.container_to_shard
part = part or self.brain.part
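        # X-Backend-Record-Type: shard asks the container server to return
        # shard range records rather than an object listing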
headers = {'X-Backend-Record-Type': 'shard'}
if override_deleted:
headers['x-backend-override-deleted'] = True
headers, shard_ranges = direct_client.direct_get_container(
node, part, account, container,
headers=headers)
self.assertEqual(num_shard_ranges, len(shard_ranges))
self.assertIn('X-Backend-Sharding-State', headers)
self.assertEqual(
expected_state, headers['X-Backend-Sharding-State'])
return [ShardRange.from_dict(sr) for sr in shard_ranges]
def assert_subprocess_success(self, cmd_args):
try:
return subprocess.check_output(cmd_args, stderr=subprocess.STDOUT)
except Exception as exc:
# why not 'except CalledProcessError'? because in my py3.6 tests
# the CalledProcessError wasn't caught by that! despite type(exc)
# being a CalledProcessError, isinstance(exc, CalledProcessError)
# is False and the type has a different hash - could be
# related to https://github.com/eventlet/eventlet/issues/413
try:
# assume this is a CalledProcessError
self.fail('%s with output:\n%s' % (exc, exc.output))
except AttributeError:
raise exc
def get_part_and_node_numbers(self, shard_range):
"""Return the partition and node numbers for a shard range."""
part, nodes = self.brain.ring.get_nodes(
shard_range.account, shard_range.container)
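        # ring device ids are zero-based but probe test server numbers are
        # one-based (assuming the standard probe ring layout), hence the + 1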
return part, [n['id'] + 1 for n in nodes]
def run_sharders(self, shard_ranges, exclude_partitions=None):
"""Run the sharder on partitions for given shard ranges."""
if not isinstance(shard_ranges, (list, tuple, set)):
shard_ranges = (shard_ranges,)
exclude_partitions = exclude_partitions or []
shard_parts = []
for sr in shard_ranges:
sr_part = self.get_part_and_node_numbers(sr)[0]
if sr_part not in exclude_partitions:
shard_parts.append(str(sr_part))
partitions = ','.join(shard_parts)
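        # e.g. '--partitions=3,17' restricts this sharder pass to just those
        # partitions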
self.sharders.once(additional_args='--partitions=%s' % partitions)
def run_sharder_sequentially(self, shard_range=None):
"""Run sharder node by node on partition for given shard range."""
if shard_range:
part, node_numbers = self.get_part_and_node_numbers(shard_range)
else:
part, node_numbers = self.brain.part, self.brain.node_numbers
for node_number in node_numbers:
self.sharders.once(number=node_number,
additional_args='--partitions=%s' % part)
def run_custom_sharder(self, conf_index, custom_conf, **kwargs):
return self.run_custom_daemon(ContainerSharder, 'container-sharder',
conf_index, custom_conf, **kwargs)


class BaseAutoContainerSharding(BaseTestContainerSharding):
def _maybe_skip_test(self):
super(BaseAutoContainerSharding, self)._maybe_skip_test()
auto_shard = all(config_true_value(c.get('auto_shard', False))
for c in self.cont_configs)
if not auto_shard:
raise SkipTest('auto_shard must be true '
'in all container_sharder configs')


class TestContainerShardingNonUTF8(BaseAutoContainerSharding):
def test_sharding_listing(self):
# verify parameterised listing of a container during sharding
all_obj_names = self._make_object_names(4 * self.max_shard_size)
obj_names = all_obj_names[::2]
obj_content = 'testing'
self.put_objects(obj_names, contents=obj_content)
# choose some names approx in middle of each expected shard range
markers = [
obj_names[i] for i in range(self.max_shard_size // 4,
2 * self.max_shard_size,
self.max_shard_size // 2)]
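        # (assuming the sharder's default rows_per_shard of half the shard
        # threshold, the 2 * max_shard_size objects should split into 4 shard
        # ranges, so one marker lands roughly mid-way through each of them)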
def check_listing(objects, req_hdrs=None, **params):
req_hdrs = req_hdrs if req_hdrs else {}
qs = '&'.join('%s=%s' % (k, quote(str(v)))
for k, v in params.items())
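            # e.g. params of marker='obj0042', limit=10 produce the query
            # string 'marker=obj0042&limit=10' (values here are illustrative)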
headers, listing = client.get_container(
self.url, self.token, self.container_name, query_string=qs,
headers=req_hdrs)
listing = [x['name'].encode('utf-8') if six.PY2 else x['name']
for x in listing]
if params.get('reverse'):
marker = params.get('marker', ShardRange.MAX)
end_marker = params.get('end_marker', ShardRange.MIN)
expected = [o for o in objects if end_marker < o < marker]
expected.reverse()
else:
marker = params.get('marker', ShardRange.MIN)
end_marker = params.get('end_marker', ShardRange.MAX)
expected = [o for o in objects if marker < o < end_marker]
if 'limit' in params:
expected = expected[:params['limit']]
self.assertEqual(expected, listing)
self.assertIn('x-timestamp', headers)
self.assertIn('last-modified', headers)
self.assertIn('x-trans-id', headers)
self.assertEqual('bytes', headers.get('accept-ranges'))
self.assertEqual('application/json; charset=utf-8',
headers.get('content-type'))
def check_listing_fails(exp_status, **params):
qs = '&'.join(['%s=%s' % param for param in params.items()])
with self.assertRaises(ClientException) as cm:
client.get_container(
self.url, self.token, self.container_name, query_string=qs)
self.assertEqual(exp_status, cm.exception.http_status)
return cm.exception
def do_listing_checks(objs, hdrs=None):
hdrs = hdrs if hdrs else {}
check_listing(objs, hdrs)
check_listing(objs, hdrs, marker=markers[0], end_marker=markers[1])
check_listing(objs, hdrs, marker=markers[0], end_marker=markers[2])
check_listing(objs, hdrs, marker=markers[1], end_marker=markers[3])
check_listing(objs, hdrs, marker=markers[1], end_marker=markers[3],
limit=self.max_shard_size // 4)
check_listing(objs, hdrs, marker=markers[1], end_marker=markers[2],
limit=self.max_shard_size // 2)
check_listing(objs, hdrs, marker=markers[1], end_marker=markers[1])
check_listing(objs, hdrs, reverse=True)
check_listing(objs, hdrs, reverse=True, end_marker=markers[1])
check_listing(objs, hdrs, reverse=True, marker=markers[3],
end_marker=markers[1],
limit=self.max_shard_size // 4)
check_listing(objs, hdrs, reverse=True, marker=markers[3],
end_marker=markers[1], limit=0)
check_listing([], hdrs, marker=markers[0], end_marker=markers[0])
check_listing([], hdrs, marker=markers[0], end_marker=markers[1],
reverse=True)
check_listing(objs, hdrs, prefix='obj')
check_listing([], hdrs, prefix='zzz')
# delimiter
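            # multi-character delimiters are supported: each name is rolled up
            # at the first occurrence of the delimiter, so both queries below
            # collapse the listing into a single 'obj' + DELIM subdir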
headers, listing = client.get_container(
self.url, self.token, self.container_name,
query_string='delimiter=' + quote(self.DELIM), headers=hdrs)
self.assertEqual([{'subdir': 'obj' + self.DELIM}], listing)
headers, listing = client.get_container(
self.url, self.token, self.container_name,
query_string='delimiter=j' + quote(self.DELIM), headers=hdrs)
self.assertEqual([{'subdir': 'obj' + self.DELIM}], listing)
limit = self.cluster_info['swift']['container_listing_limit']
exc = check_listing_fails(412, limit=limit + 1)
self.assertIn(b'Maximum limit', exc.http_response_content)
exc = check_listing_fails(400, delimiter='%ff')
self.assertIn(b'not valid UTF-8', exc.http_response_content)
# sanity checks
do_listing_checks(obj_names)
# Shard the container
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
# First run the 'leader' in charge of scanning, which finds all shard
# ranges and cleaves first two
self.sharders.once(number=self.brain.node_numbers[0],
additional_args='--partitions=%s' % self.brain.part)
# Then run sharder on other nodes which will also cleave first two
# shard ranges
for n in self.brain.node_numbers[1:]:
self.sharders.once(
number=n, additional_args='--partitions=%s' % self.brain.part)
# sanity check shard range states
for node in self.brain.nodes:
self.assert_container_state(node, 'sharding', 4)
shard_ranges = self.get_container_shard_ranges()
self.assertLengthEqual(shard_ranges, 4)
self.assert_shard_range_state(ShardRange.CLEAVED, shard_ranges[:2])
self.assert_shard_range_state(ShardRange.CREATED, shard_ranges[2:])
self.assert_container_delete_fails()
self.assert_container_has_shard_sysmeta() # confirm no sysmeta deleted
self.assert_container_post_ok('sharding')
do_listing_checks(obj_names)
# put some new objects spread through entire namespace; object updates
# should be directed to the shard container (both the cleaved and the
# created shards)
new_obj_names = all_obj_names[1::4]
self.put_objects(new_obj_names, obj_content)
        # new objects that fell into the first two cleaved shard ranges are
        # reported in the listing; new objects in the yet-to-be-cleaved shard
        # ranges are not yet included in the listing because listings prefer
        # the root over the final two shards that are not yet cleaved
exp_obj_names = [o for o in obj_names + new_obj_names
if o <= shard_ranges[1].upper]
exp_obj_names += [o for o in obj_names
if o > shard_ranges[1].upper]
exp_obj_names.sort()
do_listing_checks(exp_obj_names)
# run all the sharders again and the last two shard ranges get cleaved
self.sharders.once(additional_args='--partitions=%s' % self.brain.part)
for node in self.brain.nodes:
self.assert_container_state(node, 'sharded', 4)
shard_ranges = self.get_container_shard_ranges()
self.assert_shard_range_state(ShardRange.ACTIVE, shard_ranges)
# listings are now gathered from all four shard ranges so should have
# all the specified objects
exp_obj_names = obj_names + new_obj_names
exp_obj_names.sort()
do_listing_checks(exp_obj_names)
# shard ranges may now be cached by proxy so do listings checks again
# forcing backend request
do_listing_checks(exp_obj_names, hdrs={'X-Newest': 'true'})
# post more metadata to the container and check that it is read back
# correctly from backend (using x-newest) and cache
test_headers = {'x-container-meta-test': 'testing',
'x-container-read': 'read_acl',
'x-container-write': 'write_acl',
'x-container-sync-key': 'sync_key',
# 'x-container-sync-to': 'sync_to',
'x-versions-location': 'versions',
'x-container-meta-access-control-allow-origin': 'aa',
'x-container-meta-access-control-expose-headers': 'bb',
'x-container-meta-access-control-max-age': '123'}
client.post_container(self.url, self.admin_token, self.container_name,
headers=test_headers)
headers, listing = client.get_container(
self.url, self.token, self.container_name,
headers={'X-Newest': 'true'})
exp_headers = dict(test_headers)
exp_headers.update({
'x-container-object-count': str(len(exp_obj_names)),
'x-container-bytes-used':
str(len(exp_obj_names) * len(obj_content))
})
for k, v in exp_headers.items():
self.assertIn(k, headers)
self.assertEqual(v, headers[k], dict(headers))
cache_headers, listing = client.get_container(
self.url, self.token, self.container_name)
for k, v in exp_headers.items():
self.assertIn(k, cache_headers)
self.assertEqual(v, cache_headers[k], dict(exp_headers))
# we don't expect any of these headers to be equal...
for k in ('x-timestamp', 'last-modified', 'date', 'x-trans-id',
'x-openstack-request-id'):
headers.pop(k, None)
cache_headers.pop(k, None)
self.assertEqual(headers, cache_headers)
self.assert_container_delete_fails()
self.assert_container_has_shard_sysmeta()
self.assert_container_post_ok('sharded')
# delete original objects
self.delete_objects(obj_names)
do_listing_checks(new_obj_names)
self.assert_container_delete_fails()
self.assert_container_has_shard_sysmeta()
self.assert_container_post_ok('sharded')


class TestContainerShardingFunkyNames(TestContainerShardingNonUTF8):
DELIM = '\n'
def _make_object_names(self, number):
return ['obj\n%04d%%Ff' % x for x in range(number)]
def _setup_container_name(self):
self.container_name = 'container\n%%Ff\n%s' % uuid.uuid4()


class TestContainerShardingUTF8(TestContainerShardingNonUTF8):
def _make_object_names(self, number, start=0):
# override default with names that include non-ascii chars
name_length = self.cluster_info['swift']['max_object_name_length']
obj_names = []
for x in range(start, start + number):
name = (u'obj-\u00e4\u00ea\u00ec\u00f2\u00fb\u1234-%04d' % x)
name = name.encode('utf8').ljust(name_length, b'o')
if not six.PY2:
name = name.decode('utf8')
obj_names.append(name)
return obj_names
def _setup_container_name(self):
# override default with max length name that includes non-ascii chars
super(TestContainerShardingUTF8, self)._setup_container_name()
name_length = self.cluster_info['swift']['max_container_name_length']
cont_name = \
self.container_name + u'-\u00e4\u00ea\u00ec\u00f2\u00fb\u1234'
self.container_name = cont_name.encode('utf8').ljust(name_length, b'x')
if not six.PY2:
self.container_name = self.container_name.decode('utf8')


class TestContainerShardingObjectVersioning(BaseAutoContainerSharding):
def _maybe_skip_test(self):
super(TestContainerShardingObjectVersioning, self)._maybe_skip_test()
try:
vw_config = utils.readconf(self.configs['proxy-server'],
'filter:versioned_writes')
except ValueError:
raise SkipTest('No [filter:versioned_writes] section found in '
'proxy-server configs')
allow_object_versioning = config_true_value(
vw_config.get('allow_object_versioning', False))
if not allow_object_versioning:
raise SkipTest('allow_object_versioning must be true '
'in all versioned_writes configs')
def init_brain(self, container_name):
client.put_container(self.url, self.token, container_name, headers={
'X-Storage-Policy': self.policy.name,
'X-Versions-Enabled': 'true',
})
self.container_to_shard = '\x00versions\x00' + container_name
self.brain = BrainSplitter(
self.url, self.token, self.container_to_shard,
None, 'container')
def test_sharding_listing(self):
# verify parameterised listing of a container during sharding
all_obj_names = self._make_object_names(3) * self.max_shard_size
all_obj_names.extend(self._make_object_names(self.max_shard_size,
start=3))
obj_names = all_obj_names[::2]
obj_names_and_versions = self.put_objects(obj_names)
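        # versioned listings are ordered by name and then newest version
        # first; negating the version Timestamp gives a sort key with that
        # same ordering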
def sort_key(obj_and_ver):
obj, ver = obj_and_ver
return obj, ~Timestamp(ver)
obj_names_and_versions.sort(key=sort_key)
# choose some names approx in middle of each expected shard range
markers = [
obj_names_and_versions[i]
for i in range(self.max_shard_size // 4,
2 * self.max_shard_size,
self.max_shard_size // 2)]
def check_listing(objects, **params):
params['versions'] = ''
qs = '&'.join('%s=%s' % param for param in params.items())
headers, listing = client.get_container(
self.url, self.token, self.container_name, query_string=qs)
listing = [(x['name'].encode('utf-8') if six.PY2 else x['name'],
x['version_id'])
for x in listing]
if params.get('reverse'):
marker = (
params.get('marker', ShardRange.MAX),
~Timestamp(params['version_marker'])
if 'version_marker' in params else ~Timestamp('0'),
)
end_marker = (
params.get('end_marker', ShardRange.MIN),
Timestamp('0'),
)
expected = [o for o in objects
if end_marker < sort_key(o) < marker]
expected.reverse()
else:
marker = (
params.get('marker', ShardRange.MIN),
~Timestamp(params['version_marker'])
if 'version_marker' in params else Timestamp('0'),
)
end_marker = (
params.get('end_marker', ShardRange.MAX),
~Timestamp('0'),
)
expected = [o for o in objects
if marker < sort_key(o) < end_marker]
if 'limit' in params:
expected = expected[:params['limit']]
self.assertEqual(expected, listing)
def check_listing_fails(exp_status, **params):
params['versions'] = ''
qs = '&'.join('%s=%s' % param for param in params.items())
with self.assertRaises(ClientException) as cm:
client.get_container(
self.url, self.token, self.container_name, query_string=qs)
self.assertEqual(exp_status, cm.exception.http_status)
return cm.exception
def do_listing_checks(objects):
check_listing(objects)
check_listing(objects,
marker=markers[0][0], version_marker=markers[0][1])
check_listing(objects,
marker=markers[0][0], version_marker=markers[0][1],
limit=self.max_shard_size // 10)
check_listing(objects,
marker=markers[0][0], version_marker=markers[0][1],
limit=self.max_shard_size // 4)
check_listing(objects,
marker=markers[0][0], version_marker=markers[0][1],
limit=self.max_shard_size // 2)
check_listing(objects,
marker=markers[1][0], version_marker=markers[1][1])
check_listing(objects,
marker=markers[1][0], version_marker=markers[1][1],
limit=self.max_shard_size // 10)
check_listing(objects,
marker=markers[2][0], version_marker=markers[2][1],
limit=self.max_shard_size // 4)
check_listing(objects,
marker=markers[2][0], version_marker=markers[2][1],
limit=self.max_shard_size // 2)
check_listing(objects, reverse=True)
check_listing(objects, reverse=True,
marker=markers[1][0], version_marker=markers[1][1])
check_listing(objects, prefix='obj')
check_listing([], prefix='zzz')
# delimiter
headers, listing = client.get_container(
self.url, self.token, self.container_name,
query_string='delimiter=-')
self.assertEqual([{'subdir': 'obj-'}], listing)
headers, listing = client.get_container(
self.url, self.token, self.container_name,
query_string='delimiter=j-')
self.assertEqual([{'subdir': 'obj-'}], listing)
limit = self.cluster_info['swift']['container_listing_limit']
exc = check_listing_fails(412, limit=limit + 1)
self.assertIn(b'Maximum limit', exc.http_response_content)
exc = check_listing_fails(400, delimiter='%ff')
self.assertIn(b'not valid UTF-8', exc.http_response_content)
# sanity checks
do_listing_checks(obj_names_and_versions)
# Shard the container. Use an internal_client so we get an implicit
# X-Backend-Allow-Reserved-Names header
self.internal_client.set_container_metadata(
self.account, self.container_to_shard, {
'X-Container-Sysmeta-Sharding': 'True',
})
# First run the 'leader' in charge of scanning, which finds all shard
# ranges and cleaves first two
self.sharders.once(number=self.brain.node_numbers[0],
additional_args='--partitions=%s' % self.brain.part)
# Then run sharder on other nodes which will also cleave first two
# shard ranges
for n in self.brain.node_numbers[1:]:
self.sharders.once(
number=n, additional_args='--partitions=%s' % self.brain.part)
# sanity check shard range states
for node in self.brain.nodes:
self.assert_container_state(node, 'sharding', 4)
shard_ranges = self.get_container_shard_ranges()
self.assertLengthEqual(shard_ranges, 4)
self.assert_shard_range_state(ShardRange.CLEAVED, shard_ranges[:2])
self.assert_shard_range_state(ShardRange.CREATED, shard_ranges[2:])
self.assert_container_delete_fails()
self.assert_container_has_shard_sysmeta() # confirm no sysmeta deleted
self.assert_container_post_ok('sharding')
do_listing_checks(obj_names_and_versions)
# put some new objects spread through entire namespace
new_obj_names = all_obj_names[1::4]
new_obj_names_and_versions = self.put_objects(new_obj_names)
        # new objects that fell into the first two cleaved shard ranges are
        # reported in the listing; new objects in the yet-to-be-cleaved shard
        # ranges are not yet included in the listing
exp_obj_names_and_versions = [
o for o in obj_names_and_versions + new_obj_names_and_versions
if '\x00' + o[0] <= shard_ranges[1].upper]
exp_obj_names_and_versions += [
o for o in obj_names_and_versions
if '\x00' + o[0] > shard_ranges[1].upper]
exp_obj_names_and_versions.sort(key=sort_key)
do_listing_checks(exp_obj_names_and_versions)
# run all the sharders again and the last two shard ranges get cleaved
self.sharders.once(additional_args='--partitions=%s' % self.brain.part)
for node in self.brain.nodes:
self.assert_container_state(node, 'sharded', 4)
shard_ranges = self.get_container_shard_ranges()
self.assert_shard_range_state(ShardRange.ACTIVE, shard_ranges)
exp_obj_names_and_versions = \
obj_names_and_versions + new_obj_names_and_versions
exp_obj_names_and_versions.sort(key=sort_key)
do_listing_checks(exp_obj_names_and_versions)
self.assert_container_delete_fails()
self.assert_container_has_shard_sysmeta()
self.assert_container_post_ok('sharded')
# delete original objects
self.delete_objects(obj_names_and_versions)
new_obj_names_and_versions.sort(key=sort_key)
do_listing_checks(new_obj_names_and_versions)
self.assert_container_delete_fails()
self.assert_container_has_shard_sysmeta()
self.assert_container_post_ok('sharded')


class TestContainerSharding(BaseAutoContainerSharding):
def _test_sharded_listing(self, run_replicators=False):
obj_names = self._make_object_names(self.max_shard_size)
self.put_objects(obj_names)
# Verify that we start out with normal DBs, no shards
found = self.categorize_container_dir_content()
self.assertLengthEqual(found['normal_dbs'], 3)
self.assertLengthEqual(found['shard_dbs'], 0)
for db_file in found['normal_dbs']:
broker = ContainerBroker(db_file)
self.assertIs(True, broker.is_root_container())
self.assertEqual('unsharded', broker.get_db_state())
self.assertLengthEqual(broker.get_shard_ranges(), 0)
headers, pre_sharding_listing = client.get_container(
self.url, self.token, self.container_name)
self.assertEqual(obj_names, [
x['name'].encode('utf-8') if six.PY2 else x['name']
for x in pre_sharding_listing]) # sanity
# Shard it
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
pre_sharding_headers = client.head_container(
self.url, self.admin_token, self.container_name)
self.assertEqual('True',
pre_sharding_headers.get('x-container-sharding'))
# Only run the one in charge of scanning
self.sharders.once(number=self.brain.node_numbers[0],
additional_args='--partitions=%s' % self.brain.part)
# Verify that we have one sharded db -- though the other normal DBs
# received the shard ranges that got defined
found = self.categorize_container_dir_content()
self.assertLengthEqual(found['shard_dbs'], 1)
broker = self.get_broker(self.brain.part, self.brain.nodes[0])
# sanity check - the shard db is on replica 0
self.assertEqual(found['shard_dbs'][0], broker.db_file)
self.assertIs(True, broker.is_root_container())
self.assertEqual('sharded', broker.get_db_state())
orig_root_shard_ranges = [dict(sr) for sr in broker.get_shard_ranges()]
self.assertLengthEqual(orig_root_shard_ranges, 2)
self.assert_total_object_count(len(obj_names), orig_root_shard_ranges)
self.assert_shard_ranges_contiguous(2, orig_root_shard_ranges)
self.assertEqual([ShardRange.ACTIVE, ShardRange.ACTIVE],
[sr['state'] for sr in orig_root_shard_ranges])
# Contexts should still be there, and should be complete
contexts = set([ctx.done()
for ctx, _ in CleavingContext.load_all(broker)])
self.assertEqual({True}, contexts)
self.direct_delete_container(expect_failure=True)
self.assertLengthEqual(found['normal_dbs'], 2)
for db_file in found['normal_dbs']:
broker = ContainerBroker(db_file)
self.assertIs(True, broker.is_root_container())
self.assertEqual('unsharded', broker.get_db_state())
shard_ranges = [dict(sr) for sr in broker.get_shard_ranges()]
self.assertEqual([ShardRange.CREATED, ShardRange.CREATED],
[sr['state'] for sr in shard_ranges])
# the sharded db had shard range meta_timestamps and state updated
# during cleaving, so we do not expect those to be equal on other
# nodes
self.assert_shard_range_lists_equal(
orig_root_shard_ranges, shard_ranges,
excludes=['meta_timestamp', 'state', 'state_timestamp'])
contexts = list(CleavingContext.load_all(broker))
self.assertEqual([], contexts) # length check
if run_replicators:
Manager(['container-replicator']).once()
# replication doesn't change the db file names
found = self.categorize_container_dir_content()
self.assertLengthEqual(found['shard_dbs'], 1)
self.assertLengthEqual(found['normal_dbs'], 2)
# Now that everyone has shard ranges, run *everyone*
self.sharders.once(additional_args='--partitions=%s' % self.brain.part)
# Verify that we only have shard dbs now
found = self.categorize_container_dir_content()
self.assertLengthEqual(found['shard_dbs'], 3)
self.assertLengthEqual(found['normal_dbs'], 0)
# Shards stayed the same
for db_file in found['shard_dbs']:
broker = ContainerBroker(db_file)
self.assertIs(True, broker.is_root_container())
self.assertEqual('sharded', broker.get_db_state())
            # Well, except for meta_timestamps, since the shards each reported
            # their own stats
self.assert_shard_range_lists_equal(
orig_root_shard_ranges, broker.get_shard_ranges(),
excludes=['meta_timestamp', 'state_timestamp'])
for orig, updated in zip(orig_root_shard_ranges,
broker.get_shard_ranges()):
self.assertGreaterEqual(updated.state_timestamp,
orig['state_timestamp'])
self.assertGreaterEqual(updated.meta_timestamp,
orig['meta_timestamp'])
# Contexts should still be there, and should be complete
contexts = set([ctx.done()
for ctx, _ in CleavingContext.load_all(broker)])
self.assertEqual({True}, contexts)
# Check that entire listing is available
headers, actual_listing = self.assert_container_listing(obj_names)
# ... and check some other container properties
self.assertEqual(headers['last-modified'],
pre_sharding_headers['last-modified'])
# It even works in reverse!
headers, listing = client.get_container(self.url, self.token,
self.container_name,
query_string='reverse=on')
self.assertEqual(pre_sharding_listing[::-1], listing)
# and repeat checks to use shard ranges now cached in proxy
headers, actual_listing = self.assert_container_listing(obj_names)
self.assertEqual(headers['last-modified'],
pre_sharding_headers['last-modified'])
headers, listing = client.get_container(self.url, self.token,
self.container_name,
query_string='reverse=on')
self.assertEqual(pre_sharding_listing[::-1], listing)
# Now put some new objects into first shard, taking its count to
# 3 shard ranges' worth
more_obj_names = [
'beta%03d' % x for x in range(self.max_shard_size)]
self.put_objects(more_obj_names)
# The listing includes new objects (shard ranges haven't changed, just
# their object content, so cached shard ranges are still correct)...
headers, listing = self.assert_container_listing(
more_obj_names + obj_names)
self.assertEqual(pre_sharding_listing, listing[len(more_obj_names):])
# ...but root object count is out of date until the sharders run and
# update the root
self.assert_container_object_count(len(obj_names))
# run sharders on the shard to get root updated
shard_1 = ShardRange.from_dict(orig_root_shard_ranges[0])
self.run_sharders(shard_1)
self.assert_container_object_count(len(more_obj_names + obj_names))
# we've added objects enough that we need to shard the first shard
# *again* into three new sub-shards, but nothing happens until the root
# leader identifies shard candidate...
root_shard_ranges = self.direct_get_container_shard_ranges()
for node, (hdrs, root_shards) in root_shard_ranges.items():
self.assertLengthEqual(root_shards, 2)
with annotate_failure('node %s. ' % node):
self.assertEqual(
[ShardRange.ACTIVE] * 2,
[sr['state'] for sr in root_shards])
# orig shards 0, 1 should be contiguous
self.assert_shard_ranges_contiguous(2, root_shards)
# Now run the root leader to identify shard candidate...while one of
# the shard container servers is down
shard_1_part, shard_1_nodes = self.get_part_and_node_numbers(shard_1)
self.brain.servers.stop(number=shard_1_nodes[2])
self.sharders.once(number=self.brain.node_numbers[0],
additional_args='--partitions=%s' % self.brain.part)
        # ... so the third replica of the first shard is not moved to
        # sharding state
found_for_shard = self.categorize_container_dir_content(
shard_1.account, shard_1.container)
self.assertLengthEqual(found_for_shard['normal_dbs'], 3)
self.assertEqual(
[ShardRange.SHARDING, ShardRange.SHARDING, ShardRange.ACTIVE],
[ContainerBroker(db_file).get_own_shard_range().state
for db_file in found_for_shard['normal_dbs']])
# ...then run first cycle of first shard sharders in order, leader
# first, to get to predictable state where all nodes have cleaved 2 out
# of 3 ranges...starting with first two nodes
for node_number in shard_1_nodes[:2]:
self.sharders.once(
number=node_number,
additional_args='--partitions=%s' % shard_1_part)
# ... first two replicas start sharding to sub-shards
found_for_shard = self.categorize_container_dir_content(
shard_1.account, shard_1.container)
self.assertLengthEqual(found_for_shard['shard_dbs'], 2)
for db_file in found_for_shard['shard_dbs'][:2]:
broker = ContainerBroker(db_file)
with annotate_failure('shard db file %s. ' % db_file):
self.assertIs(False, broker.is_root_container())
self.assertEqual('sharding', broker.get_db_state())
self.assertEqual(
ShardRange.SHARDING, broker.get_own_shard_range().state)
shard_shards = broker.get_shard_ranges()
self.assertEqual(
[ShardRange.CLEAVED, ShardRange.CLEAVED,
ShardRange.CREATED],
[sr.state for sr in shard_shards])
self.assert_shard_ranges_contiguous(
3, shard_shards,
first_lower=orig_root_shard_ranges[0]['lower'],
last_upper=orig_root_shard_ranges[0]['upper'])
contexts = list(CleavingContext.load_all(broker))
self.assertEqual(len(contexts), 1)
context, _lm = contexts[0]
self.assertIs(context.cleaving_done, False)
self.assertIs(context.misplaced_done, True)
self.assertEqual(context.ranges_done, 2)
self.assertEqual(context.ranges_todo, 1)
self.assertEqual(context.max_row,
self.max_shard_size * 3 // 2)
# but third replica still has no idea it should be sharding
self.assertLengthEqual(found_for_shard['normal_dbs'], 3)
broker = ContainerBroker(found_for_shard['normal_dbs'][2])
self.assertEqual(ShardRange.ACTIVE, broker.get_own_shard_range().state)
        # ...but once the sharder runs on the third replica it will learn its
        # state and fetch its sub-shard ranges during audit; note that any
        # root replica on the stopped container server also won't know about
        # the shards being in sharding state, so leave that server stopped for
        # now so that the shard fetches its state from an up-to-date root
        # replica
self.sharders.once(
number=shard_1_nodes[2],
additional_args='--partitions=%s' % shard_1_part)
# third replica is sharding and has sub-shard ranges so can start
# cleaving...
found_for_shard = self.categorize_container_dir_content(
shard_1.account, shard_1.container)
self.assertLengthEqual(found_for_shard['shard_dbs'], 3)
self.assertLengthEqual(found_for_shard['normal_dbs'], 3)
sharding_broker = ContainerBroker(found_for_shard['normal_dbs'][2])
self.assertEqual('sharding', sharding_broker.get_db_state())
self.assertEqual(
ShardRange.SHARDING, sharding_broker.get_own_shard_range().state)
self.assertEqual(3, len(sharding_broker.get_shard_ranges()))
# there may also be a sub-shard replica missing so run replicators on
# all nodes to fix that if necessary
self.brain.servers.start(number=shard_1_nodes[2])
self.replicators.once()
# Now that the replicators have all run, third replica sees cleaving
# contexts for the first two (plus its own cleaving context)
contexts = list(CleavingContext.load_all(sharding_broker))
self.assertEqual(len(contexts), 3)
broker_id = broker.get_info()['id']
self.assertIn(broker_id, [ctx[0].ref for ctx in contexts])
# check original first shard range state and sub-shards - all replicas
# should now be in consistent state
found_for_shard = self.categorize_container_dir_content(
shard_1.account, shard_1.container)
self.assertLengthEqual(found_for_shard['shard_dbs'], 3)
self.assertLengthEqual(found_for_shard['normal_dbs'], 3)
for db_file in found_for_shard['shard_dbs']:
broker = ContainerBroker(db_file)
with annotate_failure('shard db file %s. ' % db_file):
self.assertIs(False, broker.is_root_container())
self.assertEqual('sharding', broker.get_db_state())
self.assertEqual(
ShardRange.SHARDING, broker.get_own_shard_range().state)
shard_shards = broker.get_shard_ranges()
self.assertEqual(
[ShardRange.CLEAVED, ShardRange.CLEAVED,
ShardRange.CREATED],
[sr.state for sr in shard_shards])
self.assert_shard_ranges_contiguous(
3, shard_shards,
first_lower=orig_root_shard_ranges[0]['lower'],
last_upper=orig_root_shard_ranges[0]['upper'])
# check third sub-shard is in created state
sub_shard = shard_shards[2]
found_for_sub_shard = self.categorize_container_dir_content(
sub_shard.account, sub_shard.container)
self.assertFalse(found_for_sub_shard['shard_dbs'])
self.assertLengthEqual(found_for_sub_shard['normal_dbs'], 3)
for db_file in found_for_sub_shard['normal_dbs']:
broker = ContainerBroker(db_file)
with annotate_failure('sub shard db file %s. ' % db_file):
self.assertIs(False, broker.is_root_container())
self.assertEqual('unsharded', broker.get_db_state())
self.assertEqual(
ShardRange.CREATED, broker.get_own_shard_range().state)
self.assertFalse(broker.get_shard_ranges())
# check root shard ranges
root_shard_ranges = self.direct_get_container_shard_ranges()
for node, (hdrs, root_shards) in root_shard_ranges.items():
self.assertLengthEqual(root_shards, 5)
with annotate_failure('node %s. ' % node):
# shard ranges are sorted by upper, state, lower, so expect:
# sub-shards, orig shard 0, orig shard 1
self.assertEqual(
[ShardRange.CLEAVED, ShardRange.CLEAVED,
ShardRange.CREATED, ShardRange.SHARDING,
ShardRange.ACTIVE],
[sr['state'] for sr in root_shards])
# sub-shards 0, 1, 2, orig shard 1 should be contiguous
self.assert_shard_ranges_contiguous(
4, root_shards[:3] + root_shards[4:])
# orig shards 0, 1 should be contiguous
self.assert_shard_ranges_contiguous(2, root_shards[3:])
self.assert_container_listing(more_obj_names + obj_names)
self.assert_container_object_count(len(more_obj_names + obj_names))
# Before writing, kill the cache
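        # (the 'updating' cache key holds the shard ranges the proxy uses to
        # pick the shard container that an object update is sent to)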
self.memcache.delete(get_cache_key(
self.account, self.container_name, shard='updating'))
# add another object that lands in the first of the new sub-shards
self.put_objects(['alpha'])
# check that alpha object is in the first new shard
shard_listings = self.direct_get_container(shard_shards[0].account,
shard_shards[0].container)
for node, (hdrs, listing) in shard_listings.items():
with annotate_failure(node):
self.assertIn('alpha', [o['name'] for o in listing])
self.assert_container_listing(['alpha'] + more_obj_names + obj_names)
# Run sharders again so things settle.
self.run_sharders(shard_1)
# Also run replicators to settle cleaving contexts
self.replicators.once()
# check original first shard range shards
for db_file in found_for_shard['shard_dbs']:
broker = ContainerBroker(db_file)
with annotate_failure('shard db file %s. ' % db_file):
self.assertIs(False, broker.is_root_container())
self.assertEqual('sharded', broker.get_db_state())
self.assertEqual(
[ShardRange.ACTIVE] * 3,
[sr.state for sr in broker.get_shard_ranges()])
# Contexts should still be there, and should be complete
contexts = set([ctx.done()
for ctx, _
in CleavingContext.load_all(broker)])
self.assertEqual({True}, contexts)
# check root shard ranges
root_shard_ranges = self.direct_get_container_shard_ranges()
for node, (hdrs, root_shards) in root_shard_ranges.items():
# old first shard range should have been deleted
self.assertLengthEqual(root_shards, 4)
with annotate_failure('node %s. ' % node):
self.assertEqual(
[ShardRange.ACTIVE] * 4,
[sr['state'] for sr in root_shards])
self.assert_shard_ranges_contiguous(4, root_shards)
headers, final_listing = self.assert_container_listing(
['alpha'] + more_obj_names + obj_names)
# check root
found = self.categorize_container_dir_content()
self.assertLengthEqual(found['shard_dbs'], 3)
self.assertLengthEqual(found['normal_dbs'], 0)
new_shard_ranges = None
for db_file in found['shard_dbs']:
broker = ContainerBroker(db_file)
self.assertIs(True, broker.is_root_container())
self.assertEqual('sharded', broker.get_db_state())
if new_shard_ranges is None:
new_shard_ranges = broker.get_shard_ranges(
include_deleted=True)
self.assertLengthEqual(new_shard_ranges, 5)
# Second half is still there, and unchanged
self.assertIn(
dict(orig_root_shard_ranges[1], meta_timestamp=None,
state_timestamp=None),
[dict(sr, meta_timestamp=None, state_timestamp=None)
for sr in new_shard_ranges])
# But the first half split in three, then deleted
by_name = {sr.name: sr for sr in new_shard_ranges}
self.assertIn(orig_root_shard_ranges[0]['name'], by_name)
old_shard_range = by_name.pop(
orig_root_shard_ranges[0]['name'])
self.assertTrue(old_shard_range.deleted)
self.assert_shard_ranges_contiguous(4, list(by_name.values()))
else:
                # Everyone's on the same page. Well, except for
                # meta_timestamps, since the shards each reported their own
                # stats
other_shard_ranges = broker.get_shard_ranges(
include_deleted=True)
self.assert_shard_range_lists_equal(
new_shard_ranges, other_shard_ranges,
excludes=['meta_timestamp', 'state_timestamp'])
for orig, updated in zip(orig_root_shard_ranges,
other_shard_ranges):
self.assertGreaterEqual(updated.meta_timestamp,
orig['meta_timestamp'])
self.assert_container_delete_fails()
for obj in final_listing:
client.delete_object(
self.url, self.token, self.container_name, obj['name'])
# the objects won't be listed anymore
self.assert_container_listing([])
# but root container stats will not yet be aware of the deletions
self.assert_container_delete_fails()
# One server was down while the shard sharded its first two sub-shards,
# so there may be undeleted handoff db(s) for sub-shard(s) that were
# not fully replicated; run replicators now to clean up so they no
# longer report bogus stats to root.
self.replicators.once()
# Run sharder so that shard containers update the root. Do not run
# sharder on root container because that triggers shrinks which can
# cause root object count to temporarily be non-zero and prevent the
# final delete.
self.run_sharders(self.get_container_shard_ranges())
# then root is empty and can be deleted
self.assert_container_listing([])
self.assert_container_object_count(0)
client.delete_container(self.url, self.token, self.container_name)
def test_sharded_listing_no_replicators(self):
self._test_sharded_listing()
def test_sharded_listing_with_replicators(self):
self._test_sharded_listing(run_replicators=True)
def test_listing_under_populated_replica(self):
# the leader node and one other primary have all the objects and will
# cleave to 4 shard ranges, but the third primary only has 1 object in
# the final shard range
obj_names = self._make_object_names(2 * self.max_shard_size)
self.brain.servers.stop(number=self.brain.node_numbers[2])
self.put_objects(obj_names)
self.brain.servers.start(number=self.brain.node_numbers[2])
subset_obj_names = [obj_names[-1]]
self.put_objects(subset_obj_names)
self.brain.servers.stop(number=self.brain.node_numbers[2])
# sanity check: the first 2 primaries will list all objects
self.assert_container_listing(obj_names, req_hdrs={'x-newest': 'true'})
# Run sharder on the fully populated nodes, starting with the leader
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
self.sharders.once(number=self.brain.node_numbers[0],
additional_args='--partitions=%s' % self.brain.part)
self.sharders.once(number=self.brain.node_numbers[1],
additional_args='--partitions=%s' % self.brain.part)
# Verify that the first 2 primary nodes have cleaved the first batch of
# 2 shard ranges
broker = self.get_broker(self.brain.part, self.brain.nodes[0])
self.assertEqual('sharding', broker.get_db_state())
shard_ranges = [dict(sr) for sr in broker.get_shard_ranges()]
self.assertLengthEqual(shard_ranges, 4)
self.assertEqual([ShardRange.CLEAVED, ShardRange.CLEAVED,
ShardRange.CREATED, ShardRange.CREATED],
[sr['state'] for sr in shard_ranges])
self.assertEqual(
{False},
set([ctx.done() for ctx, _ in CleavingContext.load_all(broker)]))
# listing is complete (from the fully populated primaries at least);
# the root serves the listing parts for the last 2 shard ranges which
# are not yet cleaved
self.assert_container_listing(obj_names, req_hdrs={'x-newest': 'true'})
# Run the sharder on the under-populated node to get it fully
# cleaved.
self.brain.servers.start(number=self.brain.node_numbers[2])
Manager(['container-replicator']).once(
number=self.brain.node_numbers[2])
self.sharders.once(number=self.brain.node_numbers[2],
additional_args='--partitions=%s' % self.brain.part)
broker = self.get_broker(self.brain.part, self.brain.nodes[2])
self.assertEqual('sharded', broker.get_db_state())
shard_ranges = [dict(sr) for sr in broker.get_shard_ranges()]
self.assertLengthEqual(shard_ranges, 4)
self.assertEqual([ShardRange.ACTIVE, ShardRange.ACTIVE,
ShardRange.ACTIVE, ShardRange.ACTIVE],
[sr['state'] for sr in shard_ranges])
self.assertEqual(
{True, False},
set([ctx.done() for ctx, _ in CleavingContext.load_all(broker)]))
# Get a consistent view of shard range states then check listing
Manager(['container-replicator']).once(
number=self.brain.node_numbers[2])
# oops, the listing is incomplete because the last 2 listing parts are
# now served by the under-populated shard ranges.
self.assert_container_listing(
obj_names[:self.max_shard_size] + subset_obj_names,
req_hdrs={'x-newest': 'true'})
# but once another replica has completed cleaving the listing is
# complete again
self.sharders.once(number=self.brain.node_numbers[1],
additional_args='--partitions=%s' % self.brain.part)
self.assert_container_listing(obj_names, req_hdrs={'x-newest': 'true'})
def test_async_pendings(self):
obj_names = self._make_object_names(self.max_shard_size * 2)
# There are some updates *everyone* gets
self.put_objects(obj_names[::5])
        # But roll some outages so each container only gets ~2/5 more object
        # records, i.e. a total of 3/5 of the updates per container; async
        # pendings pile up
for i, n in enumerate(self.brain.node_numbers, start=1):
self.brain.servers.stop(number=n)
self.put_objects(obj_names[i::5])
self.brain.servers.start(number=n)
# But there are also 1/5 updates *no one* gets
self.brain.servers.stop()
self.put_objects(obj_names[4::5])
self.brain.servers.start()
# Shard it
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
headers = client.head_container(self.url, self.admin_token,
self.container_name)
self.assertEqual('True', headers.get('x-container-sharding'))
# sanity check
found = self.categorize_container_dir_content()
self.assertLengthEqual(found['shard_dbs'], 0)
self.assertLengthEqual(found['normal_dbs'], 3)
for db_file in found['normal_dbs']:
broker = ContainerBroker(db_file)
self.assertIs(True, broker.is_root_container())
self.assertEqual(len(obj_names) * 3 // 5,
broker.get_info()['object_count'])
# Only run the 'leader' in charge of scanning.
# Each container has ~2 * max * 3/5 objects
# which are distributed from obj000 to obj<2 * max - 1>,
# so expect 3 shard ranges to be found: the first two will be complete
# shards with max/2 objects and lower/upper bounds spaced by approx:
# (2 * max - 1)/(2 * max * 3/5) * (max/2) =~ 5/6 * max
#
# Note that during this shard cycle the leader replicates to other
# nodes so they will end up with ~2 * max * 4/5 objects.
self.sharders.once(number=self.brain.node_numbers[0],
additional_args='--partitions=%s' % self.brain.part)
# Verify that we have one shard db -- though the other normal DBs
# received the shard ranges that got defined
found = self.categorize_container_dir_content()
self.assertLengthEqual(found['shard_dbs'], 1)
node_index_zero_db = found['shard_dbs'][0]
broker = ContainerBroker(node_index_zero_db)
self.assertIs(True, broker.is_root_container())
self.assertEqual(SHARDING, broker.get_db_state())
expected_shard_ranges = broker.get_shard_ranges()
self.assertLengthEqual(expected_shard_ranges, 3)
self.assertEqual(
[ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED],
[sr.state for sr in expected_shard_ranges])
# Still have all three big DBs -- we've only cleaved 2 of the 3 shard
# ranges that got defined
self.assertLengthEqual(found['normal_dbs'], 3)
db_states = []
for db_file in found['normal_dbs']:
broker = ContainerBroker(db_file)
self.assertIs(True, broker.is_root_container())
db_states.append(broker.get_db_state())
# the sharded db had shard range meta_timestamps updated during
# cleaving, so we do not expect those to be equal on other nodes
self.assert_shard_range_lists_equal(
expected_shard_ranges, broker.get_shard_ranges(),
excludes=['meta_timestamp', 'state_timestamp', 'state'])
self.assertEqual(len(obj_names) * 3 // 5,
broker.get_info()['object_count'])
self.assertEqual([SHARDING, UNSHARDED, UNSHARDED], sorted(db_states))
# Run the other sharders so we're all in (roughly) the same state
for n in self.brain.node_numbers[1:]:
self.sharders.once(
number=n,
additional_args='--partitions=%s' % self.brain.part)
found = self.categorize_container_dir_content()
self.assertLengthEqual(found['shard_dbs'], 3)
self.assertLengthEqual(found['normal_dbs'], 3)
for db_file in found['normal_dbs']:
broker = ContainerBroker(db_file)
self.assertEqual(SHARDING, broker.get_db_state())
# no new rows
self.assertEqual(len(obj_names) * 3 // 5,
broker.get_info()['object_count'])
# Run updaters to clear the async pendings
Manager(['object-updater']).once()
# Our "big" dbs didn't take updates
for db_file in found['normal_dbs']:
broker = ContainerBroker(db_file)
self.assertEqual(len(obj_names) * 3 // 5,
broker.get_info()['object_count'])
# confirm that the async pending updates got redirected to the shards
for sr in expected_shard_ranges:
shard_listings = self.direct_get_container(sr.account,
sr.container)
for node, (hdrs, listing) in shard_listings.items():
shard_listing_names = [
o['name'].encode('utf-8') if six.PY2 else o['name']
for o in listing]
for obj in obj_names[4::5]:
if obj in sr:
self.assertIn(obj, shard_listing_names)
else:
self.assertNotIn(obj, shard_listing_names)
# The entire listing is not yet available - we have two cleaved shard
# ranges, complete with async updates, but for the remainder of the
# namespace only what landed in the original container
headers, listing = client.get_container(self.url, self.token,
self.container_name)
start_listing = [
o for o in obj_names if o <= expected_shard_ranges[1].upper]
self.assertEqual(
[x['name'].encode('utf-8') if six.PY2 else x['name']
for x in listing[:len(start_listing)]],
start_listing)
# we can't assert much about the remaining listing, other than that
# there should be something
self.assertTrue(
[x['name'].encode('utf-8') if six.PY2 else x['name']
for x in listing[len(start_listing):]])
self.assertIn('x-container-object-count', headers)
self.assertEqual(str(len(listing)),
headers['x-container-object-count'])
headers, listing = client.get_container(self.url, self.token,
self.container_name,
query_string='reverse=on')
self.assertEqual([x['name'].encode('utf-8') if six.PY2 else x['name']
for x in listing[-len(start_listing):]],
list(reversed(start_listing)))
self.assertIn('x-container-object-count', headers)
self.assertEqual(str(len(listing)),
headers['x-container-object-count'])
self.assertTrue(
[x['name'].encode('utf-8') if six.PY2 else x['name']
for x in listing[:-len(start_listing)]])
# Run the sharders again to get everything to settle
self.sharders.once()
found = self.categorize_container_dir_content()
self.assertLengthEqual(found['shard_dbs'], 3)
self.assertLengthEqual(found['normal_dbs'], 0)
# now all shards have been cleaved we should get the complete listing
headers, listing = client.get_container(self.url, self.token,
self.container_name)
self.assertEqual([x['name'].encode('utf-8') if six.PY2 else x['name']
for x in listing],
obj_names)
def test_shrinking(self):
int_client = self.make_internal_client()
def check_node_data(node_data, exp_hdrs, exp_obj_count, exp_shards,
exp_sharded_root_range=False):
hdrs, range_data = node_data
self.assert_dict_contains(exp_hdrs, hdrs)
sharded_root_range = False
other_range_data = []
for data in range_data:
sr = ShardRange.from_dict(data)
if (sr.account == self.account and
sr.container == self.container_name and
sr.state == ShardRange.SHARDED):
# only expect one root range
self.assertFalse(sharded_root_range, range_data)
sharded_root_range = True
self.assertEqual(ShardRange.MIN, sr.lower, sr)
self.assertEqual(ShardRange.MAX, sr.upper, sr)
else:
# include active root range in further assertions
other_range_data.append(data)
self.assertEqual(exp_sharded_root_range, sharded_root_range)
self.assert_shard_ranges_contiguous(exp_shards, other_range_data)
self.assert_total_object_count(exp_obj_count, other_range_data)
def check_shard_nodes_data(node_data, expected_state='unsharded',
expected_shards=0, exp_obj_count=0,
exp_sharded_root_range=False):
# checks that shard range is consistent on all nodes
root_path = '%s/%s' % (self.account, self.container_name)
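            # every shard container carries sysmeta recording the (quoted)
            # path of its root container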
exp_shard_hdrs = {
'X-Container-Sysmeta-Shard-Quoted-Root': quote(root_path),
'X-Backend-Sharding-State': expected_state}
object_counts = []
bytes_used = []
for node_id, node_data in node_data.items():
with annotate_failure('Node id %s.' % node_id):
check_node_data(
node_data, exp_shard_hdrs, exp_obj_count,
expected_shards, exp_sharded_root_range)
hdrs = node_data[0]
object_counts.append(int(hdrs['X-Container-Object-Count']))
bytes_used.append(int(hdrs['X-Container-Bytes-Used']))
if len(set(object_counts)) != 1:
self.fail('Inconsistent object counts: %s' % object_counts)
if len(set(bytes_used)) != 1:
self.fail('Inconsistent bytes used: %s' % bytes_used)
return object_counts[0], bytes_used[0]
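        # single-element list so that the nested closure below can mutate the
        # repeat counter (a py2-compatible stand-in for nonlocal)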
repeat = [0]
def do_shard_then_shrink():
repeat[0] += 1
obj_names = ['obj-%s-%03d' % (repeat[0], x)
for x in range(self.max_shard_size)]
self.put_objects(obj_names)
            # these two object names will fall at the start of the first
            # shard range...
alpha = 'alpha-%s' % repeat[0]
beta = 'beta-%s' % repeat[0]
# Enable sharding
client.post_container(
self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
# sanity check
self.assert_container_listing(obj_names)
# Only run the one in charge of scanning
self.sharders.once(
number=self.brain.node_numbers[0],
additional_args='--partitions=%s' % self.brain.part)
# check root container
root_nodes_data = self.direct_get_container_shard_ranges()
self.assertEqual(3, len(root_nodes_data))
# nodes on which sharder has not run are still in unsharded state
# but have had shard ranges replicated to them
exp_obj_count = len(obj_names)
exp_hdrs = {'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': str(exp_obj_count)}
node_id = self.brain.node_numbers[1] - 1
check_node_data(
root_nodes_data[node_id], exp_hdrs, exp_obj_count, 2)
node_id = self.brain.node_numbers[2] - 1
check_node_data(
root_nodes_data[node_id], exp_hdrs, exp_obj_count, 2)
            # only the one that ran the sharder is in sharded state
exp_hdrs['X-Backend-Sharding-State'] = 'sharded'
node_id = self.brain.node_numbers[0] - 1
check_node_data(
root_nodes_data[node_id], exp_hdrs, exp_obj_count, 2)
orig_range_data = root_nodes_data[node_id][1]
orig_shard_ranges = [ShardRange.from_dict(r)
for r in orig_range_data]
# check first shard
shard_nodes_data = self.direct_get_container_shard_ranges(
orig_shard_ranges[0].account, orig_shard_ranges[0].container)
obj_count, bytes_used = check_shard_nodes_data(shard_nodes_data)
total_shard_object_count = obj_count
# check second shard
shard_nodes_data = self.direct_get_container_shard_ranges(
orig_shard_ranges[1].account, orig_shard_ranges[1].container)
obj_count, bytes_used = check_shard_nodes_data(shard_nodes_data)
total_shard_object_count += obj_count
self.assertEqual(exp_obj_count, total_shard_object_count)
# Now that everyone has shard ranges, run *everyone*
self.sharders.once(
additional_args='--partitions=%s' % self.brain.part)
# all root container nodes should now be in sharded state
root_nodes_data = self.direct_get_container_shard_ranges()
self.assertEqual(3, len(root_nodes_data))
for node_id, node_data in root_nodes_data.items():
with annotate_failure('Node id %s.' % node_id):
check_node_data(node_data, exp_hdrs, exp_obj_count, 2)
            # run updaters to update the .shards account; shard containers
            # have not updated the account since having objects replicated
            # to them
self.updaters.once()
shard_cont_count, shard_obj_count = int_client.get_account_info(
orig_shard_ranges[0].account, [204])
self.assertEqual(2 * repeat[0], shard_cont_count)
# the shards account should always have zero object count to avoid
# double accounting
self.assertEqual(0, shard_obj_count)
# checking the listing also refreshes proxy container info cache so
# that the proxy becomes aware that container is sharded and will
# now look up the shard target for subsequent updates
self.assert_container_listing(obj_names)
# Before writing, kill the cache
self.memcache.delete(get_cache_key(
self.account, self.container_name, shard='updating'))
# delete objects from first shard range
first_shard_objects = [obj_name for obj_name in obj_names
if obj_name <= orig_shard_ranges[0].upper]
for obj in first_shard_objects:
client.delete_object(
self.url, self.token, self.container_name, obj)
with self.assertRaises(ClientException):
client.get_object(
self.url, self.token, self.container_name, obj)
second_shard_objects = [obj_name for obj_name in obj_names
if obj_name > orig_shard_ranges[1].lower]
self.assert_container_listing(second_shard_objects)
# put a new object 'alpha' in first shard range
self.put_objects([alpha])
second_shard_objects = [obj_name for obj_name in obj_names
if obj_name > orig_shard_ranges[1].lower]
self.assert_container_listing([alpha] + second_shard_objects)
# while container servers are down, but proxy has container info in
# cache from recent listing, put another object; this update will
# lurk in async pending until the updaters run again; because all
# the root container servers are down and therefore cannot respond
# to a GET for a redirect target, the object update will default to
# being targeted at the root container
self.stop_container_servers()
# Before writing, kill the cache
self.memcache.delete(get_cache_key(
self.account, self.container_name, shard='updating'))
self.put_objects([beta])
self.brain.servers.start()
async_pendings = self.gather_async_pendings(
self.get_all_object_nodes())
num_container_replicas = len(self.brain.nodes)
num_obj_replicas = self.policy.object_ring.replica_count
expected_num_updates = num_container_updates(
num_container_replicas, quorum_size(num_container_replicas),
num_obj_replicas, self.policy.quorum)
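            # each object server persists at most one async_pending for a
            # given object update, so pendings are capped by the number of
            # object replicas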
expected_num_pendings = min(expected_num_updates, num_obj_replicas)
# sanity check
with annotate_failure('policy %s. ' % self.policy):
self.assertLengthEqual(async_pendings, expected_num_pendings)
# root object count is not updated...
self.assert_container_object_count(len(obj_names))
self.assert_container_listing([alpha] + second_shard_objects)
root_nodes_data = self.direct_get_container_shard_ranges()
self.assertEqual(3, len(root_nodes_data))
for node_id, node_data in root_nodes_data.items():
with annotate_failure('Node id %s.' % node_id):
check_node_data(node_data, exp_hdrs, exp_obj_count, 2)
range_data = node_data[1]
self.assert_shard_range_lists_equal(
orig_range_data, range_data,
excludes=['meta_timestamp', 'state_timestamp'])
# ...until the sharders run and update root; reclaim tombstones so
# that the shard is shrinkable
shard_0_part = self.get_part_and_node_numbers(
orig_shard_ranges[0])[0]
for conf_index in self.configs['container-sharder'].keys():
self.run_custom_sharder(conf_index, {'reclaim_age': 0},
override_partitions=[shard_0_part])
exp_obj_count = len(second_shard_objects) + 1
self.assert_container_object_count(exp_obj_count)
self.assert_container_listing([alpha] + second_shard_objects)
# root sharder finds donor, acceptor pair and pushes changes
self.sharders.once(
additional_args='--partitions=%s' % self.brain.part)
self.assert_container_listing([alpha] + second_shard_objects)
# run sharder on donor to shrink and replicate to acceptor
self.run_sharders(orig_shard_ranges[0])
self.assert_container_listing([alpha] + second_shard_objects)
# run sharder on acceptor to update root with stats
self.run_sharders(orig_shard_ranges[1])
self.assert_container_listing([alpha] + second_shard_objects)
self.assert_container_object_count(len(second_shard_objects) + 1)
# check root container
root_nodes_data = self.direct_get_container_shard_ranges()
self.assertEqual(3, len(root_nodes_data))
exp_hdrs['X-Container-Object-Count'] = str(exp_obj_count)
for node_id, node_data in root_nodes_data.items():
with annotate_failure('Node id %s.' % node_id):
# NB now only *one* shard range in root
check_node_data(node_data, exp_hdrs, exp_obj_count, 1)
# the acceptor shard is intact..
shard_nodes_data = self.direct_get_container_shard_ranges(
orig_shard_ranges[1].account, orig_shard_ranges[1].container)
obj_count, bytes_used = check_shard_nodes_data(shard_nodes_data)
# all objects should now be in this shard
self.assertEqual(exp_obj_count, obj_count)
# the donor shard is also still intact
donor = orig_shard_ranges[0]
shard_nodes_data = self.direct_get_container_shard_ranges(
donor.account, donor.container)
# donor has the acceptor shard range but not the root shard range
# because the root is still in ACTIVE state;
# the donor's shard range will have the acceptor's projected stats
obj_count, bytes_used = check_shard_nodes_data(
shard_nodes_data, expected_state='sharded', expected_shards=1,
exp_obj_count=len(second_shard_objects) + 1)
# but the donor is empty and so reports zero stats
self.assertEqual(0, obj_count)
self.assertEqual(0, bytes_used)
            # check the donor's own shard range state
part, nodes = self.brain.ring.get_nodes(
donor.account, donor.container)
for node in nodes:
with annotate_failure(node):
broker = self.get_broker(
part, node, donor.account, donor.container)
own_sr = broker.get_own_shard_range()
self.assertEqual(ShardRange.SHRUNK, own_sr.state)
self.assertTrue(own_sr.deleted)
            # delete all the second shard's objects apart from 'alpha'
for obj in second_shard_objects:
client.delete_object(
self.url, self.token, self.container_name, obj)
self.assert_container_listing([alpha])
# run sharders: second range should not shrink away yet because it
# has tombstones
self.sharders.once() # second shard updates root stats
self.assert_container_listing([alpha])
self.sharders.once() # root finds shrinkable shard
self.assert_container_listing([alpha])
self.sharders.once() # shards shrink themselves
self.assert_container_listing([alpha])
# the acceptor shard is intact...
shard_nodes_data = self.direct_get_container_shard_ranges(
orig_shard_ranges[1].account, orig_shard_ranges[1].container)
obj_count, bytes_used = check_shard_nodes_data(shard_nodes_data)
self.assertEqual(1, obj_count)
# run sharders to reclaim tombstones so that the second shard is
# shrinkable
shard_1_part = self.get_part_and_node_numbers(
orig_shard_ranges[1])[0]
for conf_index in self.configs['container-sharder'].keys():
self.run_custom_sharder(conf_index, {'reclaim_age': 0},
override_partitions=[shard_1_part])
self.assert_container_listing([alpha])
# run sharders so second range shrinks away, requires up to 2
# cycles
self.sharders.once() # root finds shrinkable shard
self.assert_container_listing([alpha])
self.sharders.once() # shards shrink themselves
self.assert_container_listing([alpha])
# the second shard range has sharded and is empty
shard_nodes_data = self.direct_get_container_shard_ranges(
orig_shard_ranges[1].account, orig_shard_ranges[1].container)
check_shard_nodes_data(
shard_nodes_data, expected_state='sharded', expected_shards=1,
exp_obj_count=0)
# check root container
root_nodes_data = self.direct_get_container_shard_ranges()
self.assertEqual(3, len(root_nodes_data))
exp_hdrs = {'X-Backend-Sharding-State': 'collapsed',
# just the alpha object
'X-Container-Object-Count': '1'}
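            # ('collapsed' means the root sharded previously but now has no
            # shard ranges left and serves object rows from its own db again)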
for node_id, node_data in root_nodes_data.items():
with annotate_failure('Node id %s.' % node_id):
# NB now no shard ranges in root
check_node_data(node_data, exp_hdrs, 0, 0)
# delete the alpha object
client.delete_object(
self.url, self.token, self.container_name, alpha)
# should now be able to delete the *apparently* empty container
client.delete_container(self.url, self.token, self.container_name)
self.assert_container_not_found()
self.direct_head_container(expect_failure=True)
            # and the container stays deleted even after sharders run and
            # shards send updates
self.sharders.once()
self.assert_container_not_found()
self.direct_head_container(expect_failure=True)
# now run updaters to deal with the async pending for the beta
# object
self.updaters.once()
# and the container is revived!
self.assert_container_listing([beta])
# finally, clear out the container
client.delete_object(
self.url, self.token, self.container_name, beta)
do_shard_then_shrink()
# repeat from starting point of a collapsed and previously deleted
# container
do_shard_then_shrink()
def test_delete_root_reclaim(self):
all_obj_names = self._make_object_names(self.max_shard_size)
self.put_objects(all_obj_names)
# Shard the container
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
for n in self.brain.node_numbers:
self.sharders.once(
number=n, additional_args='--partitions=%s' % self.brain.part)
# sanity checks
for node in self.brain.nodes:
self.assert_container_state(node, 'sharded', 2)
self.assert_container_delete_fails()
self.assert_container_has_shard_sysmeta()
self.assert_container_post_ok('sharded')
self.assert_container_listing(all_obj_names)
# delete all objects - updates redirected to shards
self.delete_objects(all_obj_names)
self.assert_container_listing([])
self.assert_container_post_ok('has objects')
# root not yet updated with shard stats
self.assert_container_object_count(len(all_obj_names))
self.assert_container_delete_fails()
self.assert_container_has_shard_sysmeta()
# run sharder on shard containers to update root stats
shard_ranges = self.get_container_shard_ranges()
self.assertLengthEqual(shard_ranges, 2)
self.run_sharders(shard_ranges)
self.assert_container_listing([])
self.assert_container_post_ok('empty')
self.assert_container_object_count(0)
# and now we can delete it!
client.delete_container(self.url, self.token, self.container_name)
self.assert_container_post_fails('deleted')
self.assert_container_not_found()
# see if it will reclaim
Manager(['container-updater']).once()
for conf_file in self.configs['container-replicator'].values():
conf = utils.readconf(conf_file, 'container-replicator')
conf['reclaim_age'] = 0
ContainerReplicator(conf).run_once()
# we don't expect warnings from sharder root audits
for conf_index in self.configs['container-sharder'].keys():
sharder = self.run_custom_sharder(conf_index, {})
self.assertEqual([], sharder.logger.get_lines_for_level('warning'))
# until the root wants to start reclaiming but we haven't shrunk yet!
found_warning = False
for conf_index in self.configs['container-sharder'].keys():
sharder = self.run_custom_sharder(conf_index, {'reclaim_age': 0})
warnings = sharder.logger.get_lines_for_level('warning')
if warnings:
self.assertTrue(warnings[0].startswith(
'Reclaimable db stuck waiting for shrinking'))
self.assertEqual(1, len(warnings))
found_warning = True
self.assertTrue(found_warning)
# TODO: shrink empty shards and assert everything reclaims
def _setup_replication_scenario(self, num_shards, extra_objs=('alpha',)):
# Get cluster to state where 2 replicas are sharding or sharded but 3rd
# replica is unsharded and has an object that the first 2 are missing.
# put objects while all servers are up
obj_names = self._make_object_names(
num_shards * self.max_shard_size // 2)
self.put_objects(obj_names)
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
node_numbers = self.brain.node_numbers
# run replicators first time to get sync points set
self.replicators.once()
# stop the leader node and one other server
self.stop_container_servers(slice(0, 2))
# ...then put one more object in first shard range namespace
self.put_objects(extra_objs)
# start leader and first other server, stop third server
for number in node_numbers[:2]:
self.brain.servers.start(number=number)
self.brain.servers.stop(number=node_numbers[2])
self.assert_container_listing(obj_names) # sanity check
# shard the container - first two shard ranges are cleaved
for number in node_numbers[:2]:
self.sharders.once(
number=number,
additional_args='--partitions=%s' % self.brain.part)
self.assert_container_listing(obj_names) # sanity check
return obj_names
def test_replication_to_sharding_container(self):
# verify that replication from an unsharded replica to a sharding
# replica does not replicate rows but does replicate shard ranges
obj_names = self._setup_replication_scenario(3)
for node in self.brain.nodes[:2]:
self.assert_container_state(node, 'sharding', 3)
# bring third server back up, run replicator
node_numbers = self.brain.node_numbers
self.brain.servers.start(number=node_numbers[2])
# sanity check...
self.assert_container_state(self.brain.nodes[2], 'unsharded', 0)
self.replicators.once(number=node_numbers[2])
# check db files unchanged
found = self.categorize_container_dir_content()
self.assertLengthEqual(found['shard_dbs'], 2)
self.assertLengthEqual(found['normal_dbs'], 3)
# the 'alpha' object is NOT replicated to the two sharded nodes
for node in self.brain.nodes[:2]:
broker = self.get_broker(self.brain.part, node)
with annotate_failure(
'Node id %s in %s' % (node['id'], self.brain.nodes[:2])):
self.assertFalse(broker.get_objects())
self.assert_container_state(node, 'sharding', 3)
self.brain.servers.stop(number=node_numbers[2])
self.assert_container_listing(obj_names)
# all nodes now have shard ranges
self.brain.servers.start(number=node_numbers[2])
node_data = self.direct_get_container_shard_ranges()
for node, (hdrs, shard_ranges) in node_data.items():
with annotate_failure(node):
self.assert_shard_ranges_contiguous(3, shard_ranges)
# complete cleaving third shard range on first two nodes
self.brain.servers.stop(number=node_numbers[2])
for number in node_numbers[:2]:
self.sharders.once(
number=number,
additional_args='--partitions=%s' % self.brain.part)
# ...and now they are in sharded state
self.assert_container_state(self.brain.nodes[0], 'sharded', 3)
self.assert_container_state(self.brain.nodes[1], 'sharded', 3)
# ...still no 'alpha' object in listing
self.assert_container_listing(obj_names)
        # run the sharder on the third server; the alpha object is included
        # in the shards that it cleaves
self.brain.servers.start(number=node_numbers[2])
self.assert_container_state(self.brain.nodes[2], 'unsharded', 3)
self.sharders.once(number=node_numbers[2],
additional_args='--partitions=%s' % self.brain.part)
self.assert_container_state(self.brain.nodes[2], 'sharding', 3)
self.sharders.once(number=node_numbers[2],
additional_args='--partitions=%s' % self.brain.part)
self.assert_container_state(self.brain.nodes[2], 'sharded', 3)
self.assert_container_listing(['alpha'] + obj_names)
def test_replication_to_sharded_container(self):
# verify that replication from an unsharded replica to a sharded
# replica does not replicate rows but does replicate shard ranges
obj_names = self._setup_replication_scenario(2)
for node in self.brain.nodes[:2]:
self.assert_container_state(node, 'sharded', 2)
# sanity check
found = self.categorize_container_dir_content()
self.assertLengthEqual(found['shard_dbs'], 2)
self.assertLengthEqual(found['normal_dbs'], 1)
for node in self.brain.nodes[:2]:
broker = self.get_broker(self.brain.part, node)
info = broker.get_info()
with annotate_failure(
'Node id %s in %s' % (node['id'], self.brain.nodes[:2])):
self.assertEqual(len(obj_names), info['object_count'])
self.assertFalse(broker.get_objects())
# bring third server back up, run replicator
node_numbers = self.brain.node_numbers
self.brain.servers.start(number=node_numbers[2])
# sanity check...
self.assert_container_state(self.brain.nodes[2], 'unsharded', 0)
self.replicators.once(number=node_numbers[2])
# check db files unchanged
found = self.categorize_container_dir_content()
self.assertLengthEqual(found['shard_dbs'], 2)
self.assertLengthEqual(found['normal_dbs'], 1)
# the 'alpha' object is NOT replicated to the two sharded nodes
for node in self.brain.nodes[:2]:
broker = self.get_broker(self.brain.part, node)
with annotate_failure(
'Node id %s in %s' % (node['id'], self.brain.nodes[:2])):
self.assertFalse(broker.get_objects())
self.assert_container_state(node, 'sharded', 2)
self.brain.servers.stop(number=node_numbers[2])
self.assert_container_listing(obj_names)
# all nodes now have shard ranges
self.brain.servers.start(number=node_numbers[2])
node_data = self.direct_get_container_shard_ranges()
for node, (hdrs, shard_ranges) in node_data.items():
with annotate_failure(node):
self.assert_shard_ranges_contiguous(2, shard_ranges)
        # run the sharder on the third server; the alpha object is included
        # in the shards that it cleaves
self.assert_container_state(self.brain.nodes[2], 'unsharded', 2)
self.sharders.once(number=node_numbers[2],
additional_args='--partitions=%s' % self.brain.part)
self.assert_container_state(self.brain.nodes[2], 'sharded', 2)
self.assert_container_listing(['alpha'] + obj_names)
def test_sharding_requires_sufficient_replication(self):
# verify that cleaving only progresses if each cleaved shard range is
# sufficiently replicated
# put enough objects for 4 shard ranges
obj_names = self._make_object_names(2 * self.max_shard_size)
self.put_objects(obj_names)
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
node_numbers = self.brain.node_numbers
leader_node = self.brain.nodes[0]
leader_num = node_numbers[0]
# run replicators first time to get sync points set
self.replicators.once()
# start sharding on the leader node
self.sharders.once(number=leader_num,
additional_args='--partitions=%s' % self.brain.part)
shard_ranges = self.assert_container_state(leader_node, 'sharding', 4)
self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 2,
[sr.state for sr in shard_ranges])
# Check the current progress. It shouldn't be complete.
recon = direct_client.direct_get_recon(leader_node, "sharding")
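        # (mock.ANY compares equal to anything; it stands in for fields
        # like timestamps, paths and file sizes that vary between runs)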
expected_in_progress = {'all': [{'account': 'AUTH_test',
'active': 0,
'cleaved': 2,
'created': 2,
'found': 0,
'db_state': 'sharding',
'state': 'sharding',
'error': None,
'file_size': mock.ANY,
'meta_timestamp': mock.ANY,
'node_index': 0,
'object_count': len(obj_names),
'container': mock.ANY,
'path': mock.ANY,
'root': mock.ANY}]}
actual = recon['sharding_stats']['sharding']['sharding_in_progress']
self.assertEqual(expected_in_progress, actual)
# stop *all* container servers for third shard range
sr_part, sr_node_nums = self.get_part_and_node_numbers(shard_ranges[2])
for node_num in sr_node_nums:
self.brain.servers.stop(number=node_num)
# attempt to continue sharding on the leader node
self.sharders.once(number=leader_num,
additional_args='--partitions=%s' % self.brain.part)
# no cleaving progress was made
for node_num in sr_node_nums:
self.brain.servers.start(number=node_num)
shard_ranges = self.assert_container_state(leader_node, 'sharding', 4)
self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 2,
[sr.state for sr in shard_ranges])
# stop two of the servers for third shard range, not including any
# server that happens to be the leader node
stopped = []
for node_num in sr_node_nums:
if node_num != leader_num:
self.brain.servers.stop(number=node_num)
stopped.append(node_num)
if len(stopped) >= 2:
break
self.assertLengthEqual(stopped, 2) # sanity check
# attempt to continue sharding on the leader node
self.sharders.once(number=leader_num,
additional_args='--partitions=%s' % self.brain.part)
# no cleaving progress was made
for node_num in stopped:
self.brain.servers.start(number=node_num)
shard_ranges = self.assert_container_state(leader_node, 'sharding', 4)
self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 2,
[sr.state for sr in shard_ranges])
# stop just one of the servers for third shard range
stopped = []
for node_num in sr_node_nums:
if node_num != leader_num:
self.brain.servers.stop(number=node_num)
stopped.append(node_num)
break
self.assertLengthEqual(stopped, 1) # sanity check
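        # (with two of the three shard replicas still up, each cleaved
        # shard can be replicated to a quorum of its nodes, so this time
        # the sharder is allowed to make progress)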
# attempt to continue sharding the container
self.sharders.once(number=leader_num,
additional_args='--partitions=%s' % self.brain.part)
# this time cleaving completed
self.brain.servers.start(number=stopped[0])
shard_ranges = self.assert_container_state(leader_node, 'sharded', 4)
self.assertEqual([ShardRange.ACTIVE] * 4,
[sr.state for sr in shard_ranges])
        # Check the leader's progress again; this time it should be complete
recon = direct_client.direct_get_recon(leader_node, "sharding")
expected_in_progress = {'all': [{'account': 'AUTH_test',
'active': 4,
'cleaved': 0,
'created': 0,
'found': 0,
'db_state': 'sharded',
'state': 'sharded',
'error': None,
'file_size': mock.ANY,
'meta_timestamp': mock.ANY,
'node_index': 0,
'object_count': len(obj_names),
'container': mock.ANY,
'path': mock.ANY,
'root': mock.ANY}]}
actual = recon['sharding_stats']['sharding']['sharding_in_progress']
self.assertEqual(expected_in_progress, actual)
def test_sharded_delete(self):
all_obj_names = self._make_object_names(self.max_shard_size)
self.put_objects(all_obj_names)
# Shard the container
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
for n in self.brain.node_numbers:
self.sharders.once(
number=n, additional_args='--partitions=%s' % self.brain.part)
# sanity checks
for node in self.brain.nodes:
self.assert_container_state(node, 'sharded', 2)
self.assert_container_delete_fails()
self.assert_container_has_shard_sysmeta()
self.assert_container_post_ok('sharded')
self.assert_container_listing(all_obj_names)
# delete all objects - updates redirected to shards
self.delete_objects(all_obj_names)
self.assert_container_listing([])
self.assert_container_post_ok('has objects')
# root not yet updated with shard stats
self.assert_container_object_count(len(all_obj_names))
self.assert_container_delete_fails()
self.assert_container_has_shard_sysmeta()
# run sharder on shard containers to update root stats
shard_ranges = self.get_container_shard_ranges()
self.assertLengthEqual(shard_ranges, 2)
self.run_sharders(shard_ranges)
self.assert_container_listing([])
self.assert_container_post_ok('empty')
self.assert_container_object_count(0)
# put a new object - update redirected to shard
self.put_objects(['alpha'])
self.assert_container_listing(['alpha'])
self.assert_container_object_count(0)
# before root learns about new object in shard, delete the container
client.delete_container(self.url, self.token, self.container_name)
self.assert_container_post_fails('deleted')
self.assert_container_not_found()
# run the sharders to update root with shard stats
self.run_sharders(shard_ranges)
self.assert_container_listing(['alpha'])
self.assert_container_object_count(1)
self.assert_container_delete_fails()
self.assert_container_post_ok('revived')
def _do_test_sharded_can_get_objects_different_policy(self,
policy_idx,
new_policy_idx):
# create sharded container
client.delete_container(self.url, self.token, self.container_name)
self.brain.put_container(policy_index=int(policy_idx))
all_obj_names = self._make_object_names(self.max_shard_size)
self.put_objects(all_obj_names)
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
for n in self.brain.node_numbers:
self.sharders.once(
number=n, additional_args='--partitions=%s' % self.brain.part)
# empty and delete
self.delete_objects(all_obj_names)
shard_ranges = self.get_container_shard_ranges()
self.run_sharders(shard_ranges)
client.delete_container(self.url, self.token, self.container_name)
# re-create with new_policy_idx
self.brain.put_container(policy_index=int(new_policy_idx))
# we re-use shard ranges
new_shard_ranges = self.get_container_shard_ranges()
self.assertEqual(shard_ranges, new_shard_ranges)
self.put_objects(all_obj_names)
# The shard is still on the old policy index, but the root spi
# is passed to shard container server and is used to pull objects
# of that index out.
self.assert_container_listing(all_obj_names)
        # although a HEAD request reports the object count for the shard spi
self.assert_container_object_count(0)
# we can force the listing to use the old policy index in which case we
# expect no objects to be listed
try:
resp = self.internal_client.make_request(
'GET',
path=self.internal_client.make_path(
self.account, self.container_name),
headers={'X-Backend-Storage-Policy-Index': str(policy_idx)},
acceptable_statuses=(2,),
params={'format': 'json'}
)
except UnexpectedResponse as exc:
self.fail('Listing failed with %s' % exc.resp.status)
self.assertEqual([], json.loads(b''.join(resp.app_iter)))
@unittest.skipIf(len(ENABLED_POLICIES) < 2, "Need more than one policy")
def test_sharded_can_get_objects_different_policy(self):
policy_idx = self.policy.idx
new_policy_idx = [pol.idx for pol in ENABLED_POLICIES
if pol != self.policy.idx][0]
self._do_test_sharded_can_get_objects_different_policy(
policy_idx, new_policy_idx)
@unittest.skipIf(len(ENABLED_POLICIES) < 2, "Need more than one policy")
def test_sharded_can_get_objects_different_policy_reversed(self):
policy_idx = [pol.idx for pol in ENABLED_POLICIES
if pol != self.policy][0]
new_policy_idx = self.policy.idx
self._do_test_sharded_can_get_objects_different_policy(
policy_idx, new_policy_idx)
def test_object_update_redirection(self):
all_obj_names = self._make_object_names(self.max_shard_size)
self.put_objects(all_obj_names)
# Shard the container
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
for n in self.brain.node_numbers:
self.sharders.once(
number=n, additional_args='--partitions=%s' % self.brain.part)
# sanity checks
for node in self.brain.nodes:
self.assert_container_state(node, 'sharded', 2)
self.assert_container_delete_fails()
self.assert_container_has_shard_sysmeta()
self.assert_container_post_ok('sharded')
self.assert_container_listing(all_obj_names)
# delete all objects - updates redirected to shards
self.delete_objects(all_obj_names)
self.assert_container_listing([])
self.assert_container_post_ok('has objects')
# run sharder on shard containers to update root stats; reclaim
# the tombstones so that the shards appear to be shrinkable
shard_ranges = self.get_container_shard_ranges()
self.assertLengthEqual(shard_ranges, 2)
shard_partitions = [self.get_part_and_node_numbers(sr)[0]
for sr in shard_ranges]
for conf_index in self.configs['container-sharder'].keys():
self.run_custom_sharder(conf_index, {'reclaim_age': 0},
override_partitions=shard_partitions)
self.assert_container_object_count(0)
# First, test a misplaced object moving from one shard to another.
# with one shard server down, put a new 'alpha' object...
shard_part, shard_nodes = self.get_part_and_node_numbers(
shard_ranges[0])
self.brain.servers.stop(number=shard_nodes[2])
self.put_objects(['alpha'])
self.assert_container_listing(['alpha'])
self.assert_container_object_count(0)
self.assertLengthEqual(
self.gather_async_pendings(self.get_all_object_nodes()), 1)
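        # (the object server whose container update targeted the stopped
        # shard replica falls back to leaving an async_pending on disk)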
self.brain.servers.start(number=shard_nodes[2])
# run sharder on root to discover first shrink candidate
self.sharders.once(additional_args='--partitions=%s' % self.brain.part)
# then run sharder on the shard node without the alpha object
self.sharders.once(additional_args='--partitions=%s' % shard_part,
number=shard_nodes[2])
# root sees first shard has shrunk
self.assertLengthEqual(self.get_container_shard_ranges(), 1)
        # cached shard ranges still show the first shard range as active, so
        # a listing will include 'alpha' if the shard listing is fetched from
        # node 0 or 1 but not from node 2; for predictability we use x-newest
        # so that shard ranges come from the root and only the second shard
        # range is used for listing, leaving the alpha object unlisted
self.assert_container_listing([], req_hdrs={'x-newest': 'true'})
self.assert_container_object_count(0)
# run the updaters: the async pending update will be redirected from
# shrunk shard to second shard
self.updaters.once()
self.assert_container_listing(['alpha'])
self.assert_container_object_count(0) # root not yet updated
# then run sharder on other shard nodes to complete shrinking
for number in shard_nodes[:2]:
self.sharders.once(additional_args='--partitions=%s' % shard_part,
number=number)
# and get root updated
self.run_sharders(shard_ranges[1])
self.assert_container_listing(['alpha'])
self.assert_container_object_count(1)
self.assertLengthEqual(self.get_container_shard_ranges(), 1)
# Now we have just one active shard, test a misplaced object moving
# from that shard to the root.
# with one shard server down, delete 'alpha' and put a 'beta' object...
shard_part, shard_nodes = self.get_part_and_node_numbers(
shard_ranges[1])
self.brain.servers.stop(number=shard_nodes[2])
# Before writing, kill the cache
self.memcache.delete(get_cache_key(
self.account, self.container_name, shard='updating'))
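        # (deleting the proxy's cached 'updating' shard ranges forces a fresh
        # lookup of the update target for the writes below)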
self.delete_objects(['alpha'])
self.put_objects(['beta'])
self.assert_container_listing(['beta'])
self.assert_container_object_count(1)
self.assertLengthEqual(
self.gather_async_pendings(self.get_all_object_nodes()), 2)
self.brain.servers.start(number=shard_nodes[2])
# run sharder on root to discover second shrink candidate - root is not
# yet aware of the beta object
self.sharders.once(additional_args='--partitions=%s' % self.brain.part)
# then run sharder on the shard node without the beta object, to shrink
# it to root - note this moves stale copy of alpha to the root db
self.sharders.once(additional_args='--partitions=%s' % shard_part,
number=shard_nodes[2])
# now there are no active shards
self.assertFalse(self.get_container_shard_ranges())
# with other two shard servers down, listing won't find beta object
for number in shard_nodes[:2]:
self.brain.servers.stop(number=number)
self.assert_container_listing(['alpha'])
self.assert_container_object_count(1)
# run the updaters: the async pending update will be redirected from
# shrunk shard to the root
self.updaters.once()
self.assert_container_listing(['beta'])
self.assert_container_object_count(1)
def test_misplaced_object_movement(self):
def merge_object(shard_range, name, deleted=0):
# it's hard to get a test to put a misplaced object into a shard,
            # so this hack is used to force an object record directly into a
            # shard container db. Note: the actual object won't exist, we're
            # just using this to test object records in container dbs.
shard_part, shard_nodes = self.brain.ring.get_nodes(
shard_range.account, shard_range.container)
shard_broker = self.get_broker(
shard_part, shard_nodes[0], shard_range.account,
shard_range.container)
shard_broker.merge_items(
[{'name': name, 'created_at': Timestamp.now().internal,
'size': 0, 'content_type': 'text/plain',
'etag': md5(usedforsecurity=False).hexdigest(),
'deleted': deleted,
'storage_policy_index': shard_broker.storage_policy_index}])
return shard_nodes[0]
all_obj_names = self._make_object_names(self.max_shard_size)
self.put_objects(all_obj_names)
# Shard the container
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
for n in self.brain.node_numbers:
self.sharders.once(
number=n, additional_args='--partitions=%s' % self.brain.part)
# sanity checks
for node in self.brain.nodes:
self.assert_container_state(node, 'sharded', 2)
self.assert_container_delete_fails()
self.assert_container_has_shard_sysmeta()
self.assert_container_post_ok('sharded')
self.assert_container_listing(all_obj_names)
# delete all objects in first shard range - updates redirected to shard
shard_ranges = self.get_container_shard_ranges()
self.assertLengthEqual(shard_ranges, 2)
shard_0_objects = [name for name in all_obj_names
if name in shard_ranges[0]]
shard_1_objects = [name for name in all_obj_names
if name in shard_ranges[1]]
self.delete_objects(shard_0_objects)
self.assert_container_listing(shard_1_objects)
self.assert_container_post_ok('has objects')
# run sharder on first shard container to update root stats; reclaim
# the tombstones so that the shard appears to be shrinkable
shard_0_part = self.get_part_and_node_numbers(shard_ranges[0])[0]
for conf_index in self.configs['container-sharder'].keys():
self.run_custom_sharder(conf_index, {'reclaim_age': 0},
override_partitions=[shard_0_part])
self.assert_container_object_count(len(shard_1_objects))
# First, test a misplaced object moving from one shard to another.
# run sharder on root to discover first shrink candidate
self.sharders.once(additional_args='--partitions=%s' % self.brain.part)
# then run sharder on first shard range to shrink it
self.run_sharders(shard_ranges[0])
# force a misplaced object into the shrunken shard range to simulate
# a client put that was in flight when it started to shrink
misplaced_node = merge_object(shard_ranges[0], 'alpha', deleted=0)
# root sees first shard has shrunk, only second shard range used for
# listing so alpha object not in listing
self.assertLengthEqual(self.get_container_shard_ranges(), 1)
self.assert_container_listing(shard_1_objects)
self.assert_container_object_count(len(shard_1_objects))
# until sharder runs on that node to move the misplaced object to the
# second shard range
shard_part, shard_nodes_numbers = self.get_part_and_node_numbers(
shard_ranges[0])
self.sharders.once(additional_args='--partitions=%s' % shard_part,
number=misplaced_node['id'] + 1)
self.assert_container_listing(['alpha'] + shard_1_objects)
# root not yet updated
self.assert_container_object_count(len(shard_1_objects))
# run sharder to get root updated
self.run_sharders(shard_ranges[1])
self.assert_container_listing(['alpha'] + shard_1_objects)
self.assert_container_object_count(len(shard_1_objects) + 1)
self.assertLengthEqual(self.get_container_shard_ranges(), 1)
# Now we have just one active shard, test a misplaced object moving
# from that shard to the root.
# delete most objects from second shard range, reclaim the tombstones,
# and run sharder on root to discover second shrink candidate
self.delete_objects(shard_1_objects)
shard_1_part = self.get_part_and_node_numbers(shard_ranges[1])[0]
for conf_index in self.configs['container-sharder'].keys():
self.run_custom_sharder(conf_index, {'reclaim_age': 0},
override_partitions=[shard_1_part])
self.sharders.once(additional_args='--partitions=%s' % self.brain.part)
# then run sharder on the shard node to shrink it to root - note this
# moves alpha to the root db
self.run_sharders(shard_ranges[1])
# now there are no active shards
self.assertFalse(self.get_container_shard_ranges())
# force some misplaced object updates into second shrunk shard range
merge_object(shard_ranges[1], 'alpha', deleted=1)
misplaced_node = merge_object(shard_ranges[1], 'beta', deleted=0)
# root is not yet aware of them
self.assert_container_listing(['alpha'])
self.assert_container_object_count(1)
# until sharder runs on that node to move the misplaced object
shard_part, shard_nodes_numbers = self.get_part_and_node_numbers(
shard_ranges[1])
self.sharders.once(additional_args='--partitions=%s' % shard_part,
number=misplaced_node['id'] + 1)
self.assert_container_listing(['beta'])
self.assert_container_object_count(1)
self.assert_container_delete_fails()
def test_misplaced_object_movement_from_deleted_shard(self):
def merge_object(shard_range, name, deleted=0):
# it's hard to get a test to put a misplaced object into a shard,
            # so this hack is used to force an object record directly into a
            # shard container db. Note: the actual object won't exist, we're
            # just using this to test object records in container dbs.
shard_part, shard_nodes = self.brain.ring.get_nodes(
shard_range.account, shard_range.container)
shard_broker = self.get_shard_broker(shard_range)
# In this test we want to merge into a deleted container shard
shard_broker.delete_db(Timestamp.now().internal)
shard_broker.merge_items(
[{'name': name, 'created_at': Timestamp.now().internal,
'size': 0, 'content_type': 'text/plain',
'etag': md5(usedforsecurity=False).hexdigest(),
'deleted': deleted,
'storage_policy_index': shard_broker.storage_policy_index}])
return shard_nodes[0]
all_obj_names = self._make_object_names(self.max_shard_size)
self.put_objects(all_obj_names)
# Shard the container
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
for n in self.brain.node_numbers:
self.sharders.once(
number=n, additional_args='--partitions=%s' % self.brain.part)
# sanity checks
for node in self.brain.nodes:
self.assert_container_state(node, 'sharded', 2)
self.assert_container_delete_fails()
self.assert_container_has_shard_sysmeta()
self.assert_container_post_ok('sharded')
self.assert_container_listing(all_obj_names)
# delete all objects in first shard range - updates redirected to shard
shard_ranges = self.get_container_shard_ranges()
self.assertLengthEqual(shard_ranges, 2)
shard_0_objects = [name for name in all_obj_names
if name in shard_ranges[0]]
shard_1_objects = [name for name in all_obj_names
if name in shard_ranges[1]]
self.delete_objects(shard_0_objects)
self.assert_container_listing(shard_1_objects)
self.assert_container_post_ok('has objects')
# run sharder on first shard container to update root stats
shard_0_part = self.get_part_and_node_numbers(shard_ranges[0])[0]
for conf_index in self.configs['container-sharder'].keys():
self.run_custom_sharder(conf_index, {'reclaim_age': 0},
override_partitions=[shard_0_part])
self.assert_container_object_count(len(shard_1_objects))
# First, test a misplaced object moving from one shard to another.
# run sharder on root to discover first shrink candidate
self.sharders.once(additional_args='--partitions=%s' % self.brain.part)
# then run sharder on first shard range to shrink it
self.run_sharders(shard_ranges[0])
# force a misplaced object into the shrunken shard range to simulate
# a client put that was in flight when it started to shrink
misplaced_node = merge_object(shard_ranges[0], 'alpha', deleted=0)
# root sees first shard has shrunk, only second shard range used for
# listing so alpha object not in listing
self.assertLengthEqual(self.get_container_shard_ranges(), 1)
self.assert_container_listing(shard_1_objects)
self.assert_container_object_count(len(shard_1_objects))
# until sharder runs on that node to move the misplaced object to the
# second shard range
shard_part, shard_nodes_numbers = self.get_part_and_node_numbers(
shard_ranges[0])
self.sharders.once(additional_args='--partitions=%s' % shard_part,
number=misplaced_node['id'] + 1)
self.assert_container_listing(['alpha'] + shard_1_objects)
# root not yet updated
self.assert_container_object_count(len(shard_1_objects))
# check the deleted shard did not push the wrong root path into the
# other container
for replica in 0, 1, 2:
shard_x_broker = self.get_shard_broker(shard_ranges[1], replica)
self.assertEqual("%s/%s" % (self.account, self.container_name),
shard_x_broker.root_path)
# run the sharder of the existing shard to update the root stats
# to prove the misplaced object was moved to the other shard _and_
# the other shard still has the correct root because it updates root's
# stats
self.run_sharders(shard_ranges[1])
self.assert_container_object_count(len(shard_1_objects) + 1)
def test_replication_to_sharded_container_from_unsharded_old_primary(self):
primary_ids = [n['id'] for n in self.brain.nodes]
handoff_node = next(n for n in self.brain.ring.devs
if n['id'] not in primary_ids)
# start with two sharded replicas and one unsharded with extra object
obj_names = self._setup_replication_scenario(2)
for node in self.brain.nodes[:2]:
self.assert_container_state(node, 'sharded', 2)
# Fake a ring change - copy unsharded db which has no shard ranges to a
# handoff to create illusion of a new unpopulated primary node
node_numbers = self.brain.node_numbers
new_primary_node = self.brain.nodes[2]
new_primary_node_number = node_numbers[2]
new_primary_dir, container_hash = self.get_storage_dir(
self.brain.part, new_primary_node)
old_primary_dir, container_hash = self.get_storage_dir(
self.brain.part, handoff_node)
utils.mkdirs(os.path.dirname(old_primary_dir))
shutil.move(new_primary_dir, old_primary_dir)
# make the cluster more or less "healthy" again
self.brain.servers.start(number=new_primary_node_number)
# get a db on every node...
client.put_container(self.url, self.token, self.container_name)
self.assertTrue(os.path.exists(os.path.join(
new_primary_dir, container_hash + '.db')))
found = self.categorize_container_dir_content()
self.assertLengthEqual(found['normal_dbs'], 1) # "new" primary
self.assertLengthEqual(found['shard_dbs'], 2) # existing primaries
# catastrophic failure! drive dies and is replaced on unchanged primary
failed_node = self.brain.nodes[0]
failed_dir, _container_hash = self.get_storage_dir(
self.brain.part, failed_node)
shutil.rmtree(failed_dir)
# replicate the "old primary" to everybody except the "new primary"
self.brain.servers.stop(number=new_primary_node_number)
self.replicators.once(number=handoff_node['id'] + 1)
# We're willing to rsync the retiring db to the failed primary.
# This may or may not have shard ranges, depending on the order in
# which we hit the primaries, but it definitely *doesn't* have an
# epoch in its name yet. All objects are replicated.
self.assertTrue(os.path.exists(os.path.join(
failed_dir, container_hash + '.db')))
self.assertLengthEqual(os.listdir(failed_dir), 1)
broker = self.get_broker(self.brain.part, failed_node)
self.assertLengthEqual(broker.get_objects(), len(obj_names) + 1)
# The other out-of-date primary is within usync range but objects are
# not replicated to it because the handoff db learns about shard ranges
broker = self.get_broker(self.brain.part, self.brain.nodes[1])
self.assertLengthEqual(broker.get_objects(), 0)
# Handoff db still exists and now has shard ranges!
self.assertTrue(os.path.exists(os.path.join(
old_primary_dir, container_hash + '.db')))
broker = self.get_broker(self.brain.part, handoff_node)
shard_ranges = broker.get_shard_ranges()
self.assertLengthEqual(shard_ranges, 2)
self.assert_container_state(handoff_node, 'unsharded', 2)
# Replicate again, this time *including* "new primary"
self.brain.servers.start(number=new_primary_node_number)
self.replicators.once(number=handoff_node['id'] + 1)
# Ordinarily, we would have rsync_then_merge'd to "new primary"
# but instead we wait
broker = self.get_broker(self.brain.part, new_primary_node)
self.assertLengthEqual(broker.get_objects(), 0)
shard_ranges = broker.get_shard_ranges()
self.assertLengthEqual(shard_ranges, 2)
# so the next time the sharder comes along, it can push rows out
# and delete the big db
self.sharders.once(number=handoff_node['id'] + 1,
additional_args='--partitions=%s' % self.brain.part)
self.assert_container_state(handoff_node, 'sharded', 2)
self.assertFalse(os.path.exists(os.path.join(
old_primary_dir, container_hash + '.db')))
# the sharded db hangs around until replication confirms durability
# first attempt is not sufficiently successful
self.brain.servers.stop(number=node_numbers[0])
self.replicators.once(number=handoff_node['id'] + 1)
self.assertTrue(os.path.exists(old_primary_dir))
self.assert_container_state(handoff_node, 'sharded', 2)
# second attempt is successful and handoff db is deleted
self.brain.servers.start(number=node_numbers[0])
self.replicators.once(number=handoff_node['id'] + 1)
self.assertFalse(os.path.exists(old_primary_dir))
# run all the sharders, get us into a consistent state
self.sharders.once(additional_args='--partitions=%s' % self.brain.part)
self.assert_container_listing(['alpha'] + obj_names)
def test_replication_to_empty_new_primary_from_sharding_old_primary(self):
primary_ids = [n['id'] for n in self.brain.nodes]
handoff_node = next(n for n in self.brain.ring.devs
if n['id'] not in primary_ids)
num_shards = 3
obj_names = self._make_object_names(
num_shards * self.max_shard_size // 2)
self.put_objects(obj_names)
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
# run replicators first time to get sync points set
self.replicators.once()
# start sharding on only the leader node
leader_node = self.brain.nodes[0]
leader_node_number = self.brain.node_numbers[0]
self.sharders.once(number=leader_node_number)
self.assert_container_state(leader_node, 'sharding', 3)
for node in self.brain.nodes[1:]:
self.assert_container_state(node, 'unsharded', 3)
# Fake a ring change - copy leader node db to a handoff to create
# illusion of a new unpopulated primary leader node
new_primary_dir, container_hash = self.get_storage_dir(
self.brain.part, leader_node)
old_primary_dir, container_hash = self.get_storage_dir(
self.brain.part, handoff_node)
utils.mkdirs(os.path.dirname(old_primary_dir))
shutil.move(new_primary_dir, old_primary_dir)
self.assert_container_state(handoff_node, 'sharding', 3)
# run replicator on handoff node to create a fresh db on new primary
self.assertFalse(os.path.exists(new_primary_dir))
self.replicators.once(number=handoff_node['id'] + 1)
self.assertTrue(os.path.exists(new_primary_dir))
self.assert_container_state(leader_node, 'sharded', 3)
broker = self.get_broker(self.brain.part, leader_node)
shard_ranges = broker.get_shard_ranges()
self.assertLengthEqual(shard_ranges, 3)
self.assertEqual(
[ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED],
[sr.state for sr in shard_ranges])
# db still exists on handoff
self.assertTrue(os.path.exists(old_primary_dir))
self.assert_container_state(handoff_node, 'sharding', 3)
# continue sharding it...
self.sharders.once(number=handoff_node['id'] + 1)
self.assert_container_state(leader_node, 'sharded', 3)
# now handoff is fully sharded the replicator will delete it
self.replicators.once(number=handoff_node['id'] + 1)
self.assertFalse(os.path.exists(old_primary_dir))
# all primaries now have active shard ranges but only one is in sharded
# state
self.assert_container_state(leader_node, 'sharded', 3)
for node in self.brain.nodes[1:]:
self.assert_container_state(node, 'unsharded', 3)
node_data = self.direct_get_container_shard_ranges()
for node_id, (hdrs, shard_ranges) in node_data.items():
with annotate_failure(
                    'node id %s from %s' % (node_id, node_data.keys())):
self.assert_shard_range_state(ShardRange.ACTIVE, shard_ranges)
# check handoff cleaved all objects before it was deleted - stop all
# but leader node so that listing is fetched from shards
for number in self.brain.node_numbers[1:3]:
self.brain.servers.stop(number=number)
self.assert_container_listing(obj_names)
for number in self.brain.node_numbers[1:3]:
self.brain.servers.start(number=number)
self.sharders.once()
self.assert_container_state(leader_node, 'sharded', 3)
for node in self.brain.nodes[1:]:
self.assert_container_state(node, 'sharding', 3)
self.sharders.once()
for node in self.brain.nodes:
self.assert_container_state(node, 'sharded', 3)
self.assert_container_listing(obj_names)
def test_sharded_account_updates(self):
# verify that .shards account updates have zero object count and bytes
# to avoid double accounting
all_obj_names = self._make_object_names(self.max_shard_size)
self.put_objects(all_obj_names, contents='xyz')
# Shard the container into 2 shards
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
for n in self.brain.node_numbers:
self.sharders.once(
number=n, additional_args='--partitions=%s' % self.brain.part)
# sanity checks
for node in self.brain.nodes:
shard_ranges = self.assert_container_state(node, 'sharded', 2)
self.assert_container_delete_fails()
self.assert_container_has_shard_sysmeta()
self.assert_container_post_ok('sharded')
self.assert_container_listing(all_obj_names)
# run the updaters to get account stats updated
self.updaters.once()
# check user account stats
metadata = self.internal_client.get_account_metadata(self.account)
self.assertEqual(1, int(metadata.get('x-account-container-count')))
self.assertEqual(self.max_shard_size,
int(metadata.get('x-account-object-count')))
self.assertEqual(3 * self.max_shard_size,
int(metadata.get('x-account-bytes-used')))
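        # (each object body is 'xyz', i.e. 3 bytes, hence 3 * max_shard_size)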
# check hidden .shards account stats
metadata = self.internal_client.get_account_metadata(
shard_ranges[0].account)
self.assertEqual(2, int(metadata.get('x-account-container-count')))
self.assertEqual(0, int(metadata.get('x-account-object-count')))
self.assertEqual(0, int(metadata.get('x-account-bytes-used')))


class TestContainerShardingMoreUTF8(TestContainerSharding):
def _make_object_names(self, number):
# override default with names that include non-ascii chars
name_length = self.cluster_info['swift']['max_object_name_length']
obj_names = []
for x in range(number):
name = (u'obj-\u00e4\u00ea\u00ec\u00f2\u00fb-%04d' % x)
name = name.encode('utf8').ljust(name_length, b'o')
if not six.PY2:
name = name.decode('utf8')
obj_names.append(name)
return obj_names
def _setup_container_name(self):
# override default with max length name that includes non-ascii chars
super(TestContainerShardingMoreUTF8, self)._setup_container_name()
name_length = self.cluster_info['swift']['max_container_name_length']
cont_name = \
self.container_name + u'-\u00e4\u00ea\u00ec\u00f2\u00fb\u1234'
self.container_name = cont_name.encode('utf8').ljust(name_length, b'x')
if not six.PY2:
self.container_name = self.container_name.decode('utf8')


class TestManagedContainerSharding(BaseTestContainerSharding):
'''Test sharding using swift-manage-shard-ranges'''
def sharders_once(self, **kwargs):
# inhibit auto_sharding regardless of the config setting
additional_args = kwargs.get('additional_args', [])
if not isinstance(additional_args, list):
additional_args = [additional_args]
additional_args.append('--no-auto-shard')
kwargs['additional_args'] = additional_args
self.sharders.once(**kwargs)
def test_manage_shard_ranges(self):
obj_names = self._make_object_names(10)
self.put_objects(obj_names)
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
# run replicators first time to get sync points set
self.replicators.once()
# sanity check: we don't have nearly enough objects for this to shard
# automatically
self.sharders_once(number=self.brain.node_numbers[0],
additional_args='--partitions=%s' % self.brain.part)
self.assert_container_state(self.brain.nodes[0], 'unsharded', 0)
self.assert_subprocess_success([
'swift-manage-shard-ranges',
self.get_db_file(self.brain.part, self.brain.nodes[0]),
'find_and_replace', '3', '--enable', '--minimum-shard-size', '2'])
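        # (with 10 objects and rows_per_shard=3 the trailing 1-object range
        # falls below --minimum-shard-size 2 and is merged into its
        # neighbour, giving shard ranges of 3, 3 and 4 objects)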
self.assert_container_state(self.brain.nodes[0], 'unsharded', 3)
# "Run container-replicator to replicate them to other nodes."
self.replicators.once()
# "Run container-sharder on all nodes to shard the container."
# first pass cleaves 2 shards
self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
self.assert_container_state(self.brain.nodes[0], 'sharding', 3)
self.assert_container_state(self.brain.nodes[1], 'sharding', 3)
shard_ranges = self.assert_container_state(
self.brain.nodes[2], 'sharding', 3)
self.assert_container_listing(obj_names)
# make the un-cleaved shard update the root container...
self.assertEqual([3, 3, 4], [sr.object_count for sr in shard_ranges])
shard_part, nodes = self.get_part_and_node_numbers(shard_ranges[2])
self.sharders_once(additional_args='--partitions=%s' % shard_part)
shard_ranges = self.assert_container_state(
self.brain.nodes[2], 'sharding', 3)
# ...it does not report zero-stats despite being empty, because it has
# not yet reached CLEAVED state
self.assertEqual([3, 3, 4], [sr.object_count for sr in shard_ranges])
# second pass cleaves final shard
self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
# Everybody's settled
self.assert_container_state(self.brain.nodes[0], 'sharded', 3)
self.assert_container_state(self.brain.nodes[1], 'sharded', 3)
shard_ranges = self.assert_container_state(
self.brain.nodes[2], 'sharded', 3)
self.assertEqual([3, 3, 4], [sr.object_count for sr in shard_ranges])
self.assert_container_listing(obj_names)
def test_manage_shard_ranges_compact(self):
# verify shard range compaction using swift-manage-shard-ranges
obj_names = self._make_object_names(8)
self.put_objects(obj_names)
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
# run replicators first time to get sync points set, and get container
# sharded into 4 shards
self.replicators.once()
self.assert_subprocess_success([
'swift-manage-shard-ranges',
self.get_db_file(self.brain.part, self.brain.nodes[0]),
'find_and_replace', '2', '--enable'])
self.assert_container_state(self.brain.nodes[0], 'unsharded', 4)
self.replicators.once()
# run sharders twice to cleave all 4 shard ranges
self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
self.assert_container_state(self.brain.nodes[0], 'sharded', 4)
self.assert_container_state(self.brain.nodes[1], 'sharded', 4)
self.assert_container_state(self.brain.nodes[2], 'sharded', 4)
self.assert_container_listing(obj_names)
# now compact some ranges; use --max-shrinking to allow 2 shrinking
# shards
self.assert_subprocess_success([
'swift-manage-shard-ranges',
self.get_db_file(self.brain.part, self.brain.nodes[0]),
'compact', '--max-expanding', '1', '--max-shrinking', '2',
'--yes'])
shard_ranges = self.assert_container_state(
self.brain.nodes[0], 'sharded', 4)
self.assertEqual([ShardRange.SHRINKING] * 2 + [ShardRange.ACTIVE] * 2,
[sr.state for sr in shard_ranges])
self.replicators.once()
self.sharders_once()
# check there's now just 2 remaining shard ranges
shard_ranges = self.assert_container_state(
self.brain.nodes[0], 'sharded', 2)
self.assertEqual([ShardRange.ACTIVE] * 2,
[sr.state for sr in shard_ranges])
self.assert_container_listing(obj_names, req_hdrs={'X-Newest': 'True'})
# root container own shard range should still be SHARDED
for i, node in enumerate(self.brain.nodes):
with annotate_failure('node[%d]' % i):
                broker = self.get_broker(self.brain.part, node)
self.assertEqual(ShardRange.SHARDED,
broker.get_own_shard_range().state)
# now compact the final two shard ranges to the root; use
# --max-shrinking to allow 2 shrinking shards
self.assert_subprocess_success([
'swift-manage-shard-ranges',
self.get_db_file(self.brain.part, self.brain.nodes[0]),
'compact', '--yes', '--max-shrinking', '2'])
shard_ranges = self.assert_container_state(
self.brain.nodes[0], 'sharded', 2)
self.assertEqual([ShardRange.SHRINKING] * 2,
[sr.state for sr in shard_ranges])
self.replicators.once()
self.sharders_once()
self.assert_container_state(self.brain.nodes[0], 'collapsed', 0)
self.assert_container_listing(obj_names, req_hdrs={'X-Newest': 'True'})
# root container own shard range should now be ACTIVE
for i, node in enumerate(self.brain.nodes):
with annotate_failure('node[%d]' % i):
                broker = self.get_broker(self.brain.part, node)
self.assertEqual(ShardRange.ACTIVE,
broker.get_own_shard_range().state)
def test_manage_shard_ranges_repair_root(self):
# provoke overlaps in root container and repair
obj_names = self._make_object_names(16)
self.put_objects(obj_names)
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
# run replicators first time to get sync points set
self.replicators.once()
# find 4 shard ranges on nodes[0] - let's denote these ranges 0.0, 0.1,
# 0.2 and 0.3 that are installed with epoch_0
self.assert_subprocess_success([
'swift-manage-shard-ranges',
self.get_db_file(self.brain.part, self.brain.nodes[0]),
'find_and_replace', '4', '--enable'])
shard_ranges_0 = self.assert_container_state(self.brain.nodes[0],
'unsharded', 4)
# *Also* go find 3 shard ranges on *another node*, like a dumb-dumb -
# let's denote these ranges 1.0, 1.1 and 1.2 that are installed with
# epoch_1
self.assert_subprocess_success([
'swift-manage-shard-ranges',
self.get_db_file(self.brain.part, self.brain.nodes[1]),
'find_and_replace', '7', '--enable'])
shard_ranges_1 = self.assert_container_state(self.brain.nodes[1],
'unsharded', 3)
# Run sharder in specific order so that the replica with the older
# epoch_0 starts sharding first - this will prove problematic later!
# On first pass the first replica passes audit, creates shards and then
# syncs shard ranges with the other replicas, so it has a mix of 0.*
# shard ranges in CLEAVED state and 1.* ranges in FOUND state. It
# proceeds to cleave shard 0.0, but after 0.0 cleaving stalls because
# next in iteration is shard range 1.0 in FOUND state from the other
# replica that it cannot yet cleave.
self.sharders_once(number=self.brain.node_numbers[0],
additional_args='--partitions=%s' % self.brain.part)
# On first pass the second replica passes audit (it has its own found
# ranges and the first replica's created shard ranges but none in the
# same state overlap), creates its shards and then syncs shard ranges
# with the other replicas. All of the 7 shard ranges on this replica
# are now in CREATED state so it proceeds to cleave the first two shard
# ranges, 0.1 and 1.0.
self.sharders_once(number=self.brain.node_numbers[1],
additional_args='--partitions=%s' % self.brain.part)
self.replicators.once()
# Uh-oh
self.assert_container_state(self.brain.nodes[0], 'sharding', 7)
self.assert_container_state(self.brain.nodes[1], 'sharding', 7)
# There's a race: the third replica may be sharding, may be unsharded
# Try it again a few times
self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
self.replicators.once()
self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
# It's not really fixing itself... the sharder audit will detect
# overlapping ranges which prevents cleaving proceeding; expect the
# shard ranges to be mostly still in created state, with one or two
# possibly cleaved during first pass before the sharding got stalled
shard_ranges = self.assert_container_state(self.brain.nodes[0],
'sharding', 7)
self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 5,
[sr.state for sr in shard_ranges])
shard_ranges = self.assert_container_state(self.brain.nodes[1],
'sharding', 7)
self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 5,
[sr.state for sr in shard_ranges])
# But hey, at least listings still work! They're just going to get
# horribly out of date as more objects are added
self.assert_container_listing(obj_names)
# 'swift-manage-shard-ranges repair' will choose the second set of 3
# shard ranges (1.*) over the first set of 4 (0.*) because that's the
# path with most cleaving progress, and so shrink shard ranges 0.*.
db_file = self.get_db_file(self.brain.part, self.brain.nodes[0])
self.assert_subprocess_success(
['swift-manage-shard-ranges', db_file, 'repair', '--yes',
'--min-shard-age', '0'])
# make sure all root replicas now sync their shard ranges
self.replicators.once()
# Run sharder on the shrinking shards. This should not change the state
# of any of the acceptors, particularly the ones that have yet to have
# object cleaved from the roots, because we don't want the as yet
# uncleaved acceptors becoming prematurely active and creating 'holes'
# in listings. The shrinking shard ranges should however get deleted in
# root container table.
self.run_sharders(shard_ranges_0)
shard_ranges = self.assert_container_state(self.brain.nodes[1],
'sharding', 3)
self.assertEqual([ShardRange.CLEAVED] * 1 + [ShardRange.CREATED] * 2,
[sr.state for sr in shard_ranges])
self.assert_container_listing(obj_names)
# check the unwanted shards did shrink away...
for shard_range in shard_ranges_0:
with annotate_failure(shard_range):
found_for_shard = self.categorize_container_dir_content(
shard_range.account, shard_range.container)
self.assertLengthEqual(found_for_shard['shard_dbs'], 3)
actual = []
for shard_db in found_for_shard['shard_dbs']:
broker = ContainerBroker(shard_db)
own_sr = broker.get_own_shard_range()
actual.append(
(broker.get_db_state(), own_sr.state, own_sr.deleted))
self.assertEqual([(SHARDED, ShardRange.SHRUNK, True)] * 3,
actual)
# At this point one of the first two replicas may have done some useful
# cleaving of 1.* shards, the other may have only cleaved 0.* shards,
# and the third replica may have cleaved no shards. We therefore need
# two more passes of the sharder to get to a predictable state where
# all replicas have cleaved all three 0.* shards.
self.sharders_once()
self.sharders_once()
# now we expect all replicas to have just the three 1.* shards, with
# the 0.* shards all deleted
brokers = {}
exp_shard_ranges = sorted(
[sr.copy(state=ShardRange.SHRUNK, deleted=True)
for sr in shard_ranges_0] +
[sr.copy(state=ShardRange.ACTIVE)
for sr in shard_ranges_1],
key=ShardRange.sort_key)
for node in (0, 1, 2):
with annotate_failure('node %s' % node):
broker = self.get_broker(self.brain.part,
self.brain.nodes[node])
brokers[node] = broker
shard_ranges = broker.get_shard_ranges()
self.assertEqual(shard_ranges_1, shard_ranges)
shard_ranges = broker.get_shard_ranges(include_deleted=True)
self.assertLengthEqual(shard_ranges, len(exp_shard_ranges))
self.maxDiff = None
self.assertEqual(exp_shard_ranges, shard_ranges)
self.assertEqual(ShardRange.SHARDED,
broker.get_own_shard_range().state)
# Sadly, the first replica to start sharding is still reporting its db
# state to be 'unsharded' because, although it has sharded, its shard
# db epoch (epoch_0) does not match its own shard range epoch
# (epoch_1), and that is because the second replica (with epoch_1)
# updated the own shard range and replicated it to all other replicas.
# If we had run the sharder on the second replica before the first
# replica, then by the time the first replica started sharding it would
# have learnt the newer epoch_1 and we wouldn't see this inconsistency.
self.assertEqual(UNSHARDED, brokers[0].get_db_state())
self.assertEqual(SHARDED, brokers[1].get_db_state())
self.assertEqual(SHARDED, brokers[2].get_db_state())
epoch_1 = brokers[1].db_epoch
self.assertEqual(epoch_1, brokers[2].db_epoch)
self.assertLess(brokers[0].db_epoch, epoch_1)
# the root replica that thinks it is unsharded is problematic - it will
# not return shard ranges for listings, but has no objects, so it's
# luck of the draw whether we get a listing or not at this point :(
# Run the sharders again: the first replica that is still 'unsharded'
# because of the older epoch_0 in its db filename will now start to
# shard again with a newer epoch_1 db, and will start to re-cleave the
# 3 active shards, albeit with zero objects to cleave.
self.sharders_once()
for node in (0, 1, 2):
with annotate_failure('node %s' % node):
broker = self.get_broker(self.brain.part,
self.brain.nodes[node])
brokers[node] = broker
shard_ranges = broker.get_shard_ranges()
self.assertEqual(shard_ranges_1, shard_ranges)
shard_ranges = broker.get_shard_ranges(include_deleted=True)
self.assertLengthEqual(shard_ranges, len(exp_shard_ranges))
self.assertEqual(exp_shard_ranges, shard_ranges)
self.assertEqual(ShardRange.SHARDED,
broker.get_own_shard_range().state)
self.assertEqual(epoch_1, broker.db_epoch)
self.assertIn(brokers[0].get_db_state(), (SHARDING, SHARDED))
self.assertEqual(SHARDED, brokers[1].get_db_state())
self.assertEqual(SHARDED, brokers[2].get_db_state())
        # This cycle of the sharders also guarantees that all shards have had
        # their state updated to ACTIVE from the root; this was not
        # necessarily true at the end of the previous sharder pass because a
        # shard audit (when the shard is updated from a root) may have
        # happened before all roots have had their shard ranges transitioned
        # to ACTIVE.
for shard_range in shard_ranges_1:
with annotate_failure(shard_range):
found_for_shard = self.categorize_container_dir_content(
shard_range.account, shard_range.container)
self.assertLengthEqual(found_for_shard['normal_dbs'], 3)
actual = []
for shard_db in found_for_shard['normal_dbs']:
broker = ContainerBroker(shard_db)
own_sr = broker.get_own_shard_range()
actual.append(
(broker.get_db_state(), own_sr.state, own_sr.deleted))
self.assertEqual([(UNSHARDED, ShardRange.ACTIVE, False)] * 3,
actual)
# We may need one more pass of the sharder before all three shard
# ranges are cleaved (2 per pass) and all the root replicas are
# predictably in sharded state. Note: the accelerated cleaving of >2
# zero-object shard ranges per cycle is defeated if a shard happens
        # to exist on the same node as the root because the root's cleaving
        # process doesn't think that it created the shard db and will
        # therefore replicate it as per a normal cleave.
self.sharders_once()
for node in (0, 1, 2):
with annotate_failure('node %s' % node):
broker = self.get_broker(self.brain.part,
self.brain.nodes[node])
brokers[node] = broker
shard_ranges = broker.get_shard_ranges()
self.assertEqual(shard_ranges_1, shard_ranges)
shard_ranges = broker.get_shard_ranges(include_deleted=True)
self.assertLengthEqual(shard_ranges, len(exp_shard_ranges))
self.assertEqual(exp_shard_ranges, shard_ranges)
self.assertEqual(ShardRange.SHARDED,
broker.get_own_shard_range().state)
self.assertEqual(epoch_1, broker.db_epoch)
self.assertEqual(SHARDED, broker.get_db_state())
        # Finally, with all root replicas in a consistent state, the listing
        # will be predictably correct
self.assert_container_listing(obj_names)
def test_manage_shard_ranges_repair_shard(self):
# provoke overlaps in a shard container and repair them
obj_names = self._make_object_names(24)
initial_obj_names = obj_names[::2]
# put 12 objects in container
self.put_objects(initial_obj_names)
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
# run replicators first time to get sync points set
self.replicators.once()
# find 3 shard ranges on root nodes[0] and get the root sharded
self.assert_subprocess_success([
'swift-manage-shard-ranges',
self.get_db_file(self.brain.part, self.brain.nodes[0]),
'find_and_replace', '4', '--enable'])
self.replicators.once()
# cleave first two shards
self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
# cleave third shard
self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
# ensure all shards learn their ACTIVE state from root
self.sharders_once()
for node in (0, 1, 2):
with annotate_failure('node %d' % node):
shard_ranges = self.assert_container_state(
self.brain.nodes[node], 'sharded', 3)
for sr in shard_ranges:
self.assertEqual(ShardRange.ACTIVE, sr.state)
self.assert_container_listing(initial_obj_names)
        # add objects to the second shard range so it has 8 objects; this
        # range has bounds (obj-0006,obj-0014]
root_shard_ranges = self.get_container_shard_ranges()
self.assertEqual(3, len(root_shard_ranges))
shard_1 = root_shard_ranges[1]
self.assertEqual(obj_names[6], shard_1.lower)
self.assertEqual(obj_names[14], shard_1.upper)
more_obj_names = obj_names[7:15:2]
self.put_objects(more_obj_names)
expected_obj_names = sorted(initial_obj_names + more_obj_names)
self.assert_container_listing(expected_obj_names)
shard_1_part, shard_1_nodes = self.brain.ring.get_nodes(
shard_1.account, shard_1.container)
# find 3 sub-shards on one shard node; use --force-commits to ensure
# the recently PUT objects are included when finding the shard range
# pivot points
self.assert_subprocess_success([
'swift-manage-shard-ranges', '--force-commits',
self.get_db_file(shard_1_part, shard_1_nodes[1], shard_1.account,
shard_1.container),
'find_and_replace', '3', '--enable'])
# ... and mistakenly find 4 shard ranges on a different shard node :(
self.assert_subprocess_success([
'swift-manage-shard-ranges', '--force-commits',
self.get_db_file(shard_1_part, shard_1_nodes[2], shard_1.account,
shard_1.container),
'find_and_replace', '2', '--enable'])
# replicate the muddle of shard ranges between shard replicas, merged
# result is:
# '' - 6 shard ACTIVE
# 6 - 8 sub-shard FOUND
# 6 - 9 sub-shard FOUND
# 8 - 10 sub-shard FOUND
# 9 - 12 sub-shard FOUND
# 10 - 12 sub-shard FOUND
# 12 - 14 sub-shard FOUND
# 12 - 14 sub-shard FOUND
# 6 - 14 shard SHARDING
# 14 - '' shard ACTIVE
self.replicators.once()
# try hard to shard the shard...
self.sharders_once(additional_args='--partitions=%s' % shard_1_part)
self.sharders_once(additional_args='--partitions=%s' % shard_1_part)
self.sharders_once(additional_args='--partitions=%s' % shard_1_part)
        # sharding hasn't completed and there are overlaps in the shard and
        # root: the sub-shards will have been cleaved in the order listed
        # above, but sub-shards (10 - 12) and one of (12 - 14) will be
        # overlooked because the cleave cursor will have moved past their
        # namespace before they were yielded by the shard range iterator, so
        # we now have:
# '' - 6 shard ACTIVE
# 6 - 8 sub-shard ACTIVE
# 6 - 9 sub-shard ACTIVE
# 8 - 10 sub-shard ACTIVE
# 10 - 12 sub-shard CREATED
# 9 - 12 sub-shard ACTIVE
# 12 - 14 sub-shard CREATED
# 12 - 14 sub-shard ACTIVE
# 14 - '' shard ACTIVE
sub_shard_ranges = self.get_container_shard_ranges(
shard_1.account, shard_1.container)
self.assertEqual(7, len(sub_shard_ranges), sub_shard_ranges)
root_shard_ranges = self.get_container_shard_ranges()
self.assertEqual(9, len(root_shard_ranges), root_shard_ranges)
self.assertEqual([ShardRange.ACTIVE] * 4 +
[ShardRange.CREATED, ShardRange.ACTIVE] * 2 +
[ShardRange.ACTIVE],
[sr.state for sr in root_shard_ranges])
# fix the overlaps - a set of 3 ACTIVE sub-shards will be chosen and 4
# other sub-shards will be shrunk away; apply the fix at the root
# container
db_file = self.get_db_file(self.brain.part, self.brain.nodes[0])
self.assert_subprocess_success(
['swift-manage-shard-ranges', db_file, 'repair', '--yes',
'--min-shard-age', '0'])
self.replicators.once()
self.sharders_once()
self.sharders_once()
# check root now has just 5 shard ranges
root_shard_ranges = self.get_container_shard_ranges()
self.assertEqual(5, len(root_shard_ranges), root_shard_ranges)
self.assertEqual([ShardRange.ACTIVE] * 5,
[sr.state for sr in root_shard_ranges])
        # check there is 1 sharded shard range and 4 shrunk sub-shard ranges
        # in the root (note: shard_1's shard ranges aren't updated once it
        # has sharded because the sub-shards report their state to the root;
        # we cannot make assertions about shrunk states in shard_1's shard
        # range table)
root_shard_ranges = self.get_container_shard_ranges(
include_deleted=True)
self.assertEqual(10, len(root_shard_ranges), root_shard_ranges)
shrunk_shard_ranges = [sr for sr in root_shard_ranges
if sr.state == ShardRange.SHRUNK]
self.assertEqual(4, len(shrunk_shard_ranges), root_shard_ranges)
self.assertEqual([True] * 4,
[sr.deleted for sr in shrunk_shard_ranges])
sharded_shard_ranges = [sr for sr in root_shard_ranges
if sr.state == ShardRange.SHARDED]
self.assertEqual(1, len(sharded_shard_ranges), root_shard_ranges)
self.assert_container_listing(expected_obj_names)
def test_manage_shard_ranges_repair_parent_child_ranges(self):
# Test repairing a transient parent-child shard range overlap in the
# root container, expect no repairs to be done.
# note: be careful not to add a container listing to this test which
# would get shard ranges into memcache
obj_names = self._make_object_names(4)
self.put_objects(obj_names)
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
# run replicators first time to get sync points set
self.container_replicators.once(
additional_args='--partitions=%s' % self.brain.part)
# shard root
root_0_db_file = self.get_db_file(self.brain.part, self.brain.nodes[0])
self.assert_subprocess_success([
'swift-manage-shard-ranges',
root_0_db_file,
'find_and_replace', '2', '--enable'])
self.container_replicators.once(
additional_args='--partitions=%s' % self.brain.part)
for node in self.brain.nodes:
self.assert_container_state(node, 'unsharded', 2)
self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
# get shards to update state from parent...
self.sharders_once()
for node in self.brain.nodes:
self.assert_container_state(node, 'sharded', 2)
# sanity check, all is well
msg = self.assert_subprocess_success([
'swift-manage-shard-ranges', root_0_db_file, 'repair', '--gaps',
'--dry-run'])
self.assertIn(b'No repairs necessary.', msg)
# shard first shard into 2 sub-shards while root node 0 is disabled
self.stop_container_servers(node_numbers=slice(0, 1))
shard_ranges = self.get_container_shard_ranges()
shard_brokers = [self.get_shard_broker(shard_ranges[0], node_index=i)
for i in range(3)]
self.assert_subprocess_success([
'swift-manage-shard-ranges',
shard_brokers[0].db_file,
'find_and_replace', '1', '--enable'])
shard_part, shard_nodes = self.brain.ring.get_nodes(
shard_ranges[0].account, shard_ranges[0].container)
self.container_replicators.once(
additional_args='--partitions=%s' % shard_part)
for node in exclude_nodes(shard_nodes, self.brain.nodes[0]):
self.assert_container_state(
node, 'unsharded', 2, account=shard_ranges[0].account,
container=shard_ranges[0].container, part=shard_part)
self.sharders_once(additional_args='--partitions=%s' % shard_part)
# get shards to update state from parent...
self.sharders_once()
for node in exclude_nodes(shard_nodes, self.brain.nodes[0]):
self.assert_container_state(
node, 'sharded', 2, account=shard_ranges[0].account,
container=shard_ranges[0].container, part=shard_part)
# put an object into the second of the 2 sub-shards so that the shard
# will update the root next time the sharder is run; do this before
# restarting root node 0 so that the object update is definitely
# redirected to a sub-shard by root node 1 or 2.
new_obj_name = obj_names[0] + 'a'
self.put_objects([new_obj_name])
# restart root node 0
self.brain.servers.start(number=self.brain.node_numbers[0])
# node 0 DB doesn't know about the sub-shards
root_brokers = [self.get_broker(self.brain.part, node)
for node in self.brain.nodes]
broker = root_brokers[0]
self.assertEqual(
[(ShardRange.ACTIVE, False, ShardRange.MIN, obj_names[1]),
(ShardRange.ACTIVE, False, obj_names[1], ShardRange.MAX)],
[(sr.state, sr.deleted, sr.lower, sr.upper)
for sr in broker.get_shard_ranges(include_deleted=True)])
for broker in root_brokers[1:]:
self.assertEqual(
[(ShardRange.ACTIVE, False, ShardRange.MIN, obj_names[0]),
(ShardRange.ACTIVE, False, obj_names[0], obj_names[1]),
(ShardRange.SHARDED, True, ShardRange.MIN, obj_names[1]),
(ShardRange.ACTIVE, False, obj_names[1], ShardRange.MAX)],
[(sr.state, sr.deleted, sr.lower, sr.upper)
for sr in broker.get_shard_ranges(include_deleted=True)])
sub_shard = root_brokers[1].get_shard_ranges()[1]
self.assertEqual(obj_names[0], sub_shard.lower)
self.assertEqual(obj_names[1], sub_shard.upper)
sub_shard_part, nodes = self.get_part_and_node_numbers(sub_shard)
        # we want the sub-shard to update root node 0 but not the sharded
        # shard, but there is a small chance the two will be in the same
        # partition
        # TODO: how can we work around this?
self.assertNotEqual(sub_shard_part, shard_part,
'You were unlucky, try again')
self.sharders_once(additional_args='--partitions=%s' % sub_shard_part)
# now root node 0 has the original shards plus one of the sub-shards
# but all are active :(
self.assertEqual(
[(ShardRange.ACTIVE, False, ShardRange.MIN, obj_names[1]),
# note: overlap!
(ShardRange.ACTIVE, False, obj_names[0], obj_names[1]),
(ShardRange.ACTIVE, False, obj_names[1], ShardRange.MAX)],
[(sr.state, sr.deleted, sr.lower, sr.upper)
for sr in root_brokers[0].get_shard_ranges(include_deleted=True)])
        # try to fix the overlap and expect no repair to be done.
msg = self.assert_subprocess_success(
['swift-manage-shard-ranges', root_0_db_file, 'repair', '--yes',
'--min-shard-age', '0'])
self.assertIn(
b'1 donor shards ignored due to parent-child relationship checks',
msg)
        # verify that parent-child checks have prevented the repair from
        # being done.
self.assertEqual(
[(ShardRange.ACTIVE, False, ShardRange.MIN, obj_names[1]),
# note: overlap!
(ShardRange.ACTIVE, False, obj_names[0], obj_names[1]),
(ShardRange.ACTIVE, False, obj_names[1], ShardRange.MAX)],
[(sr.state, sr.deleted, sr.lower, sr.upper)
for sr in root_brokers[0].get_shard_ranges(include_deleted=True)])
# the transient overlap is 'fixed' in subsequent sharder cycles...
self.sharders_once()
self.sharders_once()
self.container_replicators.once()
for broker in root_brokers:
self.assertEqual(
[(ShardRange.ACTIVE, False, ShardRange.MIN, obj_names[0]),
(ShardRange.ACTIVE, False, obj_names[0], obj_names[1]),
(ShardRange.SHARDED, True, ShardRange.MIN, obj_names[1]),
(ShardRange.ACTIVE, False, obj_names[1], ShardRange.MAX)],
[(sr.state, sr.deleted, sr.lower, sr.upper)
for sr in broker.get_shard_ranges(include_deleted=True)])
def test_manage_shard_ranges_repair_root_gap(self):
# create a gap in root container; repair the gap.
# note: be careful not to add a container listing to this test which
# would get shard ranges into memcache
obj_names = self._make_object_names(8)
self.put_objects(obj_names)
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
# run replicators first time to get sync points set
self.container_replicators.once(
additional_args='--partitions=%s' % self.brain.part)
# shard root
root_0_db_file = self.get_db_file(self.brain.part, self.brain.nodes[0])
self.assert_subprocess_success([
'swift-manage-shard-ranges',
root_0_db_file,
'find_and_replace', '2', '--enable'])
self.container_replicators.once(
additional_args='--partitions=%s' % self.brain.part)
for node in self.brain.nodes:
self.assert_container_state(node, 'unsharded', 4)
self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
# get shards to update state from parent...
self.sharders_once()
for node in self.brain.nodes:
self.assert_container_state(node, 'sharded', 4)
# sanity check, all is well
msg = self.assert_subprocess_success([
'swift-manage-shard-ranges', root_0_db_file, 'repair', '--gaps',
'--dry-run'])
self.assertIn(b'No repairs necessary.', msg)
# deliberately create a gap in root shard ranges (don't ever do this
# for real)
# TODO: replace direct broker modification with s-m-s-r merge
root_brokers = [self.get_broker(self.brain.part, node)
for node in self.brain.nodes]
shard_ranges = root_brokers[0].get_shard_ranges()
self.assertEqual(4, len(shard_ranges))
shard_ranges[2].set_deleted()
root_brokers[0].merge_shard_ranges(shard_ranges)
shard_ranges = root_brokers[0].get_shard_ranges()
self.assertEqual(3, len(shard_ranges))
self.container_replicators.once()
# confirm that we made a gap.
for broker in root_brokers:
self.assertEqual(
[(ShardRange.ACTIVE, False, ShardRange.MIN, obj_names[1]),
(ShardRange.ACTIVE, False, obj_names[1], obj_names[3]),
(ShardRange.ACTIVE, True, obj_names[3], obj_names[5]),
(ShardRange.ACTIVE, False, obj_names[5], ShardRange.MAX)],
[(sr.state, sr.deleted, sr.lower, sr.upper)
for sr in broker.get_shard_ranges(include_deleted=True)])
msg = self.assert_subprocess_success([
'swift-manage-shard-ranges', root_0_db_file, 'repair', '--gaps',
'--yes'])
self.assertIn(b'Repairs necessary to fill gaps.', msg)
self.sharders_once()
self.sharders_once()
self.container_replicators.once()
# yay! we fixed the gap (without creating an overlap)
for broker in root_brokers:
self.assertEqual(
[(ShardRange.ACTIVE, False, ShardRange.MIN, obj_names[1]),
(ShardRange.ACTIVE, False, obj_names[1], obj_names[3]),
(ShardRange.ACTIVE, True, obj_names[3], obj_names[5]),
(ShardRange.ACTIVE, False, obj_names[3], ShardRange.MAX)],
[(sr.state, sr.deleted, sr.lower, sr.upper)
for sr in broker.get_shard_ranges(include_deleted=True)])
msg = self.assert_subprocess_success([
'swift-manage-shard-ranges', root_0_db_file, 'repair',
'--dry-run', '--min-shard-age', '0'])
self.assertIn(b'No repairs necessary.', msg)
msg = self.assert_subprocess_success([
'swift-manage-shard-ranges', root_0_db_file, 'repair', '--gaps',
'--dry-run'])
self.assertIn(b'No repairs necessary.', msg)
# put an object into the gap namespace
new_objs = [obj_names[4] + 'a']
self.put_objects(new_objs)
# get root stats up to date
self.sharders_once()
# new object is in listing but old objects in the gap have been lost -
# don't delete shard ranges!
self.assert_container_listing(obj_names[:4] + new_objs + obj_names[6:])
def test_manage_shard_ranges_unsharded_deleted_root(self):
# verify that a deleted DB will still be sharded
# choose a node that will not be sharded initially
sharded_nodes = []
unsharded_node = None
for node in self.brain.nodes:
if self.brain.node_numbers[node['index']] \
in self.brain.handoff_numbers:
unsharded_node = node
else:
sharded_nodes.append(node)
# put some objects - not enough to trigger auto-sharding
obj_names = self._make_object_names(MIN_SHARD_CONTAINER_THRESHOLD - 1)
self.put_objects(obj_names)
# run replicators first time to get sync points set and commit updates
self.replicators.once()
# setup sharding...
self.assert_subprocess_success([
'swift-manage-shard-ranges',
self.get_db_file(self.brain.part, sharded_nodes[0]),
'find_and_replace', '2', '--enable', '--minimum-shard-size', '1'])
# Run container-replicator to replicate shard ranges
self.container_replicators.once()
self.assert_container_state(sharded_nodes[0], 'unsharded', 2)
self.assert_container_state(sharded_nodes[1], 'unsharded', 2)
self.assert_container_state(unsharded_node, 'unsharded', 2)
# Run container-sharder to shard the 2 primary replicas that did
# receive the object PUTs
for num in self.brain.primary_numbers:
self.sharders_once(
number=num,
additional_args='--partitions=%s' % self.brain.part)
        # delete the objects - the proxies will have cached container info
        # with out-of-date db_state=unsharded, so updates go to the root DBs
self.delete_objects(obj_names)
        # deal with DELETEs being misplaced in the root DBs...
for num in self.brain.primary_numbers:
self.sharders_once(
number=num,
additional_args='--partitions=%s' % self.brain.part)
self.assert_container_state(sharded_nodes[0], 'sharded', 2)
self.assert_container_state(sharded_nodes[1], 'sharded', 2)
shard_ranges = self.assert_container_state(
unsharded_node, 'unsharded', 2)
# get root stats updated - but avoid sharding the remaining root DB
self.run_sharders(shard_ranges, exclude_partitions=[self.brain.part])
self.assert_container_listing([])
# delete the empty container
client.delete_container(self.url, self.admin_token,
self.container_name)
# sanity check - unsharded DB is deleted
broker = self.get_broker(self.brain.part, unsharded_node,
self.account, self.container_name)
self.assertEqual(UNSHARDED, broker.get_db_state())
self.assertTrue(broker.is_deleted())
self.assertEqual(0, broker.get_info()['object_count'])
self.assertEqual(0, broker.get_shard_usage()['object_count'])
# now shard the final DB
for num in self.brain.handoff_numbers:
self.sharders_once(
number=num,
additional_args='--partitions=%s' % self.brain.part)
# all DBs should now be sharded and still deleted
for node in self.brain.nodes:
with annotate_failure(
'node %s in %s'
% (node['index'], [n['index'] for n in self.brain.nodes])):
self.assert_container_state(node, 'sharded', 2,
override_deleted=True)
broker = self.get_broker(self.brain.part, node,
self.account, self.container_name)
self.assertEqual(SHARDED, broker.get_db_state())
self.assertEqual(0, broker.get_info()['object_count'])
self.assertEqual(0,
broker.get_shard_usage()['object_count'])
self.assertTrue(broker.is_deleted())
def test_manage_shard_ranges_unsharded_deleted_root_gets_undeleted(self):
# verify that an apparently deleted DB (no object rows in root db) will
# still be sharded and also become undeleted when objects are
# discovered in the shards
# choose a node that will not be sharded initially
sharded_nodes = []
unsharded_node = None
for node in self.brain.nodes:
if self.brain.node_numbers[node['index']] \
in self.brain.handoff_numbers:
unsharded_node = node
else:
sharded_nodes.append(node)
# put some objects, but only to 2 replicas - not enough to trigger
# auto-sharding
self.brain.stop_handoff_half()
obj_names = self._make_object_names(MIN_SHARD_CONTAINER_THRESHOLD - 1)
self.put_objects(obj_names)
# run replicators first time to get sync points set and commit puts
self.replicators.once()
# setup sharding...
self.assert_subprocess_success([
'swift-manage-shard-ranges',
self.get_db_file(self.brain.part, sharded_nodes[0]),
'find_and_replace', '2', '--enable', '--minimum-shard-size', '1'])
        # Run container-replicator to replicate shard ranges - object rows
        # will not be sync'd now that there are shard ranges
for num in self.brain.primary_numbers:
self.container_replicators.once(number=num)
self.assert_container_state(sharded_nodes[0], 'unsharded', 2)
self.assert_container_state(sharded_nodes[1], 'unsharded', 2)
# revive the stopped node
self.brain.start_handoff_half()
self.assert_container_state(unsharded_node, 'unsharded', 0)
# delete the empty replica
direct_client.direct_delete_container(
unsharded_node, self.brain.part, self.account,
self.container_name)
# Run container-sharder to shard the 2 primary replicas that did
# receive the object PUTs
for num in self.brain.primary_numbers:
self.sharders_once(
number=num,
additional_args='--partitions=%s' % self.brain.part)
self.assert_container_state(sharded_nodes[0], 'sharded', 2)
self.assert_container_state(sharded_nodes[1], 'sharded', 2)
# the sharder syncs shard ranges ...
self.assert_container_state(unsharded_node, 'unsharded', 2,
override_deleted=True)
# sanity check - unsharded DB is empty and deleted
broker = self.get_broker(self.brain.part, unsharded_node,
self.account, self.container_name)
self.assertEqual(UNSHARDED, broker.get_db_state())
self.assertEqual(0, broker.get_info()['object_count'])
# the shard ranges do have object count but are in CREATED state so
# not reported in shard usage...
self.assertEqual(0, broker.get_shard_usage()['object_count'])
self.assertTrue(broker.is_deleted())
# now shard the final DB
for num in self.brain.handoff_numbers:
self.sharders_once(
number=num,
additional_args='--partitions=%s' % self.brain.part)
shard_ranges = self.assert_container_state(
unsharded_node, 'sharded', 2, override_deleted=True)
# and get roots updated and sync'd
self.container_replicators.once()
self.run_sharders(shard_ranges, exclude_partitions=[self.brain.part])
# all DBs should now be sharded and NOT deleted
for node in self.brain.nodes:
with annotate_failure(
'node %s in %s'
% (node['index'], [n['index'] for n in self.brain.nodes])):
broker = self.get_broker(self.brain.part, node,
self.account, self.container_name)
self.assertEqual(SHARDED, broker.get_db_state())
self.assertEqual(3, broker.get_info()['object_count'])
self.assertEqual(3,
broker.get_shard_usage()['object_count'])
self.assertFalse(broker.is_deleted())
def test_manage_shard_ranges_deleted_child_and_parent_gap(self):
        # Test to produce a scenario where a parent container is stuck in the
        # sharding state because of a gap in shard ranges, the gap being
        # caused by a deleted child shard range which finishes sharding
        # before its parent does.
# note: be careful not to add a container listing to this test which
# would get shard ranges into memcache.
obj_names = self._make_object_names(20)
self.put_objects(obj_names)
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})
# run replicators first time to get sync points set.
self.container_replicators.once(
additional_args='--partitions=%s' % self.brain.part)
# shard root into two child-shards.
root_0_db_file = self.get_db_file(self.brain.part, self.brain.nodes[0])
self.assert_subprocess_success([
'swift-manage-shard-ranges',
root_0_db_file,
'find_and_replace', '10', '--enable'])
# Run container-replicator to replicate them to other nodes.
self.container_replicators.once(
additional_args='--partitions=%s' % self.brain.part)
for node in self.brain.nodes:
self.assert_container_state(node, 'unsharded', 2)
# Run container-sharder on all nodes to shard the container.
self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
# get shards to update state from parent...
self.sharders_once()
for node in self.brain.nodes:
self.assert_container_state(node, 'sharded', 2)
# shard first child shard into 2 grand-child-shards.
c_shard_ranges = self.get_container_shard_ranges()
c_shard_brokers = [self.get_shard_broker(
c_shard_ranges[0], node_index=i) for i in range(3)]
self.assert_subprocess_success([
'swift-manage-shard-ranges',
c_shard_brokers[0].db_file,
'find_and_replace', '5', '--enable'])
child_shard_part, c_shard_nodes = self.brain.ring.get_nodes(
c_shard_ranges[0].account, c_shard_ranges[0].container)
self.container_replicators.once(
additional_args='--partitions=%s' % child_shard_part)
for node in c_shard_nodes:
self.assert_container_state(
node, 'unsharded', 2, account=c_shard_ranges[0].account,
container=c_shard_ranges[0].container, part=child_shard_part)
# run sharder on only 2 of the child replicas by renaming the third
# replica's DB file directory.
# NOTE: if we only rename the retiring DB file, other replicas will
# create a "fresh" DB with timestamp during replication, and then
        # after we restore the retiring DB, there will be two DB files
# in the same folder, and container state will appear to be "sharding"
# instead of "unsharded".
c_shard_dir = os.path.dirname(c_shard_brokers[2].db_file)
c_shard_tmp_dir = c_shard_dir + ".tmp"
os.rename(c_shard_dir, c_shard_tmp_dir)
self.sharders_once(additional_args='--partitions=%s' %
child_shard_part)
for node in c_shard_nodes[:2]:
self.assert_container_state(
node, 'sharded', 2, account=c_shard_ranges[0].account,
container=c_shard_ranges[0].container, part=child_shard_part)
# get updates done...
self.sharders_once()
# shard first grand-child shard into 2 grand-grand-child-shards.
gc_shard_ranges = self.get_container_shard_ranges(
account=c_shard_ranges[0].account,
container=c_shard_ranges[0].container)
shard_brokers = [self.get_shard_broker(
gc_shard_ranges[0],
node_index=i) for i in range(3)]
self.assert_subprocess_success([
'swift-manage-shard-ranges',
shard_brokers[0].db_file,
'find_and_replace', '3', '--enable'])
grandchild_shard_part, gc_shard_nodes = self.brain.ring.get_nodes(
gc_shard_ranges[0].account, gc_shard_ranges[0].container)
self.container_replicators.once(
additional_args='--partitions=%s' % grandchild_shard_part)
self.sharders_once(additional_args='--partitions=%s' %
grandchild_shard_part)
# get shards to update state from parent...
self.sharders_once()
self.sharders_once()
self.container_replicators.once(
additional_args='--partitions=%s' % child_shard_part)
        # restore the DB file directory of the disabled child replica.
shutil.rmtree(c_shard_dir, ignore_errors=True)
os.rename(c_shard_tmp_dir, c_shard_dir)
        # the 2 child shard replicas that sharded earlier still have their
        # original grand-child shards because they stopped updating from root
        # once sharded.
for node in c_shard_nodes[:2]:
self.assert_container_state(
node, 'sharded', 2, account=c_shard_ranges[0].account,
container=c_shard_ranges[0].container, part=child_shard_part)
# the child shard that did not shard earlier has not been touched by
# the sharder since, so still has two grand-child shards.
self.assert_container_state(
c_shard_nodes[2],
'unsharded', 2, account=c_shard_ranges[0].account,
container=c_shard_ranges[0].container, part=child_shard_part)
        # now, finally, run the sharder on the child that is still waiting to
        # shard. It will get 2 great-grandchild ranges from root to replace
        # the deleted grandchild.
self.sharders_once(
additional_args=['--partitions=%s' %
child_shard_part, '--devices=%s' %
c_shard_nodes[2]['device']])
        # batch size is 2 but this replica has 3 shard ranges so we need two
        # runs of the sharder
self.sharders_once(
additional_args=['--partitions=%s' %
child_shard_part, '--devices=%s' %
c_shard_nodes[2]['device']])
self.assert_container_state(
c_shard_nodes[2], 'sharded', 3, account=c_shard_ranges[0].account,
container=c_shard_ranges[0].container, part=child_shard_part)
| swift-master | test/probe/test_sharder.py |
#!/usr/bin/python -u
# Copyright (c) 2010-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import unittest
from six.moves import http_client
from six.moves.urllib.parse import urlparse
from swiftclient import get_auth
from test.probe import PROXY_BASE_URL
from test.probe.common import ReplProbeTest
class TestAccountGetFakeResponsesMatch(ReplProbeTest):
def setUp(self):
super(TestAccountGetFakeResponsesMatch, self).setUp()
self.url, self.token = get_auth(
PROXY_BASE_URL + '/auth/v1.0', 'admin:admin', 'admin')
def _account_path(self, account):
_, _, path, _, _, _ = urlparse(self.url)
basepath, _ = path.rsplit('/', 1)
return basepath + '/' + account
def _get(self, *a, **kw):
kw['method'] = 'GET'
return self._account_request(*a, **kw)
def _account_request(self, account, method, headers=None):
if headers is None:
headers = {}
headers['X-Auth-Token'] = self.token
scheme, netloc, path, _, _, _ = urlparse(self.url)
host, port = netloc.partition(':')[::2]
if not port:
port = '443' if scheme == 'https' else '80'
port = int(port)
if scheme == 'https':
conn = http_client.HTTPSConnection(host, port)
else:
conn = http_client.HTTPConnection(host, port)
conn.request(method, self._account_path(account), headers=headers)
resp = conn.getresponse()
if resp.status // 100 != 2:
raise Exception("Unexpected status %s\n%s" %
(resp.status, resp.read()))
response_headers = {h.lower(): v for h, v in resp.getheaders()}
response_body = resp.read()
resp.close()
return response_headers, response_body
def test_main(self):
# Two accounts: "real" and "fake". The fake one doesn't have any .db
# files on disk; the real one does. The real one is empty.
#
# Make sure the important response fields match.
real_acct = "AUTH_real"
fake_acct = "AUTH_fake"
self._account_request(real_acct, 'POST',
{'X-Account-Meta-Bert': 'Ernie'})
# text
real_headers, real_body = self._get(real_acct)
fake_headers, fake_body = self._get(fake_acct)
self.assertEqual(real_body, fake_body)
self.assertEqual(real_headers['content-type'],
fake_headers['content-type'])
# json
real_headers, real_body = self._get(
real_acct, headers={'Accept': 'application/json'})
fake_headers, fake_body = self._get(
fake_acct, headers={'Accept': 'application/json'})
self.assertEqual(real_body, fake_body)
self.assertEqual(real_headers['content-type'],
fake_headers['content-type'])
# xml
real_headers, real_body = self._get(
real_acct, headers={'Accept': 'application/xml'})
fake_headers, fake_body = self._get(
fake_acct, headers={'Accept': 'application/xml'})
# the account name is in the XML response
real_body = re.sub(br'AUTH_\w{4}', b'AUTH_someaccount', real_body)
fake_body = re.sub(br'AUTH_\w{4}', b'AUTH_someaccount', fake_body)
self.assertEqual(real_body, fake_body)
self.assertEqual(real_headers['content-type'],
fake_headers['content-type'])
if __name__ == '__main__':
unittest.main()
| swift-master | test/probe/test_account_get_fake_responses_match.py |
#!/usr/bin/python -u
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import functools
import sys
from io import BytesIO
import itertools
import uuid
from optparse import OptionParser
import random
import six
from six.moves.urllib.parse import urlparse, parse_qs, quote
from swift.common.manager import Manager
from swift.common import utils, ring
from swift.common.internal_client import InternalClient, UnexpectedResponse
from swift.common.storage_policy import POLICIES
from swift.common.http import HTTP_NOT_FOUND
from swiftclient import client, get_auth, ClientException
from test.probe import PROXY_BASE_URL
from test.probe.common import ENABLED_POLICIES
TIMEOUT = 60
def meta_command(name, bases, attrs):
"""
Look for attrs with a truthy attribute __command__ and add them to an
attribute __commands__ on the type that maps names to decorated methods.
The decorated methods' doc strings also get mapped in __docs__.
Also adds a method run(command_name, *args, **kwargs) that will
execute the method mapped to the name in __commands__.
"""
commands = {}
docs = {}
for attr, value in attrs.items():
if getattr(value, '__command__', False):
commands[attr] = value
# methods always have a __doc__ attribute, sometimes empty
docs[attr] = (getattr(value, '__doc__', None) or
'perform the %s command' % attr).strip()
attrs['__commands__'] = commands
attrs['__docs__'] = docs
def run(self, command, *args, **kwargs):
return self.__commands__[command](self, *args, **kwargs)
attrs.setdefault('run', run)
return type(name, bases, attrs)
def command(f):
f.__command__ = True
return f
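# Illustrative sketch only (ExampleBrain is hypothetical, not part of this
# module): meta_command gathers @command-decorated methods into __commands__
# so run() can dispatch them by name, and their docstrings into __docs__:
#
#     @six.add_metaclass(meta_command)
#     class ExampleBrain(object):
#         @command
#         def ping(self):
#             """reply with pong"""
#             return 'pong'
#
#     ExampleBrain().run('ping')      # -> 'pong'
#     ExampleBrain.__docs__['ping']   # -> 'reply with pong'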
@six.add_metaclass(meta_command)
class BaseBrain(object):
def _setup(self, account, container_name, object_name,
server_type, policy):
self.account = account
self.container_name = container_name
self.object_name = object_name
server_list = ['%s-server' % server_type] if server_type else ['all']
self.servers = Manager(server_list)
policies = list(ENABLED_POLICIES)
random.shuffle(policies)
self.policies = itertools.cycle(policies)
o = object_name if server_type == 'object' else None
c = container_name if server_type in ('object', 'container') else None
if server_type in ('container', 'account'):
if policy:
raise TypeError('Metadata server brains do not '
'support specific storage policies')
self.policy = None
self.ring = ring.Ring(
'/etc/swift/%s.ring.gz' % server_type)
elif server_type == 'object':
if not policy:
raise TypeError('Object BrainSplitters need to '
'specify the storage policy')
self.policy = policy
policy.load_ring('/etc/swift')
self.ring = policy.object_ring
else:
raise ValueError('Unknown server_type: %r' % server_type)
self.server_type = server_type
self.part, self.nodes = self.ring.get_nodes(self.account, c, o)
self.node_numbers = [n['id'] + 1 for n in self.nodes]
if 1 in self.node_numbers and 2 in self.node_numbers:
self.primary_numbers = (1, 2)
self.handoff_numbers = (3, 4)
else:
self.primary_numbers = (3, 4)
self.handoff_numbers = (1, 2)
@command
def start_primary_half(self):
"""
start servers 1 & 2
"""
tuple(self.servers.start(number=n) for n in self.primary_numbers)
@command
def stop_primary_half(self):
"""
stop servers 1 & 2
"""
tuple(self.servers.stop(number=n) for n in self.primary_numbers)
@command
def start_handoff_half(self):
"""
start servers 3 & 4
"""
tuple(self.servers.start(number=n) for n in self.handoff_numbers)
@command
def stop_handoff_half(self):
"""
stop servers 3 & 4
"""
tuple(self.servers.stop(number=n) for n in self.handoff_numbers)
@command
def put_container(self, policy_index=None):
"""
put container with next storage policy
"""
if policy_index is not None:
policy = POLICIES.get_by_index(int(policy_index))
if not policy:
raise ValueError('Unknown policy with index %s' % policy)
elif not self.policy:
policy = next(self.policies)
else:
policy = self.policy
headers = {'X-Storage-Policy': policy.name}
self.client.put_container(self.container_name, headers=headers)
@command
def delete_container(self):
"""
delete container
"""
self.client.delete_container(self.container_name)
@command
def put_object(self, headers=None, contents=None):
"""
issue put for test object
"""
self.client.put_object(self.container_name, self.object_name,
headers=headers, contents=contents)
@command
def delete_object(self):
"""
issue delete for test object
"""
self.client.delete_object(self.container_name, self.object_name)
@command
def get_object(self):
"""
issue GET for test object
"""
return self.client.get_object(self.container_name, self.object_name)
class PublicBrainClient(object):
def __init__(self, url, token):
self.url = url
self.token = token
self.account = utils.split_path(urlparse(url).path, 2, 2)[1]
def put_container(self, container_name, headers):
return client.put_container(self.url, self.token, container_name,
headers=headers)
def post_container(self, container_name, headers):
return client.post_container(self.url, self.token, container_name,
headers)
def delete_container(self, container_name):
return client.delete_container(self.url, self.token, container_name)
def put_object(self, container_name, object_name, headers, contents,
query_string=None):
return client.put_object(self.url, self.token, container_name,
object_name, headers=headers,
contents=contents, query_string=query_string)
def delete_object(self, container_name, object_name):
try:
client.delete_object(self.url, self.token,
container_name, object_name)
except ClientException as err:
if err.http_status != HTTP_NOT_FOUND:
raise
def head_object(self, container_name, object_name):
return client.head_object(self.url, self.token, container_name,
object_name)
def get_object(self, container_name, object_name, query_string=None):
return client.get_object(self.url, self.token,
container_name, object_name,
query_string=query_string)
def translate_client_exception(m):
@functools.wraps(m)
def wrapper(*args, **kwargs):
try:
return m(*args, **kwargs)
except UnexpectedResponse as err:
raise ClientException(
err.args[0],
http_scheme=err.resp.environ['wsgi.url_scheme'],
http_host=err.resp.environ['SERVER_NAME'],
http_port=err.resp.environ['SERVER_PORT'],
http_path=quote(err.resp.environ['PATH_INFO']),
http_query=err.resp.environ['QUERY_STRING'],
http_status=err.resp.status_int,
http_reason=err.resp.explanation,
http_response_content=err.resp.body,
http_response_headers=err.resp.headers,
)
return wrapper
class InternalBrainClient(object):
def __init__(self, conf_file, account='AUTH_test'):
self.swift = InternalClient(conf_file, 'probe-test', 3)
self.account = account
@translate_client_exception
def put_container(self, container_name, headers):
return self.swift.create_container(self.account, container_name,
headers=headers)
@translate_client_exception
def post_container(self, container_name, headers):
return self.swift.set_container_metadata(self.account, container_name,
headers)
@translate_client_exception
def delete_container(self, container_name):
return self.swift.delete_container(self.account, container_name)
def parse_qs(self, query_string):
if query_string is not None:
return {k: v[-1] for k, v in parse_qs(query_string).items()}
@translate_client_exception
def put_object(self, container_name, object_name, headers, contents,
query_string=None):
return self.swift.upload_object(BytesIO(contents), self.account,
container_name, object_name,
headers=headers,
params=self.parse_qs(query_string))
@translate_client_exception
def delete_object(self, container_name, object_name):
return self.swift.delete_object(
self.account, container_name, object_name)
@translate_client_exception
def head_object(self, container_name, object_name):
return self.swift.get_object_metadata(
self.account, container_name, object_name)
@translate_client_exception
def get_object(self, container_name, object_name, query_string=None):
status, headers, resp_iter = self.swift.get_object(
self.account, container_name, object_name,
params=self.parse_qs(query_string))
return headers, b''.join(resp_iter)
class BrainSplitter(BaseBrain):
def __init__(self, url, token, container_name='test', object_name='test',
server_type='container', policy=None):
self.client = PublicBrainClient(url, token)
self._setup(self.client.account, container_name, object_name,
server_type, policy)
class InternalBrainSplitter(BaseBrain):
def __init__(self, conf, container_name='test', object_name='test',
server_type='container', policy=None):
self.client = InternalBrainClient(conf)
self._setup(self.client.account, container_name, object_name,
server_type, policy)
parser = OptionParser('%prog [options] '
'<command>[:<args>[,<args>...]] [<command>...]')
parser.usage += '\n\nCommands:\n\t' + \
'\n\t'.join("%s - %s" % (name, doc) for name, doc in
BrainSplitter.__docs__.items())
parser.add_option('-c', '--container', default='container-%s' % uuid.uuid4(),
help='set container name')
parser.add_option('-o', '--object', default='object-%s' % uuid.uuid4(),
help='set object name')
parser.add_option('-s', '--server_type', default='container',
help='set server type')
parser.add_option('-P', '--policy_name', default=None,
help='set policy')
def main():
options, commands = parser.parse_args()
if not commands:
parser.print_help()
return 'ERROR: must specify at least one command'
for cmd_args in commands:
cmd = cmd_args.split(':', 1)[0]
if cmd not in BrainSplitter.__commands__:
parser.print_help()
return 'ERROR: unknown command %s' % cmd
url, token = get_auth(PROXY_BASE_URL + '/auth/v1.0',
'test:tester', 'testing')
if options.server_type == 'object' and not options.policy_name:
options.policy_name = POLICIES.default.name
if options.policy_name:
options.server_type = 'object'
policy = POLICIES.get_by_name(options.policy_name)
if not policy:
return 'ERROR: unknown policy %r' % options.policy_name
else:
policy = None
brain = BrainSplitter(url, token, options.container, options.object,
options.server_type, policy=policy)
for cmd_args in commands:
parts = cmd_args.split(':', 1)
command = parts[0]
if len(parts) > 1:
args = utils.list_from_csv(parts[1])
else:
args = ()
try:
brain.run(command, *args)
except ClientException as e:
print('**WARNING**: %s raised %s' % (command, e))
print('STATUS'.join(['*' * 25] * 2))
brain.servers.status()
sys.exit()
if __name__ == "__main__":
sys.exit(main())
| swift-master | test/probe/brain.py |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from unittest import main
from uuid import uuid4
from eventlet import GreenPool, Timeout
import eventlet
from sqlite3 import connect
from swift.common.manager import Manager
from swiftclient import client
from swift.common import direct_client
from swift.common.exceptions import ClientException
from swift.common.utils import readconf
from test.probe.common import kill_nonprimary_server, \
kill_server, ReplProbeTest, start_server
eventlet.monkey_patch(all=False, socket=True)
class TestContainerFailures(ReplProbeTest):
def test_one_node_fails(self):
# Create container1
container1 = 'container-%s' % uuid4()
cpart, cnodes = self.container_ring.get_nodes(self.account, container1)
client.put_container(self.url, self.token, container1)
# Kill container1 servers excepting two of the primaries
kill_nonprimary_server(cnodes, self.ipport2server)
kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
# Delete container1
client.delete_container(self.url, self.token, container1)
# Restart other container1 primary server
start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        # Create container1/object1 (allowed because at least one server
        # thinks the container exists)
client.put_object(self.url, self.token, container1, 'object1', '123')
# Get to a final state
self.get_to_final_state()
# Assert all container1 servers indicate container1 is alive and
# well with object1
for cnode in cnodes:
self.assertEqual(
[o['name'] for o in direct_client.direct_get_container(
cnode, cpart, self.account, container1)[1]],
['object1'])
# Assert account level also indicates container1 is alive and
# well with object1
headers, containers = client.get_account(self.url, self.token)
self.assertEqual(headers['x-account-container-count'], '1')
self.assertEqual(headers['x-account-object-count'], '1')
self.assertEqual(headers['x-account-bytes-used'], '3')
def test_metadata_replicated_with_no_timestamp_update(self):
self.maxDiff = None
# Create container1
container1 = 'container-%s' % uuid4()
cpart, cnodes = self.container_ring.get_nodes(self.account, container1)
client.put_container(self.url, self.token, container1)
Manager(['container-replicator']).once()
exp_hdrs = None
for cnode in cnodes:
hdrs = direct_client.direct_head_container(
cnode, cpart, self.account, container1)
hdrs.pop('Date')
if exp_hdrs:
self.assertEqual(exp_hdrs, hdrs)
exp_hdrs = hdrs
self.assertIsNotNone(exp_hdrs)
self.assertIn('Last-Modified', exp_hdrs)
put_time = float(exp_hdrs['X-Backend-Put-Timestamp'])
# Post to only one replica of container1 at least 1 second after the
# put (to reveal any unexpected change in Last-Modified which is
# rounded to seconds)
time.sleep(put_time + 1 - time.time())
post_hdrs = {'x-container-meta-foo': 'bar',
'x-backend-no-timestamp-update': 'true'}
direct_client.direct_post_container(
cnodes[1], cpart, self.account, container1, headers=post_hdrs)
# verify that put_timestamp was not modified
exp_hdrs.update({'x-container-meta-foo': 'bar'})
hdrs = direct_client.direct_head_container(
cnodes[1], cpart, self.account, container1)
hdrs.pop('Date')
self.assertDictEqual(exp_hdrs, hdrs)
# Get to a final state
Manager(['container-replicator']).once()
# Assert all container1 servers have consistent metadata
for cnode in cnodes:
hdrs = direct_client.direct_head_container(
cnode, cpart, self.account, container1)
hdrs.pop('Date')
self.assertDictEqual(exp_hdrs, hdrs)
# sanity check: verify the put_timestamp is modified without
# x-backend-no-timestamp-update
post_hdrs = {'x-container-meta-foo': 'baz'}
exp_hdrs.update({'x-container-meta-foo': 'baz'})
direct_client.direct_post_container(
cnodes[1], cpart, self.account, container1, headers=post_hdrs)
# verify that put_timestamp was modified
hdrs = direct_client.direct_head_container(
cnodes[1], cpart, self.account, container1)
self.assertLess(exp_hdrs['x-backend-put-timestamp'],
hdrs['x-backend-put-timestamp'])
self.assertNotEqual(exp_hdrs['last-modified'], hdrs['last-modified'])
hdrs.pop('Date')
for key in ('x-backend-put-timestamp',
'x-put-timestamp',
'last-modified'):
self.assertNotEqual(exp_hdrs[key], hdrs[key])
exp_hdrs.pop(key)
hdrs.pop(key)
self.assertDictEqual(exp_hdrs, hdrs)
def test_two_nodes_fail(self):
# Create container1
container1 = 'container-%s' % uuid4()
cpart, cnodes = self.container_ring.get_nodes(self.account, container1)
client.put_container(self.url, self.token, container1)
# Kill container1 servers excepting one of the primaries
cnp_ipport = kill_nonprimary_server(cnodes, self.ipport2server)
kill_server((cnodes[0]['ip'], cnodes[0]['port']),
self.ipport2server)
kill_server((cnodes[1]['ip'], cnodes[1]['port']),
self.ipport2server)
# Delete container1 directly to the one primary still up
direct_client.direct_delete_container(cnodes[2], cpart, self.account,
container1)
# Restart other container1 servers
start_server((cnodes[0]['ip'], cnodes[0]['port']),
self.ipport2server)
start_server((cnodes[1]['ip'], cnodes[1]['port']),
self.ipport2server)
start_server(cnp_ipport, self.ipport2server)
# Get to a final state
self.get_to_final_state()
# Assert all container1 servers indicate container1 is gone (happens
# because the one node that knew about the delete replicated to the
# others.)
for cnode in cnodes:
try:
direct_client.direct_get_container(cnode, cpart, self.account,
container1)
except ClientException as err:
self.assertEqual(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
# Assert account level also indicates container1 is gone
headers, containers = client.get_account(self.url, self.token)
self.assertEqual(headers['x-account-container-count'], '0')
self.assertEqual(headers['x-account-object-count'], '0')
self.assertEqual(headers['x-account-bytes-used'], '0')
def test_all_nodes_fail(self):
# Create container1
container1 = 'container-%s' % uuid4()
cpart, cnodes = self.container_ring.get_nodes(self.account, container1)
client.put_container(self.url, self.token, container1)
client.put_object(self.url, self.token, container1, 'obj1', 'data1')
# All primaries go down
for cnode in cnodes:
kill_server((cnode['ip'], cnode['port']), self.ipport2server)
# Can't GET the container
with self.assertRaises(client.ClientException) as caught:
client.get_container(self.url, self.token, container1)
self.assertEqual(caught.exception.http_status, 503)
# But we can still write objects! The old info is still in memcache
client.put_object(self.url, self.token, container1, 'obj2', 'data2')
# Can't POST the container, either
with self.assertRaises(client.ClientException) as caught:
client.post_container(self.url, self.token, container1, {})
self.assertEqual(caught.exception.http_status, 503)
# Though it *does* evict the cache
with self.assertRaises(client.ClientException) as caught:
client.put_object(self.url, self.token, container1, 'obj3', 'x')
self.assertEqual(caught.exception.http_status, 503)
def test_locked_container_dbs(self):
def run_test(num_locks, catch_503):
container = 'container-%s' % uuid4()
client.put_container(self.url, self.token, container)
# Get the container info into memcache (so no stray
# get_container_info calls muck up our timings)
client.get_container(self.url, self.token, container)
db_files = self.get_container_db_files(container)
db_conns = []
for i in range(num_locks):
db_conn = connect(db_files[i])
db_conn.execute('begin exclusive transaction')
db_conns.append(db_conn)
if catch_503:
try:
client.delete_container(self.url, self.token, container)
except client.ClientException as err:
self.assertEqual(err.http_status, 503)
else:
self.fail("Expected ClientException but didn't get it")
else:
client.delete_container(self.url, self.token, container)
proxy_conf = readconf(self.configs['proxy-server'],
section_name='app:proxy-server')
node_timeout = int(proxy_conf.get('node_timeout', 10))
pool = GreenPool()
try:
with Timeout(node_timeout + 5):
pool.spawn(run_test, 1, False)
pool.spawn(run_test, 2, True)
pool.spawn(run_test, 3, True)
pool.waitall()
except Timeout as err:
raise Exception(
"The server did not return a 503 on container db locks, "
"it just hangs: %s" % err)
if __name__ == '__main__':
main()
| swift-master | test/probe/test_container_failures.py |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from contextlib import contextmanager
import unittest
import uuid
import random
import time
import six
from swift.common.direct_client import DirectClientException
from swift.common.manager import Manager
from swift.common.utils import md5
from swift.obj.reconstructor import ObjectReconstructor
from test.probe.common import ECProbeTest
from swift.common import direct_client
from swiftclient import client, ClientException
class Body(object):
def __init__(self, total=3.5 * 2 ** 20):
self.total = int(total)
self.hasher = md5(usedforsecurity=False)
self.size = 0
self.chunk = b'test' * 16 * 2 ** 10
@property
def etag(self):
return self.hasher.hexdigest()
def __iter__(self):
return self
def __next__(self):
if self.size > self.total:
raise StopIteration()
self.size += len(self.chunk)
self.hasher.update(self.chunk)
return self.chunk
# for py2 compat
next = __next__
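# Illustrative sketch only (an assumption, not used by the tests below): Body
# yields fixed 64 KiB chunks until roughly ``total`` bytes have been
# produced, hashing them as it goes so that ``etag`` matches everything
# yielded:
#
#     body = Body(total=2 ** 20)
#     data = b''.join(body)
#     assert body.etag == md5(data, usedforsecurity=False).hexdigest()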
class TestReconstructorRebuild(ECProbeTest):
def setUp(self):
super(TestReconstructorRebuild, self).setUp()
# create EC container
headers = {'X-Storage-Policy': self.policy.name}
client.put_container(self.url, self.token, self.container_name,
headers=headers)
# PUT object and POST some metadata
self.proxy_put()
self.headers_post = {
self._make_name('x-object-meta-').decode('utf8'):
self._make_name('meta-bar-').decode('utf8')}
client.post_object(self.url, self.token, self.container_name,
self.object_name, headers=dict(self.headers_post))
self.opart, self.onodes = self.object_ring.get_nodes(
self.account, self.container_name, self.object_name)
# stash frag etags and metadata for later comparison
self.frag_headers, self.frag_etags = self._assert_all_nodes_have_frag()
for node_index, hdrs in self.frag_headers.items():
# sanity check
self.assertIn(
'X-Backend-Durable-Timestamp', hdrs,
'Missing durable timestamp in %r' % self.frag_headers)
def _format_node(self, node):
return '%s#%s' % (node['device'], node['index'])
def _assert_all_nodes_have_frag(self, extra_headers=None):
# check all frags are in place
failures = []
frag_etags = {}
frag_headers = {}
for node in self.onodes:
try:
headers, etag = self.direct_get(node, self.opart,
extra_headers=extra_headers)
frag_etags[node['index']] = etag
del headers['Date'] # Date header will vary so remove it
frag_headers[node['index']] = headers
except direct_client.DirectClientException as err:
failures.append((node, err))
if failures:
self.fail('\n'.join([' Node %r raised %r' %
(self._format_node(node), exc)
for (node, exc) in failures]))
return frag_headers, frag_etags
@contextmanager
def _annotate_failure_with_scenario(self, failed, non_durable):
try:
yield
except (AssertionError, ClientException) as err:
self.fail(
'Scenario with failed nodes: %r, non-durable nodes: %r\n'
' failed with:\n%s' %
([self._format_node(self.onodes[n]) for n in failed],
[self._format_node(self.onodes[n]) for n in non_durable], err)
)
def _test_rebuild_scenario(self, failed, non_durable,
reconstructor_cycles):
# helper method to test a scenario with some nodes missing their
# fragment and some nodes having non-durable fragments
with self._annotate_failure_with_scenario(failed, non_durable):
self.break_nodes(self.onodes, self.opart, failed, non_durable)
# make sure we can still GET the object and it is correct; the
# proxy is doing decode on remaining fragments to get the obj
with self._annotate_failure_with_scenario(failed, non_durable):
headers, etag = self.proxy_get()
self.assertEqual(self.etag, etag)
for key in self.headers_post:
self.assertIn(key, headers)
self.assertEqual(self.headers_post[key], headers[key])
# fire up reconstructor
for i in range(reconstructor_cycles):
self.reconstructor.once()
# check GET via proxy returns expected data and metadata
with self._annotate_failure_with_scenario(failed, non_durable):
headers, etag = self.proxy_get()
self.assertEqual(self.etag, etag)
for key in self.headers_post:
self.assertIn(key, headers)
self.assertEqual(self.headers_post[key], headers[key])
# check all frags are intact, durable and have expected metadata
with self._annotate_failure_with_scenario(failed, non_durable):
frag_headers, frag_etags = self._assert_all_nodes_have_frag()
self.assertEqual(self.frag_etags, frag_etags)
# self.frag_headers includes X-Backend-Durable-Timestamp so this
# assertion confirms that the rebuilt frags are all durable
self.assertEqual(self.frag_headers, frag_headers)
def test_rebuild_missing_frags(self):
# build up a list of node lists to kill data from,
# first try a single node
# then adjacent nodes and then nodes >1 node apart
single_node = (random.randint(0, 5),)
adj_nodes = (0, 5)
far_nodes = (0, 4)
for failed_nodes in [single_node, adj_nodes, far_nodes]:
self._test_rebuild_scenario(failed_nodes, [], 1)
def test_rebuild_non_durable_frags(self):
# build up a list of node lists to make non-durable,
# first try a single node
# then adjacent nodes and then nodes >1 node apart
single_node = (random.randint(0, 5),)
adj_nodes = (0, 5)
far_nodes = (0, 4)
for non_durable_nodes in [single_node, adj_nodes, far_nodes]:
self._test_rebuild_scenario([], non_durable_nodes, 1)
def test_rebuild_with_missing_frags_and_non_durable_frags(self):
# pick some nodes with parts deleted, some with non-durable fragments
scenarios = [
# failed, non-durable
((0, 2), (4,)),
((0, 4), (2,)),
]
for failed, non_durable in scenarios:
self._test_rebuild_scenario(failed, non_durable, 3)
scenarios = [
# failed, non-durable
((0, 1), (2,)),
((0, 2), (1,)),
]
for failed, non_durable in scenarios:
# why 2 repeats? consider missing fragment on nodes 0, 1 and
# missing durable on node 2: first reconstructor cycle on node 3
# will make node 2 durable, first cycle on node 5 will rebuild on
# node 0; second cycle on node 0 or 2 will rebuild on node 1. Note
# that it is possible that the reconstructor processes on each node
# run in an order such that all rebuilds complete in one cycle, but
# that is not guaranteed, so we allow 2 cycles to be sure.
self._test_rebuild_scenario(failed, non_durable, 2)
scenarios = [
# failed, non-durable
((0, 2), (1, 3, 5)),
((0,), (1, 2, 4, 5)),
]
for failed, non_durable in scenarios:
# why 3 repeats? consider missing fragment on node 0 and single
# durable on node 3: first reconstructor cycle on node 3 will make
# nodes 2 and 4 durable, second cycle on nodes 2 and 4 will make
# nodes 1 and 5 durable, third cycle on nodes 1 or 5 will
# reconstruct the missing fragment on node 0.
self._test_rebuild_scenario(failed, non_durable, 3)
def test_rebuild_partner_down(self):
# we have to pick a lower index because we have few handoffs
nodes = self.onodes[:2]
random.shuffle(nodes) # left or right is fine
primary_node, partner_node = nodes
# capture fragment etag from partner
failed_partner_meta, failed_partner_etag = self.direct_get(
partner_node, self.opart)
# and 507 the failed partner device
device_path = self.device_dir(partner_node)
self.kill_drive(device_path)
# reconstruct from the primary, while one of its partners is 507'd
self.reconstructor.once(number=self.config_number(primary_node))
# a handoff will pick up the rebuild
hnodes = list(self.object_ring.get_more_nodes(self.opart))
for node in hnodes:
try:
found_meta, found_etag = self.direct_get(
node, self.opart)
except DirectClientException as e:
if e.http_status != 404:
raise
else:
break
else:
self.fail('Unable to fetch rebuilt frag from handoffs %r '
'given primary nodes %r with %s unmounted '
'trying to rebuild from %s' % (
[h['device'] for h in hnodes],
[n['device'] for n in self.onodes],
partner_node['device'],
primary_node['device'],
))
self.assertEqual(failed_partner_etag, found_etag)
del failed_partner_meta['Date']
del found_meta['Date']
self.assertEqual(failed_partner_meta, found_meta)
# just to be nice
self.revive_drive(device_path)
def test_sync_expired_object(self):
# verify that missing frag can be rebuilt for an expired object
delete_after = 2
self.proxy_put(extra_headers={'x-delete-after': delete_after})
self.proxy_get() # sanity check
orig_frag_headers, orig_frag_etags = self._assert_all_nodes_have_frag(
extra_headers={'X-Backend-Replication': 'True'})
# wait for object to expire
timeout = time.time() + delete_after + 1
while time.time() < timeout:
try:
self.proxy_get()
except ClientException as e:
if e.http_status == 404:
break
else:
raise
else:
self.fail('Timed out waiting for %s/%s to expire after %ss' % (
self.container_name, self.object_name, delete_after))
# sanity check - X-Backend-Replication lets us get the expired frag...
fail_node = random.choice(self.onodes)
self.assert_direct_get_succeeds(
fail_node, self.opart,
extra_headers={'X-Backend-Replication': 'True'})
# ...until we remove the frag from fail_node
self.break_nodes(
self.onodes, self.opart, [self.onodes.index(fail_node)], [])
# ...now it's really gone
with self.assertRaises(DirectClientException) as cm:
self.direct_get(fail_node, self.opart,
extra_headers={'X-Backend-Replication': 'True'})
self.assertEqual(404, cm.exception.http_status)
self.assertNotIn('X-Backend-Timestamp', cm.exception.http_headers)
# run the reconstructor
self.reconstructor.once()
# the missing frag is now in place but expired
with self.assertRaises(DirectClientException) as cm:
self.direct_get(fail_node, self.opart)
self.assertEqual(404, cm.exception.http_status)
self.assertIn('X-Backend-Timestamp', cm.exception.http_headers)
# check all frags are intact, durable and have expected metadata
frag_headers, frag_etags = self._assert_all_nodes_have_frag(
extra_headers={'X-Backend-Replication': 'True'})
self.assertEqual(orig_frag_etags, frag_etags)
self.maxDiff = None
self.assertEqual(orig_frag_headers, frag_headers)
def test_sync_unexpired_object_metadata(self):
# verify that metadata can be sync'd to a frag that has missed a POST
# and consequently appears to be expired, when in fact the POST
# removed the x-delete-at header
client.put_container(self.url, self.token, self.container_name,
headers={'x-storage-policy': self.policy.name})
opart, onodes = self.object_ring.get_nodes(
self.account, self.container_name, self.object_name)
delete_at = int(time.time() + 3)
contents = ('body-%s' % uuid.uuid4()).encode()
headers = {'x-delete-at': delete_at}
client.put_object(self.url, self.token, self.container_name,
self.object_name, headers=headers, contents=contents)
# fail a primary
post_fail_node = random.choice(onodes)
post_fail_path = self.device_dir(post_fail_node)
self.kill_drive(post_fail_path)
# post over w/o x-delete-at
client.post_object(self.url, self.token, self.container_name,
self.object_name, {'content-type': 'something-new'})
# revive failed primary
self.revive_drive(post_fail_path)
# wait for the delete_at to pass, and check that it thinks the object
# is expired
timeout = time.time() + 5
err = None
while time.time() < timeout:
try:
direct_client.direct_head_object(
post_fail_node, opart, self.account, self.container_name,
self.object_name, headers={
'X-Backend-Storage-Policy-Index': int(self.policy)})
except direct_client.ClientException as client_err:
if client_err.http_status != 404:
raise
err = client_err
break
else:
time.sleep(0.1)
else:
self.fail('Failed to get a 404 from node with expired object')
self.assertEqual(err.http_status, 404)
self.assertIn('X-Backend-Timestamp', err.http_headers)
# but from the proxy we've got the whole story
headers, body = client.get_object(self.url, self.token,
self.container_name,
self.object_name)
self.assertNotIn('X-Delete-At', headers)
self.reconstructor.once()
# ... and all the nodes have the final unexpired state
for node in onodes:
headers = direct_client.direct_head_object(
node, opart, self.account, self.container_name,
self.object_name, headers={
'X-Backend-Storage-Policy-Index': int(self.policy)})
self.assertNotIn('X-Delete-At', headers)
def test_rebuild_quarantines_lonely_frag(self):
# fail one device while the object is deleted so we are left with one
# fragment and some tombstones
failed_node = self.onodes[0]
device_path = self.device_dir(failed_node)
self.kill_drive(device_path)
self.assert_direct_get_fails(failed_node, self.opart, 507) # sanity
# delete object
client.delete_object(self.url, self.token, self.container_name,
self.object_name)
# check we have tombstones
for node in self.onodes[1:]:
err = self.assert_direct_get_fails(node, self.opart, 404)
self.assertIn('X-Backend-Timestamp', err.http_headers)
# run the reconstructor with zero reclaim age to clean up tombstones
for conf_index in self.configs['object-reconstructor'].keys():
self.run_custom_daemon(
ObjectReconstructor, 'object-reconstructor', conf_index,
{'reclaim_age': '0'})
# check we no longer have tombstones
for node in self.onodes[1:]:
err = self.assert_direct_get_fails(node, self.opart, 404)
self.assertNotIn('X-Timestamp', err.http_headers)
# revive the failed device and check it has a fragment
self.revive_drive(device_path)
self.assert_direct_get_succeeds(failed_node, self.opart)
# restart proxy to clear error-limiting so that the revived drive
# participates again
Manager(['proxy-server']).restart()
# client GET will fail with 503 ...
with self.assertRaises(ClientException) as cm:
client.get_object(self.url, self.token, self.container_name,
self.object_name)
self.assertEqual(503, cm.exception.http_status)
# ... but client HEAD succeeds
headers = client.head_object(self.url, self.token, self.container_name,
self.object_name)
for key in self.headers_post:
self.assertIn(key, headers)
self.assertEqual(self.headers_post[key], headers[key])
# run the reconstructor without quarantine_threshold set
error_lines = []
warning_lines = []
for conf_index in self.configs['object-reconstructor'].keys():
reconstructor = self.run_custom_daemon(
ObjectReconstructor, 'object-reconstructor', conf_index,
{'quarantine_age': '0'})
logger = reconstructor.logger.logger
error_lines.append(logger.get_lines_for_level('error'))
warning_lines.append(logger.get_lines_for_level('warning'))
# check logs for errors
found_lines = False
for lines in error_lines:
if not lines:
continue
self.assertFalse(found_lines, error_lines)
found_lines = True
for line in itertools.islice(lines, 0, 6, 2):
self.assertIn(
'Unable to get enough responses (1/4 from 1 ok '
'responses)', line, lines)
for line in itertools.islice(lines, 1, 7, 2):
self.assertIn(
'Unable to get enough responses (4 x 404 error '
'responses)', line, lines)
self.assertTrue(found_lines, 'error lines not found')
for lines in warning_lines:
self.assertEqual([], lines)
# check we still have a single fragment and no tombstones
self.assert_direct_get_succeeds(failed_node, self.opart)
for node in self.onodes[1:]:
err = self.assert_direct_get_fails(node, self.opart, 404)
self.assertNotIn('X-Timestamp', err.http_headers)
# run the reconstructor to quarantine the lonely frag
error_lines = []
warning_lines = []
for conf_index in self.configs['object-reconstructor'].keys():
reconstructor = self.run_custom_daemon(
ObjectReconstructor, 'object-reconstructor', conf_index,
{'quarantine_age': '0', 'quarantine_threshold': '1'})
logger = reconstructor.logger.logger
error_lines.append(logger.get_lines_for_level('error'))
warning_lines.append(logger.get_lines_for_level('warning'))
# check logs for errors
found_lines = False
for index, lines in enumerate(error_lines):
if not lines:
continue
self.assertFalse(found_lines, error_lines)
found_lines = True
for line in itertools.islice(lines, 0, 6, 2):
self.assertIn(
'Unable to get enough responses (1/4 from 1 ok '
'responses)', line, lines)
for line in itertools.islice(lines, 1, 7, 2):
self.assertIn(
'Unable to get enough responses (6 x 404 error '
'responses)', line, lines)
self.assertTrue(found_lines, 'error lines not found')
# check logs for quarantine warning
found_lines = False
for lines in warning_lines:
if not lines:
continue
self.assertFalse(found_lines, warning_lines)
found_lines = True
self.assertEqual(1, len(lines), lines)
self.assertIn('Quarantined object', lines[0])
self.assertTrue(found_lines, 'warning lines not found')
# check we have nothing
for node in self.onodes:
err = self.assert_direct_get_fails(node, self.opart, 404)
self.assertNotIn('X-Backend-Timestamp', err.http_headers)
# client HEAD and GET now both 404
with self.assertRaises(ClientException) as cm:
client.get_object(self.url, self.token, self.container_name,
self.object_name)
self.assertEqual(404, cm.exception.http_status)
with self.assertRaises(ClientException) as cm:
client.head_object(self.url, self.token, self.container_name,
self.object_name)
self.assertEqual(404, cm.exception.http_status)
# run the reconstructor once more - should see no errors in logs!
error_lines = []
warning_lines = []
for conf_index in self.configs['object-reconstructor'].keys():
reconstructor = self.run_custom_daemon(
ObjectReconstructor, 'object-reconstructor', conf_index,
{'quarantine_age': '0', 'quarantine_threshold': '1'})
logger = reconstructor.logger.logger
error_lines.append(logger.get_lines_for_level('error'))
warning_lines.append(logger.get_lines_for_level('warning'))
for lines in error_lines:
self.assertEqual([], lines)
for lines in warning_lines:
self.assertEqual([], lines)
if six.PY2:
# The non-ASCII chars in metadata cause test hangs in
# _assert_all_nodes_have_frag because of https://bugs.python.org/issue37093
class TestReconstructorRebuildUTF8(TestReconstructorRebuild):
def _make_name(self, prefix):
return b'%s\xc3\xa8-%s' % (
prefix.encode(), str(uuid.uuid4()).encode())
if __name__ == "__main__":
unittest.main()
| swift-master | test/probe/test_reconstructor_rebuild.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import errno
import gc
import json
import mock
import os
from subprocess import Popen, PIPE
import sys
from tempfile import mkdtemp
from textwrap import dedent
from time import sleep, time
from collections import defaultdict
import unittest
from uuid import uuid4
import shutil
import six
from six.moves.http_client import HTTPConnection
from six.moves.urllib.parse import urlparse
from swiftclient import get_auth, head_account, client
from swift.common import internal_client, direct_client, utils
from swift.common.direct_client import DirectClientException
from swift.common.ring import Ring
from swift.common.utils import hash_path, md5, \
readconf, renamer, rsync_module_interpolation
from swift.common.manager import Manager
from swift.common.storage_policy import POLICIES, EC_POLICY, REPL_POLICY
from swift.obj.diskfile import get_data_dir
from test.debug_logger import capture_logger
from test.probe import CHECK_SERVER_TIMEOUT, VALIDATE_RSYNC, PROXY_BASE_URL
ENABLED_POLICIES = [p for p in POLICIES if not p.is_deprecated]
POLICIES_BY_TYPE = defaultdict(list)
for p in POLICIES:
POLICIES_BY_TYPE[p.policy_type].append(p)
def get_server_number(ipport, ipport2server):
server_number = ipport2server[ipport]
server, number = server_number[:-1], server_number[-1:]
try:
number = int(number)
except ValueError:
# probably the proxy
return server_number, None
return server, number
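# e.g. an ipport2server entry of ('127.0.0.1', 6010) -> 'object1' maps to
# ('object', 1), while the 'proxy' entry maps to ('proxy', None)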
def start_server(ipport, ipport2server):
server, number = get_server_number(ipport, ipport2server)
err = Manager([server]).start(number=number, wait=True)
if err:
raise Exception('unable to start %s' % (
server if not number else '%s%s' % (server, number)))
return check_server(ipport, ipport2server)
def _check_storage(ipport, path):
conn = HTTPConnection(*ipport)
conn.request('GET', path)
resp = conn.getresponse()
# 404 because it's a nonsense path (and mount_check is false)
# 507 in case the test target is a VM using mount_check
if resp.status not in (404, 507):
raise Exception(
'Unexpected status %s' % resp.status)
return resp
def _check_proxy(user, key):
url, token = get_auth(PROXY_BASE_URL + '/auth/v1.0',
user, key)
account = url.split('/')[-1]
head_account(url, token)
return url, token, account
def _retry_timeout(f, args=None, kwargs=None, timeout=CHECK_SERVER_TIMEOUT):
args = args or ()
kwargs = kwargs or {}
try_until = time() + timeout
while True:
try:
return f(*args, **kwargs)
except Exception as err:
if time() > try_until:
print(err)
fsignature = '%s(*%r, **%r)' % (f.__name__, args, kwargs)
print('Giving up on %s after %s seconds.' % (
fsignature, timeout))
raise err
sleep(0.1)
def check_server(ipport, ipport2server):
server = ipport2server[ipport]
if server[:-1] in ('account', 'container', 'object'):
if int(server[-1]) > 4:
return None
path = '/connect/1/2'
if server[:-1] == 'container':
path += '/3'
elif server[:-1] == 'object':
path += '/3/4'
rv = _retry_timeout(_check_storage, args=(ipport, path))
else:
rv = _retry_timeout(_check_proxy, args=(
'test:tester', 'testing'))
return rv
def kill_server(ipport, ipport2server):
server, number = get_server_number(ipport, ipport2server)
err = Manager([server]).kill(number=number)
if err:
raise Exception('unable to kill %s' % (server if not number else
'%s%s' % (server, number)))
return wait_for_server_to_hangup(ipport)
def wait_for_server_to_hangup(ipport):
try_until = time() + 30
while True:
try:
conn = HTTPConnection(*ipport)
conn.request('GET', '/')
conn.getresponse()
except Exception:
break
if time() > try_until:
raise Exception(
'Still answering on %s:%s after 30 seconds' % ipport)
sleep(0.1)
def kill_nonprimary_server(primary_nodes, ipport2server):
primary_ipports = [(n['ip'], n['port']) for n in primary_nodes]
for ipport, server in ipport2server.items():
if ipport in primary_ipports:
server_type = server[:-1]
break
else:
raise Exception('Cannot figure out server type for %r' % primary_nodes)
for ipport, server in list(ipport2server.items()):
if server[:-1] == server_type and ipport not in primary_ipports:
kill_server(ipport, ipport2server)
return ipport
def add_ring_devs_to_ipport2server(ring, server_type, ipport2server,
servers_per_port=0):
# We'll number the servers by order of unique occurrence of:
# IP, if servers_per_port > 0 OR there is > 1 IP in the ring
# ipport, otherwise
unique_ip_count = len({dev['ip'] for dev in ring.devs if dev})
things_to_number = {}
number = 0
for dev in filter(None, ring.devs):
ip = dev['ip']
ipport = (ip, dev['port'])
unique_by = ip if servers_per_port or unique_ip_count > 1 else ipport
if unique_by not in things_to_number:
number += 1
things_to_number[unique_by] = number
ipport2server[ipport] = '%s%d' % (server_type,
things_to_number[unique_by])
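# e.g. on a single-IP SAIO (hypothetical ports shown) this leaves
# ipport2server looking like {('127.0.0.1', 6010): 'object1',
# ('127.0.0.1', 6020): 'object2', ...}; with servers_per_port the
# numbering is keyed per unique IP instead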
def store_config_paths(name, configs):
server_names = [name, '%s-replicator' % name]
if name == 'container':
server_names.append('container-sharder')
elif name == 'object':
server_names.append('object-reconstructor')
for server_name in server_names:
for server in Manager([server_name]):
for i, conf in enumerate(server.conf_files(), 1):
configs[server.server][i] = conf
def get_ring(ring_name, required_replicas, required_devices,
server=None, force_validate=None, ipport2server=None,
config_paths=None):
if not server:
server = ring_name
ring = Ring('/etc/swift', ring_name=ring_name)
if ipport2server is None:
ipport2server = {} # used internally, even if not passed in
if config_paths is None:
config_paths = defaultdict(dict)
store_config_paths(server, config_paths)
repl_name = '%s-replicator' % server
repl_configs = {i: readconf(c, section_name=repl_name)
for i, c in config_paths[repl_name].items()}
servers_per_port = any(int(c.get('servers_per_port', '0'))
for c in repl_configs.values())
add_ring_devs_to_ipport2server(ring, server, ipport2server,
servers_per_port=servers_per_port)
if not VALIDATE_RSYNC and not force_validate:
return ring
# easy sanity checks
if ring.replica_count != required_replicas:
raise unittest.SkipTest('%s has %s replicas instead of %s' % (
ring.serialized_path, ring.replica_count, required_replicas))
devs = [dev for dev in ring.devs if dev is not None]
if len(devs) != required_devices:
raise unittest.SkipTest('%s has %s devices instead of %s' % (
ring.serialized_path, len(devs), required_devices))
for dev in devs:
# verify server is exposing mounted device
ipport = (dev['ip'], dev['port'])
_, server_number = get_server_number(ipport, ipport2server)
conf = repl_configs[server_number]
for device in os.listdir(conf['devices']):
if device == dev['device']:
dev_path = os.path.join(conf['devices'], device)
full_path = os.path.realpath(dev_path)
if not os.path.exists(full_path):
raise unittest.SkipTest(
'device %s in %s was not found (%s)' %
(device, conf['devices'], full_path))
break
else:
raise unittest.SkipTest(
"unable to find ring device %s under %s's devices (%s)" % (
dev['device'], server, conf['devices']))
# verify server is exposing rsync device
rsync_export = conf.get('rsync_module', '').rstrip('/')
if not rsync_export:
rsync_export = '{replication_ip}::%s' % server
cmd = "rsync %s" % rsync_module_interpolation(rsync_export, dev)
p = Popen(cmd, shell=True, stdout=PIPE)
stdout, _stderr = p.communicate()
if p.returncode:
raise unittest.SkipTest('unable to connect to rsync '
'export %s (%s)' % (rsync_export, cmd))
for line in stdout.decode().splitlines():
if line.rsplit(None, 1)[-1] == dev['device']:
break
else:
raise unittest.SkipTest("unable to find ring device %s under "
"rsync's exported devices for %s (%s)" %
(dev['device'], rsync_export, cmd))
return ring
def get_policy(**kwargs):
kwargs.setdefault('is_deprecated', False)
# go through the policies and make sure they match the
# requirements of kwargs
for policy in POLICIES:
# TODO: for EC, pop policy type here and check it first
matches = True
for key, value in kwargs.items():
try:
if getattr(policy, key) != value:
matches = False
except AttributeError:
matches = False
if matches:
return policy
raise unittest.SkipTest('No policy matching %s' % kwargs)
def run_cleanup(cmd):
p = Popen(cmd + " 2>&1", shell=True, stdout=PIPE)
stdout, _stderr = p.communicate()
if p.returncode:
raise AssertionError(
'Cleanup with %r failed: stdout: %s, stderr: %s'
% (cmd, stdout, _stderr))
print(stdout)
Manager(['all']).stop()
def resetswift():
run_cleanup("resetswift")
def kill_orphans():
run_cleanup("swift-orphans -a 0 -k 9")
class Body(object):
def __init__(self, total=3.5 * 2 ** 20):
self.length = int(total)
self.hasher = md5(usedforsecurity=False)
self.read_amount = 0
self.chunk = uuid4().hex.encode('ascii') * 2 ** 10
self.buff = b''
@property
def etag(self):
return self.hasher.hexdigest()
def __len__(self):
return self.length
def read(self, amount):
if len(self.buff) < amount:
try:
self.buff += next(self)
except StopIteration:
pass
rv, self.buff = self.buff[:amount], self.buff[amount:]
return rv
def __iter__(self):
return self
def __next__(self):
if self.buff:
rv, self.buff = self.buff, b''
return rv
if self.read_amount >= self.length:
raise StopIteration()
rv = self.chunk[:int(self.length - self.read_amount)]
self.read_amount += len(rv)
self.hasher.update(rv)
return rv
# for py2 compat:
next = __next__
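# A minimal usage sketch for Body (nothing beyond the class above is
# assumed): draining it yields exactly len(body) bytes and leaves
# body.etag equal to the md5 of everything read, e.g.
#   body = Body()
#   data = b''.join(iter(lambda: body.read(64 * 2 ** 10), b''))
#   assert len(data) == len(body)
#   assert body.etag == md5(data, usedforsecurity=False).hexdigest()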
def exclude_nodes(nodes, *excludes):
"""
Iterate over ``nodes`` yielding only those not in ``excludes``.
The index key of the node dicts is ignored when matching nodes against the
``excludes`` nodes. Index is not a fundamental property of a node but a
variable annotation added by the Ring depending upon the partition for
which the nodes were generated.
:param nodes: an iterable of node dicts.
:param *excludes: one or more node dicts that should not be yielded.
:return: yields node dicts.
"""
for node in nodes:
match_node = {k: mock.ANY if k == 'index' else v
for k, v in node.items()}
if any(exclude == match_node for exclude in excludes):
continue
yield node
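# e.g. list(exclude_nodes(onodes, onodes[0])) yields every primary except
# the first, regardless of the 'index' values the Ring annotated them with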
class ProbeTest(unittest.TestCase):
"""
Don't instantiate this directly, use a child class instead.
"""
def _load_rings_and_configs(self):
self.ipport2server = {}
self.configs = defaultdict(dict)
self.account_ring = get_ring(
'account',
self.acct_cont_required_replicas,
self.acct_cont_required_devices,
ipport2server=self.ipport2server,
config_paths=self.configs)
self.container_ring = get_ring(
'container',
self.acct_cont_required_replicas,
self.acct_cont_required_devices,
ipport2server=self.ipport2server,
config_paths=self.configs)
self.policy = get_policy(**self.policy_requirements)
self.object_ring = get_ring(
self.policy.ring_name,
self.obj_required_replicas,
self.obj_required_devices,
server='object',
ipport2server=self.ipport2server,
config_paths=self.configs)
for server in Manager(['proxy-server']):
for conf in server.conf_files():
self.configs['proxy-server'] = conf
def setUp(self):
# previous test may have left DatabaseBroker instances in garbage with
# open connections to db files which will prevent unmounting devices in
# resetswift, so collect garbage now
gc.collect()
resetswift()
kill_orphans()
self._load_rings_and_configs()
try:
self.servers_per_port = any(
int(readconf(c, section_name='object-replicator').get(
'servers_per_port', '0'))
for c in self.configs['object-replicator'].values())
Manager(['main']).start(wait=True)
for ipport in self.ipport2server:
check_server(ipport, self.ipport2server)
proxy_conf = readconf(self.configs['proxy-server'],
section_name='app:proxy-server')
proxy_ipport = (proxy_conf.get('bind_ip', '127.0.0.1'),
int(proxy_conf.get('bind_port', 8080)))
self.ipport2server[proxy_ipport] = 'proxy'
self.url, self.token, self.account = check_server(
proxy_ipport, self.ipport2server)
self.account_1 = {
'url': self.url, 'token': self.token, 'account': self.account}
rv = _retry_timeout(_check_proxy, args=(
'test2:tester2', 'testing2'))
self.account_2 = {
k: v for (k, v) in zip(('url', 'token', 'account'), rv)}
self.replicators = Manager(
['account-replicator', 'container-replicator',
'object-replicator'])
self.updaters = Manager(['container-updater', 'object-updater'])
except BaseException:
try:
raise
finally:
try:
Manager(['all']).kill()
except Exception:
pass
info_url = "%s://%s/info" % (urlparse(self.url).scheme,
urlparse(self.url).netloc)
proxy_conn = client.http_connection(info_url)
self.cluster_info = client.get_capabilities(proxy_conn)
def tearDown(self):
Manager(['all']).kill()
def assertLengthEqual(self, obj, length):
obj_len = len(obj)
self.assertEqual(obj_len, length, 'len(%r) == %d, not %d' % (
obj, obj_len, length))
def device_dir(self, node):
server_type, config_number = get_server_number(
(node['ip'], node['port']), self.ipport2server)
repl_server = '%s-replicator' % server_type
conf = readconf(self.configs[repl_server][config_number],
section_name=repl_server)
return os.path.join(conf['devices'], node['device'])
def storage_dir(self, node, part=None, policy=None):
policy = policy or self.policy
device_path = self.device_dir(node)
path_parts = [device_path, get_data_dir(policy)]
if part is not None:
path_parts.append(str(part))
return os.path.join(*path_parts)
def config_number(self, node):
_server_type, config_number = get_server_number(
(node['ip'], node['port']), self.ipport2server)
return config_number
def is_local_to(self, node1, node2):
"""
Return True if both ring devices are "local" to each other (on the same
"server".
"""
if self.servers_per_port:
return node1['ip'] == node2['ip']
# Without a disambiguating IP, for SAIOs, we have to assume ports
# uniquely identify "servers". SAIOs should be configured to *either*
# have unique IPs per node (e.g. 127.0.0.1, 127.0.0.2, etc.) OR unique
# ports per server (i.e. sdb1 & sdb5 would have the same port number in
# the 8-disk EC ring).
return node1['port'] == node2['port']
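# e.g. without servers_per_port, devices sharing port 6010 (such as sdb1
# and sdb5 in the 8-disk EC ring described above) are treated as local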
def get_to_final_state(self):
# these .stop()s are probably not strictly necessary,
# but may prevent race conditions
self.replicators.stop()
self.updaters.stop()
self.replicators.once()
self.updaters.once()
self.replicators.once()
def kill_drive(self, device):
if os.path.ismount(device):
os.system('sudo umount %s' % device)
else:
renamer(device, device + "X")
def revive_drive(self, device):
disabled_name = device + "X"
if os.path.isdir(disabled_name):
renamer(disabled_name, device)
else:
os.system('sudo mount %s' % device)
def make_internal_client(self):
tempdir = mkdtemp()
try:
conf_path = os.path.join(tempdir, 'internal_client.conf')
conf_body = """
[DEFAULT]
swift_dir = /etc/swift
[pipeline:main]
pipeline = catch_errors cache copy proxy-server
[app:proxy-server]
use = egg:swift#proxy
allow_account_management = True
[filter:copy]
use = egg:swift#copy
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
"""
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
return internal_client.InternalClient(conf_path, 'test', 1)
finally:
shutil.rmtree(tempdir)
def get_all_object_nodes(self):
"""
Returns a list of all nodes in all object storage policies.
:return: a list of node dicts.
"""
all_obj_nodes = {}
for policy in ENABLED_POLICIES:
for dev in policy.object_ring.devs:
all_obj_nodes[dev['device']] = dev
return all_obj_nodes.values()
def gather_async_pendings(self, onodes):
"""
Returns a list of paths to async pending files found on given nodes.
:param onodes: a list of nodes.
:return: a list of file paths.
"""
async_pendings = []
for onode in onodes:
device_dir = self.device_dir(onode)
for ap_pol_dir in os.listdir(device_dir):
if not ap_pol_dir.startswith('async_pending'):
# skip 'objects', 'containers', etc.
continue
async_pending_dir = os.path.join(device_dir, ap_pol_dir)
try:
ap_dirs = os.listdir(async_pending_dir)
except OSError as err:
if err.errno == errno.ENOENT:
pass
else:
raise
else:
for ap_dir in ap_dirs:
ap_dir_fullpath = os.path.join(
async_pending_dir, ap_dir)
async_pendings.extend([
os.path.join(ap_dir_fullpath, ent)
for ent in os.listdir(ap_dir_fullpath)])
return async_pendings
def run_custom_daemon(self, klass, conf_section, conf_index,
custom_conf, **kwargs):
conf_file = self.configs[conf_section][conf_index]
conf = utils.readconf(conf_file, conf_section)
conf.update(custom_conf)
# Use a CaptureLogAdapter in order to preserve the pattern of tests
# calling the log accessor methods (e.g. get_lines_for_level) directly
# on the logger instance
with capture_logger(conf, conf.get('log_name', conf_section),
log_to_console=kwargs.pop('verbose', False),
log_route=conf_section) as log_adapter:
daemon = klass(conf, log_adapter)
daemon.run_once(**kwargs)
return daemon
def _get_db_file_path(obj_dir):
files = sorted(os.listdir(obj_dir), reverse=True)
for filename in files:
if filename.endswith('db'):
return os.path.join(obj_dir, filename)
class ReplProbeTest(ProbeTest):
acct_cont_required_replicas = 3
acct_cont_required_devices = 4
obj_required_replicas = 3
obj_required_devices = 4
policy_requirements = {'policy_type': REPL_POLICY}
def direct_container_op(self, func, account=None, container=None,
expect_failure=False):
account = account if account else self.account
container = container if container else self.container_to_shard
cpart, cnodes = self.container_ring.get_nodes(account, container)
unexpected_responses = []
results = {}
for cnode in cnodes:
try:
results[cnode['id']] = func(cnode, cpart, account, container)
except DirectClientException as err:
if not expect_failure:
unexpected_responses.append((cnode, err))
else:
if expect_failure:
unexpected_responses.append((cnode, 'success'))
if unexpected_responses:
self.fail('Unexpected responses: %s' % unexpected_responses)
return results
def direct_delete_container(self, account=None, container=None,
expect_failure=False):
self.direct_container_op(direct_client.direct_delete_container,
account, container, expect_failure)
def direct_head_container(self, account=None, container=None,
expect_failure=False):
return self.direct_container_op(direct_client.direct_head_container,
account, container, expect_failure)
def direct_get_container(self, account=None, container=None,
expect_failure=False):
return self.direct_container_op(direct_client.direct_get_container,
account, container, expect_failure)
def get_container_db_files(self, container):
opart, onodes = self.container_ring.get_nodes(self.account, container)
db_files = []
for onode in onodes:
node_id = self.config_number(onode)
device = onode['device']
hash_str = hash_path(self.account, container)
server_conf = readconf(self.configs['container-server'][node_id])
devices = server_conf['app:container-server']['devices']
obj_dir = '%s/%s/containers/%s/%s/%s/' % (devices,
device, opart,
hash_str[-3:], hash_str)
db_files.append(_get_db_file_path(obj_dir))
return db_files
class ECProbeTest(ProbeTest):
acct_cont_required_replicas = 3
acct_cont_required_devices = 4
obj_required_replicas = 6
obj_required_devices = 8
policy_requirements = {'policy_type': EC_POLICY}
def _make_name(self, prefix):
return ('%s%s' % (prefix, uuid4())).encode()
def setUp(self):
super(ECProbeTest, self).setUp()
self.container_name = self._make_name('container-')
self.object_name = self._make_name('object-')
# sanity
self.assertEqual(self.policy.policy_type, EC_POLICY)
self.reconstructor = Manager(["object-reconstructor"])
def proxy_put(self, extra_headers=None):
contents = Body()
headers = {
self._make_name('x-object-meta-').decode('utf8'):
self._make_name('meta-foo-').decode('utf8'),
}
if extra_headers:
headers.update(extra_headers)
self.etag = client.put_object(self.url, self.token,
self.container_name,
self.object_name,
contents=contents, headers=headers)
def proxy_get(self):
# GET object
headers, body = client.get_object(self.url, self.token,
self.container_name,
self.object_name,
resp_chunk_size=64 * 2 ** 10)
resp_checksum = md5(usedforsecurity=False)
for chunk in body:
resp_checksum.update(chunk)
return headers, resp_checksum.hexdigest()
def direct_get(self, node, part, require_durable=True, extra_headers=None):
req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
if extra_headers:
req_headers.update(extra_headers)
if not require_durable:
req_headers.update(
{'X-Backend-Fragment-Preferences': json.dumps([])})
# node dict has unicode values so utf8 decode our path parts too in
# case they have non-ascii characters
if six.PY2:
acc, con, obj = (s.decode('utf8') for s in (
self.account, self.container_name, self.object_name))
else:
acc, con, obj = self.account, self.container_name, self.object_name
headers, data = direct_client.direct_get_object(
node, part, acc, con, obj, headers=req_headers,
resp_chunk_size=64 * 2 ** 20)
hasher = md5(usedforsecurity=False)
for chunk in data:
hasher.update(chunk)
return headers, hasher.hexdigest()
def assert_direct_get_fails(self, onode, opart, status,
require_durable=True):
try:
self.direct_get(onode, opart, require_durable=require_durable)
except direct_client.DirectClientException as err:
self.assertEqual(err.http_status, status)
return err
else:
self.fail('Node data on %r was not fully destroyed!' % (onode,))
def assert_direct_get_succeeds(self, onode, opart, require_durable=True,
extra_headers=None):
try:
return self.direct_get(onode, opart,
require_durable=require_durable,
extra_headers=extra_headers)
except direct_client.DirectClientException as err:
self.fail('Node data on %r was not available: %s' % (onode, err))
def break_nodes(self, nodes, opart, failed, non_durable):
# delete partitions on the failed nodes and remove durable marker from
# non-durable nodes
made_non_durable = 0
for i, node in enumerate(nodes):
part_dir = self.storage_dir(node, part=opart)
if i in failed:
shutil.rmtree(part_dir, True)
try:
self.direct_get(node, opart)
except direct_client.DirectClientException as err:
self.assertEqual(err.http_status, 404)
elif i in non_durable:
for dirs, subdirs, files in os.walk(part_dir):
for fname in sorted(files, reverse=True):
# make the newest durable be non-durable
if fname.endswith('.data'):
made_non_durable += 1
non_durable_fname = fname.replace('#d', '')
os.rename(os.path.join(dirs, fname),
os.path.join(dirs, non_durable_fname))
break
headers, etag = self.direct_get(node, opart,
require_durable=False)
self.assertNotIn('X-Backend-Durable-Timestamp', headers)
try:
os.remove(os.path.join(part_dir, 'hashes.pkl'))
except OSError as e:
if e.errno != errno.ENOENT:
raise
return made_non_durable
def make_durable(self, nodes, opart):
# ensure all data files on the specified nodes are durable
made_durable = 0
for i, node in enumerate(nodes):
part_dir = self.storage_dir(node, part=opart)
for dirs, subdirs, files in os.walk(part_dir):
for fname in sorted(files, reverse=True):
# make the newest non-durable be durable
if (fname.endswith('.data') and
not fname.endswith('#d.data')):
made_durable += 1
non_durable_fname = fname.replace('.data', '#d.data')
os.rename(os.path.join(dirs, fname),
os.path.join(dirs, non_durable_fname))
break
headers, etag = self.assert_direct_get_succeeds(node, opart)
self.assertIn('X-Backend-Durable-Timestamp', headers)
try:
os.remove(os.path.join(part_dir, 'hashes.pkl'))
except OSError as e:
if e.errno != errno.ENOENT:
raise
return made_durable
if __name__ == "__main__":
for server in ('account', 'container'):
try:
get_ring(server, 3, 4,
force_validate=True)
except unittest.SkipTest as err:
sys.exit('%s ERROR: %s' % (server, err))
print('%s OK' % server)
for policy in POLICIES:
try:
get_ring(policy.ring_name, 3, 4,
server='object', force_validate=True)
except unittest.SkipTest as err:
sys.exit('object ERROR (%s): %s' % (policy.name, err))
print('object OK (%s)' % policy.name)
| swift-master | test/probe/common.py |
# Copyright (c) 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from swift.common.manager import Manager
from swiftclient import client
from test.probe.brain import BrainSplitter
from test.probe.common import ReplProbeTest
def chunker(body):
'''Helper to ensure swiftclient sends a chunked request.'''
yield body
class TestPutIfNoneMatchRepl(ReplProbeTest):
def setUp(self):
super(TestPutIfNoneMatchRepl, self).setUp()
self.container_name = 'container-%s' % uuid.uuid4()
self.object_name = 'object-%s' % uuid.uuid4()
self.brain = BrainSplitter(self.url, self.token, self.container_name,
self.object_name, 'object',
policy=self.policy)
def _do_test(self, overwrite_contents):
self.brain.put_container()
self.brain.stop_primary_half()
# put object to only 1 of 3 primaries
self.brain.put_object(contents=b'VERIFY')
self.brain.start_primary_half()
# Restart services and attempt to overwrite
with self.assertRaises(client.ClientException) as exc_mgr:
self.brain.put_object(headers={'If-None-Match': '*'},
contents=overwrite_contents)
self.assertEqual(exc_mgr.exception.http_status, 412)
# make sure we're GETting from the servers that missed the original PUT
self.brain.stop_handoff_half()
# verify the PUT did not complete
with self.assertRaises(client.ClientException) as exc_mgr:
client.get_object(
self.url, self.token, self.container_name, self.object_name)
self.assertEqual(exc_mgr.exception.http_status, 404)
# for completeness, run replicators...
Manager(['object-replicator']).once()
# ...and verify the object was not overwritten
_headers, body = client.get_object(
self.url, self.token, self.container_name, self.object_name)
self.assertEqual(body, b'VERIFY')
def test_content_length_nonzero(self):
self._do_test(b'OVERWRITE')
def test_content_length_zero(self):
self._do_test(b'')
def test_chunked(self):
self._do_test(chunker(b'OVERWRITE'))
def test_chunked_empty(self):
self._do_test(chunker(b''))
| swift-master | test/probe/test_object_conditional_requests.py |
#!/usr/bin/python -u
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
import random
import unittest
from six.moves.urllib.parse import urlparse
from swiftclient import client, ClientException
from swift.common.http import HTTP_NOT_FOUND
from swift.common.manager import Manager
from test.probe.brain import BrainSplitter
from test.probe.common import ReplProbeTest, ENABLED_POLICIES
def get_info(url):
parts = urlparse(url)
url = parts.scheme + '://' + parts.netloc + '/info'
http_conn = client.http_connection(url)
try:
return client.get_capabilities(http_conn)
except client.ClientException:
raise unittest.SkipTest('Unable to retrieve cluster info')
def get_current_realm_cluster(info):
try:
realms = info['container_sync']['realms']
except KeyError:
raise unittest.SkipTest('Unable to find container sync realms')
for realm, realm_info in realms.items():
for cluster, options in realm_info['clusters'].items():
if options.get('current', False):
return realm, cluster
raise unittest.SkipTest('Unable to find current realm cluster')
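# for illustration, the capabilities from /info might contain something like
#   {'container_sync': {'realms': {'REALM1': {'clusters':
#       {'CLUSTER1': {'current': True}}}}}}
# in which case get_current_realm_cluster returns ('REALM1', 'CLUSTER1')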
class BaseTestContainerSync(ReplProbeTest):
def setUp(self):
super(BaseTestContainerSync, self).setUp()
self.info = get_info(self.url)
self.realm, self.cluster = get_current_realm_cluster(self.info)
def _setup_synced_containers(
self, source_overrides=None, dest_overrides=None):
# these defaults are used to create both source and dest containers
# unless overridden by source_overrides and/or dest_overrides
default_params = {'url': self.url,
'token': self.token,
'account': self.account,
'sync_key': 'secret'}
# setup dest container
dest = dict(default_params)
dest['name'] = 'dest-container-%s' % uuid.uuid4()
dest.update(dest_overrides or {})
dest_headers = {}
dest_policy = None
if len(ENABLED_POLICIES) > 1:
dest_policy = random.choice(ENABLED_POLICIES)
dest_headers['X-Storage-Policy'] = dest_policy.name
if dest['sync_key'] is not None:
dest_headers['X-Container-Sync-Key'] = dest['sync_key']
client.put_container(dest['url'], dest['token'], dest['name'],
headers=dest_headers)
# setup source container
source = dict(default_params)
source['name'] = 'source-container-%s' % uuid.uuid4()
source.update(source_overrides or {})
source_headers = {}
sync_to = '//%s/%s/%s/%s' % (self.realm, self.cluster, dest['account'],
dest['name'])
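# sync_to ends up looking like
# '//<realm>/<cluster>/<dest account>/<dest container>', e.g.
# '//realm1/cluster1/AUTH_test/dest-container-<uuid>' (names illustrative)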
source_headers['X-Container-Sync-To'] = sync_to
if source['sync_key'] is not None:
source_headers['X-Container-Sync-Key'] = source['sync_key']
if dest_policy:
source_policy = random.choice([p for p in ENABLED_POLICIES
if p is not dest_policy])
source_headers['X-Storage-Policy'] = source_policy.name
client.put_container(source['url'], source['token'], source['name'],
headers=source_headers)
return source['name'], dest['name']
class TestContainerSync(BaseTestContainerSync):
def test_sync(self):
source_container, dest_container = self._setup_synced_containers()
# upload to source
object_name = 'object-%s' % uuid.uuid4()
put_headers = {'X-Object-Meta-Test': 'put_value'}
client.put_object(self.url, self.token, source_container, object_name,
'test-body', headers=put_headers)
# cycle container-sync
Manager(['container-sync']).once()
resp_headers, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(body, b'test-body')
self.assertIn('x-object-meta-test', resp_headers)
self.assertEqual('put_value', resp_headers['x-object-meta-test'])
# update metadata with a POST
post_headers = {'Content-Type': 'image/jpeg',
'X-Object-Meta-Test': 'post_value'}
int_client = self.make_internal_client()
int_client.set_object_metadata(self.account, source_container,
object_name, post_headers)
# sanity checks...
resp_headers = client.head_object(
self.url, self.token, source_container, object_name)
self.assertIn('x-object-meta-test', resp_headers)
self.assertEqual('post_value', resp_headers['x-object-meta-test'])
self.assertEqual('image/jpeg', resp_headers['content-type'])
# cycle container-sync
Manager(['container-sync']).once()
# verify that metadata changes were sync'd
resp_headers, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(body, b'test-body')
self.assertIn('x-object-meta-test', resp_headers)
self.assertEqual('post_value', resp_headers['x-object-meta-test'])
self.assertEqual('image/jpeg', resp_headers['content-type'])
# delete the object
client.delete_object(
self.url, self.token, source_container, object_name)
with self.assertRaises(ClientException) as cm:
client.get_object(
self.url, self.token, source_container, object_name)
self.assertEqual(404, cm.exception.http_status) # sanity check
# cycle container-sync
Manager(['container-sync']).once()
# verify delete has been sync'd
with self.assertRaises(ClientException) as cm:
client.get_object(
self.url, self.token, dest_container, object_name)
self.assertEqual(404, cm.exception.http_status) # sanity check
def test_sync_slo_manifest(self):
# Verify that SLO manifests are sync'd even if their segments cannot
# be found in the destination account at the time of syncing.
# Create source and dest containers for manifest in separate accounts.
dest_account = self.account_2
source_container, dest_container = self._setup_synced_containers(
dest_overrides=dest_account
)
# Create source and dest containers for segments in separate accounts.
# These containers must have the same name for the destination SLO
# manifest to be able to resolve segments. Initially the destination
# has no sync key so segments will not sync.
segs_container = 'segments-%s' % uuid.uuid4()
dest_segs_info = dict(dest_account)
dest_segs_info.update({'name': segs_container, 'sync_key': None})
self._setup_synced_containers(
source_overrides={'name': segs_container, 'sync_key': 'segs_key'},
dest_overrides=dest_segs_info)
# upload a segment to source
segment_name = 'segment-%s' % uuid.uuid4()
segment_data = b'segment body' # it's ok for first segment to be small
segment_etag = client.put_object(
self.url, self.token, segs_container, segment_name,
segment_data)
manifest = [{'etag': segment_etag,
'size_bytes': len(segment_data),
'path': '/%s/%s' % (segs_container, segment_name)}]
manifest_name = 'manifest-%s' % uuid.uuid4()
put_headers = {'X-Object-Meta-Test': 'put_value'}
client.put_object(
self.url, self.token, source_container, manifest_name,
json.dumps(manifest), headers=put_headers,
query_string='multipart-manifest=put')
resp_headers, manifest_body = client.get_object(
self.url, self.token, source_container, manifest_name,
query_string='multipart-manifest=get')
int_manifest = json.loads(manifest_body)
# cycle container-sync
Manager(['container-sync']).once()
# verify manifest was sync'd
resp_headers, dest_listing = client.get_container(
dest_account['url'], dest_account['token'], dest_container)
self.assertFalse(dest_listing[1:])
self.assertEqual(manifest_name, dest_listing[0]['name'])
# verify manifest body
resp_headers, body = client.get_object(
dest_account['url'], dest_account['token'], dest_container,
manifest_name, query_string='multipart-manifest=get')
self.assertEqual(int_manifest, json.loads(body))
self.assertIn('x-object-meta-test', resp_headers)
self.assertEqual('put_value', resp_headers['x-object-meta-test'])
# attempt to GET the SLO will fail because the segment wasn't sync'd
with self.assertRaises(ClientException) as cm:
client.get_object(dest_account['url'], dest_account['token'],
dest_container, manifest_name)
self.assertEqual(409, cm.exception.http_status)
# now set sync key on destination segments container
client.put_container(
dest_account['url'], dest_account['token'], segs_container,
headers={'X-Container-Sync-Key': 'segs_key'})
# cycle container-sync
Manager(['container-sync']).once()
# sanity check - verify manifest body
resp_headers, body = client.get_object(
dest_account['url'], dest_account['token'], dest_container,
manifest_name, query_string='multipart-manifest=get')
self.assertEqual(int_manifest, json.loads(body))
self.assertIn('x-object-meta-test', resp_headers)
self.assertEqual('put_value', resp_headers['x-object-meta-test'])
# verify GET of SLO manifest now succeeds
resp_headers, body = client.get_object(
dest_account['url'], dest_account['token'], dest_container,
manifest_name)
self.assertEqual(segment_data, body)
def test_sync_lazy_skey(self):
# Create synced containers, but with no key at source
source_container, dest_container =\
self._setup_synced_containers(source_overrides={'sync_key': None})
# upload to source
object_name = 'object-%s' % uuid.uuid4()
client.put_object(self.url, self.token, source_container, object_name,
'test-body')
# cycle container-sync, nothing should happen
Manager(['container-sync']).once()
with self.assertRaises(ClientException) as err:
_junk, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(err.exception.http_status, HTTP_NOT_FOUND)
# amend source key
source_headers = {'X-Container-Sync-Key': 'secret'}
client.put_container(self.url, self.token, source_container,
headers=source_headers)
# cycle container-sync, should replicate
Manager(['container-sync']).once()
_junk, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(body, b'test-body')
def test_sync_lazy_dkey(self):
# Create synced containers, but with no key at dest
source_container, dest_container =\
self._setup_synced_containers(dest_overrides={'sync_key': None})
# upload to source
object_name = 'object-%s' % uuid.uuid4()
client.put_object(self.url, self.token, source_container, object_name,
'test-body')
# cycle container-sync, nothing should happen
Manager(['container-sync']).once()
with self.assertRaises(ClientException) as err:
_junk, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(err.exception.http_status, HTTP_NOT_FOUND)
# amend dest key
dest_headers = {'X-Container-Sync-Key': 'secret'}
client.put_container(self.url, self.token, dest_container,
headers=dest_headers)
# cycle container-sync, should replicate
Manager(['container-sync']).once()
_junk, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(body, b'test-body')
def test_sync_with_stale_container_rows(self):
source_container, dest_container = self._setup_synced_containers()
brain = BrainSplitter(self.url, self.token, source_container,
None, 'container')
# upload to source
object_name = 'object-%s' % uuid.uuid4()
client.put_object(self.url, self.token, source_container, object_name,
'test-body')
# check source container listing
_, listing = client.get_container(
self.url, self.token, source_container)
for expected_obj_dict in listing:
if expected_obj_dict['name'] == object_name:
break
else:
self.fail('Failed to find source object %r in container listing %r'
% (object_name, listing))
# stop all container servers
brain.stop_primary_half()
brain.stop_handoff_half()
# upload new object content to source - container updates will fail
client.put_object(self.url, self.token, source_container, object_name,
'new-test-body')
source_headers = client.head_object(
self.url, self.token, source_container, object_name)
# start all container servers
brain.start_primary_half()
brain.start_handoff_half()
# sanity check: source container listing should not have changed
_, listing = client.get_container(
self.url, self.token, source_container)
for actual_obj_dict in listing:
if actual_obj_dict['name'] == object_name:
self.assertDictEqual(expected_obj_dict, actual_obj_dict)
break
else:
self.fail('Failed to find source object %r in container listing %r'
% (object_name, listing))
# cycle container-sync - object should be correctly sync'd despite
# stale info in container row
Manager(['container-sync']).once()
# verify sync'd object has same content and headers
dest_headers, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(body, b'new-test-body')
mismatched_headers = []
for k in ('etag', 'content-length', 'content-type', 'x-timestamp',
'last-modified'):
if source_headers[k] == dest_headers[k]:
continue
mismatched_headers.append((k, source_headers[k], dest_headers[k]))
if mismatched_headers:
msg = '\n'.join([('Mismatched header %r, expected %r but got %r'
% item) for item in mismatched_headers])
self.fail(msg)
def test_sync_newer_remote(self):
source_container, dest_container = self._setup_synced_containers()
# upload to source
object_name = 'object-%s' % uuid.uuid4()
client.put_object(self.url, self.token, source_container, object_name,
'old-source-body')
# upload to dest with same name
client.put_object(self.url, self.token, dest_container, object_name,
'new-test-body')
# cycle container-sync
Manager(['container-sync']).once()
# verify that the remote object did not change
resp_headers, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(body, b'new-test-body')
def test_sync_delete_when_object_never_synced(self):
source_container, dest_container = self._setup_synced_containers()
# create a tombstone row
object_name = 'object-%s' % uuid.uuid4()
client.put_object(self.url, self.token, source_container,
object_name, 'source-body')
client.delete_object(self.url, self.token, source_container,
object_name)
# upload some other name, too
object_name = 'object-%s' % uuid.uuid4()
client.put_object(self.url, self.token, source_container, object_name,
'other-source-body')
# cycle container-sync
Manager(['container-sync']).once()
# verify that the deletes (which 404ed) didn't block
# that last row from syncing
resp_headers, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(body, b'other-source-body')
class TestContainerSyncAndSymlink(BaseTestContainerSync):
def setUp(self):
super(TestContainerSyncAndSymlink, self).setUp()
symlinks_enabled = self.info.get('symlink') or False
if not symlinks_enabled:
raise unittest.SkipTest("Symlinks not enabled")
def test_sync_symlink(self):
# Verify that symlinks are sync'd as symlinks.
dest_account = self.account_2
source_container, dest_container = self._setup_synced_containers(
dest_overrides=dest_account
)
# Create source and dest containers for target objects in separate
# accounts.
# These containers must have the same name for the destination symlink
# to use the same target object. Initially the destination has no sync
# key so the target will not sync.
tgt_container = 'targets-%s' % uuid.uuid4()
dest_tgt_info = dict(dest_account)
dest_tgt_info.update({'name': tgt_container, 'sync_key': None})
self._setup_synced_containers(
source_overrides={'name': tgt_container, 'sync_key': 'tgt_key'},
dest_overrides=dest_tgt_info)
# upload a target to source
target_name = 'target-%s' % uuid.uuid4()
target_body = b'target body'
client.put_object(
self.url, self.token, tgt_container, target_name,
target_body)
# Note that this tests when the target object is in the same account
target_path = '%s/%s' % (tgt_container, target_name)
symlink_name = 'symlink-%s' % uuid.uuid4()
put_headers = {'X-Symlink-Target': target_path}
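        # X-Symlink-Target takes the form '<container>/<object>' and is
        # resolved in the symlink's own account (unless the request also
        # sets X-Symlink-Target-Account); with made-up values it might look
        # like 'targets-1234/target-5678'.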
# upload the symlink
client.put_object(
self.url, self.token, source_container, symlink_name,
'', headers=put_headers)
# verify object is a symlink
resp_headers, symlink_body = client.get_object(
self.url, self.token, source_container, symlink_name,
query_string='symlink=get')
self.assertEqual(b'', symlink_body)
self.assertIn('x-symlink-target', resp_headers)
# verify symlink behavior
resp_headers, actual_target_body = client.get_object(
self.url, self.token, source_container, symlink_name)
self.assertEqual(target_body, actual_target_body)
# cycle container-sync
Manager(['container-sync']).once()
# verify symlink was sync'd
resp_headers, dest_listing = client.get_container(
dest_account['url'], dest_account['token'], dest_container)
self.assertFalse(dest_listing[1:])
self.assertEqual(symlink_name, dest_listing[0]['name'])
# verify symlink remained only a symlink
resp_headers, symlink_body = client.get_object(
dest_account['url'], dest_account['token'], dest_container,
symlink_name, query_string='symlink=get')
self.assertEqual(b'', symlink_body)
self.assertIn('x-symlink-target', resp_headers)
        # an attempt to GET the target object via the symlink will fail
        # because the target wasn't sync'd
with self.assertRaises(ClientException) as cm:
client.get_object(dest_account['url'], dest_account['token'],
dest_container, symlink_name)
self.assertEqual(404, cm.exception.http_status)
# now set sync key on destination target container
client.put_container(
dest_account['url'], dest_account['token'], tgt_container,
headers={'X-Container-Sync-Key': 'tgt_key'})
# cycle container-sync
Manager(['container-sync']).once()
# sanity:
resp_headers, body = client.get_object(
dest_account['url'], dest_account['token'],
tgt_container, target_name)
# sanity check - verify symlink remained only a symlink
resp_headers, symlink_body = client.get_object(
dest_account['url'], dest_account['token'], dest_container,
symlink_name, query_string='symlink=get')
self.assertEqual(b'', symlink_body)
self.assertIn('x-symlink-target', resp_headers)
# verify GET of target object via symlink now succeeds
resp_headers, actual_target_body = client.get_object(
dest_account['url'], dest_account['token'], dest_container,
symlink_name)
self.assertEqual(target_body, actual_target_body)
def test_sync_cross_acc_symlink(self):
# Verify that cross-account symlinks are sync'd as cross-account
# symlinks.
source_container, dest_container = self._setup_synced_containers()
        # Sync'd symlinks will have the same target path "/a/c/o", so a
        # probe test that also syncs the targets would require two swift
        # clusters. Therefore, in this single-cluster probe test, the target
        # object is not sync'd.
tgt_account = self.account_2
tgt_container = 'targets-%s' % uuid.uuid4()
tgt_container_headers = {'X-Container-Read': 'test:tester'}
if len(ENABLED_POLICIES) > 1:
tgt_policy = random.choice(ENABLED_POLICIES)
tgt_container_headers['X-Storage-Policy'] = tgt_policy.name
client.put_container(tgt_account['url'], tgt_account['token'],
tgt_container, headers=tgt_container_headers)
# upload a target to source
target_name = 'target-%s' % uuid.uuid4()
target_body = b'target body'
client.put_object(tgt_account['url'], tgt_account['token'],
tgt_container, target_name, target_body)
# Note that this tests when the target object is in a different account
target_path = '%s/%s' % (tgt_container, target_name)
symlink_name = 'symlink-%s' % uuid.uuid4()
put_headers = {
'X-Symlink-Target': target_path,
'X-Symlink-Target-Account': tgt_account['account']}
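        # X-Symlink-Target-Account makes this a cross-account symlink: the
        # '<container>/<object>' target is resolved in that account rather
        # than the symlink's own.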
# upload the symlink
client.put_object(
self.url, self.token, source_container, symlink_name,
'', headers=put_headers)
# verify object is a cross-account symlink
resp_headers, symlink_body = client.get_object(
self.url, self.token, source_container, symlink_name,
query_string='symlink=get')
self.assertEqual(b'', symlink_body)
self.assertIn('x-symlink-target', resp_headers)
self.assertIn('x-symlink-target-account', resp_headers)
# verify symlink behavior
resp_headers, actual_target_body = client.get_object(
self.url, self.token, source_container, symlink_name)
self.assertEqual(target_body, actual_target_body)
# cycle container-sync
Manager(['container-sync']).once()
# verify symlink was sync'd
resp_headers, dest_listing = client.get_container(
self.url, self.token, dest_container)
self.assertFalse(dest_listing[1:])
self.assertEqual(symlink_name, dest_listing[0]['name'])
# verify symlink remained only a symlink
resp_headers, symlink_body = client.get_object(
self.url, self.token, dest_container,
symlink_name, query_string='symlink=get')
self.assertEqual(b'', symlink_body)
self.assertIn('x-symlink-target', resp_headers)
self.assertIn('x-symlink-target-account', resp_headers)
        # verify GET of the target object via the synced symlink succeeds
resp_headers, actual_target_body = client.get_object(
self.url, self.token, dest_container, symlink_name)
self.assertEqual(target_body, actual_target_body)
def test_sync_static_symlink_different_container(self):
source_container, dest_container = self._setup_synced_containers()
symlink_cont = 'symlink-container-%s' % uuid.uuid4()
client.put_container(self.url, self.token, symlink_cont)
# upload a target to symlink container
target_name = 'target-%s' % uuid.uuid4()
target_body = b'target body'
etag = client.put_object(
self.url, self.token, symlink_cont, target_name,
target_body)
# upload a regular object
regular_name = 'regular-%s' % uuid.uuid4()
regular_body = b'regular body'
client.put_object(
self.url, self.token, source_container, regular_name,
regular_body)
# static symlink
target_path = '%s/%s' % (symlink_cont, target_name)
symlink_name = 'symlink-%s' % uuid.uuid4()
put_headers = {'X-Symlink-Target': target_path,
'X-Symlink-Target-Etag': etag}
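        # Adding X-Symlink-Target-Etag makes this a "static" symlink: the
        # target's etag is pinned at link creation and later GETs through
        # the link return 409 if the target changes (see the "busted" test
        # below).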
# upload the symlink
client.put_object(
self.url, self.token, source_container, symlink_name,
'', headers=put_headers)
# verify object is a symlink
resp_headers, symlink_body = client.get_object(
self.url, self.token, source_container, symlink_name,
query_string='symlink=get')
self.assertEqual(b'', symlink_body)
self.assertIn('x-symlink-target', resp_headers)
self.assertIn('x-symlink-target-etag', resp_headers)
# verify symlink behavior
resp_headers, actual_target_body = client.get_object(
self.url, self.token, source_container, symlink_name)
self.assertEqual(target_body, actual_target_body)
self.assertIn('content-location', resp_headers)
content_location = resp_headers['content-location']
# cycle container-sync
Manager(['container-sync']).once()
# regular object should have synced
resp_headers, actual_target_body = client.get_object(
self.url, self.token, dest_container, regular_name)
self.assertEqual(regular_body, actual_target_body)
# static symlink gets synced, too
resp_headers, actual_target_body = client.get_object(
self.url, self.token, dest_container, symlink_name)
self.assertEqual(target_body, actual_target_body)
self.assertIn('content-location', resp_headers)
self.assertEqual(content_location, resp_headers['content-location'])
def test_sync_busted_static_symlink_different_container(self):
source_container, dest_container = self._setup_synced_containers()
symlink_cont = 'symlink-container-%s' % uuid.uuid4()
client.put_container(self.url, self.token, symlink_cont)
# upload a target to symlink container
target_name = 'target-%s' % uuid.uuid4()
target_body = b'target body'
etag = client.put_object(
self.url, self.token, symlink_cont, target_name,
target_body)
# upload a regular object
regular_name = 'regular-%s' % uuid.uuid4()
regular_body = b'regular body'
client.put_object(
self.url, self.token, source_container, regular_name,
regular_body)
# static symlink
target_path = '%s/%s' % (symlink_cont, target_name)
symlink_name = 'symlink-%s' % uuid.uuid4()
put_headers = {'X-Symlink-Target': target_path,
'X-Symlink-Target-Etag': etag}
# upload the symlink
client.put_object(
self.url, self.token, source_container, symlink_name,
'', headers=put_headers)
# verify object is a symlink
resp_headers, symlink_body = client.get_object(
self.url, self.token, source_container, symlink_name,
query_string='symlink=get')
self.assertEqual(b'', symlink_body)
self.assertIn('x-symlink-target', resp_headers)
self.assertIn('x-symlink-target-etag', resp_headers)
# verify symlink behavior
resp_headers, actual_target_body = client.get_object(
self.url, self.token, source_container, symlink_name)
self.assertEqual(target_body, actual_target_body)
self.assertIn('content-location', resp_headers)
content_location = resp_headers['content-location']
# Break the link
client.put_object(
self.url, self.token, symlink_cont, target_name,
b'something else')
# cycle container-sync
Manager(['container-sync']).once()
# regular object should have synced
resp_headers, actual_target_body = client.get_object(
self.url, self.token, dest_container, regular_name)
self.assertEqual(regular_body, actual_target_body)
        # the static symlink gets synced too, but a GET through it now fails
        # with a 409 because the target's etag no longer matches
with self.assertRaises(ClientException) as cm:
client.get_object(
self.url, self.token, dest_container, symlink_name)
self.assertEqual(409, cm.exception.http_status)
resp_headers = cm.exception.http_response_headers
self.assertIn('content-location', resp_headers)
self.assertEqual(content_location, resp_headers['content-location'])
def test_sync_static_symlink(self):
source_container, dest_container = self._setup_synced_containers()
# upload a target to symlink container
target_name = 'target-%s' % uuid.uuid4()
target_body = b'target body'
etag = client.put_object(
self.url, self.token, source_container, target_name,
target_body)
# static symlink
target_path = '%s/%s' % (source_container, target_name)
symlink_name = 'symlink-%s' % uuid.uuid4()
put_headers = {'X-Symlink-Target': target_path,
'X-Symlink-Target-Etag': etag}
# upload the symlink
client.put_object(
self.url, self.token, source_container, symlink_name,
'', headers=put_headers)
# verify object is a symlink
resp_headers, symlink_body = client.get_object(
self.url, self.token, source_container, symlink_name,
query_string='symlink=get')
self.assertEqual(b'', symlink_body)
self.assertIn('x-symlink-target', resp_headers)
self.assertIn('x-symlink-target-etag', resp_headers)
# verify symlink behavior
resp_headers, actual_target_body = client.get_object(
self.url, self.token, source_container, symlink_name)
self.assertEqual(target_body, actual_target_body)
# cycle container-sync
Manager(['container-sync']).once()
# regular object should have synced
resp_headers, actual_target_body = client.get_object(
self.url, self.token, dest_container, target_name)
self.assertEqual(target_body, actual_target_body)
# and static link too
resp_headers, actual_target_body = client.get_object(
self.url, self.token, dest_container, symlink_name)
self.assertEqual(target_body, actual_target_body)
class TestContainerSyncAndVersioning(BaseTestContainerSync):
def setUp(self):
super(TestContainerSyncAndVersioning, self).setUp()
if 'object_versioning' not in self.info:
raise unittest.SkipTest("Object Versioning not enabled")
def _test_syncing(self, source_container, dest_container):
# test syncing and versioning
object_name = 'object-%s' % uuid.uuid4()
client.put_object(self.url, self.token, source_container, object_name,
'version1')
# cycle container-sync
Manager(['container-sync']).once()
# overwrite source
client.put_object(self.url, self.token, source_container, object_name,
'version2')
# cycle container-sync
Manager(['container-sync']).once()
resp_headers, listing = client.get_container(
self.url, self.token, dest_container,
query_string='versions')
self.assertEqual(2, len(listing))
def test_enable_versioning_while_syncing_container(self):
source_container, dest_container = self._setup_synced_containers()
version_hdr = {'X-Versions-Enabled': 'true'}
# Cannot enable versioning on source container
with self.assertRaises(ClientException) as cm:
client.post_container(self.url, self.token, source_container,
headers=version_hdr)
self.assertEqual(400, cm.exception.http_status) # sanity check
self.assertEqual(b'Cannot enable object versioning on a container '
b'configured as source of container syncing.',
cm.exception.http_response_content)
# but destination is ok!
client.post_container(self.url, self.token, dest_container,
headers=version_hdr)
headers = client.head_container(self.url, self.token,
dest_container)
self.assertEqual('True', headers.get('x-versions-enabled'))
self.assertEqual('secret', headers.get('x-container-sync-key'))
self._test_syncing(source_container, dest_container)
def test_enable_syncing_while_versioned(self):
source_container, dest_container = self._setup_synced_containers()
container_name = 'versioned-%s' % uuid.uuid4()
version_hdr = {'X-Versions-Enabled': 'true'}
client.put_container(self.url, self.token, container_name,
headers=version_hdr)
# fails to configure as a container-sync source
sync_headers = {'X-Container-Sync-Key': 'secret'}
sync_to = '//%s/%s/%s/%s' % (self.realm, self.cluster, self.account,
dest_container)
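        # A realm-style sync-to value has the form
        # '//<realm>/<cluster>/<account>/<container>'; with made-up values
        # this might look like '//realm1/clust1/AUTH_test/dest-container'.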
sync_headers['X-Container-Sync-To'] = sync_to
with self.assertRaises(ClientException) as cm:
client.post_container(self.url, self.token, container_name,
headers=sync_headers)
self.assertEqual(400, cm.exception.http_status) # sanity check
# but works if it's just a container-sync destination
sync_headers = {'X-Container-Sync-Key': 'secret'}
client.post_container(self.url, self.token, container_name,
headers=sync_headers)
headers = client.head_container(self.url, self.token,
container_name)
self.assertEqual('True', headers.get('x-versions-enabled'))
self.assertEqual('secret', headers.get('x-container-sync-key'))
# update source header to sync to versioned container
source_headers = {'X-Container-Sync-Key': 'secret'}
sync_to = '//%s/%s/%s/%s' % (self.realm, self.cluster, self.account,
container_name)
source_headers['X-Container-Sync-To'] = sync_to
client.post_container(self.url, self.token, source_container,
headers=source_headers)
self._test_syncing(source_container, container_name)
def test_skip_sync_when_misconfigured(self):
source_container, dest_container = self._setup_synced_containers()
container_name = 'versioned-%s' % uuid.uuid4()
version_hdr = {'X-Versions-Enabled': 'true'}
client.put_container(self.url, self.token, container_name,
headers=version_hdr)
# some sanity checks
object_name = 'object-%s' % uuid.uuid4()
client.put_object(self.url, self.token, container_name, object_name,
'version1')
client.put_object(self.url, self.token, container_name, object_name,
'version2')
resp_headers, listing = client.get_container(
self.url, self.token, container_name,
query_string='versions')
self.assertEqual(2, len(listing))
sync_headers = {}
sync_to = '//%s/%s/%s/%s' % (self.realm, self.cluster, self.account,
dest_container)
sync_headers['X-Container-Sync-To'] = sync_to
sync_headers['X-Container-Sync-Key'] = 'secret'
        # use the internal client to set container-sync headers; its
        # pipeline has no container_sync middleware, which lets us bypass
        # the usual checks
int_client = self.make_internal_client()
# TODO: what a terrible hack, maybe we need to extend internal
# client to allow caller to become a swift_owner??
int_client.app._pipeline_final_app.swift_owner_headers = []
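        # (Emptying swift_owner_headers keeps the internal client's proxy
        # app from stripping the owner-only sync headers out of this
        # unauthenticated request, so set_container_metadata() below can
        # actually apply them.)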
int_client.set_container_metadata(self.account, container_name,
metadata=sync_headers)
headers = client.head_container(self.url, self.token,
container_name)
        # This should never happen, but if it does because of eventual
        # consistency or a messed up pipeline, container-sync should
        # skip syncing the container.
self.assertEqual('True', headers.get('x-versions-enabled'))
self.assertEqual('secret', headers.get('x-container-sync-key'))
self.assertEqual(sync_to, headers.get('x-container-sync-to'))
# cycle container-sync
Manager(['container-sync']).once()
with self.assertRaises(ClientException) as cm:
client.get_object(
self.url, self.token, dest_container, object_name)
self.assertEqual(404, cm.exception.http_status) # sanity check
if __name__ == "__main__":
unittest.main()
| swift-master | test/probe/test_container_sync.py |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from swiftclient import client
from unittest import main
from swift.common.exceptions import LockTimeout
from swift.common.manager import Manager
from swift.common.utils import hash_path, readconf, Timestamp
from swift.container.backend import ContainerBroker
from test.probe.common import (
kill_nonprimary_server, kill_server, start_server, ReplProbeTest)
# Why is this not called test_container_orphan? Because the crash
# happens in the account server, so both account and container
# services are involved.
#
# The common way users end up here is by using TripleO to deploy an
# overcloud and adding Gnocchi. Gnocchi hammers Swift, so its container
# has updates all the time. Then users crash the overcloud and re-deploy
# it, using a new suffix in swift.conf. Thereafter, the container service
# inherits the old container with outstanding updates, and the container
# updater keeps trying to send updates to the account server, but the
# account can no longer be found. In this situation, in Swift 2.25.0, the
# account server tracebacks, and the cycle continues without end.
class TestOrphanContainer(ReplProbeTest):
def get_account_db_files(self, account):
# This is "more correct" than (port_num%100)//10, but is it worth it?
# We have the assumption about port_num vs node_id embedded all over.
account_configs = {}
for _, cname in self.configs['account-server'].items():
conf = readconf(cname)
# config parser cannot know if it's a number or not, so int()
port = int(conf['app:account-server']['bind_port'])
account_configs[port] = conf
part, nodes = self.account_ring.get_nodes(account)
hash_str = hash_path(account)
ret = []
for node in nodes:
data_dir = 'accounts'
device = node['device']
conf = account_configs[node['port']]
devices = conf['app:account-server']['devices']
# os.path.join is for the weak
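            # The resulting path is, schematically:
            # <devices>/<device>/accounts/<part>/<hash[-3:]>/<hash>/<hash>.db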
db_file = '%s/%s/%s/%s/%s/%s/%s.db' % (
devices, device, data_dir, part,
hash_str[-3:], hash_str, hash_str)
ret.append(db_file)
return ret
def test_update_pending(self):
# Create container
container = 'contx'
client.put_container(self.url, self.token, container)
part, nodes = self.account_ring.get_nodes(self.account)
anode = nodes[0]
# Stop a quorum of account servers
# This allows the put to continue later.
kill_nonprimary_server(nodes, self.ipport2server)
kill_server((anode['ip'], anode['port']), self.ipport2server)
# Put object
# This creates an outstanding update.
client.put_object(self.url, self.token, container, 'object1', b'123')
cont_db_files = self.get_container_db_files(container)
self.assertEqual(len(cont_db_files), 3)
# Collect the observable state from containers
outstanding_files = []
for cfile in cont_db_files:
broker = ContainerBroker(cfile)
try:
info = broker.get_info()
except LockTimeout:
self.fail('LockTimeout at %s' % (cfile,))
if Timestamp(info['put_timestamp']) <= 0:
self.fail('No put_timestamp at %s' % (cfile,))
# Correct even if reported_put_timestamp is zero.
if info['put_timestamp'] > info['reported_put_timestamp']:
outstanding_files.append(cfile)
self.assertGreater(len(outstanding_files), 0)
# At this point the users shut everything down and screw up the
# hash in swift.conf. But we destroy the account DB instead.
files = self.get_account_db_files(self.account)
for afile in files:
os.unlink(afile)
# Restart the stopped primary server
start_server((anode['ip'], anode['port']), self.ipport2server)
# Make sure updaters run
Manager(['container-updater']).once()
# Collect the observable state from containers again and examine it
outstanding_files_new = []
for cfile in cont_db_files:
            # We aren't catching DatabaseConnectionError, because
            # we only want to approve of DBs that were quarantined,
            # and not otherwise damaged. So if the code below throws
            # an exception for any other reason, we want the test to fail.
if not os.path.exists(cfile):
continue
broker = ContainerBroker(cfile)
try:
info = broker.get_info()
except LockTimeout:
self.fail('LockTimeout at %s' % (cfile,))
if Timestamp(info['put_timestamp']) <= 0:
self.fail('No put_timestamp at %s' % (cfile,))
# Correct even if reported_put_timestamp is zero.
if info['put_timestamp'] > info['reported_put_timestamp']:
outstanding_files_new.append(cfile)
        self.assertEqual(len(outstanding_files_new), 0)
self.get_to_final_state()
if __name__ == '__main__':
main()
| swift-master | test/probe/test_orphan_container.py |
#!/usr/bin/python -u
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import uuid
import random
import unittest
from swift.common.manager import Manager
from swift.common.internal_client import InternalClient
from swift.common import utils, direct_client
from swift.common.storage_policy import POLICIES
from swift.common.http import HTTP_NOT_FOUND
from swift.common.utils import md5
from swift.container.reconciler import MISPLACED_OBJECTS_ACCOUNT
from test.probe.brain import BrainSplitter, InternalBrainSplitter
from swift.common.request_helpers import get_reserved_name
from test.probe.common import (ReplProbeTest, ENABLED_POLICIES,
POLICIES_BY_TYPE, REPL_POLICY)
from swiftclient import ClientException
TIMEOUT = 60
class TestContainerMergePolicyIndex(ReplProbeTest):
@unittest.skipIf(len(ENABLED_POLICIES) < 2, "Need more than one policy")
def setUp(self):
super(TestContainerMergePolicyIndex, self).setUp()
self.container_name = 'container-%s' % uuid.uuid4()
self.object_name = 'object-%s' % uuid.uuid4()
self.brain = BrainSplitter(self.url, self.token, self.container_name,
self.object_name, 'container')
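        # (BrainSplitter can stop half of the container servers at a time,
        # letting the two halves be given conflicting container states.)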
def _get_object_patiently(self, policy_index):
# use proxy to access object (bad container info might be cached...)
timeout = time.time() + TIMEOUT
while time.time() < timeout:
try:
return self.brain.get_object()
except ClientException as err:
if err.http_status != HTTP_NOT_FOUND:
raise
time.sleep(1)
else:
self.fail('could not GET /%s/%s/%s/ from policy %s '
'after %s seconds.' % (
self.account, self.container_name, self.object_name,
int(policy_index), TIMEOUT))
def test_merge_storage_policy_index(self):
# generic split brain
self.brain.stop_primary_half()
self.brain.put_container()
self.brain.start_primary_half()
self.brain.stop_handoff_half()
self.brain.put_container()
self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
contents=b'VERIFY')
self.brain.start_handoff_half()
# make sure we have some manner of split brain
container_part, container_nodes = self.container_ring.get_nodes(
self.account, self.container_name)
head_responses = []
for node in container_nodes:
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
found_policy_indexes = {
metadata['X-Backend-Storage-Policy-Index']
for node, metadata in head_responses}
self.assertGreater(
len(found_policy_indexes), 1,
'primary nodes did not disagree about policy index %r' %
head_responses)
# find our object
orig_policy_index = None
for policy_index in found_policy_indexes:
object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
part, nodes = object_ring.get_nodes(
self.account, self.container_name, self.object_name)
for node in nodes:
try:
direct_client.direct_head_object(
node, part, self.account, self.container_name,
self.object_name,
headers={'X-Backend-Storage-Policy-Index':
policy_index})
except direct_client.ClientException:
continue
orig_policy_index = policy_index
break
if orig_policy_index is not None:
break
else:
self.fail('Unable to find /%s/%s/%s in %r' % (
self.account, self.container_name, self.object_name,
found_policy_indexes))
self.get_to_final_state()
Manager(['container-reconciler']).once()
# validate containers
head_responses = []
for node in container_nodes:
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
found_policy_indexes = {
metadata['X-Backend-Storage-Policy-Index']
for node, metadata in head_responses}
self.assertEqual(len(found_policy_indexes), 1,
'primary nodes disagree about policy index %r' %
head_responses)
expected_policy_index = found_policy_indexes.pop()
self.assertNotEqual(orig_policy_index, expected_policy_index)
# validate object placement
orig_policy_ring = POLICIES.get_object_ring(orig_policy_index,
'/etc/swift')
for node in orig_policy_ring.devs:
try:
direct_client.direct_head_object(
node, part, self.account, self.container_name,
self.object_name, headers={
'X-Backend-Storage-Policy-Index': orig_policy_index})
except direct_client.ClientException as err:
if err.http_status == HTTP_NOT_FOUND:
continue
raise
else:
self.fail('Found /%s/%s/%s in %s' % (
self.account, self.container_name, self.object_name,
orig_policy_index))
# verify that the object data read by external client is correct
headers, data = self._get_object_patiently(expected_policy_index)
self.assertEqual(b'VERIFY', data)
self.assertEqual('custom-meta', headers['x-object-meta-test'])
def test_reconcile_delete(self):
# generic split brain
self.brain.stop_primary_half()
self.brain.put_container()
self.brain.put_object()
self.brain.start_primary_half()
self.brain.stop_handoff_half()
self.brain.put_container()
self.brain.delete_object()
self.brain.start_handoff_half()
# make sure we have some manner of split brain
container_part, container_nodes = self.container_ring.get_nodes(
self.account, self.container_name)
head_responses = []
for node in container_nodes:
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
found_policy_indexes = {
metadata['X-Backend-Storage-Policy-Index']
for node, metadata in head_responses}
self.assertGreater(
len(found_policy_indexes), 1,
'primary nodes did not disagree about policy index %r' %
head_responses)
# find our object
orig_policy_index = ts_policy_index = None
for policy_index in found_policy_indexes:
object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
part, nodes = object_ring.get_nodes(
self.account, self.container_name, self.object_name)
for node in nodes:
try:
direct_client.direct_head_object(
node, part, self.account, self.container_name,
self.object_name,
headers={'X-Backend-Storage-Policy-Index':
policy_index})
except direct_client.ClientException as err:
if 'x-backend-timestamp' in err.http_headers:
ts_policy_index = policy_index
break
else:
orig_policy_index = policy_index
break
if not orig_policy_index:
self.fail('Unable to find /%s/%s/%s in %r' % (
self.account, self.container_name, self.object_name,
found_policy_indexes))
if not ts_policy_index:
self.fail('Unable to find tombstone /%s/%s/%s in %r' % (
self.account, self.container_name, self.object_name,
found_policy_indexes))
self.get_to_final_state()
Manager(['container-reconciler']).once()
# validate containers
head_responses = []
for node in container_nodes:
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
node_to_policy = {
node['port']: metadata['X-Backend-Storage-Policy-Index']
for node, metadata in head_responses}
policies = set(node_to_policy.values())
self.assertEqual(len(policies), 1,
'primary nodes disagree about policy index %r' %
node_to_policy)
expected_policy_index = policies.pop()
self.assertEqual(orig_policy_index, expected_policy_index)
# validate object fully deleted
for policy_index in found_policy_indexes:
object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
part, nodes = object_ring.get_nodes(
self.account, self.container_name, self.object_name)
for node in nodes:
try:
direct_client.direct_head_object(
node, part, self.account, self.container_name,
self.object_name,
headers={'X-Backend-Storage-Policy-Index':
policy_index})
except direct_client.ClientException as err:
if err.http_status == HTTP_NOT_FOUND:
continue
else:
self.fail('Found /%s/%s/%s in %s on %s' % (
self.account, self.container_name, self.object_name,
orig_policy_index, node))
def get_object_name(self, name):
"""
        hook for subclasses to translate object names
"""
return name
def test_reconcile_manifest(self):
if 'slo' not in self.cluster_info:
raise unittest.SkipTest(
"SLO not enabled in proxy; can't test manifest reconciliation")
        # this test not only exercises a split brain scenario across
        # multiple policies with mis-placed objects - it also writes a
        # static large object directly to the storage nodes while those
        # objects are mis-placed and unavailable from *behind* the proxy,
        # and it doesn't know how to do that for EC_POLICY (clayg: why did
        # you guys let me write a test that does this!?) - so we force
        # wrong_policy (where the manifest gets written) to be one of your
        # configured REPL_POLICY policies (we know you have at least one
        # because this is a ReplProbeTest)
wrong_policy = random.choice(POLICIES_BY_TYPE[REPL_POLICY])
policy = random.choice([p for p in ENABLED_POLICIES
if p is not wrong_policy])
manifest_data = []
def write_part(i):
body = b'VERIFY%0.2d' % i + b'\x00' * 1048576
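            # each segment is a distinct 'VERIFYnn' marker padded with NUL
            # bytes to a little over 1 MiB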
part_name = self.get_object_name('manifest_part_%0.2d' % i)
manifest_entry = {
"path": "/%s/%s" % (self.container_name, part_name),
"etag": md5(body, usedforsecurity=False).hexdigest(),
"size_bytes": len(body),
}
self.brain.client.put_object(self.container_name, part_name, {},
body)
manifest_data.append(manifest_entry)
# get an old container stashed
self.brain.stop_primary_half()
self.brain.put_container(int(policy))
self.brain.start_primary_half()
# write some parts
for i in range(10):
write_part(i)
self.brain.stop_handoff_half()
self.brain.put_container(int(wrong_policy))
# write some more parts
for i in range(10, 20):
write_part(i)
# write manifest
with self.assertRaises(ClientException) as catcher:
self.brain.client.put_object(
self.container_name, self.object_name,
{}, utils.json.dumps(manifest_data),
query_string='multipart-manifest=put')
# so as it works out, you can't really upload a multi-part
# manifest for objects that are currently misplaced - you have to
# wait until they're all available - which is about the same as
# some other failure that causes data to be unavailable to the
# proxy at the time of upload
self.assertEqual(catcher.exception.http_status, 400)
# but what the heck, we'll sneak one in just to see what happens...
direct_manifest_name = self.object_name + '-direct-test'
object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
part, nodes = object_ring.get_nodes(
self.account, self.container_name, direct_manifest_name)
container_part = self.container_ring.get_part(self.account,
self.container_name)
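        # The client-facing SLO manifest uses 'path'/'etag'/'size_bytes';
        # the manifest stored on the object servers uses
        # 'name'/'hash'/'bytes'. translate_direct() below maps one format
        # to the other.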
def translate_direct(data):
return {
'hash': data['etag'],
'bytes': data['size_bytes'],
'name': data['path'],
}
direct_manifest_data = [translate_direct(item)
for item in manifest_data]
headers = {
'x-container-host': ','.join('%s:%s' % (n['ip'], n['port']) for n
in self.container_ring.devs),
'x-container-device': ','.join(n['device'] for n in
self.container_ring.devs),
'x-container-partition': container_part,
'X-Backend-Storage-Policy-Index': wrong_policy.idx,
'X-Static-Large-Object': 'True',
}
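        # The x-container-* headers tell the object server where to send
        # container updates for this direct PUT, since we're bypassing the
        # proxy entirely.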
body = utils.json.dumps(direct_manifest_data).encode('ascii')
for node in nodes:
direct_client.direct_put_object(
node, part, self.account, self.container_name,
direct_manifest_name,
contents=body,
headers=headers)
break # one should do it...
self.brain.start_handoff_half()
self.get_to_final_state()
Manager(['container-reconciler']).once()
# clear proxy cache
self.brain.client.post_container(self.container_name, {})
# let's see how that direct upload worked out...
metadata, body = self.brain.client.get_object(
self.container_name, direct_manifest_name,
query_string='multipart-manifest=get')
self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
for i, entry in enumerate(utils.json.loads(body)):
for key in ('hash', 'bytes', 'name'):
self.assertEqual(entry[key], direct_manifest_data[i][key])
metadata, body = self.brain.client.get_object(
self.container_name, direct_manifest_name)
self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
self.assertEqual(int(metadata['content-length']),
sum(part['size_bytes'] for part in manifest_data))
self.assertEqual(body, b''.join(b'VERIFY%0.2d' % i + b'\x00' * 1048576
for i in range(20)))
# and regular upload should work now too
self.brain.client.put_object(
self.container_name, self.object_name, {},
utils.json.dumps(manifest_data).encode('ascii'),
query_string='multipart-manifest=put')
metadata = self.brain.client.head_object(self.container_name,
self.object_name)
self.assertEqual(int(metadata['content-length']),
sum(part['size_bytes'] for part in manifest_data))
def test_reconcile_symlink(self):
if 'symlink' not in self.cluster_info:
raise unittest.SkipTest(
"Symlink not enabled in proxy; can't test "
"symlink reconciliation")
wrong_policy = random.choice(ENABLED_POLICIES)
policy = random.choice([p for p in ENABLED_POLICIES
if p is not wrong_policy])
# get an old container stashed
self.brain.stop_primary_half()
self.brain.put_container(int(policy))
self.brain.start_primary_half()
# write some target data
target_name = self.get_object_name('target')
self.brain.client.put_object(self.container_name, target_name, {},
b'this is the target data')
# write the symlink
self.brain.stop_handoff_half()
self.brain.put_container(int(wrong_policy))
symlink_name = self.get_object_name('symlink')
self.brain.client.put_object(
self.container_name, symlink_name, {
'X-Symlink-Target': '%s/%s' % (
self.container_name, target_name),
'Content-Type': 'application/symlink',
}, b'')
# at this point we have a broken symlink (the container_info has the
# proxy looking for the target in the wrong policy)
with self.assertRaises(ClientException) as ctx:
self.brain.client.get_object(self.container_name, symlink_name)
self.assertEqual(ctx.exception.http_status, 404)
# of course the symlink itself is fine
metadata, body = self.brain.client.get_object(
self.container_name, symlink_name, query_string='symlink=get')
self.assertEqual(metadata['x-symlink-target'],
utils.quote('%s/%s' % (
self.container_name, target_name)))
self.assertEqual(metadata['content-type'], 'application/symlink')
self.assertEqual(body, b'')
# ... although in the wrong policy
object_ring = POLICIES.get_object_ring(int(wrong_policy), '/etc/swift')
part, nodes = object_ring.get_nodes(
self.account, self.container_name, symlink_name)
for node in nodes:
metadata = direct_client.direct_head_object(
node, part, self.account, self.container_name, symlink_name,
headers={'X-Backend-Storage-Policy-Index': int(wrong_policy)})
self.assertEqual(metadata['X-Object-Sysmeta-Symlink-Target'],
utils.quote('%s/%s' % (
self.container_name, target_name)))
# let the reconciler run
self.brain.start_handoff_half()
self.get_to_final_state()
Manager(['container-reconciler']).once()
# clear proxy cache
self.brain.client.post_container(self.container_name, {})
# now the symlink works
metadata, body = self.brain.client.get_object(
self.container_name, symlink_name)
self.assertEqual(body, b'this is the target data')
# and it's in the correct policy
object_ring = POLICIES.get_object_ring(int(policy), '/etc/swift')
part, nodes = object_ring.get_nodes(
self.account, self.container_name, symlink_name)
for node in nodes:
metadata = direct_client.direct_head_object(
node, part, self.account, self.container_name, symlink_name,
headers={'X-Backend-Storage-Policy-Index': int(policy)})
self.assertEqual(metadata['X-Object-Sysmeta-Symlink-Target'],
utils.quote('%s/%s' % (
self.container_name, target_name)))
def test_reconciler_move_object_twice(self):
# select some policies
old_policy = random.choice(ENABLED_POLICIES)
new_policy = random.choice([p for p in ENABLED_POLICIES
if p != old_policy])
# setup a split brain
self.brain.stop_handoff_half()
# get old_policy on two primaries
self.brain.put_container(policy_index=int(old_policy))
self.brain.start_handoff_half()
self.brain.stop_primary_half()
# force a recreate on handoffs
self.brain.put_container(policy_index=int(old_policy))
self.brain.delete_container()
self.brain.put_container(policy_index=int(new_policy))
self.brain.put_object() # populate memcache with new_policy
self.brain.start_primary_half()
# at this point two primaries have old policy
container_part, container_nodes = self.container_ring.get_nodes(
self.account, self.container_name)
head_responses = [
(node, direct_client.direct_head_container(
node, container_part, self.account, self.container_name))
for node in container_nodes]
old_container_nodes = [
node for node, metadata in head_responses
if int(old_policy) ==
int(metadata['X-Backend-Storage-Policy-Index'])]
self.assertEqual(2, len(old_container_nodes))
# hopefully memcache still has the new policy cached
self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
contents=b'VERIFY')
# double-check object correctly written to new policy
conf_files = []
for server in Manager(['container-reconciler']).servers:
conf_files.extend(server.conf_files())
conf_file = conf_files[0]
int_client = InternalClient(conf_file, 'probe-test', 3)
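        # InternalClient(conf_path, user_agent, request_tries) loads a
        # proxy pipeline from the given config, letting the test make
        # backend requests with explicit X-Backend-Storage-Policy-Index
        # headers.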
int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
# shutdown the containers that know about the new policy
self.brain.stop_handoff_half()
# and get rows enqueued from old nodes
for server_type in ('container-replicator', 'container-updater'):
server = Manager([server_type])
for node in old_container_nodes:
server.once(number=self.config_number(node))
# verify entry in the queue for the "misplaced" new_policy
for container in int_client.iter_containers(MISPLACED_OBJECTS_ACCOUNT):
for obj in int_client.iter_objects(MISPLACED_OBJECTS_ACCOUNT,
container['name']):
expected = '%d:/%s/%s/%s' % (new_policy, self.account,
self.container_name,
self.object_name)
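                # Reconciler queue entries are named
                # '<policy_index>:/<account>/<container>/<object>', e.g.
                # (made-up values) '1:/AUTH_test/container-abc/object-def'.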
self.assertEqual(obj['name'], expected)
Manager(['container-reconciler']).once()
# verify object in old_policy
int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
# verify object is *not* in new_policy
int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
self.get_to_final_state()
# verify entry in the queue
for container in int_client.iter_containers(MISPLACED_OBJECTS_ACCOUNT):
for obj in int_client.iter_objects(MISPLACED_OBJECTS_ACCOUNT,
container['name']):
expected = '%d:/%s/%s/%s' % (old_policy, self.account,
self.container_name,
self.object_name)
self.assertEqual(obj['name'], expected)
Manager(['container-reconciler']).once()
# and now it flops back
int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
# make sure the queue is settled
self.get_to_final_state()
for container in int_client.iter_containers(MISPLACED_OBJECTS_ACCOUNT):
for obj in int_client.iter_objects(MISPLACED_OBJECTS_ACCOUNT,
container['name']):
self.fail('Found unexpected object %r in the queue' % obj)
# verify that the object data read by external client is correct
headers, data = self._get_object_patiently(int(new_policy))
self.assertEqual(b'VERIFY', data)
self.assertEqual('custom-meta', headers['x-object-meta-test'])
class TestReservedNamespaceMergePolicyIndex(TestContainerMergePolicyIndex):
@unittest.skipIf(len(ENABLED_POLICIES) < 2, "Need more than one policy")
def setUp(self):
super(TestReservedNamespaceMergePolicyIndex, self).setUp()
self.container_name = get_reserved_name('container', str(uuid.uuid4()))
self.object_name = get_reserved_name('object', str(uuid.uuid4()))
self.brain = InternalBrainSplitter('/etc/swift/internal-client.conf',
self.container_name,
self.object_name, 'container')
def get_object_name(self, name):
return get_reserved_name(name)
def test_reconcile_manifest(self):
raise unittest.SkipTest(
'SLO does not allow parts in the reserved namespace')
if __name__ == "__main__":
unittest.main()
| swift-master | test/probe/test_container_merge_policy_index.py |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import BytesIO
import unittest
import os
import uuid
from swift.common.direct_client import direct_get_suffix_hashes
from swift.common.exceptions import DiskFileDeleted
from swift.common.internal_client import UnexpectedResponse
from swift.common.swob import normalize_etag
from swift.container.backend import ContainerBroker
from swift.common import utils
from swiftclient import client
from swift.common.ring import Ring
from swift.common.utils import Timestamp, get_logger, hash_path
from swift.obj.diskfile import DiskFileManager
from swift.common.storage_policy import POLICIES
from test.probe.brain import BrainSplitter
from test.probe.common import ReplProbeTest
class Test(ReplProbeTest):
def setUp(self):
"""
        Reset the environment and start all servers.
"""
super(Test, self).setUp()
self.container_name = 'container-%s' % uuid.uuid4()
self.object_name = 'object-%s' % uuid.uuid4()
self.brain = BrainSplitter(self.url, self.token, self.container_name,
self.object_name, 'object',
policy=self.policy)
self.container_brain = BrainSplitter(self.url, self.token,
self.container_name)
self.int_client = self.make_internal_client()
def _get_object_info(self, account, container, obj, number):
obj_conf = self.configs['object-server']
config_path = obj_conf[number]
options = utils.readconf(config_path, 'app:object-server')
swift_dir = options.get('swift_dir', '/etc/swift')
ring = POLICIES.get_object_ring(int(self.policy), swift_dir)
part, nodes = ring.get_nodes(account, container, obj)
for node in nodes:
# assumes one to one mapping
if node['port'] == int(options.get('bind_port')):
device = node['device']
break
else:
return None
mgr = DiskFileManager(options, get_logger(options))
disk_file = mgr.get_diskfile(device, part, account, container, obj,
self.policy)
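        # read_metadata() opens the on-disk file and returns its metadata;
        # if only a tombstone is present it raises DiskFileDeleted, which
        # _assert_consistent_deleted_object below relies on.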
info = disk_file.read_metadata()
return info
def _assert_consistent_object_metadata(self):
obj_info = []
for i in range(1, 5):
info_i = self._get_object_info(self.account, self.container_name,
self.object_name, i)
if info_i:
obj_info.append(info_i)
self.assertGreater(len(obj_info), 1)
for other in obj_info[1:]:
self.assertDictEqual(obj_info[0], other)
def _assert_consistent_deleted_object(self):
for i in range(1, 5):
try:
info = self._get_object_info(self.account, self.container_name,
self.object_name, i)
if info is not None:
self.fail('Expected no disk file info but found %s' % info)
except DiskFileDeleted:
pass
def _get_db_info(self, account, container, number):
server_type = 'container'
obj_conf = self.configs['%s-server' % server_type]
config_path = obj_conf[number]
options = utils.readconf(config_path, 'app:container-server')
root = options.get('devices')
swift_dir = options.get('swift_dir', '/etc/swift')
ring = Ring(swift_dir, ring_name=server_type)
part, nodes = ring.get_nodes(account, container)
for node in nodes:
# assumes one to one mapping
if node['port'] == int(options.get('bind_port')):
device = node['device']
break
else:
return None
path_hash = utils.hash_path(account, container)
_dir = utils.storage_directory('%ss' % server_type, part, path_hash)
db_dir = os.path.join(root, device, _dir)
db_file = os.path.join(db_dir, '%s.db' % path_hash)
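        # i.e., schematically:
        # <devices>/<device>/containers/<part>/<hash[-3:]>/<hash>/<hash>.db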
db = ContainerBroker(db_file)
return db.get_info()
def _assert_consistent_container_dbs(self):
db_info = []
for i in range(1, 5):
info_i = self._get_db_info(self.account, self.container_name, i)
if info_i:
db_info.append(info_i)
self.assertGreater(len(db_info), 1)
for other in db_info[1:]:
self.assertEqual(db_info[0]['hash'], other['hash'],
'Container db hash mismatch: %s != %s'
% (db_info[0]['hash'], other['hash']))
def _assert_object_metadata_matches_listing(self, listing, metadata):
self.assertEqual(listing['bytes'], int(metadata['content-length']))
self.assertEqual(listing['hash'], normalize_etag(metadata['etag']))
self.assertEqual(listing['content_type'], metadata['content-type'])
modified = Timestamp(metadata['x-timestamp']).isoformat
self.assertEqual(listing['last_modified'], modified)
def _put_object(self, headers=None, body=b'stuff'):
headers = headers or {}
self.int_client.upload_object(BytesIO(body), self.account,
self.container_name,
self.object_name, headers)
def _post_object(self, headers):
self.int_client.set_object_metadata(self.account, self.container_name,
self.object_name, headers)
def _delete_object(self):
self.int_client.delete_object(self.account, self.container_name,
self.object_name)
def _get_object(self, headers=None, expect_statuses=(2,)):
return self.int_client.get_object(self.account,
self.container_name,
self.object_name,
headers,
acceptable_statuses=expect_statuses)
def _get_object_metadata(self):
return self.int_client.get_object_metadata(self.account,
self.container_name,
self.object_name)
def _assert_consistent_suffix_hashes(self):
opart, onodes = self.object_ring.get_nodes(
self.account, self.container_name, self.object_name)
name_hash = hash_path(
self.account, self.container_name, self.object_name)
results = []
for node in onodes:
results.append(
(node,
direct_get_suffix_hashes(node, opart, [name_hash[-3:]])))
for (node, hashes) in results[1:]:
self.assertEqual(results[0][1], hashes,
'Inconsistent suffix hashes found: %s' % results)
def test_object_delete_is_replicated(self):
self.brain.put_container()
# put object
self._put_object()
# put newer object with sysmeta to first server subset
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._put_object()
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# delete object on second server subset
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._delete_object()
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# run replicator
self.get_to_final_state()
# check object deletion has been replicated on first server set
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._get_object(expect_statuses=(4,))
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# check object deletion persists on second server set
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._get_object(expect_statuses=(4,))
# put newer object to second server set
self._put_object()
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# run replicator
self.get_to_final_state()
# check new object has been replicated on first server set
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._get_object()
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# check new object persists on second server set
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._get_object()
def test_object_after_replication_with_subsequent_post(self):
self.brain.put_container()
# put object
self._put_object(headers={'Content-Type': 'foo'}, body=b'older')
# put newer object to first server subset
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'bar'}, body=b'newer')
metadata = self._get_object_metadata()
etag = metadata['etag']
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# post some user meta to all servers
self._post_object({'x-object-meta-bar': 'meta-bar'})
# run replicator
self.get_to_final_state()
# check that newer data has been replicated to second server subset
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
metadata = self._get_object_metadata()
self.assertEqual(etag, metadata['etag'])
self.assertEqual('bar', metadata['content-type'])
self.assertEqual('meta-bar', metadata['x-object-meta-bar'])
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
self._assert_consistent_object_metadata()
self._assert_consistent_container_dbs()
self._assert_consistent_suffix_hashes()
def test_sysmeta_after_replication_with_subsequent_put(self):
sysmeta = {'x-object-sysmeta-foo': 'older'}
sysmeta2 = {'x-object-sysmeta-foo': 'newer'}
usermeta = {'x-object-meta-bar': 'meta-bar'}
self.brain.put_container()
# put object with sysmeta to first server subset
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._put_object(headers=sysmeta)
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertIn(key, metadata)
self.assertEqual(metadata[key], sysmeta[key])
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# put object with updated sysmeta to second server subset
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._put_object(headers=sysmeta2)
metadata = self._get_object_metadata()
for key in sysmeta2:
self.assertIn(key, metadata)
self.assertEqual(metadata[key], sysmeta2[key])
self._post_object(usermeta)
metadata = self._get_object_metadata()
for key in usermeta:
self.assertIn(key, metadata)
self.assertEqual(metadata[key], usermeta[key])
for key in sysmeta2:
self.assertIn(key, metadata)
self.assertEqual(metadata[key], sysmeta2[key])
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# run replicator
self.get_to_final_state()
# check sysmeta has been replicated to first server subset
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
metadata = self._get_object_metadata()
for key in usermeta:
self.assertIn(key, metadata)
self.assertEqual(metadata[key], usermeta[key])
for key in sysmeta2.keys():
self.assertIn(key, metadata, key)
self.assertEqual(metadata[key], sysmeta2[key])
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# check user sysmeta ok on second server subset
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
metadata = self._get_object_metadata()
for key in usermeta:
self.assertIn(key, metadata)
self.assertEqual(metadata[key], usermeta[key])
for key in sysmeta2.keys():
self.assertIn(key, metadata, key)
self.assertEqual(metadata[key], sysmeta2[key])
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
self._assert_consistent_object_metadata()
self._assert_consistent_container_dbs()
self._assert_consistent_suffix_hashes()
def test_sysmeta_after_replication_with_subsequent_post(self):
sysmeta = {'x-object-sysmeta-foo': 'sysmeta-foo'}
usermeta = {'x-object-meta-bar': 'meta-bar'}
transient_sysmeta = {
'x-object-transient-sysmeta-bar': 'transient-sysmeta-bar'}
self.brain.put_container()
# put object
self._put_object()
# put newer object with sysmeta to first server subset
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._put_object(headers=sysmeta)
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertIn(key, metadata)
self.assertEqual(metadata[key], sysmeta[key])
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# post some user meta to second server subset
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
user_and_transient_sysmeta = dict(usermeta)
user_and_transient_sysmeta.update(transient_sysmeta)
self._post_object(user_and_transient_sysmeta)
metadata = self._get_object_metadata()
for key in user_and_transient_sysmeta:
self.assertIn(key, metadata)
self.assertEqual(metadata[key], user_and_transient_sysmeta[key])
for key in sysmeta:
self.assertNotIn(key, metadata)
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# run replicator
self.get_to_final_state()
# check user metadata has been replicated to first server subset
# and sysmeta is unchanged
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
metadata = self._get_object_metadata()
expected = dict(sysmeta)
expected.update(usermeta)
expected.update(transient_sysmeta)
for key in expected.keys():
self.assertIn(key, metadata, key)
self.assertEqual(metadata[key], expected[key])
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# check user metadata and sysmeta both on second server subset
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
metadata = self._get_object_metadata()
for key in expected.keys():
self.assertIn(key, metadata, key)
self.assertEqual(metadata[key], expected[key])
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
self._assert_consistent_object_metadata()
self._assert_consistent_container_dbs()
self._assert_consistent_suffix_hashes()
def test_sysmeta_after_replication_with_prior_post(self):
sysmeta = {'x-object-sysmeta-foo': 'sysmeta-foo'}
usermeta = {'x-object-meta-bar': 'meta-bar'}
transient_sysmeta = {
'x-object-transient-sysmeta-bar': 'transient-sysmeta-bar'}
self.brain.put_container()
# put object
self._put_object()
# put user meta to first server subset
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
user_and_transient_sysmeta = dict(usermeta)
user_and_transient_sysmeta.update(transient_sysmeta)
self._post_object(user_and_transient_sysmeta)
metadata = self._get_object_metadata()
for key in user_and_transient_sysmeta:
self.assertIn(key, metadata)
self.assertEqual(metadata[key], user_and_transient_sysmeta[key])
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# put newer object with sysmeta to second server subset
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._put_object(headers=sysmeta)
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertIn(key, metadata)
self.assertEqual(metadata[key], sysmeta[key])
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# run replicator
self.get_to_final_state()
# check stale user metadata is not replicated to first server subset
# and sysmeta is unchanged
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertIn(key, metadata)
self.assertEqual(metadata[key], sysmeta[key])
for key in user_and_transient_sysmeta:
self.assertNotIn(key, metadata)
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# check stale user metadata is removed from second server subset
# and sysmeta is replicated
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertIn(key, metadata)
self.assertEqual(metadata[key], sysmeta[key])
for key in user_and_transient_sysmeta:
self.assertNotIn(key, metadata)
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
self._assert_consistent_object_metadata()
self._assert_consistent_container_dbs()
self._assert_consistent_suffix_hashes()
def test_post_ctype_replicated_when_previous_incomplete_puts(self):
# primary half handoff half
# ------------ ------------
# t0.data: ctype = foo
# t1.data: ctype = bar
# t2.meta: ctype = baz
#
# ...run replicator and expect...
#
# t1.data:
# t2.meta: ctype = baz
self.brain.put_container()
# incomplete write to primary half
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'foo'})
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# handoff write
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'bar'})
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# content-type update to primary half
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._post_object(headers={'Content-Type': 'baz'})
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
self.get_to_final_state()
# check object metadata
metadata = client.head_object(self.url, self.token,
self.container_name,
self.object_name)
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
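        # the content-type from the latest POST (t2, 'baz') should now appear
        # in the container listing as well as in the object metadata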
expected = 'baz'
self.assertEqual(obj['content_type'], expected)
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
def test_put_ctype_replicated_when_subsequent_post(self):
# primary half handoff half
# ------------ ------------
# t0.data: ctype = foo
# t1.data: ctype = bar
# t2.meta:
#
# ...run replicator and expect...
#
# t1.data: ctype = bar
# t2.meta:
self.brain.put_container()
# incomplete write
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'foo'})
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# handoff write
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'bar'})
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# metadata update with newest data unavailable
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._post_object(headers={'X-Object-Meta-Color': 'Blue'})
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
self.get_to_final_state()
# check object metadata
metadata = client.head_object(self.url, self.token,
self.container_name,
self.object_name)
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
else:
self.fail('obj not found in container listing')
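        # the t2 POST carried no content-type, so the newer PUT's 'bar' should
        # win while the POSTed user metadata is still applied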
expected = 'bar'
self.assertEqual(obj['content_type'], expected)
self.assertEqual(metadata['x-object-meta-color'], 'Blue')
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
def test_post_ctype_replicated_when_subsequent_post_without_ctype(self):
# primary half handoff half
# ------------ ------------
# t0.data: ctype = foo
# t1.data: ctype = bar
# t2.meta: ctype = bif
# t3.data: ctype = baz, color = 'Red'
# t4.meta: color = Blue
#
# ...run replicator and expect...
#
# t1.data:
# t4-delta.meta: ctype = baz, color = Blue
self.brain.put_container()
# incomplete write
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'foo',
'X-Object-Sysmeta-Test': 'older'})
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# handoff write
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'bar',
'X-Object-Sysmeta-Test': 'newer'})
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# incomplete post with content type
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._post_object(headers={'Content-Type': 'bif'})
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# incomplete post to handoff with content type
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._post_object(headers={'Content-Type': 'baz',
'X-Object-Meta-Color': 'Red'})
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# complete post with no content type
self._post_object(headers={'X-Object-Meta-Color': 'Blue',
'X-Object-Sysmeta-Test': 'ignored'})
# 'baz' wins over 'bar' but 'Blue' wins over 'Red'
self.get_to_final_state()
# check object metadata
metadata = self._get_object_metadata()
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
expected = 'baz'
self.assertEqual(obj['content_type'], expected)
self.assertEqual(metadata['x-object-meta-color'], 'Blue')
self.assertEqual(metadata['x-object-sysmeta-test'], 'newer')
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
def test_put_ctype_replicated_when_subsequent_posts_without_ctype(self):
# primary half handoff half
# ------------ ------------
# t0.data: ctype = foo
# t1.data: ctype = bar
# t2.meta:
# t3.meta
#
# ...run replicator and expect...
#
# t1.data: ctype = bar
# t3.meta
self.brain.put_container()
self._put_object(headers={'Content-Type': 'foo',
'X-Object-Sysmeta-Test': 'older'})
# incomplete write to handoff half
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'bar',
'X-Object-Sysmeta-Test': 'newer'})
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# incomplete post with no content type to primary half
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._post_object(headers={'X-Object-Meta-Color': 'Red',
'X-Object-Sysmeta-Test': 'ignored'})
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# incomplete post with no content type to handoff half
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._post_object(headers={'X-Object-Meta-Color': 'Blue'})
self.brain.start_primary_half()
self.container_brain.start_primary_half()
self.get_to_final_state()
# check object metadata
metadata = self._get_object_metadata()
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
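        # neither POST carried a content-type, so the newer PUT's 'bar' is
        # kept, while the newest POSTed color and the newer PUT's sysmeta
        # survive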
expected = 'bar'
self.assertEqual(obj['content_type'], expected)
self._assert_object_metadata_matches_listing(obj, metadata)
self.assertEqual(metadata['x-object-meta-color'], 'Blue')
self.assertEqual(metadata['x-object-sysmeta-test'], 'newer')
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
def test_posted_metadata_only_persists_after_prior_put(self):
# newer metadata posted to subset of nodes should persist after an
# earlier put on other nodes, but older content-type on that subset
# should not persist
self.brain.put_container()
# incomplete put to handoff
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'oldest',
'X-Object-Sysmeta-Test': 'oldest',
'X-Object-Meta-Test': 'oldest'})
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# incomplete put to primary
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'oldest',
'X-Object-Sysmeta-Test': 'oldest',
'X-Object-Meta-Test': 'oldest'})
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# incomplete post with content-type to handoff
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._post_object(headers={'Content-Type': 'newer',
'X-Object-Meta-Test': 'newer'})
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# incomplete put to primary
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'newest',
'X-Object-Sysmeta-Test': 'newest',
'X-Object-Meta-Test': 'newer'})
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# incomplete post with no content-type to handoff which still has
# out of date content-type
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._post_object(headers={'X-Object-Meta-Test': 'newest'})
metadata = self._get_object_metadata()
self.assertEqual(metadata['x-object-meta-test'], 'newest')
self.assertEqual(metadata['content-type'], 'newer')
self.brain.start_primary_half()
self.container_brain.start_primary_half()
self.get_to_final_state()
# check object metadata
metadata = self._get_object_metadata()
self.assertEqual(metadata['x-object-meta-test'], 'newest')
self.assertEqual(metadata['x-object-sysmeta-test'], 'newest')
self.assertEqual(metadata['content-type'], 'newest')
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
self.assertEqual(obj['content_type'], 'newest')
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
def test_post_trumped_by_prior_delete(self):
# new metadata and content-type posted to subset of nodes should not
# cause object to persist after replication of an earlier delete on
# other nodes.
self.brain.put_container()
# incomplete put
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'oldest',
'X-Object-Sysmeta-Test': 'oldest',
'X-Object-Meta-Test': 'oldest'})
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# incomplete put then delete
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'oldest',
'X-Object-Sysmeta-Test': 'oldest',
'X-Object-Meta-Test': 'oldest'})
self._delete_object()
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# handoff post
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._post_object(headers={'Content-Type': 'newest',
'X-Object-Sysmeta-Test': 'ignored',
'X-Object-Meta-Test': 'newest'})
# check object metadata
metadata = self._get_object_metadata()
self.assertEqual(metadata['x-object-sysmeta-test'], 'oldest')
self.assertEqual(metadata['x-object-meta-test'], 'newest')
self.assertEqual(metadata['content-type'], 'newest')
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# delete trumps later post
self.get_to_final_state()
# check object is now deleted
self.assertRaises(UnexpectedResponse, self._get_object_metadata)
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
self.assertEqual(0, len(objs))
self._assert_consistent_container_dbs()
self._assert_consistent_deleted_object()
self._assert_consistent_suffix_hashes()
if __name__ == "__main__":
unittest.main()
| swift-master | test/probe/test_object_metadata_replication.py |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import main
import random
from swiftclient import client
from swift.common import direct_client
from swift.common.request_helpers import get_reserved_name
from swift.obj import reconstructor
from test.probe.common import ReplProbeTest, ECProbeTest
class TestObjectVersioning(ReplProbeTest):
def _assert_account_level(self, container_name, hdr_cont_count,
hdr_obj_count, hdr_bytes, cont_count,
cont_bytes):
headers, containers = client.get_account(self.url, self.token)
self.assertEqual(hdr_cont_count, headers['x-account-container-count'])
self.assertEqual(hdr_obj_count, headers['x-account-object-count'])
self.assertEqual(hdr_bytes, headers['x-account-bytes-used'])
self.assertEqual(len(containers), 1)
container = containers[0]
self.assertEqual(container_name, container['name'])
self.assertEqual(cont_count, container['count'])
self.assertEqual(cont_bytes, container['bytes'])
def test_account_listing(self):
versions_header_key = 'X-Versions-Enabled'
# Create container1
container_name = 'container1'
obj_name = 'object1'
client.put_container(self.url, self.token, container_name)
# Assert account level sees it
self._assert_account_level(
container_name,
hdr_cont_count='1',
hdr_obj_count='0',
hdr_bytes='0',
cont_count=0,
cont_bytes=0)
# Enable versioning
hdrs = {versions_header_key: 'True'}
client.post_container(self.url, self.token, container_name, hdrs)
# write multiple versions of same obj
client.put_object(self.url, self.token, container_name, obj_name,
'version1')
client.put_object(self.url, self.token, container_name, obj_name,
'version2')
# Assert account level doesn't see object data yet, but it
# does see the update for the hidden container
self._assert_account_level(
container_name,
hdr_cont_count='2',
hdr_obj_count='0',
hdr_bytes='0',
cont_count=0,
cont_bytes=0)
# Get to final state
self.get_to_final_state()
# Assert account level now sees updated values
# N.B: Note difference in values between header and container listing
# header object count is counting both symlink + object versions
# listing count is counting only symlink (in primary container)
self._assert_account_level(
container_name,
hdr_cont_count='2',
hdr_obj_count='3',
hdr_bytes='16',
cont_count=1,
cont_bytes=16)
client.delete_object(self.url, self.token, container_name, obj_name)
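        # deleting the object writes a delete marker: the container listing is
        # now empty, but the ?versions listing shows both object versions plus
        # the delete marker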
_headers, current_versions = client.get_container(
self.url, self.token, container_name)
self.assertEqual(len(current_versions), 0)
_headers, all_versions = client.get_container(
self.url, self.token, container_name, query_string='versions')
self.assertEqual(len(all_versions), 3)
# directly delete primary container to leave an orphan hidden
# container
self.direct_delete_container(container=container_name)
# Get to final state
self.get_to_final_state()
# The container count decreases, as well as object count. But bytes
# do not. The discrepancy between header object count, container
# object count and bytes should indicate orphan hidden container is
# still around consuming storage
self._assert_account_level(
container_name,
hdr_cont_count='1',
hdr_obj_count='3',
hdr_bytes='16',
cont_count=0,
cont_bytes=16)
# Can't HEAD or list anything, though
with self.assertRaises(client.ClientException) as caught:
client.head_container(self.url, self.token, container_name)
self.assertEqual(caught.exception.http_status, 404)
with self.assertRaises(client.ClientException) as caught:
client.get_container(self.url, self.token, container_name)
self.assertEqual(caught.exception.http_status, 404)
with self.assertRaises(client.ClientException) as caught:
client.get_container(self.url, self.token, container_name,
query_string='versions')
self.assertEqual(caught.exception.http_status, 404)
with self.assertRaises(client.ClientException) as caught:
client.get_object(
self.url, self.token, container_name, all_versions[1]['name'],
query_string='version-id=%s' % all_versions[1]['version_id'])
# A little funny -- maybe this should 404 instead?
self.assertEqual(caught.exception.http_status, 400)
# Fix isn't too bad -- just make the container again!
client.put_container(self.url, self.token, container_name)
_headers, current_versions = client.get_container(
self.url, self.token, container_name)
self.assertEqual(len(current_versions), 0)
_headers, all_versions = client.get_container(
self.url, self.token, container_name, query_string='versions')
self.assertEqual(len(all_versions), 3)
# ... but to actually *access* the versions, you have to enable
# versioning again
with self.assertRaises(client.ClientException) as caught:
client.get_object(
self.url, self.token, container_name, all_versions[1]['name'],
query_string='version-id=%s' % all_versions[1]['version_id'])
self.assertEqual(caught.exception.http_status, 400)
self.assertIn(b'version-aware operations require',
caught.exception.http_response_content)
client.post_container(self.url, self.token, container_name,
headers={'X-Versions-Enabled': 'true'})
client.get_object(
self.url, self.token, container_name, all_versions[1]['name'],
query_string='version-id=%s' % all_versions[1]['version_id'])
def test_missing_versions_container(self):
versions_header_key = 'X-Versions-Enabled'
# Create container1
container_name = 'container1'
obj_name = 'object1'
client.put_container(self.url, self.token, container_name)
# Write some data
client.put_object(self.url, self.token, container_name, obj_name,
b'null version')
# Enable versioning
hdrs = {versions_header_key: 'True'}
client.post_container(self.url, self.token, container_name, hdrs)
# But directly delete hidden container to leave an orphan primary
# container
self.direct_delete_container(container=get_reserved_name(
'versions', container_name))
# Could be worse; we can still list versions and GET data
_headers, all_versions = client.get_container(
self.url, self.token, container_name, query_string='versions')
self.assertEqual(len(all_versions), 1)
self.assertEqual(all_versions[0]['name'], obj_name)
self.assertEqual(all_versions[0]['version_id'], 'null')
_headers, data = client.get_object(
self.url, self.token, container_name, obj_name)
self.assertEqual(data, b'null version')
_headers, data = client.get_object(
self.url, self.token, container_name, obj_name,
query_string='version-id=null')
self.assertEqual(data, b'null version')
# But most any write is going to fail
with self.assertRaises(client.ClientException) as caught:
client.put_object(self.url, self.token, container_name, obj_name,
b'new version')
self.assertEqual(caught.exception.http_status, 500)
with self.assertRaises(client.ClientException) as caught:
client.delete_object(self.url, self.token, container_name,
obj_name)
self.assertEqual(caught.exception.http_status, 500)
# Version-aware delete can work, though!
client.delete_object(self.url, self.token, container_name, obj_name,
query_string='version-id=null')
# Re-enabling versioning should square us
hdrs = {versions_header_key: 'True'}
client.post_container(self.url, self.token, container_name, hdrs)
client.put_object(self.url, self.token, container_name, obj_name,
b'new version')
_headers, all_versions = client.get_container(
self.url, self.token, container_name, query_string='versions')
self.assertEqual(len(all_versions), 1)
self.assertEqual(all_versions[0]['name'], obj_name)
self.assertNotEqual(all_versions[0]['version_id'], 'null')
_headers, data = client.get_object(
self.url, self.token, container_name, obj_name)
self.assertEqual(data, b'new version')
class TestECObjectVersioning(ECProbeTest):
def setUp(self):
super(TestECObjectVersioning, self).setUp()
self.part, self.nodes = self.object_ring.get_nodes(
self.account, self.container_name, self.object_name)
def test_versioning_with_metadata_replication(self):
# Enable versioning
client.put_container(self.url, self.token, self.container_name,
headers={
'X-Storage-Policy': self.policy.name,
'X-Versions-Enabled': 'True',
})
# create version with metadata in a handoff location
failed_primary = random.choice(self.nodes)
failed_primary_device_path = self.device_dir(failed_primary)
self.kill_drive(failed_primary_device_path)
headers = {'x-object-meta-foo': 'meta-foo'}
client.put_object(self.url, self.token, self.container_name,
self.object_name, contents='some data',
headers=headers)
headers_post = {'x-object-meta-bar': 'meta-bar'}
client.post_object(self.url, self.token, self.container_name,
self.object_name, headers=headers_post)
# find the handoff
primary_ids = [n['id'] for n in self.nodes]
for handoff in self.object_ring.devs:
if handoff['id'] in primary_ids:
continue
try:
headers, etag = self.direct_get(handoff, self.part)
except direct_client.DirectClientException as err:
if err.http_status != 404:
raise
else:
break
else:
self.fail('unable to find object on handoffs')
# we want to repair the fault, but avoid doing the handoff revert
self.revive_drive(failed_primary_device_path)
handoff_config = self.config_number(handoff)
failed_config = self.config_number(failed_primary)
partner_nodes = reconstructor._get_partners(
failed_primary['index'], self.nodes)
random.shuffle(partner_nodes)
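        # pick a partner of the failed primary that lives on a different
        # server (config number) than both the handoff and the failed node;
        # running the reconstructor there should rebuild the missing fragment
        # on the revived drive without triggering the handoff revert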
for partner in partner_nodes:
fix_config = self.config_number(partner)
if fix_config not in (handoff_config, failed_config):
break
else:
self.fail('unable to find fix_config in %r excluding %r & %r' % (
[(d['device'], self.config_number(d)) for d in partner_nodes],
handoff_config, failed_config))
self.reconstructor.once(number=fix_config)
# validate object in all locations
missing = []
etags = set()
metadata = []
for node in self.nodes:
try:
headers, etag = self.direct_get(node, self.part)
except direct_client.DirectClientException as err:
if err.http_status != 404:
raise
missing.append(node)
continue
etags.add(headers['X-Object-Sysmeta-Ec-Etag'])
metadata.append(headers['X-Object-Meta-Bar'])
if missing:
self.fail('Ran reconstructor config #%s to repair %r but '
'found 404 on primary: %r' % (
fix_config, failed_primary['device'],
[d['device'] for d in missing]))
self.assertEqual(1, len(etags))
self.assertEqual(['meta-bar'] * len(self.nodes), metadata)
# process revert
self.reconstructor.once(number=handoff_config)
# validate object (still?) in primary locations
etags = set()
metadata = []
for node in self.nodes:
headers, etag = self.direct_get(node, self.part)
etags.add(headers['X-Object-Sysmeta-Ec-Etag'])
metadata.append(headers['X-Object-Meta-Bar'])
self.assertEqual(1, len(etags))
self.assertEqual(['meta-bar'] * len(self.nodes), metadata)
        # and removed from the handoff
with self.assertRaises(direct_client.DirectClientException) as ctx:
headers, etag = self.direct_get(handoff, self.part)
self.assertEqual(ctx.exception.http_status, 404)
if __name__ == '__main__':
main()
| swift-master | test/probe/test_object_versioning.py |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import unittest
import random
import shutil
from collections import defaultdict
from swift.obj.reconstructor import ObjectReconstructor
from test.probe.common import ECProbeTest, Body
from swift.common import direct_client
from swift.obj import reconstructor
from swiftclient import client
class TestReconstructorRevert(ECProbeTest):
def test_revert_object(self):
# create EC container
headers = {'X-Storage-Policy': self.policy.name}
client.put_container(self.url, self.token, self.container_name,
headers=headers)
# get our node lists
opart, onodes = self.object_ring.get_nodes(
self.account, self.container_name, self.object_name)
hnodes = self.object_ring.get_more_nodes(opart)
# kill 2 a parity count number of primary nodes so we can
# force data onto handoffs, we do that by renaming dev dirs
# to induce 507
p_dev1 = self.device_dir(onodes[0])
p_dev2 = self.device_dir(onodes[1])
self.kill_drive(p_dev1)
self.kill_drive(p_dev2)
# PUT object
contents = Body()
headers = {'x-object-meta-foo': 'meta-foo',
u'x-object-meta-non-ascii-value1': u'meta-f\xf6\xf6'}
headers_post = {'x-object-meta-bar': 'meta-bar',
u'x-object-meta-non-ascii-value2': u'meta-b\xe4r'}
client.put_object(self.url, self.token, self.container_name,
self.object_name, contents=contents,
headers=headers)
client.post_object(self.url, self.token, self.container_name,
self.object_name, headers=headers_post)
# (Some versions of?) swiftclient will mutate the headers dict on post
headers_post.pop('X-Auth-Token', None)
# these primaries can't serve the data any more, we expect 507
# here and not 404 because we're using mount_check to kill nodes
for onode in (onodes[0], onodes[1]):
self.assert_direct_get_fails(onode, opart, 507)
# now take out another primary
p_dev3 = self.device_dir(onodes[2])
self.kill_drive(p_dev3)
# this node can't serve the data any more
self.assert_direct_get_fails(onodes[2], opart, 507)
        # make sure we can still GET the object and that it's correct;
# we're now pulling from handoffs and reconstructing
_headers, etag = self.proxy_get()
self.assertEqual(etag, contents.etag)
# rename the dev dirs so they don't 507 anymore
self.revive_drive(p_dev1)
self.revive_drive(p_dev2)
self.revive_drive(p_dev3)
# fire up reconstructor on handoff nodes only
for hnode in hnodes:
hnode_id = self.config_number(hnode)
self.reconstructor.once(number=hnode_id)
# first three primaries have data again
for onode in (onodes[0], onodes[2]):
self.assert_direct_get_succeeds(onode, opart)
# check meta
meta = client.head_object(self.url, self.token,
self.container_name,
self.object_name)
for key in headers_post:
self.assertIn(key, meta)
self.assertEqual(meta[key], headers_post[key])
# handoffs are empty
for hnode in hnodes:
self.assert_direct_get_fails(hnode, opart, 404)
def test_delete_propagate(self):
# create EC container
headers = {'X-Storage-Policy': self.policy.name}
client.put_container(self.url, self.token, self.container_name,
headers=headers)
# get our node lists
opart, onodes = self.object_ring.get_nodes(
self.account, self.container_name, self.object_name)
hnodes = list(itertools.islice(
self.object_ring.get_more_nodes(opart), 2))
# PUT object
contents = Body()
client.put_object(self.url, self.token, self.container_name,
self.object_name, contents=contents)
# now lets shut down a couple of primaries
failed_nodes = random.sample(onodes, 2)
for node in failed_nodes:
self.kill_drive(self.device_dir(node))
# Write tombstones over the nodes that are still online
client.delete_object(self.url, self.token,
self.container_name,
self.object_name)
# spot check the primary nodes that are still online
delete_timestamp = None
for node in onodes:
if node in failed_nodes:
continue
try:
self.direct_get(node, opart)
except direct_client.DirectClientException as err:
self.assertEqual(err.http_status, 404)
delete_timestamp = err.http_headers['X-Backend-Timestamp']
else:
self.fail('Node data on %r was not fully destroyed!' %
(node,))
# run the reconstructor on the handoff node multiple times until
# tombstone is pushed out - each handoff node syncs to a few
# primaries each time
iterations = 0
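        # the bound of 52 passes is just a safety net; we break out as soon as
        # the tombstone has been reverted off this handoff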
while iterations < 52:
self.reconstructor.once(number=self.config_number(hnodes[0]))
iterations += 1
# see if the tombstone is reverted
try:
self.direct_get(hnodes[0], opart)
except direct_client.DirectClientException as err:
self.assertEqual(err.http_status, 404)
if 'X-Backend-Timestamp' not in err.http_headers:
# this means the tombstone is *gone* so it's reverted
break
else:
self.fail('Still found tombstone on %r after %s iterations' % (
hnodes[0], iterations))
# tombstone is still on the *second* handoff
try:
self.direct_get(hnodes[1], opart)
except direct_client.DirectClientException as err:
self.assertEqual(err.http_status, 404)
self.assertEqual(err.http_headers['X-Backend-Timestamp'],
delete_timestamp)
else:
self.fail('Found obj data on %r' % hnodes[1])
# repair the primaries
self.revive_drive(self.device_dir(failed_nodes[0]))
self.revive_drive(self.device_dir(failed_nodes[1]))
# run reconstructor on second handoff
self.reconstructor.once(number=self.config_number(hnodes[1]))
# verify tombstone is reverted on the first pass
try:
self.direct_get(hnodes[1], opart)
except direct_client.DirectClientException as err:
self.assertEqual(err.http_status, 404)
self.assertNotIn('X-Backend-Timestamp', err.http_headers)
else:
self.fail('Found obj data on %r' % hnodes[1])
# sanity make sure proxy get can't find it
try:
self.proxy_get()
except Exception as err:
self.assertEqual(err.http_status, 404)
else:
self.fail('Node data on %r was not fully destroyed!' %
(onodes[0]))
def test_reconstruct_from_reverted_fragment_archive(self):
headers = {'X-Storage-Policy': self.policy.name}
client.put_container(self.url, self.token, self.container_name,
headers=headers)
# get our node lists
opart, onodes = self.object_ring.get_nodes(
self.account, self.container_name, self.object_name)
        # find a primary server that only has one of its devices in the
# primary node list
group_nodes_by_config = defaultdict(list)
for n in onodes:
group_nodes_by_config[self.config_number(n)].append(n)
for config_number, node_list in group_nodes_by_config.items():
if len(node_list) == 1:
break
else:
self.fail('ring balancing did not use all available nodes')
primary_node = node_list[0]
        # ... and 507 its device
primary_device = self.device_dir(primary_node)
self.kill_drive(primary_device)
# PUT object
contents = Body()
etag = client.put_object(self.url, self.token, self.container_name,
self.object_name, contents=contents)
self.assertEqual(contents.etag, etag)
# fix the primary device and sanity GET
self.revive_drive(primary_device)
_headers, actual_etag = self.proxy_get()
self.assertEqual(etag, actual_etag)
# find a handoff holding the fragment
for hnode in self.object_ring.get_more_nodes(opart):
try:
_hdrs, reverted_fragment_etag = self.direct_get(hnode, opart)
except direct_client.DirectClientException as err:
if err.http_status != 404:
raise
else:
break
else:
self.fail('Unable to find handoff fragment!')
# we'll force the handoff device to revert instead of potentially
# racing with rebuild by deleting any other fragments that may be on
# the same server
handoff_fragment_etag = None
for node in onodes:
if self.is_local_to(node, hnode):
# we'll keep track of the etag of this fragment we're removing
                # in case we need it later (cue foreshadowing music)...
try:
_hdrs, handoff_fragment_etag = self.direct_get(node, opart)
except direct_client.DirectClientException as err:
if err.http_status != 404:
raise
# this just means our handoff device was on the same
# machine as the primary!
continue
# use the primary nodes device - not the hnode device
part_dir = self.storage_dir(node, part=opart)
shutil.rmtree(part_dir, True)
# revert from handoff device with reconstructor
self.reconstructor.once(number=self.config_number(hnode))
# verify fragment reverted to primary server
self.assertEqual(reverted_fragment_etag,
self.direct_get(primary_node, opart)[1])
# now we'll remove some data on one of the primary node's partners
partner = random.choice(reconstructor._get_partners(
primary_node['index'], onodes))
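        # a primary's reconstructor only checks and rebuilds fragments for its
        # ring partners, so the fragment must be removed from one of
        # primary_node's partners for the later rebuild to be exercised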
try:
_hdrs, rebuilt_fragment_etag = self.direct_get(partner, opart)
except direct_client.DirectClientException as err:
if err.http_status != 404:
raise
            # partner already had its fragment removed
if (handoff_fragment_etag is not None and
self.is_local_to(hnode, partner)):
# oh, well that makes sense then...
rebuilt_fragment_etag = handoff_fragment_etag
else:
# I wonder what happened?
self.fail('Partner inexplicably missing fragment!')
part_dir = self.storage_dir(partner, part=opart)
shutil.rmtree(part_dir, True)
# sanity, it's gone
try:
self.direct_get(partner, opart)
except direct_client.DirectClientException as err:
if err.http_status != 404:
raise
else:
self.fail('successful GET of removed partner fragment archive!?')
# and force the primary node to do a rebuild
self.reconstructor.once(number=self.config_number(primary_node))
# and validate the partners rebuilt_fragment_etag
try:
self.assertEqual(rebuilt_fragment_etag,
self.direct_get(partner, opart)[1])
except direct_client.DirectClientException as err:
if err.http_status != 404:
raise
else:
self.fail('Did not find rebuilt fragment on partner node')
def test_handoff_non_durable(self):
# verify that reconstructor reverts non-durable frags from handoff to
# primary (and also durable frag of same object on same handoff) and
# cleans up non-durable data files on handoffs after revert
headers = {'X-Storage-Policy': self.policy.name}
client.put_container(self.url, self.token, self.container_name,
headers=headers)
# get our node lists
opart, onodes = self.object_ring.get_nodes(
self.account, self.container_name, self.object_name)
pdevs = [self.device_dir(onode) for onode in onodes]
hnodes = list(itertools.islice(
self.object_ring.get_more_nodes(opart), 2))
# kill a primary nodes so we can force data onto a handoff
self.kill_drive(pdevs[0])
# PUT object at t1
contents = Body(total=3.5 * 2 ** 20)
headers = {'x-object-meta-foo': 'meta-foo'}
headers_post = {'x-object-meta-bar': 'meta-bar'}
client.put_object(self.url, self.token, self.container_name,
self.object_name, contents=contents,
headers=headers)
client.post_object(self.url, self.token, self.container_name,
self.object_name, headers=headers_post)
# (Some versions of?) swiftclient will mutate the headers dict on post
headers_post.pop('X-Auth-Token', None)
# this primary can't serve the data; we expect 507 here and not 404
# because we're using mount_check to kill nodes
self.assert_direct_get_fails(onodes[0], opart, 507)
# these primaries and first handoff do have the data
for onode in (onodes[1:]):
self.assert_direct_get_succeeds(onode, opart)
_hdrs, older_frag_etag = self.assert_direct_get_succeeds(hnodes[0],
opart)
self.assert_direct_get_fails(hnodes[1], opart, 404)
# make sure we can GET the object; there's 5 primaries and 1 handoff
headers, older_obj_etag = self.proxy_get()
self.assertEqual(contents.etag, older_obj_etag)
self.assertEqual('meta-bar', headers.get('x-object-meta-bar'))
# PUT object at t2; make all frags non-durable so that the previous
# durable frags at t1 remain on object server; use InternalClient so
# that x-backend-no-commit is passed through
internal_client = self.make_internal_client()
contents2 = Body(total=2.5 * 2 ** 20) # different content
self.assertNotEqual(contents2.etag, older_obj_etag) # sanity check
headers = {'x-backend-no-commit': 'True',
'x-object-meta-bar': 'meta-bar-new'}
internal_client.upload_object(contents2, self.account,
self.container_name.decode('utf8'),
self.object_name.decode('utf8'),
headers)
# GET should still return the older durable object
headers, obj_etag = self.proxy_get()
self.assertEqual(older_obj_etag, obj_etag)
self.assertEqual('meta-bar', headers.get('x-object-meta-bar'))
# on handoff we have older durable and newer non-durable
_hdrs, frag_etag = self.assert_direct_get_succeeds(hnodes[0], opart)
self.assertEqual(older_frag_etag, frag_etag)
_hdrs, newer_frag_etag = self.assert_direct_get_succeeds(
hnodes[0], opart, require_durable=False)
self.assertNotEqual(older_frag_etag, newer_frag_etag)
# now make all the newer frags durable only on the 5 primaries
self.assertEqual(5, self.make_durable(onodes[1:], opart))
# now GET will return the newer object
headers, newer_obj_etag = self.proxy_get()
self.assertEqual(contents2.etag, newer_obj_etag)
self.assertNotEqual(older_obj_etag, newer_obj_etag)
self.assertEqual('meta-bar-new', headers.get('x-object-meta-bar'))
# fix the 507'ing primary
self.revive_drive(pdevs[0])
# fire up reconstructor on handoff node only; commit_window is
# set to zero to ensure the nondurable handoff frag is purged
hnode_id = self.config_number(hnodes[0])
self.run_custom_daemon(
ObjectReconstructor, 'object-reconstructor', hnode_id,
{'commit_window': '0'})
# primary now has only the newer non-durable frag
self.assert_direct_get_fails(onodes[0], opart, 404)
_hdrs, frag_etag = self.assert_direct_get_succeeds(
onodes[0], opart, require_durable=False)
self.assertEqual(newer_frag_etag, frag_etag)
# handoff has only the older durable
_hdrs, frag_etag = self.assert_direct_get_succeeds(hnodes[0], opart)
self.assertEqual(older_frag_etag, frag_etag)
headers, frag_etag = self.assert_direct_get_succeeds(
hnodes[0], opart, require_durable=False)
self.assertEqual(older_frag_etag, frag_etag)
self.assertEqual('meta-bar', headers.get('x-object-meta-bar'))
# fire up reconstructor on handoff node only, again
self.reconstructor.once(number=hnode_id)
# primary now has the newer non-durable frag and the older durable frag
headers, frag_etag = self.assert_direct_get_succeeds(onodes[0], opart)
self.assertEqual(older_frag_etag, frag_etag)
self.assertEqual('meta-bar', headers.get('x-object-meta-bar'))
headers, frag_etag = self.assert_direct_get_succeeds(
onodes[0], opart, require_durable=False)
self.assertEqual(newer_frag_etag, frag_etag)
self.assertEqual('meta-bar-new', headers.get('x-object-meta-bar'))
# handoff has nothing
self.assert_direct_get_fails(hnodes[0], opart, 404,
require_durable=False)
# kill all but first two primaries
for pdev in pdevs[2:]:
self.kill_drive(pdev)
# fire up reconstructor on the remaining primary[1]; without the
# other primaries, primary[1] cannot rebuild the frag but it can let
# primary[0] know that its non-durable frag can be made durable
self.reconstructor.once(number=self.config_number(onodes[1]))
# first primary now has a *durable* *newer* frag - it *was* useful to
# sync the non-durable!
headers, frag_etag = self.assert_direct_get_succeeds(onodes[0], opart)
self.assertEqual(newer_frag_etag, frag_etag)
self.assertEqual('meta-bar-new', headers.get('x-object-meta-bar'))
# revive primaries (in case we want to debug)
for pdev in pdevs[2:]:
self.revive_drive(pdev)
if __name__ == "__main__":
unittest.main()
| swift-master | test/probe/test_reconstructor_revert.py |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import main
from swiftclient import client
from swift.common import direct_client
from swift.common.manager import Manager
from test.probe.common import kill_nonprimary_server, \
kill_server, ReplProbeTest, start_server
class TestAccountFailures(ReplProbeTest):
def test_main(self):
# Create container1 and container2
container1 = 'container1'
client.put_container(self.url, self.token, container1)
container2 = 'container2'
client.put_container(self.url, self.token, container2)
# Assert account level sees them
headers, containers = client.get_account(self.url, self.token)
self.assertEqual(headers['x-account-container-count'], '2')
self.assertEqual(headers['x-account-object-count'], '0')
self.assertEqual(headers['x-account-bytes-used'], '0')
found1 = False
found2 = False
for container in containers:
if container['name'] == container1:
found1 = True
self.assertEqual(container['count'], 0)
self.assertEqual(container['bytes'], 0)
elif container['name'] == container2:
found2 = True
self.assertEqual(container['count'], 0)
self.assertEqual(container['bytes'], 0)
self.assertTrue(found1)
self.assertTrue(found2)
# Create container2/object1
client.put_object(self.url, self.token, container2, 'object1', '1234')
# Assert account level doesn't see it yet
headers, containers = client.get_account(self.url, self.token)
self.assertEqual(headers['x-account-container-count'], '2')
self.assertEqual(headers['x-account-object-count'], '0')
self.assertEqual(headers['x-account-bytes-used'], '0')
found1 = False
found2 = False
for container in containers:
if container['name'] == container1:
found1 = True
self.assertEqual(container['count'], 0)
self.assertEqual(container['bytes'], 0)
elif container['name'] == container2:
found2 = True
self.assertEqual(container['count'], 0)
self.assertEqual(container['bytes'], 0)
self.assertTrue(found1)
self.assertTrue(found2)
# Get to final state
self.get_to_final_state()
# Assert account level now sees the container2/object1
headers, containers = client.get_account(self.url, self.token)
self.assertEqual(headers['x-account-container-count'], '2')
self.assertEqual(headers['x-account-object-count'], '1')
self.assertEqual(headers['x-account-bytes-used'], '4')
found1 = False
found2 = False
for container in containers:
if container['name'] == container1:
found1 = True
self.assertEqual(container['count'], 0)
self.assertEqual(container['bytes'], 0)
elif container['name'] == container2:
found2 = True
self.assertEqual(container['count'], 1)
self.assertEqual(container['bytes'], 4)
self.assertTrue(found1)
self.assertTrue(found2)
apart, anodes = self.account_ring.get_nodes(self.account)
        # Kill account servers excepting two of the primaries
        kill_nonprimary_server(anodes, self.ipport2server)
        kill_server((anodes[0]['ip'], anodes[0]['port']), self.ipport2server)
# Delete container1
client.delete_container(self.url, self.token, container1)
# Put container2/object2
client.put_object(self.url, self.token, container2, 'object2', '12345')
# Assert account level knows container1 is gone but doesn't know about
# container2/object2 yet
headers, containers = client.get_account(self.url, self.token)
self.assertEqual(headers['x-account-container-count'], '1')
self.assertEqual(headers['x-account-object-count'], '1')
self.assertEqual(headers['x-account-bytes-used'], '4')
found1 = False
found2 = False
for container in containers:
if container['name'] == container1:
found1 = True
elif container['name'] == container2:
found2 = True
self.assertEqual(container['count'], 1)
self.assertEqual(container['bytes'], 4)
self.assertFalse(found1)
self.assertTrue(found2)
# Run container updaters
Manager(['container-updater']).once()
# Assert account level now knows about container2/object2
headers, containers = client.get_account(self.url, self.token)
self.assertEqual(headers['x-account-container-count'], '1')
self.assertEqual(headers['x-account-object-count'], '2')
self.assertEqual(headers['x-account-bytes-used'], '9')
found1 = False
found2 = False
for container in containers:
if container['name'] == container1:
found1 = True
elif container['name'] == container2:
found2 = True
self.assertEqual(container['count'], 2)
self.assertEqual(container['bytes'], 9)
self.assertFalse(found1)
self.assertTrue(found2)
# Restart other primary account server
start_server((anodes[0]['ip'], anodes[0]['port']), self.ipport2server)
# Assert that server doesn't know about container1's deletion or the
# new container2/object2 yet
headers, containers = \
direct_client.direct_get_account(anodes[0], apart, self.account)
self.assertEqual(headers['x-account-container-count'], '2')
self.assertEqual(headers['x-account-object-count'], '1')
self.assertEqual(headers['x-account-bytes-used'], '4')
found1 = False
found2 = False
for container in containers:
if container['name'] == container1:
found1 = True
elif container['name'] == container2:
found2 = True
self.assertEqual(container['count'], 1)
self.assertEqual(container['bytes'], 4)
self.assertTrue(found1)
self.assertTrue(found2)
# Get to final state
self.get_to_final_state()
# Assert that server is now up to date
headers, containers = \
direct_client.direct_get_account(anodes[0], apart, self.account)
self.assertEqual(headers['x-account-container-count'], '1')
self.assertEqual(headers['x-account-object-count'], '2')
self.assertEqual(headers['x-account-bytes-used'], '9')
found1 = False
found2 = False
for container in containers:
if container['name'] == container1:
found1 = True
elif container['name'] == container2:
found2 = True
self.assertEqual(container['count'], 2)
self.assertEqual(container['bytes'], 9)
self.assertFalse(found1)
self.assertTrue(found2)
if __name__ == '__main__':
main()
| swift-master | test/probe/test_account_failures.py |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import BytesIO
from unittest import main
from uuid import uuid4
import os
import time
import shutil
import re
from swiftclient import client
from swift.obj.diskfile import get_data_dir
from test.probe.common import ReplProbeTest
from swift.common.request_helpers import get_reserved_name
from swift.common.utils import readconf
EXCLUDE_FILES = re.compile(r'^(hashes\.(pkl|invalid)|lock(-\d+)?)$')
def collect_info(path_list):
"""
    Recursively collect dirs and files under each directory in path_list.
    :param path_list: list of directories to walk
    :return: (files_list, dir_list) tuple with the files and dirs found
        under each path
"""
files_list = []
dir_list = []
for path in path_list:
temp_files_list = []
temp_dir_list = []
for root, dirs, files in os.walk(path):
files = [f for f in files if not EXCLUDE_FILES.match(f)]
temp_files_list += files
temp_dir_list += dirs
files_list.append(temp_files_list)
dir_list.append(temp_dir_list)
return files_list, dir_list
def find_max_occupancy_node(dir_list):
"""
Find node with maximum occupancy.
:param dir_list: list of directories for each node.
    :return: index of the node in dir_list with the most directories
"""
count = 0
number = 0
length = 0
for dirs in dir_list:
if length < len(dirs):
length = len(dirs)
number = count
count += 1
return number
class TestReplicatorFunctions(ReplProbeTest):
"""
Class for testing replicators and replication servers.
    With the default configuration, separate replication servers are not
    used. To test separate replication servers, the ring files need to be
    changed using the set_info command, or new ring files built with
    different port values.
"""
def put_data(self):
container = 'container-%s' % uuid4()
client.put_container(self.url, self.token, container,
headers={'X-Storage-Policy':
self.policy.name})
obj = 'object-%s' % uuid4()
client.put_object(self.url, self.token, container, obj, 'VERIFY')
def test_main(self):
# Create one account, container and object file.
# Find node with account, container and object replicas.
# Delete all directories and files from this node (device).
# Wait 60 seconds and check replication results.
# Delete directories and files in objects storage without
# deleting file "hashes.pkl".
# Check, that files not replicated.
# Delete file "hashes.pkl".
# Check, that all files were replicated.
path_list = []
data_dir = get_data_dir(self.policy)
# Figure out where the devices are
for node_id in range(1, 5):
conf = readconf(self.configs['object-server'][node_id])
device_path = conf['app:object-server']['devices']
for dev in self.object_ring.devs:
if dev['port'] == int(conf['app:object-server']['bind_port']):
device = dev['device']
path_list.append(os.path.join(device_path, device))
# Put data to storage nodes
self.put_data()
# Get all data file information
(files_list, dir_list) = collect_info(path_list)
num = find_max_occupancy_node(dir_list)
test_node = path_list[num]
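        # snapshot the busiest node's file and dir names (skipping transient
        # .pending files and tmp dirs) so we can verify they reappear after
        # the node is wiped and replication runs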
test_node_files_list = []
for files in files_list[num]:
if not files.endswith('.pending'):
test_node_files_list.append(files)
test_node_dir_list = []
for d in dir_list[num]:
if not d.startswith('tmp'):
test_node_dir_list.append(d)
# Run all replicators
try:
# Delete some files
for directory in os.listdir(test_node):
shutil.rmtree(os.path.join(test_node, directory))
self.assertFalse(os.listdir(test_node))
self.replicators.start()
# We will keep trying these tests until they pass for up to 60s
begin = time.time()
while True:
(new_files_list, new_dir_list) = collect_info([test_node])
try:
# Check replicate files and dir
for files in test_node_files_list:
self.assertIn(files, new_files_list[0])
for directory in test_node_dir_list:
self.assertIn(directory, new_dir_list[0])
# We want to make sure that replication is completely
# settled; any invalidated hashes should be rehashed so
# hashes.pkl is stable
for directory in os.listdir(
os.path.join(test_node, data_dir)):
hashes_invalid_path = os.path.join(
test_node, data_dir, directory, 'hashes.invalid')
self.assertEqual(os.stat(
hashes_invalid_path).st_size, 0)
break
except Exception:
if time.time() - begin > 60:
raise
time.sleep(1)
self.replicators.stop()
# Delete directories and files in objects storage without
# deleting file "hashes.pkl".
for directory in os.listdir(os.path.join(test_node, data_dir)):
for input_dir in os.listdir(os.path.join(
test_node, data_dir, directory)):
if os.path.isdir(os.path.join(
test_node, data_dir, directory, input_dir)):
shutil.rmtree(os.path.join(
test_node, data_dir, directory, input_dir))
self.replicators.once()
# Check, that files not replicated.
for directory in os.listdir(os.path.join(
test_node, data_dir)):
for input_dir in os.listdir(os.path.join(
test_node, data_dir, directory)):
self.assertFalse(os.path.isdir(
os.path.join(test_node, data_dir,
directory, input_dir)))
self.replicators.start()
# Now, delete file "hashes.pkl".
# Check, that all files were replicated.
for directory in os.listdir(os.path.join(test_node, data_dir)):
os.remove(os.path.join(
test_node, data_dir, directory, 'hashes.pkl'))
# We will keep trying these tests until they pass for up to 60s
begin = time.time()
while True:
try:
(new_files_list, new_dir_list) = collect_info([test_node])
# Check replicate files and dirs
for files in test_node_files_list:
self.assertIn(files, new_files_list[0])
for directory in test_node_dir_list:
self.assertIn(directory, new_dir_list[0])
break
except Exception:
if time.time() - begin > 60:
raise
time.sleep(1)
finally:
self.replicators.stop()
class TestReplicatorFunctionsReservedNames(TestReplicatorFunctions):
def put_data(self):
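        # like the parent class, but create reserved-namespace container and
        # object names via the internal client so replication of reserved
        # names is exercised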
int_client = self.make_internal_client()
int_client.create_account(self.account)
container = get_reserved_name('container', str(uuid4()))
int_client.create_container(self.account, container,
headers={'X-Storage-Policy':
self.policy.name})
obj = get_reserved_name('object', str(uuid4()))
int_client.upload_object(
BytesIO(b'VERIFY'), self.account, container, obj)
if __name__ == '__main__':
main()
| swift-master | test/probe/test_replication_servers_working.py |
#!/usr/bin/python -u
# Copyright (c) 2019 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from io import BytesIO
from uuid import uuid4
from swift.common.request_helpers import get_reserved_name
from test.probe.common import ReplProbeTest
from swiftclient import client, ClientException
class TestReservedNames(ReplProbeTest):
def test_simple_crud(self):
int_client = self.make_internal_client()
# Create reserve named container
user_cont = 'container-%s' % uuid4()
reserved_cont = get_reserved_name('container-%s' % uuid4())
client.put_container(self.url, self.token, user_cont)
int_client.create_container(self.account, reserved_cont)
# Check that we can list both reserved and non-reserved containers
self.assertEqual([reserved_cont, user_cont], [
c['name'] for c in int_client.iter_containers(self.account)])
# sanity, user can't get to reserved name
with self.assertRaises(ClientException) as cm:
client.head_container(self.url, self.token, reserved_cont)
self.assertEqual(412, cm.exception.http_status)
user_obj = 'obj-%s' % uuid4()
reserved_obj = get_reserved_name('obj-%s' % uuid4())
# InternalClient can write & read reserved names fine
int_client.upload_object(
BytesIO(b'data'), self.account, reserved_cont, reserved_obj)
int_client.get_object_metadata(
self.account, reserved_cont, reserved_obj)
_, _, app_iter = int_client.get_object(
self.account, reserved_cont, reserved_obj)
self.assertEqual(b''.join(app_iter), b'data')
self.assertEqual([reserved_obj], [
o['name']
for o in int_client.iter_objects(self.account, reserved_cont)])
# But reserved objects must be in reserved containers, and
# user objects must be in user containers (at least for now)
int_client.upload_object(
BytesIO(b'data'), self.account, reserved_cont, user_obj,
acceptable_statuses=(400,))
int_client.upload_object(
BytesIO(b'data'), self.account, user_cont, reserved_obj,
acceptable_statuses=(400,))
# Make sure we can clean up, too
int_client.delete_object(self.account, reserved_cont, reserved_obj)
int_client.delete_container(self.account, reserved_cont)
def test_symlink_target(self):
if 'symlink' not in self.cluster_info:
raise unittest.SkipTest(
"Symlink not enabled in proxy; can't test "
"symlink to reserved name")
int_client = self.make_internal_client()
# create link container first, ensure account gets created too
client.put_container(self.url, self.token, 'c1')
# Create reserve named container
tgt_cont = get_reserved_name('container-%s' % uuid4())
int_client.create_container(self.account, tgt_cont)
# sanity, user can't get to reserved name
with self.assertRaises(ClientException) as cm:
client.head_container(self.url, self.token, tgt_cont)
self.assertEqual(412, cm.exception.http_status)
tgt_obj = get_reserved_name('obj-%s' % uuid4())
int_client.upload_object(
BytesIO(b'target object'), self.account, tgt_cont, tgt_obj)
metadata = int_client.get_object_metadata(
self.account, tgt_cont, tgt_obj)
etag = metadata['etag']
# users can write a dynamic symlink that targets a reserved
# name object
client.put_object(
self.url, self.token, 'c1', 'symlink',
headers={
'X-Symlink-Target': '%s/%s' % (tgt_cont, tgt_obj),
'Content-Type': 'application/symlink',
})
# but can't read the symlink
with self.assertRaises(ClientException) as cm:
client.get_object(self.url, self.token, 'c1', 'symlink')
self.assertEqual(412, cm.exception.http_status)
# user's can't create static symlink to reserved name
with self.assertRaises(ClientException) as cm:
client.put_object(
self.url, self.token, 'c1', 'static-symlink',
headers={
'X-Symlink-Target': '%s/%s' % (tgt_cont, tgt_obj),
'X-Symlink-Target-Etag': etag,
'Content-Type': 'application/symlink',
})
self.assertEqual(412, cm.exception.http_status)
# clean-up
client.delete_object(self.url, self.token, 'c1', 'symlink')
int_client.delete_object(self.account, tgt_cont, tgt_obj)
int_client.delete_container(self.account, tgt_cont)
| swift-master | test/probe/test_reserved_name.py |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# swift documentation build configuration file
#
# This file is execfile()d with the current directory set to
# its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import os
import sys
import warnings
html_theme = 'openstackdocs'
html_theme_options = {
"sidebar_mode": "toc",
}
extensions = [
'os_api_ref',
'openstackdocstheme'
]
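# Illustrative note (not part of the upstream conf): with the theme and
# extensions above, this api-ref tree can be rendered with a plain Sphinx
# build, e.g.:
#
#   sphinx-build -W -b html api-ref/source api-ref/build/html
#
# (the exact tox/CI invocation used by the project may differ).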
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Object Storage API Reference'
copyright = u'2010-present, OpenStack Foundation'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use
# for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/swift'
openstackdocs_bug_project = 'swift'
openstackdocs_bug_tag = 'api-ref'
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples: (source start file, target name, title, authors, manual section).
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'swiftdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'swift.tex', u'OpenStack Object Storage API Documentation',
u'OpenStack Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
| swift-master | api-ref/source/conf.py |
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import gettext
import warnings
__version__ = None
# First, try to get our version out of PKG-INFO. If we're installed,
# this'll let us find our version without pulling in pbr. After all, if
# we're installed on a system, we're not in a Git-managed source tree, so
# pbr doesn't really buy us anything.
try:
import importlib.metadata
except ImportError:
# python < 3.8
import pkg_resources
try:
__version__ = __canonical_version__ = pkg_resources.get_provider(
pkg_resources.Requirement.parse('swift')).version
except pkg_resources.DistributionNotFound:
pass
else:
try:
__version__ = __canonical_version__ = importlib.metadata.distribution(
'swift').version
except importlib.metadata.PackageNotFoundError:
pass
if __version__ is None:
# No PKG-INFO? We're probably running from a checkout, then. Let pbr do
# its thing to figure out a version number.
import pbr.version
_version_info = pbr.version.VersionInfo('swift')
__version__ = _version_info.release_string()
__canonical_version__ = _version_info.version_string()
_localedir = os.environ.get('SWIFT_LOCALEDIR')
_t = gettext.translation('swift', localedir=_localedir, fallback=True)
def gettext_(msg):
return _t.gettext(msg)
if (3, 0) <= sys.version_info[:2] <= (3, 5):
# In the development of py3, json.loads() stopped accepting byte strings
# for a while. https://bugs.python.org/issue17909 got fixed for py36, but
# since it was termed an enhancement and not a regression, we don't expect
# any backports. At the same time, it'd be better if we could avoid
# leaving a whole bunch of json.loads(resp.body.decode(...)) scars in the
# code that'd probably persist even *after* we drop support for 3.5 and
# earlier. So, monkey patch stdlib.
import json
if not getattr(json.loads, 'patched_to_decode', False):
class JsonLoadsPatcher(object):
def __init__(self, orig):
self._orig = orig
def __call__(self, s, **kw):
if isinstance(s, bytes):
# No fancy byte-order mark detection for us; just assume
# UTF-8 and raise a UnicodeDecodeError if appropriate.
s = s.decode('utf8')
return self._orig(s, **kw)
def __getattribute__(self, attr):
if attr == 'patched_to_decode':
return True
if attr == '_orig':
return super().__getattribute__(attr)
# Pass through all other attrs to the original; among other
# things, this preserves doc strings, etc.
return getattr(self._orig, attr)
json.loads = JsonLoadsPatcher(json.loads)
del JsonLoadsPatcher
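        # Illustrative sketch (not part of the upstream module): with the
        # patch applied, byte strings decode transparently on py3.5 and
        # earlier, e.g.
        #
        #   json.loads(b'{"x": 1}')       # -> {'x': 1}, same as the str form
        #   json.loads.patched_to_decode  # -> True, so we never double-patch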
warnings.filterwarnings('ignore', module='cryptography|OpenSSL', message=(
'Python 2 is no longer supported by the Python core team. '
'Support for it is now deprecated in cryptography'))
warnings.filterwarnings('ignore', message=(
'Python 3.6 is no longer supported by the Python core team. '
'Therefore, support for it is deprecated in cryptography'))
| swift-master | swift/__init__.py |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet.greenio
import eventlet.wsgi
from eventlet import sleep
from six.moves import urllib
from swift.common import exceptions
from swift.common import http
from swift.common import swob
from swift.common import utils
from swift.common import request_helpers
from swift.common.utils import Timestamp
class SsyncClientDisconnected(Exception):
pass
def decode_missing(line):
"""
Parse a string of the form generated by
:py:func:`~swift.obj.ssync_sender.encode_missing` and return a dict
with keys ``object_hash``, ``ts_data``, ``ts_meta``, ``ts_ctype``,
``durable``.
The encoder for this line is
:py:func:`~swift.obj.ssync_sender.encode_missing`
"""
result = {}
parts = line.decode('ascii').split()
result['object_hash'] = urllib.parse.unquote(parts[0])
t_data = urllib.parse.unquote(parts[1])
result['ts_data'] = ts_data = Timestamp(t_data)
result['ts_meta'] = result['ts_ctype'] = ts_data
result['durable'] = True # default to True in case this key isn't sent
if len(parts) > 2:
# allow for a comma separated list of k:v pairs to future-proof
subparts = urllib.parse.unquote(parts[2]).split(',')
for item in [subpart for subpart in subparts if ':' in subpart]:
k, v = item.split(':')
if k == 'm':
v, _, o = v.partition('__')
# ignore ts_data offset when calculating ts_meta
result['ts_meta'] = Timestamp(ts_data.normal,
delta=int(v, 16),
offset=int(o or '0', 16))
elif k == 't':
v, _, o = v.partition('__')
# ignore ts_data offset when calculating ts_ctype
result['ts_ctype'] = Timestamp(Timestamp(ts_data).normal,
delta=int(v, 16),
offset=int(o or '0', 16))
elif k == 'durable':
result['durable'] = utils.config_true_value(v)
return result
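# Illustrative sketch (not part of the upstream module): a plain two-part line
# such as b'<hash> 1380144470.00000' decodes with ts_data == ts_meta ==
# ts_ctype == Timestamp('1380144470.00000') and durable True; an optional
# third part such as 'm:186a0,durable:no' marks the metadata timestamp as
# newer than the data timestamp and the fragment as non-durable.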
def encode_wanted(remote, local):
"""
    Compare remote and local results and generate a wanted line.
:param remote: a dict, with ts_data and ts_meta keys in the form
returned by :py:func:`decode_missing`
:param local: a dict, possibly empty, with ts_data and ts_meta keys
                  in the form returned by :py:meth:`Receiver._check_local`
The decoder for this line is
:py:func:`~swift.obj.ssync_sender.decode_wanted`
"""
want = {}
if 'ts_data' in local:
# we have something, let's get just the right stuff
if remote['ts_data'] > local['ts_data']:
want['data'] = True
if 'ts_meta' in local and remote['ts_meta'] > local['ts_meta']:
want['meta'] = True
if ('ts_ctype' in local and remote['ts_ctype'] > local['ts_ctype']
and remote['ts_ctype'] > remote['ts_data']):
want['meta'] = True
else:
# we got nothing, so we'll take whatever the remote has
want['data'] = True
want['meta'] = True
if want:
# this is the inverse of _decode_wanted's key_map
key_map = dict(data='d', meta='m')
parts = ''.join(v for k, v in sorted(key_map.items()) if want.get(k))
return '%s %s' % (urllib.parse.quote(remote['object_hash']), parts)
return None
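# Illustrative sketch (not part of the upstream module): if the remote offers
# newer data and newer metadata than we have locally, the wanted line comes
# out as '<quoted hash> dm'; if only the .meta is newer it is
# '<quoted hash> m'; and if we are already in sync encode_wanted() returns
# None so nothing is requested.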
class Receiver(object):
"""
Handles incoming SSYNC requests to the object server.
These requests come from the object-replicator daemon that uses
:py:mod:`.ssync_sender`.
The number of concurrent SSYNC requests is restricted by
use of a replication_semaphore and can be configured with the
object-server.conf [object-server] replication_concurrency
setting.
An SSYNC request is really just an HTTP conduit for
sender/receiver replication communication. The overall
SSYNC request should always succeed, but it will contain
multiple requests within its request and response bodies. This
"hack" is done so that replication concurrency can be managed.
The general process inside an SSYNC request is:
1. Initialize the request: Basic request validation, mount check,
acquire semaphore lock, etc..
2. Missing check: Sender sends the hashes and timestamps of
the object information it can send, receiver sends back
the hashes it wants (doesn't have or has an older
timestamp).
3. Updates: Sender sends the object information requested.
4. Close down: Release semaphore lock, etc.
"""
def __init__(self, app, request):
self.app = app
self.request = request
self.device = None
self.partition = None
self.fp = None
# We default to dropping the connection in case there is any exception
# raised during processing because otherwise the sender could send for
# quite some time before realizing it was all in vain.
self.disconnect = True
self.initialize_request()
def __call__(self):
"""
Processes an SSYNC request.
Acquires a semaphore lock and then proceeds through the steps
of the SSYNC process.
"""
        # The general theme for the functions __call__ invokes is that they should
# raise exceptions.MessageTimeout for client timeouts (logged locally),
# exceptions.ChunkReadError for client disconnects (logged locally),
# swob.HTTPException classes for exceptions to return to the caller but
# not log locally (unmounted, for example), and any other Exceptions
# will be logged with a full stack trace.
# This is because the client is never just some random user but
# is instead also our code and we definitely want to know if our code
# is broken or doing something unexpected.
try:
# Double try blocks in case our main error handlers fail.
try:
# Need to send something to trigger wsgi to return response
# headers and kick off the ssync exchange.
yield b'\r\n'
# If semaphore is in use, try to acquire it, non-blocking, and
# return a 503 if it fails.
if self.app.replication_semaphore:
if not self.app.replication_semaphore.acquire(False):
raise swob.HTTPServiceUnavailable()
try:
with self.diskfile_mgr.replication_lock(self.device,
self.policy,
self.partition):
for data in self.missing_check():
yield data
for data in self.updates():
yield data
# We didn't raise an exception, so end the request
# normally.
self.disconnect = False
finally:
if self.app.replication_semaphore:
self.app.replication_semaphore.release()
except SsyncClientDisconnected:
self.app.logger.error('ssync client disconnected')
self.disconnect = True
except exceptions.LockTimeout as err:
self.app.logger.debug(
'%s/%s/%s SSYNC LOCK TIMEOUT: %s' % (
self.request.remote_addr, self.device, self.partition,
err))
yield (':ERROR: %d %r\n' % (0, str(err))).encode('utf8')
except exceptions.MessageTimeout as err:
self.app.logger.error(
'%s/%s/%s TIMEOUT in ssync.Receiver: %s' % (
self.request.remote_addr, self.device, self.partition,
err))
yield (':ERROR: %d %r\n' % (408, str(err))).encode('utf8')
except exceptions.ChunkReadError as err:
self.app.logger.error(
'%s/%s/%s read failed in ssync.Receiver: %s' % (
self.request.remote_addr, self.device, self.partition,
err))
except swob.HTTPException as err:
body = b''.join(err({}, lambda *args: None))
yield (':ERROR: %d %r\n' % (
err.status_int, body)).encode('utf8')
except Exception as err:
self.app.logger.exception(
'%s/%s/%s EXCEPTION in ssync.Receiver' %
(self.request.remote_addr, self.device, self.partition))
yield (':ERROR: %d %r\n' % (0, str(err))).encode('utf8')
except Exception:
self.app.logger.exception('EXCEPTION in ssync.Receiver')
if self.disconnect:
# This makes the socket close early so the remote side doesn't have
# to send its whole request while the lower Eventlet-level just
# reads it and throws it away. Instead, the connection is dropped
# and the remote side will get a broken-pipe exception.
try:
socket = self.request.environ['wsgi.input'].get_socket()
eventlet.greenio.shutdown_safe(socket)
socket.close()
except Exception:
pass # We're okay with the above failing.
def initialize_request(self):
"""
Basic validation of request and mount check.
This function will be called before attempting to acquire a
replication semaphore lock, so contains only quick checks.
"""
# This environ override has been supported since eventlet 0.14:
# https://bitbucket.org/eventlet/eventlet/commits/ \
# 4bd654205a4217970a57a7c4802fed7ff2c8b770
self.request.environ['eventlet.minimum_write_chunk_size'] = 0
self.device, self.partition, self.policy = \
request_helpers.get_name_and_placement(self.request, 2, 2, False)
self.frag_index = None
if self.request.headers.get('X-Backend-Ssync-Frag-Index'):
try:
self.frag_index = int(
self.request.headers['X-Backend-Ssync-Frag-Index'])
except ValueError:
raise swob.HTTPBadRequest(
'Invalid X-Backend-Ssync-Frag-Index %r' %
self.request.headers['X-Backend-Ssync-Frag-Index'])
utils.validate_device_partition(self.device, self.partition)
self.diskfile_mgr = self.app._diskfile_router[self.policy]
if not self.diskfile_mgr.get_dev_path(self.device):
raise swob.HTTPInsufficientStorage(drive=self.device)
self.fp = self.request.environ['wsgi.input']
def _readline(self, context):
# try to read a line from the wsgi input; annotate any timeout or read
# errors with a description of the calling context
with exceptions.MessageTimeout(
self.app.client_timeout, context):
try:
line = self.fp.readline(self.app.network_chunk_size)
except (eventlet.wsgi.ChunkReadError, IOError) as err:
raise exceptions.ChunkReadError('%s: %s' % (context, err))
return line
def _check_local(self, remote, make_durable=True):
"""
Parse local diskfile and return results of current
representative for comparison to remote.
        :param remote: a dict describing the remote object being offered, in
            the form returned by :py:func:`decode_missing`
"""
try:
df = self.diskfile_mgr.get_diskfile_from_hash(
self.device, self.partition, remote['object_hash'],
self.policy, frag_index=self.frag_index, open_expired=True)
except exceptions.DiskFileNotExist:
return {}
try:
df.open()
except exceptions.DiskFileDeleted as err:
result = {'ts_data': err.timestamp}
except exceptions.DiskFileError:
# e.g. a non-durable EC frag
result = {}
else:
result = {
'ts_data': df.data_timestamp,
'ts_meta': df.timestamp,
'ts_ctype': df.content_type_timestamp,
}
if ((df.durable_timestamp is None or
df.durable_timestamp < remote['ts_data']) and
df.fragments and
remote['ts_data'] in df.fragments and
self.frag_index in df.fragments[remote['ts_data']]):
# The remote is offering a fragment that we already have but is
# *newer* than anything *durable* that we have
if remote['durable']:
# We have the frag, just missing durable state, so make the
# frag durable now. Try this just once to avoid looping if
# it fails.
if make_durable:
try:
with df.create() as writer:
writer.commit(remote['ts_data'])
return self._check_local(remote, make_durable=False)
except Exception:
# if commit fails then log exception and fall back to
# wanting a full update
self.app.logger.exception(
'%s/%s/%s EXCEPTION in ssync.Receiver while '
'attempting commit of %s'
% (self.request.remote_addr, self.device,
self.partition, df._datadir))
else:
# We have the non-durable frag that is on offer, but our
# ts_data may currently be set to an older durable frag, so
# bump our ts_data to prevent the remote frag being wanted.
result['ts_data'] = remote['ts_data']
return result
def _check_missing(self, line):
"""
        Parse an offered object from the sender and compare it to the local
        diskfile, returning the protocol line that represents the needed
        data, or None if in sync.
Anchor point for tests to mock legacy protocol changes.
"""
remote = decode_missing(line)
local = self._check_local(remote)
return encode_wanted(remote, local)
def missing_check(self):
"""
Handles the receiver-side of the MISSING_CHECK step of a
SSYNC request.
Receives a list of hashes and timestamps of object
information the sender can provide and responds with a list
of hashes desired, either because they're missing or have an
older timestamp locally.
The process is generally:
1. Sender sends ``:MISSING_CHECK: START`` and begins
sending `hash timestamp` lines.
2. Receiver gets ``:MISSING_CHECK: START`` and begins
reading the `hash timestamp` lines, collecting the
hashes of those it desires.
3. Sender sends ``:MISSING_CHECK: END``.
4. Receiver gets ``:MISSING_CHECK: END``, responds with
``:MISSING_CHECK: START``, followed by the list of
<wanted_hash> specifiers it collected as being wanted
(one per line), ``:MISSING_CHECK: END``, and flushes any
buffers.
Each <wanted_hash> specifier has the form <hash>[ <parts>] where
<parts> is a string containing characters 'd' and/or 'm'
indicating that only data or meta part of object respectively is
required to be sync'd.
5. Sender gets ``:MISSING_CHECK: START`` and reads the list
of hashes desired by the receiver until reading
``:MISSING_CHECK: END``.
        Collecting the wanted hashes first and responding only afterwards
        means the sender doesn't have to read while it writes, which keeps
        network buffers from filling up and blocking everything.
"""
line = self._readline('missing_check start')
if not line:
# Guess they hung up
raise SsyncClientDisconnected
if line.strip() != b':MISSING_CHECK: START':
raise Exception(
'Looking for :MISSING_CHECK: START got %r'
% utils.cap_length(line, 1024))
object_hashes = []
nlines = 0
while True:
line = self._readline('missing_check line')
if not line or line.strip() == b':MISSING_CHECK: END':
break
want = self._check_missing(line)
if want:
object_hashes.append(want)
if nlines % 5 == 0:
sleep() # Gives a chance for other greenthreads to run
nlines += 1
yield b':MISSING_CHECK: START\r\n'
if object_hashes:
yield b'\r\n'.join(hsh.encode('ascii') for hsh in object_hashes)
yield b'\r\n'
yield b':MISSING_CHECK: END\r\n'
def updates(self):
"""
Handles the UPDATES step of an SSYNC request.
Receives a set of PUT and DELETE subrequests that will be
routed to the object server itself for processing. These
contain the information requested by the MISSING_CHECK step.
The PUT and DELETE subrequests are formatted pretty much
exactly like regular HTTP requests, excepting the HTTP
version on the first request line.
The process is generally:
1. Sender sends ``:UPDATES: START`` and begins sending the
PUT and DELETE subrequests.
2. Receiver gets ``:UPDATES: START`` and begins routing the
subrequests to the object server.
3. Sender sends ``:UPDATES: END``.
4. Receiver gets ``:UPDATES: END`` and sends ``:UPDATES:
START`` and ``:UPDATES: END`` (assuming no errors).
5. Sender gets ``:UPDATES: START`` and ``:UPDATES: END``.
If too many subrequests fail, as configured by
replication_failure_threshold and replication_failure_ratio,
the receiver will hang up the request early so as to not
waste any more time.
At step 4, the receiver will send back an error if there were
any failures (that didn't cause a hangup due to the above
        thresholds) so the sender knows the whole process was not entirely
        a success. This lets the sender know whether it can remove an
        out-of-place partition, for example.
"""
line = self._readline('updates start')
if not line:
# Guess they hung up waiting for us to process the missing check
raise SsyncClientDisconnected
if line.strip() != b':UPDATES: START':
raise Exception('Looking for :UPDATES: START got %r'
% utils.cap_length(line, 1024))
successes = 0
failures = 0
updates = 0
while True:
line = self._readline('updates line')
if not line or line.strip() == b':UPDATES: END':
break
# Read first line METHOD PATH of subrequest.
method, path = swob.bytes_to_wsgi(line.strip()).split(' ', 1)
subreq = swob.Request.blank(
'/%s/%s%s' % (self.device, self.partition, path),
environ={'REQUEST_METHOD': method})
# Read header lines.
content_length = None
replication_headers = []
while True:
line = self._readline('updates line')
if not line:
raise Exception(
'Got no headers for %s %s' % (method, path))
line = line.strip()
if not line:
break
header, value = line.split(b':', 1)
header = swob.bytes_to_wsgi(header.strip().lower())
value = swob.bytes_to_wsgi(value.strip())
subreq.headers[header] = value
if header not in ('etag', 'x-backend-no-commit'):
# we'll use X-Backend-Replication-Headers to force the
# object server to write all sync'd metadata, but with some
# exceptions:
# - make sure ssync doesn't cause 'Etag' to be added to
# obj metadata in addition to 'ETag' which object server
# sets (note capitalization)
# - filter out x-backend-no-commit which ssync sender may
# have added to the subrequest
replication_headers.append(header)
if header == 'content-length':
content_length = int(value)
# Establish subrequest body, if needed.
if method in ('DELETE', 'POST'):
if content_length not in (None, 0):
raise Exception(
                        '%s subrequest %s should not have a body '
                        '(content-length %s)' % (method, path, content_length))
elif method == 'PUT':
if content_length is None:
raise Exception(
'No content-length sent for %s %s' % (method, path))
def subreq_iter():
left = content_length
while left > 0:
with exceptions.MessageTimeout(
self.app.client_timeout,
'updates content'):
chunk = self.fp.read(
min(left, self.app.network_chunk_size))
if not chunk:
raise exceptions.ChunkReadError(
'Early termination for %s %s' % (method, path))
left -= len(chunk)
yield chunk
subreq.environ['wsgi.input'] = utils.FileLikeIter(
subreq_iter())
else:
raise Exception('Invalid subrequest method %s' % method)
subreq.headers['X-Backend-Storage-Policy-Index'] = int(self.policy)
subreq.headers['X-Backend-Replication'] = 'True'
if self.frag_index is not None:
# primary node should not 409 if it has a non-primary fragment
subreq.headers['X-Backend-Ssync-Frag-Index'] = self.frag_index
if replication_headers:
subreq.headers['X-Backend-Replication-Headers'] = \
' '.join(replication_headers)
# Route subrequest and translate response.
resp = subreq.get_response(self.app)
if http.is_success(resp.status_int) or \
resp.status_int == http.HTTP_NOT_FOUND:
successes += 1
else:
self.app.logger.warning(
'ssync subrequest failed with %s: %s %s (%s)' %
(resp.status_int, method, subreq.path, resp.body))
failures += 1
if failures >= self.app.replication_failure_threshold and (
not successes or
float(failures) / successes >
self.app.replication_failure_ratio):
raise Exception(
'Too many %d failures to %d successes' %
(failures, successes))
# The subreq may have failed, but we want to read the rest of the
# body from the remote side so we can continue on with the next
# subreq.
for junk in subreq.environ['wsgi.input']:
pass
if updates % 5 == 0:
sleep() # Gives a chance for other greenthreads to run
updates += 1
if failures:
raise swob.HTTPInternalServerError(
'ERROR: With :UPDATES: %d failures to %d successes' %
(failures, successes))
yield b':UPDATES: START\r\n'
yield b':UPDATES: END\r\n'
| swift-master | swift/obj/ssync_receiver.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Object Server for Swift """
import six
import six.moves.cPickle as pickle
from six.moves.urllib.parse import unquote
import json
import os
import multiprocessing
import time
import traceback
import socket
from eventlet import sleep, wsgi, Timeout, tpool
from eventlet.greenthread import spawn
from swift.common.utils import public, get_logger, \
config_true_value, timing_stats, replication, \
normalize_delete_at_timestamp, get_log_line, Timestamp, \
get_expirer_container, parse_mime_headers, \
iter_multipart_mime_documents, extract_swift_bytes, safe_json_loads, \
config_auto_int_value, split_path, get_redirect_data, \
normalize_timestamp, md5
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_object_creation, \
valid_timestamp, check_utf8, AUTO_CREATE_ACCOUNT_PREFIX
from swift.common.exceptions import ConnectionTimeout, DiskFileQuarantined, \
DiskFileNotExist, DiskFileCollision, DiskFileNoSpace, DiskFileDeleted, \
DiskFileDeviceUnavailable, DiskFileExpired, ChunkReadTimeout, \
ChunkReadError, DiskFileXattrNotSupported
from swift.common.request_helpers import \
OBJECT_SYSMETA_CONTAINER_UPDATE_OVERRIDE_PREFIX
from swift.obj import ssync_receiver
from swift.common.http import is_success, HTTP_MOVED_PERMANENTLY
from swift.common.base_storage_server import BaseStorageServer
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.request_helpers import get_name_and_placement, \
is_user_meta, is_sys_or_user_meta, is_object_transient_sysmeta, \
resolve_etag_is_at_header, is_sys_meta, validate_internal_obj
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestTimeout, HTTPUnprocessableEntity, \
HTTPClientDisconnect, HTTPMethodNotAllowed, Request, Response, \
HTTPInsufficientStorage, HTTPForbidden, HTTPException, HTTPConflict, \
HTTPServerError, bytes_to_wsgi, wsgi_to_bytes, wsgi_to_str, normalize_etag
from swift.obj.diskfile import RESERVED_DATAFILE_META, DiskFileRouter
from swift.obj.expirer import build_task_obj
def iter_mime_headers_and_bodies(wsgi_input, mime_boundary, read_chunk_size):
mime_documents_iter = iter_multipart_mime_documents(
wsgi_input, mime_boundary, read_chunk_size)
for file_like in mime_documents_iter:
hdrs = parse_mime_headers(file_like)
yield (hdrs, file_like)
def drain(file_like, read_size, timeout):
"""
Read and discard any bytes from file_like.
:param file_like: file-like object to read from
:param read_size: how big a chunk to read at a time
:param timeout: how long to wait for a read (use None for no timeout)
:raises ChunkReadTimeout: if no chunk was read in time
"""
while True:
with ChunkReadTimeout(timeout):
chunk = file_like.read(read_size)
if not chunk:
break
def get_obj_name_and_placement(request):
"""
Split and validate path for an object.
:param request: a swob request
:returns: a tuple of path parts and storage policy
"""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
validate_internal_obj(account, container, obj)
return device, partition, account, container, obj, policy
def _make_backend_fragments_header(fragments):
if fragments:
result = {}
for ts, frag_list in fragments.items():
result[ts.internal] = frag_list
return json.dumps(result)
return None
class EventletPlungerString(bytes):
"""
Eventlet won't send headers until it's accumulated at least
eventlet.wsgi.MINIMUM_CHUNK_SIZE bytes or the app iter is exhausted.
If we want to send the response body behind Eventlet's back, perhaps
with some zero-copy wizardry, then we have to unclog the plumbing in
eventlet.wsgi to force the headers out, so we use an
EventletPlungerString to empty out all of Eventlet's buffers.
"""
def __len__(self):
return wsgi.MINIMUM_CHUNK_SIZE + 1
class ObjectController(BaseStorageServer):
"""Implements the WSGI application for the Swift Object Server."""
server_type = 'object-server'
def __init__(self, conf, logger=None):
"""
Creates a new WSGI application for the Swift Object Server. An
example configuration is given at
<source-dir>/etc/object-server.conf-sample or
/etc/swift/object-server.conf-sample.
"""
super(ObjectController, self).__init__(conf)
self.logger = logger or get_logger(conf, log_route='object-server')
self.node_timeout = float(conf.get('node_timeout', 3))
self.container_update_timeout = float(
conf.get('container_update_timeout', 1))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.client_timeout = float(conf.get('client_timeout', 60))
self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.log_requests = config_true_value(conf.get('log_requests', 'true'))
self.max_upload_time = int(conf.get('max_upload_time', 86400))
self.slow = int(conf.get('slow', 0))
self.keep_cache_private = \
config_true_value(conf.get('keep_cache_private', 'false'))
self.keep_cache_slo_manifest = \
config_true_value(conf.get('keep_cache_slo_manifest', 'false'))
default_allowed_headers = '''
content-disposition,
content-encoding,
x-delete-at,
x-object-manifest,
x-static-large-object,
cache-control,
content-language,
expires,
x-robots-tag
'''
extra_allowed_headers = [
header.strip().lower() for header in conf.get(
'allowed_headers', default_allowed_headers).split(',')
if header.strip()
]
self.allowed_headers = set()
for header in extra_allowed_headers:
if header not in RESERVED_DATAFILE_META:
self.allowed_headers.add(header)
if conf.get('auto_create_account_prefix'):
self.logger.warning('Option auto_create_account_prefix is '
'deprecated. Configure '
'auto_create_account_prefix under the '
'swift-constraints section of '
'swift.conf. This option will '
'be ignored in a future release.')
self.auto_create_account_prefix = \
conf['auto_create_account_prefix']
else:
self.auto_create_account_prefix = AUTO_CREATE_ACCOUNT_PREFIX
self.expiring_objects_account = self.auto_create_account_prefix + \
(conf.get('expiring_objects_account_name') or 'expiring_objects')
self.expiring_objects_container_divisor = \
int(conf.get('expiring_objects_container_divisor') or 86400)
# Initialization was successful, so now apply the network chunk size
# parameter as the default read / write buffer size for the network
# sockets.
#
# NOTE WELL: This is a class setting, so until we get set this on a
# per-connection basis, this affects reading and writing on ALL
# sockets, those between the proxy servers and external clients, and
# those between the proxy servers and the other internal servers.
#
# ** Because the primary motivation for this is to optimize how data
# is written back to the proxy server, we could use the value from the
# disk_chunk_size parameter. However, it affects all created sockets
# using this class so we have chosen to tie it to the
# network_chunk_size parameter value instead.
if six.PY2:
socket._fileobject.default_bufsize = self.network_chunk_size
# TODO: find a way to enable similar functionality in py3
# Provide further setup specific to an object server implementation.
self.setup(conf)
def setup(self, conf):
"""
Implementation specific setup. This method is called at the very end
by the constructor to allow a specific implementation to modify
existing attributes or add its own attributes.
:param conf: WSGI configuration parameter
"""
# Common on-disk hierarchy shared across account, container and object
# servers.
self._diskfile_router = DiskFileRouter(conf, self.logger)
# This is populated by global_conf_callback way below as the semaphore
# is shared by all workers.
if 'replication_semaphore' in conf:
# The value was put in a list so it could get past paste
self.replication_semaphore = conf['replication_semaphore'][0]
else:
self.replication_semaphore = None
self.replication_failure_threshold = int(
conf.get('replication_failure_threshold') or 100)
self.replication_failure_ratio = float(
conf.get('replication_failure_ratio') or 1.0)
servers_per_port = int(conf.get('servers_per_port', '0') or 0)
if servers_per_port:
# The typical servers-per-port deployment also uses one port per
# disk, so you really get N servers per disk. In that case,
# having a pool of 20 threads per server per disk is far too
# much. For example, given a 60-disk chassis and 4 servers per
# disk, the default configuration will give us 21 threads per
# server (the main thread plus the twenty tpool threads), for a
# total of around 60 * 21 * 4 = 5040 threads. This is clearly
# too high.
#
# Instead, we use a tpool size of 1, giving us 2 threads per
# process. In the example above, that's 60 * 2 * 4 = 480
# threads, which is reasonable since there are 240 processes.
default_tpool_size = 1
else:
# If we're not using servers-per-port, then leave the tpool size
# alone. The default (20) is typically good enough for one
# object server handling requests for many disks.
default_tpool_size = None
tpool_size = config_auto_int_value(
conf.get('eventlet_tpool_num_threads'),
default_tpool_size)
if tpool_size:
tpool.set_num_threads(tpool_size)
def get_diskfile(self, device, partition, account, container, obj,
policy, **kwargs):
"""
Utility method for instantiating a DiskFile object supporting a given
REST API.
An implementation of the object server that wants to use a different
DiskFile class would simply over-ride this method to provide that
behavior.
"""
return self._diskfile_router[policy].get_diskfile(
device, partition, account, container, obj, policy, **kwargs)
def async_update(self, op, account, container, obj, host, partition,
contdevice, headers_out, objdevice, policy,
logger_thread_locals=None, container_path=None):
"""
Sends or saves an async update.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param host: host that the container is on
:param partition: partition that the container is on
:param contdevice: device name that the container is on
:param headers_out: dictionary of headers to send in the container
request
:param objdevice: device name that the object is in
:param policy: the associated BaseStoragePolicy instance
:param logger_thread_locals: The thread local values to be set on the
self.logger to retain transaction
logging information.
:param container_path: optional path in the form `<account/container>`
to which the update should be sent. If given this path will be used
instead of constructing a path from the ``account`` and
``container`` params.
"""
if logger_thread_locals:
self.logger.thread_locals = logger_thread_locals
headers_out['user-agent'] = 'object-server %s' % os.getpid()
if container_path:
# use explicitly specified container path
full_path = '/%s/%s' % (container_path, obj)
else:
full_path = '/%s/%s/%s' % (account, container, obj)
redirect_data = None
if all([host, partition, contdevice]):
try:
with ConnectionTimeout(self.conn_timeout):
ip, port = host.rsplit(':', 1)
conn = http_connect(ip, port, contdevice, partition, op,
full_path, headers_out)
with Timeout(self.node_timeout):
response = conn.getresponse()
response.read()
if is_success(response.status):
return
if response.status == HTTP_MOVED_PERMANENTLY:
try:
redirect_data = get_redirect_data(response)
except ValueError as err:
self.logger.error(
'Container update failed for %r; problem with '
'redirect location: %s' % (obj, err))
else:
self.logger.error(
'ERROR Container update failed '
'(saving for async update later): %(status)d '
'response from %(ip)s:%(port)s/%(dev)s',
{'status': response.status, 'ip': ip, 'port': port,
'dev': contdevice})
except (Exception, Timeout):
self.logger.exception(
'ERROR container update failed with '
'%(ip)s:%(port)s/%(dev)s (saving for async update later)',
{'ip': ip, 'port': port, 'dev': contdevice})
data = {'op': op, 'account': account, 'container': container,
'obj': obj, 'headers': headers_out}
if redirect_data:
self.logger.debug(
'Update to %(path)s redirected to %(redirect)s',
{'path': full_path, 'redirect': redirect_data[0]})
container_path = redirect_data[0]
if container_path:
data['container_path'] = container_path
timestamp = headers_out.get('x-meta-timestamp',
headers_out.get('x-timestamp'))
self._diskfile_router[policy].pickle_async_update(
objdevice, account, container, obj, data, timestamp, policy)
def container_update(self, op, account, container, obj, request,
headers_out, objdevice, policy):
"""
Update the container when objects are updated.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param request: the original request object driving the update
:param headers_out: dictionary of headers to send in the container
request(s)
:param objdevice: device name that the object is in
:param policy: the BaseStoragePolicy instance
"""
headers_in = request.headers
conthosts = [h.strip() for h in
headers_in.get('X-Container-Host', '').split(',')]
contdevices = [d.strip() for d in
headers_in.get('X-Container-Device', '').split(',')]
contpartition = headers_in.get('X-Container-Partition', '')
if len(conthosts) != len(contdevices):
# This shouldn't happen unless there's a bug in the proxy,
# but if there is, we want to know about it.
self.logger.error(
'ERROR Container update failed: different '
'numbers of hosts and devices in request: '
'"%(hosts)s" vs "%(devices)s"', {
'hosts': headers_in.get('X-Container-Host', ''),
'devices': headers_in.get('X-Container-Device', '')})
return
contpath = headers_in.get('X-Backend-Quoted-Container-Path')
if contpath:
contpath = unquote(contpath)
else:
contpath = headers_in.get('X-Backend-Container-Path')
if contpath:
try:
# TODO: this is very late in request handling to be validating
# a header - if we did *not* check and the header was bad
# presumably the update would fail and we would fall back to an
# async update to the root container, which might be best
# course of action rather than aborting update altogether?
split_path('/' + contpath, minsegs=2, maxsegs=2)
except ValueError:
self.logger.error(
"Invalid X-Backend-Container-Path, should be of the form "
"'account/container' but got %r." % contpath)
# fall back to updating root container
contpath = None
if contpartition:
# In py3, zip() continues to work for our purposes... But when
            # we want to log an error, consumed items are no longer present
# in the zip, making the logs useless for operators. So, list().
updates = list(zip(conthosts, contdevices))
else:
updates = []
headers_out['x-trans-id'] = headers_in.get('x-trans-id', '-')
headers_out['referer'] = request.as_referer()
headers_out['X-Backend-Storage-Policy-Index'] = int(policy)
update_greenthreads = []
for conthost, contdevice in updates:
gt = spawn(self.async_update, op, account, container, obj,
conthost, contpartition, contdevice, headers_out,
objdevice, policy,
logger_thread_locals=self.logger.thread_locals,
container_path=contpath)
update_greenthreads.append(gt)
# Wait a little bit to see if the container updates are successful.
# If we immediately return after firing off the greenthread above, then
# we're more likely to confuse the end-user who does a listing right
# after getting a successful response to the object create. The
# `container_update_timeout` bounds the length of time we wait so that
# one slow container server doesn't make the entire request lag.
try:
with Timeout(self.container_update_timeout):
for gt in update_greenthreads:
gt.wait()
except Timeout:
# updates didn't go through, log it and return
self.logger.debug(
'Container update timeout (%.4fs) waiting for %s',
self.container_update_timeout, updates)
def delete_at_update(self, op, delete_at, account, container, obj,
request, objdevice, policy):
"""
Update the expiring objects container when objects are updated.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param delete_at: scheduled delete in UNIX seconds, int
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param request: the original request driving the update
:param objdevice: device name that the object is in
:param policy: the BaseStoragePolicy instance (used for tmp dir)
"""
if config_true_value(
request.headers.get('x-backend-replication', 'f')):
return
delete_at = normalize_delete_at_timestamp(delete_at)
updates = [(None, None)]
partition = None
hosts = contdevices = [None]
headers_in = request.headers
headers_out = HeaderKeyDict({
# system accounts are always Policy-0
'X-Backend-Storage-Policy-Index': 0,
'x-timestamp': request.timestamp.internal,
'x-trans-id': headers_in.get('x-trans-id', '-'),
'referer': request.as_referer()})
if op != 'DELETE':
hosts = headers_in.get('X-Delete-At-Host', None)
if hosts is None:
# If header is missing, no update needed as sufficient other
# object servers should perform the required update.
return
delete_at_container = headers_in.get('X-Delete-At-Container', None)
if not delete_at_container:
# older proxy servers did not send X-Delete-At-Container so for
# backwards compatibility calculate the value here, but also
# log a warning because this is prone to inconsistent
# expiring_objects_container_divisor configurations.
# See https://bugs.launchpad.net/swift/+bug/1187200
self.logger.warning(
'X-Delete-At-Container header must be specified for '
'expiring objects background %s to work properly. Making '
'best guess as to the container name for now.' % op)
delete_at_container = get_expirer_container(
delete_at, self.expiring_objects_container_divisor,
account, container, obj)
partition = headers_in.get('X-Delete-At-Partition', None)
contdevices = headers_in.get('X-Delete-At-Device', '')
updates = [upd for upd in
zip((h.strip() for h in hosts.split(',')),
(c.strip() for c in contdevices.split(',')))
if all(upd) and partition]
if not updates:
updates = [(None, None)]
headers_out['x-size'] = '0'
headers_out['x-content-type'] = 'text/plain'
headers_out['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'
else:
if not config_true_value(
request.headers.get(
'X-Backend-Clean-Expiring-Object-Queue', 't')):
return
# DELETEs of old expiration data have no way of knowing what the
# old X-Delete-At-Container was at the time of the initial setting
# of the data, so a best guess is made here.
# Worst case is a DELETE is issued now for something that doesn't
# exist there and the original data is left where it is, where
# it will be ignored when the expirer eventually tries to issue the
# object DELETE later since the X-Delete-At value won't match up.
delete_at_container = get_expirer_container(
delete_at, self.expiring_objects_container_divisor,
account, container, obj)
delete_at_container = normalize_delete_at_timestamp(
delete_at_container)
for host, contdevice in updates:
self.async_update(
op, self.expiring_objects_account, delete_at_container,
build_task_obj(delete_at, account, container, obj),
host, partition, contdevice, headers_out, objdevice,
policy)
def _make_timeout_reader(self, file_like):
def timeout_reader():
with ChunkReadTimeout(self.client_timeout):
try:
return file_like.read(self.network_chunk_size)
except (IOError, ValueError):
raise ChunkReadError
return timeout_reader
def _read_put_commit_message(self, mime_documents_iter):
rcvd_commit = False
try:
with ChunkReadTimeout(self.client_timeout):
commit_hdrs, commit_iter = next(mime_documents_iter)
if commit_hdrs.get('X-Document', None) == "put commit":
rcvd_commit = True
drain(commit_iter, self.network_chunk_size, self.client_timeout)
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
except StopIteration:
raise HTTPBadRequest(body="couldn't find PUT commit MIME doc")
return rcvd_commit
def _read_metadata_footer(self, mime_documents_iter):
try:
with ChunkReadTimeout(self.client_timeout):
footer_hdrs, footer_iter = next(mime_documents_iter)
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
except StopIteration:
raise HTTPBadRequest(body="couldn't find footer MIME doc")
return self._parse_footer(footer_hdrs, footer_iter)
def _parse_footer(self, footer_hdrs, footer_iter):
"""
Validate footer metadata and translate JSON body into HeaderKeyDict.
"""
timeout_reader = self._make_timeout_reader(footer_iter)
try:
footer_body = b''.join(iter(timeout_reader, b''))
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
footer_md5 = footer_hdrs.get('Content-MD5')
if not footer_md5:
raise HTTPBadRequest(body="no Content-MD5 in footer")
if footer_md5 != md5(footer_body, usedforsecurity=False).hexdigest():
raise HTTPUnprocessableEntity(body="footer MD5 mismatch")
try:
return HeaderKeyDict(json.loads(footer_body))
except ValueError:
raise HTTPBadRequest("invalid JSON for footer doc")
def _check_container_override(self, update_headers, metadata,
footers=None):
"""
Applies any overrides to the container update headers.
Overrides may be in the x-object-sysmeta-container-update- namespace or
the x-backend-container-update-override- namespace. The former is
preferred and is used by proxy middlewares. The latter is historical
but is still used with EC policy PUT requests; for backwards
compatibility the header names used with EC policy requests have not
been changed to the sysmeta namespace - that way the EC PUT path of a
newer proxy will remain compatible with an object server that pre-dates
the introduction of the x-object-sysmeta-container-update- namespace
and vice-versa.
:param update_headers: a dict of headers used in the container update
        :param metadata: a dict that may contain override items
        :param footers: another dict that may contain override items, at a
higher priority than metadata
"""
footers = footers or {}
# the order of this list is significant:
# x-object-sysmeta-container-update-override-* headers take precedence
# over x-backend-container-update-override-* headers
override_prefixes = ['x-backend-container-update-override-',
OBJECT_SYSMETA_CONTAINER_UPDATE_OVERRIDE_PREFIX]
for override_prefix in override_prefixes:
for key, val in metadata.items():
if key.lower().startswith(override_prefix):
override = key.lower().replace(override_prefix, 'x-')
update_headers[override] = val
# apply x-backend-container-update-override* from footers *before*
# x-object-sysmeta-container-update-override-* from headers
for key, val in footers.items():
if key.lower().startswith(override_prefix):
override = key.lower().replace(override_prefix, 'x-')
update_headers[override] = val
@public
@timing_stats()
def POST(self, request):
"""Handle HTTP POST requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_obj_name_and_placement(request)
req_timestamp = valid_timestamp(request)
new_delete_at = int(request.headers.get('X-Delete-At') or 0)
if new_delete_at and new_delete_at < req_timestamp:
return HTTPBadRequest(body='X-Delete-At in past', request=request,
content_type='text/plain')
next_part_power = request.headers.get('X-Backend-Next-Part-Power')
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, open_expired=config_true_value(
request.headers.get('x-backend-replication', 'false')),
next_part_power=next_part_power)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
orig_metadata = disk_file.read_metadata(current_time=req_timestamp)
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except (DiskFileNotExist, DiskFileQuarantined):
return HTTPNotFound(request=request)
orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
orig_ctype_timestamp = disk_file.content_type_timestamp
req_ctype_time = '0'
req_ctype = request.headers.get('Content-Type')
if req_ctype:
req_ctype_time = request.headers.get('Content-Type-Timestamp',
req_timestamp.internal)
req_ctype_timestamp = Timestamp(req_ctype_time)
if orig_timestamp >= req_timestamp \
and orig_ctype_timestamp >= req_ctype_timestamp:
return HTTPConflict(
request=request,
headers={'X-Backend-Timestamp': orig_timestamp.internal})
if req_timestamp > orig_timestamp:
metadata = {'X-Timestamp': req_timestamp.internal}
metadata.update(val for val in request.headers.items()
if (is_user_meta('object', val[0]) or
is_object_transient_sysmeta(val[0])))
headers_to_copy = (
request.headers.get(
'X-Backend-Replication-Headers', '').split() +
list(self.allowed_headers))
for header_key in headers_to_copy:
if header_key in request.headers:
header_caps = bytes_to_wsgi(
wsgi_to_bytes(header_key).title())
metadata[header_caps] = request.headers[header_key]
orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
if orig_delete_at != new_delete_at:
if new_delete_at:
self.delete_at_update(
'PUT', new_delete_at, account, container, obj, request,
device, policy)
if orig_delete_at:
self.delete_at_update('DELETE', orig_delete_at, account,
container, obj, request, device,
policy)
else:
# preserve existing metadata, only content-type may be updated
metadata = dict(disk_file.get_metafile_metadata())
if req_ctype_timestamp > orig_ctype_timestamp:
# we have a new content-type, add to metadata and container update
content_type_headers = {
'Content-Type': request.headers['Content-Type'],
'Content-Type-Timestamp': req_ctype_timestamp.internal
}
metadata.update(content_type_headers)
else:
# send existing content-type with container update
content_type_headers = {
'Content-Type': disk_file.content_type,
'Content-Type-Timestamp': orig_ctype_timestamp.internal
}
if orig_ctype_timestamp != disk_file.data_timestamp:
# only add to metadata if it's not the datafile content-type
metadata.update(content_type_headers)
try:
disk_file.write_metadata(metadata)
except (DiskFileXattrNotSupported, DiskFileNoSpace):
return HTTPInsufficientStorage(drive=device, request=request)
if (content_type_headers['Content-Type-Timestamp']
!= disk_file.data_timestamp):
# Current content-type is not from the datafile, but the datafile
# content-type may have a swift_bytes param that was appended by
# SLO and we must continue to send that with the container update.
# Do this (rather than use a separate header) for backwards
# compatibility because there may be 'legacy' container updates in
# async pending that have content-types with swift_bytes params, so
# we have to be able to handle those in container server anyway.
_, swift_bytes = extract_swift_bytes(
disk_file.get_datafile_metadata()['Content-Type'])
if swift_bytes:
content_type_headers['Content-Type'] += (';swift_bytes=%s'
% swift_bytes)
update_headers = HeaderKeyDict({
'x-size': orig_metadata['Content-Length'],
'x-content-type': content_type_headers['Content-Type'],
'x-timestamp': disk_file.data_timestamp.internal,
'x-content-type-timestamp':
content_type_headers['Content-Type-Timestamp'],
'x-meta-timestamp': metadata['X-Timestamp'],
'x-etag': orig_metadata['ETag']})
# Special cases for backwards compatibility.
# For EC policy, send X-Object-Sysmeta-Ec-Etag which is same as the
# X-Backend-Container-Update-Override-Etag value sent with the original
# PUT. Similarly send X-Object-Sysmeta-Ec-Content-Length which is the
# same as the X-Backend-Container-Update-Override-Size value. We have
# to send Etag and size with a POST container update because the
# original PUT container update may have failed or be in async_pending.
if 'X-Object-Sysmeta-Ec-Etag' in orig_metadata:
update_headers['X-Etag'] = orig_metadata[
'X-Object-Sysmeta-Ec-Etag']
if 'X-Object-Sysmeta-Ec-Content-Length' in orig_metadata:
update_headers['X-Size'] = orig_metadata[
'X-Object-Sysmeta-Ec-Content-Length']
self._check_container_override(update_headers, orig_metadata)
# object POST updates are PUT to the container server
self.container_update(
'PUT', account, container, obj, request, update_headers,
device, policy)
# Add current content-type and sysmeta to response
resp_headers = {
'X-Backend-Content-Type': content_type_headers['Content-Type']}
for key, value in orig_metadata.items():
if is_sys_meta('object', key):
resp_headers[key] = value
return HTTPAccepted(request=request, headers=resp_headers)
def _pre_create_checks(self, request, device, partition,
account, container, obj, policy):
req_timestamp = valid_timestamp(request)
error_response = check_object_creation(request, obj)
if error_response:
raise error_response
try:
fsize = request.message_length()
except ValueError as e:
raise HTTPBadRequest(body=str(e), request=request,
content_type='text/plain')
# In case of multipart-MIME put, the proxy sends a chunked request,
# but may let us know the real content length so we can verify that
# we have enough disk space to hold the object.
if fsize is None:
fsize = request.headers.get('X-Backend-Obj-Content-Length')
if fsize is not None:
try:
fsize = int(fsize)
except ValueError as e:
raise HTTPBadRequest(body=str(e), request=request,
content_type='text/plain')
# SSYNC will include Frag-Index header for subrequests, in which case
# get_diskfile will ignore non-matching on-disk data files
frag_index = request.headers.get('X-Backend-Ssync-Frag-Index')
next_part_power = request.headers.get('X-Backend-Next-Part-Power')
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, frag_index=frag_index,
next_part_power=next_part_power)
except DiskFileDeviceUnavailable:
raise HTTPInsufficientStorage(drive=device, request=request)
try:
orig_metadata = disk_file.read_metadata(current_time=req_timestamp)
orig_timestamp = disk_file.data_timestamp
except DiskFileXattrNotSupported:
raise HTTPInsufficientStorage(drive=device, request=request)
except DiskFileDeleted as e:
orig_metadata = {}
orig_timestamp = e.timestamp
except (DiskFileNotExist, DiskFileQuarantined):
orig_metadata = {}
orig_timestamp = Timestamp(0)
# Checks for If-None-Match
if request.if_none_match is not None and orig_metadata:
if '*' in request.if_none_match:
# File exists already so return 412
raise HTTPPreconditionFailed(request=request)
if orig_metadata.get('ETag') in request.if_none_match:
# The current ETag matches, so raise 412
raise HTTPPreconditionFailed(request=request)
if orig_timestamp >= req_timestamp:
raise HTTPConflict(
request=request,
headers={'X-Backend-Timestamp': orig_timestamp.internal})
return disk_file, fsize, orig_metadata
def _do_multi_stage_mime_continue_headers(self, request, obj_input):
"""
If the proxy wants to send us object metadata after the object body, it
sets some headers. We have to tell the proxy, in the 100 Continue
response, that we're able to parse a multipart MIME document and
extract the object and metadata from it. If we don't, then the proxy
won't actually send the footer metadata.
If the proxy doesn't want to do any of that, this is the identity
        function for obj_input and multi_stage_mime_state will be falsy.
:returns: a tuple, (obj_input, multi_stage_mime_state)
"""
have_metadata_footer = False
use_multiphase_commit = False
hundred_continue_headers = []
if config_true_value(
request.headers.get(
'X-Backend-Obj-Multiphase-Commit')):
use_multiphase_commit = True
hundred_continue_headers.append(
('X-Obj-Multiphase-Commit', 'yes'))
if config_true_value(
request.headers.get('X-Backend-Obj-Metadata-Footer')):
have_metadata_footer = True
hundred_continue_headers.append(
('X-Obj-Metadata-Footer', 'yes'))
if have_metadata_footer or use_multiphase_commit:
obj_input.set_hundred_continue_response_headers(
hundred_continue_headers)
mime_boundary = wsgi_to_bytes(request.headers.get(
'X-Backend-Obj-Multipart-Mime-Boundary'))
if not mime_boundary:
raise HTTPBadRequest("no MIME boundary")
with ChunkReadTimeout(self.client_timeout):
mime_documents_iter = iter_mime_headers_and_bodies(
request.environ['wsgi.input'],
mime_boundary, self.network_chunk_size)
_junk_hdrs, obj_input = next(mime_documents_iter)
multi_stage_mime_state = {
'have_metadata_footer': have_metadata_footer,
'use_multiphase_commit': use_multiphase_commit,
'mime_documents_iter': mime_documents_iter,
}
else:
multi_stage_mime_state = {}
return obj_input, multi_stage_mime_state
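    # Illustrative sketch (comments only): a proxy that wants to send footer
    # metadata and use a two-phase commit includes backend headers roughly
    # like the following on its PUT (values are examples, not from a real
    # request):
    #
    #   X-Backend-Obj-Metadata-Footer: yes
    #   X-Backend-Obj-Multiphase-Commit: yes
    #   X-Backend-Obj-Multipart-Mime-Boundary: 41d6f1b1a3b0...
    #
    # This method then answers in the 100 Continue response with
    #
    #   X-Obj-Metadata-Footer: yes
    #   X-Obj-Multiphase-Commit: yes
    #
    # and the request body is parsed as MIME documents: object data first,
    # then the footer document, then (for multiphase commit) a commit
    # document handled later in the PUT path.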
def _stage_obj_data(self, request, device, obj_input, writer, fsize):
"""
        Feed the obj_input into the writer.
:returns: a tuple, (upload_size, etag)
"""
writer.open()
elapsed_time = 0
upload_expiration = time.time() + self.max_upload_time
timeout_reader = self._make_timeout_reader(obj_input)
for chunk in iter(timeout_reader, b''):
start_time = time.time()
if start_time > upload_expiration:
self.logger.increment('PUT.timeouts')
raise HTTPRequestTimeout(request=request)
writer.write(chunk)
elapsed_time += time.time() - start_time
upload_size, etag = writer.chunks_finished()
if fsize is not None and fsize != upload_size:
raise HTTPClientDisconnect(request=request)
if upload_size:
self.logger.transfer_rate(
'PUT.' + device + '.timing', elapsed_time,
upload_size)
return upload_size, etag
def _get_request_metadata(self, request, upload_size, etag):
"""
Pull object metadata off the request.
:returns: metadata, a dict of object metadata
"""
metadata = {
'X-Timestamp': request.timestamp.internal,
'Content-Type': request.headers['content-type'],
'Content-Length': str(upload_size),
'ETag': etag,
}
metadata.update(val for val in request.headers.items()
if (is_sys_or_user_meta('object', val[0]) or
is_object_transient_sysmeta(val[0])))
headers_to_copy = (
request.headers.get(
'X-Backend-Replication-Headers', '').split() +
list(self.allowed_headers))
for header_key in headers_to_copy:
if header_key in request.headers:
header_caps = bytes_to_wsgi(
wsgi_to_bytes(header_key).title())
metadata[header_caps] = request.headers[header_key]
return metadata
def _read_mime_footers_metadata(self, have_metadata_footer,
mime_documents_iter, **kwargs):
"""
Read footer metadata from the bottom of the multi-stage MIME body.
:returns: metadata, a dict
"""
if have_metadata_footer:
metadata = self._read_metadata_footer(
mime_documents_iter)
footer_etag = metadata.pop('etag', '').lower()
if footer_etag:
metadata['ETag'] = footer_etag
else:
metadata = {}
return metadata
def _apply_extra_metadata(self, request, metadata, footers_metadata):
"""
        Apply footer metadata, which takes precedence over metadata sent in
        the request headers, and verify that any ETag received with the
        request or footers matches the ETag computed from the uploaded data.
"""
metadata.update(val for val in footers_metadata.items()
if (is_sys_or_user_meta('object', val[0]) or
is_object_transient_sysmeta(val[0])))
# N.B. footers_metadata is a HeaderKeyDict
received_etag = normalize_etag(footers_metadata.get(
'etag', request.headers.get('etag', '')))
if received_etag and received_etag != metadata['ETag']:
raise HTTPUnprocessableEntity(request=request)
def _send_multi_stage_continue_headers(self, request,
use_multiphase_commit,
mime_documents_iter, **kwargs):
"""
        If the PUT requires a two-phase commit (a data phase and a commit
        phase), send the proxy server another 100-continue response to
        indicate that we have finished writing the object data.
"""
if use_multiphase_commit:
request.environ['wsgi.input'].\
send_hundred_continue_response()
if not self._read_put_commit_message(mime_documents_iter):
raise HTTPServerError(request=request)
def _drain_mime_request(self, mime_documents_iter, **kwargs):
"""
Drain any remaining MIME docs from the socket. There shouldn't be any,
but we must read the whole request body.
"""
try:
while True:
with ChunkReadTimeout(self.client_timeout):
_junk_hdrs, _junk_body = next(mime_documents_iter)
drain(_junk_body, self.network_chunk_size,
self.client_timeout)
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
except StopIteration:
pass
def _post_commit_updates(self, request, device,
account, container, obj, policy,
orig_metadata, footers_metadata, metadata):
orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
new_delete_at = int(request.headers.get('X-Delete-At') or 0)
if orig_delete_at != new_delete_at:
if new_delete_at:
self.delete_at_update(
'PUT', new_delete_at, account, container, obj, request,
device, policy)
if orig_delete_at:
self.delete_at_update(
'DELETE', orig_delete_at, account, container, obj,
request, device, policy)
update_headers = HeaderKeyDict({
'x-size': metadata['Content-Length'],
'x-content-type': metadata['Content-Type'],
'x-timestamp': metadata['X-Timestamp'],
'x-etag': metadata['ETag']})
# apply any container update header overrides sent with request
self._check_container_override(update_headers, request.headers,
footers_metadata)
self.container_update(
'PUT', account, container, obj, request,
update_headers, device, policy)
@public
@timing_stats()
def PUT(self, request):
"""Handle HTTP PUT requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_obj_name_and_placement(request)
disk_file, fsize, orig_metadata = self._pre_create_checks(
request, device, partition, account, container, obj, policy)
writer = disk_file.writer(size=fsize)
try:
obj_input = request.environ['wsgi.input']
obj_input, multi_stage_mime_state = \
self._do_multi_stage_mime_continue_headers(request, obj_input)
upload_size, etag = self._stage_obj_data(
request, device, obj_input, writer, fsize)
metadata = self._get_request_metadata(request, upload_size, etag)
if multi_stage_mime_state:
footers_metadata = self._read_mime_footers_metadata(
**multi_stage_mime_state)
else:
footers_metadata = {}
self._apply_extra_metadata(request, metadata, footers_metadata)
writer.put(metadata)
if multi_stage_mime_state:
self._send_multi_stage_continue_headers(
request, **multi_stage_mime_state)
if not config_true_value(
request.headers.get('X-Backend-No-Commit', False)):
writer.commit(request.timestamp)
if multi_stage_mime_state:
self._drain_mime_request(**multi_stage_mime_state)
except (DiskFileXattrNotSupported, DiskFileNoSpace):
return HTTPInsufficientStorage(drive=device, request=request)
except ChunkReadError:
return HTTPClientDisconnect(request=request)
except ChunkReadTimeout:
return HTTPRequestTimeout(request=request)
finally:
writer.close()
self._post_commit_updates(request, device,
account, container, obj, policy,
orig_metadata, footers_metadata, metadata)
return HTTPCreated(request=request, etag=etag)
@public
@timing_stats()
def GET(self, request):
"""Handle HTTP GET requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_obj_name_and_placement(request)
request.headers.setdefault('X-Timestamp',
normalize_timestamp(time.time()))
req_timestamp = valid_timestamp(request)
frag_prefs = safe_json_loads(
request.headers.get('X-Backend-Fragment-Preferences'))
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, frag_prefs=frag_prefs,
open_expired=config_true_value(
request.headers.get('x-backend-replication', 'false')))
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
with disk_file.open(current_time=req_timestamp):
metadata = disk_file.get_metadata()
ignore_range_headers = set(
h.strip().lower()
for h in request.headers.get(
'X-Backend-Ignore-Range-If-Metadata-Present',
'').split(','))
if ignore_range_headers.intersection(
h.lower() for h in metadata):
request.headers.pop('Range', None)
obj_size = int(metadata['Content-Length'])
file_x_ts = Timestamp(metadata['X-Timestamp'])
keep_cache = (
self.keep_cache_private
or (
"X-Auth-Token" not in request.headers
and "X-Storage-Token" not in request.headers
)
or (
self.keep_cache_slo_manifest
and config_true_value(
metadata.get("X-Static-Large-Object")
)
)
)
conditional_etag = resolve_etag_is_at_header(request, metadata)
response = Response(
app_iter=disk_file.reader(keep_cache=keep_cache),
request=request, conditional_response=True,
conditional_etag=conditional_etag)
response.headers['Content-Type'] = metadata.get(
'Content-Type', 'application/octet-stream')
for key, value in metadata.items():
if (is_sys_or_user_meta('object', key) or
is_object_transient_sysmeta(key) or
key.lower() in self.allowed_headers):
response.headers[key] = value
response.etag = metadata['ETag']
response.last_modified = file_x_ts.ceil()
response.content_length = obj_size
try:
response.content_encoding = metadata[
'Content-Encoding']
except KeyError:
pass
response.headers['X-Timestamp'] = file_x_ts.normal
response.headers['X-Backend-Timestamp'] = file_x_ts.internal
response.headers['X-Backend-Data-Timestamp'] = \
disk_file.data_timestamp.internal
if disk_file.durable_timestamp:
response.headers['X-Backend-Durable-Timestamp'] = \
disk_file.durable_timestamp.internal
response.headers['X-Backend-Fragments'] = \
_make_backend_fragments_header(disk_file.fragments)
resp = request.get_response(response)
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except (DiskFileNotExist, DiskFileQuarantined) as e:
headers = {}
if hasattr(e, 'timestamp'):
headers['X-Backend-Timestamp'] = e.timestamp.internal
resp = HTTPNotFound(request=request, headers=headers,
conditional_response=True)
return resp
@public
@timing_stats(sample_rate=0.8)
def HEAD(self, request):
"""Handle HTTP HEAD requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_obj_name_and_placement(request)
request.headers.setdefault('X-Timestamp',
normalize_timestamp(time.time()))
req_timestamp = valid_timestamp(request)
frag_prefs = safe_json_loads(
request.headers.get('X-Backend-Fragment-Preferences'))
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, frag_prefs=frag_prefs,
open_expired=config_true_value(
request.headers.get('x-backend-replication', 'false')))
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
metadata = disk_file.read_metadata(current_time=req_timestamp)
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except (DiskFileNotExist, DiskFileQuarantined) as e:
headers = {}
if hasattr(e, 'timestamp'):
headers['X-Backend-Timestamp'] = e.timestamp.internal
return HTTPNotFound(request=request, headers=headers,
conditional_response=True)
conditional_etag = resolve_etag_is_at_header(request, metadata)
response = Response(request=request, conditional_response=True,
conditional_etag=conditional_etag)
response.headers['Content-Type'] = metadata.get(
'Content-Type', 'application/octet-stream')
for key, value in metadata.items():
if (is_sys_or_user_meta('object', key) or
is_object_transient_sysmeta(key) or
key.lower() in self.allowed_headers):
response.headers[key] = value
response.etag = metadata['ETag']
ts = Timestamp(metadata['X-Timestamp'])
response.last_modified = ts.ceil()
# Needed for container sync feature
response.headers['X-Timestamp'] = ts.normal
response.headers['X-Backend-Timestamp'] = ts.internal
response.headers['X-Backend-Data-Timestamp'] = \
disk_file.data_timestamp.internal
if disk_file.durable_timestamp:
response.headers['X-Backend-Durable-Timestamp'] = \
disk_file.durable_timestamp.internal
response.headers['X-Backend-Fragments'] = \
_make_backend_fragments_header(disk_file.fragments)
response.content_length = int(metadata['Content-Length'])
try:
response.content_encoding = metadata['Content-Encoding']
except KeyError:
pass
return response
@public
@timing_stats()
def DELETE(self, request):
"""Handle HTTP DELETE requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_obj_name_and_placement(request)
req_timestamp = valid_timestamp(request)
next_part_power = request.headers.get('X-Backend-Next-Part-Power')
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, next_part_power=next_part_power)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
orig_metadata = disk_file.read_metadata(current_time=req_timestamp)
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except DiskFileExpired as e:
orig_timestamp = e.timestamp
orig_metadata = e.metadata
response_class = HTTPNotFound
except DiskFileDeleted as e:
orig_timestamp = e.timestamp
orig_metadata = {}
response_class = HTTPNotFound
except (DiskFileNotExist, DiskFileQuarantined):
orig_timestamp = 0
orig_metadata = {}
response_class = HTTPNotFound
else:
orig_timestamp = disk_file.data_timestamp
if orig_timestamp < req_timestamp:
response_class = HTTPNoContent
else:
response_class = HTTPConflict
response_timestamp = max(orig_timestamp, req_timestamp)
orig_delete_at = Timestamp(orig_metadata.get('X-Delete-At') or 0)
try:
req_if_delete_at_val = request.headers['x-if-delete-at']
req_if_delete_at = Timestamp(req_if_delete_at_val)
except KeyError:
pass
except ValueError:
return HTTPBadRequest(
request=request,
body='Bad X-If-Delete-At header value')
else:
# request includes x-if-delete-at; we must not place a tombstone
# if we can not verify the x-if-delete-at time
if not orig_timestamp:
# no object found at all
return HTTPNotFound()
if orig_timestamp >= req_timestamp:
# Found a newer object -- return 409 as work item is stale
return HTTPConflict()
if orig_delete_at != req_if_delete_at:
return HTTPPreconditionFailed(
request=request,
body='X-If-Delete-At and X-Delete-At do not match')
else:
# differentiate success from no object at all
response_class = HTTPNoContent
if orig_delete_at:
self.delete_at_update('DELETE', orig_delete_at, account,
container, obj, request, device,
policy)
if orig_timestamp < req_timestamp:
try:
disk_file.delete(req_timestamp)
except DiskFileNoSpace:
return HTTPInsufficientStorage(drive=device, request=request)
self.container_update(
'DELETE', account, container, obj, request,
HeaderKeyDict({'x-timestamp': req_timestamp.internal}),
device, policy)
return response_class(
request=request,
headers={'X-Backend-Timestamp': response_timestamp.internal,
'X-Backend-Content-Type': orig_metadata.get(
'Content-Type', '')})
@public
@replication
@timing_stats(sample_rate=0.1)
def REPLICATE(self, request):
"""
Handle REPLICATE requests for the Swift Object Server. This is used
by the object replicator to get hashes for directories.
Note that the name REPLICATE is preserved for historical reasons as
this verb really just returns the hashes information for the specified
parameters and is used, for example, by both replication and EC.
"""
device, partition, suffix_parts, policy = \
get_name_and_placement(request, 2, 3, True)
suffixes = suffix_parts.split('-') if suffix_parts else []
try:
hashes = self._diskfile_router[policy].get_hashes(
device, partition, suffixes, policy,
skip_rehash=bool(suffixes))
except DiskFileDeviceUnavailable:
resp = HTTPInsufficientStorage(drive=device, request=request)
else:
# force pickle protocol for compatibility with py2 nodes
resp = Response(body=pickle.dumps(hashes, protocol=2))
return resp
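    # Illustrative sketch (comments only): a REPLICATE request for suffixes
    # 'a83' and 'f21' of partition 1024 on device sda1 arrives as
    #
    #   REPLICATE /sda1/1024/a83-f21
    #
    # and the response body is a pickled mapping of suffix to suffix hash,
    # e.g. {'a83': 'd41d8cd98f00...', 'f21': '9e107d9d372b...'} (hash values
    # here are made up).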
@public
@replication
@timing_stats(sample_rate=0.1)
def SSYNC(self, request):
# the ssync sender may want to send PUT subrequests for non-durable
# data that should not be committed; legacy behaviour has been to
# commit all PUTs (subject to EC footer metadata), so we need to
# indicate to the sender that this object server has been upgraded to
# understand the X-Backend-No-Commit header.
headers = {'X-Backend-Accept-No-Commit': True}
return Response(app_iter=ssync_receiver.Receiver(self, request)(),
headers=headers)
def __call__(self, env, start_response):
"""WSGI Application entry point for the Swift Object Server."""
start_time = time.time()
req = Request(env)
self.logger.txn_id = req.headers.get('x-trans-id', None)
if not check_utf8(wsgi_to_str(req.path_info), internal=True):
res = HTTPPreconditionFailed(body='Invalid UTF8 or contains NULL')
else:
try:
# disallow methods which have not been marked 'public'
if req.method not in self.allowed_methods:
res = HTTPMethodNotAllowed()
else:
res = getattr(self, req.method)(req)
except DiskFileCollision:
res = HTTPForbidden(request=req)
except HTTPException as error_response:
res = error_response
except (Exception, Timeout):
self.logger.exception(
'ERROR __call__ error with %(method)s'
' %(path)s ', {'method': req.method, 'path': req.path})
res = HTTPInternalServerError(body=traceback.format_exc())
trans_time = time.time() - start_time
res.fix_conditional_response()
if self.log_requests:
log_line = get_log_line(req, res, trans_time, '', self.log_format,
self.anonymization_method,
self.anonymization_salt)
if req.method in ('REPLICATE', 'SSYNC') or \
'X-Backend-Replication' in req.headers:
self.logger.debug(log_line)
else:
self.logger.info(log_line)
if req.method in ('PUT', 'DELETE'):
slow = self.slow - trans_time
if slow > 0:
sleep(slow)
# To be able to zero-copy send the object, we need a few things.
# First, we have to be responding successfully to a GET, or else we're
# not sending the object. Second, we have to be able to extract the
# socket file descriptor from the WSGI input object. Third, the
# diskfile has to support zero-copy send.
#
# There's a good chance that this could work for 206 responses too,
# but the common case is sending the whole object, so we'll start
# there.
if req.method == 'GET' and res.status_int == 200 and \
isinstance(env['wsgi.input'], wsgi.Input):
app_iter = getattr(res, 'app_iter', None)
checker = getattr(app_iter, 'can_zero_copy_send', None)
if checker and checker():
# For any kind of zero-copy thing like sendfile or splice, we
# need the file descriptor. Eventlet doesn't provide a clean
# way of getting that, so we resort to this.
wsock = env['wsgi.input'].get_socket()
wsockfd = wsock.fileno()
# Don't call zero_copy_send() until after we force the HTTP
# headers out of Eventlet and into the socket.
def zero_copy_iter():
# If possible, set TCP_CORK so that headers don't
# immediately go on the wire, but instead, wait for some
# response body to make the TCP frames as large as
# possible (and hence as few packets as possible).
#
# On non-Linux systems, we might consider TCP_NODELAY, but
# since the only known zero-copy-capable diskfile uses
# Linux-specific syscalls, we'll defer that work until
# someone needs it.
if hasattr(socket, 'TCP_CORK'):
wsock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_CORK, 1)
yield EventletPlungerString()
try:
app_iter.zero_copy_send(wsockfd)
except Exception:
self.logger.exception("zero_copy_send() blew up")
raise
yield b''
# Get headers ready to go out
res(env, start_response)
return zero_copy_iter()
else:
return res(env, start_response)
else:
return res(env, start_response)
def global_conf_callback(preloaded_app_conf, global_conf):
"""
Callback for swift.common.wsgi.run_wsgi during the global_conf
creation so that we can add our replication_semaphore, used to
    limit the number of concurrent SSYNC requests across all
workers.
:param preloaded_app_conf: The preloaded conf for the WSGI app.
This conf instance will go away, so
just read from it, don't write.
:param global_conf: The global conf that will eventually be
passed to the app_factory function later.
This conf is created before the worker
subprocesses are forked, so can be useful to
set up semaphores, shared memory, etc.
"""
replication_concurrency = int(
preloaded_app_conf.get('replication_concurrency') or 4)
if replication_concurrency:
# Have to put the value in a list so it can get past paste
global_conf['replication_semaphore'] = [
multiprocessing.BoundedSemaphore(replication_concurrency)]
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI object server apps"""
conf = global_conf.copy()
conf.update(local_conf)
return ObjectController(conf)
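# Illustrative sketch (not used by the server): how the two paste.deploy
# hooks above fit together. Normally swift.common.wsgi.run_wsgi calls
# global_conf_callback before forking workers and paste calls app_factory in
# each worker; this hypothetical helper just mimics that sequence with
# in-memory conf dicts so the semaphore sharing is easier to see.
def _example_wire_object_server():
    preloaded_app_conf = {'replication_concurrency': '2'}
    global_conf = {}
    # populates global_conf['replication_semaphore'] as a one-element list
    global_conf_callback(preloaded_app_conf, global_conf)
    # each worker then builds its app from the merged configuration
    app = app_factory(global_conf, devices='/srv/node', mount_check='false')
    return app, global_conf.get('replication_semaphore')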
| swift-master | swift/obj/server.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import os
import errno
from os.path import isdir, isfile, join, dirname
import random
import shutil
import time
import itertools
from six import viewkeys
import six.moves.cPickle as pickle
import eventlet
from eventlet import GreenPool, queue, tpool, Timeout, sleep
from eventlet.green import subprocess
from swift.common.constraints import check_drive
from swift.common.ring.utils import is_local_device
from swift.common.utils import whataremyips, unlink_older_than, \
compute_eta, get_logger, dump_recon_cache, \
rsync_module_interpolation, mkdirs, config_true_value, \
config_auto_int_value, storage_directory, \
load_recon_cache, PrefixLoggerAdapter, parse_override_options, \
distribute_evenly, listdir, node_to_string
from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
from swift.common.http import HTTP_OK, HTTP_INSUFFICIENT_STORAGE
from swift.common.recon import RECON_OBJECT_FILE, DEFAULT_RECON_CACHE_PATH
from swift.obj import ssync_sender
from swift.obj.diskfile import get_data_dir, get_tmp_dir, DiskFileRouter
from swift.common.storage_policy import POLICIES, REPL_POLICY
from swift.common.exceptions import PartitionLockTimeout
DEFAULT_RSYNC_TIMEOUT = 900
def _do_listdir(partition, replication_cycle):
return (((partition + replication_cycle) % 10) == 0)
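# Illustrative sketch (not called anywhere): _do_listdir spreads the cost of
# full directory listings across passes: for any given partition, exactly
# one of the ten replication_cycle values (0-9) triggers a listdir. This
# hypothetical helper just shows which cycle that is for a few partitions.
def _example_do_listdir_schedule(partitions=(0, 1, 7, 12345)):
    schedule = {}
    for partition in partitions:
        schedule[partition] = [cycle for cycle in range(10)
                               if _do_listdir(partition, cycle)]
    # e.g. partition 7 -> [3], partition 12345 -> [5]
    return schedule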
class Stats(object):
fields = ['attempted', 'failure', 'hashmatch', 'remove', 'rsync',
'success', 'suffix_count', 'suffix_hash', 'suffix_sync',
'failure_nodes']
@classmethod
def from_recon(cls, dct):
return cls(**{k: v for k, v in dct.items() if k in cls.fields})
def to_recon(self):
return {k: getattr(self, k) for k in self.fields}
def __init__(self, attempted=0, failure=0, hashmatch=0, remove=0, rsync=0,
success=0, suffix_count=0, suffix_hash=0,
suffix_sync=0, failure_nodes=None):
self.attempted = attempted
self.failure = failure
self.hashmatch = hashmatch
self.remove = remove
self.rsync = rsync
self.success = success
self.suffix_count = suffix_count
self.suffix_hash = suffix_hash
self.suffix_sync = suffix_sync
self.failure_nodes = defaultdict(lambda: defaultdict(int),
(failure_nodes or {}))
def __add__(self, other):
total = type(self)()
total.attempted = self.attempted + other.attempted
total.failure = self.failure + other.failure
total.hashmatch = self.hashmatch + other.hashmatch
total.remove = self.remove + other.remove
total.rsync = self.rsync + other.rsync
total.success = self.success + other.success
total.suffix_count = self.suffix_count + other.suffix_count
total.suffix_hash = self.suffix_hash + other.suffix_hash
total.suffix_sync = self.suffix_sync + other.suffix_sync
all_failed_ips = (set(list(self.failure_nodes.keys()) +
list(other.failure_nodes.keys())))
for ip in all_failed_ips:
self_devs = self.failure_nodes.get(ip, {})
other_devs = other.failure_nodes.get(ip, {})
this_ip_failures = {}
for dev in set(list(self_devs.keys()) + list(other_devs.keys())):
this_ip_failures[dev] = (
self_devs.get(dev, 0) + other_devs.get(dev, 0))
total.failure_nodes[ip] = this_ip_failures
return total
def add_failure_stats(self, failures):
"""
Note the failure of one or more devices.
:param failures: a list of (ip, device-name) pairs that failed
"""
self.failure += len(failures)
for ip, device in failures:
self.failure_nodes[ip][device] += 1
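# Illustrative sketch (not called anywhere): Stats objects from different
# devices or workers can be merged with "+" and round-tripped through the
# recon cache format; the numbers below are made up.
def _example_merge_stats():
    sda_stats = Stats(attempted=10, success=9, failure=1)
    sda_stats.add_failure_stats([('10.0.0.1', 'sdb')])
    sdc_stats = Stats(attempted=7, success=7, rsync=3)
    total = sda_stats + sdc_stats
    # total.attempted == 17; total.failure_nodes {'10.0.0.1': {'sdb': 1}}
    return Stats.from_recon(total.to_recon())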
class ObjectReplicator(Daemon):
"""
Replicate objects.
Encapsulates most logic and data needed by the object replication process.
Each call to .replicate() performs one replication pass. It's up to the
caller to do this in a loop.
"""
def __init__(self, conf, logger=None):
"""
:param conf: configuration object obtained from ConfigParser
:param logger: logging object
"""
self.conf = conf
self.logger = PrefixLoggerAdapter(
logger or get_logger(conf, log_route='object-replicator'), {})
self.devices_dir = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.ring_ip = conf.get('ring_ip', conf.get('bind_ip', '0.0.0.0'))
self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
self.port = None if self.servers_per_port else \
int(conf.get('bind_port', 6200))
self.concurrency = int(conf.get('concurrency', 1))
self.replicator_workers = int(conf.get('replicator_workers', 0))
self.policies = [policy for policy in POLICIES
if policy.policy_type == REPL_POLICY]
self.stats_interval = float(conf.get('stats_interval', '300'))
self.ring_check_interval = float(conf.get('ring_check_interval', 15))
self.next_check = time.time() + self.ring_check_interval
self.replication_cycle = random.randint(0, 9)
self.partition_times = []
self.interval = float(conf.get('interval') or
conf.get('run_pause') or 30)
if 'run_pause' in conf:
if 'interval' in conf:
self.logger.warning(
'Option object-replicator/run_pause is deprecated and '
'object-replicator/interval is already configured. You '
'can safely remove run_pause; it is now ignored and will '
'be removed in a future version.')
else:
self.logger.warning(
'Option object-replicator/run_pause is deprecated and '
'will be removed in a future version. Update your '
'configuration to use option object-replicator/interval.')
self.rsync_timeout = int(conf.get('rsync_timeout',
DEFAULT_RSYNC_TIMEOUT))
self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
self.rsync_compress = config_true_value(
conf.get('rsync_compress', 'no'))
self.rsync_module = conf.get('rsync_module', '').rstrip('/')
if not self.rsync_module:
self.rsync_module = '{replication_ip}::object'
self.http_timeout = int(conf.get('http_timeout', 60))
self.recon_cache_path = conf.get('recon_cache_path',
DEFAULT_RECON_CACHE_PATH)
self.rcache = os.path.join(self.recon_cache_path, RECON_OBJECT_FILE)
self._next_rcache_update = time.time() + self.stats_interval
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.node_timeout = float(conf.get('node_timeout', 10))
self.sync_method = getattr(self, conf.get('sync_method') or 'rsync')
self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.default_headers = {
'Content-Length': '0',
'user-agent': 'object-replicator %s' % os.getpid()}
self.log_rsync_transfers = config_true_value(
conf.get('log_rsync_transfers', True))
self.rsync_error_log_line_length = \
int(conf.get('rsync_error_log_line_length', 0))
self.handoffs_first = config_true_value(conf.get('handoffs_first',
False))
self.handoff_delete = config_auto_int_value(
conf.get('handoff_delete', 'auto'), 0)
if any((self.handoff_delete, self.handoffs_first)):
self.logger.warning('Handoff only mode is not intended for normal '
'operation, please disable handoffs_first and '
'handoff_delete before the next '
'normal rebalance')
self.is_multiprocess_worker = None
self._df_router = DiskFileRouter(conf, self.logger)
self._child_process_reaper_queue = queue.LightQueue()
self.rings_mtime = None
def _zero_stats(self):
self.stats_for_dev = defaultdict(Stats)
@property
def total_stats(self):
return sum(self.stats_for_dev.values(), Stats())
def _emplace_log_prefix(self, worker_index):
self.logger.set_prefix("[worker %d/%d pid=%d] " % (
worker_index + 1, # use 1-based indexing for more readable logs
self.replicator_workers,
os.getpid()))
def _child_process_reaper(self):
"""
Consume processes from self._child_process_reaper_queue and wait() for
them
"""
procs = set()
done = False
while not done:
timeout = 60 if procs else None
try:
new_proc = self._child_process_reaper_queue.get(
timeout=timeout)
if new_proc is not None:
procs.add(new_proc)
else:
done = True
except queue.Empty:
pass
reaped_procs = set()
for proc in procs:
# this will reap the process if it has exited, but
# otherwise will not wait
if proc.poll() is not None:
reaped_procs.add(proc)
procs -= reaped_procs
def get_worker_args(self, once=False, **kwargs):
if self.replicator_workers < 1:
return []
override_opts = parse_override_options(once=once, **kwargs)
have_overrides = bool(override_opts.devices or override_opts.partitions
or override_opts.policies)
# save this off for ring-change detection later in is_healthy()
self.all_local_devices = self.get_local_devices()
if override_opts.devices:
devices_to_replicate = [
d for d in override_opts.devices
if d in self.all_local_devices]
else:
# The sort isn't strictly necessary since we're just trying to
# spread devices around evenly, but it makes testing easier.
devices_to_replicate = sorted(self.all_local_devices)
# Distribute devices among workers as evenly as possible
self.replicator_workers = min(self.replicator_workers,
len(devices_to_replicate))
return [{'override_devices': devs,
'override_partitions': override_opts.partitions,
'override_policies': override_opts.policies,
'have_overrides': have_overrides,
'multiprocess_worker_index': index}
for index, devs in enumerate(
distribute_evenly(devices_to_replicate,
self.replicator_workers))]
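    # Illustrative sketch (comments only): with replicator_workers = 2 and
    # local devices ['sda', 'sdb', 'sdc'] and no overrides, the list built
    # above contains two worker-arg dicts roughly like
    #
    #   {'override_devices': ['sda', 'sdc'], 'override_partitions': [],
    #    'override_policies': [], 'have_overrides': False,
    #    'multiprocess_worker_index': 0}
    #   {'override_devices': ['sdb'], ..., 'multiprocess_worker_index': 1}
    #
    # (the exact device-to-worker assignment is whatever distribute_evenly
    # produces).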
def is_healthy(self):
"""
Check whether our set of local devices remains the same.
If devices have been added or removed, then we return False here so
that we can kill off any worker processes and then distribute the
new set of local devices across a new set of workers so that all
devices are, once again, being worked on.
This function may also cause recon stats to be updated.
:returns: False if any local devices have been added or removed,
True otherwise
"""
# We update recon here because this is the only function we have in
# a multiprocess replicator that gets called periodically in the
# parent process.
if time.time() >= self._next_rcache_update:
update = self.aggregate_recon_update()
dump_recon_cache(update, self.rcache, self.logger)
rings_mtime = [os.path.getmtime(self.load_object_ring(
policy).serialized_path) for policy in self.policies]
if self.rings_mtime == rings_mtime:
return True
self.rings_mtime = rings_mtime
return self.get_local_devices() == self.all_local_devices
def get_local_devices(self):
"""
Returns a set of all local devices in all replication-type storage
policies.
        These are just the device names, e.g. "sdq" or "d1234", not the full
        ring entries.
"""
ips = whataremyips(self.ring_ip)
local_devices = set()
for policy in self.policies:
self.load_object_ring(policy)
for device in policy.object_ring.devs:
if device and is_local_device(
ips, self.port,
device['replication_ip'],
device['replication_port']):
local_devices.add(device['device'])
return local_devices
# Just exists for doc anchor point
def sync(self, node, job, suffixes, *args, **kwargs):
"""
Synchronize local suffix directories from a partition with a remote
node.
:param node: the "dev" entry for the remote node to sync with
:param job: information about the partition being synced
:param suffixes: a list of suffixes which need to be pushed
        :returns: a 2-element tuple; a boolean indicating success or failure,
                  and a dict (object hash => timestamp) of objects that are
                  candidates for local deletion (empty when using rsync)
"""
return self.sync_method(node, job, suffixes, *args, **kwargs)
def load_object_ring(self, policy):
"""
Make sure the policy's rings are loaded.
:param policy: the StoragePolicy instance
:returns: appropriate ring object
"""
policy.load_ring(self.swift_dir)
return policy.object_ring
def _limit_rsync_log(self, line):
"""
        If rsync_error_log_line_length is set, truncate the given log line to
        that length.
        :param line: an rsync log line
        :return: the line truncated to rsync_error_log_line_length if that
                 option is set, otherwise the original line
"""
if self.rsync_error_log_line_length:
return line[:self.rsync_error_log_line_length]
return line
def _rsync(self, args):
"""
Execute the rsync binary to replicate a partition.
:returns: return code of rsync process. 0 is successful
"""
start_time = time.time()
proc = None
try:
with Timeout(self.rsync_timeout):
proc = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
results = proc.stdout.read()
ret_val = proc.wait()
except Timeout:
self.logger.error(
self._limit_rsync_log(
"Killing long-running rsync after %ds: %s" % (
self.rsync_timeout, str(args))))
if proc:
proc.kill()
try:
# Note: Python 2.7's subprocess.Popen class doesn't take
# any arguments for wait(), but Python 3's does.
# However, Eventlet's replacement Popen takes a timeout
# argument regardless of Python version, so we don't
# need any conditional code here.
proc.wait(timeout=1.0)
except subprocess.TimeoutExpired:
# Sometimes a process won't die immediately even after a
# SIGKILL. This can be due to failing disks, high load,
# or other reasons. We can't wait for it forever since
# we're taking up a slot in the (green)thread pool, so
# we send it over to another greenthread, not part of
# our pool, whose sole duty is to wait for child
# processes to exit.
self._child_process_reaper_queue.put(proc)
return 1 # failure response code
total_time = time.time() - start_time
for result in results.decode('utf8').split('\n'):
if result == '':
continue
if result.startswith('cd+'):
continue
if result.startswith('<') and not self.log_rsync_transfers:
continue
if not ret_val:
self.logger.debug(result)
else:
self.logger.error(result)
if ret_val:
self.logger.error(
self._limit_rsync_log(
'Bad rsync return code: %(ret)d <- %(args)s' %
{'args': str(args), 'ret': ret_val}))
else:
log_method = self.logger.info if results else self.logger.debug
log_method(
"Successful rsync of %(src)s to %(dst)s (%(time).03f)",
{'src': args[-2][:-3] + '...', 'dst': args[-1],
'time': total_time})
return ret_val
def rsync(self, node, job, suffixes):
"""
Uses rsync to implement the sync method. This was the first
sync method in Swift.
"""
if not os.path.exists(job['path']):
return False, {}
args = [
'rsync',
'--recursive',
'--whole-file',
'--human-readable',
'--xattrs',
'--itemize-changes',
'--ignore-existing',
'--timeout=%s' % self.rsync_io_timeout,
'--contimeout=%s' % self.rsync_io_timeout,
'--bwlimit=%s' % self.rsync_bwlimit,
'--exclude=.*.%s' % ''.join('[0-9a-zA-Z]' for i in range(6))
]
if self.rsync_compress and \
job['region'] != node['region']:
# Allow for compression, but only if the remote node is in
# a different region than the local one.
args.append('--compress')
rsync_module = rsync_module_interpolation(self.rsync_module, node)
had_any = False
for suffix in suffixes:
spath = join(job['path'], suffix)
if os.path.exists(spath):
args.append(spath)
had_any = True
if not had_any:
return False, {}
data_dir = get_data_dir(job['policy'])
args.append(join(rsync_module, node['device'],
data_dir, job['partition']))
success = (self._rsync(args) == 0)
# TODO: Catch and swallow (or at least minimize) timeouts when doing
# an update job; if we don't manage to notify the remote, we should
# catch it on the next pass
if success or not job['delete']:
headers = dict(self.default_headers)
headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
with Timeout(self.http_timeout):
conn = http_connect(
node['replication_ip'], node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'/' + '-'.join(suffixes), headers=headers)
conn.getresponse().read()
return success, {}
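    # Illustrative sketch (comments only): with the default settings, pushing
    # suffix 'a83' of partition 1024 on local device sdb to a remote node at
    # 10.0.0.2 with device sda1 assembles a command along the lines of
    #
    #   rsync --recursive --whole-file --human-readable --xattrs \
    #       --itemize-changes --ignore-existing --timeout=30 --contimeout=30 \
    #       --bwlimit=0 --exclude=<temp-file pattern> \
    #       /srv/node/sdb/objects/1024/a83 \
    #       10.0.0.2::object/sda1/objects/1024
    #
    # followed by a REPLICATE request naming the pushed suffixes so the
    # remote end refreshes its suffix hashes.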
def ssync(self, node, job, suffixes, remote_check_objs=None):
return ssync_sender.Sender(
self, node, job, suffixes, remote_check_objs)()
def check_ring(self, object_ring):
"""
Check to see if the ring has been updated
:param object_ring: the ring to check
:returns: boolean indicating whether or not the ring has changed
"""
if time.time() > self.next_check:
self.next_check = time.time() + self.ring_check_interval
if object_ring.has_changed():
return False
return True
def update_deleted(self, job):
"""
High-level method that replicates a single partition that doesn't
belong on this node.
:param job: a dict containing info about the partition to be replicated
"""
def tpool_get_suffixes(path):
return [suff for suff in listdir(path)
if len(suff) == 3 and isdir(join(path, suff))]
stats = self.stats_for_dev[job['device']]
stats.attempted += 1
self.logger.increment('partition.delete.count.%s' % (job['device'],))
headers = dict(self.default_headers)
headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
failure_devs_info = set()
begin = time.time()
handoff_partition_deleted = False
try:
df_mgr = self._df_router[job['policy']]
# Only object-server can take this lock if an incoming SSYNC is
            # running on the same partition. Taking the lock here ensures we
# won't enter a race condition where both nodes try to
# cross-replicate the same partition and both delete it.
with df_mgr.partition_lock(job['device'], job['policy'],
job['partition'], name='replication',
timeout=0.2):
responses = []
suffixes = tpool.execute(tpool_get_suffixes, job['path'])
synced_remote_regions = {}
delete_objs = None
if suffixes:
for node in job['nodes']:
stats.rsync += 1
kwargs = {}
if self.conf.get('sync_method', 'rsync') == 'ssync' \
and node['region'] in synced_remote_regions:
kwargs['remote_check_objs'] = \
synced_remote_regions[node['region']]
# candidates is a dict(hash=>timestamp) of objects
# for deletion
success, candidates = self.sync(
node, job, suffixes, **kwargs)
if not success:
failure_devs_info.add((node['replication_ip'],
node['device']))
if success and node['region'] != job['region']:
synced_remote_regions[node['region']] = viewkeys(
candidates)
responses.append(success)
for cand_objs in synced_remote_regions.values():
if delete_objs is None:
delete_objs = cand_objs
else:
delete_objs = delete_objs & cand_objs
if self.handoff_delete:
# delete handoff if we have had handoff_delete successes
successes_count = len([resp for resp in responses if resp])
delete_handoff = successes_count >= self.handoff_delete
else:
# delete handoff if all syncs were successful
delete_handoff = len(responses) == len(job['nodes']) and \
all(responses)
if delete_handoff:
stats.remove += 1
if (self.conf.get('sync_method', 'rsync') == 'ssync' and
delete_objs is not None):
self.logger.info("Removing %s objects",
len(delete_objs))
_junk, error_paths = self.delete_handoff_objs(
job, delete_objs)
                        # If replication from a hand-off device succeeded but
                        # the cleanup afterwards failed, mark the remote
                        # devices that were the targets of that replication;
                        # a failed cleanup means the replicator needs to
                        # replicate the partition again with the same info.
if error_paths:
failure_devs_info.update(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in job['nodes']])
else:
self.delete_partition(job['path'])
handoff_partition_deleted = True
elif not suffixes:
self.delete_partition(job['path'])
handoff_partition_deleted = True
except PartitionLockTimeout:
self.logger.info("Unable to lock handoff partition %s for "
"replication on device %s policy %d",
job['partition'], job['device'],
job['policy'])
self.logger.increment('partition.lock-failure.count')
except (Exception, Timeout):
self.logger.exception("Error syncing handoff partition")
finally:
stats.add_failure_stats(failure_devs_info)
target_devs_info = set([(target_dev['replication_ip'],
target_dev['device'])
for target_dev in job['nodes']])
stats.success += len(target_devs_info - failure_devs_info)
if not handoff_partition_deleted:
self.handoffs_remaining += 1
self.partition_times.append(time.time() - begin)
self.logger.timing_since('partition.delete.timing', begin)
def delete_partition(self, path):
self.logger.info("Removing partition: %s", path)
try:
tpool.execute(shutil.rmtree, path)
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ENOTEMPTY, errno.ENODATA):
# Don't worry if there was a race to create or delete,
# or some disk corruption that happened after the sync
raise
def delete_handoff_objs(self, job, delete_objs):
success_paths = []
error_paths = []
for object_hash in delete_objs:
object_path = storage_directory(job['obj_path'], job['partition'],
object_hash)
tpool.execute(shutil.rmtree, object_path, ignore_errors=True)
suffix_dir = dirname(object_path)
try:
os.rmdir(suffix_dir)
success_paths.append(object_path)
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
error_paths.append(object_path)
self.logger.exception(
"Unexpected error trying to cleanup suffix dir %r",
suffix_dir)
return success_paths, error_paths
def update(self, job):
"""
High-level method that replicates a single partition.
:param job: a dict containing info about the partition to be replicated
"""
stats = self.stats_for_dev[job['device']]
stats.attempted += 1
self.logger.increment('partition.update.count.%s' % (job['device'],))
headers = dict(self.default_headers)
headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
target_devs_info = set()
failure_devs_info = set()
begin = time.time()
df_mgr = self._df_router[job['policy']]
try:
hashed, local_hash = tpool.execute(
df_mgr._get_hashes, job['device'],
job['partition'], job['policy'],
do_listdir=_do_listdir(
int(job['partition']),
self.replication_cycle))
stats.suffix_hash += hashed
self.logger.update_stats('suffix.hashes', hashed)
attempts_left = len(job['nodes'])
synced_remote_regions = set()
random.shuffle(job['nodes'])
nodes = itertools.chain(
job['nodes'],
job['policy'].object_ring.get_more_nodes(
int(job['partition'])))
while attempts_left > 0:
# If this throws StopIteration it will be caught way below
node = next(nodes)
node_str = node_to_string(node, replication=True)
target_devs_info.add((node['replication_ip'], node['device']))
attempts_left -= 1
# if we have already synced to this remote region,
# don't sync again on this replication pass
if node['region'] in synced_remote_regions:
continue
try:
with Timeout(self.http_timeout):
resp = http_connect(
node['replication_ip'], node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'', headers=headers).getresponse()
if resp.status == HTTP_INSUFFICIENT_STORAGE:
self.logger.error('%s responded as unmounted',
node_str)
attempts_left += 1
failure_devs_info.add((node['replication_ip'],
node['device']))
continue
if resp.status != HTTP_OK:
self.logger.error(
"Invalid response %(resp)s from %(remote)s",
{'resp': resp.status, 'remote': node_str})
failure_devs_info.add((node['replication_ip'],
node['device']))
continue
remote_hash = pickle.loads(resp.read())
del resp
suffixes = [suffix for suffix in local_hash if
local_hash[suffix] !=
remote_hash.get(suffix, -1)]
if not suffixes:
stats.hashmatch += 1
continue
hashed, recalc_hash = tpool.execute(
df_mgr._get_hashes,
job['device'], job['partition'], job['policy'],
recalculate=suffixes)
self.logger.update_stats('suffix.hashes', hashed)
local_hash = recalc_hash
suffixes = [suffix for suffix in local_hash if
local_hash[suffix] !=
remote_hash.get(suffix, -1)]
if not suffixes:
stats.hashmatch += 1
continue
stats.rsync += 1
success, _junk = self.sync(node, job, suffixes)
if not success:
failure_devs_info.add((node['replication_ip'],
node['device']))
# add only remote region when replicate succeeded
if success and node['region'] != job['region']:
synced_remote_regions.add(node['region'])
stats.suffix_sync += len(suffixes)
self.logger.update_stats('suffix.syncs', len(suffixes))
except (Exception, Timeout):
failure_devs_info.add((node['replication_ip'],
node['device']))
self.logger.exception("Error syncing with node: %s",
node_str)
stats.suffix_count += len(local_hash)
except StopIteration:
self.logger.error('Ran out of handoffs while replicating '
'partition %s of policy %d',
job['partition'], int(job['policy']))
except (Exception, Timeout):
failure_devs_info.update(target_devs_info)
self.logger.exception("Error syncing partition")
finally:
stats.add_failure_stats(failure_devs_info)
stats.success += len(target_devs_info - failure_devs_info)
self.partition_times.append(time.time() - begin)
self.logger.timing_since('partition.update.timing', begin)
def stats_line(self):
"""
Logs various stats for the currently running replication pass.
"""
stats = self.total_stats
replication_count = stats.attempted
if replication_count > self.last_replication_count:
self.last_replication_count = replication_count
elapsed = (time.time() - self.start) or 0.000001
rate = replication_count / elapsed
self.logger.info(
"%(replicated)d/%(total)d (%(percentage).2f%%)"
" partitions replicated in %(time).2fs (%(rate).2f/sec, "
"%(remaining)s remaining)",
{'replicated': replication_count, 'total': self.job_count,
'percentage': replication_count * 100.0 / self.job_count,
'time': time.time() - self.start, 'rate': rate,
'remaining': '%d%s' % compute_eta(self.start,
replication_count,
self.job_count)})
self.logger.info('%(success)s successes, %(failure)s failures',
dict(success=stats.success,
failure=stats.failure))
if stats.suffix_count:
self.logger.info(
"%(checked)d suffixes checked - "
"%(hashed).2f%% hashed, %(synced).2f%% synced",
{'checked': stats.suffix_count,
'hashed':
(stats.suffix_hash * 100.0) / stats.suffix_count,
'synced':
(stats.suffix_sync * 100.0) / stats.suffix_count})
self.partition_times.sort()
self.logger.info(
"Partition times: max %(max).4fs, "
"min %(min).4fs, med %(med).4fs",
{'max': self.partition_times[-1],
'min': self.partition_times[0],
'med': self.partition_times[
len(self.partition_times) // 2]})
else:
self.logger.info(
"Nothing replicated for %s seconds.",
(time.time() - self.start))
def heartbeat(self):
"""
Loop that runs in the background during replication. It periodically
logs progress.
"""
while True:
eventlet.sleep(self.stats_interval)
self.stats_line()
def build_replication_jobs(self, policy, ips, override_devices=None,
override_partitions=None):
"""
Helper function for collect_jobs to build jobs for replication
using replication style storage policy
"""
jobs = []
df_mgr = self._df_router[policy]
self.all_devs_info.update(
[(dev['replication_ip'], dev['device'])
for dev in policy.object_ring.devs if dev])
data_dir = get_data_dir(policy)
found_local = False
for local_dev in [dev for dev in policy.object_ring.devs
if (dev
and is_local_device(ips,
self.port,
dev['replication_ip'],
dev['replication_port'])
and (override_devices is None
or dev['device'] in override_devices))]:
found_local = True
local_dev_stats = self.stats_for_dev[local_dev['device']]
try:
dev_path = check_drive(self.devices_dir, local_dev['device'],
self.mount_check)
except ValueError as err:
local_dev_stats.add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in policy.object_ring.devs
if failure_dev])
self.logger.warning("%s", err)
continue
obj_path = join(dev_path, data_dir)
tmp_path = join(dev_path, get_tmp_dir(policy))
unlink_older_than(tmp_path, time.time() -
df_mgr.reclaim_age)
if not os.path.exists(obj_path):
try:
mkdirs(obj_path)
except Exception:
self.logger.exception('ERROR creating %s' % obj_path)
continue
for partition in listdir(obj_path):
if (override_partitions is not None and partition.isdigit()
and int(partition) not in override_partitions):
continue
if (partition.startswith('auditor_status_') and
partition.endswith('.json')):
# ignore auditor status files
continue
part_nodes = None
try:
job_path = join(obj_path, partition)
part_nodes = policy.object_ring.get_part_nodes(
int(partition))
nodes = [node for node in part_nodes
if node['id'] != local_dev['id']]
jobs.append(
dict(path=job_path,
device=local_dev['device'],
obj_path=obj_path,
nodes=nodes,
delete=len(nodes) > len(part_nodes) - 1,
policy=policy,
partition=partition,
region=local_dev['region']))
except ValueError:
if part_nodes:
local_dev_stats.add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in nodes])
else:
local_dev_stats.add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in policy.object_ring.devs
if failure_dev])
continue
if not found_local:
self.logger.error("Can't find itself in policy with index %d with"
" ips %s and with port %s in ring file, not"
" replicating",
int(policy), ", ".join(ips), self.port)
return jobs
def collect_jobs(self, override_devices=None, override_partitions=None,
override_policies=None):
"""
Returns a sorted list of jobs (dictionaries) that specify the
partitions, nodes, etc to be rsynced.
:param override_devices: if set, only jobs on these devices
will be returned
:param override_partitions: if set, only jobs on these partitions
will be returned
:param override_policies: if set, only jobs in these storage
policies will be returned
"""
jobs = []
ips = whataremyips(self.ring_ip)
for policy in self.policies:
# Skip replication if next_part_power is set. In this case
# every object is hard-linked twice, but the replicator can't
# detect them and would create a second copy of the file if not
# yet existing - and this might double the actual transferred
# and stored data
next_part_power = getattr(
policy.object_ring, 'next_part_power', None)
if next_part_power is not None:
self.logger.warning(
"next_part_power set in policy '%s'. Skipping",
policy.name)
continue
if (override_policies is not None and
policy.idx not in override_policies):
continue
# ensure rings are loaded for policy
self.load_object_ring(policy)
jobs += self.build_replication_jobs(
policy, ips, override_devices=override_devices,
override_partitions=override_partitions)
random.shuffle(jobs)
if self.handoffs_first:
# Move the handoff parts to the front of the list
jobs.sort(key=lambda job: not job['delete'])
self.job_count = len(jobs)
return jobs
def replicate(self, override_devices=None, override_partitions=None,
override_policies=None, start_time=None):
"""Run a replication pass"""
if start_time is None:
start_time = time.time()
self.start = start_time
self.last_replication_count = 0
self.replication_cycle = (self.replication_cycle + 1) % 10
self.partition_times = []
self.all_devs_info = set()
self.handoffs_remaining = 0
stats = eventlet.spawn(self.heartbeat)
eventlet.sleep() # Give spawns a cycle
current_nodes = None
dev_stats = None
num_jobs = 0
try:
self.run_pool = GreenPool(size=self.concurrency)
jobs = self.collect_jobs(override_devices=override_devices,
override_partitions=override_partitions,
override_policies=override_policies)
for job in jobs:
dev_stats = self.stats_for_dev[job['device']]
num_jobs += 1
current_nodes = job['nodes']
try:
check_drive(self.devices_dir, job['device'],
self.mount_check)
except ValueError as err:
dev_stats.add_failure_stats([
(failure_dev['replication_ip'], failure_dev['device'])
for failure_dev in job['nodes']])
self.logger.warning("%s", err)
continue
if self.handoffs_first and not job['delete']:
                    # in handoffs_first mode, we won't process primary
                    # partitions until the rebalance has completed!
if self.handoffs_remaining:
self.logger.warning(
"Handoffs first mode still has handoffs "
"remaining. Aborting current "
"replication pass.")
break
if not self.check_ring(job['policy'].object_ring):
self.logger.info("Ring change detected. Aborting "
"current replication pass.")
return
try:
if isfile(job['path']):
# Clean up any (probably zero-byte) files where a
# partition should be.
self.logger.warning(
'Removing partition directory '
'which was a file: %s', job['path'])
os.remove(job['path'])
continue
except OSError:
continue
if job['delete']:
self.run_pool.spawn(self.update_deleted, job)
else:
self.run_pool.spawn(self.update, job)
current_nodes = None
self.run_pool.waitall()
except (Exception, Timeout) as err:
if dev_stats:
if current_nodes:
dev_stats.add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in current_nodes])
else:
dev_stats.add_failure_stats(self.all_devs_info)
self.logger.exception(
"Exception in top-level replication loop: %s", err)
finally:
stats.kill()
self.stats_line()
def update_recon(self, total, end_time, override_devices):
# Called at the end of a replication pass to update recon stats.
if self.is_multiprocess_worker:
# If it weren't for the failure_nodes field, we could do this as
# a bunch of shared memory using multiprocessing.Value, which
# would be nice because it'd avoid dealing with existing data
# during an upgrade.
update = {
'object_replication_per_disk': {
od: {'replication_stats':
self.stats_for_dev[od].to_recon(),
'replication_time': total,
'replication_last': end_time,
'object_replication_time': total,
'object_replication_last': end_time}
for od in override_devices}}
else:
update = {'replication_stats': self.total_stats.to_recon(),
'replication_time': total,
'replication_last': end_time,
'object_replication_time': total,
'object_replication_last': end_time}
dump_recon_cache(update, self.rcache, self.logger)
def aggregate_recon_update(self):
per_disk_stats = load_recon_cache(self.rcache).get(
'object_replication_per_disk', {})
recon_update = {}
min_repl_last = float('inf')
min_repl_time = float('inf')
# If every child has reported some stats, then aggregate things.
if all(ld in per_disk_stats for ld in self.all_local_devices):
aggregated = Stats()
for device_name, data in per_disk_stats.items():
aggregated += Stats.from_recon(data['replication_stats'])
min_repl_time = min(
min_repl_time, data['object_replication_time'])
min_repl_last = min(
min_repl_last, data['object_replication_last'])
recon_update['replication_stats'] = aggregated.to_recon()
recon_update['replication_last'] = min_repl_last
recon_update['replication_time'] = min_repl_time
recon_update['object_replication_last'] = min_repl_last
recon_update['object_replication_time'] = min_repl_time
# Clear out entries for old local devices that we no longer have
devices_to_remove = set(per_disk_stats) - set(self.all_local_devices)
if devices_to_remove:
recon_update['object_replication_per_disk'] = {
dtr: {} for dtr in devices_to_remove}
return recon_update
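    # Illustrative sketch (comments only): once every local device's worker
    # has reported, per-disk recon entries such as
    #
    #   {'sda': {'replication_stats': {...}, 'object_replication_time': 4.0,
    #            'object_replication_last': 1700000100.0, ...},
    #    'sdb': {'replication_stats': {...}, 'object_replication_time': 2.5,
    #            'object_replication_last': 1700000090.0, ...}}
    #
    # aggregate into a single top-level update whose replication_time and
    # replication_last are the per-disk minimums (2.5 and 1700000090.0 here)
    # and whose replication_stats is the field-wise sum of the per-disk
    # Stats.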
def run_once(self, multiprocess_worker_index=None,
have_overrides=False, *args, **kwargs):
if multiprocess_worker_index is not None:
self.is_multiprocess_worker = True
self._emplace_log_prefix(multiprocess_worker_index)
rsync_reaper = eventlet.spawn(self._child_process_reaper)
self._zero_stats()
self.logger.info("Running object replicator in script mode.")
override_opts = parse_override_options(once=True, **kwargs)
devices = override_opts.devices or None
partitions = override_opts.partitions or None
policies = override_opts.policies or None
start_time = time.time()
self.replicate(
override_devices=devices,
override_partitions=partitions,
override_policies=policies,
start_time=start_time)
end_time = time.time()
total = (end_time - start_time) / 60
self.logger.info(
"Object replication complete (once). (%.02f minutes)", total)
# If we've been manually run on a subset of
# policies/devices/partitions, then our recon stats are not
# representative of how replication is doing, so we don't publish
# them.
if self.is_multiprocess_worker:
# The main process checked for overrides and determined that
# there were none
should_update_recon = not have_overrides
else:
# We are single-process, so update recon only if we worked on
# everything
should_update_recon = not (partitions or devices or policies)
if should_update_recon:
self.update_recon(total, end_time, devices)
# Give rsync processes one last chance to exit, then bail out and
# let them be init's problem
self._child_process_reaper_queue.put(None)
rsync_reaper.wait()
def run_forever(self, multiprocess_worker_index=None,
override_devices=None, *args, **kwargs):
if multiprocess_worker_index is not None:
self.is_multiprocess_worker = True
self._emplace_log_prefix(multiprocess_worker_index)
self.logger.info("Starting object replicator in daemon mode.")
eventlet.spawn_n(self._child_process_reaper)
# Run the replicator continually
while True:
self._zero_stats()
self.logger.info("Starting object replication pass.")
# Run the replicator
start = time.time()
self.replicate(override_devices=override_devices)
end = time.time()
total = (end - start) / 60
self.logger.info(
"Object replication complete. (%.02f minutes)", total)
self.update_recon(total, end, override_devices)
self.logger.debug('Replication sleeping for %s seconds.',
self.interval)
sleep(self.interval)
def post_multiprocess_run(self):
# This method is called after run_once using multiple workers.
update = self.aggregate_recon_update()
dump_recon_cache(update, self.rcache, self.logger)
| swift-master | swift/obj/replicator.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from random import random
from time import time
from os.path import join
from collections import defaultdict, deque
from eventlet import sleep, Timeout
from eventlet.greenpool import GreenPool
from swift.common.constraints import AUTO_CREATE_ACCOUNT_PREFIX
from swift.common.daemon import Daemon
from swift.common.internal_client import InternalClient, UnexpectedResponse
from swift.common.utils import get_logger, dump_recon_cache, split_path, \
Timestamp, config_true_value, normalize_delete_at_timestamp, \
RateLimitedIterator, md5
from swift.common.http import HTTP_NOT_FOUND, HTTP_CONFLICT, \
HTTP_PRECONDITION_FAILED
from swift.common.recon import RECON_OBJECT_FILE, DEFAULT_RECON_CACHE_PATH
from swift.container.reconciler import direct_delete_container_entry
MAX_OBJECTS_TO_CACHE = 100000
ASYNC_DELETE_TYPE = 'application/async-deleted'
def build_task_obj(timestamp, target_account, target_container,
target_obj, high_precision=False):
"""
    :return: a task object name in the format
"<timestamp>-<target_account>/<target_container>/<target_obj>"
"""
timestamp = Timestamp(timestamp)
return '%s-%s/%s/%s' % (
normalize_delete_at_timestamp(timestamp, high_precision),
target_account, target_container, target_obj)
def parse_task_obj(task_obj):
"""
    :param task_obj: a task object name in the format
"<timestamp>-<target_account>/<target_container>" +
"/<target_obj>"
    :return: a 4-tuple of (delete_at_time, target_account, target_container,
target_obj)
"""
timestamp, target_path = task_obj.split('-', 1)
timestamp = Timestamp(timestamp)
target_account, target_container, target_obj = \
split_path('/' + target_path, 3, 3, True)
return timestamp, target_account, target_container, target_obj
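# Illustrative sketch (not part of the upstream module): the two helpers above
# round-trip a task object name. The timestamp, account, container and object
# names below are hypothetical.
#
#   build_task_obj(1234567890, 'AUTH_test', 'photos', 'cat.jpg')
#       -> '1234567890-AUTH_test/photos/cat.jpg'
#   parse_task_obj('1234567890-AUTH_test/photos/cat.jpg')
#       -> (Timestamp('1234567890'), 'AUTH_test', 'photos', 'cat.jpg')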
class ObjectExpirer(Daemon):
"""
Daemon that queries the internal hidden task accounts to discover objects
that need to be deleted.
:param conf: The daemon configuration.
"""
log_route = 'object-expirer'
def __init__(self, conf, logger=None, swift=None):
self.conf = conf
self.logger = logger or get_logger(conf, log_route=self.log_route)
self.interval = float(conf.get('interval') or 300)
self.tasks_per_second = float(conf.get('tasks_per_second', 50.0))
self.conf_path = \
self.conf.get('__file__') or '/etc/swift/object-expirer.conf'
        # True if the conf file is 'object-expirer.conf'.
is_legacy_conf = 'expirer' in self.conf_path
# object-expirer.conf supports only legacy queue
self.dequeue_from_legacy = \
True if is_legacy_conf else \
config_true_value(conf.get('dequeue_from_legacy', 'false'))
if is_legacy_conf:
self.ic_conf_path = self.conf_path
else:
self.ic_conf_path = \
self.conf.get('internal_client_conf_path') or \
'/etc/swift/internal-client.conf'
self.read_conf_for_queue_access(swift)
self.report_interval = float(conf.get('report_interval') or 300)
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
self.recon_cache_path = conf.get('recon_cache_path',
DEFAULT_RECON_CACHE_PATH)
self.rcache = join(self.recon_cache_path, RECON_OBJECT_FILE)
self.concurrency = int(conf.get('concurrency', 1))
if self.concurrency < 1:
raise ValueError("concurrency must be set to at least 1")
# This option defines how long an un-processable expired object
# marker will be retried before it is abandoned. It is not coupled
# with the tombstone reclaim age in the consistency engine.
self.reclaim_age = int(conf.get('reclaim_age', 604800))
def read_conf_for_queue_access(self, swift):
if self.conf.get('auto_create_account_prefix'):
self.logger.warning('Option auto_create_account_prefix is '
'deprecated. Configure '
'auto_create_account_prefix under the '
'swift-constraints section of '
'swift.conf. This option will '
'be ignored in a future release.')
auto_create_account_prefix = \
self.conf['auto_create_account_prefix']
else:
auto_create_account_prefix = AUTO_CREATE_ACCOUNT_PREFIX
self.expiring_objects_account = auto_create_account_prefix + \
(self.conf.get('expiring_objects_account_name') or
'expiring_objects')
        # This is for a common parameter with the general task queue in the
        # future
self.task_container_prefix = ''
request_tries = int(self.conf.get('request_tries') or 3)
self.swift = swift or InternalClient(
self.ic_conf_path, 'Swift Object Expirer', request_tries,
use_replication_network=True,
global_conf={'log_name': '%s-ic' % self.conf.get(
'log_name', self.log_route)})
self.processes = int(self.conf.get('processes', 0))
self.process = int(self.conf.get('process', 0))
def report(self, final=False):
"""
        Emits a log line report of the progress so far, or of the final
        progress if final=True.
:param final: Set to True for the last report once the expiration pass
has completed.
"""
if final:
elapsed = time() - self.report_first_time
self.logger.info(
'Pass completed in %(time)ds; %(objects)d objects expired', {
'time': elapsed, 'objects': self.report_objects})
dump_recon_cache({'object_expiration_pass': elapsed,
'expired_last_pass': self.report_objects},
self.rcache, self.logger)
elif time() - self.report_last_time >= self.report_interval:
elapsed = time() - self.report_first_time
self.logger.info(
'Pass so far %(time)ds; %(objects)d objects expired', {
'time': elapsed, 'objects': self.report_objects})
self.report_last_time = time()
def parse_task_obj(self, task_obj):
return parse_task_obj(task_obj)
def round_robin_order(self, task_iter):
"""
        Change the order of expiration tasks to avoid continuously deleting
        objects from the same target container.
:param task_iter: An iterator of delete-task dicts, which should each
have a ``target_path`` key.
"""
obj_cache = defaultdict(deque)
cnt = 0
def dump_obj_cache_in_round_robin():
while obj_cache:
for key in sorted(obj_cache):
if obj_cache[key]:
yield obj_cache[key].popleft()
else:
del obj_cache[key]
for delete_task in task_iter:
try:
target_account, target_container, _junk = \
split_path('/' + delete_task['target_path'], 3, 3, True)
cache_key = '%s/%s' % (target_account, target_container)
# sanity
except ValueError:
                self.logger.error('Unexpected error handling task %r' %
                                  delete_task)
continue
obj_cache[cache_key].append(delete_task)
cnt += 1
if cnt > MAX_OBJECTS_TO_CACHE:
for task in dump_obj_cache_in_round_robin():
yield task
cnt = 0
for task in dump_obj_cache_in_round_robin():
yield task
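    # Illustrative sketch (not part of the upstream method): given
    # hypothetical tasks for two target containers A and B arriving in the
    # order [A1, A2, B1, A3, B2], round_robin_order() yields A1, B1, A2, B2,
    # A3, spreading consecutive deletes across containers instead of
    # hammering a single one.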
def hash_mod(self, name, divisor):
"""
:param name: a task object name
:param divisor: a divisor number
:return: an integer to decide which expirer is assigned to the task
"""
if not isinstance(name, bytes):
name = name.encode('utf8')
# md5 is only used for shuffling mod
return int(md5(
name, usedforsecurity=False).hexdigest(), 16) % divisor
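    # Illustrative note (not part of the upstream method): hash_mod()
    # deterministically maps a name to one of `divisor` buckets, e.g.
    # hash_mod(name, 3) always returns the same value in {0, 1, 2} for a
    # given name; this is how each task is pinned to exactly one expirer.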
def iter_task_accounts_to_expire(self):
"""
Yields (task_account, my_index, divisor).
        my_index and divisor are used to assign each task object to exactly
        one expirer. In the expirer method, the expirer calculates the
        assigned index for each expiration task. The assigned index is in
        [0, 1, ..., divisor - 1]. Expirers have their own "my_index" for each
        task_account. The expirer whose "my_index" equals the assigned index
        executes the task. Because each expirer has a different "my_index",
        every task object is executed by only one expirer.
"""
if self.processes > 0:
yield self.expiring_objects_account, self.process, self.processes
else:
yield self.expiring_objects_account, 0, 1
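    # Illustrative note (not part of the upstream method): with a
    # hypothetical config of processes=3 and process=1, this yields
    # (self.expiring_objects_account, 1, 3), so this daemon only executes
    # tasks whose hash_mod(...) result is 1 while the other two expirers
    # cover 0 and 2.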
def delete_at_time_of_task_container(self, task_container):
"""
get delete_at timestamp from task_container name
"""
# task_container name is timestamp
return Timestamp(task_container)
def iter_task_containers_to_expire(self, task_account):
"""
        Yields task_container names under the task_account whose delete-at
        timestamp has already passed.
"""
for c in self.swift.iter_containers(task_account,
prefix=self.task_container_prefix):
task_container = str(c['name'])
timestamp = self.delete_at_time_of_task_container(task_container)
if timestamp > Timestamp.now():
break
yield task_container
def iter_task_to_expire(self, task_account_container_list,
my_index, divisor):
"""
        Yields a task expiration info dict for each task, consisting of
        task_account, task_container, task_object, target_path,
        delete_timestamp and is_async_delete.
"""
for task_account, task_container in task_account_container_list:
container_empty = True
for o in self.swift.iter_objects(task_account, task_container):
container_empty = False
if six.PY2:
task_object = o['name'].encode('utf8')
else:
task_object = o['name']
try:
delete_timestamp, target_account, target_container, \
target_object = parse_task_obj(task_object)
except ValueError:
                    self.logger.exception('Unexpected error handling task %r' %
                                          task_object)
continue
if delete_timestamp > Timestamp.now():
                    # we shouldn't yield objects that haven't reached
                    # their expiration date yet.
break
                # Only one expirer daemon is assigned to each task
if self.hash_mod('%s/%s' % (task_container, task_object),
divisor) != my_index:
continue
is_async = o.get('content_type') == ASYNC_DELETE_TYPE
yield {'task_account': task_account,
'task_container': task_container,
'task_object': task_object,
'target_path': '/'.join([
target_account, target_container, target_object]),
'delete_timestamp': delete_timestamp,
'is_async_delete': is_async}
if container_empty:
try:
self.swift.delete_container(
task_account, task_container,
acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
except (Exception, Timeout) as err:
self.logger.exception(
'Exception while deleting container %(account)s '
'%(container)s %(err)s', {
'account': task_account,
'container': task_container, 'err': str(err)})
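    # Illustrative sketch (not part of the upstream method): a yielded task
    # dict looks roughly like this (all values hypothetical):
    #
    #   {'task_account': '.expiring_objects',
    #    'task_container': '1234567890',
    #    'task_object': '1234567890-AUTH_test/photos/cat.jpg',
    #    'target_path': 'AUTH_test/photos/cat.jpg',
    #    'delete_timestamp': Timestamp('1234567890'),
    #    'is_async_delete': False}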
def run_once(self, *args, **kwargs):
"""
Executes a single pass, looking for objects to expire.
:param args: Extra args to fulfill the Daemon interface; this daemon
has no additional args.
:param kwargs: Extra keyword args to fulfill the Daemon interface; this
daemon accepts processes and process keyword args.
These will override the values from the config file if
provided.
"""
        # This if-clause will be removed when the general task queue feature
        # is implemented.
if not self.dequeue_from_legacy:
self.logger.info('This node is not configured to dequeue tasks '
'from the legacy queue. This node will '
'not process any expiration tasks. At least '
'one node in your cluster must be configured '
'with dequeue_from_legacy == true.')
return
self.get_process_values(kwargs)
pool = GreenPool(self.concurrency)
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
try:
self.logger.debug('Run begin')
for task_account, my_index, divisor in \
self.iter_task_accounts_to_expire():
container_count, obj_count = \
self.swift.get_account_info(task_account)
                # the task account is skipped if there are no task containers
if not container_count:
continue
self.logger.info(
'Pass beginning for task account %(account)s; '
'%(container_count)s possible containers; '
'%(obj_count)s possible objects', {
'account': task_account,
'container_count': container_count,
'obj_count': obj_count})
task_account_container_list = \
[(task_account, task_container) for task_container in
self.iter_task_containers_to_expire(task_account)]
                # delete_task_iter is a generator that yields a dict of
                # task_account, task_container, task_object, delete_timestamp
                # and target_path, used to delete the actual object and pop
                # the task from the queue.
delete_task_iter = \
self.round_robin_order(self.iter_task_to_expire(
task_account_container_list, my_index, divisor))
rate_limited_iter = RateLimitedIterator(
delete_task_iter,
elements_per_second=self.tasks_per_second)
for delete_task in rate_limited_iter:
pool.spawn_n(self.delete_object, **delete_task)
pool.waitall()
self.logger.debug('Run end')
self.report(final=True)
except (Exception, Timeout):
self.logger.exception('Unhandled exception')
def run_forever(self, *args, **kwargs):
"""
Executes passes forever, looking for objects to expire.
:param args: Extra args to fulfill the Daemon interface; this daemon
has no additional args.
:param kwargs: Extra keyword args to fulfill the Daemon interface; this
daemon has no additional keyword args.
"""
sleep(random() * self.interval)
while True:
begin = time()
try:
self.run_once(*args, **kwargs)
except (Exception, Timeout):
self.logger.exception('Unhandled exception')
elapsed = time() - begin
if elapsed < self.interval:
sleep(random() * (self.interval - elapsed))
def get_process_values(self, kwargs):
"""
Sets self.processes and self.process from the kwargs if those
values exist, otherwise, leaves those values as they were set in
the config file.
:param kwargs: Keyword args passed into the run_forever(), run_once()
methods. They have values specified on the command
line when the daemon is run.
"""
if kwargs.get('processes') is not None:
self.processes = int(kwargs['processes'])
if kwargs.get('process') is not None:
self.process = int(kwargs['process'])
if self.process < 0:
raise ValueError(
'process must be an integer greater than or equal to 0')
if self.processes < 0:
raise ValueError(
'processes must be an integer greater than or equal to 0')
if self.processes and self.process >= self.processes:
raise ValueError(
'process must be less than processes')
def delete_object(self, target_path, delete_timestamp,
task_account, task_container, task_object,
is_async_delete):
start_time = time()
try:
try:
self.delete_actual_object(target_path, delete_timestamp,
is_async_delete)
except UnexpectedResponse as err:
if err.resp.status_int not in {HTTP_NOT_FOUND,
HTTP_PRECONDITION_FAILED}:
raise
if float(delete_timestamp) > time() - self.reclaim_age:
# we'll have to retry the DELETE later
raise
self.pop_queue(task_account, task_container, task_object)
self.report_objects += 1
self.logger.increment('objects')
except UnexpectedResponse as err:
self.logger.increment('errors')
self.logger.error(
'Unexpected response while deleting object '
'%(account)s %(container)s %(obj)s: %(err)s' % {
'account': task_account, 'container': task_container,
'obj': task_object, 'err': str(err.resp.status_int)})
self.logger.debug(err.resp.body)
except (Exception, Timeout) as err:
self.logger.increment('errors')
self.logger.exception(
'Exception while deleting object %(account)s %(container)s '
'%(obj)s %(err)s' % {
'account': task_account, 'container': task_container,
'obj': task_object, 'err': str(err)})
self.logger.timing_since('timing', start_time)
self.report()
def pop_queue(self, task_account, task_container, task_object):
"""
Issue a delete object request to the task_container for the expiring
object queue entry.
"""
direct_delete_container_entry(self.swift.container_ring, task_account,
task_container, task_object)
def delete_actual_object(self, actual_obj, timestamp, is_async_delete):
"""
Deletes the end-user object indicated by the actual object name given
'<account>/<container>/<object>' if and only if the X-Delete-At value
of the object is exactly the timestamp given.
:param actual_obj: The name of the end-user object to delete:
'<account>/<container>/<object>'
:param timestamp: The swift.common.utils.Timestamp instance the
X-Delete-At value must match to perform the actual
delete.
:param is_async_delete: False if the object should be deleted because
of "normal" expiration, or True if it should
be async-deleted.
:raises UnexpectedResponse: if the delete was unsuccessful and
should be retried later
"""
if is_async_delete:
headers = {'X-Timestamp': timestamp.normal}
acceptable_statuses = (2, HTTP_CONFLICT, HTTP_NOT_FOUND)
else:
headers = {'X-Timestamp': timestamp.normal,
'X-If-Delete-At': timestamp.normal,
'X-Backend-Clean-Expiring-Object-Queue': 'no'}
acceptable_statuses = (2, HTTP_CONFLICT)
self.swift.delete_object(*split_path('/' + actual_obj, 3, 3, True),
headers=headers,
acceptable_statuses=acceptable_statuses)
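    # Illustrative note (not part of the upstream method): for a "normal"
    # expiration the internal client issues a DELETE for the target object
    # with X-If-Delete-At set to the task timestamp, so the delete only
    # succeeds while the object's X-Delete-At still matches; async deletes
    # skip that check and additionally accept 404 responses.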
| swift-master | swift/obj/expirer.py |
# Copyright (c) 2010-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Disk File Interface for the Swift Object Server
The `DiskFile`, `DiskFileWriter` and `DiskFileReader` classes combined define
the on-disk abstraction layer for supporting the object server REST API
interfaces (excluding `REPLICATE`). Other implementations wishing to provide
an alternative backend for the object server must implement the three
classes. An example alternative implementation can be found in the
`mem_server.py` and `mem_diskfile.py` modules alongside this one.
The `DiskFileManager` is a reference implementation specific class and is not
part of the backend API.
The remaining methods in this module are considered implementation specific and
are also not considered part of the backend API.
"""
import six.moves.cPickle as pickle
import binascii
import copy
import errno
import fcntl
import json
import os
import re
import time
import uuid
import logging
import traceback
import xattr
from os.path import basename, dirname, exists, join, splitext
from random import shuffle
from tempfile import mkstemp
from contextlib import contextmanager
from collections import defaultdict
from datetime import timedelta
from eventlet import Timeout, tpool
from eventlet.hubs import trampoline
import six
from pyeclib.ec_iface import ECDriverError, ECInvalidFragmentMetadata, \
ECBadFragmentChecksum, ECInvalidParameter
from swift.common.constraints import check_drive
from swift.common.request_helpers import is_sys_meta
from swift.common.utils import mkdirs, Timestamp, \
storage_directory, hash_path, renamer, fallocate, fsync, fdatasync, \
fsync_dir, drop_buffer_cache, lock_path, write_pickle, \
config_true_value, listdir, split_path, remove_file, \
get_md5_socket, F_SETPIPE_SZ, decode_timestamps, encode_timestamps, \
MD5_OF_EMPTY_STRING, link_fd_to_path, \
O_TMPFILE, makedirs_count, replace_partition_in_path, remove_directory, \
md5, is_file_older, non_negative_float
from swift.common.splice import splice, tee
from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
DiskFileCollision, DiskFileNoSpace, DiskFileDeviceUnavailable, \
DiskFileDeleted, DiskFileError, DiskFileNotOpen, PathNotDir, \
ReplicationLockTimeout, DiskFileExpired, DiskFileXattrNotSupported, \
DiskFileBadMetadataChecksum, PartitionLockTimeout
from swift.common.swob import multi_range_iterator
from swift.common.storage_policy import (
get_policy_string, split_policy_string, PolicyError, POLICIES,
REPL_POLICY, EC_POLICY)
PICKLE_PROTOCOL = 2
DEFAULT_RECLAIM_AGE = timedelta(weeks=1).total_seconds()
DEFAULT_COMMIT_WINDOW = 60.0
HASH_FILE = 'hashes.pkl'
HASH_INVALIDATIONS_FILE = 'hashes.invalid'
METADATA_KEY = b'user.swift.metadata'
METADATA_CHECKSUM_KEY = b'user.swift.metadata_checksum'
DROP_CACHE_WINDOW = 1024 * 1024
# These are system-set metadata keys that cannot be changed with a POST.
# They should be lowercase.
RESERVED_DATAFILE_META = {'content-length', 'deleted', 'etag'}
DATAFILE_SYSTEM_META = {'x-static-large-object'}
DATADIR_BASE = 'objects'
ASYNCDIR_BASE = 'async_pending'
TMP_BASE = 'tmp'
MIN_TIME_UPDATE_AUDITOR_STATUS = 60
# This matches rsync tempfiles, like ".<timestamp>.data.Xy095a"
RE_RSYNC_TEMPFILE = re.compile(r'^\..*\.([a-zA-Z0-9_]){6}$')
def get_data_dir(policy_or_index):
'''
Get the data dir for the given policy.
:param policy_or_index: ``StoragePolicy`` instance, or an index (string or
int); if None, the legacy Policy-0 is assumed.
:returns: ``objects`` or ``objects-<N>`` as appropriate
'''
return get_policy_string(DATADIR_BASE, policy_or_index)
def get_async_dir(policy_or_index):
'''
Get the async dir for the given policy.
:param policy_or_index: ``StoragePolicy`` instance, or an index (string or
int); if None, the legacy Policy-0 is assumed.
:returns: ``async_pending`` or ``async_pending-<N>`` as appropriate
'''
return get_policy_string(ASYNCDIR_BASE, policy_or_index)
def get_tmp_dir(policy_or_index):
'''
Get the temp dir for the given policy.
:param policy_or_index: ``StoragePolicy`` instance, or an index (string or
int); if None, the legacy Policy-0 is assumed.
:returns: ``tmp`` or ``tmp-<N>`` as appropriate
'''
return get_policy_string(TMP_BASE, policy_or_index)
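# Illustrative note (not part of the upstream module): these helpers append
# the policy index for non-legacy policies, e.g. (indexes hypothetical)
# get_data_dir(0) -> 'objects', get_data_dir(2) -> 'objects-2',
# get_async_dir(2) -> 'async_pending-2' and get_tmp_dir(2) -> 'tmp-2'.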
def _get_filename(fd):
"""
    Helper function to get the file name from a file descriptor or filename.
:param fd: file descriptor or filename.
:returns: the filename.
"""
if hasattr(fd, 'name'):
# fd object
return fd.name
# fd is a filename
return fd
def _encode_metadata(metadata):
"""
UTF8 encode any unicode keys or values in given metadata dict.
:param metadata: a dict
"""
if six.PY2:
def encode_str(item):
if isinstance(item, six.text_type):
return item.encode('utf8')
return item
else:
def encode_str(item):
if isinstance(item, six.text_type):
return item.encode('utf8', 'surrogateescape')
return item
return dict(((encode_str(k), encode_str(v)) for k, v in metadata.items()))
def _decode_metadata(metadata, metadata_written_by_py3):
"""
Given a metadata dict from disk, convert keys and values to native strings.
:param metadata: a dict
:param metadata_written_by_py3:
"""
if six.PY2:
def to_str(item, is_name=False):
# For years, py2 and py3 handled non-ascii metadata differently;
# see https://bugs.launchpad.net/swift/+bug/2012531
if metadata_written_by_py3 and not is_name:
# do our best to read new-style data replicated from a py3 node
item = item.decode('utf8').encode('latin1')
if isinstance(item, six.text_type):
return item.encode('utf8')
return item
else:
def to_str(item, is_name=False):
# For years, py2 and py3 handled non-ascii metadata differently;
# see https://bugs.launchpad.net/swift/+bug/2012531
if not metadata_written_by_py3 and isinstance(item, bytes) \
and not is_name:
# do our best to read old py2 data
item = item.decode('latin1')
if isinstance(item, six.binary_type):
return item.decode('utf8', 'surrogateescape')
return item
return {to_str(k): to_str(v, k == b'name') for k, v in metadata.items()}
def read_metadata(fd, add_missing_checksum=False):
"""
Helper function to read the pickled metadata from an object file.
:param fd: file descriptor or filename to load the metadata from
:param add_missing_checksum: if set and checksum is missing, add it
:returns: dictionary of metadata
"""
metadata = b''
key = 0
try:
while True:
metadata += xattr.getxattr(
fd, METADATA_KEY + str(key or '').encode('ascii'))
key += 1
except (IOError, OSError) as e:
if errno.errorcode.get(e.errno) in ('ENOTSUP', 'EOPNOTSUPP'):
msg = "Filesystem at %s does not support xattr"
logging.exception(msg, _get_filename(fd))
raise DiskFileXattrNotSupported(e)
if e.errno == errno.ENOENT:
raise DiskFileNotExist()
# TODO: we might want to re-raise errors that don't denote a missing
# xattr here. Seems to be ENODATA on linux and ENOATTR on BSD/OSX.
metadata_checksum = None
try:
metadata_checksum = xattr.getxattr(fd, METADATA_CHECKSUM_KEY)
except (IOError, OSError):
# All the interesting errors were handled above; the only thing left
# here is ENODATA / ENOATTR to indicate that this attribute doesn't
# exist. This is fine; it just means that this object predates the
# introduction of metadata checksums.
if add_missing_checksum:
new_checksum = (md5(metadata, usedforsecurity=False)
.hexdigest().encode('ascii'))
try:
xattr.setxattr(fd, METADATA_CHECKSUM_KEY, new_checksum)
except (IOError, OSError) as e:
logging.error("Error adding metadata: %s" % e)
if metadata_checksum:
computed_checksum = (md5(metadata, usedforsecurity=False)
.hexdigest().encode('ascii'))
if metadata_checksum != computed_checksum:
raise DiskFileBadMetadataChecksum(
"Metadata checksum mismatch for %s: "
"stored checksum='%s', computed='%s'" % (
fd, metadata_checksum, computed_checksum))
metadata_written_by_py3 = (b'_codecs\nencode' in metadata[:32])
# strings are utf-8 encoded when written, but have not always been
# (see https://bugs.launchpad.net/swift/+bug/1678018) so encode them again
# when read
if six.PY2:
metadata = pickle.loads(metadata)
else:
metadata = pickle.loads(metadata, encoding='bytes')
return _decode_metadata(metadata, metadata_written_by_py3)
def write_metadata(fd, metadata, xattr_size=65536):
"""
Helper function to write pickled metadata for an object file.
:param fd: file descriptor or filename to write the metadata
:param metadata: metadata to write
"""
metastr = pickle.dumps(_encode_metadata(metadata), PICKLE_PROTOCOL)
metastr_md5 = (
md5(metastr, usedforsecurity=False).hexdigest().encode('ascii'))
key = 0
try:
while metastr:
xattr.setxattr(fd, METADATA_KEY + str(key or '').encode('ascii'),
metastr[:xattr_size])
metastr = metastr[xattr_size:]
key += 1
xattr.setxattr(fd, METADATA_CHECKSUM_KEY, metastr_md5)
except IOError as e:
# errno module doesn't always have both of these, hence the ugly
# check
if errno.errorcode.get(e.errno) in ('ENOTSUP', 'EOPNOTSUPP'):
msg = "Filesystem at %s does not support xattr"
logging.exception(msg, _get_filename(fd))
raise DiskFileXattrNotSupported(e)
elif e.errno in (errno.ENOSPC, errno.EDQUOT):
msg = "No space left on device for %s" % _get_filename(fd)
logging.exception(msg)
raise DiskFileNoSpace()
raise
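# Illustrative note (not part of the upstream module): write_metadata() stores
# the pickled metadata dict in xattrs of the object file, split into
# 65536-byte chunks under 'user.swift.metadata', 'user.swift.metadata1',
# 'user.swift.metadata2', ..., with an md5 of the full pickle stored under
# 'user.swift.metadata_checksum'; read_metadata() reassembles and verifies it.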
def extract_policy(obj_path):
"""
Extracts the policy for an object (based on the name of the objects
directory) given the device-relative path to the object. Returns None in
the event that the path is malformed in some way.
The device-relative path is everything after the mount point; for example:
/srv/node/d42/objects-5/30/179/
485dc017205a81df3af616d917c90179/1401811134.873649.data
would have device-relative path:
objects-5/30/179/485dc017205a81df3af616d917c90179/1401811134.873649.data
:param obj_path: device-relative path of an object, or the full path
:returns: a :class:`~swift.common.storage_policy.BaseStoragePolicy` or None
"""
try:
obj_portion = obj_path[obj_path.rindex(DATADIR_BASE):]
obj_dirname = obj_portion[:obj_portion.index('/')]
except Exception:
return None
try:
base, policy = split_policy_string(obj_dirname)
except PolicyError:
return None
return policy
def quarantine_renamer(device_path, corrupted_file_path):
"""
In the case that a file is corrupted, move it to a quarantined
area to allow replication to fix it.
:params device_path: The path to the device the corrupted file is on.
:params corrupted_file_path: The path to the file you want quarantined.
:returns: path (str) of directory the file was moved to
:raises OSError: re-raises non errno.EEXIST / errno.ENOTEMPTY
exceptions from rename
"""
policy = extract_policy(corrupted_file_path)
if policy is None:
# TODO: support a quarantine-unknown location
policy = POLICIES.legacy
from_dir = dirname(corrupted_file_path)
to_dir = join(device_path, 'quarantined',
get_data_dir(policy),
basename(from_dir))
if len(basename(from_dir)) == 3:
# quarantining whole suffix
invalidate_hash(from_dir)
else:
invalidate_hash(dirname(from_dir))
try:
renamer(from_dir, to_dir, fsync=False)
except OSError as e:
if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
raise
to_dir = "%s-%s" % (to_dir, uuid.uuid4().hex)
renamer(from_dir, to_dir, fsync=False)
return to_dir
def valid_suffix(value):
if not isinstance(value, six.string_types) or len(value) != 3:
return False
return all(c in '0123456789abcdef' for c in value)
def read_hashes(partition_dir):
"""
Read the existing hashes.pkl
:returns: a dict, the suffix hashes (if any), the key 'valid' will be False
if hashes.pkl is corrupt, cannot be read or does not exist
"""
hashes_file = join(partition_dir, HASH_FILE)
hashes = {'valid': False}
try:
with open(hashes_file, 'rb') as hashes_fp:
pickled_hashes = hashes_fp.read()
except (IOError, OSError):
pass
else:
try:
hashes = pickle.loads(pickled_hashes)
except Exception:
# pickle.loads() can raise a wide variety of exceptions when
# given invalid input depending on the way in which the
# input is invalid.
pass
# Check for corrupted data that could break os.listdir()
if not all(valid_suffix(key) or key in ('valid', 'updated')
for key in hashes):
return {'valid': False}
# hashes.pkl w/o valid updated key is "valid" but "forever old"
hashes.setdefault('valid', True)
hashes.setdefault('updated', -1)
return hashes
def write_hashes(partition_dir, hashes):
"""
Write hashes to hashes.pkl
The updated key is added to hashes before it is written.
"""
hashes_file = join(partition_dir, HASH_FILE)
    # The 'valid' key should always be set by the caller; however, if
    # there's a bug, defaulting to invalid is safest
hashes.setdefault('valid', False)
hashes['updated'] = time.time()
write_pickle(hashes, hashes_file, partition_dir, PICKLE_PROTOCOL)
def consolidate_hashes(partition_dir):
"""
Take what's in hashes.pkl and hashes.invalid, combine them, write the
result back to hashes.pkl, and clear out hashes.invalid.
:param partition_dir: absolute path to partition dir containing hashes.pkl
and hashes.invalid
:returns: a dict, the suffix hashes (if any), the key 'valid' will be False
if hashes.pkl is corrupt, cannot be read or does not exist
"""
invalidations_file = join(partition_dir, HASH_INVALIDATIONS_FILE)
with lock_path(partition_dir):
hashes = read_hashes(partition_dir)
found_invalidation_entry = False
try:
with open(invalidations_file, 'r') as inv_fh:
for line in inv_fh:
found_invalidation_entry = True
suffix = line.strip()
hashes[suffix] = None
except (IOError, OSError) as e:
if e.errno != errno.ENOENT:
raise
if found_invalidation_entry:
write_hashes(partition_dir, hashes)
# Now that all the invalidations are reflected in hashes.pkl, it's
# safe to clear out the invalidations file.
with open(invalidations_file, 'wb') as inv_fh:
pass
return hashes
def invalidate_hash(suffix_dir):
"""
Invalidates the hash for a suffix_dir in the partition's hashes file.
:param suffix_dir: absolute path to suffix dir whose hash needs
invalidating
"""
suffix = basename(suffix_dir)
partition_dir = dirname(suffix_dir)
invalidations_file = join(partition_dir, HASH_INVALIDATIONS_FILE)
if not isinstance(suffix, bytes):
suffix = suffix.encode('utf-8')
with lock_path(partition_dir), open(invalidations_file, 'ab') as inv_fh:
inv_fh.write(suffix + b"\n")
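# Illustrative sketch (not part of the upstream module), with hypothetical
# paths: invalidate_hash('/srv/node/sda/objects/1024/abc') appends the line
# 'abc' to /srv/node/sda/objects/1024/hashes.invalid; a later
# consolidate_hashes() on that partition dir folds the entry into hashes.pkl
# by setting hashes['abc'] = None so the suffix gets rehashed.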
def relink_paths(target_path, new_target_path, ignore_missing=True):
"""
Hard-links a file located in ``target_path`` using the second path
``new_target_path``. Creates intermediate directories if required.
:param target_path: current absolute filename
:param new_target_path: new absolute filename for the hardlink
:param ignore_missing: if True then no exception is raised if the link
could not be made because ``target_path`` did not exist, otherwise an
OSError will be raised.
:raises: OSError if the hard link could not be created, unless the intended
        hard link already exists or the ``target_path`` does not exist and
        ``ignore_missing`` is True.
:returns: True if the link was created by the call to this method, False
otherwise.
"""
link_created = False
if target_path != new_target_path:
new_target_dir = os.path.dirname(new_target_path)
try:
os.makedirs(new_target_dir)
except OSError as err:
if err.errno != errno.EEXIST:
raise
try:
os.link(target_path, new_target_path)
link_created = True
except OSError as err:
# there are some circumstances in which it may be ok that the
# attempted link failed
ok = False
if err.errno == errno.ENOENT:
# this is ok if the *target* path doesn't exist anymore
ok = not os.path.exists(target_path) and ignore_missing
if err.errno == errno.EEXIST:
# this is ok *if* the intended link has already been made
try:
orig_stat = os.stat(target_path)
except OSError as sub_err:
# this is ok: the *target* path doesn't exist anymore
ok = sub_err.errno == errno.ENOENT and ignore_missing
else:
try:
new_stat = os.stat(new_target_path)
ok = new_stat.st_ino == orig_stat.st_ino
except OSError:
# squash this exception; the original will be raised
pass
if not ok:
raise err
return link_created
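# Illustrative sketch (not part of the upstream module), with hypothetical
# paths: relink_paths('/srv/node/sda/objects/1024/abc/<hash>/t.data',
#                     '/srv/node/sda/objects/2048/abc/<hash>/t.data')
# creates the new partition directories as needed and hard-links the existing
# .data file into them, returning True only if this call created the link.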
def get_part_path(dev_path, policy, partition):
"""
Given the device path, policy, and partition, returns the full
path to the partition
"""
return os.path.join(dev_path, get_data_dir(policy), str(partition))
class AuditLocation(object):
"""
Represents an object location to be audited.
Other than being a bucket of data, the only useful thing this does is
stringify to a filesystem path so the auditor's logs look okay.
"""
def __init__(self, path, device, partition, policy):
self.path, self.device, self.partition, self.policy = (
path, device, partition, policy)
def __str__(self):
return str(self.path)
def object_audit_location_generator(devices, datadir, mount_check=True,
logger=None, device_dirs=None,
auditor_type="ALL"):
"""
Given a devices path (e.g. "/srv/node"), yield an AuditLocation for all
objects stored under that directory for the given datadir (policy),
if device_dirs isn't set. If device_dirs is set, only yield AuditLocation
for the objects under the entries in device_dirs. The AuditLocation only
knows the path to the hash directory, not to the .data file therein
(if any). This is to avoid a double listdir(hash_dir); the DiskFile object
will always do one, so we don't.
:param devices: parent directory of the devices to be audited
:param datadir: objects directory
    :param mount_check: flag indicating whether a mount check should be
        performed on devices
:param logger: a logger object
:param device_dirs: a list of directories under devices to traverse
:param auditor_type: either ALL or ZBF
"""
if not device_dirs:
device_dirs = listdir(devices)
else:
# remove bogus devices and duplicates from device_dirs
device_dirs = list(
set(listdir(devices)).intersection(set(device_dirs)))
# randomize devices in case of process restart before sweep completed
shuffle(device_dirs)
base, policy = split_policy_string(datadir)
for device in device_dirs:
try:
check_drive(devices, device, mount_check)
except ValueError as err:
if logger:
logger.debug('Skipping: %s', err)
continue
datadir_path = os.path.join(devices, device, datadir)
if not os.path.exists(datadir_path):
continue
partitions = get_auditor_status(datadir_path, logger, auditor_type)
for pos, partition in enumerate(partitions):
update_auditor_status(datadir_path, logger,
partitions[pos:], auditor_type)
part_path = os.path.join(datadir_path, partition)
try:
suffixes = listdir(part_path)
except OSError as e:
if e.errno not in (errno.ENOTDIR, errno.ENODATA):
raise
continue
for asuffix in suffixes:
suff_path = os.path.join(part_path, asuffix)
try:
hashes = listdir(suff_path)
except OSError as e:
if e.errno not in (errno.ENOTDIR, errno.ENODATA):
raise
continue
for hsh in hashes:
hsh_path = os.path.join(suff_path, hsh)
yield AuditLocation(hsh_path, device, partition,
policy)
update_auditor_status(datadir_path, logger, [], auditor_type)
def get_auditor_status(datadir_path, logger, auditor_type):
auditor_status = os.path.join(
datadir_path, "auditor_status_%s.json" % auditor_type)
status = {}
try:
if six.PY3:
statusfile = open(auditor_status, encoding='utf8')
else:
statusfile = open(auditor_status, 'rb')
with statusfile:
status = statusfile.read()
except (OSError, IOError) as e:
if e.errno != errno.ENOENT and logger:
logger.warning('Cannot read %(auditor_status)s (%(err)s)',
{'auditor_status': auditor_status, 'err': e})
return listdir(datadir_path)
try:
status = json.loads(status)
except ValueError as e:
logger.warning('Loading JSON from %(auditor_status)s failed'
' (%(err)s)',
{'auditor_status': auditor_status, 'err': e})
return listdir(datadir_path)
return status['partitions']
def update_auditor_status(datadir_path, logger, partitions, auditor_type):
status = json.dumps({'partitions': partitions})
if six.PY3:
status = status.encode('utf8')
auditor_status = os.path.join(
datadir_path, "auditor_status_%s.json" % auditor_type)
try:
mtime = os.stat(auditor_status).st_mtime
except OSError:
mtime = 0
recently_updated = (mtime + MIN_TIME_UPDATE_AUDITOR_STATUS) > time.time()
if recently_updated and len(partitions) > 0:
if logger:
logger.debug(
'Skipping the update of recently changed %s' % auditor_status)
return
try:
with open(auditor_status, "wb") as statusfile:
statusfile.write(status)
except (OSError, IOError) as e:
if logger:
logger.warning('Cannot write %(auditor_status)s (%(err)s)',
{'auditor_status': auditor_status, 'err': e})
def clear_auditor_status(devices, datadir, auditor_type="ALL"):
device_dirs = listdir(devices)
for device in device_dirs:
datadir_path = os.path.join(devices, device, datadir)
auditor_status = os.path.join(
datadir_path, "auditor_status_%s.json" % auditor_type)
remove_file(auditor_status)
class DiskFileRouter(object):
def __init__(self, *args, **kwargs):
self.policy_to_manager = {}
for policy in POLICIES:
# create diskfile managers now to provoke any errors
self.policy_to_manager[int(policy)] = \
policy.get_diskfile_manager(*args, **kwargs)
def __getitem__(self, policy):
return self.policy_to_manager[int(policy)]
class BaseDiskFileManager(object):
"""
Management class for devices, providing common place for shared parameters
and methods not provided by the DiskFile class (which primarily services
the object server REST API layer).
The `get_diskfile()` method is how this implementation creates a `DiskFile`
object.
.. note::
This class is reference implementation specific and not part of the
pluggable on-disk backend API.
.. note::
TODO(portante): Not sure what the right name to recommend here, as
"manager" seemed generic enough, though suggestions are welcome.
:param conf: caller provided configuration object
:param logger: caller provided logger
"""
diskfile_cls = None # must be set by subclasses
policy = None # must be set by subclasses
invalidate_hash = staticmethod(invalidate_hash)
consolidate_hashes = staticmethod(consolidate_hashes)
quarantine_renamer = staticmethod(quarantine_renamer)
def __init__(self, conf, logger):
self.logger = logger
self.devices = conf.get('devices', '/srv/node')
self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
self.keep_cache_size = int(conf.get('keep_cache_size', 5242880))
self.bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.reclaim_age = int(conf.get('reclaim_age', DEFAULT_RECLAIM_AGE))
self.commit_window = non_negative_float(conf.get(
'commit_window', DEFAULT_COMMIT_WINDOW))
replication_concurrency_per_device = conf.get(
'replication_concurrency_per_device')
replication_one_per_device = conf.get('replication_one_per_device')
if replication_concurrency_per_device is None \
and replication_one_per_device is not None:
self.logger.warning('Option replication_one_per_device is '
'deprecated and will be removed in a future '
'version. Update your configuration to use '
'option replication_concurrency_per_device.')
if config_true_value(replication_one_per_device):
replication_concurrency_per_device = 1
else:
replication_concurrency_per_device = 0
elif replication_one_per_device is not None:
self.logger.warning('Option replication_one_per_device ignored as '
'replication_concurrency_per_device is '
'defined.')
if replication_concurrency_per_device is None:
self.replication_concurrency_per_device = 1
else:
self.replication_concurrency_per_device = int(
replication_concurrency_per_device)
self.replication_lock_timeout = int(conf.get(
'replication_lock_timeout', 15))
self.use_splice = False
self.pipe_size = None
conf_wants_splice = config_true_value(conf.get('splice', 'no'))
# If the operator wants zero-copy with splice() but we don't have the
# requisite kernel support, complain so they can go fix it.
if conf_wants_splice and not splice.available:
self.logger.warning(
"Use of splice() requested (config says \"splice = %s\"), "
"but the system does not support it. "
"splice() will not be used." % conf.get('splice'))
elif conf_wants_splice and splice.available:
try:
sockfd = get_md5_socket()
os.close(sockfd)
except IOError as err:
# AF_ALG socket support was introduced in kernel 2.6.38; on
# systems with older kernels (or custom-built kernels lacking
# AF_ALG support), we can't use zero-copy.
if err.errno != errno.EAFNOSUPPORT:
raise
self.logger.warning("MD5 sockets not supported. "
"splice() will not be used.")
else:
self.use_splice = True
with open('/proc/sys/fs/pipe-max-size') as f:
max_pipe_size = int(f.read())
self.pipe_size = min(max_pipe_size, self.disk_chunk_size)
self.use_linkat = True
@classmethod
def check_policy(cls, policy):
if policy.policy_type != cls.policy:
raise ValueError('Invalid policy_type: %s' % policy.policy_type)
def make_on_disk_filename(self, timestamp, ext=None,
ctype_timestamp=None, *a, **kw):
"""
Returns filename for given timestamp.
:param timestamp: the object timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
:param ext: an optional string representing a file extension to be
appended to the returned file name
:param ctype_timestamp: an optional content-type timestamp, an instance
of :class:`~swift.common.utils.Timestamp`
:returns: a file name
"""
rv = timestamp.internal
if ext == '.meta' and ctype_timestamp:
# If ctype_timestamp is None then the filename is simply the
# internal form of the timestamp. If ctype_timestamp is not None
# then the difference between the raw values of the two timestamps
# is appended as a hex number, with its sign.
#
# There are two reasons for encoding the content-type timestamp
# in the filename in this way. First, it means that two .meta files
# having the same timestamp but different content-type timestamps
# (and potentially different content-type values) will be distinct
# and therefore will be independently replicated when rsync
# replication is used. That ensures that all nodes end up having
# all content-type values after replication (with the most recent
# value being selected when the diskfile is opened). Second, having
# the content-type encoded in timestamp in the filename makes it
# possible for the on disk file search code to determine that
# timestamp by inspecting only the filename, and not needing to
# open the file and read its xattrs.
rv = encode_timestamps(timestamp, ctype_timestamp, explicit=True)
if ext:
rv = '%s%s' % (rv, ext)
return rv
def parse_on_disk_filename(self, filename, policy):
"""
Parse an on disk file name.
:param filename: the file name including extension
:param policy: storage policy used to store the file
:returns: a dict, with keys for timestamp, ext and ctype_timestamp:
* timestamp is a :class:`~swift.common.utils.Timestamp`
          * ctype_timestamp is a :class:`~swift.common.utils.Timestamp` for
            .meta files that encode a content-type timestamp, otherwise None
* ext is a string, the file extension including the leading dot or
the empty string if the filename has no extension.
Subclasses may override this method to add further keys to the
returned dict.
:raises DiskFileError: if any part of the filename is not able to be
validated.
"""
ts_ctype = None
fname, ext = splitext(filename)
try:
if ext == '.meta':
timestamp, ts_ctype = decode_timestamps(
fname, explicit=True)[:2]
else:
timestamp = Timestamp(fname)
except ValueError:
raise DiskFileError('Invalid Timestamp value in filename %r'
% filename)
return {
'timestamp': timestamp,
'ext': ext,
'ctype_timestamp': ts_ctype
}
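    # Illustrative sketch (not part of the upstream class), with a
    # hypothetical timestamp: the base implementation of
    # parse_on_disk_filename('1401811134.87364.data', policy) returns roughly
    # {'timestamp': Timestamp('1401811134.87364'), 'ext': '.data',
    # 'ctype_timestamp': None}; .meta filenames may additionally encode a
    # content-type timestamp as a signed hex offset, in which case
    # ctype_timestamp is also populated.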
def _process_ondisk_files(self, exts, results, **kwargs):
"""
Called by get_ondisk_files(). Should be over-ridden to implement
subclass specific handling of files.
:param exts: dict of lists of file info, keyed by extension
:param results: a dict that may be updated with results
"""
raise NotImplementedError
def _verify_ondisk_files(self, results, **kwargs):
"""
Verify that the final combination of on disk files complies with the
diskfile contract.
:param results: files that have been found and accepted
:returns: True if the file combination is compliant, False otherwise
"""
data_file, meta_file, ts_file = tuple(
[results[key]
for key in ('data_file', 'meta_file', 'ts_file')])
return ((data_file is None and meta_file is None and ts_file is None)
or (ts_file is not None and data_file is None
and meta_file is None)
or (data_file is not None and ts_file is None))
def _split_list(self, original_list, condition):
"""
Split a list into two lists. The first list contains the first N items
        of the original list, in their original order, where 0 <= N <=
len(original list). The second list contains the remaining items of the
original list, in their original order.
The index, N, at which the original list is split is the index of the
first item in the list that does not satisfy the given condition. Note
that the original list should be appropriately sorted if the second
list is to contain no items that satisfy the given condition.
:param original_list: the list to be split.
:param condition: a single argument function that will be used to test
for the list item to split on.
:return: a tuple of two lists.
"""
for i, item in enumerate(original_list):
if not condition(item):
return original_list[:i], original_list[i:]
return original_list, []
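    # Illustrative sketch (not part of the upstream class): for a list
    # already sorted so that matching items come first,
    # _split_list([4, 3, 2, 1], lambda x: x > 2) returns ([4, 3], [2, 1]).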
def _split_gt_timestamp(self, file_info_list, timestamp):
"""
Given a list of file info dicts, reverse sorted by timestamp, split the
list into two: items newer than timestamp, and items at same time or
older than timestamp.
:param file_info_list: a list of file_info dicts.
:param timestamp: a Timestamp.
:return: a tuple of two lists.
"""
return self._split_list(
file_info_list, lambda x: x['timestamp'] > timestamp)
def _split_gte_timestamp(self, file_info_list, timestamp):
"""
Given a list of file info dicts, reverse sorted by timestamp, split the
list into two: items newer than or at same time as the timestamp, and
items older than timestamp.
:param file_info_list: a list of file_info dicts.
:param timestamp: a Timestamp.
:return: a tuple of two lists.
"""
return self._split_list(
file_info_list, lambda x: x['timestamp'] >= timestamp)
def get_ondisk_files(self, files, datadir, verify=True, policy=None,
**kwargs):
"""
Given a simple list of files names, determine the files that constitute
a valid fileset i.e. a set of files that defines the state of an
object, and determine the files that are obsolete and could be deleted.
Note that some files may fall into neither category.
If a file is considered part of a valid fileset then its info dict will
be added to the results dict, keyed by <extension>_info. Any files that
are no longer required will have their info dicts added to a list
stored under the key 'obsolete'.
The results dict will always contain entries with keys 'ts_file',
'data_file' and 'meta_file'. Their values will be the fully qualified
path to a file of the corresponding type if there is such a file in the
valid fileset, or None.
:param files: a list of file names.
:param datadir: directory name files are from; this is used to
construct file paths in the results, but the datadir is
not modified by this method.
:param verify: if True verify that the ondisk file contract has not
been violated, otherwise do not verify.
:param policy: storage policy used to store the files. Used to
validate fragment indexes for EC policies.
:returns: a dict that will contain keys:
ts_file -> path to a .ts file or None
data_file -> path to a .data file or None
meta_file -> path to a .meta file or None
ctype_file -> path to a .meta file or None
and may contain keys:
ts_info -> a file info dict for a .ts file
data_info -> a file info dict for a .data file
meta_info -> a file info dict for a .meta file
ctype_info -> a file info dict for a .meta file which
contains the content-type value
unexpected -> a list of file paths for unexpected
files
possible_reclaim -> a list of file info dicts for possible
reclaimable files
obsolete -> a list of file info dicts for obsolete files
"""
# Build the exts data structure:
# exts is a dict that maps file extensions to a list of file_info
# dicts for the files having that extension. The file_info dicts are of
# the form returned by parse_on_disk_filename, with the filename added.
# Each list is sorted in reverse timestamp order.
# the results dict is used to collect results of file filtering
results = {}
# The exts dict will be modified during subsequent processing as files
# are removed to be discarded or ignored.
exts = defaultdict(list)
for afile in files:
# Categorize files by extension
try:
file_info = self.parse_on_disk_filename(afile, policy)
file_info['filename'] = afile
exts[file_info['ext']].append(file_info)
except DiskFileError as e:
file_path = os.path.join(datadir or '', afile)
results.setdefault('unexpected', []).append(file_path)
                # log a warning if it's not an rsync temp file
if RE_RSYNC_TEMPFILE.match(afile):
self.logger.debug('Rsync tempfile: %s', file_path)
else:
self.logger.warning('Unexpected file %s: %s',
file_path, e)
for ext in exts:
# For each extension sort files into reverse chronological order.
exts[ext] = sorted(
exts[ext], key=lambda info: info['timestamp'], reverse=True)
if exts.get('.ts'):
# non-tombstones older than or equal to latest tombstone are
# obsolete
for ext in filter(lambda ext: ext != '.ts', exts.keys()):
exts[ext], older = self._split_gt_timestamp(
exts[ext], exts['.ts'][0]['timestamp'])
results.setdefault('obsolete', []).extend(older)
# all but most recent .ts are obsolete
results.setdefault('obsolete', []).extend(exts['.ts'][1:])
exts['.ts'] = exts['.ts'][:1]
if exts.get('.meta'):
# retain the newest meta file
retain = 1
if exts['.meta'][1:]:
# there are other meta files so find the one with newest
# ctype_timestamp...
exts['.meta'][1:] = sorted(
exts['.meta'][1:],
key=lambda info: info['ctype_timestamp'] or 0,
reverse=True)
                # ...and retain this IFF its ctype_timestamp is greater than
                # that of the newest meta file
if ((exts['.meta'][1]['ctype_timestamp'] or 0) >
(exts['.meta'][0]['ctype_timestamp'] or 0)):
if (exts['.meta'][1]['timestamp'] ==
exts['.meta'][0]['timestamp']):
# both at same timestamp so retain only the one with
# newest ctype
exts['.meta'][:2] = [exts['.meta'][1],
exts['.meta'][0]]
retain = 1
else:
# retain both - first has newest metadata, second has
# newest ctype
retain = 2
# discard all meta files not being retained...
results.setdefault('obsolete', []).extend(exts['.meta'][retain:])
exts['.meta'] = exts['.meta'][:retain]
# delegate to subclass handler
self._process_ondisk_files(exts, results, **kwargs)
# set final choice of files
if 'data_info' in results:
if exts.get('.meta'):
# only report a meta file if a data file has been chosen
results['meta_info'] = exts['.meta'][0]
ctype_info = exts['.meta'].pop()
if (ctype_info['ctype_timestamp']
> results['data_info']['timestamp']):
results['ctype_info'] = ctype_info
elif exts.get('.ts'):
# only report a ts file if a data file has not been chosen
# (ts files will commonly already have been removed from exts if
# a data file was chosen, but that may not be the case if
# non-durable EC fragment(s) were chosen, hence the elif here)
results['ts_info'] = exts['.ts'][0]
# set ts_file, data_file, meta_file and ctype_file with path to
# chosen file or None
for info_key in ('data_info', 'meta_info', 'ts_info', 'ctype_info'):
info = results.get(info_key)
key = info_key[:-5] + '_file'
results[key] = join(datadir, info['filename']) if info else None
if verify:
assert self._verify_ondisk_files(
results, **kwargs), \
"On-disk file search algorithm contract is broken: %s" \
% str(results)
return results
def cleanup_ondisk_files(self, hsh_path, **kwargs):
"""
Clean up on-disk files that are obsolete and gather the set of valid
on-disk files for an object.
:param hsh_path: object hash path
:param frag_index: if set, search for a specific fragment index .data
file, otherwise accept the first valid .data file
:returns: a dict that may contain: valid on disk files keyed by their
filename extension; a list of obsolete files stored under the
key 'obsolete'; a list of files remaining in the directory,
reverse sorted, stored under the key 'files'.
"""
def is_reclaimable(timestamp):
return (time.time() - float(timestamp)) > self.reclaim_age
try:
files = os.listdir(hsh_path)
except OSError as err:
if err.errno == errno.ENOENT:
results = self.get_ondisk_files(
[], hsh_path, verify=False, **kwargs)
results['files'] = []
return results
else:
raise
files.sort(reverse=True)
results = self.get_ondisk_files(
files, hsh_path, verify=False, **kwargs)
if 'ts_info' in results and is_reclaimable(
results['ts_info']['timestamp']):
remove_file(join(hsh_path, results['ts_info']['filename']))
files.remove(results.pop('ts_info')['filename'])
for file_info in results.get('possible_reclaim', []):
# stray files are not deleted until reclaim-age; non-durable data
            # files are not deleted unless they were written more than
            # commit_window seconds ago
filepath = join(hsh_path, file_info['filename'])
if (is_reclaimable(file_info['timestamp']) and
(file_info.get('durable', True) or
self.commit_window <= 0 or
is_file_older(filepath, self.commit_window))):
results.setdefault('obsolete', []).append(file_info)
for file_info in results.get('obsolete', []):
remove_file(join(hsh_path, file_info['filename']))
files.remove(file_info['filename'])
results['files'] = files
if not files: # everything got unlinked
try:
os.rmdir(hsh_path)
except OSError as err:
if err.errno not in (errno.ENOENT, errno.ENOTEMPTY):
self.logger.debug(
'Error cleaning up empty hash directory %s: %s',
hsh_path, err)
# else, no real harm; pass
return results
def _update_suffix_hashes(self, hashes, ondisk_info):
"""
Applies policy specific updates to the given dict of md5 hashes for
the given ondisk_info.
:param hashes: a dict of md5 hashes to be updated
:param ondisk_info: a dict describing the state of ondisk files, as
returned by get_ondisk_files
"""
raise NotImplementedError
def _hash_suffix_dir(self, path, policy):
"""
:param path: full path to directory
:param policy: storage policy used
"""
if six.PY2:
hashes = defaultdict(lambda: md5(usedforsecurity=False))
else:
class shim(object):
def __init__(self):
self.md5 = md5(usedforsecurity=False)
def update(self, s):
if isinstance(s, str):
self.md5.update(s.encode('utf-8'))
else:
self.md5.update(s)
def hexdigest(self):
return self.md5.hexdigest()
hashes = defaultdict(shim)
try:
path_contents = sorted(os.listdir(path))
except OSError as err:
if err.errno in (errno.ENOTDIR, errno.ENOENT):
raise PathNotDir()
raise
for hsh in path_contents:
hsh_path = join(path, hsh)
try:
ondisk_info = self.cleanup_ondisk_files(
hsh_path, policy=policy)
except OSError as err:
partition_path = dirname(path)
objects_path = dirname(partition_path)
device_path = dirname(objects_path)
if err.errno == errno.ENOTDIR:
# The made-up filename is so that the eventual dirpath()
# will result in this object directory that we care about.
# Some failures will result in an object directory
# becoming a file, thus causing the parent directory to
                    # be quarantined.
quar_path = quarantine_renamer(device_path,
join(hsh_path,
"made-up-filename"))
logging.exception(
'Quarantined %(hsh_path)s to %(quar_path)s because '
'it is not a directory', {'hsh_path': hsh_path,
'quar_path': quar_path})
continue
elif err.errno == errno.ENODATA:
try:
# We've seen cases where bad sectors lead to ENODATA
# here; use a similar hack as above
quar_path = quarantine_renamer(
device_path,
join(hsh_path, "made-up-filename"))
orig_path = hsh_path
except (OSError, IOError):
# We've *also* seen the bad sectors lead to us needing
# to quarantine the whole suffix
quar_path = quarantine_renamer(device_path, hsh_path)
orig_path = path
logging.exception(
'Quarantined %(orig_path)s to %(quar_path)s because '
'it could not be listed', {'orig_path': orig_path,
'quar_path': quar_path})
continue
raise
if not ondisk_info['files']:
continue
# ondisk_info has info dicts containing timestamps for those
# files that could determine the state of the diskfile if it were
# to be opened. We update the suffix hash with the concatenation of
# each file's timestamp and extension. The extension is added to
# guarantee distinct hash values from two object dirs that have
# different file types at the same timestamp(s).
#
# Files that may be in the object dir but would have no effect on
# the state of the diskfile are not used to update the hash.
for key in (k for k in ('meta_info', 'ts_info')
if k in ondisk_info):
info = ondisk_info[key]
hashes[None].update(info['timestamp'].internal + info['ext'])
# delegate to subclass for data file related updates...
self._update_suffix_hashes(hashes, ondisk_info)
if 'ctype_info' in ondisk_info:
# We have a distinct content-type timestamp so update the
# hash. As a precaution, append '_ctype' to differentiate this
                # value from any other timestamp value that might be included
                # in the hash in the future. There is no .ctype file so use
                # _ctype to avoid any confusion.
info = ondisk_info['ctype_info']
hashes[None].update(info['ctype_timestamp'].internal
+ '_ctype')
try:
os.rmdir(path)
except OSError as e:
if e.errno == errno.ENOENT:
raise PathNotDir()
else:
# if we remove it, pretend like it wasn't there to begin with so
# that the suffix key gets removed
raise PathNotDir()
return hashes
def _hash_suffix(self, path, policy=None):
"""
Performs reclamation and returns an md5 of all (remaining) files.
:param path: full path to directory
:param policy: storage policy used to store the files
:raises PathNotDir: if given path is not a valid directory
:raises OSError: for non-ENOTDIR errors
"""
raise NotImplementedError
def _get_hashes(self, *args, **kwargs):
hashed, hashes = self.__get_hashes(*args, **kwargs)
hashes.pop('updated', None)
hashes.pop('valid', None)
return hashed, hashes
def __get_hashes(self, device, partition, policy, recalculate=None,
do_listdir=False):
"""
Get hashes for each suffix dir in a partition. do_listdir causes it to
mistrust the hash cache for suffix existence at the (unexpectedly high)
cost of a listdir.
:param device: name of target device
:param partition: partition on the device in which the object lives
:param policy: the StoragePolicy instance
:param recalculate: list of suffixes which should be recalculated when
got
:param do_listdir: force existence check for all hashes in the
partition
:returns: tuple of (number of suffix dirs hashed, dictionary of hashes)
"""
hashed = 0
dev_path = self.get_dev_path(device)
partition_path = get_part_path(dev_path, policy, partition)
hashes_file = join(partition_path, HASH_FILE)
modified = False
orig_hashes = {'valid': False}
if recalculate is None:
recalculate = []
try:
orig_hashes = self.consolidate_hashes(partition_path)
except Exception:
self.logger.warning('Unable to read %r', hashes_file,
exc_info=True)
if not orig_hashes['valid']:
            # This is the only path to valid hashes from an invalid read (e.g.
            # does not exist, corrupt, etc.). Moreover, in order to write these
            # valid hashes we must read *the exact same* invalid state or we'll
            # trigger race detection.
do_listdir = True
hashes = {'valid': True}
# If the exception handling around consolidate_hashes fired we're
# going to do a full rehash regardless; but we need to avoid
# needless recursion if the on-disk hashes.pkl is actually readable
# (worst case is consolidate_hashes keeps raising exceptions and we
# eventually run out of stack).
            # N.B. orig_hashes invalid only affects new parts and error/edge
# conditions - so try not to get overly caught up trying to
# optimize it out unless you manage to convince yourself there's a
# bad behavior.
orig_hashes = read_hashes(partition_path)
else:
hashes = copy.deepcopy(orig_hashes)
if do_listdir:
for suff in os.listdir(partition_path):
if len(suff) == 3:
hashes.setdefault(suff, None)
modified = True
self.logger.debug('Run listdir on %s', partition_path)
hashes.update((suffix, None) for suffix in recalculate)
for suffix, hash_ in list(hashes.items()):
if suffix in ('valid', 'updated'):
continue
if not hash_:
suffix_dir = join(partition_path, suffix)
try:
hashes[suffix] = self._hash_suffix(
suffix_dir, policy=policy)
hashed += 1
except PathNotDir:
del hashes[suffix]
except OSError:
logging.exception('Error hashing suffix')
modified = True
if modified:
with lock_path(partition_path):
if read_hashes(partition_path) == orig_hashes:
write_hashes(partition_path, hashes)
return hashed, hashes
return self.__get_hashes(device, partition, policy,
recalculate=recalculate,
do_listdir=do_listdir)
else:
return hashed, hashes
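    # For reference, the persisted hashes.pkl for a replicated policy maps
    # each suffix to its md5 hex digest (or to a dict of per-fragment hashes
    # for EC) plus bookkeeping keys, e.g. (values illustrative):
    #
    #   {'valid': True, 'updated': 1700000000.0,
    #    'a83': 'd41d8cd98f00b204e9800998ecf8427e', '0f1': None}
    #
    # where a None value marks a suffix that has been invalidated and is due
    # to be rehashed on the next pass.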
def construct_dev_path(self, device):
"""
Construct the path to a device without checking if it is mounted.
:param device: name of target device
:returns: full path to the device
"""
return os.path.join(self.devices, device)
def get_dev_path(self, device, mount_check=None):
"""
        Return the path to a device, first checking to see if it is either
        a proper mount point or at least a directory, depending on the
        mount_check configuration option.
:param device: name of target device
:param mount_check: whether or not to check mountedness of device.
Defaults to bool(self.mount_check).
:returns: full path to the device, None if the path to the device is
not a proper mount point or directory.
"""
if mount_check is False:
# explicitly forbidden from syscall, just return path
return join(self.devices, device)
# we'll do some kind of check if not explicitly forbidden
try:
return check_drive(self.devices, device,
mount_check or self.mount_check)
except ValueError:
return None
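    # Illustrative usage (hypothetical device name), mirroring the guard used
    # by the diskfile accessors below:
    #
    #   dev_path = mgr.get_dev_path('sda1')
    #   if not dev_path:
    #       raise DiskFileDeviceUnavailable()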
@contextmanager
def replication_lock(self, device, policy, partition):
"""
A context manager that will lock on the partition and, if configured
to do so, on the device given.
:param device: name of target device
:param policy: policy targeted by the replication request
:param partition: partition targeted by the replication request
:raises ReplicationLockTimeout: If the lock on the device
cannot be granted within the configured timeout.
"""
limit_time = time.time() + self.replication_lock_timeout
with self.partition_lock(device, policy, partition, name='replication',
timeout=self.replication_lock_timeout):
if self.replication_concurrency_per_device:
with lock_path(self.get_dev_path(device),
timeout=limit_time - time.time(),
timeout_class=ReplicationLockTimeout,
limit=self.replication_concurrency_per_device):
yield True
else:
yield True
@contextmanager
def partition_lock(self, device, policy, partition, name=None,
timeout=None):
"""
A context manager that will lock on the partition given.
:param device: device targeted by the lock request
:param policy: policy targeted by the lock request
:param partition: partition targeted by the lock request
:raises PartitionLockTimeout: If the lock on the partition
cannot be granted within the configured timeout.
"""
if timeout is None:
timeout = self.replication_lock_timeout
part_path = os.path.join(self.get_dev_path(device),
get_data_dir(policy), str(partition))
with lock_path(part_path, timeout=timeout,
timeout_class=PartitionLockTimeout, limit=1, name=name):
yield True
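    # Illustrative usage (hypothetical values): a replicator working on
    # partition '1234' of device 'sda1' would wrap its work in
    #
    #   with mgr.replication_lock('sda1', policy, '1234'):
    #       ...
    #
    # which serializes replication per partition and, when
    # replication_concurrency_per_device is configured, per device as well.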
def pickle_async_update(self, device, account, container, obj, data,
timestamp, policy):
"""
Write data describing a container update notification to a pickle file
in the async_pending directory.
:param device: name of target device
:param account: account name for the object
:param container: container name for the object
:param obj: object name for the object
:param data: update data to be written to pickle file
:param timestamp: a Timestamp
:param policy: the StoragePolicy instance
"""
device_path = self.construct_dev_path(device)
async_dir = os.path.join(device_path, get_async_dir(policy))
tmp_dir = os.path.join(device_path, get_tmp_dir(policy))
mkdirs(tmp_dir)
ohash = hash_path(account, container, obj)
write_pickle(
data,
os.path.join(async_dir, ohash[-3:], ohash + '-' +
Timestamp(timestamp).internal),
tmp_dir)
self.logger.increment('async_pendings')
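    # The pickled update lands under the policy's async_pending directory at a
    # path shaped roughly like (illustrative):
    #
    #   <device_path>/<async dir>/<last 3 of hash>/<hash>-<timestamp.internal>
    #
    # where the object updater later finds and replays it.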
def get_diskfile(self, device, partition, account, container, obj,
policy, **kwargs):
"""
Returns a BaseDiskFile instance for an object based on the object's
partition, path parts and policy.
:param device: name of target device
:param partition: partition on device in which the object lives
:param account: account name for the object
:param container: container name for the object
:param obj: object name for the object
:param policy: the StoragePolicy instance
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
return self.diskfile_cls(self, dev_path,
partition, account, container, obj,
policy=policy, use_splice=self.use_splice,
pipe_size=self.pipe_size, **kwargs)
def clear_auditor_status(self, policy, auditor_type="ALL"):
datadir = get_data_dir(policy)
clear_auditor_status(self.devices, datadir, auditor_type)
def object_audit_location_generator(self, policy, device_dirs=None,
auditor_type="ALL"):
"""
Yield an AuditLocation for all objects stored under device_dirs.
:param policy: the StoragePolicy instance
:param device_dirs: directory of target device
:param auditor_type: either ALL or ZBF
"""
datadir = get_data_dir(policy)
return object_audit_location_generator(self.devices, datadir,
self.mount_check,
self.logger, device_dirs,
auditor_type)
def get_diskfile_from_audit_location(self, audit_location):
"""
Returns a BaseDiskFile instance for an object at the given
AuditLocation.
:param audit_location: object location to be audited
"""
dev_path = self.get_dev_path(audit_location.device, mount_check=False)
return self.diskfile_cls.from_hash_dir(
self, audit_location.path, dev_path,
audit_location.partition, policy=audit_location.policy)
def get_diskfile_and_filenames_from_hash(self, device, partition,
object_hash, policy, **kwargs):
"""
Returns a tuple of (a DiskFile instance for an object at the given
object_hash, the basenames of the files in the object's hash dir).
Just in case someone thinks of refactoring, be sure DiskFileDeleted is
*not* raised, but the DiskFile instance representing the tombstoned
object is returned instead.
:param device: name of target device
:param partition: partition on the device in which the object lives
:param object_hash: the hash of an object path
:param policy: the StoragePolicy instance
:raises DiskFileNotExist: if the object does not exist
:returns: a tuple comprising (an instance of BaseDiskFile, a list of
file basenames)
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
object_path = os.path.join(
dev_path, get_data_dir(policy), str(partition), object_hash[-3:],
object_hash)
try:
filenames = self.cleanup_ondisk_files(object_path)['files']
except OSError as err:
if err.errno == errno.ENOTDIR:
# The made-up filename is so that the eventual dirpath()
# will result in this object directory that we care about.
# Some failures will result in an object directory
# becoming a file, thus causing the parent directory to
                # be quarantined.
quar_path = self.quarantine_renamer(dev_path,
join(object_path,
"made-up-filename"))
logging.exception(
'Quarantined %(object_path)s to %(quar_path)s because '
'it is not a directory', {'object_path': object_path,
'quar_path': quar_path})
raise DiskFileNotExist()
elif err.errno == errno.ENODATA:
try:
# We've seen cases where bad sectors lead to ENODATA here;
# use a similar hack as above
quar_path = self.quarantine_renamer(
dev_path,
join(object_path, "made-up-filename"))
orig_path = object_path
except (OSError, IOError):
# We've *also* seen the bad sectors lead to us needing to
# quarantine the whole suffix, not just the hash dir
quar_path = self.quarantine_renamer(dev_path, object_path)
orig_path = os.path.dirname(object_path)
logging.exception(
'Quarantined %(orig_path)s to %(quar_path)s because '
'it could not be listed', {'orig_path': orig_path,
'quar_path': quar_path})
raise DiskFileNotExist()
if err.errno != errno.ENOENT:
raise
raise DiskFileNotExist()
if not filenames:
raise DiskFileNotExist()
try:
metadata = read_metadata(os.path.join(object_path, filenames[-1]))
except EOFError:
raise DiskFileNotExist()
try:
account, container, obj = split_path(
metadata.get('name', ''), 3, 3, True)
except ValueError:
raise DiskFileNotExist()
df = self.diskfile_cls(self, dev_path, partition, account, container,
obj, policy=policy, **kwargs)
return df, filenames
def get_diskfile_from_hash(self, device, partition, object_hash, policy,
**kwargs):
"""
Returns a DiskFile instance for an object at the given object_hash.
Just in case someone thinks of refactoring, be sure DiskFileDeleted is
*not* raised, but the DiskFile instance representing the tombstoned
object is returned instead.
:param device: name of target device
:param partition: partition on the device in which the object lives
:param object_hash: the hash of an object path
:param policy: the StoragePolicy instance
:raises DiskFileNotExist: if the object does not exist
:returns: an instance of BaseDiskFile
"""
return self.get_diskfile_and_filenames_from_hash(
device, partition, object_hash, policy, **kwargs)[0]
def get_hashes(self, device, partition, suffixes, policy,
skip_rehash=False):
"""
:param device: name of target device
:param partition: partition name
:param suffixes: a list of suffix directories to be recalculated
:param policy: the StoragePolicy instance
:param skip_rehash: just mark the suffixes dirty; return None
        :returns: a dictionary that maps suffix directories to their hashes,
            or None if skip_rehash is True
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
partition_path = get_part_path(dev_path, policy, partition)
suffixes = [suf for suf in suffixes or [] if valid_suffix(suf)]
if skip_rehash:
for suffix in suffixes:
self.invalidate_hash(os.path.join(partition_path, suffix))
hashes = None
elif not os.path.exists(partition_path):
hashes = {}
else:
_junk, hashes = tpool.execute(
self._get_hashes, device, partition, policy,
recalculate=suffixes)
return hashes
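    # Illustrative usage (hypothetical values): a replicator wanting fresh
    # hashes for two suffixes might call
    #
    #   hashes = mgr.get_hashes('sda1', '1234', ['a83', '0f1'], policy)
    #
    # while a caller that only needs to mark them dirty would pass
    # skip_rehash=True and get back None.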
def _listdir(self, path):
"""
:param path: full path to directory
"""
try:
return os.listdir(path)
except OSError as err:
if err.errno != errno.ENOENT:
self.logger.error(
'ERROR: Skipping %r due to error with listdir attempt: %s',
path, err)
return []
def yield_suffixes(self, device, partition, policy):
"""
Yields tuples of (full_path, suffix_only) for suffixes stored
on the given device and partition.
:param device: name of target device
:param partition: partition name
:param policy: the StoragePolicy instance
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
partition_path = get_part_path(dev_path, policy, partition)
for suffix in self._listdir(partition_path):
if len(suffix) != 3:
continue
try:
int(suffix, 16)
except ValueError:
continue
yield (os.path.join(partition_path, suffix), suffix)
def yield_hashes(self, device, partition, policy,
suffixes=None, **kwargs):
"""
Yields tuples of (hash_only, timestamps) for object
information stored for the given device, partition, and
(optionally) suffixes. If suffixes is None, all stored
suffixes will be searched for object hashes. Note that if
suffixes is not None but empty, such as [], then nothing will
be yielded.
timestamps is a dict which may contain items mapping:
- ts_data -> timestamp of data or tombstone file,
- ts_meta -> timestamp of meta file, if one exists
- ts_ctype -> timestamp of meta file containing most recent
content-type value, if one exists
- durable -> True if data file at ts_data is durable, False otherwise
where timestamps are instances of
:class:`~swift.common.utils.Timestamp`
:param device: name of target device
:param partition: partition name
:param policy: the StoragePolicy instance
:param suffixes: optional list of suffix directories to be searched
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
partition_path = get_part_path(dev_path, policy, partition)
if suffixes is None:
suffixes = self.yield_suffixes(device, partition, policy)
else:
suffixes = (
(os.path.join(partition_path, suffix), suffix)
for suffix in suffixes)
# define keys that we need to extract the result from the on disk info
# data:
# (x, y, z) -> result[x] should take the value of y[z]
key_map = (
('ts_meta', 'meta_info', 'timestamp'),
('ts_data', 'data_info', 'timestamp'),
('ts_data', 'ts_info', 'timestamp'),
('ts_ctype', 'ctype_info', 'ctype_timestamp'),
('durable', 'data_info', 'durable'),
)
# cleanup_ondisk_files() will remove empty hash dirs, and we'll
# invalidate any empty suffix dirs so they'll get cleaned up on
# the next rehash
for suffix_path, suffix in suffixes:
found_files = False
for object_hash in self._listdir(suffix_path):
object_path = os.path.join(suffix_path, object_hash)
try:
diskfile_info = self.cleanup_ondisk_files(
object_path, **kwargs)
if diskfile_info['files']:
found_files = True
result = {}
for result_key, diskfile_info_key, info_key in key_map:
if diskfile_info_key not in diskfile_info:
continue
info = diskfile_info[diskfile_info_key]
if info_key in info:
# durable key not returned from replicated Diskfile
result[result_key] = info[info_key]
if 'ts_data' not in result:
# file sets that do not include a .data or .ts
# file cannot be opened and therefore cannot
# be ssync'd
continue
yield object_hash, result
except AssertionError as err:
self.logger.debug('Invalid file set in %s (%s)' % (
object_path, err))
except DiskFileError as err:
self.logger.debug(
'Invalid diskfile filename in %r (%s)' % (
object_path, err))
if not found_files:
self.invalidate_hash(suffix_path)
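    # Each item yielded above looks roughly like (values illustrative):
    #
    #   ('f4f61cb41b92d24f9d1b214501560e45',
    #    {'ts_data': Timestamp('1700000000.00000'),
    #     'ts_meta': Timestamp('1700000005.00000'),
    #     'durable': True})
    #
    # with 'ts_meta', 'ts_ctype' and 'durable' only present when applicable.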
class BaseDiskFileWriter(object):
"""
Encapsulation of the write context for servicing PUT REST API
requests. Serves as the context manager object for the
:class:`swift.obj.diskfile.DiskFile` class's
:func:`swift.obj.diskfile.DiskFile.create` method.
.. note::
It is the responsibility of the
:func:`swift.obj.diskfile.DiskFile.create` method context manager to
close the open file descriptor.
.. note::
The arguments to the constructor are considered implementation
specific. The API does not define the constructor arguments.
:param name: name of object from REST API
:param datadir: on-disk directory object will end up in on
:func:`swift.obj.diskfile.DiskFileWriter.put`
    :param size: expected size of the object being written; if set, may be
                 used to preallocate disk space
:param bytes_per_sync: number bytes written between sync calls
:param diskfile: the diskfile creating this DiskFileWriter instance
:param next_part_power: the next partition power to be used
"""
def __init__(self, name, datadir, size, bytes_per_sync, diskfile,
next_part_power):
# Parameter tracking
self._name = name
self._datadir = datadir
self._fd = None
self._tmppath = None
self._size = size
self._chunks_etag = md5(usedforsecurity=False)
self._bytes_per_sync = bytes_per_sync
self._diskfile = diskfile
self.next_part_power = next_part_power
# Internal attributes
self._upload_size = 0
self._last_sync = 0
self._extension = '.data'
self._put_succeeded = False
@property
def manager(self):
return self._diskfile.manager
@property
def logger(self):
return self.manager.logger
def _get_tempfile(self):
tmppath = None
if self.manager.use_linkat:
self._dirs_created = makedirs_count(self._datadir)
try:
fd = os.open(self._datadir, O_TMPFILE | os.O_WRONLY)
except OSError as err:
if err.errno in (errno.EOPNOTSUPP, errno.EISDIR, errno.EINVAL):
msg = 'open(%s, O_TMPFILE | O_WRONLY) failed: %s \
Falling back to using mkstemp()' \
% (self._datadir, os.strerror(err.errno))
self.logger.debug(msg)
self.manager.use_linkat = False
else:
raise
if not self.manager.use_linkat:
tmpdir = join(self._diskfile._device_path,
get_tmp_dir(self._diskfile.policy))
if not exists(tmpdir):
mkdirs(tmpdir)
fd, tmppath = mkstemp(dir=tmpdir)
return fd, tmppath
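    # Note: with O_TMPFILE the chunks are written to an unnamed file opened
    # directly in the hash dir and only linked into place at put time via
    # link_fd_to_path(); with the mkstemp() fallback a named temp file under
    # the policy's tmp dir is renamer()'d into place (see _finalize_put()).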
def open(self):
if self._fd is not None:
raise ValueError('DiskFileWriter is already open')
try:
self._fd, self._tmppath = self._get_tempfile()
except OSError as err:
if err.errno in (errno.ENOSPC, errno.EDQUOT):
# No more inodes in filesystem
raise DiskFileNoSpace()
raise
if self._size is not None and self._size > 0:
try:
fallocate(self._fd, self._size)
except OSError as err:
if err.errno in (errno.ENOSPC, errno.EDQUOT):
raise DiskFileNoSpace()
raise
return self
def close(self):
if self._fd:
try:
os.close(self._fd)
except OSError:
pass
self._fd = None
if self._tmppath and not self._put_succeeded:
# Try removing the temp file only if put did NOT succeed.
#
# dfw.put_succeeded is set to True after renamer() succeeds in
# DiskFileWriter._finalize_put()
try:
# when mkstemp() was used
os.unlink(self._tmppath)
except OSError:
self.logger.exception('Error removing tempfile: %s' %
self._tmppath)
self._tmppath = None
def write(self, chunk):
"""
        Write a chunk of data to disk. All invocations of this method must
        come before invoking the
        :func:`swift.obj.diskfile.DiskFileWriter.put` method.
        For this implementation, the data is written into a temporary file.
:param chunk: the chunk of data to write as a string object
"""
if not self._fd:
raise ValueError('Writer is not open')
self._chunks_etag.update(chunk)
while chunk:
written = os.write(self._fd, chunk)
self._upload_size += written
chunk = chunk[written:]
# For large files sync every 512MB (by default) written
diff = self._upload_size - self._last_sync
if diff >= self._bytes_per_sync:
tpool.execute(fdatasync, self._fd)
drop_buffer_cache(self._fd, self._last_sync, diff)
self._last_sync = self._upload_size
def chunks_finished(self):
"""
Expose internal stats about written chunks.
:returns: a tuple, (upload_size, etag)
"""
return self._upload_size, self._chunks_etag.hexdigest()
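    # Illustrative check as typically performed by a caller such as the
    # object server (hypothetical names):
    #
    #   upload_size, etag = writer.chunks_finished()
    #   if client_etag and client_etag != etag:
    #       ...  # reject the upload rather than storing a corrupt object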
def _finalize_put(self, metadata, target_path, cleanup,
logger_thread_locals):
if logger_thread_locals is not None:
self.logger.thread_locals = logger_thread_locals
# Write the metadata before calling fsync() so that both data and
# metadata are flushed to disk.
write_metadata(self._fd, metadata)
# We call fsync() before calling drop_cache() to lower the amount of
# redundant work the drop cache code will perform on the pages (now
# that after fsync the pages will be all clean).
fsync(self._fd)
# From the Department of the Redundancy Department, make sure we call
# drop_cache() after fsync() to avoid redundant work (pages all
# clean).
drop_buffer_cache(self._fd, 0, self._upload_size)
self.manager.invalidate_hash(dirname(self._datadir))
# After the rename/linkat completes, this object will be available for
# requests to reference.
if self._tmppath:
# It was a named temp file created by mkstemp()
renamer(self._tmppath, target_path)
else:
# It was an unnamed temp file created by open() with O_TMPFILE
link_fd_to_path(self._fd, target_path,
self._diskfile._dirs_created)
# Check if the partition power will/has been increased
new_target_path = None
if self.next_part_power:
new_target_path = replace_partition_in_path(
self.manager.devices, target_path, self.next_part_power)
if target_path != new_target_path:
try:
fsync_dir(os.path.dirname(target_path))
self.manager.logger.debug(
'Relinking %s to %s due to next_part_power set',
target_path, new_target_path)
relink_paths(target_path, new_target_path)
except OSError as exc:
self.manager.logger.exception(
'Relinking %s to %s failed: %s',
target_path, new_target_path, exc)
# If rename is successful, flag put as succeeded. This is done to avoid
# unnecessary os.unlink() of tempfile later. As renamer() has
# succeeded, the tempfile would no longer exist at its original path.
self._put_succeeded = True
if cleanup:
try:
self.manager.cleanup_ondisk_files(self._datadir)
except OSError:
logging.exception('Problem cleaning up %s', self._datadir)
self._part_power_cleanup(target_path, new_target_path)
def _put(self, metadata, cleanup=True, *a, **kw):
"""
Helper method for subclasses.
For this implementation, this method is responsible for renaming the
temporary file to the final name and directory location. This method
should be called after the final call to
:func:`swift.obj.diskfile.DiskFileWriter.write`.
:param metadata: dictionary of metadata to be associated with the
object
:param cleanup: a Boolean. If True then obsolete files will be removed
from the object dir after the put completes, otherwise
obsolete files are left in place.
"""
timestamp = Timestamp(metadata['X-Timestamp'])
ctype_timestamp = metadata.get('Content-Type-Timestamp')
if ctype_timestamp:
ctype_timestamp = Timestamp(ctype_timestamp)
filename = self.manager.make_on_disk_filename(
timestamp, self._extension, ctype_timestamp=ctype_timestamp,
*a, **kw)
metadata['name'] = self._name
target_path = join(self._datadir, filename)
tpool.execute(
self._finalize_put, metadata, target_path, cleanup,
logger_thread_locals=getattr(self.logger, 'thread_locals', None))
def put(self, metadata):
"""
Finalize writing the file on disk.
:param metadata: dictionary of metadata to be associated with the
object
"""
raise NotImplementedError
def commit(self, timestamp):
"""
Perform any operations necessary to mark the object as durable. For
replication policy type this is a no-op.
:param timestamp: object put timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
"""
pass
def _part_power_cleanup(self, cur_path, new_path):
"""
        Clean up related DiskFile directories.
        If the partition power will be increased soon, or has just been
        increased but the relinker has not yet cleaned up the old files, an
        additional cleanup of the related dirs has to be done. Otherwise there
        might be some unused files left if a PUT or DELETE is done in the
        meantime.
:param cur_path: current full path to an object file
:param new_path: recomputed path to an object file, based on the
next_part_power set in the ring
"""
if new_path is None:
return
# Partition power will be increased soon
if new_path != cur_path:
new_target_dir = os.path.dirname(new_path)
try:
self.manager.cleanup_ondisk_files(new_target_dir)
except OSError:
logging.exception(
'Problem cleaning up %s', new_target_dir)
# Partition power has been increased, cleanup not yet finished
else:
prev_part_power = int(self.next_part_power) - 1
old_target_path = replace_partition_in_path(
self.manager.devices, cur_path, prev_part_power)
old_target_dir = os.path.dirname(old_target_path)
try:
self.manager.cleanup_ondisk_files(old_target_dir)
except OSError:
logging.exception(
'Problem cleaning up %s', old_target_dir)
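    # For example (illustrative): with next_part_power = 11, an object stored
    # under .../objects/1024/... will hash to partition 2048 or 2049 under the
    # new power, so both the old and the recomputed locations are candidates
    # for cleanup here.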
class BaseDiskFileReader(object):
"""
Encapsulation of the WSGI read context for servicing GET REST API
requests. Serves as the context manager object for the
:class:`swift.obj.diskfile.DiskFile` class's
:func:`swift.obj.diskfile.DiskFile.reader` method.
.. note::
The quarantining behavior of this method is considered implementation
specific, and is not required of the API.
.. note::
The arguments to the constructor are considered implementation
specific. The API does not define the constructor arguments.
:param fp: open file object pointer reference
:param data_file: on-disk data file name for the object
:param obj_size: verified on-disk size of the object
:param etag: expected metadata etag value for entire file
:param disk_chunk_size: size of reads from disk in bytes
:param keep_cache_size: maximum object size that will be kept in cache
:param device_path: on-disk device path, used when quarantining an obj
:param logger: logger caller wants this object to use
:param quarantine_hook: 1-arg callable called w/reason when quarantined
:param use_splice: if true, use zero-copy splice() to send data
:param pipe_size: size of pipe buffer used in zero-copy operations
:param diskfile: the diskfile creating this DiskFileReader instance
:param keep_cache: should resulting reads be kept in the buffer cache
"""
def __init__(self, fp, data_file, obj_size, etag,
disk_chunk_size, keep_cache_size, device_path, logger,
quarantine_hook, use_splice, pipe_size, diskfile,
keep_cache=False):
# Parameter tracking
self._fp = fp
self._data_file = data_file
self._obj_size = obj_size
self._etag = etag
self._diskfile = diskfile
self._disk_chunk_size = disk_chunk_size
self._device_path = device_path
self._logger = logger
self._quarantine_hook = quarantine_hook
self._use_splice = use_splice
self._pipe_size = pipe_size
if keep_cache:
# Caller suggests we keep this in cache, only do it if the
# object's size is less than the maximum.
self._keep_cache = obj_size < keep_cache_size
else:
self._keep_cache = False
# Internal Attributes
self._iter_etag = None
self._bytes_read = 0
self._started_at_0 = False
self._read_to_eof = False
self._md5_of_sent_bytes = None
self._suppress_file_closing = False
self._quarantined_dir = None
@property
def manager(self):
return self._diskfile.manager
def _init_checks(self):
if self._fp.tell() == 0:
self._started_at_0 = True
self._iter_etag = md5(usedforsecurity=False)
def _update_checks(self, chunk):
if self._iter_etag:
self._iter_etag.update(chunk)
def __iter__(self):
"""Returns an iterator over the data file."""
try:
dropped_cache = 0
self._bytes_read = 0
self._started_at_0 = False
self._read_to_eof = False
self._init_checks()
while True:
try:
chunk = self._fp.read(self._disk_chunk_size)
except IOError as e:
if e.errno == errno.EIO:
# Note that if there's no quarantine hook set up,
# this won't raise any exception
self._quarantine(str(e))
# ... so it's significant that this is not in an else
raise
if chunk:
self._update_checks(chunk)
self._bytes_read += len(chunk)
if self._bytes_read - dropped_cache > DROP_CACHE_WINDOW:
self._drop_cache(self._fp.fileno(), dropped_cache,
self._bytes_read - dropped_cache)
dropped_cache = self._bytes_read
yield chunk
else:
self._read_to_eof = True
self._drop_cache(self._fp.fileno(), dropped_cache,
self._bytes_read - dropped_cache)
break
finally:
if not self._suppress_file_closing:
self.close()
def can_zero_copy_send(self):
return self._use_splice
def zero_copy_send(self, wsockfd):
"""
Does some magic with splice() and tee() to move stuff from disk to
network without ever touching userspace.
:param wsockfd: file descriptor (integer) of the socket out which to
send data
"""
# Note: if we ever add support for zero-copy ranged GET responses,
# we'll have to make this conditional.
self._started_at_0 = True
rfd = self._fp.fileno()
client_rpipe, client_wpipe = os.pipe()
hash_rpipe, hash_wpipe = os.pipe()
md5_sockfd = get_md5_socket()
# The actual amount allocated to the pipe may be rounded up to the
# nearest multiple of the page size. If we have the memory allocated,
# we may as well use it.
#
# Note: this will raise IOError on failure, so we don't bother
# checking the return value.
pipe_size = fcntl.fcntl(client_rpipe, F_SETPIPE_SZ, self._pipe_size)
fcntl.fcntl(hash_rpipe, F_SETPIPE_SZ, pipe_size)
dropped_cache = 0
self._bytes_read = 0
try:
while True:
# Read data from disk to pipe
(bytes_in_pipe, _1, _2) = splice(
rfd, None, client_wpipe, None, pipe_size, 0)
if bytes_in_pipe == 0:
self._read_to_eof = True
self._drop_cache(rfd, dropped_cache,
self._bytes_read - dropped_cache)
break
self._bytes_read += bytes_in_pipe
# "Copy" data from pipe A to pipe B (really just some pointer
# manipulation in the kernel, not actual copying).
bytes_copied = tee(client_rpipe, hash_wpipe, bytes_in_pipe, 0)
if bytes_copied != bytes_in_pipe:
# We teed data between two pipes of equal size, and the
# destination pipe was empty. If, somehow, the destination
# pipe was full before all the data was teed, we should
# fail here. If we don't raise an exception, then we will
# have the incorrect MD5 hash once the object has been
# sent out, causing a false-positive quarantine.
raise Exception("tee() failed: tried to move %d bytes, "
"but only moved %d" %
(bytes_in_pipe, bytes_copied))
# Take the data and feed it into an in-kernel MD5 socket. The
# MD5 socket hashes data that is written to it. Reading from
# it yields the MD5 checksum of the written data.
#
# Note that we don't have to worry about splice() returning
# None here (which happens on EWOULDBLOCK); we're splicing
# $bytes_in_pipe bytes from a pipe with exactly that many
# bytes in it, so read won't block, and we're splicing it into
# an MD5 socket, which synchronously hashes any data sent to
# it, so writing won't block either.
(hashed, _1, _2) = splice(hash_rpipe, None, md5_sockfd, None,
bytes_in_pipe, splice.SPLICE_F_MORE)
if hashed != bytes_in_pipe:
raise Exception("md5 socket didn't take all the data? "
"(tried to write %d, but wrote %d)" %
(bytes_in_pipe, hashed))
while bytes_in_pipe > 0:
try:
res = splice(client_rpipe, None, wsockfd, None,
bytes_in_pipe, 0)
bytes_in_pipe -= res[0]
except IOError as exc:
if exc.errno == errno.EWOULDBLOCK:
trampoline(wsockfd, write=True)
else:
raise
if self._bytes_read - dropped_cache > DROP_CACHE_WINDOW:
self._drop_cache(rfd, dropped_cache,
self._bytes_read - dropped_cache)
dropped_cache = self._bytes_read
finally:
# Linux MD5 sockets return '00000000000000000000000000000000' for
# the checksum if you didn't write any bytes to them, instead of
# returning the correct value.
if self._bytes_read > 0:
bin_checksum = os.read(md5_sockfd, 16)
hex_checksum = binascii.hexlify(bin_checksum).decode('ascii')
else:
hex_checksum = MD5_OF_EMPTY_STRING
self._md5_of_sent_bytes = hex_checksum
os.close(client_rpipe)
os.close(client_wpipe)
os.close(hash_rpipe)
os.close(hash_wpipe)
os.close(md5_sockfd)
self.close()
def app_iter_range(self, start, stop):
"""
Returns an iterator over the data file for range (start, stop)
"""
if start or start == 0:
self._fp.seek(start)
if stop is not None:
length = stop - start
else:
length = None
try:
for chunk in self:
if length is not None:
length -= len(chunk)
if length < 0:
# Chop off the extra:
yield chunk[:length]
break
yield chunk
finally:
if not self._suppress_file_closing:
self.close()
def app_iter_ranges(self, ranges, content_type, boundary, size):
"""
Returns an iterator over the data file for a set of ranges
"""
if not ranges:
yield b''
else:
if not isinstance(content_type, bytes):
content_type = content_type.encode('utf8')
if not isinstance(boundary, bytes):
boundary = boundary.encode('ascii')
try:
self._suppress_file_closing = True
for chunk in multi_range_iterator(
ranges, content_type, boundary, size,
self.app_iter_range):
yield chunk
finally:
self._suppress_file_closing = False
self.close()
def _drop_cache(self, fd, offset, length):
"""
        Drop the buffer cache for the given byte range of the data file,
        unless the caller asked for reads to be kept in the cache.
        :param fd: file descriptor of the open data file
"""
if not self._keep_cache:
drop_buffer_cache(fd, offset, length)
def _quarantine(self, msg):
self._quarantined_dir = self.manager.quarantine_renamer(
self._device_path, self._data_file)
self._logger.warning("Quarantined object %s: %s" % (
self._data_file, msg))
self._logger.increment('quarantines')
self._quarantine_hook(msg)
def _handle_close_quarantine(self):
"""Check if file needs to be quarantined"""
if self._iter_etag and not self._md5_of_sent_bytes:
self._md5_of_sent_bytes = self._iter_etag.hexdigest()
if self._bytes_read != self._obj_size:
self._quarantine(
"Bytes read: %s, does not match metadata: %s" % (
self._bytes_read, self._obj_size))
elif self._md5_of_sent_bytes and \
self._etag != self._md5_of_sent_bytes:
self._quarantine(
"ETag %s and file's md5 %s do not match" % (
self._etag, self._md5_of_sent_bytes))
def close(self):
"""
Close the open file handle if present.
For this specific implementation, this method will handle quarantining
the file if necessary.
"""
if self._fp:
try:
if self._started_at_0 and self._read_to_eof:
self._handle_close_quarantine()
except DiskFileQuarantined:
raise
except (Exception, Timeout) as e:
self._logger.error(
'ERROR DiskFile %(data_file)s'
' close failure: %(exc)s : %(stack)s',
{'exc': e, 'stack': ''.join(traceback.format_exc()),
'data_file': self._data_file})
finally:
fp, self._fp = self._fp, None
fp.close()
class BaseDiskFile(object):
"""
Manage object files.
This specific implementation manages object files on a disk formatted with
a POSIX-compliant file system that supports extended attributes as
metadata on a file or directory.
.. note::
The arguments to the constructor are considered implementation
specific. The API does not define the constructor arguments.
The following path format is used for data file locations:
    <devices_path>/<device_dir>/<datadir>/<partdir>/<suffixdir>/<hashdir>/
        <datafile>.<ext>
:param mgr: associated DiskFileManager instance
:param device_path: path to the target device or drive
:param partition: partition on the device in which the object lives
:param account: account name for the object
:param container: container name for the object
:param obj: object name for the object
:param _datadir: override the full datadir otherwise constructed here
:param policy: the StoragePolicy instance
:param use_splice: if true, use zero-copy splice() to send data
:param pipe_size: size of pipe buffer used in zero-copy operations
:param open_expired: if True, open() will not raise a DiskFileExpired if
object is expired
:param next_part_power: the next partition power to be used
"""
reader_cls = None # must be set by subclasses
writer_cls = None # must be set by subclasses
def __init__(self, mgr, device_path, partition,
account=None, container=None, obj=None, _datadir=None,
policy=None, use_splice=False, pipe_size=None,
open_expired=False, next_part_power=None, **kwargs):
self._manager = mgr
self._device_path = device_path
self._logger = mgr.logger
self._disk_chunk_size = mgr.disk_chunk_size
self._bytes_per_sync = mgr.bytes_per_sync
self._use_splice = use_splice
self._pipe_size = pipe_size
self._open_expired = open_expired
        # This might look a little hacky, i.e. tracking the number of newly
        # created dirs so that we fsync only that many later. If there is a
        # better way, please suggest.
# Or one could consider getting rid of doing fsyncs on dirs altogether
# and mounting XFS with the 'dirsync' mount option which should result
# in all entry fops being carried out synchronously.
self._dirs_created = 0
self.policy = policy
self.next_part_power = next_part_power
if account and container and obj:
self._name = '/' + '/'.join((account, container, obj))
self._account = account
self._container = container
self._obj = obj
elif account or container or obj:
raise ValueError(
'Received a/c/o args %r, %r, and %r. Either none or all must '
'be provided.' % (account, container, obj))
else:
# gets populated when we read the metadata
self._name = None
self._account = None
self._container = None
self._obj = None
self._tmpdir = join(device_path, get_tmp_dir(policy))
self._ondisk_info = None
self._metadata = None
self._datafile_metadata = None
self._metafile_metadata = None
self._data_file = None
self._fp = None
self._quarantined_dir = None
self._content_length = None
if _datadir:
self._datadir = _datadir
else:
name_hash = hash_path(account, container, obj)
self._datadir = join(
device_path, storage_directory(get_data_dir(policy),
partition, name_hash))
def __repr__(self):
return '<%s datadir=%r>' % (self.__class__.__name__, self._datadir)
@property
def manager(self):
return self._manager
@property
def account(self):
return self._account
@property
def container(self):
return self._container
@property
def obj(self):
return self._obj
@property
def content_length(self):
if self._metadata is None:
raise DiskFileNotOpen()
return self._content_length
@property
def timestamp(self):
if self._metadata is None:
raise DiskFileNotOpen()
return Timestamp(self._metadata.get('X-Timestamp'))
@property
def data_timestamp(self):
if self._datafile_metadata is None:
raise DiskFileNotOpen()
return Timestamp(self._datafile_metadata.get('X-Timestamp'))
@property
def durable_timestamp(self):
"""
Provides the timestamp of the newest data file found in the object
directory.
:return: A Timestamp instance, or None if no data file was found.
:raises DiskFileNotOpen: if the open() method has not been previously
called on this instance.
"""
if self._ondisk_info is None:
raise DiskFileNotOpen()
if self._datafile_metadata:
return Timestamp(self._datafile_metadata.get('X-Timestamp'))
return None
@property
def fragments(self):
return None
@property
def content_type(self):
if self._metadata is None:
raise DiskFileNotOpen()
return self._metadata.get('Content-Type')
@property
def content_type_timestamp(self):
if self._metadata is None:
raise DiskFileNotOpen()
t = self._metadata.get('Content-Type-Timestamp',
self._datafile_metadata.get('X-Timestamp'))
return Timestamp(t)
@classmethod
def from_hash_dir(cls, mgr, hash_dir_path, device_path, partition, policy):
return cls(mgr, device_path, partition, _datadir=hash_dir_path,
policy=policy)
def open(self, modernize=False, current_time=None):
"""
Open the object.
This implementation opens the data file representing the object, reads
the associated metadata in the extended attributes, additionally
combining metadata from fast-POST `.meta` files.
:param modernize: if set, update this diskfile to the latest format.
Currently, this means adding metadata checksums if none are
present.
:param current_time: Unix time used in checking expiration. If not
present, the current time will be used.
.. note::
An implementation is allowed to raise any of the following
exceptions, but is only required to raise `DiskFileNotExist` when
the object representation does not exist.
:raises DiskFileCollision: on name mis-match with metadata
:raises DiskFileNotExist: if the object does not exist
:raises DiskFileDeleted: if the object was previously deleted
        :raises DiskFileQuarantined: if while reading metadata of the file
            some data did not pass cross checks
:returns: itself for use as a context manager
"""
# First figure out if the data directory exists
try:
files = os.listdir(self._datadir)
except OSError as err:
if err.errno == errno.ENOTDIR:
# If there's a file here instead of a directory, quarantine
# it; something's gone wrong somewhere.
raise self._quarantine(
# hack: quarantine_renamer actually renames the directory
# enclosing the filename you give it, but here we just
# want this one file and not its parent.
os.path.join(self._datadir, "made-up-filename"),
"Expected directory, found file at %s" % self._datadir)
elif err.errno == errno.ENODATA:
try:
# We've seen cases where bad sectors lead to ENODATA here
raise self._quarantine(
# similar hack to above
os.path.join(self._datadir, "made-up-filename"),
"Failed to list directory at %s" % self._datadir)
except (OSError, IOError):
# We've *also* seen the bad sectors lead to us needing to
# quarantine the whole suffix, not just the hash dir
raise self._quarantine(
# skip the above hack to rename the suffix
self._datadir,
"Failed to list directory at %s" % self._datadir)
elif err.errno != errno.ENOENT:
raise DiskFileError(
"Error listing directory %s: %s" % (self._datadir, err))
# The data directory does not exist, so the object cannot exist.
files = []
# gather info about the valid files to use to open the DiskFile
file_info = self._get_ondisk_files(files, self.policy)
self._data_file = file_info.get('data_file')
if not self._data_file:
raise self._construct_exception_from_ts_file(**file_info)
try:
self._fp = self._construct_from_data_file(
current_time=current_time, modernize=modernize, **file_info)
except IOError as e:
if e.errno == errno.ENODATA:
raise self._quarantine(
file_info['data_file'],
"Failed to open %s: %s" % (file_info['data_file'], e))
# This method must populate the internal _metadata attribute.
self._metadata = self._metadata or {}
return self
def __enter__(self):
"""
Context enter.
.. note::
            An implementation shall raise `DiskFileNotOpen` when the caller
            has not previously invoked the
            :func:`swift.obj.diskfile.DiskFile.open` method.
"""
if self._metadata is None:
raise DiskFileNotOpen()
return self
def __exit__(self, t, v, tb):
"""
Context exit.
.. note::
This method will be invoked by the object server while servicing
the REST API *before* the object has actually been read. It is the
responsibility of the implementation to properly handle that.
"""
if self._fp is not None:
fp, self._fp = self._fp, None
fp.close()
def _quarantine(self, data_file, msg):
"""
Quarantine a file; responsible for incrementing the associated logger's
count of quarantines.
:param data_file: full path of data file to quarantine
:param msg: reason for quarantining to be included in the exception
:returns: DiskFileQuarantined exception object
"""
self._quarantined_dir = self.manager.quarantine_renamer(
self._device_path, data_file)
self._logger.warning("Quarantined object %s: %s" % (
data_file, msg))
self._logger.increment('quarantines')
return DiskFileQuarantined(msg)
def _get_ondisk_files(self, files, policy=None):
"""
Determine the on-disk files to use.
:param files: a list of files in the object's dir
:param policy: storage policy used to store the files
:returns: dict of files to use having keys 'data_file', 'ts_file',
'meta_file'
"""
raise NotImplementedError
def _construct_exception_from_ts_file(self, ts_file, **kwargs):
"""
If a tombstone is present it means the object is considered
deleted. We just need to pull the metadata from the tombstone file
which has the timestamp to construct the deleted exception. If there
was no tombstone, just report it does not exist.
:param ts_file: the tombstone file name found on disk
:returns: DiskFileDeleted if the ts_file was provided, else
DiskFileNotExist
"""
if not ts_file:
exc = DiskFileNotExist()
else:
try:
metadata = self._failsafe_read_metadata(ts_file, ts_file)
except DiskFileQuarantined:
# If the tombstone's corrupted, quarantine it and pretend it
# wasn't there
exc = DiskFileNotExist()
else:
# All well and good that we have found a tombstone file, but
# we don't have a data file so we are just going to raise an
# exception that we could not find the object, providing the
# tombstone's timestamp.
exc = DiskFileDeleted(metadata=metadata)
return exc
def validate_metadata(self):
return ('Content-Length' in self._datafile_metadata)
def _verify_name_matches_hash(self, data_file):
"""
:param data_file: data file name, used when quarantines occur
"""
hash_from_fs = os.path.basename(self._datadir)
hash_from_name = hash_path(self._name.lstrip('/'))
if hash_from_fs != hash_from_name:
raise self._quarantine(
data_file,
"Hash of name in metadata does not match directory name")
def _verify_data_file(self, data_file, fp, current_time):
"""
        Verify that the metadata's name value matches what we think the object
        is named, and that the on-disk file is consistent with its metadata.
        :param data_file: data file name being considered, used when
            quarantines occur
:param fp: open file pointer so that we can `fstat()` the file to
verify the on-disk size with Content-Length metadata value
:param current_time: Unix time used in checking expiration
:raises DiskFileCollision: if the metadata stored name does not match
the referenced name of the file
:raises DiskFileExpired: if the object has expired
:raises DiskFileQuarantined: if data inconsistencies were detected
between the metadata and the file-system
metadata
"""
try:
mname = self._metadata['name']
except KeyError:
raise self._quarantine(data_file, "missing name metadata")
else:
if mname != self._name:
self._logger.error(
'Client path %(client)s does not match '
'path stored in object metadata %(meta)s',
{'client': self._name, 'meta': mname})
raise DiskFileCollision('Client path does not match path '
'stored in object metadata')
try:
x_delete_at = int(self._metadata['X-Delete-At'])
except KeyError:
pass
except ValueError:
# Quarantine, the x-delete-at key is present but not an
# integer.
raise self._quarantine(
data_file, "bad metadata x-delete-at value %s" % (
self._metadata['X-Delete-At']))
else:
if current_time is None:
current_time = time.time()
if x_delete_at <= current_time and not self._open_expired:
raise DiskFileExpired(metadata=self._metadata)
try:
metadata_size = int(self._metadata['Content-Length'])
except KeyError:
raise self._quarantine(
data_file, "missing content-length in metadata")
except ValueError:
# Quarantine, the content-length key is present but not an
# integer.
raise self._quarantine(
data_file, "bad metadata content-length value %s" % (
self._metadata['Content-Length']))
fd = fp.fileno()
try:
statbuf = os.fstat(fd)
except OSError as err:
# Quarantine, we can't successfully stat the file.
raise self._quarantine(data_file, "not stat-able: %s" % err)
else:
obj_size = statbuf.st_size
if obj_size != metadata_size:
raise self._quarantine(
data_file, "metadata content-length %s does"
" not match actual object size %s" % (
metadata_size, statbuf.st_size))
self._content_length = obj_size
return obj_size
def _failsafe_read_metadata(self, source, quarantine_filename=None,
add_missing_checksum=False):
"""
Read metadata from source object file. In case of failure, quarantine
the file.
Takes source and filename separately so we can read from an open
file if we have one.
:param source: file descriptor or filename to load the metadata from
        :param quarantine_filename: full path of the file to quarantine if
            reading the metadata fails
:param add_missing_checksum: if True and no metadata checksum is
present, generate one and write it down
"""
try:
return read_metadata(source, add_missing_checksum)
except (DiskFileXattrNotSupported, DiskFileNotExist):
raise
except DiskFileBadMetadataChecksum as err:
raise self._quarantine(quarantine_filename, str(err))
except Exception as err:
raise self._quarantine(
quarantine_filename,
"Exception reading metadata: %s" % err)
def _merge_content_type_metadata(self, ctype_file):
"""
When a second .meta file is providing the most recent Content-Type
metadata then merge it into the metafile_metadata.
:param ctype_file: An on-disk .meta file
"""
ctypefile_metadata = self._failsafe_read_metadata(
ctype_file, ctype_file)
if ('Content-Type' in ctypefile_metadata
and (ctypefile_metadata.get('Content-Type-Timestamp', '') >
self._metafile_metadata.get('Content-Type-Timestamp', ''))
and (ctypefile_metadata.get('Content-Type-Timestamp', '') >
self.data_timestamp)):
self._metafile_metadata['Content-Type'] = \
ctypefile_metadata['Content-Type']
self._metafile_metadata['Content-Type-Timestamp'] = \
ctypefile_metadata.get('Content-Type-Timestamp')
def _construct_from_data_file(self, data_file, meta_file, ctype_file,
current_time, modernize=False,
**kwargs):
"""
Open the `.data` file to fetch its metadata, and fetch the metadata
from fast-POST `.meta` files as well if any exist, merging them
properly.
:param data_file: on-disk `.data` file being considered
:param meta_file: on-disk fast-POST `.meta` file being considered
:param ctype_file: on-disk fast-POST `.meta` file being considered that
contains content-type and content-type timestamp
:param current_time: Unix time used in checking expiration
:param modernize: whether to update the on-disk files to the newest
format
:returns: an opened data file pointer
:raises DiskFileError: various exceptions from
:func:`swift.obj.diskfile.DiskFile._verify_data_file`
"""
try:
fp = open(data_file, 'rb')
except IOError as e:
if e.errno == errno.ENOENT:
raise DiskFileNotExist()
raise
self._datafile_metadata = self._failsafe_read_metadata(
fp, data_file,
add_missing_checksum=modernize)
self._metadata = {}
if meta_file:
self._metafile_metadata = self._failsafe_read_metadata(
meta_file, meta_file,
add_missing_checksum=modernize)
if ctype_file and ctype_file != meta_file:
self._merge_content_type_metadata(ctype_file)
sys_metadata = dict(
[(key, val) for key, val in self._datafile_metadata.items()
if key.lower() in (RESERVED_DATAFILE_META |
DATAFILE_SYSTEM_META)
or is_sys_meta('object', key)])
self._metadata.update(self._metafile_metadata)
self._metadata.update(sys_metadata)
# diskfile writer added 'name' to metafile, so remove it here
self._metafile_metadata.pop('name', None)
# TODO: the check for Content-Type is only here for tests that
# create .data files without Content-Type
if ('Content-Type' in self._datafile_metadata and
(self.data_timestamp >
self._metafile_metadata.get('Content-Type-Timestamp'))):
self._metadata['Content-Type'] = \
self._datafile_metadata['Content-Type']
self._metadata.pop('Content-Type-Timestamp', None)
else:
self._metadata.update(self._datafile_metadata)
if self._name is None:
# If we don't know our name, we were just given a hash dir at
# instantiation, so we'd better validate that the name hashes back
# to us
self._name = self._metadata['name']
self._verify_name_matches_hash(data_file)
self._verify_data_file(data_file, fp, current_time)
return fp
def get_metafile_metadata(self):
"""
Provide the metafile metadata for a previously opened object as a
dictionary. This is metadata that was written by a POST and does not
include any persistent metadata that was set by the original PUT.
:returns: object's .meta file metadata dictionary, or None if there is
no .meta file
:raises DiskFileNotOpen: if the
:func:`swift.obj.diskfile.DiskFile.open` method was not previously
invoked
"""
if self._metadata is None:
raise DiskFileNotOpen()
return self._metafile_metadata
def get_datafile_metadata(self):
"""
Provide the datafile metadata for a previously opened object as a
dictionary. This is metadata that was included when the object was
first PUT, and does not include metadata set by any subsequent POST.
:returns: object's datafile metadata dictionary
:raises DiskFileNotOpen: if the
:func:`swift.obj.diskfile.DiskFile.open` method was not previously
invoked
"""
if self._datafile_metadata is None:
raise DiskFileNotOpen()
return self._datafile_metadata
def get_metadata(self):
"""
Provide the metadata for a previously opened object as a dictionary.
:returns: object's metadata dictionary
:raises DiskFileNotOpen: if the
:func:`swift.obj.diskfile.DiskFile.open` method was not previously
invoked
"""
if self._metadata is None:
raise DiskFileNotOpen()
return self._metadata
def read_metadata(self, current_time=None):
"""
Return the metadata for an object without requiring the caller to open
the object first.
:param current_time: Unix time used in checking expiration. If not
present, the current time will be used.
:returns: metadata dictionary for an object
:raises DiskFileError: this implementation will raise the same
errors as the `open()` method.
"""
with self.open(current_time=current_time):
return self.get_metadata()
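    # Illustrative usage (hypothetical names):
    #
    #   df = mgr.get_diskfile('sda1', '1234', 'AUTH_test', 'c', 'o', policy)
    #   metadata = df.read_metadata()
    #
    # which opens the object just long enough to return its merged metadata.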
def reader(self, keep_cache=False,
_quarantine_hook=lambda m: None):
"""
Return a :class:`swift.common.swob.Response` class compatible
"`app_iter`" object as defined by
:class:`swift.obj.diskfile.DiskFileReader`.
For this implementation, the responsibility of closing the open file
is passed to the :class:`swift.obj.diskfile.DiskFileReader` object.
:param keep_cache: caller's preference for keeping data read in the
OS buffer cache
:param _quarantine_hook: 1-arg callable called when obj quarantined;
the arg is the reason for quarantine.
Default is to ignore it.
Not needed by the REST layer.
:returns: a :class:`swift.obj.diskfile.DiskFileReader` object
"""
dr = self.reader_cls(
self._fp, self._data_file, int(self._metadata['Content-Length']),
self._metadata['ETag'], self._disk_chunk_size,
self._manager.keep_cache_size, self._device_path, self._logger,
use_splice=self._use_splice, quarantine_hook=_quarantine_hook,
pipe_size=self._pipe_size, diskfile=self, keep_cache=keep_cache)
# At this point the reader object is now responsible for closing
# the file pointer.
self._fp = None
return dr
def writer(self, size=None):
return self.writer_cls(self._name, self._datadir, size,
self._bytes_per_sync, self,
self.next_part_power)
@contextmanager
def create(self, size=None):
"""
Context manager to create a file. We create a temporary file first, and
then return a DiskFileWriter object to encapsulate the state.
.. note::
An implementation is not required to perform on-disk
preallocations even if the parameter is specified. But if it does
and it fails, it must raise a `DiskFileNoSpace` exception.
:param size: optional initial size of file to explicitly allocate on
disk
:raises DiskFileNoSpace: if a size is specified and allocation fails
"""
dfw = self.writer(size)
try:
yield dfw.open()
finally:
dfw.close()
def write_metadata(self, metadata):
"""
Write a block of metadata to an object without requiring the caller to
create the object first. Supports fast-POST behavior semantics.
:param metadata: dictionary of metadata to be associated with the
object
:raises DiskFileError: this implementation will raise the same
errors as the `create()` method.
"""
with self.create() as writer:
writer._extension = '.meta'
writer.put(metadata)
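    # Illustrative sketch of a fast-POST style caller (an assumption for
    # clarity, not taken from the original source): the metadata dict is
    # written out as a new <timestamp>.meta file alongside the existing .data.
    #
    #   metadata = {
    #       'X-Timestamp': Timestamp(time.time()).internal,
    #       'X-Object-Meta-Color': 'blue',
    #   }
    #   disk_file.write_metadata(metadata)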
def delete(self, timestamp):
"""
Delete the object.
This implementation creates a tombstone file using the given
timestamp, and removes any older versions of the object file. Any
file that has an older timestamp than timestamp will be deleted.
.. note::
An implementation is free to use or ignore the timestamp
parameter.
:param timestamp: timestamp to compare with each file
:raises DiskFileError: this implementation will raise the same
errors as the `create()` method.
"""
# this is dumb, only tests send in strings
timestamp = Timestamp(timestamp)
with self.create() as deleter:
deleter._extension = '.ts'
deleter.put({'X-Timestamp': timestamp.internal})
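    # Illustrative sketch (timestamp value assumed): deleting at timestamp t
    # writes a tombstone named <t.internal>.ts into the hash directory, which
    # obsoletes any older .data/.meta files for the object.
    #
    #   disk_file.delete(Timestamp('1525354555.65758'))
    #   # -> creates 1525354555.65758.ts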
class DiskFileReader(BaseDiskFileReader):
pass
class DiskFileWriter(BaseDiskFileWriter):
def put(self, metadata):
"""
Finalize writing the file on disk.
:param metadata: dictionary of metadata to be associated with the
object
"""
super(DiskFileWriter, self)._put(metadata, True)
class DiskFile(BaseDiskFile):
reader_cls = DiskFileReader
writer_cls = DiskFileWriter
def _get_ondisk_files(self, files, policy=None):
self._ondisk_info = self.manager.get_ondisk_files(
files, self._datadir, policy=policy)
return self._ondisk_info
class DiskFileManager(BaseDiskFileManager):
diskfile_cls = DiskFile
policy = REPL_POLICY
def _process_ondisk_files(self, exts, results, **kwargs):
"""
Implement replication policy specific handling of .data files.
:param exts: dict of lists of file info, keyed by extension
:param results: a dict that may be updated with results
"""
if exts.get('.data'):
for ext in exts.keys():
if ext == '.data':
# older .data's are obsolete
exts[ext], obsolete = self._split_gte_timestamp(
exts[ext], exts['.data'][0]['timestamp'])
else:
# other files at same or older timestamp as most recent
# data are obsolete
exts[ext], obsolete = self._split_gt_timestamp(
exts[ext], exts['.data'][0]['timestamp'])
results.setdefault('obsolete', []).extend(obsolete)
# set results
results['data_info'] = exts['.data'][0]
# .meta files *may* be ready for reclaim if there is no data
if exts.get('.meta') and not exts.get('.data'):
results.setdefault('possible_reclaim', []).extend(
exts.get('.meta'))
def _update_suffix_hashes(self, hashes, ondisk_info):
"""
Applies policy specific updates to the given dict of md5 hashes for
the given ondisk_info.
:param hashes: a dict of md5 hashes to be updated
:param ondisk_info: a dict describing the state of ondisk files, as
returned by get_ondisk_files
"""
if 'data_info' in ondisk_info:
file_info = ondisk_info['data_info']
hashes[None].update(
file_info['timestamp'].internal + file_info['ext'])
def _hash_suffix(self, path, policy=None):
"""
Performs reclamation and returns an md5 of all (remaining) files.
:param path: full path to directory
:param policy: storage policy used to store the files
:raises PathNotDir: if given path is not a valid directory
:raises OSError: for non-ENOTDIR errors
:returns: md5 of files in suffix
"""
hashes = self._hash_suffix_dir(path, policy)
return hashes[None].hexdigest()
class ECDiskFileReader(BaseDiskFileReader):
def __init__(self, fp, data_file, obj_size, etag,
disk_chunk_size, keep_cache_size, device_path, logger,
quarantine_hook, use_splice, pipe_size, diskfile,
keep_cache=False):
super(ECDiskFileReader, self).__init__(
fp, data_file, obj_size, etag,
disk_chunk_size, keep_cache_size, device_path, logger,
quarantine_hook, use_splice, pipe_size, diskfile, keep_cache)
self.frag_buf = None
self.frag_offset = 0
self.frag_size = self._diskfile.policy.fragment_size
def _init_checks(self):
super(ECDiskFileReader, self)._init_checks()
# for a multi-range GET this will be called at the start of each range;
# only initialise the frag_buf for reads starting at 0.
# TODO: reset frag buf to '' if tell() shows that start is on a frag
# boundary so that we check frags selected by a range not starting at 0
if self._started_at_0:
self.frag_buf = b''
else:
self.frag_buf = None
def _check_frag(self, frag):
if not frag:
return
if not isinstance(frag, six.binary_type):
            # ECInvalidParameter can be returned if the frag violates the
            # input format, so for safety check whether the input chunk is
            # binary; if it is not, log a warning rather than risk
            # quarantining a valid fragment archive.
self._diskfile._logger.warn(
'Unexpected fragment data type (not quarantined) '
'%(datadir)s: %(type)s at offset 0x%(offset)x',
{'datadir': self._diskfile._datadir,
'type': type(frag),
'offset': self.frag_offset})
return
try:
self._diskfile.policy.pyeclib_driver.get_metadata(frag)
except (ECInvalidFragmentMetadata, ECBadFragmentChecksum,
ECInvalidParameter):
# Any of these exceptions may be returned from ECDriver with a
# corrupted fragment.
msg = 'Invalid EC metadata at offset 0x%x' % self.frag_offset
self._quarantine(msg)
# We have to terminate the response iter with an exception but it
# can't be StopIteration, this will produce a STDERR traceback in
# eventlet.wsgi if you have eventlet_debug turned on; but any
# attempt to finish the iterator cleanly won't trigger the needful
# error handling cleanup - failing to do so, and yet still failing
# to deliver all promised bytes will hang the HTTP connection
raise DiskFileQuarantined(msg)
except ECDriverError as err:
self._diskfile._logger.warn(
'Problem checking EC fragment %(datadir)s: %(err)s',
{'datadir': self._diskfile._datadir, 'err': err})
def _update_checks(self, chunk):
super(ECDiskFileReader, self)._update_checks(chunk)
if self.frag_buf is not None:
self.frag_buf += chunk
cursor = 0
while len(self.frag_buf) >= cursor + self.frag_size:
self._check_frag(self.frag_buf[cursor:cursor + self.frag_size])
cursor += self.frag_size
self.frag_offset += self.frag_size
if cursor:
self.frag_buf = self.frag_buf[cursor:]
def _handle_close_quarantine(self):
super(ECDiskFileReader, self)._handle_close_quarantine()
self._check_frag(self.frag_buf)
class ECDiskFileWriter(BaseDiskFileWriter):
def _finalize_durable(self, data_file_path, durable_data_file_path,
timestamp):
exc = None
new_data_file_path = new_durable_data_file_path = None
if self.next_part_power:
new_data_file_path = replace_partition_in_path(
self.manager.devices, data_file_path, self.next_part_power)
new_durable_data_file_path = replace_partition_in_path(
self.manager.devices, durable_data_file_path,
self.next_part_power)
try:
try:
os.rename(data_file_path, durable_data_file_path)
fsync_dir(self._datadir)
if self.next_part_power and \
data_file_path != new_data_file_path:
try:
os.rename(new_data_file_path,
new_durable_data_file_path)
except OSError as exc:
self.manager.logger.exception(
'Renaming new path %s to %s failed: %s',
new_data_file_path, new_durable_data_file_path,
exc)
except (OSError, IOError) as err:
if err.errno == errno.ENOENT:
files = os.listdir(self._datadir)
results = self.manager.get_ondisk_files(
files, self._datadir,
frag_index=self._diskfile._frag_index,
policy=self._diskfile.policy)
# We "succeeded" if another writer cleaned up our data
ts_info = results.get('ts_info')
durables = results.get('durable_frag_set', [])
if ts_info and ts_info['timestamp'] > timestamp:
return
elif any(frag['timestamp'] >= timestamp
for frag in durables):
return
if err.errno not in (errno.ENOSPC, errno.EDQUOT):
# re-raise to catch all handler
raise
params = {'file': durable_data_file_path, 'err': err}
self.manager.logger.exception(
'No space left on device for %(file)s (%(err)s)',
params)
exc = DiskFileNoSpace(
'No space left on device for %(file)s (%(err)s)' % params)
else:
try:
self.manager.cleanup_ondisk_files(self._datadir)
except OSError as os_err:
self.manager.logger.exception(
'Problem cleaning up %(datadir)s (%(err)s)',
{'datadir': self._datadir, 'err': os_err})
self._part_power_cleanup(
durable_data_file_path, new_durable_data_file_path)
except Exception as err:
params = {'file': durable_data_file_path, 'err': err}
self.manager.logger.exception(
'Problem making data file durable %(file)s (%(err)s)',
params)
exc = DiskFileError(
'Problem making data file durable %(file)s (%(err)s)' % params)
if exc:
raise exc
def commit(self, timestamp):
"""
Finalize put by renaming the object data file to include a durable
marker. We do this for EC policy because it requires a 2-phase put
commit confirmation.
:param timestamp: object put timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
:raises DiskFileError: if the diskfile frag_index has not been set
(either during initialisation or a call to put())
"""
data_file_path = join(
self._datadir, self.manager.make_on_disk_filename(
timestamp, '.data', self._diskfile._frag_index))
durable_data_file_path = os.path.join(
self._datadir, self.manager.make_on_disk_filename(
timestamp, '.data', self._diskfile._frag_index, durable=True))
tpool.execute(
self._finalize_durable, data_file_path, durable_data_file_path,
timestamp)
def put(self, metadata):
"""
The only difference between this method and the replication policy
DiskFileWriter method is adding the frag index to the metadata.
:param metadata: dictionary of metadata to be associated with object
"""
fi = None
cleanup = True
if self._extension == '.data':
# generally we treat the fragment index provided in metadata as
# canon, but if it's unavailable (e.g. tests) it's reasonable to
# use the frag_index provided at instantiation. Either way make
# sure that the fragment index is included in object sysmeta.
fi = metadata.setdefault('X-Object-Sysmeta-Ec-Frag-Index',
self._diskfile._frag_index)
fi = self.manager.validate_fragment_index(
fi, self._diskfile.policy)
self._diskfile._frag_index = fi
# defer cleanup until commit() writes makes diskfile durable
cleanup = False
super(ECDiskFileWriter, self)._put(metadata, cleanup, frag_index=fi)
class ECDiskFile(BaseDiskFile):
reader_cls = ECDiskFileReader
writer_cls = ECDiskFileWriter
def __init__(self, *args, **kwargs):
super(ECDiskFile, self).__init__(*args, **kwargs)
frag_index = kwargs.get('frag_index')
self._frag_index = None
if frag_index is not None:
self._frag_index = self.manager.validate_fragment_index(
frag_index, self.policy)
self._frag_prefs = self._validate_frag_prefs(kwargs.get('frag_prefs'))
self._durable_frag_set = None
def _validate_frag_prefs(self, frag_prefs):
"""
Validate that frag_prefs is a list of dicts containing expected keys
'timestamp' and 'exclude'. Convert timestamp values to Timestamp
instances and validate that exclude values are valid fragment indexes.
:param frag_prefs: data to validate, should be a list of dicts.
:raise DiskFileError: if the frag_prefs data is invalid.
:return: a list of dicts with converted and validated values.
"""
# We *do* want to preserve an empty frag_prefs list because it
# indicates that a durable file is not required.
if frag_prefs is None:
return None
try:
return [
{'timestamp': Timestamp(pref['timestamp']),
'exclude': [self.manager.validate_fragment_index(fi)
for fi in pref['exclude']]}
for pref in frag_prefs]
except ValueError as e:
raise DiskFileError(
'Bad timestamp in frag_prefs: %r: %s'
% (frag_prefs, e))
except DiskFileError as e:
raise DiskFileError(
'Bad fragment index in frag_prefs: %r: %s'
% (frag_prefs, e))
except (KeyError, TypeError) as e:
raise DiskFileError(
'Bad frag_prefs: %r: %s' % (frag_prefs, e))
def validate_metadata(self):
required_metadata = [
'Content-Length',
'X-Object-Sysmeta-Ec-Frag-Index',
'X-Object-Sysmeta-Ec-Etag',
]
for header in required_metadata:
if not self._datafile_metadata.get(header):
return False
return True
@property
def durable_timestamp(self):
"""
Provides the timestamp of the newest durable file found in the object
directory.
:return: A Timestamp instance, or None if no durable file was found.
:raises DiskFileNotOpen: if the open() method has not been previously
called on this instance.
"""
if self._ondisk_info is None:
raise DiskFileNotOpen()
if self._ondisk_info.get('durable_frag_set'):
return self._ondisk_info['durable_frag_set'][0]['timestamp']
return None
@property
def fragments(self):
"""
Provides information about all fragments that were found in the object
directory, including fragments without a matching durable file, and
including any fragment chosen to construct the opened diskfile.
:return: A dict mapping <Timestamp instance> -> <list of frag indexes>,
or None if the diskfile has not been opened or no fragments
were found.
"""
if self._ondisk_info:
frag_sets = self._ondisk_info['frag_sets']
return dict([(ts, [info['frag_index'] for info in frag_set])
for ts, frag_set in frag_sets.items()])
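    # Illustrative sketch of the returned mapping (timestamps and indexes are
    # assumed values): two fragment archives found at t1 and one at t2 would
    # be reported as
    #
    #   {Timestamp(t1): [0, 2], Timestamp(t2): [1]}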
def _get_ondisk_files(self, files, policy=None):
"""
The only difference between this method and the replication policy
DiskFile method is passing in the frag_index and frag_prefs kwargs to
our manager's get_ondisk_files method.
:param files: list of file names
:param policy: storage policy used to store the files
"""
self._ondisk_info = self.manager.get_ondisk_files(
files, self._datadir, frag_index=self._frag_index,
frag_prefs=self._frag_prefs, policy=policy)
return self._ondisk_info
def purge(self, timestamp, frag_index, nondurable_purge_delay=0,
meta_timestamp=None):
"""
Remove a tombstone file matching the specified timestamp or
datafile matching the specified timestamp and fragment index
from the object directory.
This provides the EC reconstructor/ssync process with a way to
remove a tombstone or fragment from a handoff node after
reverting it to its primary node.
The hash will be invalidated, and if empty the hsh_path will
be removed immediately.
:param timestamp: the object timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
:param frag_index: fragment archive index, must be
a whole number or None.
:param nondurable_purge_delay: only remove a non-durable data file if
it's been on disk longer than this many seconds.
:param meta_timestamp: if not None then remove any meta file with this
timestamp
"""
purge_file = self.manager.make_on_disk_filename(
timestamp, ext='.ts')
purge_path = os.path.join(self._datadir, purge_file)
remove_file(purge_path)
if meta_timestamp is not None:
purge_file = self.manager.make_on_disk_filename(
meta_timestamp, ext='.meta')
purge_path = os.path.join(self._datadir, purge_file)
remove_file(purge_path)
if frag_index is not None:
# data file may or may not be durable so try removing both filename
# possibilities
purge_file = self.manager.make_on_disk_filename(
timestamp, ext='.data', frag_index=frag_index)
purge_path = os.path.join(self._datadir, purge_file)
if is_file_older(purge_path, nondurable_purge_delay):
remove_file(purge_path)
purge_file = self.manager.make_on_disk_filename(
timestamp, ext='.data', frag_index=frag_index, durable=True)
purge_path = os.path.join(self._datadir, purge_file)
remove_file(purge_path)
remove_directory(self._datadir)
self.manager.invalidate_hash(dirname(self._datadir))
class ECDiskFileManager(BaseDiskFileManager):
diskfile_cls = ECDiskFile
policy = EC_POLICY
def validate_fragment_index(self, frag_index, policy=None):
"""
Return int representation of frag_index, or raise a DiskFileError if
frag_index is not a whole number.
:param frag_index: a fragment archive index
:param policy: storage policy used to validate the index against
"""
try:
frag_index = int(str(frag_index))
except (ValueError, TypeError) as e:
raise DiskFileError(
'Bad fragment index: %s: %s' % (frag_index, e))
if frag_index < 0:
raise DiskFileError(
'Fragment index must not be negative: %s' % frag_index)
if policy and frag_index >= policy.ec_ndata + policy.ec_nparity:
msg = 'Fragment index must be less than %d for a %d+%d policy: %s'
raise DiskFileError(msg % (
policy.ec_ndata + policy.ec_nparity,
policy.ec_ndata, policy.ec_nparity, frag_index))
return frag_index
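    # Illustrative sketch (values assumed) for a 4+2 EC policy, where only
    # whole numbers in the range 0..5 are acceptable:
    #
    #   mgr.validate_fragment_index('3', policy)   # -> 3
    #   mgr.validate_fragment_index(6, policy)     # raises DiskFileError
    #   mgr.validate_fragment_index(None)          # raises DiskFileError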
def make_on_disk_filename(self, timestamp, ext=None, frag_index=None,
ctype_timestamp=None, durable=False, *a, **kw):
"""
Returns the EC specific filename for given timestamp.
:param timestamp: the object timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
:param ext: an optional string representing a file extension to be
appended to the returned file name
:param frag_index: a fragment archive index, used with .data extension
only, must be a whole number.
:param ctype_timestamp: an optional content-type timestamp, an instance
of :class:`~swift.common.utils.Timestamp`
:param durable: if True then include a durable marker in data filename.
:returns: a file name
:raises DiskFileError: if ext=='.data' and the kwarg frag_index is not
a whole number
"""
if ext == '.data':
# for datafiles only we encode the fragment index in the filename
# to allow archives of different indexes to temporarily be stored
# on the same node in certain situations
frag_index = self.validate_fragment_index(frag_index)
rv = timestamp.internal + '#' + str(frag_index)
if durable:
rv += '#d'
return '%s%s' % (rv, ext)
return super(ECDiskFileManager, self).make_on_disk_filename(
timestamp, ext, ctype_timestamp, *a, **kw)
def parse_on_disk_filename(self, filename, policy):
"""
Returns timestamp(s) and other info extracted from a policy specific
file name. For EC policy the data file name includes a fragment index
and possibly a durable marker, both of which must be stripped off
to retrieve the timestamp.
:param filename: the file name including extension
:returns: a dict, with keys for timestamp, frag_index, durable, ext and
ctype_timestamp:
* timestamp is a :class:`~swift.common.utils.Timestamp`
* frag_index is an int or None
            * ctype_timestamp is a :class:`~swift.common.utils.Timestamp` or
              None for .meta files; for all other file types it is None
* ext is a string, the file extension including the leading dot or
the empty string if the filename has no extension
* durable is a boolean that is True if the filename is a data file
that includes a durable marker
:raises DiskFileError: if any part of the filename is not able to be
validated.
"""
frag_index = None
float_frag, ext = splitext(filename)
if ext == '.data':
parts = float_frag.split('#')
try:
timestamp = Timestamp(parts[0])
except ValueError:
raise DiskFileError('Invalid Timestamp value in filename %r'
% filename)
# it is an error for an EC data file to not have a valid
# fragment index
try:
frag_index = parts[1]
except IndexError:
# expect validate_fragment_index raise DiskFileError
pass
frag_index = self.validate_fragment_index(frag_index, policy)
try:
durable = parts[2] == 'd'
except IndexError:
durable = False
return {
'timestamp': timestamp,
'frag_index': frag_index,
'ext': ext,
'ctype_timestamp': None,
'durable': durable
}
rv = super(ECDiskFileManager, self).parse_on_disk_filename(
filename, policy)
rv['frag_index'] = None
return rv
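    # Illustrative round trip (timestamp and index are assumed values): the
    # fragment index and durable marker are encoded in the .data filename and
    # recovered by parse_on_disk_filename().
    #
    #   mgr.make_on_disk_filename(Timestamp('1525354555.65758'), ext='.data',
    #                             frag_index=3, durable=True)
    #   # -> '1525354555.65758#3#d.data'
    #   mgr.parse_on_disk_filename('1525354555.65758#3#d.data', policy)
    #   # -> {'timestamp': Timestamp('1525354555.65758'), 'frag_index': 3,
    #   #     'ext': '.data', 'ctype_timestamp': None, 'durable': True}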
def _process_ondisk_files(self, exts, results, frag_index=None,
frag_prefs=None, **kwargs):
"""
Implement EC policy specific handling of .data and legacy .durable
files.
If a frag_prefs keyword arg is provided then its value may determine
which fragment index at which timestamp is used to construct the
diskfile. The value of frag_prefs should be a list. Each item in the
frag_prefs list should be a dict that describes per-timestamp
preferences using the following items:
* timestamp: An instance of :class:`~swift.common.utils.Timestamp`.
* exclude: A list of valid fragment indexes (i.e. whole numbers)
that should be EXCLUDED when choosing a fragment at the
timestamp. This list may be empty.
For example::
[
{'timestamp': <Timestamp instance>, 'exclude': [1,3]},
{'timestamp': <Timestamp instance>, 'exclude': []}
]
The order of per-timestamp dicts in the frag_prefs list is significant
and indicates descending preference for fragments from each timestamp
i.e. a fragment that satisfies the first per-timestamp preference in
the frag_prefs will be preferred over a fragment that satisfies a
        subsequent per-timestamp preference, and so on.
If a timestamp is not cited in any per-timestamp preference dict then
it is assumed that any fragment index at that timestamp may be used to
construct the diskfile.
When a frag_prefs arg is provided, including an empty list, there is no
requirement for there to be a durable file at the same timestamp as a
        data file that is chosen to construct the disk file.
:param exts: dict of lists of file info, keyed by extension
:param results: a dict that may be updated with results
:param frag_index: if set, search for a specific fragment index .data
file, otherwise accept the first valid .data file.
:param frag_prefs: if set, search for any fragment index .data file
that satisfies the frag_prefs.
"""
durable_info = None
if exts.get('.durable'):
# in older versions, separate .durable files were used to indicate
# the durability of data files having the same timestamp
durable_info = exts['.durable'][0]
# Split the list of .data files into sets of frags having the same
# timestamp, identifying the durable and newest sets (if any) as we go.
# To do this we can take advantage of the list of .data files being
# reverse-time ordered. Keep the resulting per-timestamp frag sets in
# a frag_sets dict mapping a Timestamp instance -> frag_set.
all_frags = exts.get('.data')
frag_sets = {}
durable_frag_set = None
while all_frags:
frag_set, all_frags = self._split_gte_timestamp(
all_frags, all_frags[0]['timestamp'])
# sort the frag set into ascending frag_index order
frag_set.sort(key=lambda info: info['frag_index'])
timestamp = frag_set[0]['timestamp']
frag_sets[timestamp] = frag_set
for frag in frag_set:
# a data file marked as durable may supersede a legacy durable
# file if it is newer
if frag['durable']:
if (not durable_info or
durable_info['timestamp'] < timestamp):
# this frag defines the durable timestamp
durable_info = frag
break
if durable_info and durable_info['timestamp'] == timestamp:
durable_frag_set = frag_set
# a data frag filename may not have the #d part if durability
# is defined by a legacy .durable, so always mark all data
# frags as durable here
for frag in frag_set:
frag['durable'] = True
break # ignore frags that are older than durable timestamp
# Choose which frag set to use
chosen_frag_set = None
if frag_prefs is not None:
candidate_frag_sets = dict(frag_sets)
# For each per-timestamp frag preference dict, do we have any frag
# indexes at that timestamp that are not in the exclusion list for
# that timestamp? If so choose the highest of those frag_indexes.
for ts, exclude_indexes in [
(ts_pref['timestamp'], ts_pref['exclude'])
for ts_pref in frag_prefs
if ts_pref['timestamp'] in candidate_frag_sets]:
available_indexes = [info['frag_index']
for info in candidate_frag_sets[ts]]
acceptable_indexes = list(set(available_indexes) -
set(exclude_indexes))
if acceptable_indexes:
chosen_frag_set = candidate_frag_sets[ts]
# override any frag_index passed in as method param with
# the last (highest) acceptable_index
frag_index = acceptable_indexes[-1]
break
else:
# this frag_set has no acceptable frag index so
# remove it from the candidate frag_sets
candidate_frag_sets.pop(ts)
else:
# No acceptable frag index was found at any timestamp mentioned
# in the frag_prefs. Choose the newest remaining candidate
# frag_set - the proxy can decide if it wants the returned
# fragment with that time.
if candidate_frag_sets:
ts_newest = sorted(candidate_frag_sets.keys())[-1]
chosen_frag_set = candidate_frag_sets[ts_newest]
else:
chosen_frag_set = durable_frag_set
# Select a single chosen frag from the chosen frag_set, by either
# matching against a specified frag_index or taking the highest index.
chosen_frag = None
if chosen_frag_set:
if frag_index is not None:
# search the frag set to find the exact frag_index
for info in chosen_frag_set:
if info['frag_index'] == frag_index:
chosen_frag = info
break
else:
chosen_frag = chosen_frag_set[-1]
# If we successfully found a frag then set results
if chosen_frag:
results['data_info'] = chosen_frag
results['durable_frag_set'] = durable_frag_set
results['chosen_frag_set'] = chosen_frag_set
if chosen_frag_set != durable_frag_set:
# hide meta files older than data file but newer than durable
# file so they don't get marked as obsolete (we already threw
# out .meta's that are older than a .durable)
exts['.meta'], _older = self._split_gt_timestamp(
exts['.meta'], chosen_frag['timestamp'])
results['frag_sets'] = frag_sets
# Mark everything older than most recent durable data as obsolete
# and remove from the exts dict.
if durable_info:
for ext in exts.keys():
exts[ext], older = self._split_gte_timestamp(
exts[ext], durable_info['timestamp'])
results.setdefault('obsolete', []).extend(older)
# Mark any isolated legacy .durable as obsolete
if exts.get('.durable') and not durable_frag_set:
results.setdefault('obsolete', []).extend(exts['.durable'])
exts.pop('.durable')
# Fragments *may* be ready for reclaim, unless they are most recent
# durable
for frag_set in frag_sets.values():
if frag_set in (durable_frag_set, chosen_frag_set):
continue
results.setdefault('possible_reclaim', []).extend(frag_set)
# .meta files *may* be ready for reclaim if there is no durable data
if exts.get('.meta') and not durable_frag_set:
results.setdefault('possible_reclaim', []).extend(
exts.get('.meta'))
def _verify_ondisk_files(self, results, frag_index=None,
frag_prefs=None, **kwargs):
"""
Verify that the final combination of on disk files complies with the
erasure-coded diskfile contract.
:param results: files that have been found and accepted
:param frag_index: specifies a specific fragment index .data file
:param frag_prefs: if set, indicates that fragment preferences have
been specified and therefore that a selected fragment is not
required to be durable.
:returns: True if the file combination is compliant, False otherwise
"""
if super(ECDiskFileManager, self)._verify_ondisk_files(
results, **kwargs):
have_data_file = results['data_file'] is not None
have_durable = (results.get('durable_frag_set') is not None or
(have_data_file and frag_prefs is not None))
return have_data_file == have_durable
return False
def _update_suffix_hashes(self, hashes, ondisk_info):
"""
Applies policy specific updates to the given dict of md5 hashes for
the given ondisk_info.
The only difference between this method and the replication policy
        function is the way that data files update the hashes dict. Instead of
        hashing all filenames into a single hasher, each data file name falls
into a bucket keyed by its fragment index.
:param hashes: a dict of md5 hashes to be updated
:param ondisk_info: a dict describing the state of ondisk files, as
returned by get_ondisk_files
"""
for frag_set in ondisk_info['frag_sets'].values():
for file_info in frag_set:
fi = file_info['frag_index']
hashes[fi].update(file_info['timestamp'].internal)
if 'durable_frag_set' in ondisk_info:
# The durable_frag_set may be indicated by a legacy
# <timestamp>.durable file or by a durable <timestamp>#fi#d.data
# file. Either way we update hashes[None] with the string
# <timestamp>.durable which is a consistent representation of the
# abstract state of the object regardless of the actual file set.
# That way if we use a local combination of a legacy t1.durable and
# t1#0.data to reconstruct a remote t1#0#d.data then, when next
# hashed, the local and remote will make identical updates to their
# suffix hashes.
file_info = ondisk_info['durable_frag_set'][0]
hashes[None].update(file_info['timestamp'].internal + '.durable')
def _hash_suffix(self, path, policy=None):
"""
Performs reclamation and returns an md5 of all (remaining) files.
:param path: full path to directory
:param policy: storage policy used to store the files
:raises PathNotDir: if given path is not a valid directory
:raises OSError: for non-ENOTDIR errors
:returns: dict of md5 hex digests
"""
# hash_per_fi instead of single hash for whole suffix
# here we flatten out the hashers hexdigest into a dictionary instead
# of just returning the one hexdigest for the whole suffix
hash_per_fi = self._hash_suffix_dir(path, policy)
return dict((fi, md5.hexdigest()) for fi, md5 in hash_per_fi.items())
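    # Illustrative contrast (digests are made-up values): the replication
    # manager's _hash_suffix returns a single hex digest for the whole suffix,
    # whereas this EC variant returns one digest per fragment index, with the
    # None key carrying the durable timestamp contribution.
    #
    #   repl_mgr._hash_suffix(suffix_path)
    #   # -> 'd41d8cd98f00b204e9800998ecf8427e'
    #   ec_mgr._hash_suffix(suffix_path)
    #   # -> {None: '5d41402abc4b2a76b9719d911017c592', 3: '...', 7: '...'}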
| swift-master | swift/obj/diskfile.py |
| swift-master | swift/obj/__init__.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves import queue
import six.moves.cPickle as pickle
import errno
import os
import signal
import sys
import time
import uuid
from random import random, shuffle
from collections import deque
from eventlet import spawn, Timeout
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_drive
from swift.common.exceptions import ConnectionTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger, renamer, write_pickle, \
dump_recon_cache, config_true_value, RateLimitedIterator, split_path, \
eventlet_monkey_patch, get_redirect_data, ContextPool, hash_path, \
non_negative_float, config_positive_int_value, non_negative_int, \
EventletRateLimiter, node_to_string
from swift.common.daemon import Daemon
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.storage_policy import split_policy_string, PolicyError
from swift.common.recon import RECON_OBJECT_FILE, DEFAULT_RECON_CACHE_PATH
from swift.obj.diskfile import get_tmp_dir, ASYNCDIR_BASE
from swift.common.http import is_success, HTTP_INTERNAL_SERVER_ERROR, \
HTTP_MOVED_PERMANENTLY
class RateLimiterBucket(EventletRateLimiter):
"""
Extends EventletRateLimiter to also maintain a deque of items that have
been deferred due to rate-limiting, and to provide a comparator for sorting
    instances by readiness.
"""
def __init__(self, max_updates_per_second):
super(RateLimiterBucket, self).__init__(max_updates_per_second,
rate_buffer=0)
self.deque = deque()
def __len__(self):
return len(self.deque)
def __bool__(self):
return bool(self.deque)
__nonzero__ = __bool__ # py2
def __lt__(self, other):
# used to sort RateLimiterBuckets by readiness
if isinstance(other, RateLimiterBucket):
return self.running_time < other.running_time
return self.running_time < other
class BucketizedUpdateSkippingLimiter(object):
"""
Wrap an iterator to rate-limit updates on a per-bucket basis, where updates
are mapped to buckets by hashing their destination path. If an update is
rate-limited then it is placed on a deferral queue and may be sent later if
the wrapped iterator is exhausted before the ``drain_until`` time is
reached.
The deferral queue has constrained size and once the queue is full updates
are evicted using a first-in-first-out policy. This policy is used because
updates on the queue may have been made obsolete by newer updates written
to disk, and this is more likely for updates that have been on the queue
longest.
The iterator increments stats as follows:
* The `deferrals` stat is incremented for each update that is
      rate-limited. Note that an individual update is rate-limited at most
once.
* The `skips` stat is incremented for each rate-limited update that is
not eventually yielded. This includes updates that are evicted from the
deferral queue and all updates that remain in the deferral queue when
``drain_until`` time is reached and the iterator terminates.
* The `drains` stat is incremented for each rate-limited update that is
eventually yielded.
Consequently, when this iterator terminates, the sum of `skips` and
`drains` is equal to the number of `deferrals`.
:param update_iterable: an async_pending update iterable
:param logger: a logger instance
:param stats: a SweepStats instance
:param num_buckets: number of buckets to divide container hashes into, the
                        more buckets in total, the fewer containers per bucket
(once a busy container slows down a bucket the whole
bucket starts deferring)
:param max_elements_per_group_per_second: tunable, when deferring kicks in
:param max_deferred_elements: maximum number of deferred elements before
skipping starts. Each bucket may defer updates, but once the total
number of deferred updates summed across all buckets reaches this
value then all buckets will skip subsequent updates.
:param drain_until: time at which any remaining deferred elements must be
skipped and the iterator stops. Once the wrapped iterator has been
exhausted, this iterator will drain deferred elements from its buckets
until either all buckets have drained or this time is reached.
"""
def __init__(self, update_iterable, logger, stats, num_buckets=1000,
max_elements_per_group_per_second=50,
max_deferred_elements=0,
drain_until=0):
self.iterator = iter(update_iterable)
self.logger = logger
self.stats = stats
# if we want a smaller "blast radius" we could make this number bigger
self.num_buckets = max(num_buckets, 1)
self.max_deferred_elements = max_deferred_elements
self.deferred_buckets = deque()
self.drain_until = drain_until
self.salt = str(uuid.uuid4())
self.buckets = [RateLimiterBucket(max_elements_per_group_per_second)
for _ in range(self.num_buckets)]
self.buckets_ordered_by_readiness = None
def __iter__(self):
return self
def _bucket_key(self, update):
acct, cont = split_update_path(update)
return int(hash_path(acct, cont, self.salt), 16) % self.num_buckets
def _get_time(self):
return time.time()
def next(self):
# first iterate over the wrapped iterator...
for update_ctx in self.iterator:
bucket = self.buckets[self._bucket_key(update_ctx['update'])]
now = self._get_time()
if bucket.is_allowed(now=now):
# no need to ratelimit, just return next update
return update_ctx
self.stats.deferrals += 1
self.logger.increment("deferrals")
if self.max_deferred_elements > 0:
if len(self.deferred_buckets) >= self.max_deferred_elements:
# create space to defer this update by popping the least
# recent deferral from the least recently deferred bucket;
# updates read from disk recently are preferred over those
# read from disk less recently.
oldest_deferred_bucket = self.deferred_buckets.popleft()
oldest_deferred_bucket.deque.popleft()
self.stats.skips += 1
self.logger.increment("skips")
# append the update to the bucket's queue and append the bucket
# to the queue of deferred buckets
# note: buckets may have multiple entries in deferred_buckets,
# one for each deferred update in that particular bucket
bucket.deque.append(update_ctx)
self.deferred_buckets.append(bucket)
else:
self.stats.skips += 1
self.logger.increment("skips")
if self.buckets_ordered_by_readiness is None:
# initialise a queue of those buckets with deferred elements;
# buckets are queued in the chronological order in which they are
# ready to serve an element
self.buckets_ordered_by_readiness = queue.PriorityQueue()
for bucket in self.buckets:
if bucket:
self.buckets_ordered_by_readiness.put(bucket)
# now drain the buckets...
undrained_elements = []
while not self.buckets_ordered_by_readiness.empty():
now = self._get_time()
bucket = self.buckets_ordered_by_readiness.get_nowait()
if now < self.drain_until:
# wait for next element to be ready
bucket.wait(now=now)
# drain the most recently deferred element
item = bucket.deque.pop()
if bucket:
# bucket has more deferred elements, re-insert in queue in
# correct chronological position
self.buckets_ordered_by_readiness.put(bucket)
self.stats.drains += 1
self.logger.increment("drains")
return item
else:
# time to stop iterating: gather all un-drained elements
undrained_elements.extend(bucket.deque)
if undrained_elements:
# report final batch of skipped elements
self.stats.skips += len(undrained_elements)
self.logger.update_stats("skips", len(undrained_elements))
raise StopIteration()
__next__ = next
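# Illustrative usage sketch (parameter values and the process() callable are
# assumptions, not recommendations): the updater wraps its async_pending
# iterator so that per-container rate limiting, deferral and draining happen
# transparently while iterating.
#
#   limited = BucketizedUpdateSkippingLimiter(
#       pending_iter, logger, stats,
#       num_buckets=1000,
#       max_elements_per_group_per_second=10,
#       max_deferred_elements=10000,
#       drain_until=time.time() + 300)
#   for update_ctx in limited:
#       process(update_ctx)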
class SweepStats(object):
"""
Stats bucket for an update sweep
A measure of the rate at which updates are being rate-limited is::
deferrals / (deferrals + successes + failures - drains)
A measure of the rate at which updates are not being sent during a sweep
is::
skips / (skips + successes + failures)
"""
def __init__(self, errors=0, failures=0, quarantines=0, successes=0,
unlinks=0, redirects=0, skips=0, deferrals=0, drains=0):
self.errors = errors
self.failures = failures
self.quarantines = quarantines
self.successes = successes
self.unlinks = unlinks
self.redirects = redirects
self.skips = skips
self.deferrals = deferrals
self.drains = drains
def copy(self):
return type(self)(self.errors, self.failures, self.quarantines,
self.successes, self.unlinks, self.redirects,
self.skips, self.deferrals, self.drains)
def since(self, other):
return type(self)(self.errors - other.errors,
self.failures - other.failures,
self.quarantines - other.quarantines,
self.successes - other.successes,
self.unlinks - other.unlinks,
self.redirects - other.redirects,
self.skips - other.skips,
self.deferrals - other.deferrals,
self.drains - other.drains)
def reset(self):
self.errors = 0
self.failures = 0
self.quarantines = 0
self.successes = 0
self.unlinks = 0
self.redirects = 0
self.skips = 0
self.deferrals = 0
self.drains = 0
def __str__(self):
keys = (
(self.successes, 'successes'),
(self.failures, 'failures'),
(self.quarantines, 'quarantines'),
(self.unlinks, 'unlinks'),
(self.errors, 'errors'),
(self.redirects, 'redirects'),
(self.skips, 'skips'),
(self.deferrals, 'deferrals'),
(self.drains, 'drains'),
)
return ', '.join('%d %s' % pair for pair in keys)
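# Illustrative arithmetic (counter values are assumed) for the two rates
# described in the SweepStats docstring:
#
#   stats = SweepStats(successes=80, failures=10, skips=10,
#                      deferrals=30, drains=20)
#   ratelimited = stats.deferrals / (
#       stats.deferrals + stats.successes + stats.failures - stats.drains)
#   # -> 30 / (30 + 80 + 10 - 20) = 0.3
#   not_sent = stats.skips / (
#       stats.skips + stats.successes + stats.failures)
#   # -> 10 / (10 + 80 + 10) = 0.1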
def split_update_path(update):
"""
Split the account and container parts out of the async update data.
N.B. updates to shards set the container_path key while the account and
container keys are always the root.
"""
container_path = update.get('container_path')
if container_path:
acct, cont = split_path('/' + container_path, minsegs=2)
else:
acct, cont = update['account'], update['container']
return acct, cont
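# Illustrative sketch (account, container and shard names are made up): when
# an update has been redirected to a shard, its container_path takes
# precedence over the root account/container.
#
#   update = {'account': 'AUTH_test', 'container': 'c', 'obj': 'o',
#             'container_path': '.shards_AUTH_test/c-1234-5678'}
#   split_update_path(update)
#   # -> ('.shards_AUTH_test', 'c-1234-5678')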
class ObjectUpdater(Daemon):
"""Update object information in container listings."""
def __init__(self, conf, logger=None):
self.conf = conf
self.logger = logger or get_logger(conf, log_route='object-updater')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.interval = float(conf.get('interval', 300))
self.container_ring = None
self.concurrency = int(conf.get('concurrency', 8))
self.updater_workers = int(conf.get('updater_workers', 1))
if 'slowdown' in conf:
self.logger.warning(
'The slowdown option is deprecated in favor of '
'objects_per_second. This option may be ignored in a '
'future release.')
objects_per_second = 1 / (
float(conf.get('slowdown', '0.01')) + 0.01)
else:
objects_per_second = 50
self.objects_running_time = 0
self.max_objects_per_second = \
float(conf.get('objects_per_second',
objects_per_second))
self.max_objects_per_container_per_second = non_negative_float(
conf.get('max_objects_per_container_per_second', 0))
self.per_container_ratelimit_buckets = config_positive_int_value(
conf.get('per_container_ratelimit_buckets', 1000))
self.node_timeout = float(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.report_interval = float(conf.get('report_interval', 300))
self.recon_cache_path = conf.get('recon_cache_path',
DEFAULT_RECON_CACHE_PATH)
self.rcache = os.path.join(self.recon_cache_path, RECON_OBJECT_FILE)
self.stats = SweepStats()
self.max_deferred_updates = non_negative_int(
conf.get('max_deferred_updates', 10000))
self.begin = time.time()
def _listdir(self, path):
try:
return os.listdir(path)
except OSError as e:
self.stats.errors += 1
self.logger.increment('errors')
self.logger.error('ERROR: Unable to access %(path)s: '
'%(error)s',
{'path': path, 'error': e})
return []
def get_container_ring(self):
"""Get the container ring. Load it, if it hasn't been yet."""
if not self.container_ring:
self.container_ring = Ring(self.swift_dir, ring_name='container')
return self.container_ring
def run_forever(self, *args, **kwargs):
"""Run the updater continuously."""
time.sleep(random() * self.interval)
while True:
self.logger.info('Begin object update sweep')
self.begin = time.time()
pids = []
# read from container ring to ensure it's fresh
self.get_container_ring().get_nodes('')
for device in self._listdir(self.devices):
try:
dev_path = check_drive(self.devices, device,
self.mount_check)
except ValueError as err:
# We don't count this as an error. The occasional
# unmounted drive is part of normal cluster operations,
# so a simple warning is sufficient.
self.logger.warning('Skipping: %s', err)
continue
while len(pids) >= self.updater_workers:
pids.remove(os.wait()[0])
pid = os.fork()
if pid:
pids.append(pid)
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
eventlet_monkey_patch()
self.stats.reset()
forkbegin = time.time()
self.object_sweep(dev_path)
elapsed = time.time() - forkbegin
self.logger.info(
('Object update sweep of %(device)s '
'completed: %(elapsed).02fs, %(stats)s'),
{'device': device, 'elapsed': elapsed,
'stats': self.stats})
sys.exit()
while pids:
pids.remove(os.wait()[0])
elapsed = time.time() - self.begin
self.logger.info('Object update sweep completed: %.02fs',
elapsed)
dump_recon_cache({'object_updater_sweep': elapsed},
self.rcache, self.logger)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""Run the updater once."""
self.logger.info('Begin object update single threaded sweep')
self.begin = time.time()
self.stats.reset()
for device in self._listdir(self.devices):
try:
dev_path = check_drive(self.devices, device, self.mount_check)
except ValueError as err:
# We don't count this as an error. The occasional unmounted
# drive is part of normal cluster operations, so a simple
# warning is sufficient.
self.logger.warning('Skipping: %s', err)
continue
self.object_sweep(dev_path)
elapsed = time.time() - self.begin
self.logger.info(
('Object update single-threaded sweep completed: '
'%(elapsed).02fs, %(stats)s'),
{'elapsed': elapsed, 'stats': self.stats})
dump_recon_cache({'object_updater_sweep': elapsed},
self.rcache, self.logger)
def _load_update(self, device, update_path):
try:
return pickle.load(open(update_path, 'rb'))
except Exception as e:
if getattr(e, 'errno', None) == errno.ENOENT:
return
self.logger.exception(
'ERROR Pickle problem, quarantining %s', update_path)
self.stats.quarantines += 1
self.logger.increment('quarantines')
target_path = os.path.join(device, 'quarantined', 'objects',
os.path.basename(update_path))
renamer(update_path, target_path, fsync=False)
try:
# If this was the last async_pending in the directory,
# then this will succeed. Otherwise, it'll fail, and
# that's okay.
os.rmdir(os.path.dirname(update_path))
except OSError:
pass
return
def _iter_async_pendings(self, device):
"""
Locate and yield an update context for all the async pending files on
the device. Each update context contains details of the async pending
file location, its timestamp and the un-pickled update data.
Async pending files that fail to load will be quarantined.
Only the most recent update for the same object is yielded; older
(stale) async pending files are unlinked as they are located.
The iterator tries to clean up empty directories as it goes.
"""
# loop through async pending dirs for all policies
for asyncdir in self._listdir(device):
# we only care about directories
async_pending = os.path.join(device, asyncdir)
if not asyncdir.startswith(ASYNCDIR_BASE):
# skip stuff like "accounts", "containers", etc.
continue
if not os.path.isdir(async_pending):
continue
try:
base, policy = split_policy_string(asyncdir)
except PolicyError as e:
# This isn't an error, but a misconfiguration. Logging a
# warning should be sufficient.
self.logger.warning('Directory %(directory)r does not map '
'to a valid policy (%(error)s)', {
'directory': asyncdir, 'error': e})
continue
prefix_dirs = self._listdir(async_pending)
shuffle(prefix_dirs)
for prefix in prefix_dirs:
prefix_path = os.path.join(async_pending, prefix)
if not os.path.isdir(prefix_path):
continue
last_obj_hash = None
for update_file in sorted(self._listdir(prefix_path),
reverse=True):
update_path = os.path.join(prefix_path, update_file)
if not os.path.isfile(update_path):
continue
try:
obj_hash, timestamp = update_file.split('-')
except ValueError:
self.stats.errors += 1
self.logger.increment('errors')
self.logger.error(
'ERROR async pending file with unexpected '
'name %s', update_path)
continue
# Async pendings are stored on disk like this:
#
# <device>/async_pending/<suffix>/<obj_hash>-<timestamp>
#
# If there are multiple updates for a given object,
# they'll look like this:
#
# <device>/async_pending/<obj_suffix>/<obj_hash>-<timestamp1>
# <device>/async_pending/<obj_suffix>/<obj_hash>-<timestamp2>
# <device>/async_pending/<obj_suffix>/<obj_hash>-<timestamp3>
#
# Async updates also have the property that newer
# updates contain all the information in older updates.
# Since we sorted the directory listing in reverse
# order, we'll see timestamp3 first, yield it, and then
# unlink timestamp2 and timestamp1 since we know they
# are obsolete.
#
# This way, our caller only gets useful async_pendings.
if obj_hash == last_obj_hash:
self.stats.unlinks += 1
self.logger.increment('unlinks')
try:
os.unlink(update_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
last_obj_hash = obj_hash
update = self._load_update(device, update_path)
if update is not None:
yield {'device': device,
'policy': policy,
'update_path': update_path,
'obj_hash': obj_hash,
'timestamp': timestamp,
'update': update}
def object_sweep(self, device):
"""
If there are async pendings on the device, walk each one and update.
:param device: path to device
"""
start_time = time.time()
last_status_update = start_time
start_stats = self.stats.copy()
my_pid = os.getpid()
self.logger.info("Object update sweep starting on %s (pid: %d)",
device, my_pid)
ap_iter = RateLimitedIterator(
self._iter_async_pendings(device),
elements_per_second=self.max_objects_per_second)
ap_iter = BucketizedUpdateSkippingLimiter(
ap_iter, self.logger, self.stats,
self.per_container_ratelimit_buckets,
self.max_objects_per_container_per_second,
max_deferred_elements=self.max_deferred_updates,
drain_until=self.begin + self.interval)
with ContextPool(self.concurrency) as pool:
for update_ctx in ap_iter:
pool.spawn(self.process_object_update, **update_ctx)
now = time.time()
if now - last_status_update >= self.report_interval:
this_sweep = self.stats.since(start_stats)
self.logger.info(
('Object update sweep progress on %(device)s: '
'%(elapsed).02fs, %(stats)s (pid: %(pid)d)'),
{'device': device,
'elapsed': now - start_time,
'pid': my_pid,
'stats': this_sweep})
last_status_update = now
pool.waitall()
self.logger.timing_since('timing', start_time)
sweep_totals = self.stats.since(start_stats)
self.logger.info(
('Object update sweep completed on %(device)s '
             'in %(elapsed).02f seconds: '
'%(successes)d successes, %(failures)d failures, '
'%(quarantines)d quarantines, '
'%(unlinks)d unlinks, %(errors)d errors, '
'%(redirects)d redirects, '
'%(skips)d skips, '
'%(deferrals)d deferrals, '
'%(drains)d drains '
'(pid: %(pid)d)'),
{'device': device,
'elapsed': time.time() - start_time,
'pid': my_pid,
'successes': sweep_totals.successes,
'failures': sweep_totals.failures,
'quarantines': sweep_totals.quarantines,
'unlinks': sweep_totals.unlinks,
'errors': sweep_totals.errors,
'redirects': sweep_totals.redirects,
'skips': sweep_totals.skips,
'deferrals': sweep_totals.deferrals,
'drains': sweep_totals.drains
})
def process_object_update(self, update_path, device, policy, update,
**kwargs):
"""
Process the object information to be updated and update.
:param update_path: path to pickled object update file
:param device: path to device
:param policy: storage policy of object update
:param update: the un-pickled update data
:param kwargs: un-used keys from update_ctx
"""
def do_update():
successes = update.get('successes', [])
headers_out = HeaderKeyDict(update['headers'].copy())
headers_out['user-agent'] = 'object-updater %s' % os.getpid()
headers_out.setdefault('X-Backend-Storage-Policy-Index',
str(int(policy)))
headers_out.setdefault('X-Backend-Accept-Redirect', 'true')
headers_out.setdefault('X-Backend-Accept-Quoted-Location', 'true')
acct, cont = split_update_path(update)
part, nodes = self.get_container_ring().get_nodes(acct, cont)
obj = '/%s/%s/%s' % (acct, cont, update['obj'])
events = [spawn(self.object_update,
node, part, update['op'], obj, headers_out)
for node in nodes if node['id'] not in successes]
success = True
new_successes = rewrite_pickle = False
redirect = None
redirects = set()
for event in events:
event_success, node_id, redirect = event.wait()
if event_success is True:
successes.append(node_id)
new_successes = True
else:
success = False
if redirect:
redirects.add(redirect)
if success:
self.stats.successes += 1
self.logger.increment('successes')
self.logger.debug('Update sent for %(obj)s %(path)s',
{'obj': obj, 'path': update_path})
self.stats.unlinks += 1
self.logger.increment('unlinks')
os.unlink(update_path)
try:
# If this was the last async_pending in the directory,
# then this will succeed. Otherwise, it'll fail, and
# that's okay.
os.rmdir(os.path.dirname(update_path))
except OSError:
pass
elif redirects:
# erase any previous successes
update.pop('successes', None)
redirect = max(redirects, key=lambda x: x[-1])[0]
redirect_history = update.setdefault('redirect_history', [])
if redirect in redirect_history:
# force next update to be sent to root, reset history
update['container_path'] = None
update['redirect_history'] = []
else:
update['container_path'] = redirect
redirect_history.append(redirect)
self.stats.redirects += 1
self.logger.increment("redirects")
self.logger.debug(
'Update redirected for %(obj)s %(path)s to %(shard)s',
{'obj': obj, 'path': update_path,
'shard': update['container_path']})
rewrite_pickle = True
else:
self.stats.failures += 1
self.logger.increment('failures')
self.logger.debug('Update failed for %(obj)s %(path)s',
{'obj': obj, 'path': update_path})
if new_successes:
update['successes'] = successes
rewrite_pickle = True
return rewrite_pickle, redirect
rewrite_pickle, redirect = do_update()
if redirect:
# make one immediate retry to the redirect location
rewrite_pickle, redirect = do_update()
if rewrite_pickle:
write_pickle(update, update_path, os.path.join(
device, get_tmp_dir(policy)))
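    # Illustrative sketch (field values are assumed) of the un-pickled async
    # update handed to this method; the keys shown are the ones read by the
    # code above.
    #
    #   update = {'op': 'PUT', 'account': 'AUTH_test', 'container': 'c',
    #             'obj': 'o',
    #             'headers': {'X-Timestamp': '1525354555.65758', ...},
    #             'successes': [1, 3]}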
def object_update(self, node, part, op, obj, headers_out):
"""
Perform the object update to the container
:param node: node dictionary from the container ring
:param part: partition that holds the container
:param op: operation performed (ex: 'PUT' or 'DELETE')
:param obj: object name being updated
:param headers_out: headers to send with the update
:return: a tuple of (``success``, ``node_id``, ``redirect``)
where ``success`` is True if the update succeeded, ``node_id`` is
            the id of the node updated and ``redirect`` is either None or a
tuple of (a path, a timestamp string).
"""
redirect = None
start = time.time()
# Assume an error until we hear otherwise
status = 500
try:
with ConnectionTimeout(self.conn_timeout):
conn = http_connect(
node['replication_ip'], node['replication_port'],
node['device'], part, op, obj, headers_out)
with Timeout(self.node_timeout):
resp = conn.getresponse()
resp.read()
status = resp.status
if status == HTTP_MOVED_PERMANENTLY:
try:
redirect = get_redirect_data(resp)
except ValueError as err:
self.logger.error(
'Container update failed for %r; problem with '
'redirect location: %s' % (obj, err))
success = is_success(status)
if not success:
self.logger.debug(
'Error code %(status)d is returned from remote '
'server %(node)s',
{'status': resp.status,
'node': node_to_string(node, replication=True)})
return success, node['id'], redirect
except Exception:
self.logger.exception('ERROR with remote server %s',
node_to_string(node, replication=True))
except Timeout as exc:
action = 'connecting to'
if not isinstance(exc, ConnectionTimeout):
# i.e., we definitely made the request but gave up
# waiting for the response
status = 499
action = 'waiting on'
self.logger.info(
'Timeout %s remote server %s: %s',
action, node_to_string(node, replication=True), exc)
finally:
elapsed = time.time() - start
self.logger.timing('updater.timing.status.%s' % status,
elapsed * 1000)
return HTTP_INTERNAL_SERVER_ERROR, node['id'], redirect
| swift-master | swift/obj/updater.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sys
import time
import signal
from os.path import basename, dirname, join
from random import shuffle
from contextlib import closing
from eventlet import Timeout
from swift.obj import diskfile, replicator
from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist,\
DiskFileDeleted, DiskFileExpired, QuarantineRequest
from swift.common.daemon import Daemon
from swift.common.storage_policy import POLICIES
from swift.common.utils import (
config_auto_int_value, dump_recon_cache, get_logger, list_from_csv,
listdir, load_pkg_resource, parse_prefixed_conf, EventletRateLimiter,
readconf, round_robin_iter, unlink_paths_older_than, PrefixLoggerAdapter)
from swift.common.recon import RECON_OBJECT_FILE, DEFAULT_RECON_CACHE_PATH
class AuditorWorker(object):
"""Walk through file system to audit objects"""
def __init__(self, conf, logger, rcache, devices, zero_byte_only_at_fps=0,
watcher_defs=None):
if watcher_defs is None:
watcher_defs = {}
self.conf = conf
self.logger = logger
self.devices = devices
self.max_files_per_second = float(conf.get('files_per_second', 20))
self.max_bytes_per_second = float(conf.get('bytes_per_second',
10000000))
try:
# ideally unless ops overrides the rsync_tempfile_timeout in the
# auditor section we can base our behavior on whatever they
# configure for their replicator
replicator_config = readconf(self.conf['__file__'],
'object-replicator')
except (KeyError, ValueError, IOError):
# if we can't parse the real config (generally a KeyError on
# __file__, or ValueError on no object-replicator section, or
# IOError if reading the file failed) we use
# a very conservative default for rsync_timeout
default_rsync_timeout = 86400
else:
replicator_rsync_timeout = int(replicator_config.get(
'rsync_timeout', replicator.DEFAULT_RSYNC_TIMEOUT))
# Here we can do some light math for ops and use the *replicator's*
# rsync_timeout (plus 15 mins to avoid deleting local tempfiles
            # before the remote replicator kills its rsync)
default_rsync_timeout = replicator_rsync_timeout + 900
# there's not really a good reason to assume the replicator
# section's reclaim_age is more appropriate than the reconstructor
# reclaim_age - but we're already parsing the config so we can set
# the default value in our config if it's not already set
if 'reclaim_age' in replicator_config:
conf.setdefault('reclaim_age',
replicator_config['reclaim_age'])
self.rsync_tempfile_timeout = config_auto_int_value(
self.conf.get('rsync_tempfile_timeout'), default_rsync_timeout)
self.diskfile_router = diskfile.DiskFileRouter(conf, self.logger)
self.auditor_type = 'ALL'
self.zero_byte_only_at_fps = zero_byte_only_at_fps
if self.zero_byte_only_at_fps:
self.max_files_per_second = float(self.zero_byte_only_at_fps)
self.auditor_type = 'ZBF'
self.log_time = int(conf.get('log_time', 3600))
self.last_logged = 0
self.files_rate_limiter = EventletRateLimiter(
self.max_files_per_second)
self.bytes_rate_limiter = EventletRateLimiter(
self.max_bytes_per_second)
self.bytes_processed = 0
self.total_bytes_processed = 0
self.total_files_processed = 0
self.passes = 0
self.quarantines = 0
self.errors = 0
self.rcache = rcache
self.stats_sizes = sorted(
[int(s) for s in list_from_csv(conf.get('object_size_stats'))])
self.stats_buckets = dict(
[(s, 0) for s in self.stats_sizes + ['OVER']])
self.watchers = [
WatcherWrapper(wdef['klass'], name, wdef['conf'], logger)
for name, wdef in watcher_defs.items()]
logger.debug("%d audit watcher(s) loaded", len(self.watchers))
def create_recon_nested_dict(self, top_level_key, device_list, item):
if device_list:
device_key = ''.join(sorted(device_list))
return {top_level_key: {device_key: item}}
else:
return {top_level_key: item}
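    # For illustration, the shapes produced above (values illustrative):
    #   create_recon_nested_dict('object_auditor_stats_ALL',
    #                            ['sdb', 'sda'], {'passes': 3})
    #   -> {'object_auditor_stats_ALL': {'sdasdb': {'passes': 3}}}
    #   create_recon_nested_dict('object_auditor_stats_ZBF', None,
    #                            {'passes': 3})
    #   -> {'object_auditor_stats_ZBF': {'passes': 3}}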
def audit_all_objects(self, mode='once', device_dirs=None):
description = ''
if device_dirs:
device_dir_str = ','.join(sorted(device_dirs))
if self.auditor_type == 'ALL':
description = ' - parallel, %s' % device_dir_str
else:
description = ' - %s' % device_dir_str
self.logger.info('Begin object audit "%(mode)s" mode (%(audi_type)s'
'%(description)s)',
{'mode': mode, 'audi_type': self.auditor_type,
'description': description})
for watcher in self.watchers:
watcher.start(self.auditor_type)
begin = reported = time.time()
self.total_bytes_processed = 0
self.total_files_processed = 0
total_quarantines = 0
total_errors = 0
time_auditing = 0
# get AuditLocations for each policy
loc_generators = []
for policy in POLICIES:
loc_generators.append(
self.diskfile_router[policy]
.object_audit_location_generator(
policy, device_dirs=device_dirs,
auditor_type=self.auditor_type))
all_locs = round_robin_iter(loc_generators)
for location in all_locs:
loop_time = time.time()
self.failsafe_object_audit(location)
self.logger.timing_since('timing', loop_time)
self.files_rate_limiter.wait()
self.total_files_processed += 1
now = time.time()
if now - self.last_logged >= self.log_time:
self.logger.info(
'Object audit (%(type)s). '
'Since %(start_time)s: Locally: %(passes)d passed, '
'%(quars)d quarantined, %(errors)d errors, '
'files/sec: %(frate).2f, bytes/sec: %(brate).2f, '
'Total time: %(total).2f, Auditing time: %(audit).2f, '
'Rate: %(audit_rate).2f', {
'type': '%s%s' % (self.auditor_type, description),
'start_time': time.ctime(reported),
'passes': self.passes, 'quars': self.quarantines,
'errors': self.errors,
'frate': self.passes / (now - reported),
'brate': self.bytes_processed / (now - reported),
'total': (now - begin), 'audit': time_auditing,
'audit_rate': time_auditing / (now - begin)})
cache_entry = self.create_recon_nested_dict(
'object_auditor_stats_%s' % (self.auditor_type),
device_dirs,
{'errors': self.errors, 'passes': self.passes,
'quarantined': self.quarantines,
'bytes_processed': self.bytes_processed,
'start_time': reported, 'audit_time': time_auditing})
dump_recon_cache(cache_entry, self.rcache, self.logger)
reported = now
total_quarantines += self.quarantines
total_errors += self.errors
self.passes = 0
self.quarantines = 0
self.errors = 0
self.bytes_processed = 0
self.last_logged = now
time_auditing += (now - loop_time)
# Avoid divide by zero during very short runs
elapsed = (time.time() - begin) or 0.000001
self.logger.info(
'Object audit (%(type)s) "%(mode)s" mode '
'completed: %(elapsed).02fs. Total quarantined: %(quars)d, '
'Total errors: %(errors)d, Total files/sec: %(frate).2f, '
'Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, '
'Rate: %(audit_rate).2f', {
'type': '%s%s' % (self.auditor_type, description),
'mode': mode, 'elapsed': elapsed,
'quars': total_quarantines + self.quarantines,
'errors': total_errors + self.errors,
'frate': self.total_files_processed / elapsed,
'brate': self.total_bytes_processed / elapsed,
'audit': time_auditing, 'audit_rate': time_auditing / elapsed})
for watcher in self.watchers:
watcher.end()
if self.stats_sizes:
self.logger.info(
'Object audit stats: %s', json.dumps(self.stats_buckets))
for policy in POLICIES:
# Unset remaining partitions to not skip them in the next run
self.diskfile_router[policy].clear_auditor_status(
policy,
self.auditor_type)
def record_stats(self, obj_size):
"""
        Based on the config's object_size_stats, keep track of how many
        objects fall into the specified size ranges. For example, with:
            object_size_stats = 10, 100, 1024
        and a system that has 3 objects of sizes 5, 20, and 10000 bytes, the
        log will look like: {"10": 1, "100": 1, "1024": 0, "OVER": 1}
"""
for size in self.stats_sizes:
if obj_size <= size:
self.stats_buckets[size] += 1
break
else:
self.stats_buckets["OVER"] += 1
def failsafe_object_audit(self, location):
"""
Entrypoint to object_audit, with a failsafe generic exception handler.
"""
try:
self.object_audit(location)
except (Exception, Timeout):
self.logger.increment('errors')
self.errors += 1
self.logger.exception('ERROR Trying to audit %s', location)
def object_audit(self, location):
"""
Audits the given object location.
:param location: an audit location
(from diskfile.object_audit_location_generator)
"""
def raise_dfq(msg):
raise DiskFileQuarantined(msg)
diskfile_mgr = self.diskfile_router[location.policy]
# this method doesn't normally raise errors, even if the audit
# location does not exist; if this raises an unexpected error it
# will get logged in failsafe
df = diskfile_mgr.get_diskfile_from_audit_location(location)
reader = None
try:
with df.open(modernize=True):
metadata = df.get_metadata()
if not df.validate_metadata():
df._quarantine(
df._data_file,
"Metadata failed validation")
obj_size = int(metadata['Content-Length'])
if self.stats_sizes:
self.record_stats(obj_size)
if obj_size and not self.zero_byte_only_at_fps:
reader = df.reader(_quarantine_hook=raise_dfq)
if reader:
with closing(reader):
for chunk in reader:
chunk_len = len(chunk)
self.bytes_rate_limiter.wait(incr_by=chunk_len)
self.bytes_processed += chunk_len
self.total_bytes_processed += chunk_len
for watcher in self.watchers:
try:
watcher.see_object(
metadata,
df._ondisk_info['data_file'])
except QuarantineRequest:
raise df._quarantine(
df._data_file,
"Requested by %s" % watcher.watcher_name)
except DiskFileQuarantined as err:
self.quarantines += 1
self.logger.error('ERROR Object %(obj)s failed audit and was'
' quarantined: %(err)s',
{'obj': location, 'err': err})
except DiskFileExpired:
pass # ignore expired objects
except DiskFileDeleted:
# If there is a reclaimable tombstone, we'll invalidate the hash
# to trigger the replicator to rehash/cleanup this suffix
ts = df._ondisk_info['ts_info']['timestamp']
if (not self.zero_byte_only_at_fps and
(time.time() - float(ts)) > df.manager.reclaim_age):
df.manager.invalidate_hash(dirname(df._datadir))
except DiskFileNotExist:
pass
self.passes += 1
# _ondisk_info attr is initialized to None and filled in by open
ondisk_info_dict = df._ondisk_info or {}
if 'unexpected' in ondisk_info_dict:
            def is_rsync_tempfile(fpath):
                return diskfile.RE_RSYNC_TEMPFILE.match(basename(fpath))
rsync_tempfile_paths = filter(is_rsync_tempfile,
ondisk_info_dict['unexpected'])
mtime = time.time() - self.rsync_tempfile_timeout
unlink_paths_older_than(rsync_tempfile_paths, mtime)
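    # A minimal sketch of the tempfile cleanup above (assuming rsync's usual
    # '.<name>.XXXXXX' temporary naming, which RE_RSYNC_TEMPFILE matches):
    #   mtime_cutoff = time.time() - self.rsync_tempfile_timeout
    #   an unexpected file such as '.1659462600.12345.data.bH3dQa' older
    #   than mtime_cutoff is unlinked; regular on-disk files are never
    #   touched because only paths listed under ondisk_info['unexpected']
    #   are considered.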
class ObjectAuditor(Daemon):
"""Audit objects."""
def __init__(self, conf, logger=None, **options):
self.conf = conf
self.logger = logger or get_logger(conf, log_route='object-auditor')
self.devices = conf.get('devices', '/srv/node')
self.concurrency = int(conf.get('concurrency', 1))
self.conf_zero_byte_fps = int(
conf.get('zero_byte_files_per_second', 50))
self.recon_cache_path = conf.get('recon_cache_path',
DEFAULT_RECON_CACHE_PATH)
self.rcache = join(self.recon_cache_path, RECON_OBJECT_FILE)
self.interval = float(conf.get('interval', 30))
watcher_names = set(list_from_csv(conf.get('watchers', '')))
        # Normally '__file__' is always in the config, but tests often
        # neglect it.
watcher_configs = \
parse_prefixed_conf(conf['__file__'], 'object-auditor:watcher:') \
if '__file__' in conf else {}
self.watcher_defs = {}
for name in watcher_names:
self.logger.debug("Loading entry point '%s'", name)
wconf = dict(conf)
wconf.update(watcher_configs.get(name, {}))
self.watcher_defs[name] = {
'conf': wconf,
'klass': load_pkg_resource("swift.object_audit_watcher", name)}
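    # A minimal sketch of the corresponding config (the watcher name
    # 'mypkg#my_watcher' and its option are illustrative; any name
    # resolvable by load_pkg_resource in the 'swift.object_audit_watcher'
    # entry point group works):
    #
    #   [object-auditor]
    #   watchers = mypkg#my_watcher
    #
    #   [object-auditor:watcher:mypkg#my_watcher]
    #   some_option = some_value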
def _sleep(self):
time.sleep(self.interval)
def clear_recon_cache(self, auditor_type):
"""Clear recon cache entries"""
dump_recon_cache({'object_auditor_stats_%s' % auditor_type: {}},
self.rcache, self.logger)
def run_audit(self, **kwargs):
"""Run the object audit"""
mode = kwargs.get('mode')
zero_byte_only_at_fps = kwargs.get('zero_byte_fps', 0)
device_dirs = kwargs.get('device_dirs')
worker = AuditorWorker(self.conf, self.logger, self.rcache,
self.devices,
zero_byte_only_at_fps=zero_byte_only_at_fps,
watcher_defs=self.watcher_defs)
worker.audit_all_objects(mode=mode, device_dirs=device_dirs)
def fork_child(self, zero_byte_fps=False, sleep_between_zbf_scanner=False,
**kwargs):
"""Child execution"""
pid = os.fork()
if pid:
return pid
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
if zero_byte_fps:
kwargs['zero_byte_fps'] = self.conf_zero_byte_fps
if sleep_between_zbf_scanner:
self._sleep()
try:
self.run_audit(**kwargs)
except Exception as e:
self.logger.exception(
"ERROR: Unable to run auditing: %s", e)
finally:
sys.exit()
def audit_loop(self, parent, zbo_fps, override_devices=None, **kwargs):
"""Parallel audit loop"""
self.clear_recon_cache('ALL')
self.clear_recon_cache('ZBF')
once = kwargs.get('mode') == 'once'
kwargs['device_dirs'] = override_devices
if parent:
kwargs['zero_byte_fps'] = zbo_fps
self.run_audit(**kwargs)
else:
pids = set()
if self.conf_zero_byte_fps:
zbf_pid = self.fork_child(zero_byte_fps=True, **kwargs)
pids.add(zbf_pid)
if self.concurrency == 1:
# Audit all devices in 1 process
pids.add(self.fork_child(**kwargs))
else:
# Divide devices amongst parallel processes set by
# self.concurrency. Total number of parallel processes
# is self.concurrency + 1 if zero_byte_fps.
parallel_proc = self.concurrency + 1 if \
self.conf_zero_byte_fps else self.concurrency
device_list = list(override_devices) if override_devices else \
listdir(self.devices)
shuffle(device_list)
while device_list:
pid = None
if len(pids) == parallel_proc:
pid = os.wait()[0]
pids.discard(pid)
if self.conf_zero_byte_fps and pid == zbf_pid and once:
# If we're only running one pass and the ZBF scanner
# finished, don't bother restarting it.
zbf_pid = -100
elif self.conf_zero_byte_fps and pid == zbf_pid:
# When we're running forever, the ZBF scanner must
# be restarted as soon as it finishes.
kwargs['device_dirs'] = override_devices
# sleep between ZBF scanner forks
self._sleep()
zbf_pid = self.fork_child(zero_byte_fps=True, **kwargs)
pids.add(zbf_pid)
else:
kwargs['device_dirs'] = [device_list.pop()]
pids.add(self.fork_child(**kwargs))
while pids:
pid = os.wait()[0]
# ZBF scanner must be restarted as soon as it finishes
# unless we're in run-once mode
if self.conf_zero_byte_fps and pid == zbf_pid and \
len(pids) > 1 and not once:
kwargs['device_dirs'] = override_devices
# sleep between ZBF scanner forks
zbf_pid = self.fork_child(zero_byte_fps=True,
sleep_between_zbf_scanner=True,
**kwargs)
pids.add(zbf_pid)
pids.discard(pid)
def run_forever(self, *args, **kwargs):
"""Run the object audit until stopped."""
# zero byte only command line option
zbo_fps = kwargs.get('zero_byte_fps', 0)
parent = False
if zbo_fps:
# only start parent
parent = True
kwargs = {'mode': 'forever'}
while True:
try:
self.audit_loop(parent, zbo_fps, **kwargs)
except (Exception, Timeout) as err:
self.logger.exception('ERROR auditing: %s', err)
self._sleep()
def run_once(self, *args, **kwargs):
"""Run the object audit once"""
# zero byte only command line option
zbo_fps = kwargs.get('zero_byte_fps', 0)
override_devices = list_from_csv(kwargs.get('devices'))
# Remove bogus entries and duplicates from override_devices
override_devices = list(
set(listdir(self.devices)).intersection(set(override_devices)))
parent = False
if zbo_fps:
# only start parent
parent = True
kwargs = {'mode': 'once'}
try:
self.audit_loop(parent, zbo_fps, override_devices=override_devices,
**kwargs)
except (Exception, Timeout) as err:
self.logger.exception('ERROR auditing: %s', err)
class WatcherWrapper(object):
"""
Run the user-supplied watcher.
Simple and gets the job done. Note that we aren't doing anything
to isolate ourselves from hangs or file descriptor leaks
in the plugins.
"""
def __init__(self, watcher_class, watcher_name, conf, logger):
self.watcher_name = watcher_name
self.watcher_in_error = False
self.logger = PrefixLoggerAdapter(logger, {})
self.logger.set_prefix('[audit-watcher %s] ' % watcher_name)
try:
self.watcher = watcher_class(conf, self.logger)
except (Exception, Timeout):
            self.logger.exception('Error initializing watcher')
self.watcher_in_error = True
def start(self, audit_type):
if self.watcher_in_error:
return # can't trust the state of the thing; bail
try:
self.watcher.start(audit_type=audit_type)
except (Exception, Timeout):
self.logger.exception('Error starting watcher')
self.watcher_in_error = True
def see_object(self, meta, data_file_path):
if self.watcher_in_error:
return # can't trust the state of the thing; bail
kwargs = {'object_metadata': meta,
'data_file_path': data_file_path}
try:
self.watcher.see_object(**kwargs)
except QuarantineRequest:
# Avoid extra logging.
raise
except (Exception, Timeout):
self.logger.exception(
'Error in see_object(meta=%r, data_file_path=%r)',
meta, data_file_path)
# Do *not* flag watcher as being in an error state; a failure
# to process one object shouldn't impact the ability to process
# others.
def end(self):
if self.watcher_in_error:
return # can't trust the state of the thing; bail
kwargs = {}
try:
self.watcher.end(**kwargs)
except (Exception, Timeout):
self.logger.exception('Error ending watcher')
self.watcher_in_error = True
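# A minimal sketch of a watcher plugin, derived only from the calls that
# WatcherWrapper makes above; the class name and the counting behaviour are
# illustrative and not part of Swift itself.
class ExampleCountingWatcher(object):
    def __init__(self, conf, logger):
        self.logger = logger
        self.seen = 0
    def start(self, audit_type, **kwargs):
        # called once per audit pass with 'ALL' or 'ZBF'
        self.seen = 0
    def see_object(self, object_metadata, data_file_path, **kwargs):
        # a real watcher could raise QuarantineRequest here to ask the
        # auditor to quarantine the object
        self.seen += 1
    def end(self, **kwargs):
        self.logger.info('saw %d object(s)', self.seen)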
| swift-master | swift/obj/auditor.py |
# Copyright (c) 2010-2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
import errno
import os
from os.path import join
import random
import time
from collections import defaultdict
import six
import six.moves.cPickle as pickle
import shutil
from eventlet import (GreenPile, GreenPool, Timeout, sleep, tpool, spawn)
from eventlet.support.greenlets import GreenletExit
from swift.common.utils import (
whataremyips, unlink_older_than, compute_eta, get_logger,
dump_recon_cache, mkdirs, config_true_value,
GreenAsyncPile, Timestamp, remove_file, node_to_string,
load_recon_cache, parse_override_options, distribute_evenly,
PrefixLoggerAdapter, remove_directory, config_request_node_count_value,
non_negative_int)
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
from swift.common.recon import RECON_OBJECT_FILE, DEFAULT_RECON_CACHE_PATH
from swift.common.ring.utils import is_local_device
from swift.obj.ssync_sender import Sender as ssync_sender
from swift.common.http import HTTP_OK, HTTP_NOT_FOUND, \
HTTP_INSUFFICIENT_STORAGE
from swift.obj.diskfile import DiskFileRouter, get_data_dir, \
get_tmp_dir, DEFAULT_RECLAIM_AGE
from swift.common.storage_policy import POLICIES, EC_POLICY
from swift.common.exceptions import ConnectionTimeout, DiskFileError, \
SuffixSyncError, PartitionLockTimeout, DiskFileNotExist
SYNC, REVERT = ('sync_only', 'sync_revert')
UNKNOWN_RESPONSE_STATUS = 0 # used as response status for timeouts, exceptions
def _get_partners(node_index, part_nodes):
"""
Returns the left, right and far partners of the node whose index is equal
to the given node_index.
:param node_index: the primary index
:param part_nodes: a list of primary nodes
:returns: [<node-to-left>, <node-to-right>, <node-opposite>]
"""
num_nodes = len(part_nodes)
return [
part_nodes[(node_index - 1) % num_nodes],
part_nodes[(node_index + 1) % num_nodes],
part_nodes[(
node_index + (num_nodes // 2)
) % num_nodes],
]
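# For illustration, with an illustrative 6-node primary list:
#   _get_partners(0, part_nodes) -> [part_nodes[5], part_nodes[1],
#                                    part_nodes[3]]
#   _get_partners(4, part_nodes) -> [part_nodes[3], part_nodes[5],
#                                    part_nodes[1]]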
def _full_path(node, part, relative_path, policy):
"""
Combines the node properties, partition, relative-path and policy into a
single string representation.
:param node: a dict describing node properties
:param part: partition number
    :param relative_path: path of the desired EC archive relative to
        partition dir
:param policy: an instance of
:class:`~swift.common.storage_policy.BaseStoragePolicy`
:return: string representation of absolute path on node plus policy index
"""
if not isinstance(relative_path, six.text_type):
relative_path = relative_path.decode('utf8')
return '%(node)s/%(part)s%(path)s policy#%(policy)d' % {
'node': node_to_string(node, replication=True),
'part': part, 'path': relative_path,
'policy': policy,
}
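# For illustration only (the exact node rendering comes from
# node_to_string): _full_path(node, 123, '/a/c/o', policy) for a node on
# 10.0.0.1:6200/d1 and a policy with index 2 would produce something like
#   '10.0.0.1:6200/d1/123/a/c/o policy#2'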
class ResponseBucket(object):
"""
Encapsulates fragment GET response data related to a single timestamp.
"""
def __init__(self):
# count of all responses associated with this Bucket
self.num_responses = 0
# map {frag_index: response} for subset of responses that could be used
# to rebuild the missing fragment
self.useful_responses = {}
# set if a durable timestamp was seen in responses
self.durable = False
# etag of the first response associated with the Bucket
self.etag = None
class RebuildingECDiskFileStream(object):
"""
This class wraps the reconstructed fragment archive data and
metadata in the DiskFile interface for ssync.
"""
def __init__(self, datafile_metadata, frag_index, rebuilt_fragment_iter):
# start with metadata from a participating FA
self.datafile_metadata = datafile_metadata
# the new FA is going to have the same length as others in the set
self._content_length = int(self.datafile_metadata['Content-Length'])
# update the FI and delete the ETag, the obj server will
# recalc on the other side...
self.datafile_metadata['X-Object-Sysmeta-Ec-Frag-Index'] = frag_index
for etag_key in ('ETag', 'Etag'):
self.datafile_metadata.pop(etag_key, None)
self.frag_index = frag_index
self.rebuilt_fragment_iter = rebuilt_fragment_iter
def get_metadata(self):
return self.datafile_metadata
def get_datafile_metadata(self):
return self.datafile_metadata
@property
def content_length(self):
return self._content_length
def reader(self):
for chunk in self.rebuilt_fragment_iter:
yield chunk
class ObjectReconstructor(Daemon):
"""
    Reconstruct objects using erasure code, and rebalance EC Fragment
    Archive objects off handoff nodes.
Encapsulates most logic and data needed by the object reconstruction
process. Each call to .reconstruct() performs one pass. It's up to the
caller to do this in a loop.
"""
def __init__(self, conf, logger=None):
"""
:param conf: configuration object obtained from ConfigParser
:param logger: logging object
"""
self.conf = conf
self.logger = PrefixLoggerAdapter(
logger or get_logger(conf, log_route='object-reconstructor'), {})
self.devices_dir = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.ring_ip = conf.get('ring_ip', conf.get('bind_ip', '0.0.0.0'))
self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
self.port = None if self.servers_per_port else \
int(conf.get('bind_port', 6200))
self.concurrency = int(conf.get('concurrency', 1))
# N.B. to maintain compatibility with legacy configs this option can
# not be named 'workers' because the object-server uses that option
# name in the DEFAULT section
self.reconstructor_workers = int(conf.get('reconstructor_workers', 0))
self.policies = [policy for policy in POLICIES
if policy.policy_type == EC_POLICY]
self.stats_interval = float(conf.get('stats_interval', '300'))
self.ring_check_interval = float(conf.get('ring_check_interval', 15))
self.next_check = time.time() + self.ring_check_interval
self.partition_times = []
self.interval = float(conf.get('interval') or
conf.get('run_pause') or 30)
if 'run_pause' in conf:
if 'interval' in conf:
self.logger.warning(
'Option object-reconstructor/run_pause is deprecated and '
'object-reconstructor/interval is already configured. '
'You can safely remove run_pause; it is now ignored and '
'will be removed in a future version.')
else:
self.logger.warning(
'Option object-reconstructor/run_pause is deprecated '
'and will be removed in a future version. '
'Update your configuration to use option '
'object-reconstructor/interval.')
self.http_timeout = int(conf.get('http_timeout', 60))
self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
self.recon_cache_path = conf.get('recon_cache_path',
DEFAULT_RECON_CACHE_PATH)
self.rcache = os.path.join(self.recon_cache_path, RECON_OBJECT_FILE)
self._next_rcache_update = time.time() + self.stats_interval
# defaults subject to change after beta
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.node_timeout = float(conf.get('node_timeout', 10))
self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
self.headers = {
'Content-Length': '0',
'user-agent': 'obj-reconstructor %s' % os.getpid()}
if 'handoffs_first' in conf:
self.logger.warning(
'The handoffs_first option is deprecated in favor '
'of handoffs_only. This option may be ignored in a '
'future release.')
# honor handoffs_first for backwards compatibility
default_handoffs_only = config_true_value(conf['handoffs_first'])
else:
default_handoffs_only = False
self.handoffs_only = config_true_value(
conf.get('handoffs_only', default_handoffs_only))
if self.handoffs_only:
self.logger.warning(
'Handoff only mode is not intended for normal '
'operation, use handoffs_only with care.')
elif default_handoffs_only:
self.logger.warning('Ignored handoffs_first option in favor '
'of handoffs_only.')
self.rebuild_handoff_node_count = int(conf.get(
'rebuild_handoff_node_count', 2))
self.quarantine_threshold = non_negative_int(
conf.get('quarantine_threshold', 0))
self.quarantine_age = int(
conf.get('quarantine_age',
conf.get('reclaim_age', DEFAULT_RECLAIM_AGE)))
self.request_node_count = config_request_node_count_value(
conf.get('request_node_count', '2 * replicas'))
self.max_objects_per_revert = non_negative_int(
conf.get('max_objects_per_revert', 0))
# When upgrading from liberasurecode<=1.5.0, you may want to continue
        # writing legacy CRCs until all nodes are upgraded and capable of
# reading fragments with zlib CRCs.
# See https://bugs.launchpad.net/liberasurecode/+bug/1886088 for more
# information.
if 'write_legacy_ec_crc' in conf:
os.environ['LIBERASURECODE_WRITE_LEGACY_CRC'] = \
'1' if config_true_value(conf['write_legacy_ec_crc']) else '0'
# else, assume operators know what they're doing and leave env alone
self._df_router = DiskFileRouter(conf, self.logger)
self.all_local_devices = self.get_local_devices()
self.rings_mtime = None
def get_worker_args(self, once=False, **kwargs):
"""
Take the set of all local devices for this node from all the EC
policies rings, and distribute them evenly into the number of workers
to be spawned according to the configured worker count. If `devices` is
given in `kwargs` then distribute only those devices.
:param once: False if the worker(s) will be daemonized, True if the
worker(s) will be run once
:param kwargs: optional overrides from the command line
"""
if self.reconstructor_workers < 1:
return
override_opts = parse_override_options(once=once, **kwargs)
        # Note that this gets re-used when dumping stats and in is_healthy
self.all_local_devices = self.get_local_devices()
if override_opts.devices:
devices = [d for d in override_opts.devices
if d in self.all_local_devices]
else:
devices = list(self.all_local_devices)
if not devices:
# we only need a single worker to do nothing until a ring change
yield dict(override_devices=override_opts.devices,
override_partitions=override_opts.partitions,
multiprocess_worker_index=0)
return
# for somewhat uniform load per worker use same
# max_devices_per_worker when handling all devices or just override
# devices, but only use enough workers for the actual devices being
# handled
self.reconstructor_workers = min(self.reconstructor_workers,
len(devices))
for index, ods in enumerate(distribute_evenly(
devices, self.reconstructor_workers)):
yield dict(override_partitions=override_opts.partitions,
override_devices=ods,
multiprocess_worker_index=index)
def is_healthy(self):
"""
Check whether rings have changed, and maybe do a recon update.
:returns: False if any ec ring has changed
"""
now = time.time()
if now > self._next_rcache_update:
self._next_rcache_update = now + self.stats_interval
self.aggregate_recon_update()
rings_mtime = [os.path.getmtime(self.load_object_ring(
policy).serialized_path) for policy in self.policies]
if self.rings_mtime == rings_mtime:
return True
self.rings_mtime = rings_mtime
return self.get_local_devices() == self.all_local_devices
def aggregate_recon_update(self):
"""
Aggregate per-disk rcache updates from child workers.
"""
existing_data = load_recon_cache(self.rcache)
first_start = time.time()
last_finish = 0
all_devices_reporting = True
for device in self.all_local_devices:
per_disk_stats = existing_data.get(
'object_reconstruction_per_disk', {}).get(device, {})
try:
start_time = per_disk_stats['object_reconstruction_last'] - \
(per_disk_stats['object_reconstruction_time'] * 60)
finish_time = per_disk_stats['object_reconstruction_last']
except KeyError:
all_devices_reporting = False
break
first_start = min(first_start, start_time)
last_finish = max(last_finish, finish_time)
if all_devices_reporting and last_finish > 0:
duration = last_finish - first_start
recon_update = {
'object_reconstruction_time': duration / 60.0,
'object_reconstruction_last': last_finish
}
else:
# if any current devices have not yet dropped stats, or the rcache
# file does not yet exist, we may still clear out per device stats
# for any devices that have been removed from local devices
recon_update = {}
found_devices = set(existing_data.get(
'object_reconstruction_per_disk', {}).keys())
clear_update = {d: {} for d in found_devices
if d not in self.all_local_devices}
if clear_update:
recon_update['object_reconstruction_per_disk'] = clear_update
dump_recon_cache(recon_update, self.rcache, self.logger)
def load_object_ring(self, policy):
"""
Make sure the policy's rings are loaded.
:param policy: the StoragePolicy instance
:returns: appropriate ring object
"""
policy.load_ring(self.swift_dir)
return policy.object_ring
def check_ring(self, object_ring):
"""
Check to see if the ring has been updated
:param object_ring: the ring to check
:returns: boolean indicating whether or not the ring has changed
"""
if time.time() > self.next_check:
self.next_check = time.time() + self.ring_check_interval
if object_ring.has_changed():
return False
return True
def _get_response(self, node, policy, partition, path, headers):
"""
Helper method for reconstruction that GETs a single EC fragment
archive
:param node: the node to GET from
:param policy: the job policy
:param partition: the partition
:param path: path of the desired EC archive relative to partition dir
:param headers: the headers to send
:returns: response
"""
full_path = _full_path(node, partition, path, policy)
resp = None
try:
with ConnectionTimeout(self.conn_timeout):
conn = http_connect(
node['replication_ip'], node['replication_port'],
node['device'], partition, 'GET', path, headers=headers)
with Timeout(self.node_timeout):
resp = conn.getresponse()
resp.full_path = full_path
resp.node = node
except (Exception, Timeout):
self.logger.exception(
"Trying to GET %(full_path)s", {
'full_path': full_path})
return resp
def _handle_fragment_response(self, node, policy, partition, fi_to_rebuild,
path, buckets, error_responses, resp):
"""
Place ok responses into a per-timestamp bucket. Append bad responses to
a list per-status-code in error_responses.
:return: the per-timestamp bucket if the response is ok, otherwise
None.
"""
if not resp:
error_responses[UNKNOWN_RESPONSE_STATUS].append(resp)
return None
if resp.status not in [HTTP_OK, HTTP_NOT_FOUND]:
self.logger.warning(
"Invalid response %(resp)s from %(full_path)s",
{'resp': resp.status, 'full_path': resp.full_path})
if resp.status != HTTP_OK:
error_responses[resp.status].append(resp)
return None
resp.headers = HeaderKeyDict(resp.getheaders())
frag_index = resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
try:
resp_frag_index = int(frag_index)
except (TypeError, ValueError):
            # A successful response should include a valid X-Object-
            # Sysmeta-Ec-Frag-Index header, but for safety we catch the
            # cases where that header is missing or is not a valid frag
            # index, and log a warning for them
self.logger.warning(
'Invalid resp from %s '
'(invalid X-Object-Sysmeta-Ec-Frag-Index: %r)',
resp.full_path, frag_index)
error_responses[UNKNOWN_RESPONSE_STATUS].append(resp)
return None
timestamp = resp.headers.get('X-Backend-Data-Timestamp',
resp.headers.get('X-Backend-Timestamp'))
if not timestamp:
self.logger.warning(
'Invalid resp from %s, frag index %s (missing '
'X-Backend-Data-Timestamp and X-Backend-Timestamp)',
resp.full_path, resp_frag_index)
error_responses[UNKNOWN_RESPONSE_STATUS].append(resp)
return None
timestamp = Timestamp(timestamp)
etag = resp.headers.get('X-Object-Sysmeta-Ec-Etag')
if not etag:
self.logger.warning(
'Invalid resp from %s, frag index %s (missing Etag)',
resp.full_path, resp_frag_index)
error_responses[UNKNOWN_RESPONSE_STATUS].append(resp)
return None
bucket = buckets[timestamp]
bucket.num_responses += 1
if bucket.etag is None:
bucket.etag = etag
elif bucket.etag != etag:
self.logger.error('Mixed Etag (%s, %s) for %s frag#%s',
etag, bucket.etag,
_full_path(node, partition, path, policy),
fi_to_rebuild)
return None
durable_timestamp = resp.headers.get('X-Backend-Durable-Timestamp')
if durable_timestamp:
buckets[Timestamp(durable_timestamp)].durable = True
if resp_frag_index == fi_to_rebuild:
# TODO: With duplicated EC frags it's not unreasonable to find the
# very fragment we're trying to rebuild exists on another primary
# node. In this case we should stream it directly from the remote
# node to our target instead of rebuild. But instead we ignore it.
self.logger.debug(
'Found existing frag #%s at %s while rebuilding to %s',
fi_to_rebuild, resp.full_path,
_full_path(node, partition, path, policy))
elif resp_frag_index not in bucket.useful_responses:
bucket.useful_responses[resp_frag_index] = resp
# else: duplicate frag_index isn't useful for rebuilding
return bucket
def _is_quarantine_candidate(self, policy, buckets, error_responses, df):
# This condition is deliberately strict because it determines if
# more requests will be issued and ultimately if the fragment
# will be quarantined.
if list(error_responses.keys()) != [404]:
# only quarantine if all other responses are 404 so we are
# confident there are no other frags on queried nodes
return False
local_timestamp = Timestamp(df.get_datafile_metadata()['X-Timestamp'])
if list(buckets.keys()) != [local_timestamp]:
# don't quarantine if there's insufficient other timestamp
# frags, or no response for the local frag timestamp: we
# possibly could quarantine, but this unexpected case may be
# worth more investigation
return False
if time.time() - float(local_timestamp) <= self.quarantine_age:
# If the fragment has not yet passed reclaim age then it is
# likely that a tombstone will be reverted to this node, or
# neighbor frags will get reverted from handoffs to *other* nodes
# and we'll discover we *do* have enough to reconstruct. Don't
# quarantine it yet: better that it is cleaned up 'normally'.
return False
bucket = buckets[local_timestamp]
return (bucket.num_responses <= self.quarantine_threshold and
bucket.num_responses < policy.ec_ndata and
df._frag_index in bucket.useful_responses)
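    # A worked example of the checks above (numbers illustrative): with
    # quarantine_threshold = 1, ec_ndata = 4 and quarantine_age already
    # passed, a local frag whose own timestamp is the only bucket seen and
    # whose bucket holds just its own response (num_responses == 1), while
    # every error response was a 404, is a quarantine candidate; a single
    # non-404 error or a second timestamp bucket vetoes it.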
def _make_fragment_requests(self, job, node, df, buckets, error_responses):
"""
        Issue fragment GET requests to the other primary nodes (and, for a
        quarantine candidate, to handoff nodes) and sort the
        responses into per-timestamp ``buckets`` or per-status
``error_responses``. If any bucket accumulates sufficient responses to
rebuild the missing fragment then return that bucket.
:param job: job from ssync_sender.
:param node: node to which we're rebuilding.
:param df: an instance of :class:`~swift.obj.diskfile.BaseDiskFile`.
:param buckets: dict of per-timestamp buckets for ok responses.
:param error_responses: dict of per-status lists of error responses.
        :return: A per-timestamp bucket with sufficient responses, or None if
there is no such bucket.
"""
policy = job['policy']
partition = job['partition']
datafile_metadata = df.get_datafile_metadata()
# the fragment index we need to reconstruct is the position index
# of the node we're rebuilding to within the primary part list
fi_to_rebuild = node['backend_index']
# KISS send out connection requests to all nodes, see what sticks.
# Use fragment preferences header to tell other nodes that we want
# fragments at the same timestamp as our fragment, and that they don't
# need to be durable. Accumulate responses into per-timestamp buckets
        # and if any bucket gets enough responses then use those responses to
# rebuild.
headers = self.headers.copy()
headers['X-Backend-Storage-Policy-Index'] = int(policy)
headers['X-Backend-Replication'] = 'True'
local_timestamp = Timestamp(datafile_metadata['X-Timestamp'])
frag_prefs = [{'timestamp': local_timestamp.normal, 'exclude': []}]
headers['X-Backend-Fragment-Preferences'] = json.dumps(frag_prefs)
path = datafile_metadata['name']
ring = policy.object_ring
primary_nodes = ring.get_part_nodes(partition)
# primary_node_count is the maximum number of nodes to consume in a
# normal rebuild attempt when there is no quarantine candidate,
# including the node to which we are rebuilding
primary_node_count = len(primary_nodes)
# don't try and fetch a fragment from the node we're rebuilding to
filtered_primary_nodes = [n for n in primary_nodes
if n['id'] != node['id']]
# concurrency is the number of requests fired off in initial batch
concurrency = len(filtered_primary_nodes)
# max_node_count is the maximum number of nodes to consume when
# verifying a quarantine candidate and is at least primary_node_count
max_node_count = max(primary_node_count,
self.request_node_count(primary_node_count))
pile = GreenAsyncPile(concurrency)
for primary_node in filtered_primary_nodes:
pile.spawn(self._get_response, primary_node, policy, partition,
path, headers)
useful_bucket = None
for resp in pile:
bucket = self._handle_fragment_response(
node, policy, partition, fi_to_rebuild, path, buckets,
error_responses, resp)
if bucket and len(bucket.useful_responses) >= policy.ec_ndata:
useful_bucket = bucket
break
# Once all rebuild nodes have responded, if we have a quarantine
# candidate, go beyond primary_node_count and on to handoffs. The
# first non-404 response will prevent quarantine, but the expected
# common case is all 404 responses so we use some concurrency to get an
# outcome faster at the risk of some unnecessary requests in the
# uncommon case.
if (not useful_bucket and
self._is_quarantine_candidate(
policy, buckets, error_responses, df)):
node_count = primary_node_count
handoff_iter = itertools.islice(ring.get_more_nodes(partition),
max_node_count - node_count)
for handoff_node in itertools.islice(handoff_iter, concurrency):
node_count += 1
pile.spawn(self._get_response, handoff_node, policy, partition,
path, headers)
for resp in pile:
bucket = self._handle_fragment_response(
node, policy, partition, fi_to_rebuild, path, buckets,
error_responses, resp)
if bucket and len(bucket.useful_responses) >= policy.ec_ndata:
useful_bucket = bucket
self.logger.debug(
'Reconstructing frag from handoffs, node_count=%d'
% node_count)
break
elif self._is_quarantine_candidate(
policy, buckets, error_responses, df):
try:
handoff_node = next(handoff_iter)
node_count += 1
pile.spawn(self._get_response, handoff_node, policy,
partition, path, headers)
except StopIteration:
pass
# else: this frag is no longer a quarantine candidate, so we
# could break right here and ignore any remaining responses,
# but given that we may have actually found another frag we'll
# optimistically wait for any remaining responses in case a
# useful bucket is assembled.
return useful_bucket
def reconstruct_fa(self, job, node, df):
"""
Reconstructs a fragment archive - this method is called from ssync
        after a remote node responds that it is missing this object - the local
diskfile is opened to provide metadata - but to reconstruct the
missing fragment archive we must connect to multiple object servers.
:param job: job from ssync_sender.
:param node: node to which we're rebuilding.
:param df: an instance of :class:`~swift.obj.diskfile.BaseDiskFile`.
:returns: a DiskFile like class for use by ssync.
:raises DiskFileQuarantined: if the fragment archive cannot be
reconstructed and has as a result been quarantined.
:raises DiskFileError: if the fragment archive cannot be reconstructed.
"""
policy = job['policy']
partition = job['partition']
# the fragment index we need to reconstruct is the position index
# of the node we're rebuilding to within the primary part list
fi_to_rebuild = node['backend_index']
datafile_metadata = df.get_datafile_metadata()
if not df.validate_metadata():
raise df._quarantine(
df._data_file, "Invalid fragment #%s" % df._frag_index)
local_timestamp = Timestamp(datafile_metadata['X-Timestamp'])
path = datafile_metadata['name']
buckets = defaultdict(ResponseBucket) # map timestamp -> Bucket
error_responses = defaultdict(list) # map status code -> response list
# don't try and fetch a fragment from the node we're rebuilding to
useful_bucket = self._make_fragment_requests(
job, node, df, buckets, error_responses)
if useful_bucket:
frag_indexes = list(useful_bucket.useful_responses.keys())
self.logger.debug('Reconstruct frag #%s with frag indexes %s'
% (fi_to_rebuild, frag_indexes))
responses = list(useful_bucket.useful_responses.values())
rebuilt_fragment_iter = self.make_rebuilt_fragment_iter(
responses[:policy.ec_ndata], path, policy, fi_to_rebuild)
return RebuildingECDiskFileStream(datafile_metadata, fi_to_rebuild,
rebuilt_fragment_iter)
full_path = _full_path(node, partition, path, policy)
for timestamp, bucket in sorted(buckets.items()):
self.logger.error(
'Unable to get enough responses (%s/%s from %s ok responses) '
'to reconstruct %s %s frag#%s with ETag %s and timestamp %s' %
(len(bucket.useful_responses), policy.ec_ndata,
bucket.num_responses,
'durable' if bucket.durable else 'non-durable',
full_path, fi_to_rebuild, bucket.etag, timestamp.internal))
if error_responses:
durable = buckets[local_timestamp].durable
errors = ', '.join(
'%s x %s' % (len(responses),
'unknown' if status == UNKNOWN_RESPONSE_STATUS
else status)
for status, responses in sorted(error_responses.items()))
self.logger.error(
'Unable to get enough responses (%s error responses) '
'to reconstruct %s %s frag#%s' % (
errors, 'durable' if durable else 'non-durable',
full_path, fi_to_rebuild))
if self._is_quarantine_candidate(policy, buckets, error_responses, df):
raise df._quarantine(
df._data_file, "Solitary fragment #%s" % df._frag_index)
raise DiskFileError('Unable to reconstruct EC archive')
def _reconstruct(self, policy, fragment_payload, frag_index):
return policy.pyeclib_driver.reconstruct(fragment_payload,
[frag_index])[0]
def make_rebuilt_fragment_iter(self, responses, path, policy, frag_index):
"""
Turn a set of connections from backend object servers into a generator
that yields up the rebuilt fragment archive for frag_index.
"""
def _get_one_fragment(resp):
buff = []
remaining_bytes = policy.fragment_size
while remaining_bytes:
chunk = resp.read(remaining_bytes)
if not chunk:
break
remaining_bytes -= len(chunk)
buff.append(chunk)
return b''.join(buff)
def fragment_payload_iter():
            # We need a fragment from each connection, so best to
# use a GreenPile to keep them ordered and in sync
pile = GreenPile(len(responses))
while True:
for resp in responses:
pile.spawn(_get_one_fragment, resp)
try:
with Timeout(self.node_timeout):
fragment_payload = [fragment for fragment in pile]
except (Exception, Timeout):
self.logger.exception(
"Error trying to rebuild %(path)s "
"policy#%(policy)d frag#%(frag_index)s",
{'path': path,
'policy': policy,
'frag_index': frag_index,
})
break
if not all(fragment_payload):
break
rebuilt_fragment = self._reconstruct(
policy, fragment_payload, frag_index)
yield rebuilt_fragment
return fragment_payload_iter()
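    # A worked example of the sizes involved (illustrative policy): with
    # ec_ndata = 4 and fragment_size = 1 MiB, each loop iteration reads up
    # to 1 MiB from each of the 4 responses, feeds those 4 fragments to
    # _reconstruct(), and yields one rebuilt fragment for frag_index;
    # iteration stops once any response returns no more data.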
def stats_line(self):
"""
Logs various stats for the currently running reconstruction pass.
"""
if (self.device_count and self.part_count):
elapsed = (time.time() - self.start) or 0.000001
rate = self.reconstruction_part_count / elapsed
self.logger.info(
"%(reconstructed)d/%(total)d (%(percentage).2f%%)"
" partitions reconstructed in %(time).2fs "
"(%(rate).2f/sec, %(remaining)s remaining)",
{'reconstructed': self.reconstruction_part_count,
'total': self.part_count,
'percentage':
self.reconstruction_part_count * 100.0 / self.part_count,
'time': time.time() - self.start, 'rate': rate,
'remaining': '%d%s' %
compute_eta(self.start,
self.reconstruction_part_count,
self.part_count)})
if self.suffix_count and self.partition_times:
self.logger.info(
"%(checked)d suffixes checked - "
"%(hashed).2f%% hashed, %(synced).2f%% synced",
{'checked': self.suffix_count,
'hashed': (self.suffix_hash * 100.0) / self.suffix_count,
'synced': (self.suffix_sync * 100.0) / self.suffix_count})
self.partition_times.sort()
self.logger.info(
"Partition times: max %(max).4fs, "
"min %(min).4fs, med %(med).4fs",
{'max': self.partition_times[-1],
'min': self.partition_times[0],
'med': self.partition_times[
len(self.partition_times) // 2]})
else:
self.logger.info(
"Nothing reconstructed for %s seconds.",
(time.time() - self.start))
def _emplace_log_prefix(self, worker_index):
self.logger.set_prefix("[worker %d/%d pid=%s] " % (
worker_index + 1, # use 1-based indexing for more readable logs
self.reconstructor_workers,
os.getpid()))
def kill_coros(self):
"""Utility function that kills all coroutines currently running."""
for coro in list(self.run_pool.coroutines_running):
try:
coro.kill(GreenletExit)
except GreenletExit:
pass
def heartbeat(self):
"""
Loop that runs in the background during reconstruction. It
periodically logs progress.
"""
while True:
sleep(self.stats_interval)
self.stats_line()
def detect_lockups(self):
"""
In testing, the pool.waitall() call very occasionally failed to return.
        This is an attempt to make sure the reconstructor eventually
        finishes its reconstruction pass.
"""
while True:
sleep(self.lockup_timeout)
if self.reconstruction_count == self.last_reconstruction_count:
self.logger.error("Lockup detected.. killing live coros.")
self.kill_coros()
self.last_reconstruction_count = self.reconstruction_count
def _get_hashes(self, device, partition, policy, recalculate=None,
do_listdir=False):
df_mgr = self._df_router[policy]
hashed, suffix_hashes = tpool.execute(
df_mgr._get_hashes, device, partition, policy,
recalculate=recalculate, do_listdir=do_listdir)
self.logger.update_stats('suffix.hashes', hashed)
return suffix_hashes
def get_suffix_delta(self, local_suff, local_index,
remote_suff, remote_index):
"""
Compare the local suffix hashes with the remote suffix hashes
for the given local and remote fragment indexes. Return those
suffixes which should be synced.
:param local_suff: the local suffix hashes (from _get_hashes)
:param local_index: the local fragment index for the job
:param remote_suff: the remote suffix hashes (from remote
REPLICATE request)
:param remote_index: the remote fragment index for the job
:returns: a list of strings, the suffix dirs to sync
"""
suffixes = []
for suffix, sub_dict_local in local_suff.items():
sub_dict_remote = remote_suff.get(suffix, {})
if (sub_dict_local.get(None) != sub_dict_remote.get(None) or
sub_dict_local.get(local_index) !=
sub_dict_remote.get(remote_index)):
suffixes.append(suffix)
return suffixes
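    # A minimal worked example (hashes illustrative): with
    #   local_suff  = {'abc': {None: 'ts1', 2: 'h1'},
    #                  '123': {None: 'ts2', 2: 'h2'}}
    #   remote_suff = {'abc': {None: 'ts1', 5: 'h1'}}
    # and local_index=2, remote_index=5, only '123' is returned: it is
    # missing on the remote, while 'abc' matches on both the None-keyed
    # hash and the per-frag-index hash.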
def _iter_nodes_for_frag(self, policy, partition, node):
"""
Generate a priority list of nodes that can sync to the given node.
The primary node is always the highest priority, after that we'll use
handoffs.
To avoid conflicts placing frags we'll skip through the handoffs and
        only yield back those whose backend index is equal to the given
        primary node's.
Nodes returned from this iterator will have 'backend_index' set.
"""
node['backend_index'] = policy.get_backend_index(node['index'])
yield node
count = 0
for handoff_node in policy.object_ring.get_more_nodes(partition):
handoff_backend_index = policy.get_backend_index(
handoff_node['handoff_index'])
if handoff_backend_index == node['backend_index']:
if (self.rebuild_handoff_node_count >= 0 and
count >= self.rebuild_handoff_node_count):
break
handoff_node['backend_index'] = handoff_backend_index
yield handoff_node
count += 1
def _get_suffixes_to_sync(self, job, node):
"""
For SYNC jobs we need to make a remote REPLICATE request to get
the remote node's current suffix's hashes and then compare to our
local suffix's hashes to decide which suffixes (if any) are out
of sync.
:param job: the job dict, with the keys defined in ``_get_part_jobs``
:param node: the remote node dict
:returns: a (possibly empty) list of strings, the suffixes to be
synced and the remote node.
"""
# get hashes from the remote node
remote_suffixes = None
attempts_remaining = 1
headers = self.headers.copy()
headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
possible_nodes = self._iter_nodes_for_frag(
job['policy'], job['partition'], node)
while remote_suffixes is None and attempts_remaining:
try:
node = next(possible_nodes)
except StopIteration:
break
attempts_remaining -= 1
try:
with Timeout(self.http_timeout):
resp = http_connect(
node['replication_ip'], node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'', headers=headers).getresponse()
if resp.status == HTTP_INSUFFICIENT_STORAGE:
self.logger.error(
'%s responded as unmounted',
_full_path(node, job['partition'], '',
job['policy']))
attempts_remaining += 1
elif resp.status != HTTP_OK:
full_path = _full_path(node, job['partition'], '',
job['policy'])
self.logger.error(
"Invalid response %(resp)s from %(full_path)s",
{'resp': resp.status, 'full_path': full_path})
else:
remote_suffixes = pickle.loads(resp.read())
except (Exception, Timeout):
# all exceptions are logged here so that our caller can
# safely catch our exception and continue to the next node
# without logging
self.logger.exception('Unable to get remote suffix hashes '
'from %r' % _full_path(
node, job['partition'], '',
job['policy']))
if remote_suffixes is None:
raise SuffixSyncError('Unable to get remote suffix hashes')
suffixes = self.get_suffix_delta(job['hashes'],
job['frag_index'],
remote_suffixes,
node['backend_index'])
# now recalculate local hashes for suffixes that don't
# match so we're comparing the latest
local_suff = self._get_hashes(job['local_dev']['device'],
job['partition'],
job['policy'], recalculate=suffixes)
suffixes = self.get_suffix_delta(local_suff,
job['frag_index'],
remote_suffixes,
node['backend_index'])
self.suffix_count += len(suffixes)
return suffixes, node
def delete_reverted_objs(self, job, objects):
"""
For EC we can potentially revert only some of a partition
so we'll delete reverted objects here. Note that we delete
the fragment index of the file we sent to the remote node.
:param job: the job being processed
:param objects: a dict of objects to be deleted, each entry maps
hash=>timestamp
"""
df_mgr = self._df_router[job['policy']]
suffixes_to_delete = set()
for object_hash, timestamps in objects.items():
try:
df, filenames = df_mgr.get_diskfile_and_filenames_from_hash(
job['local_dev']['device'], job['partition'],
object_hash, job['policy'],
frag_index=job['frag_index'])
# legacy durable data files look like modern nondurable data
# files; we therefore override nondurable_purge_delay when we
# know the data file is durable so that legacy durable data
# files get purged
nondurable_purge_delay = (0 if timestamps.get('durable')
else df_mgr.commit_window)
data_files = [
f for f in filenames
if f.endswith('.data')]
purgable_data_files = [
f for f in data_files
if f.startswith(timestamps['ts_data'].internal)]
if (job['primary_frag_index'] is None
and len(purgable_data_files) == len(data_files) <= 1):
# pure handoff node, and we're about to purge the last
# .data file, so it's ok to remove any meta file that may
# have been reverted
meta_timestamp = timestamps.get('ts_meta')
else:
meta_timestamp = None
df.purge(timestamps['ts_data'], job['frag_index'],
nondurable_purge_delay, meta_timestamp)
except DiskFileNotExist:
# may have passed reclaim age since being reverted, or may have
# raced with another reconstructor process trying the same
pass
except DiskFileError:
self.logger.exception(
'Unable to purge DiskFile (%r %r %r)',
object_hash, timestamps['ts_data'], job['frag_index'])
suffixes_to_delete.add(object_hash[-3:])
for suffix in suffixes_to_delete:
remove_directory(os.path.join(job['path'], suffix))
def process_job(self, job):
"""
Sync the local partition with the remote node(s) according to
the parameters of the job. For primary nodes, the SYNC job type
will define both left and right hand sync_to nodes to ssync with
as defined by this primary nodes index in the node list based on
the fragment index found in the partition. For non-primary
nodes (either handoff revert, or rebalance) the REVERT job will
define a single node in sync_to which is the proper/new home for
the fragment index.
N.B. ring rebalancing can be time consuming and handoff nodes'
        fragment indexes do not have a stable order; it's possible to
have more than one REVERT job for a partition, and in some rare
failure conditions there may even also be a SYNC job for the
same partition - but each one will be processed separately
because each job will define a separate list of node(s) to
'sync_to'.
:param job: the job dict, with the keys defined in ``_get_job_info``
"""
begin = time.time()
if job['job_type'] == REVERT:
self._revert(job, begin)
else:
self._sync(job, begin)
self.partition_times.append(time.time() - begin)
self.reconstruction_count += 1
def _sync(self, job, begin):
"""
Process a SYNC job.
"""
self.logger.increment(
'partition.update.count.%s' % (job['local_dev']['device'],))
for node in job['sync_to']:
try:
suffixes, node = self._get_suffixes_to_sync(job, node)
except SuffixSyncError:
continue
if not suffixes:
continue
# ssync any out-of-sync suffixes with the remote node; do not limit
# max_objects - we need to check them all because, unlike a revert
# job, we don't purge any objects so start with the same set each
# cycle
success, _ = ssync_sender(
self, node, job, suffixes, include_non_durable=False,
max_objects=0)()
# update stats for this attempt
self.suffix_sync += len(suffixes)
self.logger.update_stats('suffix.syncs', len(suffixes))
self.logger.timing_since('partition.update.timing', begin)
def _revert(self, job, begin):
"""
Process a REVERT job.
"""
self.logger.increment(
'partition.delete.count.%s' % (job['local_dev']['device'],))
syncd_with = 0
reverted_objs = {}
try:
df_mgr = self._df_router[job['policy']]
# Only object-server can take this lock if an incoming SSYNC is
            # running on the same partition. Taking the lock here ensures we
# won't enter a race condition where both nodes try to
# cross-replicate the same partition and both delete it.
with df_mgr.partition_lock(job['device'], job['policy'],
job['partition'], name='replication',
timeout=0.2):
limited_by_max_objects = False
for node in job['sync_to']:
node['backend_index'] = job['policy'].get_backend_index(
node['index'])
sender = ssync_sender(
self, node, job, job['suffixes'],
include_non_durable=True,
max_objects=self.max_objects_per_revert)
success, in_sync_objs = sender()
limited_by_max_objects |= sender.limited_by_max_objects
if success:
syncd_with += 1
reverted_objs.update(in_sync_objs)
if syncd_with >= len(job['sync_to']):
self.delete_reverted_objs(job, reverted_objs)
if syncd_with < len(job['sync_to']) or limited_by_max_objects:
self.handoffs_remaining += 1
except PartitionLockTimeout:
self.logger.info("Unable to lock handoff partition %d for revert "
"on device %s policy %d",
job['partition'], job['device'], job['policy'])
self.logger.increment('partition.lock-failure.count')
self.handoffs_remaining += 1
self.logger.timing_since('partition.delete.timing', begin)
def _get_part_jobs(self, local_dev, part_path, partition, policy):
"""
Helper function to build jobs for a partition, this method will
read the suffix hashes and create job dictionaries to describe
the needed work. There will be one job for each fragment index
discovered in the partition.
For a fragment index which corresponds to this node's ring
index, a job with job_type SYNC will be created to ensure that
the left and right hand primary ring nodes for the part have the
corresponding left and right hand fragment archives.
A fragment index (or entire partition) for which this node is
not the primary corresponding node, will create job(s) with
job_type REVERT to ensure that fragment archives are pushed to
the correct node and removed from this one.
A partition may result in multiple jobs. Potentially many
REVERT jobs, and zero or one SYNC job.
:param local_dev: the local device (node dict)
:param part_path: full path to partition
:param partition: partition number
:param policy: the policy
:returns: a list of dicts of job info
N.B. If this function ever returns an empty list of jobs the entire
partition will be deleted.
"""
# find all the fi's in the part, and which suffixes have them
try:
hashes = self._get_hashes(local_dev['device'], partition, policy,
do_listdir=True)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
self.logger.warning(
'Unexpected entity %r is not a directory' % part_path)
return []
non_data_fragment_suffixes = []
data_fi_to_suffixes = defaultdict(list)
for suffix, fi_hash in hashes.items():
if not fi_hash:
# this is for sanity and clarity, normally an empty
# suffix would get del'd from the hashes dict, but an
# OSError trying to re-hash the suffix could leave the
# value empty - it will log the exception; but there's
# no way to properly address this suffix at this time.
continue
data_frag_indexes = [f for f in fi_hash if f is not None]
if not data_frag_indexes:
non_data_fragment_suffixes.append(suffix)
else:
for fi in data_frag_indexes:
data_fi_to_suffixes[fi].append(suffix)
# helper to ensure consistent structure of jobs
def build_job(job_type, frag_index, suffixes, sync_to,
primary_frag_index):
return {
'job_type': job_type,
'frag_index': frag_index,
'suffixes': suffixes,
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': hashes,
'policy': policy,
'local_dev': local_dev,
# ssync likes to have it handy
'device': local_dev['device'],
# provide a hint to revert jobs that the node is a primary for
# one of the frag indexes
'primary_frag_index': primary_frag_index,
}
# aggregate jobs for all the fragment index in this part
jobs = []
# check the primary nodes - to see if the part belongs here
primary_frag_index = None
part_nodes = policy.object_ring.get_part_nodes(partition)
for node in part_nodes:
if node['id'] == local_dev['id']:
# this partition belongs here, we'll need a sync job
primary_frag_index = policy.get_backend_index(node['index'])
try:
suffixes = data_fi_to_suffixes.pop(primary_frag_index)
except KeyError:
# N.B. If this function ever returns an empty list of jobs
# the entire partition will be deleted.
suffixes = []
sync_job = build_job(
job_type=SYNC,
frag_index=primary_frag_index,
suffixes=suffixes,
sync_to=_get_partners(node['index'], part_nodes),
primary_frag_index=primary_frag_index
)
# ssync callback to rebuild missing fragment_archives
sync_job['sync_diskfile_builder'] = self.reconstruct_fa
jobs.append(sync_job)
break
# assign remaining data fragment suffixes to revert jobs
ordered_fis = sorted((len(suffixes), fi) for fi, suffixes
in data_fi_to_suffixes.items())
for count, fi in ordered_fis:
# In single region EC a revert job must sync to the specific
            # primary whose node_index matches the data's frag_index. With
# duplicated EC frags a revert job must sync to all primary nodes
# that should be holding this frag_index.
if fi >= len(part_nodes):
self.logger.warning(
'Bad fragment index %r for suffixes %r under %s',
fi, data_fi_to_suffixes[fi], part_path)
continue
nodes_sync_to = []
node_index = fi
for n in range(policy.ec_duplication_factor):
nodes_sync_to.append(part_nodes[node_index])
node_index += policy.ec_n_unique_fragments
revert_job = build_job(
job_type=REVERT,
frag_index=fi,
suffixes=data_fi_to_suffixes[fi],
sync_to=nodes_sync_to,
primary_frag_index=primary_frag_index
)
jobs.append(revert_job)
# now we need to assign suffixes that have no data fragments
if non_data_fragment_suffixes:
if jobs:
# the first job will be either the sync_job, or the
# revert_job for the fragment index that is most common
# among the suffixes
jobs[0]['suffixes'].extend(non_data_fragment_suffixes)
else:
# this is an unfortunate situation, we need a revert job to
# push partitions off this node, but none of the suffixes
# have any data fragments to hint at which node would be a
# good candidate to receive the tombstones.
#
# we'll check a sample of other primaries before we delete our
# local tombstones, the exact number doesn't matter as long as
# it's enough to ensure the tombstones are not lost and less
# than *all the replicas*
nsample = (policy.ec_n_unique_fragments *
policy.ec_duplication_factor) - policy.ec_ndata + 1
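# Worked example (hypothetical 10+4 policy): with ec_ndata=10,
# ec_n_unique_fragments=14 and ec_duplication_factor=1 this gives
# nsample = 14 * 1 - 10 + 1 = 5, enough copies to keep the tombstones
# safe (per the comment above) while contacting fewer than all 14
# primaries.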
jobs.append(build_job(
job_type=REVERT,
frag_index=None,
suffixes=non_data_fragment_suffixes,
sync_to=random.sample(part_nodes, nsample),
primary_frag_index=primary_frag_index
))
# return a list of jobs for this part
return jobs
def get_policy2devices(self):
ips = whataremyips(self.ring_ip)
policy2devices = {}
for policy in self.policies:
self.load_object_ring(policy)
local_devices = list(six.moves.filter(
lambda dev: dev and is_local_device(
ips, self.port,
dev['replication_ip'], dev['replication_port']),
policy.object_ring.devs))
policy2devices[policy] = local_devices
return policy2devices
def get_local_devices(self):
"""Returns a set of all local devices in all EC policies."""
policy2devices = self.get_policy2devices()
local_devices = set()
for devices in policy2devices.values():
local_devices.update(d['device'] for d in devices)
return local_devices
def collect_parts(self, override_devices=None, override_partitions=None):
"""
Helper for getting partitions in the top level reconstructor
In handoffs_only mode primary partitions will not be included in the
returned (possibly empty) list.
"""
override_devices = override_devices or []
override_partitions = override_partitions or []
policy2devices = self.get_policy2devices()
all_parts = []
for policy, local_devices in policy2devices.items():
# Skip replication if next_part_power is set. In this case
# every object is hard-linked twice, but the replicator
# can't detect them and would create a second copy of the
# file if not yet existing - and this might double the
# actual transferred and stored data
next_part_power = getattr(
policy.object_ring, 'next_part_power', None)
if next_part_power is not None:
self.logger.warning(
"next_part_power set in policy '%s'. Skipping",
policy.name)
continue
df_mgr = self._df_router[policy]
for local_dev in local_devices:
if override_devices and (
local_dev['device'] not in override_devices):
continue
self.device_count += 1
dev_path = df_mgr.get_dev_path(local_dev['device'])
if not dev_path:
self.logger.warning('%s is not mounted',
local_dev['device'])
continue
data_dir = get_data_dir(policy)
obj_path = join(dev_path, data_dir)
tmp_path = join(dev_path, get_tmp_dir(int(policy)))
unlink_older_than(tmp_path, time.time() -
df_mgr.reclaim_age)
if not os.path.exists(obj_path):
try:
mkdirs(obj_path)
except Exception:
self.logger.exception(
'Unable to create %s' % obj_path)
continue
try:
partitions = os.listdir(obj_path)
except OSError:
self.logger.exception(
'Unable to list partitions in %r' % obj_path)
continue
self.part_count += len(partitions)
for partition in partitions:
part_path = join(obj_path, partition)
if (partition.startswith('auditor_status_') and
partition.endswith('.json')):
# ignore auditor status files
continue
if not partition.isdigit():
self.logger.warning(
'Unexpected entity in data dir: %r' % part_path)
self.delete_partition(part_path)
self.reconstruction_part_count += 1
continue
partition = int(partition)
if override_partitions and (partition not in
override_partitions):
continue
# N.B. In handoffs_only mode a primary node may skip syncing
# misplaced (handoff) fragments that sit in its primary
# partition. That can happen across several rebalances (e.g. a
# node holding a handoff fragment becomes a new primary). Those
# fragments will be synced (and reverted) once handoffs_only
# mode is turned off.
if self.handoffs_only and any(
local_dev['id'] == n['id']
for n in policy.object_ring.get_part_nodes(
partition)):
self.logger.debug('Skipping %s job for %s '
'while in handoffs_only mode.',
SYNC, part_path)
continue
part_info = {
'local_dev': local_dev,
'policy': policy,
'partition': partition,
'part_path': part_path,
}
all_parts.append(part_info)
random.shuffle(all_parts)
return all_parts
def build_reconstruction_jobs(self, part_info):
"""
Helper function for collect_jobs to build jobs for reconstruction
using EC style storage policy
N.B. If this function ever returns an empty list of jobs the entire
partition will be deleted.
"""
jobs = self._get_part_jobs(**part_info)
random.shuffle(jobs)
self.job_count += len(jobs)
return jobs
def _reset_stats(self):
self.start = time.time()
self.job_count = 0
self.part_count = 0
self.device_count = 0
self.suffix_count = 0
self.suffix_sync = 0
self.suffix_hash = 0
self.reconstruction_count = 0
self.reconstruction_part_count = 0
self.last_reconstruction_count = -1
self.handoffs_remaining = 0
def delete_partition(self, path):
def kill_it(path):
shutil.rmtree(path, ignore_errors=True)
remove_file(path)
self.logger.info("Removing partition: %s", path)
tpool.execute(kill_it, path)
def reconstruct(self, **kwargs):
"""Run a reconstruction pass"""
self._reset_stats()
self.partition_times = []
stats = spawn(self.heartbeat)
lockup_detector = spawn(self.detect_lockups)
changed_rings = set()
try:
self.run_pool = GreenPool(size=self.concurrency)
for part_info in self.collect_parts(**kwargs):
sleep() # Give spawns a cycle
if part_info['policy'] in changed_rings:
continue
if not self.check_ring(part_info['policy'].object_ring):
changed_rings.add(part_info['policy'])
self.logger.info(
"Ring change detected for policy %d (%s). Aborting "
"current reconstruction pass for this policy.",
part_info['policy'].idx, part_info['policy'].name)
continue
self.reconstruction_part_count += 1
jobs = self.build_reconstruction_jobs(part_info)
if not jobs:
# If this part belongs on this node, _get_part_jobs
# will *always* build a sync_job - even if there are
# no suffixes in the partition that need to sync.
# If there are any suffixes in the partition then our
# job list would have *at least* one revert job.
# Therefore we know this part a) doesn't belong on
# this node and b) doesn't have any suffixes in it.
self.run_pool.spawn(self.delete_partition,
part_info['part_path'])
for job in jobs:
self.run_pool.spawn(self.process_job, job)
with Timeout(self.lockup_timeout):
self.run_pool.waitall()
except (Exception, Timeout):
self.logger.exception("Exception in top-level "
"reconstruction loop")
self.kill_coros()
finally:
stats.kill()
lockup_detector.kill()
self.stats_line()
if self.handoffs_only:
if self.handoffs_remaining > 0:
self.logger.info(
"Handoffs only mode still has handoffs remaining. "
"Next pass will continue to revert handoffs.")
else:
self.logger.warning(
"Handoffs only mode found no handoffs remaining. "
"You should disable handoffs_only once all nodes "
"are reporting no handoffs remaining.")
def final_recon_dump(self, total, override_devices=None, **kwargs):
"""
Add stats for this worker's run to recon cache.
When in worker mode (per_disk_stats == True) this worker's stats are
added per device instead of in the top level keys (aggregation is
serialized in the parent process).
:param total: the runtime of the cycle in minutes
:param override_devices: (optional) list of devices that are being
reconstructed
"""
recon_update = {
'object_reconstruction_time': total,
'object_reconstruction_last': time.time(),
}
devices = override_devices or self.all_local_devices
if self.reconstructor_workers > 0 and devices:
recon_update['pid'] = os.getpid()
recon_update = {'object_reconstruction_per_disk': {
d: recon_update for d in devices}}
else:
# if not running in worker mode, kill any per_disk stats
recon_update['object_reconstruction_per_disk'] = {}
dump_recon_cache(recon_update, self.rcache, self.logger)
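# Illustrative sketch (hypothetical devices and numbers): a worker
# covering sdb1 and sdb2 would dump something like
#   {'object_reconstruction_per_disk': {
#       'sdb1': {'object_reconstruction_time': 12.3,
#                'object_reconstruction_last': 1514764800.0,
#                'pid': 1234},
#       'sdb2': {...}}}
# whereas a non-worker run writes the two top-level keys and clears
# object_reconstruction_per_disk.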
def post_multiprocess_run(self):
# This method is called after run_once when using multiple workers.
self.aggregate_recon_update()
def run_once(self, multiprocess_worker_index=None, *args, **kwargs):
if multiprocess_worker_index is not None:
self._emplace_log_prefix(multiprocess_worker_index)
start = time.time()
self.logger.info("Running object reconstructor in script mode.")
override_opts = parse_override_options(once=True, **kwargs)
self.reconstruct(override_devices=override_opts.devices,
override_partitions=override_opts.partitions)
total = (time.time() - start) / 60
self.logger.info(
"Object reconstruction complete (once). (%.02f minutes)", total)
# Only dump stats if they would actually be meaningful -- i.e. we're
# collecting per-disk stats and covering all partitions, or we're
# covering all partitions, all disks.
if not override_opts.partitions and (
self.reconstructor_workers > 0 or not override_opts.devices):
self.final_recon_dump(
total, override_devices=override_opts.devices,
override_partitions=override_opts.partitions)
def run_forever(self, multiprocess_worker_index=None, *args, **kwargs):
if multiprocess_worker_index is not None:
self._emplace_log_prefix(multiprocess_worker_index)
self.logger.info("Starting object reconstructor in daemon mode.")
# Run the reconstructor continually
while True:
start = time.time()
self.logger.info("Starting object reconstruction pass.")
override_opts = parse_override_options(**kwargs)
# Run the reconstructor
self.reconstruct(override_devices=override_opts.devices,
override_partitions=override_opts.partitions)
total = (time.time() - start) / 60
self.logger.info(
"Object reconstruction complete. (%.02f minutes)", total)
self.final_recon_dump(
total, override_devices=override_opts.devices,
override_partitions=override_opts.partitions)
self.logger.debug('reconstruction sleeping for %s seconds.',
self.interval)
sleep(self.interval)
| swift-master | swift/obj/reconstructor.py |
# Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" In-Memory Object Server for Swift """
from swift.obj.mem_diskfile import InMemoryFileSystem
from swift.obj import server
class ObjectController(server.ObjectController):
"""
Implements the WSGI application for the Swift In-Memory Object Server.
"""
def setup(self, conf):
"""
Nothing specific to do for the in-memory version.
:param conf: WSGI configuration parameter
"""
self._filesystem = InMemoryFileSystem()
def get_diskfile(self, device, partition, account, container, obj,
**kwargs):
"""
Utility method for instantiating a DiskFile object supporting a given
REST API.
An implementation of the object server that wants to use a different
DiskFile class would simply over-ride this method to provide that
behavior.
"""
return self._filesystem.get_diskfile(account, container, obj, **kwargs)
def REPLICATE(self, request):
"""
Handle REPLICATE requests for the Swift Object Server. This is used
by the object replicator to get hashes for directories.
"""
pass
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI object server apps"""
conf = global_conf.copy()
conf.update(local_conf)
return ObjectController(conf)
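# Illustrative paste.deploy wiring (the egg entry point name shown is an
# assumption; it depends on how setup.cfg registers this factory):
#
#   [app:object-server]
#   use = egg:swift#mem_object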
| swift-master | swift/obj/mem_server.py |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from eventlet import sleep
import six
from six.moves import urllib
from swift.common import bufferedhttp
from swift.common import exceptions
from swift.common import http
from swift.common import utils
from swift.common.swob import wsgi_to_bytes
def encode_missing(object_hash, ts_data, ts_meta=None, ts_ctype=None,
**kwargs):
"""
Returns a string representing the object hash, its data file timestamp,
the delta forwards to its metafile and content-type timestamps, if
non-zero, and its durability, in the form:
``<hash> <ts_data> [m:<hex delta to ts_meta>[,t:<hex delta to ts_ctype>]
[,durable:False]``
The decoder for this line is
:py:func:`~swift.obj.ssync_receiver.decode_missing`
"""
msg = ('%s %s'
% (urllib.parse.quote(object_hash),
urllib.parse.quote(ts_data.internal)))
extra_parts = []
if ts_meta and ts_meta != ts_data:
delta = ts_meta.raw - ts_data.raw
extra_parts.append('m:%x' % delta)
if ts_meta.offset:
extra_parts[-1] += '__%x' % ts_meta.offset
if ts_ctype and ts_ctype != ts_data:
delta = ts_ctype.raw - ts_data.raw
extra_parts.append('t:%x' % delta)
if ts_ctype.offset:
extra_parts[-1] += '__%x' % ts_ctype.offset
if 'durable' in kwargs and kwargs['durable'] is False:
# only send durable in the less common case that it is False
extra_parts.append('durable:%s' % kwargs['durable'])
if extra_parts:
msg = '%s %s' % (msg, ','.join(extra_parts))
return msg.encode('ascii')
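# Illustrative sketch (hypothetical hash and timestamps): for an object
# whose .data file timestamp is 1400000000.00000 and whose .meta file is
# newer by 0x30 Timestamp "raw" units, this returns
#   b'<hash> 1400000000.00000 m:30'
# and a non-durable EC fragment would additionally carry ',durable:False'.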
def decode_wanted(parts):
"""
Parse missing_check line parts to determine which parts of local
diskfile were wanted by the receiver.
The encoder for parts is
:py:func:`~swift.obj.ssync_receiver.encode_wanted`
"""
wanted = {}
key_map = {'d': 'data', 'm': 'meta'}
if parts:
# receiver specified data and/or meta wanted, so use those as
# conditions for sending PUT and/or POST subrequests
for k in key_map:
if k in parts[0]:
wanted[key_map[k]] = True
if not wanted:
# assume legacy receiver which will only accept PUTs. There is no
# way to send any meta file content without morphing the timestamp
# of either the data or the metadata, so we just send data file
# content to a legacy receiver. Once the receiver gets updated we
# will be able to send it the meta file content.
wanted['data'] = True
return wanted
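# Illustrative examples of the decoding (hypothetical inputs):
#   decode_wanted(['dm']) -> {'data': True, 'meta': True}
#   decode_wanted(['d'])  -> {'data': True}
#   decode_wanted([])     -> {'data': True}   # legacy receiver fallback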
class SsyncBufferedHTTPResponse(bufferedhttp.BufferedHTTPResponse, object):
def __init__(self, *args, **kwargs):
super(SsyncBufferedHTTPResponse, self).__init__(*args, **kwargs)
self.ssync_response_buffer = b''
self.ssync_response_chunk_left = 0
def readline(self, size=1024):
"""
Reads a line from the SSYNC response body.
httplib has no readline and will block on read(x) until x is
read, so we have to do the work ourselves. A bit of this is
taken from Python's httplib itself.
"""
data = self.ssync_response_buffer
self.ssync_response_buffer = b''
while b'\n' not in data and len(data) < size:
if self.ssync_response_chunk_left == -1: # EOF-already indicator
break
if self.ssync_response_chunk_left == 0:
line = self.fp.readline()
i = line.find(b';')
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
self.ssync_response_chunk_left = int(line.strip(), 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self.close()
raise exceptions.ReplicationException('Early disconnect')
if self.ssync_response_chunk_left == 0:
self.ssync_response_chunk_left = -1
break
chunk = self.fp.read(min(self.ssync_response_chunk_left,
size - len(data)))
if not chunk:
# close the connection as protocol synchronisation is
# probably lost
self.close()
raise exceptions.ReplicationException('Early disconnect')
self.ssync_response_chunk_left -= len(chunk)
if self.ssync_response_chunk_left == 0:
self.fp.read(2) # discard the trailing \r\n
data += chunk
if b'\n' in data:
data, self.ssync_response_buffer = data.split(b'\n', 1)
data += b'\n'
return data
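# Illustrative note: SSYNC protocol lines travel inside chunked
# transfer-encoding frames of the form '<hex length>\r\n<payload>\r\n';
# for example the 23-byte marker b':MISSING_CHECK: START\r\n' arrives on
# the wire as b'17\r\n:MISSING_CHECK: START\r\n\r\n'.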
class SsyncBufferedHTTPConnection(bufferedhttp.BufferedHTTPConnection):
response_class = SsyncBufferedHTTPResponse
class Sender(object):
"""
Sends SSYNC requests to the object server.
These requests are eventually handled by
:py:mod:`.ssync_receiver` and full documentation about the
process is there.
"""
def __init__(self, daemon, node, job, suffixes, remote_check_objs=None,
include_non_durable=False, max_objects=0):
self.daemon = daemon
self.df_mgr = self.daemon._df_router[job['policy']]
self.node = node
self.job = job
self.suffixes = suffixes
# When remote_check_objs is given, ssync_sender tries only to
# check whether those objects exist on the remote node.
self.remote_check_objs = remote_check_objs
self.include_non_durable = include_non_durable
self.max_objects = max_objects
self.limited_by_max_objects = False
def __call__(self):
"""
Perform ssync with remote node.
:returns: a 2-tuple, in the form (success, can_delete_objs) where
success is a boolean and can_delete_objs is the map of
objects that are in sync with the receiver. Each entry in
can_delete_objs maps a hash => timestamp of data file or
tombstone file
"""
if not self.suffixes:
return True, {}
connection = response = None
try:
# Double try blocks in case our main error handler fails.
try:
# The general theme for these functions is that they should
# raise exceptions.MessageTimeout for client timeouts and
# exceptions.ReplicationException for common issues that will
# abort the replication attempt and log a simple error. All
# other exceptions will be logged with a full stack trace.
connection, response = self.connect()
# available_map has an entry for each object in given suffixes
# that is available to be sync'd;
# each entry is a hash => dict of timestamps of data file or
# tombstone file and/or meta file
# send_map has an entry for each object that the receiver wants
# to be sync'ed;
# each entry maps an object hash => dict of wanted parts
available_map, send_map = self.missing_check(connection,
response)
if self.remote_check_objs is None:
self.updates(connection, response, send_map)
can_delete_obj = available_map
else:
# when we are initialized with remote_check_objs we don't
# *send* any requested updates; instead we only collect
# what's already in sync and safe for deletion
in_sync_hashes = (set(available_map.keys()) -
set(send_map.keys()))
can_delete_obj = dict((hash_, available_map[hash_])
for hash_ in in_sync_hashes)
self.daemon.logger.debug(
'ssync completed ok: dev: %s, part: %s, policy: %d, '
'num suffixes: %s, available: %d, sent: %d, deletable: %d',
self.job['device'], self.job['partition'],
self.job['policy'].idx, len(self.suffixes),
len(available_map), len(send_map), len(can_delete_obj))
return True, can_delete_obj
except (exceptions.MessageTimeout,
exceptions.ReplicationException) as err:
node_str = utils.node_to_string(self.node, replication=True)
self.daemon.logger.error('%s/%s %s', node_str,
self.job['partition'], err)
except Exception:
# We don't want any exceptions to escape our code and possibly
# mess up the original replicator code that called us since it
# was originally written to shell out to rsync which would do
# no such thing.
node_str = utils.node_to_string(self.node, replication=True)
self.daemon.logger.exception(
'%s/%s EXCEPTION in ssync.Sender',
node_str, self.job['partition'])
finally:
self.disconnect(connection)
except Exception:
# We don't want any exceptions to escape our code and possibly
# mess up the original replicator code that called us since it
# was originally written to shell out to rsync which would do
# no such thing.
# This particular exception handler does the minimal amount as it
# would only get called if the above except Exception handler
# failed (bad node or job data).
self.daemon.logger.exception('EXCEPTION in ssync.Sender')
return False, {}
def connect(self):
"""
Establishes a connection and starts an SSYNC request
with the object server.
"""
connection = response = None
node_addr = '%s:%s' % (self.node['replication_ip'],
self.node['replication_port'])
with exceptions.MessageTimeout(
self.daemon.conn_timeout, 'connect send'):
connection = SsyncBufferedHTTPConnection(node_addr)
connection.putrequest('SSYNC', '/%s/%s' % (
self.node['device'], self.job['partition']))
connection.putheader('Transfer-Encoding', 'chunked')
connection.putheader('X-Backend-Storage-Policy-Index',
int(self.job['policy']))
# a sync job must use the node's backend_index for the frag_index
# of the rebuilt fragments instead of the frag_index from the job
# which will be rebuilding them
frag_index = self.node.get('backend_index')
if frag_index is not None:
connection.putheader('X-Backend-Ssync-Frag-Index', frag_index)
# Node-Index header is for backwards compat 2.4.0-2.20.0
connection.putheader('X-Backend-Ssync-Node-Index', frag_index)
connection.endheaders()
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'connect receive'):
response = connection.getresponse()
if response.status != http.HTTP_OK:
err_msg = utils.cap_length(response.read(), 1024)
raise exceptions.ReplicationException(
'Expected status %s; got %s (%s)' %
(http.HTTP_OK, response.status, err_msg))
if self.include_non_durable and not utils.config_true_value(
response.getheader('x-backend-accept-no-commit', False)):
# fall back to legacy behaviour if receiver does not understand
# X-Backend-Commit
self.daemon.logger.warning(
'ssync receiver %s does not accept non-durable fragments' %
node_addr)
self.include_non_durable = False
return connection, response
def missing_check(self, connection, response):
"""
Handles the sender-side of the MISSING_CHECK step of a
SSYNC request.
Full documentation of this can be found at
:py:meth:`.Receiver.missing_check`.
"""
self.limited_by_max_objects = False
available_map = {}
send_map = {}
# First, send our list.
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'missing_check start'):
msg = b':MISSING_CHECK: START\r\n'
connection.send(b'%x\r\n%s\r\n' % (len(msg), msg))
# an empty frag_prefs list is sufficient to get non-durable frags
# yielded, in which case an older durable frag will not be yielded
frag_prefs = [] if self.include_non_durable else None
hash_gen = self.df_mgr.yield_hashes(
self.job['device'], self.job['partition'],
self.job['policy'], self.suffixes,
frag_index=self.job.get('frag_index'),
frag_prefs=frag_prefs)
if self.remote_check_objs is not None:
hash_gen = six.moves.filter(
lambda objhash_timestamps:
objhash_timestamps[0] in
self.remote_check_objs, hash_gen)
nlines = 0
nbytes = 0
object_hash = None
for object_hash, timestamps in hash_gen:
available_map[object_hash] = timestamps
with exceptions.MessageTimeout(
self.daemon.node_timeout,
'missing_check send line: %d lines (%d bytes) sent'
% (nlines, nbytes)):
msg = b'%s\r\n' % encode_missing(object_hash, **timestamps)
msg = b'%x\r\n%s\r\n' % (len(msg), msg)
connection.send(msg)
if nlines % 5 == 0:
sleep() # Gives a chance for other greenthreads to run
nlines += 1
nbytes += len(msg)
if 0 < self.max_objects <= nlines:
break
for _ in hash_gen:
# only log truncation if there were more hashes to come...
self.limited_by_max_objects = True
self.daemon.logger.info(
'ssync missing_check truncated after %d objects: '
'device: %s, part: %s, policy: %s, last object hash: '
'%s', nlines, self.job['device'],
self.job['partition'], int(self.job['policy']),
object_hash)
break
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'missing_check end'):
msg = b':MISSING_CHECK: END\r\n'
connection.send(b'%x\r\n%s\r\n' % (len(msg), msg))
# Now, retrieve the list of what they want.
while True:
with exceptions.MessageTimeout(
self.daemon.http_timeout, 'missing_check start wait'):
line = response.readline(size=self.daemon.network_chunk_size)
if not line:
raise exceptions.ReplicationException('Early disconnect')
line = line.strip()
if line == b':MISSING_CHECK: START':
break
elif line:
if not six.PY2:
try:
line = line.decode('ascii')
except UnicodeDecodeError:
pass
raise exceptions.ReplicationException(
'Unexpected response: %r' % utils.cap_length(line, 1024))
while True:
with exceptions.MessageTimeout(
self.daemon.http_timeout, 'missing_check line wait'):
line = response.readline(size=self.daemon.network_chunk_size)
if not line:
raise exceptions.ReplicationException('Early disconnect')
line = line.strip()
if line == b':MISSING_CHECK: END':
break
parts = line.decode('ascii').split()
if parts:
send_map[parts[0]] = decode_wanted(parts[1:])
return available_map, send_map
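# Illustrative sketch of the exchange (hypothetical hashes and
# timestamps): the sender emits
#   :MISSING_CHECK: START
#   <hash_a> 1400000000.00000
#   <hash_b> 1400000001.00000
#   :MISSING_CHECK: END
# and a receiver that wants only the second object's data replies
#   :MISSING_CHECK: START
#   <hash_b> d
#   :MISSING_CHECK: END
# leaving send_map == {'<hash_b>': {'data': True}}.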
def updates(self, connection, response, send_map):
"""
Handles the sender-side of the UPDATES step of an SSYNC
request.
Full documentation of this can be found at
:py:meth:`.Receiver.updates`.
"""
# First, send all our subrequests based on the send_map.
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'updates start'):
msg = b':UPDATES: START\r\n'
connection.send(b'%x\r\n%s\r\n' % (len(msg), msg))
frag_prefs = [] if self.include_non_durable else None
updates = 0
for object_hash, want in send_map.items():
object_hash = urllib.parse.unquote(object_hash)
try:
df = self.df_mgr.get_diskfile_from_hash(
self.job['device'], self.job['partition'], object_hash,
self.job['policy'], frag_index=self.job.get('frag_index'),
open_expired=True, frag_prefs=frag_prefs)
except exceptions.DiskFileNotExist:
continue
url_path = urllib.parse.quote(
'/%s/%s/%s' % (df.account, df.container, df.obj))
try:
df.open()
if want.get('data'):
is_durable = (df.durable_timestamp == df.data_timestamp)
# EC reconstructor may have passed a callback to build an
# alternative diskfile - construct it using the metadata
# from the data file only.
df_alt = self.job.get(
'sync_diskfile_builder', lambda *args: df)(
self.job, self.node, df)
self.send_put(connection, url_path, df_alt,
durable=is_durable)
if want.get('meta') and df.data_timestamp != df.timestamp:
self.send_post(connection, url_path, df)
except exceptions.DiskFileDeleted as err:
if want.get('data'):
self.send_delete(connection, url_path, err.timestamp)
except exceptions.DiskFileError:
# DiskFileErrors are expected while opening the diskfile,
# before any data is read and sent. Since there is no partial
# state on the receiver it's ok to ignore this diskfile and
# continue. The diskfile may however be deleted after a
# successful ssync since it remains in the send_map.
pass
if updates % 5 == 0:
sleep() # Gives a chance for other greenthreads to run
updates += 1
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'updates end'):
msg = b':UPDATES: END\r\n'
connection.send(b'%x\r\n%s\r\n' % (len(msg), msg))
# Now, read their response for any issues.
while True:
with exceptions.MessageTimeout(
self.daemon.http_timeout, 'updates start wait'):
line = response.readline(size=self.daemon.network_chunk_size)
if not line:
raise exceptions.ReplicationException('Early disconnect')
line = line.strip()
if line == b':UPDATES: START':
break
elif line:
if not six.PY2:
try:
line = line.decode('ascii')
except UnicodeDecodeError:
pass
raise exceptions.ReplicationException(
'Unexpected response: %r' % utils.cap_length(line, 1024))
while True:
with exceptions.MessageTimeout(
self.daemon.http_timeout, 'updates line wait'):
line = response.readline(size=self.daemon.network_chunk_size)
if not line:
raise exceptions.ReplicationException('Early disconnect')
line = line.strip()
if line == b':UPDATES: END':
break
elif line:
if not six.PY2:
try:
line = line.decode('ascii')
except UnicodeDecodeError:
pass
raise exceptions.ReplicationException(
'Unexpected response: %r' % utils.cap_length(line, 1024))
def send_subrequest(self, connection, method, url_path, headers, df):
msg = [b'%s %s' % (method.encode('ascii'), url_path.encode('utf8'))]
for key, value in sorted(headers.items()):
msg.append(wsgi_to_bytes('%s: %s' % (key, value)))
msg = b'\r\n'.join(msg) + b'\r\n\r\n'
with exceptions.MessageTimeout(self.daemon.node_timeout,
'send_%s' % method.lower()):
connection.send(b'%x\r\n%s\r\n' % (len(msg), msg))
if df:
bytes_read = 0
for chunk in df.reader():
bytes_read += len(chunk)
with exceptions.MessageTimeout(self.daemon.node_timeout,
'send_%s chunk' %
method.lower()):
connection.send(b'%x\r\n%s\r\n' % (len(chunk), chunk))
if bytes_read != df.content_length:
# Since we may now have partial state on the receiver we have
# to prevent the receiver finalising what may well be a bad or
# partially written diskfile. Unfortunately we have no other
# option than to pull the plug on this ssync session. If ssync
# supported multiphase PUTs like the proxy uses for EC we could
# send a bad etag in a footer of this subrequest, but that is
# not supported.
raise exceptions.ReplicationException(
'Sent data length does not match content-length')
def send_delete(self, connection, url_path, timestamp):
"""
Sends a DELETE subrequest with the given information.
"""
headers = {'X-Timestamp': timestamp.internal}
self.send_subrequest(connection, 'DELETE', url_path, headers, None)
def send_put(self, connection, url_path, df, durable=True):
"""
Sends a PUT subrequest for the url_path using the source df
(DiskFile) and content_length.
"""
headers = {'Content-Length': str(df.content_length)}
if not durable:
# only send this header for the less common case; without this
# header object servers assume default commit behaviour
headers['X-Backend-No-Commit'] = 'True'
for key, value in df.get_datafile_metadata().items():
if key not in ('name', 'Content-Length'):
headers[key] = value
self.send_subrequest(connection, 'PUT', url_path, headers, df)
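# Illustrative sketch (hypothetical object and metadata): a PUT
# subrequest for /AUTH_test/c/o is framed by send_subrequest() roughly as
#   PUT /AUTH_test/c/o
#   Content-Length: 12
#   ETag: <md5 of the object data>
#   X-Timestamp: 1400000000.00000
#   <blank line>
# followed by the diskfile body, with every piece wrapped in the same
# '<hex length>\r\n<payload>\r\n' chunk framing used for protocol lines.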
def send_post(self, connection, url_path, df):
metadata = df.get_metafile_metadata()
if metadata is None:
return
self.send_subrequest(connection, 'POST', url_path, metadata, None)
def disconnect(self, connection):
"""
Closes down the connection to the object server once done
with the SSYNC request.
"""
if not connection:
return
try:
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'disconnect'):
connection.send(b'0\r\n\r\n')
except (Exception, exceptions.Timeout):
pass # We're okay with the above failing.
connection.close()
| swift-master | swift/obj/ssync_sender.py |
# Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" In-Memory Disk File Interface for Swift Object Server"""
import io
import time
from contextlib import contextmanager
from eventlet import Timeout
from swift.common.utils import Timestamp
from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
DiskFileCollision, DiskFileDeleted, DiskFileNotOpen
from swift.common.request_helpers import is_sys_meta
from swift.common.swob import multi_range_iterator
from swift.common.utils import md5
from swift.obj.diskfile import DATAFILE_SYSTEM_META, RESERVED_DATAFILE_META
class InMemoryFileSystem(object):
"""
A very simplistic in-memory file system scheme.
There is one dictionary mapping a given object name to a tuple. The first
entry in the tuple is the BytesIO buffer representing the file contents,
the second entry is the metadata dictionary.
"""
def __init__(self):
self._filesystem = {}
def get_object(self, name):
"""
Return back an file-like object and its metadata
:param name: standard object name
:return: (fp, metadata) fp is ``BytesIO`` in-memory representation
object (or None). metadata is a dictionary
of metadata (or None)
"""
val = self._filesystem.get(name)
if val is None:
fp, metadata = None, None
else:
fp, metadata = val
return fp, metadata
def put_object(self, name, fp, metadata):
"""
Store object into memory
:param name: standard object name
:param fp: ``BytesIO`` in-memory representation object
:param metadata: dictionary of metadata to be written
"""
self._filesystem[name] = (fp, metadata)
def del_object(self, name):
"""
Delete object from memory
:param name: standard object name
"""
del self._filesystem[name]
def get_diskfile(self, account, container, obj, **kwargs):
return DiskFile(self, account, container, obj)
def pickle_async_update(self, *args, **kwargs):
"""
For now don't handle async updates.
"""
pass
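# Minimal usage sketch (hypothetical names, shown for illustration only):
#   fs = InMemoryFileSystem()
#   fs.put_object('/a/c/o', io.BytesIO(b'data'), {'name': '/a/c/o'})
#   fp, metadata = fs.get_object('/a/c/o')  # same BytesIO and dict back
#   fs.del_object('/a/c/o')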
class DiskFileWriter(object):
"""
.. note::
Sample alternative pluggable on-disk backend implementation.
Encapsulation of the write context for servicing PUT REST API
requests. Serves as the context manager object for DiskFile's create()
method.
:param fs: internal file system object to use
:param name: standard object name
"""
def __init__(self, fs, name):
self._filesystem = fs
self._name = name
self._fp = None
self._upload_size = 0
self._chunks_etag = md5(usedforsecurity=False)
def open(self):
"""
Prepare to accept writes.
Create a new ``BytesIO`` object for a started-but-not-yet-finished
PUT.
"""
self._fp = io.BytesIO()
return self
def close(self):
"""
Clean up resources following an ``open()``.
Note: If ``put()`` has not been called, the data written will be lost.
"""
self._fp = None
def write(self, chunk):
"""
Write a chunk of data into the ``BytesIO`` object.
:param chunk: the chunk of data to write as a string object
"""
self._fp.write(chunk)
self._upload_size += len(chunk)
self._chunks_etag.update(chunk)
def chunks_finished(self):
"""
Expose internal stats about written chunks.
:returns: a tuple, (upload_size, etag)
"""
return self._upload_size, self._chunks_etag.hexdigest()
def put(self, metadata):
"""
Make the final association in the in-memory file system for this name
with the ``BytesIO`` object.
:param metadata: dictionary of metadata to be written
"""
metadata['name'] = self._name
self._filesystem.put_object(self._name, self._fp, metadata)
def commit(self, timestamp):
"""
Perform any operations necessary to mark the object as durable. For
mem_diskfile type this is a no-op.
:param timestamp: object put timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
"""
pass
class DiskFileReader(object):
"""
.. note::
Sample alternative pluggable on-disk backend implementation.
Encapsulation of the read context for servicing GET REST API
requests. Serves as the context manager object for DiskFile's reader()
method.
:param name: object name
:param fp: open file object pointer reference
:param obj_size: on-disk size of object in bytes
:param etag: MD5 hash of object from metadata
"""
def __init__(self, name, fp, obj_size, etag):
self._name = name
self._fp = fp
self._obj_size = obj_size
self._etag = etag
#
self._iter_etag = None
self._bytes_read = 0
self._started_at_0 = False
self._read_to_eof = False
self._suppress_file_closing = False
#
self.was_quarantined = ''
def __iter__(self):
try:
self._bytes_read = 0
self._started_at_0 = False
self._read_to_eof = False
if self._fp.tell() == 0:
self._started_at_0 = True
self._iter_etag = md5(usedforsecurity=False)
while True:
chunk = self._fp.read()
if chunk:
if self._iter_etag:
self._iter_etag.update(chunk)
self._bytes_read += len(chunk)
yield chunk
else:
self._read_to_eof = True
break
finally:
if not self._suppress_file_closing:
self.close()
def app_iter_range(self, start, stop):
if start or start == 0:
self._fp.seek(start)
if stop is not None:
length = stop - start
else:
length = None
try:
for chunk in self:
if length is not None:
length -= len(chunk)
if length < 0:
# Chop off the extra:
yield chunk[:length]
break
yield chunk
finally:
if not self._suppress_file_closing:
self.close()
def app_iter_ranges(self, ranges, content_type, boundary, size):
if not ranges:
yield ''
else:
try:
self._suppress_file_closing = True
for chunk in multi_range_iterator(
ranges, content_type, boundary, size,
self.app_iter_range):
yield chunk
finally:
self._suppress_file_closing = False
try:
self.close()
except DiskFileQuarantined:
pass
def _quarantine(self, msg):
self.was_quarantined = msg
def _handle_close_quarantine(self):
if self._bytes_read != self._obj_size:
self._quarantine(
"Bytes read: %s, does not match metadata: %s" % (
self._bytes_read, self._obj_size))
elif self._iter_etag and \
self._etag != self._iter_etag.hexdigest():
self._quarantine(
"ETag %s and file's md5 %s do not match" % (
self._etag, self._iter_etag.hexdigest()))
def close(self):
"""
Close the file. Will handle quarantining file if necessary.
"""
if self._fp:
try:
if self._started_at_0 and self._read_to_eof:
self._handle_close_quarantine()
except (Exception, Timeout):
pass
finally:
self._fp = None
class DiskFile(object):
"""
.. note::
Sample alternative pluggable on-disk backend implementation. This
example duck-types the reference implementation DiskFile class.
Manage object files in-memory.
:param fs: an instance of InMemoryFileSystem
:param account: account name for the object
:param container: container name for the object
:param obj: object name for the object
"""
def __init__(self, fs, account, container, obj):
self._name = '/' + '/'.join((account, container, obj))
self._metadata = None
self._fp = None
self._filesystem = fs
self.fragments = None
def open(self, modernize=False, current_time=None):
"""
Open the file and read the metadata.
This method must populate the _metadata attribute.
:param current_time: Unix time used in checking expiration. If not
present, the current time will be used.
:raises DiskFileCollision: on name mis-match with metadata
:raises DiskFileDeleted: if it does not exist, or a tombstone is
present
:raises DiskFileQuarantined: if while reading metadata of the file
some data did not pass cross checks
"""
fp, self._metadata = self._filesystem.get_object(self._name)
if fp is None:
raise DiskFileDeleted()
self._fp = self._verify_data_file(fp, current_time)
self._metadata = self._metadata or {}
return self
def __enter__(self):
if self._metadata is None:
raise DiskFileNotOpen()
return self
def __exit__(self, t, v, tb):
if self._fp is not None:
self._fp = None
def _quarantine(self, name, msg):
"""
Quarantine a file; responsible for incrementing the associated logger's
count of quarantines.
:param name: name of object to quarantine
:param msg: reason for quarantining to be included in the exception
:returns: DiskFileQuarantined exception object
"""
# for this implementation we simply delete the bad object
self._filesystem.del_object(name)
return DiskFileQuarantined(msg)
def _verify_data_file(self, fp, current_time):
"""
Verify the metadata's name value matches what we think the object is
named.
:raises DiskFileCollision: if the metadata stored name does not match
the referenced name of the file
:raises DiskFileNotExist: if the object has expired
:raises DiskFileQuarantined: if data inconsistencies were detected
between the metadata and the file-system
metadata
"""
try:
mname = self._metadata['name']
except KeyError:
raise self._quarantine(self._name, "missing name metadata")
else:
if mname != self._name:
raise DiskFileCollision('Client path does not match path '
'stored in object metadata')
try:
x_delete_at = int(self._metadata['X-Delete-At'])
except KeyError:
pass
except ValueError:
# Quarantine, the x-delete-at key is present but not an
# integer.
raise self._quarantine(
self._name, "bad metadata x-delete-at value %s" % (
self._metadata['X-Delete-At']))
else:
if current_time is None:
current_time = time.time()
if x_delete_at <= current_time:
raise DiskFileNotExist('Expired')
try:
metadata_size = int(self._metadata['Content-Length'])
except KeyError:
raise self._quarantine(
self._name, "missing content-length in metadata")
except ValueError:
# Quarantine, the content-length key is present but not an
# integer.
raise self._quarantine(
self._name, "bad metadata content-length value %s" % (
self._metadata['Content-Length']))
try:
fp.seek(0, 2)
obj_size = fp.tell()
fp.seek(0, 0)
except OSError as err:
# Quarantine, we can't successfully stat the file.
raise self._quarantine(self._name, "not stat-able: %s" % err)
if obj_size != metadata_size:
raise self._quarantine(
self._name, "metadata content-length %s does"
" not match actual object size %s" % (
metadata_size, obj_size))
return fp
def get_metadata(self):
"""
Provide the metadata for an object as a dictionary.
:returns: object's metadata dictionary
"""
if self._metadata is None:
raise DiskFileNotOpen()
return self._metadata
get_datafile_metadata = get_metadata
get_metafile_metadata = get_metadata
def read_metadata(self, current_time=None):
"""
Return the metadata for an object.
:param current_time: Unix time used in checking expiration. If not
present, the current time will be used.
:returns: metadata dictionary for an object
"""
with self.open(current_time=current_time):
return self.get_metadata()
def reader(self, keep_cache=False):
"""
Return a swift.common.swob.Response class compatible "app_iter"
object. The responsibility of closing the open file is passed to the
DiskFileReader object.
:param keep_cache:
"""
dr = DiskFileReader(self._name, self._fp,
int(self._metadata['Content-Length']),
self._metadata['ETag'])
# At this point the reader object is now responsible for
# the file pointer.
self._fp = None
return dr
def writer(self, size=None):
return DiskFileWriter(self._filesystem, self._name)
@contextmanager
def create(self, size=None):
"""
Context manager to create a file. For this in-memory implementation no
temporary file is needed; we simply return a DiskFileWriter object to
encapsulate the state.
:param size: optional initial size of file to explicitly allocate on
disk
:raises DiskFileNoSpace: if a size is specified and allocation fails
"""
writer = self.writer(size)
try:
yield writer.open()
finally:
writer.close()
def write_metadata(self, metadata):
"""
Write a block of metadata to an object.
"""
data, cur_mdata = self._filesystem.get_object(self._name)
if data is not None:
# The object exists. Update the new metadata with the object's
# immutable metadata (e.g. name, size, etag, sysmeta) and store it
# with the object data.
immutable_metadata = dict(
[(key, val) for key, val in cur_mdata.items()
if key.lower() in (RESERVED_DATAFILE_META |
DATAFILE_SYSTEM_META)
or is_sys_meta('object', key)])
metadata.update(immutable_metadata)
metadata['name'] = self._name
self._filesystem.put_object(self._name, data, metadata)
def delete(self, timestamp):
"""
Perform a delete for the given object in the given container under the
given account.
This creates a tombstone file with the given timestamp, and removes
any older versions of the object file. Any file that has an older
timestamp than timestamp will be deleted.
:param timestamp: timestamp to compare with each file
"""
fp, md = self._filesystem.get_object(self._name)
if md and md['X-Timestamp'] < Timestamp(timestamp):
self._filesystem.del_object(self._name)
@property
def timestamp(self):
if self._metadata is None:
raise DiskFileNotOpen()
return Timestamp(self._metadata.get('X-Timestamp'))
data_timestamp = timestamp
durable_timestamp = timestamp
content_type_timestamp = timestamp
@property
def content_type(self):
if self._metadata is None:
raise DiskFileNotOpen()
return self._metadata.get('Content-Type')
| swift-master | swift/obj/mem_diskfile.py |
swift-master | swift/obj/watchers/__init__.py |
|
# Copyright (c) 2019 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is an audit watcher that manages the dark data in the cluster.
# Since the API for audit watchers is intended to use external plugins,
# this code is invoked as if it were external: through pkg_resources.
# Our setup.py comes pre-configured for convenience, but the operator has
# to enable this watcher explicitly by adding DarkDataWatcher to watchers=
# in object-server.conf. The default is off, as if this does not exist.
# Which is for the best, because this watcher has a large performance impact.
#
"""
The name "Dark Data" refers to the scientific hypothesis of Dark Matter,
which supposes that the universe contains a lot of matter that we cannot
observe. In Swift, Dark Data is the name for objects that are not
accounted for in the containers.
The experience of running large scale clusters suggests that Swift does
not have any particular bugs that trigger creation of dark data. So,
this is an exercise in writing watchers, with a plausible function.
When enabled, Dark Data watcher definitely drags down the cluster's overall
performance. Of course, the load increase can be mitigated as usual,
but at the expense of the total time taken by the auditor's pass.
Because the watcher only deems an object dark when all container
servers agree, it will silently fail to detect anything if even one
of the container servers in the ring is down or unreachable. This is
done in the interest of operators who run with action=delete.
Finally, keep in mind that Dark Data watcher needs the container
ring to operate, but runs on an object node. This can come up if the
cluster has nodes separated by function.
"""
import os
import random
import shutil
import time
from eventlet import Timeout
from swift.common.direct_client import direct_get_container
from swift.common.exceptions import ClientException, QuarantineRequest
from swift.common.ring import Ring
from swift.common.utils import split_path, Timestamp
class ContainerError(Exception):
pass
class DarkDataWatcher(object):
def __init__(self, conf, logger):
self.logger = logger
swift_dir = '/etc/swift'
self.container_ring = Ring(swift_dir, ring_name='container')
self.dark_data_policy = conf.get('action')
if self.dark_data_policy not in ['log', 'delete', 'quarantine']:
if self.dark_data_policy is not None:
self.logger.warning(
"Dark data action %r unknown, defaults to action = 'log'" %
(self.dark_data_policy,))
self.dark_data_policy = 'log'
self.grace_age = int(conf.get('grace_age', 604800))
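# Illustrative configuration sketch: the watcher reads 'action' and
# 'grace_age' from its object-server.conf section, roughly as below.
# The entry point name 'swift#dark_data' is an assumption here; the
# actual name depends on how setup.py registers the plugin.
#
#   [object-auditor]
#   watchers = swift#dark_data
#
#   [object-auditor:watcher:swift#dark_data]
#   action = log
#   grace_age = 604800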
def start(self, audit_type, **other_kwargs):
self.is_zbf = audit_type == 'ZBF'
self.tot_unknown = 0
self.tot_dark = 0
self.tot_okay = 0
def policy_based_object_handling(self, data_file_path, metadata):
obj_path = metadata['name']
if self.dark_data_policy == "quarantine":
self.logger.info("quarantining dark data %s" % obj_path)
raise QuarantineRequest
elif self.dark_data_policy == "log":
self.logger.info("reporting dark data %s" % obj_path)
elif self.dark_data_policy == "delete":
obj_dir = os.path.dirname(data_file_path)
self.logger.info("deleting dark data %s" % obj_dir)
shutil.rmtree(obj_dir)
def see_object(self, object_metadata, data_file_path, **other_kwargs):
# No point in loading the container servers with unnecessary requests.
if self.is_zbf:
return
put_tstr = object_metadata['X-Timestamp']
if float(Timestamp(put_tstr)) + self.grace_age >= time.time():
# We can add "tot_new" if lumping these with the good objects
# ever bothers anyone.
self.tot_okay += 1
return
obj_path = object_metadata['name']
try:
obj_info = get_info_1(self.container_ring, obj_path, self.logger)
except ContainerError:
self.tot_unknown += 1
return
if obj_info is None:
self.tot_dark += 1
self.policy_based_object_handling(data_file_path, object_metadata)
else:
# OK, object is there, but in the future we might want to verify
# more. Watch out for versioned objects, EC, and all that.
self.tot_okay += 1
def end(self, **other_kwargs):
if self.is_zbf:
return
self.logger.info("total unknown %d ok %d dark %d" %
(self.tot_unknown, self.tot_okay, self.tot_dark))
#
# Get the information for 1 object from container server
#
def get_info_1(container_ring, obj_path, logger):
path_comps = split_path(obj_path, 1, 3, True)
account_name = path_comps[0]
container_name = path_comps[1]
obj_name = path_comps[2]
container_part, container_nodes = \
container_ring.get_nodes(account_name, container_name)
if not container_nodes:
raise ContainerError()
# Perhaps we should do something about the way we select the container
# nodes. For now we just shuffle. It spreads the load, but it does not
# improve upon the case when some nodes are down, so the auditor slows
# to a crawl (if this plugin is enabled).
random.shuffle(container_nodes)
err_flag = 0
for node in container_nodes:
try:
headers, objs = direct_get_container(
node, container_part, account_name, container_name,
prefix=obj_name, limit=1)
except (ClientException, Timeout):
# Something is wrong with that server, treat as an error.
err_flag += 1
continue
if objs and objs[0]['name'] == obj_name:
return objs[0]
# We only report the object as dark if all known servers agree that it is.
if err_flag:
raise ContainerError()
return None
| swift-master | swift/obj/watchers/dark_data.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mimetypes
import os
import socket
from collections import defaultdict
from random import shuffle
from time import time
import functools
import sys
from eventlet import Timeout
from swift import __canonical_version__ as swift_version
from swift.common import constraints
from swift.common.http import is_server_error, HTTP_INSUFFICIENT_STORAGE
from swift.common.storage_policy import POLICIES
from swift.common.ring import Ring
from swift.common.error_limiter import ErrorLimiter
from swift.common.utils import Watchdog, get_logger, \
get_remote_client, split_path, config_true_value, generate_trans_id, \
affinity_key_function, affinity_locality_predicate, list_from_csv, \
parse_prefixed_conf, config_auto_int_value, node_to_string, \
config_request_node_count_value, config_percent_value, cap_length
from swift.common.registry import register_swift_info
from swift.common.constraints import check_utf8, valid_api_version
from swift.proxy.controllers import AccountController, ContainerController, \
ObjectControllerRouter, InfoController
from swift.proxy.controllers.base import get_container_info, \
DEFAULT_RECHECK_CONTAINER_EXISTENCE, DEFAULT_RECHECK_ACCOUNT_EXISTENCE, \
DEFAULT_RECHECK_UPDATING_SHARD_RANGES, DEFAULT_RECHECK_LISTING_SHARD_RANGES
from swift.common.swob import HTTPBadRequest, HTTPForbidden, \
HTTPMethodNotAllowed, HTTPNotFound, HTTPPreconditionFailed, \
HTTPServerError, HTTPException, Request, HTTPServiceUnavailable, \
wsgi_to_str
from swift.common.exceptions import APIVersionError
# List of entry points for mandatory middlewares.
#
# Fields:
#
# "name" (required) is the entry point name from setup.py.
#
# "after_fn" (optional) a function that takes a PipelineWrapper object as its
# single argument and returns a list of middlewares that this middleware
# should come after. Any middlewares in the returned list that are not present
# in the pipeline will be ignored, so you can safely name optional middlewares
# to come after. For example, ["catch_errors", "bulk"] would install this
# middleware after catch_errors and bulk if both were present, but if bulk
# were absent, would just install it after catch_errors.
required_filters = [
{'name': 'catch_errors'},
{'name': 'gatekeeper',
'after_fn': lambda pipe: (['catch_errors']
if pipe.startswith('catch_errors')
else [])},
{'name': 'listing_formats', 'after_fn': lambda _junk: [
'catch_errors', 'gatekeeper', 'proxy_logging', 'memcache']},
# Put copy before dlo, slo and versioned_writes
{'name': 'copy', 'after_fn': lambda _junk: [
'staticweb', 'tempauth', 'keystoneauth',
'catch_errors', 'gatekeeper', 'proxy_logging']},
{'name': 'dlo', 'after_fn': lambda _junk: [
'copy', 'staticweb', 'tempauth', 'keystoneauth',
'catch_errors', 'gatekeeper', 'proxy_logging']},
{'name': 'versioned_writes', 'after_fn': lambda _junk: [
'slo', 'dlo', 'copy', 'staticweb', 'tempauth',
'keystoneauth', 'catch_errors', 'gatekeeper', 'proxy_logging']},
]
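# Illustrative example: with a configured pipeline of
# "catch_errors proxy-server", the gatekeeper entry above returns
# ['catch_errors'] from its after_fn, so gatekeeper is placed after
# catch_errors; any names an after_fn returns that are absent from the
# pipeline are simply ignored.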
def _label_for_policy(policy):
if policy is not None:
return 'policy %s (%s)' % (policy.idx, policy.name)
return '(default)'
VALID_SORTING_METHODS = ('shuffle', 'timing', 'affinity')
class ProxyOverrideOptions(object):
"""
Encapsulates proxy server options that may be overridden e.g. for
policy specific configurations.
:param conf: the proxy-server config dict.
:param override_conf: a dict of overriding configuration options.
"""
def __init__(self, base_conf, override_conf, app):
def get(key, default):
return override_conf.get(key, base_conf.get(key, default))
self.sorting_method = get('sorting_method', 'shuffle').lower()
if self.sorting_method not in VALID_SORTING_METHODS:
raise ValueError(
'Invalid sorting_method value; must be one of %s, not %r' % (
', '.join(VALID_SORTING_METHODS), self.sorting_method))
self.read_affinity = get('read_affinity', '')
try:
self.read_affinity_sort_key = affinity_key_function(
self.read_affinity)
except ValueError as err:
# make the message a little more useful
raise ValueError("Invalid read_affinity value: %r (%s)" %
(self.read_affinity, err.args[0]))
self.write_affinity = get('write_affinity', '')
try:
self.write_affinity_is_local_fn \
= affinity_locality_predicate(self.write_affinity)
except ValueError as err:
# make the message a little more useful
raise ValueError("Invalid write_affinity value: %r (%s)" %
(self.write_affinity, err.args[0]))
self.write_affinity_node_count = get(
'write_affinity_node_count', '2 * replicas').lower()
value = self.write_affinity_node_count.split()
if len(value) == 1:
wanc_value = int(value[0])
self.write_affinity_node_count_fn = lambda replicas: wanc_value
elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
wanc_value = int(value[0])
self.write_affinity_node_count_fn = \
lambda replicas: wanc_value * replicas
else:
raise ValueError(
'Invalid write_affinity_node_count value: %r' %
(' '.join(value)))
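# Worked examples (hypothetical replica counts): '2 * replicas' yields
# 6 nodes for a 3-replica policy, while a bare '3' always yields 3
# regardless of the replica count.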
self.write_affinity_handoff_delete_count = config_auto_int_value(
get('write_affinity_handoff_delete_count', 'auto'), None
)
self.rebalance_missing_suppression_count = int(get(
'rebalance_missing_suppression_count', 1))
self.concurrent_gets = config_true_value(get('concurrent_gets', False))
self.concurrency_timeout = float(get(
'concurrency_timeout', app.conn_timeout))
self.concurrent_ec_extra_requests = int(get(
'concurrent_ec_extra_requests', 0))
def __repr__(self):
return '%s({}, {%s}, app)' % (
self.__class__.__name__, ', '.join(
'%r: %r' % (k, getattr(self, k)) for k in (
'sorting_method',
'read_affinity',
'write_affinity',
'write_affinity_node_count',
'write_affinity_handoff_delete_count',
'rebalance_missing_suppression_count',
'concurrent_gets',
'concurrency_timeout',
'concurrent_ec_extra_requests',
)))
def __eq__(self, other):
if not isinstance(other, ProxyOverrideOptions):
return False
return all(getattr(self, k) == getattr(other, k) for k in (
'sorting_method',
'read_affinity',
'write_affinity',
'write_affinity_node_count',
'write_affinity_handoff_delete_count',
'rebalance_missing_suppression_count',
'concurrent_gets',
'concurrency_timeout',
'concurrent_ec_extra_requests',
))
class Application(object):
"""WSGI application for the proxy server."""
def __init__(self, conf, logger=None, account_ring=None,
container_ring=None):
# This is for the sake of tests which instantiate an Application
# directly rather than via loadapp().
self._pipeline_final_app = self
if conf is None:
conf = {}
if logger is None:
self.logger = get_logger(conf, log_route='proxy-server',
statsd_tail_prefix='proxy-server')
else:
self.logger = logger
self.backend_user_agent = 'proxy-server %s' % os.getpid()
swift_dir = conf.get('swift_dir', '/etc/swift')
self.swift_dir = swift_dir
self.node_timeout = float(conf.get('node_timeout', 10))
self.recoverable_node_timeout = float(
conf.get('recoverable_node_timeout', self.node_timeout))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.client_timeout = float(conf.get('client_timeout', 60))
self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
self.trans_id_suffix = conf.get('trans_id_suffix', '')
self.post_quorum_timeout = float(conf.get('post_quorum_timeout', 0.5))
error_suppression_interval = \
float(conf.get('error_suppression_interval', 60))
error_suppression_limit = \
int(conf.get('error_suppression_limit', 10))
self.error_limiter = ErrorLimiter(error_suppression_interval,
error_suppression_limit)
self.recheck_container_existence = \
int(conf.get('recheck_container_existence',
DEFAULT_RECHECK_CONTAINER_EXISTENCE))
self.recheck_updating_shard_ranges = \
int(conf.get('recheck_updating_shard_ranges',
DEFAULT_RECHECK_UPDATING_SHARD_RANGES))
self.recheck_listing_shard_ranges = \
int(conf.get('recheck_listing_shard_ranges',
DEFAULT_RECHECK_LISTING_SHARD_RANGES))
self.recheck_account_existence = \
int(conf.get('recheck_account_existence',
DEFAULT_RECHECK_ACCOUNT_EXISTENCE))
self.container_existence_skip_cache = config_percent_value(
conf.get('container_existence_skip_cache_pct', 0))
self.container_updating_shard_ranges_skip_cache = \
config_percent_value(conf.get(
'container_updating_shard_ranges_skip_cache_pct', 0))
self.container_listing_shard_ranges_skip_cache = \
config_percent_value(conf.get(
'container_listing_shard_ranges_skip_cache_pct', 0))
self.account_existence_skip_cache = config_percent_value(
conf.get('account_existence_skip_cache_pct', 0))
self.allow_account_management = \
config_true_value(conf.get('allow_account_management', 'no'))
self.container_ring = container_ring or Ring(swift_dir,
ring_name='container')
self.account_ring = account_ring or Ring(swift_dir,
ring_name='account')
# ensure rings are loaded for all configured storage policies
for policy in POLICIES:
policy.load_ring(swift_dir)
self.obj_controller_router = ObjectControllerRouter()
mimetypes.init(mimetypes.knownfiles +
[os.path.join(swift_dir, 'mime.types')])
self.account_autocreate = \
config_true_value(conf.get('account_autocreate', 'no'))
if conf.get('auto_create_account_prefix'):
self.logger.warning('Option auto_create_account_prefix is '
'deprecated. Configure '
'auto_create_account_prefix under the '
'swift-constraints section of '
'swift.conf. This option will '
'be ignored in a future release.')
self.auto_create_account_prefix = \
conf['auto_create_account_prefix']
else:
self.auto_create_account_prefix = \
constraints.AUTO_CREATE_ACCOUNT_PREFIX
self.expiring_objects_account = self.auto_create_account_prefix + \
(conf.get('expiring_objects_account_name') or 'expiring_objects')
self.expiring_objects_container_divisor = \
int(conf.get('expiring_objects_container_divisor') or 86400)
self.max_containers_per_account = \
int(conf.get('max_containers_per_account') or 0)
self.max_containers_whitelist = [
a.strip()
for a in conf.get('max_containers_whitelist', '').split(',')
if a.strip()]
self.deny_host_headers = [
host.strip() for host in
conf.get('deny_host_headers', '').split(',') if host.strip()]
self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
self.cors_allow_origin = [
a.strip()
for a in conf.get('cors_allow_origin', '').split(',')
if a.strip()]
self.cors_expose_headers = [
a.strip()
for a in conf.get('cors_expose_headers', '').split(',')
if a.strip()]
self.strict_cors_mode = config_true_value(
conf.get('strict_cors_mode', 't'))
self.node_timings = {}
self.timing_expiry = int(conf.get('timing_expiry', 300))
value = conf.get('request_node_count', '2 * replicas')
self.request_node_count = config_request_node_count_value(value)
# swift_owner_headers are stripped by the account and container
# controllers; we should extend header stripping to object controller
# when a privileged object header is implemented.
swift_owner_headers = conf.get(
'swift_owner_headers',
'x-container-read, x-container-write, '
'x-container-sync-key, x-container-sync-to, '
'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
'x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, '
'x-account-access-control')
self.swift_owner_headers = [
name.strip().title()
for name in swift_owner_headers.split(',') if name.strip()]
# When upgrading from liberasurecode<=1.5.0, you may want to continue
        # writing legacy CRCs until all nodes are upgraded and capable of
# reading fragments with zlib CRCs.
# See https://bugs.launchpad.net/liberasurecode/+bug/1886088 for more
# information.
if 'write_legacy_ec_crc' in conf:
os.environ['LIBERASURECODE_WRITE_LEGACY_CRC'] = \
'1' if config_true_value(conf['write_legacy_ec_crc']) else '0'
# else, assume operators know what they're doing and leave env alone
# Initialization was successful, so now apply the client chunk size
# parameter as the default read / write buffer size for the network
# sockets.
#
        # NOTE WELL: This is a class setting, so until we can set this on a
# per-connection basis, this affects reading and writing on ALL
# sockets, those between the proxy servers and external clients, and
# those between the proxy servers and the other internal servers.
#
# ** Because it affects the client as well, currently, we use the
        # client chunk size as the governor and not the object chunk size.
if sys.version_info < (3,):
socket._fileobject.default_bufsize = self.client_chunk_size
# TODO: find a way to enable similar functionality in py3
self.expose_info = config_true_value(
conf.get('expose_info', 'yes'))
self.disallowed_sections = list_from_csv(
conf.get('disallowed_sections', ', '.join([
'swift.auto_create_account_prefix',
'swift.valid_api_versions',
])))
self.admin_key = conf.get('admin_key', None)
self._override_options = self._load_per_policy_config(conf)
self.sorts_by_timing = any(pc.sorting_method == 'timing'
for pc in self._override_options.values())
register_swift_info(
version=swift_version,
strict_cors_mode=self.strict_cors_mode,
policies=POLICIES.get_policy_info(),
allow_account_management=self.allow_account_management,
account_autocreate=self.account_autocreate,
**constraints.EFFECTIVE_CONSTRAINTS)
self.watchdog = Watchdog()
self.watchdog.spawn()
def _make_policy_override(self, policy, conf, override_conf):
label_for_policy = _label_for_policy(policy)
try:
override = ProxyOverrideOptions(conf, override_conf, self)
self.logger.debug("Loaded override config for %s: %r" %
(label_for_policy, override))
return override
except ValueError as err:
raise ValueError('%s for %s' % (err, label_for_policy))
def _load_per_policy_config(self, conf):
"""
Loads per-policy config override values from proxy server conf file.
:param conf: the proxy server local conf dict
        :return: a dict mapping each policy index (and ``None`` for the
            default options) to an instance of :class:`ProxyOverrideOptions`
            that has policy-specific config attributes
"""
# the default options will be used when looking up a policy that had no
# override options
default_options = self._make_policy_override(None, conf, {})
overrides = defaultdict(lambda: default_options)
# force None key to be set in the defaultdict so that it is found when
# iterating over items in check_config
overrides[None] = default_options
for index, override_conf in conf.get('policy_config', {}).items():
try:
index = int(index)
except ValueError:
                # require policies to be referenced by index; accepting index
                # *or* name would be ambiguous because names such as "3" are
                # allowed
raise ValueError(
'Override config must refer to policy index: %r' % index)
try:
policy = POLICIES[index]
except KeyError:
raise ValueError(
"No policy found for override config, index: %s" % index)
override = self._make_policy_override(policy, conf, override_conf)
overrides[index] = override
return overrides
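    # Illustrative note (not part of the original source): the incoming
    # conf['policy_config'] is keyed by the policy index as a string, e.g.
    #
    #   {'0': {'sorting_method': 'affinity', 'read_affinity': 'r1=100'}}
    #
    # and the method above folds it into a defaultdict keyed by int index,
    # plus a None key for the default options.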
def get_policy_options(self, policy):
"""
Return policy specific options.
:param policy: an instance of :class:`BaseStoragePolicy` or ``None``
:return: an instance of :class:`ProxyOverrideOptions`
"""
return self._override_options[policy and policy.idx]
def check_config(self):
"""
Check the configuration for possible errors
"""
for policy_idx, options in self._override_options.items():
policy = (None if policy_idx is None
else POLICIES.get_by_index(policy_idx))
if options.read_affinity and options.sorting_method != 'affinity':
self.logger.warning(
"sorting_method is set to '%(method)s', not 'affinity'; "
"%(label)s read_affinity setting will have no effect.",
{'label': _label_for_policy(policy),
'method': options.sorting_method})
def get_object_ring(self, policy_idx):
"""
Get the ring object to use to handle a request based on its policy.
:param policy_idx: policy index as defined in swift.conf
:returns: appropriate ring object
"""
return POLICIES.get_object_ring(policy_idx, self.swift_dir)
def get_controller(self, req):
"""
Get the controller to handle a request.
:param req: the request
:returns: tuple of (controller class, path dictionary)
:raises ValueError: (thrown by split_path) if given invalid path
"""
if req.path == '/info':
d = dict(version=None,
expose_info=self.expose_info,
disallowed_sections=self.disallowed_sections,
admin_key=self.admin_key)
return InfoController, d
version, account, container, obj = split_path(
wsgi_to_str(req.path), 1, 4, True)
d = dict(version=version,
account_name=account,
container_name=container,
object_name=obj)
if account and not valid_api_version(version):
raise APIVersionError('Invalid path')
if obj and container and account:
info = get_container_info(req.environ, self)
if is_server_error(info.get('status')):
raise HTTPServiceUnavailable(request=req)
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
info['storage_policy'])
policy = POLICIES.get_by_index(policy_index)
if not policy:
# This indicates that a new policy has been created,
# with rings, deployed, released (i.e. deprecated =
# False), used by a client to create a container via
# another proxy that was restarted after the policy
# was released, and is now cached - all before this
# worker was HUPed to stop accepting new
# connections. There should never be an "unknown"
# index - but when there is - it's probably operator
# error and hopefully temporary.
raise HTTPServiceUnavailable('Unknown Storage Policy')
return self.obj_controller_router[policy], d
elif container and account:
return ContainerController, d
elif account and not container and not obj:
return AccountController, d
return None, d
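    # Illustrative mapping (not part of the original source) of request paths
    # to the controllers chosen above, assuming a valid API version:
    #
    #   /info              -> InfoController
    #   /v1/AUTH_test      -> AccountController
    #   /v1/AUTH_test/c    -> ContainerController
    #   /v1/AUTH_test/c/o  -> self.obj_controller_router[policy]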
def __call__(self, env, start_response):
"""
WSGI entry point.
Wraps env in swob.Request object and passes it down.
:param env: WSGI environment dictionary
:param start_response: WSGI callable
"""
try:
req = self.update_request(Request(env))
return self.handle_request(req)(env, start_response)
except UnicodeError:
err = HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
return err(env, start_response)
except (Exception, Timeout):
start_response('500 Server Error',
[('Content-Type', 'text/plain')])
return [b'Internal server error.\n']
def update_request(self, req):
if 'x-storage-token' in req.headers and \
'x-auth-token' not in req.headers:
req.headers['x-auth-token'] = req.headers['x-storage-token']
te = req.headers.get('transfer-encoding', '').lower()
if te.rsplit(',', 1)[-1].strip() == 'chunked' and \
'content-length' in req.headers:
# RFC says if both are present, transfer-encoding wins.
# Definitely *don't* forward on the header the backend
# ought to ignore; that offers request-smuggling vectors.
del req.headers['content-length']
return req
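    # Illustrative note (not part of the original source): a request carrying
    # both 'Transfer-Encoding: chunked' and 'Content-Length: 1024' has its
    # Content-Length dropped above, so only the chunked framing is forwarded
    # to the backend.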
def handle_request(self, req):
"""
Entry point for proxy server.
Should return a WSGI-style callable (such as swob.Response).
:param req: swob.Request object
"""
try:
if req.content_length and req.content_length < 0:
self.logger.increment('errors')
return HTTPBadRequest(request=req,
body='Invalid Content-Length')
try:
if not check_utf8(wsgi_to_str(req.path_info),
internal=req.allow_reserved_names):
self.logger.increment('errors')
return HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
except UnicodeError:
self.logger.increment('errors')
return HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
try:
controller, path_parts = self.get_controller(req)
except APIVersionError:
self.logger.increment('errors')
return HTTPBadRequest(request=req)
except ValueError:
self.logger.increment('errors')
return HTTPNotFound(request=req)
if not controller:
self.logger.increment('errors')
return HTTPPreconditionFailed(request=req, body='Bad URL')
if self.deny_host_headers and \
req.host.split(':')[0] in self.deny_host_headers:
return HTTPForbidden(request=req, body='Invalid host header')
controller = controller(self, **path_parts)
if 'swift.trans_id' not in req.environ:
# if this wasn't set by an earlier middleware, set it now
trans_id_suffix = self.trans_id_suffix
trans_id_extra = req.headers.get('x-trans-id-extra')
if trans_id_extra:
trans_id_suffix += '-' + trans_id_extra[:32]
trans_id = generate_trans_id(trans_id_suffix)
req.environ['swift.trans_id'] = trans_id
self.logger.txn_id = trans_id
req.headers['x-trans-id'] = req.environ['swift.trans_id']
controller.trans_id = req.environ['swift.trans_id']
self.logger.client_ip = get_remote_client(req)
allowed_methods = controller.allowed_methods
if config_true_value(req.headers.get(
'X-Backend-Allow-Private-Methods', False)):
allowed_methods = set(allowed_methods).union(
controller.private_methods)
if req.method not in allowed_methods:
return HTTPMethodNotAllowed(request=req, headers={
'Allow': ', '.join(allowed_methods)})
handler = getattr(controller, req.method)
old_authorize = None
if 'swift.authorize' in req.environ:
# We call authorize before the handler, always. If authorized,
                # we remove the swift.authorize hook so it isn't ever called
# again. If not authorized, we return the denial unless the
# controller's method indicates it'd like to gather more
# information and try again later.
resp = req.environ['swift.authorize'](req)
if not resp:
# No resp means authorized, no delayed recheck required.
old_authorize = req.environ['swift.authorize']
else:
# Response indicates denial, but we might delay the denial
# and recheck later. If not delayed, return the error now.
if not getattr(handler, 'delay_denial', None):
return resp
# Save off original request method (GET, POST, etc.) in case it
# gets mutated during handling. This way logging can display the
# method the client actually sent.
req.environ.setdefault('swift.orig_req_method', req.method)
try:
if old_authorize:
req.environ.pop('swift.authorize', None)
return handler(req)
finally:
if old_authorize:
req.environ['swift.authorize'] = old_authorize
except HTTPException as error_response:
return error_response
except (Exception, Timeout):
self.logger.exception('ERROR Unhandled exception in request')
return HTTPServerError(request=req)
def sort_nodes(self, nodes, policy=None):
"""
Sorts nodes in-place (and returns the sorted list) according to
the configured strategy. The default "sorting" is to randomly
shuffle the nodes. If the "timing" strategy is chosen, the nodes
are sorted according to the stored timing data.
:param nodes: a list of nodes
:param policy: an instance of :class:`BaseStoragePolicy`
"""
# In the case of timing sorting, shuffling ensures that close timings
# (ie within the rounding resolution) won't prefer one over another.
# Python's sort is stable (http://wiki.python.org/moin/HowTo/Sorting/)
shuffle(nodes)
policy_options = self.get_policy_options(policy)
if policy_options.sorting_method == 'timing':
now = time()
def key_func(node):
timing, expires = self.node_timings.get(node['ip'], (-1.0, 0))
return timing if expires > now else -1.0
nodes.sort(key=key_func)
elif policy_options.sorting_method == 'affinity':
nodes.sort(key=policy_options.read_affinity_sort_key)
return nodes
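    # Illustrative note (not part of the original source): because list.sort()
    # is stable, the shuffle above means nodes whose timing keys tie (for
    # example several nodes that have all expired back to -1.0) keep a random
    # relative order rather than always favoring the same node.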
def set_node_timing(self, node, timing):
if not self.sorts_by_timing:
return
now = time()
timing = round(timing, 3) # sort timings to the millisecond
self.node_timings[node['ip']] = (timing, now + self.timing_expiry)
def error_limited(self, node):
"""
Check if the node is currently error limited.
:param node: dictionary of node to check
:returns: True if error limited, False otherwise
"""
limited = self.error_limiter.is_limited(node)
if limited:
self.logger.increment('error_limiter.is_limited')
self.logger.debug(
'Node is error limited: %s', node_to_string(node))
return limited
def error_limit(self, node, msg):
"""
Mark a node as error limited. This immediately pretends the
node received enough errors to trigger error suppression. Use
this for errors like Insufficient Storage. For other errors
use :func:`increment`.
:param node: dictionary of node to error limit
:param msg: error message
"""
self.error_limiter.limit(node)
self.logger.increment('error_limiter.forced_limit')
self.logger.error(
'Node will be error limited for %.2fs: %s, error: %s',
self.error_limiter.suppression_interval, node_to_string(node),
msg)
def _error_increment(self, node):
"""
Call increment() on error limiter once, emit metrics and log if error
suppression will be triggered.
:param node: dictionary of node to handle errors for
"""
if self.error_limiter.increment(node):
self.logger.increment('error_limiter.incremented_limit')
self.logger.error(
'Node will be error limited for %.2fs: %s',
self.error_limiter.suppression_interval, node_to_string(node))
def error_occurred(self, node, msg):
"""
Handle logging, and handling of errors.
:param node: dictionary of node to handle errors for
:param msg: error message
"""
if isinstance(msg, bytes):
msg = msg.decode('utf-8')
self.logger.error('%(msg)s %(node)s',
{'msg': msg, 'node': node_to_string(node)})
self._error_increment(node)
def check_response(self, node, server_type, response, method, path,
body=None):
"""
Check response for error status codes and update error limiters as
required.
:param node: a dict describing a node
:param server_type: the type of server from which the response was
received (e.g. 'Object').
:param response: an instance of HTTPResponse.
:param method: the request method.
:param path: the request path.
        :param body: an optional response body. If given, up to the first
            1024 characters of the body will be included in any log message.
        :return: True if the response status code is less than 500, False
            otherwise.
"""
ok = False
if response.status == HTTP_INSUFFICIENT_STORAGE:
self.error_limit(node, 'ERROR Insufficient Storage')
elif is_server_error(response.status):
values = {'status': response.status,
'method': method,
'path': path,
'type': server_type}
if body is None:
fmt = 'ERROR %(status)d Trying to %(method)s ' \
'%(path)s From %(type)s Server'
else:
fmt = 'ERROR %(status)d %(body)s Trying to %(method)s ' \
'%(path)s From %(type)s Server'
values['body'] = cap_length(body, 1024)
self.error_occurred(node, fmt % values)
else:
ok = True
return ok
def exception_occurred(self, node, typ, additional_info,
**kwargs):
"""
Handle logging of generic exceptions.
:param node: dictionary of node to log the error for
:param typ: server type
:param additional_info: additional information to log
"""
if 'level' in kwargs:
log = functools.partial(self.logger.log, kwargs.pop('level'))
if 'exc_info' not in kwargs:
kwargs['exc_info'] = sys.exc_info()
else:
log = self.logger.exception
if isinstance(additional_info, bytes):
additional_info = additional_info.decode('utf-8')
log('ERROR with %(type)s server %(node)s'
' re: %(info)s',
{'type': typ, 'node': node_to_string(node),
'info': additional_info},
**kwargs)
self._error_increment(node)
def modify_wsgi_pipeline(self, pipe):
"""
Called during WSGI pipeline creation. Modifies the WSGI pipeline
context to ensure that mandatory middleware is present in the pipeline.
:param pipe: A PipelineWrapper object
"""
pipeline_was_modified = False
for filter_spec in reversed(required_filters):
filter_name = filter_spec['name']
if filter_name not in pipe:
afters = filter_spec.get('after_fn', lambda _junk: [])(pipe)
insert_at = 0
for after in afters:
try:
insert_at = max(insert_at, pipe.index(after) + 1)
except ValueError: # not in pipeline; ignore it
pass
self.logger.info(
'Adding required filter %(filter_name)s to pipeline at '
'position %(insert_at)d',
{'filter_name': filter_name, 'insert_at': insert_at})
ctx = pipe.create_filter(filter_name)
pipe.insert_filter(ctx, index=insert_at)
pipeline_was_modified = True
if pipeline_was_modified:
self.logger.info("Pipeline was modified. "
"New pipeline is \"%s\".", pipe)
else:
self.logger.debug("Pipeline is \"%s\"", pipe)
def parse_per_policy_config(conf):
"""
Search the config file for any per-policy config sections and load those
sections to a dict mapping policy reference (name or index) to policy
options.
:param conf: the proxy server conf dict
:return: a dict mapping policy reference -> dict of policy options
:raises ValueError: if a policy config section has an invalid name
"""
policy_section_prefix = conf['__name__'] + ':policy:'
return parse_prefixed_conf(conf['__file__'], policy_section_prefix)
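# Illustrative note (not part of the original source): for the usual
# '[app:proxy-server]' paste section, per-policy overrides are expected in
# sections named like
#
#   [proxy-server:policy:0]
#   sorting_method = affinity
#   read_affinity = r1=100
#
# and are returned keyed by the '0' policy reference.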
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI proxy apps."""
conf = global_conf.copy()
conf.update(local_conf)
# Do this here so that the use of conf['__file__'] and conf['__name__'] is
# isolated from the Application. This also simplifies tests that construct
# an Application instance directly.
conf['policy_config'] = parse_per_policy_config(conf)
app = Application(conf)
app.check_config()
return app
| swift-master | swift/proxy/server.py |
| swift-master | swift/proxy/__init__.py |
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift.proxy.controllers.base import Controller
from swift.proxy.controllers.info import InfoController
from swift.proxy.controllers.obj import ObjectControllerRouter
from swift.proxy.controllers.account import AccountController
from swift.proxy.controllers.container import ContainerController
__all__ = [
'AccountController',
'ContainerController',
'Controller',
'InfoController',
'ObjectControllerRouter',
]
| swift-master | swift/proxy/controllers/__init__.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random
import six
from six.moves.urllib.parse import unquote
from swift.common.memcached import MemcacheConnectionError
from swift.common.utils import public, private, csv_append, Timestamp, \
config_true_value, ShardRange, cache_from_env, filter_namespaces, \
NamespaceBoundList
from swift.common.constraints import check_metadata, CONTAINER_LISTING_LIMIT
from swift.common.http import HTTP_ACCEPTED, is_success
from swift.common.request_helpers import get_sys_meta_prefix, get_param, \
constrain_req_limit, validate_container_params
from swift.proxy.controllers.base import Controller, delay_denial, NodeIter, \
cors_validation, set_info_cache, clear_info_cache, get_container_info, \
record_cache_op_metrics, get_cache_key, headers_from_container_info, \
update_headers
from swift.common.storage_policy import POLICIES
from swift.common.swob import HTTPBadRequest, HTTPForbidden, HTTPNotFound, \
HTTPServiceUnavailable, str_to_wsgi, wsgi_to_str, Response
class ContainerController(Controller):
"""WSGI controller for container requests"""
server_type = 'Container'
# Ensure these are all lowercase
pass_through_headers = ['x-container-read', 'x-container-write',
'x-container-sync-key', 'x-container-sync-to',
'x-versions-location']
def __init__(self, app, account_name, container_name, **kwargs):
super(ContainerController, self).__init__(app)
self.account_name = unquote(account_name)
self.container_name = unquote(container_name)
def _x_remove_headers(self):
st = self.server_type.lower()
return ['x-remove-%s-read' % st,
'x-remove-%s-write' % st,
'x-remove-versions-location',
'x-remove-%s-sync-key' % st,
'x-remove-%s-sync-to' % st]
def _convert_policy_to_index(self, req):
"""
Helper method to convert a policy name (from a request from a client)
to a policy index (for a request to a backend).
:param req: incoming request
"""
policy_name = req.headers.get('X-Storage-Policy')
if not policy_name:
return
policy = POLICIES.get_by_name(policy_name)
if not policy:
raise HTTPBadRequest(request=req,
content_type="text/plain",
body=("Invalid %s '%s'"
% ('X-Storage-Policy', policy_name)))
if policy.is_deprecated:
body = 'Storage Policy %r is deprecated' % (policy.name)
raise HTTPBadRequest(request=req, body=body)
return int(policy)
def clean_acls(self, req):
if 'swift.clean_acl' in req.environ:
for header in ('x-container-read', 'x-container-write'):
if header in req.headers:
try:
req.headers[header] = \
req.environ['swift.clean_acl'](header,
req.headers[header])
except ValueError as err:
return HTTPBadRequest(request=req, body=str(err))
return None
def _clear_container_info_cache(self, req):
clear_info_cache(req.environ,
self.account_name, self.container_name)
clear_info_cache(req.environ,
self.account_name, self.container_name, 'listing')
# TODO: should we also purge updating shards from cache?
def _GETorHEAD_from_backend(self, req):
part = self.app.container_ring.get_part(
self.account_name, self.container_name)
concurrency = self.app.container_ring.replica_count \
if self.app.get_policy_options(None).concurrent_gets else 1
node_iter = NodeIter(self.app, self.app.container_ring, part,
self.logger, req)
resp = self.GETorHEAD_base(
req, 'Container', node_iter, part,
req.swift_entity_path, concurrency)
return resp
def _make_namespaces_response_body(self, req, ns_bound_list):
"""
Filter namespaces according to request constraints and return a
serialised list of namespaces.
:param req: the request object.
:param ns_bound_list: an instance of
:class:`~swift.common.utils.NamespaceBoundList`.
:return: a serialised list of namespaces.
"""
marker = get_param(req, 'marker', '')
end_marker = get_param(req, 'end_marker')
includes = get_param(req, 'includes')
reverse = config_true_value(get_param(req, 'reverse'))
if reverse:
marker, end_marker = end_marker, marker
namespaces = ns_bound_list.get_namespaces()
namespaces = filter_namespaces(
namespaces, includes, marker, end_marker)
if reverse:
namespaces.reverse()
return json.dumps([dict(ns) for ns in namespaces]).encode('ascii')
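    # Illustrative note (not part of the original source): for a reverse
    # listing the client-supplied markers are swapped before filtering, e.g.
    # marker='p', end_marker='d', reverse=true filters the namespaces with
    # marker='d' and end_marker='p' and then reverses the surviving list.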
def _get_shard_ranges_from_cache(self, req, headers):
"""
Try to fetch shard namespace data from cache and, if successful, return
a response. Also return the cache state.
The response body will be a list of dicts each of which describes
a Namespace (i.e. includes the keys ``lower``, ``upper`` and ``name``).
:param req: an instance of ``swob.Request``.
:param headers: Headers to be sent with request.
        :return: a tuple comprising (an instance of ``swob.Response`` or
            ``None`` if no namespaces were found in cache, the cache state).
"""
infocache = req.environ.setdefault('swift.infocache', {})
memcache = cache_from_env(req.environ, True)
cache_key = get_cache_key(self.account_name,
self.container_name,
shard='listing')
resp_body = None
ns_bound_list = infocache.get(cache_key)
if ns_bound_list:
cache_state = 'infocache_hit'
resp_body = self._make_namespaces_response_body(req, ns_bound_list)
elif memcache:
skip_chance = \
self.app.container_listing_shard_ranges_skip_cache
if skip_chance and random.random() < skip_chance:
cache_state = 'skip'
else:
try:
cached_namespaces = memcache.get(
cache_key, raise_on_error=True)
if cached_namespaces:
cache_state = 'hit'
if six.PY2:
# json.loads() in memcache.get will convert json
# 'string' to 'unicode' with python2, here we cast
# 'unicode' back to 'str'
cached_namespaces = [
[lower.encode('utf-8'), name.encode('utf-8')]
for lower, name in cached_namespaces]
ns_bound_list = NamespaceBoundList(cached_namespaces)
resp_body = self._make_namespaces_response_body(
req, ns_bound_list)
else:
cache_state = 'miss'
except MemcacheConnectionError:
cache_state = 'error'
if resp_body is None:
resp = None
else:
# shard ranges can be returned from cache
infocache[cache_key] = ns_bound_list
self.logger.debug('Found %d shards in cache for %s',
len(ns_bound_list.bounds), req.path_qs)
headers.update({'x-backend-record-type': 'shard',
'x-backend-cached-results': 'true'})
# mimic GetOrHeadHandler.get_working_response...
# note: server sets charset with content_type but proxy
# GETorHEAD_base does not, so don't set it here either
resp = Response(request=req, body=resp_body)
update_headers(resp, headers)
resp.last_modified = Timestamp(headers['x-put-timestamp']).ceil()
resp.environ['swift_x_timestamp'] = headers.get('x-timestamp')
resp.accept_ranges = 'bytes'
resp.content_type = 'application/json'
return resp, cache_state
def _store_shard_ranges_in_cache(self, req, resp):
"""
Parse shard ranges returned from backend, store them in both infocache
and memcache.
:param req: the request object.
:param resp: the response object for the shard range listing.
:return: an instance of
:class:`~swift.common.utils.NamespaceBoundList`.
"""
# Note: Any gaps in the response's shard ranges will be 'lost' as a
# result of compacting the list of shard ranges to a
# NamespaceBoundList. That is ok. When the cached NamespaceBoundList is
# transformed back to shard range Namespaces to perform a listing, the
# Namespace before each gap will have expanded to include the gap,
# which means that the backend GET to that shard will have an
# end_marker beyond that shard's upper bound, and equal to the next
# available shard's lower. At worst, some misplaced objects, in the gap
# above the shard's upper, may be included in the shard's response.
data = self._parse_listing_response(req, resp)
backend_shard_ranges = self._parse_shard_ranges(req, data, resp)
if backend_shard_ranges is None:
return None
ns_bound_list = NamespaceBoundList.parse(backend_shard_ranges)
if resp.headers.get('x-backend-sharding-state') == 'sharded':
# cache in infocache even if no shard ranges returned; this
# is unexpected but use that result for this request
infocache = req.environ.setdefault('swift.infocache', {})
cache_key = get_cache_key(
self.account_name, self.container_name, shard='listing')
infocache[cache_key] = ns_bound_list
memcache = cache_from_env(req.environ, True)
if memcache and ns_bound_list:
# cache in memcache only if shard ranges as expected
self.logger.info('Caching listing shards for %s (%d shards)',
cache_key, len(ns_bound_list.bounds))
memcache.set(cache_key, ns_bound_list.bounds,
time=self.app.recheck_listing_shard_ranges)
return ns_bound_list
def _get_shard_ranges_from_backend(self, req):
"""
Make a backend request for shard ranges and return a response.
The response body will be a list of dicts each of which describes
a Namespace (i.e. includes the keys ``lower``, ``upper`` and ``name``).
If the response headers indicate that the response body contains a
complete list of shard ranges for a sharded container then the response
        body will be transformed to a ``NamespaceBoundList`` and cached.
:param req: an instance of ``swob.Request``.
:return: an instance of ``swob.Response``.
"""
# Note: We instruct the backend server to ignore name constraints in
# request params if returning shard ranges so that the response can
# potentially be cached, but we only cache it if the container state is
# 'sharded'. We don't attempt to cache shard ranges for a 'sharding'
# container as they may include the container itself as a 'gap filler'
# for shard ranges that have not yet cleaved; listings from 'gap
# filler' shard ranges are likely to become stale as the container
# continues to cleave objects to its shards and caching them is
# therefore more likely to result in stale or incomplete listings on
# subsequent container GETs.
req.headers['x-backend-override-shard-name-filter'] = 'sharded'
resp = self._GETorHEAD_from_backend(req)
sharding_state = resp.headers.get(
'x-backend-sharding-state', '').lower()
resp_record_type = resp.headers.get(
'x-backend-record-type', '').lower()
complete_listing = config_true_value(resp.headers.pop(
'x-backend-override-shard-name-filter', False))
# given that we sent 'x-backend-override-shard-name-filter=sharded' we
# should only receive back 'x-backend-override-shard-name-filter=true'
# if the sharding state is 'sharded', but check them both anyway...
if (resp_record_type == 'shard' and
sharding_state == 'sharded' and
complete_listing):
ns_bound_list = self._store_shard_ranges_in_cache(req, resp)
if ns_bound_list:
resp.body = self._make_namespaces_response_body(
req, ns_bound_list)
return resp
def _record_shard_listing_cache_metrics(
self, cache_state, resp, resp_record_type, info):
"""
Record a single cache operation by shard listing into its
corresponding metrics.
:param cache_state: the state of this cache operation, includes
infocache_hit, memcache hit, miss, error, skip, force_skip
and disabled.
:param resp: the response from either backend or cache hit.
:param resp_record_type: indicates the type of response record, e.g.
'shard' for shard range listing, 'object' for object listing.
:param info: the cached container info.
"""
should_record = False
if is_success(resp.status_int):
if resp_record_type == 'shard':
# Here we either got shard ranges by hitting the cache, or we
# got shard ranges from backend successfully for cache_state
# other than cache hit. Note: it's possible that later we find
# that shard ranges can't be parsed.
should_record = True
elif (info and is_success(info['status'])
and info.get('sharding_state') == 'sharded'):
# The shard listing request failed when getting shard ranges from
# backend.
# Note: In the absence of 'info' we cannot assume the container is
# sharded, so we don't increment the metric if 'info' is None. Even
# when we have valid info, we can't be sure that the container is
# sharded, but we assume info was correct and increment the failure
# metrics.
should_record = True
# else:
# The request failed, but in the absence of info we cannot assume
# the container is sharded, so we don't increment the metric.
if should_record:
record_cache_op_metrics(
self.logger, 'shard_listing', cache_state, resp)
def _GET_using_cache(self, req, info):
# It may be possible to fulfil the request from cache: we only reach
# here if request record_type is 'shard' or 'auto', so if the container
# state is 'sharded' then look for cached shard ranges. However, if
# X-Newest is true then we always fetch from the backend servers.
headers = headers_from_container_info(info)
if config_true_value(req.headers.get('x-newest', False)):
cache_state = 'force_skip'
self.logger.debug(
'Skipping shard cache lookup (x-newest) for %s', req.path_qs)
elif (headers and info and is_success(info['status']) and
info.get('sharding_state') == 'sharded'):
# container is sharded so we may have the shard ranges cached; only
# use cached values if all required backend headers available.
resp, cache_state = self._get_shard_ranges_from_cache(req, headers)
if resp:
return resp, cache_state
else:
            # Container metadata didn't support a cache lookup: either the
            # container metadata was not in cache, so we don't know whether
            # the container is sharded, or the sharding state in the metadata
            # indicates the container is unsharded.
cache_state = 'bypass'
# The request was not fulfilled from cache so send to backend server.
return self._get_shard_ranges_from_backend(req), cache_state
def GETorHEAD(self, req):
"""Handler for HTTP GET/HEAD requests."""
ai = self.account_info(self.account_name, req)
auto_account = self.account_name.startswith(
self.app.auto_create_account_prefix)
if not (auto_account or ai[1]):
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
# Don't cache this. It doesn't reflect the state of the
# container, just that the user can't access it.
return aresp
# Don't cache this. The lack of account will be cached, and that
# is sufficient.
return HTTPNotFound(request=req)
# The read-modify-write of params here is because the Request.params
# getter dynamically generates a dict of params from the query string;
# the setter must be called for new params to update the query string.
params = req.params
params['format'] = 'json'
# x-backend-record-type may be sent via internal client e.g. from
# the sharder or in probe tests
record_type = req.headers.get('X-Backend-Record-Type', '').lower()
if not record_type:
record_type = 'auto'
req.headers['X-Backend-Record-Type'] = 'auto'
params['states'] = 'listing'
req.params = params
if (req.method == 'GET'
and get_param(req, 'states') == 'listing'
and record_type != 'object'):
may_get_listing_shards = True
# Only lookup container info from cache and skip the backend HEAD,
# since we are going to GET the backend container anyway.
info = get_container_info(
req.environ, self.app, swift_source=None, cache_only=True)
else:
info = None
may_get_listing_shards = False
memcache = cache_from_env(req.environ, True)
sr_cache_state = None
if (may_get_listing_shards and
self.app.recheck_listing_shard_ranges > 0
and memcache
and not config_true_value(
req.headers.get('x-backend-include-deleted', False))):
# This GET might be served from cache or might populate cache.
# 'x-backend-include-deleted' is not usually expected in requests
# to the proxy (it is used from sharder to container servers) but
# it is included in the conditions just in case because we don't
# cache deleted shard ranges.
resp, sr_cache_state = self._GET_using_cache(req, info)
else:
resp = self._GETorHEAD_from_backend(req)
if may_get_listing_shards and (
not self.app.recheck_listing_shard_ranges or not memcache):
sr_cache_state = 'disabled'
resp_record_type = resp.headers.get('X-Backend-Record-Type', '')
if sr_cache_state:
self._record_shard_listing_cache_metrics(
sr_cache_state, resp, resp_record_type, info)
if all((req.method == "GET", record_type == 'auto',
resp_record_type.lower() == 'shard')):
resp = self._get_from_shards(req, resp)
if not config_true_value(
resp.headers.get('X-Backend-Cached-Results')):
# Cache container metadata. We just made a request to a storage
# node and got up-to-date information for the container.
resp.headers['X-Backend-Recheck-Container-Existence'] = str(
self.app.recheck_container_existence)
set_info_cache(req.environ, self.account_name,
self.container_name, resp)
if 'swift.authorize' in req.environ:
req.acl = wsgi_to_str(resp.headers.get('x-container-read'))
aresp = req.environ['swift.authorize'](req)
if aresp:
# Don't cache this. It doesn't reflect the state of the
# container, just that the user can't access it.
return aresp
if not req.environ.get('swift_owner', False):
for key in self.app.swift_owner_headers:
if key in resp.headers:
del resp.headers[key]
# Expose sharding state in reseller requests
if req.environ.get('reseller_request', False):
resp.headers['X-Container-Sharding'] = config_true_value(
resp.headers.get(get_sys_meta_prefix('container') + 'Sharding',
'False'))
return resp
def _get_from_shards(self, req, resp):
# Construct listing using shards described by the response body.
# The history of containers that have returned shard ranges is
# maintained in the request environ so that loops can be avoided by
# forcing an object listing if the same container is visited again.
# This can happen in at least two scenarios:
# 1. a container has filled a gap in its shard ranges with a
# shard range pointing to itself
# 2. a root container returns a (stale) shard range pointing to a
# shard that has shrunk into the root, in which case the shrunken
# shard may return the root's shard range.
shard_listing_history = req.environ.setdefault(
'swift.shard_listing_history', [])
policy_key = 'X-Backend-Storage-Policy-Index'
if not (shard_listing_history or policy_key in req.headers):
# We're handling the original request to the root container: set
# the root policy index in the request, unless it is already set,
# so that shards will return listings for that policy index.
# Note: we only get here if the root responded with shard ranges,
# or if the shard ranges were cached and the cached root container
# info has sharding_state==sharded; in both cases we can assume
# that the response is "modern enough" to include
# 'X-Backend-Storage-Policy-Index'.
req.headers[policy_key] = resp.headers[policy_key]
shard_listing_history.append((self.account_name, self.container_name))
# Note: when the response body has been synthesised from cached data,
# each item in the list only has 'name', 'lower' and 'upper' keys. We
# therefore cannot use ShardRange.from_dict(), and the ShardRange
# instances constructed here will only have 'name', 'lower' and 'upper'
# attributes set.
# Ideally we would construct Namespace objects here, but later we use
# the ShardRange account and container properties to access parsed
# parts of the name.
shard_ranges = [ShardRange(**data) for data in json.loads(resp.body)]
self.logger.debug('GET listing from %s shards for: %s',
len(shard_ranges), req.path_qs)
if not shard_ranges:
# can't find ranges or there was a problem getting the ranges. So
# return what we have.
return resp
objects = []
req_limit = constrain_req_limit(req, CONTAINER_LISTING_LIMIT)
params = req.params.copy()
params.pop('states', None)
req.headers.pop('X-Backend-Record-Type', None)
reverse = config_true_value(params.get('reverse'))
marker = wsgi_to_str(params.get('marker'))
end_marker = wsgi_to_str(params.get('end_marker'))
prefix = wsgi_to_str(params.get('prefix'))
limit = req_limit
all_resp_status = []
for i, shard_range in enumerate(shard_ranges):
params['limit'] = limit
# Always set marker to ensure that object names less than or equal
# to those already in the listing are not fetched; if the listing
# is empty then the original request marker, if any, is used. This
# allows misplaced objects below the expected shard range to be
# included in the listing.
last_name = ''
last_name_was_subdir = False
if objects:
last_name_was_subdir = 'subdir' in objects[-1]
if last_name_was_subdir:
last_name = objects[-1]['subdir']
else:
last_name = objects[-1]['name']
if six.PY2:
last_name = last_name.encode('utf8')
params['marker'] = str_to_wsgi(last_name)
elif marker:
params['marker'] = str_to_wsgi(marker)
else:
params['marker'] = ''
# Always set end_marker to ensure that misplaced objects beyond the
# expected shard range are not fetched. This prevents a misplaced
# object obscuring correctly placed objects in the next shard
# range.
if end_marker and end_marker in shard_range:
params['end_marker'] = str_to_wsgi(end_marker)
elif reverse:
params['end_marker'] = str_to_wsgi(shard_range.lower_str)
else:
params['end_marker'] = str_to_wsgi(shard_range.end_marker)
headers = {}
if ((shard_range.account, shard_range.container) in
shard_listing_history):
# directed back to same container - force GET of objects
headers['X-Backend-Record-Type'] = 'object'
if config_true_value(req.headers.get('x-newest', False)):
headers['X-Newest'] = 'true'
if prefix:
if prefix > shard_range:
continue
try:
just_past = prefix[:-1] + chr(ord(prefix[-1]) + 1)
except ValueError:
pass
else:
if just_past < shard_range:
continue
if last_name_was_subdir and str(
shard_range.lower if reverse else shard_range.upper
).startswith(last_name):
continue
self.logger.debug(
'Getting listing part %d from shard %s %s with %s',
i, shard_range, shard_range.name, headers)
objs, shard_resp = self._get_container_listing(
req, shard_range.account, shard_range.container,
headers=headers, params=params)
all_resp_status.append(shard_resp.status_int)
sharding_state = shard_resp.headers.get('x-backend-sharding-state',
'unknown')
if objs is None:
# give up if any non-success response from shard containers
self.logger.error(
'Aborting listing from shards due to bad response: %r'
% all_resp_status)
return HTTPServiceUnavailable(request=req)
shard_policy = shard_resp.headers.get(
'X-Backend-Record-Storage-Policy-Index',
shard_resp.headers[policy_key]
)
if shard_policy != req.headers[policy_key]:
self.logger.error(
'Aborting listing from shards due to bad shard policy '
'index: %s (expected %s)',
shard_policy, req.headers[policy_key])
return HTTPServiceUnavailable(request=req)
self.logger.debug(
'Found %d objects in shard (state=%s), total = %d',
len(objs), sharding_state, len(objs) + len(objects))
if not objs:
# tolerate empty shard containers
continue
objects.extend(objs)
limit -= len(objs)
if limit <= 0:
break
last_name = objects[-1].get('name',
objects[-1].get('subdir', u''))
if six.PY2:
last_name = last_name.encode('utf8')
if end_marker and reverse and end_marker >= last_name:
break
if end_marker and not reverse and end_marker <= last_name:
break
resp.body = json.dumps(objects).encode('ascii')
constrained = any(req.params.get(constraint) for constraint in (
'marker', 'end_marker', 'path', 'prefix', 'delimiter'))
if not constrained and len(objects) < req_limit:
self.logger.debug('Setting object count to %s' % len(objects))
# prefer the actual listing stats over the potentially outdated
# root stats. This condition is only likely when a sharded
# container is shrinking or in tests; typically a sharded container
# will have more than CONTAINER_LISTING_LIMIT objects so any
# unconstrained listing will be capped by the limit and total
# object stats cannot therefore be inferred from the listing.
resp.headers['X-Container-Object-Count'] = len(objects)
resp.headers['X-Container-Bytes-Used'] = sum(
[o['bytes'] for o in objects])
return resp
@public
@delay_denial
@cors_validation
def GET(self, req):
"""Handler for HTTP GET requests."""
# early checks for request validity
validate_container_params(req)
return self.GETorHEAD(req)
@public
@delay_denial
@cors_validation
def HEAD(self, req):
"""Handler for HTTP HEAD requests."""
return self.GETorHEAD(req)
@public
@cors_validation
def PUT(self, req):
"""HTTP PUT request handler."""
error_response = \
self.clean_acls(req) or check_metadata(req, 'container')
if error_response:
return error_response
policy_index = self._convert_policy_to_index(req)
if not req.environ.get('swift_owner'):
for key in self.app.swift_owner_headers:
req.headers.pop(key, None)
if req.environ.get('reseller_request', False) and \
'X-Container-Sharding' in req.headers:
req.headers[get_sys_meta_prefix('container') + 'Sharding'] = \
str(config_true_value(req.headers['X-Container-Sharding']))
length_limit = self.get_name_length_limit()
if len(self.container_name) > length_limit:
body = 'Container name length of %d longer than %d' % (
len(self.container_name), length_limit)
resp = HTTPBadRequest(request=req, body=body)
return resp
account_partition, accounts, container_count = \
self.account_info(self.account_name, req)
if not accounts and self.app.account_autocreate:
if not self.autocreate_account(req, self.account_name):
return HTTPServiceUnavailable(request=req)
account_partition, accounts, container_count = \
self.account_info(self.account_name, req)
if not accounts:
return HTTPNotFound(request=req)
if 0 < self.app.max_containers_per_account <= container_count and \
self.account_name not in self.app.max_containers_whitelist:
container_info = \
self.container_info(self.account_name, self.container_name,
req)
if not is_success(container_info.get('status')):
body = 'Reached container limit of %s' % (
self.app.max_containers_per_account, )
resp = HTTPForbidden(request=req, body=body)
return resp
container_partition, containers = self.app.container_ring.get_nodes(
self.account_name, self.container_name)
headers = self._backend_requests(req, len(containers),
account_partition, accounts,
policy_index)
resp = self.make_requests(
req, self.app.container_ring,
container_partition, 'PUT', req.swift_entity_path, headers)
self._clear_container_info_cache(req)
return resp
@public
@cors_validation
def POST(self, req):
"""HTTP POST request handler."""
error_response = \
self.clean_acls(req) or check_metadata(req, 'container')
if error_response:
return error_response
if not req.environ.get('swift_owner'):
for key in self.app.swift_owner_headers:
req.headers.pop(key, None)
if req.environ.get('reseller_request', False) and \
'X-Container-Sharding' in req.headers:
req.headers[get_sys_meta_prefix('container') + 'Sharding'] = \
str(config_true_value(req.headers['X-Container-Sharding']))
account_partition, accounts, container_count = \
self.account_info(self.account_name, req)
if not accounts:
return HTTPNotFound(request=req)
container_partition, containers = self.app.container_ring.get_nodes(
self.account_name, self.container_name)
headers = self.generate_request_headers(req, transfer=True)
self._clear_container_info_cache(req)
resp = self.make_requests(
req, self.app.container_ring, container_partition, 'POST',
req.swift_entity_path, [headers] * len(containers))
return resp
@public
@cors_validation
def DELETE(self, req):
"""HTTP DELETE request handler."""
account_partition, accounts, container_count = \
self.account_info(self.account_name, req)
if not accounts:
return HTTPNotFound(request=req)
container_partition, containers = self.app.container_ring.get_nodes(
self.account_name, self.container_name)
headers = self._backend_requests(req, len(containers),
account_partition, accounts)
self._clear_container_info_cache(req)
resp = self.make_requests(
req, self.app.container_ring, container_partition, 'DELETE',
req.swift_entity_path, headers)
# Indicates no server had the container
if resp.status_int == HTTP_ACCEPTED:
return HTTPNotFound(request=req)
return resp
@private
def UPDATE(self, req):
"""HTTP UPDATE request handler.
Method to perform bulk operations on container DBs,
similar to a merge_items REPLICATE request.
Not client facing; internal clients or middlewares must include
``X-Backend-Allow-Private-Methods: true`` header to access.
"""
container_partition, containers = self.app.container_ring.get_nodes(
self.account_name, self.container_name)
# Since this isn't client facing, expect callers to supply an index
policy_index = req.headers['X-Backend-Storage-Policy-Index']
headers = self._backend_requests(
req, len(containers), account_partition=None, accounts=[],
policy_index=policy_index)
return self.make_requests(
req, self.app.container_ring, container_partition, 'UPDATE',
req.swift_entity_path, headers, body=req.body)
def _backend_requests(self, req, n_outgoing, account_partition, accounts,
policy_index=None):
additional = {'X-Timestamp': Timestamp.now().internal}
if policy_index is None:
additional['X-Backend-Storage-Policy-Default'] = \
int(POLICIES.default)
else:
additional['X-Backend-Storage-Policy-Index'] = str(policy_index)
headers = [self.generate_request_headers(req, transfer=True,
additional=additional)
for _junk in range(n_outgoing)]
for i, account in enumerate(accounts):
i = i % len(headers)
headers[i]['X-Account-Partition'] = account_partition
headers[i]['X-Account-Host'] = csv_append(
headers[i].get('X-Account-Host'),
'%(ip)s:%(port)s' % account)
headers[i]['X-Account-Device'] = csv_append(
headers[i].get('X-Account-Device'),
account['device'])
return headers
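    # Illustrative note (not part of the original source): with, say, three
    # container replicas and three account replicas, each outgoing header
    # dict above names exactly one account host/device; were there more
    # account replicas than container replicas, the extra hosts would be
    # csv-appended onto the existing X-Account-Host / X-Account-Device
    # values because of the modulo indexing.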
| swift-master | swift/proxy/controllers/container.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: swift_conn
# You'll see swift_conn passed around a few places in this file. This is the
# source bufferedhttp connection of whatever it is attached to.
# It is used when early termination of reading from the connection should
# happen, such as when a range request is satisfied but there's still more the
# source connection would like to send. To prevent having to read all the data
# that could be left, the source connection can be .close() and then reads
# commence to empty out any buffers.
# These shenanigans are to ensure all related objects can be garbage
# collected. We've seen objects hang around forever otherwise.
import six
from six.moves.urllib.parse import quote, unquote
from six.moves import zip
import collections
import itertools
import json
import mimetypes
import time
import math
import random
import sys
from greenlet import GreenletExit
from eventlet import GreenPile
from eventlet.queue import Queue, Empty
from eventlet.timeout import Timeout
from swift.common.utils import (
clean_content_type, config_true_value, ContextPool, csv_append,
GreenAsyncPile, GreenthreadSafeIterator, Timestamp, WatchdogTimeout,
normalize_delete_at_timestamp, public, get_expirer_container,
document_iters_to_http_response_body, parse_content_range,
quorum_size, reiterate, close_if_possible, safe_json_loads, md5,
ShardRange, find_namespace, cache_from_env, NamespaceBoundList,
CooperativeIterator)
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_metadata, check_object_creation
from swift.common import constraints
from swift.common.exceptions import ChunkReadTimeout, \
ChunkWriteTimeout, ConnectionTimeout, ResponseTimeout, \
InsufficientStorage, FooterNotSupported, MultiphasePUTNotSupported, \
PutterConnectError, ChunkReadError, RangeAlreadyComplete, ShortReadError
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.http import (
is_informational, is_success, is_client_error, is_server_error,
is_redirection, HTTP_CONTINUE, HTTP_INTERNAL_SERVER_ERROR,
HTTP_SERVICE_UNAVAILABLE, HTTP_INSUFFICIENT_STORAGE,
HTTP_PRECONDITION_FAILED, HTTP_CONFLICT, HTTP_UNPROCESSABLE_ENTITY,
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE, HTTP_NOT_FOUND)
from swift.common.memcached import MemcacheConnectionError
from swift.common.storage_policy import (POLICIES, REPL_POLICY, EC_POLICY,
ECDriverError, PolicyError)
from swift.proxy.controllers.base import Controller, delay_denial, \
cors_validation, update_headers, bytes_to_skip, ByteCountEnforcer, \
record_cache_op_metrics, get_cache_key, GetterBase, GetterSource, \
is_good_source, NodeIter
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPRequestTimeout, \
HTTPServerError, HTTPServiceUnavailable, HTTPClientDisconnect, \
HTTPUnprocessableEntity, Response, HTTPException, \
HTTPRequestedRangeNotSatisfiable, Range, HTTPInternalServerError, \
normalize_etag
from swift.common.request_helpers import update_etag_is_at_header, \
resolve_etag_is_at_header, validate_internal_obj, get_ip_port
def check_content_type(req):
if not req.environ.get('swift.content_type_overridden') and \
';' in req.headers.get('content-type', ''):
for param in req.headers['content-type'].split(';')[1:]:
if param.lstrip().startswith('swift_'):
return HTTPBadRequest("Invalid Content-Type, "
"swift_* is not a valid parameter name.")
return None
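# Illustrative note (not part of the original source): Content-Type parameters
# using the reserved swift_ prefix are rejected above unless the content type
# was overridden internally, e.g.
#
#   Content-Type: text/plain; swift_bytes=1024   -> HTTPBadRequest
#   Content-Type: text/plain; charset=utf-8      -> accepted (returns None)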
def num_container_updates(container_replicas, container_quorum,
object_replicas, object_quorum):
"""
We need to send container updates via enough object servers such
that, if the object PUT succeeds, then the container update is
durable (either it's synchronously updated or written to async
pendings).
Define:
Qc = the quorum size for the container ring
Qo = the quorum size for the object ring
Rc = the replica count for the container ring
Ro = the replica count (or EC N+K) for the object ring
A durable container update is one that's made it to at least Qc
nodes. To always be durable, we have to send enough container
updates so that, if only Qo object PUTs succeed, and all the
failed object PUTs had container updates, at least Qc updates
remain. Since (Ro - Qo) object PUTs may fail, we must have at
least Qc + Ro - Qo container updates to ensure that Qc of them
remain.
Also, each container replica is named in at least one object PUT
request so that, when all requests succeed, no work is generated
for the container replicator. Thus, at least Rc updates are
necessary.
:param container_replicas: replica count for the container ring (Rc)
:param container_quorum: quorum size for the container ring (Qc)
:param object_replicas: replica count for the object ring (Ro)
:param object_quorum: quorum size for the object ring (Qo)
"""
return max(
# Qc + Ro - Qo
container_quorum + object_replicas - object_quorum,
# Rc
container_replicas)
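# Worked example: for a 3-replica container ring (Qc = 2) and a 3-replica
# object ring (Qo = 2),
#     num_container_updates(3, 2, 3, 2) == max(2 + 3 - 2, 3) == 3
# and, assuming an EC 10+4 object ring whose quorum is 10,
#     num_container_updates(3, 2, 14, 10) == max(2 + 14 - 10, 3) == 6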
class ObjectControllerRouter(object):
policy_type_to_controller_map = {}
@classmethod
def register(cls, policy_type):
"""
Decorator for Storage Policy implementations to register
their ObjectController implementations.
This also fills in a policy_type attribute on the class.
"""
def register_wrapper(controller_cls):
if policy_type in cls.policy_type_to_controller_map:
raise PolicyError(
'%r is already registered for the policy_type %r' % (
cls.policy_type_to_controller_map[policy_type],
policy_type))
cls.policy_type_to_controller_map[policy_type] = controller_cls
controller_cls.policy_type = policy_type
return controller_cls
return register_wrapper
def __init__(self):
self.policy_to_controller_cls = {}
for policy in POLICIES:
self.policy_to_controller_cls[int(policy)] = \
self.policy_type_to_controller_map[policy.policy_type]
def __getitem__(self, policy):
return self.policy_to_controller_cls[int(policy)]
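# Rough usage sketch (assuming the proxy wires this up in the usual way): one
# router is built at startup and the controller class is looked up per
# request's storage policy:
#     router = ObjectControllerRouter()
#     controller_cls = router[policy]   # e.g. ReplicatedObjectController
# The mapping is populated by the @ObjectControllerRouter.register(...)
# decorators on the controller classes defined below.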
class BaseObjectController(Controller):
"""Base WSGI controller for object requests."""
server_type = 'Object'
def __init__(self, app, account_name, container_name, object_name,
**kwargs):
super(BaseObjectController, self).__init__(app)
self.account_name = unquote(account_name)
self.container_name = unquote(container_name)
self.object_name = unquote(object_name)
validate_internal_obj(
self.account_name, self.container_name, self.object_name)
def iter_nodes_local_first(self, ring, partition, request, policy=None,
local_handoffs_first=False):
"""
Yields nodes for a ring partition.
If the 'write_affinity' setting is non-empty, then this will yield N
local nodes (as defined by the write_affinity setting) first, then the
rest of the nodes as normal. It is a re-ordering of the nodes such
that the local ones come first; no node is omitted. The effect is
that the request will be serviced by local object servers first, but
nonlocal ones will be employed if not enough local ones are available.
:param ring: ring to get nodes from
:param partition: ring partition to yield nodes for
:param request: nodes will be annotated with `use_replication` based on
the `request` headers
:param policy: optional, an instance of
:class:`~swift.common.storage_policy.BaseStoragePolicy`
:param local_handoffs_first: optional, if True prefer primaries and
local handoff nodes first before looking elsewhere.
"""
policy_options = self.app.get_policy_options(policy)
is_local = policy_options.write_affinity_is_local_fn
if is_local is None:
return NodeIter(self.app, ring, partition, self.logger, request,
policy=policy)
primary_nodes = ring.get_part_nodes(partition)
handoff_nodes = ring.get_more_nodes(partition)
all_nodes = itertools.chain(primary_nodes, handoff_nodes)
if local_handoffs_first:
num_locals = policy_options.write_affinity_handoff_delete_count
if num_locals is None:
local_primaries = [node for node in primary_nodes
if is_local(node)]
num_locals = len(primary_nodes) - len(local_primaries)
first_local_handoffs = list(itertools.islice(
(node for node in handoff_nodes if is_local(node)), num_locals)
)
preferred_nodes = primary_nodes + first_local_handoffs
else:
num_locals = policy_options.write_affinity_node_count_fn(
len(primary_nodes)
)
preferred_nodes = list(itertools.islice(
(node for node in all_nodes if is_local(node)), num_locals)
)
# refresh it; it moved when we computed preferred_nodes
handoff_nodes = ring.get_more_nodes(partition)
all_nodes = itertools.chain(primary_nodes, handoff_nodes)
node_iter = itertools.chain(
preferred_nodes,
(node for node in all_nodes if node not in preferred_nodes)
)
return NodeIter(self.app, ring, partition, self.logger, request,
node_iter=node_iter, policy=policy)
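    # Ordering sketch with illustrative values: given primaries
    # [p1 (local), p2, p3], handoffs [h1 (local), h2, ...] and num_locals == 2,
    # preferred_nodes is [p1, h1], so the nodes are yielded in the order
    # p1, h1, p2, p3, h2, ...; every node is still eventually yielded.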
def GETorHEAD(self, req):
"""Handle HTTP GET or HEAD requests."""
container_info = self.container_info(
self.account_name, self.container_name, req)
req.acl = container_info['read_acl']
# pass the policy index to storage nodes via req header
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
policy = POLICIES.get_by_index(policy_index)
obj_ring = self.app.get_object_ring(policy_index)
req.headers['X-Backend-Storage-Policy-Index'] = policy_index
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
partition = obj_ring.get_part(
self.account_name, self.container_name, self.object_name)
node_iter = NodeIter(self.app, obj_ring, partition, self.logger, req,
policy=policy)
resp = self._get_or_head_response(req, node_iter, partition, policy)
if ';' in resp.headers.get('content-type', ''):
resp.content_type = clean_content_type(
resp.headers['content-type'])
return resp
@public
@cors_validation
@delay_denial
def GET(self, req):
"""Handler for HTTP GET requests."""
return self.GETorHEAD(req)
@public
@cors_validation
@delay_denial
def HEAD(self, req):
"""Handler for HTTP HEAD requests."""
return self.GETorHEAD(req)
def _get_cached_updating_namespaces(
self, infocache, memcache, cache_key):
"""
        Fetch the cached namespaces of updating shard ranges from infocache
        and memcache.
:param infocache: the infocache instance.
:param memcache: an instance of a memcache client,
:class:`swift.common.memcached.MemcacheRing`.
:param cache_key: the cache key for both infocache and memcache.
:return: a tuple of (an instance of NamespaceBoundList, cache state)
"""
        # try to get namespaces from infocache first
namespace_list = infocache.get(cache_key)
if namespace_list:
return namespace_list, 'infocache_hit'
        # then try to get them from memcache
if not memcache:
return None, 'disabled'
skip_chance = self.app.container_updating_shard_ranges_skip_cache
if skip_chance and random.random() < skip_chance:
return None, 'skip'
try:
namespaces = memcache.get(cache_key, raise_on_error=True)
cache_state = 'hit' if namespaces else 'miss'
except MemcacheConnectionError:
namespaces = None
cache_state = 'error'
if namespaces:
if six.PY2:
# json.loads() in memcache.get will convert json 'string' to
# 'unicode' with python2, here we cast 'unicode' back to 'str'
namespaces = [
[lower.encode('utf-8'), name.encode('utf-8')]
for lower, name in namespaces]
namespace_list = NamespaceBoundList(namespaces)
else:
namespace_list = None
return namespace_list, cache_state
def _get_update_shard_caching_disabled(self, req, account, container, obj):
"""
Fetch all updating shard ranges for the given root container when
all caching is disabled.
:param req: original Request instance.
:param account: account from which shard ranges should be fetched.
:param container: container from which shard ranges should be fetched.
:param obj: object getting updated.
:return: an instance of :class:`swift.common.utils.ShardRange`,
or None if the update should go back to the root
"""
# legacy behavior requests container server for includes=obj
shard_ranges, response = self._get_shard_ranges(
req, account, container, states='updating', includes=obj)
record_cache_op_metrics(
self.logger, 'shard_updating', 'disabled', response)
# there will be only one shard range in the list if any
return shard_ranges[0] if shard_ranges else None
def _get_update_shard(self, req, account, container, obj):
"""
Find the appropriate shard range for an object update.
Note that this fetches and caches (in both the per-request infocache
and memcache, if available) all shard ranges for the given root
container so we won't have to contact the container DB for every write.
:param req: original Request instance.
:param account: account from which shard ranges should be fetched.
:param container: container from which shard ranges should be fetched.
:param obj: object getting updated.
:return: an instance of :class:`swift.common.utils.ShardRange`,
or None if the update should go back to the root
"""
if not self.app.recheck_updating_shard_ranges:
# caching is disabled
return self._get_update_shard_caching_disabled(
req, account, container, obj)
# caching is enabled, try to get from caches
response = None
cache_key = get_cache_key(account, container, shard='updating')
infocache = req.environ.setdefault('swift.infocache', {})
memcache = cache_from_env(req.environ, True)
cached_namespaces, cache_state = self._get_cached_updating_namespaces(
infocache, memcache, cache_key)
if cached_namespaces:
# found cached namespaces in either infocache or memcache
infocache[cache_key] = cached_namespaces
namespace = cached_namespaces.get_namespace(obj)
update_shard = ShardRange(
name=namespace.name, timestamp=0, lower=namespace.lower,
upper=namespace.upper)
else:
# pull full set of updating shard ranges from backend
shard_ranges, response = self._get_shard_ranges(
req, account, container, states='updating')
if shard_ranges:
# only store the list of namespace lower bounds and names into
# infocache and memcache.
cached_namespaces = NamespaceBoundList.parse(
shard_ranges)
infocache[cache_key] = cached_namespaces
if memcache:
self.logger.info(
'Caching updating shards for %s (%d shards)',
cache_key, len(cached_namespaces.bounds))
memcache.set(
cache_key, cached_namespaces.bounds,
time=self.app.recheck_updating_shard_ranges)
update_shard = find_namespace(obj, shard_ranges or [])
record_cache_op_metrics(
self.logger, 'shard_updating', cache_state, response)
return update_shard
def _get_update_target(self, req, container_info):
# find the sharded container to which we'll send the update
db_state = container_info.get('sharding_state', 'unsharded')
if db_state in ('sharded', 'sharding'):
shard_range = self._get_update_shard(
req, self.account_name, self.container_name, self.object_name)
if shard_range:
partition, nodes = self.app.container_ring.get_nodes(
shard_range.account, shard_range.container)
return partition, nodes, shard_range.name
return container_info['partition'], container_info['nodes'], None
@public
@cors_validation
@delay_denial
def POST(self, req):
"""HTTP POST request handler."""
container_info = self.container_info(
self.account_name, self.container_name, req)
container_partition, container_nodes, container_path = \
self._get_update_target(req, container_info)
req.acl = container_info['write_acl']
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
if not container_nodes:
return HTTPNotFound(request=req)
error_response = check_metadata(req, 'object')
if error_response:
return error_response
req.ensure_x_timestamp()
req, delete_at_container, delete_at_part, \
delete_at_nodes = self._config_obj_expiration(req)
# pass the policy index to storage nodes via req header
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
obj_ring = self.app.get_object_ring(policy_index)
req.headers['X-Backend-Storage-Policy-Index'] = policy_index
next_part_power = getattr(obj_ring, 'next_part_power', None)
if next_part_power:
req.headers['X-Backend-Next-Part-Power'] = next_part_power
partition, nodes = obj_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
headers = self._backend_requests(
req, len(nodes), container_partition, container_nodes,
delete_at_container, delete_at_part, delete_at_nodes,
container_path=container_path)
return self._post_object(req, obj_ring, partition, headers)
def _backend_requests(self, req, n_outgoing,
container_partition, containers,
delete_at_container=None, delete_at_partition=None,
delete_at_nodes=None, container_path=None):
policy_index = req.headers['X-Backend-Storage-Policy-Index']
policy = POLICIES.get_by_index(policy_index)
headers = [self.generate_request_headers(req, additional=req.headers)
for _junk in range(n_outgoing)]
def set_container_update(index, container):
headers[index]['X-Container-Partition'] = container_partition
headers[index]['X-Container-Host'] = csv_append(
headers[index].get('X-Container-Host'),
'%(ip)s:%(port)s' % container)
headers[index]['X-Container-Device'] = csv_append(
headers[index].get('X-Container-Device'),
container['device'])
if container_path:
headers[index]['X-Backend-Quoted-Container-Path'] = quote(
container_path)
# NB: we used to send
# 'X-Backend-Container-Path': container_path
# but that isn't safe for container names with nulls or
# newlines (or possibly some other characters). We consciously
# *don't* make any attempt to set the old meta; during an
# upgrade, old object-servers will talk to the root which
# will eat the update and move it as a misplaced object.
def set_delete_at_headers(index, delete_at_node):
headers[index]['X-Delete-At-Container'] = delete_at_container
headers[index]['X-Delete-At-Partition'] = delete_at_partition
headers[index]['X-Delete-At-Host'] = csv_append(
headers[index].get('X-Delete-At-Host'),
'%(ip)s:%(port)s' % delete_at_node)
headers[index]['X-Delete-At-Device'] = csv_append(
headers[index].get('X-Delete-At-Device'),
delete_at_node['device'])
n_updates_needed = num_container_updates(
len(containers), quorum_size(len(containers)),
n_outgoing, policy.quorum)
container_iter = itertools.cycle(containers)
dan_iter = itertools.cycle(delete_at_nodes or [])
existing_updates = 0
while existing_updates < n_updates_needed:
index = existing_updates % n_outgoing
set_container_update(index, next(container_iter))
if delete_at_nodes:
# We reverse the index in order to distribute the updates
# across all nodes.
set_delete_at_headers(n_outgoing - 1 - index, next(dan_iter))
existing_updates += 1
# Keep the number of expirer-queue deletes to a reasonable number.
#
# In the best case, at least one object server writes out an
# async_pending for an expirer-queue update. In the worst case, no
# object server does so, and an expirer-queue row remains that
# refers to an already-deleted object. In this case, upon attempting
# to delete the object, the object expirer will notice that the
# object does not exist and then remove the row from the expirer
# queue.
#
# In other words: expirer-queue updates on object DELETE are nice to
# have, but not strictly necessary for correct operation.
#
# Also, each queue update results in an async_pending record, which
# causes the object updater to talk to all container servers. If we
# have N async_pendings and Rc container replicas, we cause N * Rc
# requests from object updaters to container servers (possibly more,
# depending on retries). Thus, it is helpful to keep this number
# small.
n_desired_queue_updates = 2
for i in range(len(headers)):
headers[i].setdefault('X-Backend-Clean-Expiring-Object-Queue',
't' if i < n_desired_queue_updates else 'f')
return headers
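    # Worked example: for a 3-replica object PUT with a 3-replica container
    # ring, n_updates_needed == max(2 + 3 - 2, 3) == 3, so each of the three
    # backend request headers names exactly one container replica via the
    # X-Container-* headers, and only the first two headers get
    # X-Backend-Clean-Expiring-Object-Queue: t (the third gets 'f').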
def _get_conn_response(self, putter, path, logger_thread_locals,
final_phase, **kwargs):
self.logger.thread_locals = logger_thread_locals
try:
resp = putter.await_response(
self.app.node_timeout, not final_phase)
except (Exception, Timeout):
resp = None
if final_phase:
status_type = 'final'
else:
status_type = 'commit'
self.app.exception_occurred(
putter.node, 'Object',
'Trying to get %(status_type)s status of PUT to %(path)s' %
{'status_type': status_type, 'path': path})
return (putter, resp)
def _have_adequate_put_responses(self, statuses, num_nodes, min_responses):
"""
Test for sufficient PUT responses from backend nodes to proceed with
PUT handling.
:param statuses: a list of response statuses.
:param num_nodes: number of backend nodes to which PUT requests may be
issued.
:param min_responses: (optional) minimum number of nodes required to
have responded with satisfactory status code.
:return: True if sufficient backend responses have returned a
satisfactory status code.
"""
raise NotImplementedError
def _get_put_responses(self, req, putters, num_nodes, final_phase=True,
min_responses=None):
"""
Collect object responses to a PUT request and determine if a
satisfactory number of nodes have returned success. Returns
lists of accumulated status codes, reasons, bodies and etags.
:param req: the request
:param putters: list of putters for the request
:param num_nodes: number of nodes involved
:param final_phase: boolean indicating if this is the last phase
:param min_responses: minimum needed when not requiring quorum
:return: a tuple of lists of status codes, reasons, bodies and etags.
The list of bodies and etags is only populated for the final
phase of a PUT transaction.
"""
statuses = []
reasons = []
bodies = []
etags = set()
pile = GreenAsyncPile(len(putters))
for putter in putters:
if putter.failed:
continue
pile.spawn(self._get_conn_response, putter, req.path,
self.logger.thread_locals, final_phase=final_phase)
def _handle_response(putter, response):
statuses.append(response.status)
reasons.append(response.reason)
if final_phase:
body = response.read()
else:
body = b''
bodies.append(body)
if not self.app.check_response(putter.node, 'Object', response,
req.method, req.path, body):
putter.failed = True
elif is_success(response.status):
etags.add(normalize_etag(response.getheader('etag')))
for (putter, response) in pile:
if response:
_handle_response(putter, response)
if self._have_adequate_put_responses(
statuses, num_nodes, min_responses):
break
else:
putter.failed = True
# give any pending requests *some* chance to finish
finished_quickly = pile.waitall(self.app.post_quorum_timeout)
for (putter, response) in finished_quickly:
if response:
_handle_response(putter, response)
if final_phase:
while len(statuses) < num_nodes:
statuses.append(HTTP_SERVICE_UNAVAILABLE)
reasons.append('')
bodies.append(b'')
return statuses, reasons, bodies, etags
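    # Example: in the final phase with num_nodes == 3, if only two putters
    # manage to return a response (both 201), the missing slot is padded and
    # best_response() sees statuses like [201, 201, 503].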
def _config_obj_expiration(self, req):
delete_at_container = None
delete_at_part = None
delete_at_nodes = None
req = constraints.check_delete_headers(req)
if 'x-delete-at' in req.headers:
req.headers['x-delete-at'] = normalize_delete_at_timestamp(
int(req.headers['x-delete-at']))
x_delete_at = int(req.headers['x-delete-at'])
req.environ.setdefault('swift.log_info', []).append(
'x-delete-at:%s' % x_delete_at)
delete_at_container = get_expirer_container(
x_delete_at, self.app.expiring_objects_container_divisor,
self.account_name, self.container_name, self.object_name)
delete_at_part, delete_at_nodes = \
self.app.container_ring.get_nodes(
self.app.expiring_objects_account, delete_at_container)
return req, delete_at_container, delete_at_part, delete_at_nodes
def _update_content_type(self, req):
# Sometimes the 'content-type' header exists, but is set to None.
detect_content_type = \
config_true_value(req.headers.get('x-detect-content-type'))
if detect_content_type or not req.headers.get('content-type'):
guessed_type, _junk = mimetypes.guess_type(req.path_info)
req.headers['Content-Type'] = guessed_type or \
'application/octet-stream'
if detect_content_type:
req.headers.pop('x-detect-content-type')
def _check_failure_put_connections(self, putters, req, min_conns):
"""
Identify any failed connections and check minimum connection count.
:param putters: a list of Putter instances
:param req: request
:param min_conns: minimum number of putter connections required
"""
if req.if_none_match is not None and '*' in req.if_none_match:
statuses = [
putter.resp.status for putter in putters if putter.resp]
if HTTP_PRECONDITION_FAILED in statuses:
# If we find any copy of the file, it shouldn't be uploaded
self.logger.debug(
'Object PUT returning 412, %(statuses)r',
{'statuses': statuses})
raise HTTPPreconditionFailed(request=req)
if any(putter for putter in putters if putter.resp and
putter.resp.status == HTTP_CONFLICT):
status_times = ['%(status)s (%(timestamp)s)' % {
'status': putter.resp.status,
'timestamp': HeaderKeyDict(
putter.resp.getheaders()).get(
'X-Backend-Timestamp', 'unknown')
} for putter in putters if putter.resp]
self.logger.debug(
'Object PUT returning 202 for 409: '
'%(req_timestamp)s <= %(timestamps)r',
{'req_timestamp': req.timestamp.internal,
'timestamps': ', '.join(status_times)})
raise HTTPAccepted(request=req)
self._check_min_conn(req, putters, min_conns)
def _make_putter(self, node, part, req, headers):
"""
Returns a putter object for handling streaming of object to object
servers.
Subclasses must implement this method.
:param node: a storage node
:param part: ring partition number
:param req: a swob Request
:param headers: request headers
:return: an instance of a Putter
"""
raise NotImplementedError
def _connect_put_node(self, nodes, part, req, headers,
logger_thread_locals):
"""
        Make a connection to a storage node.
Connects to the first working node that it finds in nodes iter and
sends over the request headers. Returns a Putter to handle the rest of
the streaming, or None if no working nodes were found.
:param nodes: an iterator of the target storage nodes
:param part: ring partition number
:param req: a swob Request
:param headers: request headers
:param logger_thread_locals: The thread local values to be set on the
self.logger to retain transaction
logging information.
:return: an instance of a Putter
"""
self.logger.thread_locals = logger_thread_locals
for node in nodes:
try:
putter = self._make_putter(node, part, req, headers)
self.app.set_node_timing(node, putter.connect_duration)
return putter
except InsufficientStorage:
self.app.error_limit(node, 'ERROR Insufficient Storage')
except PutterConnectError as e:
msg = 'ERROR %d Expect: 100-continue From Object Server'
self.app.error_occurred(node, msg % e.status)
except (Exception, Timeout):
self.app.exception_occurred(
node, 'Object',
'Expect: 100-continue on %s' %
quote(req.swift_entity_path))
def _get_put_connections(self, req, nodes, partition, outgoing_headers,
policy):
"""
        Establish connections to storage nodes for a PUT request.
"""
obj_ring = policy.object_ring
node_iter = GreenthreadSafeIterator(
self.iter_nodes_local_first(obj_ring, partition, req,
policy=policy))
pile = GreenPile(len(nodes))
for nheaders in outgoing_headers:
# RFC2616:8.2.3 disallows 100-continue without a body,
# so switch to chunked request
if nheaders.get('Content-Length') == '0':
nheaders['Transfer-Encoding'] = 'chunked'
del nheaders['Content-Length']
nheaders['Expect'] = '100-continue'
pile.spawn(self._connect_put_node, node_iter, partition,
req, nheaders, self.logger.thread_locals)
putters = [putter for putter in pile if putter]
return putters
def _check_min_conn(self, req, putters, min_conns, msg=None):
msg = msg or ('Object PUT returning 503, %(conns)s/%(nodes)s '
'required connections')
if len(putters) < min_conns:
self.logger.error(msg, {'conns': len(putters), 'nodes': min_conns})
raise HTTPServiceUnavailable(request=req)
def _get_footers(self, req):
footers = HeaderKeyDict()
footer_callback = req.environ.get(
'swift.callback.update_footers', lambda _footer: None)
footer_callback(footers)
return footers
def _store_object(self, req, data_source, nodes, partition,
outgoing_headers):
"""
This method is responsible for establishing connection
with storage nodes and sending the data to each one of those
nodes. The process of transferring data is specific to each
        Storage Policy, thus it is required for each policy-specific
        ObjectController to provide its own implementation of this method.
:param req: the PUT Request
:param data_source: an iterator of the source of the data
:param nodes: an iterator of the target storage nodes
:param partition: ring partition number
:param outgoing_headers: system headers to storage nodes
:return: Response object
"""
raise NotImplementedError()
def _delete_object(self, req, obj_ring, partition, headers,
node_count=None, node_iterator=None):
"""Delete object considering write-affinity.
        When deleting an object in a write-affinity deployment, also send
        requests to the configured number of local handoff nodes, rather
        than only to the primary nodes. When write affinity is disabled,
        behave the same way as before.
        :param req: the DELETE Request
        :param obj_ring: the object ring
        :param partition: ring partition number
        :param headers: system headers to storage nodes
        :param node_count: optional number of backend requests to make;
                           defaults to the replica count
        :param node_iterator: optional iterable yielding the nodes to contact
        :return: Response object
"""
status_overrides = {404: 204}
resp = self.make_requests(req, obj_ring,
partition, 'DELETE', req.swift_entity_path,
headers, overrides=status_overrides,
node_count=node_count,
node_iterator=node_iterator)
return resp
def _post_object(self, req, obj_ring, partition, headers):
"""
        Send an object POST request to the storage nodes.
:param req: the POST Request
:param obj_ring: the object ring
:param partition: ring partition number
:param headers: system headers to storage nodes
:return: Response object
"""
resp = self.make_requests(req, obj_ring, partition,
'POST', req.swift_entity_path, headers)
return resp
@public
@cors_validation
@delay_denial
def PUT(self, req):
"""HTTP PUT request handler."""
if req.if_none_match is not None and '*' not in req.if_none_match:
# Sending an etag with if-none-match isn't currently supported
return HTTPBadRequest(request=req, content_type='text/plain',
body='If-None-Match only supports *')
container_info = self.container_info(
self.account_name, self.container_name, req)
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
obj_ring = self.app.get_object_ring(policy_index)
container_partition, container_nodes, container_path = \
self._get_update_target(req, container_info)
partition, nodes = obj_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
# pass the policy index to storage nodes via req header
req.headers['X-Backend-Storage-Policy-Index'] = policy_index
next_part_power = getattr(obj_ring, 'next_part_power', None)
if next_part_power:
req.headers['X-Backend-Next-Part-Power'] = next_part_power
req.acl = container_info['write_acl']
req.environ['swift_sync_key'] = container_info['sync_key']
# is request authorized
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
if not container_nodes:
return HTTPNotFound(request=req)
# update content type in case it is missing
self._update_content_type(req)
req.ensure_x_timestamp()
# check constraints on object name and request headers
error_response = check_object_creation(req, self.object_name) or \
check_content_type(req)
if error_response:
return error_response
def reader():
try:
return req.environ['wsgi.input'].read(
self.app.client_chunk_size)
except (ValueError, IOError) as e:
raise ChunkReadError(str(e))
data_source = iter(reader, b'')
# check if object is set to be automatically deleted (i.e. expired)
req, delete_at_container, delete_at_part, \
delete_at_nodes = self._config_obj_expiration(req)
# add special headers to be handled by storage nodes
outgoing_headers = self._backend_requests(
req, len(nodes), container_partition, container_nodes,
delete_at_container, delete_at_part, delete_at_nodes,
container_path=container_path)
# send object to storage nodes
resp = self._store_object(
req, data_source, nodes, partition, outgoing_headers)
return resp
@public
@cors_validation
@delay_denial
def DELETE(self, req):
"""HTTP DELETE request handler."""
container_info = self.container_info(
self.account_name, self.container_name, req)
# pass the policy index to storage nodes via req header
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
obj_ring = self.app.get_object_ring(policy_index)
# pass the policy index to storage nodes via req header
req.headers['X-Backend-Storage-Policy-Index'] = policy_index
next_part_power = getattr(obj_ring, 'next_part_power', None)
if next_part_power:
req.headers['X-Backend-Next-Part-Power'] = next_part_power
container_partition, container_nodes, container_path = \
self._get_update_target(req, container_info)
req.acl = container_info['write_acl']
req.environ['swift_sync_key'] = container_info['sync_key']
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
if not container_nodes:
return HTTPNotFound(request=req)
partition, nodes = obj_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
req.ensure_x_timestamp()
# Include local handoff nodes if write-affinity is enabled.
node_count = len(nodes)
node_iterator = None
policy = POLICIES.get_by_index(policy_index)
policy_options = self.app.get_policy_options(policy)
is_local = policy_options.write_affinity_is_local_fn
if is_local is not None:
local_handoffs = policy_options.write_affinity_handoff_delete_count
if local_handoffs is None:
local_primaries = [node for node in nodes if is_local(node)]
local_handoffs = len(nodes) - len(local_primaries)
node_count += local_handoffs
node_iterator = self.iter_nodes_local_first(
obj_ring, partition, req, policy=policy,
local_handoffs_first=True)
headers = self._backend_requests(
req, node_count, container_partition, container_nodes,
container_path=container_path)
return self._delete_object(req, obj_ring, partition, headers,
node_count=node_count,
node_iterator=node_iterator)
@ObjectControllerRouter.register(REPL_POLICY)
class ReplicatedObjectController(BaseObjectController):
def _get_or_head_response(self, req, node_iter, partition, policy):
concurrency = self.app.get_object_ring(policy.idx).replica_count \
if self.app.get_policy_options(policy).concurrent_gets else 1
resp = self.GETorHEAD_base(
req, 'Object', node_iter, partition,
req.swift_entity_path, concurrency, policy)
return resp
def _make_putter(self, node, part, req, headers):
if req.environ.get('swift.callback.update_footers'):
putter = MIMEPutter.connect(
node, part, req.swift_entity_path, headers, self.app.watchdog,
conn_timeout=self.app.conn_timeout,
node_timeout=self.app.node_timeout,
write_timeout=self.app.node_timeout,
send_exception_handler=self.app.exception_occurred,
logger=self.logger,
need_multiphase=False)
else:
te = ',' + headers.get('Transfer-Encoding', '')
putter = Putter.connect(
node, part, req.swift_entity_path, headers, self.app.watchdog,
conn_timeout=self.app.conn_timeout,
node_timeout=self.app.node_timeout,
write_timeout=self.app.node_timeout,
send_exception_handler=self.app.exception_occurred,
logger=self.logger,
chunked=te.endswith(',chunked'))
return putter
def _transfer_data(self, req, data_source, putters, nodes):
"""
Transfer data for a replicated object.
This method was added in the PUT method extraction change
"""
bytes_transferred = 0
data_source = CooperativeIterator(data_source)
def send_chunk(chunk):
timeout_at = time.time() + self.app.node_timeout
for putter in list(putters):
if not putter.failed:
putter.send_chunk(chunk, timeout_at=timeout_at)
else:
putter.close()
putters.remove(putter)
self._check_min_conn(
req, putters, min_conns,
msg='Object PUT exceptions during send, '
'%(conns)s/%(nodes)s required connections')
min_conns = quorum_size(len(nodes))
try:
while True:
with WatchdogTimeout(self.app.watchdog,
self.app.client_timeout,
ChunkReadTimeout):
try:
chunk = next(data_source)
except StopIteration:
break
bytes_transferred += len(chunk)
if bytes_transferred > constraints.MAX_FILE_SIZE:
raise HTTPRequestEntityTooLarge(request=req)
send_chunk(chunk)
ml = req.message_length()
if ml and bytes_transferred < ml:
self.logger.warning(
'Client disconnected without sending enough data')
self.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)
trail_md = self._get_footers(req)
for putter in putters:
# send any footers set by middleware
putter.end_of_object_data(footer_metadata=trail_md)
self._check_min_conn(
req, [p for p in putters if not p.failed], min_conns,
msg='Object PUT exceptions after last send, '
'%(conns)s/%(nodes)s required connections')
except ChunkReadTimeout as err:
self.logger.warning(
'ERROR Client read timeout (%ss)', err.seconds)
self.logger.increment('client_timeouts')
raise HTTPRequestTimeout(request=req)
except HTTPException:
raise
except ChunkReadError:
self.logger.warning(
'Client disconnected without sending last chunk')
self.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)
except Timeout:
self.logger.exception(
'ERROR Exception causing client disconnect')
raise HTTPClientDisconnect(request=req)
except Exception:
self.logger.exception(
'ERROR Exception transferring data to object servers %s',
{'path': req.path})
raise HTTPInternalServerError(request=req)
def _have_adequate_put_responses(self, statuses, num_nodes, min_responses):
return self.have_quorum(statuses, num_nodes)
def _store_object(self, req, data_source, nodes, partition,
outgoing_headers):
"""
Store a replicated object.
This method is responsible for establishing connection
with storage nodes and sending object to each one of those
nodes. After sending the data, the "best" response will be
returned based on statuses from all connections
"""
policy_index = req.headers.get('X-Backend-Storage-Policy-Index')
policy = POLICIES.get_by_index(policy_index)
if not nodes:
return HTTPNotFound()
putters = self._get_put_connections(
req, nodes, partition, outgoing_headers, policy)
min_conns = quorum_size(len(nodes))
try:
# check that a minimum number of connections were established and
# meet all the correct conditions set in the request
self._check_failure_put_connections(putters, req, min_conns)
# transfer data
self._transfer_data(req, data_source, putters, nodes)
# get responses
statuses, reasons, bodies, etags = \
self._get_put_responses(req, putters, len(nodes))
except HTTPException as resp:
return resp
finally:
for putter in putters:
putter.close()
if len(etags) > 1:
self.logger.error(
'Object servers returned %s mismatched etags', len(etags))
return HTTPServerError(request=req)
etag = etags.pop() if len(etags) else None
resp = self.best_response(req, statuses, reasons, bodies,
'Object PUT', etag=etag)
resp.last_modified = Timestamp(req.headers['X-Timestamp']).ceil()
return resp
class ECAppIter(object):
"""
WSGI iterable that decodes EC fragment archives (or portions thereof)
into the original object (or portions thereof).
:param path: object's path, sans v1 (e.g. /a/c/o)
:param policy: storage policy for this object
:param internal_parts_iters: list of the response-document-parts
iterators for the backend GET responses. For an M+K erasure code,
the caller must supply M such iterables.
:param range_specs: list of dictionaries describing the ranges requested
by the client. Each dictionary contains the start and end of the
client's requested byte range as well as the start and end of the EC
segments containing that byte range.
:param fa_length: length of the fragment archive, in bytes, if the
response is a 200. If it's a 206, then this is ignored.
:param obj_length: length of the object, in bytes. Learned from the
headers in the GET response from the object server.
:param logger: a logger
"""
def __init__(self, path, policy, internal_parts_iters, range_specs,
fa_length, obj_length, logger):
self.path = path
self.policy = policy
self.internal_parts_iters = internal_parts_iters
self.range_specs = range_specs
self.fa_length = fa_length
self.obj_length = obj_length if obj_length is not None else 0
self.boundary = b''
self.logger = logger
self.mime_boundary = None
self.learned_content_type = None
self.stashed_iter = None
self.pool = ContextPool(len(internal_parts_iters))
def close(self):
# close down the stashed iter and shutdown the context pool to
# clean up the frag queue feeding coroutines that may be currently
# executing the internal_parts_iters.
if self.stashed_iter:
close_if_possible(self.stashed_iter)
self.pool.close()
for it in self.internal_parts_iters:
close_if_possible(it)
def kickoff(self, req, resp):
"""
Start pulling data from the backends so that we can learn things like
the real Content-Type that might only be in the multipart/byteranges
response body. Update our response accordingly.
Also, this is the first point at which we can learn the MIME
boundary that our response has in the headers. We grab that so we
can also use it in the body.
:returns: None
:raises HTTPException: on error
"""
self.mime_boundary = resp.boundary
try:
self.stashed_iter = reiterate(self._real_iter(req, resp.headers))
except Exception:
self.close()
raise
if self.learned_content_type is not None:
resp.content_type = self.learned_content_type
resp.content_length = self.obj_length
def _next_ranges(self):
# Each FA part should have approximately the same headers. We really
# only care about Content-Range and Content-Type, and that'll be the
# same for all the different FAs.
for part_infos in zip(*self.internal_parts_iters):
frag_iters = [pi['part_iter'] for pi in part_infos]
headers = HeaderKeyDict(part_infos[0]['headers'])
yield headers, frag_iters
def _actual_range(self, req_start, req_end, entity_length):
# Takes 3 args: (requested-first-byte, requested-last-byte,
# actual-length).
#
# Returns a 3-tuple (first-byte, last-byte, satisfiable).
#
# It is possible to get (None, None, True). This means that the last
# N>0 bytes of a 0-byte object were requested, and we are able to
# satisfy that request by returning nothing.
try:
rng = Range("bytes=%s-%s" % (
req_start if req_start is not None else '',
req_end if req_end is not None else ''))
except ValueError:
return (None, None, False)
rfl = rng.ranges_for_length(entity_length)
if rfl and entity_length == 0:
return (None, None, True)
elif not rfl:
return (None, None, False)
else:
# ranges_for_length() adds 1 to the last byte's position
# because webob once made a mistake
return (rfl[0][0], rfl[0][1] - 1, True)
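    # Worked examples: _actual_range(0, 99, 50) clamps the request to the
    # 50-byte entity and returns (0, 49, True), while _actual_range(60, 99, 50)
    # is unsatisfiable and returns (None, None, False).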
def _fill_out_range_specs_from_obj_length(self, range_specs):
# Add a few fields to each range spec:
#
# * resp_client_start, resp_client_end: the actual bytes that will
# be delivered to the client for the requested range. This may
# differ from the requested bytes if, say, the requested range
# overlaps the end of the object.
#
# * resp_segment_start, resp_segment_end: the actual offsets of the
# segments that will be decoded for the requested range. These
# differ from resp_client_start/end in that these are aligned
# to segment boundaries, while resp_client_start/end are not
# necessarily so.
#
# * satisfiable: a boolean indicating whether the range is
# satisfiable or not (i.e. the requested range overlaps the
# object in at least one byte).
#
# This is kept separate from _fill_out_range_specs_from_fa_length()
# because this computation can be done with just the response
# headers from the object servers (in particular
# X-Object-Sysmeta-Ec-Content-Length), while the computation in
# _fill_out_range_specs_from_fa_length() requires the beginnings of
# the response bodies.
for spec in range_specs:
cstart, cend, csat = self._actual_range(
spec['req_client_start'],
spec['req_client_end'],
self.obj_length)
spec['resp_client_start'] = cstart
spec['resp_client_end'] = cend
spec['satisfiable'] = csat
sstart, send, _junk = self._actual_range(
spec['req_segment_start'],
spec['req_segment_end'],
self.obj_length)
seg_size = self.policy.ec_segment_size
if (spec['req_segment_start'] is None and
sstart is not None and
sstart % seg_size != 0):
# Segment start may, in the case of a suffix request, need
# to be rounded up (not down!) to the nearest segment boundary.
# This reflects the trimming of leading garbage (partial
# fragments) from the retrieved fragments.
sstart += seg_size - (sstart % seg_size)
spec['resp_segment_start'] = sstart
spec['resp_segment_end'] = send
def _fill_out_range_specs_from_fa_length(self, fa_length, range_specs):
# Add two fields to each range spec:
#
# * resp_fragment_start, resp_fragment_end: the start and end of
# the fragments that compose this byterange. These values are
# aligned to fragment boundaries.
#
# This way, ECAppIter has the knowledge it needs to correlate
# response byteranges with requested ones for when some byteranges
# are omitted from the response entirely and also to put the right
# Content-Range headers in a multipart/byteranges response.
for spec in range_specs:
fstart, fend, _junk = self._actual_range(
spec['req_fragment_start'],
spec['req_fragment_end'],
fa_length)
spec['resp_fragment_start'] = fstart
spec['resp_fragment_end'] = fend
def __iter__(self):
if self.stashed_iter is not None:
return self
else:
raise ValueError("Failed to call kickoff() before __iter__()")
def __next__(self):
return next(self.stashed_iter)
next = __next__ # py2
def _real_iter(self, req, resp_headers):
if not self.range_specs:
client_asked_for_range = False
range_specs = [{
'req_client_start': 0,
'req_client_end': (None if self.obj_length is None
else self.obj_length - 1),
'resp_client_start': 0,
'resp_client_end': (None if self.obj_length is None
else self.obj_length - 1),
'req_segment_start': 0,
'req_segment_end': (None if self.obj_length is None
else self.obj_length - 1),
'resp_segment_start': 0,
'resp_segment_end': (None if self.obj_length is None
else self.obj_length - 1),
'req_fragment_start': 0,
'req_fragment_end': self.fa_length - 1,
'resp_fragment_start': 0,
'resp_fragment_end': self.fa_length - 1,
'satisfiable': self.obj_length > 0,
}]
else:
client_asked_for_range = True
range_specs = self.range_specs
self._fill_out_range_specs_from_obj_length(range_specs)
multipart = (len([rs for rs in range_specs if rs['satisfiable']]) > 1)
# Multipart responses are not required to be in the same order as
# the Range header; the parts may be in any order the server wants.
# Further, if multiple ranges are requested and only some are
# satisfiable, then only the satisfiable ones appear in the response
# at all. Thus, we cannot simply iterate over range_specs in order;
# we must use the Content-Range header from each part to figure out
# what we've been given.
#
# We do, however, make the assumption that all the object-server
# responses have their ranges in the same order. Otherwise, a
# streaming decode would be impossible.
def convert_ranges_iter():
seen_first_headers = False
ranges_for_resp = {}
for headers, frag_iters in self._next_ranges():
content_type = headers['Content-Type']
content_range = headers.get('Content-Range')
if content_range is not None:
fa_start, fa_end, fa_length = parse_content_range(
content_range)
elif self.fa_length <= 0:
fa_start = None
fa_end = None
fa_length = 0
else:
fa_start = 0
fa_end = self.fa_length - 1
fa_length = self.fa_length
if not seen_first_headers:
# This is the earliest we can possibly do this. On a
# 200 or 206-single-byterange response, we can learn
# the FA's length from the HTTP response headers.
# However, on a 206-multiple-byteranges response, we
# don't learn it until the first part of the
# response body, in the headers of the first MIME
# part.
#
# Similarly, the content type of a
# 206-multiple-byteranges response is
# "multipart/byteranges", not the object's actual
# content type.
self._fill_out_range_specs_from_fa_length(
fa_length, range_specs)
satisfiable = False
for range_spec in range_specs:
satisfiable |= range_spec['satisfiable']
key = (range_spec['resp_fragment_start'],
range_spec['resp_fragment_end'])
ranges_for_resp.setdefault(key, []).append(range_spec)
# The client may have asked for an unsatisfiable set of
# ranges, but when converted to fragments, the object
# servers see it as satisfiable. For example, imagine a
# request for bytes 800-900 of a 750-byte object with a
# 1024-byte segment size. The object servers will see a
# request for bytes 0-${fragsize-1}, and that's
# satisfiable, so they return 206. It's not until we
# learn the object size that we can check for this
# condition.
#
# Note that some unsatisfiable ranges *will* be caught
# by the object servers, like bytes 1800-1900 of a
# 100-byte object with 1024-byte segments. That's not
# what we're dealing with here, though.
if client_asked_for_range and not satisfiable:
req.environ[
'swift.non_client_disconnect'] = True
raise HTTPRequestedRangeNotSatisfiable(
request=req, headers=resp_headers)
self.learned_content_type = content_type
seen_first_headers = True
range_spec = ranges_for_resp[(fa_start, fa_end)].pop(0)
seg_iter = self._decode_segments_from_fragments(frag_iters)
if not range_spec['satisfiable']:
# This'll be small; just a single small segment. Discard
# it.
for x in seg_iter:
pass
continue
byterange_iter = self._iter_one_range(range_spec, seg_iter)
converted = {
"start_byte": range_spec["resp_client_start"],
"end_byte": range_spec["resp_client_end"],
"content_type": content_type,
"part_iter": byterange_iter}
if self.obj_length is not None:
converted["entity_length"] = self.obj_length
yield converted
return document_iters_to_http_response_body(
convert_ranges_iter(), self.mime_boundary, multipart, self.logger)
def _iter_one_range(self, range_spec, segment_iter):
client_start = range_spec['resp_client_start']
client_end = range_spec['resp_client_end']
segment_start = range_spec['resp_segment_start']
segment_end = range_spec['resp_segment_end']
# It's entirely possible that the client asked for a range that
# includes some bytes we have and some we don't; for example, a
# range of bytes 1000-20000000 on a 1500-byte object.
segment_end = (min(segment_end, self.obj_length - 1)
if segment_end is not None
else self.obj_length - 1)
client_end = (min(client_end, self.obj_length - 1)
if client_end is not None
else self.obj_length - 1)
if segment_start is None:
num_segments = 0
start_overrun = 0
end_overrun = 0
else:
num_segments = int(
math.ceil(float(segment_end + 1 - segment_start)
/ self.policy.ec_segment_size))
# We get full segments here, but the client may have requested a
# byte range that begins or ends in the middle of a segment.
# Thus, we have some amount of overrun (extra decoded bytes)
# that we trim off so the client gets exactly what they
# requested.
start_overrun = client_start - segment_start
end_overrun = segment_end - client_end
for i, next_seg in enumerate(segment_iter):
# We may have a start_overrun of more than one segment in
# the case of suffix-byte-range requests. However, we never
# have an end_overrun of more than one segment.
if start_overrun > 0:
seglen = len(next_seg)
if seglen <= start_overrun:
start_overrun -= seglen
continue
else:
next_seg = next_seg[start_overrun:]
start_overrun = 0
if i == (num_segments - 1) and end_overrun:
next_seg = next_seg[:-end_overrun]
yield next_seg
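    # Worked example (512-byte segments, object long enough that no clamping
    # occurs): a client range of bytes 1000-1999 maps to segment bytes
    # 512-2047, so start_overrun == 488 and end_overrun == 48; 488 bytes are
    # trimmed from the front of the first decoded segment and 48 bytes from
    # the end of the last one.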
def _decode_segments_from_fragments(self, fragment_iters):
# Decodes the fragments from the object servers and yields one
# segment at a time.
queues = [Queue(1) for _junk in range(len(fragment_iters))]
def put_fragments_in_queue(frag_iter, queue, logger_thread_locals):
self.logger.thread_locals = logger_thread_locals
try:
for fragment in frag_iter:
if fragment.startswith(b' '):
raise Exception('Leading whitespace on fragment.')
queue.put(fragment)
except GreenletExit:
# killed by contextpool
pass
except ChunkReadTimeout:
# unable to resume in ECFragGetter
self.logger.exception(
"ChunkReadTimeout fetching fragments for %r",
quote(self.path))
except ChunkWriteTimeout:
# slow client disconnect
self.logger.exception(
"ChunkWriteTimeout feeding fragments for %r",
quote(self.path))
except: # noqa
self.logger.exception("Exception fetching fragments for %r",
quote(self.path))
finally:
queue.resize(2) # ensure there's room
queue.put(None)
frag_iter.close()
segments_decoded = 0
with self.pool as pool:
for frag_iter, queue in zip(fragment_iters, queues):
pool.spawn(put_fragments_in_queue, frag_iter, queue,
self.logger.thread_locals)
while True:
fragments = []
for queue in queues:
fragment = queue.get()
queue.task_done()
fragments.append(fragment)
                # If any object server connection yields out a None, we're
# done. Either they are all None, and we've finished
# successfully; or some un-recoverable failure has left us
# with an un-reconstructible list of fragments - so we'll
# break out of the iter so WSGI can tear down the broken
# connection.
frags_with_data = sum([1 for f in fragments if f])
if frags_with_data < len(fragments):
if frags_with_data > 0:
self.logger.warning(
'Un-recoverable fragment rebuild. Only received '
'%d/%d fragments for %r', frags_with_data,
len(fragments), quote(self.path))
break
try:
segment = self.policy.pyeclib_driver.decode(fragments)
except ECDriverError as err:
self.logger.error(
"Error decoding fragments for %r. "
"Segments decoded: %d, "
"Lengths: [%s]: %s" % (
quote(self.path), segments_decoded,
', '.join(map(str, map(len, fragments))),
str(err)))
raise
segments_decoded += 1
yield segment
def app_iter_range(self, start, end):
return self
def app_iter_ranges(self, ranges, content_type, boundary, content_size):
return self
def client_range_to_segment_range(client_start, client_end, segment_size):
"""
Takes a byterange from the client and converts it into a byterange
spanning the necessary segments.
Handles prefix, suffix, and fully-specified byte ranges.
Examples:
client_range_to_segment_range(100, 700, 512) = (0, 1023)
client_range_to_segment_range(100, 700, 256) = (0, 767)
client_range_to_segment_range(300, None, 256) = (256, None)
:param client_start: first byte of the range requested by the client
:param client_end: last byte of the range requested by the client
:param segment_size: size of an EC segment, in bytes
:returns: a 2-tuple (seg_start, seg_end) where
* seg_start is the first byte of the first segment, or None if this is
a suffix byte range
* seg_end is the last byte of the last segment, or None if this is a
prefix byte range
"""
# the index of the first byte of the first segment
segment_start = (
int(client_start // segment_size)
* segment_size) if client_start is not None else None
# the index of the last byte of the last segment
segment_end = (
# bytes M-
None if client_end is None else
# bytes M-N
(((int(client_end // segment_size) + 1)
* segment_size) - 1) if client_start is not None else
# bytes -N: we get some extra bytes to make sure we
# have all we need.
#
# To see why, imagine a 100-byte segment size, a
# 340-byte object, and a request for the last 50
# bytes. Naively requesting the last 100 bytes would
# result in a truncated first segment and hence a
# truncated download. (Of course, the actual
# obj-server requests are for fragments, not
# segments, but that doesn't change the
# calculation.)
#
# This does mean that we fetch an extra segment if
# the object size is an exact multiple of the
# segment size. It's a little wasteful, but it's
# better to be a little wasteful than to get some
# range requests completely wrong.
(int(math.ceil((
float(client_end) / segment_size) + 1)) # nsegs
* segment_size))
return (segment_start, segment_end)
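# Additional example: a suffix request for the last 50 bytes with a 100-byte
# segment size gives
#     client_range_to_segment_range(None, 50, 100) == (None, 200)
# i.e. two trailing segments' worth of bytes are fetched, for the reason given
# in the "bytes -N" comment above.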
def segment_range_to_fragment_range(segment_start, segment_end, segment_size,
fragment_size):
"""
Takes a byterange spanning some segments and converts that into a
byterange spanning the corresponding fragments within their fragment
archives.
Handles prefix, suffix, and fully-specified byte ranges.
:param segment_start: first byte of the first segment
:param segment_end: last byte of the last segment
:param segment_size: size of an EC segment, in bytes
:param fragment_size: size of an EC fragment, in bytes
:returns: a 2-tuple (frag_start, frag_end) where
* frag_start is the first byte of the first fragment, or None if this
is a suffix byte range
* frag_end is the last byte of the last fragment, or None if this is a
prefix byte range
"""
# Note: segment_start and (segment_end + 1) are
# multiples of segment_size, so we don't have to worry
# about integer math giving us rounding troubles.
#
# There's a whole bunch of +1 and -1 in here; that's because HTTP wants
# byteranges to be inclusive of the start and end, so e.g. bytes 200-300
# is a range containing 101 bytes. Python has half-inclusive ranges, of
# course, so we have to convert back and forth. We try to keep things in
# HTTP-style byteranges for consistency.
# the index of the first byte of the first fragment
fragment_start = ((
segment_start // segment_size * fragment_size)
if segment_start is not None else None)
# the index of the last byte of the last fragment
fragment_end = (
# range unbounded on the right
None if segment_end is None else
# range unbounded on the left; no -1 since we're
# asking for the last N bytes, not to have a
# particular byte be the last one
((segment_end + 1) // segment_size
* fragment_size) if segment_start is None else
# range bounded on both sides; the -1 is because the
# rest of the expression computes the length of the
# fragment, and a range of N bytes starts at index M
# and ends at M + N - 1.
((segment_end + 1) // segment_size * fragment_size) - 1)
return (fragment_start, fragment_end)
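# Worked example (512-byte segments and an illustrative 490-byte fragment
# size):
#     segment_range_to_fragment_range(0, 1023, 512, 490) == (0, 979)
#     segment_range_to_fragment_range(None, 1023, 512, 490) == (None, 980)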
NO_DATA_SENT = 1
SENDING_DATA = 2
DATA_SENT = 3
DATA_ACKED = 4
COMMIT_SENT = 5
class Putter(object):
"""
Putter for backend PUT requests.
Encapsulates all the actions required to establish a connection with a
storage node and stream data to that node.
:param conn: an HTTPConnection instance
:param node: dict describing storage node
:param resp: an HTTPResponse instance if connect() received final response
:param path: the object path to send to the storage node
:param connect_duration: time taken to initiate the HTTPConnection
:param watchdog: a spawned Watchdog instance that will enforce timeouts
:param write_timeout: time limit to write a chunk to the connection socket
    :param send_exception_handler: callback called when an exception occurred
writing to the connection socket
:param logger: a Logger instance
:param chunked: boolean indicating if the request encoding is chunked
"""
def __init__(self, conn, node, resp, path, connect_duration, watchdog,
write_timeout, send_exception_handler, logger,
chunked=False):
# Note: you probably want to call Putter.connect() instead of
# instantiating one of these directly.
self.conn = conn
self.node = node
self.resp = self.final_resp = resp
self.path = path
self.connect_duration = connect_duration
self.watchdog = watchdog
self.write_timeout = write_timeout
self.send_exception_handler = send_exception_handler
# for handoff nodes node_index is None
self.node_index = node.get('index')
self.failed = False
self.state = NO_DATA_SENT
self.chunked = chunked
self.logger = logger
def await_response(self, timeout, informational=False):
"""
        Get the 100-continue response that marks the end of the first phase
        of a two-phase commit, or the final response, i.e. the one with
        status >= 200.
Might or might not actually wait for anything. If we said Expect:
100-continue but got back a non-100 response, that'll be the thing
returned, and we won't do any network IO to get it. OTOH, if we got
a 100 Continue response and sent up the PUT request's body, then
we'll actually read the 2xx-5xx response off the network here.
:param timeout: time to wait for a response
:param informational: if True then try to get a 100-continue response,
otherwise try to get a final response.
:returns: HTTPResponse
:raises Timeout: if the response took too long
"""
# don't do this update of self.resp if the Expect response during
# connect() was actually a final response
if not self.final_resp:
with Timeout(timeout):
if informational:
self.resp = self.conn.getexpect()
else:
self.resp = self.conn.getresponse()
return self.resp
def _start_object_data(self):
# Called immediately before the first chunk of object data is sent.
# Subclasses may implement custom behaviour
pass
def send_chunk(self, chunk, timeout_at=None):
if not chunk:
# If we're not using chunked transfer-encoding, sending a 0-byte
# chunk is just wasteful. If we *are* using chunked
# transfer-encoding, sending a 0-byte chunk terminates the
# request body. Neither one of these is good.
return
elif self.state == DATA_SENT:
raise ValueError("called send_chunk after end_of_object_data")
if self.state == NO_DATA_SENT:
self._start_object_data()
self.state = SENDING_DATA
self._send_chunk(chunk, timeout_at=timeout_at)
def end_of_object_data(self, **kwargs):
"""
Call when there is no more data to send.
"""
if self.state == DATA_SENT:
raise ValueError("called end_of_object_data twice")
self._send_chunk(b'')
self.state = DATA_SENT
def _send_chunk(self, chunk, timeout_at=None):
if not self.failed:
if self.chunked:
to_send = b"%x\r\n%s\r\n" % (len(chunk), chunk)
else:
to_send = chunk
try:
with WatchdogTimeout(self.watchdog, self.write_timeout,
ChunkWriteTimeout, timeout_at=timeout_at):
self.conn.send(to_send)
except (Exception, ChunkWriteTimeout):
self.failed = True
self.send_exception_handler(self.node, 'Object',
'Trying to write to %s'
% quote(self.path))
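    # Framing note: with chunked transfer-encoding, a 5-byte chunk b'hello'
    # is written as b'5\r\nhello\r\n', and the empty chunk sent by
    # end_of_object_data() becomes the terminating b'0\r\n\r\n'.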
def close(self):
# release reference to response to ensure connection really does close,
# see bug https://bugs.launchpad.net/swift/+bug/1594739
self.resp = self.final_resp = None
self.conn.close()
@classmethod
def _make_connection(cls, node, part, path, headers, conn_timeout,
node_timeout):
ip, port = get_ip_port(node, headers)
start_time = time.time()
with ConnectionTimeout(conn_timeout):
conn = http_connect(ip, port, node['device'],
part, 'PUT', path, headers)
connect_duration = time.time() - start_time
with ResponseTimeout(node_timeout):
resp = conn.getexpect()
if resp.status == HTTP_INSUFFICIENT_STORAGE:
raise InsufficientStorage
if is_server_error(resp.status):
raise PutterConnectError(resp.status)
final_resp = None
if (is_success(resp.status) or
resp.status in (HTTP_CONFLICT, HTTP_UNPROCESSABLE_ENTITY) or
(headers.get('If-None-Match', None) is not None and
resp.status == HTTP_PRECONDITION_FAILED)):
final_resp = resp
return conn, resp, final_resp, connect_duration
@classmethod
def connect(cls, node, part, path, headers, watchdog, conn_timeout,
node_timeout, write_timeout, send_exception_handler,
logger=None, chunked=False, **kwargs):
"""
Connect to a backend node and send the headers.
:returns: Putter instance
:raises ConnectionTimeout: if initial connection timed out
:raises ResponseTimeout: if header retrieval timed out
:raises InsufficientStorage: on 507 response from node
:raises PutterConnectError: on non-507 server error response from node
"""
conn, expect_resp, final_resp, connect_duration = cls._make_connection(
node, part, path, headers, conn_timeout, node_timeout)
return cls(conn, node, final_resp, path, connect_duration, watchdog,
write_timeout, send_exception_handler, logger,
chunked=chunked)
class MIMEPutter(Putter):
"""
Putter for backend PUT requests that use MIME.
This is here mostly to wrap up the fact that all multipart PUTs are
chunked because of the mime boundary footer trick and the first
half of the two-phase PUT conversation handling.
An HTTP PUT request that supports streaming.
"""
def __init__(self, conn, node, resp, req, connect_duration, watchdog,
write_timeout, send_exception_handler, logger, mime_boundary,
multiphase=False):
super(MIMEPutter, self).__init__(conn, node, resp, req,
connect_duration, watchdog,
write_timeout, send_exception_handler,
logger)
        # Note: you probably want to call MIMEPutter.connect() instead of
        # instantiating one of these directly.
self.chunked = True # MIME requests always send chunked body
self.mime_boundary = mime_boundary
self.multiphase = multiphase
def _start_object_data(self):
# We're sending the object plus other stuff in the same request
# body, all wrapped up in multipart MIME, so we'd better start
# off the MIME document before sending any object data.
self._send_chunk(b"--%s\r\nX-Document: object body\r\n\r\n" %
(self.mime_boundary,))
def end_of_object_data(self, footer_metadata=None):
"""
Call when there is no more data to send.
Overrides superclass implementation to send any footer metadata
after object data.
:param footer_metadata: dictionary of metadata items
to be sent as footers.
"""
if self.state == DATA_SENT:
raise ValueError("called end_of_object_data twice")
elif self.state == NO_DATA_SENT and self.mime_boundary:
self._start_object_data()
footer_body = json.dumps(footer_metadata).encode('ascii')
footer_md5 = md5(
footer_body, usedforsecurity=False).hexdigest().encode('ascii')
tail_boundary = (b"--%s" % (self.mime_boundary,))
if not self.multiphase:
# this will be the last part sent
tail_boundary = tail_boundary + b"--"
message_parts = [
(b"\r\n--%s\r\n" % self.mime_boundary),
b"X-Document: object metadata\r\n",
b"Content-MD5: %s\r\n" % footer_md5,
b"\r\n",
footer_body, b"\r\n",
tail_boundary, b"\r\n",
]
self._send_chunk(b"".join(message_parts))
self._send_chunk(b'')
self.state = DATA_SENT
def send_commit_confirmation(self):
"""
Call when there are > quorum 2XX responses received. Send commit
confirmations to all object nodes to finalize the PUT.
"""
if not self.multiphase:
raise ValueError(
"called send_commit_confirmation but multiphase is False")
if self.state == COMMIT_SENT:
raise ValueError("called send_commit_confirmation twice")
self.state = DATA_ACKED
if self.mime_boundary:
body = b"put_commit_confirmation"
tail_boundary = (b"--%s--" % (self.mime_boundary,))
message_parts = [
b"X-Document: put commit\r\n",
b"\r\n",
body, b"\r\n",
tail_boundary,
]
self._send_chunk(b"".join(message_parts))
self._send_chunk(b'')
self.state = COMMIT_SENT
@classmethod
def connect(cls, node, part, req, headers, watchdog, conn_timeout,
node_timeout, write_timeout, send_exception_handler,
logger=None, need_multiphase=True, **kwargs):
"""
Connect to a backend node and send the headers.
Override superclass method to notify object of need for support for
multipart body with footers and optionally multiphase commit, and
verify object server's capabilities.
:param need_multiphase: if True then multiphase support is required of
the object server
:raises FooterNotSupported: if need_metadata_footer is set but
backend node can't process footers
:raises MultiphasePUTNotSupported: if need_multiphase is set but
backend node can't handle multiphase PUT
"""
mime_boundary = b"%.64x" % random.randint(0, 16 ** 64)
headers = HeaderKeyDict(headers)
# when using a multipart mime request to backend the actual
# content-length is not equal to the object content size, so move the
# object content size to X-Backend-Obj-Content-Length if that has not
# already been set by the EC PUT path.
headers.setdefault('X-Backend-Obj-Content-Length',
headers.pop('Content-Length', None))
# We're going to be adding some unknown amount of data to the
# request, so we can't use an explicit content length, and thus
# we must use chunked encoding.
headers['Transfer-Encoding'] = 'chunked'
headers['Expect'] = '100-continue'
headers['X-Backend-Obj-Multipart-Mime-Boundary'] = mime_boundary
headers['X-Backend-Obj-Metadata-Footer'] = 'yes'
if need_multiphase:
headers['X-Backend-Obj-Multiphase-Commit'] = 'yes'
conn, expect_resp, final_resp, connect_duration = cls._make_connection(
node, part, req, headers, conn_timeout, node_timeout)
if is_informational(expect_resp.status):
continue_headers = HeaderKeyDict(expect_resp.getheaders())
can_send_metadata_footer = config_true_value(
continue_headers.get('X-Obj-Metadata-Footer', 'no'))
can_handle_multiphase_put = config_true_value(
continue_headers.get('X-Obj-Multiphase-Commit', 'no'))
if not can_send_metadata_footer:
raise FooterNotSupported()
if need_multiphase and not can_handle_multiphase_put:
raise MultiphasePUTNotSupported()
return cls(conn, node, final_resp, req, connect_duration, watchdog,
write_timeout, send_exception_handler, logger,
mime_boundary, multiphase=need_multiphase)
def chunk_transformer(policy):
"""
A generator to transform a source chunk to erasure coded chunks for each
    `send` call. The number of erasure coded chunks yielded per call equals
    policy.ec_n_unique_fragments.
"""
segment_size = policy.ec_segment_size
buf = collections.deque()
total_buf_len = 0
chunk = yield
while chunk:
buf.append(chunk)
total_buf_len += len(chunk)
if total_buf_len >= segment_size:
chunks_to_encode = []
# extract as many chunks as we can from the input buffer
while total_buf_len >= segment_size:
to_take = segment_size
pieces = []
while to_take > 0:
piece = buf.popleft()
if len(piece) > to_take:
buf.appendleft(piece[to_take:])
piece = piece[:to_take]
pieces.append(piece)
to_take -= len(piece)
total_buf_len -= len(piece)
chunks_to_encode.append(b''.join(pieces))
frags_by_byte_order = []
for chunk_to_encode in chunks_to_encode:
frags_by_byte_order.append(
policy.pyeclib_driver.encode(chunk_to_encode))
# Sequential calls to encode() have given us a list that
# looks like this:
#
# [[frag_A1, frag_B1, frag_C1, ...],
# [frag_A2, frag_B2, frag_C2, ...], ...]
#
# What we need is a list like this:
#
# [(frag_A1 + frag_A2 + ...), # destined for node A
# (frag_B1 + frag_B2 + ...), # destined for node B
# (frag_C1 + frag_C2 + ...), # destined for node C
# ...]
obj_data = [b''.join(frags)
for frags in zip(*frags_by_byte_order)]
chunk = yield obj_data
else:
# didn't have enough data to encode
chunk = yield None
# Now we've gotten an empty chunk, which indicates end-of-input.
# Take any leftover bytes and encode them.
last_bytes = b''.join(buf)
if last_bytes:
last_frags = policy.pyeclib_driver.encode(last_bytes)
yield last_frags
else:
yield [b''] * policy.ec_n_unique_fragments
def trailing_metadata(policy, client_obj_hasher,
bytes_transferred_from_client,
fragment_archive_index):
return HeaderKeyDict({
# etag and size values are being added twice here.
# The container override header is used to update the container db
# with these values as they represent the correct etag and size for
# the whole object and not just the FA.
# The object sysmeta headers will be saved on each FA of the object.
'X-Object-Sysmeta-EC-Etag': client_obj_hasher.hexdigest(),
'X-Object-Sysmeta-EC-Content-Length':
str(bytes_transferred_from_client),
# older style x-backend-container-update-override-* headers are used
# here (rather than x-object-sysmeta-container-update-override-*
# headers) for backwards compatibility: the request may be to an object
# server that has not yet been upgraded to accept the newer style
# x-object-sysmeta-container-update-override- headers.
'X-Backend-Container-Update-Override-Etag':
client_obj_hasher.hexdigest(),
'X-Backend-Container-Update-Override-Size':
str(bytes_transferred_from_client),
'X-Object-Sysmeta-Ec-Frag-Index': str(fragment_archive_index),
# These fields are for debuggability,
# AKA "what is this thing?"
'X-Object-Sysmeta-EC-Scheme': policy.ec_scheme_description,
'X-Object-Sysmeta-EC-Segment-Size': str(policy.ec_segment_size),
})
class ECGetResponseBucket(object):
"""
A helper class to encapsulate the properties of buckets in which fragment
getters and alternate nodes are collected.
"""
def __init__(self, policy, timestamp):
"""
:param policy: an instance of ECStoragePolicy
:param timestamp: a Timestamp, or None for a bucket of error responses
"""
self.policy = policy
self.timestamp = timestamp
# if no timestamp when init'd then the bucket will update its timestamp
# as responses are added
self.update_timestamp = timestamp is None
self.gets = collections.defaultdict(list)
self.alt_nodes = collections.defaultdict(list)
self._durable = False
self.status = self.headers = None
def set_durable(self):
self._durable = True
@property
def durable(self):
return self._durable
def add_response(self, getter, parts_iter):
"""
Add another response to this bucket. Response buckets can be for
fragments with the same timestamp, or for errors with the same status.
"""
headers = getter.last_headers
timestamp_str = headers.get('X-Backend-Timestamp',
headers.get('X-Timestamp'))
if timestamp_str and self.update_timestamp:
# 404s will keep the most recent timestamp
self.timestamp = max(Timestamp(timestamp_str), self.timestamp)
if not self.gets:
# stash first set of backend headers, which will be used to
# populate a client response
self.status = getter.last_status
# TODO: each bucket is for a single *data* timestamp, but sources
# in the same bucket may have different *metadata* timestamps if
# some backends have more recent .meta files than others. Currently
# we just use the last received metadata headers - this behavior is
# ok and is consistent with a replication policy GET which
# similarly does not attempt to find the backend with the most
            # recent metadata. We could alternatively choose to use the
            # *newest* metadata headers for self.headers by selecting the
            # source with the latest X-Timestamp.
self.headers = headers
elif headers.get('X-Object-Sysmeta-Ec-Etag') != \
self.headers.get('X-Object-Sysmeta-Ec-Etag'):
# Fragments at the same timestamp with different etags are never
# expected and error buckets shouldn't have this header. If somehow
# this happens then ignore those responses to avoid mixing
            # fragments that will not reconstruct together; otherwise an
            # exception from pyeclib is almost certain.
raise ValueError("ETag mismatch")
frag_index = headers.get('X-Object-Sysmeta-Ec-Frag-Index')
frag_index = int(frag_index) if frag_index is not None else None
self.gets[frag_index].append((getter, parts_iter))
def get_responses(self):
"""
Return a list of all useful sources. Where there are multiple sources
associated with the same frag_index then only one is included.
:return: a list of sources, each source being a tuple of form
(ECFragGetter, iter)
"""
all_sources = []
for frag_index, sources in self.gets.items():
if frag_index is None:
# bad responses don't have a frag_index (and fake good
# responses from some unit tests)
all_sources.extend(sources)
else:
all_sources.extend(sources[:1])
return all_sources
def add_alternate_nodes(self, node, frag_indexes):
for frag_index in frag_indexes:
self.alt_nodes[frag_index].append(node)
@property
def shortfall(self):
"""
The number of additional responses needed to complete this bucket;
typically (ndata - resp_count).
If the bucket has no durable responses, shortfall is extended out to
replica count to ensure the proxy makes additional primary requests.
"""
resp_count = len(self.get_responses())
if self.durable or self.status == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE:
return max(self.policy.ec_ndata - resp_count, 0)
alt_count = min(self.policy.object_ring.replica_count - resp_count,
self.policy.ec_nparity)
return max([1, self.policy.ec_ndata - resp_count, alt_count])
@property
def shortfall_with_alts(self):
# The shortfall that we expect to have if we were to send requests
# for frags on the alt nodes.
alts = set(self.alt_nodes.keys()).difference(set(self.gets.keys()))
result = self.policy.ec_ndata - (len(self.get_responses()) + len(alts))
return max(result, 0)
def close_conns(self):
"""
Close bucket's responses; they won't be used for a client response.
"""
for getter, frag_iter in self.get_responses():
if getter.source:
getter.source.close()
def __str__(self):
# return a string summarising bucket state, useful for debugging.
return '<%s, %s, %s, %s(%s), %s>' \
% (self.timestamp.internal, self.status, self._durable,
self.shortfall, self.shortfall_with_alts, len(self.gets))
class ECGetResponseCollection(object):
"""
Manages all successful EC GET responses gathered by ECFragGetters.
A response comprises a tuple of (<getter instance>, <parts iterator>). All
responses having the same data timestamp are placed in an
ECGetResponseBucket for that timestamp. The buckets are stored in the
'buckets' dict which maps timestamp-> bucket.
This class encapsulates logic for selecting the best bucket from the
collection, and for choosing alternate nodes.
"""
def __init__(self, policy):
"""
:param policy: an instance of ECStoragePolicy
"""
self.policy = policy
self.buckets = {}
self.default_bad_bucket = ECGetResponseBucket(self.policy, None)
self.bad_buckets = {}
self.node_iter_count = 0
def _get_bucket(self, timestamp):
"""
:param timestamp: a Timestamp
:return: ECGetResponseBucket for given timestamp
"""
return self.buckets.setdefault(
timestamp, ECGetResponseBucket(self.policy, timestamp))
def _get_bad_bucket(self, status):
"""
:param status: a representation of status
:return: ECGetResponseBucket for given status
"""
return self.bad_buckets.setdefault(
status, ECGetResponseBucket(self.policy, None))
def add_response(self, get, parts_iter):
"""
Add a response to the collection.
:param get: An instance of
:class:`~swift.proxy.controllers.obj.ECFragGetter`
:param parts_iter: An iterator over response body parts
:raises ValueError: if the response etag or status code values do not
match any values previously received for the same timestamp
"""
if is_success(get.last_status):
self.add_good_response(get, parts_iter)
else:
self.add_bad_resp(get, parts_iter)
def add_bad_resp(self, get, parts_iter):
bad_bucket = self._get_bad_bucket(get.last_status)
bad_bucket.add_response(get, parts_iter)
def add_good_response(self, get, parts_iter):
headers = get.last_headers
# Add the response to the appropriate bucket keyed by data file
# timestamp. Fall back to using X-Backend-Timestamp as key for object
# servers that have not been upgraded.
t_data_file = headers.get('X-Backend-Data-Timestamp')
t_obj = headers.get('X-Backend-Timestamp', headers.get('X-Timestamp'))
if t_data_file:
timestamp = Timestamp(t_data_file)
elif t_obj:
timestamp = Timestamp(t_obj)
else:
# Don't think this should ever come up in practice,
# but tests cover it
timestamp = None
self._get_bucket(timestamp).add_response(get, parts_iter)
# The node may also have alternate fragments indexes (possibly at
# different timestamps). For each list of alternate fragments indexes,
# find the bucket for their data file timestamp and add the node and
# list to that bucket's alternate nodes.
frag_sets = safe_json_loads(headers.get('X-Backend-Fragments')) or {}
for t_frag, frag_set in frag_sets.items():
t_frag = Timestamp(t_frag)
self._get_bucket(t_frag).add_alternate_nodes(
get.source.node, frag_set)
# If the response includes a durable timestamp then mark that bucket as
# durable. Note that this may be a different bucket than the one this
# response got added to, and that we may never go and get a durable
# frag from this node; it is sufficient that we have been told that a
# durable frag exists, somewhere, at t_durable.
t_durable = headers.get('X-Backend-Durable-Timestamp')
if not t_durable and not t_data_file:
# obj server not upgraded so assume this response's frag is durable
t_durable = t_obj
if t_durable:
self._get_bucket(Timestamp(t_durable)).set_durable()
def _sort_buckets(self):
def key_fn(bucket):
# Returns a tuple to use for sort ordering:
# durable buckets with no shortfall sort higher,
# then durable buckets with no shortfall_with_alts,
# then non-durable buckets with no shortfall,
# otherwise buckets with lowest shortfall_with_alts sort higher,
# finally buckets with newer timestamps sort higher.
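            # For example, a durable bucket with zero shortfall always sorts
            # ahead of a newer but non-durable bucket, because the first
            # element of the key tuple dominates the comparison.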
return (bucket.durable,
bucket.shortfall <= 0,
-1 * bucket.shortfall_with_alts,
bucket.timestamp)
return sorted(self.buckets.values(), key=key_fn, reverse=True)
@property
def best_bucket(self):
"""
Return the "best" bucket in the collection.
The "best" bucket is the newest timestamp with sufficient getters, or
the closest to having sufficient getters, unless it is bettered by a
bucket with potential alternate nodes.
If there are no good buckets we return the "least_bad" bucket.
:return: An instance of :class:`~ECGetResponseBucket` or None if there
are no buckets in the collection.
"""
sorted_buckets = self._sort_buckets()
for bucket in sorted_buckets:
# tombstones will set bad_bucket.timestamp
not_found_bucket = self.bad_buckets.get(404)
if not_found_bucket and not_found_bucket.timestamp and \
bucket.timestamp < not_found_bucket.timestamp:
# "good bucket" is trumped by newer tombstone
continue
return bucket
return self.least_bad_bucket
def choose_best_bucket(self):
best_bucket = self.best_bucket
# it's now or never -- close down any other requests
for bucket in self.buckets.values():
if bucket is best_bucket:
continue
bucket.close_conns()
return best_bucket
@property
def least_bad_bucket(self):
"""
Return the bad_bucket with the smallest shortfall
"""
if all(status == 404 for status in self.bad_buckets):
# NB: also covers an empty self.bad_buckets
return self.default_bad_bucket
# we want "enough" 416s to prevent "extra" requests - but we keep
# digging on 404s
short, status = min((bucket.shortfall, status)
for status, bucket in self.bad_buckets.items()
if status != 404)
return self.bad_buckets[status]
@property
def shortfall(self):
best_bucket = self.best_bucket
shortfall = best_bucket.shortfall
return min(shortfall, self.least_bad_bucket.shortfall)
@property
def durable(self):
return self.best_bucket.durable
def _get_frag_prefs(self):
# Construct the current frag_prefs list, with best_bucket prefs first.
frag_prefs = []
for bucket in self._sort_buckets():
if bucket.timestamp:
exclusions = [fi for fi in bucket.gets if fi is not None]
prefs = {'timestamp': bucket.timestamp.internal,
'exclude': exclusions}
frag_prefs.append(prefs)
return frag_prefs
def get_extra_headers(self):
frag_prefs = self._get_frag_prefs()
return {'X-Backend-Fragment-Preferences': json.dumps(frag_prefs)}
def _get_alternate_nodes(self):
if self.node_iter_count <= self.policy.ec_ndata:
# It makes sense to wait before starting to use alternate nodes,
# because if we find sufficient frags on *distinct* nodes then we
            # spread work across more nodes. There's no formal proof that
# waiting for ec_ndata GETs is the right answer, but it seems
# reasonable to try *at least* that many primary nodes before
# resorting to alternate nodes.
return None
bucket = self.best_bucket
if (bucket is None) or (bucket.shortfall <= 0) or not bucket.durable:
return None
alt_frags = set(bucket.alt_nodes.keys())
got_frags = set(bucket.gets.keys())
wanted_frags = list(alt_frags.difference(got_frags))
# We may have the same frag_index on more than one node so shuffle to
# avoid using the same frag_index consecutively, since we may not get a
# response from the last node provided before being asked to provide
# another node.
random.shuffle(wanted_frags)
for frag_index in wanted_frags:
nodes = bucket.alt_nodes.get(frag_index)
if nodes:
return nodes
return None
def has_alternate_node(self):
return True if self._get_alternate_nodes() else False
def provide_alternate_node(self):
"""
Callback function that is installed in a NodeIter. Called on every call
to NodeIter.next(), which means we can track the number of nodes to
which GET requests have been made and selectively inject an alternate
node, if we have one.
:return: A dict describing a node to which the next GET request
should be made.
"""
self.node_iter_count += 1
nodes = self._get_alternate_nodes()
if nodes:
return nodes.pop(0).copy()
class ECFragGetter(GetterBase):
def __init__(self, app, req, node_iter, partition, policy, path,
backend_headers, header_provider, logger_thread_locals,
logger):
super(ECFragGetter, self).__init__(
app=app, req=req, node_iter=node_iter,
partition=partition, policy=policy, path=path,
backend_headers=backend_headers, logger=logger)
self.header_provider = header_provider
self.fragment_size = policy.fragment_size
self.skip_bytes = 0
self.logger_thread_locals = logger_thread_locals
self.status = self.reason = self.body = self.source_headers = None
self._source_iter = None
def _get_next_response_part(self):
node_timeout = self.app.recoverable_node_timeout
while True:
# the loop here is to resume if trying to parse
# multipart/byteranges response raises a ChunkReadTimeout
# and resets the source_parts_iter
try:
with WatchdogTimeout(self.app.watchdog, node_timeout,
ChunkReadTimeout):
# If we don't have a multipart/byteranges response,
# but just a 200 or a single-range 206, then this
# performs no IO, and just returns source (or
# raises StopIteration).
# Otherwise, this call to next() performs IO when
                    # we have a multipart/byteranges response, as it
                    # will read the MIME boundary and part headers.
start_byte, end_byte, length, headers, part = next(
self.source.parts_iter)
return (start_byte, end_byte, length, headers, part)
except ChunkReadTimeout:
if not self._replace_source(
'Trying to read next part of EC multi-part GET '
'(retrying)'):
raise
def _iter_bytes_from_response_part(self, part_file, nbytes):
buf = b''
part_file = ByteCountEnforcer(part_file, nbytes)
while True:
try:
with WatchdogTimeout(self.app.watchdog,
self.app.recoverable_node_timeout,
ChunkReadTimeout):
chunk = part_file.read(self.app.object_chunk_size)
# NB: this append must be *inside* the context
# manager for test.unit.SlowBody to do its thing
buf += chunk
if nbytes is not None:
nbytes -= len(chunk)
except (ChunkReadTimeout, ShortReadError):
exc_type, exc_value, exc_traceback = sys.exc_info()
try:
self.fast_forward(self.bytes_used_from_backend)
except (HTTPException, ValueError):
self.logger.exception('Unable to fast forward')
six.reraise(exc_type, exc_value, exc_traceback)
except RangeAlreadyComplete:
break
buf = b''
if self._replace_source(
'Trying to read EC fragment during GET (retrying)'):
try:
_junk, _junk, _junk, _junk, part_file = \
self._get_next_response_part()
except StopIteration:
# it's not clear to me how to make
# _get_next_response_part raise StopIteration for the
# first doc part of a new request
six.reraise(exc_type, exc_value, exc_traceback)
part_file = ByteCountEnforcer(part_file, nbytes)
else:
six.reraise(exc_type, exc_value, exc_traceback)
else:
if buf and self.skip_bytes:
if self.skip_bytes < len(buf):
buf = buf[self.skip_bytes:]
self.bytes_used_from_backend += self.skip_bytes
self.skip_bytes = 0
else:
self.skip_bytes -= len(buf)
self.bytes_used_from_backend += len(buf)
buf = b''
while buf and (len(buf) >= self.fragment_size or not chunk):
client_chunk = buf[:self.fragment_size]
buf = buf[self.fragment_size:]
with WatchdogTimeout(self.app.watchdog,
self.app.client_timeout,
ChunkWriteTimeout):
self.bytes_used_from_backend += len(client_chunk)
yield client_chunk
if not chunk:
break
def _iter_parts_from_response(self, req):
try:
part_iter = None
try:
while True:
try:
start_byte, end_byte, length, headers, part = \
self._get_next_response_part()
except StopIteration:
# it seems this is the only way out of the loop; not
# sure why the req.environ update is always needed
req.environ['swift.non_client_disconnect'] = True
break
# skip_bytes compensates for the backend request range
# expansion done in _convert_range
self.skip_bytes = bytes_to_skip(
self.fragment_size, start_byte)
self.learn_size_from_content_range(
start_byte, end_byte, length)
self.bytes_used_from_backend = 0
# not length; that refers to the whole object, so is the
# wrong value to use for GET-range responses
byte_count = ((end_byte - start_byte + 1) - self.skip_bytes
if (end_byte is not None
and start_byte is not None)
else None)
part_iter = CooperativeIterator(
self._iter_bytes_from_response_part(part, byte_count))
yield {'start_byte': start_byte, 'end_byte': end_byte,
'entity_length': length, 'headers': headers,
'part_iter': part_iter}
self.pop_range()
finally:
if part_iter:
part_iter.close()
except ChunkReadTimeout:
self.app.exception_occurred(self.source.node, 'Object',
'Trying to read during GET')
raise
except ChunkWriteTimeout:
self.logger.warning(
'Client did not read from proxy within %ss' %
self.app.client_timeout)
self.logger.increment('client_timeouts')
except GeneratorExit:
warn = True
req_range = self.backend_headers['Range']
if req_range:
req_range = Range(req_range)
if len(req_range.ranges) == 1:
begin, end = req_range.ranges[0]
if end is not None and begin is not None:
if end - begin + 1 == self.bytes_used_from_backend:
warn = False
if not req.environ.get('swift.non_client_disconnect') and warn:
self.logger.warning(
'Client disconnected on read of EC frag %r', self.path)
raise
except Exception:
self.logger.exception('Trying to send to client')
raise
finally:
self.source.close()
@property
def last_status(self):
return self.status or HTTP_INTERNAL_SERVER_ERROR
@property
def last_headers(self):
if self.source_headers:
return HeaderKeyDict(self.source_headers)
else:
return HeaderKeyDict()
def _make_node_request(self, node, node_timeout):
# make a backend request; return a response if it has an acceptable
# status code, otherwise None
self.logger.thread_locals = self.logger_thread_locals
req_headers = dict(self.backend_headers)
ip, port = get_ip_port(node, req_headers)
req_headers.update(self.header_provider())
start_node_timing = time.time()
try:
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(
ip, port, node['device'],
self.partition, 'GET', self.path,
headers=req_headers,
query_string=self.req.query_string)
self.app.set_node_timing(node, time.time() - start_node_timing)
with Timeout(node_timeout):
possible_source = conn.getresponse()
# See NOTE: swift_conn at top of file about this.
possible_source.swift_conn = conn
except (Exception, Timeout):
self.app.exception_occurred(
node, 'Object',
'Trying to %(method)s %(path)s' %
{'method': self.req.method, 'path': self.req.path})
return None
src_headers = dict(
(k.lower(), v) for k, v in
possible_source.getheaders())
if 'handoff_index' in node and \
(is_server_error(possible_source.status) or
possible_source.status == HTTP_NOT_FOUND) and \
not Timestamp(src_headers.get('x-backend-timestamp', 0)):
# throw out 5XX and 404s from handoff nodes unless the data is
# really on disk and had been DELETEd
self.logger.debug('Ignoring %s from handoff' %
possible_source.status)
conn.close()
return None
self.status = possible_source.status
self.reason = possible_source.reason
self.source_headers = possible_source.getheaders()
if is_good_source(possible_source.status, server_type='Object'):
self.body = None
return possible_source
else:
self.body = possible_source.read()
conn.close()
if self.app.check_response(node, 'Object', possible_source, 'GET',
self.path):
self.logger.debug(
'Ignoring %s from primary' % possible_source.status)
return None
@property
def source_iter(self):
"""
An iterator over responses to backend fragment GETs. Yields an
instance of ``GetterSource`` if a response is good, otherwise ``None``.
"""
if self._source_iter is None:
self._source_iter = self._source_gen()
return self._source_iter
def _source_gen(self):
self.status = self.reason = self.body = self.source_headers = None
for node in self.node_iter:
source = self._make_node_request(
node, self.app.recoverable_node_timeout)
if source:
yield GetterSource(self.app, source, node)
else:
yield None
self.status = self.reason = self.body = self.source_headers = None
def _find_source(self):
# capture last used etag before continuation
used_etag = self.last_headers.get('X-Object-Sysmeta-EC-ETag')
for source in self.source_iter:
if not source:
# _make_node_request only returns good sources
continue
if source.resp.getheader('X-Object-Sysmeta-EC-ETag') != used_etag:
self.logger.warning(
'Skipping source (etag mismatch: got %s, expected %s)',
source.resp.getheader('X-Object-Sysmeta-EC-ETag'),
used_etag)
else:
self.source = source
return True
return False
def response_parts_iter(self, req):
"""
Create an iterator over a single fragment response body.
:param req: a ``swob.Request``.
        :return: an iterator that yields chunks of bytes from a fragment
response body.
"""
it = None
try:
source = next(self.source_iter)
except StopIteration:
pass
else:
if source:
self.source = source
it = self._iter_parts_from_response(req)
return it
@ObjectControllerRouter.register(EC_POLICY)
class ECObjectController(BaseObjectController):
def _fragment_GET_request(
self, req, node_iter, partition, policy,
header_provider, logger_thread_locals):
"""
Makes a GET request for a fragment.
"""
self.logger.thread_locals = logger_thread_locals
backend_headers = self.generate_request_headers(
req, additional=req.headers)
getter = ECFragGetter(self.app, req, node_iter, partition,
policy, req.swift_entity_path, backend_headers,
header_provider, logger_thread_locals,
self.logger)
return (getter, getter.response_parts_iter(req))
def _convert_range(self, req, policy):
"""
Take the requested range(s) from the client and convert it to range(s)
to be sent to the object servers.
This includes widening requested ranges to full segments, then
converting those ranges to fragments so that we retrieve the minimum
number of fragments from the object server.
Mutates the request passed in.
Returns a list of range specs (dictionaries with the different byte
indices in them).
"""
# Since segments and fragments have different sizes, we need
# to modify the Range header sent to the object servers to
# make sure we get the right fragments out of the fragment
# archives.
segment_size = policy.ec_segment_size
fragment_size = policy.fragment_size
range_specs = []
new_ranges = []
for client_start, client_end in req.range.ranges:
# TODO: coalesce ranges that overlap segments. For
# example, "bytes=0-10,20-30,40-50" with a 64 KiB
# segment size will result in a Range header in the
# object request of "bytes=0-65535,0-65535,0-65535",
# which is wasteful. We should be smarter and only
# request that first segment once.
segment_start, segment_end = client_range_to_segment_range(
client_start, client_end, segment_size)
fragment_start, fragment_end = \
segment_range_to_fragment_range(
segment_start, segment_end,
segment_size, fragment_size)
new_ranges.append((fragment_start, fragment_end))
range_specs.append({'req_client_start': client_start,
'req_client_end': client_end,
'req_segment_start': segment_start,
'req_segment_end': segment_end,
'req_fragment_start': fragment_start,
'req_fragment_end': fragment_end})
req.range = "bytes=" + ",".join(
"%s-%s" % (s if s is not None else "",
e if e is not None else "")
for s, e in new_ranges)
return range_specs
def feed_remaining_primaries(self, safe_iter, pile, req, partition, policy,
buckets, feeder_q, logger_thread_locals):
timeout = self.app.get_policy_options(policy).concurrency_timeout
while True:
try:
feeder_q.get(timeout=timeout)
except Empty:
if safe_iter.unsafe_iter.primaries_left:
# this will run async, if it ends up taking the last
# primary we won't find out until the next pass
pile.spawn(self._fragment_GET_request,
req, safe_iter, partition,
policy, buckets.get_extra_headers,
logger_thread_locals)
else:
# ran out of primaries
break
else:
# got a stop
break
def _get_or_head_response(self, req, node_iter, partition, policy):
update_etag_is_at_header(req, "X-Object-Sysmeta-Ec-Etag")
if req.method == 'HEAD':
# no fancy EC decoding here, just one plain old HEAD request to
# one object server because all fragments hold all metadata
# information about the object.
concurrency = policy.ec_ndata \
if self.app.get_policy_options(policy).concurrent_gets else 1
resp = self.GETorHEAD_base(
req, 'Object', node_iter, partition,
req.swift_entity_path, concurrency, policy)
self._fix_response(req, resp)
return resp
# GET request
orig_range = None
range_specs = []
if req.range:
orig_range = req.range
range_specs = self._convert_range(req, policy)
safe_iter = GreenthreadSafeIterator(node_iter)
policy_options = self.app.get_policy_options(policy)
ec_request_count = policy.ec_ndata
if policy_options.concurrent_gets:
ec_request_count += policy_options.concurrent_ec_extra_requests
with ContextPool(policy.ec_n_unique_fragments) as pool:
pile = GreenAsyncPile(pool)
buckets = ECGetResponseCollection(policy)
node_iter.set_node_provider(buckets.provide_alternate_node)
for node_count in range(ec_request_count):
pile.spawn(self._fragment_GET_request,
req, safe_iter, partition,
policy, buckets.get_extra_headers,
self.logger.thread_locals)
feeder_q = None
if policy_options.concurrent_gets:
feeder_q = Queue()
pool.spawn(self.feed_remaining_primaries, safe_iter, pile, req,
partition, policy, buckets, feeder_q,
self.logger.thread_locals)
extra_requests = 0
# max_extra_requests is an arbitrary hard limit for spawning extra
# getters in case some unforeseen scenario, or a misbehaving object
# server, causes us to otherwise make endless requests e.g. if an
# object server were to ignore frag_prefs and always respond with
            # a frag that is already in a bucket. We assume a limit of at
            # most 2 * replicas should be sufficient.
max_extra_requests = (
(policy.object_ring.replica_count * 2) - policy.ec_ndata)
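            # e.g. with an illustrative replica_count of 14 and ec_ndata of
            # 10 this allows (14 * 2) - 10 = 18 extra getters at most.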
for get, parts_iter in pile:
try:
buckets.add_response(get, parts_iter)
except ValueError as err:
self.logger.error(
"Problem with fragment response: %s", err)
best_bucket = buckets.best_bucket
if best_bucket.durable and best_bucket.shortfall <= 0:
# good enough!
break
requests_available = extra_requests < max_extra_requests and (
node_iter.nodes_left > 0 or buckets.has_alternate_node())
if requests_available and (
buckets.shortfall > pile._pending or
not is_good_source(get.last_status, self.server_type)):
extra_requests += 1
pile.spawn(self._fragment_GET_request, req, safe_iter,
partition, policy, buckets.get_extra_headers,
self.logger.thread_locals)
if feeder_q:
feeder_q.put('stop')
# Put this back, since we *may* need it for kickoff()/_fix_response()
# (but note that _fix_ranges() may also pop it back off before then)
req.range = orig_range
best_bucket = buckets.choose_best_bucket()
if best_bucket.shortfall <= 0 and best_bucket.durable:
# headers can come from any of the getters
resp_headers = best_bucket.headers
resp_headers.pop('Content-Range', None)
eccl = resp_headers.get('X-Object-Sysmeta-Ec-Content-Length')
obj_length = int(eccl) if eccl is not None else None
# This is only true if we didn't get a 206 response, but
# that's the only time this is used anyway.
fa_length = int(resp_headers['Content-Length'])
app_iter = ECAppIter(
req.swift_entity_path,
policy,
[p_iter for _getter, p_iter in best_bucket.get_responses()],
range_specs, fa_length, obj_length,
self.logger)
resp = Response(
request=req,
conditional_response=True,
app_iter=app_iter)
update_headers(resp, resp_headers)
self._fix_ranges(req, resp)
try:
app_iter.kickoff(req, resp)
except HTTPException as err_resp:
# catch any HTTPException response here so that we can
# process response headers uniformly in _fix_response
resp = err_resp
else:
# TODO: we can get here if all buckets are successful but none
# have ec_ndata getters, so bad_bucket may have no gets and we will
# return a 503 when a 404 may be more appropriate. We can also get
# here with less than ec_ndata 416's and may then return a 416
# which is also questionable because a non-range get for same
# object would return 404 or 503.
statuses = []
reasons = []
bodies = []
headers = []
best_bucket.close_conns()
rebalance_missing_suppression_count = min(
policy_options.rebalance_missing_suppression_count,
node_iter.num_primary_nodes - 1)
for status, bad_bucket in buckets.bad_buckets.items():
for getter, _parts_iter in bad_bucket.get_responses():
if best_bucket.durable:
bad_resp_headers = getter.last_headers
t_data_file = bad_resp_headers.get(
'X-Backend-Data-Timestamp')
t_obj = bad_resp_headers.get(
'X-Backend-Timestamp',
bad_resp_headers.get('X-Timestamp'))
bad_ts = Timestamp(t_data_file or t_obj or '0')
if bad_ts <= best_bucket.timestamp:
# We have reason to believe there's still good data
# out there, it's just currently unavailable
continue
if getter.status:
timestamp = Timestamp(getter.last_headers.get(
'X-Backend-Timestamp',
getter.last_headers.get('X-Timestamp', 0)))
if (rebalance_missing_suppression_count > 0 and
getter.status == HTTP_NOT_FOUND and
not timestamp):
rebalance_missing_suppression_count -= 1
continue
statuses.append(getter.status)
reasons.append(getter.reason)
bodies.append(getter.body)
headers.append(getter.source_headers)
if not statuses and is_success(best_bucket.status) and \
not best_bucket.durable:
                # pretend that the non-durable bucket was a 404
statuses.append(404)
reasons.append('404 Not Found')
bodies.append(b'')
headers.append({})
resp = self.best_response(
req, statuses, reasons, bodies, 'Object',
headers=headers)
self._fix_response(req, resp)
# For sure put this back before actually returning the response
# to the rest of the pipeline, so we don't modify the client headers
req.range = orig_range
return resp
def _fix_response(self, req, resp):
# EC fragment archives each have different bytes, hence different
# etags. However, they all have the original object's etag stored in
# sysmeta, so we copy that here (if it exists) so the client gets it.
resp.headers['Etag'] = resp.headers.get('X-Object-Sysmeta-Ec-Etag')
# We're about to invoke conditional response checking so set the
# correct conditional etag from wherever X-Backend-Etag-Is-At points,
# if it exists at all.
resp._conditional_etag = resolve_etag_is_at_header(req, resp.headers)
if (is_success(resp.status_int) or is_redirection(resp.status_int) or
resp.status_int == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE):
resp.accept_ranges = 'bytes'
if is_success(resp.status_int):
resp.headers['Content-Length'] = resp.headers.get(
'X-Object-Sysmeta-Ec-Content-Length')
resp.fix_conditional_response()
if resp.status_int == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE:
resp.headers['Content-Range'] = 'bytes */%s' % resp.headers[
'X-Object-Sysmeta-Ec-Content-Length']
ec_headers = [header for header in resp.headers
if header.lower().startswith('x-object-sysmeta-ec-')]
for header in ec_headers:
# clients (including middlewares) shouldn't need to care about
# this implementation detail
del resp.headers[header]
def _fix_ranges(self, req, resp):
# Has to be called *before* kickoff()!
if is_success(resp.status_int):
ignore_range_headers = set(
h.strip().lower()
for h in req.headers.get(
'X-Backend-Ignore-Range-If-Metadata-Present',
'').split(','))
if ignore_range_headers.intersection(
h.lower() for h in resp.headers):
# If we leave the Range header around, swob (or somebody) will
# try to "fix" things for us when we kickoff() the app_iter.
req.headers.pop('Range', None)
resp.app_iter.range_specs = []
def _make_putter(self, node, part, req, headers):
return MIMEPutter.connect(
node, part, req.swift_entity_path, headers, self.app.watchdog,
conn_timeout=self.app.conn_timeout,
node_timeout=self.app.node_timeout,
write_timeout=self.app.node_timeout,
send_exception_handler=self.app.exception_occurred,
logger=self.logger,
need_multiphase=True)
def _determine_chunk_destinations(self, putters, policy):
"""
Given a list of putters, return a dict where the key is the putter
and the value is the frag index to use.
This is done so that we line up handoffs using the same frag index
(in the primary part list) as the primary that the handoff is standing
in for. This lets erasure-code fragment archives wind up on the
preferred local primary nodes when possible.
:param putters: a list of swift.proxy.controllers.obj.MIMEPutter
            instances
:param policy: A policy instance which should be one of ECStoragePolicy
"""
# Give each putter a "frag index": the index of the
# transformed chunk that we'll send to it.
#
# For primary nodes, that's just its index (primary 0 gets
# chunk 0, primary 1 gets chunk 1, and so on). For handoffs,
# we assign the chunk index of a missing primary.
handoff_conns = []
putter_to_frag_index = {}
for p in putters:
if p.node_index is not None:
putter_to_frag_index[p] = policy.get_backend_index(
p.node_index)
else:
handoff_conns.append(p)
# Note: we may have more holes than handoffs. This is okay; it
# just means that we failed to connect to one or more storage
# nodes. Holes occur when a storage node is down, in which
# case the connection is not replaced, and when a storage node
# returns 507, in which case a handoff is used to replace it.
        # lack_list is a dict of lists used to keep hole indexes
# e.g. if we have 2 holes for frag index 0 with ec_duplication_factor=2
# lack_list is like {0: [0], 1: [0]}, and then, if 1 hole found
# for frag index 1, lack_list will be {0: [0, 1], 1: [0]}.
# After that, holes will be filled from bigger key
# (i.e. 1:[0] at first)
# Grouping all missing fragment indexes for each frag_index
available_indexes = list(putter_to_frag_index.values())
lack_list = collections.defaultdict(list)
for frag_index in range(policy.ec_n_unique_fragments):
# Set the missing index to lack_list
available_count = available_indexes.count(frag_index)
# N.B. it should be duplication_factor >= lack >= 0
lack = policy.ec_duplication_factor - available_count
# now we are missing one or more nodes to store the frag index
for lack_tier in range(lack):
lack_list[lack_tier].append(frag_index)
# Extract the lack_list to a flat list
holes = []
for lack_tier, indexes in sorted(lack_list.items(), reverse=True):
holes.extend(indexes)
# Fill putter_to_frag_index list with the hole list
for hole, p in zip(holes, handoff_conns):
putter_to_frag_index[p] = hole
return putter_to_frag_index
def _transfer_data(self, req, policy, data_source, putters, nodes,
min_conns, etag_hasher):
"""
Transfer data for an erasure coded object.
This method was added in the PUT method extraction change
"""
bytes_transferred = 0
chunk_transform = chunk_transformer(policy)
chunk_transform.send(None)
frag_hashers = collections.defaultdict(
lambda: md5(usedforsecurity=False))
def send_chunk(chunk):
            # Note: there are two different hashers in here. etag_hasher is
            # hashing the original object so that we can validate the ETag
            # that the client sent (and etag_hasher is None if the client
            # didn't send one). The hasher in frag_hashers is hashing the
            # fragment archive being sent to the object server; this lets us
            # guard against data corruption on the network between proxy and
# object server.
if etag_hasher:
etag_hasher.update(chunk)
backend_chunks = chunk_transform.send(chunk)
if backend_chunks is None:
# If there's not enough bytes buffered for erasure-encoding
# or whatever we're doing, the transform will give us None.
return
updated_frag_indexes = set()
timeout_at = time.time() + self.app.node_timeout
for putter in list(putters):
frag_index = putter_to_frag_index[putter]
backend_chunk = backend_chunks[frag_index]
if not putter.failed:
# N.B. same frag_index will appear when using
                    # ec_duplication_factor >= 2, so skip feeding the chunk
                    # to the hasher if the frag was already updated.
if frag_index not in updated_frag_indexes:
frag_hashers[frag_index].update(backend_chunk)
updated_frag_indexes.add(frag_index)
putter.send_chunk(backend_chunk, timeout_at=timeout_at)
else:
putter.close()
putters.remove(putter)
self._check_min_conn(
req, putters, min_conns,
msg='Object PUT exceptions during send, '
'%(conns)s/%(nodes)s required connections')
try:
# build our putter_to_frag_index dict to place handoffs in the
# same part nodes index as the primaries they are covering
putter_to_frag_index = self._determine_chunk_destinations(
putters, policy)
data_source = CooperativeIterator(data_source)
while True:
with WatchdogTimeout(self.app.watchdog,
self.app.client_timeout,
ChunkReadTimeout):
try:
chunk = next(data_source)
except StopIteration:
break
bytes_transferred += len(chunk)
if bytes_transferred > constraints.MAX_FILE_SIZE:
raise HTTPRequestEntityTooLarge(request=req)
send_chunk(chunk)
ml = req.message_length()
if ml and bytes_transferred < ml:
self.logger.warning(
'Client disconnected without sending enough data')
self.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)
send_chunk(b'') # flush out any buffered data
computed_etag = (etag_hasher.hexdigest()
if etag_hasher else None)
footers = self._get_footers(req)
received_etag = normalize_etag(footers.get(
'etag', req.headers.get('etag', '')))
if (computed_etag and received_etag and
computed_etag != received_etag):
raise HTTPUnprocessableEntity(request=req)
# Remove any EC reserved metadata names from footers
footers = {(k, v) for k, v in footers.items()
if not k.lower().startswith('x-object-sysmeta-ec-')}
for putter in putters:
frag_index = putter_to_frag_index[putter]
# Update any footers set by middleware with EC footers
trail_md = trailing_metadata(
policy, etag_hasher,
bytes_transferred, frag_index)
trail_md.update(footers)
# Etag footer must always be hash of what we sent
trail_md['Etag'] = frag_hashers[frag_index].hexdigest()
putter.end_of_object_data(footer_metadata=trail_md)
# for storage policies requiring 2-phase commit (e.g.
# erasure coding), enforce >= 'quorum' number of
# 100-continue responses - this indicates successful
# object data and metadata commit and is a necessary
# condition to be met before starting 2nd PUT phase
final_phase = False
statuses, reasons, bodies, _junk = \
self._get_put_responses(
req, putters, len(nodes), final_phase=final_phase,
min_responses=min_conns)
if not self.have_quorum(
statuses, len(nodes), quorum=min_conns):
self.logger.error(
'Not enough object servers ack\'ed (got %d)',
statuses.count(HTTP_CONTINUE))
raise HTTPServiceUnavailable(request=req)
elif not self._have_adequate_informational(
statuses, min_conns):
resp = self.best_response(req, statuses, reasons, bodies,
'Object PUT',
quorum_size=min_conns)
if is_client_error(resp.status_int):
# if 4xx occurred in this state it is absolutely
# a bad conversation between proxy-server and
# object-server (even if it's
# HTTP_UNPROCESSABLE_ENTITY) so we should regard this
# as HTTPServiceUnavailable.
raise HTTPServiceUnavailable(request=req)
else:
# Other errors should use raw best_response
raise resp
# quorum achieved, start 2nd phase - send commit
# confirmation to participating object servers
# so they write a .durable state file indicating
# a successful PUT
for putter in putters:
putter.send_commit_confirmation()
except ChunkReadTimeout as err:
self.logger.warning(
'ERROR Client read timeout (%ss)', err.seconds)
self.logger.increment('client_timeouts')
raise HTTPRequestTimeout(request=req)
except ChunkReadError:
self.logger.warning(
'Client disconnected without sending last chunk')
self.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)
except HTTPException:
raise
except Timeout:
self.logger.exception(
'ERROR Exception causing client disconnect')
raise HTTPClientDisconnect(request=req)
except Exception:
self.logger.exception(
'ERROR Exception transferring data to object servers %s',
{'path': req.path})
raise HTTPInternalServerError(request=req)
def _have_adequate_responses(
self, statuses, min_responses, conditional_func):
"""
Given a list of statuses from several requests, determine if a
        satisfactory number of nodes have responded with 1xx or 2xx statuses
        such that a successful response can be returned to the client.
:param statuses: list of statuses returned so far
:param min_responses: minimal pass criterion for number of successes
:param conditional_func: a callable function to check http status code
:returns: True or False, depending on current number of successes
"""
if sum(1 for s in statuses if (conditional_func(s))) >= min_responses:
return True
return False
def _have_adequate_successes(self, statuses, min_responses):
"""
        Partial application of _have_adequate_responses for 2xx statuses.
"""
return self._have_adequate_responses(
statuses, min_responses, is_success)
def _have_adequate_informational(self, statuses, min_responses):
"""
        Partial application of _have_adequate_responses for 1xx statuses.
"""
return self._have_adequate_responses(
statuses, min_responses, is_informational)
def _have_adequate_put_responses(self, statuses, num_nodes, min_responses):
# For an EC PUT we require a quorum of responses with success statuses
# in order to move on to next phase of PUT request handling without
# having to wait for *all* responses.
# TODO: this implies that in the first phase of the backend PUTs when
# we are actually expecting 1xx responses that we will end up waiting
# for *all* responses. That seems inefficient since we only need a
# quorum of 1xx responses to proceed.
return self._have_adequate_successes(statuses, min_responses)
def _store_object(self, req, data_source, nodes, partition,
outgoing_headers):
"""
Store an erasure coded object.
"""
policy_index = int(req.headers.get('X-Backend-Storage-Policy-Index'))
policy = POLICIES.get_by_index(policy_index)
expected_frag_size = None
ml = req.message_length()
if ml:
            # TODO: PyECLib <= 1.2.0 may return segment info that differs
            # from the input for aligned data efficiency but
# Swift never does. So calculate the fragment length Swift
# will actually send to object server by making two different
# get_segment_info calls (until PyECLib fixed).
# policy.fragment_size makes the call using segment size,
# and the next call is to get info for the last segment
# get number of fragments except the tail - use truncation //
num_fragments = ml // policy.ec_segment_size
expected_frag_size = policy.fragment_size * num_fragments
# calculate the tail fragment_size by hand and add it to
# expected_frag_size
last_segment_size = ml % policy.ec_segment_size
if last_segment_size:
last_info = policy.pyeclib_driver.get_segment_info(
last_segment_size, policy.ec_segment_size)
expected_frag_size += last_info['fragment_size']
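            # Illustrative example: if the client body is 2.5 segments long,
            # expected_frag_size is 2 * policy.fragment_size plus whatever
            # fragment size pyeclib reports for the final half segment.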
for headers in outgoing_headers:
headers['X-Backend-Obj-Content-Length'] = expected_frag_size
# the object server will get different bytes, so these
# values do not apply.
headers.pop('Content-Length', None)
headers.pop('Etag', None)
# Since the request body sent from client -> proxy is not
# the same as the request body sent proxy -> object, we
# can't rely on the object-server to do the etag checking -
# so we have to do it here.
etag_hasher = md5(usedforsecurity=False)
min_conns = policy.quorum
putters = self._get_put_connections(
req, nodes, partition, outgoing_headers, policy)
try:
# check that a minimum number of connections were established and
# meet all the correct conditions set in the request
self._check_failure_put_connections(putters, req, min_conns)
self._transfer_data(req, policy, data_source, putters,
nodes, min_conns, etag_hasher)
# The durable state will propagate in a replicated fashion; if
# one fragment is durable then the reconstructor will spread the
# durable status around.
# In order to avoid successfully writing an object, but refusing
            # to serve it on a subsequent GET because we don't have enough
# durable data fragments - we require the same number of durable
# writes as quorum fragment writes. If object servers are in the
# future able to serve their non-durable fragment archives we may
# be able to reduce this quorum count if needed.
# ignore response etags
statuses, reasons, bodies, _etags = \
self._get_put_responses(req, putters, len(nodes),
final_phase=True,
min_responses=min_conns)
except HTTPException as resp:
return resp
finally:
for putter in putters:
putter.close()
etag = etag_hasher.hexdigest()
resp = self.best_response(req, statuses, reasons, bodies,
'Object PUT', etag=etag,
quorum_size=min_conns)
resp.last_modified = Timestamp(req.headers['X-Timestamp']).ceil()
return resp
| swift-master | swift/proxy/controllers/obj.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from time import time
from swift.common.utils import public, streq_const_time
from swift.common.digest import get_hmac
from swift.common.registry import get_swift_info
from swift.proxy.controllers.base import Controller, delay_denial
from swift.common.swob import HTTPOk, HTTPForbidden, HTTPUnauthorized
class InfoController(Controller):
"""WSGI controller for info requests"""
server_type = 'Info'
def __init__(self, app, version, expose_info, disallowed_sections,
admin_key):
super(InfoController, self).__init__(app)
self.expose_info = expose_info
self.disallowed_sections = disallowed_sections
self.admin_key = admin_key
self.allowed_hmac_methods = {
'HEAD': ['HEAD', 'GET'],
'GET': ['GET']}
@public
@delay_denial
def GET(self, req):
return self.GETorHEAD(req)
@public
@delay_denial
def HEAD(self, req):
return self.GETorHEAD(req)
@public
@delay_denial
def OPTIONS(self, req):
return HTTPOk(request=req, headers={'Allow': 'HEAD, GET, OPTIONS'})
def GETorHEAD(self, req):
"""Handler for HTTP GET/HEAD requests."""
"""
Handles requests to /info
Should return a WSGI-style callable (such as swob.Response).
:param req: swob.Request object
"""
if not self.expose_info:
return HTTPForbidden(request=req)
admin_request = False
sig = req.params.get('swiftinfo_sig', '')
expires = req.params.get('swiftinfo_expires', '')
if sig != '' or expires != '':
admin_request = True
if not self.admin_key:
return HTTPForbidden(request=req)
try:
expires = int(expires)
except ValueError:
return HTTPUnauthorized(request=req)
if expires < time():
return HTTPUnauthorized(request=req)
valid_sigs = []
for method in self.allowed_hmac_methods[req.method]:
valid_sigs.append(get_hmac(method,
'/info',
expires,
self.admin_key))
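            # A client is expected to have computed its signature the same
            # way, e.g. (illustrative): get_hmac('GET', '/info', expires,
            # admin_key), using the admin_key configured on this proxy.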
# While it's true that any() will short-circuit, this doesn't
# affect the timing-attack resistance since the only way this will
# short-circuit is when a valid signature is passed in.
is_valid_hmac = any(streq_const_time(valid_sig, sig)
for valid_sig in valid_sigs)
if not is_valid_hmac:
return HTTPUnauthorized(request=req)
headers = {}
if 'Origin' in req.headers:
headers['Access-Control-Allow-Origin'] = req.headers['Origin']
headers['Access-Control-Expose-Headers'] = ', '.join(
['x-trans-id'])
info = json.dumps(get_swift_info(
admin=admin_request, disallowed_sections=self.disallowed_sections))
return HTTPOk(request=req,
headers=headers,
body=info.encode('ascii'),
content_type='application/json; charset=UTF-8')
| swift-master | swift/proxy/controllers/info.py |
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: swift_conn
# You'll see swift_conn passed around a few places in this file. This is the
# source bufferedhttp connection of whatever it is attached to.
# It is used when early termination of reading from the connection should
# happen, such as when a range request is satisfied but there's still more the
# source connection would like to send. To prevent having to read all the data
# that could be left, the source connection can be .close() and then reads
# commence to empty out any buffers.
# These shenanigans are to ensure all related objects can be garbage
# collected. We've seen objects hang around forever otherwise.
from six.moves.urllib.parse import quote
import time
import json
import functools
import inspect
import itertools
import operator
import random
from copy import deepcopy
from sys import exc_info
from eventlet.timeout import Timeout
import six
from swift.common.wsgi import make_pre_authed_env, make_pre_authed_request
from swift.common.utils import Timestamp, WatchdogTimeout, config_true_value, \
public, split_path, list_from_csv, GreenthreadSafeIterator, \
GreenAsyncPile, quorum_size, parse_content_type, drain_and_close, \
document_iters_to_http_response_body, ShardRange, cache_from_env, \
MetricsPrefixLoggerAdapter, CooperativeIterator
from swift.common.bufferedhttp import http_connect
from swift.common import constraints
from swift.common.exceptions import ChunkReadTimeout, ChunkWriteTimeout, \
ConnectionTimeout, RangeAlreadyComplete, ShortReadError
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.http import is_informational, is_success, is_redirection, \
is_server_error, HTTP_OK, HTTP_PARTIAL_CONTENT, HTTP_MULTIPLE_CHOICES, \
HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVICE_UNAVAILABLE, \
HTTP_UNAUTHORIZED, HTTP_CONTINUE, HTTP_GONE, \
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE
from swift.common.swob import Request, Response, Range, \
HTTPException, HTTPRequestedRangeNotSatisfiable, HTTPServiceUnavailable, \
status_map, wsgi_to_str, str_to_wsgi, wsgi_quote, wsgi_unquote, \
normalize_etag
from swift.common.request_helpers import strip_sys_meta_prefix, \
strip_user_meta_prefix, is_user_meta, is_sys_meta, is_sys_or_user_meta, \
http_response_to_document_iters, is_object_transient_sysmeta, \
strip_object_transient_sysmeta_prefix, get_ip_port, get_user_meta_prefix, \
get_sys_meta_prefix, is_use_replication_network
from swift.common.storage_policy import POLICIES
DEFAULT_RECHECK_ACCOUNT_EXISTENCE = 60 # seconds
DEFAULT_RECHECK_CONTAINER_EXISTENCE = 60 # seconds
DEFAULT_RECHECK_UPDATING_SHARD_RANGES = 3600 # seconds
DEFAULT_RECHECK_LISTING_SHARD_RANGES = 600 # seconds
def update_headers(response, headers):
"""
Helper function to update headers in the response.
:param response: swob.Response object
:param headers: dictionary headers
"""
if hasattr(headers, 'items'):
headers = headers.items()
for name, value in headers:
if name.lower() == 'etag':
response.headers[name] = value.replace('"', '')
elif name.lower() not in (
'date', 'content-length', 'content-type',
'connection', 'x-put-timestamp', 'x-delete-after'):
response.headers[name] = value
def delay_denial(func):
"""
Decorator to declare which methods should have any swift.authorize call
delayed. This is so the method can load the Request object up with
additional information that may be needed by the authorization system.
:param func: function for which authorization will be delayed
"""
func.delay_denial = True
return func
def _prep_headers_to_info(headers, server_type):
"""
Helper method that iterates once over a dict of headers,
converting all keys to lower case and separating
into subsets containing user metadata, system metadata
and other headers.
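    For example, with server_type 'account' a header such as
    'X-Account-Meta-Temp-URL-Key' is returned in the user metadata dict
    under the key 'temp-url-key', 'X-Account-Sysmeta-Foo' in the sysmeta
    dict under 'foo', and anything else (e.g. 'X-Account-Object-Count')
    in the dict of other headers.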
"""
meta = {}
sysmeta = {}
other = {}
for key, val in dict(headers).items():
lkey = wsgi_to_str(key).lower()
val = wsgi_to_str(val) if isinstance(val, str) else val
if is_user_meta(server_type, lkey):
meta[strip_user_meta_prefix(server_type, lkey)] = val
elif is_sys_meta(server_type, lkey):
sysmeta[strip_sys_meta_prefix(server_type, lkey)] = val
else:
other[lkey] = val
return other, meta, sysmeta
def headers_to_account_info(headers, status_int=HTTP_OK):
"""
Construct a cacheable dict of account info based on response headers.
"""
headers, meta, sysmeta = _prep_headers_to_info(headers, 'account')
account_info = {
'status': status_int,
# 'container_count' anomaly:
# Previous code sometimes expects an int sometimes a string
# Current code aligns to str and None, yet translates to int in
# deprecated functions as needed
'container_count': headers.get('x-account-container-count'),
'total_object_count': headers.get('x-account-object-count'),
'bytes': headers.get('x-account-bytes-used'),
'storage_policies': {policy.idx: {
'container_count': int(headers.get(
'x-account-storage-policy-{}-container-count'.format(
policy.name), 0)),
'object_count': int(headers.get(
'x-account-storage-policy-{}-object-count'.format(
policy.name), 0)),
'bytes': int(headers.get(
'x-account-storage-policy-{}-bytes-used'.format(
policy.name), 0))}
for policy in POLICIES
},
'meta': meta,
'sysmeta': sysmeta,
}
if is_success(status_int):
account_info['account_really_exists'] = not config_true_value(
headers.get('x-backend-fake-account-listing'))
return account_info
def headers_to_container_info(headers, status_int=HTTP_OK):
"""
Construct a cacheable dict of container info based on response headers.
"""
headers, meta, sysmeta = _prep_headers_to_info(headers, 'container')
return {
'status': status_int,
'read_acl': headers.get('x-container-read'),
'write_acl': headers.get('x-container-write'),
'sync_to': headers.get('x-container-sync-to'),
'sync_key': headers.get('x-container-sync-key'),
'object_count': headers.get('x-container-object-count'),
'bytes': headers.get('x-container-bytes-used'),
'versions': headers.get('x-versions-location'),
'storage_policy': headers.get('x-backend-storage-policy-index', '0'),
'cors': {
'allow_origin': meta.get('access-control-allow-origin'),
'expose_headers': meta.get('access-control-expose-headers'),
'max_age': meta.get('access-control-max-age')
},
'meta': meta,
'sysmeta': sysmeta,
'sharding_state': headers.get('x-backend-sharding-state', 'unsharded'),
# the 'internal' format version of timestamps is cached since the
# normal format can be derived from this when required
'created_at': headers.get('x-backend-timestamp'),
'put_timestamp': headers.get('x-backend-put-timestamp'),
'delete_timestamp': headers.get('x-backend-delete-timestamp'),
'status_changed_at': headers.get('x-backend-status-changed-at'),
}
def headers_from_container_info(info):
"""
Construct a HeaderKeyDict from a container info dict.
:param info: a dict of container metadata
:returns: a HeaderKeyDict or None if info is None or any required headers
could not be constructed
"""
if not info:
return None
required = (
('x-backend-timestamp', 'created_at'),
('x-backend-put-timestamp', 'put_timestamp'),
('x-backend-delete-timestamp', 'delete_timestamp'),
('x-backend-status-changed-at', 'status_changed_at'),
('x-backend-storage-policy-index', 'storage_policy'),
('x-container-object-count', 'object_count'),
('x-container-bytes-used', 'bytes'),
('x-backend-sharding-state', 'sharding_state'),
)
required_normal_format_timestamps = (
('x-timestamp', 'created_at'),
('x-put-timestamp', 'put_timestamp'),
)
optional = (
('x-container-read', 'read_acl'),
('x-container-write', 'write_acl'),
('x-container-sync-key', 'sync_key'),
('x-container-sync-to', 'sync_to'),
('x-versions-location', 'versions'),
)
cors_optional = (
('access-control-allow-origin', 'allow_origin'),
('access-control-expose-headers', 'expose_headers'),
('access-control-max-age', 'max_age')
)
def lookup(info, key):
# raises KeyError or ValueError
val = info[key]
if val is None:
raise ValueError
return val
# note: required headers may be missing from info for example during
# upgrade when stale info is still in cache
headers = HeaderKeyDict()
for hdr, key in required:
try:
headers[hdr] = lookup(info, key)
except (KeyError, ValueError):
return None
for hdr, key in required_normal_format_timestamps:
try:
headers[hdr] = Timestamp(lookup(info, key)).normal
except (KeyError, ValueError):
return None
for hdr, key in optional:
try:
headers[hdr] = lookup(info, key)
except (KeyError, ValueError):
pass
policy_index = info.get('storage_policy')
headers['x-storage-policy'] = POLICIES[int(policy_index)].name
prefix = get_user_meta_prefix('container')
headers.update(
(prefix + k, v)
for k, v in info.get('meta', {}).items())
for hdr, key in cors_optional:
try:
headers[prefix + hdr] = lookup(info.get('cors'), key)
except (KeyError, ValueError):
pass
prefix = get_sys_meta_prefix('container')
headers.update(
(prefix + k, v)
for k, v in info.get('sysmeta', {}).items())
return headers
def headers_to_object_info(headers, status_int=HTTP_OK):
"""
Construct a cacheable dict of object info based on response headers.
"""
headers, meta, sysmeta = _prep_headers_to_info(headers, 'object')
transient_sysmeta = {}
for key, val in headers.items():
if is_object_transient_sysmeta(key):
key = strip_object_transient_sysmeta_prefix(key.lower())
transient_sysmeta[key] = val
info = {'status': status_int,
'length': headers.get('content-length'),
'type': headers.get('content-type'),
'etag': headers.get('etag'),
'meta': meta,
'sysmeta': sysmeta,
'transient_sysmeta': transient_sysmeta
}
return info
def cors_validation(func):
"""
Decorator to check if the request is a CORS request and if so, if it's
valid.
:param func: function to check
"""
@functools.wraps(func)
def wrapped(*a, **kw):
controller = a[0]
req = a[1]
# The logic here was interpreted from
# http://www.w3.org/TR/cors/#resource-requests
# Is this a CORS request?
req_origin = req.headers.get('Origin', None)
if req_origin:
# Yes, this is a CORS request so test if the origin is allowed
container_info = \
controller.container_info(controller.account_name,
controller.container_name, req)
cors_info = container_info.get('cors', {})
# Call through to the decorated method
resp = func(*a, **kw)
if controller.app.strict_cors_mode and \
not controller.is_origin_allowed(cors_info, req_origin):
return resp
# Expose,
# - simple response headers,
# http://www.w3.org/TR/cors/#simple-response-header
# - swift specific: etag, x-timestamp, x-trans-id
# - headers provided by the operator in cors_expose_headers
# - user metadata headers
# - headers provided by the user in
# x-container-meta-access-control-expose-headers
if 'Access-Control-Expose-Headers' not in resp.headers:
expose_headers = set([
'cache-control', 'content-language', 'content-type',
'expires', 'last-modified', 'pragma', 'etag',
'x-timestamp', 'x-trans-id', 'x-openstack-request-id'])
expose_headers.update(controller.app.cors_expose_headers)
for header in resp.headers:
if header.startswith('X-Container-Meta') or \
header.startswith('X-Object-Meta'):
expose_headers.add(header.lower())
if cors_info.get('expose_headers'):
expose_headers = expose_headers.union(
[header_line.strip().lower()
for header_line in
cors_info['expose_headers'].split(' ')
if header_line.strip()])
resp.headers['Access-Control-Expose-Headers'] = \
', '.join(expose_headers)
# The user agent won't process the response if the Allow-Origin
# header isn't included
if 'Access-Control-Allow-Origin' not in resp.headers:
if cors_info['allow_origin'] and \
cors_info['allow_origin'].strip() == '*':
resp.headers['Access-Control-Allow-Origin'] = '*'
else:
resp.headers['Access-Control-Allow-Origin'] = req_origin
if 'Vary' in resp.headers:
resp.headers['Vary'] += ', Origin'
else:
resp.headers['Vary'] = 'Origin'
return resp
else:
# Not a CORS request so make the call as normal
return func(*a, **kw)
return wrapped
def get_object_info(env, app, path=None, swift_source=None):
"""
Get the info structure for an object, based on env and app.
This is useful to middlewares.
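    For example, a middleware handling a request for
    ``/v1/AUTH_acct/cont/obj`` could call something like
    ``get_object_info(req.environ, self.app)`` (using its own request
    environ and app reference) and read fields such as ``info['length']``,
    ``info['type']`` and ``info['etag']`` from the returned dict.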
.. note::
This call bypasses auth. Success does not imply that the request has
authorization to the object.
"""
(version, account, container, obj) = \
split_path(path or env['PATH_INFO'], 4, 4, True)
info = _get_object_info(app, env, account, container, obj,
swift_source=swift_source)
if info:
info = deepcopy(info)
else:
info = headers_to_object_info({}, 0)
for field in ('length',):
if info.get(field) is None:
info[field] = 0
else:
info[field] = int(info[field])
return info
def _record_ac_info_cache_metrics(
app, cache_state, container=None, resp=None):
"""
Record a single cache operation by account or container lookup into its
corresponding metrics.
:param app: the application object
    :param cache_state: the state of this cache operation: one of
        'infocache_hit', memcache 'hit', 'miss', 'error', 'skip',
        'force_skip' or 'disabled'.
:param container: the container name
:param resp: the response from either backend or cache hit.
"""
try:
proxy_app = app._pipeline_final_app
except AttributeError:
logger = None
else:
logger = proxy_app.logger
op_type = 'container.info' if container else 'account.info'
if logger:
record_cache_op_metrics(logger, op_type, cache_state, resp)
def get_container_info(env, app, swift_source=None, cache_only=False):
"""
Get the info structure for a container, based on env and app.
This is useful to middlewares.
:param env: the environment used by the current request
:param app: the application object
:param swift_source: Used to mark the request as originating out of
middleware. Will be logged in proxy logs.
:param cache_only: If true, indicates that caller doesn't want to HEAD the
backend container when cache miss.
    :returns: the container info
.. note::
This call bypasses auth. Success does not imply that the request has
authorization to the container.
"""
(version, wsgi_account, wsgi_container, unused) = \
split_path(env['PATH_INFO'], 3, 4, True)
if not constraints.valid_api_version(version):
# Not a valid Swift request; return 0 like we do
# if there's an account failure
return headers_to_container_info({}, 0)
account = wsgi_to_str(wsgi_account)
container = wsgi_to_str(wsgi_container)
# Try to cut through all the layers to the proxy app
# (while also preserving logging)
try:
logged_app = app._pipeline_request_logging_app
proxy_app = app._pipeline_final_app
except AttributeError:
logged_app = proxy_app = app
# Check in environment cache and in memcache (in that order)
info, cache_state = _get_info_from_caches(
proxy_app, env, account, container)
resp = None
if not info and not cache_only:
# Cache miss; go HEAD the container and populate the caches
env.setdefault('swift.infocache', {})
# Before checking the container, make sure the account exists.
#
# If it is an autocreateable account, just assume it exists; don't
# HEAD the account, as a GET or HEAD response for an autocreateable
# account is successful whether the account actually has .db files
# on disk or not.
is_autocreate_account = account.startswith(
getattr(proxy_app, 'auto_create_account_prefix',
constraints.AUTO_CREATE_ACCOUNT_PREFIX))
if not is_autocreate_account:
account_info = get_account_info(env, logged_app, swift_source)
if not account_info or not is_success(account_info['status']):
_record_ac_info_cache_metrics(
logged_app, cache_state, container)
return headers_to_container_info({}, 0)
req = _prepare_pre_auth_info_request(
env, ("/%s/%s/%s" % (version, wsgi_account, wsgi_container)),
(swift_source or 'GET_CONTAINER_INFO'))
# *Always* allow reserved names for get-info requests -- it's on the
# caller to keep the result private-ish
req.headers['X-Backend-Allow-Reserved-Names'] = 'true'
resp = req.get_response(logged_app)
drain_and_close(resp)
# Check in infocache to see if the proxy (or anyone else) already
# populated the cache for us. If they did, just use what's there.
#
# See similar comment in get_account_info() for justification.
info = _get_info_from_infocache(env, account, container)
if info is None:
info = set_info_cache(env, account, container, resp)
if info:
info = deepcopy(info) # avoid mutating what's in swift.infocache
else:
status_int = 0 if cache_only else 503
info = headers_to_container_info({}, status_int)
# Old data format in memcache immediately after a Swift upgrade; clean
# it up so consumers of get_container_info() aren't exposed to it.
if 'object_count' not in info and 'container_size' in info:
info['object_count'] = info.pop('container_size')
for field in ('storage_policy', 'bytes', 'object_count'):
if info.get(field) is None:
info[field] = 0
else:
info[field] = int(info[field])
if info.get('sharding_state') is None:
info['sharding_state'] = 'unsharded'
versions_cont = info.get('sysmeta', {}).get('versions-container', '')
if versions_cont:
versions_cont = wsgi_unquote(str_to_wsgi(
versions_cont)).split('/')[0]
versions_req = _prepare_pre_auth_info_request(
env, ("/%s/%s/%s" % (version, wsgi_account, versions_cont)),
(swift_source or 'GET_CONTAINER_INFO'))
versions_req.headers['X-Backend-Allow-Reserved-Names'] = 'true'
versions_info = get_container_info(versions_req.environ, app)
info['bytes'] = info['bytes'] + versions_info['bytes']
_record_ac_info_cache_metrics(logged_app, cache_state, container, resp)
return info
def get_account_info(env, app, swift_source=None):
"""
Get the info structure for an account, based on env and app.
This is useful to middlewares.
.. note::
This call bypasses auth. Success does not imply that the request has
authorization to the account.
:raises ValueError: when path doesn't contain an account
"""
(version, wsgi_account, _junk) = split_path(env['PATH_INFO'], 2, 3, True)
if not constraints.valid_api_version(version):
return headers_to_account_info({}, 0)
account = wsgi_to_str(wsgi_account)
# Try to cut through all the layers to the proxy app
# (while also preserving logging)
try:
app = app._pipeline_request_logging_app
except AttributeError:
pass
# Check in environment cache and in memcache (in that order)
info, cache_state = _get_info_from_caches(app, env, account)
# Cache miss; go HEAD the account and populate the caches
if info:
resp = None
else:
env.setdefault('swift.infocache', {})
req = _prepare_pre_auth_info_request(
env, "/%s/%s" % (version, wsgi_account),
(swift_source or 'GET_ACCOUNT_INFO'))
# *Always* allow reserved names for get-info requests -- it's on the
# caller to keep the result private-ish
req.headers['X-Backend-Allow-Reserved-Names'] = 'true'
resp = req.get_response(app)
drain_and_close(resp)
# Check in infocache to see if the proxy (or anyone else) already
# populated the cache for us. If they did, just use what's there.
#
# The point of this is to avoid setting the value in memcached
# twice. Otherwise, we're needlessly sending requests across the
# network.
#
# If the info didn't make it into the cache, we'll compute it from
# the response and populate the cache ourselves.
#
# Note that this is taking "exists in infocache" to imply "exists in
# memcache". That's because we're trying to avoid superfluous
# network traffic, and checking in memcache prior to setting in
# memcache would defeat the purpose.
info = _get_info_from_infocache(env, account)
if info is None:
info = set_info_cache(env, account, None, resp)
if info:
info = info.copy() # avoid mutating what's in swift.infocache
else:
info = headers_to_account_info({}, 503)
for field in ('container_count', 'bytes', 'total_object_count'):
if info.get(field) is None:
info[field] = 0
else:
info[field] = int(info[field])
_record_ac_info_cache_metrics(app, cache_state, container=None, resp=resp)
return info
def get_cache_key(account, container=None, obj=None, shard=None):
"""
Get the keys for both memcache and env['swift.infocache'] (cache_key)
where info about accounts, containers, and objects is cached
:param account: The name of the account
:param container: The name of the container (or None if account)
:param obj: The name of the object (or None if account or container)
:param shard: Sharding state for the container query; typically 'updating'
or 'listing' (Requires account and container; cannot use
with obj)
:returns: a (native) string cache_key
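    For example, get_cache_key('AUTH_test') gives 'account/AUTH_test',
    get_cache_key('AUTH_test', 'c1') gives 'container/AUTH_test/c1',
    get_cache_key('AUTH_test', 'c1', 'o1') gives 'object/AUTH_test/c1/o1',
    and get_cache_key('AUTH_test', 'c1', shard='updating') gives
    'shard-updating-v2/AUTH_test/c1'.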
"""
if six.PY2:
def to_native(s):
if s is None or isinstance(s, str):
return s
return s.encode('utf8')
else:
def to_native(s):
if s is None or isinstance(s, str):
return s
return s.decode('utf8', 'surrogateescape')
account = to_native(account)
container = to_native(container)
obj = to_native(obj)
if shard:
if not (account and container):
raise ValueError('Shard cache key requires account and container')
if obj:
raise ValueError('Shard cache key cannot have obj')
cache_key = 'shard-%s-v2/%s/%s' % (shard, account, container)
elif obj:
if not (account and container):
raise ValueError('Object cache key requires account and container')
cache_key = 'object/%s/%s/%s' % (account, container, obj)
elif container:
if not account:
raise ValueError('Container cache key requires account')
cache_key = 'container/%s/%s' % (account, container)
else:
cache_key = 'account/%s' % account
    # Use a unique environment cache key per account and per container. This
    # allows caching both account and container info and ensures that when
    # we copy this env to form a new request, it won't accidentally reuse
    # the old container or account info
return cache_key
def set_info_cache(env, account, container, resp):
"""
Cache info in both memcache and env.
:param env: the WSGI request environment
:param account: the unquoted account name
:param container: the unquoted container name or None
:param resp: the response received or None if info cache should be cleared
:returns: the info that was placed into the cache, or None if the
request status was not in (404, 410, 2xx).
"""
cache_key = get_cache_key(account, container)
infocache = env.setdefault('swift.infocache', {})
memcache = cache_from_env(env, True)
if resp is None:
clear_info_cache(env, account, container)
return
if container:
cache_time = int(resp.headers.get(
'X-Backend-Recheck-Container-Existence',
DEFAULT_RECHECK_CONTAINER_EXISTENCE))
else:
cache_time = int(resp.headers.get(
'X-Backend-Recheck-Account-Existence',
DEFAULT_RECHECK_ACCOUNT_EXISTENCE))
if resp.status_int in (HTTP_NOT_FOUND, HTTP_GONE):
cache_time *= 0.1
elif not is_success(resp.status_int):
# If we got a response, it was unsuccessful, and it wasn't an
# "authoritative" failure, bail without touching caches.
return
if container:
info = headers_to_container_info(resp.headers, resp.status_int)
else:
info = headers_to_account_info(resp.headers, resp.status_int)
if memcache:
memcache.set(cache_key, info, time=cache_time)
infocache[cache_key] = info
return info
def set_object_info_cache(app, env, account, container, obj, resp):
"""
Cache object info in the WSGI environment, but not in memcache. Caching
in memcache would lead to cache pressure and mass evictions due to the
large number of objects in a typical Swift cluster. This is a
per-request cache only.
:param app: the application object
:param env: the environment used by the current request
:param account: the unquoted account name
:param container: the unquoted container name
:param obj: the unquoted object name
:param resp: a GET or HEAD response received from an object server, or
None if info cache should be cleared
:returns: the object info
"""
cache_key = get_cache_key(account, container, obj)
if 'swift.infocache' in env and not resp:
env['swift.infocache'].pop(cache_key, None)
return
info = headers_to_object_info(resp.headers, resp.status_int)
env.setdefault('swift.infocache', {})[cache_key] = info
return info
def clear_info_cache(env, account, container=None, shard=None):
"""
Clear the cached info in both memcache and env
:param env: the WSGI request environment
:param account: the account name
:param container: the container name if clearing info for containers, or
None
:param shard: the sharding state if clearing info for container shard
ranges, or None
"""
cache_key = get_cache_key(account, container, shard=shard)
infocache = env.setdefault('swift.infocache', {})
memcache = cache_from_env(env, True)
infocache.pop(cache_key, None)
if memcache:
memcache.delete(cache_key)
def _get_info_from_infocache(env, account, container=None):
"""
Get cached account or container information from request-environment
cache (swift.infocache).
:param env: the environment used by the current request
:param account: the account name
:param container: the container name
:returns: a dictionary of cached info on cache hit, None on miss
"""
cache_key = get_cache_key(account, container)
if 'swift.infocache' in env and cache_key in env['swift.infocache']:
return env['swift.infocache'][cache_key]
return None
def record_cache_op_metrics(
logger, op_type, cache_state, resp=None):
"""
Record a single cache operation into its corresponding metrics.
:param logger: the metrics logger
    :param op_type: the name of the operation type, e.g. 'shard_listing',
        'shard_updating', etc.
    :param cache_state: the state of this cache operation. When it's
        'infocache_hit' or a memcache 'hit', the operation succeeded and
        'resp' will be None; for all other cases, such as a memcache 'miss'
        or 'skip' that results in a request to the backend, a valid 'resp'
        is expected.
:param resp: the response from backend for all cases except cache hits.
"""
if cache_state == 'infocache_hit':
logger.increment('%s.infocache.hit' % op_type)
elif cache_state == 'hit':
# memcache hits.
logger.increment('%s.cache.hit' % op_type)
else:
# the cases of cache_state is memcache miss, error, skip, force_skip
# or disabled.
if resp:
logger.increment(
'%s.cache.%s.%d' % (op_type, cache_state, resp.status_int))
else:
# In some situation, we choose not to lookup backend after cache
# miss.
logger.increment('%s.cache.%s' % (op_type, cache_state))
def _get_info_from_memcache(app, env, account, container=None):
"""
Get cached account or container information from memcache
:param app: the application object
:param env: the environment used by the current request
:param account: the account name
:param container: the container name
:returns: a tuple of two values, the first is a dictionary of cached info
on cache hit, None on miss or if memcache is not in use; the second is
cache state.
"""
memcache = cache_from_env(env, True)
if not memcache:
return None, 'disabled'
try:
proxy_app = app._pipeline_final_app
except AttributeError:
# Only the middleware entry-points get a reference to the
# proxy-server app; if a middleware composes itself as multiple
# filters, we'll just have to choose a reasonable default
skip_chance = 0.0
else:
if container:
skip_chance = proxy_app.container_existence_skip_cache
else:
skip_chance = proxy_app.account_existence_skip_cache
cache_key = get_cache_key(account, container)
if skip_chance and random.random() < skip_chance:
info = None
cache_state = 'skip'
else:
info = memcache.get(cache_key)
cache_state = 'hit' if info else 'miss'
if info and six.PY2:
# Get back to native strings
new_info = {}
for key in info:
new_key = key.encode("utf-8") if isinstance(
key, six.text_type) else key
if isinstance(info[key], six.text_type):
new_info[new_key] = info[key].encode("utf-8")
elif isinstance(info[key], dict):
new_info[new_key] = {}
for subkey, value in info[key].items():
new_subkey = subkey.encode("utf-8") if isinstance(
subkey, six.text_type) else subkey
if isinstance(value, six.text_type):
new_info[new_key][new_subkey] = \
value.encode("utf-8")
else:
new_info[new_key][new_subkey] = value
else:
new_info[new_key] = info[key]
info = new_info
if info:
env.setdefault('swift.infocache', {})[cache_key] = info
return info, cache_state
def _get_info_from_caches(app, env, account, container=None):
"""
Get the cached info from env or memcache (if used) in that order.
Used for both account and container info.
:param app: the application object
:param env: the environment used by the current request
:returns: a tuple of (the cached info or None if not cached, cache state)
"""
info = _get_info_from_infocache(env, account, container)
if info:
cache_state = 'infocache_hit'
else:
info, cache_state = _get_info_from_memcache(
app, env, account, container)
return info, cache_state
def _prepare_pre_auth_info_request(env, path, swift_source):
"""
Prepares a pre authed request to obtain info using a HEAD.
:param env: the environment used by the current request
:param path: The unquoted, WSGI-str request path
:param swift_source: value for swift.source in WSGI environment
:returns: the pre authed request
"""
# Set the env for the pre_authed call without a query string
newenv = make_pre_authed_env(env, 'HEAD', path, agent='Swift',
query_string='', swift_source=swift_source)
    # This is a sub-request for container metadata - drop the Origin header
    # from the request so that it is not treated as a CORS request.
newenv.pop('HTTP_ORIGIN', None)
# ACLs are only shown to account owners, so let's make sure this request
# looks like it came from the account owner.
newenv['swift_owner'] = True
# Note that Request.blank expects quoted path
return Request.blank(wsgi_quote(path), environ=newenv)
def get_info(app, env, account, container=None, swift_source=None):
"""
Get info about accounts or containers
Note: This call bypasses auth. Success does not imply that the
request has authorization to the info.
:param app: the application object
:param env: the environment used by the current request
:param account: The unquoted name of the account
:param container: The unquoted name of the container (or None if account)
:param swift_source: swift source logged for any subrequests made while
retrieving the account or container info
:returns: information about the specified entity in a dictionary. See
get_account_info and get_container_info for details on what's in the
dictionary.
"""
env.setdefault('swift.infocache', {})
if container:
path = '/v1/%s/%s' % (account, container)
path_env = env.copy()
path_env['PATH_INFO'] = path
return get_container_info(path_env, app, swift_source=swift_source)
else:
# account info
path = '/v1/%s' % (account,)
path_env = env.copy()
path_env['PATH_INFO'] = path
return get_account_info(path_env, app, swift_source=swift_source)
def _get_object_info(app, env, account, container, obj, swift_source=None):
"""
Get the info about object
Note: This call bypasses auth. Success does not imply that the
request has authorization to the info.
:param app: the application object
:param env: the environment used by the current request
:param account: The unquoted, WSGI-str name of the account
:param container: The unquoted, WSGI-str name of the container
:param obj: The unquoted, WSGI-str name of the object
:returns: the cached info or None if cannot be retrieved
"""
cache_key = get_cache_key(account, container, obj)
info = env.get('swift.infocache', {}).get(cache_key)
if info:
return info
# Not in cache, let's try the object servers
path = '/v1/%s/%s/%s' % (account, container, obj)
req = _prepare_pre_auth_info_request(env, path, swift_source)
# *Always* allow reserved names for get-info requests -- it's on the
# caller to keep the result private-ish
req.headers['X-Backend-Allow-Reserved-Names'] = 'true'
resp = req.get_response(app)
# Unlike get_account_info() and get_container_info(), we don't save
# things in memcache, so we can store the info without network traffic,
# *and* the proxy doesn't cache object info for us, so there's no chance
# that the object info would be in the environment. Thus, we just
# compute the object info based on the response and stash it in
# swift.infocache.
info = set_object_info_cache(app, env, account, container, obj, resp)
return info
def close_swift_conn(src):
"""
Force close the http connection to the backend.
:param src: the response from the backend
"""
try:
# Since the backends set "Connection: close" in their response
# headers, the response object (src) is solely responsible for the
# socket. The connection object (src.swift_conn) has no references
# to the socket, so calling its close() method does nothing, and
# therefore we don't do it.
#
# Also, since calling the response's close() method might not
# close the underlying socket but only decrement some
# reference-counter, we have a special method here that really,
# really kills the underlying socket with a close() syscall.
src.nuke_from_orbit() # it's the only way to be sure
except Exception:
pass
def bytes_to_skip(record_size, range_start):
"""
Assume an object is composed of N records, where the first N-1 are all
the same size and the last is at most that large, but may be smaller.
When a range request is made, it might start with a partial record. This
must be discarded, lest the consumer get bad data. This is particularly
true of suffix-byte-range requests, e.g. "Range: bytes=-12345" where the
size of the object is unknown at the time the request is made.
This function computes the number of bytes that must be discarded to
ensure only whole records are yielded. Erasure-code decoding needs this.
This function could have been inlined, but it took enough tries to get
right that some targeted unit tests were desirable, hence its extraction.
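    For example, with 1024-byte records a range starting at byte 1536 begins
    half way through the second record, so 512 bytes must be discarded to
    align with the start of the third: bytes_to_skip(1024, 1536) == 512.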
"""
return (record_size - (range_start % record_size)) % record_size
def is_good_source(status, server_type):
"""
Indicates whether or not the request made to the backend found
what it was looking for.
    :param status: the status code of the response from the backend.
:param server_type: the type of server: 'Account', 'Container' or 'Object'.
:returns: True if the response status code is acceptable, False if not.
"""
if (server_type == 'Object' and
status == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE):
return True
return is_success(status) or is_redirection(status)
class ByteCountEnforcer(object):
"""
Enforces that successive calls to file_like.read() give at least
<nbytes> bytes before exhaustion.
If file_like fails to do so, ShortReadError is raised.
If more than <nbytes> bytes are read, we don't care.
"""
def __init__(self, file_like, nbytes):
"""
:param file_like: file-like object
:param nbytes: number of bytes expected, or None if length is unknown.
"""
self.file_like = file_like
self.nbytes = self.bytes_left = nbytes
def read(self, amt=None):
chunk = self.file_like.read(amt)
if self.bytes_left is None:
return chunk
elif len(chunk) == 0 and self.bytes_left > 0:
raise ShortReadError(
"Too few bytes; read %d, expecting %d" % (
self.nbytes - self.bytes_left, self.nbytes))
else:
self.bytes_left -= len(chunk)
return chunk
class GetterSource(object):
__slots__ = ('app', 'resp', 'node', '_parts_iter')
def __init__(self, app, resp, node):
self.app = app
self.resp = resp
self.node = node
self._parts_iter = None
@property
def timestamp(self):
"""
Provide the timestamp of the swift http response as a floating
point value. Used as a sort key.
:return: an instance of ``utils.Timestamp``
"""
return Timestamp(self.resp.getheader('x-backend-data-timestamp') or
self.resp.getheader('x-backend-timestamp') or
self.resp.getheader('x-put-timestamp') or
self.resp.getheader('x-timestamp') or 0)
@property
def parts_iter(self):
# lazy load a source response body parts iter if and when the source is
# actually read
if self.resp and not self._parts_iter:
self._parts_iter = http_response_to_document_iters(
self.resp, read_chunk_size=self.app.object_chunk_size)
return self._parts_iter
def close(self):
# Close-out the connection as best as possible.
close_swift_conn(self.resp)
class GetterBase(object):
def __init__(self, app, req, node_iter, partition, policy,
path, backend_headers, logger=None):
self.app = app
self.req = req
self.node_iter = node_iter
self.partition = partition
self.policy = policy
self.path = path
self.backend_headers = backend_headers
self.logger = logger or app.logger
self.bytes_used_from_backend = 0
self.source = None
def _find_source(self):
"""
Look for a suitable new source and if one is found then set
``self.source``.
:return: ``True`` if ``self.source`` has been updated, ``False``
otherwise.
"""
# Subclasses must implement this method
raise NotImplementedError()
def _replace_source(self, err_msg):
# _find_source can modify self.source so stash current source
old_source = self.source
if not self._find_source():
return False
self.app.error_occurred(old_source.node, err_msg)
old_source.close()
return True
def fast_forward(self, num_bytes):
"""
Will skip num_bytes into the current ranges.
        :param num_bytes: the number of bytes that have already been read on
this request. This will change the Range header
so that the next req will start where it left off.
:raises HTTPRequestedRangeNotSatisfiable: if begin + num_bytes
> end of range + 1
:raises RangeAlreadyComplete: if begin + num_bytes == end of range + 1
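        For example, if the backend Range was 'bytes=0-99' and 10 bytes have
        already been returned to the client, the header becomes
        'bytes=10-99'; a suffix range such as 'bytes=-50' becomes
        'bytes=-40'.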
"""
self.backend_headers.pop(
'X-Backend-Ignore-Range-If-Metadata-Present', None)
try:
req_range = Range(self.backend_headers.get('Range'))
except ValueError:
req_range = None
if req_range:
begin, end = req_range.ranges[0]
if begin is None:
# this is a -50 range req (last 50 bytes of file)
end -= num_bytes
if end == 0:
# we sent out exactly the first range's worth of bytes, so
# we're done with it
raise RangeAlreadyComplete()
if end < 0:
raise HTTPRequestedRangeNotSatisfiable()
else:
begin += num_bytes
if end is not None and begin == end + 1:
# we sent out exactly the first range's worth of bytes, so
# we're done with it
raise RangeAlreadyComplete()
if end is not None and begin > end:
raise HTTPRequestedRangeNotSatisfiable()
req_range.ranges = [(begin, end)] + req_range.ranges[1:]
self.backend_headers['Range'] = str(req_range)
else:
self.backend_headers['Range'] = 'bytes=%d-' % num_bytes
# Reset so if we need to do this more than once, we don't double-up
self.bytes_used_from_backend = 0
def pop_range(self):
"""
Remove the first byterange from our Range header.
This is used after a byterange has been completely sent to the
client; this way, should we need to resume the download from another
object server, we do not re-fetch byteranges that the client already
has.
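        For example, 'bytes=0-9,20-29' becomes 'bytes=20-29' once the first
        byterange has been completely delivered.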
If we have no Range header, this is a no-op.
"""
if 'Range' in self.backend_headers:
try:
req_range = Range(self.backend_headers['Range'])
except ValueError:
# there's a Range header, but it's garbage, so get rid of it
self.backend_headers.pop('Range')
return
begin, end = req_range.ranges.pop(0)
if len(req_range.ranges) > 0:
self.backend_headers['Range'] = str(req_range)
else:
self.backend_headers.pop('Range')
def learn_size_from_content_range(self, start, end, length):
"""
Sets our Range header's first byterange to the value learned from
the Content-Range header in the response; if we were given a
fully-specified range (e.g. "bytes=123-456"), this is a no-op.
If we were given a half-specified range (e.g. "bytes=123-" or
"bytes=-456"), then this changes the Range header to a
semantically-equivalent one *and* it lets us resume on a proper
boundary instead of just in the middle of a piece somewhere.
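        For example, a request of "bytes=-456" answered with
        "Content-Range: bytes 544-999/1000" rewrites our Range header to
        "bytes=544-999", so any resumed request asks for exact byte
        positions.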
"""
if length == 0:
return
if 'Range' in self.backend_headers:
try:
req_range = Range(self.backend_headers['Range'])
new_ranges = [(start, end)] + req_range.ranges[1:]
except ValueError:
new_ranges = [(start, end)]
else:
new_ranges = [(start, end)]
self.backend_headers['Range'] = (
"bytes=" + (",".join("%s-%s" % (s if s is not None else '',
e if e is not None else '')
for s, e in new_ranges)))
class GetOrHeadHandler(GetterBase):
def __init__(self, app, req, server_type, node_iter, partition, path,
backend_headers, concurrency=1, policy=None,
newest=None, logger=None):
super(GetOrHeadHandler, self).__init__(
app=app, req=req, node_iter=node_iter,
partition=partition, policy=policy, path=path,
backend_headers=backend_headers, logger=logger)
self.server_type = server_type
self.used_nodes = []
self.used_source_etag = None
self.concurrency = concurrency
self.latest_404_timestamp = Timestamp(0)
if self.server_type == 'Object':
self.node_timeout = self.app.recoverable_node_timeout
else:
self.node_timeout = self.app.node_timeout
policy_options = self.app.get_policy_options(self.policy)
self.rebalance_missing_suppression_count = min(
policy_options.rebalance_missing_suppression_count,
node_iter.num_primary_nodes - 1)
if newest is None:
self.newest = config_true_value(req.headers.get('x-newest', 'f'))
else:
self.newest = newest
# populated when finding source
self.statuses = []
self.reasons = []
self.bodies = []
self.source_headers = []
self.sources = []
# populated from response headers
self.start_byte = self.end_byte = self.length = None
def _get_next_response_part(self):
# return the next part of the response body; there may only be one part
# unless it's a multipart/byteranges response
while True:
try:
# This call to next() performs IO when we have a
# multipart/byteranges response; it reads the MIME
# boundary and part headers.
#
# If we don't have a multipart/byteranges response,
# but just a 200 or a single-range 206, then this
# performs no IO, and either just returns source or
# raises StopIteration.
with WatchdogTimeout(self.app.watchdog, self.node_timeout,
ChunkReadTimeout):
# if StopIteration is raised, it escapes and is
# handled elsewhere
start_byte, end_byte, length, headers, part = next(
self.source.parts_iter)
return (start_byte, end_byte, length, headers, part)
except ChunkReadTimeout:
if not self._replace_source(
'Trying to read object during GET (retrying)'):
raise StopIteration()
def _iter_bytes_from_response_part(self, part_file, nbytes):
# yield chunks of bytes from a single response part; if an error
# occurs, try to resume yielding bytes from a different source
part_file = ByteCountEnforcer(part_file, nbytes)
while True:
try:
with WatchdogTimeout(self.app.watchdog, self.node_timeout,
ChunkReadTimeout):
chunk = part_file.read(self.app.object_chunk_size)
if nbytes is not None:
nbytes -= len(chunk)
except (ChunkReadTimeout, ShortReadError):
exc_type, exc_value, exc_traceback = exc_info()
if self.newest or self.server_type != 'Object':
raise
try:
self.fast_forward(self.bytes_used_from_backend)
except (HTTPException, ValueError):
six.reraise(exc_type, exc_value, exc_traceback)
except RangeAlreadyComplete:
break
if self._replace_source(
'Trying to read object during GET (retrying)'):
try:
_junk, _junk, _junk, _junk, part_file = \
self._get_next_response_part()
except StopIteration:
# Tried to find a new node from which to
# finish the GET, but failed. There's
# nothing more we can do here.
six.reraise(exc_type, exc_value, exc_traceback)
part_file = ByteCountEnforcer(part_file, nbytes)
else:
six.reraise(exc_type, exc_value, exc_traceback)
else:
if not chunk:
break
with WatchdogTimeout(self.app.watchdog,
self.app.client_timeout,
ChunkWriteTimeout):
self.bytes_used_from_backend += len(chunk)
yield chunk
def _iter_parts_from_response(self, req):
# iterate over potentially multiple response body parts; for each
# part, yield an iterator over the part's bytes
try:
part_iter = None
try:
while True:
start_byte, end_byte, length, headers, part = \
self._get_next_response_part()
self.learn_size_from_content_range(
start_byte, end_byte, length)
self.bytes_used_from_backend = 0
# not length; that refers to the whole object, so is the
# wrong value to use for GET-range responses
byte_count = ((end_byte - start_byte + 1)
if (end_byte is not None
and start_byte is not None)
else None)
part_iter = CooperativeIterator(
self._iter_bytes_from_response_part(part, byte_count))
yield {'start_byte': start_byte, 'end_byte': end_byte,
'entity_length': length, 'headers': headers,
'part_iter': part_iter}
self.pop_range()
except StopIteration:
req.environ['swift.non_client_disconnect'] = True
finally:
if part_iter:
part_iter.close()
except ChunkReadTimeout:
self.app.exception_occurred(self.source.node, 'Object',
'Trying to read during GET')
raise
except ChunkWriteTimeout:
self.logger.info(
'Client did not read from proxy within %ss',
self.app.client_timeout)
self.logger.increment('client_timeouts')
except GeneratorExit:
warn = True
req_range = self.backend_headers['Range']
if req_range:
req_range = Range(req_range)
if len(req_range.ranges) == 1:
begin, end = req_range.ranges[0]
if end is not None and begin is not None:
if end - begin + 1 == self.bytes_used_from_backend:
warn = False
if not req.environ.get('swift.non_client_disconnect') and warn:
self.logger.info('Client disconnected on read of %r',
self.path)
raise
except Exception:
self.logger.exception('Trying to send to client')
raise
finally:
self.source.close()
@property
def last_status(self):
if self.statuses:
return self.statuses[-1]
else:
return None
@property
def last_headers(self):
if self.source_headers:
return HeaderKeyDict(self.source_headers[-1])
else:
return None
def _make_node_request(self, node, node_timeout, logger_thread_locals):
# make a backend request; return True if the response is deemed good
# (has an acceptable status code), useful (matches any previously
# discovered etag) and sufficient (a single good response is
# insufficient when we're searching for the newest timestamp)
self.logger.thread_locals = logger_thread_locals
if node in self.used_nodes:
return False
req_headers = dict(self.backend_headers)
ip, port = get_ip_port(node, req_headers)
start_node_timing = time.time()
try:
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(
ip, port, node['device'],
self.partition, self.req.method, self.path,
headers=req_headers,
query_string=self.req.query_string)
self.app.set_node_timing(node, time.time() - start_node_timing)
with Timeout(node_timeout):
possible_source = conn.getresponse()
# See NOTE: swift_conn at top of file about this.
possible_source.swift_conn = conn
except (Exception, Timeout):
self.app.exception_occurred(
node, self.server_type,
'Trying to %(method)s %(path)s' %
{'method': self.req.method, 'path': self.req.path})
return False
src_headers = dict(
(k.lower(), v) for k, v in
possible_source.getheaders())
if is_good_source(possible_source.status, self.server_type):
# 404 if we know we don't have a synced copy
if not float(possible_source.getheader('X-PUT-Timestamp', 1)):
self.statuses.append(HTTP_NOT_FOUND)
self.reasons.append('')
self.bodies.append('')
self.source_headers.append([])
close_swift_conn(possible_source)
else:
if self.used_source_etag and \
self.used_source_etag != normalize_etag(
src_headers.get('etag', '')):
self.statuses.append(HTTP_NOT_FOUND)
self.reasons.append('')
self.bodies.append('')
self.source_headers.append([])
return False
# a possible source should only be added as a valid source
# if its timestamp is newer than previously found tombstones
ps_timestamp = Timestamp(
src_headers.get('x-backend-data-timestamp') or
src_headers.get('x-backend-timestamp') or
src_headers.get('x-put-timestamp') or
src_headers.get('x-timestamp') or 0)
if ps_timestamp >= self.latest_404_timestamp:
self.statuses.append(possible_source.status)
self.reasons.append(possible_source.reason)
self.bodies.append(None)
self.source_headers.append(possible_source.getheaders())
self.sources.append(
GetterSource(self.app, possible_source, node))
if not self.newest: # one good source is enough
return True
else:
if 'handoff_index' in node and \
(is_server_error(possible_source.status) or
possible_source.status == HTTP_NOT_FOUND) and \
not Timestamp(src_headers.get('x-backend-timestamp', 0)):
# throw out 5XX and 404s from handoff nodes unless the data is
# really on disk and had been DELETEd
return False
if self.rebalance_missing_suppression_count > 0 and \
possible_source.status == HTTP_NOT_FOUND and \
not Timestamp(src_headers.get('x-backend-timestamp', 0)):
self.rebalance_missing_suppression_count -= 1
return False
self.statuses.append(possible_source.status)
self.reasons.append(possible_source.reason)
self.bodies.append(possible_source.read())
self.source_headers.append(possible_source.getheaders())
# if 404, record the timestamp. If a good source shows up, its
# timestamp will be compared to the latest 404.
# For now checking only on objects, but future work could include
# the same check for account and containers. See lp 1560574.
if self.server_type == 'Object' and \
possible_source.status == HTTP_NOT_FOUND:
hdrs = HeaderKeyDict(possible_source.getheaders())
ts = Timestamp(hdrs.get('X-Backend-Timestamp', 0))
if ts > self.latest_404_timestamp:
self.latest_404_timestamp = ts
self.app.check_response(node, self.server_type, possible_source,
self.req.method, self.path,
self.bodies[-1])
return False
def _find_source(self):
self.statuses = []
self.reasons = []
self.bodies = []
self.source_headers = []
self.sources = []
nodes = GreenthreadSafeIterator(self.node_iter)
node_timeout = self.app.node_timeout
if self.server_type == 'Object' and not self.newest:
node_timeout = self.app.recoverable_node_timeout
pile = GreenAsyncPile(self.concurrency)
for node in nodes:
pile.spawn(self._make_node_request, node, node_timeout,
self.logger.thread_locals)
_timeout = self.app.get_policy_options(
self.policy).concurrency_timeout \
if pile.inflight < self.concurrency else None
if pile.waitfirst(_timeout):
break
else:
# ran out of nodes, see if any stragglers will finish
any(pile)
        # this helps weed out any success statuses that were found before a
        # 404 and added to the list in the case of x-newest.
if self.sources:
self.sources = [s for s in self.sources
if s.timestamp >= self.latest_404_timestamp]
if self.sources:
self.sources.sort(key=operator.attrgetter('timestamp'))
source = self.sources.pop()
for unused_source in self.sources:
unused_source.close()
self.used_nodes.append(source.node)
# Save off the source etag so that, if we lose the connection
# and have to resume from a different node, we can be sure that
# we have the same object (replication). Otherwise, if the cluster
# has two versions of the same object, we might end up switching
# between old and new mid-stream and giving garbage to the client.
if self.used_source_etag is None:
self.used_source_etag = normalize_etag(
source.resp.getheader('etag', ''))
self.source = source
return True
return False
def _make_app_iter(self, req):
"""
Returns an iterator over the contents of the source (via its read
func). There is also quite a bit of cleanup to ensure garbage
collection works and the underlying socket of the source is closed.
:param req: incoming request object
:return: an iterator that yields chunks of response body bytes
"""
ct = self.source.resp.getheader('Content-Type')
if ct:
content_type, content_type_attrs = parse_content_type(ct)
is_multipart = content_type == 'multipart/byteranges'
else:
is_multipart = False
boundary = "dontcare"
if is_multipart:
# we need some MIME boundary; fortunately, the object server has
# furnished one for us, so we'll just re-use it
boundary = dict(content_type_attrs)["boundary"]
parts_iter = self._iter_parts_from_response(req)
def add_content_type(response_part):
response_part["content_type"] = \
HeaderKeyDict(response_part["headers"]).get("Content-Type")
return response_part
return document_iters_to_http_response_body(
(add_content_type(pi) for pi in parts_iter),
boundary, is_multipart, self.logger)
def get_working_response(self, req):
res = None
if self._find_source():
res = Response(request=req)
res.status = self.source.resp.status
update_headers(res, self.source.resp.getheaders())
if req.method == 'GET' and \
self.source.resp.status in (HTTP_OK, HTTP_PARTIAL_CONTENT):
res.app_iter = self._make_app_iter(req)
# See NOTE: swift_conn at top of file about this.
res.swift_conn = self.source.resp.swift_conn
if not res.environ:
res.environ = {}
res.environ['swift_x_timestamp'] = self.source.resp.getheader(
'x-timestamp')
res.accept_ranges = 'bytes'
res.content_length = self.source.resp.getheader('Content-Length')
if self.source.resp.getheader('Content-Type'):
res.charset = None
res.content_type = self.source.resp.getheader('Content-Type')
return res
class NodeIter(object):
"""
Yields nodes for a ring partition, skipping over error
limited nodes and stopping at the configurable number of nodes. If a
node yielded subsequently gets error limited, an extra node will be
yielded to take its place.
Note that if you're going to iterate over this concurrently from
multiple greenthreads, you'll want to use a
swift.common.utils.GreenthreadSafeIterator to serialize access.
Otherwise, you may get ValueErrors from concurrent access. (You also
may not, depending on how logging is configured, the vagaries of
socket IO and eventlet, and the phase of the moon.)
:param app: a proxy app
    :param ring: ring from which to yield nodes
:param partition: ring partition to yield nodes for
:param logger: a logger instance
:param request: yielded nodes will be annotated with `use_replication`
based on the `request` headers.
:param node_iter: optional iterable of nodes to try. Useful if you
want to filter or reorder the nodes.
:param policy: an instance of :class:`BaseStoragePolicy`. This should be
None for an account or container ring.
"""
def __init__(self, app, ring, partition, logger, request, node_iter=None,
policy=None):
self.app = app
self.ring = ring
self.partition = partition
self.logger = logger
self.request = request
part_nodes = ring.get_part_nodes(partition)
if node_iter is None:
node_iter = itertools.chain(
part_nodes, ring.get_more_nodes(partition))
self.num_primary_nodes = len(part_nodes)
self.nodes_left = self.app.request_node_count(self.num_primary_nodes)
self.expected_handoffs = self.nodes_left - self.num_primary_nodes
# Use of list() here forcibly yanks the first N nodes (the primary
# nodes) from node_iter, so the rest of its values are handoffs.
self.primary_nodes = self.app.sort_nodes(
list(itertools.islice(node_iter, self.num_primary_nodes)),
policy=policy)
self.handoff_iter = node_iter
self._node_provider = None
@property
def primaries_left(self):
return len(self.primary_nodes)
def __iter__(self):
self._node_iter = self._node_gen()
return self
def log_handoffs(self, handoffs):
"""
Log handoff requests if handoff logging is enabled and the
handoff was not expected.
        We only log handoffs when we've pushed the handoff count further
        than we would normally expect, that is beyond
        (request_node_count - num_primaries). When the handoff count goes
        higher than that, it means one of the primaries must have been
        skipped because of error limiting before we consumed all of our
        nodes_left.
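        For example, with 3 primary nodes and a request_node_count of 6,
        expected_handoffs is 3, so only the fourth and subsequent handoff
        requests are logged.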
"""
if not self.app.log_handoffs:
return
extra_handoffs = handoffs - self.expected_handoffs
if extra_handoffs > 0:
self.logger.increment('handoff_count')
self.logger.warning(
'Handoff requested (%d)' % handoffs)
if (extra_handoffs == self.num_primary_nodes):
# all the primaries were skipped, and handoffs didn't help
self.logger.increment('handoff_all_count')
def set_node_provider(self, callback):
"""
Install a callback function that will be used during a call to next()
to get an alternate node instead of returning the next node from the
iterator.
:param callback: A no argument function that should return a node dict
or None.
"""
self._node_provider = callback
def _node_gen(self):
while self.primary_nodes:
node = self.primary_nodes.pop(0)
if not self.app.error_limited(node):
yield node
if not self.app.error_limited(node):
self.nodes_left -= 1
if self.nodes_left <= 0:
return
handoffs = 0
for node in self.handoff_iter:
if not self.app.error_limited(node):
handoffs += 1
self.log_handoffs(handoffs)
yield node
if not self.app.error_limited(node):
self.nodes_left -= 1
if self.nodes_left <= 0:
return
def _annotate_node(self, node):
"""
Helper function to set use_replication dict value for a node by looking
up the header value for x-backend-use-replication-network.
:param node: node dictionary from the ring or node_iter.
:returns: node dictionary with replication network enabled/disabled
"""
# nodes may have come from a ring or a node_iter passed to the
# constructor: be careful not to mutate them!
return dict(node, use_replication=is_use_replication_network(
self.request.headers))
def next(self):
node = None
if self._node_provider:
# give node provider the opportunity to inject a node
node = self._node_provider()
if not node:
node = next(self._node_iter)
return self._annotate_node(node)
def __next__(self):
return self.next()
class Controller(object):
"""Base WSGI controller class for the proxy"""
server_type = 'Base'
# Ensure these are all lowercase
pass_through_headers = []
def __init__(self, app):
"""
Creates a controller attached to an application instance
:param app: the application instance
"""
self.account_name = None
self.app = app
self.trans_id = '-'
self._allowed_methods = None
self._private_methods = None
# adapt the app logger to prefix statsd metrics with the server type
self.logger = MetricsPrefixLoggerAdapter(
self.app.logger, {}, self.server_type.lower())
@property
def allowed_methods(self):
if self._allowed_methods is None:
self._allowed_methods = set()
all_methods = inspect.getmembers(self, predicate=inspect.ismethod)
for name, m in all_methods:
if getattr(m, 'publicly_accessible', False):
self._allowed_methods.add(name)
return self._allowed_methods
@property
def private_methods(self):
if self._private_methods is None:
self._private_methods = set()
all_methods = inspect.getmembers(self, predicate=inspect.ismethod)
for name, m in all_methods:
if getattr(m, 'privately_accessible', False):
self._private_methods.add(name)
return self._private_methods
def _x_remove_headers(self):
"""
Returns a list of headers that must not be sent to the backend
:returns: a list of header
"""
return []
def transfer_headers(self, src_headers, dst_headers):
"""
        Transfer legal headers from an original client request to a
        dictionary that will be used as headers by the backend request
:param src_headers: A dictionary of the original client request headers
:param dst_headers: A dictionary of the backend request headers
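        For example (illustrative), for an Account controller a client
        header 'X-Remove-Account-Meta-Color: x' results in
        'x-account-meta-color' being set to '' in dst_headers, which tells
        the backend to remove that item of metadata.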
"""
st = self.server_type.lower()
x_remove = 'x-remove-%s-meta-' % st
dst_headers.update((k.lower().replace('-remove', '', 1), '')
for k in src_headers
if k.lower().startswith(x_remove) or
k.lower() in self._x_remove_headers())
dst_headers.update((k.lower(), v)
for k, v in src_headers.items()
if k.lower() in self.pass_through_headers or
is_sys_or_user_meta(st, k))
def generate_request_headers(self, orig_req=None, additional=None,
transfer=False):
"""
Create a list of headers to be used in backend requests
:param orig_req: the original request sent by the client to the proxy
:param additional: additional headers to send to the backend
:param transfer: If True, transfer headers from original client request
:returns: a dictionary of headers
"""
headers = HeaderKeyDict()
if orig_req:
headers.update((k.lower(), v)
for k, v in orig_req.headers.items()
if k.lower().startswith('x-backend-'))
referer = orig_req.as_referer()
else:
referer = ''
# additional headers can override x-backend-* headers from orig_req
if additional:
headers.update(additional)
if orig_req and transfer:
# transfer headers from orig_req can override additional headers
self.transfer_headers(orig_req.headers, headers)
headers.setdefault('x-timestamp', Timestamp.now().internal)
# orig_req and additional headers cannot override the following...
headers['x-trans-id'] = self.trans_id
headers['connection'] = 'close'
headers['user-agent'] = self.app.backend_user_agent
headers['referer'] = referer
return headers
def account_info(self, account, req):
"""
Get account information, and also verify that the account exists.
:param account: native str name of the account to get the info for
:param req: caller's HTTP request context object
:returns: tuple of (account partition, account nodes, container_count)
or (None, None, None) if it does not exist
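        A typical (illustrative) call from a subclass controller is
        ``self.account_info(self.account_name, req)``, unpacking the result
        into partition, nodes and container_count.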
"""
if req:
env = getattr(req, 'environ', {})
else:
env = {}
env.setdefault('swift.infocache', {})
path_env = env.copy()
path_env['PATH_INFO'] = "/v1/%s" % (str_to_wsgi(account),)
info = get_account_info(path_env, self.app)
if (not info
or not is_success(info['status'])
or not info.get('account_really_exists', True)):
return None, None, None
container_count = info['container_count']
partition, nodes = self.app.account_ring.get_nodes(account)
return partition, nodes, container_count
def container_info(self, account, container, req):
"""
        Get container information and thereby verify container existence.
This will also verify account existence.
:param account: native-str account name for the container
:param container: native-str container name to look up
:param req: caller's HTTP request context object
:returns: dict containing at least container partition ('partition'),
                  container nodes ('nodes'), container read
acl ('read_acl'), container write acl ('write_acl'),
and container sync key ('sync_key').
Values are set to None if the container does not exist.
"""
if req:
env = getattr(req, 'environ', {})
else:
env = {}
env.setdefault('swift.infocache', {})
path_env = env.copy()
path_env['PATH_INFO'] = "/v1/%s/%s" % (
str_to_wsgi(account), str_to_wsgi(container))
info = get_container_info(path_env, self.app)
if not is_success(info.get('status')):
info['partition'] = None
info['nodes'] = None
else:
part, nodes = self.app.container_ring.get_nodes(account, container)
info['partition'] = part
info['nodes'] = nodes
return info
def _make_request(self, nodes, part, method, path, headers, query,
body, logger_thread_locals):
"""
Iterates over the given node iterator, sending an HTTP request to one
node at a time. The first non-informational, non-server-error
response is returned. If no non-informational, non-server-error
response is received from any of the nodes, returns None.
:param nodes: an iterator of the backend server and handoff servers
:param part: the partition number
:param method: the method to send to the backend
:param path: the path to send to the backend
(full path ends up being /<$device>/<$part>/<$path>)
:param headers: dictionary of headers
:param query: query string to send to the backend.
:param body: byte string to use as the request body.
Try to keep it small.
:param logger_thread_locals: The thread local values to be set on the
self.logger to retain transaction
logging information.
        :returns: a tuple of (status, reason, headers, body) from the first
                  usable response, or None if no such response was received
"""
self.logger.thread_locals = logger_thread_locals
if body:
if not isinstance(body, bytes):
raise TypeError('body must be bytes, not %s' % type(body))
headers['Content-Length'] = str(len(body))
for node in nodes:
try:
ip, port = get_ip_port(node, headers)
start_node_timing = time.time()
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(
ip, port, node['device'], part, method, path,
headers=headers, query_string=query)
conn.node = node
self.app.set_node_timing(node, time.time() - start_node_timing)
if body:
with Timeout(self.app.node_timeout):
conn.send(body)
with Timeout(self.app.node_timeout):
resp = conn.getresponse()
if (self.app.check_response(node, self.server_type, resp,
method, path)
and not is_informational(resp.status)):
return resp.status, resp.reason, resp.getheaders(), \
resp.read()
except (Exception, Timeout):
self.app.exception_occurred(
node, self.server_type,
'Trying to %(method)s %(path)s' %
{'method': method, 'path': path})
def make_requests(self, req, ring, part, method, path, headers,
query_string='', overrides=None, node_count=None,
node_iterator=None, body=None):
"""
Sends an HTTP request to multiple nodes and aggregates the results.
It attempts the primary nodes concurrently, then iterates over the
handoff nodes as needed.
:param req: a request sent by the client
:param ring: the ring used for finding backend servers
:param part: the partition number
:param method: the method to send to the backend
:param path: the path to send to the backend
(full path ends up being /<$device>/<$part>/<$path>)
:param headers: a list of dicts, where each dict represents one
backend request that should be made.
:param query_string: optional query string to send to the backend
:param overrides: optional return status override map used to override
the returned status of a request.
:param node_count: optional number of nodes to send request to.
:param node_iterator: optional node iterator.
:returns: a swob.Response object
"""
nodes = GreenthreadSafeIterator(
node_iterator or NodeIter(self.app, ring, part, self.logger, req)
)
node_number = node_count or len(ring.get_part_nodes(part))
pile = GreenAsyncPile(node_number)
for head in headers:
pile.spawn(self._make_request, nodes, part, method, path,
head, query_string, body, self.logger.thread_locals)
response = []
statuses = []
for resp in pile:
if not resp:
continue
response.append(resp)
statuses.append(resp[0])
if self.have_quorum(statuses, node_number):
break
# give any pending requests *some* chance to finish
finished_quickly = pile.waitall(self.app.post_quorum_timeout)
for resp in finished_quickly:
if not resp:
continue
response.append(resp)
statuses.append(resp[0])
while len(response) < node_number:
response.append((HTTP_SERVICE_UNAVAILABLE, '', '', b''))
statuses, reasons, resp_headers, bodies = zip(*response)
return self.best_response(req, statuses, reasons, bodies,
'%s %s' % (self.server_type, req.method),
overrides=overrides, headers=resp_headers)
def _quorum_size(self, n):
"""
Number of successful backend responses needed for the proxy to
consider the client request successful.
"""
return quorum_size(n)
def have_quorum(self, statuses, node_count, quorum=None):
"""
Given a list of statuses from several requests, determine if
a quorum response can already be decided.
:param statuses: list of statuses returned
:param node_count: number of nodes being queried (basically ring count)
:param quorum: number of statuses required for quorum
:returns: True or False, depending on if quorum is established
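        For example (illustrative): with node_count=3 the quorum size is 2,
        so have_quorum([200, 201], 3) is True, while have_quorum([200, 503],
        3) is False because server errors never count towards a quorum.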
"""
if quorum is None:
quorum = self._quorum_size(node_count)
if len(statuses) >= quorum:
for hundred in (HTTP_CONTINUE, HTTP_OK, HTTP_MULTIPLE_CHOICES,
HTTP_BAD_REQUEST):
if sum(1 for s in statuses
if hundred <= s < hundred + 100) >= quorum:
return True
return False
def best_response(self, req, statuses, reasons, bodies, server_type,
etag=None, headers=None, overrides=None,
quorum_size=None):
"""
Given a list of responses from several servers, choose the best to
return to the API.
:param req: swob.Request object
:param statuses: list of statuses returned
:param reasons: list of reasons for each status
:param bodies: bodies of each response
:param server_type: type of server the responses came from
:param etag: etag
:param headers: headers of each response
:param overrides: overrides to apply when lacking quorum
:param quorum_size: quorum size to use
:returns: swob.Response object with the correct status, body, etc. set
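        For example (illustrative): given statuses [204, 204, 404] from
        three account servers, the quorum size is 2 and the 2xx responses
        form a quorum, so a 204 response is returned to the client.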
"""
if quorum_size is None:
quorum_size = self._quorum_size(len(statuses))
resp = self._compute_quorum_response(
req, statuses, reasons, bodies, etag, headers,
quorum_size=quorum_size)
if overrides and not resp:
faked_up_status_indices = set()
transformed = []
for (i, (status, reason, hdrs, body)) in enumerate(zip(
statuses, reasons, headers, bodies)):
if status in overrides:
faked_up_status_indices.add(i)
transformed.append((overrides[status], '', '', ''))
else:
transformed.append((status, reason, hdrs, body))
statuses, reasons, headers, bodies = zip(*transformed)
resp = self._compute_quorum_response(
req, statuses, reasons, bodies, etag, headers,
indices_to_avoid=faked_up_status_indices,
quorum_size=quorum_size)
if not resp:
resp = HTTPServiceUnavailable(request=req)
self.logger.error('%(type)s returning 503 for %(statuses)s',
{'type': server_type, 'statuses': statuses})
return resp
def _compute_quorum_response(self, req, statuses, reasons, bodies, etag,
headers, quorum_size, indices_to_avoid=()):
if not statuses:
return None
for hundred in (HTTP_OK, HTTP_MULTIPLE_CHOICES, HTTP_BAD_REQUEST):
hstatuses = \
[(i, s) for i, s in enumerate(statuses)
if hundred <= s < hundred + 100]
if len(hstatuses) >= quorum_size:
try:
status_index, status = max(
((i, stat) for i, stat in hstatuses
if i not in indices_to_avoid),
key=operator.itemgetter(1))
except ValueError:
# All statuses were indices to avoid
continue
resp = status_map[status](request=req)
resp.status = '%s %s' % (status, reasons[status_index])
resp.body = bodies[status_index]
if headers:
update_headers(resp, headers[status_index])
if etag:
resp.headers['etag'] = normalize_etag(etag)
return resp
return None
@public
def GET(self, req):
"""
Handler for HTTP GET requests.
:param req: The client request
:returns: the response to the client
"""
return self.GETorHEAD(req)
@public
def HEAD(self, req):
"""
Handler for HTTP HEAD requests.
:param req: The client request
:returns: the response to the client
"""
return self.GETorHEAD(req)
def autocreate_account(self, req, account):
"""
Autocreate an account
:param req: request leading to this autocreate
:param account: the unquoted account name
"""
partition, nodes = self.app.account_ring.get_nodes(account)
path = '/%s' % account
headers = {'X-Timestamp': Timestamp.now().internal,
'X-Trans-Id': self.trans_id,
'X-Openstack-Request-Id': self.trans_id,
'Connection': 'close'}
# transfer any x-account-sysmeta headers from original request
# to the autocreate PUT
headers.update((k, v)
for k, v in req.headers.items()
if is_sys_meta('account', k))
resp = self.make_requests(Request.blank(str_to_wsgi('/v1' + path)),
self.app.account_ring, partition, 'PUT',
path, [headers] * len(nodes))
if is_success(resp.status_int):
self.logger.info('autocreate account %r', path)
clear_info_cache(req.environ, account)
return True
else:
self.logger.warning('Could not autocreate account %r', path)
return False
def GETorHEAD_base(self, req, server_type, node_iter, partition, path,
concurrency=1, policy=None):
"""
Base handler for HTTP GET or HEAD requests.
:param req: swob.Request object
:param server_type: server type used in logging
:param node_iter: an iterator to obtain nodes from
:param partition: partition
:param path: path for the request
:param concurrency: number of requests to run concurrently
:param policy: the policy instance, or None if Account or Container
:returns: swob.Response object
"""
backend_headers = self.generate_request_headers(
req, additional=req.headers)
handler = GetOrHeadHandler(self.app, req, self.server_type, node_iter,
partition, path, backend_headers,
concurrency, policy=policy,
logger=self.logger)
res = handler.get_working_response(req)
if not res:
res = self.best_response(
req, handler.statuses, handler.reasons, handler.bodies,
'%s %s' % (server_type, req.method),
headers=handler.source_headers)
# if a backend policy index is present in resp headers, translate it
# here with the friendly policy name
if 'X-Backend-Storage-Policy-Index' in res.headers and \
is_success(res.status_int):
policy = \
POLICIES.get_by_index(
res.headers['X-Backend-Storage-Policy-Index'])
if policy:
res.headers['X-Storage-Policy'] = policy.name
else:
self.logger.error(
'Could not translate %s (%r) from %r to policy',
'X-Backend-Storage-Policy-Index',
res.headers['X-Backend-Storage-Policy-Index'], path)
return res
def is_origin_allowed(self, cors_info, origin):
"""
        Is the given Origin allowed to make requests to this resource?
:param cors_info: the resource's CORS related metadata headers
:param origin: the origin making the request
:return: True or False
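        For example (illustrative): with cors_info of
        {'allow_origin': 'http://a.example http://b.example'} the origin
        'http://a.example' is allowed; an allow_origin value of '*' allows
        any origin.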
"""
allowed_origins = set()
if cors_info.get('allow_origin'):
allowed_origins.update(
[a.strip()
for a in cors_info['allow_origin'].split(' ')
if a.strip()])
if self.app.cors_allow_origin:
allowed_origins.update(self.app.cors_allow_origin)
return origin in allowed_origins or '*' in allowed_origins
@public
def OPTIONS(self, req):
"""
Base handler for OPTIONS requests
:param req: swob.Request object
:returns: swob.Response object
"""
# Prepare the default response
headers = {'Allow': ', '.join(self.allowed_methods)}
resp = Response(status=200, request=req, headers=headers)
# If this isn't a CORS pre-flight request then return now
req_origin_value = req.headers.get('Origin', None)
if not req_origin_value:
return resp
# This is a CORS preflight request so check it's allowed
try:
container_info = \
self.container_info(self.account_name,
self.container_name, req)
except AttributeError:
# This should only happen for requests to the Account. A future
# change could allow CORS requests to the Account level as well.
return resp
cors = container_info.get('cors', {})
# If the CORS origin isn't allowed return a 401
if not self.is_origin_allowed(cors, req_origin_value) or (
req.headers.get('Access-Control-Request-Method') not in
self.allowed_methods):
resp.status = HTTP_UNAUTHORIZED
return resp
# Populate the response with the CORS preflight headers
if cors.get('allow_origin') and \
cors.get('allow_origin').strip() == '*':
headers['access-control-allow-origin'] = '*'
else:
headers['access-control-allow-origin'] = req_origin_value
if 'vary' in headers:
headers['vary'] += ', Origin'
else:
headers['vary'] = 'Origin'
if cors.get('max_age') is not None:
headers['access-control-max-age'] = cors.get('max_age')
headers['access-control-allow-methods'] = \
', '.join(self.allowed_methods)
# Allow all headers requested in the request. The CORS
# specification does leave the door open for this, as mentioned in
# http://www.w3.org/TR/cors/#resource-preflight-requests
        # Note: since the list of requested headers can be unbounded, simply
        # echoing back the requested headers can be enough.
allow_headers = set(
list_from_csv(req.headers.get('Access-Control-Request-Headers')))
if allow_headers:
headers['access-control-allow-headers'] = ', '.join(allow_headers)
if 'vary' in headers:
headers['vary'] += ', Access-Control-Request-Headers'
else:
headers['vary'] = 'Access-Control-Request-Headers'
resp.headers = headers
return resp
def get_name_length_limit(self):
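        # Auto-created accounts (those whose name starts with the
        # auto_create_account_prefix, e.g. shard or expirer accounts) are
        # allowed names up to twice the normal length limit.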
if self.account_name.startswith(self.app.auto_create_account_prefix):
multiplier = 2
else:
multiplier = 1
if self.server_type == 'Account':
return constraints.MAX_ACCOUNT_NAME_LENGTH * multiplier
elif self.server_type == 'Container':
return constraints.MAX_CONTAINER_NAME_LENGTH * multiplier
else:
            raise ValueError(
                "server_type can only be 'Account' or 'Container'")
def _parse_listing_response(self, req, response):
if not is_success(response.status_int):
self.logger.warning(
'Failed to get container listing from %s: %s',
req.path_qs, response.status_int)
return None
try:
data = json.loads(response.body)
if not isinstance(data, list):
raise ValueError('not a list')
return data
except ValueError as err:
self.logger.error(
'Problem with listing response from %s: %r',
req.path_qs, err)
return None
def _get_container_listing(self, req, account, container, headers=None,
params=None):
"""
Fetch container listing from given `account/container`.
:param req: original Request instance.
:param account: account in which `container` is stored.
:param container: container from which listing should be fetched.
:param headers: extra headers to be included with the listing
sub-request; these update the headers copied from the original
request.
:param params: query string parameters to be used.
:return: a tuple of (deserialized json data structure, swob Response)
"""
params = params or {}
version, _a, _c, _other = req.split_path(3, 4, True)
path = '/'.join(['', version, account, container])
subreq = make_pre_authed_request(
req.environ, method='GET', path=quote(path), headers=req.headers,
swift_source='SH')
if headers:
subreq.headers.update(headers)
subreq.params = params
self.logger.debug(
'Get listing from %s %s' % (subreq.path_qs, headers))
response = self.app.handle_request(subreq)
data = self._parse_listing_response(req, response)
return data, response
def _parse_shard_ranges(self, req, listing, response):
if listing is None:
return None
record_type = response.headers.get('x-backend-record-type')
if record_type != 'shard':
err = 'unexpected record type %r' % record_type
self.logger.error("Failed to get shard ranges from %s: %s",
req.path_qs, err)
return None
try:
return [ShardRange.from_dict(shard_range)
for shard_range in listing]
except (ValueError, TypeError, KeyError) as err:
self.logger.error(
"Failed to get shard ranges from %s: invalid data: %r",
req.path_qs, err)
return None
def _get_shard_ranges(
self, req, account, container, includes=None, states=None):
"""
Fetch shard ranges from given `account/container`. If `includes` is
given then the shard range for that object name is requested, otherwise
all shard ranges are requested.
:param req: original Request instance.
:param account: account from which shard ranges should be fetched.
:param container: container from which shard ranges should be fetched.
:param includes: (optional) restricts the list of fetched shard ranges
to those which include the given name.
:param states: (optional) the states of shard ranges to be fetched.
:return: a list of instances of :class:`swift.common.utils.ShardRange`,
or None if there was a problem fetching the shard ranges
"""
params = req.params.copy()
params.pop('limit', None)
params['format'] = 'json'
if includes:
params['includes'] = str_to_wsgi(includes)
if states:
params['states'] = states
headers = {'X-Backend-Record-Type': 'shard'}
listing, response = self._get_container_listing(
req, account, container, headers=headers, params=params)
return self._parse_shard_ranges(req, listing, response), response
| swift-master | swift/proxy/controllers/base.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves.urllib.parse import unquote
from swift.account.utils import account_listing_response
from swift.common.middleware.acl import parse_acl, format_acl
from swift.common.utils import public
from swift.common.constraints import check_metadata
from swift.common.http import HTTP_NOT_FOUND, HTTP_GONE
from swift.proxy.controllers.base import Controller, clear_info_cache, \
set_info_cache, NodeIter
from swift.common.middleware import listing_formats
from swift.common.swob import HTTPBadRequest, HTTPMethodNotAllowed
from swift.common.request_helpers import get_sys_meta_prefix
class AccountController(Controller):
"""WSGI controller for account requests"""
server_type = 'Account'
def __init__(self, app, account_name, **kwargs):
super(AccountController, self).__init__(app)
self.account_name = unquote(account_name)
if not self.app.allow_account_management:
self.allowed_methods.remove('PUT')
self.allowed_methods.remove('DELETE')
def add_acls_from_sys_metadata(self, resp):
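        # Convert the internally stored sysmeta ACL
        # (x-account-sysmeta-core-access-control) into the external
        # x-account-access-control header on responses to methods that may
        # expose it.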
if resp.environ['REQUEST_METHOD'] in ('HEAD', 'GET', 'PUT', 'POST'):
prefix = get_sys_meta_prefix('account') + 'core-'
name = 'access-control'
(extname, intname) = ('x-account-' + name, prefix + name)
acl_dict = parse_acl(version=2, data=resp.headers.pop(intname))
if acl_dict: # treat empty dict as empty header
resp.headers[extname] = format_acl(
version=2, acl_dict=acl_dict)
def GETorHEAD(self, req):
"""Handler for HTTP GET/HEAD requests."""
length_limit = self.get_name_length_limit()
if len(self.account_name) > length_limit:
resp = HTTPBadRequest(request=req)
resp.body = b'Account name length of %d longer than %d' % \
(len(self.account_name), length_limit)
# Don't cache this. We know the account doesn't exist because
# the name is bad; we don't need to cache that because it's
# really cheap to recompute.
return resp
partition = self.app.account_ring.get_part(self.account_name)
concurrency = self.app.account_ring.replica_count \
if self.app.get_policy_options(None).concurrent_gets else 1
node_iter = NodeIter(self.app, self.app.account_ring, partition,
self.logger, req)
params = req.params
params['format'] = 'json'
req.params = params
resp = self.GETorHEAD_base(
req, 'Account', node_iter, partition,
req.swift_entity_path.rstrip('/'), concurrency)
if resp.status_int == HTTP_NOT_FOUND:
if resp.headers.get('X-Account-Status', '').lower() == 'deleted':
resp.status = HTTP_GONE
elif self.app.account_autocreate:
# This is kind of a lie; we pretend like the account is
# there, but it's not. We'll create it as soon as something
# tries to write to it, but we don't need databases on disk
# to tell us that nothing's there.
#
# We set a header so that certain consumers can tell it's a
# fake listing. The important one is the PUT of a container
# to an autocreate account; the proxy checks to see if the
# account exists before actually performing the PUT and
# creates the account if necessary. If we feed it a perfect
# lie, it'll just try to create the container without
# creating the account, and that'll fail.
resp = account_listing_response(
self.account_name, req,
listing_formats.get_listing_content_type(req))
resp.headers['X-Backend-Fake-Account-Listing'] = 'yes'
# Cache this. We just made a request to a storage node and got
# up-to-date information for the account.
resp.headers['X-Backend-Recheck-Account-Existence'] = str(
self.app.recheck_account_existence)
set_info_cache(req.environ, self.account_name, None, resp)
if req.environ.get('swift_owner'):
self.add_acls_from_sys_metadata(resp)
else:
for header in self.app.swift_owner_headers:
resp.headers.pop(header, None)
return resp
@public
def PUT(self, req):
"""HTTP PUT request handler."""
if not self.app.allow_account_management:
return HTTPMethodNotAllowed(
request=req,
headers={'Allow': ', '.join(self.allowed_methods)})
error_response = check_metadata(req, 'account')
if error_response:
return error_response
length_limit = self.get_name_length_limit()
if len(self.account_name) > length_limit:
resp = HTTPBadRequest(request=req)
resp.body = b'Account name length of %d longer than %d' % \
(len(self.account_name), length_limit)
return resp
account_partition, accounts = \
self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req, transfer=True)
clear_info_cache(req.environ, self.account_name)
resp = self.make_requests(
req, self.app.account_ring, account_partition, 'PUT',
req.swift_entity_path, [headers] * len(accounts))
self.add_acls_from_sys_metadata(resp)
return resp
@public
def POST(self, req):
"""HTTP POST request handler."""
length_limit = self.get_name_length_limit()
if len(self.account_name) > length_limit:
resp = HTTPBadRequest(request=req)
resp.body = b'Account name length of %d longer than %d' % \
(len(self.account_name), length_limit)
return resp
error_response = check_metadata(req, 'account')
if error_response:
return error_response
account_partition, accounts = \
self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req, transfer=True)
clear_info_cache(req.environ, self.account_name)
resp = self.make_requests(
req, self.app.account_ring, account_partition, 'POST',
req.swift_entity_path, [headers] * len(accounts))
if resp.status_int == HTTP_NOT_FOUND and self.app.account_autocreate:
self.autocreate_account(req, self.account_name)
resp = self.make_requests(
req, self.app.account_ring, account_partition, 'POST',
req.swift_entity_path, [headers] * len(accounts))
self.add_acls_from_sys_metadata(resp)
return resp
@public
def DELETE(self, req):
"""HTTP DELETE request handler."""
# Extra safety in case someone typos a query string for an
# account-level DELETE request that was really meant to be caught by
# some middleware.
if req.query_string:
return HTTPBadRequest(request=req)
if not self.app.allow_account_management:
return HTTPMethodNotAllowed(
request=req,
headers={'Allow': ', '.join(self.allowed_methods)})
account_partition, accounts = \
self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req)
clear_info_cache(req.environ, self.account_name)
resp = self.make_requests(
req, self.app.account_ring, account_partition, 'DELETE',
req.swift_entity_path, [headers] * len(accounts))
return resp
| swift-master | swift/proxy/controllers/account.py |
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The ``swift-manage-shard-ranges`` tool provides commands for initiating
sharding of a container. ``swift-manage-shard-ranges`` operates directly on a
container database file.
.. note::
``swift-manage-shard-ranges`` must only be used on one replica of a
container database to avoid inconsistent results. The modifications made by
``swift-manage-shard-ranges`` will be automatically copied to other
replicas of the container database via normal replication processes.
There are three steps in the process of initiating sharding, each of which may
be performed in isolation or, as shown below, using a single command.
#. The ``find`` sub-command scans the container database to identify how many
shard containers will be required and which objects they will manage. Each
shard container manages a range of the object namespace defined by a
``lower`` and ``upper`` bound. The maximum number of objects to be allocated
to each shard container is specified on the command line. For example::
$ swift-manage-shard-ranges <path_to_db> find 500000
Loaded db broker for AUTH_test/c1.
[
{
"index": 0,
"lower": "",
"object_count": 500000,
"upper": "o_01086834"
},
{
"index": 1,
"lower": "o_01086834",
"object_count": 500000,
"upper": "o_01586834"
},
{
"index": 2,
"lower": "o_01586834",
"object_count": 500000,
"upper": "o_02087570"
},
{
"index": 3,
"lower": "o_02087570",
"object_count": 500000,
"upper": "o_02587572"
},
{
"index": 4,
"lower": "o_02587572",
"object_count": 500000,
"upper": "o_03087572"
},
{
"index": 5,
"lower": "o_03087572",
"object_count": 500000,
"upper": "o_03587572"
},
{
"index": 6,
"lower": "o_03587572",
"object_count": 349194,
"upper": ""
}
]
Found 7 ranges in 4.37222s (total object count 3349194)
This command returns a list of shard ranges each of which describes the
namespace to be managed by a shard container. No other action is taken by
this command and the container database is unchanged. The output may be
redirected to a file for subsequent retrieval by the ``replace`` command.
For example::
$ swift-manage-shard-ranges <path_to_db> find 500000 > my_shard_ranges
Loaded db broker for AUTH_test/c1.
Found 7 ranges in 2.448s (total object count 3349194)
#. The ``replace`` sub-command deletes any shard ranges that might already be
in the container database and inserts shard ranges from a given file. The
file contents should be in the format generated by the ``find`` sub-command.
For example::
$ swift-manage-shard-ranges <path_to_db> replace my_shard_ranges
Loaded db broker for AUTH_test/c1.
No shard ranges found to delete.
Injected 7 shard ranges.
Run container-replicator to replicate them to other nodes.
Use the enable sub-command to enable sharding.
The container database is modified to store the shard ranges, but the
container will not start sharding until sharding is enabled. The ``info``
sub-command may be used to inspect the state of the container database at
any point, and the ``show`` sub-command may be used to display the inserted
shard ranges.
Shard ranges stored in the container database may be replaced using the
``replace`` sub-command. This will first delete all existing shard ranges
before storing new shard ranges. Shard ranges may also be deleted from the
container database using the ``delete`` sub-command.
Shard ranges should not be replaced or deleted using
``swift-manage-shard-ranges`` once the next step of enabling sharding has
been taken.
#. The ``enable`` sub-command enables the container for sharding. The sharder
daemon and/or container replicator daemon will replicate shard ranges to
other replicas of the container DB and the sharder daemon will proceed to
shard the container. This process may take some time depending on the size
of the container, the number of shard ranges and the underlying hardware.
.. note::
Once the ``enable`` sub-command has been used there is no supported
mechanism to revert sharding. Do not use ``swift-manage-shard-ranges``
to make any further changes to the shard ranges in the container DB.
For example::
$ swift-manage-shard-ranges <path_to_db> enable
Loaded db broker for AUTH_test/c1.
Container moved to state 'sharding' with epoch 1525345093.22908.
Run container-sharder on all nodes to shard the container.
This does not shard the container - sharding is performed by the
:ref:`sharder_daemon` - but sets the necessary state in the database for the
daemon to subsequently start the sharding process.
The ``epoch`` value displayed in the output is the time at which sharding
was enabled. When the :ref:`sharder_daemon` starts sharding this container
it creates a new container database file using the epoch in the filename to
distinguish it from the retiring DB that is being sharded.
All three steps may be performed with one sub-command::
$ swift-manage-shard-ranges <path_to_db> find_and_replace 500000 --enable \
--force
Loaded db broker for AUTH_test/c1.
No shard ranges found to delete.
Injected 7 shard ranges.
Run container-replicator to replicate them to other nodes.
Container moved to state 'sharding' with epoch 1525345669.46153.
Run container-sharder on all nodes to shard the container.
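Additional sub-commands are available for inspecting and maintaining shard
ranges once they exist, including ``info``, ``show``, ``compact``, ``repair``
and ``analyze``; see ``swift-manage-shard-ranges --help`` for the full list
of sub-commands and their options.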
"""
from __future__ import print_function
import argparse
import json
import os.path
import sys
import time
from contextlib import contextmanager
from six.moves import input
from swift.common.utils import Timestamp, get_logger, ShardRange, readconf, \
ShardRangeList, non_negative_int, config_positive_int_value
from swift.container.backend import ContainerBroker, UNSHARDED
from swift.container.sharder import make_shard_ranges, sharding_enabled, \
CleavingContext, process_compactible_shard_sequences, \
find_compactible_shard_sequences, find_overlapping_ranges, \
find_paths, rank_paths, finalize_shrinking, DEFAULT_SHARDER_CONF, \
ContainerSharderConf, find_paths_with_gaps, combine_shard_ranges, \
update_own_shard_range_stats
EXIT_SUCCESS = 0
EXIT_ERROR = 1
EXIT_INVALID_ARGS = 2 # consistent with argparse exit code for invalid args
EXIT_USER_QUIT = 3
MIN_SHARD_RANGE_AGE_FOR_REPAIR = 4 * 3600
# Some CLI options derive their default values from DEFAULT_SHARDER_CONF if
# they have not been set. It is therefore important that the CLI parser
# provides a distinct sentinel as a default so that we can detect that no
# value was set on the command line. We use this alias to act as a reminder.
USE_SHARDER_DEFAULT = object()
class ManageShardRangesException(Exception):
pass
class GapsFoundException(ManageShardRangesException):
pass
class InvalidStateException(ManageShardRangesException):
pass
class InvalidSolutionException(ManageShardRangesException):
def __init__(self, msg, acceptor_path, overlapping_donors):
super(InvalidSolutionException, self).__init__(msg)
self.acceptor_path = acceptor_path
self.overlapping_donors = overlapping_donors
def wrap_for_argparse(func, msg=None):
"""
Wrap the given ``func`` to catch any ``ValueError`` and raise an
``argparse.ArgumentTypeError`` instead.
:param func: a function.
    :param msg: an optional message to use with any exception that is
        raised; if not given then the string representation of the
        ValueError will be used.
:return: a function wrapper.
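    For example (illustrative),
    ``wrap_for_argparse(int, 'must be an integer')('x')`` raises an
    ``argparse.ArgumentTypeError`` rather than the underlying ``ValueError``.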
"""
def wrapped_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except ValueError as err:
raise argparse.ArgumentTypeError(str(err) if msg is None else msg)
return wrapped_func
def _proceed(args):
if args.dry_run:
choice = 'no'
elif args.yes:
choice = 'yes'
else:
try:
choice = input('Do you want to apply these changes to the '
'container DB? [yes/N]')
except (EOFError, KeyboardInterrupt):
choice = 'no'
if choice != 'yes':
print('No changes applied')
return choice == 'yes'
def _print_shard_range(sr, level=0):
indent = ' ' * level
print(indent + '%r' % sr.name)
print(indent + ' objects: %9d, tombstones: %9d, lower: %r'
% (sr.object_count, sr.tombstones, sr.lower_str))
print(indent + ' state: %9s, deleted: %d upper: %r'
% (sr.state_text, sr.deleted, sr.upper_str))
@contextmanager
def _open_input(args):
if args.input == '-':
args.input = '<STDIN>'
yield sys.stdin
else:
with open(args.input, 'r') as fd:
yield fd
def _load_and_validate_shard_data(args, require_index=True):
required_keys = ['lower', 'upper', 'object_count']
if require_index:
required_keys.append('index')
try:
with _open_input(args) as fd:
try:
data = json.load(fd)
if not isinstance(data, list):
raise ValueError('Shard data must be a list of dicts')
for k in required_keys:
for shard in data:
shard[k] # trigger KeyError for missing required key
return data
except (TypeError, ValueError, KeyError) as err:
print('Failed to load valid shard range data: %r' % err,
file=sys.stderr)
exit(2)
except IOError as err:
print('Failed to open file %s: %s' % (args.input, err),
file=sys.stderr)
exit(2)
def _check_shard_ranges(own_shard_range, shard_ranges):
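    # Sanity check that the proposed shard ranges form a contiguous chain
    # whose combined namespace exactly matches the container's own shard
    # range; print a warning and exit if any bound is missing or mismatched.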
reasons = []
def reason(x, y):
if x != y:
reasons.append('%s != %s' % (x, y))
if not shard_ranges:
reasons.append('No shard ranges.')
else:
reason(own_shard_range.lower, shard_ranges[0].lower)
reason(own_shard_range.upper, shard_ranges[-1].upper)
for x, y in zip(shard_ranges, shard_ranges[1:]):
reason(x.upper, y.lower)
if reasons:
print('WARNING: invalid shard ranges: %s.' % reasons)
print('Aborting.')
exit(EXIT_ERROR)
def _check_own_shard_range(broker, args):
# TODO: this check is weak - if the shards prefix changes then we may not
# identify a shard container. The goal is to not inadvertently create an
# entire namespace default shard range for a shard container.
is_shard = broker.account.startswith(args.shards_account_prefix)
own_shard_range = broker.get_own_shard_range(no_default=is_shard)
if not own_shard_range:
print('WARNING: shard container missing own shard range.')
print('Aborting.')
exit(2)
return own_shard_range
def _find_ranges(broker, args, status_file=None):
start = last_report = time.time()
limit = 5 if status_file else -1
shard_data, last_found = broker.find_shard_ranges(
args.rows_per_shard, limit=limit,
minimum_shard_size=args.minimum_shard_size)
if shard_data:
while not last_found:
if last_report + 10 < time.time():
print('Found %d ranges in %gs; looking for more...' % (
len(shard_data), time.time() - start), file=status_file)
last_report = time.time()
# prefix doesn't matter since we aren't persisting it
found_ranges = make_shard_ranges(broker, shard_data, '.shards_')
more_shard_data, last_found = broker.find_shard_ranges(
args.rows_per_shard, existing_ranges=found_ranges, limit=5,
minimum_shard_size=args.minimum_shard_size)
shard_data.extend(more_shard_data)
return shard_data, time.time() - start
def find_ranges(broker, args):
shard_data, delta_t = _find_ranges(broker, args, sys.stderr)
print(json.dumps(shard_data, sort_keys=True, indent=2))
print('Found %d ranges in %gs (total object count %s)' %
(len(shard_data), delta_t,
sum(r['object_count'] for r in shard_data)),
file=sys.stderr)
return EXIT_SUCCESS
def show_shard_ranges(broker, args):
shard_ranges = broker.get_shard_ranges(
includes=getattr(args, 'includes', None),
include_deleted=getattr(args, 'include_deleted', False))
shard_data = [dict(sr, state=sr.state_text)
for sr in shard_ranges]
if not shard_data:
print("No shard data found.", file=sys.stderr)
elif getattr(args, 'brief', False):
print("Existing shard ranges:", file=sys.stderr)
print(json.dumps([(sd['lower'], sd['upper']) for sd in shard_data],
sort_keys=True, indent=2))
else:
print("Existing shard ranges:", file=sys.stderr)
print(json.dumps(shard_data, sort_keys=True, indent=2))
return EXIT_SUCCESS
def db_info(broker, args):
print('Sharding enabled = %s' % sharding_enabled(broker))
own_sr = broker.get_own_shard_range(no_default=True)
print('Own shard range: %s' %
(json.dumps(dict(own_sr, state=own_sr.state_text),
sort_keys=True, indent=2)
if own_sr else None))
db_state = broker.get_db_state()
print('db_state = %s' % db_state)
info = broker.get_info()
print('object_count = %d' % info['object_count'])
print('bytes_used = %d' % info['bytes_used'])
if db_state == 'sharding':
print('Retiring db id: %s' % broker.get_brokers()[0].get_info()['id'])
print('Cleaving context: %s' %
json.dumps(dict(CleavingContext.load(broker)),
sort_keys=True, indent=2))
print('Metadata:')
for k, (v, t) in broker.metadata.items():
print(' %s = %s' % (k, v))
return EXIT_SUCCESS
def delete_shard_ranges(broker, args):
shard_ranges = broker.get_shard_ranges()
if not shard_ranges:
print("No shard ranges found to delete.")
return EXIT_SUCCESS
while not args.force:
        print('This will delete the %d existing shard ranges.'
              % len(shard_ranges))
if broker.get_db_state() != UNSHARDED:
print('WARNING: Be very cautious about deleting existing shard '
'ranges. Deleting all ranges in this db does not guarantee '
'deletion of all ranges on all replicas of the db.')
print(' - this db is in state %s' % broker.get_db_state())
print(' - %d existing shard ranges have started sharding' %
[sr.state != ShardRange.FOUND
for sr in shard_ranges].count(True))
try:
choice = input('Do you want to show the existing ranges [s], '
'delete the existing ranges [yes] '
'or quit without deleting [q]? ')
except (EOFError, KeyboardInterrupt):
choice = 'q'
if choice == 's':
show_shard_ranges(broker, args)
continue
elif choice == 'q':
return EXIT_USER_QUIT
elif choice == 'yes':
break
else:
print('Please make a valid choice.')
print()
now = Timestamp.now()
for sr in shard_ranges:
sr.deleted = 1
sr.timestamp = now
broker.merge_shard_ranges(shard_ranges)
print('Deleted %s existing shard ranges.' % len(shard_ranges))
return EXIT_SUCCESS
def merge_shard_ranges(broker, args):
_check_own_shard_range(broker, args)
shard_data = _load_and_validate_shard_data(args, require_index=False)
new_shard_ranges = ShardRangeList([ShardRange.from_dict(sr)
for sr in shard_data])
new_shard_ranges.sort(key=ShardRange.sort_key)
# do some checks before merging...
existing_shard_ranges = ShardRangeList(
broker.get_shard_ranges(include_deleted=True))
outcome = combine_shard_ranges(new_shard_ranges, existing_shard_ranges)
if args.verbose:
print('This change will result in the following shard ranges in the '
'affected namespace:')
print(json.dumps([dict(sr) for sr in outcome], indent=2))
overlaps = find_overlapping_ranges(outcome)
if overlaps:
print('WARNING: this change will result in shard ranges overlaps!')
paths_with_gaps = find_paths_with_gaps(outcome)
gaps = [gap for start_path, gap, end_path in paths_with_gaps
if existing_shard_ranges.includes(gap)]
if gaps:
print('WARNING: this change will result in shard ranges gaps!')
if not _proceed(args):
return EXIT_USER_QUIT
with broker.updated_timeout(args.replace_timeout):
broker.merge_shard_ranges(new_shard_ranges)
print('Injected %d shard ranges.' % len(new_shard_ranges))
print('Run container-replicator to replicate them to other nodes.')
return EXIT_SUCCESS
def _replace_shard_ranges(broker, args, shard_data, timeout=0):
own_shard_range = _check_own_shard_range(broker, args)
shard_ranges = make_shard_ranges(
broker, shard_data, args.shards_account_prefix)
_check_shard_ranges(own_shard_range, shard_ranges)
if args.verbose > 0:
print('New shard ranges to be injected:')
print(json.dumps([dict(sr) for sr in shard_ranges],
sort_keys=True, indent=2))
# Crank up the timeout in an effort to *make sure* this succeeds
with broker.updated_timeout(max(timeout, args.replace_timeout)):
delete_status = delete_shard_ranges(broker, args)
if delete_status != EXIT_SUCCESS:
return delete_status
broker.merge_shard_ranges(shard_ranges)
print('Injected %d shard ranges.' % len(shard_ranges))
print('Run container-replicator to replicate them to other nodes.')
if args.enable:
return enable_sharding(broker, args)
else:
print('Use the enable sub-command to enable sharding.')
return EXIT_SUCCESS
def replace_shard_ranges(broker, args):
shard_data = _load_and_validate_shard_data(args)
return _replace_shard_ranges(broker, args, shard_data)
def find_replace_shard_ranges(broker, args):
shard_data, delta_t = _find_ranges(broker, args, sys.stdout)
# Since we're trying to one-shot this, and the previous step probably
# took a while, make the timeout for writing *at least* that long
return _replace_shard_ranges(broker, args, shard_data, timeout=delta_t)
def _enable_sharding(broker, own_shard_range, args):
if own_shard_range.update_state(ShardRange.SHARDING):
own_shard_range.epoch = Timestamp.now()
own_shard_range.state_timestamp = own_shard_range.epoch
# initialise own_shard_range with current broker object stats...
update_own_shard_range_stats(broker, own_shard_range)
with broker.updated_timeout(args.enable_timeout):
broker.merge_shard_ranges([own_shard_range])
broker.update_metadata({'X-Container-Sysmeta-Sharding':
('True', Timestamp.now().normal)})
return own_shard_range
def enable_sharding(broker, args):
own_shard_range = _check_own_shard_range(broker, args)
_check_shard_ranges(own_shard_range, broker.get_shard_ranges())
if own_shard_range.state == ShardRange.ACTIVE:
own_shard_range = _enable_sharding(broker, own_shard_range, args)
print('Container moved to state %r with epoch %s.' %
(own_shard_range.state_text, own_shard_range.epoch.internal))
elif own_shard_range.state == ShardRange.SHARDING:
if own_shard_range.epoch:
print('Container already in state %r with epoch %s.' %
(own_shard_range.state_text, own_shard_range.epoch.internal))
print('No action required.')
else:
print('Container already in state %r but missing epoch.' %
own_shard_range.state_text)
own_shard_range = _enable_sharding(broker, own_shard_range, args)
print('Container in state %r given epoch %s.' %
(own_shard_range.state_text, own_shard_range.epoch.internal))
else:
print('WARNING: container in state %s (should be active or sharding).'
% own_shard_range.state_text)
print('Aborting.')
return EXIT_ERROR
print('Run container-sharder on all nodes to shard the container.')
return EXIT_SUCCESS
def compact_shard_ranges(broker, args):
if not broker.is_root_container():
print('WARNING: Shard containers cannot be compacted.')
print('This command should be used on a root container.')
return EXIT_ERROR
if not broker.is_sharded():
print('WARNING: Container is not yet sharded so cannot be compacted.')
return EXIT_ERROR
shard_ranges = broker.get_shard_ranges()
if find_overlapping_ranges([sr for sr in shard_ranges if
sr.state != ShardRange.SHRINKING]):
print('WARNING: Container has overlapping shard ranges so cannot be '
'compacted.')
return EXIT_ERROR
compactible = find_compactible_shard_sequences(broker,
args.shrink_threshold,
args.expansion_limit,
args.max_shrinking,
args.max_expanding)
if not compactible:
print('No shards identified for compaction.')
return EXIT_SUCCESS
for sequence in compactible:
if sequence[-1].state not in (ShardRange.ACTIVE, ShardRange.SHARDED):
print('ERROR: acceptor not in correct state: %s' % sequence[-1],
file=sys.stderr)
return EXIT_ERROR
for sequence in compactible:
acceptor = sequence[-1]
donors = sequence[:-1]
print('Donor shard range(s) with total of %d rows:'
% donors.row_count)
for donor in donors:
_print_shard_range(donor, level=1)
print('can be compacted into acceptor shard range:')
_print_shard_range(acceptor, level=1)
print('Total of %d shard sequences identified for compaction.'
% len(compactible))
print('Once applied to the broker these changes will result in shard '
'range compaction the next time the sharder runs.')
if not _proceed(args):
return EXIT_USER_QUIT
process_compactible_shard_sequences(broker, compactible)
print('Updated %s shard sequences for compaction.' % len(compactible))
print('Run container-replicator to replicate the changes to other '
'nodes.')
print('Run container-sharder on all nodes to compact shards.')
return EXIT_SUCCESS
def _remove_illegal_overlapping_donors(
acceptor_path, overlapping_donors, args):
# Check parent-children relationship in overlaps between acceptors and
# donors, remove any overlapping parent or child shard range from donors.
    # Note: we can use set() here, since shard range objects are hashed by
# id and all shard ranges in overlapping_donors are unique already.
parent_child_donors = set()
for acceptor in acceptor_path:
parent_child_donors.update(
[donor for donor in overlapping_donors
if acceptor.is_child_of(donor) or donor.is_child_of(acceptor)])
if parent_child_donors:
overlapping_donors = ShardRangeList(
[sr for sr in overlapping_donors
if sr not in parent_child_donors])
print('%d donor shards ignored due to parent-child relationship '
'checks' % len(parent_child_donors))
# Check minimum age requirement in overlaps between acceptors and donors.
if args.min_shard_age == 0:
return acceptor_path, overlapping_donors
ts_now = Timestamp.now()
    # Remove overlapping donor shard ranges that were created recently,
    # within the 'min_shard_age' age limit.
qualified_donors = ShardRangeList(
[sr for sr in overlapping_donors
if float(sr.timestamp) + args.min_shard_age < float(ts_now)])
young_donors = len(overlapping_donors) - len(qualified_donors)
if young_donors > 0:
print('%d overlapping donor shards ignored due to minimum age '
'limit' % young_donors)
if not qualified_donors:
return acceptor_path, None
# Remove those overlapping donors whose overlapping acceptors were created
# within age limit.
donors_with_young_overlap_acceptor = set()
for acceptor_sr in acceptor_path:
if float(acceptor_sr.timestamp) + args.min_shard_age < float(ts_now):
continue
donors_with_young_overlap_acceptor.update(
[sr for sr in qualified_donors if acceptor_sr.overlaps(sr)])
if donors_with_young_overlap_acceptor:
qualified_donors = ShardRangeList(
[sr for sr in qualified_donors
if sr not in donors_with_young_overlap_acceptor])
print('%d donor shards ignored due to existence of overlapping young '
'acceptors' % len(donors_with_young_overlap_acceptor))
return acceptor_path, qualified_donors
def _find_overlapping_donors(shard_ranges, own_sr, args):
shard_ranges = ShardRangeList(shard_ranges)
if ShardRange.SHARDING in shard_ranges.states:
# This may be over-cautious, but for now we'll avoid dealing with
# SHARDING shards (which by design will temporarily overlap with their
# sub-shards) and require repair to be re-tried once sharding has
        # completed. Note that once a shard range moves from SHARDING to
# SHARDED state and is deleted, some replicas of the shard may still be
# in the process of sharding but we cannot detect that at the root.
raise InvalidStateException('Found shard ranges in sharding state')
if ShardRange.SHRINKING in shard_ranges.states:
# Also stop now if there are SHRINKING shard ranges: we would need to
# ensure that these were not chosen as acceptors, but for now it is
# simpler to require repair to be re-tried once shrinking has
        # completed.
raise InvalidStateException('Found shard ranges in shrinking state')
paths = find_paths(shard_ranges)
ranked_paths = rank_paths(paths, own_sr)
if not (ranked_paths and ranked_paths[0].includes(own_sr)):
# individual paths do not have gaps within them; if no path spans the
# entire namespace then there must be a gap in the shard_ranges
raise GapsFoundException
# simple repair strategy: choose the highest ranked complete sequence and
# shrink all other shard ranges into it
acceptor_path = ranked_paths[0]
acceptor_names = set(sr.name for sr in acceptor_path)
overlapping_donors = ShardRangeList([sr for sr in shard_ranges
if sr.name not in acceptor_names])
# check that the solution makes sense: if the acceptor path has the most
# progressed continuous cleaving, which has reached cleaved_upper, then we
# don't expect any shard ranges beyond cleaved_upper to be in states
# CLEAVED or ACTIVE, otherwise there should have been a better acceptor
# path that reached them.
cleaved_states = {ShardRange.CLEAVED, ShardRange.ACTIVE}
cleaved_upper = acceptor_path.find_lower(
lambda sr: sr.state not in cleaved_states)
beyond_cleaved = acceptor_path.filter(marker=cleaved_upper)
if beyond_cleaved.states.intersection(cleaved_states):
raise InvalidSolutionException(
'Isolated cleaved and/or active shard ranges in acceptor path',
acceptor_path, overlapping_donors)
beyond_cleaved = overlapping_donors.filter(marker=cleaved_upper)
if beyond_cleaved.states.intersection(cleaved_states):
raise InvalidSolutionException(
'Isolated cleaved and/or active shard ranges in donor ranges',
acceptor_path, overlapping_donors)
return _remove_illegal_overlapping_donors(
acceptor_path, overlapping_donors, args)
def _fix_gaps(broker, args, paths_with_gaps):
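    # For each gap, prefer to expand an ACTIVE neighbouring shard range to
    # cover it: the range immediately after the gap if that is ACTIVE,
    # otherwise the range immediately before it. A gap with no ACTIVE
    # neighbour cannot be fixed by this command.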
timestamp = Timestamp.now()
solutions = []
print('Found %d gaps:' % len(paths_with_gaps))
for start_path, gap_range, end_path in paths_with_gaps:
if end_path[0].state == ShardRange.ACTIVE:
expanding_range = end_path[0]
solutions.append((gap_range, expanding_range))
elif start_path[-1].state == ShardRange.ACTIVE:
expanding_range = start_path[-1]
solutions.append((gap_range, expanding_range))
else:
expanding_range = None
print(' gap: %r - %r'
% (gap_range.lower, gap_range.upper))
print(' apparent gap contents:')
for sr in broker.get_shard_ranges(marker=gap_range.lower,
end_marker=gap_range.upper,
include_deleted=True):
_print_shard_range(sr, 3)
if expanding_range:
print(' gap can be fixed by expanding neighbor range:')
_print_shard_range(expanding_range, 3)
else:
print('Warning: cannot fix gap: non-ACTIVE neighbors')
if args.max_expanding >= 0:
solutions = solutions[:args.max_expanding]
# it's possible that an expanding range is used twice, expanding both down
# and up; if so, we only want one copy of it in our merged shard ranges
expanding_ranges = {}
for gap_range, expanding_range in solutions:
expanding_range.expand([gap_range])
expanding_range.timestamp = timestamp
expanding_ranges[expanding_range.name] = expanding_range
print('')
print('Repairs necessary to fill gaps.')
print('The following expanded shard range(s) will be applied to the DB:')
for expanding_range in sorted(expanding_ranges.values(),
key=lambda s: s.lower):
_print_shard_range(expanding_range, 2)
print('')
print(
'It is recommended that no other concurrent changes are made to the \n'
'shard ranges while fixing gaps. If necessary, abort this change \n'
'and stop any auto-sharding processes before repeating this command.'
)
print('')
if not _proceed(args):
return EXIT_USER_QUIT
broker.merge_shard_ranges(list(expanding_ranges.values()))
print('Run container-replicator to replicate the changes to other nodes.')
print('Run container-sharder on all nodes to fill gaps.')
return EXIT_SUCCESS
def repair_gaps(broker, args):
shard_ranges = broker.get_shard_ranges()
paths_with_gaps = find_paths_with_gaps(shard_ranges)
if paths_with_gaps:
return _fix_gaps(broker, args, paths_with_gaps)
else:
print('Found one complete sequence of %d shard ranges with no gaps.'
% len(shard_ranges))
print('No repairs necessary.')
return EXIT_SUCCESS
def print_repair_solution(acceptor_path, overlapping_donors):
print('Donors:')
for donor in sorted(overlapping_donors):
_print_shard_range(donor, level=1)
print('Acceptors:')
for acceptor in acceptor_path:
_print_shard_range(acceptor, level=1)
def find_repair_solution(shard_ranges, own_sr, args):
try:
acceptor_path, overlapping_donors = _find_overlapping_donors(
shard_ranges, own_sr, args)
except GapsFoundException:
print('Found no complete sequence of shard ranges.')
print('Repairs necessary to fill gaps.')
print('Gap filling not supported by this tool. No repairs performed.')
raise
except InvalidStateException as exc:
print('WARNING: %s' % exc)
print('No repairs performed.')
raise
except InvalidSolutionException as exc:
print('ERROR: %s' % exc)
print_repair_solution(exc.acceptor_path, exc.overlapping_donors)
print('No repairs performed.')
raise
if not overlapping_donors:
print('Found one complete sequence of %d shard ranges and no '
'overlapping shard ranges.' % len(acceptor_path))
print('No repairs necessary.')
return None, None
print('Repairs necessary to remove overlapping shard ranges.')
print('Chosen a complete sequence of %d shard ranges with current total '
'of %d object records to accept object records from %d overlapping '
'donor shard ranges.' %
(len(acceptor_path), acceptor_path.object_count,
len(overlapping_donors)))
if args.verbose:
print_repair_solution(acceptor_path, overlapping_donors)
print('Once applied to the broker these changes will result in:')
print(' %d shard ranges being removed.' % len(overlapping_donors))
print(' %d object records being moved to the chosen shard ranges.'
% overlapping_donors.object_count)
return acceptor_path, overlapping_donors
def repair_overlaps(broker, args):
shard_ranges = broker.get_shard_ranges()
if not shard_ranges:
print('No shards found, nothing to do.')
return EXIT_SUCCESS
own_sr = broker.get_own_shard_range()
try:
acceptor_path, overlapping_donors = find_repair_solution(
shard_ranges, own_sr, args)
except ManageShardRangesException:
return EXIT_ERROR
if not acceptor_path:
return EXIT_SUCCESS
if not _proceed(args):
return EXIT_USER_QUIT
# merge changes to the broker...
# note: acceptors do not need to be modified since they already span the
# complete range
ts_now = Timestamp.now()
finalize_shrinking(broker, [], overlapping_donors, ts_now)
print('Updated %s donor shard ranges.' % len(overlapping_donors))
print('Run container-replicator to replicate the changes to other nodes.')
print('Run container-sharder on all nodes to repair shards.')
return EXIT_SUCCESS
def repair_shard_ranges(broker, args):
if not broker.is_root_container():
print('WARNING: Shard containers cannot be repaired.')
print('This command should be used on a root container.')
return EXIT_ERROR
if args.gaps:
return repair_gaps(broker, args)
else:
return repair_overlaps(broker, args)
def analyze_shard_ranges(args):
shard_data = _load_and_validate_shard_data(args, require_index=False)
for data in shard_data:
# allow for incomplete shard range data that may have been scraped from
# swift-container-info output
data.setdefault('epoch', None)
shard_ranges = [ShardRange.from_dict(data) for data in shard_data]
whole_sr = ShardRange('whole/namespace', 0)
try:
find_repair_solution(shard_ranges, whole_sr, args)
except ManageShardRangesException:
return EXIT_ERROR
return EXIT_SUCCESS
def _add_find_args(parser):
parser.add_argument(
'rows_per_shard', nargs='?', type=int, default=USE_SHARDER_DEFAULT,
help='Target number of rows for newly created shards. '
'Default is half of the shard_container_threshold value if that is '
'given in a conf file specified with --config, otherwise %s.'
% DEFAULT_SHARDER_CONF['rows_per_shard'])
parser.add_argument(
'--minimum-shard-size',
type=wrap_for_argparse(config_positive_int_value, 'must be > 0'),
default=USE_SHARDER_DEFAULT,
help='Minimum size of the final shard range. If this is greater than '
'one then the final shard range may be extended to more than '
'rows_per_shard in order to avoid a further shard range with less '
'than minimum-shard-size rows.')
def _add_account_prefix_arg(parser):
parser.add_argument(
'--shards_account_prefix', metavar='shards_account_prefix', type=str,
required=False, default='.shards_',
help="Prefix for shards account. The default is '.shards_'. This "
"should only be changed if the auto_create_account_prefix option "
"has been similarly changed in swift.conf.")
def _add_replace_args(parser):
_add_account_prefix_arg(parser)
parser.add_argument(
'--replace-timeout', type=int, default=600,
help='Minimum DB timeout to use when replacing shard ranges.')
parser.add_argument(
'--force', '-f', action='store_true', default=False,
help='Delete existing shard ranges; no questions asked.')
parser.add_argument(
'--enable', action='store_true', default=False,
help='Enable sharding after adding shard ranges.')
def _add_enable_args(parser):
parser.add_argument(
'--enable-timeout', type=int, default=300,
help='DB timeout to use when enabling sharding.')
def _add_prompt_args(parser):
group = parser.add_mutually_exclusive_group()
group.add_argument(
'--yes', '-y', action='store_true', default=False,
help='Apply shard range changes to broker without prompting. '
'Cannot be used with --dry-run option.')
group.add_argument(
'--dry-run', '-n', action='store_true', default=False,
help='Do not apply any shard range changes to broker. '
'Cannot be used with --yes option.')
def _add_max_expanding_arg(parser):
parser.add_argument(
'--max-expanding', nargs='?',
type=wrap_for_argparse(config_positive_int_value, 'must be > 0'),
default=USE_SHARDER_DEFAULT,
help='Maximum number of shards that should be '
'expanded. Defaults to unlimited.')
def _make_parser():
parser = argparse.ArgumentParser(description='Manage shard ranges')
parser.add_argument('path_to_file',
help='Path to a container DB file or, for the analyze '
'subcommand, a shard data file.')
parser.add_argument('--config', dest='conf_file', required=False,
help='Path to config file with [container-sharder] '
'section. The following subcommand options will '
'be loaded from a config file if they are not '
'given on the command line: '
'rows_per_shard, '
'max_shrinking, '
'max_expanding, '
'shrink_threshold, '
'expansion_limit')
parser.add_argument('--verbose', '-v', action='count', default=0,
help='Increase output verbosity')
# this is useful for probe tests that shard containers with unrealistically
# low numbers of objects, of which a significant proportion may still be in
# the pending file
parser.add_argument(
'--force-commits', action='store_true', default=False,
help='Force broker to commit pending object updates before finding '
'shard ranges. By default the broker will skip commits.')
subparsers = parser.add_subparsers(
dest='subcommand', help='Sub-command help', title='Sub-commands')
# find
find_parser = subparsers.add_parser(
'find', help='Find and display shard ranges')
_add_find_args(find_parser)
find_parser.set_defaults(func=find_ranges)
# delete
delete_parser = subparsers.add_parser(
'delete', help='Delete all existing shard ranges from db')
delete_parser.add_argument(
'--force', '-f', action='store_true', default=False,
help='Delete existing shard ranges; no questions asked.')
delete_parser.set_defaults(func=delete_shard_ranges)
# show
show_parser = subparsers.add_parser(
'show', help='Print shard range data')
show_parser.add_argument(
'--include_deleted', '-d', action='store_true', default=False,
help='Include deleted shard ranges in output.')
show_parser.add_argument(
'--brief', '-b', action='store_true', default=False,
help='Show only shard range bounds in output.')
show_parser.add_argument('--includes',
help='limit shard ranges to include key')
show_parser.set_defaults(func=show_shard_ranges)
# info
info_parser = subparsers.add_parser(
'info', help='Print container db info')
info_parser.set_defaults(func=db_info)
# merge
merge_parser = subparsers.add_parser(
'merge',
help='Merge shard range(s) from file with existing shard ranges. This '
'subcommand should only be used if you are confident that you '
'know what you are doing. Shard ranges should not typically be '
'modified in this way.')
merge_parser.add_argument('input', metavar='input_file',
type=str, help='Name of file')
merge_parser.add_argument(
'--replace-timeout', type=int, default=600,
help='Minimum DB timeout to use when merging shard ranges.')
_add_account_prefix_arg(merge_parser)
_add_prompt_args(merge_parser)
merge_parser.set_defaults(func=merge_shard_ranges)
# replace
replace_parser = subparsers.add_parser(
'replace',
help='Replace existing shard ranges. User will be prompted before '
'deleting any existing shard ranges.')
replace_parser.add_argument('input', metavar='input_file',
type=str, help='Name of file')
_add_replace_args(replace_parser)
replace_parser.set_defaults(func=replace_shard_ranges)
# find_and_replace
find_replace_parser = subparsers.add_parser(
'find_and_replace',
help='Find new shard ranges and replace existing shard ranges. '
'User will be prompted before deleting any existing shard ranges.'
)
_add_find_args(find_replace_parser)
_add_replace_args(find_replace_parser)
_add_enable_args(find_replace_parser)
find_replace_parser.set_defaults(func=find_replace_shard_ranges)
# enable
enable_parser = subparsers.add_parser(
'enable', help='Enable sharding and move db to sharding state.')
_add_enable_args(enable_parser)
enable_parser.set_defaults(func=enable_sharding)
_add_replace_args(enable_parser)
# compact
compact_parser = subparsers.add_parser(
'compact',
help='Compact shard ranges with less than the shrink-threshold number '
'of rows. This command only works on root containers.')
_add_prompt_args(compact_parser)
compact_parser.add_argument(
'--shrink-threshold', nargs='?',
type=wrap_for_argparse(config_positive_int_value, 'must be > 0'),
default=USE_SHARDER_DEFAULT,
help='The number of rows below which a shard can qualify for '
'shrinking. '
'Defaults to %d' % DEFAULT_SHARDER_CONF['shrink_threshold'])
compact_parser.add_argument(
'--expansion-limit', nargs='?',
type=wrap_for_argparse(config_positive_int_value, 'must be > 0'),
default=USE_SHARDER_DEFAULT,
help='Maximum number of rows for an expanding shard to have after '
'compaction has completed. '
'Defaults to %d' % DEFAULT_SHARDER_CONF['expansion_limit'])
# If just one donor shard is chosen to shrink to an acceptor then the
# expanded acceptor will handle object listings as soon as the donor shard
    # has shrunk. If more than one donor shard is chosen to shrink to an
# acceptor then the acceptor may not handle object listings for some donor
# shards that have shrunk until *all* donors have shrunk, resulting in
# temporary gap(s) in object listings where the shrunk donors are missing.
compact_parser.add_argument(
'--max-shrinking', nargs='?',
type=wrap_for_argparse(config_positive_int_value, 'must be > 0'),
default=USE_SHARDER_DEFAULT,
help='Maximum number of shards that should be '
'shrunk into each expanding shard. '
'Defaults to 1. Using values greater '
'than 1 may result in temporary gaps in '
'object listings until all selected '
'shards have shrunk.')
_add_max_expanding_arg(compact_parser)
compact_parser.set_defaults(func=compact_shard_ranges)
# repair
repair_parser = subparsers.add_parser(
'repair',
help='Repair overlapping shard ranges. No action will be taken '
'without user confirmation unless the -y option is used.')
_add_prompt_args(repair_parser)
repair_parser.add_argument(
'--min-shard-age', nargs='?',
type=wrap_for_argparse(non_negative_int, 'must be >= 0'),
default=MIN_SHARD_RANGE_AGE_FOR_REPAIR,
help='Minimum age of a shard for it to be considered as an overlap '
'that is due for repair. Overlapping shards younger than this '
'age will be ignored. Value of 0 means no recent shards will be '
'ignored. Defaults to %d.' % MIN_SHARD_RANGE_AGE_FOR_REPAIR)
# TODO: maybe this should be a separate subcommand given that it needs
# some extra options vs repairing overlaps?
repair_parser.add_argument(
'--gaps', action='store_true', default=False,
help='Repair gaps in shard ranges.')
_add_max_expanding_arg(repair_parser)
repair_parser.set_defaults(func=repair_shard_ranges)
# analyze
analyze_parser = subparsers.add_parser(
'analyze',
help='Analyze shard range json data read from file. Use -v to see '
'more detailed analysis.')
analyze_parser.add_argument(
'--min-shard-age', nargs='?',
type=wrap_for_argparse(non_negative_int, 'must be >= 0'),
default=0,
help='Minimum age of a shard for it to be considered as an overlap '
'that is due for repair. Overlapping shards younger than this '
'age will be ignored. Value of 0 means no recent shards will be '
'ignored. Defaults to 0.')
analyze_parser.set_defaults(func=analyze_shard_ranges)
return parser
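# Illustrative sketch (not part of upstream Swift): the parser built above
# can be exercised directly to preview how sub-command options are parsed,
# without touching a container DB. The DB path and option values below are
# assumptions chosen for the example.
#
#   parser = _make_parser()
#   args = parser.parse_args(
#       ['/srv/node/sda/containers/0/abc/hash/hash.db',
#        'compact', '--dry-run', '--max-shrinking', '2'])
#   assert args.subcommand == 'compact' and args.dry_run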
def main(cli_args=None):
parser = _make_parser()
args = parser.parse_args(cli_args)
if not args.subcommand:
# On py2, subparsers are required; on py3 they are not; see
# https://bugs.python.org/issue9253. py37 added a `required` kwarg
# to let you control it, but prior to that, there was no choice in
# the matter. So, check whether the destination was set and bomb
# out if not.
parser.print_help()
print('\nA sub-command is required.', file=sys.stderr)
return EXIT_INVALID_ARGS
try:
conf = {}
if args.conf_file:
conf = readconf(args.conf_file, 'container-sharder')
conf.update(dict((k, v) for k, v in vars(args).items()
if v != USE_SHARDER_DEFAULT))
conf_args = ContainerSharderConf(conf)
except (OSError, IOError) as exc:
print('Error opening config file %s: %s' % (args.conf_file, exc),
file=sys.stderr)
return EXIT_ERROR
except (TypeError, ValueError) as exc:
print('Error loading config: %s' % exc, file=sys.stderr)
return EXIT_INVALID_ARGS
for k, v in vars(args).items():
# set any un-set cli args from conf_args
if v is USE_SHARDER_DEFAULT:
setattr(args, k, getattr(conf_args, k))
try:
ContainerSharderConf.validate_conf(args)
except ValueError as err:
print('Invalid config: %s' % err, file=sys.stderr)
return EXIT_INVALID_ARGS
if args.func in (analyze_shard_ranges,):
args.input = args.path_to_file
return args.func(args) or 0
logger = get_logger({}, name='ContainerBroker', log_to_console=True)
broker = ContainerBroker(os.path.realpath(args.path_to_file),
logger=logger,
skip_commits=not args.force_commits)
try:
broker.get_info()
except Exception as exc:
print('Error opening container DB %s: %s' % (args.path_to_file, exc),
file=sys.stderr)
return EXIT_ERROR
print('Loaded db broker for %s' % broker.path, file=sys.stderr)
return args.func(broker, args)
if __name__ == '__main__':
exit(main())
| swift-master | swift/cli/manage_shard_ranges.py |
# Copyright (c) 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
``swift-ring-composer`` is an experimental tool for building a composite ring
file from other existing component ring builder files. Its CLI, name or
implementation may change or be removed altogether in future versions of Swift.
Currently its interface is similar to that of the ``swift-ring-builder``. The
command structure takes the form of::
swift-ring-composer <composite builder file> <sub-command> <options>
where ``<composite builder file>`` is a special builder which stores a json
blob of composite ring metadata. This metadata describes the component
``RingBuilder``'s used in the composite ring, their order and version.
There are currently 2 sub-commands: ``show`` and ``compose``. The ``show``
sub-command takes no additional arguments and displays the current contents
of the composite builder file::
swift-ring-composer <composite builder file> show
The ``compose`` sub-command is the one that actually stitches the component
ring builders together to create both the composite ring file and composite
builder file. The command takes the form::
swift-ring-composer <composite builder file> compose <builder1> \\
<builder2> [<builder3> .. <builderN>] --output <composite ring file> \\
[--force]
It may look like there is a lot going on there, but it's actually quite
simple. The ``compose`` command takes in the list of builders to stitch
together and the filename for the composite ring file via the ``--output``
option. The ``--force`` option overrides checks on the ring composition.
To change ring devices, first add or remove devices from the component ring
builders and then use the ``compose`` sub-command to create a new composite
ring file.
.. note::
``swift-ring-builder`` cannot be used to inspect the generated composite
ring file because there is no conventional builder file corresponding to
the composite ring file name. You can either programmatically look inside
the composite ring file using the swift ring classes or create a temporary
builder file from the composite ring file using::
swift-ring-builder <composite ring file> write_builder
Do not use this builder file to manage ring devices.
For further details use::
swift-ring-composer -h
"""
from __future__ import print_function
import argparse
import json
import os
import sys
from swift.common.ring.composite_builder import CompositeRingBuilder
EXIT_SUCCESS = 0
EXIT_ERROR = 2
WARNING = """
NOTE: This tool is for experimental use and may be
removed in future versions of Swift.
"""
DESCRIPTION = """
This is a tool for building a composite ring file from other existing ring
builder files. The component ring builders must all have the same partition
power. Each device must only be used in a single component builder. Each region
must only be used in a single component builder.
"""
def _print_to_stderr(msg):
print(msg, file=sys.stderr)
def _print_err(msg, err):
_print_to_stderr('%s\nOriginal exception message:\n%s' % (msg, err))
def show(composite_builder, args):
print(json.dumps(composite_builder.to_dict(), indent=4, sort_keys=True))
return EXIT_SUCCESS
def compose(composite_builder, args):
composite_builder = composite_builder or CompositeRingBuilder()
try:
ring_data = composite_builder.compose(
args.builder_files, force=args.force, require_modified=True)
except Exception as err:
_print_err(
'An error occurred while composing the ring.', err)
return EXIT_ERROR
try:
ring_data.save(args.output)
except Exception as err:
_print_err(
'An error occurred while writing the composite ring file.', err)
return EXIT_ERROR
try:
composite_builder.save(args.composite_builder_file)
except Exception as err:
_print_err(
'An error occurred while writing the composite builder file.', err)
return EXIT_ERROR
return EXIT_SUCCESS
def main(arguments=None):
if arguments is not None:
argv = arguments
else:
argv = sys.argv
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
'composite_builder_file',
metavar='composite_builder_file', type=str,
help='Name of composite builder file')
subparsers = parser.add_subparsers(
help='subcommand help', title='subcommands')
# show
show_parser = subparsers.add_parser(
'show', help='show composite ring builder metadata')
show_parser.set_defaults(func=show)
# compose
compose_parser = subparsers.add_parser(
'compose', help='compose composite ring',
usage='%(prog)s [-h] '
              'builder_file builder_file [builder_file ...] '
'--output ring_file [--force]')
bf_help = ('Paths to component ring builder files to include in composite '
'ring')
compose_parser.add_argument('builder_files', metavar='builder_file',
nargs='*', type=str, help=bf_help)
compose_parser.add_argument('--output', metavar='output_file', type=str,
required=True, help='Name of output ring file')
compose_parser.add_argument(
'--force', action='store_true',
help='Force new composite ring file to be written')
compose_parser.set_defaults(func=compose)
_print_to_stderr(WARNING)
args = parser.parse_args(argv[1:])
composite_builder = None
if args.func != compose or os.path.exists(args.composite_builder_file):
try:
composite_builder = CompositeRingBuilder.load(
args.composite_builder_file)
except Exception as err:
_print_err(
'An error occurred while loading the composite builder file.',
err)
exit(EXIT_ERROR)
exit(args.func(composite_builder, args))
if __name__ == '__main__':
main()
| swift-master | swift/cli/ringcomposer.py |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import errno
import fcntl
import json
import logging
import os
import time
from collections import defaultdict
from eventlet import hubs
from swift.common.exceptions import LockTimeout
from swift.common.storage_policy import POLICIES
from swift.common.utils import replace_partition_in_path, config_true_value, \
audit_location_generator, get_logger, readconf, drop_privileges, \
RateLimitedIterator, PrefixLoggerAdapter, distribute_evenly, \
non_negative_float, non_negative_int, config_auto_int_value, \
dump_recon_cache, get_partition_from_path, get_hub
from swift.obj import diskfile
from swift.common.recon import RECON_RELINKER_FILE, DEFAULT_RECON_CACHE_PATH
LOCK_FILE = '.relink.{datadir}.lock'
STATE_FILE = 'relink.{datadir}.json'
STATE_TMP_FILE = '.relink.{datadir}.json.tmp'
STEP_RELINK = 'relink'
STEP_CLEANUP = 'cleanup'
EXIT_SUCCESS = 0
EXIT_NO_APPLICABLE_POLICY = 2
EXIT_ERROR = 1
DEFAULT_STATS_INTERVAL = 300.0
def recursive_defaultdict():
return defaultdict(recursive_defaultdict)
def policy(policy_name_or_index):
value = POLICIES.get_by_name_or_index(policy_name_or_index)
if value is None:
raise ValueError
return value
def _aggregate_stats(base_stats, update_stats):
for key, value in update_stats.items():
base_stats.setdefault(key, 0)
base_stats[key] += value
return base_stats
def _aggregate_recon_stats(base_stats, updated_stats):
for k, v in updated_stats.items():
if k == 'stats':
base_stats['stats'] = _aggregate_stats(base_stats['stats'], v)
elif k == "start_time":
base_stats[k] = min(base_stats.get(k, v), v)
elif k in ("timestamp", "total_time"):
base_stats[k] = max(base_stats.get(k, 0), v)
elif k in ('parts_done', 'total_parts'):
base_stats[k] += v
return base_stats
def _zero_stats():
return {
'hash_dirs': 0,
'files': 0,
'linked': 0,
'removed': 0,
'errors': 0}
def _zero_collated_stats():
return {
'parts_done': 0,
'total_parts': 0,
'total_time': 0,
'stats': _zero_stats()}
class Relinker(object):
def __init__(self, conf, logger, device_list=None, do_cleanup=False):
self.conf = conf
self.recon_cache = os.path.join(self.conf['recon_cache_path'],
RECON_RELINKER_FILE)
self.logger = logger
self.device_list = device_list or []
self.do_cleanup = do_cleanup
self.root = self.conf['devices']
if len(self.device_list) == 1:
self.root = os.path.join(self.root, list(self.device_list)[0])
self.part_power = self.next_part_power = None
self.diskfile_mgr = None
self.dev_lock = None
self._last_recon_update = time.time()
self.stats_interval = float(conf.get(
'stats_interval', DEFAULT_STATS_INTERVAL))
self.diskfile_router = diskfile.DiskFileRouter(self.conf, self.logger)
self.stats = _zero_stats()
self.devices_data = recursive_defaultdict()
self.policy_count = 0
self.pid = os.getpid()
self.linked_into_partitions = set()
def _aggregate_dev_policy_stats(self):
for dev_data in self.devices_data.values():
dev_data.update(_zero_collated_stats())
for policy_data in dev_data.get('policies', {}).values():
_aggregate_recon_stats(dev_data, policy_data)
def _update_recon(self, device=None, force_dump=False):
if not force_dump and self._last_recon_update + self.stats_interval \
> time.time():
# not time yet!
return
if device:
# dump recon stats for the device
num_parts_done = sum(
1 for part_done in self.states["state"].values()
if part_done)
num_total_parts = len(self.states["state"])
step = STEP_CLEANUP if self.do_cleanup else STEP_RELINK
policy_dev_progress = {'step': step,
'parts_done': num_parts_done,
'total_parts': num_total_parts,
'timestamp': time.time()}
self.devices_data[device]['policies'][self.policy.idx].update(
policy_dev_progress)
# aggregate device policy level values into device level
self._aggregate_dev_policy_stats()
# We want to periodically update the worker recon timestamp so we know
# it's still running
recon_data = self._update_worker_stats(recon_dump=False)
recon_data.update({'devices': self.devices_data})
if device:
self.logger.debug("Updating recon for %s", device)
else:
self.logger.debug("Updating recon")
self._last_recon_update = time.time()
dump_recon_cache(recon_data, self.recon_cache, self.logger)
@property
def total_errors(self):
# first make sure the policy data is aggregated down to the device
# level
self._aggregate_dev_policy_stats()
return sum([sum([
dev.get('stats', {}).get('errors', 0),
dev.get('stats', {}).get('unmounted', 0),
dev.get('stats', {}).get('unlistable_partitions', 0)])
for dev in self.devices_data.values()])
def devices_filter(self, _, devices):
if self.device_list:
devices = [d for d in devices if d in self.device_list]
return set(devices)
def hook_pre_device(self, device_path):
lock_file = os.path.join(device_path,
LOCK_FILE.format(datadir=self.datadir))
fd = os.open(lock_file, os.O_CREAT | os.O_WRONLY)
fcntl.flock(fd, fcntl.LOCK_EX)
self.dev_lock = fd
state_file = os.path.join(device_path,
STATE_FILE.format(datadir=self.datadir))
self.states["state"].clear()
try:
with open(state_file, 'rt') as f:
state_from_disk = json.load(f)
if state_from_disk["next_part_power"] != \
self.states["next_part_power"]:
raise ValueError
on_disk_part_power = state_from_disk["part_power"]
if on_disk_part_power != self.states["part_power"]:
self.states["prev_part_power"] = on_disk_part_power
raise ValueError
self.states["state"].update(state_from_disk["state"])
except (ValueError, TypeError, KeyError):
# Bad state file: remove the file to restart from scratch
os.unlink(state_file)
except IOError as err:
# Ignore file not found error
if err.errno != errno.ENOENT:
raise
# initialise the device in recon.
device = os.path.basename(device_path)
self.devices_data[device]['policies'][self.policy.idx] = {
'start_time': time.time(), 'stats': _zero_stats(),
'part_power': self.states["part_power"],
'next_part_power': self.states["next_part_power"]}
self.stats = \
self.devices_data[device]['policies'][self.policy.idx]['stats']
self._update_recon(device)
def hook_post_device(self, device_path):
os.close(self.dev_lock)
self.dev_lock = None
device = os.path.basename(device_path)
pol_stats = self.devices_data[device]['policies'][self.policy.idx]
total_time = time.time() - pol_stats['start_time']
pol_stats.update({'total_time': total_time, 'stats': self.stats})
self._update_recon(device, force_dump=True)
def partitions_filter(self, datadir_path, partitions):
# Remove all non partitions first (eg: auditor_status_ALL.json)
partitions = [p for p in partitions if p.isdigit()]
relinking = (self.part_power != self.next_part_power)
if relinking:
# All partitions in the upper half are new partitions and there is
# nothing to relink there
partitions = [part for part in partitions
if int(part) < 2 ** self.part_power]
elif "prev_part_power" in self.states:
# All partitions in the upper half are new partitions and there is
# nothing to clean up there
partitions = [part for part in partitions
if int(part) < 2 ** self.states["prev_part_power"]]
# Format: { 'part': processed }
if self.states["state"]:
missing = list(set(partitions) - set(self.states["state"].keys()))
if missing:
# All missing partitions were created after the first run of
# the relinker with this part_power/next_part_power pair. This
# is expected when relinking, where new partitions appear that
# are appropriate for the target part power. In such cases,
# there's nothing to be done. Err on the side of caution
# during cleanup, however.
for part in missing:
self.states["state"][part] = relinking
partitions = [
str(part) for part, processed in self.states["state"].items()
if not processed]
else:
self.states["state"].update({
str(part): False for part in partitions})
# Always scan the partitions in reverse order to minimize the amount
# of IO (it actually only matters for relink, not for cleanup).
#
# Initial situation:
# objects/0/000/00000000...00000000/12345.data
# -> relinked to objects/1/000/10000000...00000000/12345.data
#
        # If the relinker then scanned partition 1, it would needlessly
        # listdir that object's hash dir. By working in reverse order of
        # partitions, this is avoided.
partitions = sorted(partitions, key=int, reverse=True)
# do this last so that self.states, and thus the state file, has been
# initiated with *all* partitions before partitions are restricted for
# this particular run...
conf_partitions = self.conf.get('partitions')
if conf_partitions:
partitions = [p for p in partitions if int(p) in conf_partitions]
return partitions
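    # Illustrative shape of the per-device state tracked above (partition
    # numbers and part powers are assumed for the example): self.states ends
    # up looking like
    #   {'part_power': 10, 'next_part_power': 11,
    #    'state': {'1023': True, '1022': False, ...}}
    # and only partitions still mapped to False are returned for processing,
    # in descending numeric order.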
def hook_pre_partition(self, partition_path):
self.pre_partition_errors = self.total_errors
self.linked_into_partitions = set()
def hook_post_partition(self, partition_path):
datadir_path, partition = os.path.split(
os.path.abspath(partition_path))
device_path, datadir_name = os.path.split(datadir_path)
device = os.path.basename(device_path)
state_tmp_file = os.path.join(
device_path, STATE_TMP_FILE.format(datadir=datadir_name))
state_file = os.path.join(
device_path, STATE_FILE.format(datadir=datadir_name))
# We started with a partition space like
# |0 N|
# |ABCDEFGHIJKLMNOP|
#
# After relinking, it will be more like
# |0 2N|
# |AABBCCDDEEFFGGHHIIJJKKLLMMNNOOPP|
#
# We want to hold off on rehashing until after cleanup, since that is
# the point at which we've finished with filesystem manipulations. But
# there's a slight complication: we know the upper half has nothing to
# clean up, so the cleanup phase only looks at
# |0 2N|
# |AABBCCDDEEFFGGHH |
#
# To ensure that the upper half gets rehashed, too, do it as part of
# relinking; as we finish
# |0 N|
# | IJKLMNOP|
# shift to the new partition space and rehash
# |0 2N|
# | IIJJKKLLMMNNOOPP|
for dirty_partition in self.linked_into_partitions:
if self.do_cleanup or \
dirty_partition >= 2 ** self.states['part_power']:
self.diskfile_mgr.get_hashes(
device, dirty_partition, [], self.policy)
if self.do_cleanup:
try:
hashes = self.diskfile_mgr.get_hashes(
device, int(partition), [], self.policy)
except LockTimeout:
hashes = 1 # truthy, but invalid
# In any reasonably-large cluster, we'd expect all old
# partitions P to be empty after cleanup (i.e., it's unlikely
# that there's another partition Q := P//2 that also has data
# on this device).
#
# Try to clean up empty partitions now, so operators can use
# existing rebalance-complete metrics to monitor relinking
# progress (provided there are few/no handoffs when relinking
# starts and little data is written to handoffs during the
# increase).
if not hashes:
try:
with self.diskfile_mgr.replication_lock(
device, self.policy, partition), \
self.diskfile_mgr.partition_lock(
device, self.policy, partition):
# Order here is somewhat important for crash-tolerance
for f in ('hashes.pkl', 'hashes.invalid', '.lock',
'.lock-replication'):
try:
os.unlink(os.path.join(partition_path, f))
except OSError as e:
if e.errno != errno.ENOENT:
raise
# Note that as soon as we've deleted the lock files, some
# other process could come along and make new ones -- so
# this may well complain that the directory is not empty
os.rmdir(partition_path)
except (OSError, LockTimeout):
# Most likely, some data landed in here or we hit an error
# above. Let the replicator deal with things; it was worth
# a shot.
pass
# If there were no errors, mark this partition as done. This is handy
# in case the process is interrupted and needs to resume, or there
# were errors and the relinker needs to run again.
if self.pre_partition_errors == self.total_errors:
self.states["state"][partition] = True
with open(state_tmp_file, 'wt') as f:
json.dump(self.states, f)
os.fsync(f.fileno())
os.rename(state_tmp_file, state_file)
num_parts_done = sum(
1 for part in self.states["state"].values()
if part)
step = STEP_CLEANUP if self.do_cleanup else STEP_RELINK
num_total_parts = len(self.states["state"])
self.logger.info(
"Step: %s Device: %s Policy: %s Partitions: %d/%d",
step, device, self.policy.name, num_parts_done, num_total_parts)
self._update_recon(device)
    def hashes_filter(self, suff_path, hashes):
        # Iterate over a copy of the list so that removing an entry does not
        # cause the loop to skip the hash that follows it.
        hashes = list(hashes)
        for hsh in list(hashes):
            fname = os.path.join(suff_path, hsh)
            if fname == replace_partition_in_path(
                    self.conf['devices'], fname, self.next_part_power):
                hashes.remove(hsh)
        return hashes
def process_location(self, hash_path, new_hash_path):
# Compare the contents of each hash dir with contents of same hash
# dir in its new partition to verify that the new location has the
# most up to date set of files. The new location may have newer
# files if it has been updated since relinked.
self.stats['hash_dirs'] += 1
# Get on disk data for new and old locations, cleaning up any
# reclaimable or obsolete files in each. The new location is
# cleaned up *before* the old location to prevent false negatives
# where the old still has a file that has been cleaned up in the
# new; cleaning up the new location first ensures that the old will
# always be 'cleaner' than the new.
new_df_data = self.diskfile_mgr.cleanup_ondisk_files(new_hash_path)
old_df_data = self.diskfile_mgr.cleanup_ondisk_files(hash_path)
# Now determine the most up to date set of on disk files would be
# given the content of old and new locations...
new_files = set(new_df_data['files'])
old_files = set(old_df_data['files'])
union_files = new_files.union(old_files)
union_data = self.diskfile_mgr.get_ondisk_files(
union_files, '', verify=False)
obsolete_files = set(info['filename']
for info in union_data.get('obsolete', []))
# drop 'obsolete' files but retain 'unexpected' files which might
# be misplaced diskfiles from another policy
required_files = union_files.difference(obsolete_files)
required_links = required_files.intersection(old_files)
missing_links = 0
created_links = 0
unwanted_files = []
for filename in required_links:
# Before removing old files, be sure that the corresponding
# required new files exist by calling relink_paths again. There
# are several possible outcomes:
# - The common case is that the new file exists, in which case
# relink_paths checks that the new file has the same inode
# as the old file. An exception is raised if the inode of
# the new file is not the same as the old file.
# - The new file may not exist because the relinker failed to
# create the link to the old file and has erroneously moved
# on to cleanup. In this case the relink_paths will create
# the link now or raise an exception if that fails.
# - The new file may not exist because some other process,
# such as an object server handling a request, has cleaned
# it up since we called cleanup_ondisk_files(new_hash_path).
# In this case a new link will be created to the old file.
# This is unnecessary but simpler than repeating the
# evaluation of what links are now required and safer than
# assuming that a non-existent file that *was* required is
# no longer required. The new file will eventually be
# cleaned up again.
self.stats['files'] += 1
old_file = os.path.join(hash_path, filename)
new_file = os.path.join(new_hash_path, filename)
try:
if diskfile.relink_paths(old_file, new_file):
self.logger.debug(
"Relinking%s created link: %s to %s",
' (cleanup)' if self.do_cleanup else '',
old_file, new_file)
created_links += 1
self.stats['linked'] += 1
except OSError as exc:
if exc.errno == errno.EEXIST and filename.endswith('.ts'):
# special case for duplicate tombstones, see:
# https://bugs.launchpad.net/swift/+bug/1921718
# https://bugs.launchpad.net/swift/+bug/1934142
self.logger.debug(
"Relinking%s: tolerating different inodes for "
"tombstone with same timestamp: %s to %s",
' (cleanup)' if self.do_cleanup else '',
old_file, new_file)
else:
self.logger.warning(
"Error relinking%s: failed to relink %s to %s: %s",
' (cleanup)' if self.do_cleanup else '',
old_file, new_file, exc)
self.stats['errors'] += 1
missing_links += 1
if created_links:
self.linked_into_partitions.add(get_partition_from_path(
self.conf['devices'], new_hash_path))
try:
diskfile.invalidate_hash(os.path.dirname(new_hash_path))
except (Exception, LockTimeout) as exc:
# at this point, the link's created. even if we counted it as
# an error, a subsequent run wouldn't find any work to do. so,
# don't bother; instead, wait for replication to be re-enabled
# so post-replication rehashing or periodic rehashing can
# eventually pick up the change
self.logger.warning(
'Error invalidating suffix for %s: %r',
new_hash_path, exc)
if self.do_cleanup and not missing_links:
# use the sorted list to help unit testing
unwanted_files = old_df_data['files']
# the new partition hash dir has the most up to date set of on
# disk files so it is safe to delete the old location...
rehash = False
for filename in unwanted_files:
old_file = os.path.join(hash_path, filename)
try:
os.remove(old_file)
except OSError as exc:
self.logger.warning('Error cleaning up %s: %r', old_file, exc)
self.stats['errors'] += 1
else:
rehash = True
self.stats['removed'] += 1
self.logger.debug("Removed %s", old_file)
if rehash:
# Even though we're invalidating the suffix, don't update
# self.linked_into_partitions -- we only care about them for
# relinking into the new part-power space
try:
diskfile.invalidate_hash(os.path.dirname(hash_path))
except (Exception, LockTimeout) as exc:
# note: not counted as an error
self.logger.warning(
'Error invalidating suffix for %s: %r',
hash_path, exc)
def place_policy_stat(self, dev, policy, stat, value):
stats = self.devices_data[dev]['policies'][policy.idx].setdefault(
"stats", _zero_stats())
stats[stat] = stats.get(stat, 0) + value
def process_policy(self, policy):
self.logger.info(
'Processing files for policy %s under %s (cleanup=%s)',
policy.name, self.root, self.do_cleanup)
self.part_power = policy.object_ring.part_power
self.next_part_power = policy.object_ring.next_part_power
self.diskfile_mgr = self.diskfile_router[policy]
self.datadir = diskfile.get_data_dir(policy)
self.states = {
"part_power": self.part_power,
"next_part_power": self.next_part_power,
"state": {},
}
audit_stats = {}
locations = audit_location_generator(
self.conf['devices'],
self.datadir,
mount_check=self.conf['mount_check'],
devices_filter=self.devices_filter,
hook_pre_device=self.hook_pre_device,
hook_post_device=self.hook_post_device,
partitions_filter=self.partitions_filter,
hook_pre_partition=self.hook_pre_partition,
hook_post_partition=self.hook_post_partition,
hashes_filter=self.hashes_filter,
logger=self.logger,
error_counter=audit_stats,
yield_hash_dirs=True
)
if self.conf['files_per_second'] > 0:
locations = RateLimitedIterator(
locations, self.conf['files_per_second'])
for hash_path, device, partition in locations:
# note, in cleanup step next_part_power == part_power
new_hash_path = replace_partition_in_path(
self.conf['devices'], hash_path, self.next_part_power)
if new_hash_path == hash_path:
continue
self.process_location(hash_path, new_hash_path)
        # Any unmounted devices don't trigger the pre_device hook,
        # so we'll deal with them here.
for dev in audit_stats.get('unmounted', []):
self.place_policy_stat(dev, policy, 'unmounted', 1)
        # Similarly, unlistable_partitions don't trigger the post_device
        # hook, so we also need to deal with them here.
for datadir in audit_stats.get('unlistable_partitions', []):
device_path, _ = os.path.split(datadir)
device = os.path.basename(device_path)
self.place_policy_stat(device, policy, 'unlistable_partitions', 1)
def _update_worker_stats(self, recon_dump=True, return_code=None):
worker_stats = {'devices': self.device_list,
'timestamp': time.time(),
'return_code': return_code}
worker_data = {"workers": {str(self.pid): worker_stats}}
if recon_dump:
dump_recon_cache(worker_data, self.recon_cache, self.logger)
return worker_data
def run(self):
num_policies = 0
self._update_worker_stats()
for policy in self.conf['policies']:
self.policy = policy
policy.object_ring = None # Ensure it will be reloaded
policy.load_ring(self.conf['swift_dir'])
ring = policy.object_ring
if not ring.next_part_power:
continue
part_power_increased = ring.next_part_power == ring.part_power
if self.do_cleanup != part_power_increased:
continue
num_policies += 1
self.process_policy(policy)
# Some stat collation happens during _update_recon and we want to force
# this to happen at the end of the run
self._update_recon(force_dump=True)
if not num_policies:
self.logger.warning(
"No policy found to increase the partition power.")
self._update_worker_stats(return_code=EXIT_NO_APPLICABLE_POLICY)
return EXIT_NO_APPLICABLE_POLICY
if self.total_errors > 0:
log_method = self.logger.warning
# NB: audit_location_generator logs unmounted disks as warnings,
# but we want to treat them as errors
status = EXIT_ERROR
else:
log_method = self.logger.info
status = EXIT_SUCCESS
stats = _zero_stats()
for dev_stats in self.devices_data.values():
stats = _aggregate_stats(stats, dev_stats.get('stats', {}))
hash_dirs = stats.pop('hash_dirs')
files = stats.pop('files')
linked = stats.pop('linked')
removed = stats.pop('removed')
action_errors = stats.pop('errors')
unmounted = stats.pop('unmounted', 0)
if unmounted:
self.logger.warning('%d disks were unmounted', unmounted)
listdir_errors = stats.pop('unlistable_partitions', 0)
if listdir_errors:
self.logger.warning(
'There were %d errors listing partition directories',
listdir_errors)
if stats:
self.logger.warning(
'There were unexpected errors while enumerating disk '
'files: %r', stats)
log_method(
'%d hash dirs processed (cleanup=%s) (%d files, %d linked, '
'%d removed, %d errors)', hash_dirs, self.do_cleanup, files,
linked, removed, action_errors + listdir_errors)
self._update_worker_stats(return_code=status)
return status
def _reset_recon(recon_cache, logger):
device_progress_recon = {'devices': {}, 'workers': {}}
dump_recon_cache(device_progress_recon, recon_cache, logger)
def parallel_process(do_cleanup, conf, logger=None, device_list=None):
logger = logger or logging.getLogger()
# initialise recon dump for collection
    # Let's start by always deleting last run's stats
recon_cache = os.path.join(conf['recon_cache_path'], RECON_RELINKER_FILE)
_reset_recon(recon_cache, logger)
device_list = sorted(set(device_list or os.listdir(conf['devices'])))
workers = conf['workers']
if workers == 'auto':
workers = len(device_list)
else:
workers = min(workers, len(device_list))
start = time.time()
logger.info('Starting relinker (cleanup=%s) using %d workers: %s' %
(do_cleanup, workers,
time.strftime('%X %x %Z', time.gmtime(start))))
if workers == 0 or len(device_list) in (0, 1):
ret = Relinker(
conf, logger, device_list, do_cleanup=do_cleanup).run()
logger.info('Finished relinker (cleanup=%s): %s (%s elapsed)' %
(do_cleanup, time.strftime('%X %x %Z', time.gmtime()),
datetime.timedelta(seconds=time.time() - start)))
return ret
children = {}
for worker_devs in distribute_evenly(device_list, workers):
pid = os.fork()
if pid == 0:
dev_logger = PrefixLoggerAdapter(logger, {})
dev_logger.set_prefix('[pid=%s, devs=%s] ' % (
os.getpid(), ','.join(worker_devs)))
os._exit(Relinker(
conf, dev_logger, worker_devs, do_cleanup=do_cleanup).run())
else:
children[pid] = worker_devs
final_status = EXIT_SUCCESS
final_messages = []
while children:
pid, status = os.wait()
sig = status & 0xff
status = status >> 8
time_delta = time.time() - start
devs = children.pop(pid, ['unknown device'])
worker_desc = '(pid=%s, devs=%s)' % (pid, ','.join(devs))
if sig != 0:
final_status = EXIT_ERROR
final_messages.append(
'Worker %s exited in %.1fs after receiving signal: %s'
% (worker_desc, time_delta, sig))
continue
if status == EXIT_SUCCESS:
continue
if status == EXIT_NO_APPLICABLE_POLICY:
if final_status == EXIT_SUCCESS:
final_status = status
continue
final_status = EXIT_ERROR
if status == EXIT_ERROR:
final_messages.append(
'Worker %s completed in %.1fs with errors'
% (worker_desc, time_delta))
else:
final_messages.append(
'Worker %s exited in %.1fs with unexpected status %s'
% (worker_desc, time_delta, status))
for msg in final_messages:
logger.warning(msg)
logger.info('Finished relinker (cleanup=%s): %s (%s elapsed)' %
(do_cleanup, time.strftime('%X %x %Z', time.gmtime()),
datetime.timedelta(seconds=time.time() - start)))
return final_status
def auto_or_int(value):
return config_auto_int_value(value, default='auto')
def main(args):
parser = argparse.ArgumentParser(
description='Relink and cleanup objects to increase partition power')
parser.add_argument('action', choices=['relink', 'cleanup'])
parser.add_argument('conf_file', nargs='?', help=(
'Path to config file with [object-relinker] section'))
parser.add_argument('--swift-dir', default=None,
dest='swift_dir', help='Path to swift directory')
parser.add_argument(
'--policy', default=[], dest='policies',
action='append', type=policy,
help='Policy to relink; may specify multiple (default: all)')
parser.add_argument('--devices', default=None,
dest='devices', help='Path to swift device directory')
parser.add_argument('--user', default=None, dest='user',
help='Drop privileges to this user before relinking')
parser.add_argument('--device',
default=[], dest='device_list', action='append',
help='Device name to relink (default: all)')
parser.add_argument('--partition', '-p', default=[], dest='partitions',
type=non_negative_int, action='append',
help='Partition to relink (default: all)')
parser.add_argument('--skip-mount-check', default=False,
help='Don\'t test if disk is mounted',
action="store_true", dest='skip_mount_check')
parser.add_argument('--files-per-second', default=None,
type=non_negative_float, dest='files_per_second',
help='Used to limit I/O. Zero implies no limit '
'(default: no limit).')
parser.add_argument('--stats-interval', default=None,
type=non_negative_float, dest='stats_interval',
help='Emit stats to recon roughly every N seconds. '
'(default: %d).' % DEFAULT_STATS_INTERVAL)
parser.add_argument(
'--workers', default=None, type=auto_or_int, help=(
'Process devices across N workers '
'(default: one worker per device)'))
parser.add_argument('--logfile', default=None, dest='logfile',
help='Set log file name. Ignored if using conf_file.')
parser.add_argument('--debug', default=False, action='store_true',
help='Enable debug mode')
args = parser.parse_args(args)
hubs.use_hub(get_hub())
if args.conf_file:
conf = readconf(args.conf_file, 'object-relinker')
if args.debug:
conf['log_level'] = 'DEBUG'
user = args.user or conf.get('user')
if user:
drop_privileges(user)
logger = get_logger(conf)
else:
conf = {'log_level': 'DEBUG' if args.debug else 'INFO'}
if args.user:
# Drop privs before creating log file
drop_privileges(args.user)
conf['user'] = args.user
logging.basicConfig(
format='%(message)s',
level=logging.DEBUG if args.debug else logging.INFO,
filename=args.logfile)
logger = logging.getLogger()
conf.update({
'swift_dir': args.swift_dir or conf.get('swift_dir', '/etc/swift'),
'devices': args.devices or conf.get('devices', '/srv/node'),
'mount_check': (config_true_value(conf.get('mount_check', 'true'))
and not args.skip_mount_check),
'files_per_second': (
args.files_per_second if args.files_per_second is not None
else non_negative_float(conf.get('files_per_second', '0'))),
'policies': set(args.policies) or POLICIES,
'partitions': set(args.partitions),
'workers': config_auto_int_value(
conf.get('workers') if args.workers is None else args.workers,
'auto'),
'recon_cache_path': conf.get('recon_cache_path',
DEFAULT_RECON_CACHE_PATH),
'stats_interval': non_negative_float(
args.stats_interval or conf.get('stats_interval',
DEFAULT_STATS_INTERVAL)),
})
return parallel_process(
args.action == 'cleanup', conf, logger, args.device_list)
| swift-master | swift/cli/relinker.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
from collections import defaultdict
from errno import EEXIST
from itertools import islice
from operator import itemgetter
from os import mkdir
from os.path import basename, abspath, dirname, exists, join as pathjoin
from sys import argv as sys_argv, exit, stderr, stdout
from textwrap import wrap
from time import time
from datetime import timedelta
import optparse
import math
from six.moves import zip as izip
from six.moves import input
from swift.common import exceptions
from swift.common.ring import RingBuilder, Ring, RingData
from swift.common.ring.builder import MAX_BALANCE
from swift.common.ring.composite_builder import CompositeRingBuilder
from swift.common.ring.utils import validate_args, \
validate_and_normalize_ip, build_dev_from_opts, \
parse_builder_ring_filename_args, parse_search_value, \
parse_search_values_from_opts, parse_change_values_from_opts, \
dispersion_report, parse_add_value
from swift.common.utils import lock_parent_directory, is_valid_ipv6
MAJOR_VERSION = 1
MINOR_VERSION = 3
EXIT_SUCCESS = 0
EXIT_WARNING = 1
EXIT_ERROR = 2
global argv, backup_dir, builder, builder_file, ring_file
argv = backup_dir = builder = builder_file = ring_file = None
def format_device(dev):
"""
Format a device for display.
"""
copy_dev = dev.copy()
for key in ('ip', 'replication_ip'):
if ':' in copy_dev[key]:
copy_dev[key] = '[' + copy_dev[key] + ']'
return ('d%(id)sr%(region)sz%(zone)s-%(ip)s:%(port)sR'
'%(replication_ip)s:%(replication_port)s/%(device)s_'
'"%(meta)s"' % copy_dev)
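# Illustrative example (device values assumed): a dev dict with id=74,
# region=4, zone=2, ip='127.0.0.1', port=6200, replication_ip='127.0.0.1',
# replication_port=6200, device='sdb8' and empty meta formats as
#   d74r4z2-127.0.0.1:6200R127.0.0.1:6200/sdb8_""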
def _parse_search_values(argvish):
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
try:
search_values = {}
if len(args) > 0:
if new_cmd_format or len(args) != 1:
print(Commands.search.__doc__.strip())
exit(EXIT_ERROR)
search_values = parse_search_value(args[0])
else:
search_values = parse_search_values_from_opts(opts)
return search_values
except ValueError as e:
print(e)
exit(EXIT_ERROR)
def _find_parts(devs):
devs = [d['id'] for d in devs]
if not devs or not builder._replica2part2dev:
return None
partition_count = {}
for replica in builder._replica2part2dev:
for partition, device in enumerate(replica):
if device in devs:
if partition not in partition_count:
partition_count[partition] = 0
partition_count[partition] += 1
# Sort by number of found replicas to keep the output format
sorted_partition_count = sorted(
partition_count.items(), key=itemgetter(1), reverse=True)
return sorted_partition_count
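# Illustrative example (device ids and partitions assumed): if device id 3
# holds replicas of partitions 7 and 9 and device id 5 also holds a replica
# of partition 7, then _find_parts([{'id': 3}, {'id': 5}]) returns
# [(7, 2), (9, 1)] -- partitions sorted by how many of the matched devices
# hold a replica.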
def _parse_list_parts_values(argvish):
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
try:
devs = []
if len(args) > 0:
if new_cmd_format:
print(Commands.list_parts.__doc__.strip())
exit(EXIT_ERROR)
for arg in args:
devs.extend(
builder.search_devs(parse_search_value(arg)) or [])
else:
devs.extend(builder.search_devs(
parse_search_values_from_opts(opts)) or [])
return devs
except ValueError as e:
print(e)
exit(EXIT_ERROR)
def _parse_add_values(argvish):
"""
Parse devices to add as specified on the command line.
Will exit on error and spew warnings.
:returns: array of device dicts
"""
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
parsed_devs = []
if len(args) > 0:
if new_cmd_format or len(args) % 2 != 0:
print(Commands.add.__doc__.strip())
exit(EXIT_ERROR)
devs_and_weights = izip(islice(args, 0, len(args), 2),
islice(args, 1, len(args), 2))
for devstr, weightstr in devs_and_weights:
dev_dict = parse_add_value(devstr)
if dev_dict['region'] is None:
stderr.write('WARNING: No region specified for %s. '
'Defaulting to region 1.\n' % devstr)
dev_dict['region'] = 1
if dev_dict['replication_ip'] is None:
dev_dict['replication_ip'] = dev_dict['ip']
if dev_dict['replication_port'] is None:
dev_dict['replication_port'] = dev_dict['port']
weight = float(weightstr)
if weight < 0:
                raise ValueError('Invalid weight value: %s' % weightstr)
dev_dict['weight'] = weight
parsed_devs.append(dev_dict)
else:
parsed_devs.append(build_dev_from_opts(opts))
return parsed_devs
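# Illustrative example of the legacy all-in-one format handled above, pairing
# a device string with a weight on the command line (values assumed):
#
#   swift-ring-builder object.builder add r1z1-127.0.0.1:6200/sdb1 100
#
# parse_add_value() turns 'r1z1-127.0.0.1:6200/sdb1' into a dict with
# region=1, zone=1, ip='127.0.0.1', port=6200 and device='sdb1', and this
# function then attaches weight=100.0.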
def check_devs(devs, input_question, opts, abort_msg):
if not devs:
print('Search value matched 0 devices.\n'
'The on-disk ring builder is unchanged.')
exit(EXIT_ERROR)
if len(devs) > 1:
print('Matched more than one device:')
for dev in devs:
print(' %s' % format_device(dev))
try:
abort = not opts.yes and input(input_question) != 'y'
except (EOFError, KeyboardInterrupt):
abort = True
if abort:
print(abort_msg)
exit(EXIT_ERROR)
def _set_weight_values(devs, weight, opts):
input_question = 'Are you sure you want to update the weight for these ' \
'%s devices? (y/N) ' % len(devs)
abort_msg = 'Aborting device modifications'
check_devs(devs, input_question, opts, abort_msg)
for dev in devs:
builder.set_dev_weight(dev['id'], weight)
print('%s weight set to %s' % (format_device(dev),
dev['weight']))
def _set_region_values(devs, region, opts):
input_question = 'Are you sure you want to update the region for these ' \
'%s devices? (y/N) ' % len(devs)
abort_msg = 'Aborting device modifications'
check_devs(devs, input_question, opts, abort_msg)
for dev in devs:
builder.set_dev_region(dev['id'], region)
print('%s region set to %s' % (format_device(dev),
dev['region']))
def _set_zone_values(devs, zone, opts):
input_question = 'Are you sure you want to update the zone for these ' \
'%s devices? (y/N) ' % len(devs)
abort_msg = 'Aborting device modifications'
check_devs(devs, input_question, opts, abort_msg)
for dev in devs:
builder.set_dev_zone(dev['id'], zone)
print('%s zone set to %s' % (format_device(dev),
dev['zone']))
def _parse_set_weight_values(argvish):
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
try:
if not new_cmd_format:
if len(args) % 2 != 0:
print(Commands.set_weight.__doc__.strip())
exit(EXIT_ERROR)
devs_and_weights = izip(islice(argvish, 0, len(argvish), 2),
islice(argvish, 1, len(argvish), 2))
for devstr, weightstr in devs_and_weights:
devs = (builder.search_devs(
parse_search_value(devstr)) or [])
weight = float(weightstr)
_set_weight_values(devs, weight, opts)
else:
if len(args) != 1:
print(Commands.set_weight.__doc__.strip())
exit(EXIT_ERROR)
devs = (builder.search_devs(
parse_search_values_from_opts(opts)) or [])
weight = float(args[0])
_set_weight_values(devs, weight, opts)
except ValueError as e:
print(e)
exit(EXIT_ERROR)
def _set_info_values(devs, change, opts):
input_question = 'Are you sure you want to update the info for these ' \
'%s devices? (y/N) ' % len(devs)
abort_msg = 'Aborting device modifications'
check_devs(devs, input_question, opts, abort_msg)
for dev in devs:
orig_dev_string = format_device(dev)
test_dev = dict(dev)
for key in change:
test_dev[key] = change[key]
for check_dev in builder.devs:
if not check_dev or check_dev['id'] == test_dev['id']:
continue
if check_dev['ip'] == test_dev['ip'] and \
check_dev['port'] == test_dev['port'] and \
check_dev['device'] == test_dev['device']:
print('Device %d already uses %s:%d/%s.' %
(check_dev['id'], check_dev['ip'],
check_dev['port'], check_dev['device']))
exit(EXIT_ERROR)
for key in change:
dev[key] = change[key]
print('Device %s is now %s' % (orig_dev_string,
format_device(dev)))
def calculate_change_value(change_value, change, v_name, v_name_port):
ip = ''
if change_value and change_value[0].isdigit():
i = 1
while (i < len(change_value) and
change_value[i] in '0123456789.'):
i += 1
ip = change_value[:i]
change_value = change_value[i:]
elif change_value and change_value.startswith('['):
i = 1
while i < len(change_value) and change_value[i] != ']':
i += 1
i += 1
ip = change_value[:i].lstrip('[').rstrip(']')
change_value = change_value[i:]
if ip:
change[v_name] = validate_and_normalize_ip(ip)
if change_value.startswith(':'):
i = 1
while i < len(change_value) and change_value[i].isdigit():
i += 1
change[v_name_port] = int(change_value[1:i])
change_value = change_value[i:]
return change_value
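# Illustrative walk-through (input string assumed): for a set_info change
# value of '127.0.0.2:6201/sdb3_new meta', the first call consumes the ip
# and port:
#
#   change = {}
#   rest = calculate_change_value('127.0.0.2:6201/sdb3_new meta', change,
#                                 'ip', 'port')
#   # change == {'ip': '127.0.0.2', 'port': 6201}; rest == '/sdb3_new meta'
#
# _parse_set_info_values() then peels off the device name and meta from the
# remainder.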
def _parse_set_region_values(argvish):
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
try:
devs = []
if not new_cmd_format:
if len(args) % 2 != 0:
print(Commands.set_region.__doc__.strip())
exit(EXIT_ERROR)
devs_and_regions = izip(islice(argvish, 0, len(argvish), 2),
islice(argvish, 1, len(argvish), 2))
for devstr, regionstr in devs_and_regions:
devs.extend(builder.search_devs(
parse_search_value(devstr)) or [])
region = int(regionstr)
_set_region_values(devs, region, opts)
else:
if len(args) != 1:
print(Commands.set_region.__doc__.strip())
exit(EXIT_ERROR)
devs.extend(builder.search_devs(
parse_search_values_from_opts(opts)) or [])
region = int(args[0])
_set_region_values(devs, region, opts)
except ValueError as e:
print(e)
exit(EXIT_ERROR)
def _parse_set_zone_values(argvish):
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
try:
devs = []
if not new_cmd_format:
if len(args) % 2 != 0:
print(Commands.set_zone.__doc__.strip())
exit(EXIT_ERROR)
devs_and_zones = izip(islice(argvish, 0, len(argvish), 2),
islice(argvish, 1, len(argvish), 2))
for devstr, zonestr in devs_and_zones:
devs.extend(builder.search_devs(
parse_search_value(devstr)) or [])
zone = int(zonestr)
_set_zone_values(devs, zone, opts)
else:
if len(args) != 1:
print(Commands.set_zone.__doc__.strip())
exit(EXIT_ERROR)
devs.extend(builder.search_devs(
parse_search_values_from_opts(opts)) or [])
zone = int(args[0])
_set_zone_values(devs, zone, opts)
except ValueError as e:
print(e)
exit(EXIT_ERROR)
def _parse_set_info_values(argvish):
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
if not new_cmd_format:
if len(args) % 2 != 0:
print(Commands.search.__doc__.strip())
exit(EXIT_ERROR)
searches_and_changes = izip(islice(argvish, 0, len(argvish), 2),
islice(argvish, 1, len(argvish), 2))
for search_value, change_value in searches_and_changes:
devs = builder.search_devs(parse_search_value(search_value))
change = {}
change_value = calculate_change_value(change_value, change,
'ip', 'port')
if change_value.startswith('R'):
change_value = change_value[1:]
change_value = calculate_change_value(change_value, change,
'replication_ip',
'replication_port')
if change_value.startswith('/'):
i = 1
while i < len(change_value) and change_value[i] != '_':
i += 1
change['device'] = change_value[1:i]
change_value = change_value[i:]
if change_value.startswith('_'):
change['meta'] = change_value[1:]
change_value = ''
if change_value or not change:
raise ValueError('Invalid set info change value: %s' %
repr(argvish[1]))
_set_info_values(devs, change, opts)
else:
devs = builder.search_devs(parse_search_values_from_opts(opts))
change = parse_change_values_from_opts(opts)
_set_info_values(devs, change, opts)
def _parse_remove_values(argvish):
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
try:
devs = []
if len(args) > 0:
if new_cmd_format:
print(Commands.remove.__doc__.strip())
exit(EXIT_ERROR)
for arg in args:
devs.extend(builder.search_devs(
parse_search_value(arg)) or [])
else:
devs.extend(builder.search_devs(
parse_search_values_from_opts(opts)))
return (devs, opts)
except ValueError as e:
print(e)
exit(EXIT_ERROR)
def _make_display_device_table(builder):
ip_width = 10
port_width = 4
rep_ip_width = 14
rep_port_width = 4
ip_ipv6 = rep_ipv6 = False
weight_width = 6
for dev in builder._iter_devs():
if is_valid_ipv6(dev['ip']):
ip_ipv6 = True
if is_valid_ipv6(dev['replication_ip']):
rep_ipv6 = True
ip_width = max(len(dev['ip']), ip_width)
rep_ip_width = max(len(dev['replication_ip']), rep_ip_width)
port_width = max(len(str(dev['port'])), port_width)
rep_port_width = max(len(str(dev['replication_port'])),
rep_port_width)
weight_width = max(len('%6.02f' % dev['weight']),
weight_width)
if ip_ipv6:
ip_width += 2
if rep_ipv6:
rep_ip_width += 2
header_line = ('Devices:%5s %6s %4s %' + str(ip_width)
+ 's:%-' + str(port_width) + 's %' +
str(rep_ip_width) + 's:%-' + str(rep_port_width) +
's %5s %' + str(weight_width) + 's %10s %7s %5s %s') % (
'id', 'region', 'zone', 'ip address',
'port', 'replication ip', 'port', 'name',
'weight', 'partitions', 'balance', 'flags',
'meta')
def print_dev_f(dev, balance_per_dev=0.00, flags=''):
def get_formated_ip(key):
value = dev[key]
if ':' in value:
value = '[%s]' % value
return value
dev_ip = get_formated_ip('ip')
dev_replication_ip = get_formated_ip('replication_ip')
format_string = ''.join(['%13d %6d %4d ',
'%', str(ip_width), 's:%-',
str(port_width), 'd ', '%',
str(rep_ip_width), 's', ':%-',
str(rep_port_width), 'd %5s %',
str(weight_width), '.02f'
' %10s %7.02f %5s %s'])
args = (dev['id'], dev['region'], dev['zone'], dev_ip, dev['port'],
dev_replication_ip, dev['replication_port'], dev['device'],
dev['weight'], dev['parts'], balance_per_dev, flags,
dev['meta'])
print(format_string % args)
return header_line, print_dev_f
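# Illustrative sketch (not part of the original module): the header line and
# row printer returned by _make_display_device_table() are meant to be used
# together, as Commands.default() does below. The helper name is
# hypothetical.
def _example_print_device_table(builder):
    header_line, print_dev_f = _make_display_device_table(builder)
    print(header_line)
    for dev in builder._iter_devs():
        print_dev_f(dev, balance_per_dev=0.00, flags='')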
class Commands(object):
@staticmethod
def unknown():
print('Unknown command: %s' % argv[2])
exit(EXIT_ERROR)
@staticmethod
def create():
"""
swift-ring-builder <builder_file> create <part_power> <replicas>
<min_part_hours>
Creates <builder_file> with 2^<part_power> partitions and <replicas>.
<min_part_hours> is the number of hours to restrict moving a partition more
than once.
"""
if len(argv) < 6:
print(Commands.create.__doc__.strip())
exit(EXIT_ERROR)
try:
builder = RingBuilder(int(argv[3]), float(argv[4]), int(argv[5]))
except ValueError as e:
print(e)
exit(EXIT_ERROR)
backup_dir = pathjoin(dirname(builder_file), 'backups')
try:
mkdir(backup_dir)
except OSError as err:
if err.errno != EEXIST:
raise
builder.save(pathjoin(backup_dir,
'%d.' % time() + basename(builder_file)))
builder.save(builder_file)
exit(EXIT_SUCCESS)
@staticmethod
def default():
"""
swift-ring-builder <builder_file>
Shows information about the ring and the devices within. Output
includes a table that describes the report parameters (id, region,
port, flags, etc).
flags: possible values are 'DEL' and ''
DEL - indicates that the device is marked for removal from the
ring and will be removed in the next rebalance.
"""
try:
builder_id = builder.id
except AttributeError:
builder_id = "(not assigned)"
print('%s, build version %d, id %s' %
(builder_file, builder.version, builder_id))
balance = 0
ring_empty_error = None
regions = len(set(d['region'] for d in builder.devs
if d is not None))
zones = len(set((d['region'], d['zone']) for d in builder.devs
if d is not None))
dev_count = len([dev for dev in builder.devs
if dev is not None])
try:
balance = builder.get_balance()
except exceptions.EmptyRingError as e:
ring_empty_error = str(e)
dispersion_trailer = '' if builder.dispersion is None else (
', %.02f dispersion' % (builder.dispersion))
print('%d partitions, %.6f replicas, %d regions, %d zones, '
'%d devices, %.02f balance%s' % (
builder.parts, builder.replicas, regions, zones, dev_count,
balance, dispersion_trailer))
print('The minimum number of hours before a partition can be '
'reassigned is %s (%s remaining)' % (
builder.min_part_hours,
timedelta(seconds=builder.min_part_seconds_left)))
print('The overload factor is %0.2f%% (%.6f)' % (
builder.overload * 100, builder.overload))
ring_dict = None
builder_dict = builder.get_ring().to_dict()
# compare ring file against builder file
if not exists(ring_file):
print('Ring file %s not found, '
'probably it hasn\'t been written yet' % ring_file)
else:
try:
ring_dict = RingData.load(ring_file).to_dict()
except Exception as exc:
print('Ring file %s is invalid: %r' % (ring_file, exc))
else:
if builder_dict == ring_dict:
print('Ring file %s is up-to-date' % ring_file)
else:
print('Ring file %s is obsolete' % ring_file)
if ring_empty_error:
balance_per_dev = defaultdict(int)
else:
balance_per_dev = builder._build_balance_per_dev()
header_line, print_dev_f = _make_display_device_table(builder)
print(header_line)
for dev in sorted(
builder._iter_devs(),
key=lambda x: (x['region'], x['zone'], x['ip'], x['device'])
):
flags = 'DEL' if dev in builder._remove_devs else ''
print_dev_f(dev, balance_per_dev[dev['id']], flags)
# Print some helpful info if partition power increase in progress
if (builder.next_part_power and
builder.next_part_power == (builder.part_power + 1)):
print('\nPreparing increase of partition power (%d -> %d)' % (
builder.part_power, builder.next_part_power))
print('Run "swift-object-relinker relink" on all nodes before '
'moving on to increase_partition_power.')
if (builder.next_part_power and
builder.part_power == builder.next_part_power):
print('\nIncreased partition power (%d -> %d)' % (
builder.part_power, builder.next_part_power))
if builder_dict != ring_dict:
print('First run "swift-ring-builder <builderfile> write_ring"'
' now and copy the updated .ring.gz file to all nodes.')
print('Run "swift-object-relinker cleanup" on all nodes before '
'moving on to finish_increase_partition_power.')
if ring_empty_error:
print(ring_empty_error)
exit(EXIT_SUCCESS)
@staticmethod
def search():
"""
swift-ring-builder <builder_file> search <search-value>
or
swift-ring-builder <builder_file> search
--region <region> --zone <zone> --ip <ip or hostname> --port <port>
--replication-ip <r_ip or r_hostname> --replication-port <r_port>
--device <device_name> --meta <meta> --weight <weight>
Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname
and port.
Any of the options are optional in both cases.
Shows information about matching devices.
"""
if len(argv) < 4:
print(Commands.search.__doc__.strip())
print()
print(parse_search_value.__doc__.strip())
exit(EXIT_ERROR)
devs = builder.search_devs(_parse_search_values(argv[3:]))
if not devs:
print('No matching devices found')
exit(EXIT_ERROR)
print('Devices: id region zone ip address port '
'replication ip replication port name weight partitions '
'balance meta')
weighted_parts = builder.parts * builder.replicas / \
sum(d['weight'] for d in builder.devs if d is not None)
for dev in devs:
if not dev['weight']:
if dev['parts']:
balance = MAX_BALANCE
else:
balance = 0
else:
balance = 100.0 * dev['parts'] / \
(dev['weight'] * weighted_parts) - 100.0
print(' %5d %7d %5d %15s %5d %15s %17d %9s %6.02f %10s '
'%7.02f %s' %
(dev['id'], dev['region'], dev['zone'], dev['ip'],
dev['port'], dev['replication_ip'], dev['replication_port'],
dev['device'], dev['weight'], dev['parts'], balance,
dev['meta']))
exit(EXIT_SUCCESS)
@staticmethod
def list_parts():
"""
swift-ring-builder <builder_file> list_parts <search-value> [<search-value>] ..
or
swift-ring-builder <builder_file> list_parts
--region <region> --zone <zone> --ip <ip or hostname> --port <port>
--replication-ip <r_ip or r_hostname> --replication-port <r_port>
--device <device_name> --meta <meta> --weight <weight>
Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname
and port.
Any of the options are optional in both cases.
Returns a 2 column list of all the partitions that are assigned to any of
the devices matching the search values given. The first column is the
assigned partition number and the second column is the number of device
matches for that partition. The list is ordered from the most matches
to the least. If there are a lot of devices to match against, this command
could take a while to run.
"""
if len(argv) < 4:
print(Commands.list_parts.__doc__.strip())
print()
print(parse_search_value.__doc__.strip())
exit(EXIT_ERROR)
if not builder._replica2part2dev:
print('Specified builder file \"%s\" is not rebalanced yet. '
'Please rebalance first.' % builder_file)
exit(EXIT_ERROR)
devs = _parse_list_parts_values(argv[3:])
if not devs:
print('No matching devices found')
exit(EXIT_ERROR)
sorted_partition_count = _find_parts(devs)
if not sorted_partition_count:
print('No matching devices found')
exit(EXIT_ERROR)
print('Partition Matches')
for partition, count in sorted_partition_count:
print('%9d %7d' % (partition, count))
exit(EXIT_SUCCESS)
@staticmethod
def add():
"""
swift-ring-builder <builder_file> add
[r<region>]z<zone>-<ip>:<port>[R<r_ip>:<r_port>]/<device_name>_<meta>
<weight>
[[r<region>]z<zone>-<ip>:<port>[R<r_ip>:<r_port>]/<device_name>_<meta>
<weight>] ...
Where <r_ip> and <r_port> are replication ip and port.
or
swift-ring-builder <builder_file> add
--region <region> --zone <zone> --ip <ip or hostname> --port <port>
[--replication-ip <r_ip or r_hostname>] [--replication-port <r_port>]
--device <device_name> --weight <weight>
[--meta <meta>]
Adds devices to the ring with the given information. No partitions will be
assigned to the new device until after running 'rebalance'. This is so you
can make multiple device changes and rebalance them all just once.
"""
if len(argv) < 5:
print(Commands.add.__doc__.strip())
exit(EXIT_ERROR)
if builder.next_part_power:
print('Partition power increase in progress. You need ')
print('to finish the increase first before adding devices.')
exit(EXIT_ERROR)
try:
for new_dev in _parse_add_values(argv[3:]):
for dev in builder.devs:
if dev is None:
continue
if dev['ip'] == new_dev['ip'] and \
dev['port'] == new_dev['port'] and \
dev['device'] == new_dev['device']:
print('Device %d already uses %s:%d/%s.' %
(dev['id'], dev['ip'],
dev['port'], dev['device']))
print("The on-disk ring builder is unchanged.\n")
exit(EXIT_ERROR)
dev_id = builder.add_dev(new_dev)
print('Device %s with %s weight got id %s' %
(format_device(new_dev), new_dev['weight'], dev_id))
except ValueError as err:
print(err)
print('The on-disk ring builder is unchanged.')
exit(EXIT_ERROR)
builder.save(builder_file)
exit(EXIT_SUCCESS)
@staticmethod
def set_weight():
"""
swift-ring-builder <builder_file> set_weight <search-value> <new_weight>
[<search-value> <new_weight>] ...
[--yes]
or
swift-ring-builder <builder_file> set_weight
--region <region> --zone <zone> --ip <ip or hostname> --port <port>
--replication-ip <r_ip or r_hostname> --replication-port <r_port>
--device <device_name> --meta <meta> --weight <weight> <new_weight>
[--yes]
Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname
and port. <weight> and <new_weight> are the search weight and new
weight values respectively.
Any of the options are optional in both cases.
Resets the devices' weights. No partitions will be reassigned to or from
the device until after running 'rebalance'. This is so you can make
multiple device changes and rebalance them all just once.
Option --yes assumes a yes response to all questions.
"""
# if len(argv) < 5 or len(argv) % 2 != 1:
if len(argv) < 5:
print(Commands.set_weight.__doc__.strip())
print()
print(parse_search_value.__doc__.strip())
exit(EXIT_ERROR)
_parse_set_weight_values(argv[3:])
builder.save(builder_file)
exit(EXIT_SUCCESS)
@staticmethod
def set_region():
"""
swift-ring-builder <builder_file> set_region <search-value> <region>
[<search-value> <region>] ...
or
swift-ring-builder <builder_file> set_region
--region <region> --zone <zone> --ip <ip or hostname> --port <port>
--replication-ip <r_ip or r_hostname> --replication-port <r_port>
--device <device_name> --meta <meta> <new region> [--yes]
Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname
and port.
Any of the options are optional in both cases.
Resets the devices' regions. No partitions will be reassigned to or from
the device until after running 'rebalance'. This is so you can make
multiple device changes and rebalance them all just once.
Option --yes assumes a yes response to all questions.
"""
if len(argv) < 5:
print(Commands.set_region.__doc__.strip())
print()
print(parse_search_value.__doc__.strip())
exit(EXIT_ERROR)
_parse_set_region_values(argv[3:])
builder.save(builder_file)
exit(EXIT_SUCCESS)
@staticmethod
def set_zone():
"""
swift-ring-builder <builder_file> set_zone <search-value> <zone>
[<search-value> <zone>] ...
or
swift-ring-builder <builder_file> set_zone
--region <region> --zone <zone> --ip <ip or hostname> --port <port>
--replication-ip <r_ip or r_hostname> --replication-port <r_port>
--device <device_name> --meta <meta> <new zone> [--yes]
Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname
and port.
Any of the options are optional in both cases.
Resets the devices' zones. No partitions will be reassigned to or from
the device until after running 'rebalance'. This is so you can make
multiple device changes and rebalance them all just once.
Option --yes assumes a yes response to all questions.
"""
# if len(argv) < 5 or len(argv) % 2 != 1:
if len(argv) < 5:
print(Commands.set_zone.__doc__.strip())
print()
print(parse_search_value.__doc__.strip())
exit(EXIT_ERROR)
_parse_set_zone_values(argv[3:])
builder.save(builder_file)
exit(EXIT_SUCCESS)
@staticmethod
def set_info():
"""
swift-ring-builder <builder_file> set_info
<search-value> <ip>:<port>[R<r_ip>:<r_port>]/<device_name>_<meta>
[<search-value> <ip>:<port>[R<r_ip>:<r_port>]/<device_name>_<meta>] ...
[--yes]
or
swift-ring-builder <builder_file> set_info
--ip <ip or hostname> --port <port>
--replication-ip <r_ip or r_hostname> --replication-port <r_port>
--device <device_name> --meta <meta>
--change-ip <ip or hostname> --change-port <port>
--change-replication-ip <r_ip or r_hostname>
--change-replication-port <r_port>
--change-device <device_name>
--change-meta <meta>
[--yes]
Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname
and port.
Any of the options are optional in both cases.
For each search-value, resets the matched device's information.
This information isn't used to assign partitions, so you can use
'write_ring' afterward to rewrite the current ring with the newer
device information. Any of the parts are optional in the final
<ip>:<port>/<device_name>_<meta> parameter; just give what you
want to change. For instance set_info d74 _"snet: 5.6.7.8" would
just update the meta data for device id 74.
Option --yes assumes a yes response to all questions.
"""
if len(argv) < 5:
print(Commands.set_info.__doc__.strip())
print()
print(parse_search_value.__doc__.strip())
exit(EXIT_ERROR)
try:
_parse_set_info_values(argv[3:])
except ValueError as err:
print(err)
exit(EXIT_ERROR)
builder.save(builder_file)
exit(EXIT_SUCCESS)
@staticmethod
def remove():
"""
swift-ring-builder <builder_file> remove <search-value> [search-value ...]
[--yes]
or
swift-ring-builder <builder_file> remove
--region <region> --zone <zone> --ip <ip or hostname> --port <port>
--replication-ip <r_ip or r_hostname> --replication-port <r_port>
--device <device_name> --meta <meta> --weight <weight>
[--yes]
Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname
and port.
Any of the options are optional in both cases.
Removes the device(s) from the ring. This should normally just be used for
a device that has failed. For a device you wish to decommission, it's best
to set its weight to 0, wait for it to drain all its data, then use this
remove command. This will not take effect until after running 'rebalance'.
This is so you can make multiple device changes and rebalance them all just
once.
Option --yes assumes a yes response to all questions.
"""
if len(argv) < 4:
print(Commands.remove.__doc__.strip())
print()
print(parse_search_value.__doc__.strip())
exit(EXIT_ERROR)
if builder.next_part_power:
print('Partition power increase in progress. You need ')
print('to finish the increase first before removing devices.')
exit(EXIT_ERROR)
devs, opts = _parse_remove_values(argv[3:])
input_question = 'Are you sure you want to remove these ' \
'%s devices? (y/N) ' % len(devs)
abort_msg = 'Aborting device removals'
check_devs(devs, input_question, opts, abort_msg)
for dev in devs:
try:
builder.remove_dev(dev['id'])
except exceptions.RingBuilderError as e:
print('-' * 79)
print(
'An error occurred while removing device with id %d\n'
'This usually means that you attempted to remove\n'
'the last device in a ring. If this is the case,\n'
'consider creating a new ring instead.\n'
'The on-disk ring builder is unchanged.\n'
'Original exception message: %s' %
(dev['id'], e))
print('-' * 79)
exit(EXIT_ERROR)
print('%s marked for removal and will '
'be removed next rebalance.' % format_device(dev))
builder.save(builder_file)
exit(EXIT_SUCCESS)
@staticmethod
def rebalance():
"""
swift-ring-builder <builder_file> rebalance [options]
Attempts to rebalance the ring by reassigning partitions that haven't been
recently reassigned.
"""
usage = Commands.rebalance.__doc__.strip()
parser = optparse.OptionParser(usage)
parser.add_option('-f', '--force', action='store_true',
help='Force a rebalanced ring to save even '
'if < 1% of parts changed')
parser.add_option('-s', '--seed', help="seed to use for rebalance")
parser.add_option('-d', '--debug', action='store_true',
help="print debug information")
options, args = parser.parse_args(argv)
def get_seed(index):
if options.seed:
return options.seed
try:
return args[index]
except IndexError:
pass
if options.debug:
logger = logging.getLogger("swift.ring.builder")
logger.disabled = False
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(stdout)
formatter = logging.Formatter("%(levelname)s: %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
if builder.next_part_power:
print('Partition power increase in progress.')
print('You need to finish the increase first before rebalancing.')
exit(EXIT_ERROR)
devs_changed = builder.devs_changed
min_part_seconds_left = builder.min_part_seconds_left
try:
last_balance = builder.get_balance()
last_dispersion = builder.dispersion
parts, balance, removed_devs = builder.rebalance(seed=get_seed(3))
dispersion = builder.dispersion
except exceptions.RingBuilderError as e:
print('-' * 79)
print("An error has occurred during ring validation. Common\n"
"causes of failure are rings that are empty or do not\n"
"have enough devices to accommodate the replica count.\n"
"Original exception message:\n %s" %
(e,))
print('-' * 79)
exit(EXIT_ERROR)
if not (parts or options.force or removed_devs):
print('No partitions could be reassigned.')
if min_part_seconds_left > 0:
print('The time between rebalances must be at least '
'min_part_hours: %s hours (%s remaining)' % (
builder.min_part_hours,
timedelta(seconds=builder.min_part_seconds_left)))
else:
print('There is no need to do so at this time')
exit(EXIT_WARNING)
# If we set a device's weight to zero, the balance will currently be
# reported as the special value MAX_BALANCE until the zero-weighted
# device has given up all of its partitions. So we cannot rely on the
# balance having changed; we also need to check whether balance or
# last_balance is that special value.
be_cowardly = True
if options.force:
# User said save it, so we save it.
be_cowardly = False
elif devs_changed:
# We must save if a device changed; this could be something like
# a changed IP address.
be_cowardly = False
else:
# If balance or dispersion changed (presumably improved), then
# we should save to get the improvement.
balance_changed = (
abs(last_balance - balance) >= 1 or
(last_balance == MAX_BALANCE and balance == MAX_BALANCE))
dispersion_changed = last_dispersion is None or (
abs(last_dispersion - dispersion) >= 1)
if balance_changed or dispersion_changed:
be_cowardly = False
if be_cowardly:
print('Cowardly refusing to save rebalance as it did not change '
'at least 1%.')
exit(EXIT_WARNING)
try:
builder.validate()
except exceptions.RingValidationError as e:
print('-' * 79)
print("An error has occurred during ring validation. Common\n"
"causes of failure are rings that are empty or do not\n"
"have enough devices to accommodate the replica count.\n"
"Original exception message:\n %s" %
(e,))
print('-' * 79)
exit(EXIT_ERROR)
print('Reassigned %d (%.02f%%) partitions. '
'Balance is now %.02f. '
'Dispersion is now %.02f' % (
parts, 100.0 * parts / builder.parts,
balance,
builder.dispersion))
status = EXIT_SUCCESS
if builder.dispersion > 0:
print('-' * 79)
print(
'NOTE: Dispersion of %.06f indicates some parts are not\n'
' optimally dispersed.\n\n'
' You may want to adjust some device weights, increase\n'
' the overload or review the dispersion report.' %
builder.dispersion)
status = EXIT_WARNING
print('-' * 79)
elif balance > 5 and balance / 100.0 > builder.overload:
print('-' * 79)
print('NOTE: Balance of %.02f indicates you should push this ' %
balance)
print(' ring, wait at least %d hours, and rebalance/repush.'
% builder.min_part_hours)
print('-' * 79)
status = EXIT_WARNING
ts = time()
builder.get_ring().save(
pathjoin(backup_dir, '%d.' % ts + basename(ring_file)))
builder.save(pathjoin(backup_dir, '%d.' % ts + basename(builder_file)))
builder.get_ring().save(ring_file)
builder.save(builder_file)
exit(status)
@staticmethod
def dispersion():
r"""
swift-ring-builder <builder_file> dispersion <search_filter> [options]
Output report on dispersion.
--recalculate option will rebuild cached dispersion info and save builder
--verbose option will display dispersion graph broken down by tier
You can filter which tiers are evaluated to drill down using a regex
in the optional search_filter argument. i.e.
swift-ring-builder <builder_file> dispersion "r\d+z\d+$" -v
... would only display rows for the zone tiers
swift-ring-builder <builder_file> dispersion ".*\-[^/]*$" -v
... would only display rows for the server tiers
The report's columns are:
Tier : the name of the tier
parts : the total number of partitions with assignment in the tier
% : the percentage of parts in the tier with more than max replicas assigned
max : maximum replicas a part should have assigned at the tier
0 - N : the number of parts with that many replicas assigned
e.g.
Tier: parts % max 0 1 2 3
r1z1 1022 79.45 1 2 210 784 28
r1z1 has 1022 total parts assigned, 79% of them have more than the
recommended max replica count of 1 assigned. Only 2 parts in the ring
are *not* assigned in this tier (0 replica count), 210 parts have
the recommended replica count of 1, 784 have 2 replicas, and 28 sadly
have all three replicas in this tier.
"""
status = EXIT_SUCCESS
if not builder._replica2part2dev:
print('Specified builder file \"%s\" is not rebalanced yet. '
'Please rebalance first.' % builder_file)
exit(EXIT_ERROR)
usage = Commands.dispersion.__doc__.strip()
parser = optparse.OptionParser(usage)
parser.add_option('--recalculate', action='store_true',
help='Rebuild cached dispersion info and save')
parser.add_option('-v', '--verbose', action='store_true',
help='Display dispersion report for tiers')
options, args = parser.parse_args(argv)
if args[3:]:
search_filter = args[3]
else:
search_filter = None
orig_version = builder.version
report = dispersion_report(builder, search_filter=search_filter,
verbose=options.verbose,
recalculate=options.recalculate)
if builder.version != orig_version:
# we've already done the work, better go ahead and save it!
builder.save(builder_file)
print('Dispersion is %.06f, Balance is %.06f, Overload is %0.2f%%' % (
builder.dispersion, builder.get_balance(), builder.overload * 100))
print('Required overload is %.6f%%' % (
builder.get_required_overload() * 100))
if report['worst_tier']:
status = EXIT_WARNING
print('Worst tier is %.06f (%s)' % (report['max_dispersion'],
report['worst_tier']))
if report['graph']:
replica_range = list(range(int(math.ceil(builder.replicas + 1))))
part_count_width = '%%%ds' % max(len(str(builder.parts)), 5)
replica_counts_tmpl = ' '.join(part_count_width for i in
replica_range)
tiers = (tier for tier, _junk in report['graph'])
tier_width = max(max(map(len, tiers)), 30)
header_line = ('%-' + str(tier_width) +
's ' + part_count_width +
' %6s %6s ' + replica_counts_tmpl) % tuple(
['Tier', 'Parts', '%', 'Max'] + replica_range)
underline = '-' * len(header_line)
print(underline)
print(header_line)
print(underline)
for tier_name, dispersion in report['graph']:
replica_counts_repr = replica_counts_tmpl % tuple(
dispersion['replicas'])
template = ''.join([
'%-', str(tier_width), 's ',
part_count_width,
' %6.02f %6d %s',
])
args = (
tier_name,
dispersion['placed_parts'],
dispersion['dispersion'],
dispersion['max_replicas'],
replica_counts_repr,
)
print(template % args)
exit(status)
@staticmethod
def validate():
"""
swift-ring-builder <builder_file> validate
Just runs the validation routines on the ring.
"""
builder.validate()
exit(EXIT_SUCCESS)
@staticmethod
def write_ring():
"""
swift-ring-builder <builder_file> write_ring
Just rewrites the distributable ring file. This is done automatically after
a successful rebalance, so really this is only useful after one or more
'set_info' calls when no rebalance is needed but you want to send out the
new device information.
"""
if not builder.devs:
print('Unable to write empty ring.')
exit(EXIT_ERROR)
ring_data = builder.get_ring()
if not ring_data._replica2part2dev_id:
if ring_data.devs:
print('Warning: Writing a ring with no partition '
'assignments but with devices; did you forget to run '
'"rebalance"?')
ring_data.save(
pathjoin(backup_dir, '%d.' % time() + basename(ring_file)))
ring_data.save(ring_file)
exit(EXIT_SUCCESS)
@staticmethod
def write_builder():
"""
swift-ring-builder <ring_file> write_builder [min_part_hours]
Recreate a builder from a ring file (lossy) if you lost your builder
backups. (Protip: don't lose your builder backups).
[min_part_hours] is one of those numbers lost to the builder,
you can change it with set_min_part_hours.
"""
if exists(builder_file):
print('Cowardly refusing to overwrite existing '
'Ring Builder file: %s' % builder_file)
exit(EXIT_ERROR)
if len(argv) > 3:
min_part_hours = int(argv[3])
else:
stderr.write("WARNING: default min_part_hours may not match "
"the value in the lost builder.\n")
min_part_hours = 24
ring = Ring(ring_file)
for dev in ring.devs:
if dev is None:
continue
dev.update({
'parts': 0,
'parts_wanted': 0,
})
builder_dict = {
'part_power': 32 - ring._part_shift,
'replicas': float(ring.replica_count),
'min_part_hours': min_part_hours,
'parts': ring.partition_count,
'devs': ring.devs,
'devs_changed': False,
'version': ring.version or 0,
'_replica2part2dev': ring._replica2part2dev_id,
'_last_part_moves_epoch': None,
'_last_part_moves': None,
'_last_part_gather_start': 0,
'_remove_devs': [],
}
builder = RingBuilder.from_dict(builder_dict)
for parts in builder._replica2part2dev:
for dev_id in parts:
builder.devs[dev_id]['parts'] += 1
builder.save(builder_file)
@staticmethod
def pretend_min_part_hours_passed():
"""
swift-ring-builder <builder_file> pretend_min_part_hours_passed
Resets the clock on the last time a rebalance happened, thus
circumventing the min_part_hours check.
*****************************
USE THIS WITH EXTREME CAUTION
*****************************
If you run this command and deploy rebalanced rings before a replication
pass completes, you may introduce unavailability in your cluster. This
has an end-user impact.
"""
builder.pretend_min_part_hours_passed()
builder.save(builder_file)
exit(EXIT_SUCCESS)
@staticmethod
def set_min_part_hours():
"""
swift-ring-builder <builder_file> set_min_part_hours <hours>
Changes the <min_part_hours> to the given <hours>. This should be set to
however long a full replication/update cycle takes. We're working on a way
to determine this more easily than scanning logs.
"""
if len(argv) < 4:
print(Commands.set_min_part_hours.__doc__.strip())
exit(EXIT_ERROR)
builder.change_min_part_hours(int(argv[3]))
print('The minimum number of hours before a partition can be '
'reassigned is now set to %s' % argv[3])
builder.save(builder_file)
exit(EXIT_SUCCESS)
@staticmethod
def set_replicas():
"""
swift-ring-builder <builder_file> set_replicas <replicas>
Changes the replica count to the given <replicas>. <replicas> may
be a floating-point value, in which case some partitions will have
floor(<replicas>) replicas and some will have ceiling(<replicas>)
in the correct proportions.
A rebalance is needed to make the change take effect.
"""
if len(argv) < 4:
print(Commands.set_replicas.__doc__.strip())
exit(EXIT_ERROR)
new_replicas = argv[3]
try:
new_replicas = float(new_replicas)
except ValueError:
print(Commands.set_replicas.__doc__.strip())
print("\"%s\" is not a valid number." % new_replicas)
exit(EXIT_ERROR)
if new_replicas < 1:
print("Replica count must be at least 1.")
exit(EXIT_ERROR)
builder.set_replicas(new_replicas)
print('The replica count is now %.6f.' % builder.replicas)
print('The change will take effect after the next rebalance.')
builder.save(builder_file)
exit(EXIT_SUCCESS)
@staticmethod
def set_overload():
"""
swift-ring-builder <builder_file> set_overload <overload>[%]
Changes the overload factor to the given <overload>.
A rebalance is needed to make the change take effect.
"""
if len(argv) < 4:
print(Commands.set_overload.__doc__.strip())
exit(EXIT_ERROR)
new_overload = argv[3]
if new_overload.endswith('%'):
percent = True
new_overload = new_overload.rstrip('%')
else:
percent = False
try:
new_overload = float(new_overload)
except ValueError:
print(Commands.set_overload.__doc__.strip())
print("%r is not a valid number." % new_overload)
exit(EXIT_ERROR)
if percent:
new_overload *= 0.01
if new_overload < 0:
print("Overload must be non-negative.")
exit(EXIT_ERROR)
if new_overload > 1 and not percent:
print("!?! Warning overload is greater than 100% !?!")
status = EXIT_WARNING
else:
status = EXIT_SUCCESS
builder.set_overload(new_overload)
print('The overload factor is now %0.2f%% (%.6f)' % (
builder.overload * 100, builder.overload))
print('The change will take effect after the next rebalance.')
builder.save(builder_file)
exit(status)
@staticmethod
def prepare_increase_partition_power():
"""
swift-ring-builder <builder_file> prepare_increase_partition_power
Prepare the ring to increase the partition power by one.
A write_ring command is needed to make the change take effect.
Once the updated rings have been deployed to all servers you need to run
the swift-object-relinker tool to relink existing data.
*****************************
USE THIS WITH EXTREME CAUTION
*****************************
If you increase the partition power and deploy changed rings, you may
introduce unavailability in your cluster. This has an end-user impact. Make
sure you execute required operations to increase the partition power
accurately.
"""
if len(argv) < 3:
print(Commands.prepare_increase_partition_power.__doc__.strip())
exit(EXIT_ERROR)
if "object" not in basename(builder_file):
print(
'Partition power increase is only supported for object rings.')
exit(EXIT_ERROR)
if not builder.prepare_increase_partition_power():
print('Ring is already prepared for partition power increase.')
exit(EXIT_ERROR)
builder.save(builder_file)
print('The next partition power is now %d.' % builder.next_part_power)
print('The change will take effect after the next write_ring.')
print('Ensure your proxy-servers, object-replicators and ')
print('reconstructors are using the changed rings and relink ')
print('(using swift-object-relinker) your existing data')
print('before the partition power increase')
exit(EXIT_SUCCESS)
@staticmethod
def increase_partition_power():
"""
swift-ring-builder <builder_file> increase_partition_power
Increases the partition power by one. Needs to be run after
prepare_increase_partition_power has been run and all existing data has
been relinked using the swift-object-relinker tool.
A write_ring command is needed to make the change take effect.
Once the updated rings have been deployed to all servers you need to run
the swift-object-relinker tool to cleanup old data.
*****************************
USE THIS WITH EXTREME CAUTION
*****************************
If you increase the partition power and deploy changed rings, you may
introduce unavailability in your cluster. This has an end-user impact. Make
sure you execute required operations to increase the partition power
accurately.
"""
if len(argv) < 3:
print(Commands.increase_partition_power.__doc__.strip())
exit(EXIT_ERROR)
if builder.increase_partition_power():
print('The partition power is now %d.' % builder.part_power)
print('The change will take effect after the next write_ring.')
builder._update_last_part_moves()
builder.save(builder_file)
exit(EXIT_SUCCESS)
else:
print('Ring partition power cannot be increased. Either the ring')
print('was not prepared yet, or this operation has already run.')
exit(EXIT_ERROR)
@staticmethod
def cancel_increase_partition_power():
"""
swift-ring-builder <builder_file> cancel_increase_partition_power
Cancel the increase of the partition power.
A write_ring command is needed to make the change take effect.
Once the updated rings have been deployed to all servers you need to run
the swift-object-relinker tool to cleanup unneeded links.
*****************************
USE THIS WITH EXTREME CAUTION
*****************************
If you increase the partition power and deploy changed rings, you may
introduce unavailability in your cluster. This has an end-user impact. Make
sure you execute required operations to increase the partition power
accurately.
"""
if len(argv) < 3:
print(Commands.cancel_increase_partition_power.__doc__.strip())
exit(EXIT_ERROR)
if not builder.cancel_increase_partition_power():
print('Ring partition power increase cannot be canceled.')
exit(EXIT_ERROR)
builder.save(builder_file)
print('The next partition power is now %d.' % builder.next_part_power)
print('The change will take effect after the next write_ring.')
print('Ensure your object-servers are using the changed rings and')
print('cleanup (using swift-object-relinker) the hard links')
exit(EXIT_SUCCESS)
@staticmethod
def finish_increase_partition_power():
"""
swift-ring-builder <builder_file> finish_increase_partition_power
Finally removes the next_part_power flag. Has to be run after the
swift-object-relinker tool has been used to cleanup old existing data.
A write_ring command is needed to make the change take effect.
*****************************
USE THIS WITH EXTREME CAUTION
*****************************
If you increase the partition power and deploy changed rings, you may
introduce unavailability in your cluster. This has an end-user impact. Make
sure you execute required operations to increase the partition power
accurately.
"""
if len(argv) < 3:
print(Commands.finish_increase_partition_power.__doc__.strip())
exit(EXIT_ERROR)
if not builder.finish_increase_partition_power():
print('Ring partition power increase cannot be finished.')
exit(EXIT_ERROR)
print('The change will take effect after the next write_ring.')
builder.save(builder_file)
exit(EXIT_SUCCESS)
def main(arguments=None):
global argv, backup_dir, builder, builder_file, ring_file
if arguments is not None:
argv = arguments
else:
argv = sys_argv
if len(argv) < 2:
print("swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s\n" %
globals())
print(Commands.default.__doc__.strip())
print()
cmds = [c for c in dir(Commands)
if getattr(Commands, c).__doc__ and not c.startswith('_') and
c != 'default']
cmds.sort()
for cmd in cmds:
print(getattr(Commands, cmd).__doc__.strip())
print()
print(parse_search_value.__doc__.strip())
print()
for line in wrap(' '.join(cmds), 79, initial_indent='Quick list: ',
subsequent_indent=' '):
print(line)
print('Exit codes: 0 = operation successful\n'
' 1 = operation completed with warnings\n'
' 2 = error')
exit(EXIT_SUCCESS)
builder_file, ring_file = parse_builder_ring_filename_args(argv)
if builder_file != argv[1]:
print('Note: using %s instead of %s as builder file' % (
builder_file, argv[1]))
try:
builder = RingBuilder.load(builder_file)
except exceptions.UnPicklingError as e:
msg = str(e)
try:
CompositeRingBuilder.load(builder_file)
msg += ' (it appears to be a composite ring builder file?)'
except Exception: # noqa
pass
print(msg)
exit(EXIT_ERROR)
except (exceptions.FileNotFoundError, exceptions.PermissionError) as e:
if len(argv) < 3 or argv[2] not in ('create', 'write_builder'):
print(e)
exit(EXIT_ERROR)
except Exception as e:
print('Problem occurred while reading builder file: %s. %s' %
(builder_file, e))
exit(EXIT_ERROR)
backup_dir = pathjoin(dirname(builder_file), 'backups')
try:
mkdir(backup_dir)
except OSError as err:
if err.errno != EEXIST:
raise
if len(argv) == 2:
command = "default"
else:
command = argv[2]
if argv[0].endswith('-safe'):
try:
with lock_parent_directory(abspath(builder_file), 15):
getattr(Commands, command, Commands.unknown)()
except exceptions.LockTimeout:
print("Ring/builder dir currently locked.")
exit(2)
else:
getattr(Commands, command, Commands.unknown)()
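# Illustrative sketch (not part of the original module): main() can also be
# driven programmatically with an argv-style list. Every command path ends
# in exit(), so callers must catch SystemExit; the builder path used here is
# only an example.
def _example_show_ring_info():
    try:
        main(['swift-ring-builder', '/etc/swift/object.builder'])
    except SystemExit as err:
        return err.code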
| swift-master | swift/cli/ringbuilder.py |
# Copyright (c) 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import defaultdict
from swift.common import utils
from swift.common.db_replicator import roundrobin_datadirs
from swift.common.ring import ring
from swift.common.utils import Timestamp
from swift.container.backend import ContainerBroker, DATADIR
TAB = ' '
def broker_key(broker):
broker.get_info()
return broker.path
def container_type(broker):
return 'ROOT' if broker.is_root_container() else 'SHARD'
def collect_brokers(conf_path, names2nodes):
conf = utils.readconf(conf_path, 'container-replicator')
root = conf.get('devices', '/srv/node')
swift_dir = conf.get('swift_dir', '/etc/swift')
c_ring = ring.Ring(swift_dir, ring_name='container')
dirs = []
brokers = defaultdict(dict)
for node in c_ring.devs:
if node is None:
continue
datadir = os.path.join(root, node['device'], DATADIR)
if os.path.isdir(datadir):
dirs.append((datadir, node['id'], lambda *args: True))
for part, object_file, node_id in roundrobin_datadirs(dirs):
broker = ContainerBroker(object_file)
for node in c_ring.get_part_nodes(int(part)):
if node['id'] == node_id:
node_index = str(node['index'])
break
else:
node_index = 'handoff'
names2nodes[broker_key(broker)][(node_id, node_index)] = broker
return brokers
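# Illustrative sketch (not part of the original module): collect_brokers()
# populates the names2nodes2brokers mapping that the printing helpers below
# consume. The resulting shape is
#     {broker path: {(node id, node index): ContainerBroker, ...}, ...}
# where the node index is a string such as '0' or 'handoff'. The conf path
# used here is only an example.
def _example_collect_one_server():
    names2nodes2brokers = defaultdict(dict)
    collect_brokers('/etc/swift/container-server/1.conf',
                    names2nodes2brokers)
    return names2nodes2brokers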
def print_broker_info(node, broker, indent_level=0):
indent = indent_level * TAB
info = broker.get_info()
raw_info = broker._get_info()
deleted_at = float(info['delete_timestamp'])
if deleted_at:
deleted_at = Timestamp(info['delete_timestamp']).isoformat
else:
deleted_at = ' - '
print('%s(%s) %s, objs: %s, bytes: %s, actual_objs: %s, put: %s, '
'deleted: %s' %
(indent, node[1][0], broker.get_db_state(),
info['object_count'], info['bytes_used'], raw_info['object_count'],
Timestamp(info['put_timestamp']).isoformat, deleted_at))
def print_db(node, broker, expect_type='ROOT', indent_level=0):
indent = indent_level * TAB
print('%s(%s) %s node id: %s, node index: %s' %
(indent, node[1][0], broker.db_file, node[0], node[1]))
actual_type = container_type(broker)
if actual_type != expect_type:
print('%s ERROR expected %s but found %s' %
(indent, expect_type, actual_type))
def print_own_shard_range(node, sr, indent_level):
indent = indent_level * TAB
range = '%r - %r' % (sr.lower, sr.upper)
print('%s(%s) %23s, objs: %3s, bytes: %3s, timestamp: %s (%s), '
'modified: %s (%s), %7s: %s (%s), deleted: %s, epoch: %s' %
(indent, node[1][0], range, sr.object_count, sr.bytes_used,
sr.timestamp.isoformat, sr.timestamp.internal,
sr.meta_timestamp.isoformat, sr.meta_timestamp.internal,
sr.state_text, sr.state_timestamp.isoformat,
sr.state_timestamp.internal, sr.deleted,
sr.epoch.internal if sr.epoch else None))
def print_own_shard_range_info(node, shard_ranges, indent_level=0):
shard_ranges.sort(key=lambda x: x.deleted)
for sr in shard_ranges:
print_own_shard_range(node, sr, indent_level)
def print_shard_range(node, sr, indent_level):
indent = indent_level * TAB
range = '%r - %r' % (sr.lower, sr.upper)
print('%s(%s) %23s, objs: %3s, bytes: %3s, timestamp: %s (%s), '
'modified: %s (%s), %7s: %s (%s), deleted: %s, epoch: %s %s' %
(indent, node[1][0], range, sr.object_count, sr.bytes_used,
sr.timestamp.isoformat, sr.timestamp.internal,
sr.meta_timestamp.isoformat, sr.meta_timestamp.internal,
sr.state_text, sr.state_timestamp.isoformat,
sr.state_timestamp.internal, sr.deleted,
sr.epoch.internal if sr.epoch else None, sr.name))
def print_shard_range_info(node, shard_ranges, indent_level=0):
shard_ranges.sort(key=lambda x: x.deleted)
for sr in shard_ranges:
print_shard_range(node, sr, indent_level)
def print_sharding_info(node, broker, indent_level=0):
indent = indent_level * TAB
print('%s(%s) %s' % (indent, node[1][0], broker.get_sharding_sysmeta()))
def print_container(name, name2nodes2brokers, expect_type='ROOT',
indent_level=0, used_names=None):
used_names = used_names or set()
indent = indent_level * TAB
node2broker = name2nodes2brokers[name]
ordered_by_index = sorted(node2broker.keys(), key=lambda x: x[1])
brokers = [(node, node2broker[node]) for node in ordered_by_index]
print('%sName: %s' % (indent, name))
if name in used_names:
print('%s (Details already listed)\n' % indent)
return
used_names.add(name)
print(indent + 'DB files:')
for node, broker in brokers:
print_db(node, broker, expect_type, indent_level=indent_level + 1)
print(indent + 'Info:')
for node, broker in brokers:
print_broker_info(node, broker, indent_level=indent_level + 1)
print(indent + 'Sharding info:')
for node, broker in brokers:
print_sharding_info(node, broker, indent_level=indent_level + 1)
print(indent + 'Own shard range:')
for node, broker in brokers:
shard_ranges = broker.get_shard_ranges(
include_deleted=True, include_own=True, exclude_others=True)
print_own_shard_range_info(node, shard_ranges,
indent_level=indent_level + 1)
print(indent + 'Shard ranges:')
shard_names = set()
for node, broker in brokers:
shard_ranges = broker.get_shard_ranges(include_deleted=True)
for sr_name in shard_ranges:
shard_names.add(sr_name.name)
print_shard_range_info(node, shard_ranges,
indent_level=indent_level + 1)
print(indent + 'Shards:')
for sr_name in shard_names:
print_container(sr_name, name2nodes2brokers, expect_type='SHARD',
indent_level=indent_level + 1, used_names=used_names)
print('\n')
def run(conf_paths):
# container_name -> (node id, node index) -> broker
name2nodes2brokers = defaultdict(dict)
for conf_path in conf_paths:
collect_brokers(conf_path, name2nodes2brokers)
print('First column on each line is (node index)\n')
for name, node2broker in name2nodes2brokers.items():
expect_root = False
for node, broker in node2broker.items():
expect_root = broker.is_root_container() or expect_root
if expect_root:
print_container(name, name2nodes2brokers)
if __name__ == '__main__':
conf_dir = '/etc/swift/container-server'
conf_paths = [os.path.join(conf_dir, p) for p in os.listdir(conf_dir)
if p.endswith(('conf', 'conf.d'))]
run(conf_paths)
| swift-master | swift/cli/shard-info.py |
| swift-master | swift/cli/__init__.py |
# Copyright (c) 2015 Samuel Merritt <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is a tool for analyzing how well the ring builder performs its job
in a particular scenario. It is intended to help developers quantify any
improvements or regressions in the ring builder; it is probably not useful
to others.
The ring builder analyzer takes a scenario file containing some initial
parameters for a ring builder plus a certain number of rounds. In each
round, some modifications are made to the builder, e.g. add a device, remove
a device, change a device's weight. Then, the builder is repeatedly
rebalanced until it settles down. Data about that round is printed, and the
next round begins.
Scenarios are specified in JSON. Example scenario for a gradual device
addition::
{
"part_power": 12,
"replicas": 3,
"overload": 0.1,
"random_seed": 203488,
"rounds": [
[
["add", "r1z2-10.20.30.40:6200/sda", 8000],
["add", "r1z2-10.20.30.40:6200/sdb", 8000],
["add", "r1z2-10.20.30.40:6200/sdc", 8000],
["add", "r1z2-10.20.30.40:6200/sdd", 8000],
["add", "r1z2-10.20.30.41:6200/sda", 8000],
["add", "r1z2-10.20.30.41:6200/sdb", 8000],
["add", "r1z2-10.20.30.41:6200/sdc", 8000],
["add", "r1z2-10.20.30.41:6200/sdd", 8000],
["add", "r1z2-10.20.30.43:6200/sda", 8000],
["add", "r1z2-10.20.30.43:6200/sdb", 8000],
["add", "r1z2-10.20.30.43:6200/sdc", 8000],
["add", "r1z2-10.20.30.43:6200/sdd", 8000],
["add", "r1z2-10.20.30.44:6200/sda", 8000],
["add", "r1z2-10.20.30.44:6200/sdb", 8000],
["add", "r1z2-10.20.30.44:6200/sdc", 8000]
], [
["add", "r1z2-10.20.30.44:6200/sdd", 1000]
], [
["set_weight", 15, 2000]
], [
["remove", 3],
["set_weight", 15, 3000]
], [
["set_weight", 15, 4000]
], [
["set_weight", 15, 5000]
], [
["set_weight", 15, 6000]
], [
["set_weight", 15, 7000]
], [
["set_weight", 15, 8000]
]]
}
"""
import argparse
import json
import sys
from swift.common.ring import builder
from swift.common.ring.utils import parse_add_value
ARG_PARSER = argparse.ArgumentParser(
description='Put the ring builder through its paces')
ARG_PARSER.add_argument(
'--check', '-c', action='store_true',
help="Just check the scenario, don't execute it.")
ARG_PARSER.add_argument(
'scenario_path',
help="Path to the scenario file")
class ParseCommandError(ValueError):
def __init__(self, name, round_index, command_index, msg):
msg = "Invalid %s (round %s, command %s): %s" % (
name, round_index, command_index, msg)
super(ParseCommandError, self).__init__(msg)
def _parse_weight(round_index, command_index, weight_str):
try:
weight = float(weight_str)
except ValueError as err:
raise ParseCommandError('weight', round_index, command_index, err)
if weight < 0:
raise ParseCommandError('weight', round_index, command_index,
'cannot be negative')
return weight
def _parse_add_command(round_index, command_index, command):
if len(command) != 3:
raise ParseCommandError(
'add command', round_index, command_index,
'expected array of length 3, but got %r' % command)
dev_str = command[1]
weight_str = command[2]
try:
dev = parse_add_value(dev_str)
except ValueError as err:
raise ParseCommandError('device specifier', round_index,
command_index, err)
dev['weight'] = _parse_weight(round_index, command_index, weight_str)
if dev['region'] is None:
dev['region'] = 1
default_key_map = {
'replication_ip': 'ip',
'replication_port': 'port',
}
for empty_key, default_key in default_key_map.items():
if dev[empty_key] is None:
dev[empty_key] = dev[default_key]
return ['add', dev]
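# Illustrative sketch (not part of the original module): an "add" entry from
# a scenario round is parsed into the normalized ['add', dev] form that
# RingBuilder.add_dev() accepts. The helper name is hypothetical.
def _example_parse_add():
    command = ["add", "r1z2-10.20.30.40:6200/sda", 8000]
    # Returns ['add', dev] where dev carries region=1, zone=2,
    # ip/replication_ip='10.20.30.40', port/replication_port=6200,
    # device='sda' and weight=8000.0.
    return _parse_add_command(0, 0, command)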
def _parse_remove_command(round_index, command_index, command):
if len(command) != 2:
raise ParseCommandError('remove command', round_index, command_index,
"expected array of length 2, but got %r" %
(command,))
dev_str = command[1]
try:
dev_id = int(dev_str)
except ValueError as err:
raise ParseCommandError('device ID in remove',
round_index, command_index, err)
return ['remove', dev_id]
def _parse_set_weight_command(round_index, command_index, command):
if len(command) != 3:
raise ParseCommandError('set_weight command', round_index, command_index,
"expected array of length 3, but got %r" %
(command,))
dev_str = command[1]
weight_str = command[2]
try:
dev_id = int(dev_str)
except ValueError as err:
raise ParseCommandError('device ID in set_weight',
round_index, command_index, err)
weight = _parse_weight(round_index, command_index, weight_str)
return ['set_weight', dev_id, weight]
def _parse_save_command(round_index, command_index, command):
if len(command) != 2:
raise ParseCommandError(
'save command', round_index, command_index,
"expected array of length 2 but got %r" % (command,))
return ['save', command[1]]
def parse_scenario(scenario_data):
"""
Takes a serialized scenario and turns it into a data structure suitable
for feeding to run_scenario().
:returns: scenario
:raises ValueError: on invalid scenario
"""
parsed_scenario = {}
try:
raw_scenario = json.loads(scenario_data)
except ValueError as err:
raise ValueError("Invalid JSON in scenario file: %s" % err)
if not isinstance(raw_scenario, dict):
raise ValueError("Scenario must be a JSON object, not array or string")
if 'part_power' not in raw_scenario:
raise ValueError("part_power missing")
try:
parsed_scenario['part_power'] = int(raw_scenario['part_power'])
except ValueError as err:
raise ValueError("part_power not an integer: %s" % err)
if not 1 <= parsed_scenario['part_power'] <= 32:
raise ValueError("part_power must be between 1 and 32, but was %d"
% raw_scenario['part_power'])
if 'replicas' not in raw_scenario:
raise ValueError("replicas missing")
try:
parsed_scenario['replicas'] = float(raw_scenario['replicas'])
except ValueError as err:
raise ValueError("replicas not a float: %s" % err)
if parsed_scenario['replicas'] < 1:
raise ValueError("replicas must be at least 1, but is %f"
% parsed_scenario['replicas'])
if 'overload' not in raw_scenario:
raise ValueError("overload missing")
try:
parsed_scenario['overload'] = float(raw_scenario['overload'])
except ValueError as err:
raise ValueError("overload not a float: %s" % err)
if parsed_scenario['overload'] < 0:
raise ValueError("overload must be non-negative, but is %f"
% parsed_scenario['overload'])
if 'random_seed' not in raw_scenario:
raise ValueError("random_seed missing")
try:
parsed_scenario['random_seed'] = int(raw_scenario['random_seed'])
except ValueError as err:
raise ValueError("replicas not an integer: %s" % err)
if 'rounds' not in raw_scenario:
raise ValueError("rounds missing")
if not isinstance(raw_scenario['rounds'], list):
raise ValueError("rounds must be an array")
parser_for_command = {
'add': _parse_add_command,
'remove': _parse_remove_command,
'set_weight': _parse_set_weight_command,
'save': _parse_save_command,
}
parsed_scenario['rounds'] = []
for round_index, raw_round in enumerate(raw_scenario['rounds']):
if not isinstance(raw_round, list):
raise ValueError("round %d not an array" % round_index)
parsed_round = []
for command_index, command in enumerate(raw_round):
if command[0] not in parser_for_command:
raise ValueError(
"Unknown command (round %d, command %d): "
"'%s' should be one of %s" %
(round_index, command_index, command[0],
parser_for_command.keys()))
parsed_round.append(
parser_for_command[command[0]](
round_index, command_index, command))
parsed_scenario['rounds'].append(parsed_round)
return parsed_scenario
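# Illustrative sketch (not part of the original module): parse_scenario()
# takes the serialized JSON text (not a dict) and returns the structure that
# run_scenario() consumes. The scenario below is a minimal valid example.
def _example_parse_minimal_scenario():
    scenario_data = json.dumps({
        "part_power": 8,
        "replicas": 3,
        "overload": 0.1,
        "random_seed": 42,
        "rounds": [[["add", "r1z1-10.0.0.1:6200/sda", 100]]],
    })
    return parse_scenario(scenario_data)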
def run_scenario(scenario):
"""
Takes a parsed scenario (like from parse_scenario()) and runs it.
"""
seed = scenario['random_seed']
rb = builder.RingBuilder(scenario['part_power'], scenario['replicas'], 1)
rb.set_overload(scenario['overload'])
command_map = {
'add': rb.add_dev,
'remove': rb.remove_dev,
'set_weight': rb.set_dev_weight,
'save': rb.save,
}
for round_index, commands in enumerate(scenario['rounds']):
print("Round %d" % (round_index + 1))
for command in commands:
key = command.pop(0)
try:
command_f = command_map[key]
except KeyError:
raise ValueError("unknown command %r" % key)
command_f(*command)
rebalance_number = 1
parts_moved, old_balance, removed_devs = rb.rebalance(seed=seed)
rb.pretend_min_part_hours_passed()
print("\tRebalance 1: moved %d parts, balance is %.6f, %d removed "
"devs" % (parts_moved, old_balance, removed_devs))
while True:
rebalance_number += 1
parts_moved, new_balance, removed_devs = rb.rebalance(seed=seed)
rb.pretend_min_part_hours_passed()
print("\tRebalance %d: moved %d parts, balance is %.6f, "
"%d removed devs" % (rebalance_number, parts_moved,
new_balance, removed_devs))
if parts_moved == 0 and removed_devs == 0:
break
if abs(new_balance - old_balance) < 1 and not (
old_balance == builder.MAX_BALANCE and
new_balance == builder.MAX_BALANCE):
break
old_balance = new_balance
def main(argv=None):
args = ARG_PARSER.parse_args(argv)
try:
with open(args.scenario_path) as sfh:
scenario_data = sfh.read()
except OSError as err:
sys.stderr.write("Error opening scenario %s: %s\n" %
(args.scenario_path, err))
return 1
try:
scenario = parse_scenario(scenario_data)
except ValueError as err:
sys.stderr.write("Invalid scenario %s: %s\n" %
(args.scenario_path, err))
return 1
if not args.check:
run_scenario(scenario)
return 0
| swift-master | swift/cli/ring_builder_analyzer.py |
#!/usr/bin/env python
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
from collections import defaultdict
from six.moves.configparser import ConfigParser
from optparse import OptionParser
from sys import exit, stdout, stderr
from time import time
from eventlet import GreenPool, hubs, patcher, Timeout
from eventlet.pools import Pool
from swift.common import direct_client
from swift.common.internal_client import SimpleClient
from swift.common.ring import Ring
from swift.common.exceptions import ClientException
from swift.common.utils import compute_eta, get_time_units, \
config_true_value, node_to_string
from swift.common.storage_policy import POLICIES
unmounted = []
notfound = []
json_output = False
debug = False
insecure = False
def get_error_log(prefix):
def error_log(msg_or_exc):
global debug, unmounted, notfound
if hasattr(msg_or_exc, 'http_status'):
identifier = '%s:%s/%s' % (msg_or_exc.http_host,
msg_or_exc.http_port,
msg_or_exc.http_device)
if msg_or_exc.http_status == 507:
if identifier not in unmounted:
unmounted.append(identifier)
print('ERROR: %s is unmounted -- This will '
'cause replicas designated for that device to be '
'considered missing until resolved or the ring is '
'updated.' % (identifier), file=stderr)
stderr.flush()
if debug and identifier not in notfound:
notfound.append(identifier)
print('ERROR: %s returned a 404' % (identifier), file=stderr)
stderr.flush()
if not hasattr(msg_or_exc, 'http_status') or \
msg_or_exc.http_status not in (404, 507):
print('ERROR: %s: %s' % (prefix, msg_or_exc), file=stderr)
stderr.flush()
return error_log
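# Illustrative sketch (not part of the original module): get_error_log()
# returns a closure bound to a prefix naming the node being queried. Plain
# messages are printed with that prefix; ClientException instances with
# http_status 507 are additionally recorded in the module-level unmounted
# list (and 404s in notfound when debug is enabled).
def _example_error_log_usage():
    error_log = get_error_log('10.0.0.1:6201/sdb')
    error_log('connection timed out')  # "ERROR: 10.0.0.1:6201/sdb: ..."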
def container_dispersion_report(coropool, connpool, account, container_ring,
retries, output_missing_partitions, policy):
with connpool.item() as conn:
containers = [c['name'] for c in conn.get_account(
prefix='dispersion_%d' % policy.idx, full_listing=True)[1]]
containers_listed = len(containers)
if not containers_listed:
print('No containers to query. Has '
'swift-dispersion-populate been run?', file=stderr)
stderr.flush()
return
retries_done = [0]
containers_queried = [0]
container_copies_missing = defaultdict(int)
container_copies_found = [0]
container_copies_expected = [0]
begun = time()
next_report = [time() + 2]
def direct(container, part, nodes):
found_count = 0
for node in nodes:
error_log = get_error_log(node_to_string(node))
try:
attempts, _junk = direct_client.retry(
direct_client.direct_head_container, node, part, account,
container, error_log=error_log, retries=retries)
retries_done[0] += attempts - 1
found_count += 1
except ClientException as err:
if err.http_status not in (404, 507):
error_log('Giving up on /%s/%s/%s: %s' % (part, account,
container, err))
except (Exception, Timeout) as err:
error_log('Giving up on /%s/%s/%s: %s' % (part, account,
container, err))
if output_missing_partitions and \
found_count < len(nodes):
missing = len(nodes) - found_count
print('\r\x1B[K', end='')
stdout.flush()
print('# Container partition %s missing %s cop%s' % (
part, missing, 'y' if missing == 1 else 'ies'), file=stderr)
container_copies_found[0] += found_count
containers_queried[0] += 1
container_copies_missing[len(nodes) - found_count] += 1
if time() >= next_report[0]:
next_report[0] = time() + 5
eta, eta_unit = compute_eta(begun, containers_queried[0],
containers_listed)
if not json_output:
print('\r\x1B[KQuerying containers: %d of %d, %d%s left, %d '
'retries' % (containers_queried[0], containers_listed,
round(eta), eta_unit, retries_done[0]),
end='')
stdout.flush()
container_parts = {}
for container in containers:
part, nodes = container_ring.get_nodes(account, container)
if part not in container_parts:
container_copies_expected[0] += len(nodes)
container_parts[part] = part
coropool.spawn(direct, container, part, nodes)
coropool.waitall()
distinct_partitions = len(container_parts)
copies_found = container_copies_found[0]
copies_expected = container_copies_expected[0]
value = 100.0 * copies_found / copies_expected
elapsed, elapsed_unit = get_time_units(time() - begun)
container_copies_missing.pop(0, None)
if not json_output:
print('\r\x1B[KQueried %d containers for dispersion reporting, '
'%d%s, %d retries' % (containers_listed, round(elapsed),
elapsed_unit, retries_done[0]))
if containers_listed - distinct_partitions:
print('There were %d overlapping partitions' % (
containers_listed - distinct_partitions))
for missing_copies, num_parts in container_copies_missing.items():
print(missing_string(num_parts, missing_copies,
container_ring.replica_count))
print('%.02f%% of container copies found (%d of %d)' % (
value, copies_found, copies_expected))
print('Sample represents %.02f%% of the container partition space' % (
100.0 * distinct_partitions / container_ring.partition_count))
stdout.flush()
return None
else:
results = {'retries': retries_done[0],
'overlapping': containers_listed - distinct_partitions,
'pct_found': value,
'copies_found': copies_found,
'copies_expected': copies_expected}
for missing_copies, num_parts in container_copies_missing.items():
results['missing_%d' % (missing_copies)] = num_parts
return results
def object_dispersion_report(coropool, connpool, account, object_ring,
retries, output_missing_partitions, policy):
container = 'dispersion_objects_%d' % policy.idx
with connpool.item() as conn:
try:
objects = [o['name'] for o in conn.get_container(
container, prefix='dispersion_', full_listing=True)[1]]
except ClientException as err:
if err.http_status != 404:
raise
print('No objects to query. Has '
'swift-dispersion-populate been run?', file=stderr)
stderr.flush()
return
objects_listed = len(objects)
if not objects_listed:
print('No objects to query. Has swift-dispersion-populate '
'been run?', file=stderr)
stderr.flush()
return
retries_done = [0]
objects_queried = [0]
object_copies_found = [0]
object_copies_expected = [0]
object_copies_missing = defaultdict(int)
begun = time()
next_report = [time() + 2]
headers = None
if policy is not None:
headers = {}
headers['X-Backend-Storage-Policy-Index'] = int(policy)
def direct(obj, part, nodes):
found_count = 0
for node in nodes:
error_log = get_error_log(node_to_string(node))
try:
attempts, _junk = direct_client.retry(
direct_client.direct_head_object, node, part, account,
container, obj, error_log=error_log, retries=retries,
headers=headers)
retries_done[0] += attempts - 1
found_count += 1
except ClientException as err:
if err.http_status not in (404, 507):
error_log('Giving up on /%s/%s/%s/%s: %s' % (part, account,
container, obj, err))
except (Exception, Timeout) as err:
error_log('Giving up on /%s/%s/%s/%s: %s' % (part, account,
container, obj, err))
if output_missing_partitions and \
found_count < len(nodes):
missing = len(nodes) - found_count
print('\r\x1B[K', end='')
stdout.flush()
print('# Object partition %s missing %s cop%s' % (
part, missing, 'y' if missing == 1 else 'ies'), file=stderr)
object_copies_found[0] += found_count
object_copies_missing[len(nodes) - found_count] += 1
objects_queried[0] += 1
if time() >= next_report[0]:
next_report[0] = time() + 5
eta, eta_unit = compute_eta(begun, objects_queried[0],
objects_listed)
if not json_output:
print('\r\x1B[KQuerying objects: %d of %d, %d%s left, %d '
'retries' % (objects_queried[0], objects_listed,
round(eta), eta_unit, retries_done[0]),
end='')
stdout.flush()
object_parts = {}
for obj in objects:
part, nodes = object_ring.get_nodes(account, container, obj)
if part not in object_parts:
object_copies_expected[0] += len(nodes)
object_parts[part] = part
coropool.spawn(direct, obj, part, nodes)
coropool.waitall()
distinct_partitions = len(object_parts)
copies_found = object_copies_found[0]
copies_expected = object_copies_expected[0]
value = 100.0 * copies_found / copies_expected
elapsed, elapsed_unit = get_time_units(time() - begun)
if not json_output:
print('\r\x1B[KQueried %d objects for dispersion reporting, '
'%d%s, %d retries' % (objects_listed, round(elapsed),
elapsed_unit, retries_done[0]))
if objects_listed - distinct_partitions:
print('There were %d overlapping partitions' % (
objects_listed - distinct_partitions))
for missing_copies, num_parts in object_copies_missing.items():
print(missing_string(num_parts, missing_copies,
object_ring.replica_count))
print('%.02f%% of object copies found (%d of %d)' %
(value, copies_found, copies_expected))
print('Sample represents %.02f%% of the object partition space' % (
100.0 * distinct_partitions / object_ring.partition_count))
stdout.flush()
return None
else:
results = {'retries': retries_done[0],
'overlapping': objects_listed - distinct_partitions,
'pct_found': value,
'copies_found': copies_found,
'copies_expected': copies_expected}
for missing_copies, num_parts in object_copies_missing.items():
results['missing_%d' % (missing_copies,)] = num_parts
return results
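# Illustrative note (not part of the original module): when JSON output is
# enabled, both report functions above return a dict of this shape instead
# of printing, e.g. a container report might serialize to something like
#
#   {"retries": 0, "overlapping": 0, "pct_found": 99.5,
#    "copies_found": 7970, "copies_expected": 8010, "missing_1": 40}
#
# where each "missing_N" key counts partitions that were short N copies.
# The numbers here are made up for illustration only.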
def missing_string(partition_count, missing_copies, copy_count):
exclamations = ''
missing_string = str(missing_copies)
if missing_copies == copy_count:
exclamations = '!!! '
missing_string = 'all'
elif copy_count - missing_copies == 1:
exclamations = '! '
verb_string = 'was'
partition_string = 'partition'
if partition_count > 1:
verb_string = 'were'
partition_string = 'partitions'
copy_string = 'copies'
if missing_copies == 1:
copy_string = 'copy'
return '%sThere %s %d %s missing %s %s.' % (
exclamations, verb_string, partition_count, partition_string,
missing_string, copy_string
)
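# A quick sketch of the strings missing_string() builds (values chosen for
# illustration; the behaviour follows directly from the code above):
#
#   >>> missing_string(1, 1, 3)
#   'There was 1 partition missing 1 copy.'
#   >>> missing_string(5, 3, 3)
#   '!!! There were 5 partitions missing all copies.'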
def main():
patcher.monkey_patch()
hubs.get_hub().debug_exceptions = False
conffile = '/etc/swift/dispersion.conf'
parser = OptionParser(usage='''
Usage: %%prog [options] [conf_file]
[conf_file] defaults to %s'''.strip() % conffile)
parser.add_option('-j', '--dump-json', action='store_true', default=False,
help='dump dispersion report in json format')
parser.add_option('-d', '--debug', action='store_true', default=False,
help='print 404s to standard error')
parser.add_option('-p', '--partitions', action='store_true', default=False,
help='print missing partitions to standard error')
parser.add_option('--container-only', action='store_true', default=False,
help='Only run container report')
parser.add_option('--object-only', action='store_true', default=False,
help='Only run object report')
parser.add_option('--insecure', action='store_true', default=False,
help='Allow accessing insecure keystone server. '
'The keystone\'s certificate will not be verified.')
parser.add_option('-P', '--policy-name', dest='policy_name',
help="Specify storage policy name")
options, args = parser.parse_args()
if args:
conffile = args.pop(0)
if options.debug:
global debug
debug = True
c = ConfigParser()
if not c.read(conffile):
exit('Unable to read config file: %s' % conffile)
conf = dict(c.items('dispersion'))
if options.dump_json:
conf['dump_json'] = 'yes'
if options.object_only:
conf['container_report'] = 'no'
if options.container_only:
conf['object_report'] = 'no'
if options.insecure:
conf['keystone_api_insecure'] = 'yes'
if options.partitions:
conf['partitions'] = 'yes'
output = generate_report(conf, options.policy_name)
if json_output:
print(json.dumps(output))
def generate_report(conf, policy_name=None):
try:
# Delay importing so urllib3 will import monkey-patched modules
from swiftclient import get_auth
except ImportError:
from swift.common.internal_client import get_auth
global json_output
json_output = config_true_value(conf.get('dump_json', 'no'))
if policy_name is None:
policy = POLICIES.default
else:
policy = POLICIES.get_by_name(policy_name)
if policy is None:
exit('Unable to find policy: %s' % policy_name)
if not json_output:
print('Using storage policy: %s ' % policy.name)
swift_dir = conf.get('swift_dir', '/etc/swift')
retries = int(conf.get('retries', 5))
concurrency = int(conf.get('concurrency', 25))
endpoint_type = str(conf.get('endpoint_type', 'publicURL'))
region_name = str(conf.get('region_name', ''))
container_report = config_true_value(conf.get('container_report', 'yes'))
object_report = config_true_value(conf.get('object_report', 'yes'))
if not (object_report or container_report):
exit("Neither container or object report is set to run")
user_domain_name = str(conf.get('user_domain_name', ''))
project_domain_name = str(conf.get('project_domain_name', ''))
project_name = str(conf.get('project_name', ''))
insecure = config_true_value(conf.get('keystone_api_insecure', 'no'))
coropool = GreenPool(size=concurrency)
os_options = {'endpoint_type': endpoint_type}
if user_domain_name:
os_options['user_domain_name'] = user_domain_name
if project_domain_name:
os_options['project_domain_name'] = project_domain_name
if project_name:
os_options['project_name'] = project_name
if region_name:
os_options['region_name'] = region_name
url, token = get_auth(conf['auth_url'], conf['auth_user'],
conf['auth_key'],
auth_version=conf.get('auth_version', '1.0'),
os_options=os_options,
insecure=insecure)
account = url.rsplit('/', 1)[1]
connpool = Pool(max_size=concurrency)
connpool.create = lambda: SimpleClient(
url=url, token=token, retries=retries)
container_ring = Ring(swift_dir, ring_name='container')
object_ring = Ring(swift_dir, ring_name=policy.ring_name)
output = {}
if container_report:
output['container'] = container_dispersion_report(
coropool, connpool, account, container_ring, retries,
conf.get('partitions'), policy)
if object_report:
output['object'] = object_dispersion_report(
coropool, connpool, account, object_ring, retries,
conf.get('partitions'), policy)
return output
if __name__ == '__main__':
main()
| swift-master | swift/cli/dispersion_report.py |
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
Enqueue background jobs to delete portions of a container's namespace.
Accepts prefix, marker, and end-marker args that work as in container
listings. Objects found in the listing will be marked to be deleted
by the object-expirer; until the object is actually deleted, it will
continue to appear in listings.
If there are many objects, this operation may take some time. Stats will
periodically be emitted so you know the process hasn't hung. These will
also include the last object marked for deletion; if there is a failure,
pass this as the ``--marker`` when retrying to minimize duplicative work.
'''
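# A hypothetical invocation sketch (the account, container and prefix are
# made up; the module can be run directly thanks to the __main__ guard at
# the bottom of this file):
#
#   python -m swift.cli.container_deleter AUTH_test logs \
#       --prefix 2019/ --request-tries 3
#
# This enqueues async-delete tasks for every object in AUTH_test/logs whose
# name starts with "2019/", using /etc/swift/internal-client.conf by default.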
import argparse
import io
import itertools
import json
import six
import time
from swift.common.internal_client import InternalClient
from swift.common.utils import Timestamp, MD5_OF_EMPTY_STRING
from swift.obj.expirer import build_task_obj, ASYNC_DELETE_TYPE
OBJECTS_PER_UPDATE = 10000
def make_delete_jobs(account, container, objects, timestamp):
'''
Create a list of async-delete jobs
:param account: (native or unicode string) account to delete from
:param container: (native or unicode string) container to delete from
:param objects: (list of native or unicode strings) objects to delete
:param timestamp: (Timestamp) time at which objects should be marked
deleted
:returns: list of dicts appropriate for an UPDATE request to an
expiring-object queue
'''
if six.PY2:
if isinstance(account, str):
account = account.decode('utf8')
if isinstance(container, str):
container = container.decode('utf8')
return [
{
'name': build_task_obj(
timestamp, account, container,
obj.decode('utf8') if six.PY2 and isinstance(obj, str)
else obj, high_precision=True),
'deleted': 0,
'created_at': timestamp.internal,
'etag': MD5_OF_EMPTY_STRING,
'size': 0,
'storage_policy_index': 0,
'content_type': ASYNC_DELETE_TYPE,
} for obj in objects]
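# Illustrative shape of one entry returned by make_delete_jobs() (the exact
# 'name' value comes from build_task_obj() and is shown only schematically;
# the other fields are literal from the code above):
#
#   {'name': <task object name built from timestamp/account/container/obj>,
#    'deleted': 0,
#    'created_at': timestamp.internal,
#    'etag': MD5_OF_EMPTY_STRING,
#    'size': 0,
#    'storage_policy_index': 0,
#    'content_type': ASYNC_DELETE_TYPE}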
def mark_for_deletion(swift, account, container, marker, end_marker,
prefix, timestamp=None, yield_time=10):
'''
Enqueue jobs to async-delete some portion of a container's namespace
:param swift: InternalClient to use
:param account: account to delete from
:param container: container to delete from
:param marker: only delete objects after this name
:param end_marker: only delete objects before this name. Use ``None`` or
empty string to delete to the end of the namespace.
:param prefix: only delete objects starting with this prefix
:param timestamp: delete all objects as of this time. If ``None``, the
current time will be used.
:param yield_time: approximate period with which intermediate results
should be returned. If ``None``, disable intermediate
results.
:returns: If ``yield_time`` is ``None``, the number of objects marked for
deletion. Otherwise, a generator that will yield out tuples of
``(number of marked objects, last object name)`` approximately
every ``yield_time`` seconds. The final tuple will have ``None``
as the second element. This form allows you to retry when an
error occurs partway through while minimizing duplicate work.
'''
if timestamp is None:
timestamp = Timestamp.now()
def enqueue_deletes():
deleted = 0
obj_iter = swift.iter_objects(
account, container,
marker=marker, end_marker=end_marker, prefix=prefix)
time_marker = time.time()
while True:
to_delete = [obj['name'] for obj in itertools.islice(
obj_iter, OBJECTS_PER_UPDATE)]
if not to_delete:
break
delete_jobs = make_delete_jobs(
account, container, to_delete, timestamp)
swift.make_request(
'UPDATE',
swift.make_path('.expiring_objects', str(int(timestamp))),
headers={'X-Backend-Allow-Private-Methods': 'True',
'X-Backend-Storage-Policy-Index': '0',
'X-Timestamp': timestamp.internal},
acceptable_statuses=(2,),
body_file=io.BytesIO(json.dumps(delete_jobs).encode('ascii')))
deleted += len(delete_jobs)
if yield_time is not None and \
time.time() - time_marker > yield_time:
yield deleted, to_delete[-1]
time_marker = time.time()
yield deleted, None
if yield_time is None:
for deleted, marker in enqueue_deletes():
if marker is None:
return deleted
else:
return enqueue_deletes()
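# A minimal usage sketch, mirroring main() below (the account and container
# names are hypothetical):
#
#   swift = InternalClient('/etc/swift/internal-client.conf',
#                          'Swift Container Deleter', 3)
#   for deleted, last in mark_for_deletion(swift, 'AUTH_test', 'logs',
#                                          marker='', end_marker='',
#                                          prefix='2019/'):
#       if last is None:
#           print('done, %d objects marked' % deleted)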
def main(args=None):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--config', default='/etc/swift/internal-client.conf',
help=('internal-client config file '
                              '(default: /etc/swift/internal-client.conf)'))

parser.add_argument('--request-tries', type=int, default=3,
help='(default: 3)')
parser.add_argument('account', help='account from which to delete')
parser.add_argument('container', help='container from which to delete')
parser.add_argument(
'--prefix', default='',
help='only delete objects with this prefix (default: none)')
parser.add_argument(
'--marker', default='',
help='only delete objects after this marker (default: none)')
parser.add_argument(
'--end-marker', default='',
help='only delete objects before this end-marker (default: none)')
parser.add_argument(
'--timestamp', type=Timestamp, default=Timestamp.now(),
help='delete all objects as of this time (default: now)')
args = parser.parse_args(args)
swift = InternalClient(
args.config, 'Swift Container Deleter', args.request_tries,
global_conf={'log_name': 'container-deleter-ic'})
for deleted, marker in mark_for_deletion(
swift, args.account, args.container,
args.marker, args.end_marker, args.prefix, args.timestamp):
if marker is None:
print('Finished. Marked %d objects for deletion.' % deleted)
else:
print('Marked %d objects for deletion, through %r' % (
deleted, marker))
if __name__ == '__main__':
main()
| swift-master | swift/cli/container_deleter.py |
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import itertools
import json
import os
import sqlite3
from collections import defaultdict
from six.moves import urllib
from swift.common.utils import hash_path, storage_directory, \
Timestamp, is_valid_ipv6
from swift.common.ring import Ring
from swift.common.request_helpers import is_sys_meta, is_user_meta, \
strip_sys_meta_prefix, strip_user_meta_prefix, \
is_object_transient_sysmeta, strip_object_transient_sysmeta_prefix
from swift.account.backend import AccountBroker, DATADIR as ABDATADIR
from swift.container.backend import ContainerBroker, DATADIR as CBDATADIR
from swift.obj.diskfile import get_data_dir, read_metadata, DATADIR_BASE, \
extract_policy
from swift.common.storage_policy import POLICIES
from swift.common.swob import wsgi_to_str
from swift.common.middleware.crypto.crypto_utils import load_crypto_meta
from swift.common.utils import md5
class InfoSystemExit(Exception):
"""
Indicates to the caller that a sys.exit(1) should be performed.
"""
pass
def parse_get_node_args(options, args):
"""
Parse the get_nodes commandline args
:returns: a tuple, (ring_path, args)
"""
ring_path = None
if options.policy_name:
if POLICIES.get_by_name(options.policy_name) is None:
raise InfoSystemExit('No policy named %r' % options.policy_name)
elif args and args[0].endswith('.ring.gz'):
if os.path.exists(args[0]):
ring_path = args.pop(0)
else:
raise InfoSystemExit('Ring file does not exist')
if options.quoted:
args = [urllib.parse.unquote(arg) for arg in args]
if len(args) == 1:
args = args[0].strip('/').split('/', 2)
if not ring_path and not options.policy_name:
raise InfoSystemExit('Need to specify policy_name or <ring.gz>')
if not (args or options.partition):
raise InfoSystemExit('No target specified')
if len(args) > 3:
raise InfoSystemExit('Invalid arguments')
return ring_path, args
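# Behaviour sketch for parse_get_node_args() (the Namespace stands in for
# the optparse options object; values are illustrative and assume a storage
# policy named 'gold' is configured):
#
#   >>> from argparse import Namespace
#   >>> opts = Namespace(policy_name='gold', quoted=False, partition=None)
#   >>> parse_get_node_args(opts, ['AUTH_test/c/o'])
#   (None, ['AUTH_test', 'c', 'o'])
#
# i.e. a single path-style argument is split into account/container/object,
# and no explicit ring path is returned when a policy name is given.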
def curl_head_command(ip, port, device, part, target, policy_index):
"""
Provide a string that is a well formatted curl command to HEAD an object
on a storage node.
:param ip: the ip of the node
:param port: the port of the node
:param device: the device of the node
:param target: the path of the target resource
:param policy_index: the policy_index of the target resource (can be None)
:returns: a string, a well formatted curl command
"""
if is_valid_ipv6(ip):
formatted_ip = '[%s]' % ip
else:
formatted_ip = ip
cmd = 'curl -g -I -XHEAD "http://%s:%s/%s/%s/%s"' % (
formatted_ip, port, device, part, urllib.parse.quote(target))
if policy_index is not None:
cmd += ' -H "%s: %s"' % ('X-Backend-Storage-Policy-Index',
policy_index)
cmd += ' --path-as-is'
return cmd
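# Example of the command string curl_head_command() produces (host, device
# and path are illustrative):
#
#   >>> curl_head_command('127.0.0.1', 6200, 'sda1', 3, 'AUTH_test/c/o', None)
#   'curl -g -I -XHEAD "http://127.0.0.1:6200/sda1/3/AUTH_test/c/o" --path-as-is'
#
# With an IPv6 address the host is bracketed, and a non-None policy_index
# adds an X-Backend-Storage-Policy-Index header before --path-as-is.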
def print_ring_locations(ring, datadir, account, container=None, obj=None,
tpart=None, all_nodes=False, policy_index=None):
"""
print out ring locations of specified type
:param ring: ring instance
:param datadir: name of directory where things are stored. Usually one of
"accounts", "containers", "objects", or "objects-N".
:param account: account name
:param container: container name
:param obj: object name
:param tpart: target partition in ring
:param all_nodes: include all handoff nodes. If false, only the N primary
nodes and first N handoffs will be printed.
:param policy_index: include policy_index in curl headers
"""
if not ring:
raise ValueError("No ring specified")
if not datadir:
raise ValueError("No datadir specified")
if tpart is None and not account:
raise ValueError("No partition or account/container/object specified")
if not account and (container or obj):
raise ValueError("Container/object specified without account")
if obj and not container:
raise ValueError('Object specified without container')
if obj:
target = '%s/%s/%s' % (account, container, obj)
elif container:
target = '%s/%s' % (account, container)
else:
target = '%s' % (account)
if tpart:
part = int(tpart)
else:
part = ring.get_part(account, container, obj)
primary_nodes = ring.get_part_nodes(part)
handoff_nodes = ring.get_more_nodes(part)
if not all_nodes:
handoff_nodes = itertools.islice(handoff_nodes, len(primary_nodes))
handoff_nodes = list(handoff_nodes)
if account and not tpart:
path_hash = hash_path(account, container, obj)
else:
path_hash = None
print('Partition\t%s' % part)
print('Hash \t%s\n' % path_hash)
for node in primary_nodes:
print('Server:Port Device\t%s:%s %s' % (node['ip'], node['port'],
node['device']))
for node in handoff_nodes:
print('Server:Port Device\t%s:%s %s\t [Handoff]' % (
node['ip'], node['port'], node['device']))
print("\n")
for node in primary_nodes:
cmd = curl_head_command(node['ip'], node['port'], node['device'],
part, target, policy_index)
print(cmd)
for node in handoff_nodes:
cmd = curl_head_command(node['ip'], node['port'], node['device'],
part, target, policy_index)
cmd += ' # [Handoff]'
print(cmd)
print("\n\nUse your own device location of servers:")
print("such as \"export DEVICE=/srv/node\"")
if path_hash:
for node in primary_nodes:
print('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s"' %
(node['ip'], node['device'],
storage_directory(datadir, part, path_hash)))
for node in handoff_nodes:
print('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s" # [Handoff]' %
(node['ip'], node['device'],
storage_directory(datadir, part, path_hash)))
else:
for node in primary_nodes:
print('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s/%d"' %
(node['ip'], node['device'], datadir, part))
for node in handoff_nodes:
print('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s/%d"'
' # [Handoff]' %
(node['ip'], node['device'], datadir, part))
print('\nnote: `/srv/node*` is used as default value of `devices`, the '
'real value is set in the config file on each storage node.')
def print_db_info_metadata(db_type, info, metadata, drop_prefixes=False,
verbose=False):
"""
print out data base info/metadata based on its type
:param db_type: database type, account or container
:param info: dict of data base info
:param metadata: dict of data base metadata
:param drop_prefixes: if True, strip "X-Account-Meta-",
"X-Container-Meta-", "X-Account-Sysmeta-", and
"X-Container-Sysmeta-" when displaying
User Metadata and System Metadata dicts
"""
if info is None:
raise ValueError('DB info is None')
if db_type not in ['container', 'account']:
raise ValueError('Wrong DB type')
try:
account = info['account']
container = None
if db_type == 'container':
container = info['container']
path = '/%s/%s' % (account, container)
else:
path = '/%s' % account
print('Path: %s' % path)
print(' Account: %s' % account)
if db_type == 'container':
print(' Container: %s' % container)
print(' Deleted: %s' % info['is_deleted'])
path_hash = hash_path(account, container)
if db_type == 'container':
print(' Container Hash: %s' % path_hash)
else:
print(' Account Hash: %s' % path_hash)
print('Metadata:')
print(' Created at: %s (%s)' %
(Timestamp(info['created_at']).isoformat,
info['created_at']))
print(' Put Timestamp: %s (%s)' %
(Timestamp(info['put_timestamp']).isoformat,
info['put_timestamp']))
print(' Delete Timestamp: %s (%s)' %
(Timestamp(info['delete_timestamp']).isoformat,
info['delete_timestamp']))
print(' Status Timestamp: %s (%s)' %
(Timestamp(info['status_changed_at']).isoformat,
info['status_changed_at']))
if db_type == 'account':
print(' Container Count: %s' % info['container_count'])
print(' Object Count: %s' % info['object_count'])
print(' Bytes Used: %s' % info['bytes_used'])
if db_type == 'container':
try:
policy_name = POLICIES[info['storage_policy_index']].name
except KeyError:
policy_name = 'Unknown'
print(' Storage Policy: %s (%s)' % (
policy_name, info['storage_policy_index']))
print(' Reported Put Timestamp: %s (%s)' %
(Timestamp(info['reported_put_timestamp']).isoformat,
info['reported_put_timestamp']))
print(' Reported Delete Timestamp: %s (%s)' %
(Timestamp(info['reported_delete_timestamp']).isoformat,
info['reported_delete_timestamp']))
print(' Reported Object Count: %s' %
info['reported_object_count'])
print(' Reported Bytes Used: %s' % info['reported_bytes_used'])
print(' Chexor: %s' % info['hash'])
print(' UUID: %s' % info['id'])
except KeyError as e:
raise ValueError('Info is incomplete: %s' % e)
meta_prefix = 'x_' + db_type + '_'
for key, value in info.items():
if key.lower().startswith(meta_prefix):
title = key.replace('_', '-').title()
print(' %s: %s' % (title, value))
user_metadata = {}
sys_metadata = {}
for key, (value, timestamp) in metadata.items():
if is_user_meta(db_type, key):
if drop_prefixes:
key = strip_user_meta_prefix(db_type, key)
user_metadata[key] = value
elif is_sys_meta(db_type, key):
if drop_prefixes:
key = strip_sys_meta_prefix(db_type, key)
sys_metadata[key] = value
else:
title = key.replace('_', '-').title()
print(' %s: %s' % (title, value))
if sys_metadata:
print(' System Metadata:')
for key, value in sys_metadata.items():
print(' %s: %s' % (key, value))
else:
print('No system metadata found in db file')
if user_metadata:
print(' User Metadata:')
for key, value in user_metadata.items():
print(' %s: %s' % (key, value))
else:
print('No user metadata found in db file')
if db_type == 'container':
print('Sharding Metadata:')
shard_type = 'root' if info['is_root'] else 'shard'
print(' Type: %s' % shard_type)
print(' State: %s' % info['db_state'])
if info.get('shard_ranges'):
num_shards = len(info['shard_ranges'])
print('Shard Ranges (%d):' % num_shards)
count_by_state = defaultdict(int)
for srange in info['shard_ranges']:
count_by_state[(srange.state, srange.state_text)] += 1
print(' States:')
for key_state, count in sorted(count_by_state.items()):
key, state = key_state
print(' %9s: %s' % (state, count))
if verbose:
for srange in info['shard_ranges']:
srange = dict(srange, state_text=srange.state_text)
print(' Name: %(name)s' % srange)
print(' lower: %(lower)r, upper: %(upper)r' % srange)
print(' Object Count: %(object_count)d, Bytes Used: '
'%(bytes_used)d, State: %(state_text)s (%(state)d)'
% srange)
print(' Created at: %s (%s)'
% (Timestamp(srange['timestamp']).isoformat,
srange['timestamp']))
print(' Meta Timestamp: %s (%s)'
% (Timestamp(srange['meta_timestamp']).isoformat,
srange['meta_timestamp']))
else:
print('(Use -v/--verbose to show more Shard Ranges details)')
def print_obj_metadata(metadata, drop_prefixes=False):
"""
Print out basic info and metadata from object, as returned from
:func:`swift.obj.diskfile.read_metadata`.
Metadata should include the keys: name, Content-Type, and
X-Timestamp.
Additional metadata is displayed unmodified.
:param metadata: dict of object metadata
:param drop_prefixes: if True, strip "X-Object-Meta-", "X-Object-Sysmeta-",
and "X-Object-Transient-Sysmeta-" when displaying
User Metadata, System Metadata, and Transient
System Metadata entries
:raises ValueError:
"""
user_metadata = {}
sys_metadata = {}
transient_sys_metadata = {}
other_metadata = {}
if not metadata:
raise ValueError('Metadata is None')
path = metadata.pop('name', '')
content_type = metadata.pop('Content-Type', '')
ts = Timestamp(metadata.pop('X-Timestamp', 0))
account = container = obj = obj_hash = None
if path:
try:
account, container, obj = path.split('/', 3)[1:]
except ValueError:
raise ValueError('Path is invalid for object %r' % path)
else:
obj_hash = hash_path(account, container, obj)
print('Path: %s' % path)
print(' Account: %s' % account)
print(' Container: %s' % container)
print(' Object: %s' % obj)
print(' Object hash: %s' % obj_hash)
else:
print('Path: Not found in metadata')
if content_type:
print('Content-Type: %s' % content_type)
else:
print('Content-Type: Not found in metadata')
if ts:
print('Timestamp: %s (%s)' % (ts.isoformat, ts.internal))
else:
print('Timestamp: Not found in metadata')
for key, value in metadata.items():
if is_user_meta('Object', key):
if drop_prefixes:
key = strip_user_meta_prefix('Object', key)
user_metadata[key] = value
elif is_sys_meta('Object', key):
if drop_prefixes:
key = strip_sys_meta_prefix('Object', key)
sys_metadata[key] = value
elif is_object_transient_sysmeta(key):
if drop_prefixes:
key = strip_object_transient_sysmeta_prefix(key)
transient_sys_metadata[key] = value
else:
other_metadata[key] = value
def print_metadata(title, items):
print(title)
if items:
for key, value in sorted(items.items()):
print(' %s: %s' % (key, value))
else:
print(' No metadata found')
print_metadata('System Metadata:', sys_metadata)
print_metadata('Transient System Metadata:', transient_sys_metadata)
print_metadata('User Metadata:', user_metadata)
print_metadata('Other Metadata:', other_metadata)
for label, meta in [
('Data crypto details',
sys_metadata.get('X-Object-Sysmeta-Crypto-Body-Meta')),
('Metadata crypto details',
transient_sys_metadata.get('X-Object-Transient-Sysmeta-Crypto-Meta')),
]:
if meta is None:
continue
print('%s: %s' % (
label,
json.dumps(load_crypto_meta(meta, b64decode=False), indent=2,
sort_keys=True, separators=(',', ': '))))
def print_info(db_type, db_file, swift_dir='/etc/swift', stale_reads_ok=False,
drop_prefixes=False, verbose=False):
if db_type not in ('account', 'container'):
print("Unrecognized DB type: internal error")
raise InfoSystemExit()
if not os.path.exists(db_file) or not db_file.endswith('.db'):
print("DB file doesn't exist")
raise InfoSystemExit()
if not db_file.startswith(('/', './')):
db_file = './' + db_file # don't break if the bare db file is given
if db_type == 'account':
broker = AccountBroker(db_file, stale_reads_ok=stale_reads_ok)
datadir = ABDATADIR
else:
broker = ContainerBroker(db_file, stale_reads_ok=stale_reads_ok)
datadir = CBDATADIR
try:
info = broker.get_info()
except sqlite3.OperationalError as err:
if 'no such table' in str(err):
print("Does not appear to be a DB of type \"%s\": %s"
% (db_type, db_file))
raise InfoSystemExit()
raise
account = info['account']
container = None
info['is_deleted'] = broker.is_deleted()
if db_type == 'container':
container = info['container']
info['is_root'] = broker.is_root_container()
sranges = broker.get_shard_ranges()
if sranges:
info['shard_ranges'] = sranges
print_db_info_metadata(
db_type, info, broker.metadata, drop_prefixes, verbose)
try:
ring = Ring(swift_dir, ring_name=db_type)
except Exception:
ring = None
else:
print_ring_locations(ring, datadir, account, container)
def print_obj(datafile, check_etag=True, swift_dir='/etc/swift',
policy_name='', drop_prefixes=False):
"""
Display information about an object read from the datafile.
Optionally verify the datafile content matches the ETag metadata.
:param datafile: path on disk to object file
:param check_etag: boolean, will read datafile content and verify
computed checksum matches value stored in
metadata.
:param swift_dir: the path on disk to rings
:param policy_name: optionally the name to use when finding the ring
:param drop_prefixes: if True, strip "X-Object-Meta-", "X-Object-Sysmeta-",
and "X-Object-Transient-Sysmeta-" when displaying
User Metadata, System Metadata, and Transient
System Metadata entries
"""
if not os.path.exists(datafile):
print("Data file doesn't exist")
raise InfoSystemExit()
if not datafile.startswith(('/', './')):
datafile = './' + datafile
policy_index = None
ring = None
datadir = DATADIR_BASE
# try to extract policy index from datafile disk path
fullpath = os.path.abspath(datafile)
policy_index = int(extract_policy(fullpath) or POLICIES.legacy)
try:
if policy_index:
datadir += '-' + str(policy_index)
ring = Ring(swift_dir, ring_name='object-' + str(policy_index))
elif policy_index == 0:
ring = Ring(swift_dir, ring_name='object')
except IOError:
# no such ring
pass
if policy_name:
policy = POLICIES.get_by_name(policy_name)
if policy:
policy_index_for_name = policy.idx
if (policy_index is not None and
policy_index_for_name is not None and
policy_index != policy_index_for_name):
print('Warning: Ring does not match policy!')
print('Double check your policy name!')
if not ring and policy_index_for_name:
ring = POLICIES.get_object_ring(policy_index_for_name,
swift_dir)
datadir = get_data_dir(policy_index_for_name)
with open(datafile, 'rb') as fp:
try:
metadata = read_metadata(fp)
except EOFError:
print("Invalid metadata")
raise InfoSystemExit()
metadata = {wsgi_to_str(k): v if k == 'name' else wsgi_to_str(v)
for k, v in metadata.items()}
etag = metadata.pop('ETag', '')
length = metadata.pop('Content-Length', '')
path = metadata.get('name', '')
print_obj_metadata(metadata, drop_prefixes)
# Optional integrity check; it's useful, but slow.
file_len = None
if check_etag:
h = md5(usedforsecurity=False)
file_len = 0
while True:
data = fp.read(64 * 1024)
if not data:
break
h.update(data)
file_len += len(data)
h = h.hexdigest()
if etag:
if h == etag:
print('ETag: %s (valid)' % etag)
else:
print("ETag: %s doesn't match file hash of %s!" %
(etag, h))
else:
print('ETag: Not found in metadata')
else:
print('ETag: %s (not checked)' % etag)
file_len = os.fstat(fp.fileno()).st_size
if length:
if file_len == int(length):
print('Content-Length: %s (valid)' % length)
else:
print("Content-Length: %s doesn't match file length of %s"
% (length, file_len))
else:
print('Content-Length: Not found in metadata')
account, container, obj = path.split('/', 3)[1:]
if ring:
print_ring_locations(ring, datadir, account, container, obj,
policy_index=policy_index)
def print_item_locations(ring, ring_name=None, account=None, container=None,
obj=None, **kwargs):
"""
Display placement information for an item based on ring lookup.
If a ring is provided it always takes precedence, but warnings will be
emitted if it doesn't match other optional arguments like the policy_name
or ring_name.
If no ring is provided the ring_name and/or policy_name will be used to
lookup the ring.
:param ring: a ring instance
:param ring_name: server type, or storage policy ring name if object ring
:param account: account name
:param container: container name
:param obj: object name
:param partition: part number for non path lookups
:param policy_name: name of storage policy to use to lookup the ring
:param all_nodes: include all handoff nodes. If false, only the N primary
nodes and first N handoffs will be printed.
"""
policy_name = kwargs.get('policy_name', None)
part = kwargs.get('partition', None)
all_nodes = kwargs.get('all', False)
swift_dir = kwargs.get('swift_dir', '/etc/swift')
if ring and policy_name:
policy = POLICIES.get_by_name(policy_name)
if policy:
if ring_name != policy.ring_name:
print('Warning: mismatch between ring and policy name!')
else:
print('Warning: Policy %s is not valid' % policy_name)
policy_index = None
if ring is None and (obj or part):
if not policy_name:
print('Need a ring or policy')
raise InfoSystemExit()
policy = POLICIES.get_by_name(policy_name)
if not policy:
print('No policy named %r' % policy_name)
raise InfoSystemExit()
policy_index = int(policy)
ring = POLICIES.get_object_ring(policy_index, swift_dir)
ring_name = (POLICIES.get_by_name(policy_name)).ring_name
if (container or obj) and not account:
print('No account specified')
raise InfoSystemExit()
if obj and not container:
print('No container specified')
raise InfoSystemExit()
if not account and not part:
print('No target specified')
raise InfoSystemExit()
loc = '<type>'
if part and ring_name:
if '-' in ring_name and ring_name.startswith('object'):
loc = 'objects-' + ring_name.split('-', 1)[1]
else:
loc = ring_name + 's'
if account and container and obj:
loc = 'objects'
if '-' in ring_name and ring_name.startswith('object'):
policy_index = int(ring_name.rsplit('-', 1)[1])
loc = 'objects-%d' % policy_index
if account and container and not obj:
loc = 'containers'
if not any([ring, ring_name]):
ring = Ring(swift_dir, ring_name='container')
else:
if ring_name != 'container':
print('Warning: account/container specified ' +
'but ring not named "container"')
if account and not container and not obj:
loc = 'accounts'
if not any([ring, ring_name]):
ring = Ring(swift_dir, ring_name='account')
else:
if ring_name != 'account':
print('Warning: account specified ' +
'but ring not named "account"')
if account:
print('\nAccount \t%s' % urllib.parse.quote(account))
if container:
print('Container\t%s' % urllib.parse.quote(container))
if obj:
print('Object \t%s\n\n' % urllib.parse.quote(obj))
print_ring_locations(ring, loc, account, container, obj, part, all_nodes,
policy_index=policy_index)
| swift-master | swift/cli/info.py |
# Copyright (c) 2010-2012 OpenStack Foundation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script for generating a form signature for use with FormPost middleware.
"""
from __future__ import print_function
import hmac
import six
from hashlib import sha1
from os.path import basename
from time import time
def main(argv):
if len(argv) != 7:
prog = basename(argv[0])
print('Syntax: %s <path> <redirect> <max_file_size> '
'<max_file_count> <seconds> <key>' % prog)
print()
print('Where:')
print(' <path> The prefix to use for form uploaded')
print(' objects. For example:')
print(' /v1/account/container/object_prefix_ would')
print(' ensure all form uploads have that path')
print(' prepended to the browser-given file name.')
print(' <redirect> The URL to redirect the browser to after')
print(' the uploads have completed.')
print(' <max_file_size> The maximum file size per file uploaded.')
print(' <max_file_count> The maximum number of uploaded files')
print(' allowed.')
print(' <seconds> The number of seconds from now to allow')
print(' the form post to begin.')
print(' <key> The X-Account-Meta-Temp-URL-Key for the')
print(' account.')
print()
print('Example output:')
print(' Expires: 1323842228')
print(' Signature: 18de97e47345a82c4dbfb3b06a640dbb')
print()
print('Sample form:')
print()
print('NOTE: the <form> tag\'s "action" attribute does not contain '
'the Swift cluster\'s hostname.')
print('You should manually add it before using the form.')
print()
print('<form action="/v1/a/c/o" method="POST" '
'enctype="multipart/form-data">')
print(' <input type="hidden" name="max_file_size" value="123" />')
print(' ... more HTML ...')
print(' <input type="submit" />')
print('</form>')
return 1
path, redirect, max_file_size, max_file_count, seconds, key = argv[1:]
try:
max_file_size = int(max_file_size)
except ValueError:
max_file_size = -1
if max_file_size < 0:
print('Please use a <max_file_size> value greater than or equal to 0.')
return 1
try:
max_file_count = int(max_file_count)
except ValueError:
max_file_count = 0
if max_file_count < 1:
print('Please use a positive <max_file_count> value.')
return 1
try:
expires = int(time() + int(seconds))
except ValueError:
expires = 0
if expires < 1:
print('Please use a positive <seconds> value.')
return 1
parts = path.split('/', 4)
# Must be four parts, ['', 'v1', 'a', 'c'], must be a v1 request, have
# account and container values, and optionally have an object prefix.
if len(parts) < 4 or parts[0] or parts[1] != 'v1' or not parts[2] or \
not parts[3]:
print('<path> must point to a container at least.')
print('For example: /v1/account/container')
print(' Or: /v1/account/container/object_prefix')
return 1
data = '%s\n%s\n%s\n%s\n%s' % (path, redirect, max_file_size,
max_file_count, expires)
if six.PY3:
data = data if isinstance(data, six.binary_type) else \
data.encode('utf8')
key = key if isinstance(key, six.binary_type) else \
key.encode('utf8')
sig = hmac.new(key, data,
sha1).hexdigest()
print(' Expires:', expires)
print('Signature:', sig)
print('')
print('Sample form:\n')
print('NOTE: the <form> tag\'s "action" attribute does not '
'contain the Swift cluster\'s hostname.')
print('You should manually add it before using the form.\n')
print('<form action="%s" method="POST" enctype="multipart/form-data">'
% path)
if redirect:
print(' <input type="hidden" name="redirect" value="%s" />'
% redirect)
print(' <input type="hidden" name="max_file_size" value="%d" />'
% max_file_size)
print(' <input type="hidden" name="max_file_count" value="%d" />'
% max_file_count)
print(' <input type="hidden" name="expires" value="%d" />' % expires)
print(' <input type="hidden" name="signature" value="%s" />' % sig)
print(' <!-- This signature allows for at most %d files, -->'
% max_file_count)
print(' <!-- but it may also have any smaller number. -->')
print(' <!-- Remove file inputs as needed. -->')
for i in range(max_file_count):
print(' <input type="file" name="file%d" />' % i)
print(' <br />')
print(' <input type="submit" />')
print('</form>')
return 0
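# Illustrative cross-check (not part of the original script): the signature
# printed above is plain HMAC-SHA1 over the newline-joined fields, so it can
# be recomputed independently, e.g. with made-up values:
#
#   import hmac
#   from hashlib import sha1
#   data = '\n'.join(['/v1/AUTH_test/c/prefix_', '', '1048576', '10',
#                     '1323842228'])
#   sig = hmac.new(b'mykey', data.encode('utf8'), sha1).hexdigest()
#
# which matches the "Signature:" output for the same path, redirect,
# max_file_size, max_file_count, expires and key.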
| swift-master | swift/cli/form_signature.py |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
cmdline utility to perform cluster reconnaissance
"""
from __future__ import print_function
from eventlet.green import socket
from six import string_types
from six.moves.urllib.parse import urlparse
from swift.common.utils import (
SWIFT_CONF_FILE, md5_hash_for_file, set_swift_dir)
from swift.common.ring import Ring
from swift.common.storage_policy import POLICIES, reload_storage_policies
import eventlet
import json
import optparse
import time
import sys
import six
import os
if six.PY3:
from eventlet.green.urllib import request as urllib2
else:
from eventlet.green import urllib2
def seconds2timeunit(seconds):
elapsed = seconds
unit = 'seconds'
if elapsed >= 60:
elapsed = elapsed / 60.0
unit = 'minutes'
if elapsed >= 60:
elapsed = elapsed / 60.0
unit = 'hours'
if elapsed >= 24:
elapsed = elapsed / 24.0
unit = 'days'
return elapsed, unit
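# Behaviour sketch for seconds2timeunit() (values are illustrative):
#
#   >>> seconds2timeunit(45)
#   (45, 'seconds')
#   >>> seconds2timeunit(3600)
#   (1.0, 'hours')
#   >>> seconds2timeunit(172800)
#   (2.0, 'days')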
def size_suffix(size):
suffixes = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
for suffix in suffixes:
if size < 1000:
return "%s %s" % (size, suffix)
size = size // 1000
return "%s %s" % (size, suffix)
class Scout(object):
"""
Obtain swift recon information
"""
def __init__(self, recon_type, verbose=False, suppress_errors=False,
timeout=5):
self.recon_type = recon_type
self.verbose = verbose
self.suppress_errors = suppress_errors
self.timeout = timeout
def scout_host(self, base_url, recon_type):
"""
Perform the actual HTTP request to obtain swift recon telemetry.
:param base_url: the base url of the host you wish to check. str of the
format 'http://127.0.0.1:6200/recon/'
:param recon_type: the swift recon check to request.
:returns: tuple of (recon url used, response body, and status)
"""
url = base_url + recon_type
try:
body = urllib2.urlopen(url, timeout=self.timeout).read()
if six.PY3 and isinstance(body, six.binary_type):
body = body.decode('utf8')
content = json.loads(body)
if self.verbose:
print("-> %s: %s" % (url, content))
status = 200
except urllib2.HTTPError as err:
if not self.suppress_errors or self.verbose:
print("-> %s: %s" % (url, err))
content = err
status = err.code
except (urllib2.URLError, socket.timeout) as err:
if not self.suppress_errors or self.verbose:
print("-> %s: %s" % (url, err))
content = err
status = -1
return url, content, status
def scout(self, host):
"""
Obtain telemetry from a host running the swift recon middleware.
:param host: host to check
:returns: tuple of (recon url used, response body, status, time start
and time end)
"""
base_url = "http://%s:%s/recon/" % (host[0], host[1])
ts_start = time.time()
url, content, status = self.scout_host(base_url, self.recon_type)
ts_end = time.time()
return url, content, status, ts_start, ts_end
def scout_server_type(self, host):
"""
Obtain Server header by calling OPTIONS.
:param host: host to check
:returns: Server type, status
"""
try:
url = "http://%s:%s/" % (host[0], host[1])
req = urllib2.Request(url)
req.get_method = lambda: 'OPTIONS'
conn = urllib2.urlopen(req)
header = conn.info().get('Server')
server_header = header.split('/')
content = server_header[0]
status = 200
except urllib2.HTTPError as err:
if not self.suppress_errors or self.verbose:
print("-> %s: %s" % (url, err))
content = err
status = err.code
except (urllib2.URLError, socket.timeout) as err:
if not self.suppress_errors or self.verbose:
print("-> %s: %s" % (url, err))
content = err
status = -1
return url, content, status
class SwiftRecon(object):
"""
Retrieve and report cluster info from hosts running recon middleware.
"""
def __init__(self):
self.verbose = False
self.suppress_errors = False
self.timeout = 5
self.pool_size = 30
self.pool = eventlet.GreenPool(self.pool_size)
self.check_types = ['account', 'container', 'object']
self.server_type = 'object'
def _gen_stats(self, stats, name=None):
"""Compute various stats from a list of values."""
cstats = [x for x in stats if x is not None]
if len(cstats) > 0:
ret_dict = {'low': min(cstats), 'high': max(cstats),
'total': sum(cstats), 'reported': len(cstats),
'number_none': len(stats) - len(cstats), 'name': name}
ret_dict['average'] = ret_dict['total'] / float(len(cstats))
ret_dict['perc_none'] = \
ret_dict['number_none'] * 100.0 / len(stats)
else:
ret_dict = {'reported': 0}
return ret_dict
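    # Behaviour sketch for _gen_stats() (values are illustrative); None
    # entries are counted separately rather than skewing the averages:
    #
    #   >>> SwiftRecon()._gen_stats([1, 2, None, 3], name='async_pending')
    #   {'low': 1, 'high': 3, 'total': 6, 'reported': 3, 'number_none': 1,
    #    'name': 'async_pending', 'average': 2.0, 'perc_none': 25.0}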
def _print_stats(self, stats):
"""
print out formatted stats to console
:param stats: dict of stats generated by _gen_stats
"""
print('[%(name)s] low: %(low)d, high: %(high)d, avg: '
'%(average).1f, total: %(total)d, '
'Failed: %(perc_none).1f%%, no_result: %(number_none)d, '
'reported: %(reported)d' % stats)
def _ptime(self, timev=None):
"""
:param timev: a unix timestamp or None
:returns: a pretty string of the current time or provided time in UTC
"""
if timev:
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(timev))
else:
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
def get_hosts(self, region_filter, zone_filter, swift_dir, ring_names):
"""
Get a list of hosts in the rings.
:param region_filter: Only list regions matching given filter
:param zone_filter: Only list zones matching given filter
:param swift_dir: Directory of swift config, usually /etc/swift
:param ring_names: Collection of ring names, such as
['object', 'object-2']
:returns: a set of tuples containing the ip and port of hosts
"""
rings = [Ring(swift_dir, ring_name=n) for n in ring_names]
devs = [d for r in rings for d in r.devs if d]
if region_filter is not None:
devs = [d for d in devs if d['region'] == region_filter]
if zone_filter is not None:
devs = [d for d in devs if d['zone'] == zone_filter]
return set((d['ip'], d['port']) for d in devs)
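    # Usage sketch for get_hosts() (ring names are illustrative and must
    # exist as <name>.ring.gz under swift_dir; `recon` is a SwiftRecon
    # instance):
    #
    #   hosts = recon.get_hosts(None, None, '/etc/swift',
    #                           ['object', 'object-1'])
    #   # -> e.g. set([('10.0.0.1', 6200), ('10.0.0.2', 6200)])
    #
    # Passing a region or zone filter keeps only devices from that region
    # or zone.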
def get_ringmd5(self, hosts, swift_dir):
"""
Compare ring md5sum's with those on remote host
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
:param swift_dir: The local directory with the ring files.
"""
matches = 0
errors = 0
ring_names = set()
if self.server_type == 'object':
for ring_name in os.listdir(swift_dir):
if ring_name.startswith('object') and \
ring_name.endswith('.ring.gz'):
ring_names.add(ring_name)
else:
ring_name = '%s.ring.gz' % self.server_type
ring_names.add(ring_name)
rings = {}
for ring_name in ring_names:
rings[ring_name] = md5_hash_for_file(
os.path.join(swift_dir, ring_name))
recon = Scout("ringmd5", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking ring md5sums" % self._ptime())
if self.verbose:
for ring_file, ring_sum in rings.items():
print("-> On disk %s md5sum: %s" % (ring_file, ring_sum))
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status != 200:
errors = errors + 1
continue
success = True
for remote_ring_file, remote_ring_sum in response.items():
remote_ring_name = os.path.basename(remote_ring_file)
if not remote_ring_name.startswith(self.server_type):
continue
ring_sum = rings.get(remote_ring_name, None)
if remote_ring_sum != ring_sum:
success = False
print("!! %s (%s => %s) doesn't match on disk md5sum" % (
url, remote_ring_name, remote_ring_sum))
if not success:
errors += 1
continue
matches += 1
if self.verbose:
print("-> %s matches." % url)
print("%s/%s hosts matched, %s error[s] while checking hosts." % (
matches, len(hosts), errors))
print("=" * 79)
def get_swiftconfmd5(self, hosts, printfn=print):
"""
Compare swift.conf md5sum with that on remote hosts
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
:param printfn: function to print text; defaults to print()
"""
matches = 0
errors = 0
conf_sum = md5_hash_for_file(SWIFT_CONF_FILE)
recon = Scout("swiftconfmd5", self.verbose, self.suppress_errors,
self.timeout)
printfn("[%s] Checking swift.conf md5sum" % self._ptime())
if self.verbose:
printfn("-> On disk swift.conf md5sum: %s" % (conf_sum,))
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
if response[SWIFT_CONF_FILE] != conf_sum:
printfn("!! %s (%s) doesn't match on disk md5sum" %
(url, response[SWIFT_CONF_FILE]))
else:
matches = matches + 1
if self.verbose:
printfn("-> %s matches." % url)
else:
errors = errors + 1
printfn("%s/%s hosts matched, %s error[s] while checking hosts."
% (matches, len(hosts), errors))
printfn("=" * 79)
def async_check(self, hosts):
"""
Obtain and print async pending statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
"""
scan = {}
recon = Scout("async", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking async pendings" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
scan[url] = response['async_pending']
stats = self._gen_stats(scan.values(), 'async_pending')
if stats['reported'] > 0:
self._print_stats(stats)
else:
print("[async_pending] - No hosts returned valid data.")
print("=" * 79)
def driveaudit_check(self, hosts):
"""
Obtain and print drive audit error statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)]
"""
scan = {}
recon = Scout("driveaudit", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking drive-audit errors" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
scan[url] = response['drive_audit_errors']
stats = self._gen_stats(scan.values(), 'drive_audit_errors')
if stats['reported'] > 0:
self._print_stats(stats)
else:
print("[drive_audit_errors] - No hosts returned valid data.")
print("=" * 79)
def umount_check(self, hosts):
"""
Check for and print unmounted drives
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
"""
unmounted = {}
errors = {}
recon = Scout("unmounted", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Getting unmounted drives from %s hosts..." %
(self._ptime(), len(hosts)))
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
unmounted[url] = []
errors[url] = []
for i in response:
if not isinstance(i['mounted'], bool):
errors[url].append(i['device'])
else:
unmounted[url].append(i['device'])
for host in unmounted:
node = urlparse(host).netloc
for entry in unmounted[host]:
print("Not mounted: %s on %s" % (entry, node))
for host in errors:
node = urlparse(host).netloc
for entry in errors[host]:
print("Device errors: %s on %s" % (entry, node))
print("=" * 79)
def server_type_check(self, hosts):
"""
Check for server types on the ring
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
"""
errors = {}
recon = Scout("server_type_check", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Validating server type '%s' on %s hosts..." %
(self._ptime(), self.server_type, len(hosts)))
for url, response, status in self.pool.imap(
recon.scout_server_type, hosts):
if status == 200:
if response != self.server_type + '-server':
errors[url] = response
print("%s/%s hosts ok, %s error[s] while checking hosts." % (
len(hosts) - len(errors), len(hosts), len(errors)))
for host in errors:
print("Invalid: %s is %s" % (host, errors[host]))
print("=" * 79)
def expirer_check(self, hosts):
"""
Obtain and print expirer statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
"""
stats = {'object_expiration_pass': [], 'expired_last_pass': []}
recon = Scout("expirer/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print("[%s] Checking on expirers" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
stats['object_expiration_pass'].append(
response.get('object_expiration_pass'))
stats['expired_last_pass'].append(
response.get('expired_last_pass'))
for k in stats:
if stats[k]:
computed = self._gen_stats(stats[k], name=k)
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[%s] - No hosts returned valid data." % k)
else:
print("[%s] - No hosts returned valid data." % k)
print("=" * 79)
def _calculate_least_and_most_recent(self, url_time_data):
"""calulate and print the least and most recent urls
Given a list of url and time tuples calulate the most and least
recent timings and print it out.
:param url_time_data: list of url and time tuples: [(url, time_), ..]
"""
least_recent_time = 9999999999
least_recent_url = None
most_recent_time = 0
most_recent_url = None
for url, last in url_time_data:
if last is None:
continue
if last < least_recent_time:
least_recent_time = last
least_recent_url = url
if last > most_recent_time:
most_recent_time = last
most_recent_url = url
if least_recent_url is not None:
host = urlparse(least_recent_url).netloc
if not least_recent_time:
print('Oldest completion was NEVER by %s.' % host)
else:
elapsed = time.time() - least_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print('Oldest completion was %s (%d %s ago) by %s.' % (
self._ptime(least_recent_time),
elapsed, elapsed_unit, host))
if most_recent_url is not None:
host = urlparse(most_recent_url).netloc
elapsed = time.time() - most_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print('Most recent completion was %s (%d %s ago) by %s.' % (
self._ptime(most_recent_time),
elapsed, elapsed_unit, host))
def reconstruction_check(self, hosts):
"""
        Obtain and print reconstruction statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
stats = []
last_stats = []
recon = Scout("reconstruction/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print("[%s] Checking on reconstructors" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
stats.append(response.get('object_reconstruction_time'))
last = response.get('object_reconstruction_last', 0)
last_stats.append((url, last))
if stats:
computed = self._gen_stats(stats,
name='object_reconstruction_time')
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[object_reconstruction_time] - No hosts returned "
"valid data.")
else:
print("[object_reconstruction_time] - No hosts returned "
"valid data.")
self._calculate_least_and_most_recent(last_stats)
print("=" * 79)
def replication_check(self, hosts):
"""
Obtain and print replication statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
"""
stats = {'replication_time': [], 'failure': [], 'success': [],
'attempted': []}
last_stats = []
recon = Scout("replication/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print("[%s] Checking on replication" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
stats['replication_time'].append(
response.get('replication_time',
response.get('object_replication_time', 0)))
repl_stats = response.get('replication_stats')
if repl_stats:
for stat_key in ['attempted', 'failure', 'success']:
stats[stat_key].append(repl_stats.get(stat_key))
last = response.get('replication_last',
response.get('object_replication_last', 0))
last_stats.append((url, last))
for k in stats:
if stats[k]:
if k != 'replication_time':
computed = self._gen_stats(stats[k],
name='replication_%s' % k)
else:
computed = self._gen_stats(stats[k], name=k)
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[%s] - No hosts returned valid data." % k)
else:
print("[%s] - No hosts returned valid data." % k)
self._calculate_least_and_most_recent(last_stats)
print("=" * 79)
def updater_check(self, hosts):
"""
Obtain and print updater statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
"""
stats = []
recon = Scout("updater/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print("[%s] Checking updater times" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
if response['%s_updater_sweep' % self.server_type]:
stats.append(response['%s_updater_sweep' %
self.server_type])
if len(stats) > 0:
computed = self._gen_stats(stats, name='updater_last_sweep')
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[updater_last_sweep] - No hosts returned valid data.")
else:
print("[updater_last_sweep] - No hosts returned valid data.")
print("=" * 79)
def auditor_check(self, hosts):
"""
Obtain and print obj auditor statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
"""
scan = {}
adone = '%s_auditor_pass_completed' % self.server_type
afail = '%s_audits_failed' % self.server_type
apass = '%s_audits_passed' % self.server_type
asince = '%s_audits_since' % self.server_type
recon = Scout("auditor/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print("[%s] Checking auditor stats" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
scan[url] = response
if len(scan) < 1:
print("Error: No hosts available")
return
stats = {}
stats[adone] = [scan[i][adone] for i in scan
if scan[i][adone] is not None]
stats[afail] = [scan[i][afail] for i in scan
if scan[i][afail] is not None]
stats[apass] = [scan[i][apass] for i in scan
if scan[i][apass] is not None]
stats[asince] = [scan[i][asince] for i in scan
if scan[i][asince] is not None]
for k in stats:
if len(stats[k]) < 1:
print("[%s] - No hosts returned valid data." % k)
else:
if k != asince:
computed = self._gen_stats(stats[k], k)
if computed['reported'] > 0:
self._print_stats(computed)
if len(stats[asince]) >= 1:
low = min(stats[asince])
high = max(stats[asince])
total = sum(stats[asince])
average = total / len(stats[asince])
print('[last_pass] oldest: %s, newest: %s, avg: %s' %
(self._ptime(low), self._ptime(high), self._ptime(average)))
print("=" * 79)
def nested_get_value(self, key, recon_entry):
"""
        Generator that yields all values for a given key in a recon cache entry.
This is for use with object auditor recon cache entries. If the
object auditor has run in parallel, the recon cache will have entries
of the form: {'object_auditor_stats_ALL': { 'disk1': {..},
'disk2': {..},
'disk3': {..},
...}}
If the object auditor hasn't run in parallel, the recon cache will have
entries of the form: {'object_auditor_stats_ALL': {...}}.
The ZBF auditor doesn't run in parallel. However, if a subset of
devices is selected for auditing, the recon cache will have an entry
        of the form: {'object_auditor_stats_ZBF': {'disk1disk2..diskN': {}}}
We use this generator to find all instances of a particular key in
these multi-level dictionaries.
"""
for k, v in recon_entry.items():
if isinstance(v, dict):
for value in self.nested_get_value(key, v):
yield value
if k == key:
yield v
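    # Illustrative example (added for clarity, not in the original source):
    # for a parallel-auditor entry such as
    #   {'disk1': {'passes': 2, 'errors': 0},
    #    'disk2': {'passes': 3, 'errors': 1}}
    # list(self.nested_get_value('passes', entry)) yields [2, 3], which is
    # why the callers below wrap the generator in sum().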
def object_auditor_check(self, hosts):
"""
Obtain and print obj auditor statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
"""
all_scan = {}
zbf_scan = {}
atime = 'audit_time'
bprocessed = 'bytes_processed'
passes = 'passes'
errors = 'errors'
quarantined = 'quarantined'
recon = Scout("auditor/object", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking auditor stats " % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
if response['object_auditor_stats_ALL']:
all_scan[url] = response['object_auditor_stats_ALL']
if response['object_auditor_stats_ZBF']:
zbf_scan[url] = response['object_auditor_stats_ZBF']
if len(all_scan) > 0:
stats = {}
stats[atime] = [sum(self.nested_get_value(atime, all_scan[i]))
for i in all_scan]
stats[bprocessed] = [sum(self.nested_get_value(bprocessed,
all_scan[i])) for i in all_scan]
stats[passes] = [sum(self.nested_get_value(passes, all_scan[i]))
for i in all_scan]
stats[errors] = [sum(self.nested_get_value(errors, all_scan[i]))
for i in all_scan]
stats[quarantined] = [sum(self.nested_get_value(quarantined,
all_scan[i])) for i in all_scan]
for k in stats:
if None in stats[k]:
stats[k] = [x for x in stats[k] if x is not None]
if len(stats[k]) < 1:
print("[Auditor %s] - No hosts returned valid data." % k)
else:
computed = self._gen_stats(stats[k],
name='ALL_%s_last_path' % k)
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[ALL_auditor] - No hosts returned valid data.")
else:
print("[ALL_auditor] - No hosts returned valid data.")
if len(zbf_scan) > 0:
stats = {}
stats[atime] = [sum(self.nested_get_value(atime, zbf_scan[i]))
for i in zbf_scan]
stats[bprocessed] = [sum(self.nested_get_value(bprocessed,
zbf_scan[i])) for i in zbf_scan]
stats[errors] = [sum(self.nested_get_value(errors, zbf_scan[i]))
for i in zbf_scan]
stats[quarantined] = [sum(self.nested_get_value(quarantined,
zbf_scan[i])) for i in zbf_scan]
for k in stats:
if None in stats[k]:
stats[k] = [x for x in stats[k] if x is not None]
if len(stats[k]) < 1:
print("[Auditor %s] - No hosts returned valid data." % k)
else:
computed = self._gen_stats(stats[k],
name='ZBF_%s_last_path' % k)
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[ZBF_auditor] - No hosts returned valid data.")
else:
print("[ZBF_auditor] - No hosts returned valid data.")
print("=" * 79)
def sharding_check(self, hosts):
"""
Obtain and print sharding statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6221), ('127.0.0.2', 6231)])
"""
stats = {'sharding_time': [],
'attempted': [], 'failure': [], 'success': []}
recon = Scout("sharding", self.verbose,
self.suppress_errors, self.timeout)
print("[%s] Checking on sharders" % self._ptime())
least_recent_time = 9999999999
least_recent_url = None
most_recent_time = 0
most_recent_url = None
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
stats['sharding_time'].append(response.get('sharding_time', 0))
shard_stats = response.get('sharding_stats')
if shard_stats:
# Sharding has a ton more stats, like "no_change".
# Not sure if we need them at all, or maybe for -v.
for stat_key in ['attempted', 'failure', 'success']:
stats[stat_key].append(shard_stats.get(stat_key))
last = response.get('sharding_last', 0)
if last is None:
continue
if last < least_recent_time:
least_recent_time = last
least_recent_url = url
if last > most_recent_time:
most_recent_time = last
most_recent_url = url
for k in stats:
if stats[k]:
computed = self._gen_stats(stats[k], name=k)
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[%s] - No hosts returned valid data." % k)
else:
print("[%s] - No hosts returned valid data." % k)
if least_recent_url is not None:
host = urlparse(least_recent_url).netloc
if not least_recent_time:
print('Oldest completion was NEVER by %s.' % host)
else:
elapsed = time.time() - least_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print('Oldest completion was %s (%d %s ago) by %s.' % (
self._ptime(least_recent_time),
elapsed, elapsed_unit, host))
if most_recent_url is not None:
host = urlparse(most_recent_url).netloc
elapsed = time.time() - most_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print('Most recent completion was %s (%d %s ago) by %s.' % (
self._ptime(most_recent_time),
elapsed, elapsed_unit, host))
print("=" * 79)
def load_check(self, hosts):
"""
Obtain and print load average statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
"""
load1 = {}
load5 = {}
load15 = {}
recon = Scout("load", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking load averages" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
load1[url] = response['1m']
load5[url] = response['5m']
load15[url] = response['15m']
stats = {"1m": load1, "5m": load5, "15m": load15}
for item in stats:
if len(stats[item]) > 0:
computed = self._gen_stats(stats[item].values(),
name='%s_load_avg' % item)
self._print_stats(computed)
else:
print("[%s_load_avg] - No hosts returned valid data." % item)
print("=" * 79)
def quarantine_check(self, hosts):
"""
Obtain and print quarantine statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
"""
objq = {}
conq = {}
acctq = {}
stats = {}
recon = Scout("quarantined", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking quarantine" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
objq[url] = response['objects']
conq[url] = response['containers']
acctq[url] = response['accounts']
for key in response.get('policies', {}):
pkey = "objects_%s" % key
stats.setdefault(pkey, {})
stats[pkey][url] = response['policies'][key]['objects']
stats.update({"objects": objq, "containers": conq, "accounts": acctq})
for item in stats:
if len(stats[item]) > 0:
computed = self._gen_stats(stats[item].values(),
name='quarantined_%s' % item)
self._print_stats(computed)
else:
print("No hosts returned valid data.")
print("=" * 79)
def socket_usage(self, hosts):
"""
Obtain and print /proc/net/sockstat statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
"""
inuse4 = {}
mem = {}
inuse6 = {}
timewait = {}
orphan = {}
recon = Scout("sockstat", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking socket usage" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
inuse4[url] = response['tcp_in_use']
mem[url] = response['tcp_mem_allocated_bytes']
inuse6[url] = response.get('tcp6_in_use', 0)
timewait[url] = response['time_wait']
orphan[url] = response['orphan']
stats = {"tcp_in_use": inuse4, "tcp_mem_allocated_bytes": mem,
"tcp6_in_use": inuse6, "time_wait": timewait,
"orphan": orphan}
for item in stats:
if len(stats[item]) > 0:
computed = self._gen_stats(stats[item].values(), item)
self._print_stats(computed)
else:
print("No hosts returned valid data.")
print("=" * 79)
def disk_usage(self, hosts, top=0, lowest=0, human_readable=False):
"""
Obtain and print disk usage statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
"""
stats = {}
highs = []
lows = []
raw_total_used = []
raw_total_avail = []
percents = {}
top_percents = [(None, 0)] * top
low_percents = [(None, 100)] * lowest
recon = Scout("diskusage", self.verbose, self.suppress_errors,
self.timeout)
# We want to only query each host once, but we don't care
# which of the available ports we use. So we filter hosts by
# constructing a host->port dictionary, since the dict
# constructor ensures each key is unique, thus each host
# appears only once in filtered_hosts.
filtered_hosts = set(dict(hosts).items())
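        # For example (illustration only): dict([('127.0.0.1', 6200),
        # ('127.0.0.1', 6210)]) collapses to {'127.0.0.1': 6210}, so each
        # host is scouted once no matter how many ports it appears under.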
print("[%s] Checking disk usage now" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, filtered_hosts):
if status == 200:
hostusage = []
for entry in response:
if not isinstance(entry['mounted'], bool):
print("-> %s/%s: Error: %s" % (url, entry['device'],
entry['mounted']))
elif entry['mounted']:
used = float(entry['used']) / float(entry['size']) \
* 100.0
raw_total_used.append(entry['used'])
raw_total_avail.append(entry['avail'])
hostusage.append(round(used, 2))
for ident, oused in top_percents:
if oused < used:
top_percents.append(
(url + ' ' + entry['device'], used))
top_percents.sort(key=lambda x: -x[1])
top_percents.pop()
break
for ident, oused in low_percents:
if oused > used:
low_percents.append(
(url + ' ' + entry['device'], used))
low_percents.sort(key=lambda x: x[1])
low_percents.pop()
break
stats[url] = hostusage
for url in stats:
if len(stats[url]) > 0:
# get per host hi/los for another day
low = min(stats[url])
high = max(stats[url])
highs.append(high)
lows.append(low)
for percent in stats[url]:
percents[int(percent)] = percents.get(int(percent), 0) + 1
else:
print("-> %s: Error. No drive info available." % url)
if len(lows) > 0:
low = min(lows)
high = max(highs)
# dist graph shamelessly stolen from https://github.com/gholt/tcod
print("Distribution Graph:")
mul = 69.0 / max(percents.values())
for percent in sorted(percents):
print('% 3d%%%5d %s' % (percent, percents[percent],
'*' * int(percents[percent] * mul)))
raw_used = sum(raw_total_used)
raw_avail = sum(raw_total_avail)
raw_total = raw_used + raw_avail
avg_used = 100.0 * raw_used / raw_total
if human_readable:
raw_used = size_suffix(raw_used)
raw_avail = size_suffix(raw_avail)
raw_total = size_suffix(raw_total)
print("Disk usage: space used: %s of %s" % (raw_used, raw_total))
print("Disk usage: space free: %s of %s" % (raw_avail, raw_total))
print("Disk usage: lowest: %s%%, highest: %s%%, avg: %s%%" %
(low, high, avg_used))
else:
print("No hosts returned valid data.")
print("=" * 79)
if top_percents:
print('TOP %s' % top)
for ident, used in top_percents:
if ident:
url, device = ident.split()
host = urlparse(url).netloc.split(':')[0]
print('%.02f%% %s' % (used, '%-15s %s' % (host, device)))
if low_percents:
print('LOWEST %s' % lowest)
for ident, used in low_percents:
if ident:
url, device = ident.split()
host = urlparse(url).netloc.split(':')[0]
print('%.02f%% %s' % (used, '%-15s %s' % (host, device)))
def time_check(self, hosts, jitter=0.0):
"""
        Check time synchronization of hosts against the current time
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
:param jitter: Maximal allowed time jitter
"""
jitter = abs(jitter)
matches = 0
errors = 0
recon = Scout("time", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking time-sync" % self._ptime())
for url, ts_remote, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status != 200:
errors = errors + 1
continue
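            # A remote clock is accepted when its reported time, widened by
            # the allowed jitter, overlaps the local [ts_start, ts_end]
            # request window; anything outside that window is flagged below.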
if (ts_remote + jitter < ts_start or ts_remote - jitter > ts_end):
diff = abs(ts_end - ts_remote)
ts_end_f = self._ptime(ts_end)
ts_remote_f = self._ptime(ts_remote)
print("!! %s current time is %s, but remote is %s, "
"differs by %.4f sec" % (
url,
ts_end_f,
ts_remote_f,
diff))
continue
matches += 1
if self.verbose:
print("-> %s matches." % url)
print("%s/%s hosts matched, %s error[s] while checking hosts." % (
matches, len(hosts), errors))
print("=" * 79)
def version_check(self, hosts):
"""
        Check the OpenStack Swift version on hosts. Report if versions differ.
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
"""
versions = set()
errors = 0
print("[%s] Checking versions" % self._ptime())
recon = Scout("version", self.verbose, self.suppress_errors,
self.timeout)
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status != 200:
errors = errors + 1
continue
versions.add(response['version'])
if self.verbose:
print("-> %s installed version %s" % (
url, response['version']))
if not len(versions):
print("No hosts returned valid data.")
elif len(versions) == 1:
print("Versions matched (%s), "
"%s error[s] while checking hosts." % (
versions.pop(), errors))
else:
print("Versions not matched (%s), "
"%s error[s] while checking hosts." % (
", ".join(sorted(versions)), errors))
print("=" * 79)
def _get_ring_names(self, policy=None):
"""
        Retrieve the names of the ring files.
If no policy is passed and the server type is object,
the ring names of all storage-policies are retrieved.
:param policy: name or index of storage policy, only applicable
with server_type==object.
:returns: list of ring names.
"""
if self.server_type == 'object':
ring_names = [p.ring_name for p in POLICIES if (
p.name == policy or not policy or (
policy.isdigit() and int(policy) == int(p) or
(isinstance(policy, string_types)
and policy in p.aliases)))]
else:
ring_names = [self.server_type]
return ring_names
def main(self):
"""
Retrieve and report cluster info from hosts running recon middleware.
"""
print("=" * 79)
usage = '''
usage: %prog <server_type> [<server_type> [<server_type>]]
[-v] [--suppress] [-a] [-r] [-u] [-d] [-R]
[-l] [-T] [--md5] [--auditor] [--updater] [--expirer] [--sockstat]
[--human-readable]
<server_type>\taccount|container|object
Defaults to object server.
ex: %prog container -l --auditor
'''
args = optparse.OptionParser(usage)
args.add_option('--verbose', '-v', action="store_true",
help="Print verbose info")
args.add_option('--suppress', action="store_true",
help="Suppress most connection related errors")
args.add_option('--async', '-a',
action="store_true", dest="async_check",
help="Get async stats")
args.add_option('--replication', '-r', action="store_true",
help="Get replication stats")
args.add_option('--reconstruction', '-R', action="store_true",
help="Get reconstruction stats")
args.add_option('--auditor', action="store_true",
help="Get auditor stats")
args.add_option('--updater', action="store_true",
help="Get updater stats")
args.add_option('--expirer', action="store_true",
help="Get expirer stats")
args.add_option('--sharding', action="store_true",
help="Get sharding stats")
args.add_option('--unmounted', '-u', action="store_true",
help="Check cluster for unmounted devices")
args.add_option('--diskusage', '-d', action="store_true",
help="Get disk usage stats")
args.add_option('--human-readable', action="store_true",
help="Use human readable suffix for disk usage stats")
args.add_option('--loadstats', '-l', action="store_true",
help="Get cluster load average stats")
args.add_option('--quarantined', '-q', action="store_true",
help="Get cluster quarantine stats")
args.add_option('--validate-servers', action="store_true",
help="Validate servers on the ring")
args.add_option('--md5', action="store_true",
help="Get md5sum of servers ring and compare to "
"local copy")
args.add_option('--sockstat', action="store_true",
help="Get cluster socket usage stats")
args.add_option('--driveaudit', action="store_true",
help="Get drive audit error stats")
args.add_option('--time', '-T', action="store_true",
help="Check time synchronization")
args.add_option('--jitter', type="float", default=0.0,
help="Maximal allowed time jitter")
args.add_option('--swift-versions', action="store_true",
help="Check swift versions")
args.add_option('--top', type='int', metavar='COUNT', default=0,
help='Also show the top COUNT entries in rank order.')
args.add_option('--lowest', type='int', metavar='COUNT', default=0,
help='Also show the lowest COUNT entries in rank \
order.')
args.add_option('--all', action="store_true",
help="Perform all checks. Equal to \t\t\t-arRudlqT "
"--md5 --sockstat --auditor --updater --expirer "
"--driveaudit --validate-servers --swift-versions")
args.add_option('--region', type="int",
help="Only query servers in specified region")
args.add_option('--zone', '-z', type="int",
help="Only query servers in specified zone")
args.add_option('--timeout', '-t', type="int", metavar="SECONDS",
help="Time to wait for a response from a server",
default=5)
args.add_option('--swiftdir', default="/etc/swift",
help="Default = /etc/swift")
args.add_option('--policy', '-p',
help='Only query object servers in specified '
'storage policy (specified as name or index).')
options, arguments = args.parse_args()
if len(sys.argv) <= 1 or len(arguments) > len(self.check_types):
args.print_help()
sys.exit(0)
if arguments:
arguments = set(arguments)
if arguments.issubset(self.check_types):
server_types = arguments
else:
print("Invalid Server Type")
args.print_help()
sys.exit(1)
else: # default
server_types = ['object']
swift_dir = options.swiftdir
if set_swift_dir(swift_dir):
reload_storage_policies()
self.verbose = options.verbose
self.suppress_errors = options.suppress
self.timeout = options.timeout
for server_type in server_types:
self.server_type = server_type
ring_names = self._get_ring_names(options.policy)
if not ring_names:
print('Invalid Storage Policy: %s' % options.policy)
args.print_help()
sys.exit(0)
hosts = self.get_hosts(options.region, options.zone,
swift_dir, ring_names)
print("--> Starting reconnaissance on %s hosts (%s)" %
(len(hosts), self.server_type))
print("=" * 79)
if options.all:
if self.server_type == 'object':
self.async_check(hosts)
self.object_auditor_check(hosts)
self.updater_check(hosts)
self.expirer_check(hosts)
self.reconstruction_check(hosts)
elif self.server_type == 'container':
self.auditor_check(hosts)
self.updater_check(hosts)
self.sharding_check(hosts)
elif self.server_type == 'account':
self.auditor_check(hosts)
self.replication_check(hosts)
self.umount_check(hosts)
self.load_check(hosts)
self.disk_usage(hosts, options.top, options.lowest,
options.human_readable)
self.get_ringmd5(hosts, swift_dir)
self.get_swiftconfmd5(hosts)
self.quarantine_check(hosts)
self.socket_usage(hosts)
self.server_type_check(hosts)
self.driveaudit_check(hosts)
self.time_check(hosts, options.jitter)
self.version_check(hosts)
else:
if options.async_check:
if self.server_type == 'object':
self.async_check(hosts)
else:
print("Error: Can't check asyncs on non object "
"servers.")
print("=" * 79)
if options.unmounted:
self.umount_check(hosts)
if options.replication:
self.replication_check(hosts)
if options.auditor:
if self.server_type == 'object':
self.object_auditor_check(hosts)
else:
self.auditor_check(hosts)
if options.updater:
if self.server_type == 'account':
print("Error: Can't check updaters on account "
"servers.")
print("=" * 79)
else:
self.updater_check(hosts)
if options.expirer:
if self.server_type == 'object':
self.expirer_check(hosts)
else:
print("Error: Can't check expirer on non object "
"servers.")
print("=" * 79)
if options.sharding:
if self.server_type == 'container':
self.sharding_check(hosts)
else:
print("Error: Can't check sharding on non container "
"servers.")
print("=" * 79)
if options.reconstruction:
if self.server_type == 'object':
self.reconstruction_check(hosts)
else:
print("Error: Can't check reconstruction stats on "
"non object servers.")
print("=" * 79)
if options.validate_servers:
self.server_type_check(hosts)
if options.loadstats:
self.load_check(hosts)
if options.diskusage:
self.disk_usage(hosts, options.top, options.lowest,
options.human_readable)
if options.md5:
self.get_ringmd5(hosts, swift_dir)
self.get_swiftconfmd5(hosts)
if options.quarantined:
self.quarantine_check(hosts)
if options.sockstat:
self.socket_usage(hosts)
if options.driveaudit:
self.driveaudit_check(hosts)
if options.time:
self.time_check(hosts, options.jitter)
if options.swift_versions:
self.version_check(hosts)
def main():
try:
reconnoiter = SwiftRecon()
reconnoiter.main()
except KeyboardInterrupt:
print('\n')
| swift-master | swift/cli/recon.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import errno
import os
import uuid
from time import ctime, time
from random import choice, random
from struct import unpack_from
from eventlet import sleep, Timeout
from six.moves.urllib.parse import urlparse
import swift.common.db
from swift.common.db import DatabaseConnectionError
from swift.container.backend import ContainerBroker
from swift.container.sync_store import ContainerSyncStore
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.internal_client import (
delete_object, put_object, head_object,
InternalClient, UnexpectedResponse)
from swift.common.exceptions import ClientException
from swift.common.ring import Ring
from swift.common.ring.utils import is_local_device
from swift.common.swob import normalize_etag
from swift.common.utils import (
clean_content_type, config_true_value,
FileLikeIter, get_logger, hash_path, quote, validate_sync_to,
whataremyips, Timestamp, decode_timestamps)
from swift.common.daemon import Daemon
from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND, HTTP_CONFLICT
from swift.common.wsgi import ConfigString
from swift.common.middleware.versioned_writes.object_versioning import (
SYSMETA_VERSIONS_CONT, SYSMETA_VERSIONS_SYMLINK)
# The default internal client config body is to support upgrades without
# requiring deployment of the new /etc/swift/internal-client.conf
ic_conf_body = """
[DEFAULT]
[pipeline:main]
pipeline = catch_errors proxy-logging cache symlink proxy-server
[app:proxy-server]
use = egg:swift#proxy
account_autocreate = true
[filter:symlink]
use = egg:swift#symlink
[filter:cache]
use = egg:swift#memcache
[filter:proxy-logging]
use = egg:swift#proxy_logging
[filter:catch_errors]
use = egg:swift#catch_errors
""".lstrip()
class ContainerSync(Daemon):
"""
Daemon to sync syncable containers.
This is done by scanning the local devices for container databases and
checking for x-container-sync-to and x-container-sync-key metadata values.
If they exist, newer rows since the last sync will trigger PUTs or DELETEs
to the other container.
The actual syncing is slightly more complicated to make use of the three
(or number-of-replicas) main nodes for a container without each trying to
do the exact same work but also without missing work if one node happens to
be down.
Two sync points are kept per container database. All rows between the two
sync points trigger updates. Any rows newer than both sync points cause
updates depending on the node's position for the container (primary nodes
do one third, etc. depending on the replica count of course). After a sync
run, the first sync point is set to the newest ROWID known and the second
sync point is set to newest ROWID for which all updates have been sent.
An example may help. Assume replica count is 3 and perfectly matching
ROWIDs starting at 1.
First sync run, database has 6 rows:
* SyncPoint1 starts as -1.
* SyncPoint2 starts as -1.
* No rows between points, so no "all updates" rows.
* Six rows newer than SyncPoint1, so a third of the rows are sent
by node 1, another third by node 2, remaining third by node 3.
* SyncPoint1 is set as 6 (the newest ROWID known).
* SyncPoint2 is left as -1 since no "all updates" rows were synced.
Next sync run, database has 12 rows:
* SyncPoint1 starts as 6.
* SyncPoint2 starts as -1.
* The rows between -1 and 6 all trigger updates (most of which
should short-circuit on the remote end as having already been
done).
* Six more rows newer than SyncPoint1, so a third of the rows are
sent by node 1, another third by node 2, remaining third by node
3.
* SyncPoint1 is set as 12 (the newest ROWID known).
* SyncPoint2 is set as 6 (the newest "all updates" ROWID).
In this way, under normal circumstances each node sends its share of
updates each run and just sends a batch of older updates to ensure nothing
was missed.
:param conf: The dict of configuration values from the [container-sync]
section of the container-server.conf
:param container_ring: If None, the <swift_dir>/container.ring.gz will be
loaded. This is overridden by unit tests.
"""
log_route = 'container-sync'
def __init__(self, conf, container_ring=None, logger=None):
#: The dict of configuration values from the [container-sync] section
#: of the container-server.conf.
self.conf = conf
#: Logger to use for container-sync log lines.
self.logger = logger or get_logger(conf, log_route=self.log_route)
#: Path to the local device mount points.
self.devices = conf.get('devices', '/srv/node')
#: Indicates whether mount points should be verified as actual mount
#: points (normally true, false for tests and SAIO).
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
#: Minimum time between full scans. This is to keep the daemon from
#: running wild on near empty systems.
self.interval = float(conf.get('interval', 300))
#: Maximum amount of time to spend syncing a container before moving on
#: to the next one. If a container sync hasn't finished in this time,
#: it'll just be resumed next scan.
self.container_time = int(conf.get('container_time', 60))
#: ContainerSyncCluster instance for validating sync-to values.
self.realms_conf = ContainerSyncRealms(
os.path.join(
conf.get('swift_dir', '/etc/swift'),
'container-sync-realms.conf'),
self.logger)
#: The list of hosts we're allowed to send syncs to. This can be
#: overridden by data in self.realms_conf
self.allowed_sync_hosts = [
h.strip()
for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
if h.strip()]
self.http_proxies = [
a.strip()
for a in conf.get('sync_proxy', '').split(',')
if a.strip()]
#: ContainerSyncStore instance for iterating over synced containers
self.sync_store = ContainerSyncStore(self.devices,
self.logger,
self.mount_check)
#: Number of containers with sync turned on that were successfully
#: synced.
self.container_syncs = 0
#: Number of successful DELETEs triggered.
self.container_deletes = 0
#: Number of successful PUTs triggered.
self.container_puts = 0
#: Number of containers whose sync has been turned off, but
#: are not yet cleared from the sync store.
self.container_skips = 0
#: Number of containers that had a failure of some type.
self.container_failures = 0
#: Per container stats. These are collected per container.
#: puts - the number of puts that were done for the container
        #: deletes - the number of deletes that were done for the container
        #: bytes - the total number of bytes transferred for the container
self.container_stats = collections.defaultdict(int)
self.container_stats.clear()
#: Time of last stats report.
self.reported = time()
self.swift_dir = conf.get('swift_dir', '/etc/swift')
#: swift.common.ring.Ring for locating containers.
self.container_ring = container_ring or Ring(self.swift_dir,
ring_name='container')
bind_ip = conf.get('bind_ip', '0.0.0.0')
self._myips = whataremyips(bind_ip)
self._myport = int(conf.get('bind_port', 6201))
swift.common.db.DB_PREALLOCATION = \
config_true_value(conf.get('db_preallocation', 'f'))
self.conn_timeout = float(conf.get('conn_timeout', 5))
request_tries = int(conf.get('request_tries') or 3)
internal_client_conf_path = conf.get('internal_client_conf_path')
if not internal_client_conf_path:
self.logger.warning(
'Configuration option internal_client_conf_path not '
'defined. Using default configuration, See '
'internal-client.conf-sample for options')
internal_client_conf = ConfigString(ic_conf_body)
else:
internal_client_conf = internal_client_conf_path
try:
self.swift = InternalClient(
internal_client_conf, 'Swift Container Sync', request_tries,
use_replication_network=True,
global_conf={'log_name': '%s-ic' % conf.get(
'log_name', self.log_route)})
except (OSError, IOError) as err:
if err.errno != errno.ENOENT and \
not str(err).endswith(' not found'):
raise
raise SystemExit(
'Unable to load internal client from config: '
'%(conf)r (%(error)s)'
% {'conf': internal_client_conf_path, 'error': err})
def run_forever(self, *args, **kwargs):
"""
Runs container sync scans until stopped.
"""
sleep(random() * self.interval)
while True:
begin = time()
for path in self.sync_store.synced_containers_generator():
self.container_stats.clear()
self.container_sync(path)
if time() - self.reported >= 3600: # once an hour
self.report()
elapsed = time() - begin
if elapsed < self.interval:
sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""
Runs a single container sync scan.
"""
self.logger.info('Begin container sync "once" mode')
begin = time()
for path in self.sync_store.synced_containers_generator():
self.container_sync(path)
if time() - self.reported >= 3600: # once an hour
self.report()
self.report()
elapsed = time() - begin
self.logger.info(
'Container sync "once" mode completed: %.02fs', elapsed)
def report(self):
"""
Writes a report of the stats to the logger and resets the stats for the
next report.
"""
self.logger.info(
'Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s '
'puts], %(skip)s skipped, %(fail)s failed',
{'time': ctime(self.reported),
'sync': self.container_syncs,
'delete': self.container_deletes,
'put': self.container_puts,
'skip': self.container_skips,
'fail': self.container_failures})
self.reported = time()
self.container_syncs = 0
self.container_deletes = 0
self.container_puts = 0
self.container_skips = 0
self.container_failures = 0
def container_report(self, start, end, sync_point1, sync_point2, info,
max_row):
self.logger.info('Container sync report: %(container)s, '
'time window start: %(start)s, '
'time window end: %(end)s, '
'puts: %(puts)s, '
'posts: %(posts)s, '
'deletes: %(deletes)s, '
'bytes: %(bytes)s, '
'sync_point1: %(point1)s, '
'sync_point2: %(point2)s, '
'total_rows: %(total)s',
{'container': '%s/%s' % (info['account'],
info['container']),
'start': start,
'end': end,
'puts': self.container_stats['puts'],
'posts': 0,
'deletes': self.container_stats['deletes'],
'bytes': self.container_stats['bytes'],
'point1': sync_point1,
'point2': sync_point2,
'total': max_row})
def container_sync(self, path):
"""
Checks the given path for a container database, determines if syncing
is turned on for that database and, if so, sends any updates to the
other container.
:param path: the path to a container db
"""
broker = None
try:
broker = ContainerBroker(path, logger=self.logger)
# The path we pass to the ContainerBroker is a real path of
# a container DB. If we get here, however, it means that this
# path is linked from the sync_containers dir. In rare cases
            # of races or process failures the link can be stale and
            # the get_info below will raise a "DB doesn't exist" exception.
            # In this case we remove the stale link and raise an error
# since in most cases the db should be there.
try:
info = broker.get_info()
except DatabaseConnectionError as db_err:
if str(db_err).endswith("DB doesn't exist"):
self.sync_store.remove_synced_container(broker)
raise
x, nodes = self.container_ring.get_nodes(info['account'],
info['container'])
for ordinal, node in enumerate(nodes):
if is_local_device(self._myips, self._myport,
node['ip'], node['port']):
break
else:
return
if broker.metadata.get(SYSMETA_VERSIONS_CONT):
self.container_skips += 1
self.logger.increment('skips')
self.logger.warning('Skipping container %s/%s with '
'object versioning configured' % (
info['account'], info['container']))
return
if not broker.is_deleted():
sync_to = None
user_key = None
sync_point1 = info['x_container_sync_point1']
sync_point2 = info['x_container_sync_point2']
for key, (value, timestamp) in broker.metadata.items():
if key.lower() == 'x-container-sync-to':
sync_to = value
elif key.lower() == 'x-container-sync-key':
user_key = value
if not sync_to or not user_key:
self.container_skips += 1
self.logger.increment('skips')
return
err, sync_to, realm, realm_key = validate_sync_to(
sync_to, self.allowed_sync_hosts, self.realms_conf)
if err:
self.logger.info(
'ERROR %(db_file)s: %(validate_sync_to_err)s',
{'db_file': str(broker),
'validate_sync_to_err': err})
self.container_failures += 1
self.logger.increment('failures')
return
start_at = time()
stop_at = start_at + self.container_time
next_sync_point = None
sync_stage_time = start_at
try:
while time() < stop_at and sync_point2 < sync_point1:
rows = broker.get_items_since(sync_point2, 1)
if not rows:
break
row = rows[0]
if row['ROWID'] > sync_point1:
break
# This node will only initially sync out one third
# of the objects (if 3 replicas, 1/4 if 4, etc.)
# and will skip problematic rows as needed in case of
# faults.
# This section will attempt to sync previously skipped
# rows in case the previous attempts by any of the
# nodes didn't succeed.
if not self.container_sync_row(
row, sync_to, user_key, broker, info, realm,
realm_key):
if not next_sync_point:
next_sync_point = sync_point2
sync_point2 = row['ROWID']
broker.set_x_container_sync_points(None, sync_point2)
if next_sync_point:
broker.set_x_container_sync_points(None,
next_sync_point)
else:
next_sync_point = sync_point2
sync_stage_time = time()
while sync_stage_time < stop_at:
rows = broker.get_items_since(sync_point1, 1)
if not rows:
break
row = rows[0]
key = hash_path(info['account'], info['container'],
row['name'], raw_digest=True)
# This node will only initially sync out one third of
# the objects (if 3 replicas, 1/4 if 4, etc.).
# It'll come back around to the section above
# and attempt to sync previously skipped rows in case
# the other nodes didn't succeed or in case it failed
# to do so the first time.
if unpack_from('>I', key)[0] % \
len(nodes) == ordinal:
self.container_sync_row(
row, sync_to, user_key, broker, info, realm,
realm_key)
sync_point1 = row['ROWID']
broker.set_x_container_sync_points(sync_point1, None)
sync_stage_time = time()
self.container_syncs += 1
self.logger.increment('syncs')
finally:
self.container_report(start_at, sync_stage_time,
sync_point1,
next_sync_point,
info, broker.get_max_row())
except (Exception, Timeout):
self.container_failures += 1
self.logger.increment('failures')
self.logger.exception('ERROR Syncing %s',
broker if broker else path)
def _update_sync_to_headers(self, name, sync_to, user_key,
realm, realm_key, method, headers):
"""
Updates container sync headers
:param name: The name of the object
:param sync_to: The URL to the remote container.
:param user_key: The X-Container-Sync-Key to use when sending requests
to the other container.
:param realm: The realm from self.realms_conf, if there is one.
If None, fallback to using the older allowed_sync_hosts
way of syncing.
:param realm_key: The realm key from self.realms_conf, if there
is one. If None, fallback to using the older
allowed_sync_hosts way of syncing.
:param method: HTTP method to create sig with
:param headers: headers to update with container sync headers
"""
if realm and realm_key:
nonce = uuid.uuid4().hex
path = urlparse(sync_to).path + '/' + quote(name)
sig = self.realms_conf.get_sig(method, path,
headers.get('x-timestamp', 0),
nonce, realm_key,
user_key)
headers['x-container-sync-auth'] = '%s %s %s' % (realm,
nonce,
sig)
else:
headers['x-container-sync-key'] = user_key
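    # Illustrative result (added for clarity): with a realm configured the
    # outgoing request carries a header shaped like
    #   x-container-sync-auth: <realm> <nonce> <hmac-signature>
    # whereas the legacy allowed_sync_hosts path simply sends
    #   x-container-sync-key: <user_key>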
def _object_in_remote_container(self, name, sync_to, user_key,
realm, realm_key, timestamp):
"""
        Performs a HEAD request on the remote object to avoid an extra remote
        PUT and local GET of the object
:param name: The name of the object in the updated row in the local
database triggering the sync update.
:param sync_to: The URL to the remote container.
:param user_key: The X-Container-Sync-Key to use when sending requests
to the other container.
:param realm: The realm from self.realms_conf, if there is one.
If None, fallback to using the older allowed_sync_hosts
way of syncing.
:param realm_key: The realm key from self.realms_conf, if there
is one. If None, fallback to using the older
allowed_sync_hosts way of syncing.
:param timestamp: last modified date of local object
:returns: True if object already exists in remote
"""
headers = {'x-timestamp': timestamp.internal}
self._update_sync_to_headers(name, sync_to, user_key, realm,
realm_key, 'HEAD', headers)
try:
metadata, _ = head_object(sync_to, name=name,
headers=headers,
proxy=self.select_http_proxy(),
logger=self.logger,
retries=0)
remote_ts = Timestamp(metadata.get('x-timestamp', 0))
self.logger.debug("remote obj timestamp %s local obj %s" %
(timestamp.internal, remote_ts.internal))
if timestamp <= remote_ts:
return True
# Object in remote should be updated
return False
except ClientException as http_err:
# Object not in remote
if http_err.http_status == 404:
return False
raise http_err
def container_sync_row(self, row, sync_to, user_key, broker, info,
realm, realm_key):
"""
Sends the update the row indicates to the sync_to container.
Update can be either delete or put.
:param row: The updated row in the local database triggering the sync
update.
:param sync_to: The URL to the remote container.
:param user_key: The X-Container-Sync-Key to use when sending requests
to the other container.
:param broker: The local container database broker.
:param info: The get_info result from the local container database
broker.
:param realm: The realm from self.realms_conf, if there is one.
If None, fallback to using the older allowed_sync_hosts
way of syncing.
:param realm_key: The realm key from self.realms_conf, if there
is one. If None, fallback to using the older
allowed_sync_hosts way of syncing.
:returns: True on success
"""
try:
start_time = time()
# extract last modified time from the created_at value
ts_data, ts_ctype, ts_meta = decode_timestamps(
row['created_at'])
if row['deleted']:
# when sync'ing a deleted object, use ts_data - this is the
# timestamp of the source tombstone
try:
headers = {'x-timestamp': ts_data.internal}
self._update_sync_to_headers(row['name'], sync_to,
user_key, realm, realm_key,
'DELETE', headers)
delete_object(sync_to, name=row['name'], headers=headers,
proxy=self.select_http_proxy(),
logger=self.logger,
timeout=self.conn_timeout)
except ClientException as err:
if err.http_status not in (
HTTP_NOT_FOUND, HTTP_CONFLICT):
raise
self.container_deletes += 1
self.container_stats['deletes'] += 1
self.logger.increment('deletes')
self.logger.timing_since('deletes.timing', start_time)
else:
# when sync'ing a live object, use ts_meta - this is the time
# at which the source object was last modified by a PUT or POST
if self._object_in_remote_container(row['name'],
sync_to, user_key, realm,
realm_key, ts_meta):
return True
exc = None
                # look up the newest one; the symlink=get query-string has
# no effect unless symlinks are enabled in the internal client
# in which case it ensures that symlink objects retain their
# symlink property when sync'd.
headers_out = {'X-Newest': True,
'X-Backend-Storage-Policy-Index':
str(info['storage_policy_index'])}
try:
source_obj_status, headers, body = \
self.swift.get_object(info['account'],
info['container'], row['name'],
headers=headers_out,
acceptable_statuses=(2, 4),
params={'symlink': 'get'})
except (Exception, UnexpectedResponse, Timeout) as err:
headers = {}
body = None
exc = err
# skip object_versioning links; this is in case the container
# metadata is out of date
if headers.get(SYSMETA_VERSIONS_SYMLINK):
self.logger.info(
'Skipping versioning symlink %s/%s/%s ' % (
info['account'], info['container'],
row['name']))
return True
timestamp = Timestamp(headers.get('x-timestamp', 0))
if timestamp < ts_meta:
if exc:
raise exc
raise Exception(
'Unknown exception trying to GET: '
'%(account)r %(container)r %(object)r' %
{'account': info['account'],
'container': info['container'],
'object': row['name']})
for key in ('date', 'last-modified'):
if key in headers:
del headers[key]
if 'etag' in headers:
headers['etag'] = normalize_etag(headers['etag'])
if 'content-type' in headers:
headers['content-type'] = clean_content_type(
headers['content-type'])
self._update_sync_to_headers(row['name'], sync_to, user_key,
realm, realm_key, 'PUT', headers)
put_object(sync_to, name=row['name'], headers=headers,
contents=FileLikeIter(body),
proxy=self.select_http_proxy(), logger=self.logger,
timeout=self.conn_timeout)
self.container_puts += 1
self.container_stats['puts'] += 1
self.container_stats['bytes'] += row['size']
self.logger.increment('puts')
self.logger.timing_since('puts.timing', start_time)
except ClientException as err:
if err.http_status == HTTP_UNAUTHORIZED:
self.logger.info(
'Unauth %(sync_from)r => %(sync_to)r',
{'sync_from': '%s/%s' %
(quote(info['account']), quote(info['container'])),
'sync_to': sync_to})
elif err.http_status == HTTP_NOT_FOUND:
self.logger.info(
'Not found %(sync_from)r => %(sync_to)r \
- object %(obj_name)r',
{'sync_from': '%s/%s' %
(quote(info['account']), quote(info['container'])),
'sync_to': sync_to, 'obj_name': row['name']})
else:
self.logger.exception(
'ERROR Syncing %(db_file)s %(row)s',
{'db_file': str(broker), 'row': row})
self.container_failures += 1
self.logger.increment('failures')
return False
except (Exception, Timeout):
self.logger.exception(
'ERROR Syncing %(db_file)s %(row)s',
{'db_file': str(broker), 'row': row})
self.container_failures += 1
self.logger.increment('failures')
return False
return True
def select_http_proxy(self):
return choice(self.http_proxies) if self.http_proxies else None
| swift-master | swift/container/sync.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import time
import traceback
from eventlet import Timeout
import six
from six.moves.urllib.parse import quote
import swift.common.db
from swift.container.sync_store import ContainerSyncStore
from swift.container.backend import ContainerBroker, DATADIR, \
RECORD_TYPE_SHARD, UNSHARDED, SHARDING, SHARDED, SHARD_UPDATE_STATES
from swift.container.replicator import ContainerReplicatorRpc
from swift.common.db import DatabaseAlreadyExists
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.request_helpers import split_and_validate_path, \
is_sys_or_user_meta, validate_internal_container, validate_internal_obj, \
validate_container_params
from swift.common.utils import get_logger, hash_path, public, \
Timestamp, storage_directory, validate_sync_to, \
config_true_value, timing_stats, replication, \
override_bytes_from_content_type, get_log_line, \
config_fallocate_value, fs_has_free_space, list_from_csv, \
ShardRange
from swift.common.constraints import valid_timestamp, check_utf8, \
check_drive, AUTO_CREATE_ACCOUNT_PREFIX
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.http import HTTP_NO_CONTENT, HTTP_NOT_FOUND, is_success
from swift.common.middleware import listing_formats
from swift.common.storage_policy import POLICIES
from swift.common.base_storage_server import BaseStorageServer
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPConflict, \
HTTPCreated, HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \
HTTPPreconditionFailed, HTTPMethodNotAllowed, Request, Response, \
HTTPInsufficientStorage, HTTPException, HTTPMovedPermanently, \
wsgi_to_str, str_to_wsgi
def gen_resp_headers(info, is_deleted=False):
"""
Convert container info dict to headers.
"""
# backend headers are always included
headers = {
'X-Backend-Timestamp': Timestamp(info.get('created_at', 0)).internal,
'X-Backend-PUT-Timestamp': Timestamp(info.get(
'put_timestamp', 0)).internal,
'X-Backend-DELETE-Timestamp': Timestamp(
info.get('delete_timestamp', 0)).internal,
'X-Backend-Status-Changed-At': Timestamp(
info.get('status_changed_at', 0)).internal,
'X-Backend-Storage-Policy-Index': info.get('storage_policy_index', 0),
}
if not is_deleted:
# base container info on deleted containers is not exposed to client
headers.update({
'X-Container-Object-Count': info.get('object_count', 0),
'X-Container-Bytes-Used': info.get('bytes_used', 0),
'X-Timestamp': Timestamp(info.get('created_at', 0)).normal,
'X-PUT-Timestamp': Timestamp(
info.get('put_timestamp', 0)).normal,
'X-Backend-Sharding-State': info.get('db_state', UNSHARDED),
})
return headers
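# Illustrative sketch (not part of the original source): for a live container
# the returned dict mixes backend timestamps with client-visible counters,
# roughly
#   {'X-Backend-Timestamp': '0000012345.00000', ...,
#    'X-Container-Object-Count': 42, 'X-Container-Bytes-Used': 1024,
#    'X-Backend-Sharding-State': 'unsharded'}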
def get_container_name_and_placement(req):
"""
Split and validate path for a container.
:param req: a swob request
:returns: a tuple of path parts as strings
"""
drive, part, account, container = split_and_validate_path(req, 4)
validate_internal_container(account, container)
return drive, part, account, container
def get_obj_name_and_placement(req):
"""
Split and validate path for an object.
:param req: a swob request
:returns: a tuple of path parts as strings
"""
drive, part, account, container, obj = split_and_validate_path(
req, 4, 5, True)
validate_internal_obj(account, container, obj)
return drive, part, account, container, obj
class ContainerController(BaseStorageServer):
"""WSGI Controller for the container server."""
# Ensure these are all lowercase
save_headers = ['x-container-read', 'x-container-write',
'x-container-sync-key', 'x-container-sync-to']
server_type = 'container-server'
def __init__(self, conf, logger=None):
super(ContainerController, self).__init__(conf)
self.logger = logger or get_logger(conf, log_route='container-server')
self.log_requests = config_true_value(conf.get('log_requests', 'true'))
self.root = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.node_timeout = float(conf.get('node_timeout', 3))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
#: ContainerSyncCluster instance for validating sync-to values.
self.realms_conf = ContainerSyncRealms(
os.path.join(
conf.get('swift_dir', '/etc/swift'),
'container-sync-realms.conf'),
self.logger)
#: The list of hosts we're allowed to send syncs to. This can be
#: overridden by data in self.realms_conf
self.allowed_sync_hosts = [
h.strip()
for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
if h.strip()]
self.replicator_rpc = ContainerReplicatorRpc(
self.root, DATADIR, ContainerBroker, self.mount_check,
logger=self.logger)
if conf.get('auto_create_account_prefix'):
self.logger.warning('Option auto_create_account_prefix is '
'deprecated. Configure '
'auto_create_account_prefix under the '
'swift-constraints section of '
'swift.conf. This option will '
'be ignored in a future release.')
self.auto_create_account_prefix = \
conf['auto_create_account_prefix']
else:
self.auto_create_account_prefix = AUTO_CREATE_ACCOUNT_PREFIX
self.shards_account_prefix = (
self.auto_create_account_prefix + 'shards_')
if config_true_value(conf.get('allow_versions', 'f')):
self.save_headers.append('x-versions-location')
if 'allow_versions' in conf:
self.logger.warning('Option allow_versions is deprecated. '
'Configure the versioned_writes middleware in '
'the proxy-server instead. This option will '
'be ignored in a future release.')
swift.common.db.DB_PREALLOCATION = \
config_true_value(conf.get('db_preallocation', 'f'))
swift.common.db.QUERY_LOGGING = \
config_true_value(conf.get('db_query_logging', 'f'))
self.sync_store = ContainerSyncStore(self.root,
self.logger,
self.mount_check)
self.fallocate_reserve, self.fallocate_is_percent = \
config_fallocate_value(conf.get('fallocate_reserve', '1%'))
def _get_container_broker(self, drive, part, account, container, **kwargs):
"""
Get a DB broker for the container.
:param drive: drive that holds the container
:param part: partition the container is in
:param account: account name
:param container: container name
:returns: ContainerBroker object
"""
hsh = hash_path(account, container)
db_dir = storage_directory(DATADIR, part, hsh)
db_path = os.path.join(self.root, drive, db_dir, hsh + '.db')
kwargs.setdefault('account', account)
kwargs.setdefault('container', container)
kwargs.setdefault('logger', self.logger)
return ContainerBroker(db_path, **kwargs)
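    # Illustrative layout (assuming the default '/srv/node' devices root, not
    # part of the original source): for drive 'sdb1' and partition 1234 the
    # broker path built above looks like
    #   /srv/node/sdb1/containers/1234/<hash[-3:]>/<hash>/<hash>.db
    # where <hash> is hash_path(account, container).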
def get_and_validate_policy_index(self, req):
"""
Validate that the index supplied maps to a policy.
:returns: policy index from request, or None if not present
:raises HTTPBadRequest: if the supplied index is bogus
"""
header = 'X-Backend-Storage-Policy-Index'
policy_index = req.headers.get(header, None)
if policy_index is None:
return None
try:
policy_index = int(policy_index)
policy = POLICIES.get_by_index(policy_index)
if policy is None:
raise ValueError
except ValueError:
raise HTTPBadRequest(
request=req, content_type="text/plain",
body="Invalid %s %r" % (header, policy_index))
else:
return int(policy)
def account_update(self, req, account, container, broker):
"""
Update the account server(s) with latest container info.
:param req: swob.Request object
:param account: account name
:param container: container name
:param broker: container DB broker object
:returns: if all the account requests return a 404 error code,
HTTPNotFound response object,
if the account cannot be updated due to a malformed header,
an HTTPBadRequest response object,
otherwise None.
"""
account_hosts = [h.strip() for h in
req.headers.get('X-Account-Host', '').split(',')]
account_devices = [d.strip() for d in
req.headers.get('X-Account-Device', '').split(',')]
account_partition = req.headers.get('X-Account-Partition', '')
if len(account_hosts) != len(account_devices):
# This shouldn't happen unless there's a bug in the proxy,
# but if there is, we want to know about it.
self.logger.error(
'ERROR Account update failed: different '
'numbers of hosts and devices in request: '
'"%(hosts)s" vs "%(devices)s"', {
'hosts': req.headers.get('X-Account-Host', ''),
'devices': req.headers.get('X-Account-Device', '')})
return HTTPBadRequest(req=req)
if account_partition:
# zip is lazy on py3, but we need a list, so force evaluation.
# On py2 it's an extra list copy, but the list is so small
# (one element per replica in account ring, usually 3) that it
# doesn't matter.
updates = list(zip(account_hosts, account_devices))
else:
updates = []
account_404s = 0
for account_host, account_device in updates:
account_ip, account_port = account_host.rsplit(':', 1)
new_path = '/' + '/'.join([account, container])
info = broker.get_info()
account_headers = HeaderKeyDict({
'x-put-timestamp': info['put_timestamp'],
'x-delete-timestamp': info['delete_timestamp'],
'x-object-count': info['object_count'],
'x-bytes-used': info['bytes_used'],
'x-trans-id': req.headers.get('x-trans-id', '-'),
'X-Backend-Storage-Policy-Index': info['storage_policy_index'],
'user-agent': 'container-server %s' % os.getpid(),
'referer': req.as_referer()})
if req.headers.get('x-account-override-deleted', 'no').lower() == \
'yes':
account_headers['x-account-override-deleted'] = 'yes'
try:
with ConnectionTimeout(self.conn_timeout):
conn = http_connect(
account_ip, account_port, account_device,
account_partition, 'PUT', new_path, account_headers)
with Timeout(self.node_timeout):
account_response = conn.getresponse()
account_response.read()
if account_response.status == HTTP_NOT_FOUND:
account_404s += 1
elif not is_success(account_response.status):
self.logger.error(
'ERROR Account update failed '
'with %(ip)s:%(port)s/%(device)s (will retry '
'later): Response %(status)s %(reason)s',
{'ip': account_ip, 'port': account_port,
'device': account_device,
'status': account_response.status,
'reason': account_response.reason})
except (Exception, Timeout):
self.logger.exception(
'ERROR account update failed with '
'%(ip)s:%(port)s/%(device)s (will retry later)',
{'ip': account_ip, 'port': account_port,
'device': account_device})
if updates and account_404s == len(updates):
return HTTPNotFound(req=req)
else:
return None
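    # Illustrative sketch with assumed values, not part of the original
    # module: the proxy supplies matching comma-separated lists so that each
    # account replica receives one update, e.g.
    #   X-Account-Host:      10.0.0.1:6202,10.0.0.2:6202,10.0.0.3:6202
    #   X-Account-Device:    sda,sdb,sdc
    #   X-Account-Partition: 123
    # Mismatched list lengths are answered with HTTPBadRequest; if every
    # attempted update returns 404 the caller gets HTTPNotFound back, and a
    # missing X-Account-Partition simply results in no updates being sent.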
def _update_sync_store(self, broker, method):
try:
self.sync_store.update_sync_store(broker)
except Exception:
self.logger.exception('Failed to update sync_store %s during %s' %
(broker.db_file, method))
def _redirect_to_shard(self, req, broker, obj_name):
"""
If the request indicates that it can accept a redirection, look for a
shard range that contains ``obj_name`` and if one exists return a
HTTPMovedPermanently response.
:param req: an instance of :class:`~swift.common.swob.Request`
:param broker: a container broker
:param obj_name: an object name
:return: an instance of :class:`swift.common.swob.HTTPMovedPermanently`
if a shard range exists for the given ``obj_name``, otherwise None.
"""
if not config_true_value(
req.headers.get('x-backend-accept-redirect', False)):
# We want to avoid fetching shard ranges for the (more
# time-sensitive) object-server update, so allow some misplaced
# objects to land between when we've started sharding and when the
# proxy learns about it. Note that this path is also used by old,
# pre-sharding updaters during a rolling upgrade.
return None
shard_ranges = broker.get_shard_ranges(
includes=obj_name, states=SHARD_UPDATE_STATES)
if not shard_ranges:
return None
# note: obj_name may be included in both a created sub-shard and its
# sharding parent. get_shard_ranges will return the created sub-shard
# in preference to the parent, which is the desired result.
containing_range = shard_ranges[0]
location = "/%s/%s" % (containing_range.name, obj_name)
if location != quote(location) and not config_true_value(
req.headers.get('x-backend-accept-quoted-location', False)):
# Sender expects the destination to be unquoted, but it isn't safe
# to send unquoted. Eat the update for now and let the sharder
# move it later. Should only come up during rolling upgrades.
return None
headers = {'Location': quote(location),
'X-Backend-Location-Is-Quoted': 'true',
'X-Backend-Redirect-Timestamp':
containing_range.timestamp.internal}
# we do not want the host added to the location
req.environ['swift.leave_relative_location'] = True
return HTTPMovedPermanently(headers=headers, request=req)
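    # Illustrative sketch, header values assumed: an object update sent with
    # X-Backend-Accept-Redirect: true for an object that falls in a shard
    # range may be answered with something like
    #   301 Moved Permanently
    #   Location: /<quoted shard container path>/<quoted object name>
    #   X-Backend-Location-Is-Quoted: true
    #   X-Backend-Redirect-Timestamp: <shard range timestamp>
    # so that the sender can redirect the update to the shard container.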
def check_free_space(self, drive):
drive_root = os.path.join(self.root, drive)
return fs_has_free_space(
drive_root, self.fallocate_reserve, self.fallocate_is_percent)
@public
@timing_stats()
def DELETE(self, req):
"""Handle HTTP DELETE request."""
drive, part, account, container, obj = get_obj_name_and_placement(req)
req_timestamp = valid_timestamp(req)
try:
check_drive(self.root, drive, self.mount_check)
except ValueError:
return HTTPInsufficientStorage(drive=drive, request=req)
# policy index is only relevant for delete_obj (and transitively for
# auto create accounts)
obj_policy_index = self.get_and_validate_policy_index(req) or 0
broker = self._get_container_broker(drive, part, account, container)
if obj:
self._maybe_autocreate(broker, req_timestamp, account,
obj_policy_index, req)
elif not os.path.exists(broker.db_file):
return HTTPNotFound()
if obj: # delete object
# redirect if a shard range exists for the object name
redirect = self._redirect_to_shard(req, broker, obj)
if redirect:
return redirect
broker.delete_object(obj, req.headers.get('x-timestamp'),
obj_policy_index)
return HTTPNoContent(request=req)
else:
# delete container
if not broker.empty():
return HTTPConflict(request=req)
existed = Timestamp(broker.get_info()['put_timestamp']) and \
not broker.is_deleted()
broker.delete_db(req_timestamp.internal)
if not broker.is_deleted():
return HTTPConflict(request=req)
self._update_sync_store(broker, 'DELETE')
resp = self.account_update(req, account, container, broker)
if resp:
return resp
if existed:
return HTTPNoContent(request=req)
return HTTPNotFound()
def _update_or_create(self, req, broker, timestamp, new_container_policy,
requested_policy_index):
"""
Create new database broker or update timestamps for existing database.
:param req: the swob request object
:param broker: the broker instance for the container
:param timestamp: internalized timestamp
:param new_container_policy: the storage policy index to use
when creating the container
:param requested_policy_index: the storage policy index sent in the
request, may be None
:returns: created, a bool, if database did not previously exist
"""
if not os.path.exists(broker.db_file):
try:
broker.initialize(timestamp, new_container_policy)
except DatabaseAlreadyExists:
pass
else:
return True # created
recreated = broker.is_deleted()
if recreated:
# only set storage policy on deleted containers
broker.set_storage_policy_index(new_container_policy,
timestamp=timestamp)
elif requested_policy_index is not None:
# validate requested policy with existing container
if requested_policy_index != broker.storage_policy_index:
raise HTTPConflict(request=req,
headers={'x-backend-storage-policy-index':
broker.storage_policy_index})
broker.update_put_timestamp(timestamp)
if broker.is_deleted():
raise HTTPConflict(request=req)
if recreated:
broker.update_status_changed_at(timestamp)
return recreated
def _should_autocreate(self, account, req):
auto_create_header = req.headers.get('X-Backend-Auto-Create')
if auto_create_header:
# If the caller included an explicit X-Backend-Auto-Create header,
# assume they know the behavior they want
return config_true_value(auto_create_header)
if account.startswith(self.shards_account_prefix):
            # we have to special case this subset of the
            # auto_create_account_prefix because we don't want the updater
            # accidentally auto-creating shards; only the sharder creates
            # shards and it will explicitly tell the server to do so
return False
return account.startswith(self.auto_create_account_prefix)
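    # Illustrative sketch assuming the default prefixes '.' for
    # auto_create_account_prefix and '.shards_' for shards_account_prefix:
    #   X-Backend-Auto-Create: true  (any account)  -> True (header wins)
    #   X-Backend-Auto-Create: false (any account)  -> False
    #   account '.shards_AUTH_test', no header      -> False
    #   account '.expiring_objects', no header      -> True
    #   account 'AUTH_test', no header              -> False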
def _maybe_autocreate(self, broker, req_timestamp, account,
policy_index, req):
created = False
should_autocreate = self._should_autocreate(account, req)
if should_autocreate and not os.path.exists(broker.db_file):
if policy_index is None:
raise HTTPBadRequest(
'X-Backend-Storage-Policy-Index header is required')
try:
broker.initialize(req_timestamp.internal, policy_index)
except DatabaseAlreadyExists:
pass
else:
created = True
if not os.path.exists(broker.db_file):
raise HTTPNotFound()
return created
def _update_metadata(self, req, broker, req_timestamp, method):
metadata = {
wsgi_to_str(key): (wsgi_to_str(value), req_timestamp.internal)
for key, value in req.headers.items()
if key.lower() in self.save_headers
or is_sys_or_user_meta('container', key)}
if metadata:
if 'X-Container-Sync-To' in metadata:
if 'X-Container-Sync-To' not in broker.metadata or \
metadata['X-Container-Sync-To'][0] != \
broker.metadata['X-Container-Sync-To'][0]:
broker.set_x_container_sync_points(-1, -1)
broker.update_metadata(metadata, validate_metadata=True)
self._update_sync_store(broker, method)
@public
@timing_stats()
def PUT(self, req):
"""Handle HTTP PUT request."""
drive, part, account, container, obj = get_obj_name_and_placement(req)
req_timestamp = valid_timestamp(req)
if 'x-container-sync-to' in req.headers:
err, sync_to, realm, realm_key = validate_sync_to(
req.headers['x-container-sync-to'], self.allowed_sync_hosts,
self.realms_conf)
if err:
return HTTPBadRequest(err)
try:
check_drive(self.root, drive, self.mount_check)
except ValueError:
return HTTPInsufficientStorage(drive=drive, request=req)
if not self.check_free_space(drive):
return HTTPInsufficientStorage(drive=drive, request=req)
requested_policy_index = self.get_and_validate_policy_index(req)
broker = self._get_container_broker(drive, part, account, container)
if obj: # put container object
# obj put expects the policy_index header, default is for
# legacy support during upgrade.
obj_policy_index = requested_policy_index or 0
self._maybe_autocreate(
broker, req_timestamp, account, obj_policy_index, req)
# redirect if a shard exists for this object name
response = self._redirect_to_shard(req, broker, obj)
if response:
return response
broker.put_object(obj, req_timestamp.internal,
int(req.headers['x-size']),
wsgi_to_str(req.headers['x-content-type']),
wsgi_to_str(req.headers['x-etag']), 0,
obj_policy_index,
wsgi_to_str(req.headers.get(
'x-content-type-timestamp')),
wsgi_to_str(req.headers.get('x-meta-timestamp')))
return HTTPCreated(request=req)
record_type = req.headers.get('x-backend-record-type', '').lower()
if record_type == RECORD_TYPE_SHARD:
try:
# validate incoming data...
shard_ranges = [ShardRange.from_dict(sr)
for sr in json.loads(req.body)]
except (ValueError, KeyError, TypeError) as err:
return HTTPBadRequest('Invalid body: %r' % err)
created = self._maybe_autocreate(
broker, req_timestamp, account, requested_policy_index, req)
self._update_metadata(req, broker, req_timestamp, 'PUT')
if shard_ranges:
# TODO: consider writing the shard ranges into the pending
# file, but if so ensure an all-or-none semantic for the write
broker.merge_shard_ranges(shard_ranges)
else: # put container
if requested_policy_index is None:
# use the default index sent by the proxy if available
new_container_policy = req.headers.get(
'X-Backend-Storage-Policy-Default', int(POLICIES.default))
else:
new_container_policy = requested_policy_index
created = self._update_or_create(req, broker,
req_timestamp.internal,
new_container_policy,
requested_policy_index)
self._update_metadata(req, broker, req_timestamp, 'PUT')
resp = self.account_update(req, account, container, broker)
if resp:
return resp
if created:
return HTTPCreated(request=req,
headers={'x-backend-storage-policy-index':
broker.storage_policy_index})
else:
return HTTPAccepted(request=req,
headers={'x-backend-storage-policy-index':
broker.storage_policy_index})
@public
@timing_stats(sample_rate=0.1)
def HEAD(self, req):
"""Handle HTTP HEAD request."""
drive, part, account, container, obj = get_obj_name_and_placement(req)
out_content_type = listing_formats.get_listing_content_type(req)
try:
check_drive(self.root, drive, self.mount_check)
except ValueError:
return HTTPInsufficientStorage(drive=drive, request=req)
broker = self._get_container_broker(drive, part, account, container,
pending_timeout=0.1,
stale_reads_ok=True)
info, is_deleted = broker.get_info_is_deleted()
headers = gen_resp_headers(info, is_deleted=is_deleted)
if is_deleted:
return HTTPNotFound(request=req, headers=headers)
headers.update(
(str_to_wsgi(key), str_to_wsgi(value))
for key, (value, timestamp) in broker.metadata.items()
if value != '' and (key.lower() in self.save_headers or
is_sys_or_user_meta('container', key)))
headers['Content-Type'] = out_content_type
resp = HTTPNoContent(request=req, headers=headers, charset='utf-8')
resp.last_modified = Timestamp(headers['X-PUT-Timestamp']).ceil()
return resp
def update_data_record(self, record):
"""
        Perform any mutations to container listing records that are common
        to all serialization formats, and return the record as a dict.
        Converts the created time to an ISO timestamp.
        Replaces the size with the 'swift_bytes' content type parameter,
        if present.
        :param record: object entry record
:returns: modified record
"""
if isinstance(record, ShardRange):
created = record.timestamp
response = dict(record)
else:
(name, created, size, content_type, etag) = record[:5]
name_ = name.decode('utf8') if six.PY2 else name
if content_type is None:
return {'subdir': name_}
response = {
'bytes': size, 'hash': etag, 'name': name_,
'content_type': content_type}
override_bytes_from_content_type(response, logger=self.logger)
response['last_modified'] = Timestamp(created).isoformat
return response
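    # Illustrative sketch with assumed values: an object row such as
    #   ('o', '1677000000.00000', 1024, 'text/plain', 'd41d8cd9...')
    # is returned as
    #   {'name': 'o', 'bytes': 1024, 'hash': 'd41d8cd9...',
    #    'content_type': 'text/plain',
    #    'last_modified': '2023-02-21T17:20:00.000000'}
    # (override_bytes_from_content_type replaces 'bytes' with any
    # swift_bytes content-type parameter); a delimiter placeholder whose
    # content_type is None becomes {'subdir': 'o'}, and a ShardRange record
    # is returned as dict(record) with 'last_modified' derived from its
    # timestamp.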
@public
@timing_stats()
def GET(self, req):
"""
Handle HTTP GET request.
The body of the response to a successful GET request contains a listing
of either objects or shard ranges. The exact content of the listing is
determined by a combination of request headers and query string
parameters, as follows:
* The type of the listing is determined by the
``X-Backend-Record-Type`` header. If this header has value ``shard``
then the response body will be a list of shard ranges; if this header
has value ``auto``, and the container state is ``sharding`` or
``sharded``, then the listing will be a list of shard ranges;
otherwise the response body will be a list of objects.
* Both shard range and object listings may be filtered according to
the constraints described below. However, the
``X-Backend-Ignore-Shard-Name-Filter`` header may be used to override
the application of the ``marker``, ``end_marker``, ``includes`` and
``reverse`` parameters to shard range listings. These parameters will
be ignored if the header has the value 'sharded' and the current db
sharding state is also 'sharded'. Note that this header does not
override the ``states`` constraint on shard range listings.
* The order of both shard range and object listings may be reversed by
using a ``reverse`` query string parameter with a
value in :attr:`swift.common.utils.TRUE_VALUES`.
* Both shard range and object listings may be constrained to a name
range by the ``marker`` and ``end_marker`` query string parameters.
Object listings will only contain objects whose names are greater
than any ``marker`` value and less than any ``end_marker`` value.
Shard range listings will only contain shard ranges whose namespace
is greater than or includes any ``marker`` value and is less than or
includes any ``end_marker`` value.
* Shard range listings may also be constrained by an ``includes`` query
string parameter. If this parameter is present the listing will only
contain shard ranges whose namespace includes the value of the
          parameter; any ``marker`` or ``end_marker`` parameters are ignored.
* The length of an object listing may be constrained by the ``limit``
parameter. Object listings may also be constrained by ``prefix``,
``delimiter`` and ``path`` query string parameters.
* Shard range listings will include deleted shard ranges if and only if
the ``X-Backend-Include-Deleted`` header value is one of
:attr:`swift.common.utils.TRUE_VALUES`. Object listings never
include deleted objects.
* Shard range listings may be constrained to include only shard ranges
whose state is specified by a query string ``states`` parameter. If
present, the ``states`` parameter should be a comma separated list of
either the string or integer representation of
:data:`~swift.common.utils.ShardRange.STATES`.
Two alias values may be used in a ``states`` parameter value:
``listing`` will cause the listing to include all shard ranges in a
state suitable for contributing to an object listing; ``updating``
will cause the listing to include all shard ranges in a state
suitable to accept an object update.
If either of these aliases is used then the shard range listing will
if necessary be extended with a synthesised 'filler' range in order
to satisfy the requested name range when insufficient actual shard
ranges are found. Any 'filler' shard range will cover the otherwise
uncovered tail of the requested name range and will point back to the
same container.
* Listings are not normally returned from a deleted container. However,
the ``X-Backend-Override-Deleted`` header may be used with a value in
:attr:`swift.common.utils.TRUE_VALUES` to force a shard range
listing to be returned from a deleted container whose DB file still
exists.
:param req: an instance of :class:`swift.common.swob.Request`
:returns: an instance of :class:`swift.common.swob.Response`
"""
drive, part, account, container, obj = get_obj_name_and_placement(req)
params = validate_container_params(req)
path = params.get('path')
prefix = params.get('prefix')
delimiter = params.get('delimiter')
marker = params.get('marker', '')
end_marker = params.get('end_marker')
limit = params['limit']
reverse = config_true_value(params.get('reverse'))
out_content_type = listing_formats.get_listing_content_type(req)
try:
check_drive(self.root, drive, self.mount_check)
except ValueError:
return HTTPInsufficientStorage(drive=drive, request=req)
broker = self._get_container_broker(drive, part, account, container,
pending_timeout=0.1,
stale_reads_ok=True)
info, is_deleted = broker.get_info_is_deleted()
record_type = req.headers.get('x-backend-record-type', '').lower()
db_state = info.get('db_state')
if record_type == 'auto' and db_state in (SHARDING, SHARDED):
record_type = 'shard'
if record_type == 'shard':
override_deleted = info and config_true_value(
req.headers.get('x-backend-override-deleted', False))
resp_headers = gen_resp_headers(
info, is_deleted=is_deleted and not override_deleted)
if is_deleted and not override_deleted:
return HTTPNotFound(request=req, headers=resp_headers)
resp_headers['X-Backend-Record-Type'] = 'shard'
includes = params.get('includes')
override_filter_hdr = req.headers.get(
'x-backend-override-shard-name-filter', '').lower()
if override_filter_hdr == db_state == 'sharded':
# respect the request to send back *all* ranges if the db is in
# sharded state
resp_headers['X-Backend-Override-Shard-Name-Filter'] = 'true'
marker = end_marker = includes = None
reverse = False
states = params.get('states')
fill_gaps = include_own = False
if states:
states = list_from_csv(states)
fill_gaps = any(('listing' in states, 'updating' in states))
# 'auditing' is used during shard audit; if the shard is
# shrinking then it needs to get acceptor shard ranges, which
# may be the root container itself, so use include_own
include_own = 'auditing' in states
try:
states = broker.resolve_shard_range_states(states)
except ValueError:
return HTTPBadRequest(request=req, body='Bad state')
include_deleted = config_true_value(
req.headers.get('x-backend-include-deleted', False))
container_list = broker.get_shard_ranges(
marker, end_marker, includes, reverse, states=states,
include_deleted=include_deleted, fill_gaps=fill_gaps,
include_own=include_own)
else:
requested_policy_index = self.get_and_validate_policy_index(req)
resp_headers = gen_resp_headers(info, is_deleted=is_deleted)
if is_deleted:
return HTTPNotFound(request=req, headers=resp_headers)
resp_headers['X-Backend-Record-Type'] = 'object'
storage_policy_index = (
requested_policy_index if requested_policy_index is not None
else info['storage_policy_index'])
resp_headers['X-Backend-Record-Storage-Policy-Index'] = \
storage_policy_index
# Use the retired db while container is in process of sharding,
# otherwise use current db
src_broker = broker.get_brokers()[0]
container_list = src_broker.list_objects_iter(
limit, marker, end_marker, prefix, delimiter, path,
storage_policy_index=storage_policy_index,
reverse=reverse, allow_reserved=req.allow_reserved_names)
return self.create_listing(req, out_content_type, info, resp_headers,
broker.metadata, container_list, container)
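    # Illustrative sketch with an assumed request: a backend client that
    # wants shard ranges suitable for object updates might send
    #   GET /sda1/123/a/c?states=updating&includes=o&format=json
    #   X-Backend-Record-Type: shard
    # and receive a JSON list of shard range dicts plus an
    # X-Backend-Record-Type: shard response header; the same path without
    # that header returns an object listing for the container.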
def create_listing(self, req, out_content_type, info, resp_headers,
metadata, container_list, container):
for key, (value, _timestamp) in metadata.items():
if value and (key.lower() in self.save_headers or
is_sys_or_user_meta('container', key)):
resp_headers[str_to_wsgi(key)] = str_to_wsgi(value)
listing = [self.update_data_record(record)
for record in container_list]
if out_content_type.endswith('/xml'):
body = listing_formats.container_to_xml(listing, container)
elif out_content_type.endswith('/json'):
body = json.dumps(listing).encode('ascii')
else:
body = listing_formats.listing_to_text(listing)
ret = Response(request=req, headers=resp_headers, body=body,
content_type=out_content_type, charset='utf-8')
ret.last_modified = Timestamp(resp_headers['X-PUT-Timestamp']).ceil()
if not ret.body:
ret.status_int = HTTP_NO_CONTENT
return ret
@public
@replication
@timing_stats(sample_rate=0.01)
def REPLICATE(self, req):
"""
Handle HTTP REPLICATE request (json-encoded RPC calls for replication.)
"""
post_args = split_and_validate_path(req, 3)
drive, partition, hash = post_args
try:
check_drive(self.root, drive, self.mount_check)
except ValueError:
return HTTPInsufficientStorage(drive=drive, request=req)
if not self.check_free_space(drive):
return HTTPInsufficientStorage(drive=drive, request=req)
try:
args = json.load(req.environ['wsgi.input'])
except ValueError as err:
return HTTPBadRequest(body=str(err), content_type='text/plain')
ret = self.replicator_rpc.dispatch(post_args, args)
ret.request = req
return ret
@public
@timing_stats()
def UPDATE(self, req):
"""
Handle HTTP UPDATE request (merge_items RPCs coming from the proxy.)
"""
drive, part, account, container = get_container_name_and_placement(req)
req_timestamp = valid_timestamp(req)
try:
check_drive(self.root, drive, self.mount_check)
except ValueError:
return HTTPInsufficientStorage(drive=drive, request=req)
if not self.check_free_space(drive):
return HTTPInsufficientStorage(drive=drive, request=req)
requested_policy_index = self.get_and_validate_policy_index(req)
broker = self._get_container_broker(drive, part, account, container)
self._maybe_autocreate(broker, req_timestamp, account,
requested_policy_index, req)
try:
objs = json.load(req.environ['wsgi.input'])
except ValueError as err:
return HTTPBadRequest(body=str(err), content_type='text/plain')
broker.merge_items(objs)
return HTTPAccepted(request=req)
@public
@timing_stats()
def POST(self, req):
"""
Handle HTTP POST request.
A POST request will update the container's ``put_timestamp``, unless
it has an ``X-Backend-No-Timestamp-Update`` header with a truthy value.
:param req: an instance of :class:`~swift.common.swob.Request`.
"""
drive, part, account, container = get_container_name_and_placement(req)
req_timestamp = valid_timestamp(req)
if 'x-container-sync-to' in req.headers:
err, sync_to, realm, realm_key = validate_sync_to(
req.headers['x-container-sync-to'], self.allowed_sync_hosts,
self.realms_conf)
if err:
return HTTPBadRequest(err)
try:
check_drive(self.root, drive, self.mount_check)
except ValueError:
return HTTPInsufficientStorage(drive=drive, request=req)
if not self.check_free_space(drive):
return HTTPInsufficientStorage(drive=drive, request=req)
broker = self._get_container_broker(drive, part, account, container)
if broker.is_deleted():
return HTTPNotFound(request=req)
if not config_true_value(
req.headers.get('x-backend-no-timestamp-update', False)):
broker.update_put_timestamp(req_timestamp.internal)
self._update_metadata(req, broker, req_timestamp, 'POST')
return HTTPNoContent(request=req)
def __call__(self, env, start_response):
start_time = time.time()
req = Request(env)
self.logger.txn_id = req.headers.get('x-trans-id', None)
if not check_utf8(wsgi_to_str(req.path_info), internal=True):
res = HTTPPreconditionFailed(body='Invalid UTF8 or contains NULL')
else:
try:
# disallow methods which have not been marked 'public'
if req.method not in self.allowed_methods:
res = HTTPMethodNotAllowed()
else:
res = getattr(self, req.method)(req)
except HTTPException as error_response:
res = error_response
except (Exception, Timeout):
self.logger.exception(
'ERROR __call__ error with %(method)s %(path)s ',
{'method': req.method, 'path': req.path})
res = HTTPInternalServerError(body=traceback.format_exc())
if self.log_requests:
trans_time = time.time() - start_time
log_message = get_log_line(req, res, trans_time, '',
self.log_format,
self.anonymization_method,
self.anonymization_salt)
if req.method.upper() == 'REPLICATE':
self.logger.debug(log_message)
else:
self.logger.info(log_message)
return res(env, start_response)
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI container server apps"""
conf = global_conf.copy()
conf.update(local_conf)
return ContainerController(conf)
| swift-master | swift/container/server.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
from collections import defaultdict
from eventlet import Timeout
from random import choice
from swift.container.sync_store import ContainerSyncStore
from swift.container.backend import ContainerBroker, DATADIR, SHARDED
from swift.container.reconciler import (
MISPLACED_OBJECTS_ACCOUNT, incorrect_policy_index,
get_reconciler_container_name, get_row_to_q_entry_translator)
from swift.common import db_replicator
from swift.common.storage_policy import POLICIES
from swift.common.swob import HTTPOk, HTTPAccepted
from swift.common.http import is_success
from swift.common.utils import Timestamp, majority_size, get_db_files
class ContainerReplicator(db_replicator.Replicator):
server_type = 'container'
brokerclass = ContainerBroker
datadir = DATADIR
default_port = 6201
def __init__(self, conf, logger=None):
super(ContainerReplicator, self).__init__(conf, logger=logger)
self.reconciler_cleanups = self.sync_store = None
def report_up_to_date(self, full_info):
reported_key_map = {
'reported_put_timestamp': 'put_timestamp',
'reported_delete_timestamp': 'delete_timestamp',
'reported_bytes_used': 'bytes_used',
'reported_object_count': 'count',
}
for reported, value_key in reported_key_map.items():
if full_info[reported] != full_info[value_key]:
return False
return True
def _gather_sync_args(self, replication_info):
parent = super(ContainerReplicator, self)
sync_args = parent._gather_sync_args(replication_info)
if len(POLICIES) > 1:
sync_args += tuple(replication_info[k] for k in
('status_changed_at', 'count',
'storage_policy_index'))
return sync_args
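    # Illustrative sketch, assuming the base db_replicator supplies seven
    # positional sync args (max_row, hash, id, created_at, put_timestamp,
    # delete_timestamp, metadata): with more than one storage policy
    # configured the tuple grows to ten elements, and the receiving
    # ContainerReplicatorRpc._parse_sync_args reads status_changed_at,
    # count and storage_policy_index from positions 7, 8 and 9.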
def _handle_sync_response(self, node, response, info, broker, http,
different_region=False):
if is_success(response.status):
remote_info = json.loads(response.data.decode('ascii'))
if incorrect_policy_index(info, remote_info):
status_changed_at = Timestamp.now()
broker.set_storage_policy_index(
remote_info['storage_policy_index'],
timestamp=status_changed_at.internal)
sync_timestamps = ('created_at', 'put_timestamp',
'delete_timestamp')
if any(info[key] != remote_info[key] for key in sync_timestamps):
broker.merge_timestamps(*(remote_info[key] for key in
sync_timestamps))
if remote_info.get('shard_max_row', -1) >= 0:
# Grab remote's shard ranges, too
self._fetch_and_merge_shard_ranges(http, broker)
return super(ContainerReplicator, self)._handle_sync_response(
node, response, info, broker, http, different_region)
def _sync_shard_ranges(self, broker, http, local_id):
# TODO: currently the number of shard ranges is expected to be _much_
        # less than the number of normal objects, so all are sync'd on each
        # cycle. However, in the future there should be sync points
        # maintained, much like for object syncing, so that only new shard
        # range rows are sync'd.
shard_range_data = broker.get_all_shard_range_data()
if shard_range_data:
if not self._send_replicate_request(
http, 'merge_shard_ranges', shard_range_data, local_id):
return False
self.logger.debug('%s synced %s shard ranges to %s',
broker.db_file, len(shard_range_data),
'%(ip)s:%(port)s/%(device)s' % http.node)
return True
def _choose_replication_mode(self, node, rinfo, info, local_sync, broker,
http, different_region):
if 'shard_max_row' in rinfo:
# Always replicate shard ranges to new-enough swift
shard_range_success = self._sync_shard_ranges(
broker, http, info['id'])
else:
shard_range_success = False
self.logger.warning(
'%s is unable to replicate shard ranges to peer %s; '
'peer may need upgrading', broker.db_file,
'%(ip)s:%(port)s/%(device)s' % node)
if broker.sharding_initiated():
if info['db_state'] == SHARDED and len(
broker.get_objects(limit=1)) == 0:
self.logger.debug('%s is sharded and has nothing more to '
'replicate to peer %s',
broker.db_file,
'%(ip)s:%(port)s/%(device)s' % node)
else:
# Only print the scary warning if there was something that
# didn't get replicated
self.logger.warning(
'%s is able to shard -- refusing to replicate objects to '
'peer %s; have shard ranges and will wait for cleaving',
broker.db_file,
'%(ip)s:%(port)s/%(device)s' % node)
self.stats['deferred'] += 1
return shard_range_success
success = super(ContainerReplicator, self)._choose_replication_mode(
node, rinfo, info, local_sync, broker, http,
different_region)
return shard_range_success and success
def _fetch_and_merge_shard_ranges(self, http, broker):
with Timeout(self.node_timeout):
response = http.replicate('get_shard_ranges')
if response and is_success(response.status):
broker.merge_shard_ranges(json.loads(
response.data.decode('ascii')))
def find_local_handoff_for_part(self, part):
"""
Find a device in the ring that is on this node on which to place a
partition. Preference is given to a device that is a primary location
for the partition. If no such device is found then a local device with
weight is chosen, and failing that any local device.
:param part: a partition
:returns: a node entry from the ring
"""
if not self._local_device_ids:
raise RuntimeError('Cannot find local handoff; no local devices')
for node in self.ring.get_part_nodes(part):
if node['id'] in self._local_device_ids:
return node
# don't attempt to minimize handoff depth: just choose any local
# device, but start by only picking a device with a weight, just in
# case some devices are being drained...
local_devs_with_weight = [
dev for dev in self._local_device_ids.values()
if dev.get('weight', 0)]
if local_devs_with_weight:
return choice(local_devs_with_weight)
# we have to return something, so choose any local device..
node = choice(list(self._local_device_ids.values()))
self.logger.warning(
"Could not find a non-zero weight device for handoff partition "
"%d, falling back device %s" %
(part, node['device']))
return node
def get_reconciler_broker(self, timestamp):
"""
Get a local instance of the reconciler container broker that is
appropriate to enqueue the given timestamp.
:param timestamp: the timestamp of the row to be enqueued
:returns: a local reconciler broker
"""
container = get_reconciler_container_name(timestamp)
if self.reconciler_containers and \
container in self.reconciler_containers:
return self.reconciler_containers[container][1]
account = MISPLACED_OBJECTS_ACCOUNT
part = self.ring.get_part(account, container)
node = self.find_local_handoff_for_part(part)
broker, initialized = ContainerBroker.create_broker(
os.path.join(self.root, node['device']), part, account, container,
logger=self.logger, put_timestamp=timestamp,
storage_policy_index=0)
self.logger.increment('reconciler_db_created' if initialized
else 'reconciler_db_exists')
if self.reconciler_containers is not None:
self.reconciler_containers[container] = part, broker, node['id']
return broker
def feed_reconciler(self, container, item_list):
"""
Add queue entries for rows in item_list to the local reconciler
container database.
:param container: the name of the reconciler container
:param item_list: the list of rows to enqueue
:returns: True if successfully enqueued
"""
try:
reconciler = self.get_reconciler_broker(container)
except Exception:
self.logger.exception('Failed to get reconciler broker for '
'container %s', container)
return False
self.logger.debug('Adding %d objects to the reconciler at %s',
len(item_list), reconciler.db_file)
try:
reconciler.merge_items(item_list)
except (Exception, Timeout):
self.logger.exception('UNHANDLED EXCEPTION: trying to merge '
'%d items to reconciler container %s',
len(item_list), reconciler.db_file)
return False
return True
def dump_to_reconciler(self, broker, point):
"""
        Look for object rows, for objects that were updated with the wrong
        storage policy, in the broker with a ``ROWID`` greater than the
        rowid given as ``point``.
:param broker: the container broker with misplaced objects
:param point: the last verified ``reconciler_sync_point``
        :returns: the last successfully enqueued rowid
"""
max_sync = broker.get_max_row()
misplaced = broker.get_misplaced_since(point, self.per_diff)
if not misplaced:
return max_sync
translator = get_row_to_q_entry_translator(broker)
errors = False
low_sync = point
while misplaced:
batches = defaultdict(list)
for item in misplaced:
container = get_reconciler_container_name(item['created_at'])
batches[container].append(translator(item))
for container, item_list in batches.items():
success = self.feed_reconciler(container, item_list)
if not success:
errors = True
point = misplaced[-1]['ROWID']
if not errors:
low_sync = point
misplaced = broker.get_misplaced_since(point, self.per_diff)
return low_sync
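    # Illustrative sketch, values assumed: misplaced rows are drained in
    # batches of up to per_diff rows; each batch is grouped by reconciler
    # container name, which in this codebase is derived from the row's
    # created_at timestamp (hour-sized buckets under the .misplaced_objects
    # account). low_sync stops advancing at the first batch containing a
    # group that fails to enqueue, so those rows are picked up again on the
    # next pass.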
def _post_replicate_hook(self, broker, info, responses):
if info['account'] == MISPLACED_OBJECTS_ACCOUNT:
return
try:
self.sync_store.update_sync_store(broker)
except Exception:
self.logger.exception('Failed to update sync_store %s' %
broker.db_file)
point = broker.get_reconciler_sync()
if not broker.has_multiple_policies() and info['max_row'] != point:
broker.update_reconciler_sync(info['max_row'])
return
max_sync = self.dump_to_reconciler(broker, point)
success = responses.count(True) >= majority_size(len(responses))
if max_sync > point and success:
# to be safe, only slide up the sync point with a majority on
# replication
broker.update_reconciler_sync(max_sync)
def cleanup_post_replicate(self, broker, orig_info, responses):
if broker.sharding_required():
# despite being a handoff, since we're sharding we're not going to
# do any cleanup so we can continue cleaving - this is still
# considered "success"
self.logger.debug(
'Not deleting db %s (requires sharding, state %s)',
broker.db_file, broker.get_db_state())
return True
return super(ContainerReplicator, self).cleanup_post_replicate(
broker, orig_info, responses)
def delete_db(self, broker):
"""
Ensure that reconciler databases are only cleaned up at the end of the
replication run.
"""
if (self.reconciler_cleanups is not None and
broker.account == MISPLACED_OBJECTS_ACCOUNT):
# this container shouldn't be here, make sure it's cleaned up
self.reconciler_cleanups[broker.container] = broker
return
if self.sync_store:
try:
# DB is going to get deleted. Be preemptive about it
self.sync_store.remove_synced_container(broker)
except Exception:
self.logger.exception('Failed to remove sync_store entry %s' %
broker.db_file)
return super(ContainerReplicator, self).delete_db(broker)
def replicate_reconcilers(self):
"""
Ensure any items merged to reconciler containers during replication
are pushed out to correct nodes and any reconciler containers that do
not belong on this node are removed.
"""
self.logger.info('Replicating %d reconciler containers',
len(self.reconciler_containers))
for part, reconciler, node_id in self.reconciler_containers.values():
self.cpool.spawn_n(
self._replicate_object, part, reconciler.db_file, node_id)
self.cpool.waitall()
        # wipe out the cache to disable the bypass in delete_db
cleanups = self.reconciler_cleanups
self.reconciler_cleanups = self.reconciler_containers = None
self.logger.info('Cleaning up %d reconciler containers',
len(cleanups))
for reconciler in cleanups.values():
self.cpool.spawn_n(self.delete_db, reconciler)
self.cpool.waitall()
self.logger.info('Finished reconciler replication')
def run_once(self, *args, **kwargs):
self.reconciler_containers = {}
self.reconciler_cleanups = {}
self.sync_store = ContainerSyncStore(self.root,
self.logger,
self.mount_check)
rv = super(ContainerReplicator, self).run_once(*args, **kwargs)
if any([self.reconciler_containers, self.reconciler_cleanups]):
self.replicate_reconcilers()
return rv
class ContainerReplicatorRpc(db_replicator.ReplicatorRpc):
def _db_file_exists(self, db_path):
return bool(get_db_files(db_path))
def _parse_sync_args(self, args):
parent = super(ContainerReplicatorRpc, self)
remote_info = parent._parse_sync_args(args)
if len(args) > 9:
remote_info['status_changed_at'] = args[7]
remote_info['count'] = args[8]
remote_info['storage_policy_index'] = args[9]
return remote_info
def _get_synced_replication_info(self, broker, remote_info):
"""
Sync the remote_info storage_policy_index if needed and return the
newly synced replication info.
:param broker: the database broker
:param remote_info: the remote replication info
:returns: local broker replication info
"""
info = broker.get_replication_info()
if incorrect_policy_index(info, remote_info):
status_changed_at = Timestamp.now().internal
broker.set_storage_policy_index(
remote_info['storage_policy_index'],
timestamp=status_changed_at)
info = broker.get_replication_info()
return info
def _abort_rsync_then_merge(self, db_file, old_filename):
if super(ContainerReplicatorRpc, self)._abort_rsync_then_merge(
db_file, old_filename):
return True
# if the local db has started sharding since the original 'sync'
# request then abort object replication now; instantiate a fresh broker
        # each time this check is performed so as to get the latest state
broker = ContainerBroker(db_file, logger=self.logger)
return broker.sharding_initiated()
def _post_rsync_then_merge_hook(self, existing_broker, new_broker):
        # Note: the following hook will need to change to use a pointer and
        # limit in the future.
new_broker.merge_shard_ranges(
existing_broker.get_all_shard_range_data())
def merge_shard_ranges(self, broker, args):
broker.merge_shard_ranges(args[0])
return HTTPAccepted()
def get_shard_ranges(self, broker, args):
return HTTPOk(headers={'Content-Type': 'application/json'},
body=json.dumps(broker.get_all_shard_range_data()))
| swift-master | swift/container/replicator.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pluggable Back-ends for Container Server
"""
import errno
import os
from uuid import uuid4
import six
from six.moves import range
from six.moves.urllib.parse import unquote
import sqlite3
from eventlet import tpool
from swift.common.constraints import CONTAINER_LISTING_LIMIT
from swift.common.exceptions import LockTimeout
from swift.common.utils import Timestamp, encode_timestamps, \
decode_timestamps, extract_swift_bytes, storage_directory, hash_path, \
ShardRange, renamer, MD5_OF_EMPTY_STRING, mkdirs, get_db_files, \
parse_db_filename, make_db_file_path, split_path, RESERVED_BYTE, \
ShardRangeList, Namespace
from swift.common.db import DatabaseBroker, utf8encode, BROKER_TIMEOUT, \
zero_like, DatabaseAlreadyExists, SQLITE_ARG_LIMIT
DATADIR = 'containers'
RECORD_TYPE_OBJECT = 'object'
RECORD_TYPE_SHARD = 'shard'
SHARD_RANGE_TABLE = 'shard_range'
NOTFOUND = 'not_found'
UNSHARDED = 'unsharded'
SHARDING = 'sharding'
SHARDED = 'sharded'
COLLAPSED = 'collapsed'
SHARD_STATS_STATES = [ShardRange.ACTIVE, ShardRange.SHARDING,
ShardRange.SHRINKING]
SHARD_LISTING_STATES = SHARD_STATS_STATES + [ShardRange.CLEAVED]
SHARD_UPDATE_STATES = [ShardRange.CREATED, ShardRange.CLEAVED,
ShardRange.ACTIVE, ShardRange.SHARDING]
# when auditing a shard gets its own shard range, which could be in any state
# except FOUND, and any potential acceptors excluding FOUND ranges that may be
# unwanted overlaps
SHARD_AUDITING_STATES = [ShardRange.CREATED, ShardRange.CLEAVED,
ShardRange.ACTIVE, ShardRange.SHARDING,
ShardRange.SHARDED, ShardRange.SHRINKING,
ShardRange.SHRUNK]
# shards may not be fully populated while in the FOUND and CREATED
# states, so shards should only update their own shard range's object
# stats when they are in the following states
SHARD_UPDATE_STAT_STATES = [ShardRange.CLEAVED, ShardRange.ACTIVE,
ShardRange.SHARDING, ShardRange.SHARDED,
ShardRange.SHRINKING, ShardRange.SHRUNK]
# attribute names in order used when transforming shard ranges from dicts to
# tuples and vice-versa
SHARD_RANGE_KEYS = ('name', 'timestamp', 'lower', 'upper', 'object_count',
'bytes_used', 'meta_timestamp', 'deleted', 'state',
'state_timestamp', 'epoch', 'reported', 'tombstones')
POLICY_STAT_TABLE_CREATE = '''
CREATE TABLE policy_stat (
storage_policy_index INTEGER PRIMARY KEY,
object_count INTEGER DEFAULT 0,
bytes_used INTEGER DEFAULT 0
);
'''
POLICY_STAT_TRIGGER_SCRIPT = '''
CREATE TRIGGER object_insert_policy_stat AFTER INSERT ON object
BEGIN
UPDATE policy_stat
SET object_count = object_count + (1 - new.deleted),
bytes_used = bytes_used + new.size
WHERE storage_policy_index = new.storage_policy_index;
INSERT INTO policy_stat (
storage_policy_index, object_count, bytes_used)
SELECT new.storage_policy_index,
(1 - new.deleted),
new.size
WHERE NOT EXISTS(
SELECT changes() as change
FROM policy_stat
WHERE change <> 0
);
UPDATE container_info
SET hash = chexor(hash, new.name, new.created_at);
END;
CREATE TRIGGER object_delete_policy_stat AFTER DELETE ON object
BEGIN
UPDATE policy_stat
SET object_count = object_count - (1 - old.deleted),
bytes_used = bytes_used - old.size
WHERE storage_policy_index = old.storage_policy_index;
UPDATE container_info
SET hash = chexor(hash, old.name, old.created_at);
END;
'''
CONTAINER_INFO_TABLE_SCRIPT = '''
CREATE TABLE container_info (
account TEXT,
container TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
reported_put_timestamp TEXT DEFAULT '0',
reported_delete_timestamp TEXT DEFAULT '0',
reported_object_count INTEGER DEFAULT 0,
reported_bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0',
metadata TEXT DEFAULT '',
x_container_sync_point1 INTEGER DEFAULT -1,
x_container_sync_point2 INTEGER DEFAULT -1,
storage_policy_index INTEGER DEFAULT 0,
reconciler_sync_point INTEGER DEFAULT -1
);
'''
CONTAINER_STAT_VIEW_SCRIPT = '''
CREATE VIEW container_stat
AS SELECT ci.account, ci.container, ci.created_at,
ci.put_timestamp, ci.delete_timestamp,
ci.reported_put_timestamp, ci.reported_delete_timestamp,
ci.reported_object_count, ci.reported_bytes_used, ci.hash,
ci.id, ci.status, ci.status_changed_at, ci.metadata,
ci.x_container_sync_point1, ci.x_container_sync_point2,
ci.reconciler_sync_point,
ci.storage_policy_index,
coalesce(ps.object_count, 0) AS object_count,
coalesce(ps.bytes_used, 0) AS bytes_used
FROM container_info ci LEFT JOIN policy_stat ps
ON ci.storage_policy_index = ps.storage_policy_index;
CREATE TRIGGER container_stat_update
INSTEAD OF UPDATE ON container_stat
BEGIN
UPDATE container_info
SET account = NEW.account,
container = NEW.container,
created_at = NEW.created_at,
put_timestamp = NEW.put_timestamp,
delete_timestamp = NEW.delete_timestamp,
reported_put_timestamp = NEW.reported_put_timestamp,
reported_delete_timestamp = NEW.reported_delete_timestamp,
reported_object_count = NEW.reported_object_count,
reported_bytes_used = NEW.reported_bytes_used,
hash = NEW.hash,
id = NEW.id,
status = NEW.status,
status_changed_at = NEW.status_changed_at,
metadata = NEW.metadata,
x_container_sync_point1 = NEW.x_container_sync_point1,
x_container_sync_point2 = NEW.x_container_sync_point2,
storage_policy_index = NEW.storage_policy_index,
reconciler_sync_point = NEW.reconciler_sync_point;
END;
'''
def update_new_item_from_existing(new_item, existing):
"""
Compare the data and meta related timestamps of a new object item with
the timestamps of an existing object record, and update the new item
with data and/or meta related attributes from the existing record if
their timestamps are newer.
The multiple timestamps are encoded into a single string for storing
in the 'created_at' column of the objects db table.
:param new_item: A dict of object update attributes
:param existing: A dict of existing object attributes
    :return: True if any attributes of the new item dict were found to be
        newer than those of the existing record, and were therefore not
        overwritten by it; otherwise False, implying that the updated item
        is equal to the existing record.
"""
# item[created_at] may be updated so keep a copy of the original
# value in case we process this item again
new_item.setdefault('data_timestamp', new_item['created_at'])
# content-type and metadata timestamps may be encoded in
# item[created_at], or may be set explicitly.
item_ts_data, item_ts_ctype, item_ts_meta = decode_timestamps(
new_item['data_timestamp'])
if new_item.get('ctype_timestamp'):
item_ts_ctype = Timestamp(new_item.get('ctype_timestamp'))
item_ts_meta = item_ts_ctype
if new_item.get('meta_timestamp'):
item_ts_meta = Timestamp(new_item.get('meta_timestamp'))
if not existing:
# encode new_item timestamps into one string for db record
new_item['created_at'] = encode_timestamps(
item_ts_data, item_ts_ctype, item_ts_meta)
return True
# decode existing timestamp into separate data, content-type and
# metadata timestamps
rec_ts_data, rec_ts_ctype, rec_ts_meta = decode_timestamps(
existing['created_at'])
# Extract any swift_bytes values from the content_type values. This is
# necessary because the swift_bytes value to persist should be that at the
# most recent data timestamp whereas the content-type value to persist is
# that at the most recent content-type timestamp. The two values happen to
# be stored in the same database column for historical reasons.
for item in (new_item, existing):
content_type, swift_bytes = extract_swift_bytes(item['content_type'])
item['content_type'] = content_type
item['swift_bytes'] = swift_bytes
newer_than_existing = [True, True, True]
if rec_ts_data >= item_ts_data:
# apply data attributes from existing record
new_item.update([(k, existing[k])
for k in ('size', 'etag', 'deleted', 'swift_bytes')])
item_ts_data = rec_ts_data
newer_than_existing[0] = False
if rec_ts_ctype >= item_ts_ctype:
# apply content-type attribute from existing record
new_item['content_type'] = existing['content_type']
item_ts_ctype = rec_ts_ctype
newer_than_existing[1] = False
if rec_ts_meta >= item_ts_meta:
# apply metadata timestamp from existing record
item_ts_meta = rec_ts_meta
newer_than_existing[2] = False
# encode updated timestamps into one string for db record
new_item['created_at'] = encode_timestamps(
item_ts_data, item_ts_ctype, item_ts_meta)
# append the most recent swift_bytes onto the most recent content_type in
# new_item and restore existing to its original state
for item in (new_item, existing):
if item['swift_bytes']:
item['content_type'] += ';swift_bytes=%s' % item['swift_bytes']
del item['swift_bytes']
return any(newer_than_existing)
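# Illustrative sketch with assumed timestamps t1 < t2 < t3: if the existing
# record's data timestamp is t2 and the new item arrives with data timestamp
# t1 but meta timestamp t3, then the new item keeps the existing size, etag,
# deleted and content-type values, contributes only its newer metadata
# timestamp, and its 'created_at' is re-encoded as
# encode_timestamps(t2, t2, t3); the function returns True because one
# element of the new item was newer than the existing record.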
def merge_shards(shard_data, existing):
"""
Compares ``shard_data`` with ``existing`` and updates ``shard_data`` with
any items of ``existing`` that take precedence over the corresponding item
in ``shard_data``.
    :param shard_data: a dict representation of a shard range that may be
        modified by this method.
    :param existing: a dict representation of a shard range.
    :returns: True if ``shard_data`` has any item(s) that are considered to
        take precedence over the corresponding item in ``existing``
"""
if not existing:
return True
if existing['timestamp'] < shard_data['timestamp']:
# note that currently we do not roll forward any meta or state from
        # an item that was created at an older time; a newer created time
        # trumps
shard_data['reported'] = 0 # reset the latch
return True
elif existing['timestamp'] > shard_data['timestamp']:
return False
new_content = False
# timestamp must be the same, so preserve existing range bounds and deleted
for k in ('lower', 'upper', 'deleted'):
shard_data[k] = existing[k]
# now we need to look for meta data updates
if existing['meta_timestamp'] >= shard_data['meta_timestamp']:
for k in ('object_count', 'bytes_used', 'meta_timestamp'):
shard_data[k] = existing[k]
shard_data['tombstones'] = existing.get('tombstones', -1)
else:
new_content = True
# We can latch the reported flag
if existing['reported'] and \
existing['object_count'] == shard_data['object_count'] and \
existing['bytes_used'] == shard_data['bytes_used'] and \
existing.get('tombstones', -1) == shard_data['tombstones'] and \
existing['state'] == shard_data['state'] and \
existing['epoch'] == shard_data['epoch']:
shard_data['reported'] = 1
else:
shard_data.setdefault('reported', 0)
if shard_data['reported'] and not existing['reported']:
new_content = True
if (existing['state_timestamp'] == shard_data['state_timestamp']
and shard_data['state'] > existing['state']):
new_content = True
elif existing['state_timestamp'] >= shard_data['state_timestamp']:
for k in ('state', 'state_timestamp', 'epoch'):
shard_data[k] = existing[k]
else:
new_content = True
return new_content
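# Illustrative sketch with assumed values: for an incoming shard range dict
# and an existing row with the same 'timestamp', the incoming dict keeps the
# existing 'lower', 'upper' and 'deleted' values; its object stats are only
# retained if its 'meta_timestamp' is newer than the existing one, and its
# 'state'/'epoch' are only retained if its 'state_timestamp' is newer (or
# equal with a higher state value). An incoming dict whose 'timestamp' is
# older than the existing row's is ignored entirely (merge_shards returns
# False), while a newer 'timestamp' wins outright and resets the 'reported'
# latch.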
def sift_shard_ranges(new_shard_ranges, existing_shard_ranges):
"""
Compares new and existing shard ranges, updating the new shard ranges with
any more recent state from the existing, and returns shard ranges sorted
into those that need adding because they contain new or updated state and
those that need deleting because their state has been superseded.
:param new_shard_ranges: a list of dicts, each of which represents a shard
range.
:param existing_shard_ranges: a dict mapping shard range names to dicts
representing a shard range.
:return: a tuple (to_add, to_delete); to_add is a list of dicts, each of
which represents a shard range that is to be added to the existing
shard ranges; to_delete is a set of shard range names that are to be
deleted.
"""
to_delete = set()
to_add = {}
for item in new_shard_ranges:
item_ident = item['name']
existing = existing_shard_ranges.get(item_ident)
if merge_shards(item, existing):
# exists with older timestamp
if item_ident in existing_shard_ranges:
to_delete.add(item_ident)
# duplicate entries in item_list
if (item_ident not in to_add or
merge_shards(item, to_add[item_ident])):
to_add[item_ident] = item
return to_add.values(), to_delete
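# Illustrative sketch with assumed values: if new_shard_ranges contains two
# dicts for the same shard range name, only the one that takes precedence
# per merge_shards survives into to_add; if that name also exists in
# existing_shard_ranges and its row is superseded by the new dict, the name
# is returned in to_delete so the old row can be removed before the new row
# is inserted.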
class ContainerBroker(DatabaseBroker):
"""
Encapsulates working with a container database.
Note that this may involve multiple on-disk DB files if the container
becomes sharded:
* :attr:`_db_file` is the path to the legacy container DB name, i.e.
``<hash>.db``. This file should exist for an initialised broker that
has never been sharded, but will not exist once a container has been
sharded.
* :attr:`db_files` is a list of existing db files for the broker. This
list should have at least one entry for an initialised broker, and
should have two entries while a broker is in SHARDING state.
* :attr:`db_file` is the path to whichever db is currently authoritative
for the container. Depending on the container's state, this may not be
the same as the ``db_file`` argument given to :meth:`~__init__`, unless
``force_db_file`` is True in which case :attr:`db_file` is always equal
to the ``db_file`` argument given to :meth:`~__init__`.
* :attr:`pending_file` is always equal to :attr:`_db_file` extended with
``.pending``, i.e. ``<hash>.db.pending``.
"""
db_type = 'container'
db_contains_type = 'object'
db_reclaim_timestamp = 'created_at'
delete_meta_whitelist = ['x-container-sysmeta-shard-quoted-root',
'x-container-sysmeta-shard-root',
'x-container-sysmeta-sharding']
def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None,
account=None, container=None, pending_timeout=None,
stale_reads_ok=False, skip_commits=False,
force_db_file=False):
self._init_db_file = db_file
base_db_file = make_db_file_path(db_file, None)
super(ContainerBroker, self).__init__(
base_db_file, timeout, logger, account, container, pending_timeout,
stale_reads_ok, skip_commits=skip_commits)
# the root account and container are populated on demand
self._root_account = self._root_container = None
self._force_db_file = force_db_file
self._db_files = None
@classmethod
def create_broker(cls, device_path, part, account, container, logger=None,
epoch=None, put_timestamp=None,
storage_policy_index=None):
"""
Create a ContainerBroker instance. If the db doesn't exist, initialize
the db file.
:param device_path: device path
:param part: partition number
:param account: account name string
:param container: container name string
:param logger: a logger instance
:param epoch: a timestamp to include in the db filename
:param put_timestamp: initial timestamp if broker needs to be
initialized
:param storage_policy_index: the storage policy index
:return: a tuple of (``broker``, ``initialized``) where ``broker`` is
an instance of :class:`swift.container.backend.ContainerBroker` and
``initialized`` is True if the db file was initialized, False
otherwise.
"""
hsh = hash_path(account, container)
db_dir = storage_directory(DATADIR, part, hsh)
db_path = make_db_file_path(
os.path.join(device_path, db_dir, hsh + '.db'), epoch)
broker = ContainerBroker(db_path, account=account, container=container,
logger=logger)
initialized = False
if not os.path.exists(broker.db_file):
try:
broker.initialize(put_timestamp, storage_policy_index)
initialized = True
except DatabaseAlreadyExists:
pass
return broker, initialized
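    # Illustrative sketch with assumed arguments: callers that may race to
    # create the same container db (e.g. when building a local handoff db)
    # can use this helper:
    #   broker, initialized = ContainerBroker.create_broker(
    #       '/srv/node/sda1', 123, 'a', 'c',
    #       put_timestamp=Timestamp.now().internal,
    #       storage_policy_index=0)
    # 'initialized' is False when another process created the db first,
    # because DatabaseAlreadyExists is swallowed.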
def get_db_state(self):
"""
Returns the current state of on disk db files.
"""
if not self.db_files:
return NOTFOUND
if len(self.db_files) > 1:
return SHARDING
if self.db_epoch is None:
# never been sharded
return UNSHARDED
if self.db_epoch != self.get_own_shard_range().epoch:
return UNSHARDED
if not self.has_other_shard_ranges():
return COLLAPSED
return SHARDED
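    # Illustrative sketch, file naming assumed: with <hash>.db as the legacy
    # db file and <hash>_<epoch>.db as an epoch db file, roughly:
    #   no db files on disk                                  -> NOTFOUND
    #   a db file with no epoch, or whose epoch differs
    #     from the own shard range's epoch                   -> UNSHARDED
    #   two db files (legacy plus epoch)                     -> SHARDING
    #   a matching-epoch db file with other shard ranges     -> SHARDED
    #   a matching-epoch db file with no other shard ranges  -> COLLAPSED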
def sharding_initiated(self):
"""
Returns True if a broker has shard range state that would be necessary
for sharding to have been initiated, False otherwise.
"""
own_shard_range = self.get_own_shard_range()
if own_shard_range.state in ShardRange.CLEAVING_STATES:
return self.has_other_shard_ranges()
return False
def sharding_required(self):
"""
Returns True if a broker has shard range state that would be necessary
for sharding to have been initiated but has not yet completed sharding,
False otherwise.
"""
db_state = self.get_db_state()
return (db_state == SHARDING or
(db_state == UNSHARDED and self.sharding_initiated()))
def is_sharded(self):
return self.get_db_state() == SHARDED
def reload_db_files(self):
"""
Reloads the cached list of valid on disk db files for this broker.
"""
# reset connection so the next access will use the correct DB file
self.conn = None
self._db_files = get_db_files(self._init_db_file)
@property
def db_files(self):
"""
Gets the cached list of valid db files that exist on disk for this
broker.
The cached list may be refreshed by calling
:meth:`~swift.container.backend.ContainerBroker.reload_db_files`.
:return: A list of paths to db files ordered by ascending epoch;
the list may be empty.
"""
if not self._db_files:
self.reload_db_files()
return self._db_files
@property
def db_file(self):
"""
Get the path to the primary db file for this broker. This is typically
the db file for the most recent sharding epoch. However, if no db files
exist on disk, or if ``force_db_file`` was True when the broker was
constructed, then the primary db file is the file passed to the broker
constructor.
:return: A path to a db file; the file does not necessarily exist.
"""
if self._force_db_file:
return self._init_db_file
if self.db_files:
return self.db_files[-1]
return self._init_db_file
@property
def db_epoch(self):
hash_, epoch, ext = parse_db_filename(self.db_file)
return epoch
@property
def storage_policy_index(self):
if not hasattr(self, '_storage_policy_index'):
self._storage_policy_index = \
self._get_info()['storage_policy_index']
return self._storage_policy_index
@property
def path(self):
self._populate_instance_cache()
return '%s/%s' % (self.account, self.container)
def _initialize(self, conn, put_timestamp, storage_policy_index):
"""
Create a brand new container database (tables, indices, triggers, etc.)
"""
if not self.account:
raise ValueError(
'Attempting to create a new database with no account set')
if not self.container:
raise ValueError(
'Attempting to create a new database with no container set')
if storage_policy_index is None:
storage_policy_index = 0
self.create_object_table(conn)
self.create_policy_stat_table(conn, storage_policy_index)
self.create_container_info_table(conn, put_timestamp,
storage_policy_index)
self.create_shard_range_table(conn)
self._db_files = None
def create_object_table(self, conn):
"""
Create the object table which is specific to the container DB.
Not a part of Pluggable Back-ends, internal to the baseline code.
:param conn: DB connection object
"""
conn.executescript("""
CREATE TABLE object (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
created_at TEXT,
size INTEGER,
content_type TEXT,
etag TEXT,
deleted INTEGER DEFAULT 0,
storage_policy_index INTEGER DEFAULT 0
);
CREATE INDEX ix_object_deleted_name ON object (deleted, name);
CREATE TRIGGER object_update BEFORE UPDATE ON object
BEGIN
SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
END;
""" + POLICY_STAT_TRIGGER_SCRIPT)
def create_container_info_table(self, conn, put_timestamp,
storage_policy_index):
"""
Create the container_info table which is specific to the container DB.
Not a part of Pluggable Back-ends, internal to the baseline code.
Also creates the container_stat view.
:param conn: DB connection object
:param put_timestamp: put timestamp
:param storage_policy_index: storage policy index
"""
if put_timestamp is None:
put_timestamp = Timestamp(0).internal
# The container_stat view is for compatibility; old versions of Swift
# expected a container_stat table with columns "object_count" and
# "bytes_used", but when that stuff became per-storage-policy and
# moved to the policy_stat table, we stopped creating those columns in
# container_stat.
#
# To retain compatibility, we create the container_stat view with some
# triggers to make it behave like the old container_stat table. This
# way, if an old version of Swift encounters a database with the new
# schema, it can still work.
#
# Note that this can occur during a rolling Swift upgrade if a DB gets
# rsynced from an old node to a new, so it's necessary for
# availability during upgrades. The fact that it enables downgrades is
# a nice bonus.
conn.executescript(CONTAINER_INFO_TABLE_SCRIPT +
CONTAINER_STAT_VIEW_SCRIPT)
conn.execute("""
INSERT INTO container_info (account, container, created_at, id,
put_timestamp, status_changed_at, storage_policy_index)
VALUES (?, ?, ?, ?, ?, ?, ?);
""", (self.account, self.container, Timestamp.now().internal,
self._new_db_id(), put_timestamp, put_timestamp,
storage_policy_index))
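    # Illustrative sketch (not in the original source) of the compatibility
    # arrangement described above: container_info holds the real rows, while
    # a container_stat view re-exposes the legacy columns, roughly along
    # these lines (the actual SQL lives in CONTAINER_STAT_VIEW_SCRIPT):
    #
    #   CREATE VIEW container_stat AS
    #     SELECT ci.*, ps.object_count, ps.bytes_used
    #     FROM container_info ci, policy_stat ps
    #     WHERE ci.storage_policy_index = ps.storage_policy_index;
    #
    # with INSTEAD OF triggers routing legacy UPDATEs on the view to the
    # underlying tables.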
def create_policy_stat_table(self, conn, storage_policy_index=0):
"""
Create policy_stat table.
:param conn: DB connection object
:param storage_policy_index: the policy_index the container is
being created with
"""
conn.executescript(POLICY_STAT_TABLE_CREATE)
conn.execute("""
INSERT INTO policy_stat (storage_policy_index)
VALUES (?)
""", (storage_policy_index,))
def create_shard_range_table(self, conn):
"""
Create the shard_range table which is specific to the container DB.
:param conn: DB connection object
"""
# Use execute (not executescript) so we get the benefits of our
# GreenDBConnection. Creating a table requires a whole-DB lock;
# *any* in-progress cursor will otherwise trip a "database is locked"
# error.
conn.execute("""
CREATE TABLE %s (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
timestamp TEXT,
lower TEXT,
upper TEXT,
object_count INTEGER DEFAULT 0,
bytes_used INTEGER DEFAULT 0,
meta_timestamp TEXT,
deleted INTEGER DEFAULT 0,
state INTEGER,
state_timestamp TEXT,
epoch TEXT,
reported INTEGER DEFAULT 0,
tombstones INTEGER DEFAULT -1
);
""" % SHARD_RANGE_TABLE)
conn.execute("""
CREATE TRIGGER shard_range_update BEFORE UPDATE ON %s
BEGIN
SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
END;
""" % SHARD_RANGE_TABLE)
def get_db_version(self, conn):
if self._db_version == -1:
self._db_version = 0
for row in conn.execute('''
SELECT name FROM sqlite_master
WHERE name = 'ix_object_deleted_name' '''):
self._db_version = 1
return self._db_version
def _get_deleted_key(self, connection):
if self.get_db_version(connection) < 1:
return '+deleted'
return 'deleted'
def _newid(self, conn):
conn.execute('''
UPDATE container_stat
SET reported_put_timestamp = 0, reported_delete_timestamp = 0,
reported_object_count = 0, reported_bytes_used = 0''')
def _commit_puts_load(self, item_list, entry):
"""See :func:`swift.common.db.DatabaseBroker._commit_puts_load`"""
(name, timestamp, size, content_type, etag, deleted) = entry[:6]
if len(entry) > 6:
storage_policy_index = entry[6]
else:
storage_policy_index = 0
content_type_timestamp = meta_timestamp = None
if len(entry) > 7:
content_type_timestamp = entry[7]
if len(entry) > 8:
meta_timestamp = entry[8]
item_list.append({'name': name,
'created_at': timestamp,
'size': size,
'content_type': content_type,
'etag': etag,
'deleted': deleted,
'storage_policy_index': storage_policy_index,
'ctype_timestamp': content_type_timestamp,
'meta_timestamp': meta_timestamp})
def _empty(self):
self._commit_puts_stale_ok()
with self.get() as conn:
try:
row = conn.execute(
'SELECT max(object_count) from policy_stat').fetchone()
except sqlite3.OperationalError as err:
if not any(msg in str(err) for msg in (
"no such column: storage_policy_index",
"no such table: policy_stat")):
raise
row = conn.execute(
'SELECT object_count from container_stat').fetchone()
return zero_like(row[0])
def empty(self):
"""
Check if container DB is empty.
This method uses more stringent checks on object count than
:meth:`is_deleted`: this method checks that there are no objects in any
policy; if the container is in the process of sharding then both fresh
and retiring databases are checked to be empty; if a root container has
shard ranges then they are checked to be empty.
:returns: True if the database has no active objects, False otherwise
"""
if not all(broker._empty() for broker in self.get_brokers()):
return False
if self.is_root_container() and self.sharding_initiated():
# sharded shards don't get updates from their shards so their shard
# usage should not be relied upon
return self.get_shard_usage()['object_count'] <= 0
return True
def delete_object(self, name, timestamp, storage_policy_index=0):
"""
Mark an object deleted.
:param name: object name to be deleted
:param timestamp: timestamp when the object was marked as deleted
:param storage_policy_index: the storage policy index for the object
"""
self.put_object(name, timestamp, 0, 'application/deleted', 'noetag',
deleted=1, storage_policy_index=storage_policy_index)
def make_tuple_for_pickle(self, record):
return (record['name'], record['created_at'], record['size'],
record['content_type'], record['etag'], record['deleted'],
record['storage_policy_index'],
record['ctype_timestamp'],
record['meta_timestamp'])
def put_object(self, name, timestamp, size, content_type, etag, deleted=0,
storage_policy_index=0, ctype_timestamp=None,
meta_timestamp=None):
"""
Creates an object in the DB with its metadata.
:param name: object name to be created
:param timestamp: timestamp of when the object was created
:param size: object size
:param content_type: object content-type
:param etag: object etag
:param deleted: if True, marks the object as deleted and sets the
deleted_at timestamp to timestamp
:param storage_policy_index: the storage policy index for the object
:param ctype_timestamp: timestamp of when content_type was last
updated
:param meta_timestamp: timestamp of when metadata was last updated
"""
record = {'name': name, 'created_at': timestamp, 'size': size,
'content_type': content_type, 'etag': etag,
'deleted': deleted,
'storage_policy_index': storage_policy_index,
'ctype_timestamp': ctype_timestamp,
'meta_timestamp': meta_timestamp}
self.put_record(record)
def remove_objects(self, lower, upper, max_row=None):
"""
Removes object records in the given namespace range from the object
table.
Note that objects are removed regardless of their storage_policy_index.
:param lower: defines the lower bound of object names that will be
removed; names greater than this value will be removed; names less
than or equal to this value will not be removed.
:param upper: defines the upper bound of object names that will be
removed; names less than or equal to this value will be removed;
names greater than this value will not be removed. The empty string
is interpreted as there being no upper bound.
        :param max_row: if specified, only rows less than or equal to max_row
            will be removed
"""
query_conditions = []
query_args = []
if max_row is not None:
query_conditions.append('ROWID <= ?')
query_args.append(str(max_row))
if lower:
query_conditions.append('name > ?')
query_args.append(lower)
if upper:
query_conditions.append('name <= ?')
query_args.append(upper)
query = 'DELETE FROM object WHERE deleted in (0, 1)'
if query_conditions:
query += ' AND ' + ' AND '.join(query_conditions)
with self.get() as conn:
conn.execute(query, query_args)
conn.commit()
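    # Hedged usage sketch (added; 'broker' is an assumed ContainerBroker
    # instance): remove object rows in the half-open namespace (lower, upper],
    # e.g. everything above 'm' up to and including 'z', but only rows at or
    # below a previously captured max row:
    #
    #   max_row = broker.get_max_row()
    #   broker.remove_objects('m', 'z', max_row=max_row)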
def _is_deleted_info(self, object_count, put_timestamp, delete_timestamp,
**kwargs):
"""
Apply delete logic to database info.
:returns: True if the DB is considered to be deleted, False otherwise
"""
# The container is considered deleted if the delete_timestamp
# value is greater than the put_timestamp, and there are no
# objects in the container.
return zero_like(object_count) and (
Timestamp(delete_timestamp) > Timestamp(put_timestamp))
def _is_deleted(self, conn):
"""
Check if the DB is considered to be deleted.
        The object count used in this check is the same as the container
        object count that would be returned in the result of :meth:`get_info`
        and exposed to a client, i.e. it is based on the container_stat view
        for the current storage policy index or the relevant shard range
        usage.
:param conn: database conn
:returns: True if the DB is considered to be deleted, False otherwise
"""
info = conn.execute('''
SELECT put_timestamp, delete_timestamp, object_count
FROM container_stat''').fetchone()
info = dict(info)
info.update(self._get_alternate_object_stats()[1])
return self._is_deleted_info(**info)
def is_old_enough_to_reclaim(self, now, reclaim_age):
with self.get() as conn:
info = conn.execute('''
SELECT put_timestamp, delete_timestamp
FROM container_stat''').fetchone()
return (Timestamp(now - reclaim_age) >
Timestamp(info['delete_timestamp']) >
Timestamp(info['put_timestamp']))
def is_empty_enough_to_reclaim(self):
if self.is_root_container() and (self.has_other_shard_ranges() or
self.get_db_state() == SHARDING):
return False
return self.empty()
def is_reclaimable(self, now, reclaim_age):
return self.is_old_enough_to_reclaim(now, reclaim_age) and \
self.is_empty_enough_to_reclaim()
def get_info_is_deleted(self):
"""
Get the is_deleted status and info for the container.
        :returns: a tuple, in the form (info, is_deleted): info is a dict as
            returned by get_info and is_deleted is a boolean.
"""
if not os.path.exists(self.db_file):
return {}, True
info = self.get_info()
return info, self._is_deleted_info(**info)
def get_replication_info(self):
info = super(ContainerBroker, self).get_replication_info()
info['shard_max_row'] = self.get_max_row(SHARD_RANGE_TABLE)
return info
def _do_get_info_query(self, conn):
data = None
trailing_sync = 'x_container_sync_point1, x_container_sync_point2'
trailing_pol = 'storage_policy_index'
errors = set()
while not data:
try:
data = conn.execute(('''
SELECT account, container, created_at, put_timestamp,
delete_timestamp, status, status_changed_at,
object_count, bytes_used,
reported_put_timestamp, reported_delete_timestamp,
reported_object_count, reported_bytes_used, hash,
id, %s, %s
FROM container_stat
''') % (trailing_sync, trailing_pol)).fetchone()
except sqlite3.OperationalError as err:
err_msg = str(err)
if err_msg in errors:
# only attempt migration once
raise
errors.add(err_msg)
if 'no such column: storage_policy_index' in err_msg:
trailing_pol = '0 AS storage_policy_index'
elif 'no such column: x_container_sync_point' in err_msg:
trailing_sync = '-1 AS x_container_sync_point1, ' \
'-1 AS x_container_sync_point2'
else:
raise
data = dict(data)
# populate instance cache
self._storage_policy_index = data['storage_policy_index']
self.account = data['account']
self.container = data['container']
return data
def _get_info(self):
self._commit_puts_stale_ok()
with self.get() as conn:
return self._do_get_info_query(conn)
def _populate_instance_cache(self, conn=None):
# load cached instance attributes from the database if necessary
if self.container is None:
with self.maybe_get(conn) as conn:
self._do_get_info_query(conn)
def _get_alternate_object_stats(self):
state = self.get_db_state()
if state == SHARDING:
other_info = self.get_brokers()[0]._get_info()
stats = {'object_count': other_info['object_count'],
'bytes_used': other_info['bytes_used']}
elif state == SHARDED and self.is_root_container():
stats = self.get_shard_usage()
else:
stats = {}
return state, stats
def get_info(self):
"""
Get global data for the container.
:returns: dict with keys: account, container, created_at,
put_timestamp, delete_timestamp, status, status_changed_at,
object_count, bytes_used, reported_put_timestamp,
reported_delete_timestamp, reported_object_count,
reported_bytes_used, hash, id, x_container_sync_point1,
x_container_sync_point2, and storage_policy_index,
db_state.
"""
data = self._get_info()
state, stats = self._get_alternate_object_stats()
data.update(stats)
data['db_state'] = state
return data
def set_x_container_sync_points(self, sync_point1, sync_point2):
with self.get() as conn:
try:
self._set_x_container_sync_points(conn, sync_point1,
sync_point2)
except sqlite3.OperationalError as err:
if 'no such column: x_container_sync_point' not in \
str(err):
raise
self._migrate_add_container_sync_points(conn)
self._set_x_container_sync_points(conn, sync_point1,
sync_point2)
conn.commit()
def _set_x_container_sync_points(self, conn, sync_point1, sync_point2):
if sync_point1 is not None and sync_point2 is not None:
conn.execute('''
UPDATE container_stat
SET x_container_sync_point1 = ?,
x_container_sync_point2 = ?
''', (sync_point1, sync_point2))
elif sync_point1 is not None:
conn.execute('''
UPDATE container_stat
SET x_container_sync_point1 = ?
''', (sync_point1,))
elif sync_point2 is not None:
conn.execute('''
UPDATE container_stat
SET x_container_sync_point2 = ?
''', (sync_point2,))
def get_policy_stats(self):
with self.get() as conn:
try:
info = conn.execute('''
SELECT storage_policy_index, object_count, bytes_used
FROM policy_stat
''').fetchall()
except sqlite3.OperationalError as err:
if not any(msg in str(err) for msg in (
"no such column: storage_policy_index",
"no such table: policy_stat")):
raise
info = conn.execute('''
SELECT 0 as storage_policy_index, object_count, bytes_used
FROM container_stat
''').fetchall()
policy_stats = {}
for row in info:
stats = dict(row)
key = stats.pop('storage_policy_index')
policy_stats[key] = stats
return policy_stats
def has_multiple_policies(self):
with self.get() as conn:
try:
curs = conn.execute('''
SELECT count(storage_policy_index)
FROM policy_stat
''').fetchone()
except sqlite3.OperationalError as err:
if 'no such table: policy_stat' not in str(err):
raise
# no policy_stat row
return False
if curs and curs[0] > 1:
return True
# only one policy_stat row
return False
def set_storage_policy_index(self, policy_index, timestamp=None):
"""
Update the container_stat policy_index and status_changed_at.
"""
if timestamp is None:
timestamp = Timestamp.now().internal
def _setit(conn):
conn.execute('''
INSERT OR IGNORE INTO policy_stat (storage_policy_index)
VALUES (?)
''', (policy_index,))
conn.execute('''
UPDATE container_stat
SET storage_policy_index = ?,
status_changed_at = MAX(?, status_changed_at)
WHERE storage_policy_index <> ?
''', (policy_index, timestamp, policy_index))
conn.commit()
with self.get() as conn:
try:
_setit(conn)
except sqlite3.OperationalError as err:
if not any(msg in str(err) for msg in (
"no such column: storage_policy_index",
"no such table: policy_stat")):
raise
self._migrate_add_storage_policy(conn)
_setit(conn)
self._storage_policy_index = policy_index
def reported(self, put_timestamp, delete_timestamp, object_count,
bytes_used):
"""
Update reported stats, available with container's `get_info`.
:param put_timestamp: put_timestamp to update
:param delete_timestamp: delete_timestamp to update
:param object_count: object_count to update
:param bytes_used: bytes_used to update
"""
with self.get() as conn:
conn.execute('''
UPDATE container_stat
SET reported_put_timestamp = ?, reported_delete_timestamp = ?,
reported_object_count = ?, reported_bytes_used = ?
''', (put_timestamp, delete_timestamp, object_count, bytes_used))
conn.commit()
def list_objects_iter(self, limit, marker, end_marker, prefix, delimiter,
path=None, storage_policy_index=0, reverse=False,
include_deleted=False, since_row=None,
transform_func=None, all_policies=False,
allow_reserved=False):
"""
Get a list of objects sorted by name starting at marker onward, up
to limit entries. Entries will begin with the prefix and will not
have the delimiter after the prefix.
:param limit: maximum number of entries to get
:param marker: marker query
:param end_marker: end marker query
:param prefix: prefix query
:param delimiter: delimiter for query
:param path: if defined, will set the prefix and delimiter based on
the path
:param storage_policy_index: storage policy index for query
:param reverse: reverse the result order.
:param include_deleted: if True, include only deleted objects; if
False (default), include only undeleted objects; otherwise, include
both deleted and undeleted objects.
:param since_row: include only items whose ROWID is greater than
the given row id; by default all rows are included.
:param transform_func: an optional function that if given will be
called for each object to get a transformed version of the object
to include in the listing; should have same signature as
:meth:`~_transform_record`; defaults to :meth:`~_transform_record`.
:param all_policies: if True, include objects for all storage policies
ignoring any value given for ``storage_policy_index``
        :param allow_reserved: if False (default), exclude names in the
            reserved namespace; if True, include them
:returns: list of tuples of (name, created_at, size, content_type,
etag, deleted)
"""
if include_deleted is True:
deleted_arg = ' = 1'
elif include_deleted is False:
deleted_arg = ' = 0'
else:
deleted_arg = ' in (0, 1)'
if transform_func is None:
transform_func = self._transform_record
delim_force_gte = False
if six.PY2:
(marker, end_marker, prefix, delimiter, path) = utf8encode(
marker, end_marker, prefix, delimiter, path)
self._commit_puts_stale_ok()
if reverse:
# Reverse the markers if we are reversing the listing.
marker, end_marker = end_marker, marker
if path is not None:
prefix = path
if path:
prefix = path = path.rstrip('/') + '/'
delimiter = '/'
elif delimiter and not prefix:
prefix = ''
if prefix:
end_prefix = prefix[:-1] + chr(ord(prefix[-1]) + 1)
orig_marker = marker
with self.get() as conn:
results = []
deleted_key = self._get_deleted_key(conn)
query_keys = ['name', 'created_at', 'size', 'content_type',
'etag', deleted_key]
while len(results) < limit:
query_args = []
query_conditions = []
if end_marker and (not prefix or end_marker < end_prefix):
query_conditions.append('name < ?')
query_args.append(end_marker)
elif prefix:
query_conditions.append('name < ?')
query_args.append(end_prefix)
if delim_force_gte:
query_conditions.append('name >= ?')
query_args.append(marker)
# Always set back to False
delim_force_gte = False
elif marker and (not prefix or marker >= prefix):
query_conditions.append('name > ?')
query_args.append(marker)
elif prefix:
query_conditions.append('name >= ?')
query_args.append(prefix)
if not allow_reserved:
query_conditions.append('name >= ?')
query_args.append(chr(ord(RESERVED_BYTE) + 1))
query_conditions.append(deleted_key + deleted_arg)
if since_row:
query_conditions.append('ROWID > ?')
query_args.append(since_row)
def build_query(keys, conditions, args):
query = 'SELECT ' + ', '.join(keys) + ' FROM object '
if conditions:
query += 'WHERE ' + ' AND '.join(conditions)
tail_query = '''
ORDER BY name %s LIMIT ?
''' % ('DESC' if reverse else '')
return query + tail_query, args + [limit - len(results)]
# storage policy filter
if all_policies:
query, args = build_query(
query_keys + ['storage_policy_index'],
query_conditions,
query_args)
else:
query, args = build_query(
query_keys + ['storage_policy_index'],
query_conditions + ['storage_policy_index = ?'],
query_args + [storage_policy_index])
try:
curs = conn.execute(query, tuple(args))
except sqlite3.OperationalError as err:
if 'no such column: storage_policy_index' not in str(err):
raise
query, args = build_query(
query_keys + ['0 as storage_policy_index'],
query_conditions, query_args)
curs = conn.execute(query, tuple(args))
curs.row_factory = None
                # A delimiter without a prefix is ignored; further, if there
                # is no delimiter then we can simply return the result, as
                # prefixes are already handled in the SQL statement.
if prefix is None or not delimiter:
return [transform_func(r) for r in curs]
# We have a delimiter and a prefix (possibly empty string) to
# handle
rowcount = 0
for row in curs:
rowcount += 1
name = row[0]
if reverse:
end_marker = name
else:
marker = name
if len(results) >= limit:
curs.close()
return results
end = name.find(delimiter, len(prefix))
if path is not None:
if name == path:
continue
if end >= 0 and len(name) > end + len(delimiter):
if reverse:
end_marker = name[:end + len(delimiter)]
else:
marker = ''.join([
name[:end],
delimiter[:-1],
chr(ord(delimiter[-1:]) + 1),
])
curs.close()
break
elif end >= 0:
if reverse:
end_marker = name[:end + len(delimiter)]
else:
marker = ''.join([
name[:end],
delimiter[:-1],
chr(ord(delimiter[-1:]) + 1),
])
# we want result to be inclusive of delim+1
delim_force_gte = True
dir_name = name[:end + len(delimiter)]
if dir_name != orig_marker:
results.append([dir_name, '0', 0, None, ''])
curs.close()
break
results.append(transform_func(row))
if not rowcount:
break
return results
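    # Hedged usage sketch (not part of the original source; 'broker' is an
    # assumed ContainerBroker instance). A delimiter listing under a prefix
    # returns object rows plus synthesized subdir markers of the form
    # [name, '0', 0, None, '']:
    #
    #   listing = broker.list_objects_iter(
    #       100, marker='', end_marker='', prefix='photos/', delimiter='/',
    #       storage_policy_index=broker.storage_policy_index)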
def get_objects(self, limit=None, marker='', end_marker='',
include_deleted=None, since_row=None):
"""
Returns a list of objects, including deleted objects, in all policies.
Each object in the list is described by a dict with keys {'name',
'created_at', 'size', 'content_type', 'etag', 'deleted',
'storage_policy_index'}.
:param limit: maximum number of entries to get
:param marker: if set, objects with names less than or equal to this
value will not be included in the list.
:param end_marker: if set, objects with names greater than or equal to
this value will not be included in the list.
:param include_deleted: if True, include only deleted objects; if
False, include only undeleted objects; otherwise (default), include
both deleted and undeleted objects.
:param since_row: include only items whose ROWID is greater than
the given row id; by default all rows are included.
:return: a list of dicts, each describing an object.
"""
limit = CONTAINER_LISTING_LIMIT if limit is None else limit
return self.list_objects_iter(
limit, marker, end_marker, prefix=None, delimiter=None, path=None,
reverse=False, include_deleted=include_deleted,
transform_func=self._record_to_dict, since_row=since_row,
all_policies=True, allow_reserved=True
)
def _transform_record(self, record):
"""
Returns a tuple of (name, last-modified time, size, content_type and
etag) for the given record.
The given record's created_at timestamp is decoded into separate data,
content-type and meta timestamps and the metadata timestamp is used as
the last-modified time value.
"""
t_data, t_ctype, t_meta = decode_timestamps(record[1])
return (record[0], t_meta.internal) + record[2:5]
def _record_to_dict(self, rec):
if rec:
keys = ('name', 'created_at', 'size', 'content_type', 'etag',
'deleted', 'storage_policy_index')
return dict(zip(keys, rec))
return None
def merge_items(self, item_list, source=None):
"""
Merge items into the object table.
:param item_list: list of dictionaries of {'name', 'created_at',
'size', 'content_type', 'etag', 'deleted',
'storage_policy_index', 'ctype_timestamp',
'meta_timestamp'}
:param source: if defined, update incoming_sync with the source
"""
for item in item_list:
if six.PY2 and isinstance(item['name'], six.text_type):
item['name'] = item['name'].encode('utf-8')
elif not six.PY2 and isinstance(item['name'], six.binary_type):
item['name'] = item['name'].decode('utf-8')
def _really_really_merge_items(conn):
curs = conn.cursor()
if self.get_db_version(conn) >= 1:
query_mod = ' deleted IN (0, 1) AND '
else:
query_mod = ''
curs.execute('BEGIN IMMEDIATE')
# Get sqlite records for objects in item_list that already exist.
# We must chunk it up to avoid sqlite's limit of 999 args.
records = {}
for offset in range(0, len(item_list), SQLITE_ARG_LIMIT):
chunk = [rec['name'] for rec in
item_list[offset:offset + SQLITE_ARG_LIMIT]]
records.update(
((rec[0], rec[6]), rec) for rec in curs.execute(
'SELECT name, created_at, size, content_type,'
'etag, deleted, storage_policy_index '
'FROM object WHERE ' + query_mod + ' name IN (%s)' %
','.join('?' * len(chunk)), chunk))
# Sort item_list into things that need adding and deleting, based
# on results of created_at query.
to_delete = set()
to_add = {}
for item in item_list:
item.setdefault('storage_policy_index', 0) # legacy
item_ident = (item['name'], item['storage_policy_index'])
existing = self._record_to_dict(records.get(item_ident))
if update_new_item_from_existing(item, existing):
if item_ident in records: # exists with older timestamp
to_delete.add(item_ident)
if item_ident in to_add: # duplicate entries in item_list
update_new_item_from_existing(item, to_add[item_ident])
to_add[item_ident] = item
if to_delete:
curs.executemany(
'DELETE FROM object WHERE ' + query_mod +
'name=? AND storage_policy_index=?',
(item_ident for item_ident in to_delete))
if to_add:
curs.executemany(
'INSERT INTO object (name, created_at, size, content_type,'
'etag, deleted, storage_policy_index) '
'VALUES (?, ?, ?, ?, ?, ?, ?)',
((rec['name'], rec['created_at'], rec['size'],
rec['content_type'], rec['etag'], rec['deleted'],
rec['storage_policy_index'])
for rec in to_add.values()))
if source:
# for replication we rely on the remote end sending merges in
# order with no gaps to increment sync_points
sync_point = item_list[-1]['ROWID']
curs.execute('''
UPDATE incoming_sync SET
sync_point=max(?, sync_point) WHERE remote_id=?
''', (sync_point, source))
if curs.rowcount < 1:
curs.execute('''
INSERT INTO incoming_sync (sync_point, remote_id)
VALUES (?, ?)
''', (sync_point, source))
conn.commit()
def _really_merge_items(conn):
return tpool.execute(_really_really_merge_items, conn)
with self.get() as conn:
try:
return _really_merge_items(conn)
except sqlite3.OperationalError as err:
if 'no such column: storage_policy_index' not in str(err):
raise
self._migrate_add_storage_policy(conn)
return _really_merge_items(conn)
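    # Hedged example (added for illustration): merge_items expects fully
    # populated record dicts; timestamps decide whether an incoming row wins
    # over an existing or duplicate row. For instance:
    #
    #   broker.merge_items([{
    #       'name': 'o1', 'created_at': Timestamp.now().internal, 'size': 0,
    #       'content_type': 'text/plain', 'etag': MD5_OF_EMPTY_STRING,
    #       'deleted': 0, 'storage_policy_index': 0,
    #       'ctype_timestamp': None, 'meta_timestamp': None}])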
def merge_shard_ranges(self, shard_ranges):
"""
Merge shard ranges into the shard range table.
:param shard_ranges: a shard range or a list of shard ranges; each
shard range should be an instance of
:class:`~swift.common.utils.ShardRange` or a dict representation of
a shard range having ``SHARD_RANGE_KEYS``.
"""
if not shard_ranges:
return
if not isinstance(shard_ranges, (list, ShardRangeList)):
shard_ranges = [shard_ranges]
item_list = []
for item in shard_ranges:
if isinstance(item, ShardRange):
item = dict(item)
for col in ('name', 'lower', 'upper'):
if six.PY2 and isinstance(item[col], six.text_type):
item[col] = item[col].encode('utf-8')
elif not six.PY2 and isinstance(item[col], six.binary_type):
item[col] = item[col].decode('utf-8')
item_list.append(item)
def _really_merge_items(conn):
curs = conn.cursor()
curs.execute('BEGIN IMMEDIATE')
# Get rows for items that already exist.
# We must chunk it up to avoid sqlite's limit of 999 args.
records = {}
for offset in range(0, len(item_list), SQLITE_ARG_LIMIT):
chunk = [record['name'] for record
in item_list[offset:offset + SQLITE_ARG_LIMIT]]
records.update(
(rec[0], dict(zip(SHARD_RANGE_KEYS, rec)))
for rec in curs.execute(
'SELECT %s FROM %s '
'WHERE deleted IN (0, 1) AND name IN (%s)' %
(', '.join(SHARD_RANGE_KEYS), SHARD_RANGE_TABLE,
','.join('?' * len(chunk))), chunk))
to_add, to_delete = sift_shard_ranges(item_list, records)
if to_delete:
curs.executemany(
'DELETE FROM %s WHERE deleted in (0, 1) '
'AND name = ?' % SHARD_RANGE_TABLE,
((item_ident,) for item_ident in to_delete))
if to_add:
vals = ','.join('?' * len(SHARD_RANGE_KEYS))
curs.executemany(
'INSERT INTO %s (%s) VALUES (%s)' %
(SHARD_RANGE_TABLE, ','.join(SHARD_RANGE_KEYS), vals),
tuple([item[k] for k in SHARD_RANGE_KEYS]
for item in to_add))
conn.commit()
migrations = {
'no such column: reported':
self._migrate_add_shard_range_reported,
'no such column: tombstones':
self._migrate_add_shard_range_tombstones,
('no such table: %s' % SHARD_RANGE_TABLE):
self.create_shard_range_table,
}
migrations_done = set()
with self.get() as conn:
while True:
try:
return _really_merge_items(conn)
except sqlite3.OperationalError as err:
# Without the rollback, new enough (>= py37) python/sqlite3
# will panic:
# sqlite3.OperationalError: cannot start a transaction
# within a transaction
conn.rollback()
for err_str, migration in migrations.items():
if err_str in migrations_done:
continue
if err_str in str(err):
migration(conn)
migrations_done.add(err_str)
break
else:
raise
def get_reconciler_sync(self):
with self.get() as conn:
try:
return conn.execute('''
SELECT reconciler_sync_point FROM container_stat
''').fetchone()[0]
except sqlite3.OperationalError as err:
if "no such column: reconciler_sync_point" not in str(err):
raise
return -1
def update_reconciler_sync(self, point):
query = '''
UPDATE container_stat
SET reconciler_sync_point = ?
'''
with self.get() as conn:
try:
conn.execute(query, (point,))
except sqlite3.OperationalError as err:
if "no such column: reconciler_sync_point" not in str(err):
raise
self._migrate_add_storage_policy(conn)
conn.execute(query, (point,))
conn.commit()
def get_misplaced_since(self, start, count):
"""
Get a list of objects which are in a storage policy different
from the container's storage policy.
:param start: last reconciler sync point
:param count: maximum number of entries to get
:returns: list of dicts with keys: name, created_at, size,
content_type, etag, storage_policy_index
"""
qry = '''
SELECT ROWID, name, created_at, size, content_type, etag,
deleted, storage_policy_index
FROM object
WHERE ROWID > ?
AND storage_policy_index != (
SELECT storage_policy_index FROM container_stat LIMIT 1)
ORDER BY ROWID ASC LIMIT ?
'''
self._commit_puts_stale_ok()
with self.get() as conn:
try:
cur = conn.execute(qry, (start, count))
except sqlite3.OperationalError as err:
if "no such column: storage_policy_index" not in str(err):
raise
return []
return list(dict(row) for row in cur.fetchall())
def _migrate_add_container_sync_points(self, conn):
"""
Add the x_container_sync_point columns to the 'container_stat' table.
"""
conn.executescript('''
BEGIN;
ALTER TABLE container_stat
ADD COLUMN x_container_sync_point1 INTEGER DEFAULT -1;
ALTER TABLE container_stat
ADD COLUMN x_container_sync_point2 INTEGER DEFAULT -1;
COMMIT;
''')
def _migrate_add_storage_policy(self, conn):
"""
Migrate the container schema to support tracking objects from
multiple storage policies. If the container_stat table has any
pending migrations, they are applied now before copying into
container_info.
* create the 'policy_stat' table.
* copy the current 'object_count' and 'bytes_used' columns to a
row in the 'policy_stat' table.
* add the storage_policy_index column to the 'object' table.
* drop the 'object_insert' and 'object_delete' triggers.
* add the 'object_insert_policy_stat' and
'object_delete_policy_stat' triggers.
* create container_info table for non-policy container info
* insert values from container_stat into container_info
* drop container_stat table
* create container_stat view
"""
# I tried just getting the list of column names in the current
# container_stat table with a pragma table_info, but could never get
# it inside the same transaction as the DDL (non-DML) statements:
# https://docs.python.org/2/library/sqlite3.html
# #controlling-transactions
# So we just apply all pending migrations to container_stat and copy a
# static known list of column names into container_info.
try:
self._migrate_add_container_sync_points(conn)
except sqlite3.OperationalError as e:
if 'duplicate column' in str(e):
conn.execute('ROLLBACK;')
else:
raise
try:
conn.executescript("""
ALTER TABLE container_stat
ADD COLUMN metadata TEXT DEFAULT '';
""")
except sqlite3.OperationalError as e:
if 'duplicate column' not in str(e):
raise
column_names = ', '.join((
'account', 'container', 'created_at', 'put_timestamp',
'delete_timestamp', 'reported_put_timestamp',
'reported_object_count', 'reported_bytes_used', 'hash', 'id',
'status', 'status_changed_at', 'metadata',
'x_container_sync_point1', 'x_container_sync_point2'))
conn.executescript(
'BEGIN;' +
POLICY_STAT_TABLE_CREATE +
'''
INSERT INTO policy_stat (
storage_policy_index, object_count, bytes_used)
SELECT 0, object_count, bytes_used
FROM container_stat;
ALTER TABLE object
ADD COLUMN storage_policy_index INTEGER DEFAULT 0;
DROP TRIGGER object_insert;
DROP TRIGGER object_delete;
''' +
POLICY_STAT_TRIGGER_SCRIPT +
CONTAINER_INFO_TABLE_SCRIPT +
'''
INSERT INTO container_info (%s)
SELECT %s FROM container_stat;
DROP TABLE IF EXISTS container_stat;
''' % (column_names, column_names) +
CONTAINER_STAT_VIEW_SCRIPT +
'COMMIT;')
def _migrate_add_shard_range_reported(self, conn):
"""
Add the reported column to the 'shard_range' table.
"""
conn.executescript('''
BEGIN;
ALTER TABLE %s
ADD COLUMN reported INTEGER DEFAULT 0;
COMMIT;
''' % SHARD_RANGE_TABLE)
def _migrate_add_shard_range_tombstones(self, conn):
"""
Add the tombstones column to the 'shard_range' table.
"""
conn.executescript('''
BEGIN;
ALTER TABLE %s
ADD COLUMN tombstones INTEGER DEFAULT -1;
COMMIT;
''' % SHARD_RANGE_TABLE)
def _reclaim_other_stuff(self, conn, age_timestamp, sync_timestamp):
super(ContainerBroker, self)._reclaim_other_stuff(
conn, age_timestamp, sync_timestamp)
# populate instance cache, but use existing conn to avoid deadlock
# when it has a pending update
self._populate_instance_cache(conn=conn)
try:
conn.execute('''
DELETE FROM %s WHERE deleted = 1 AND timestamp < ?
AND name != ?
''' % SHARD_RANGE_TABLE, (sync_timestamp, self.path))
except sqlite3.OperationalError as err:
if ('no such table: %s' % SHARD_RANGE_TABLE) not in str(err):
raise
def _get_shard_range_rows(self, connection=None, marker=None,
end_marker=None, includes=None,
include_deleted=False, states=None,
include_own=False, exclude_others=False,
limit=None):
"""
Returns a list of shard range rows.
To get all shard ranges use ``include_own=True``. To get only the
broker's own shard range use ``include_own=True`` and
``exclude_others=True``.
:param connection: db connection
:param marker: restricts the returned list to rows whose namespace
includes or is greater than the marker value. ``marker`` is ignored
if ``includes`` is specified.
:param end_marker: restricts the returned list to rows whose namespace
includes or is less than the end_marker value. ``end_marker`` is
ignored if ``includes`` is specified.
:param includes: restricts the returned list to the shard range that
includes the given value; if ``includes`` is specified then
``marker`` and ``end_marker`` are ignored, but other constraints
are applied (e.g. ``exclude_others`` and ``include_deleted``).
:param include_deleted: include rows marked as deleted.
:param states: include only rows matching the given state(s); can be an
int or a list of ints.
:param include_own: boolean that governs whether the row whose name
matches the broker's path is included in the returned list. If
True, that row is included unless it is excluded by other
constraints (e.g. ``marker``, ``end_marker``, ``includes``). If
False, that row is not included. Default is False.
:param exclude_others: boolean that governs whether the rows whose
names do not match the broker's path are included in the returned
list. If True, those rows are not included, otherwise they are
included. Default is False.
:param limit: restricts the returned list to the given number of rows.
Should be a whole number; negative values will be ignored.
The ``limit`` parameter is useful to optimise a search
when the maximum number of expected matching rows is known, and
particularly when that maximum number is much less than the total
number of rows in the DB. However, the DB search is not ordered and
the subset of rows returned when ``limit`` is less than all
possible matching rows is therefore unpredictable.
:return: a list of tuples.
"""
if exclude_others and not include_own:
return []
included_states = set()
if isinstance(states, (list, tuple, set)):
included_states.update(states)
elif states is not None:
included_states.add(states)
        # defaults to be used when legacy dbs are missing columns
default_values = {'reported': 0,
'tombstones': -1}
def do_query(conn, defaults=None):
condition = ''
conditions = []
params = []
if not include_deleted:
conditions.append('deleted=0')
if included_states:
conditions.append('state in (%s)' % ','.join(
'?' * len(included_states)))
params.extend(included_states)
if not include_own:
conditions.append('name != ?')
params.append(self.path)
if exclude_others:
conditions.append('name = ?')
params.append(self.path)
if includes is None:
if end_marker:
conditions.append('lower < ?')
params.append(end_marker)
if marker:
conditions.append("(upper = '' OR upper > ?)")
params.append(marker)
else:
conditions.extend(('lower < ?', "(upper = '' OR upper >= ?)"))
params.extend((includes, includes))
if conditions:
condition = ' WHERE ' + ' AND '.join(conditions)
if limit is not None and limit >= 0:
condition += ' LIMIT %d' % limit
columns = SHARD_RANGE_KEYS[:-2]
for column in SHARD_RANGE_KEYS[-2:]:
if column in defaults:
columns += (('%s as %s' %
(default_values[column], column)),)
else:
columns += (column,)
sql = '''
SELECT %s
FROM %s%s;
''' % (', '.join(columns), SHARD_RANGE_TABLE, condition)
data = conn.execute(sql, params)
data.row_factory = None
return [row for row in data]
with self.maybe_get(connection) as conn:
defaults = set()
attempts = len(default_values) + 1
while attempts:
attempts -= 1
try:
return do_query(conn, defaults)
except sqlite3.OperationalError as err:
if ('no such table: %s' % SHARD_RANGE_TABLE) in str(err):
return []
if not attempts:
raise
new_defaults = set()
for column in default_values.keys():
if 'no such column: %s' % column in str(err):
new_defaults.add(column)
if not new_defaults:
raise
if new_defaults.intersection(defaults):
raise
defaults.update(new_defaults)
@classmethod
def resolve_shard_range_states(cls, states):
"""
Given a list of values each of which may be the name of a state, the
number of a state, or an alias, return the set of state numbers
described by the list.
The following alias values are supported: 'listing' maps to all states
that are considered valid when listing objects; 'updating' maps to all
states that are considered valid for redirecting an object update;
'auditing' maps to all states that are considered valid for a shard
container that is updating its own shard range table from a root (this
currently maps to all states except FOUND).
:param states: a list of values each of which may be the name of a
state, the number of a state, or an alias
:return: a set of integer state numbers, or None if no states are given
:raises ValueError: if any value in the given list is neither a valid
state nor a valid alias
"""
if states:
resolved_states = set()
for state in states:
if state == 'listing':
resolved_states.update(SHARD_LISTING_STATES)
elif state == 'updating':
resolved_states.update(SHARD_UPDATE_STATES)
elif state == 'auditing':
resolved_states.update(SHARD_AUDITING_STATES)
else:
resolved_states.add(ShardRange.resolve_state(state)[0])
return resolved_states
return None
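    # Hedged usage sketch (added): mixing aliases, state names and ints is
    # expected to work, e.g.
    #
    #   ContainerBroker.resolve_shard_range_states(['listing', 'found'])
    #
    # returns the union of SHARD_LISTING_STATES and the integer value of the
    # FOUND state, while an unknown value raises ValueError.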
def get_shard_ranges(self, marker=None, end_marker=None, includes=None,
reverse=False, include_deleted=False, states=None,
include_own=False, exclude_others=False,
fill_gaps=False):
"""
Returns a list of persisted shard ranges.
:param marker: restricts the returned list to shard ranges whose
namespace includes or is greater than the marker value. If
``reverse=True`` then ``marker`` is treated as ``end_marker``.
``marker`` is ignored if ``includes`` is specified.
:param end_marker: restricts the returned list to shard ranges whose
namespace includes or is less than the end_marker value. If
``reverse=True`` then ``end_marker`` is treated as ``marker``.
``end_marker`` is ignored if ``includes`` is specified.
:param includes: restricts the returned list to the shard range that
includes the given value; if ``includes`` is specified then
``fill_gaps``, ``marker`` and ``end_marker`` are ignored, but other
constraints are applied (e.g. ``exclude_others`` and
``include_deleted``).
:param reverse: reverse the result order.
:param include_deleted: include items that have the delete marker set.
:param states: if specified, restricts the returned list to shard
ranges that have the given state(s); can be a list of ints or a
single int.
:param include_own: boolean that governs whether the row whose name
matches the broker's path is included in the returned list. If
True, that row is included unless it is excluded by other
constraints (e.g. ``marker``, ``end_marker``, ``includes``). If
False, that row is not included. Default is False.
:param exclude_others: boolean that governs whether the rows whose
names do not match the broker's path are included in the returned
list. If True, those rows are not included, otherwise they are
included. Default is False.
:param fill_gaps: if True, insert a modified copy of own shard range to
fill any gap between the end of any found shard ranges and the
upper bound of own shard range. Gaps enclosed within the found
shard ranges are not filled. ``fill_gaps`` is ignored if
``includes`` is specified.
:return: a list of instances of :class:`swift.common.utils.ShardRange`.
"""
if includes is None and (marker == Namespace.MAX
or end_marker == Namespace.MIN):
return []
if reverse:
marker, end_marker = end_marker, marker
if marker and end_marker and marker >= end_marker:
return []
shard_ranges = [
ShardRange(*row)
for row in self._get_shard_range_rows(
marker=marker, end_marker=end_marker, includes=includes,
include_deleted=include_deleted, states=states,
include_own=include_own, exclude_others=exclude_others)]
shard_ranges.sort(key=ShardRange.sort_key)
if includes:
return shard_ranges[:1] if shard_ranges else []
if fill_gaps:
own_shard_range = self.get_own_shard_range()
if shard_ranges:
last_upper = shard_ranges[-1].upper
else:
last_upper = max(marker or own_shard_range.lower,
own_shard_range.lower)
required_upper = min(end_marker or own_shard_range.upper,
own_shard_range.upper)
if required_upper > last_upper:
filler_sr = own_shard_range
filler_sr.lower = last_upper
filler_sr.upper = required_upper
shard_ranges.append(filler_sr)
if reverse:
shard_ranges.reverse()
return shard_ranges
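    # Hedged example (added; 'broker' is an assumed ContainerBroker for a
    # root container): the shard ranges consulted for an object listing are
    # typically selected with something like
    #
    #   broker.get_shard_ranges(
    #       states=ContainerBroker.resolve_shard_range_states(['listing']),
    #       fill_gaps=True)
    #
    # which appends a filler range derived from the broker's own shard range
    # if the found ranges stop short of its upper bound.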
def get_own_shard_range(self, no_default=False):
"""
Returns a shard range representing this broker's own shard range. If no
such range has been persisted in the broker's shard ranges table then a
default shard range representing the entire namespace will be returned.
The ``object_count`` and ``bytes_used`` of the returned shard range are
not guaranteed to be up-to-date with the current object stats for this
broker. Callers that require up-to-date stats should use the
``get_info`` method.
:param no_default: if True and the broker's own shard range is not
found in the shard ranges table then None is returned, otherwise a
default shard range is returned.
:return: an instance of :class:`~swift.common.utils.ShardRange`
"""
rows = self._get_shard_range_rows(
include_own=True, include_deleted=True, exclude_others=True,
limit=1)
if rows:
own_shard_range = ShardRange(*rows[0])
elif no_default:
own_shard_range = None
else:
own_shard_range = ShardRange(
self.path, Timestamp.now(), ShardRange.MIN, ShardRange.MAX,
state=ShardRange.ACTIVE)
return own_shard_range
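    # Hedged usage note (added): a broker that has never had its own shard
    # range persisted returns a default ACTIVE range spanning the whole
    # namespace, so callers that need to distinguish that case are assumed to
    # pass no_default=True, e.g.
    #
    #   own_sr = broker.get_own_shard_range(no_default=True)
    #   if own_sr is None:
    #       pass  # no own shard range has ever been recorded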
def is_own_shard_range(self, shard_range):
return shard_range.name == self.path
def enable_sharding(self, epoch):
"""
Updates this broker's own shard range with the given epoch, sets its
state to SHARDING and persists it in the DB.
:param epoch: a :class:`~swift.utils.common.Timestamp`
:return: the broker's updated own shard range.
"""
own_shard_range = self.get_own_shard_range()
own_shard_range.update_state(ShardRange.SHARDING, epoch)
own_shard_range.epoch = epoch
self.merge_shard_ranges(own_shard_range)
return own_shard_range
def get_shard_usage(self):
"""
Get the aggregate object stats for all shard ranges in states ACTIVE,
SHARDING or SHRINKING.
:return: a dict with keys {bytes_used, object_count}
"""
with self.get() as conn:
sql = '''
SELECT COALESCE(SUM(bytes_used), 0),
COALESCE(SUM(object_count), 0)
FROM %s
WHERE state in (%s)
AND deleted = 0
AND name != ?
''' % (SHARD_RANGE_TABLE, ','.join('?' * len(SHARD_STATS_STATES)))
cur = conn.execute(sql, SHARD_STATS_STATES + [self.path])
bytes_used, object_count = cur.fetchone()
return {'bytes_used': bytes_used,
'object_count': object_count}
def has_other_shard_ranges(self):
"""
        Returns whether there is any shard range, other than the broker's own
        shard range, that is not marked as deleted.
        :return: True if any such shard range exists, False otherwise.
"""
with self.get() as conn:
sql = '''
SELECT 1 FROM %s
WHERE deleted = 0 AND name != ? LIMIT 1
''' % (SHARD_RANGE_TABLE)
try:
data = conn.execute(sql, [self.path])
data.row_factory = None
return True if [row for row in data] else False
except sqlite3.OperationalError as err:
if ('no such table: %s' % SHARD_RANGE_TABLE) in str(err):
return False
else:
raise
def get_all_shard_range_data(self):
"""
Returns a list of all shard range data, including own shard range and
deleted shard ranges.
:return: A list of dict representations of a ShardRange.
"""
shard_ranges = self.get_shard_ranges(include_deleted=True,
include_own=True)
return [dict(sr) for sr in shard_ranges]
def set_sharding_state(self):
"""
Creates and initializes a fresh DB file in preparation for sharding a
retiring DB. The broker's own shard range must have an epoch timestamp
for this method to succeed.
:return: True if the fresh DB was successfully created, False
otherwise.
"""
epoch = self.get_own_shard_range().epoch
if not epoch:
self.logger.warning("Container '%s' cannot be set to sharding "
"state: missing epoch", self.path)
return False
state = self.get_db_state()
if not state == UNSHARDED:
self.logger.warning("Container '%s' cannot be set to sharding "
"state while in %s state", self.path, state)
return False
info = self.get_info()
# The tmp_dir is cleaned up by the replicators after reclaim_age, so if
# we initially create the fresh DB there, we will already have cleanup
# covered if there is an error.
tmp_dir = os.path.join(self.get_device_path(), 'tmp')
if not os.path.exists(tmp_dir):
mkdirs(tmp_dir)
tmp_db_file = os.path.join(tmp_dir, "fresh%s.db" % str(uuid4()))
fresh_broker = ContainerBroker(tmp_db_file, self.timeout, self.logger,
self.account, self.container)
fresh_broker.initialize(info['put_timestamp'],
info['storage_policy_index'])
# copy relevant data from the retiring db to the fresh db
fresh_broker.update_metadata(self.metadata)
fresh_broker.merge_shard_ranges(self.get_all_shard_range_data())
# copy sync points so that any peer in sync with retiring db will
# appear to be in sync with the fresh db, although the peer shouldn't
# attempt to replicate objects to a db with shard ranges.
for incoming in (True, False):
syncs = self.get_syncs(incoming)
fresh_broker.merge_syncs(syncs, incoming)
max_row = self.get_max_row()
with fresh_broker.get() as fresh_broker_conn:
# Initialise the rowid to continue from where the retiring db ended
try:
sql = "INSERT into object " \
"(ROWID, name, created_at, size, content_type, etag) " \
"values (?, 'tmp_sharding', ?, 0, '', ?)"
fresh_broker_conn.execute(
sql, (max_row, Timestamp.now().internal,
MD5_OF_EMPTY_STRING))
fresh_broker_conn.execute(
'DELETE FROM object WHERE ROWID = ?', (max_row,))
fresh_broker_conn.commit()
except sqlite3.OperationalError as err:
self.logger.error(
'Failed to set the ROWID of the fresh database for %s: %s',
self.path, err)
return False
            # Sync the retiring container stat into the fresh db: at least the
            # things that aren't covered through the normal broker api and
            # that won't just be regenerated.
try:
sql = 'UPDATE container_stat SET created_at=?, '
sql += 'delete_timestamp=?, status=?, status_changed_at=?'
sql_data = (info['created_at'], info['delete_timestamp'],
info['status'], info['status_changed_at'])
# 'reported_*' items are not sync'd because this is consistent
# with when a new DB is created after rsync'ing to another
# node (see _newid()). 'hash' should not be sync'd because
# this DB has no object rows.
fresh_broker_conn.execute(sql, sql_data)
fresh_broker_conn.commit()
except sqlite3.OperationalError as err:
self.logger.error(
'Failed to sync the container_stat table/view with the '
'fresh database for %s: %s',
self.path, err)
return False
# Rename to the new database
fresh_db_filename = make_db_file_path(self._db_file, epoch)
renamer(tmp_db_file, fresh_db_filename)
self.reload_db_files()
return True
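    # Illustrative sketch (not in the original source) of the on-disk effect
    # of set_sharding_state(), assuming db files are named '<hash>.db' and
    # '<hash>_<epoch>.db' under the usual containers/<part>/<suffix>/<hash>/
    # layout:
    #
    #   before: .../<hash>/<hash>.db
    #   after:  .../<hash>/<hash>.db           (retiring)
    #           .../<hash>/<hash>_<epoch>.db   (fresh)
    #
    # get_db_state() then reports SHARDING until the retiring file is removed
    # by set_sharded_state().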
def set_sharded_state(self):
"""
        Unlinks the broker's retiring DB file.
:return: True if the retiring DB was successfully unlinked, False
otherwise.
"""
state = self.get_db_state()
if not state == SHARDING:
self.logger.warning("Container %r cannot be set to sharded "
"state while in %s state",
self.path, state)
return False
self.reload_db_files()
if len(self.db_files) < 2:
self.logger.warning(
'Refusing to delete db file for %r: no fresher db file found '
'in %r.', self.path, self.db_files)
return False
retiring_file = self.db_files[-2]
try:
os.unlink(retiring_file)
self.logger.debug('Unlinked retiring db %r', retiring_file)
except OSError as err:
if err.errno != errno.ENOENT:
self.logger.exception('Failed to unlink %r' % self._db_file)
return False
self.reload_db_files()
if len(self.db_files) >= 2:
self.logger.warning(
'Still have multiple db files after unlinking %r: %r',
retiring_file, self.db_files)
return False
return True
def get_brokers(self):
"""
Return a list of brokers for component dbs. The list has two entries
while the db state is sharding: the first entry is a broker for the
retiring db with ``skip_commits`` set to ``True``; the second entry is
a broker for the fresh db with ``skip_commits`` set to ``False``. For
any other db state the list has one entry.
:return: a list of :class:`~swift.container.backend.ContainerBroker`
"""
if len(self.db_files) > 2:
self.logger.warning('Unexpected db files will be ignored: %s' %
self.db_files[:-2])
brokers = []
db_files = self.db_files[-2:]
while db_files:
db_file = db_files.pop(0)
sub_broker = ContainerBroker(
db_file, self.timeout, self.logger, self.account,
self.container, self.pending_timeout, self.stale_reads_ok,
force_db_file=True, skip_commits=bool(db_files))
brokers.append(sub_broker)
return brokers
def set_sharding_sysmeta(self, key, value):
"""
Updates the broker's metadata stored under the given key
prefixed with a sharding specific namespace.
:param key: metadata key in the sharding metadata namespace.
:param value: metadata value
"""
self.update_metadata({'X-Container-Sysmeta-Shard-' + key:
(value, Timestamp.now().internal)})
def get_sharding_sysmeta_with_timestamps(self):
"""
Returns sharding specific info from the broker's metadata with
timestamps.
        :return: a dict of sharding info with their timestamps.
"""
prefix = 'X-Container-Sysmeta-Shard-'
return {
k[len(prefix):]: v
for k, v in self.metadata.items()
if k.startswith(prefix)
}
def get_sharding_sysmeta(self, key=None):
"""
Returns sharding specific info from the broker's metadata.
:param key: if given the value stored under ``key`` in the sharding
info will be returned.
:return: either a dict of sharding info or the value stored under
``key`` in that dict.
"""
info = self.get_sharding_sysmeta_with_timestamps()
if key:
return info.get(key, (None, None))[0]
else:
return {k: v[0] for k, v in info.items()}
def _get_root_meta(self):
"""
Get the (unquoted) root path, plus the header the info came from.
        If no info is available, returns ``(None, None)``.
"""
path = self.get_sharding_sysmeta('Quoted-Root')
if path:
return 'X-Container-Sysmeta-Shard-Quoted-Root', unquote(path)
path = self.get_sharding_sysmeta('Root')
if path:
return 'X-Container-Sysmeta-Shard-Root', path
return None, None
def _load_root_info(self):
"""
Load the root container name and account for the container represented
by this broker.
The root container path, if set, is stored in sysmeta under the key
``X-Container-Sysmeta-Shard-Root``. If this sysmeta is not set then the
container is considered to be a root container and ``_root_account``
and ``_root_container`` are set equal to the broker ``account`` and
``container`` attributes respectively.
"""
hdr, path = self._get_root_meta()
if not path:
# Ensure account/container get populated
self._populate_instance_cache()
self._root_account = self.account
self._root_container = self.container
return
try:
self._root_account, self._root_container = split_path(
'/' + path, 2, 2)
except ValueError:
raise ValueError("Expected %s to be of the form "
"'account/container', got %r" % (hdr, path))
@property
def root_account(self):
if not self._root_account:
self._load_root_info()
return self._root_account
@property
def root_container(self):
if not self._root_container:
self._load_root_info()
return self._root_container
@property
def root_path(self):
return '%s/%s' % (self.root_account, self.root_container)
def is_root_container(self):
"""
Returns True if this container is a root container, False otherwise.
A root container is a container that is not a shard of another
container.
"""
_, path = self._get_root_meta()
if path is not None:
# We have metadata telling us where the root is; it's
# authoritative; shards should always have this metadata even when
# deleted
return self.path == path
# Else, we're either a root or a legacy deleted shard whose sharding
# sysmeta was deleted
own_shard_range = self.get_own_shard_range(no_default=True)
if not own_shard_range:
return True # Never been sharded
if own_shard_range.deleted:
# When shard ranges shrink, they get marked deleted
return False
else:
# But even when a root collapses, empties, and gets deleted, its
# own_shard_range is left alive
return True
def _get_next_shard_range_upper(self, shard_size, last_upper=None):
"""
Returns the name of the object that is ``shard_size`` rows beyond
``last_upper`` in the object table ordered by name. If ``last_upper``
        is not given then it defaults to the start of the object table,
        ordered by name.
        :param shard_size: the number of rows beyond ``last_upper`` at which
            to find the next shard range upper bound.
        :param last_upper: the upper bound of the last found shard range.
:return: an object name, or None if the number of rows beyond
``last_upper`` is less than ``shard_size``.
"""
self._commit_puts_stale_ok()
with self.get() as connection:
sql = ('SELECT name FROM object WHERE %s=0 ' %
self._get_deleted_key(connection))
args = []
if last_upper:
sql += "AND name > ? "
args.append(str(last_upper))
sql += "ORDER BY name LIMIT 1 OFFSET %d" % (shard_size - 1)
row = connection.execute(sql, args).fetchone()
return row['name'] if row else None
def find_shard_ranges(self, shard_size, limit=-1, existing_ranges=None,
minimum_shard_size=1):
"""
Scans the container db for shard ranges. Scanning will start at the
        upper bound of any ``existing_ranges`` that are given, otherwise
at ``ShardRange.MIN``. Scanning will stop when ``limit`` shard ranges
have been found or when no more shard ranges can be found. In the
latter case, the upper bound of the final shard range will be equal to
the upper bound of the container namespace.
This method does not modify the state of the db; callers are
responsible for persisting any shard range data in the db.
:param shard_size: the size of each shard range
:param limit: the maximum number of shard points to be found; a
negative value (default) implies no limit.
:param existing_ranges: an optional list of existing ShardRanges; if
given, this list should be sorted in order of upper bounds; the
scan for new shard ranges will start at the upper bound of the last
existing ShardRange.
:param minimum_shard_size: Minimum size of the final shard range. If
this is greater than one then the final shard range may be extended
to more than shard_size in order to avoid a further shard range
            with fewer than minimum_shard_size rows.
:return: a tuple; the first value in the tuple is a list of
dicts each having keys {'index', 'lower', 'upper', 'object_count'}
in order of ascending 'upper'; the second value in the tuple is a
boolean which is True if the last shard range has been found, False
otherwise.
"""
existing_ranges = existing_ranges or []
minimum_shard_size = max(minimum_shard_size, 1)
object_count = self.get_info().get('object_count', 0)
if shard_size + minimum_shard_size > object_count:
# container not big enough to shard
return [], False
own_shard_range = self.get_own_shard_range()
progress = 0
progress_reliable = True
# update initial state to account for any existing shard ranges
if existing_ranges:
if all([sr.state == ShardRange.FOUND
for sr in existing_ranges]):
progress = sum([sr.object_count for sr in existing_ranges])
else:
# else: object count in existing shard ranges may have changed
# since they were found so progress cannot be reliably
# calculated; use default progress of zero - that's ok,
# progress is used for optimisation not correctness
progress_reliable = False
last_shard_upper = existing_ranges[-1].upper
if last_shard_upper >= own_shard_range.upper:
# == implies all ranges were previously found
# > implies an acceptor range has been set into which this
# shard should cleave itself
return [], True
else:
last_shard_upper = own_shard_range.lower
found_ranges = []
sub_broker = self.get_brokers()[0]
index = len(existing_ranges)
while limit is None or limit < 0 or len(found_ranges) < limit:
if progress + shard_size + minimum_shard_size > object_count:
                # next shard point is within minimum_shard_size rows of the
                # final object name, or beyond it, so don't bother with a db
                # query. This shard will have <= shard_size +
                # (minimum_shard_size - 1) rows.
next_shard_upper = None
else:
try:
next_shard_upper = sub_broker._get_next_shard_range_upper(
shard_size, last_shard_upper)
except (sqlite3.OperationalError, LockTimeout):
self.logger.exception(
"Problem finding shard upper in %r: " % self.db_file)
break
if (next_shard_upper is None or
next_shard_upper > own_shard_range.upper):
# We reached the end of the container namespace, or possibly
# beyond if the container has misplaced objects. In either case
# limit the final shard range to own_shard_range.upper.
next_shard_upper = own_shard_range.upper
if progress_reliable:
# object count may include misplaced objects so the final
# shard size may not be accurate until cleaved, but at
# least the sum of shard sizes will equal the unsharded
# object_count
shard_size = object_count - progress
# NB shard ranges are created with a non-zero object count for a
# few reasons:
# 1. so that the apparent container object count remains
# consistent;
# 2. the container is non-deletable while shards have been found
# but not yet cleaved; and
            #  3. so we have a rough idea of the size the shards should be
            #     while cleaving.
found_ranges.append(
{'index': index,
'lower': str(last_shard_upper),
'upper': str(next_shard_upper),
'object_count': shard_size})
if next_shard_upper == own_shard_range.upper:
return found_ranges, True
progress += shard_size
last_shard_upper = next_shard_upper
index += 1
return found_ranges, False
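    # Illustrative usage sketch (not part of the original module): assuming an
    # existing ContainerBroker instance ``broker``, a caller might ask for up
    # to five shard ranges of 500000 rows each:
    #
    #   found, last_found = broker.find_shard_ranges(shard_size=500000,
    #                                                limit=5)
    #   for rng in found:
    #       print(rng['index'], rng['lower'], rng['upper'],
    #             rng['object_count'])
    #
    # ``last_found`` is True once the final range, bounded by the container's
    # own upper bound, has been found.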
| swift-master | swift/container/backend.py |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from collections import defaultdict
import functools
import socket
import itertools
import logging
from eventlet import GreenPile, GreenPool, Timeout
import six
from swift.common import constraints
from swift.common.daemon import Daemon
from swift.common.direct_client import (
direct_head_container, direct_delete_container_object,
direct_put_container_object, ClientException)
from swift.common.internal_client import InternalClient, UnexpectedResponse
from swift.common.request_helpers import MISPLACED_OBJECTS_ACCOUNT, \
USE_REPLICATION_NETWORK_HEADER
from swift.common.utils import get_logger, split_path, majority_size, \
FileLikeIter, Timestamp, last_modified_date_to_timestamp, \
LRUCache, decode_timestamps, hash_path
from swift.common.storage_policy import POLICIES
MISPLACED_OBJECTS_CONTAINER_DIVISOR = 3600 # 1 hour
CONTAINER_POLICY_TTL = 30
def cmp_policy_info(info, remote_info):
"""
You have to squint to see it, but the general strategy is just:
if either has been recreated:
return the newest (of the recreated)
else
return the oldest
    I tried cleaning it up for a while, but settled on just writing a bunch
    of tests instead. Once you get an intuitive sense for the nuance here you
    can try to see whether there's a better way to spell the boolean logic,
    but it all ends up looking sorta hairy.
:returns: -1 if info is correct, 1 if remote_info is better
"""
def is_deleted(info):
return (info['delete_timestamp'] > info['put_timestamp'] and
info.get('count', info.get('object_count', 0)) == 0)
def cmp(a, b):
if a < b:
return -1
elif b < a:
return 1
else:
return 0
deleted = is_deleted(info)
remote_deleted = is_deleted(remote_info)
if any([deleted, remote_deleted]):
if not deleted:
return -1
elif not remote_deleted:
return 1
return cmp(remote_info['status_changed_at'],
info['status_changed_at'])
def has_been_recreated(info):
return (info['put_timestamp'] > info['delete_timestamp'] >
Timestamp(0))
remote_recreated = has_been_recreated(remote_info)
recreated = has_been_recreated(info)
if any([remote_recreated, recreated]):
if not recreated:
return 1
elif not remote_recreated:
return -1
    # both have been recreated, everything devolves to here eventually
most_recent_successful_delete = max(info['delete_timestamp'],
remote_info['delete_timestamp'])
if info['put_timestamp'] < most_recent_successful_delete:
return 1
elif remote_info['put_timestamp'] < most_recent_successful_delete:
return -1
return cmp(info['status_changed_at'], remote_info['status_changed_at'])
def incorrect_policy_index(info, remote_info):
"""
Compare remote_info to info and decide if the remote storage policy index
should be used instead of ours.
"""
if 'storage_policy_index' not in remote_info:
return False
if remote_info['storage_policy_index'] == info['storage_policy_index']:
return False
# Only return True if remote_info has the better data;
# see the docstring for cmp_policy_info
return cmp_policy_info(info, remote_info) > 0
def translate_container_headers_to_info(headers):
default_timestamp = Timestamp(0).internal
return {
'storage_policy_index': int(headers['X-Backend-Storage-Policy-Index']),
'put_timestamp': headers.get('x-backend-put-timestamp',
default_timestamp),
'delete_timestamp': headers.get('x-backend-delete-timestamp',
default_timestamp),
'status_changed_at': headers.get('x-backend-status-changed-at',
default_timestamp),
}
def best_policy_index(headers):
container_info = [translate_container_headers_to_info(header_set)
for header_set in headers]
container_info.sort(key=functools.cmp_to_key(cmp_policy_info))
return container_info[0]['storage_policy_index']
def get_reconciler_container_name(obj_timestamp):
"""
Get the name of a container into which a misplaced object should be
enqueued. The name is the object's last modified time rounded down to the
nearest hour.
:param obj_timestamp: a string representation of the object's 'created_at'
        time from its container db row.
:return: a container name
"""
# Use last modified time of object to determine reconciler container name
_junk, _junk, ts_meta = decode_timestamps(obj_timestamp)
return str(int(ts_meta) //
MISPLACED_OBJECTS_CONTAINER_DIVISOR *
MISPLACED_OBJECTS_CONTAINER_DIVISOR)
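# Worked example (illustrative, not in the original source): for an object
# 'created_at' value of '1234567890.12345' the meta timestamp is rounded down
# to the nearest hour, so get_reconciler_container_name('1234567890.12345')
# returns '1234566000' (i.e. 1234567890 // 3600 * 3600).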
def get_reconciler_obj_name(policy_index, account, container, obj):
return "%(policy_index)d:/%(acc)s/%(con)s/%(obj)s" % {
'policy_index': policy_index, 'acc': account,
'con': container, 'obj': obj}
def get_reconciler_content_type(op):
try:
return {
'put': 'application/x-put',
'delete': 'application/x-delete',
}[op.lower()]
except KeyError:
raise ValueError('invalid operation type %r' % op)
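# Illustrative example (not in the original source): a misplaced PUT of
# /AUTH_test/c/o found in policy index 1 would be enqueued under the object
# name get_reconciler_obj_name(1, 'AUTH_test', 'c', 'o'), which is
# '1:/AUTH_test/c/o', with content type get_reconciler_content_type('put'),
# which is 'application/x-put'.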
def get_row_to_q_entry_translator(broker):
account = broker.root_account
container = broker.root_container
op_type = {
0: get_reconciler_content_type('put'),
1: get_reconciler_content_type('delete'),
}
def translator(obj_info):
name = get_reconciler_obj_name(obj_info['storage_policy_index'],
account, container,
obj_info['name'])
return {
'name': name,
'deleted': 0,
'created_at': obj_info['created_at'],
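            # the row's original timestamp is carried in the etag column of
            # the queue entry; parse_raw_obj() recovers it from the listing's
            # 'hash' field as q_ts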
'etag': obj_info['created_at'],
'content_type': op_type[obj_info['deleted']],
'size': 0,
}
return translator
def add_to_reconciler_queue(container_ring, account, container, obj,
obj_policy_index, obj_timestamp, op,
force=False, conn_timeout=5, response_timeout=15):
"""
Add an object to the container reconciler's queue. This will cause the
container reconciler to move it from its current storage policy index to
the correct storage policy index.
:param container_ring: container ring
:param account: the misplaced object's account
:param container: the misplaced object's container
:param obj: the misplaced object
:param obj_policy_index: the policy index where the misplaced object
currently is
:param obj_timestamp: the misplaced object's X-Timestamp. We need this to
ensure that the reconciler doesn't overwrite a newer
object with an older one.
:param op: the method of the operation (DELETE or PUT)
:param force: over-write queue entries newer than obj_timestamp
:param conn_timeout: max time to wait for connection to container server
:param response_timeout: max time to wait for response from container
server
    :returns: the reconciler queue container name, or False on failure.
        "Success" means a majority of the queue container's primary nodes
        accepted the update.
"""
container_name = get_reconciler_container_name(obj_timestamp)
object_name = get_reconciler_obj_name(obj_policy_index, account,
container, obj)
if force:
# this allows an operator to re-enqueue an object that has
# already been popped from the queue to be reprocessed, but
# could potentially prevent out of order updates from making it
# into the queue
x_timestamp = Timestamp.now().internal
else:
x_timestamp = obj_timestamp
q_op_type = get_reconciler_content_type(op)
headers = {
'X-Size': 0,
'X-Etag': obj_timestamp,
'X-Timestamp': x_timestamp,
'X-Content-Type': q_op_type,
USE_REPLICATION_NETWORK_HEADER: 'true',
}
def _check_success(*args, **kwargs):
try:
direct_put_container_object(*args, **kwargs)
return 1
except (ClientException, Timeout, socket.error):
return 0
pile = GreenPile()
part, nodes = container_ring.get_nodes(MISPLACED_OBJECTS_ACCOUNT,
container_name)
for node in nodes:
pile.spawn(_check_success, node, part, MISPLACED_OBJECTS_ACCOUNT,
container_name, object_name, headers=headers,
conn_timeout=conn_timeout,
response_timeout=response_timeout)
successes = sum(pile)
if successes >= majority_size(len(nodes)):
return container_name
else:
return False
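# Illustrative usage sketch (not part of the original module): to enqueue a
# PUT of /AUTH_test/c/o that was found in policy index 1 at timestamp
# '1234567890.12345', a caller might do:
#
#   queue_container = add_to_reconciler_queue(
#       container_ring, 'AUTH_test', 'c', 'o', 1, '1234567890.12345', 'PUT')
#
# On success ``queue_container`` is the queue container name (here
# '1234566000'); on failure it is False.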
def slightly_later_timestamp(ts, offset=1):
return Timestamp(ts, offset=offset).internal
def parse_raw_obj(obj_info):
"""
Translate a reconciler container listing entry to a dictionary
containing the parts of the misplaced object queue entry.
    :param obj_info: an entry in a container listing with the
required keys: name, content_type, and hash
:returns: a queue entry dict with the keys: q_policy_index, account,
container, obj, q_op, q_ts, q_record, and path
"""
if six.PY2:
raw_obj_name = obj_info['name'].encode('utf-8')
else:
raw_obj_name = obj_info['name']
policy_index, obj_name = raw_obj_name.split(':', 1)
q_policy_index = int(policy_index)
account, container, obj = split_path(obj_name, 3, 3, rest_with_last=True)
try:
q_op = {
'application/x-put': 'PUT',
'application/x-delete': 'DELETE',
}[obj_info['content_type']]
except KeyError:
raise ValueError('invalid operation type %r' %
obj_info.get('content_type', None))
return {
'q_policy_index': q_policy_index,
'account': account,
'container': container,
'obj': obj,
'q_op': q_op,
'q_ts': decode_timestamps((obj_info['hash']))[0],
'q_record': last_modified_date_to_timestamp(
obj_info['last_modified']),
'path': '/%s/%s/%s' % (account, container, obj)
}
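# Illustrative example (not in the original source): a queue listing entry
# such as
#
#   {'name': '1:/AUTH_test/c/o',
#    'content_type': 'application/x-put',
#    'hash': '0000001234.00000',
#    'last_modified': '1970-01-01T00:20:35.000000'}
#
# parses to q_policy_index=1, account='AUTH_test', container='c', obj='o' and
# q_op='PUT', with q_ts decoded from 'hash' and q_record derived from
# 'last_modified'.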
@LRUCache(maxtime=CONTAINER_POLICY_TTL)
def direct_get_container_policy_index(container_ring, account_name,
container_name):
"""
Talk directly to the primary container servers to figure out the storage
policy index for a given container.
:param container_ring: ring in which to look up the container locations
:param account_name: name of the container's account
:param container_name: name of the container
:returns: storage policy index, or None if it couldn't get a majority
"""
def _eat_client_exception(*args):
try:
return direct_head_container(*args, headers={
USE_REPLICATION_NETWORK_HEADER: 'true'})
except ClientException as err:
if err.http_status == 404:
return err.http_headers
except (Timeout, socket.error):
pass
pile = GreenPile()
part, nodes = container_ring.get_nodes(account_name, container_name)
for node in nodes:
pile.spawn(_eat_client_exception, node, part, account_name,
container_name)
headers = [x for x in pile if x is not None]
if len(headers) < majority_size(len(nodes)):
return
return best_policy_index(headers)
def direct_delete_container_entry(container_ring, account_name, container_name,
object_name, headers=None):
"""
Talk directly to the primary container servers to delete a particular
object listing. Does not talk to object servers; use this only when a
container entry does not actually have a corresponding object.
"""
if headers is None:
headers = {}
headers[USE_REPLICATION_NETWORK_HEADER] = 'true'
pool = GreenPool()
part, nodes = container_ring.get_nodes(account_name, container_name)
for node in nodes:
pool.spawn_n(direct_delete_container_object, node, part, account_name,
container_name, object_name, headers=headers)
# This either worked or it didn't; if it didn't, we'll retry on the next
# reconciler loop when we see the queue entry again.
pool.waitall()
class ContainerReconciler(Daemon):
"""
Move objects that are in the wrong storage policy.
"""
log_route = 'container-reconciler'
def __init__(self, conf, logger=None, swift=None):
self.conf = conf
# This option defines how long an un-processable misplaced object
# marker will be retried before it is abandoned. It is not coupled
# with the tombstone reclaim age in the consistency engine.
self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
self.interval = float(conf.get('interval', 30))
conf_path = conf.get('__file__') or \
'/etc/swift/container-reconciler.conf'
self.logger = logger or get_logger(
conf, log_route=self.log_route)
request_tries = int(conf.get('request_tries') or 3)
self.swift = swift or InternalClient(
conf_path,
'Swift Container Reconciler',
request_tries,
use_replication_network=True,
global_conf={'log_name': '%s-ic' % conf.get(
'log_name', self.log_route)})
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.stats = defaultdict(int)
self.last_stat_time = time.time()
self.ring_check_interval = float(conf.get('ring_check_interval', 15))
self.concurrency = int(conf.get('concurrency', 1))
if self.concurrency < 1:
raise ValueError("concurrency must be set to at least 1")
self.processes = int(self.conf.get('processes', 0))
if self.processes < 0:
raise ValueError(
'processes must be an integer greater than or equal to 0')
self.process = int(self.conf.get('process', 0))
if self.process < 0:
raise ValueError(
'process must be an integer greater than or equal to 0')
if self.processes and self.process >= self.processes:
raise ValueError(
'process must be less than processes')
def stats_log(self, metric, msg, *args, **kwargs):
"""
Update stats tracking for metric and emit log message.
"""
level = kwargs.pop('level', logging.DEBUG)
log_message = '%s: ' % metric + msg
self.logger.log(level, log_message, *args, **kwargs)
self.stats[metric] += 1
def log_stats(self, force=False):
"""
        Dump stats to the logger; this is a no-op when stats have already
        been logged within the last minute.
"""
now = time.time()
should_log = force or (now - self.last_stat_time > 60)
if should_log:
self.last_stat_time = now
self.logger.info('Reconciler Stats: %r', dict(**self.stats))
def pop_queue(self, container, obj, q_ts, q_record):
"""
Issue a delete object request to the container for the misplaced
object queue entry.
:param container: the misplaced objects container
:param obj: the name of the misplaced object
:param q_ts: the timestamp of the misplaced object
:param q_record: the timestamp of the queue entry
N.B. q_ts will normally be the same time as q_record except when
        an object was manually re-enqueued.
"""
q_path = '/%s/%s/%s' % (MISPLACED_OBJECTS_ACCOUNT, container, obj)
x_timestamp = slightly_later_timestamp(max(q_record, q_ts))
self.stats_log('pop_queue', 'remove %r (%f) from the queue (%s)',
q_path, q_ts, x_timestamp)
headers = {'X-Timestamp': x_timestamp}
direct_delete_container_entry(
self.swift.container_ring, MISPLACED_OBJECTS_ACCOUNT,
container, obj, headers=headers)
def can_reconcile_policy(self, policy_index):
pol = POLICIES.get_by_index(policy_index)
if pol:
pol.load_ring(self.swift_dir, reload_time=self.ring_check_interval)
return pol.object_ring.next_part_power is None
return False
def throw_tombstones(self, account, container, obj, timestamp,
policy_index, path):
"""
Issue a delete object request to the given storage_policy.
:param account: the account name
:param container: the container name
:param obj: the object name
:param timestamp: the timestamp of the object to delete
:param policy_index: the policy index to direct the request
:param path: the path to be used for logging
"""
x_timestamp = slightly_later_timestamp(timestamp)
self.stats_log('cleanup_attempt', '%r (%f) from policy_index '
'%s (%s) will be deleted',
path, timestamp, policy_index, x_timestamp)
headers = {
'X-Timestamp': x_timestamp,
'X-Backend-Storage-Policy-Index': policy_index,
}
success = False
try:
self.swift.delete_object(account, container, obj,
acceptable_statuses=(2, 404),
headers=headers)
except UnexpectedResponse as err:
self.stats_log('cleanup_failed', '%r (%f) was not cleaned up '
'in storage_policy %s (%s)', path, timestamp,
policy_index, err)
else:
success = True
self.stats_log('cleanup_success', '%r (%f) was successfully '
'removed from policy_index %s', path, timestamp,
policy_index)
return success
def _reconcile_object(self, account, container, obj, q_policy_index, q_ts,
q_op, path, **kwargs):
"""
Perform object reconciliation.
:param account: the account name of the misplaced object
:param container: the container name of the misplaced object
:param obj: the object name
:param q_policy_index: the policy index of the source indicated by the
queue entry.
:param q_ts: the timestamp of the misplaced object
:param q_op: the operation of the misplaced request
:param path: the full path of the misplaced object for logging
:returns: True to indicate the request is fully processed
successfully, otherwise False.
"""
container_policy_index = direct_get_container_policy_index(
self.swift.container_ring, account, container)
if container_policy_index is None:
self.stats_log('unavailable_container', '%r (%f) unable to '
'determine the destination policy_index',
path, q_ts)
return False
if container_policy_index == q_policy_index:
self.stats_log('noop_object', '%r (%f) container policy_index '
'%s matches queue policy index %s', path, q_ts,
container_policy_index, q_policy_index)
return True
# don't reconcile if the source or container policy_index is in the
# middle of a PPI
if not self.can_reconcile_policy(q_policy_index):
self.stats_log('ppi_skip', 'Source policy (%r) in the middle of '
'a part power increase (PPI)', q_policy_index)
return False
if not self.can_reconcile_policy(container_policy_index):
self.stats_log('ppi_skip', 'Container policy (%r) in the middle '
'of a part power increase (PPI)',
container_policy_index)
return False
# check if object exists in the destination already
self.logger.debug('checking for %r (%f) in destination '
'policy_index %s', path, q_ts,
container_policy_index)
headers = {
'X-Backend-Storage-Policy-Index': container_policy_index}
try:
dest_obj = self.swift.get_object_metadata(
account, container, obj, headers=headers,
acceptable_statuses=(2, 4))
except UnexpectedResponse:
self.stats_log('unavailable_destination', '%r (%f) unable to '
'determine the destination timestamp, if any',
path, q_ts)
return False
dest_ts = Timestamp(dest_obj.get('x-backend-timestamp', 0))
if dest_ts >= q_ts:
self.stats_log('found_object', '%r (%f) in policy_index %s '
'is newer than queue (%f)', path, dest_ts,
container_policy_index, q_ts)
return self.throw_tombstones(account, container, obj, q_ts,
q_policy_index, path)
# object is misplaced
self.stats_log('misplaced_object', '%r (%f) in policy_index %s '
'should be in policy_index %s', path, q_ts,
q_policy_index, container_policy_index)
# fetch object from the source location
self.logger.debug('fetching %r (%f) from storage policy %s', path,
q_ts, q_policy_index)
headers = {
'X-Backend-Storage-Policy-Index': q_policy_index}
try:
source_obj_status, source_obj_info, source_obj_iter = \
self.swift.get_object(account, container, obj,
headers=headers,
acceptable_statuses=(2, 4))
except UnexpectedResponse as err:
source_obj_status = err.resp.status_int
source_obj_info = {}
source_obj_iter = None
source_ts = Timestamp(source_obj_info.get('x-backend-timestamp', 0))
if source_obj_status == 404 and q_op == 'DELETE':
return self.ensure_tombstone_in_right_location(
q_policy_index, account, container, obj, q_ts, path,
container_policy_index, source_ts)
else:
return self.ensure_object_in_right_location(
q_policy_index, account, container, obj, q_ts, path,
container_policy_index, source_ts, source_obj_status,
source_obj_info, source_obj_iter)
def ensure_object_in_right_location(self, q_policy_index, account,
container, obj, q_ts, path,
container_policy_index, source_ts,
source_obj_status, source_obj_info,
source_obj_iter, **kwargs):
"""
Validate source object will satisfy the misplaced object queue entry
and move to destination.
:param q_policy_index: the policy_index for the source object
:param account: the account name of the misplaced object
:param container: the container name of the misplaced object
:param obj: the name of the misplaced object
:param q_ts: the timestamp of the misplaced object
:param path: the full path of the misplaced object for logging
:param container_policy_index: the policy_index of the destination
:param source_ts: the timestamp of the source object
        :param source_obj_status: the HTTP status of the source object request
:param source_obj_info: the HTTP headers of the source object request
:param source_obj_iter: the body iter of the source object request
"""
if source_obj_status // 100 != 2 or source_ts < q_ts:
if q_ts < time.time() - self.reclaim_age:
# it's old and there are no tombstones or anything; give up
self.stats_log('lost_source', '%r (%s) was not available in '
'policy_index %s and has expired', path,
q_ts.internal, q_policy_index,
level=logging.CRITICAL)
return True
# the source object is unavailable or older than the queue
# entry; a version that will satisfy the queue entry hopefully
# exists somewhere in the cluster, so wait and try again
self.stats_log('unavailable_source', '%r (%s) in '
'policy_index %s responded %s (%s)', path,
q_ts.internal, q_policy_index, source_obj_status,
source_ts.internal, level=logging.WARNING)
return False
# optimistically move any source with a timestamp >= q_ts
ts = max(Timestamp(source_ts), q_ts)
# move the object
put_timestamp = slightly_later_timestamp(ts, offset=2)
self.stats_log('copy_attempt', '%r (%f) in policy_index %s will be '
'moved to policy_index %s (%s)', path, source_ts,
q_policy_index, container_policy_index, put_timestamp)
headers = source_obj_info.copy()
headers['X-Backend-Storage-Policy-Index'] = container_policy_index
headers['X-Timestamp'] = put_timestamp
try:
self.swift.upload_object(
FileLikeIter(source_obj_iter), account, container, obj,
headers=headers)
except UnexpectedResponse as err:
self.stats_log('copy_failed', 'upload %r (%f) from '
'policy_index %s to policy_index %s '
'returned %s', path, source_ts, q_policy_index,
container_policy_index, err, level=logging.WARNING)
return False
except: # noqa
self.stats_log('unhandled_error', 'unable to upload %r (%f) '
'from policy_index %s to policy_index %s ', path,
source_ts, q_policy_index, container_policy_index,
level=logging.ERROR, exc_info=True)
return False
self.stats_log('copy_success', '%r (%f) moved from policy_index %s '
'to policy_index %s (%s)', path, source_ts,
q_policy_index, container_policy_index, put_timestamp)
return self.throw_tombstones(account, container, obj, q_ts,
q_policy_index, path)
def ensure_tombstone_in_right_location(self, q_policy_index, account,
container, obj, q_ts, path,
container_policy_index, source_ts,
**kwargs):
"""
Issue a DELETE request against the destination to match the
misplaced DELETE against the source.
"""
delete_timestamp = slightly_later_timestamp(q_ts, offset=2)
self.stats_log('delete_attempt', '%r (%f) in policy_index %s '
'will be deleted from policy_index %s (%s)', path,
source_ts, q_policy_index, container_policy_index,
delete_timestamp)
headers = {
'X-Backend-Storage-Policy-Index': container_policy_index,
'X-Timestamp': delete_timestamp,
}
try:
self.swift.delete_object(account, container, obj,
headers=headers)
except UnexpectedResponse as err:
self.stats_log('delete_failed', 'delete %r (%f) from '
'policy_index %s (%s) returned %s', path,
source_ts, container_policy_index,
delete_timestamp, err, level=logging.WARNING)
return False
except: # noqa
self.stats_log('unhandled_error', 'unable to delete %r (%f) '
'from policy_index %s (%s)', path, source_ts,
container_policy_index, delete_timestamp,
level=logging.ERROR, exc_info=True)
return False
self.stats_log('delete_success', '%r (%f) deleted from '
'policy_index %s (%s)', path, source_ts,
container_policy_index, delete_timestamp,
level=logging.INFO)
return self.throw_tombstones(account, container, obj, q_ts,
q_policy_index, path)
def reconcile_object(self, info):
"""
Process a possibly misplaced object write request. Determine correct
destination storage policy by checking with primary containers. Check
source and destination, copying or deleting into destination and
cleaning up the source as needed.
This method wraps _reconcile_object for exception handling.
:param info: a queue entry dict
:returns: True to indicate the request is fully processed
successfully, otherwise False.
"""
self.logger.debug('checking placement for %r (%f) '
'in policy_index %s', info['path'],
info['q_ts'], info['q_policy_index'])
success = False
try:
success = self._reconcile_object(**info)
except: # noqa
self.logger.exception('Unhandled Exception trying to '
'reconcile %r (%f) in policy_index %s',
info['path'], info['q_ts'],
info['q_policy_index'])
if success:
metric = 'success'
msg = 'was handled successfully'
else:
metric = 'retry'
msg = 'must be retried'
msg = '%(path)r (%(q_ts)f) in policy_index %(q_policy_index)s ' + msg
self.stats_log(metric, msg, info, level=logging.INFO)
self.log_stats()
return success
def _iter_containers(self):
"""
Generate a list of containers to process.
"""
# hit most recent container first instead of waiting on the updaters
current_container = get_reconciler_container_name(time.time())
yield current_container
self.logger.debug('looking for containers in %s',
MISPLACED_OBJECTS_ACCOUNT)
container_gen = self.swift.iter_containers(MISPLACED_OBJECTS_ACCOUNT)
while True:
one_page = None
try:
one_page = list(itertools.islice(
container_gen, constraints.CONTAINER_LISTING_LIMIT))
except UnexpectedResponse as err:
self.logger.error('Error listing containers in '
'account %s (%s)',
MISPLACED_OBJECTS_ACCOUNT, err)
if not one_page:
# don't generally expect more than one page
break
# reversed order since we expect older containers to be empty
for c in reversed(one_page):
container = c['name']
if six.PY2:
# encoding here is defensive
container = container.encode('utf8')
if container == current_container:
continue # we've already hit this one this pass
yield container
def _iter_objects(self, container):
"""
Generate a list of objects to process.
:param container: the name of the container to process
If the given container is empty and older than reclaim_age this
processor will attempt to reap it.
"""
self.logger.debug('looking for objects in %s', container)
found_obj = False
try:
for raw_obj in self.swift.iter_objects(
MISPLACED_OBJECTS_ACCOUNT, container):
found_obj = True
yield raw_obj
except UnexpectedResponse as err:
self.logger.error('Error listing objects in container %s (%s)',
container, err)
if float(container) < time.time() - self.reclaim_age and \
not found_obj:
# Try to delete old empty containers so the queue doesn't
# grow without bound. It's ok if there's a conflict.
self.swift.delete_container(
MISPLACED_OBJECTS_ACCOUNT, container,
acceptable_statuses=(2, 404, 409, 412))
def should_process(self, queue_item):
"""
Check if a given entry should be handled by this process.
        :param queue_item: an entry from the queue
        :returns: True if this process should handle the given entry, False
            otherwise
"""
if not self.processes:
return True
hexdigest = hash_path(
queue_item['account'], queue_item['container'], queue_item['obj'])
return int(hexdigest, 16) % self.processes == self.process
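    # Illustrative example (not in the original source): with processes=4 and
    # process=2 configured, this worker only handles queue items whose
    # account/container/object hash, taken modulo 4, equals 2; with process
    # values 0-3 spread across four workers the queue is partitioned with no
    # overlap.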
def process_queue_item(self, q_container, q_entry, queue_item):
"""
Process an entry and remove from queue on success.
:param q_container: the queue container
:param q_entry: the raw_obj name from the q_container
:param queue_item: a parsed entry from the queue
"""
finished = self.reconcile_object(queue_item)
if finished:
self.pop_queue(q_container, q_entry,
queue_item['q_ts'],
queue_item['q_record'])
def reconcile(self):
"""
Main entry point for concurrent processing of misplaced objects.
Iterate over all queue entries and delegate processing to spawned
workers in the pool.
"""
self.logger.debug('pulling items from the queue')
pool = GreenPool(self.concurrency)
for container in self._iter_containers():
self.logger.debug('checking container %s', container)
for raw_obj in self._iter_objects(container):
try:
queue_item = parse_raw_obj(raw_obj)
except Exception:
self.stats_log('invalid_record',
'invalid queue record: %r', raw_obj,
level=logging.ERROR, exc_info=True)
continue
if self.should_process(queue_item):
pool.spawn_n(self.process_queue_item,
container, raw_obj['name'], queue_item)
self.log_stats()
pool.waitall()
def run_once(self, *args, **kwargs):
"""
Process every entry in the queue.
"""
try:
self.reconcile()
except: # noqa
self.logger.exception('Unhandled Exception trying to reconcile')
self.log_stats(force=True)
def run_forever(self, *args, **kwargs):
while True:
self.run_once(*args, **kwargs)
self.stats = defaultdict(int)
self.logger.info('sleeping between intervals (%ss)', self.interval)
time.sleep(self.interval)
| swift-master | swift/container/reconciler.py |
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import errno
from swift.common.utils import audit_location_generator, mkdirs
from swift.container.backend import DATADIR
SYNC_DATADIR = 'sync_containers'
class ContainerSyncStore(object):
"""
Filesystem based store for local containers that needs to be synced.
The store holds a list of containers that need to be synced by the
container sync daemon. The store is local to the container server node,
that is, only containers whose databases are kept locally on the node are
listed.
"""
def __init__(self, devices, logger, mount_check):
self.devices = os.path.normpath(os.path.join('/', devices)) + '/'
self.logger = logger
self.mount_check = mount_check
def _container_to_synced_container_path(self, path):
# path is assumed to be of the form:
# /srv/node/sdb/containers/part/.../*.db
# or more generally:
# devices/device/containers/part/.../*.db
        # Below we split the path into the following parts:
# devices, device, rest
devices = self.devices
path = os.path.normpath(path)
device = path[len(devices):path.rfind(DATADIR)]
rest = path[path.rfind(DATADIR) + len(DATADIR) + 1:]
return os.path.join(devices, device, SYNC_DATADIR, rest)
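    # Illustrative example (not in the original source): with devices set to
    # '/srv/node', a container db at
    #   /srv/node/sdb/containers/1019/d3e/<hash>/<hash>.db
    # maps to the sync store path
    #   /srv/node/sdb/sync_containers/1019/d3e/<hash>/<hash>.db
    # and _synced_container_to_container_path() below performs the reverse
    # mapping.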
def _synced_container_to_container_path(self, path):
# synced path is assumed to be of the form:
# /srv/node/sdb/sync_containers/part/.../*.db
# or more generally:
# devices/device/sync_containers/part/.../*.db
        # Below we split the path into the following parts:
# devices, device, rest
devices = self.devices
path = os.path.normpath(path)
device = path[len(devices):path.rfind(SYNC_DATADIR)]
rest = path[path.rfind(SYNC_DATADIR) + len(SYNC_DATADIR) + 1:]
return os.path.join(devices, device, DATADIR, rest)
def add_synced_container(self, broker):
"""
Adds the container db represented by broker to the list of synced
containers.
:param broker: An instance of ContainerBroker representing the
container to add.
"""
sync_file = self._container_to_synced_container_path(broker.db_file)
stat = None
try:
stat = os.stat(sync_file)
except OSError as oserr:
if oserr.errno != errno.ENOENT:
raise oserr
if stat is not None:
return
sync_path = os.path.dirname(sync_file)
mkdirs(sync_path)
try:
os.symlink(broker.db_file, sync_file)
except OSError as oserr:
if (oserr.errno != errno.EEXIST or
not os.path.islink(sync_file)):
raise oserr
def remove_synced_container(self, broker):
"""
Removes the container db represented by broker from the list of synced
containers.
:param broker: An instance of ContainerBroker representing the
container to remove.
"""
sync_file = broker.db_file
sync_file = self._container_to_synced_container_path(sync_file)
try:
os.unlink(sync_file)
os.removedirs(os.path.dirname(sync_file))
except OSError as oserr:
if oserr.errno != errno.ENOENT:
raise oserr
def update_sync_store(self, broker):
"""
Add or remove a symlink to/from the sync-containers directory
according to the broker's metadata.
Decide according to the broker x-container-sync-to and
x-container-sync-key whether a symlink needs to be added or
removed.
        If neither metadata item appears at all, the container has never been
        set up for sync (at least not within reclaim_age), in which case we
        do nothing. This is important as this method is called for ALL
        containers by the container replicator.
        Once we know something needs to be done, we first check whether the
        container is marked for delete, in which case we remove the symlink.
        A symlink is added only when both x-container-sync-to and
        x-container-sync-key exist and are valid, that is, not empty.
        If we reach the final case, the container is not marked for delete
        and the condition for adding a symlink is not met; the conclusion is
        that the symlink needs to be removed.
:param broker: An instance of ContainerBroker
"""
# If the broker metadata does not have both x-container-sync-to
# and x-container-sync-key it has *never* been set. Make sure
# we do nothing in this case
if ('X-Container-Sync-To' not in broker.metadata and
'X-Container-Sync-Key' not in broker.metadata):
return
if broker.is_deleted():
self.remove_synced_container(broker)
return
# If both x-container-sync-to and x-container-sync-key
# exist and valid, add the symlink
sync_to = sync_key = None
if 'X-Container-Sync-To' in broker.metadata:
sync_to = broker.metadata['X-Container-Sync-To'][0]
if 'X-Container-Sync-Key' in broker.metadata:
sync_key = broker.metadata['X-Container-Sync-Key'][0]
if sync_to and sync_key:
self.add_synced_container(broker)
return
self.remove_synced_container(broker)
def synced_containers_generator(self):
"""
Iterates over the list of synced containers
yielding the path of the container db
"""
all_locs = audit_location_generator(self.devices, SYNC_DATADIR, '.db',
mount_check=self.mount_check,
logger=self.logger)
for path, device, partition in all_locs:
            # What we want to yield is the real path as it's being used for
            # initiating a container broker. The broker would break if not
            # given the real db path, as it e.g. assumes the existence of
# .pending in the same path
yield self._synced_container_to_container_path(path)
| swift-master | swift/container/sync_store.py |
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import errno
import json
import logging
import operator
import time
from collections import defaultdict
from operator import itemgetter
from random import random
import os
import six
from six.moves.urllib.parse import quote
from eventlet import Timeout
from contextlib import contextmanager
from swift.common import internal_client
from swift.common.constraints import check_drive, AUTO_CREATE_ACCOUNT_PREFIX
from swift.common.direct_client import (direct_put_container,
DirectClientException)
from swift.common.request_helpers import USE_REPLICATION_NETWORK_HEADER
from swift.common.ring.utils import is_local_device
from swift.common.swob import str_to_wsgi
from swift.common.utils import get_logger, config_true_value, \
dump_recon_cache, whataremyips, Timestamp, ShardRange, GreenAsyncPile, \
config_positive_int_value, quorum_size, parse_override_options, \
Everything, config_auto_int_value, ShardRangeList, config_percent_value, \
node_to_string
from swift.container.backend import ContainerBroker, \
RECORD_TYPE_SHARD, UNSHARDED, SHARDING, SHARDED, COLLAPSED, \
SHARD_UPDATE_STATES, sift_shard_ranges, SHARD_UPDATE_STAT_STATES
from swift.container.replicator import ContainerReplicator
CLEAVE_SUCCESS = 0
CLEAVE_FAILED = 1
CLEAVE_EMPTY = 2
def sharding_enabled(broker):
# NB all shards will by default have been created with
# X-Container-Sysmeta-Sharding set and will therefore be candidates for
# sharding, along with explicitly configured root containers.
sharding = broker.metadata.get('X-Container-Sysmeta-Sharding')
if sharding and config_true_value(sharding[0]):
return True
# if broker has been marked deleted it will have lost sysmeta, but we still
# need to process the broker (for example, to shrink any shard ranges) so
    # fall back to checking if it has any shard ranges
if broker.has_other_shard_ranges():
return True
return False
def make_shard_ranges(broker, shard_data, shards_account_prefix):
timestamp = Timestamp.now()
shard_ranges = []
for data in shard_data:
# Make a copy so we don't mutate the original
kwargs = data.copy()
path = ShardRange.make_path(
shards_account_prefix + broker.root_account,
broker.root_container, broker.container,
timestamp, kwargs.pop('index'))
shard_ranges.append(ShardRange(path, timestamp, **kwargs))
return shard_ranges
def _find_discontinuity(paths, start):
# select the path that reaches furthest from start into the namespace
start_paths = [path for path in paths if path.lower == start]
start_paths.sort(key=lambda p: p.upper)
longest_start_path = start_paths[-1]
    # search for paths that end further into the namespace (note: these must
    # have a lower that differs from the start_path upper, otherwise they
    # would already be part of a longer start_path!)
end_paths = [path for path in paths
if path.upper > longest_start_path.upper]
if end_paths:
# select those that begin nearest the start of the namespace
end_paths.sort(key=lambda p: p.lower)
end_paths = [p for p in end_paths if p.lower == end_paths[0].lower]
# select the longest of those
end_paths.sort(key=lambda p: p.upper)
longest_end_path = end_paths[-1]
else:
longest_end_path = None
return longest_start_path, longest_end_path
def find_paths_with_gaps(shard_ranges, within_range=None):
"""
Find gaps in the shard ranges and pairs of shard range paths that lead to
and from those gaps. For each gap a single pair of adjacent paths is
selected. The concatenation of all selected paths and gaps will span the
entire namespace with no overlaps.
:param shard_ranges: a list of instances of ShardRange.
:param within_range: an optional ShardRange that constrains the search
space; the method will only return gaps within this range. The default
is the entire namespace.
:return: A list of tuples of ``(start_path, gap_range, end_path)`` where
``start_path`` is a list of ShardRanges leading to the gap,
``gap_range`` is a ShardRange synthesized to describe the namespace
gap, and ``end_path`` is a list of ShardRanges leading from the gap.
When gaps start or end at the namespace minimum or maximum bounds,
``start_path`` and ``end_path`` may be 'null' paths that contain a
single ShardRange covering either the minimum or maximum of the
namespace.
"""
timestamp = Timestamp.now()
within_range = within_range or ShardRange('entire/namespace', timestamp)
shard_ranges = ShardRangeList(shard_ranges)
# note: find_paths results do not include shrinking ranges
paths = find_paths(shard_ranges)
# add paths covering no namespace at start and end of namespace to ensure
# that a start_path and end_path is always found even when there is a gap
# at the start or end of the namespace
null_start = ShardRange('null/start', timestamp,
lower=ShardRange.MIN,
upper=ShardRange.MIN,
state=ShardRange.FOUND)
null_end = ShardRange('null/end', timestamp,
lower=ShardRange.MAX,
upper=ShardRange.MAX,
state=ShardRange.FOUND)
paths.extend([ShardRangeList([null_start]), ShardRangeList([null_end])])
paths_with_gaps = []
start = null_start.lower
while True:
start_path, end_path = _find_discontinuity(paths, start)
if end_path is None:
# end of namespace reached
break
start = end_path.lower
if start_path.upper > end_path.lower:
# overlap
continue
gap_range = ShardRange('gap/index_%06d' % len(paths_with_gaps),
timestamp,
lower=start_path.upper,
upper=end_path.lower)
if gap_range.overlaps(within_range):
paths_with_gaps.append((start_path, gap_range, end_path))
return paths_with_gaps
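# Illustrative example (not in the original source): given shard ranges
# covering (MIN - 'd'] and ('g' - MAX], the only namespace gap is ('d' - 'g'];
# this function would return a single tuple whose start_path ends at 'd',
# whose gap_range spans ('d' - 'g'] and whose end_path begins at 'g'.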
def _is_parent_or_child(shard_range, other, time_period):
"""
Test if shard range ``shard_range`` is the parent or a child of another
shard range ``other`` within past time period ``time_period``. This method
is limited to work only within the scope of the same user-facing account
(with and without shard prefix).
:param shard_range: an instance of ``ShardRange``.
:param other: an instance of ``ShardRange``.
:param time_period: the specified past time period in seconds. Value of
0 means all time in the past.
:return: True if ``shard_range`` is the parent or a child of ``other``
within past time period, False otherwise, assuming that they are within
the same account.
"""
exclude_age = (time.time() - float(time_period)) if time_period > 0 else 0
if shard_range.is_child_of(other) and shard_range.timestamp >= exclude_age:
return True
if other.is_child_of(shard_range) and other.timestamp >= exclude_age:
return True
return False
def find_overlapping_ranges(
shard_ranges, exclude_parent_child=False, time_period=0):
"""
Find all pairs of overlapping ranges in the given list.
    :param shard_ranges: A list of :class:`~swift.common.utils.ShardRange`
:param exclude_parent_child: If True then overlapping pairs that have a
parent-child relationship within the past time period
``time_period`` are excluded from the returned set. Default is
False.
:param time_period: the specified past time period in seconds. Value of
0 means all time in the past.
:return: a set of tuples, each tuple containing ranges that overlap with
each other.
"""
result = set()
for i, shard_range in enumerate(shard_ranges):
if exclude_parent_child:
overlapping = [
sr for sr in shard_ranges[i + 1:]
if shard_range.name != sr.name and shard_range.overlaps(sr) and
not _is_parent_or_child(shard_range, sr, time_period)]
else:
overlapping = [
sr for sr in shard_ranges[i + 1:]
if shard_range.name != sr.name and shard_range.overlaps(sr)]
if overlapping:
overlapping.append(shard_range)
overlapping.sort(key=ShardRange.sort_key)
result.add(tuple(overlapping))
return result
def is_sharding_candidate(shard_range, threshold):
# note: use *object* count as the condition for sharding: tombstones will
# eventually be reclaimed so should not trigger sharding
return (shard_range.state == ShardRange.ACTIVE and
shard_range.object_count >= threshold)
def is_shrinking_candidate(shard_range, shrink_threshold, expansion_limit,
states=None):
# typically shrink_threshold < expansion_limit but check both just in case
# note: use *row* count (objects plus tombstones) as the condition for
# shrinking to avoid inadvertently moving large numbers of tombstones into
# an acceptor
states = states or (ShardRange.ACTIVE,)
return (shard_range.state in states and
shard_range.row_count < shrink_threshold and
shard_range.row_count <= expansion_limit)
def find_sharding_candidates(broker, threshold, shard_ranges=None):
# this should only execute on root containers; the goal is to find
# large shard containers that should be sharded.
# First cut is simple: assume root container shard usage stats are good
# enough to make decision.
if shard_ranges is None:
shard_ranges = broker.get_shard_ranges(states=[ShardRange.ACTIVE])
candidates = []
for shard_range in shard_ranges:
if not is_sharding_candidate(shard_range, threshold):
continue
shard_range.update_state(ShardRange.SHARDING,
state_timestamp=Timestamp.now())
shard_range.epoch = shard_range.state_timestamp
candidates.append(shard_range)
return candidates
def find_shrinking_candidates(broker, shrink_threshold, expansion_limit):
# this is only here to preserve a legacy public function signature;
# superseded by find_compactible_shard_sequences
merge_pairs = {}
# restrict search to sequences with one donor
results = find_compactible_shard_sequences(broker, shrink_threshold,
expansion_limit, 1, -1,
include_shrinking=True)
for sequence in results:
# map acceptor -> donor list
merge_pairs[sequence[-1]] = sequence[-2]
return merge_pairs
def find_compactible_shard_sequences(broker,
shrink_threshold,
expansion_limit,
max_shrinking,
max_expanding,
include_shrinking=False):
"""
Find sequences of shard ranges that could be compacted into a single
acceptor shard range.
This function does not modify shard ranges.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param shrink_threshold: the number of rows below which a shard may be
considered for shrinking into another shard
:param expansion_limit: the maximum number of rows that an acceptor shard
range should have after other shard ranges have been compacted into it
:param max_shrinking: the maximum number of shard ranges that should be
compacted into each acceptor; -1 implies unlimited.
:param max_expanding: the maximum number of acceptors to be found (i.e. the
maximum number of sequences to be returned); -1 implies unlimited.
:param include_shrinking: if True then existing compactible sequences are
included in the results; default is False.
:returns: A list of :class:`~swift.common.utils.ShardRangeList` each
containing a sequence of neighbouring shard ranges that may be
compacted; the final shard range in the list is the acceptor
"""
# this should only execute on root containers that have sharded; the
# goal is to find small shard containers that could be retired by
# merging with a neighbour.
# First cut is simple: assume root container shard usage stats are good
# enough to make decision; only merge with upper neighbour so that
# upper bounds never change (shard names include upper bound).
shard_ranges = broker.get_shard_ranges()
own_shard_range = broker.get_own_shard_range()
def sequence_complete(sequence):
# a sequence is considered complete if any of the following are true:
        # - the final shard range has more rows than the shrink_threshold,
# so should not be shrunk (this shard will be the acceptor)
# - the max number of shard ranges to be compacted (max_shrinking) has
# been reached
# - the total number of objects in the sequence has reached the
# expansion_limit
if (sequence and
(not is_shrinking_candidate(
sequence[-1], shrink_threshold, expansion_limit,
states=(ShardRange.ACTIVE, ShardRange.SHRINKING)) or
0 < max_shrinking < len(sequence) or
sequence.row_count >= expansion_limit)):
return True
return False
compactible_sequences = []
index = 0
expanding = 0
while ((max_expanding < 0 or expanding < max_expanding) and
index < len(shard_ranges)):
if not is_shrinking_candidate(
shard_ranges[index], shrink_threshold, expansion_limit,
states=(ShardRange.ACTIVE, ShardRange.SHRINKING)):
# this shard range cannot be the start of a new or existing
# compactible sequence, move on
index += 1
continue
# start of a *possible* sequence
sequence = ShardRangeList([shard_ranges[index]])
for shard_range in shard_ranges[index + 1:]:
# attempt to add contiguous shard ranges to the sequence
if sequence.upper < shard_range.lower:
# found a gap! break before consuming this range because it
# could become the first in the next sequence
break
if shard_range.state not in (ShardRange.ACTIVE,
ShardRange.SHRINKING):
# found? created? sharded? don't touch it
break
if shard_range.state == ShardRange.SHRINKING:
# already shrinking: add to sequence unconditionally
sequence.append(shard_range)
elif (sequence.row_count + shard_range.row_count
<= expansion_limit):
# add to sequence: could be a donor or acceptor
sequence.append(shard_range)
if sequence_complete(sequence):
break
else:
break
index += len(sequence)
if (index == len(shard_ranges) and
len(shard_ranges) == len(sequence) and
not sequence_complete(sequence) and
sequence.includes(own_shard_range)):
# special case: only one sequence has been found, which consumes
# all shard ranges, encompasses the entire namespace, has no more
# than expansion_limit records and whose shard ranges are all
# shrinkable; all the shards in the sequence can be shrunk to the
# root, so append own_shard_range to the sequence to act as an
# acceptor; note: only shrink to the root when *all* the remaining
# shard ranges can be simultaneously shrunk to the root.
sequence.append(own_shard_range)
if len(sequence) < 2 or sequence[-1].state not in (ShardRange.ACTIVE,
ShardRange.SHARDED):
# this sequence doesn't end with a suitable acceptor shard range
continue
# all valid sequences are counted against the max_expanding allowance
# even if the sequence is already shrinking
expanding += 1
if (all([sr.state != ShardRange.SHRINKING for sr in sequence]) or
include_shrinking):
compactible_sequences.append(sequence)
return compactible_sequences
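# Illustrative usage sketch (not part of the original module): a caller
# looking for shrinkable shards on a root broker might, for example, call
#
#   sequences = find_compactible_shard_sequences(
#       broker, shrink_threshold=100000, expansion_limit=750000,
#       max_shrinking=1, max_expanding=-1)
#
# where each returned ShardRangeList holds one or more donors followed by the
# acceptor they could be shrunk into; the sequences can then be passed to
# process_compactible_shard_sequences(broker, sequences) to mark the donors
# SHRINKING and expand the acceptors.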
def finalize_shrinking(broker, acceptor_ranges, donor_ranges, timestamp):
"""
Update donor shard ranges to shrinking state and merge donors and acceptors
to broker.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param acceptor_ranges: A list of :class:`~swift.common.utils.ShardRange`
that are to be acceptors.
:param donor_ranges: A list of :class:`~swift.common.utils.ShardRange`
that are to be donors; these will have their state and timestamp
updated.
:param timestamp: timestamp to use when updating donor state
"""
for donor in donor_ranges:
if donor.update_state(ShardRange.SHRINKING):
            # Set donor state to shrinking; state_timestamp defines new epoch
donor.epoch = donor.state_timestamp = timestamp
broker.merge_shard_ranges(acceptor_ranges + donor_ranges)
def process_compactible_shard_sequences(broker, sequences):
"""
Transform the given sequences of shard ranges into a list of acceptors and
a list of shrinking donors. For each given sequence the final ShardRange in
the sequence (the acceptor) is expanded to accommodate the other
ShardRanges in the sequence (the donors). The donors and acceptors are then
merged into the broker.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param sequences: A list of :class:`~swift.common.utils.ShardRangeList`
"""
timestamp = Timestamp.now()
acceptor_ranges = []
shrinking_ranges = []
for sequence in sequences:
donors = sequence[:-1]
shrinking_ranges.extend(donors)
# Update the acceptor container with its expanded bounds to prevent it
# treating objects cleaved from the donor as misplaced.
acceptor = sequence[-1]
if acceptor.expand(donors):
acceptor.timestamp = timestamp
if acceptor.update_state(ShardRange.ACTIVE):
# Ensure acceptor state is ACTIVE (when acceptor is root)
acceptor.state_timestamp = timestamp
acceptor_ranges.append(acceptor)
finalize_shrinking(broker, acceptor_ranges, shrinking_ranges, timestamp)
def find_paths(shard_ranges):
"""
Returns a list of all continuous paths through the shard ranges. An
individual path may not necessarily span the entire namespace, but it will
span a continuous namespace without gaps.
:param shard_ranges: A list of :class:`~swift.common.utils.ShardRange`.
:return: A list of :class:`~swift.common.utils.ShardRangeList`.
"""
# A node is a point in the namespace that is used as a bound of any shard
# range. Shard ranges form the edges between nodes.
# First build a dict mapping nodes to a list of edges that leave that node
# (in other words, shard ranges whose lower bound equals the node)
node_successors = collections.defaultdict(list)
for shard_range in shard_ranges:
if shard_range.state == ShardRange.SHRINKING:
# shrinking shards are not a viable edge in any path
continue
node_successors[shard_range.lower].append(shard_range)
paths = []
def clone_path(other=None):
# create a new path, possibly cloning another path, and add it to the
# list of all paths through the shards
path = ShardRangeList() if other is None else ShardRangeList(other)
paths.append(path)
return path
# we need to keep track of every path that ends at each node so that when
# we visit the node we can extend those paths, or clones of them, with the
# edges that leave the node
paths_to_node = collections.defaultdict(list)
# visit the nodes in ascending order by name...
for node, edges in sorted(node_successors.items()):
if not edges:
# this node is a dead-end, so there's no path updates to make
continue
if not paths_to_node[node]:
# this is either the first node to be visited, or it has no paths
# leading to it, so we need to start a new path here
paths_to_node[node].append(clone_path([]))
for path_to_node in paths_to_node[node]:
# extend each path that arrives at this node with all of the
            # possible edges that leave the node; if more than one edge leaves
            # the node then we will make clones of the path to the node and
            # extend those clones, adding to the collection of all paths
            # through the shards
for i, edge in enumerate(edges):
if i == len(edges) - 1:
# the last edge is used to extend the original path to the
# node; there is nothing special about the last edge, but
# doing this last means the original path to the node can
# be cloned for all other edges before being modified here
path = path_to_node
else:
# for all but one of the edges leaving the node we need to
                    # make a clone of the original path
path = clone_path(path_to_node)
# extend the path with the edge
path.append(edge)
# keep track of which node this path now arrives at
paths_to_node[edge.upper].append(path)
return paths
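# Illustrative example (not in the original source): given shard ranges
# A(MIN - 'm'], B('m' - MAX], C('m' - 'x'] and D('x' - MAX], two edges leave
# node 'm' (B and C), so two complete paths are returned: [A, B] and
# [A, C, D]; each spans the namespace without gaps.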
def rank_paths(paths, shard_range_to_span):
"""
Sorts the given list of paths such that the most preferred path is the
first item in the list.
:param paths: A list of :class:`~swift.common.utils.ShardRangeList`.
:param shard_range_to_span: An instance of
:class:`~swift.common.utils.ShardRange` that describes the namespace
that would ideally be spanned by a path. Paths that include this
namespace will be preferred over those that do not.
:return: A sorted list of :class:`~swift.common.utils.ShardRangeList`.
"""
def sort_key(path):
# defines the order of preference for paths through shards
return (
# complete path for the namespace
path.includes(shard_range_to_span),
# most cleaving progress
path.find_lower(lambda sr: sr.state not in (
ShardRange.CLEAVED, ShardRange.ACTIVE)),
# largest object count
path.object_count,
# fewest timestamps
-1 * len(path.timestamps),
# newest timestamp
sorted(path.timestamps)[-1]
)
paths.sort(key=sort_key, reverse=True)
return paths
def combine_shard_ranges(new_shard_ranges, existing_shard_ranges):
"""
Combines new and existing shard ranges based on most recent state.
:param new_shard_ranges: a list of ShardRange instances.
:param existing_shard_ranges: a list of ShardRange instances.
:return: a list of ShardRange instances.
"""
new_shard_ranges = [dict(sr) for sr in new_shard_ranges]
existing_shard_ranges = [dict(sr) for sr in existing_shard_ranges]
to_add, to_delete = sift_shard_ranges(
new_shard_ranges,
dict((sr['name'], sr) for sr in existing_shard_ranges))
result = [ShardRange.from_dict(existing)
for existing in existing_shard_ranges
if existing['name'] not in to_delete]
result.extend([ShardRange.from_dict(sr) for sr in to_add])
return sorted([sr for sr in result if not sr.deleted],
key=ShardRange.sort_key)
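# A minimal sketch of the combination above (names and timestamps are
# hypothetical): if a newer copy of an existing named range arrives, e.g.
#
#   old = ShardRange('.shards_a/c-x', ts_old, 'a', 'b')
#   new = ShardRange('.shards_a/c-x', ts_new, 'a', 'b',
#                    state=ShardRange.ACTIVE)
#   combined = combine_shard_ranges([new], [old])
#
# then the most recent data for each named range wins, so ``combined`` holds
# a single up-to-date copy of '.shards_a/c-x'; ranges that end up deleted
# are dropped from the result entirely (see the final filter above).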
def update_own_shard_range_stats(broker, own_shard_range):
"""
Update the ``own_shard_range`` with the up-to-date object stats from
the ``broker``.
Note: this method does not persist the updated ``own_shard_range``;
callers should use ``broker.merge_shard_ranges`` if the updated stats
need to be persisted.
:param broker: an instance of ``ContainerBroker``.
:param own_shard_range: an instance of ``ShardRange``.
:returns: ``own_shard_range`` with up-to-date ``object_count``
and ``bytes_used``.
"""
info = broker.get_info()
own_shard_range.update_meta(
info['object_count'], info['bytes_used'])
return own_shard_range
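# Usage sketch (mirroring the call sites later in this module): callers
# refresh the stats immediately before reporting or persisting them, e.g.
#
#   own_sr = broker.get_own_shard_range()
#   update_own_shard_range_stats(broker, own_sr)
#   broker.merge_shard_ranges(own_sr)  # only if the stats must be persisted
#
# as the docstring notes, the helper itself never writes to the DB.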
class CleavingContext(object):
"""
Encapsulates metadata associated with the process of cleaving a retiring
DB. This metadata includes:
* ``ref``: The unique part of the key that is used when persisting a
serialized ``CleavingContext`` as sysmeta in the DB. The unique part of
the key is based off the DB id. This ensures that each context is
associated with a specific DB file. The unique part of the key is
included in the ``CleavingContext`` but should not be modified by any
caller.
* ``cursor``: the upper bound of the last shard range to have been
cleaved from the retiring DB.
* ``max_row``: the retiring DB's max row; this is updated to the value of
the retiring DB's ``max_row`` every time a ``CleavingContext`` is
loaded for that DB, and may change during the process of cleaving the
DB.
* ``cleave_to_row``: the value of ``max_row`` at the moment when cleaving
starts for the DB. When cleaving completes (i.e. the cleave cursor has
reached the upper bound of the cleaving namespace), ``cleave_to_row``
is compared to the current ``max_row``: if the two values are not equal
then rows have been added to the DB which may not have been cleaved, in
which case the ``CleavingContext`` is ``reset`` and cleaving is
re-started.
* ``last_cleave_to_row``: the minimum DB row from which cleaving should
select objects to cleave; this is initially set to None i.e. all rows
should be cleaved. If the ``CleavingContext`` is ``reset`` then the
``last_cleave_to_row`` is set to the current value of
``cleave_to_row``, which in turn is set to the current value of
``max_row`` by a subsequent call to ``start``. The repeated cleaving
therefore only selects objects in rows greater than the
``last_cleave_to_row``, rather than cleaving the whole DB again.
* ``ranges_done``: the number of shard ranges that have been cleaved from
the retiring DB.
* ``ranges_todo``: the number of shard ranges that are yet to be
cleaved from the retiring DB.
"""
def __init__(self, ref, cursor='', max_row=None, cleave_to_row=None,
last_cleave_to_row=None, cleaving_done=False,
misplaced_done=False, ranges_done=0, ranges_todo=0):
self.ref = ref
self._cursor = None
self.cursor = cursor
self.max_row = max_row
self.cleave_to_row = cleave_to_row
self.last_cleave_to_row = last_cleave_to_row
self.cleaving_done = cleaving_done
self.misplaced_done = misplaced_done
self.ranges_done = ranges_done
self.ranges_todo = ranges_todo
def __iter__(self):
yield 'ref', self.ref
yield 'cursor', self.cursor
yield 'max_row', self.max_row
yield 'cleave_to_row', self.cleave_to_row
yield 'last_cleave_to_row', self.last_cleave_to_row
yield 'cleaving_done', self.cleaving_done
yield 'misplaced_done', self.misplaced_done
yield 'ranges_done', self.ranges_done
yield 'ranges_todo', self.ranges_todo
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, ', '.join(
'%s=%r' % prop for prop in self))
def _encode(cls, value):
if value is not None and six.PY2 and isinstance(value, six.text_type):
return value.encode('utf-8')
return value
@property
def cursor(self):
return self._cursor
@cursor.setter
def cursor(self, value):
self._cursor = self._encode(value)
@property
def marker(self):
return self.cursor + '\x00'
@classmethod
def _make_ref(cls, broker):
return broker.get_info()['id']
@classmethod
def load_all(cls, broker):
"""
Returns all cleaving contexts stored in the broker's DB.
:param broker: an instance of :class:`ContainerBroker`
:return: list of tuples of (CleavingContext, timestamp)
"""
brokers = broker.get_brokers()
sysmeta = brokers[-1].get_sharding_sysmeta_with_timestamps()
contexts = []
for key, (val, timestamp) in sysmeta.items():
# If the value is blank, then the metadata is
# marked for deletion
if key.startswith("Context-") and val:
try:
contexts.append((cls(**json.loads(val)), timestamp))
except ValueError:
continue
return contexts
@classmethod
def load(cls, broker):
"""
Returns a CleavingContext tracking the cleaving progress of the given
broker's DB.
:param broker: an instance of :class:`ContainerBroker`
:return: An instance of :class:`CleavingContext`.
"""
brokers = broker.get_brokers()
ref = cls._make_ref(brokers[0])
data = brokers[-1].get_sharding_sysmeta('Context-' + ref)
data = json.loads(data) if data else {}
data['ref'] = ref
data['max_row'] = brokers[0].get_max_row()
return cls(**data)
def store(self, broker):
"""
Persists the serialized ``CleavingContext`` as sysmeta in the given
broker's DB.
:param broker: an instance of :class:`ContainerBroker`
"""
broker.set_sharding_sysmeta('Context-' + self.ref,
json.dumps(dict(self)))
def reset(self):
self.cursor = ''
self.ranges_done = 0
self.ranges_todo = 0
self.cleaving_done = False
self.misplaced_done = False
self.last_cleave_to_row = self.cleave_to_row
def start(self):
self.cursor = ''
self.ranges_done = 0
self.ranges_todo = 0
self.cleaving_done = False
self.cleave_to_row = self.max_row
def range_done(self, new_cursor):
self.ranges_done += 1
self.ranges_todo -= 1
self.cursor = new_cursor
def done(self):
return all((self.misplaced_done, self.cleaving_done,
self.max_row == self.cleave_to_row))
def delete(self, broker):
# These will get reclaimed when `_reclaim_metadata` in
# common/db.py is called.
broker.set_sharding_sysmeta('Context-' + self.ref, '')
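# A rough lifecycle sketch for CleavingContext (illustrative only; the real
# call sites are in ContainerSharder._cleave and friends below):
#
#   context = CleavingContext.load(broker)     # ref/max_row come from the DB
#   if not context.cursor:
#       context.start()                        # cleave_to_row = max_row
#   # ... cleave one shard range ...
#   context.range_done(shard_range.upper_str)  # advance the cursor
#   context.store(broker)                      # persist as sysmeta
#   if context.done():
#       pass  # cleaving and misplaced-object handling are both complete
#
# Contexts are eventually removed again by delete(), e.g. from
# _audit_cleave_contexts once they are done or stale.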
class ContainerSharderConf(object):
def __init__(self, conf=None):
conf = conf if conf else {}
def get_val(key, validator, default):
"""
Get a value from conf and validate it.
:param key: key to lookup value in the ``conf`` dict.
:param validator: A function that will be passed the value from the
``conf`` dict and should return the value to be set. This
function should raise a ValueError if the ``conf`` value is not
valid.
:param default: value to use if ``key`` is not found in ``conf``.
:raises: ValueError if the value read from ``conf`` is invalid.
:returns: the configuration value.
"""
try:
return validator(conf.get(key, default))
except ValueError as err:
raise ValueError('Error setting %s: %s' % (key, err))
self.shard_container_threshold = get_val(
'shard_container_threshold', config_positive_int_value, 1000000)
self.max_shrinking = get_val(
'max_shrinking', int, 1)
self.max_expanding = get_val(
'max_expanding', int, -1)
self.shard_scanner_batch_size = get_val(
'shard_scanner_batch_size', config_positive_int_value, 10)
self.cleave_batch_size = get_val(
'cleave_batch_size', config_positive_int_value, 2)
self.cleave_row_batch_size = get_val(
'cleave_row_batch_size', config_positive_int_value, 10000)
self.broker_timeout = get_val(
'broker_timeout', config_positive_int_value, 60)
self.recon_candidates_limit = get_val(
'recon_candidates_limit', int, 5)
self.recon_sharded_timeout = get_val(
'recon_sharded_timeout', int, 43200)
self.container_sharding_timeout = get_val(
'container_sharding_timeout', int, 172800)
self.conn_timeout = get_val(
'conn_timeout', float, 5)
self.auto_shard = get_val(
'auto_shard', config_true_value, False)
# deprecated percent options still loaded...
self.shrink_threshold = get_val(
'shard_shrink_point', self.percent_of_threshold, 10)
self.expansion_limit = get_val(
'shard_shrink_merge_point', self.percent_of_threshold, 75)
# ...but superseded by absolute options if present in conf
self.shrink_threshold = get_val(
'shrink_threshold', int, self.shrink_threshold)
self.expansion_limit = get_val(
'expansion_limit', int, self.expansion_limit)
self.rows_per_shard = get_val(
'rows_per_shard', config_positive_int_value,
max(self.shard_container_threshold // 2, 1))
self.minimum_shard_size = get_val(
'minimum_shard_size', config_positive_int_value,
max(self.rows_per_shard // 5, 1))
def percent_of_threshold(self, val):
return int(config_percent_value(val) * self.shard_container_threshold)
@classmethod
def validate_conf(cls, namespace):
ops = {'<': operator.lt,
'<=': operator.le}
checks = (('minimum_shard_size', '<=', 'rows_per_shard'),
('shrink_threshold', '<=', 'minimum_shard_size'),
('rows_per_shard', '<', 'shard_container_threshold'),
('expansion_limit', '<', 'shard_container_threshold'))
for key1, op, key2 in checks:
try:
val1 = getattr(namespace, key1)
val2 = getattr(namespace, key2)
except AttributeError:
# swift-manage-shard-ranges uses a subset of conf options for
# each command so only validate those actually in the namespace
continue
if not ops[op](val1, val2):
raise ValueError('%s (%d) must be %s %s (%d)'
% (key1, val1, op, key2, val2))
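# Worked example using the defaults above: shard_container_threshold=1000000
# gives rows_per_shard=500000 and minimum_shard_size=100000, while the
# deprecated percent options default to shrink_threshold=10% -> 100000 and
# expansion_limit=75% -> 750000. Those values satisfy every check in
# validate_conf:
#
#   100000 <= 500000   # minimum_shard_size <= rows_per_shard
#   100000 <= 100000   # shrink_threshold <= minimum_shard_size
#   500000 <  1000000  # rows_per_shard < shard_container_threshold
#   750000 <  1000000  # expansion_limit < shard_container_threshold
#
# Operator-supplied values that break any of these orderings raise a
# ValueError when the sharder is constructed.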
DEFAULT_SHARDER_CONF = vars(ContainerSharderConf())
class ContainerSharder(ContainerSharderConf, ContainerReplicator):
"""Shards containers."""
log_route = 'container-sharder'
def __init__(self, conf, logger=None):
logger = logger or get_logger(conf, log_route=self.log_route)
ContainerReplicator.__init__(self, conf, logger=logger)
ContainerSharderConf.__init__(self, conf)
ContainerSharderConf.validate_conf(self)
if conf.get('auto_create_account_prefix'):
self.logger.warning('Option auto_create_account_prefix is '
'deprecated. Configure '
'auto_create_account_prefix under the '
'swift-constraints section of '
'swift.conf. This option will '
'be ignored in a future release.')
auto_create_account_prefix = \
self.conf['auto_create_account_prefix']
else:
auto_create_account_prefix = AUTO_CREATE_ACCOUNT_PREFIX
self.shards_account_prefix = (auto_create_account_prefix + 'shards_')
self.sharding_candidates = []
self.shrinking_candidates = []
replica_count = self.ring.replica_count
quorum = quorum_size(replica_count)
self.shard_replication_quorum = config_auto_int_value(
conf.get('shard_replication_quorum'), quorum)
if self.shard_replication_quorum > replica_count:
self.logger.warning(
'shard_replication_quorum of %s exceeds replica count %s'
', reducing to %s', self.shard_replication_quorum,
replica_count, replica_count)
self.shard_replication_quorum = replica_count
self.existing_shard_replication_quorum = config_auto_int_value(
conf.get('existing_shard_replication_quorum'),
self.shard_replication_quorum)
if self.existing_shard_replication_quorum > replica_count:
self.logger.warning(
'existing_shard_replication_quorum of %s exceeds replica count'
' %s, reducing to %s', self.existing_shard_replication_quorum,
replica_count, replica_count)
self.existing_shard_replication_quorum = replica_count
# internal client
request_tries = config_positive_int_value(
conf.get('request_tries', 3))
internal_client_conf_path = conf.get('internal_client_conf_path',
'/etc/swift/internal-client.conf')
try:
self.int_client = internal_client.InternalClient(
internal_client_conf_path,
'Swift Container Sharder',
request_tries,
use_replication_network=True,
global_conf={'log_name': '%s-ic' % conf.get(
'log_name', self.log_route)})
except (OSError, IOError) as err:
if err.errno != errno.ENOENT and \
not str(err).endswith(' not found'):
raise
raise SystemExit(
'Unable to load internal client from config: %r (%s)' %
(internal_client_conf_path, err))
self.stats_interval = float(conf.get('stats_interval', '3600'))
self.reported = 0
def _format_log_msg(self, broker, msg, *args):
# make best effort to include broker properties...
try:
db_file = broker.db_file
except Exception: # noqa
db_file = ''
try:
path = broker.path
except Exception: # noqa
path = ''
if args:
msg = msg % args
return '%s, path: %s, db: %s' % (msg, quote(path), db_file)
def _log(self, level, broker, msg, *args):
if not self.logger.isEnabledFor(level):
return
self.logger.log(level, self._format_log_msg(broker, msg, *args))
def debug(self, broker, msg, *args, **kwargs):
self._log(logging.DEBUG, broker, msg, *args, **kwargs)
def info(self, broker, msg, *args, **kwargs):
self._log(logging.INFO, broker, msg, *args, **kwargs)
def warning(self, broker, msg, *args, **kwargs):
self._log(logging.WARNING, broker, msg, *args, **kwargs)
def error(self, broker, msg, *args, **kwargs):
self._log(logging.ERROR, broker, msg, *args, **kwargs)
def exception(self, broker, msg, *args, **kwargs):
if not self.logger.isEnabledFor(logging.ERROR):
return
self.logger.exception(self._format_log_msg(broker, msg, *args))
def _zero_stats(self):
"""Zero out the stats."""
super(ContainerSharder, self)._zero_stats()
# all sharding stats that are additional to the inherited replicator
# stats are maintained under the 'sharding' key in self.stats
self.stats['sharding'] = defaultdict(lambda: defaultdict(int))
self.sharding_candidates = []
self.shrinking_candidates = []
def _append_stat(self, category, key, value):
if not self.stats['sharding'][category][key]:
self.stats['sharding'][category][key] = list()
self.stats['sharding'][category][key].append(value)
def _min_stat(self, category, key, value):
current = self.stats['sharding'][category][key]
if not current:
self.stats['sharding'][category][key] = value
else:
self.stats['sharding'][category][key] = min(current, value)
def _max_stat(self, category, key, value):
current = self.stats['sharding'][category][key]
if not current:
self.stats['sharding'][category][key] = value
else:
self.stats['sharding'][category][key] = max(current, value)
def _increment_stat(self, category, key, statsd=False):
self._update_stat(category, key, step=1, statsd=statsd)
def _update_stat(self, category, key, step=1, statsd=False):
if step:
self.stats['sharding'][category][key] += step
if statsd:
statsd_key = '%s_%s' % (category, key)
self.logger.update_stats(statsd_key, step)
def _make_stats_info(self, broker, node, own_shard_range):
try:
file_size = os.stat(broker.db_file).st_size
except OSError:
file_size = None
return {'path': broker.db_file,
'node_index': node.get('index'),
'account': broker.account,
'container': broker.container,
'root': broker.root_path,
'object_count': own_shard_range.object_count,
'meta_timestamp': own_shard_range.meta_timestamp.internal,
'file_size': file_size}
def _identify_sharding_candidate(self, broker, node):
own_shard_range = broker.get_own_shard_range()
update_own_shard_range_stats(broker, own_shard_range)
if is_sharding_candidate(
own_shard_range, self.shard_container_threshold):
self.sharding_candidates.append(
self._make_stats_info(broker, node, own_shard_range))
def _identify_shrinking_candidate(self, broker, node):
sequences = find_compactible_shard_sequences(
broker, self.shrink_threshold, self.expansion_limit,
self.max_shrinking, self.max_expanding)
# compactible_ranges counts all ranges in each sequence apart from
# the final acceptor in that sequence
compactible_ranges = sum(len(seq) - 1 for seq in sequences)
if compactible_ranges:
own_shard_range = broker.get_own_shard_range()
update_own_shard_range_stats(broker, own_shard_range)
shrink_candidate = self._make_stats_info(
broker, node, own_shard_range)
# The number of ranges/donors that can be shrunk if the
# tool is used with the current max_shrinking, max_expanding
# settings.
shrink_candidate['compactible_ranges'] = compactible_ranges
self.shrinking_candidates.append(shrink_candidate)
def _transform_candidate_stats(self, category, candidates, sort_keys):
category['found'] = len(candidates)
candidates.sort(key=itemgetter(*sort_keys), reverse=True)
if self.recon_candidates_limit >= 0:
category['top'] = candidates[:self.recon_candidates_limit]
else:
category['top'] = candidates
def _record_sharding_progress(self, broker, node, error):
db_state = broker.get_db_state()
if db_state not in (UNSHARDED, SHARDING, SHARDED):
return
own_shard_range = broker.get_own_shard_range()
if own_shard_range.state not in ShardRange.CLEAVING_STATES:
return
if db_state == SHARDED:
contexts = CleavingContext.load_all(broker)
if not contexts:
return
context_ts = max(float(ts) for c, ts in contexts)
if context_ts + self.recon_sharded_timeout \
< float(Timestamp.now()):
# last context timestamp too old for the
# broker to be recorded
return
update_own_shard_range_stats(broker, own_shard_range)
info = self._make_stats_info(broker, node, own_shard_range)
info['state'] = own_shard_range.state_text
info['db_state'] = broker.get_db_state()
states = [ShardRange.FOUND, ShardRange.CREATED,
ShardRange.CLEAVED, ShardRange.ACTIVE]
shard_ranges = broker.get_shard_ranges(states=states)
state_count = {}
for state in states:
state_count[ShardRange.STATES[state]] = 0
for shard_range in shard_ranges:
state_count[shard_range.state_text] += 1
info.update(state_count)
info['error'] = error and str(error)
self._append_stat('sharding_in_progress', 'all', info)
if broker.sharding_required() and (
own_shard_range.epoch is not None) and (
float(own_shard_range.epoch) +
self.container_sharding_timeout <
time.time()):
# Note: there is no requirement that own_shard_range.epoch equals
# the time at which the own_shard_range was merged into the
# container DB, which is what actually allows sharding to start.
# But swift-manage-shard-ranges (s-m-s-r) and auto-sharding both set
# the epoch and then merge, so we use the epoch to tell whether
# sharding has been taking too long.
self.warning(
broker, 'Cleaving has not completed in %.2f seconds since %s. '
'DB state: %s, own_shard_range state: %s, state count of '
'shard ranges: %s' %
(time.time() - float(own_shard_range.epoch),
own_shard_range.epoch.isoformat, db_state,
own_shard_range.state_text, str(state_count)))
def _report_stats(self):
# report accumulated stats since start of one sharder cycle
default_stats = ('attempted', 'success', 'failure')
category_keys = (
('visited', default_stats + ('skipped', 'completed')),
('scanned', default_stats + ('found', 'min_time', 'max_time')),
('created', default_stats),
('cleaved', default_stats + ('min_time', 'max_time',)),
('misplaced', default_stats + ('found', 'placed', 'unplaced')),
('audit_root', default_stats + ('has_overlap', 'num_overlap')),
('audit_shard', default_stats),
)
now = time.time()
last_report = time.ctime(self.stats['start'])
elapsed = now - self.stats['start']
sharding_stats = self.stats['sharding']
for category, keys in category_keys:
stats = sharding_stats[category]
msg = ' '.join(['%s:%s' % (k, str(stats[k])) for k in keys])
self.logger.info('Since %s %s - %s', last_report, category, msg)
# transform the sharding and shrinking candidate states
# first sharding
category = self.stats['sharding']['sharding_candidates']
self._transform_candidate_stats(category, self.sharding_candidates,
sort_keys=('object_count',))
# next shrinking
category = self.stats['sharding']['shrinking_candidates']
self._transform_candidate_stats(category, self.shrinking_candidates,
sort_keys=('compactible_ranges',))
dump_recon_cache(
{'sharding_stats': self.stats,
'sharding_time': elapsed,
'sharding_last': now},
self.rcache, self.logger)
self.reported = now
def _periodic_report_stats(self):
if (time.time() - self.reported) >= self.stats_interval:
self._report_stats()
def _check_node(self, node):
"""
:return: The path to the device, if the node is mounted.
Returns False if the node is unmounted.
"""
if not node:
return False
if not is_local_device(self.ips, self.port,
node['replication_ip'],
node['replication_port']):
return False
try:
return check_drive(self.root, node['device'], self.mount_check)
except ValueError:
self.logger.warning(
'Skipping %(device)s as it is not mounted' % node)
return False
def _fetch_shard_ranges(self, broker, newest=False, params=None,
include_deleted=False):
path = self.int_client.make_path(broker.root_account,
broker.root_container)
params = params or {}
params.setdefault('format', 'json')
headers = {'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Deleted': 'true',
'X-Backend-Include-Deleted': str(include_deleted)}
if newest:
headers['X-Newest'] = 'true'
try:
resp = self.int_client.make_request(
'GET', path, headers, acceptable_statuses=(2,),
params=params)
except internal_client.UnexpectedResponse as err:
self.warning(broker, "Failed to get shard ranges from %s: %s",
quote(broker.root_path), err)
return None
record_type = resp.headers.get('x-backend-record-type')
if record_type != 'shard':
err = 'unexpected record type %r' % record_type
self.error(broker, "Failed to get shard ranges from %s: %s",
quote(broker.root_path), err)
return None
try:
data = json.loads(resp.body)
if not isinstance(data, list):
raise ValueError('not a list')
return [ShardRange.from_dict(shard_range)
for shard_range in data]
except (ValueError, TypeError, KeyError) as err:
self.error(broker,
"Failed to get shard ranges from %s: invalid data: %r",
quote(broker.root_path), err)
return None
def _put_container(self, broker, node, part, account, container, headers,
body):
try:
direct_put_container(node, part, account, container,
conn_timeout=self.conn_timeout,
response_timeout=self.node_timeout,
headers=headers, contents=body)
except DirectClientException as err:
self.warning(broker,
'Failed to put shard ranges to %s %s/%s: %s',
node_to_string(node, replication=True),
quote(account), quote(container), err.http_status)
except (Exception, Timeout) as err:
self.exception(broker,
'Failed to put shard ranges to %s %s/%s: %s',
node_to_string(node, replication=True),
quote(account), quote(container), err)
else:
return True
return False
def _send_shard_ranges(self, broker, account, container, shard_ranges,
headers=None):
body = json.dumps([dict(sr, reported=0)
for sr in shard_ranges]).encode('ascii')
part, nodes = self.ring.get_nodes(account, container)
headers = headers or {}
headers.update({'X-Backend-Record-Type': RECORD_TYPE_SHARD,
USE_REPLICATION_NETWORK_HEADER: 'True',
'User-Agent': 'container-sharder %s' % os.getpid(),
'X-Timestamp': Timestamp.now().normal,
'Content-Length': len(body),
'Content-Type': 'application/json'})
pool = GreenAsyncPile(len(nodes))
for node in nodes:
pool.spawn(self._put_container, broker, node, part, account,
container, headers, body)
results = pool.waitall(None)
return results.count(True) >= quorum_size(self.ring.replica_count)
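# For example (assuming a 3-replica container ring): quorum_size(3) is 2, so
# the shard range update above is treated as successful when at least two of
# the three container servers accept the PUT; one unreachable node does not
# fail the operation.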
def _get_shard_broker(self, shard_range, root_path, policy_index):
"""
Get a broker for a container db for the given shard range. If one of
the shard container's primary nodes is a local device then that will be
chosen for the db, otherwise the first of the shard container's handoff
nodes that is local will be chosen.
:param shard_range: a :class:`~swift.common.utils.ShardRange`
:param root_path: the path of the shard's root container
:param policy_index: the storage policy index
:returns: a tuple of ``(part, broker, node_id, put_timestamp)`` where
``part`` is the shard container's partition,
``broker`` is an instance of
:class:`~swift.container.backend.ContainerBroker`,
``node_id`` is the id of the selected node,
``put_timestamp`` is the put_timestamp if the broker needed to
be initialized.
"""
part = self.ring.get_part(shard_range.account, shard_range.container)
node = self.find_local_handoff_for_part(part)
put_timestamp = Timestamp.now().internal
shard_broker, initialized = ContainerBroker.create_broker(
os.path.join(self.root, node['device']), part, shard_range.account,
shard_range.container, epoch=shard_range.epoch,
storage_policy_index=policy_index, put_timestamp=put_timestamp)
# Get the valid info into the broker.container, etc
shard_broker.get_info()
shard_broker.merge_shard_ranges(shard_range)
shard_broker.set_sharding_sysmeta('Quoted-Root', quote(root_path))
# NB: we *used* to do
# shard_broker.set_sharding_sysmeta('Root', root_path)
# but that isn't safe for container names with nulls or newlines (or
# possibly some other characters). We consciously *don't* make any
# attempt to set the old meta; during an upgrade, some shards may think
# they are in fact roots, but it cleans up well enough once everyone's
# upgraded.
shard_broker.update_metadata({
'X-Container-Sysmeta-Sharding':
('True', Timestamp.now().internal)})
put_timestamp = put_timestamp if initialized else None
return part, shard_broker, node['id'], put_timestamp
def _audit_root_container(self, broker):
# This is the root container, and therefore the tome of knowledge;
# all we can do is check that there is nothing screwy with the ranges
self._increment_stat('audit_root', 'attempted')
warnings = []
own_shard_range = broker.get_own_shard_range()
if own_shard_range.state in ShardRange.SHARDING_STATES:
shard_ranges = [sr for sr in broker.get_shard_ranges()
if sr.state != ShardRange.SHRINKING]
paths_with_gaps = find_paths_with_gaps(shard_ranges)
if paths_with_gaps:
warnings.append(
'missing range(s): %s' %
' '.join(['%s-%s' % (gap.lower, gap.upper)
for (_, gap, _) in paths_with_gaps]))
for state in ShardRange.STATES:
if state == ShardRange.SHRINKING:
# Shrinking is how we resolve overlaps; we've got to
# allow multiple shards in that state
continue
shard_ranges = broker.get_shard_ranges(states=state)
# Transient overlaps can occur during the period immediately after
# sharding if a root learns about new child shards before it learns
# that the parent has sharded. These overlaps are normally
# corrected as an up-to-date version of the parent shard range is
# replicated to the root. Parent-child overlaps are therefore
# ignored for a reclaim age after the child was created. After
# that, parent-child overlaps may indicate that there is
# permanently stale parent shard range data, perhaps from a node
# that has been offline, so these are reported.
overlaps = find_overlapping_ranges(
shard_ranges, exclude_parent_child=True,
time_period=self.reclaim_age)
if overlaps:
self._increment_stat('audit_root', 'has_overlap')
self._update_stat('audit_root', 'num_overlap',
step=len(overlaps))
all_overlaps = ', '.join(
[' '.join(['%s-%s' % (sr.lower, sr.upper)
for sr in overlapping_ranges])
for overlapping_ranges in sorted(list(overlaps))])
warnings.append(
'overlapping ranges in state %r: %s' %
(ShardRange.STATES[state], all_overlaps))
# We've seen a case in production where the root's own_shard_range
# epoch is reset to None and its state set to ACTIVE (as if it had
# been re-defaulted). Epoch is important to sharding, so we want to
# detect if this happens: 1. so we can alert, and 2. to see how
# common it is.
if own_shard_range.epoch is None and broker.db_epoch:
warnings.append('own_shard_range reset to None should be %s'
% broker.db_epoch)
if warnings:
self.warning(broker, 'Audit failed for root: %s',
', '.join(warnings))
self._increment_stat('audit_root', 'failure', statsd=True)
return False
self._increment_stat('audit_root', 'success', statsd=True)
return True
def _merge_shard_ranges_from_root(self, broker, shard_ranges,
own_shard_range):
"""
Merge appropriate items from the given ``shard_ranges`` into the
``broker``. The selection of items that are merged will depend upon the
state of the shard.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param shard_ranges: A list of instances of
:class:`~swift.common.utils.ShardRange` describing the shard ranges
fetched from the root container.
:param own_shard_range: A :class:`~swift.common.utils.ShardRange`
describing the shard's own shard range.
:return: a tuple of ``own_shard_range, own_shard_range_from_root``. The
returned``own_shard_range`` will have been updated if the matching
``own_shard_range_from_root`` has newer data.
``own_shard_range_from_root`` will be None if no such matching
shard range is found in ``shard_ranges``.
"""
own_shard_range_from_root = None
children_shard_ranges = []
other_shard_ranges = []
for shard_range in shard_ranges:
# look for this shard range in the list of shard ranges received
# from root; the root may have different lower and upper bounds for
# this shard (e.g. if this shard has been expanded in the root to
# accept a shrinking shard) so we only match on name.
if shard_range.name == own_shard_range.name:
# If we find our own shard range in the root response, merge
# it and reload own shard range (note: own_range_from_root may
# not necessarily be 'newer' than the own shard range we
# already have, but merging will get us to the 'newest' state)
self.debug(broker, 'Updating own shard range from root')
own_shard_range_from_root = shard_range
broker.merge_shard_ranges(own_shard_range_from_root)
orig_own_shard_range = own_shard_range
own_shard_range = broker.get_own_shard_range()
if (orig_own_shard_range != own_shard_range or
orig_own_shard_range.state != own_shard_range.state):
self.info(broker,
'Updated own shard range from %s to %s',
orig_own_shard_range, own_shard_range)
elif shard_range.is_child_of(own_shard_range):
children_shard_ranges.append(shard_range)
else:
other_shard_ranges.append(shard_range)
if children_shard_ranges and not broker.is_sharded():
# Merging shard ranges from the root is only necessary until this
# DB is fully cleaved and reaches SHARDED DB state; after that it is
# useful for debugging if the set of sub-shards to which a shard has
# sharded remains frozen.
self.debug(broker, 'Updating %d children shard ranges from root',
len(children_shard_ranges))
broker.merge_shard_ranges(children_shard_ranges)
if (other_shard_ranges
and own_shard_range.state in ShardRange.CLEAVING_STATES
and not broker.is_sharded()):
# Other shard ranges returned from the root may need to be merged
# for the purposes of sharding or shrinking this shard:
#
# Shrinking states: If the up-to-date state is shrinking, the
# shards fetched from root may contain shards into which this shard
# is to shrink itself. Shrinking is initiated by modifying multiple
# neighboring shard range states *in the root*, rather than
# modifying a shard directly. We therefore need to learn about
# *other* neighboring shard ranges from the root, possibly
# including the root itself. We need to include shrunk state too,
# because one replica of a shard may already have moved the
# own_shard_range state to shrunk while another replica may still
# be in the process of shrinking.
#
# Sharding states: Normally a shard will shard to its own children.
# However, in some circumstances a shard may need to shard to other
# non-children sub-shards. For example, a shard range repair may
# cause a child sub-shard to be deleted and its namespace covered
# by another 'acceptor' shard.
#
# Therefore, if the up-to-date own_shard_range state indicates that
# sharding or shrinking is in progress, then other shard ranges
# will be merged, with the following caveats: we never expect a
# shard to shard to any ancestor shard range including the root,
# but containers might ultimately *shrink* to root; we never want
# to cleave to a container that is itself sharding or shrinking;
# the merged shard ranges should not result in gaps or overlaps in
# the namespace of this shard.
#
# Note: the search for ancestors is guaranteed to find the parent
# and root *if they are present*, but if any ancestor is missing
# then there is a chance that older generations in the
# other_shard_ranges will not be filtered and could be merged. That
# is only a problem if they are somehow still in ACTIVE state, and
# no overlap is detected, so the ancestor is merged.
ancestor_names = [
sr.name for sr in own_shard_range.find_ancestors(shard_ranges)]
filtered_other_shard_ranges = [
sr for sr in other_shard_ranges
if (sr.name not in ancestor_names
and (sr.state not in ShardRange.CLEAVING_STATES
or sr.deleted))
]
if own_shard_range.state in ShardRange.SHRINKING_STATES:
root_shard_range = own_shard_range.find_root(
other_shard_ranges)
if (root_shard_range and
root_shard_range.state == ShardRange.ACTIVE):
filtered_other_shard_ranges.append(root_shard_range)
existing_shard_ranges = broker.get_shard_ranges()
combined_shard_ranges = combine_shard_ranges(
filtered_other_shard_ranges, existing_shard_ranges)
overlaps = find_overlapping_ranges(combined_shard_ranges)
paths_with_gaps = find_paths_with_gaps(
combined_shard_ranges, own_shard_range)
if not (overlaps or paths_with_gaps):
# only merge if shard ranges appear to be *good*
self.debug(broker,
'Updating %s other shard range(s) from root',
len(filtered_other_shard_ranges))
broker.merge_shard_ranges(filtered_other_shard_ranges)
return own_shard_range, own_shard_range_from_root
def _delete_shard_container(self, broker, own_shard_range):
"""
Mark a shard container as deleted if it was sharded or shrunk more than
reclaim_age in the past. (The DB file will be removed by the replicator
after a further reclaim_age.)
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param own_shard_range: A :class:`~swift.common.utils.ShardRange`
describing the shard's own shard range.
"""
delete_age = time.time() - self.reclaim_age
deletable_states = (ShardRange.SHARDED, ShardRange.SHRUNK)
if (own_shard_range.state in deletable_states and
own_shard_range.deleted and
own_shard_range.timestamp < delete_age and
broker.empty()):
broker.delete_db(Timestamp.now().internal)
self.debug(broker, 'Marked shard container as deleted')
def _do_audit_shard_container(self, broker):
warnings = []
if not broker.account.startswith(self.shards_account_prefix):
warnings.append('account not in shards namespace %r' %
self.shards_account_prefix)
own_shard_range = broker.get_own_shard_range(no_default=True)
if not own_shard_range:
self.warning(broker, 'Audit failed for shard: missing own shard '
'range (skipping)')
return False, warnings
# Get the root view of the world, at least that part of the world
# that overlaps with this shard's namespace. The
# 'states=auditing' parameter will cause the root to include
# its own shard range in the response, which is necessary for the
# particular case when this shard should be shrinking to the root
# container; when not shrinking to root, but to another acceptor,
# the root range should be in sharded state and will not interfere
# with cleaving, listing or updating behaviour.
shard_ranges = self._fetch_shard_ranges(
broker, newest=True,
params={'marker': str_to_wsgi(own_shard_range.lower_str),
'end_marker': str_to_wsgi(own_shard_range.upper_str),
'states': 'auditing'},
include_deleted=True)
if shard_ranges:
own_shard_range, own_shard_range_from_root = \
self._merge_shard_ranges_from_root(
broker, shard_ranges, own_shard_range)
if not own_shard_range_from_root:
# this is not necessarily an error - some replicas of the
# root may not yet know about this shard container, or the
# shard's own shard range could become deleted and
# reclaimed from the root under rare conditions
warnings.append('root has no matching shard range')
elif not own_shard_range.deleted:
warnings.append('unable to get shard ranges from root')
# else, our shard range is deleted, so root may have reclaimed it
self._delete_shard_container(broker, own_shard_range)
return True, warnings
def _audit_shard_container(self, broker):
self._increment_stat('audit_shard', 'attempted')
success, warnings = self._do_audit_shard_container(broker)
if warnings:
self.warning(broker, 'Audit warnings for shard: %s',
', '.join(warnings))
self._increment_stat(
'audit_shard', 'success' if success else 'failure', statsd=True)
return success
def _audit_cleave_contexts(self, broker):
now = Timestamp.now()
for context, last_mod in CleavingContext.load_all(broker):
last_mod = Timestamp(last_mod)
is_done = context.done() and last_mod.timestamp + \
self.recon_sharded_timeout < now.timestamp
is_stale = last_mod.timestamp + self.reclaim_age < now.timestamp
if is_done or is_stale:
context.delete(broker)
def _audit_container(self, broker):
if broker.is_deleted():
if broker.is_old_enough_to_reclaim(time.time(), self.reclaim_age) \
and not broker.is_empty_enough_to_reclaim():
self.warning(broker,
'Reclaimable db stuck waiting for shrinking')
# if the container has been marked as deleted, all metadata will
# have been erased so no point auditing. But we want it to pass, in
# case any objects exist inside it.
return True
self._audit_cleave_contexts(broker)
if broker.is_root_container():
return self._audit_root_container(broker)
return self._audit_shard_container(broker)
def yield_objects(self, broker, src_shard_range, since_row=None,
batch_size=None):
"""
Iterates through all object rows in ``src_shard_range`` in name order
yielding them in lists of up to ``batch_size`` in length. All batches
of rows that are not marked deleted are yielded before all batches of
rows that are marked deleted.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param src_shard_range: A :class:`~swift.common.utils.ShardRange`
describing the source range.
:param since_row: include only object rows whose ROWID is greater than
the given row id; by default all object rows are included.
:param batch_size: The maximum number of object rows to include in each
yielded batch; defaults to cleave_row_batch_size.
:return: a generator of tuples of (list of rows, broker info dict)
"""
if (src_shard_range.lower == ShardRange.MAX or
src_shard_range.upper == ShardRange.MIN):
# this is an unexpected condition but handled with an early return
# just in case, because:
# lower == ShardRange.MAX -> marker == ''
# which could result in rows being erroneously yielded.
return
batch_size = batch_size or self.cleave_row_batch_size
for include_deleted in (False, True):
marker = src_shard_range.lower_str
while True:
info = broker.get_info()
info['max_row'] = broker.get_max_row()
start = time.time()
objects = broker.get_objects(
limit=batch_size,
marker=marker,
end_marker=src_shard_range.end_marker,
include_deleted=include_deleted,
since_row=since_row)
self.debug(broker, 'got %s rows (deleted=%s) in %ss',
len(objects), include_deleted, time.time() - start)
if objects:
yield objects, info
if len(objects) < batch_size:
break
marker = objects[-1]['name']
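# Usage sketch (hypothetical bounds and batch size, with ``sharder`` being a
# ContainerSharder instance): to walk the object rows of a source namespace
# in batches one might write
#
#   src = ShardRange('dont/care', Timestamp.now(), 'apples', 'bananas')
#   for rows, info in sharder.yield_objects(broker, src, batch_size=500):
#       ...  # each ``rows`` list holds at most 500 rows; ``info`` includes
#            # the broker's max_row sampled before the rows were fetched
#
# with all undeleted rows exhausted before any deleted rows, as described in
# the docstring.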
def yield_objects_to_shard_range(self, broker, src_shard_range,
dest_shard_ranges):
"""
Iterates through all object rows in ``src_shard_range`` to place them
in destination shard ranges provided by the ``dest_shard_ranges``
function. Yields tuples of ``(batch of object rows, destination shard
range in which those object rows belong, broker info)``.
If no destination shard range exists for a batch of object rows then
tuples are yielded of ``(batch of object rows, None, broker info)``.
This indicates to the caller that there are a non-zero number of object
rows for which no destination shard range was found.
Note that the same destination shard range may be referenced in more
than one yielded tuple.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param src_shard_range: A :class:`~swift.common.utils.ShardRange`
describing the source range.
:param dest_shard_ranges: A function which should return a list of
destination shard ranges sorted in the order defined by
:meth:`~swift.common.utils.ShardRange.sort_key`.
:return: a generator of tuples of ``(object row list, shard range,
broker info dict)`` where ``shard_range`` may be ``None``.
"""
# calling dest_shard_ranges() may result in a request to fetch shard
# ranges, so first check that the broker actually has misplaced object
# rows in the source namespace
for _ in self.yield_objects(broker, src_shard_range, batch_size=1):
break
else:
return
dest_shard_range_iter = iter(dest_shard_ranges())
src_shard_range_marker = src_shard_range.lower
for dest_shard_range in dest_shard_range_iter:
if dest_shard_range.upper <= src_shard_range.lower:
continue
if dest_shard_range.lower > src_shard_range_marker:
# no destination for a sub-namespace of the source namespace
sub_src_range = src_shard_range.copy(
lower=src_shard_range_marker, upper=dest_shard_range.lower)
for objs, info in self.yield_objects(broker, sub_src_range):
yield objs, None, info
sub_src_range = src_shard_range.copy(
lower=max(dest_shard_range.lower, src_shard_range.lower),
upper=min(dest_shard_range.upper, src_shard_range.upper))
for objs, info in self.yield_objects(broker, sub_src_range):
yield objs, dest_shard_range, info
src_shard_range_marker = dest_shard_range.upper
if dest_shard_range.upper >= src_shard_range.upper:
# the entire source namespace has been traversed
break
else:
# dest_shard_ranges_iter was exhausted before reaching the end of
# the source namespace
sub_src_range = src_shard_range.copy(lower=src_shard_range_marker)
for objs, info in self.yield_objects(broker, sub_src_range):
yield objs, None, info
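# Sketch of the yielded stream (illustrative bounds): for a source namespace
# a-z and destination shard ranges covering only d-m and m-t, the generator
# yields, batch by batch, roughly
#
#   (rows in a-d, None,     info)   # no destination found
#   (rows in d-m, dest d-m, info)
#   (rows in m-t, dest m-t, info)
#   (rows in t-z, None,     info)   # destinations exhausted
#
# which is why _move_objects below counts a ``None`` destination as unplaced
# rows.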
def _post_replicate_hook(self, broker, info, responses):
# override superclass behaviour
pass
def _replicate_and_delete(self, broker, dest_shard_range, part,
dest_broker, node_id, info):
success, responses = self._replicate_object(
part, dest_broker.db_file, node_id)
quorum = quorum_size(self.ring.replica_count)
if not success and responses.count(True) < quorum:
self.warning(broker, 'Failed to sufficiently replicate misplaced '
'objects to %s (not removing)',
dest_shard_range)
return False
if broker.get_info()['id'] != info['id']:
# the db changed - don't remove any objects
success = False
else:
# remove objects up to the max row of the db sampled prior to
# the first object yielded for this destination; objects added
# after that point may not have been yielded and replicated so
# it is not safe to remove them yet
broker.remove_objects(
dest_shard_range.lower_str,
dest_shard_range.upper_str,
max_row=info['max_row'])
success = True
if not success:
self.warning(broker,
'Refused to remove misplaced objects for dest %s',
dest_shard_range)
return success
def _move_objects(self, src_broker, src_shard_range, policy_index,
shard_range_fetcher):
# move objects from src_shard_range in src_broker to destination shard
# ranges provided by shard_range_fetcher
dest_brokers = {} # map shard range -> broker
placed = unplaced = 0
success = True
for objs, dest_shard_range, info in self.yield_objects_to_shard_range(
src_broker, src_shard_range, shard_range_fetcher):
if not dest_shard_range:
unplaced += len(objs)
success = False
continue
if dest_shard_range.name == src_broker.path:
self.debug(src_broker,
'Skipping source as misplaced objects destination')
# in shrinking context, the misplaced objects might actually be
# correctly placed if the root has expanded this shard but this
# broker has not yet been updated
continue
if dest_shard_range not in dest_brokers:
part, dest_broker, node_id, put_timestamp = \
self._get_shard_broker(
dest_shard_range, src_broker.root_path, policy_index)
stat = 'db_exists' if put_timestamp is None else 'db_created'
self._increment_stat('misplaced', stat, statsd=True)
# save the broker info that was sampled prior to the *first*
# yielded objects for this destination
destination = {'part': part,
'dest_broker': dest_broker,
'node_id': node_id,
'info': info}
dest_brokers[dest_shard_range] = destination
else:
destination = dest_brokers[dest_shard_range]
destination['dest_broker'].merge_items(objs)
placed += len(objs)
if unplaced:
self.warning(src_broker, 'Failed to find destination for at least '
'%s misplaced objects', unplaced)
# TODO: consider executing the replication jobs concurrently
for dest_shard_range, dest_args in dest_brokers.items():
self.debug(src_broker,
'moving misplaced objects found in range %s',
dest_shard_range)
success &= self._replicate_and_delete(
src_broker, dest_shard_range, **dest_args)
self._update_stat('misplaced', 'placed', step=placed, statsd=True)
self._update_stat('misplaced', 'unplaced', step=unplaced, statsd=True)
return success, placed, unplaced
def _make_shard_range_fetcher(self, broker, src_shard_range):
# returns a function that will lazy load shard ranges on demand;
# this means only one lookup is made for all misplaced ranges.
outer = {}
def shard_range_fetcher():
if not outer:
if broker.is_root_container():
ranges = broker.get_shard_ranges(
marker=src_shard_range.lower_str,
end_marker=src_shard_range.end_marker,
states=SHARD_UPDATE_STATES)
else:
# TODO: the root may not yet know about shard ranges to
# which a shard is sharding, but those could come from
# the broker
ranges = self._fetch_shard_ranges(
broker, newest=True,
params={'states': 'updating',
'marker': str_to_wsgi(
src_shard_range.lower_str),
'end_marker': str_to_wsgi(
src_shard_range.end_marker)})
outer['ranges'] = iter(ranges)
return outer['ranges']
return shard_range_fetcher
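# The closure returned above memoises its result in ``outer`` so that the
# (possibly remote) shard range lookup happens at most once per source
# range, and only if misplaced rows are actually found. A stripped-down
# sketch of the same lazy-loading pattern, with ``load`` standing in for the
# broker/root lookup performed here:
#
#   def make_fetcher(load):
#       cache = {}
#       def fetcher():
#           if 'ranges' not in cache:
#               cache['ranges'] = iter(load())
#           return cache['ranges']
#       return fetcher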
def _make_default_misplaced_object_bounds(self, broker):
# Objects outside of this container's own range are misplaced.
own_shard_range = broker.get_own_shard_range()
bounds = []
if own_shard_range.lower:
bounds.append(('', own_shard_range.lower))
if own_shard_range.upper:
bounds.append((own_shard_range.upper, ''))
return bounds
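# For example (hypothetical namespace): a shard whose own range covers
# 'd' - 'f' gets default misplaced-object bounds [('', 'd'), ('f', '')],
# i.e. everything below its lower bound plus everything above its upper
# bound, whereas a container whose own range already spans the entire
# namespace (both bounds unset, hence falsy in the checks above) ends up
# with no default bounds at all.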
def _make_misplaced_object_bounds(self, broker):
bounds = []
state = broker.get_db_state()
if state == SHARDED:
# Anything in the object table is treated as a misplaced object.
bounds.append(('', ''))
if not bounds and state == SHARDING:
# Objects outside of this container's own range are misplaced.
# Objects in already cleaved shard ranges are also misplaced.
cleave_context = CleavingContext.load(broker)
if cleave_context.cursor:
bounds.append(('', cleave_context.cursor))
own_shard_range = broker.get_own_shard_range()
if own_shard_range.upper:
bounds.append((own_shard_range.upper, ''))
return bounds or self._make_default_misplaced_object_bounds(broker)
def _move_misplaced_objects(self, broker, src_broker=None,
src_bounds=None):
"""
Search for objects in the given broker that do not belong in that
broker's namespace and move those objects to their correct shard
container.
:param broker: An instance of :class:`swift.container.ContainerBroker`.
:param src_broker: optional alternative broker to use as the source
of misplaced objects; if not specified then ``broker`` is used as
the source.
:param src_bounds: optional list of (lower, upper) namespace bounds to
use when searching for misplaced objects
:return: True if all misplaced objects were sufficiently replicated to
their correct shard containers, False otherwise
"""
self.debug(broker, 'Looking for misplaced objects')
self._increment_stat('misplaced', 'attempted')
src_broker = src_broker or broker
if src_bounds is None:
src_bounds = self._make_misplaced_object_bounds(broker)
# (ab)use ShardRange instances to encapsulate source namespaces
src_ranges = [ShardRange('dont/care', Timestamp.now(), lower, upper)
for lower, upper in src_bounds]
self.debug(broker, 'misplaced object source bounds %s', src_bounds)
policy_index = broker.storage_policy_index
success = True
num_placed = num_unplaced = 0
for src_shard_range in src_ranges:
part_success, part_placed, part_unplaced = self._move_objects(
src_broker, src_shard_range, policy_index,
self._make_shard_range_fetcher(broker, src_shard_range))
success &= part_success
num_placed += part_placed
num_unplaced += part_unplaced
if num_placed or num_unplaced:
# the found stat records the number of DBs in which any misplaced
# rows were found, not the total number of misplaced rows
self._increment_stat('misplaced', 'found', statsd=True)
self.debug(broker, 'Placed %s misplaced objects (%s unplaced)',
num_placed, num_unplaced)
self._increment_stat('misplaced', 'success' if success else 'failure',
statsd=True)
self.debug(broker, 'Finished handling misplaced objects')
return success
def _find_shard_ranges(self, broker):
"""
Scans the container to find shard ranges and adds them to the shard
ranges table. If there are existing shard ranges then scanning starts
from the upper bound of the uppermost existing shard range.
:param broker: An instance of :class:`swift.container.ContainerBroker`
:return: the number of new shard ranges found; zero if the scan was
already complete or if no new shard ranges could be found.
"""
own_shard_range = broker.get_own_shard_range()
shard_ranges = broker.get_shard_ranges()
if shard_ranges and shard_ranges[-1].upper >= own_shard_range.upper:
self.debug(broker, 'Scan for shard ranges already completed')
return 0
self.info(broker, 'Starting scan for shard ranges')
self._increment_stat('scanned', 'attempted')
start = time.time()
shard_data, last_found = broker.find_shard_ranges(
self.rows_per_shard, limit=self.shard_scanner_batch_size,
existing_ranges=shard_ranges,
minimum_shard_size=self.minimum_shard_size)
elapsed = time.time() - start
if not shard_data:
if last_found:
self.info(broker, "Already found all shard ranges")
self._increment_stat('scanned', 'success', statsd=True)
else:
# we didn't find anything
self.warning(broker, "No shard ranges found")
self._increment_stat('scanned', 'failure', statsd=True)
return 0
shard_ranges = make_shard_ranges(
broker, shard_data, self.shards_account_prefix)
broker.merge_shard_ranges(shard_ranges)
num_found = len(shard_ranges)
self.info(broker, "Completed scan for shard ranges: %d found",
num_found)
self._update_stat('scanned', 'found', step=num_found)
self._min_stat('scanned', 'min_time', round(elapsed / num_found, 3))
self._max_stat('scanned', 'max_time', round(elapsed / num_found, 3))
if last_found:
self.info(broker, "Final shard range reached.")
self._increment_stat('scanned', 'success', statsd=True)
return num_found
def _create_shard_containers(self, broker):
# Create shard containers that are ready to receive redirected object
# updates. Do this now, so that redirection can begin immediately
# without waiting for cleaving to complete.
found_ranges = broker.get_shard_ranges(states=ShardRange.FOUND)
created_ranges = []
for shard_range in found_ranges:
self._increment_stat('created', 'attempted')
shard_range.update_state(ShardRange.CREATED)
headers = {
'X-Backend-Storage-Policy-Index': broker.storage_policy_index,
'X-Container-Sysmeta-Shard-Quoted-Root': quote(
broker.root_path),
'X-Container-Sysmeta-Sharding': 'True',
'X-Backend-Auto-Create': 'True'}
# NB: we *used* to send along
# 'X-Container-Sysmeta-Shard-Root': broker.root_path
# but that isn't safe for container names with nulls or newlines
# (or possibly some other characters). We consciously *don't* make
# any attempt to set the old meta; during an upgrade, some shards
# may think they are in fact roots, but it cleans up well enough
# once everyone's upgraded.
success = self._send_shard_ranges(
broker, shard_range.account, shard_range.container,
[shard_range], headers=headers)
if success:
self.debug(broker, 'PUT new shard range container for %s',
shard_range)
self._increment_stat('created', 'success', statsd=True)
else:
self.error(broker, 'PUT of new shard container %r failed',
shard_range)
self._increment_stat('created', 'failure', statsd=True)
# break, not continue, because elsewhere it is assumed that
# finding and cleaving shard ranges progresses linearly, so we
# do not want any subsequent shard ranges to be in created
# state while this one is still in found state
break
created_ranges.append(shard_range)
if created_ranges:
broker.merge_shard_ranges(created_ranges)
if not broker.is_root_container():
self._send_shard_ranges(broker, broker.root_account,
broker.root_container, created_ranges)
self.info(broker, "Completed creating %d shard range containers",
len(created_ranges))
return len(created_ranges)
def _cleave_shard_broker(self, broker, cleaving_context, shard_range,
own_shard_range, shard_broker, put_timestamp,
shard_part, node_id):
result = CLEAVE_SUCCESS
start = time.time()
# only cleave from the retiring db - misplaced objects handler will
# deal with any objects in the fresh db
source_broker = broker.get_brokers()[0]
# if this range has been cleaved before but replication
# failed then the shard db may still exist and it may not be
# necessary to merge all the rows again
source_db_id = source_broker.get_info()['id']
source_max_row = source_broker.get_max_row()
sync_point = shard_broker.get_sync(source_db_id)
if sync_point < source_max_row or source_max_row == -1:
sync_from_row = max(cleaving_context.last_cleave_to_row or -1,
sync_point)
objects = None
for objects, info in self.yield_objects(
source_broker, shard_range,
since_row=sync_from_row):
shard_broker.merge_items(objects)
if objects is None:
self.info(broker, "Cleaving %r - zero objects found",
shard_range)
if shard_broker.get_info()['put_timestamp'] == put_timestamp:
# This was just created; there's no need to replicate this
# SR because there was nothing there. So clean up and
# remove the shard_broker from its handoff location.
# Because nothing was here we won't count it in the shard
# batch count.
result = CLEAVE_EMPTY
# Else, it wasn't newly created by us, and
# we don't know what's in it or why. Let it get
# replicated and counted in the batch count.
# Note: the max row stored as a sync point is sampled *before*
# objects are yielded to ensure that is less than or equal to
# the last yielded row. Other sync points are also copied from the
# source broker to the shards; if another replica of the source
# happens to subsequently cleave into a primary replica of the
# shard then it will only need to cleave rows after its last sync
# point with this replica of the source broker.
shard_broker.merge_syncs(
[{'sync_point': source_max_row, 'remote_id': source_db_id}] +
source_broker.get_syncs())
else:
self.debug(broker, "Cleaving %r - shard db already in sync",
shard_range)
replication_quorum = self.existing_shard_replication_quorum
if own_shard_range.state in ShardRange.SHRINKING_STATES:
if shard_range.includes(own_shard_range):
# When shrinking to a single acceptor that completely encloses
# this shard's namespace, include deleted own (donor) shard
# range in the replicated db so that when acceptor next updates
# root it will atomically update its namespace *and* delete the
# donor. This reduces the chance of a temporary listing gap if
# this shard fails to update the root with its SHRUNK/deleted
# state. Don't do this when sharding a shard or shrinking to
# multiple acceptors because in those cases the donor namespace
# should not be deleted until *all* shards are cleaved.
if own_shard_range.update_state(ShardRange.SHRUNK):
own_shard_range.set_deleted()
broker.merge_shard_ranges(own_shard_range)
shard_broker.merge_shard_ranges(own_shard_range)
elif shard_range.state == ShardRange.CREATED:
# The shard range object stats may have changed since the shard
# range was found, so update with stats of objects actually
# copied to the shard broker. Only do this the first time each
# shard range is cleaved.
info = shard_broker.get_info()
shard_range.update_meta(
info['object_count'], info['bytes_used'])
# Update state to CLEAVED; only do this when sharding, not when
# shrinking
shard_range.update_state(ShardRange.CLEAVED)
shard_broker.merge_shard_ranges(shard_range)
replication_quorum = self.shard_replication_quorum
if result == CLEAVE_EMPTY:
self.delete_db(shard_broker)
else: # result == CLEAVE_SUCCESS:
self.info(broker, 'Replicating new shard container %s for %s',
quote(shard_broker.path), own_shard_range)
success, responses = self._replicate_object(
shard_part, shard_broker.db_file, node_id)
replication_successes = responses.count(True)
if (not success and (not responses or
replication_successes < replication_quorum)):
# insufficient replication or replication not even attempted;
# break because we don't want to progress the cleave cursor
# until each shard range has been successfully cleaved
self.warning(broker,
'Failed to sufficiently replicate cleaved shard '
'%s: %s successes, %s required', shard_range,
replication_successes, replication_quorum)
self._increment_stat('cleaved', 'failure', statsd=True)
result = CLEAVE_FAILED
else:
elapsed = round(time.time() - start, 3)
self._min_stat('cleaved', 'min_time', elapsed)
self._max_stat('cleaved', 'max_time', elapsed)
self.info(broker, 'Cleaved %s in %gs', shard_range,
elapsed)
self._increment_stat('cleaved', 'success', statsd=True)
if result in (CLEAVE_SUCCESS, CLEAVE_EMPTY):
broker.merge_shard_ranges(shard_range)
cleaving_context.range_done(shard_range.upper_str)
if shard_range.upper >= own_shard_range.upper:
# cleaving complete
cleaving_context.cleaving_done = True
cleaving_context.store(broker)
return result
def _cleave_shard_range(self, broker, cleaving_context, shard_range,
own_shard_range):
self.info(broker, "Cleaving from row %s into %s for %r",
cleaving_context.last_cleave_to_row,
quote(shard_range.name), shard_range)
self._increment_stat('cleaved', 'attempted')
policy_index = broker.storage_policy_index
shard_part, shard_broker, node_id, put_timestamp = \
self._get_shard_broker(shard_range, broker.root_path,
policy_index)
stat = 'db_exists' if put_timestamp is None else 'db_created'
self._increment_stat('cleaved', stat, statsd=True)
return self._cleave_shard_broker(
broker, cleaving_context, shard_range, own_shard_range,
shard_broker, put_timestamp, shard_part, node_id)
def _cleave(self, broker):
# Returns True if misplaced objects have been moved and the entire
# container namespace has been successfully cleaved, False otherwise
if broker.is_sharded():
self.debug(broker, 'Passing over already sharded container')
return True
cleaving_context = CleavingContext.load(broker)
if not cleaving_context.misplaced_done:
# ensure any misplaced objects in the source broker are moved; note
            # that this invocation of _move_misplaced_objects is targeted at
# the *retiring* db.
self.debug(broker,
'Moving any misplaced objects from sharding container')
bounds = self._make_default_misplaced_object_bounds(broker)
cleaving_context.misplaced_done = self._move_misplaced_objects(
broker, src_broker=broker.get_brokers()[0],
src_bounds=bounds)
cleaving_context.store(broker)
if cleaving_context.cleaving_done:
self.debug(broker, 'Cleaving already complete for container')
return cleaving_context.misplaced_done
shard_ranges = broker.get_shard_ranges(marker=cleaving_context.marker)
# Ignore shrinking shard ranges: we never want to cleave objects to a
# shrinking shard. Shrinking shard ranges are to be expected in a root;
# shrinking shard ranges (other than own shard range) are not normally
# expected in a shard but can occur if there is an overlapping shard
# range that has been discovered from the root.
ranges_todo = [sr for sr in shard_ranges
if sr.state != ShardRange.SHRINKING]
if cleaving_context.cursor:
# always update ranges_todo in case shard ranges have changed since
# last visit
cleaving_context.ranges_todo = len(ranges_todo)
self.debug(broker, 'Continuing to cleave (%s done, %s todo)',
cleaving_context.ranges_done,
cleaving_context.ranges_todo)
else:
cleaving_context.start()
own_shard_range = broker.get_own_shard_range()
cleaving_context.cursor = own_shard_range.lower_str
cleaving_context.ranges_todo = len(ranges_todo)
self.info(broker, 'Starting to cleave (%s todo)',
cleaving_context.ranges_todo)
own_shard_range = broker.get_own_shard_range(no_default=True)
if own_shard_range is None:
# A default should never be SHRINKING or SHRUNK but because we
# may write own_shard_range back to broker, let's make sure
# it can't be defaulted.
self.warning(broker, 'Failed to get own_shard_range')
ranges_todo = [] # skip cleaving
ranges_done = []
for shard_range in ranges_todo:
if cleaving_context.cleaving_done:
# note: there may still be ranges_todo, for example: if this
# shard is shrinking and has merged a root shard range in
# sharded state along with an active acceptor shard range, but
# the root range is irrelevant
break
if len(ranges_done) == self.cleave_batch_size:
break
if shard_range.lower > cleaving_context.cursor:
self.info(broker, 'Stopped cleave at gap: %r - %r' %
(cleaving_context.cursor, shard_range.lower))
break
if shard_range.state not in (ShardRange.CREATED,
ShardRange.CLEAVED,
ShardRange.ACTIVE):
self.info(broker, 'Stopped cleave at unready %s', shard_range)
break
cleave_result = self._cleave_shard_range(
broker, cleaving_context, shard_range, own_shard_range)
if cleave_result == CLEAVE_SUCCESS:
ranges_done.append(shard_range)
elif cleave_result == CLEAVE_FAILED:
break
# else: CLEAVE_EMPTY: no errors, but no rows found either. keep
# going, and don't count it against our batch size
# _cleave_shard_range always store()s the context on success; *also* do
        # that here in case we hit a failure right off the bat or ended the
        # loop with skipped ranges
cleaving_context.store(broker)
self.debug(broker, 'Cleaved %s shard ranges', len(ranges_done))
return (cleaving_context.misplaced_done and
cleaving_context.cleaving_done)
def _complete_sharding(self, broker):
cleaving_context = CleavingContext.load(broker)
if cleaving_context.done():
# Move all CLEAVED shards to ACTIVE state and if a shard then
# delete own shard range; these changes will be simultaneously
# reported in the next update to the root container.
own_shard_range = broker.get_own_shard_range(no_default=True)
if own_shard_range is None:
                # This is more of a belt-and-braces check; it is unclear we
                # could even get this far without an own_shard_range. But
                # because we will be writing own_shard_range back, we need
                # to make sure it exists.
self.warning(broker, 'Failed to get own_shard_range')
return False
own_shard_range.update_meta(0, 0)
if own_shard_range.state in ShardRange.SHRINKING_STATES:
own_shard_range.update_state(ShardRange.SHRUNK)
modified_shard_ranges = []
else:
own_shard_range.update_state(ShardRange.SHARDED)
modified_shard_ranges = broker.get_shard_ranges(
states=ShardRange.CLEAVED)
for sr in modified_shard_ranges:
sr.update_state(ShardRange.ACTIVE)
if (not broker.is_root_container() and not
own_shard_range.deleted):
own_shard_range = own_shard_range.copy(
timestamp=Timestamp.now(), deleted=1)
modified_shard_ranges.append(own_shard_range)
broker.merge_shard_ranges(modified_shard_ranges)
if broker.set_sharded_state():
return True
else:
self.warning(broker, 'Failed to remove retiring db file')
else:
self.warning(broker, 'Repeat cleaving required, context: %s',
dict(cleaving_context))
cleaving_context.reset()
cleaving_context.store(broker)
return False
def _find_and_enable_sharding_candidates(self, broker, shard_ranges=None):
candidates = find_sharding_candidates(
broker, self.shard_container_threshold, shard_ranges)
if candidates:
self.debug(broker, 'Identified %s sharding candidates',
len(candidates))
broker.merge_shard_ranges(candidates)
def _find_and_enable_shrinking_candidates(self, broker):
if not broker.is_sharded():
self.warning(broker, 'Cannot shrink a not yet sharded container')
return
compactible_sequences = find_compactible_shard_sequences(
broker, self.shrink_threshold, self.expansion_limit,
self.max_shrinking, self.max_expanding, include_shrinking=True)
self.debug(broker, 'Found %s compactible sequences of length(s) %s' %
(len(compactible_sequences),
[len(s) for s in compactible_sequences]))
process_compactible_shard_sequences(broker, compactible_sequences)
own_shard_range = broker.get_own_shard_range()
for sequence in compactible_sequences:
acceptor = sequence[-1]
donors = ShardRangeList(sequence[:-1])
self.debug(broker,
'shrinking %d objects from %d shard ranges into %s' %
(donors.object_count, len(donors), acceptor))
if acceptor.name != own_shard_range.name:
self._send_shard_ranges(broker, acceptor.account,
acceptor.container, [acceptor])
acceptor.increment_meta(donors.object_count, donors.bytes_used)
# Now send a copy of the expanded acceptor, with an updated
# timestamp, to each donor container. This forces each donor to
# asynchronously cleave its entire contents to the acceptor and
# delete itself. The donor will pass its own deleted shard range to
# the acceptor when cleaving. Subsequent updates from the donor or
# the acceptor will then update the root to have the deleted donor
# shard range.
for donor in donors:
self._send_shard_ranges(broker, donor.account,
donor.container, [donor, acceptor])
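        # Sketch of what one compactible sequence produces, with made-up shard
        # ranges purely for illustration: given donors d1 and d2 shrinking
        # into acceptor a1, a copy of a1 is sent to a1's own container (unless
        # a1 is this broker's own shard range), and each donor's container is
        # sent [d_i, a1] so that the donor cleaves its rows into a1 and then
        # deletes itself; the deleted donor ranges reach the root via
        # subsequent updates from the donors or the acceptor.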
def _update_root_container(self, broker):
own_shard_range = broker.get_own_shard_range(no_default=True)
if not own_shard_range:
return
        # Don't update the own shard range (osr) stats, including
        # tombstones, unless it's in a CLEAVED or later state
if own_shard_range.state in SHARD_UPDATE_STAT_STATES:
# do a reclaim *now* in order to get best estimate of tombstone
# count that is consistent with the current object_count
reclaimer = self._reclaim(broker)
tombstones = reclaimer.get_tombstone_count()
self.debug(broker, 'tombstones = %d', tombstones)
# shrinking candidates are found in the root DB so that's the only
# place we need up to date tombstone stats.
own_shard_range.update_tombstones(tombstones)
update_own_shard_range_stats(broker, own_shard_range)
if not own_shard_range.reported:
broker.merge_shard_ranges(own_shard_range)
        # we can't simply return when `state not in SHARD_UPDATE_STAT_STATES`
        # because there are cases where we still want to update the root even
        # if the stats are wrong, for example when this is a new shard or
        # something else has decided to remove the latch to update root.
if own_shard_range.reported:
return
# now get a consistent list of own and other shard ranges
shard_ranges = broker.get_shard_ranges(
include_own=True,
include_deleted=True)
# send everything
if self._send_shard_ranges(broker, broker.root_account,
broker.root_container, shard_ranges,
{'Referer': quote(broker.path)}):
# on success, mark ourselves as reported so we don't keep
# hammering the root
own_shard_range.reported = True
broker.merge_shard_ranges(own_shard_range)
self.debug(broker, 'updated root objs=%d, tombstones=%s',
own_shard_range.object_count,
own_shard_range.tombstones)
def _process_broker(self, broker, node, part):
broker.get_info() # make sure account/container are populated
state = broker.get_db_state()
is_deleted = broker.is_deleted()
self.debug(broker, 'Starting processing, state %s%s', state,
' (deleted)' if is_deleted else '')
if not self._audit_container(broker):
return
# now look and deal with misplaced objects.
move_start_ts = time.time()
self._move_misplaced_objects(broker)
self.logger.timing_since(
'sharder.sharding.move_misplaced', move_start_ts)
is_leader = node['index'] == 0 and self.auto_shard and not is_deleted
if state in (UNSHARDED, COLLAPSED):
if is_leader and broker.is_root_container():
# bootstrap sharding of root container
own_shard_range = broker.get_own_shard_range()
update_own_shard_range_stats(broker, own_shard_range)
self._find_and_enable_sharding_candidates(
broker, shard_ranges=[own_shard_range])
own_shard_range = broker.get_own_shard_range()
if own_shard_range.state in ShardRange.CLEAVING_STATES:
if broker.has_other_shard_ranges():
                    # container has been given shard ranges rather than
                    # finding them itself, e.g. via replication, a shrink
                    # event, or manually triggered cleaving.
db_start_ts = time.time()
if broker.set_sharding_state():
state = SHARDING
self.info(broker, 'Kick off container cleaving, '
'own shard range in state %r',
own_shard_range.state_text)
self.logger.timing_since(
'sharder.sharding.set_state', db_start_ts)
elif is_leader:
if broker.set_sharding_state():
state = SHARDING
else:
self.debug(broker,
'Own shard range in state %r but no shard '
'ranges and not leader; remaining unsharded',
own_shard_range.state_text)
if state == SHARDING:
cleave_start_ts = time.time()
if is_leader:
num_found = self._find_shard_ranges(broker)
else:
num_found = 0
# create shard containers for newly found ranges
num_created = self._create_shard_containers(broker)
if num_found or num_created:
# share updated shard range state with other nodes
self._replicate_object(part, broker.db_file, node['id'])
# always try to cleave any pending shard ranges
cleave_complete = self._cleave(broker)
self.logger.timing_since(
'sharder.sharding.cleave', cleave_start_ts)
if cleave_complete:
if self._complete_sharding(broker):
state = SHARDED
self._increment_stat('visited', 'completed', statsd=True)
self.info(broker, 'Completed cleaving, DB set to sharded '
'state')
self.logger.timing_since(
'sharder.sharding.completed',
float(broker.get_own_shard_range().epoch))
else:
self.info(broker, 'Completed cleaving, DB remaining in '
'sharding state')
if not broker.is_deleted():
if state == SHARDED and broker.is_root_container():
# look for shrink stats
send_start_ts = time.time()
self._identify_shrinking_candidate(broker, node)
if is_leader:
self._find_and_enable_shrinking_candidates(broker)
self._find_and_enable_sharding_candidates(broker)
for shard_range in broker.get_shard_ranges(
states=[ShardRange.SHARDING]):
self._send_shard_ranges(broker, shard_range.account,
shard_range.container,
[shard_range])
self.logger.timing_since(
'sharder.sharding.send_sr', send_start_ts)
if not broker.is_root_container():
# Update the root container with this container's shard range
# info; do this even when sharded in case previous attempts
# failed; don't do this if there is no own shard range. When
# sharding a shard, this is when the root will see the new
# shards move to ACTIVE state and the sharded shard
# simultaneously become deleted.
update_start_ts = time.time()
self._update_root_container(broker)
self.logger.timing_since(
'sharder.sharding.update_root', update_start_ts)
self.debug(broker,
'Finished processing, state %s%s',
broker.get_db_state(), ' (deleted)' if is_deleted else '')
def _one_shard_cycle(self, devices_to_shard, partitions_to_shard):
"""
        The main function: everything the sharder does forks from this method.
        The sharder loops through each container with sharding enabled and
        each sharded container on the server; for each container it:
- audits the container
- checks and deals with misplaced items
- cleaves any shard ranges as required
- if not a root container, reports shard range stats to the root
container
"""
self.logger.info('Container sharder cycle starting, auto-sharding %s',
self.auto_shard)
if isinstance(devices_to_shard, (list, tuple)):
self.logger.info('(Override devices: %s)',
', '.join(str(d) for d in devices_to_shard))
if isinstance(partitions_to_shard, (list, tuple)):
self.logger.info('(Override partitions: %s)',
', '.join(str(p) for p in partitions_to_shard))
self._zero_stats()
self._local_device_ids = {}
dirs = []
self.ips = whataremyips(self.bind_ip)
for node in self.ring.devs:
device_path = self._check_node(node)
if not device_path:
continue
datadir = os.path.join(device_path, self.datadir)
if os.path.isdir(datadir):
# Populate self._local_device_ids so we can find devices for
# shard containers later
self._local_device_ids[node['id']] = node
if node['device'] not in devices_to_shard:
continue
part_filt = self._partition_dir_filter(
node['id'],
partitions_to_shard)
dirs.append((datadir, node, part_filt))
if not dirs:
            self.logger.info('Found no container directories')
for part, path, node in self.roundrobin_datadirs(dirs):
# NB: get_part_nodes always provides an 'index' key;
# this will be used in leader selection
for primary in self.ring.get_part_nodes(int(part)):
if node['id'] == primary['id']:
node = primary
break
else:
# Set index such that we'll *never* be selected as a leader
node['index'] = 'handoff'
broker = ContainerBroker(path, logger=self.logger,
timeout=self.broker_timeout)
error = None
try:
self._identify_sharding_candidate(broker, node)
if sharding_enabled(broker):
self._increment_stat('visited', 'attempted')
self._process_broker(broker, node, part)
self._increment_stat('visited', 'success', statsd=True)
else:
self._increment_stat('visited', 'skipped')
except (Exception, Timeout) as err:
self._increment_stat('visited', 'failure', statsd=True)
self.exception(broker, 'Unhandled exception while processing: '
'%s', err)
error = err
try:
self._record_sharding_progress(broker, node, error)
except (Exception, Timeout) as error:
self.exception(broker, 'Unhandled exception while dumping '
'progress: %s', error)
self._periodic_report_stats()
self._report_stats()
@contextmanager
def _set_auto_shard_from_command_line(self, **kwargs):
conf_auto_shard = self.auto_shard
auto_shard = kwargs.get('auto_shard', None)
if auto_shard is not None:
self.auto_shard = config_true_value(auto_shard)
try:
yield
finally:
self.auto_shard = conf_auto_shard
def run_forever(self, *args, **kwargs):
"""Run the container sharder until stopped."""
with self._set_auto_shard_from_command_line(**kwargs):
self.reported = time.time()
time.sleep(random() * self.interval)
while True:
begin = time.time()
try:
self._one_shard_cycle(devices_to_shard=Everything(),
partitions_to_shard=Everything())
except (Exception, Timeout):
self.logger.increment('errors')
self.logger.exception('Exception in sharder')
elapsed = time.time() - begin
self.logger.info(
'Container sharder cycle completed: %.02fs', elapsed)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""Run the container sharder once."""
self.logger.info('Begin container sharder "once" mode')
override_options = parse_override_options(once=True, **kwargs)
devices_to_shard = override_options.devices or Everything()
partitions_to_shard = override_options.partitions or Everything()
with self._set_auto_shard_from_command_line(**kwargs):
begin = self.reported = time.time()
self._one_shard_cycle(devices_to_shard=devices_to_shard,
partitions_to_shard=partitions_to_shard)
elapsed = time.time() - begin
self.logger.info(
'Container sharder "once" mode completed: %.02fs', elapsed)
| swift-master | swift/container/sharder.py |
| swift-master | swift/container/__init__.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import os
import signal
import sys
import time
from random import random, shuffle
from tempfile import mkstemp
from eventlet import spawn, Timeout
import swift.common.db
from swift.common.constraints import check_drive
from swift.container.backend import ContainerBroker, DATADIR
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout, LockTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger, config_true_value, \
dump_recon_cache, majority_size, Timestamp, EventletRateLimiter, \
eventlet_monkey_patch, node_to_string
from swift.common.daemon import Daemon
from swift.common.http import is_success, HTTP_INTERNAL_SERVER_ERROR
from swift.common.recon import RECON_CONTAINER_FILE, DEFAULT_RECON_CACHE_PATH
class ContainerUpdater(Daemon):
"""Update container information in account listings."""
def __init__(self, conf, logger=None):
self.conf = conf
self.logger = logger or get_logger(conf, log_route='container-updater')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.interval = float(conf.get('interval', 300))
self.account_ring = None
self.concurrency = int(conf.get('concurrency', 4))
if 'slowdown' in conf:
self.logger.warning(
'The slowdown option is deprecated in favor of '
'containers_per_second. This option may be ignored in a '
'future release.')
containers_per_second = 1 / (
float(conf.get('slowdown', '0.01')) + 0.01)
else:
containers_per_second = 50
self.max_containers_per_second = \
float(conf.get('containers_per_second',
containers_per_second))
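        # Worked example of the deprecated 'slowdown' conversion above: the
        # default slowdown of 0.01 seconds per container becomes
        # 1 / (0.01 + 0.01) = 50 containers per second, matching the default
        # used when 'slowdown' is not configured at all.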
self.rate_limiter = EventletRateLimiter(self.max_containers_per_second)
self.node_timeout = float(conf.get('node_timeout', 3))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.no_changes = 0
self.successes = 0
self.failures = 0
self.account_suppressions = {}
self.account_suppression_time = \
float(conf.get('account_suppression_time', 60))
self.new_account_suppressions = None
swift.common.db.DB_PREALLOCATION = \
config_true_value(conf.get('db_preallocation', 'f'))
self.recon_cache_path = conf.get('recon_cache_path',
DEFAULT_RECON_CACHE_PATH)
self.rcache = os.path.join(self.recon_cache_path, RECON_CONTAINER_FILE)
self.user_agent = 'container-updater %s' % os.getpid()
def get_account_ring(self):
"""Get the account ring. Load it if it hasn't been yet."""
if not self.account_ring:
self.account_ring = Ring(self.swift_dir, ring_name='account')
return self.account_ring
def _listdir(self, path):
try:
return os.listdir(path)
except OSError as e:
self.logger.error('ERROR: Failed to get paths to drive '
'partitions: %s', e)
return []
def get_paths(self):
"""
Get paths to all of the partitions on each drive to be processed.
:returns: a list of paths
"""
paths = []
for device in self._listdir(self.devices):
try:
dev_path = check_drive(self.devices, device, self.mount_check)
except ValueError as err:
self.logger.warning("%s", err)
continue
con_path = os.path.join(dev_path, DATADIR)
if not os.path.exists(con_path):
continue
for partition in self._listdir(con_path):
paths.append(os.path.join(con_path, partition))
shuffle(paths)
return paths
def _load_suppressions(self, filename):
try:
with open(filename, 'r') as tmpfile:
for line in tmpfile:
account, until = line.split()
until = float(until)
self.account_suppressions[account] = until
except Exception:
self.logger.exception(
'ERROR with loading suppressions from %s: ', filename)
finally:
os.unlink(filename)
def run_forever(self, *args, **kwargs):
"""
Run the updater continuously.
"""
time.sleep(random() * self.interval)
while True:
self.logger.info('Begin container update sweep')
begin = time.time()
now = time.time()
expired_suppressions = \
[a for a, u in self.account_suppressions.items()
if u < now]
for account in expired_suppressions:
del self.account_suppressions[account]
pid2filename = {}
# read from account ring to ensure it's fresh
self.get_account_ring().get_nodes('')
for path in self.get_paths():
while len(pid2filename) >= self.concurrency:
pid = os.wait()[0]
try:
self._load_suppressions(pid2filename[pid])
finally:
del pid2filename[pid]
fd, tmpfilename = mkstemp()
os.close(fd)
pid = os.fork()
if pid:
pid2filename[pid] = tmpfilename
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
eventlet_monkey_patch()
self.no_changes = 0
self.successes = 0
self.failures = 0
self.new_account_suppressions = open(tmpfilename, 'w')
forkbegin = time.time()
self.container_sweep(path)
elapsed = time.time() - forkbegin
self.logger.debug(
'Container update sweep of %(path)s completed: '
'%(elapsed).02fs, %(success)s successes, %(fail)s '
'failures, %(no_change)s with no changes',
{'path': path, 'elapsed': elapsed,
'success': self.successes, 'fail': self.failures,
'no_change': self.no_changes})
sys.exit()
while pid2filename:
pid = os.wait()[0]
try:
self._load_suppressions(pid2filename[pid])
finally:
del pid2filename[pid]
elapsed = time.time() - begin
self.logger.info('Container update sweep completed: %.02fs',
elapsed)
dump_recon_cache({'container_updater_sweep': elapsed},
self.rcache, self.logger)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""
Run the updater once.
"""
eventlet_monkey_patch()
self.logger.info('Begin container update single threaded sweep')
begin = time.time()
self.no_changes = 0
self.successes = 0
self.failures = 0
for path in self.get_paths():
self.container_sweep(path)
elapsed = time.time() - begin
self.logger.info(
'Container update single threaded sweep completed: '
'%(elapsed).02fs, %(success)s successes, %(fail)s failures, '
'%(no_change)s with no changes',
{'elapsed': elapsed, 'success': self.successes,
'fail': self.failures, 'no_change': self.no_changes})
dump_recon_cache({'container_updater_sweep': elapsed},
self.rcache, self.logger)
def container_sweep(self, path):
"""
Walk the path looking for container DBs and process them.
:param path: path to walk
"""
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith('.db'):
dbfile = os.path.join(root, file)
try:
self.process_container(dbfile)
except (Exception, Timeout) as e:
self.logger.exception(
"Error processing container %s: %s", dbfile, e)
self.rate_limiter.wait()
def process_container(self, dbfile):
"""
Process a container, and update the information in the account.
:param dbfile: container DB to process
"""
start_time = time.time()
broker = ContainerBroker(dbfile, logger=self.logger)
try:
info = broker.get_info()
except LockTimeout as e:
self.logger.info(
"Failed to get container info (Lock timeout: %s); skipping.",
str(e))
return
# Don't send updates if the container was auto-created since it
# definitely doesn't have up to date statistics.
if Timestamp(info['put_timestamp']) <= 0:
return
if self.account_suppressions.get(info['account'], 0) > time.time():
return
if not broker.is_root_container():
# Don't double-up account stats.
# The sharder should get these stats to the root container,
# and the root's updater will get them to the right account.
info['object_count'] = info['bytes_used'] = 0
if info['put_timestamp'] > info['reported_put_timestamp'] or \
info['delete_timestamp'] > info['reported_delete_timestamp'] \
or info['object_count'] != info['reported_object_count'] or \
info['bytes_used'] != info['reported_bytes_used']:
container = '/%s/%s' % (info['account'], info['container'])
part, nodes = self.get_account_ring().get_nodes(info['account'])
events = [spawn(self.container_report, node, part, container,
info['put_timestamp'], info['delete_timestamp'],
info['object_count'], info['bytes_used'],
info['storage_policy_index'])
for node in nodes]
successes = 0
stub404s = 0
for event in events:
result = event.wait()
if is_success(result):
successes += 1
if result == 404:
stub404s += 1
if successes >= majority_size(len(events)):
self.logger.increment('successes')
self.successes += 1
self.logger.debug(
'Update report sent for %(container)s %(dbfile)s',
{'container': container, 'dbfile': dbfile})
broker.reported(info['put_timestamp'],
info['delete_timestamp'], info['object_count'],
info['bytes_used'])
elif stub404s == len(events):
self.logger.increment('failures')
self.failures += 1
self.logger.debug(
'Update report stub for %(container)s %(dbfile)s',
{'container': container, 'dbfile': dbfile})
broker.quarantine('no account replicas exist')
# All that's left at this point is a few sacks of Gnocchi,
# easily collected by the dark data watcher in object auditor.
else:
self.logger.increment('failures')
self.failures += 1
self.logger.debug(
'Update report failed for %(container)s %(dbfile)s',
{'container': container, 'dbfile': dbfile})
self.account_suppressions[info['account']] = until = \
time.time() + self.account_suppression_time
if self.new_account_suppressions:
print(info['account'], until,
file=self.new_account_suppressions)
# Only track timing data for attempted updates:
self.logger.timing_since('timing', start_time)
else:
self.logger.increment('no_changes')
self.no_changes += 1
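        # Worked example of the report handling above, assuming the usual
        # three account replicas: majority_size(3) == 2, so at least two 2xx
        # responses mark the report as sent via broker.reported(); three 404s
        # (stub404s == len(events)) quarantine the broker; any other outcome
        # counts as a failure and suppresses the account for
        # account_suppression_time seconds.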
def container_report(self, node, part, container, put_timestamp,
delete_timestamp, count, bytes,
storage_policy_index):
"""
Report container info to an account server.
:param node: node dictionary from the account ring
:param part: partition the account is on
:param container: container name
:param put_timestamp: put timestamp
:param delete_timestamp: delete timestamp
:param count: object count in the container
:param bytes: bytes used in the container
:param storage_policy_index: the policy index for the container
"""
with ConnectionTimeout(self.conn_timeout):
try:
headers = {
'X-Put-Timestamp': put_timestamp,
'X-Delete-Timestamp': delete_timestamp,
'X-Object-Count': count,
'X-Bytes-Used': bytes,
'X-Account-Override-Deleted': 'yes',
'X-Backend-Storage-Policy-Index': storage_policy_index,
'user-agent': self.user_agent}
conn = http_connect(
node['replication_ip'], node['replication_port'],
node['device'], part, 'PUT', container, headers=headers)
except (Exception, Timeout):
self.logger.exception(
'ERROR account update failed with %s (will retry later):',
node_to_string(node, replication=True))
return HTTP_INTERNAL_SERVER_ERROR
with Timeout(self.node_timeout):
try:
resp = conn.getresponse()
resp.read()
return resp.status
except (Exception, Timeout):
if self.logger.getEffectiveLevel() <= logging.DEBUG:
self.logger.exception(
'Exception with %s',
node_to_string(node, replication=True))
return HTTP_INTERNAL_SERVER_ERROR
finally:
conn.close()
| swift-master | swift/container/updater.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift.container.backend import ContainerBroker
from swift.common.db_auditor import DatabaseAuditor
class ContainerAuditor(DatabaseAuditor):
"""Audit containers."""
server_type = "container"
broker_class = ContainerBroker
def _audit(self, job, broker):
return None
| swift-master | swift/container/auditor.py |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import string
import sys
import textwrap
import six
from six.moves.configparser import ConfigParser
from swift.common.utils import (
config_true_value, quorum_size, whataremyips, list_from_csv,
config_positive_int_value, get_zero_indexed_base_string, load_pkg_resource)
from swift.common.ring import Ring, RingData
from swift.common import utils
from swift.common.exceptions import RingLoadError
from pyeclib.ec_iface import ECDriver, ECDriverError, VALID_EC_TYPES
LEGACY_POLICY_NAME = 'Policy-0'
VALID_CHARS = '-' + string.ascii_letters + string.digits
DEFAULT_POLICY_TYPE = REPL_POLICY = 'replication'
EC_POLICY = 'erasure_coding'
DEFAULT_EC_OBJECT_SEGMENT_SIZE = 1048576
class BindPortsCache(object):
def __init__(self, swift_dir, ring_ip):
self.swift_dir = swift_dir
self.mtimes_by_ring_path = {}
self.portsets_by_ring_path = {}
self.my_ips = set(whataremyips(ring_ip))
def all_bind_ports_for_node(self):
"""
Given an iterable of IP addresses identifying a storage backend server,
return a set of all bind ports defined in all rings for this storage
backend server.
The caller is responsible for not calling this method (which performs
at least a stat on all ring files) too frequently.
"""
# NOTE: we don't worry about disappearing rings here because you can't
# ever delete a storage policy.
for policy in POLICIES:
# NOTE: we must NOT use policy.load_ring to load the ring. Users
# of this utility function will not need the actual ring data, just
# the bind ports.
#
# This is duplicated with Ring.__init__ just a bit...
serialized_path = os.path.join(self.swift_dir,
policy.ring_name + '.ring.gz')
try:
new_mtime = os.path.getmtime(serialized_path)
except OSError:
continue
old_mtime = self.mtimes_by_ring_path.get(serialized_path)
if not old_mtime or old_mtime != new_mtime:
self.portsets_by_ring_path[serialized_path] = set(
dev['port']
for dev in RingData.load(serialized_path,
metadata_only=True).devs
if dev and dev['ip'] in self.my_ips)
self.mtimes_by_ring_path[serialized_path] = new_mtime
# No "break" here so that the above line will update the
# mtimes_by_ring_path entry for any ring that changes, not just
# the first one we notice.
# Return the requested set of ports from our (now-freshened) cache
return six.moves.reduce(set.union,
self.portsets_by_ring_path.values(), set())
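        # For example, if the cache currently holds port sets {6200, 6210}
        # for the object ring and {6201} for the container ring (made-up
        # values), the reduce above returns {6200, 6201, 6210}.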
class PolicyError(ValueError):
def __init__(self, msg, index=None):
if index is not None:
msg += ', for index %r' % index
super(PolicyError, self).__init__(msg)
def _get_policy_string(base, policy_index):
return get_zero_indexed_base_string(base, policy_index)
def get_policy_string(base, policy_or_index):
"""
Helper function to construct a string from a base and the policy.
Used to encode the policy index into either a file name or a
directory name by various modules.
:param base: the base string
:param policy_or_index: StoragePolicy instance, or an index
(string or int), if None the legacy
storage Policy-0 is assumed.
:returns: base name with policy index added
:raises PolicyError: if no policy exists with the given policy_index
"""
if isinstance(policy_or_index, BaseStoragePolicy):
policy = policy_or_index
else:
policy = POLICIES.get_by_index(policy_or_index)
if policy is None:
raise PolicyError("Unknown policy", index=policy_or_index)
return _get_policy_string(base, int(policy))
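# Example usage (assuming a policy with index 1 is configured in swift.conf):
# get_policy_string('objects', 1) returns 'objects-1', while
# get_policy_string('objects', 0) returns plain 'objects' because the legacy
# policy keeps the bare base name.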
def split_policy_string(policy_string):
"""
Helper function to convert a string representing a base and a
policy. Used to decode the policy from either a file name or
a directory name by various modules.
:param policy_string: base name with policy index added
:raises PolicyError: if given index does not map to a valid policy
:returns: a tuple, in the form (base, policy) where base is the base
string and policy is the StoragePolicy instance for the
index encoded in the policy_string.
"""
if '-' in policy_string:
base, policy_index = policy_string.rsplit('-', 1)
else:
base, policy_index = policy_string, None
policy = POLICIES.get_by_index(policy_index)
if get_policy_string(base, policy) != policy_string:
raise PolicyError("Unknown policy", index=policy_index)
return base, policy
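# Example usage, the inverse of get_policy_string (again assuming a policy
# with index 1 is configured): split_policy_string('objects-1') returns
# ('objects', <StoragePolicy index 1>) and split_policy_string('objects')
# returns ('objects', <StoragePolicy index 0>); a suffix with no matching
# policy raises PolicyError.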
class BaseStoragePolicy(object):
"""
Represents a storage policy. Not meant to be instantiated directly;
implement a derived subclasses (e.g. StoragePolicy, ECStoragePolicy, etc)
or use :func:`~swift.common.storage_policy.reload_storage_policies` to
load POLICIES from ``swift.conf``.
The object_ring property is lazy loaded once the service's ``swift_dir``
is known via :meth:`~StoragePolicyCollection.get_object_ring`, but it may
be over-ridden via object_ring kwarg at create time for testing or
actively loaded with :meth:`~StoragePolicy.load_ring`.
"""
policy_type_to_policy_cls = {}
def __init__(self, idx, name='', is_default=False, is_deprecated=False,
object_ring=None, aliases='',
diskfile_module='egg:swift#replication.fs'):
# do not allow BaseStoragePolicy class to be instantiated directly
if type(self) == BaseStoragePolicy:
raise TypeError("Can't instantiate BaseStoragePolicy directly")
# policy parameter validation
try:
self.idx = int(idx)
except ValueError:
raise PolicyError('Invalid index', idx)
if self.idx < 0:
raise PolicyError('Invalid index', idx)
self.alias_list = []
self.add_name(name)
if aliases:
names_list = list_from_csv(aliases)
for alias in names_list:
if alias == name:
continue
self.add_name(alias)
self.is_deprecated = config_true_value(is_deprecated)
self.is_default = config_true_value(is_default)
if self.policy_type not in BaseStoragePolicy.policy_type_to_policy_cls:
raise PolicyError('Invalid type', self.policy_type)
if self.is_deprecated and self.is_default:
raise PolicyError('Deprecated policy can not be default. '
'Invalid config', self.idx)
self.ring_name = _get_policy_string('object', self.idx)
self.object_ring = object_ring
self.diskfile_module = diskfile_module
@property
def name(self):
return self.alias_list[0]
@name.setter
def name_setter(self, name):
self._validate_policy_name(name)
self.alias_list[0] = name
@property
def aliases(self):
return ", ".join(self.alias_list)
def __int__(self):
return self.idx
def __hash__(self):
return hash(self.idx)
def __eq__(self, other):
return self.idx == int(other)
def __ne__(self, other):
return self.idx != int(other)
def __lt__(self, other):
return self.idx < int(other)
def __gt__(self, other):
return self.idx > int(other)
def __repr__(self):
return ("%s(%d, %r, is_default=%s, "
"is_deprecated=%s, policy_type=%r)") % \
(self.__class__.__name__, self.idx, self.alias_list,
self.is_default, self.is_deprecated, self.policy_type)
@classmethod
def register(cls, policy_type):
"""
Decorator for Storage Policy implementations to register
their StoragePolicy class. This will also set the policy_type
attribute on the registered implementation.
"""
def register_wrapper(policy_cls):
if policy_type in cls.policy_type_to_policy_cls:
raise PolicyError(
'%r is already registered for the policy_type %r' % (
cls.policy_type_to_policy_cls[policy_type],
policy_type))
cls.policy_type_to_policy_cls[policy_type] = policy_cls
policy_cls.policy_type = policy_type
return policy_cls
return register_wrapper
@classmethod
def _config_options_map(cls):
"""
Map config option name to StoragePolicy parameter name.
"""
return {
'name': 'name',
'aliases': 'aliases',
'policy_type': 'policy_type',
'default': 'is_default',
'deprecated': 'is_deprecated',
'diskfile_module': 'diskfile_module'
}
@classmethod
def from_config(cls, policy_index, options):
config_to_policy_option_map = cls._config_options_map()
policy_options = {}
for config_option, value in options.items():
try:
policy_option = config_to_policy_option_map[config_option]
except KeyError:
raise PolicyError('Invalid option %r in '
'storage-policy section' % config_option,
index=policy_index)
policy_options[policy_option] = value
return cls(policy_index, **policy_options)
def get_info(self, config=False):
"""
Return the info dict and conf file options for this policy.
:param config: boolean, if True all config options are returned
"""
info = {}
for config_option, policy_attribute in \
self._config_options_map().items():
info[config_option] = getattr(self, policy_attribute)
if not config:
# remove some options for public consumption
if not self.is_default:
info.pop('default')
if not self.is_deprecated:
info.pop('deprecated')
info.pop('policy_type')
info.pop('diskfile_module')
return info
def _validate_policy_name(self, name):
"""
Helper function to determine the validity of a policy name. Used
to check policy names before setting them.
:param name: a name string for a single policy name.
:raises PolicyError: if the policy name is invalid.
"""
if not name:
raise PolicyError('Invalid name %r' % name, self.idx)
# this is defensively restrictive, but could be expanded in the future
if not all(c in VALID_CHARS for c in name):
msg = 'Names are used as HTTP headers, and can not ' \
'reliably contain any characters not in %r. ' \
'Invalid name %r' % (VALID_CHARS, name)
raise PolicyError(msg, self.idx)
if name.upper() == LEGACY_POLICY_NAME.upper() and self.idx != 0:
msg = 'The name %s is reserved for policy index 0. ' \
'Invalid name %r' % (LEGACY_POLICY_NAME, name)
raise PolicyError(msg, self.idx)
if name.upper() in (existing_name.upper() for existing_name
in self.alias_list):
msg = 'The name %s is already assigned to this policy.' % name
raise PolicyError(msg, self.idx)
def add_name(self, name):
"""
Adds an alias name to the storage policy. Shouldn't be called
directly from the storage policy but instead through the
storage policy collection class, so lookups by name resolve
correctly.
:param name: a new alias for the storage policy
"""
self._validate_policy_name(name)
self.alias_list.append(name)
def remove_name(self, name):
"""
Removes an alias name from the storage policy. Shouldn't be called
directly from the storage policy but instead through the storage
policy collection class, so lookups by name resolve correctly. If
the name removed is the primary name then the next available alias
will be adopted as the new primary name.
:param name: a name assigned to the storage policy
"""
if name not in self.alias_list:
raise PolicyError("%s is not a name assigned to policy %s"
% (name, self.idx))
if len(self.alias_list) == 1:
raise PolicyError("Cannot remove only name %s from policy %s. "
"Policies must have at least one name."
% (name, self.idx))
else:
self.alias_list.remove(name)
def change_primary_name(self, name):
"""
Changes the primary/default name of the policy to a specified name.
:param name: a string name to replace the current primary name.
"""
if name == self.name:
return
elif name in self.alias_list:
self.remove_name(name)
else:
self._validate_policy_name(name)
self.alias_list.insert(0, name)
def validate_ring_data(self, ring_data):
"""
Validation hook used when loading the ring; currently only used for EC
"""
def load_ring(self, swift_dir, reload_time=None):
"""
Load the ring for this policy immediately.
:param swift_dir: path to rings
:param reload_time: time interval in seconds to check for a ring change
"""
if self.object_ring:
if reload_time is not None:
self.object_ring.reload_time = reload_time
return
self.object_ring = Ring(
swift_dir, ring_name=self.ring_name,
validation_hook=self.validate_ring_data, reload_time=reload_time)
@property
def quorum(self):
"""
Number of successful backend requests needed for the proxy to
consider the client request successful.
"""
raise NotImplementedError()
def get_diskfile_manager(self, *args, **kwargs):
"""
Return an instance of the diskfile manager class configured for this
storage policy.
:param args: positional args to pass to the diskfile manager
constructor.
:param kwargs: keyword args to pass to the diskfile manager
constructor.
:return: A disk file manager instance.
"""
try:
dfm_cls = load_pkg_resource('swift.diskfile', self.diskfile_module)
except ImportError as err:
raise PolicyError(
'Unable to load diskfile_module %s for policy %s: %s' %
(self.diskfile_module, self.name, err))
try:
dfm_cls.check_policy(self)
except ValueError:
raise PolicyError(
'Invalid diskfile_module %s for policy %s:%s (%s)' %
(self.diskfile_module, int(self), self.name, self.policy_type))
return dfm_cls(*args, **kwargs)
@BaseStoragePolicy.register(REPL_POLICY)
class StoragePolicy(BaseStoragePolicy):
"""
Represents a storage policy of type 'replication'. Default storage policy
class unless otherwise overridden from swift.conf.
Not meant to be instantiated directly; use
:func:`~swift.common.storage_policy.reload_storage_policies` to load
POLICIES from ``swift.conf``.
"""
@property
def quorum(self):
"""
Quorum concept in the replication case:
            floor(number of replicas / 2) + 1
"""
if not self.object_ring:
raise PolicyError('Ring is not loaded')
return quorum_size(self.object_ring.replica_count)
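        # For example, with a conventional three-replica object ring this
        # evaluates to quorum_size(3) == 2, i.e. two successful backend
        # responses are enough for the proxy to consider the request
        # successful.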
@BaseStoragePolicy.register(EC_POLICY)
class ECStoragePolicy(BaseStoragePolicy):
"""
Represents a storage policy of type 'erasure_coding'.
Not meant to be instantiated directly; use
:func:`~swift.common.storage_policy.reload_storage_policies` to load
POLICIES from ``swift.conf``.
"""
def __init__(self, idx, name='', aliases='', is_default=False,
is_deprecated=False, object_ring=None,
diskfile_module='egg:swift#erasure_coding.fs',
ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE,
ec_type=None, ec_ndata=None, ec_nparity=None,
ec_duplication_factor=1):
super(ECStoragePolicy, self).__init__(
idx=idx, name=name, aliases=aliases, is_default=is_default,
is_deprecated=is_deprecated, object_ring=object_ring,
diskfile_module=diskfile_module)
# Validate erasure_coding policy specific members
# ec_type is one of the EC implementations supported by PyEClib
if ec_type is None:
raise PolicyError('Missing ec_type')
if ec_type not in VALID_EC_TYPES:
raise PolicyError('Wrong ec_type %s for policy %s, should be one'
' of "%s"' % (ec_type, self.name,
', '.join(VALID_EC_TYPES)))
self._ec_type = ec_type
# Define _ec_ndata as the number of EC data fragments
# Accessible as the property "ec_ndata"
try:
value = int(ec_ndata)
if value <= 0:
raise ValueError
self._ec_ndata = value
except (TypeError, ValueError):
raise PolicyError('Invalid ec_num_data_fragments %r' %
ec_ndata, index=self.idx)
# Define _ec_nparity as the number of EC parity fragments
# Accessible as the property "ec_nparity"
try:
value = int(ec_nparity)
if value <= 0:
raise ValueError
self._ec_nparity = value
except (TypeError, ValueError):
raise PolicyError('Invalid ec_num_parity_fragments %r'
% ec_nparity, index=self.idx)
# Define _ec_segment_size as the encode segment unit size
# Accessible as the property "ec_segment_size"
try:
value = int(ec_segment_size)
if value <= 0:
raise ValueError
self._ec_segment_size = value
except (TypeError, ValueError):
raise PolicyError('Invalid ec_object_segment_size %r' %
ec_segment_size, index=self.idx)
if self._ec_type == 'isa_l_rs_vand' and self._ec_nparity >= 5:
logger = logging.getLogger("swift.common.storage_policy")
if not logger.handlers:
# If nothing else, log to stderr
logger.addHandler(logging.StreamHandler(sys.__stderr__))
logger.warning(
'Storage policy %s uses an EC configuration known to harm '
'data durability. Any data in this policy should be migrated. '
'See https://bugs.launchpad.net/swift/+bug/1639691 for '
'more information.' % self.name)
if not is_deprecated:
raise PolicyError(
'Storage policy %s uses an EC configuration known to harm '
'data durability. This policy MUST be deprecated.'
% self.name)
# Initialize PyECLib EC backend
try:
self.pyeclib_driver = \
ECDriver(k=self._ec_ndata, m=self._ec_nparity,
ec_type=self._ec_type)
except ECDriverError as e:
raise PolicyError("Error creating EC policy (%s)" % e,
index=self.idx)
# quorum size in the EC case depends on the choice of EC scheme.
self._ec_quorum_size = \
self._ec_ndata + self.pyeclib_driver.min_parity_fragments_needed()
self._fragment_size = None
self._ec_duplication_factor = \
config_positive_int_value(ec_duplication_factor)
@property
def ec_type(self):
return self._ec_type
@property
def ec_ndata(self):
return self._ec_ndata
@property
def ec_nparity(self):
return self._ec_nparity
@property
def ec_n_unique_fragments(self):
return self._ec_ndata + self._ec_nparity
@property
def ec_segment_size(self):
return self._ec_segment_size
@property
def fragment_size(self):
"""
Maximum length of a fragment, including header.
NB: a fragment archive is a sequence of 0 or more max-length
fragments followed by one possibly-shorter fragment.
"""
# Technically pyeclib's get_segment_info signature calls for
# (data_len, segment_size) but on a ranged GET we don't know the
# ec-content-length header before we need to compute where in the
# object we should request to align with the fragment size. So we
        # tell pyeclib a lie - from its perspective, as long as data_len >=
# segment_size it'll give us the answer we want. From our
# perspective, because we only use this answer to calculate the
# *minimum* size we should read from an object body even if data_len <
# segment_size we'll still only read *the whole one and only last
        # fragment* and pass that into pyeclib who will know what to do with
# it just as it always does when the last fragment is < fragment_size.
if self._fragment_size is None:
self._fragment_size = self.pyeclib_driver.get_segment_info(
self.ec_segment_size, self.ec_segment_size)['fragment_size']
return self._fragment_size
@property
def ec_scheme_description(self):
"""
This short hand form of the important parts of the ec schema is stored
in Object System Metadata on the EC Fragment Archives for debugging.
"""
return "%s %d+%d" % (self._ec_type, self._ec_ndata, self._ec_nparity)
@property
def ec_duplication_factor(self):
return self._ec_duplication_factor
def __repr__(self):
extra_info = ''
if self.ec_duplication_factor != 1:
extra_info = ', ec_duplication_factor=%d' % \
self.ec_duplication_factor
return ("%s, EC config(ec_type=%s, ec_segment_size=%d, "
"ec_ndata=%d, ec_nparity=%d%s)") % \
(super(ECStoragePolicy, self).__repr__(), self.ec_type,
self.ec_segment_size, self.ec_ndata, self.ec_nparity,
extra_info)
@classmethod
def _config_options_map(cls):
options = super(ECStoragePolicy, cls)._config_options_map()
options.update({
'ec_type': 'ec_type',
'ec_object_segment_size': 'ec_segment_size',
'ec_num_data_fragments': 'ec_ndata',
'ec_num_parity_fragments': 'ec_nparity',
'ec_duplication_factor': 'ec_duplication_factor',
})
return options
def get_info(self, config=False):
info = super(ECStoragePolicy, self).get_info(config=config)
if not config:
info.pop('ec_object_segment_size')
info.pop('ec_num_data_fragments')
info.pop('ec_num_parity_fragments')
info.pop('ec_type')
info.pop('ec_duplication_factor')
return info
@property
def quorum(self):
"""
Number of successful backend requests needed for the proxy to consider
the client PUT request successful.
The quorum size for EC policies defines the minimum number
of data + parity elements required to be able to guarantee
the desired fault tolerance, which is the number of data
elements supplemented by the minimum number of parity
elements required by the chosen erasure coding scheme.
        For example, for Reed-Solomon, the minimum number of parity
elements required is 1, and thus the quorum_size requirement
is ec_ndata + 1.
Given the number of parity elements required is not the same
for every erasure coding scheme, consult PyECLib for
min_parity_fragments_needed()
"""
return self._ec_quorum_size * self.ec_duplication_factor
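        # Illustrative numbers only: for a 10+4 Reed-Solomon policy,
        # min_parity_fragments_needed() is 1, so _ec_quorum_size is
        # 10 + 1 = 11; with ec_duplication_factor = 2 the quorum returned
        # here would be 22.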
def validate_ring_data(self, ring_data):
"""
EC specific validation
Replica count check - we need _at_least_ (#data + #parity) replicas
configured. Also if the replica count is larger than exactly that
number there's a non-zero risk of error for code that is
considering the number of nodes in the primary list from the ring.
"""
configured_fragment_count = ring_data.replica_count
required_fragment_count = \
(self.ec_n_unique_fragments) * self.ec_duplication_factor
if configured_fragment_count != required_fragment_count:
raise RingLoadError(
'EC ring for policy %s needs to be configured with '
'exactly %d replicas. Got %s.' % (
self.name, required_fragment_count,
configured_fragment_count))
def get_backend_index(self, node_index):
"""
Backend index for PyECLib
:param node_index: integer of node index
:return: integer of actual fragment index. if param is not an integer,
return None instead
"""
try:
node_index = int(node_index)
except ValueError:
return None
return node_index % self.ec_n_unique_fragments
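        # For example, for a hypothetical 10+4 policy with
        # ec_duplication_factor=2 (a 28-replica ring), node_index 17 maps to
        # fragment index 17 % 14 == 3, i.e. the duplicate node holds the same
        # fragment archive as node 3.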
class StoragePolicyCollection(object):
"""
This class represents the collection of valid storage policies for the
cluster and is instantiated as :class:`StoragePolicy` objects are added to
the collection when ``swift.conf`` is parsed by
:func:`parse_storage_policies`.
When a StoragePolicyCollection is created, the following validation
is enforced:
* If a policy with index 0 is not declared and no other policies defined,
Swift will create one
* The policy index must be a non-negative integer
* If no policy is declared as the default and no other policies are
defined, the policy with index 0 is set as the default
* Policy indexes must be unique
* Policy names are required
* Policy names are case insensitive
* Policy names must contain only letters, digits or a dash
* Policy names must be unique
* The policy name 'Policy-0' can only be used for the policy with index 0
* If any policies are defined, exactly one policy must be declared default
* Deprecated policies can not be declared the default
"""
def __init__(self, pols):
self.default = []
self.by_name = {}
self.by_index = {}
self._validate_policies(pols)
def _add_policy(self, policy):
"""
Add pre-validated policies to internal indexes.
"""
for name in policy.alias_list:
self.by_name[name.upper()] = policy
self.by_index[int(policy)] = policy
def __repr__(self):
return (textwrap.dedent("""
StoragePolicyCollection([
%s
])
""") % ',\n '.join(repr(p) for p in self)).strip()
def __len__(self):
return len(self.by_index)
def __getitem__(self, key):
return self.by_index[key]
def __iter__(self):
return iter(self.by_index.values())
def _validate_policies(self, policies):
"""
:param policies: list of policies
"""
for policy in policies:
if int(policy) in self.by_index:
raise PolicyError('Duplicate index %s conflicts with %s' % (
policy, self.get_by_index(int(policy))))
for name in policy.alias_list:
if name.upper() in self.by_name:
raise PolicyError('Duplicate name %s conflicts with %s' % (
policy, self.get_by_name(name)))
if policy.is_default:
if not self.default:
self.default = policy
else:
raise PolicyError(
'Duplicate default %s conflicts with %s' % (
policy, self.default))
self._add_policy(policy)
# If a 0 policy wasn't explicitly given, or nothing was
# provided, create the 0 policy now
if 0 not in self.by_index:
if len(self) != 0:
raise PolicyError('You must specify a storage policy '
'section for policy index 0 in order '
'to define multiple policies')
self._add_policy(StoragePolicy(0, name=LEGACY_POLICY_NAME))
# at least one policy must be enabled
enabled_policies = [p for p in self if not p.is_deprecated]
if not enabled_policies:
raise PolicyError("Unable to find policy that's not deprecated!")
# if needed, specify default
if not self.default:
if len(self) > 1:
raise PolicyError("Unable to find default policy")
self.default = self[0]
self.default.is_default = True
def get_by_name(self, name):
"""
Find a storage policy by its name.
:param name: name of the policy
:returns: storage policy, or None
"""
return self.by_name.get(name.upper())
def get_by_index(self, index):
"""
Find a storage policy by its index.
An index of None will be treated as 0.
:param index: numeric index of the storage policy
:returns: storage policy, or None if no such policy
"""
# makes it easier for callers to just pass in a header value
if index in ('', None):
index = 0
else:
try:
index = int(index)
except ValueError:
return None
return self.by_index.get(index)
def get_by_name_or_index(self, name_or_index):
by_name = self.get_by_name(name_or_index)
by_index = self.get_by_index(name_or_index)
if by_name and by_index and by_name != by_index:
raise PolicyError(
"Found different polices when searching by "
"name (%s) and by index (%s)" % (by_name, by_index))
return by_name or by_index
@property
def legacy(self):
return self.get_by_index(None)
def get_object_ring(self, policy_idx, swift_dir):
"""
Get the ring object to use to handle a request based on its policy.
An index of None will be treated as 0.
:param policy_idx: policy index as defined in swift.conf
:param swift_dir: swift_dir used by the caller
:returns: appropriate ring object
"""
policy = self.get_by_index(policy_idx)
if not policy:
raise PolicyError("No policy with index %s" % policy_idx)
if not policy.object_ring:
policy.load_ring(swift_dir)
return policy.object_ring
def get_policy_info(self):
"""
Build info about policies for the /info endpoint
:returns: list of dicts containing relevant policy information
"""
policy_info = []
for pol in self:
# delete from /info if deprecated
if pol.is_deprecated:
continue
policy_entry = pol.get_info()
policy_info.append(policy_entry)
return policy_info
def add_policy_alias(self, policy_index, *aliases):
"""
Adds a new name or names to a policy
:param policy_index: index of a policy in this policy collection.
:param aliases: arbitrary number of string policy names to add.
"""
policy = self.get_by_index(policy_index)
for alias in aliases:
if alias.upper() in self.by_name:
raise PolicyError('Duplicate name %s in use '
'by policy %s' % (alias,
self.get_by_name(alias)))
else:
policy.add_name(alias)
self.by_name[alias.upper()] = policy
def remove_policy_alias(self, *aliases):
"""
Removes a name or names from a policy. If the name removed is the
primary name then the next available alias will be adopted
as the new primary name.
:param aliases: arbitrary number of existing policy names to remove.
"""
for alias in aliases:
policy = self.get_by_name(alias)
if not policy:
raise PolicyError('No policy with name %s exists.' % alias)
if len(policy.alias_list) == 1:
raise PolicyError('Policy %s with name %s has only one name. '
'Policies must have at least one name.' % (
policy, alias))
else:
policy.remove_name(alias)
del self.by_name[alias.upper()]
def change_policy_primary_name(self, policy_index, new_name):
"""
Changes the primary or default name of a policy. The new primary
name can be an alias that already belongs to the policy or a
completely new name.
:param policy_index: index of a policy in this policy collection.
:param new_name: a string name to set as the new default name.
"""
policy = self.get_by_index(policy_index)
name_taken = self.get_by_name(new_name)
# if the name belongs to some other policy in the collection
if name_taken and name_taken != policy:
raise PolicyError('Other policy %s with name %s exists.' %
(self.get_by_name(new_name).idx, new_name))
else:
policy.change_primary_name(new_name)
self.by_name[new_name.upper()] = policy
def parse_storage_policies(conf):
"""
Parse storage policies in ``swift.conf`` - note that validation
is done when the :class:`StoragePolicyCollection` is instantiated.
:param conf: ConfigParser parser object for swift.conf
"""
policies = []
for section in conf.sections():
if not section.startswith('storage-policy:'):
continue
policy_index = section.split(':', 1)[1]
config_options = dict(conf.items(section))
policy_type = config_options.pop('policy_type', DEFAULT_POLICY_TYPE)
policy_cls = BaseStoragePolicy.policy_type_to_policy_cls[policy_type]
policy = policy_cls.from_config(policy_index, config_options)
policies.append(policy)
return StoragePolicyCollection(policies)
class StoragePolicySingleton(object):
"""
An instance of this class is the primary interface to storage policies
exposed as a module level global named ``POLICIES``. This global
reference wraps ``_POLICIES`` which is normally instantiated by parsing
``swift.conf`` and will result in an instance of
:class:`StoragePolicyCollection`.
You should never patch this instance directly, instead patch the module
level ``_POLICIES`` instance so that swift code which imported
``POLICIES`` directly will reference the patched
:class:`StoragePolicyCollection`.
"""
def __iter__(self):
return iter(_POLICIES)
def __len__(self):
return len(_POLICIES)
def __getitem__(self, key):
return _POLICIES[key]
def __getattribute__(self, name):
return getattr(_POLICIES, name)
def __repr__(self):
return repr(_POLICIES)
def reload_storage_policies():
"""
Reload POLICIES from ``swift.conf``.
"""
global _POLICIES
if six.PY2:
policy_conf = ConfigParser()
else:
# Python 3.2 disallows section or option duplicates by default
# strict=False allows us to preserve the older behavior
policy_conf = ConfigParser(strict=False)
policy_conf.read(utils.SWIFT_CONF_FILE)
try:
_POLICIES = parse_storage_policies(policy_conf)
except PolicyError as e:
raise SystemExit('ERROR: Invalid Storage Policy Configuration '
'in %s (%s)' % (utils.SWIFT_CONF_FILE, e))
# parse configuration and setup singleton
_POLICIES = None
reload_storage_policies()
POLICIES = StoragePolicySingleton()
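# Illustrative sketch (not part of the original module): read-only use of the
# POLICIES singleton defined above. Tests that need different policies should
# patch the module-level _POLICIES (not POLICIES), as the
# StoragePolicySingleton docstring explains; the index used here is
# hypothetical.
def _example_policy_lookup(policy_index=0):
    # item access and iteration are proxied straight through to _POLICIES
    return POLICIES[policy_index]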
| swift-master | swift/common/storage_policy.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Why our own memcache client?
By Michael Barton
python-memcached doesn't use consistent hashing, so adding or
removing a memcache server from the pool invalidates a huge
percentage of cached items.
If you keep a pool of python-memcached client objects, each client
object has its own connection to every memcached server, only one of
which is ever in use. So you wind up with n * m open sockets and
almost all of them idle. This client effectively has a pool for each
server, so the number of backend connections is hopefully greatly
reduced.
python-memcached uses pickle to store things, and there was already a
huge stink about Swift using pickles in memcache
(http://osvdb.org/show/osvdb/86581). That seemed sort of unfair,
since nova and keystone and everyone else use pickles for memcache
too, but it's hidden behind a "standard" library. But changing would
be a security regression at this point.
Also, pylibmc wouldn't work for us because it needs to use python
sockets in order to play nice with eventlet.
Lucid comes with memcached: v1.4.2. Protocol documentation for that
version is at:
http://github.com/memcached/memcached/blob/1.4.2/doc/protocol.txt
"""
import os
import six
import json
import logging
# the 'time' module is imported as 'tm' so that member functions in this file
# can keep using 'time' as a parameter name without shadowing the module.
import time as tm
from bisect import bisect
from eventlet.green import socket, ssl
from eventlet.pools import Pool
from eventlet import Timeout
from six.moves import range
from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError
from swift.common import utils
from swift.common.utils import md5, human_readable, config_true_value, \
memcached_timing_stats
DEFAULT_MEMCACHED_PORT = 11211
CONN_TIMEOUT = 0.3
POOL_TIMEOUT = 1.0 # WAG
IO_TIMEOUT = 2.0
PICKLE_FLAG = 1
JSON_FLAG = 2
NODE_WEIGHT = 50
TRY_COUNT = 3
# if ERROR_LIMIT_COUNT errors occur in ERROR_LIMIT_TIME seconds, the server
# will be considered failed for ERROR_LIMIT_DURATION seconds.
ERROR_LIMIT_COUNT = 10
ERROR_LIMIT_TIME = ERROR_LIMIT_DURATION = 60
DEFAULT_ITEM_SIZE_WARNING_THRESHOLD = -1
# Different sample rates for emitting Memcached timing stats.
TIMING_SAMPLE_RATE_HIGH = 0.1
TIMING_SAMPLE_RATE_MEDIUM = 0.01
TIMING_SAMPLE_RATE_LOW = 0.001
def md5hash(key):
if not isinstance(key, bytes):
if six.PY2:
key = key.encode('utf-8')
else:
key = key.encode('utf-8', errors='surrogateescape')
return md5(key, usedforsecurity=False).hexdigest().encode('ascii')
def sanitize_timeout(timeout):
"""
Sanitize a timeout value to use an absolute expiration time if the delta
is greater than 30 days (in seconds). Note that the memcached server
translates negative values to mean a delta of 30 days in seconds (and 1
additional second), client beware.
"""
if timeout > (30 * 24 * 60 * 60):
timeout += tm.time()
return int(timeout)
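# Illustrative sketch (not part of the original module): how sanitize_timeout
# treats short TTLs vs. ones past the 30-day cutoff; the values are
# hypothetical.
def _example_sanitize_timeout():
    month = 30 * 24 * 60 * 60
    assert sanitize_timeout(60) == 60          # short TTLs stay relative
    # anything beyond 30 days is converted to an absolute unix expiry time
    # (current time + delta), which is how memcached interprets large values
    assert sanitize_timeout(month + 1) > month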
def set_msg(key, flags, timeout, value):
if not isinstance(key, bytes):
raise TypeError('key must be bytes')
if not isinstance(value, bytes):
raise TypeError('value must be bytes')
return b' '.join([
b'set',
key,
str(flags).encode('ascii'),
str(timeout).encode('ascii'),
str(len(value)).encode('ascii'),
]) + (b'\r\n' + value + b'\r\n')
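# Illustrative sketch (not part of the original module): the text-protocol
# frame set_msg() builds for a hypothetical key/value, e.g.
# b'set <32-hex-hash> 2 60 5\r\nhello\r\n'
# (command, hashed key, flags, TTL, byte length, then the payload).
def _example_set_msg():
    return set_msg(md5hash('mykey'), JSON_FLAG, 60, b'hello')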
class MemcacheConnectionError(Exception):
pass
class MemcacheIncrNotFoundError(MemcacheConnectionError):
pass
class MemcachePoolTimeout(Timeout):
pass
class MemcacheConnPool(Pool):
"""
Connection pool for Memcache Connections
The *server* parameter can be a hostname, an IPv4 address, or an IPv6
address with an optional port. See
:func:`swift.common.utils.parse_socket_string` for details.
"""
def __init__(self, server, size, connect_timeout, tls_context=None):
Pool.__init__(self, max_size=size)
self.host, self.port = utils.parse_socket_string(
server, DEFAULT_MEMCACHED_PORT)
self._connect_timeout = connect_timeout
self._tls_context = tls_context
def create(self):
addrs = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC,
socket.SOCK_STREAM)
family, socktype, proto, canonname, sockaddr = addrs[0]
sock = socket.socket(family, socket.SOCK_STREAM)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
try:
with Timeout(self._connect_timeout):
sock.connect(sockaddr)
if self._tls_context:
sock = self._tls_context.wrap_socket(sock,
server_hostname=self.host)
except (Exception, Timeout):
sock.close()
raise
return (sock.makefile('rwb'), sock)
def get(self):
fp, sock = super(MemcacheConnPool, self).get()
try:
if fp is None:
# An error happened previously, so we need a new connection
fp, sock = self.create()
return fp, sock
except MemcachePoolTimeout:
# This is the only place that knows an item was successfully taken
# from the pool, so it has to be responsible for repopulating it.
# Any other errors should get handled in _get_conns(); see the
# comment about timeouts during create() there.
self.put((None, None))
raise
class MemcacheCommand(object):
"""
Helper class that encapsulates common parameters of a command.
:param method: the name of the MemcacheRing method that was called.
:param key: the memcached key.
"""
__slots__ = ('method', 'key', 'command', 'hash_key')
def __init__(self, method, key):
self.method = method
self.key = key
self.command = method.encode()
self.hash_key = md5hash(key)
@property
def key_prefix(self):
        # Get the prefix of a user-provided memcache key by removing the
        # content after the last '/'. All current usages within swift use
        # such prefixes, e.g. "shard-updating-v2" and "nvratelimit".
return self.key.rsplit('/', 1)[0]
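# Illustrative sketch (not part of the original module): the fields
# MemcacheCommand derives from a hypothetical key.
def _example_memcache_command():
    cmd = MemcacheCommand('set', 'nvratelimit/some-bucket')
    assert cmd.command == b'set'
    assert cmd.key_prefix == 'nvratelimit'  # used for logging/metrics only
    return cmd.hash_key                     # md5 hex digest of the full key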
class MemcacheRing(object):
"""
Simple, consistent-hashed memcache client.
"""
def __init__(
self, servers, connect_timeout=CONN_TIMEOUT,
io_timeout=IO_TIMEOUT, pool_timeout=POOL_TIMEOUT,
tries=TRY_COUNT,
max_conns=2, tls_context=None, logger=None,
error_limit_count=ERROR_LIMIT_COUNT,
error_limit_time=ERROR_LIMIT_TIME,
error_limit_duration=ERROR_LIMIT_DURATION,
item_size_warning_threshold=DEFAULT_ITEM_SIZE_WARNING_THRESHOLD):
self._ring = {}
self._errors = dict(((serv, []) for serv in servers))
self._error_limited = dict(((serv, 0) for serv in servers))
self._error_limit_count = error_limit_count
self._error_limit_time = error_limit_time
self._error_limit_duration = error_limit_duration
for server in sorted(servers):
for i in range(NODE_WEIGHT):
self._ring[md5hash('%s-%s' % (server, i))] = server
self._tries = tries if tries <= len(servers) else len(servers)
self._sorted = sorted(self._ring)
self._client_cache = dict((
(server, MemcacheConnPool(server, max_conns, connect_timeout,
tls_context=tls_context))
for server in servers))
self._connect_timeout = connect_timeout
self._io_timeout = io_timeout
self._pool_timeout = pool_timeout
if logger is None:
self.logger = logging.getLogger()
else:
self.logger = logger
self.item_size_warning_threshold = item_size_warning_threshold
@property
def memcache_servers(self):
return list(self._client_cache.keys())
"""
Handles exceptions.
:param server: a server.
:param e: an exception.
:param cmd: an instance of MemcacheCommand.
:param conn_start_time: the time at which the failed operation started.
:param action: a verb describing the operation.
:param sock: an optional socket that needs to be closed by this method.
:param fp: an optional file pointer that needs to be closed by this method.
:param got_connection: if ``True``, the server's connection will be reset
in the cached connection pool.
"""
def _exception_occurred(self, server, e, cmd, conn_start_time,
action='talking', sock=None,
fp=None, got_connection=True):
if isinstance(e, Timeout):
self.logger.error(
"Timeout %(action)s to memcached: %(server)s"
": with key_prefix %(key_prefix)s, method %(method)s, "
"config_timeout %(config_timeout)s, time_spent %(time_spent)s",
{'action': action, 'server': server,
'key_prefix': cmd.key_prefix, 'method': cmd.method,
'config_timeout': e.seconds,
'time_spent': tm.time() - conn_start_time})
self.logger.timing_since(
'memcached.' + cmd.method + '.timeout.timing',
conn_start_time)
elif isinstance(e, (socket.error, MemcacheConnectionError)):
self.logger.error(
"Error %(action)s to memcached: %(server)s: "
"with key_prefix %(key_prefix)s, method %(method)s, "
"time_spent %(time_spent)s, %(err)s",
{'action': action, 'server': server,
'key_prefix': cmd.key_prefix, 'method': cmd.method,
'time_spent': tm.time() - conn_start_time, 'err': e})
self.logger.timing_since(
'memcached.' + cmd.method + '.conn_err.timing',
conn_start_time)
else:
self.logger.exception(
"Error %(action)s to memcached: %(server)s"
": with key_prefix %(key_prefix)s, method %(method)s, "
"time_spent %(time_spent)s",
{'action': action, 'server': server,
'key_prefix': cmd.key_prefix, 'method': cmd.method,
'time_spent': tm.time() - conn_start_time})
self.logger.timing_since(
'memcached.' + cmd.method + '.errors.timing', conn_start_time)
try:
if fp:
fp.close()
del fp
except Exception:
pass
try:
if sock:
sock.close()
del sock
except Exception:
pass
if got_connection:
# We need to return something to the pool
# A new connection will be created the next time it is retrieved
self._return_conn(server, None, None)
if isinstance(e, MemcacheIncrNotFoundError):
# these errors can be caused by other greenthreads not yielding to
# the incr greenthread often enough, rather than a server problem,
# so don't error limit the server
return
if self._error_limit_time <= 0 or self._error_limit_duration <= 0:
return
now = tm.time()
self._errors[server].append(now)
if len(self._errors[server]) > self._error_limit_count:
self._errors[server] = [err for err in self._errors[server]
if err > now - self._error_limit_time]
if len(self._errors[server]) > self._error_limit_count:
self._error_limited[server] = now + self._error_limit_duration
self.logger.error('Error limiting server %s', server)
def _get_conns(self, cmd):
"""
Retrieves a server conn from the pool, or connects a new one.
Chooses the server based on a consistent hash of "key".
:param cmd: an instance of MemcacheCommand.
:return: generator to serve memcached connection
"""
pos = bisect(self._sorted, cmd.hash_key)
served = []
any_yielded = False
while len(served) < self._tries:
pos = (pos + 1) % len(self._sorted)
server = self._ring[self._sorted[pos]]
if server in served:
continue
served.append(server)
pool_start_time = tm.time()
if self._error_limited[server] > pool_start_time:
continue
sock = None
try:
with MemcachePoolTimeout(self._pool_timeout):
fp, sock = self._client_cache[server].get()
any_yielded = True
yield server, fp, sock
except MemcachePoolTimeout as e:
self._exception_occurred(server, e, cmd, pool_start_time,
action='getting a connection',
got_connection=False)
except (Exception, Timeout) as e:
# Typically a Timeout exception caught here is the one raised
# by the create() method of this server's MemcacheConnPool
# object.
self._exception_occurred(server, e, cmd, pool_start_time,
action='connecting', sock=sock)
if not any_yielded:
self.logger.error('All memcached servers error-limited')
def _return_conn(self, server, fp, sock):
"""Returns a server connection to the pool."""
self._client_cache[server].put((fp, sock))
# Sample rates of different memcached operations are based on generic
# swift usage patterns.
@memcached_timing_stats(sample_rate=TIMING_SAMPLE_RATE_HIGH)
def set(self, key, value, serialize=True, time=0,
min_compress_len=0, raise_on_error=False):
"""
Set a key/value pair in memcache
:param key: key
:param value: value
:param serialize: if True, value is serialized with JSON before sending
to memcache
:param time: the time to live
:param min_compress_len: minimum compress length, this parameter was
added to keep the signature compatible with
python-memcached interface. This
implementation ignores it.
:param raise_on_error: if True, propagate Timeouts and other errors.
By default, errors are ignored.
"""
cmd = MemcacheCommand('set', key)
timeout = sanitize_timeout(time)
flags = 0
if serialize:
if isinstance(value, bytes):
value = value.decode('utf8')
value = json.dumps(value).encode('ascii')
flags |= JSON_FLAG
elif not isinstance(value, bytes):
value = str(value).encode('utf-8')
for (server, fp, sock) in self._get_conns(cmd):
conn_start_time = tm.time()
try:
with Timeout(self._io_timeout):
sock.sendall(set_msg(cmd.hash_key, flags, timeout, value))
# Wait for the set to complete
msg = fp.readline().strip()
if msg != b'STORED':
if not six.PY2:
msg = msg.decode('ascii')
self.logger.error(
"Error setting value in memcached: "
"%(server)s: %(msg)s",
{'server': server, 'msg': msg})
if 0 <= self.item_size_warning_threshold <= len(value):
self.logger.warning(
"Item size larger than warning threshold: "
"%d (%s) >= %d (%s)", len(value),
human_readable(len(value)),
self.item_size_warning_threshold,
human_readable(self.item_size_warning_threshold))
self._return_conn(server, fp, sock)
return
except (Exception, Timeout) as e:
self._exception_occurred(server, e, cmd, conn_start_time,
sock=sock, fp=fp)
if raise_on_error:
raise MemcacheConnectionError(
"No memcached connections succeeded.")
@memcached_timing_stats(sample_rate=TIMING_SAMPLE_RATE_MEDIUM)
def get(self, key, raise_on_error=False):
"""
Gets the object specified by key. It will also unserialize the object
before returning if it is serialized in memcache with JSON.
:param key: key
:param raise_on_error: if True, propagate Timeouts and other errors.
By default, errors are treated as cache misses.
:returns: value of the key in memcache
"""
cmd = MemcacheCommand('get', key)
value = None
for (server, fp, sock) in self._get_conns(cmd):
conn_start_time = tm.time()
try:
with Timeout(self._io_timeout):
sock.sendall(b'get ' + cmd.hash_key + b'\r\n')
line = fp.readline().strip().split()
while True:
if not line:
raise MemcacheConnectionError('incomplete read')
if line[0].upper() == b'END':
break
if (line[0].upper() == b'VALUE' and
line[1] == cmd.hash_key):
size = int(line[3])
value = fp.read(size)
if int(line[2]) & PICKLE_FLAG:
value = None
if int(line[2]) & JSON_FLAG:
value = json.loads(value)
fp.readline()
line = fp.readline().strip().split()
self._return_conn(server, fp, sock)
return value
except (Exception, Timeout) as e:
self._exception_occurred(server, e, cmd, conn_start_time,
sock=sock, fp=fp)
if raise_on_error:
raise MemcacheConnectionError(
"No memcached connections succeeded.")
def _incr_or_decr(self, fp, sock, cmd, delta):
sock.sendall(b' '.join([cmd.command, cmd.hash_key, delta]) + b'\r\n')
line = fp.readline().strip().split()
if not line:
raise MemcacheConnectionError('incomplete read')
if line[0].upper() == b'NOT_FOUND':
return None
return int(line[0].strip())
def _add(self, fp, sock, cmd, add_val, timeout):
sock.sendall(b' '.join([
b'add', cmd.hash_key, b'0', str(timeout).encode('ascii'),
str(len(add_val)).encode('ascii')
]) + b'\r\n' + add_val + b'\r\n')
line = fp.readline().strip().split()
return None if line[0].upper() == b'NOT_STORED' else int(add_val)
@memcached_timing_stats(sample_rate=TIMING_SAMPLE_RATE_LOW)
def incr(self, key, delta=1, time=0):
"""
Increments a key which has a numeric value by delta.
        If the key can't be found, it's added with a value of delta (or 0 if
        delta < 0). If passed a negative number, will use memcached's decr.
        Returns the int stored in memcached.
Note: The data memcached stores as the result of incr/decr is
an unsigned int. decr's that result in a number below 0 are
stored as 0.
:param key: key
:param delta: amount to add to the value of key (or set as the value
if the key is not found) will be cast to an int
:param time: the time to live
:returns: result of incrementing
:raises MemcacheConnectionError:
"""
cmd = MemcacheCommand('incr' if delta >= 0 else 'decr', key)
delta_val = str(abs(int(delta))).encode('ascii')
timeout = sanitize_timeout(time)
for (server, fp, sock) in self._get_conns(cmd):
conn_start_time = tm.time()
try:
with Timeout(self._io_timeout):
new_val = self._incr_or_decr(fp, sock, cmd, delta_val)
if new_val is None:
add_val = b'0' if cmd.method == 'decr' else delta_val
new_val = self._add(fp, sock, cmd, add_val, timeout)
if new_val is None:
new_val = self._incr_or_decr(
fp, sock, cmd, delta_val)
if new_val is None:
# This can happen if this thread takes more
# than the TTL to get from the first failed
# incr to the second incr, during which time
# the key was concurrently added and expired.
raise MemcacheIncrNotFoundError(
'expired ttl=%s' % time)
self._return_conn(server, fp, sock)
return new_val
except (Exception, Timeout) as e:
self._exception_occurred(server, e, cmd, conn_start_time,
sock=sock, fp=fp)
raise MemcacheConnectionError("No memcached connections succeeded.")
@memcached_timing_stats(sample_rate=TIMING_SAMPLE_RATE_LOW)
def decr(self, key, delta=1, time=0):
"""
Decrements a key which has a numeric value by delta. Calls incr with
-delta.
:param key: key
:param delta: amount to subtract to the value of key (or set the
value to 0 if the key is not found) will be cast to
an int
:param time: the time to live
:returns: result of decrementing
:raises MemcacheConnectionError:
"""
return self.incr(key, delta=-delta, time=time)
@memcached_timing_stats(sample_rate=TIMING_SAMPLE_RATE_HIGH)
def delete(self, key, server_key=None):
"""
Deletes a key/value pair from memcache.
:param key: key to be deleted
:param server_key: key to use in determining which server in the ring
is used
"""
cmd = server_cmd = MemcacheCommand('delete', key)
if server_key:
server_cmd = MemcacheCommand('delete', server_key)
for (server, fp, sock) in self._get_conns(server_cmd):
conn_start_time = tm.time()
try:
with Timeout(self._io_timeout):
sock.sendall(b'delete ' + cmd.hash_key + b'\r\n')
# Wait for the delete to complete
fp.readline()
self._return_conn(server, fp, sock)
return
except (Exception, Timeout) as e:
self._exception_occurred(server, e, cmd, conn_start_time,
sock=sock, fp=fp)
@memcached_timing_stats(sample_rate=TIMING_SAMPLE_RATE_HIGH)
def set_multi(self, mapping, server_key, serialize=True, time=0,
min_compress_len=0):
"""
Sets multiple key/value pairs in memcache.
:param mapping: dictionary of keys and values to be set in memcache
:param server_key: key to use in determining which server in the ring
is used
:param serialize: if True, value is serialized with JSON before sending
to memcache.
:param time: the time to live
        :param min_compress_len: minimum compress length, this parameter was
            added to keep the signature compatible with the python-memcached
            interface. This implementation ignores it.
"""
cmd = MemcacheCommand('set_multi', server_key)
timeout = sanitize_timeout(time)
msg = []
for key, value in mapping.items():
key = md5hash(key)
flags = 0
if serialize:
if isinstance(value, bytes):
value = value.decode('utf8')
value = json.dumps(value).encode('ascii')
flags |= JSON_FLAG
msg.append(set_msg(key, flags, timeout, value))
for (server, fp, sock) in self._get_conns(cmd):
conn_start_time = tm.time()
try:
with Timeout(self._io_timeout):
sock.sendall(b''.join(msg))
# Wait for the set to complete
for line in range(len(mapping)):
fp.readline()
self._return_conn(server, fp, sock)
return
except (Exception, Timeout) as e:
self._exception_occurred(server, e, cmd, conn_start_time,
sock=sock, fp=fp)
@memcached_timing_stats(sample_rate=TIMING_SAMPLE_RATE_HIGH)
def get_multi(self, keys, server_key):
"""
Gets multiple values from memcache for the given keys.
:param keys: keys for values to be retrieved from memcache
:param server_key: key to use in determining which server in the ring
is used
:returns: list of values
"""
cmd = MemcacheCommand('get_multi', server_key)
hash_keys = [md5hash(key) for key in keys]
for (server, fp, sock) in self._get_conns(cmd):
conn_start_time = tm.time()
try:
with Timeout(self._io_timeout):
sock.sendall(b'get ' + b' '.join(hash_keys) + b'\r\n')
line = fp.readline().strip().split()
responses = {}
while True:
if not line:
raise MemcacheConnectionError('incomplete read')
if line[0].upper() == b'END':
break
if line[0].upper() == b'VALUE':
size = int(line[3])
value = fp.read(size)
if int(line[2]) & PICKLE_FLAG:
value = None
elif int(line[2]) & JSON_FLAG:
value = json.loads(value)
responses[line[1]] = value
fp.readline()
line = fp.readline().strip().split()
values = []
for key in hash_keys:
if key in responses:
values.append(responses[key])
else:
values.append(None)
self._return_conn(server, fp, sock)
return values
except (Exception, Timeout) as e:
self._exception_occurred(server, e, cmd, conn_start_time,
sock=sock, fp=fp)
def load_memcache(conf, logger):
"""
Build a MemcacheRing object from the given config. It will also use the
passed in logger.
:param conf: a dict, the config options
:param logger: a logger
"""
memcache_servers = conf.get('memcache_servers')
try:
# Originally, while we documented using memcache_max_connections
# we only accepted max_connections
max_conns = int(conf.get('memcache_max_connections',
conf.get('max_connections', 0)))
except ValueError:
max_conns = 0
memcache_options = {}
if (not memcache_servers
or max_conns <= 0):
path = os.path.join(conf.get('swift_dir', '/etc/swift'),
'memcache.conf')
memcache_conf = ConfigParser()
if memcache_conf.read(path):
# if memcache.conf exists we'll start with those base options
try:
memcache_options = dict(memcache_conf.items('memcache'))
except NoSectionError:
pass
if not memcache_servers:
try:
memcache_servers = \
memcache_conf.get('memcache', 'memcache_servers')
except (NoSectionError, NoOptionError):
pass
if max_conns <= 0:
try:
new_max_conns = \
memcache_conf.get('memcache',
'memcache_max_connections')
max_conns = int(new_max_conns)
except (NoSectionError, NoOptionError, ValueError):
pass
    # memcache.conf options are the base for the memcache middleware, but an
    # option set in the proxy config's filter section is more specific and
    # therefore overrides it.
memcache_options.update(conf)
connect_timeout = float(memcache_options.get(
'connect_timeout', CONN_TIMEOUT))
pool_timeout = float(memcache_options.get(
'pool_timeout', POOL_TIMEOUT))
tries = int(memcache_options.get('tries', TRY_COUNT))
io_timeout = float(memcache_options.get('io_timeout', IO_TIMEOUT))
if config_true_value(memcache_options.get('tls_enabled', 'false')):
tls_cafile = memcache_options.get('tls_cafile')
tls_certfile = memcache_options.get('tls_certfile')
tls_keyfile = memcache_options.get('tls_keyfile')
tls_context = ssl.create_default_context(
cafile=tls_cafile)
if tls_certfile:
tls_context.load_cert_chain(tls_certfile, tls_keyfile)
else:
tls_context = None
error_suppression_interval = float(memcache_options.get(
'error_suppression_interval', ERROR_LIMIT_TIME))
error_suppression_limit = float(memcache_options.get(
'error_suppression_limit', ERROR_LIMIT_COUNT))
item_size_warning_threshold = int(memcache_options.get(
'item_size_warning_threshold', DEFAULT_ITEM_SIZE_WARNING_THRESHOLD))
if not memcache_servers:
memcache_servers = '127.0.0.1:11211'
if max_conns <= 0:
max_conns = 2
return MemcacheRing(
[s.strip() for s in memcache_servers.split(',')
if s.strip()],
connect_timeout=connect_timeout,
pool_timeout=pool_timeout,
tries=tries,
io_timeout=io_timeout,
max_conns=max_conns,
tls_context=tls_context,
logger=logger,
error_limit_count=error_suppression_limit,
error_limit_time=error_suppression_interval,
error_limit_duration=error_suppression_interval,
item_size_warning_threshold=item_size_warning_threshold)
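# Illustrative sketch (not part of the original module): a minimal, purely
# hypothetical filter config handed to load_memcache(). Because it supplies
# memcache_servers and a positive connection count, memcache.conf is never
# consulted; unset options fall back to the module defaults above.
def _example_load_memcache(logger):
    conf = {
        'memcache_servers': '10.0.0.1:11211,10.0.0.2:11211',
        'memcache_max_connections': '4',
        'connect_timeout': '0.5',
        'tls_enabled': 'false',
    }
    return load_memcache(conf, logger)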
| swift-master | swift/common/memcached.py |
# Copyright (c) 2022 NVIDIA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
import hashlib
import hmac
import six
from swift.common.utils import strict_b64decode
DEFAULT_ALLOWED_DIGESTS = 'sha1 sha256 sha512'
DEPRECATED_DIGESTS = {'sha1'}
SUPPORTED_DIGESTS = set(DEFAULT_ALLOWED_DIGESTS.split()) | DEPRECATED_DIGESTS
def get_hmac(request_method, path, expires, key, digest="sha1",
ip_range=None):
"""
Returns the hexdigest string of the HMAC (see RFC 2104) for
the request.
:param request_method: Request method to allow.
:param path: The path to the resource to allow access to.
:param expires: Unix timestamp as an int for when the URL
expires.
:param key: HMAC shared secret.
    :param digest: constructor or the string name for the digest to use in
        calculating the HMAC. Defaults to SHA1.
:param ip_range: The ip range from which the resource is allowed
to be accessed. We need to put the ip_range as the
first argument to hmac to avoid manipulation of the path
due to newlines being valid in paths
e.g. /v1/a/c/o\\n127.0.0.1
:returns: hexdigest str of the HMAC for the request using the specified
digest algorithm.
"""
# These are the three mandatory fields.
parts = [request_method, str(expires), path]
formats = [b"%s", b"%s", b"%s"]
if ip_range:
parts.insert(0, ip_range)
formats.insert(0, b"ip=%s")
if not isinstance(key, six.binary_type):
key = key.encode('utf8')
message = b'\n'.join(
fmt % (part if isinstance(part, six.binary_type)
else part.encode("utf-8"))
for fmt, part in zip(formats, parts))
if six.PY2 and isinstance(digest, six.string_types):
digest = getattr(hashlib, digest)
return hmac.new(key, message, digest).hexdigest()
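# Illustrative sketch (not part of the original module): signing a
# tempurl-style GET with a hypothetical path, expiry and secret. The signed
# message is "GET\n<expires>\n<path>", optionally preceded by "ip=<range>".
def _example_get_hmac():
    return get_hmac('GET', '/v1/AUTH_test/cont/obj', 1700000000,
                    b'mysecret', digest='sha256')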
def get_allowed_digests(conf_digests, logger=None):
"""
    Pulls out the allowed digests from the supplied ``conf_digests``, compares
    them with the lists of supported and deprecated digests, and returns
    whatever remains. Unsupported or deprecated digests trigger a warning log.
:param conf_digests: iterable of allowed digests. If empty, defaults to
DEFAULT_ALLOWED_DIGESTS.
    :param logger: optional logger; if provided, use it to issue deprecation
        warnings
:returns: A set of allowed digests that are supported and a set of
deprecated digests.
:raises: ValueError, if there are no digests left to return.
"""
allowed_digests = set(digest.lower() for digest in conf_digests)
if not allowed_digests:
allowed_digests = SUPPORTED_DIGESTS
not_supported = allowed_digests - SUPPORTED_DIGESTS
if not_supported:
if logger:
logger.warning('The following digest algorithms are configured '
'but not supported: %s', ', '.join(not_supported))
allowed_digests -= not_supported
deprecated = allowed_digests & DEPRECATED_DIGESTS
if deprecated and logger:
if not conf_digests:
logger.warning('The following digest algorithms are allowed by '
'default but deprecated: %s. Support will be '
'disabled by default in a future release, and '
'later removed entirely.', ', '.join(deprecated))
else:
logger.warning('The following digest algorithms are configured '
'but deprecated: %s. Support will be removed in a '
'future release.', ', '.join(deprecated))
if not allowed_digests:
raise ValueError('No valid digest algorithms are configured')
return allowed_digests, deprecated
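# Illustrative sketch (not part of the original module): filtering a
# hypothetical operator-supplied digest list; 'md5' is dropped as unsupported
# while 'sha1' is kept but reported as deprecated.
def _example_get_allowed_digests(logger=None):
    allowed, deprecated = get_allowed_digests(['sha1', 'sha256', 'md5'],
                                              logger)
    assert allowed == {'sha1', 'sha256'}
    assert deprecated == {'sha1'}
    return allowed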
def extract_digest_and_algorithm(value):
"""
Returns a tuple of (digest_algorithm, hex_encoded_digest)
from a client-provided string of the form::
<hex-encoded digest>
or::
<algorithm>:<base64-encoded digest>
Note that hex-encoded strings must use one of sha1, sha256, or sha512.
:raises: ValueError on parse failures
"""
if ':' in value:
algo, value = value.split(':', 1)
# accept both standard and url-safe base64
if ('-' in value or '_' in value) and not (
'+' in value or '/' in value):
value = value.replace('-', '+').replace('_', '/')
value = binascii.hexlify(strict_b64decode(value + '=='))
if not six.PY2:
value = value.decode('ascii')
else:
try:
binascii.unhexlify(value) # make sure it decodes
except TypeError:
# This is just for py2
raise ValueError('Non-hexadecimal digit found')
algo = {
40: 'sha1',
64: 'sha256',
128: 'sha512',
}.get(len(value))
if not algo:
raise ValueError('Bad digest length')
return algo, value
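# Illustrative sketch (not part of the original module): the bare hex form,
# using the well-known sha1 digest of the empty string.
def _example_extract_digest_and_algorithm():
    hex_form = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
    assert extract_digest_and_algorithm(hex_form) == ('sha1', hex_form)
    # the prefixed form, e.g. 'sha1:2jmj7l5rSw0yVb/vlWAYkK/YBwk', decodes to
    # the same tuple; standard and url-safe base64 are both accepted
    return hex_form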
| swift-master | swift/common/digest.py |
# Copyright (c) 2021 NVIDIA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from time import time
from swift.common.utils import node_to_string
class ErrorLimiter(object):
"""
Tracks the number of errors that have occurred for nodes. A node will be
considered to be error-limited for a given interval of time after it has
accumulated more errors than a given limit.
:param suppression_interval: The number of seconds for which a node is
error-limited once it has accumulated more than ``suppression_limit``
errors. Should be a float value.
:param suppression_limit: The number of errors that a node must accumulate
before it is considered to be error-limited. Should be an int value.
"""
def __init__(self, suppression_interval, suppression_limit):
self.suppression_interval = float(suppression_interval)
self.suppression_limit = int(suppression_limit)
self.stats = collections.defaultdict(dict)
def node_key(self, node):
"""
Get the key under which a node's error stats will be stored.
:param node: dictionary describing a node.
:return: string key.
"""
return node_to_string(node)
def is_limited(self, node):
"""
Check if the node is currently error limited.
:param node: dictionary of node to check
:returns: True if error limited, False otherwise
"""
now = time()
node_key = self.node_key(node)
error_stats = self.stats.get(node_key)
if error_stats is None or 'errors' not in error_stats:
return False
if 'last_error' in error_stats and error_stats['last_error'] < \
now - self.suppression_interval:
self.stats.pop(node_key)
return False
return error_stats['errors'] > self.suppression_limit
def limit(self, node):
"""
Mark a node as error limited. This immediately pretends the
node received enough errors to trigger error suppression. Use
this for errors like Insufficient Storage. For other errors
use :func:`increment`.
:param node: dictionary of node to error limit
"""
node_key = self.node_key(node)
error_stats = self.stats[node_key]
error_stats['errors'] = self.suppression_limit + 1
error_stats['last_error'] = time()
def increment(self, node):
"""
Increment the error count and update the time of the last error for
the given ``node``.
:param node: dictionary describing a node.
:returns: True if suppression_limit is exceeded, False otherwise
"""
node_key = self.node_key(node)
error_stats = self.stats[node_key]
error_stats['errors'] = error_stats.get('errors', 0) + 1
error_stats['last_error'] = time()
return error_stats['errors'] > self.suppression_limit
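# Illustrative sketch (not part of the original module): typical use of
# ErrorLimiter by a proxy-like caller. The node dict is hypothetical but
# carries the fields node_to_string() expects.
def _example_error_limiter():
    limiter = ErrorLimiter(suppression_interval=60, suppression_limit=10)
    node = {'ip': '10.0.0.1', 'port': 6200, 'device': 'sdb1',
            'replication_ip': '10.0.0.1', 'replication_port': 6200}
    for _ in range(11):
        limiter.increment(node)  # returns True once the limit is exceeded
    assert limiter.is_limited(node)
    return limiter.stats[limiter.node_key(node)]['errors']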
| swift-master | swift/common/error_limiter.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Database code for Swift """
from contextlib import contextmanager, closing
import base64
import json
import logging
import os
from uuid import uuid4
import sys
import time
import errno
import six
import six.moves.cPickle as pickle
from tempfile import mkstemp
from eventlet import sleep, Timeout
import sqlite3
from swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE, \
check_utf8
from swift.common.utils import Timestamp, renamer, \
mkdirs, lock_parent_directory, fallocate, md5
from swift.common.exceptions import LockTimeout
from swift.common.swob import HTTPBadRequest
#: Whether calls will be made to preallocate disk space for database files.
DB_PREALLOCATION = False
#: Whether calls will be made to log queries (py3 only)
QUERY_LOGGING = False
#: Timeout for trying to connect to a DB
BROKER_TIMEOUT = 25
#: Pickle protocol to use
PICKLE_PROTOCOL = 2
#: Max size of .pending file in bytes. When this is exceeded, the pending
# records will be merged.
PENDING_CAP = 131072
SQLITE_ARG_LIMIT = 999
RECLAIM_PAGE_SIZE = 10000
def utf8encode(*args):
return [(s.encode('utf8') if isinstance(s, six.text_type) else s)
for s in args]
def native_str_keys_and_values(metadata):
if six.PY2:
uni_keys = [k for k in metadata if isinstance(k, six.text_type)]
for k in uni_keys:
sv = metadata[k]
del metadata[k]
metadata[k.encode('utf-8')] = [
x.encode('utf-8') if isinstance(x, six.text_type) else x
for x in sv]
else:
bin_keys = [k for k in metadata if isinstance(k, six.binary_type)]
for k in bin_keys:
sv = metadata[k]
del metadata[k]
metadata[k.decode('utf-8')] = [
x.decode('utf-8') if isinstance(x, six.binary_type) else x
for x in sv]
ZERO_LIKE_VALUES = {None, '', 0, '0'}
def zero_like(count):
"""
We've cargo culted our consumers to be tolerant of various expressions of
zero in our databases for backwards compatibility with less disciplined
producers.
"""
return count in ZERO_LIKE_VALUES
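# Illustrative sketch (not part of the original module): the legacy "zero"
# spellings zero_like() accepts.
def _example_zero_like():
    assert all(zero_like(v) for v in (None, '', 0, '0'))
    assert not zero_like(2) and not zero_like('2')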
def _db_timeout(timeout, db_file, call):
with LockTimeout(timeout, db_file):
retry_wait = 0.001
while True:
try:
return call()
except sqlite3.OperationalError as e:
if 'locked' not in str(e):
raise
sleep(retry_wait)
retry_wait = min(retry_wait * 2, 0.05)
class DatabaseConnectionError(sqlite3.DatabaseError):
"""More friendly error messages for DB Errors."""
def __init__(self, path, msg, timeout=0):
self.path = path
self.timeout = timeout
self.msg = msg
def __str__(self):
return 'DB connection error (%s, %s):\n%s' % (
self.path, self.timeout, self.msg)
class DatabaseAlreadyExists(sqlite3.DatabaseError):
"""More friendly error messages for DB Errors."""
def __init__(self, path):
self.path = path
def __str__(self):
return 'DB %s already exists' % self.path
class GreenDBConnection(sqlite3.Connection):
"""SQLite DB Connection handler that plays well with eventlet."""
# slots are needed for python 3.11.0 (there's an issue fixed in 3.11.1,
# see https://github.com/python/cpython/issues/99886)
__slots__ = ('timeout', 'db_file')
def __init__(self, database, timeout=None, *args, **kwargs):
if timeout is None:
timeout = BROKER_TIMEOUT
self.timeout = timeout
self.db_file = database
super(GreenDBConnection, self).__init__(database, 0, *args, **kwargs)
def cursor(self, cls=None):
if cls is None:
cls = GreenDBCursor
return sqlite3.Connection.cursor(self, cls)
def execute(self, *args, **kwargs):
# py311 stopped calling self.cursor() to get the cursor;
# see https://github.com/python/cpython/pull/31351
curs = self.cursor()
curs.execute(*args, **kwargs)
return curs
def commit(self):
return _db_timeout(
self.timeout, self.db_file,
lambda: sqlite3.Connection.commit(self))
class GreenDBCursor(sqlite3.Cursor):
"""SQLite Cursor handler that plays well with eventlet."""
# slots are needed for python 3.11.0 (there's an issue fixed in 3.11.1,
# see https://github.com/python/cpython/issues/99886)
__slots__ = ('timeout', 'db_file')
def __init__(self, *args, **kwargs):
self.timeout = args[0].timeout
self.db_file = args[0].db_file
super(GreenDBCursor, self).__init__(*args, **kwargs)
def execute(self, *args, **kwargs):
return _db_timeout(
self.timeout, self.db_file, lambda: sqlite3.Cursor.execute(
self, *args, **kwargs))
# NB: executemany and executescript are *not* greened, and never have been
# (as far as I can tell)
def dict_factory(crs, row):
"""
This should only be used when you need a real dict,
i.e. when you're going to serialize the results.
"""
return dict(
((col[0], row[idx]) for idx, col in enumerate(crs.description)))
def chexor(old, name, timestamp):
"""
Each entry in the account and container databases is XORed by the 128-bit
hash on insert or delete. This serves as a rolling, order-independent hash
of the contents. (check + XOR)
:param old: hex representation of the current DB hash
:param name: name of the object or container being inserted
:param timestamp: internalized timestamp of the new record
:returns: a hex representation of the new hash value
"""
if name is None:
raise Exception('name is None!')
new = md5(('%s-%s' % (name, timestamp)).encode('utf8'),
usedforsecurity=False).hexdigest()
return '%032x' % (int(old, 16) ^ int(new, 16))
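# Illustrative sketch (not part of the original module): because each record
# hash is XORed into the running value, the hash is independent of insertion
# order and XORing the same record twice cancels it out. The names and
# timestamps here are hypothetical.
def _example_chexor():
    start = '0' * 32
    t1, t2 = '0000000001.00000', '0000000002.00000'
    h1 = chexor(chexor(start, 'o1', t1), 'o2', t2)
    h2 = chexor(chexor(start, 'o2', t2), 'o1', t1)
    assert h1 == h2                                          # order-independent
    assert chexor(h1, 'o1', t1) == chexor(start, 'o2', t2)   # self-inverse
    return h1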
def get_db_connection(path, timeout=30, logger=None, okay_to_create=False):
"""
Returns a properly configured SQLite database connection.
:param path: path to DB
:param timeout: timeout for connection
:param okay_to_create: if True, create the DB if it doesn't exist
:returns: DB connection object
"""
try:
connect_time = time.time()
conn = sqlite3.connect(path, check_same_thread=False,
factory=GreenDBConnection, timeout=timeout)
if QUERY_LOGGING and logger and not six.PY2:
conn.set_trace_callback(logger.debug)
if not okay_to_create:
# attempt to detect and fail when connect creates the db file
stat = os.stat(path)
if stat.st_size == 0 and stat.st_ctime >= connect_time:
os.unlink(path)
raise DatabaseConnectionError(path,
'DB file created by connect?')
conn.row_factory = sqlite3.Row
conn.text_factory = str
with closing(conn.cursor()) as cur:
cur.execute('PRAGMA synchronous = NORMAL')
cur.execute('PRAGMA count_changes = OFF')
cur.execute('PRAGMA temp_store = MEMORY')
cur.execute('PRAGMA journal_mode = DELETE')
conn.create_function('chexor', 3, chexor)
except sqlite3.DatabaseError:
import traceback
raise DatabaseConnectionError(path, traceback.format_exc(),
timeout=timeout)
return conn
class TombstoneReclaimer(object):
"""Encapsulates reclamation of deleted rows in a database."""
def __init__(self, broker, age_timestamp):
"""
Encapsulates reclamation of deleted rows in a database.
:param broker: an instance of :class:`~swift.common.db.DatabaseBroker`.
:param age_timestamp: a float timestamp: tombstones older than this
time will be deleted.
"""
self.broker = broker
self.age_timestamp = age_timestamp
self.marker = ''
self.remaining_tombstones = self.reclaimed = 0
self.finished = False
# limit 1 offset N gives back the N+1th matching row; that row is used
# as an exclusive end_marker for a batch of deletes, so a batch
# comprises rows satisfying self.marker <= name < end_marker.
self.batch_query = '''
SELECT name FROM %s WHERE deleted = 1
AND name >= ?
ORDER BY NAME LIMIT 1 OFFSET ?
''' % self.broker.db_contains_type
self.clean_batch_query = '''
DELETE FROM %s WHERE deleted = 1
AND name >= ? AND %s < %s
''' % (self.broker.db_contains_type, self.broker.db_reclaim_timestamp,
self.age_timestamp)
def _reclaim(self, conn):
curs = conn.execute(self.batch_query, (self.marker, RECLAIM_PAGE_SIZE))
row = curs.fetchone()
end_marker = row[0] if row else ''
if end_marker:
# do a single book-ended DELETE and bounce out
curs = conn.execute(self.clean_batch_query + ' AND name < ?',
(self.marker, end_marker))
self.marker = end_marker
self.reclaimed += curs.rowcount
self.remaining_tombstones += RECLAIM_PAGE_SIZE - curs.rowcount
else:
# delete off the end
curs = conn.execute(self.clean_batch_query, (self.marker,))
self.finished = True
self.reclaimed += curs.rowcount
def reclaim(self):
"""
Perform reclaim of deleted rows older than ``age_timestamp``.
"""
while not self.finished:
with self.broker.get() as conn:
self._reclaim(conn)
conn.commit()
def get_tombstone_count(self):
"""
Return the number of remaining tombstones newer than ``age_timestamp``.
Executes the ``reclaim`` method if it has not already been called on
this instance.
:return: The number of tombstones in the ``broker`` that are newer than
``age_timestamp``.
"""
if not self.finished:
self.reclaim()
with self.broker.get() as conn:
curs = conn.execute('''
SELECT COUNT(*) FROM %s WHERE deleted = 1
AND name >= ?
''' % (self.broker.db_contains_type,), (self.marker,))
tombstones = curs.fetchone()[0]
self.remaining_tombstones += tombstones
return self.remaining_tombstones
class DatabaseBroker(object):
"""Encapsulates working with a database."""
delete_meta_whitelist = []
def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None,
account=None, container=None, pending_timeout=None,
stale_reads_ok=False, skip_commits=False):
"""Encapsulates working with a database.
:param db_file: path to a database file.
:param timeout: timeout used for database operations.
:param logger: a logger instance.
:param account: name of account.
:param container: name of container.
:param pending_timeout: timeout used when attempting to take a lock to
write to pending file.
:param stale_reads_ok: if True then no error is raised if pending
commits cannot be committed before the database is read, otherwise
an error is raised.
:param skip_commits: if True then this broker instance will never
commit records from the pending file to the database;
:meth:`~swift.common.db.DatabaseBroker.put_record` should not
called on brokers with skip_commits True.
"""
self.conn = None
self._db_file = db_file
self.pending_file = self._db_file + '.pending'
self.pending_timeout = pending_timeout or 10
self.stale_reads_ok = stale_reads_ok
self.db_dir = os.path.dirname(db_file)
self.timeout = timeout
self.logger = logger or logging.getLogger()
self.account = account
self.container = container
self._db_version = -1
self.skip_commits = skip_commits
def __str__(self):
"""
Returns a string identifying the entity under broker to a human.
The baseline implementation returns a full pathname to a database.
This is vital for useful diagnostics.
"""
return self.db_file
def initialize(self, put_timestamp=None, storage_policy_index=None):
"""
Create the DB
The storage_policy_index is passed through to the subclass's
``_initialize`` method. It is ignored by ``AccountBroker``.
:param put_timestamp: internalized timestamp of initial PUT request
:param storage_policy_index: only required for containers
"""
mkdirs(self.db_dir)
fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir)
os.close(fd)
conn = sqlite3.connect(tmp_db_file, check_same_thread=False,
factory=GreenDBConnection, timeout=0)
if QUERY_LOGGING and not six.PY2:
conn.set_trace_callback(self.logger.debug)
# creating dbs implicitly does a lot of transactions, so we
# pick fast, unsafe options here and do a big fsync at the end.
with closing(conn.cursor()) as cur:
cur.execute('PRAGMA synchronous = OFF')
cur.execute('PRAGMA temp_store = MEMORY')
cur.execute('PRAGMA journal_mode = MEMORY')
conn.create_function('chexor', 3, chexor)
conn.row_factory = sqlite3.Row
conn.text_factory = str
conn.executescript("""
CREATE TABLE outgoing_sync (
remote_id TEXT UNIQUE,
sync_point INTEGER,
updated_at TEXT DEFAULT 0
);
CREATE TABLE incoming_sync (
remote_id TEXT UNIQUE,
sync_point INTEGER,
updated_at TEXT DEFAULT 0
);
CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync
BEGIN
UPDATE outgoing_sync
SET updated_at = STRFTIME('%s', 'NOW')
WHERE ROWID = new.ROWID;
END;
CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync
BEGIN
UPDATE outgoing_sync
SET updated_at = STRFTIME('%s', 'NOW')
WHERE ROWID = new.ROWID;
END;
CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync
BEGIN
UPDATE incoming_sync
SET updated_at = STRFTIME('%s', 'NOW')
WHERE ROWID = new.ROWID;
END;
CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync
BEGIN
UPDATE incoming_sync
SET updated_at = STRFTIME('%s', 'NOW')
WHERE ROWID = new.ROWID;
END;
""")
if not put_timestamp:
put_timestamp = Timestamp(0).internal
self._initialize(conn, put_timestamp,
storage_policy_index=storage_policy_index)
conn.commit()
if tmp_db_file:
conn.close()
with open(tmp_db_file, 'r+b') as fp:
os.fsync(fp.fileno())
with lock_parent_directory(self.db_file, self.pending_timeout):
if os.path.exists(self.db_file):
# It's as if there was a "condition" where different parts
# of the system were "racing" each other.
raise DatabaseAlreadyExists(self.db_file)
renamer(tmp_db_file, self.db_file)
self.conn = get_db_connection(self.db_file, self.timeout,
self.logger)
else:
self.conn = conn
def delete_db(self, timestamp):
"""
Mark the DB as deleted
:param timestamp: internalized delete timestamp
"""
# first, clear the metadata
cleared_meta = {}
for k in self.metadata:
if k.lower() in self.delete_meta_whitelist:
continue
cleared_meta[k] = ('', timestamp)
self.update_metadata(cleared_meta)
# then mark the db as deleted
with self.get() as conn:
conn.execute(
"""
UPDATE %s_stat
SET delete_timestamp = ?,
status = 'DELETED',
status_changed_at = ?
WHERE delete_timestamp < ? """ % self.db_type,
(timestamp, timestamp, timestamp))
conn.commit()
@property
def db_file(self):
return self._db_file
def get_device_path(self):
suffix_path = os.path.dirname(self.db_dir)
partition_path = os.path.dirname(suffix_path)
dbs_path = os.path.dirname(partition_path)
return os.path.dirname(dbs_path)
def quarantine(self, reason):
"""
The database will be quarantined and a
sqlite3.DatabaseError will be raised indicating the action taken.
"""
device_path = self.get_device_path()
quar_path = os.path.join(device_path, 'quarantined',
self.db_type + 's',
os.path.basename(self.db_dir))
try:
renamer(self.db_dir, quar_path, fsync=False)
except OSError as e:
if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
raise
quar_path = "%s-%s" % (quar_path, uuid4().hex)
renamer(self.db_dir, quar_path, fsync=False)
detail = ('Quarantined %(db_dir)s to %(quar_path)s due to '
'%(reason)s') % {'db_dir': self.db_dir,
'quar_path': quar_path,
'reason': reason}
self.logger.error(detail)
raise sqlite3.DatabaseError(detail)
def possibly_quarantine(self, exc_type, exc_value, exc_traceback):
"""
Checks the exception info to see if it indicates a quarantine situation
(malformed or corrupted database). If not, the original exception will
be reraised. If so, the database will be quarantined and a new
sqlite3.DatabaseError will be raised indicating the action taken.
"""
if 'database disk image is malformed' in str(exc_value):
exc_hint = 'malformed database'
elif 'malformed database schema' in str(exc_value):
exc_hint = 'malformed database'
elif ' is not a database' in str(exc_value):
# older versions said 'file is not a database'
# now 'file is encrypted or is not a database'
exc_hint = 'corrupted database'
elif 'disk I/O error' in str(exc_value):
exc_hint = 'disk error while accessing database'
else:
six.reraise(exc_type, exc_value, exc_traceback)
self.quarantine(exc_hint)
@contextmanager
def updated_timeout(self, new_timeout):
"""Use with "with" statement; updates ``timeout`` within the block."""
old_timeout = self.timeout
try:
self.timeout = new_timeout
if self.conn:
self.conn.timeout = new_timeout
yield old_timeout
finally:
self.timeout = old_timeout
if self.conn:
self.conn.timeout = old_timeout
@contextmanager
def maybe_get(self, conn):
if conn:
yield conn
else:
with self.get() as conn:
yield conn
@contextmanager
def get(self):
"""Use with the "with" statement; returns a database connection."""
if not self.conn:
if os.path.exists(self.db_file):
try:
self.conn = get_db_connection(self.db_file, self.timeout,
self.logger)
except (sqlite3.DatabaseError, DatabaseConnectionError):
self.possibly_quarantine(*sys.exc_info())
else:
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
conn = self.conn
self.conn = None
try:
yield conn
conn.rollback()
self.conn = conn
except sqlite3.DatabaseError:
try:
conn.close()
except Exception:
pass
self.possibly_quarantine(*sys.exc_info())
except (Exception, Timeout):
conn.close()
raise
@contextmanager
def lock(self):
"""Use with the "with" statement; locks a database."""
if not self.conn:
if os.path.exists(self.db_file):
self.conn = get_db_connection(self.db_file, self.timeout,
self.logger)
else:
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
conn = self.conn
self.conn = None
orig_isolation_level = conn.isolation_level
conn.isolation_level = None
conn.execute('BEGIN IMMEDIATE')
try:
yield True
finally:
try:
conn.execute('ROLLBACK')
conn.isolation_level = orig_isolation_level
self.conn = conn
except (Exception, Timeout):
logging.exception(
'Broker error trying to rollback locked connection')
conn.close()
def _new_db_id(self):
device_name = os.path.basename(self.get_device_path())
return "%s-%s" % (str(uuid4()), device_name)
def newid(self, remote_id):
"""
Re-id the database. This should be called after an rsync.
:param remote_id: the ID of the remote database being rsynced in
"""
with self.get() as conn:
row = conn.execute('''
UPDATE %s_stat SET id=?
''' % self.db_type, (self._new_db_id(),))
row = conn.execute('''
SELECT ROWID FROM %s ORDER BY ROWID DESC LIMIT 1
''' % self.db_contains_type).fetchone()
sync_point = row['ROWID'] if row else -1
conn.execute('''
INSERT OR REPLACE INTO incoming_sync (sync_point, remote_id)
VALUES (?, ?)
''', (sync_point, remote_id))
self._newid(conn)
conn.commit()
def _newid(self, conn):
# Override for additional work when receiving an rsynced db.
pass
def _is_deleted(self, conn):
"""
Check if the database is considered deleted
:param conn: database conn
:returns: True if the DB is considered to be deleted, False otherwise
"""
raise NotImplementedError()
def is_deleted(self):
"""
Check if the DB is considered to be deleted.
:returns: True if the DB is considered to be deleted, False otherwise
"""
if not os.path.exists(self.db_file):
return True
self._commit_puts_stale_ok()
with self.get() as conn:
return self._is_deleted(conn)
def empty(self):
"""
Check if the broker abstraction contains any undeleted records.
"""
raise NotImplementedError()
def is_reclaimable(self, now, reclaim_age):
"""
Check if the broker abstraction is empty, and has been marked deleted
for at least a reclaim age.
"""
info = self.get_replication_info()
return (zero_like(info['count']) and
(Timestamp(now - reclaim_age) >
Timestamp(info['delete_timestamp']) >
Timestamp(info['put_timestamp'])))
def merge_timestamps(self, created_at, put_timestamp, delete_timestamp):
"""
Used in replication to handle updating timestamps.
:param created_at: create timestamp
:param put_timestamp: put timestamp
:param delete_timestamp: delete timestamp
"""
with self.get() as conn:
old_status = self._is_deleted(conn)
conn.execute('''
UPDATE %s_stat SET created_at=MIN(?, created_at),
put_timestamp=MAX(?, put_timestamp),
delete_timestamp=MAX(?, delete_timestamp)
''' % self.db_type, (created_at, put_timestamp, delete_timestamp))
if old_status != self._is_deleted(conn):
timestamp = Timestamp.now()
self._update_status_changed_at(conn, timestamp.internal)
conn.commit()
def get_items_since(self, start, count):
"""
Get a list of objects in the database between start and end.
:param start: start ROWID
:param count: number to get
:returns: list of objects between start and end
"""
self._commit_puts_stale_ok()
with self.get() as conn:
curs = conn.execute('''
SELECT * FROM %s WHERE ROWID > ? ORDER BY ROWID ASC LIMIT ?
''' % self.db_contains_type, (start, count))
curs.row_factory = dict_factory
return [r for r in curs]
def get_sync(self, id, incoming=True):
"""
Gets the most recent sync point for a server from the sync table.
:param id: remote ID to get the sync_point for
:param incoming: if True, get the last incoming sync, otherwise get
the last outgoing sync
:returns: the sync point, or -1 if the id doesn't exist.
"""
with self.get() as conn:
row = conn.execute(
"SELECT sync_point FROM %s_sync WHERE remote_id=?"
% ('incoming' if incoming else 'outgoing'), (id,)).fetchone()
if not row:
return -1
return row['sync_point']
def get_syncs(self, incoming=True):
"""
Get a serialized copy of the sync table.
:param incoming: if True, get the last incoming sync, otherwise get
the last outgoing sync
:returns: list of {'remote_id', 'sync_point'}
"""
with self.get() as conn:
curs = conn.execute('''
SELECT remote_id, sync_point FROM %s_sync
''' % ('incoming' if incoming else 'outgoing'))
result = []
for row in curs:
result.append({'remote_id': row[0], 'sync_point': row[1]})
return result
def get_max_row(self, table=None):
if not table:
table = self.db_contains_type
query = '''
SELECT SQLITE_SEQUENCE.seq
FROM SQLITE_SEQUENCE
WHERE SQLITE_SEQUENCE.name == '%s'
LIMIT 1
''' % (table, )
with self.get() as conn:
row = conn.execute(query).fetchone()
return row[0] if row else -1
def get_replication_info(self):
"""
Get information about the DB required for replication.
:returns: dict containing keys from get_info plus max_row and metadata
Note:: get_info's <db_contains_type>_count is translated to just
"count" and metadata is the raw string.
"""
info = self.get_info()
info['count'] = info.pop('%s_count' % self.db_contains_type)
info['metadata'] = self.get_raw_metadata()
info['max_row'] = self.get_max_row()
return info
def get_info(self):
self._commit_puts_stale_ok()
with self.get() as conn:
curs = conn.execute('SELECT * from %s_stat' % self.db_type)
curs.row_factory = dict_factory
return curs.fetchone()
def put_record(self, record):
"""
Put a record into the DB. If the DB has an associated pending file with
space then the record is appended to that file and a commit to the DB
is deferred. If its pending file is full then the record will be
committed immediately.
:param record: a record to be added to the DB.
:raises DatabaseConnectionError: if the DB file does not exist or if
``skip_commits`` is True.
:raises LockTimeout: if a timeout occurs while waiting to take a lock
to write to the pending file.
"""
if not os.path.exists(self.db_file):
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
if self.skip_commits:
raise DatabaseConnectionError(self.db_file,
'commits not accepted')
with lock_parent_directory(self.pending_file, self.pending_timeout):
pending_size = 0
try:
pending_size = os.path.getsize(self.pending_file)
except OSError as err:
if err.errno != errno.ENOENT:
raise
if pending_size > PENDING_CAP:
self._commit_puts([record])
else:
with open(self.pending_file, 'a+b') as fp:
# Colons aren't used in base64 encoding; so they are our
# delimiter
fp.write(b':')
fp.write(base64.b64encode(pickle.dumps(
self.make_tuple_for_pickle(record),
protocol=PICKLE_PROTOCOL)))
fp.flush()
def _skip_commit_puts(self):
return self.skip_commits or not os.path.exists(self.pending_file)
def _commit_puts(self, item_list=None):
"""
Scan for .pending files and commit the found records by feeding them
to merge_items(). Assume that lock_parent_directory has already been
called.
:param item_list: A list of items to commit in addition to .pending
"""
if self._skip_commit_puts():
if item_list:
# this broker instance should not be used to commit records,
# but if it is then raise an error rather than quietly
# discarding the records in item_list.
raise DatabaseConnectionError(self.db_file,
'commits not accepted')
return
if item_list is None:
item_list = []
self._preallocate()
if not os.path.getsize(self.pending_file):
if item_list:
self.merge_items(item_list)
return
with open(self.pending_file, 'r+b') as fp:
for entry in fp.read().split(b':'):
if entry:
try:
if six.PY2:
data = pickle.loads(base64.b64decode(entry))
else:
data = pickle.loads(base64.b64decode(entry),
encoding='utf8')
self._commit_puts_load(item_list, data)
except Exception:
self.logger.exception(
'Invalid pending entry %(file)s: %(entry)s',
{'file': self.pending_file, 'entry': entry})
if item_list:
self.merge_items(item_list)
try:
os.ftruncate(fp.fileno(), 0)
except OSError as err:
if err.errno != errno.ENOENT:
raise
def _commit_puts_stale_ok(self):
"""
Catch failures of _commit_puts() if broker is intended for
reading of stats, and thus does not care for pending updates.
"""
if self._skip_commit_puts():
return
try:
with lock_parent_directory(self.pending_file,
self.pending_timeout):
self._commit_puts()
except (LockTimeout, sqlite3.OperationalError):
if not self.stale_reads_ok:
raise
def _commit_puts_load(self, item_list, entry):
"""
Unmarshall the :param:entry tuple and append it to :param:item_list.
This is implemented by a particular broker to be compatible
with its :func:`merge_items`.
"""
raise NotImplementedError
def merge_items(self, item_list, source=None):
"""
Save :param:item_list to the database.
"""
raise NotImplementedError
def make_tuple_for_pickle(self, record):
"""
Turn this db record dict into the format this service uses for
pending pickles.
"""
raise NotImplementedError
def merge_syncs(self, sync_points, incoming=True):
"""
Merge a list of sync points with the incoming sync table.
:param sync_points: list of sync points where a sync point is a dict of
{'sync_point', 'remote_id'}
:param incoming: if True, get the last incoming sync, otherwise get
the last outgoing sync
"""
with self.get() as conn:
for rec in sync_points:
try:
conn.execute('''
INSERT INTO %s_sync (sync_point, remote_id)
VALUES (?, ?)
''' % ('incoming' if incoming else 'outgoing'),
(rec['sync_point'], rec['remote_id']))
except sqlite3.IntegrityError:
conn.execute('''
UPDATE %s_sync SET sync_point=max(?, sync_point)
WHERE remote_id=?
''' % ('incoming' if incoming else 'outgoing'),
(rec['sync_point'], rec['remote_id']))
conn.commit()
def _preallocate(self):
"""
The idea is to allocate space in front of an expanding db. If it gets
within 512k of a boundary, it allocates to the next boundary.
        Boundaries are 1m, 2m, 5m, 10m, 25m, 50m, then every 50m after.
"""
if not DB_PREALLOCATION:
return
MB = (1024 * 1024)
def prealloc_points():
for pm in (1, 2, 5, 10, 25, 50):
yield pm * MB
while True:
pm += 50
yield pm * MB
stat = os.stat(self.db_file)
file_size = stat.st_size
allocated_size = stat.st_blocks * 512
for point in prealloc_points():
if file_size <= point - MB / 2:
prealloc_size = point
break
if allocated_size < prealloc_size:
with open(self.db_file, 'rb+') as fp:
fallocate(fp.fileno(), int(prealloc_size))
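    # Worked example of the boundary logic above (figures are illustrative):
    # boundaries run 1m, 2m, 5m, 10m, 25m, 50m, 100m, ... and a db is bumped
    # to the first boundary that still leaves at least 512k of headroom.
    #   * a 4.4m db: 4.4m <= 5m - 0.5m, so it is preallocated to 5m
    #   * a 4.6m db: 4.6m >  5m - 0.5m, so it skips ahead to 10m
    # fallocate() is only invoked when the blocks currently allocated fall
    # short of the chosen boundary.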
def get_raw_metadata(self):
with self.get() as conn:
try:
row = conn.execute('SELECT metadata FROM %s_stat' %
self.db_type).fetchone()
if not row:
self.quarantine("missing row in %s_stat table" %
self.db_type)
metadata = row[0]
except sqlite3.OperationalError as err:
if 'no such column: metadata' not in str(err):
raise
metadata = ''
return metadata
@property
def metadata(self):
"""
Returns the metadata dict for the database. The metadata dict values
are tuples of (value, timestamp) where the timestamp indicates when
that key was set to that value.
"""
metadata = self.get_raw_metadata()
if metadata:
metadata = json.loads(metadata)
native_str_keys_and_values(metadata)
else:
metadata = {}
return metadata
@staticmethod
def validate_metadata(metadata):
"""
Validates that metadata falls within acceptable limits.
:param metadata: to be validated
:raises HTTPBadRequest: if MAX_META_COUNT or MAX_META_OVERALL_SIZE
is exceeded, or if metadata contains non-UTF-8 data
"""
meta_count = 0
meta_size = 0
for key, (value, timestamp) in metadata.items():
if key and not check_utf8(key):
raise HTTPBadRequest('Metadata must be valid UTF-8')
if value and not check_utf8(value):
raise HTTPBadRequest('Metadata must be valid UTF-8')
key = key.lower()
if value and key.startswith(('x-account-meta-',
'x-container-meta-')):
prefix = 'x-account-meta-'
if key.startswith('x-container-meta-'):
prefix = 'x-container-meta-'
key = key[len(prefix):]
meta_count = meta_count + 1
meta_size = meta_size + len(key) + len(value)
if meta_count > MAX_META_COUNT:
raise HTTPBadRequest('Too many metadata items; max %d'
% MAX_META_COUNT)
if meta_size > MAX_META_OVERALL_SIZE:
raise HTTPBadRequest('Total metadata too large; max %d'
% MAX_META_OVERALL_SIZE)
def update_metadata(self, metadata_updates, validate_metadata=False):
"""
Updates the metadata dict for the database. The metadata dict values
are tuples of (value, timestamp) where the timestamp indicates when
that key was set to that value. Key/values will only be overwritten if
the timestamp is newer. To delete a key, set its value to ('',
timestamp). These empty keys will eventually be removed by
:func:`reclaim`
"""
old_metadata = self.metadata
if set(metadata_updates).issubset(set(old_metadata)):
for key, (value, timestamp) in metadata_updates.items():
if timestamp > old_metadata[key][1]:
break
else:
return
with self.get() as conn:
try:
row = conn.execute('SELECT metadata FROM %s_stat' %
self.db_type).fetchone()
if not row:
self.quarantine("missing row in %s_stat table" %
self.db_type)
md = row[0]
md = json.loads(md) if md else {}
native_str_keys_and_values(md)
except sqlite3.OperationalError as err:
if 'no such column: metadata' not in str(err):
raise
conn.execute("""
ALTER TABLE %s_stat
ADD COLUMN metadata TEXT DEFAULT '' """ % self.db_type)
md = {}
for key, value_timestamp in metadata_updates.items():
value, timestamp = value_timestamp
if key not in md or timestamp > md[key][1]:
md[key] = value_timestamp
if validate_metadata:
DatabaseBroker.validate_metadata(md)
conn.execute('UPDATE %s_stat SET metadata = ?' % self.db_type,
(json.dumps(md),))
conn.commit()
def reclaim(self, age_timestamp, sync_timestamp):
"""
Delete reclaimable rows and metadata from the db.
By default this method will delete rows from the db_contains_type table
that are marked deleted and whose created_at timestamp is <
age_timestamp, and deletes rows from incoming_sync and outgoing_sync
where the updated_at timestamp is < sync_timestamp. In addition, this
calls the :meth:`_reclaim_metadata` method.
Subclasses may reclaim other items by overriding :meth:`_reclaim`.
:param age_timestamp: max created_at timestamp of object rows to delete
        :param sync_timestamp: max updated_at timestamp of sync rows to delete
"""
if not self._skip_commit_puts():
with lock_parent_directory(self.pending_file,
self.pending_timeout):
self._commit_puts()
tombstone_reclaimer = TombstoneReclaimer(self, age_timestamp)
tombstone_reclaimer.reclaim()
with self.get() as conn:
self._reclaim_other_stuff(conn, age_timestamp, sync_timestamp)
conn.commit()
return tombstone_reclaimer
def _reclaim_other_stuff(self, conn, age_timestamp, sync_timestamp):
"""
This is only called once at the end of reclaim after tombstone reclaim
has been completed.
"""
self._reclaim_sync(conn, sync_timestamp)
self._reclaim_metadata(conn, age_timestamp)
def _reclaim_sync(self, conn, sync_timestamp):
try:
conn.execute('''
DELETE FROM outgoing_sync WHERE updated_at < ?
''', (sync_timestamp,))
conn.execute('''
DELETE FROM incoming_sync WHERE updated_at < ?
''', (sync_timestamp,))
except sqlite3.OperationalError as err:
# Old dbs didn't have updated_at in the _sync tables.
if 'no such column: updated_at' not in str(err):
raise
def _reclaim_metadata(self, conn, timestamp):
"""
Removes any empty metadata values older than the timestamp using the
given database connection. This function will not call commit on the
conn, but will instead return True if the database needs committing.
This function was created as a worker to limit transactions and commits
from other related functions.
:param conn: Database connection to reclaim metadata within.
:param timestamp: Empty metadata items last updated before this
timestamp will be removed.
:returns: True if conn.commit() should be called
"""
timestamp = Timestamp(timestamp)
try:
row = conn.execute('SELECT metadata FROM %s_stat' %
self.db_type).fetchone()
if not row:
self.quarantine("missing row in %s_stat table" %
self.db_type)
md = row[0]
if md:
md = json.loads(md)
keys_to_delete = []
for key, (value, value_timestamp) in md.items():
if value == '' and Timestamp(value_timestamp) < timestamp:
keys_to_delete.append(key)
if keys_to_delete:
for key in keys_to_delete:
del md[key]
conn.execute('UPDATE %s_stat SET metadata = ?' %
self.db_type, (json.dumps(md),))
return True
except sqlite3.OperationalError as err:
if 'no such column: metadata' not in str(err):
raise
return False
def update_put_timestamp(self, timestamp):
"""
Update the put_timestamp. Only modifies it if it is greater than
the current timestamp.
:param timestamp: internalized put timestamp
"""
with self.get() as conn:
conn.execute(
'UPDATE %s_stat SET put_timestamp = ?'
' WHERE put_timestamp < ?' % self.db_type,
(timestamp, timestamp))
conn.commit()
def update_status_changed_at(self, timestamp):
"""
Update the status_changed_at field in the stat table. Only
modifies status_changed_at if the timestamp is greater than the
current status_changed_at timestamp.
:param timestamp: internalized timestamp
"""
with self.get() as conn:
self._update_status_changed_at(conn, timestamp)
conn.commit()
def _update_status_changed_at(self, conn, timestamp):
conn.execute(
'UPDATE %s_stat SET status_changed_at = ?'
' WHERE status_changed_at < ?' % self.db_type,
(timestamp, timestamp))
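# A minimal standalone sketch of the .pending record format used by
# put_record() and _commit_puts() above: each record is a pickled tuple,
# base64-encoded, with ':' as the separator (a character base64 never emits).
# The helper names are illustrative only and not part of the broker API;
# protocol=2 stands in for PICKLE_PROTOCOL defined earlier in this module.
def _encode_pending_record_demo(record_tuple):
    import base64
    import pickle
    return b':' + base64.b64encode(
        pickle.dumps(record_tuple, protocol=2))


def _decode_pending_file_demo(raw_bytes):
    import base64
    import pickle
    return [pickle.loads(base64.b64decode(entry))
            for entry in raw_bytes.split(b':') if entry]
# e.g. _decode_pending_file_demo(
#          _encode_pending_record_demo(('obj', 1))) == [('obj', 1)]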
| swift-master | swift/common/db.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import math
import time
import shutil
import uuid
import errno
import re
from contextlib import contextmanager
from eventlet import GreenPool, sleep, Timeout
from eventlet.green import subprocess
import swift.common.db
from swift.common.constraints import check_drive
from swift.common.utils import get_logger, whataremyips, storage_directory, \
renamer, mkdirs, lock_parent_directory, config_true_value, \
unlink_older_than, dump_recon_cache, rsync_module_interpolation, \
parse_override_options, round_robin_iter, Everything, get_db_files, \
parse_db_filename, quote, RateLimitedIterator, config_auto_int_value
from swift.common import ring
from swift.common.ring.utils import is_local_device
from swift.common.http import HTTP_NOT_FOUND, HTTP_INSUFFICIENT_STORAGE, \
is_success
from swift.common.bufferedhttp import BufferedHTTPConnection
from swift.common.exceptions import DriveNotMounted
from swift.common.daemon import Daemon
from swift.common.swob import Response, HTTPNotFound, HTTPNoContent, \
HTTPAccepted, HTTPBadRequest
from swift.common.recon import DEFAULT_RECON_CACHE_PATH, \
server_type_to_recon_file
DEBUG_TIMINGS_THRESHOLD = 10
def quarantine_db(object_file, server_type):
"""
In the case that a corrupt file is found, move it to a quarantined area to
allow replication to fix it.
:param object_file: path to corrupt file
:param server_type: type of file that is corrupt
('container' or 'account')
"""
object_dir = os.path.dirname(object_file)
quarantine_dir = os.path.abspath(
os.path.join(object_dir, '..', '..', '..', '..', 'quarantined',
server_type + 's', os.path.basename(object_dir)))
try:
renamer(object_dir, quarantine_dir, fsync=False)
except OSError as e:
if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
raise
quarantine_dir = "%s-%s" % (quarantine_dir, uuid.uuid4().hex)
renamer(object_dir, quarantine_dir, fsync=False)
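# Illustrative path layout (device, partition and hash are made up): for a
# corrupt container db at
#   /srv/node/sdb1/containers/1234/fff/<hash>/<hash>.db
# object_dir is the trailing <hash> directory and quarantine_dir resolves to
#   /srv/node/sdb1/quarantined/containers/<hash>
# with a "-<uuid4 hex>" suffix appended when that name is already taken.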
def looks_like_partition(dir_name):
"""
True if the directory name is a valid partition number, False otherwise.
"""
try:
part = int(dir_name)
return part >= 0
except ValueError:
return False
def roundrobin_datadirs(datadirs):
"""
Generator to walk the data dirs in a round robin manner, evenly
hitting each device on the system, and yielding any .db files
found (in their proper places). The partitions within each data
dir are walked randomly, however.
:param datadirs: a list of tuples of (path, context, partition_filter) to
walk. The context may be any object; the context is not
used by this function but is included with each yielded
tuple.
:returns: A generator of (partition, path_to_db_file, context)
"""
def walk_datadir(datadir, context, part_filter):
partitions = [pd for pd in os.listdir(datadir)
if looks_like_partition(pd) and part_filter(pd)]
random.shuffle(partitions)
for partition in partitions:
part_dir = os.path.join(datadir, partition)
if not os.path.isdir(part_dir):
continue
suffixes = os.listdir(part_dir)
if not suffixes:
os.rmdir(part_dir)
continue
for suffix in suffixes:
suff_dir = os.path.join(part_dir, suffix)
if not os.path.isdir(suff_dir):
continue
hashes = os.listdir(suff_dir)
if not hashes:
os.rmdir(suff_dir)
continue
for hsh in hashes:
hash_dir = os.path.join(suff_dir, hsh)
if not os.path.isdir(hash_dir):
continue
object_file = os.path.join(hash_dir, hsh + '.db')
# common case
if os.path.exists(object_file):
yield (partition, object_file, context)
continue
# look for any alternate db filenames
db_files = get_db_files(object_file)
if db_files:
yield (partition, db_files[-1], context)
continue
try:
os.rmdir(hash_dir)
except OSError as e:
if e.errno != errno.ENOTEMPTY:
raise
its = [walk_datadir(datadir, context, filt)
for datadir, context, filt in datadirs]
rr_its = round_robin_iter(its)
for datadir in rr_its:
yield datadir
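# round_robin_iter() is imported from swift.common.utils; the sketch below is
# only an illustrative stdlib equivalent of the interleaving relied on above
# (one item from each datadir generator in turn, dropping exhausted ones).
# It is not part of this module's API.
def _round_robin_demo(iterables):
    iterators = [iter(it) for it in iterables]
    while iterators:
        for it in list(iterators):
            try:
                yield next(it)
            except StopIteration:
                iterators.remove(it)
# e.g. list(_round_robin_demo(['ab', 'c', 'de'])) == ['a', 'c', 'd', 'b', 'e']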
class ReplConnection(BufferedHTTPConnection):
"""
Helper to simplify REPLICATEing to a remote server.
"""
def __init__(self, node, partition, hash_, logger):
self.logger = logger
self.node = node
host = "%s:%s" % (node['replication_ip'], node['replication_port'])
BufferedHTTPConnection.__init__(self, host)
self.path = '/%s/%s/%s' % (node['device'], partition, hash_)
def replicate(self, *args):
"""
Make an HTTP REPLICATE request
:param args: list of json-encodable objects
:returns: bufferedhttp response object
"""
try:
body = json.dumps(args)
self.request('REPLICATE', self.path, body,
{'Content-Type': 'application/json'})
response = self.getresponse()
response.data = response.read()
return response
except (Exception, Timeout):
self.close()
self.logger.exception(
'ERROR reading HTTP response from %s', self.node)
return None
class Replicator(Daemon):
"""
Implements the logic for directing db replication.
"""
def __init__(self, conf, logger=None):
self.conf = conf
self.logger = logger or get_logger(conf, log_route='replicator')
self.root = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.bind_ip = conf.get('bind_ip', '0.0.0.0')
self.port = int(conf.get('bind_port', self.default_port))
concurrency = int(conf.get('concurrency', 8))
self.cpool = GreenPool(size=concurrency)
swift_dir = conf.get('swift_dir', '/etc/swift')
self.ring = ring.Ring(swift_dir, ring_name=self.server_type)
self._local_device_ids = {}
self.per_diff = int(conf.get('per_diff', 1000))
self.max_diffs = int(conf.get('max_diffs') or 100)
self.interval = float(conf.get('interval') or
conf.get('run_pause') or 30)
if 'run_pause' in conf:
if 'interval' in conf:
self.logger.warning(
'Option %(type)s-replicator/run_pause is deprecated '
'and %(type)s-replicator/interval is already configured. '
'You can safely remove run_pause; it is now ignored and '
'will be removed in a future version.'
% {'type': self.server_type})
else:
self.logger.warning(
'Option %(type)s-replicator/run_pause is deprecated '
'and will be removed in a future version. '
'Update your configuration to use option '
'%(type)s-replicator/interval.'
% {'type': self.server_type})
self.databases_per_second = float(
conf.get('databases_per_second', 50))
self.node_timeout = float(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.rsync_compress = config_true_value(
conf.get('rsync_compress', 'no'))
self.rsync_module = conf.get('rsync_module', '').rstrip('/')
if not self.rsync_module:
self.rsync_module = '{replication_ip}::%s' % self.server_type
self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
swift.common.db.DB_PREALLOCATION = \
config_true_value(conf.get('db_preallocation', 'f'))
swift.common.db.QUERY_LOGGING = \
config_true_value(conf.get('db_query_logging', 'f'))
self._zero_stats()
self.recon_cache_path = conf.get('recon_cache_path',
DEFAULT_RECON_CACHE_PATH)
self.recon_replicator = server_type_to_recon_file(self.server_type)
self.rcache = os.path.join(self.recon_cache_path,
self.recon_replicator)
self.extract_device_re = re.compile('%s%s([^%s]+)' % (
self.root, os.path.sep, os.path.sep))
self.handoffs_only = config_true_value(conf.get('handoffs_only', 'no'))
self.handoff_delete = config_auto_int_value(
conf.get('handoff_delete', 'auto'), 0)
def _zero_stats(self):
"""Zero out the stats."""
self.stats = {'attempted': 0, 'success': 0, 'failure': 0, 'ts_repl': 0,
'no_change': 0, 'hashmatch': 0, 'rsync': 0, 'diff': 0,
'remove': 0, 'empty': 0, 'remote_merge': 0,
'start': time.time(), 'diff_capped': 0, 'deferred': 0,
'failure_nodes': {}}
def _report_stats(self):
"""Report the current stats to the logs."""
now = time.time()
self.logger.info(
'Attempted to replicate %(count)d dbs in %(time).5f seconds '
'(%(rate).5f/s)',
{'count': self.stats['attempted'],
'time': now - self.stats['start'],
'rate': self.stats['attempted'] /
(now - self.stats['start'] + 0.0000001)})
self.logger.info('Removed %(remove)d dbs', self.stats)
self.logger.info('%(success)s successes, %(failure)s failures',
self.stats)
dump_recon_cache(
{'replication_stats': self.stats,
'replication_time': now - self.stats['start'],
'replication_last': now},
self.rcache, self.logger)
self.logger.info(' '.join(['%s:%s' % item for item in
sorted(self.stats.items()) if item[0] in
('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl',
'empty', 'diff_capped', 'remote_merge')]))
def _add_failure_stats(self, failure_devs_info):
for node, dev in failure_devs_info:
self.stats['failure'] += 1
failure_devs = self.stats['failure_nodes'].setdefault(node, {})
failure_devs.setdefault(dev, 0)
failure_devs[dev] += 1
def _rsync_file(self, db_file, remote_file, whole_file=True,
different_region=False):
"""
Sync a single file using rsync. Used by _rsync_db to handle syncing.
:param db_file: file to be synced
:param remote_file: remote location to sync the DB file to
        :param whole_file: if True, uses rsync's --whole-file flag
:param different_region: if True, the destination node is in a
different region
:returns: True if the sync was successful, False otherwise
"""
popen_args = ['rsync', '--quiet', '--no-motd',
'--timeout=%s' % int(math.ceil(self.node_timeout)),
'--contimeout=%s' % int(math.ceil(self.conn_timeout))]
if whole_file:
popen_args.append('--whole-file')
if self.rsync_compress and different_region:
# Allow for compression, but only if the remote node is in
# a different region than the local one.
popen_args.append('--compress')
popen_args.extend([db_file, remote_file])
proc = subprocess.Popen(popen_args)
proc.communicate()
if proc.returncode != 0:
self.logger.error('ERROR rsync failed with %(code)s: %(args)s',
{'code': proc.returncode, 'args': popen_args})
return proc.returncode == 0
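    # For illustration, with default timeouts and rsync_module left unset the
    # command built above looks roughly like (host, device and ids made up):
    #   rsync --quiet --no-motd --timeout=10 --contimeout=1 --whole-file \
    #       /srv/node/sdb1/containers/.../<hash>.db \
    #       10.0.0.2::container/sdb1/tmp/<local_id>
    # with --compress added only when the destination is in another region.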
def _rsync_db(self, broker, device, http, local_id,
replicate_method='complete_rsync', replicate_timeout=None,
different_region=False):
"""
Sync a whole db using rsync.
:param broker: DB broker object of DB to be synced
:param device: device to sync to
:param http: ReplConnection object
:param local_id: unique ID of the local database replica
:param replicate_method: remote operation to perform after rsync
:param replicate_timeout: timeout to wait in seconds
:param different_region: if True, the destination node is in a
different region
"""
rsync_module = rsync_module_interpolation(self.rsync_module, device)
rsync_path = '%s/tmp/%s' % (device['device'], local_id)
remote_file = '%s/%s' % (rsync_module, rsync_path)
mtime = os.path.getmtime(broker.db_file)
if not self._rsync_file(broker.db_file, remote_file,
different_region=different_region):
return False
# perform block-level sync if the db was modified during the first sync
if os.path.exists(broker.db_file + '-journal') or \
os.path.getmtime(broker.db_file) > mtime:
# grab a lock so nobody else can modify it
with broker.lock():
if not self._rsync_file(broker.db_file, remote_file,
whole_file=False,
different_region=different_region):
return False
with Timeout(replicate_timeout or self.node_timeout):
response = http.replicate(replicate_method, local_id,
os.path.basename(broker.db_file))
return response and 200 <= response.status < 300
def _send_replicate_request(self, http, *repl_args):
with Timeout(self.node_timeout):
response = http.replicate(*repl_args)
if not response or not is_success(response.status):
if response:
self.logger.error('ERROR Bad response %s from %s',
response.status, http.host)
return False
return True
def _usync_db(self, point, broker, http, remote_id, local_id):
"""
Sync a db by sending all records since the last sync.
:param point: synchronization high water mark between the replicas
:param broker: database broker object
:param http: ReplConnection object for the remote server
:param remote_id: database id for the remote replica
:param local_id: database id for the local replica
:returns: boolean indicating completion and success
"""
self.stats['diff'] += 1
self.logger.increment('diffs')
self.logger.debug('%s usyncing chunks to %s, starting at row %s',
broker.db_file,
'%(ip)s:%(port)s/%(device)s' % http.node,
point)
start = time.time()
sync_table = broker.get_syncs()
objects = broker.get_items_since(point, self.per_diff)
diffs = 0
while len(objects) and diffs < self.max_diffs:
diffs += 1
if not self._send_replicate_request(
http, 'merge_items', objects, local_id):
return False
# replication relies on db order to send the next merge batch in
# order with no gaps
point = objects[-1]['ROWID']
objects = broker.get_items_since(point, self.per_diff)
self.logger.debug('%s usyncing chunks to %s, finished at row %s (%gs)',
broker.db_file,
'%(ip)s:%(port)s/%(device)s' % http.node,
point, time.time() - start)
if objects:
self.logger.debug(
'Synchronization for %s has fallen more than '
'%s rows behind; moving on and will try again next pass.',
broker, self.max_diffs * self.per_diff)
self.stats['diff_capped'] += 1
self.logger.increment('diff_caps')
else:
with Timeout(self.node_timeout):
response = http.replicate('merge_syncs', sync_table)
if response and 200 <= response.status < 300:
broker.merge_syncs([{'remote_id': remote_id,
'sync_point': point}],
incoming=False)
return True
return False
def _in_sync(self, rinfo, info, broker, local_sync):
"""
        Determine whether or not two replicas of a database are considered
to be in sync.
:param rinfo: remote database info
:param info: local database info
:param broker: database broker object
:param local_sync: cached last sync point between replicas
:returns: boolean indicating whether or not the replicas are in sync
"""
if max(rinfo['point'], local_sync) >= info['max_row']:
self.stats['no_change'] += 1
self.logger.increment('no_changes')
return True
if rinfo['hash'] == info['hash']:
self.stats['hashmatch'] += 1
self.logger.increment('hashmatches')
broker.merge_syncs([{'remote_id': rinfo['id'],
'sync_point': rinfo['point']}],
incoming=False)
return True
def _http_connect(self, node, partition, db_file):
"""
Make an http_connection using ReplConnection
:param node: node dictionary from the ring
:param partition: partition to send in the url
:param db_file: DB file
:returns: ReplConnection object
"""
hsh, other, ext = parse_db_filename(db_file)
return ReplConnection(node, partition, hsh, self.logger)
def _gather_sync_args(self, info):
"""
Convert local replication_info to sync args tuple.
"""
sync_args_order = ('max_row', 'hash', 'id', 'created_at',
'put_timestamp', 'delete_timestamp', 'metadata')
return tuple(info[key] for key in sync_args_order)
def _repl_to_node(self, node, broker, partition, info,
different_region=False):
"""
Replicate a database to a node.
:param node: node dictionary from the ring to be replicated to
        :param broker: DB broker for the DB to be replicated
:param partition: partition on the node to replicate to
:param info: DB info as a dictionary of {'max_row', 'hash', 'id',
'created_at', 'put_timestamp', 'delete_timestamp',
'metadata'}
:param different_region: if True, the destination node is in a
different region
:returns: True if successful, False otherwise
"""
http = self._http_connect(node, partition, broker.db_file)
sync_args = self._gather_sync_args(info)
with Timeout(self.node_timeout):
response = http.replicate('sync', *sync_args)
if not response:
return False
return self._handle_sync_response(node, response, info, broker, http,
different_region=different_region)
def _handle_sync_response(self, node, response, info, broker, http,
different_region=False):
if response.status == HTTP_NOT_FOUND: # completely missing, rsync
self.stats['rsync'] += 1
self.logger.increment('rsyncs')
return self._rsync_db(broker, node, http, info['id'],
different_region=different_region)
elif response.status == HTTP_INSUFFICIENT_STORAGE:
raise DriveNotMounted()
elif 200 <= response.status < 300:
rinfo = json.loads(response.data)
local_sync = broker.get_sync(rinfo['id'], incoming=False)
if rinfo.get('metadata', ''):
broker.update_metadata(json.loads(rinfo['metadata']))
return self._choose_replication_mode(
node, rinfo, info, local_sync, broker, http,
different_region)
return False
def _choose_replication_mode(self, node, rinfo, info, local_sync, broker,
http, different_region):
if self._in_sync(rinfo, info, broker, local_sync):
self.logger.debug('%s in sync with %s, nothing to do',
broker.db_file,
'%(ip)s:%(port)s/%(device)s' % node)
return True
# if the difference in rowids between the two differs by
# more than 50% and the difference is greater than per_diff,
# rsync then do a remote merge.
# NOTE: difference > per_diff stops us from dropping to rsync
        # on smaller containers, which have only a few rows to sync.
if (rinfo['max_row'] / float(info['max_row']) < 0.5 and
info['max_row'] - rinfo['max_row'] > self.per_diff):
self.stats['remote_merge'] += 1
self.logger.increment('remote_merges')
return self._rsync_db(broker, node, http, info['id'],
replicate_method='rsync_then_merge',
replicate_timeout=(info['count'] / 2000),
different_region=different_region)
# else send diffs over to the remote server
return self._usync_db(max(rinfo['point'], local_sync),
broker, http, rinfo['id'], info['id'])
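    # Worked example of the thresholds above, with the default per_diff of
    # 1000 (figures are illustrative):
    #   * remote max_row 900, local 1000: ratio 0.9, usync the missing rows
    #   * remote max_row 400, local 1000: ratio 0.4 but only 600 rows apart,
    #     still usync (difference not > per_diff)
    #   * remote max_row 400, local 3000: ratio ~0.13 and 2600 rows apart,
    #     fall back to rsync_then_merge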
def _post_replicate_hook(self, broker, info, responses):
"""
:param broker: broker instance for the database that just replicated
:param info: pre-replication full info dict
:param responses: a list of bools indicating success from nodes
"""
pass
def cleanup_post_replicate(self, broker, orig_info, responses):
"""
Cleanup non primary database from disk if needed.
:param broker: the broker for the database we're replicating
:param orig_info: snapshot of the broker replication info dict taken
before replication
:param responses: a list of boolean success values for each replication
request to other nodes
:return success: returns False if deletion of the database was
attempted but unsuccessful, otherwise returns True.
"""
log_template = 'Not deleting db %s (%%s)' % broker.db_file
max_row_delta = broker.get_max_row() - orig_info['max_row']
if max_row_delta < 0:
reason = 'negative max_row_delta: %s' % max_row_delta
self.logger.error(log_template, reason)
return True
if max_row_delta:
reason = '%s new rows' % max_row_delta
self.logger.debug(log_template, reason)
return True
if self.handoff_delete:
# delete handoff if we have had handoff_delete successes
successes_count = len([resp for resp in responses if resp])
delete_handoff = successes_count >= self.handoff_delete
else:
delete_handoff = responses and all(responses)
if not delete_handoff:
reason = '%s/%s success' % (responses.count(True), len(responses))
self.logger.debug(log_template, reason)
return True
# If the db has been successfully synced to all of its peers, it can be
# removed. Callers should have already checked that the db is not on a
# primary node.
if not self.delete_db(broker):
self.logger.debug(
'Failed to delete db %s', broker.db_file)
return False
self.logger.debug('Successfully deleted db %s', broker.db_file)
return True
def _reclaim(self, broker, now=None):
if not now:
now = time.time()
return broker.reclaim(now - self.reclaim_age,
now - (self.reclaim_age * 2))
def _replicate_object(self, partition, object_file, node_id):
"""
Replicate the db, choosing method based on whether or not it
already exists on peers.
:param partition: partition to be replicated to
:param object_file: DB file name to be replicated
:param node_id: node id of the node to be replicated from
:returns: a tuple (success, responses). ``success`` is a boolean that
is True if the method completed successfully, False otherwise.
``responses`` is a list of booleans each of which indicates the
success or not of replicating to a peer node if replication has
been attempted. ``success`` is False if any of ``responses`` is
False; when ``responses`` is empty, ``success`` may be either True
or False.
"""
start_time = now = time.time()
self.logger.debug('Replicating db %s', object_file)
self.stats['attempted'] += 1
self.logger.increment('attempts')
shouldbehere = True
responses = []
try:
broker = self.brokerclass(object_file, pending_timeout=30,
logger=self.logger)
self._reclaim(broker, now)
info = broker.get_replication_info()
bpart = self.ring.get_part(
info['account'], info.get('container'))
if bpart != int(partition):
partition = bpart
# Important to set this false here since the later check only
# checks if it's on the proper device, not partition.
shouldbehere = False
name = '/' + quote(info['account'])
if 'container' in info:
name += '/' + quote(info['container'])
self.logger.error(
'Found %s for %s when it should be on partition %s; will '
'replicate out and remove.' % (object_file, name, bpart))
except (Exception, Timeout) as e:
if 'no such table' in str(e):
self.logger.error('Quarantining DB %s', object_file)
quarantine_db(broker.db_file, broker.db_type)
else:
self.logger.exception('ERROR reading db %s', object_file)
nodes = self.ring.get_part_nodes(int(partition))
self._add_failure_stats([(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in nodes])
self.logger.increment('failures')
return False, responses
if broker.is_reclaimable(now, self.reclaim_age):
if self.report_up_to_date(info):
self.delete_db(broker)
self.logger.timing_since('timing', start_time)
return True, responses
failure_devs_info = set()
nodes = self.ring.get_part_nodes(int(partition))
local_dev = None
for node in nodes:
if node['id'] == node_id:
local_dev = node
break
if shouldbehere:
shouldbehere = bool([n for n in nodes if n['id'] == node_id])
# See Footnote [1] for an explanation of the repl_nodes assignment.
if len(nodes) > 1:
i = 0
while i < len(nodes) and nodes[i]['id'] != node_id:
i += 1
repl_nodes = nodes[i + 1:] + nodes[:i]
else: # Special case if using only a single replica
repl_nodes = nodes
more_nodes = self.ring.get_more_nodes(int(partition))
if not local_dev:
# Check further if local device is a handoff node
for node in self.ring.get_more_nodes(int(partition)):
if node['id'] == node_id:
local_dev = node
break
for node in repl_nodes:
different_region = False
if local_dev and local_dev['region'] != node['region']:
# This additional information will help later if we
# want to handle syncing to a node in different
# region with some optimizations.
different_region = True
success = False
try:
success = self._repl_to_node(node, broker, partition, info,
different_region)
except DriveNotMounted:
try:
repl_nodes.append(next(more_nodes))
except StopIteration:
self.logger.error(
'ERROR There are not enough handoff nodes to reach '
'replica count for partition %s',
partition)
self.logger.error('ERROR Remote drive not mounted %s', node)
except (Exception, Timeout):
self.logger.exception('ERROR syncing %(file)s with node'
' %(node)s',
{'file': object_file, 'node': node})
if not success:
failure_devs_info.add((node['replication_ip'], node['device']))
self.logger.increment('successes' if success else 'failures')
responses.append(success)
try:
self._post_replicate_hook(broker, info, responses)
except (Exception, Timeout):
self.logger.exception('UNHANDLED EXCEPTION: in post replicate '
'hook for %s', broker.db_file)
if not shouldbehere:
if not self.cleanup_post_replicate(broker, info, responses):
failure_devs_info.update(
[(failure_dev['replication_ip'], failure_dev['device'])
for failure_dev in repl_nodes])
target_devs_info = set([(target_dev['replication_ip'],
target_dev['device'])
for target_dev in repl_nodes])
self.stats['success'] += len(target_devs_info - failure_devs_info)
self._add_failure_stats(failure_devs_info)
self.logger.timing_since('timing', start_time)
if shouldbehere:
responses.append(True)
return all(responses), responses
def delete_db(self, broker):
object_file = broker.db_file
hash_dir = os.path.dirname(object_file)
suf_dir = os.path.dirname(hash_dir)
with lock_parent_directory(object_file):
shutil.rmtree(hash_dir, True)
self.stats['remove'] += 1
device_name = self.extract_device(object_file)
self.logger.increment('removes.' + device_name)
for parent_dir in (suf_dir, os.path.dirname(suf_dir)):
try:
os.rmdir(parent_dir)
except OSError as err:
if err.errno == errno.ENOTEMPTY:
break
elif err.errno == errno.ENOENT:
continue
else:
self.logger.exception(
'ERROR while trying to clean up %s', parent_dir)
return False
return True
def extract_device(self, object_file):
"""
Extract the device name from an object path. Returns "UNKNOWN" if the
path could not be extracted successfully for some reason.
:param object_file: the path to a database file.
"""
match = self.extract_device_re.match(object_file)
if match:
return match.groups()[0]
return "UNKNOWN"
def _partition_dir_filter(self, device_id, partitions_to_replicate):
def filt(partition_dir):
partition = int(partition_dir)
if self.handoffs_only:
primary_node_ids = [
d['id'] for d in self.ring.get_part_nodes(partition)]
if device_id in primary_node_ids:
return False
if partition not in partitions_to_replicate:
return False
return True
return filt
def report_up_to_date(self, full_info):
return True
def roundrobin_datadirs(self, dirs):
return RateLimitedIterator(
roundrobin_datadirs(dirs),
elements_per_second=self.databases_per_second)
def run_once(self, *args, **kwargs):
"""Run a replication pass once."""
override_options = parse_override_options(once=True, **kwargs)
devices_to_replicate = override_options.devices or Everything()
partitions_to_replicate = override_options.partitions or Everything()
self._zero_stats()
dirs = []
ips = whataremyips(self.bind_ip)
if not ips:
self.logger.error('ERROR Failed to get my own IPs?')
return
if self.handoffs_only or self.handoff_delete:
self.logger.warning(
'Starting replication pass with handoffs_only '
                'and/or handoff_delete enabled. '
'These modes are not intended for normal '
'operation; use these options with care.')
self._local_device_ids = {}
found_local = False
for node in self.ring.devs:
if node and is_local_device(ips, self.port,
node['replication_ip'],
node['replication_port']):
found_local = True
try:
dev_path = check_drive(self.root, node['device'],
self.mount_check)
except ValueError as err:
self._add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in self.ring.devs if failure_dev])
self.logger.warning('Skipping: %s', err)
continue
if node['device'] not in devices_to_replicate:
self.logger.debug(
'Skipping device %s due to given arguments',
node['device'])
continue
unlink_older_than(
os.path.join(dev_path, 'tmp'),
time.time() - self.reclaim_age)
datadir = os.path.join(self.root, node['device'], self.datadir)
if os.path.isdir(datadir):
self._local_device_ids[node['id']] = node
part_filt = self._partition_dir_filter(
node['id'], partitions_to_replicate)
dirs.append((datadir, node['id'], part_filt))
if not found_local:
self.logger.error("Can't find itself %s with port %s in ring "
"file, not replicating",
", ".join(ips), self.port)
self.logger.info('Beginning replication run')
for part, object_file, node_id in self.roundrobin_datadirs(dirs):
self.cpool.spawn_n(
self._replicate_object, part, object_file, node_id)
self.cpool.waitall()
self.logger.info('Replication run OVER')
if self.handoffs_only or self.handoff_delete:
self.logger.warning(
'Finished replication pass with handoffs_only and/or '
                'handoff_delete enabled. If these are no longer required, '
'disable them.')
self._report_stats()
def run_forever(self, *args, **kwargs):
"""
Replicate dbs under the given root in an infinite loop.
"""
sleep(random.random() * self.interval)
while True:
begin = time.time()
try:
self.run_once()
except (Exception, Timeout):
self.logger.exception('ERROR trying to replicate')
elapsed = time.time() - begin
if elapsed < self.interval:
sleep(self.interval - elapsed)
class ReplicatorRpc(object):
"""Handle Replication RPC calls. TODO(redbo): document please :)"""
def __init__(self, root, datadir, broker_class, mount_check=True,
logger=None):
self.root = root
self.datadir = datadir
self.broker_class = broker_class
self.mount_check = mount_check
self.logger = logger or get_logger({}, log_route='replicator-rpc')
def _db_file_exists(self, db_path):
return os.path.exists(db_path)
def dispatch(self, replicate_args, args):
if not hasattr(args, 'pop'):
return HTTPBadRequest(body='Invalid object type')
op = args.pop(0)
drive, partition, hsh = replicate_args
try:
dev_path = check_drive(self.root, drive, self.mount_check)
except ValueError:
return Response(status='507 %s is not mounted' % drive)
db_file = os.path.join(dev_path,
storage_directory(self.datadir, partition, hsh),
hsh + '.db')
if op == 'rsync_then_merge':
return self.rsync_then_merge(drive, db_file, args)
if op == 'complete_rsync':
return self.complete_rsync(drive, db_file, args)
else:
# someone might be about to rsync a db to us,
# make sure there's a tmp dir to receive it.
mkdirs(os.path.join(self.root, drive, 'tmp'))
if not self._db_file_exists(db_file):
return HTTPNotFound()
return getattr(self, op)(
self.broker_class(db_file, logger=self.logger), args)
@contextmanager
def debug_timing(self, name):
timemark = time.time()
yield
timespan = time.time() - timemark
if timespan > DEBUG_TIMINGS_THRESHOLD:
self.logger.debug(
'replicator-rpc-sync time for %s: %.02fs' % (
name, timespan))
def _parse_sync_args(self, args):
"""
Convert remote sync args to remote_info dictionary.
"""
(remote_sync, hash_, id_, created_at, put_timestamp,
delete_timestamp, metadata) = args[:7]
remote_metadata = {}
if metadata:
try:
remote_metadata = json.loads(metadata)
except ValueError:
self.logger.error("Unable to decode remote metadata %r",
metadata)
remote_info = {
'point': remote_sync,
'hash': hash_,
'id': id_,
'created_at': created_at,
'put_timestamp': put_timestamp,
'delete_timestamp': delete_timestamp,
'metadata': remote_metadata,
}
return remote_info
def sync(self, broker, args):
remote_info = self._parse_sync_args(args)
return self._handle_sync_request(broker, remote_info)
def _get_synced_replication_info(self, broker, remote_info):
"""
Apply any changes to the broker based on remote_info and return the
current replication info.
:param broker: the database broker
:param remote_info: the remote replication info
:returns: local broker replication info
"""
return broker.get_replication_info()
def _handle_sync_request(self, broker, remote_info):
"""
Update metadata, timestamps, sync points.
"""
with self.debug_timing('info'):
try:
info = self._get_synced_replication_info(broker, remote_info)
except (Exception, Timeout) as e:
if 'no such table' in str(e):
self.logger.error("Quarantining DB %s", broker)
quarantine_db(broker.db_file, broker.db_type)
return HTTPNotFound()
raise
# TODO(mattoliverau) At this point in the RPC, we have the callers
# replication info and ours, so it would be cool to be able to make
# an educated guess here on the size of the incoming replication (maybe
# average object table row size * difference in ROWIDs or something)
# and the fallocate_reserve setting so we could return a 507.
# This would make db fallocate_reserve more or less on par with the
# object's.
if remote_info['metadata']:
with self.debug_timing('update_metadata'):
broker.update_metadata(remote_info['metadata'])
sync_timestamps = ('created_at', 'put_timestamp', 'delete_timestamp')
if any(info[ts] != remote_info[ts] for ts in sync_timestamps):
with self.debug_timing('merge_timestamps'):
broker.merge_timestamps(*(remote_info[ts] for ts in
sync_timestamps))
with self.debug_timing('get_sync'):
info['point'] = broker.get_sync(remote_info['id'])
if remote_info['hash'] == info['hash'] and \
info['point'] < remote_info['point']:
with self.debug_timing('merge_syncs'):
translate = {
'remote_id': 'id',
'sync_point': 'point',
}
data = dict((k, remote_info[v]) for k, v in translate.items())
broker.merge_syncs([data])
info['point'] = remote_info['point']
return Response(json.dumps(info))
def merge_syncs(self, broker, args):
broker.merge_syncs(args[0])
return HTTPAccepted()
def merge_items(self, broker, args):
broker.merge_items(args[0], args[1])
return HTTPAccepted()
def complete_rsync(self, drive, db_file, args):
old_filename = os.path.join(self.root, drive, 'tmp', args[0])
if args[1:]:
db_file = os.path.join(os.path.dirname(db_file), args[1])
if os.path.exists(db_file):
return HTTPNotFound()
if not os.path.exists(old_filename):
return HTTPNotFound()
broker = self.broker_class(old_filename, logger=self.logger)
broker.newid(args[0])
renamer(old_filename, db_file)
return HTTPNoContent()
def _abort_rsync_then_merge(self, db_file, tmp_filename):
return not (self._db_file_exists(db_file) and
os.path.exists(tmp_filename))
def _post_rsync_then_merge_hook(self, existing_broker, new_broker):
# subclasses may override to make custom changes to the new broker
pass
def rsync_then_merge(self, drive, db_file, args):
tmp_filename = os.path.join(self.root, drive, 'tmp', args[0])
if self._abort_rsync_then_merge(db_file, tmp_filename):
return HTTPNotFound()
new_broker = self.broker_class(tmp_filename, logger=self.logger)
existing_broker = self.broker_class(db_file, logger=self.logger)
db_file = existing_broker.db_file
point = -1
objects = existing_broker.get_items_since(point, 1000)
while len(objects):
new_broker.merge_items(objects)
point = objects[-1]['ROWID']
objects = existing_broker.get_items_since(point, 1000)
sleep()
new_broker.merge_syncs(existing_broker.get_syncs())
self._post_rsync_then_merge_hook(existing_broker, new_broker)
new_broker.newid(args[0])
new_broker.update_metadata(existing_broker.metadata)
if self._abort_rsync_then_merge(db_file, tmp_filename):
return HTTPNotFound()
renamer(tmp_filename, db_file)
return HTTPNoContent()
# Footnote [1]:
# This orders the nodes so that, given nodes a b c, a will contact b then c,
# b will contact c then a, and c will contact a then b -- in other words, each
# node will always contact the next node in the list first.
# This helps in the case where databases are all way out of sync, so each
# node is likely to be sending to a different node than it's receiving from,
# rather than two nodes talking to each other, starving out the third.
# If the third didn't even have a copy and the first two nodes were way out
# of sync, such starvation would mean the third node wouldn't get any copy
# until the first two nodes finally got in sync, which could take a while.
# This new ordering ensures such starvation doesn't occur, making the data
# more durable.
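# As a minimal sketch of the ordering described above (the helper name is
# illustrative; the real computation is inlined in _replicate_object as
# ``repl_nodes = nodes[i + 1:] + nodes[:i]``):
def _repl_order_demo(nodes, local_index):
    # Start with the node after the local one and wrap around, so peers tend
    # to push to different targets instead of pairing up with each other.
    return nodes[local_index + 1:] + nodes[:local_index]
# e.g. _repl_order_demo(['a', 'b', 'c'], 0) == ['b', 'c']
#      _repl_order_demo(['a', 'b', 'c'], 1) == ['c', 'a']
#      _repl_order_demo(['a', 'b', 'c'], 2) == ['a', 'b']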
| swift-master | swift/common/db_replicator.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from eventlet import sleep, Timeout, spawn
from eventlet.green import httplib, socket
import json
import six
from six.moves import range
from six.moves import urllib
import struct
from sys import exc_info, exit
import zlib
from time import gmtime, strftime, time
from zlib import compressobj
from swift.common.exceptions import ClientException
from swift.common.http import (HTTP_NOT_FOUND, HTTP_MULTIPLE_CHOICES,
is_client_error, is_server_error)
from swift.common.middleware.gatekeeper import GatekeeperMiddleware
from swift.common.request_helpers import USE_REPLICATION_NETWORK_HEADER
from swift.common.swob import Request, bytes_to_wsgi
from swift.common.utils import quote, close_if_possible, drain_and_close
from swift.common.wsgi import loadapp
if six.PY3:
from eventlet.green.urllib import request as urllib2
else:
from eventlet.green import urllib2
class UnexpectedResponse(Exception):
"""
Exception raised on invalid responses to InternalClient.make_request().
:param message: Exception message.
:param resp: The unexpected response.
"""
def __init__(self, message, resp):
super(UnexpectedResponse, self).__init__(message)
self.resp = resp
class CompressingFileReader(object):
"""
Wrapper for file object to compress object while reading.
Can be used to wrap file objects passed to InternalClient.upload_object().
Used in testing of InternalClient.
:param file_obj: File object to wrap.
:param compresslevel: Compression level, defaults to 9.
:param chunk_size: Size of chunks read when iterating using object,
defaults to 4096.
"""
def __init__(self, file_obj, compresslevel=9, chunk_size=4096):
self._f = file_obj
self.compresslevel = compresslevel
self.chunk_size = chunk_size
self.set_initial_state()
def set_initial_state(self):
"""
Sets the object to the state needed for the first read.
"""
self._f.seek(0)
self._compressor = compressobj(
self.compresslevel, zlib.DEFLATED, -zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL, 0)
self.done = False
self.first = True
self.crc32 = 0
self.total_size = 0
def read(self, *a, **kw):
"""
Reads a chunk from the file object.
Params are passed directly to the underlying file object's read().
:returns: Compressed chunk from file object.
"""
if self.done:
return b''
x = self._f.read(*a, **kw)
if x:
self.crc32 = zlib.crc32(x, self.crc32) & 0xffffffff
self.total_size += len(x)
compressed = self._compressor.compress(x)
if not compressed:
compressed = self._compressor.flush(zlib.Z_SYNC_FLUSH)
else:
compressed = self._compressor.flush(zlib.Z_FINISH)
crc32 = struct.pack("<L", self.crc32 & 0xffffffff)
size = struct.pack("<L", self.total_size & 0xffffffff)
footer = crc32 + size
compressed += footer
self.done = True
if self.first:
self.first = False
header = b'\037\213\010\000\000\000\000\000\002\377'
compressed = header + compressed
return compressed
def __iter__(self):
return self
def __next__(self):
chunk = self.read(self.chunk_size)
if chunk:
return chunk
raise StopIteration
next = __next__
def seek(self, offset, whence=0):
if not (offset == 0 and whence == 0):
raise NotImplementedError('Seek implemented on offset 0 only')
self.set_initial_state()
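# The reader above emits a complete gzip member: the fixed 10-byte header on
# the first read, raw deflate data in the middle, and a CRC32 + size footer
# at the end. A minimal sketch of decoding it with the stdlib (the helper
# name is illustrative; zlib is already imported at the top of this module):
def _gunzip_demo(gzip_bytes):
    # 16 + MAX_WBITS asks zlib to parse a gzip header and trailer.
    return zlib.decompress(gzip_bytes, 16 + zlib.MAX_WBITS)
# Usage (illustrative):
#   import io
#   reader = CompressingFileReader(io.BytesIO(b'some object body'))
#   assert _gunzip_demo(b''.join(reader)) == b'some object body'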
class InternalClient(object):
"""
An internal client that uses a swift proxy app to make requests to Swift.
This client will exponentially slow down for retries.
:param conf_path: Full path to proxy config.
:param user_agent: User agent to be sent to requests to Swift.
:param request_tries: Number of tries before InternalClient.make_request()
gives up.
:param use_replication_network: Force the client to use the replication
network over the cluster.
:param global_conf: a dict of options to update the loaded proxy config.
Options in ``global_conf`` will override those in ``conf_path`` except
where the ``conf_path`` option is preceded by ``set``.
:param app: Optionally provide a WSGI app for the internal client to use.
"""
def __init__(self, conf_path, user_agent, request_tries,
use_replication_network=False, global_conf=None, app=None,
**kwargs):
if request_tries < 1:
raise ValueError('request_tries must be positive')
# Internal clients don't use the gatekeeper and the pipeline remains
# static so we never allow anything to modify the proxy pipeline.
if kwargs.get('allow_modify_pipeline'):
raise ValueError("'allow_modify_pipeline' is no longer supported")
self.app = app or loadapp(conf_path, global_conf=global_conf,
allow_modify_pipeline=False,)
self.check_gatekeeper_not_loaded(self.app)
self.user_agent = \
self.app._pipeline_final_app.backend_user_agent = user_agent
self.request_tries = request_tries
self.use_replication_network = use_replication_network
self.get_object_ring = self.app._pipeline_final_app.get_object_ring
self.container_ring = self.app._pipeline_final_app.container_ring
self.account_ring = self.app._pipeline_final_app.account_ring
self.auto_create_account_prefix = \
self.app._pipeline_final_app.auto_create_account_prefix
@staticmethod
def check_gatekeeper_not_loaded(app):
# the Gatekeeper middleware would prevent an InternalClient passing
# X-Backend-* headers to the proxy app, so ensure it's not present
try:
for app in app._pipeline:
if isinstance(app, GatekeeperMiddleware):
raise ValueError(
"Gatekeeper middleware is not allowed in the "
"InternalClient proxy pipeline")
except AttributeError:
pass
def make_request(
self, method, path, headers, acceptable_statuses, body_file=None,
params=None):
"""Makes a request to Swift with retries.
:param method: HTTP method of request.
:param path: Path of request.
:param headers: Headers to be sent with request.
:param acceptable_statuses: List of acceptable statuses for request.
:param body_file: Body file to be passed along with request,
defaults to None.
:param params: A dict of params to be set in request query string,
defaults to None.
:returns: Response object on success.
:raises UnexpectedResponse: Exception raised when make_request() fails
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = dict(headers)
headers['user-agent'] = self.user_agent
headers.setdefault('x-backend-allow-reserved-names', 'true')
if self.use_replication_network:
headers.setdefault(USE_REPLICATION_NETWORK_HEADER, 'true')
for attempt in range(self.request_tries):
resp = exc_type = exc_value = exc_traceback = None
req = Request.blank(
path, environ={'REQUEST_METHOD': method}, headers=headers)
if body_file is not None:
if hasattr(body_file, 'seek'):
body_file.seek(0)
req.body_file = body_file
if params:
req.params = params
try:
                # execute in a separate greenthread to not pollute corolocals
resp = spawn(req.get_response, self.app).wait()
except (Exception, Timeout):
exc_type, exc_value, exc_traceback = exc_info()
else:
if resp.status_int in acceptable_statuses or \
resp.status_int // 100 in acceptable_statuses:
return resp
elif not is_server_error(resp.status_int):
# No sense retrying when we expect the same result
break
# sleep only between tries, not after each one
if attempt < self.request_tries - 1:
if resp:
# for non 2XX requests it's safe and useful to drain
# the response body so we log the correct status code
if resp.status_int // 100 != 2:
drain_and_close(resp)
else:
# Just close; the 499 is appropriate
close_if_possible(resp.app_iter)
sleep(2 ** (attempt + 1))
if resp:
msg = 'Unexpected response: %s' % resp.status
if resp.status_int // 100 != 2 and resp.body:
# provide additional context (and drain the response body) for
# non 2XX responses
msg += ' (%s)' % resp.body
raise UnexpectedResponse(msg, resp)
if exc_type:
# To make pep8 tool happy, in place of raise t, v, tb:
six.reraise(exc_type, exc_value, exc_traceback)
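    # Retry illustration: with request_tries=3 a failing request is attempted
    # three times, sleeping 2s after the first failure and 4s after the
    # second (sleep(2 ** (attempt + 1))); no sleep follows the final attempt
    # before UnexpectedResponse (or the original exception) is raised.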
def handle_request(self, *args, **kwargs):
resp = self.make_request(*args, **kwargs)
# Drain the response body to prevent unexpected disconnect
# in proxy-server
drain_and_close(resp)
def _get_metadata(
self, path, metadata_prefix='', acceptable_statuses=(2,),
headers=None, params=None):
"""
Gets metadata by doing a HEAD on a path and using the metadata_prefix
to get values from the headers returned.
:param path: Path to do HEAD on.
:param metadata_prefix: Used to filter values from the headers
returned. Will strip that prefix from the
keys in the dict returned. Defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:param headers: extra headers to send
:returns: A dict of metadata with metadata_prefix stripped from keys.
Keys will be lowercase.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = headers or {}
resp = self.make_request('HEAD', path, headers, acceptable_statuses,
params=params)
metadata_prefix = metadata_prefix.lower()
metadata = {}
for k, v in resp.headers.items():
if k.lower().startswith(metadata_prefix):
metadata[k[len(metadata_prefix):].lower()] = v
return metadata
def _iter_items(
self, path, marker='', end_marker='', prefix='',
acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Returns an iterator of items from a json listing. Assumes listing has
'name' key defined and uses markers.
:param path: Path to do GET on.
:param marker: Prefix of first desired item, defaults to ''.
:param end_marker: Last item returned will be 'less' than this,
defaults to ''.
:param prefix: Prefix of items
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
if not isinstance(marker, bytes):
marker = marker.encode('utf8')
if not isinstance(end_marker, bytes):
end_marker = end_marker.encode('utf8')
if not isinstance(prefix, bytes):
prefix = prefix.encode('utf8')
while True:
resp = self.make_request(
'GET', '%s?format=json&marker=%s&end_marker=%s&prefix=%s' %
(path, bytes_to_wsgi(quote(marker)),
bytes_to_wsgi(quote(end_marker)),
bytes_to_wsgi(quote(prefix))),
{}, acceptable_statuses)
if not resp.status_int == 200:
if resp.status_int >= HTTP_MULTIPLE_CHOICES:
b''.join(resp.app_iter)
break
data = json.loads(resp.body)
if not data:
break
for item in data:
yield item
marker = data[-1]['name'].encode('utf8')
def make_path(self, account, container=None, obj=None):
"""
Returns a swift path for a request quoting and utf-8 encoding the path
parts as need be.
:param account: swift account
:param container: container, defaults to None
:param obj: object, defaults to None
:raises ValueError: Is raised if obj is specified and container is
not.
"""
path = '/v1/%s' % quote(account)
if container:
path += '/%s' % quote(container)
if obj:
path += '/%s' % quote(obj)
elif obj:
raise ValueError('Object specified without container')
return path
def _set_metadata(
self, path, metadata, metadata_prefix='',
acceptable_statuses=(2,)):
"""
Sets metadata on path using metadata_prefix to set values in headers of
POST request.
:param path: Path to do POST on.
:param metadata: Dict of metadata to set.
:param metadata_prefix: Prefix used to set metadata values in headers
of requests, used to prefix keys in metadata
when setting metadata, defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = {}
for k, v in metadata.items():
if k.lower().startswith(metadata_prefix):
headers[k] = v
else:
headers['%s%s' % (metadata_prefix, k)] = v
self.handle_request('POST', path, headers, acceptable_statuses)
# account methods
def iter_containers(
self, account, marker='', end_marker='', prefix='',
acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Returns an iterator of containers dicts from an account.
:param account: Account on which to do the container listing.
:param marker: Prefix of first desired item, defaults to ''.
:param end_marker: Last item returned will be 'less' than this,
defaults to ''.
:param prefix: Prefix of containers
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
return self._iter_items(path, marker, end_marker, prefix,
acceptable_statuses)
def create_account(self, account):
"""
Creates an account.
:param account: Account to create.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
self.handle_request('PUT', path, {}, (201, 202))
def delete_account(self, account, acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Deletes an account.
:param account: Account to delete.
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
self.handle_request('DELETE', path, {}, acceptable_statuses)
def get_account_info(
self, account, acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Returns (container_count, object_count) for an account.
:param account: Account on which to get the information.
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
resp = self.make_request('HEAD', path, {}, acceptable_statuses)
if not resp.status_int // 100 == 2:
return (0, 0)
return (int(resp.headers.get('x-account-container-count', 0)),
int(resp.headers.get('x-account-object-count', 0)))
def get_account_metadata(
self, account, metadata_prefix='', acceptable_statuses=(2,),
params=None):
"""Gets account metadata.
:param account: Account on which to get the metadata.
:param metadata_prefix: Used to filter values from the headers
returned. Will strip that prefix from the
keys in the dict returned. Defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:returns: Returns dict of account metadata. Keys will be lowercase.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
return self._get_metadata(path, metadata_prefix, acceptable_statuses,
headers=None, params=params)
def set_account_metadata(
self, account, metadata, metadata_prefix='',
acceptable_statuses=(2,)):
"""
Sets account metadata. A call to this will add to the account
metadata and not overwrite all of it with values in the metadata dict.
To clear an account metadata value, pass an empty string as
the value for the key in the metadata dict.
        :param account: Account on which to set the metadata.
:param metadata: Dict of metadata to set.
:param metadata_prefix: Prefix used to set metadata values in headers
of requests, used to prefix keys in metadata
when setting metadata, defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
self._set_metadata(
path, metadata, metadata_prefix, acceptable_statuses)
# container methods
def container_exists(self, account, container):
"""Checks to see if a container exists.
:param account: The container's account.
:param container: Container to check.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
        :returns: True if container exists, False otherwise.
"""
path = self.make_path(account, container)
resp = self.make_request('HEAD', path, {}, (2, HTTP_NOT_FOUND))
return not resp.status_int == HTTP_NOT_FOUND
def create_container(
self, account, container, headers=None, acceptable_statuses=(2,)):
"""
Creates container.
:param account: The container's account.
:param container: Container to create.
:param headers: Defaults to empty dict.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = headers or {}
path = self.make_path(account, container)
self.handle_request('PUT', path, headers, acceptable_statuses)
def delete_container(
self, account, container, headers=None,
acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Deletes a container.
:param account: The container's account.
:param container: Container to delete.
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = headers or {}
path = self.make_path(account, container)
self.handle_request('DELETE', path, headers, acceptable_statuses)
def get_container_metadata(
self, account, container, metadata_prefix='',
acceptable_statuses=(2,), params=None):
"""Gets container metadata.
:param account: The container's account.
:param container: Container to get metadata on.
:param metadata_prefix: Used to filter values from the headers
returned. Will strip that prefix from the
keys in the dict returned. Defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:returns: Returns dict of container metadata. Keys will be lowercase.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container)
return self._get_metadata(path, metadata_prefix, acceptable_statuses,
params=params)
def iter_objects(
self, account, container, marker='', end_marker='', prefix='',
acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Returns an iterator of object dicts from a container.
:param account: The container's account.
:param container: Container to iterate objects on.
:param marker: Prefix of first desired item, defaults to ''.
:param end_marker: Last item returned will be 'less' than this,
defaults to ''.
:param prefix: Prefix of objects
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container)
return self._iter_items(path, marker, end_marker, prefix,
acceptable_statuses)
def set_container_metadata(
self, account, container, metadata, metadata_prefix='',
acceptable_statuses=(2,)):
"""
Sets container metadata. A call to this will add to the container
metadata and not overwrite all of it with values in the metadata dict.
To clear a container metadata value, pass an empty string as the value
for the key in the metadata dict.
:param account: The container's account.
:param container: Container to set metadata on.
:param metadata: Dict of metadata to set.
:param metadata_prefix: Prefix used to set metadata values in headers
of requests, used to prefix keys in metadata
when setting metadata, defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container)
self._set_metadata(
path, metadata, metadata_prefix, acceptable_statuses)
# object methods
def delete_object(
self, account, container, obj,
acceptable_statuses=(2, HTTP_NOT_FOUND),
headers=None):
"""
Deletes an object.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:param headers: extra headers to send with request
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container, obj)
self.handle_request('DELETE', path, (headers or {}),
acceptable_statuses)
def get_object_metadata(
self, account, container, obj, metadata_prefix='',
acceptable_statuses=(2,), headers=None, params=None):
"""Gets object metadata.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param metadata_prefix: Used to filter values from the headers
returned. Will strip that prefix from the
keys in the dict returned. Defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:param headers: extra headers to send with request
:returns: Dict of object metadata.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container, obj)
return self._get_metadata(path, metadata_prefix, acceptable_statuses,
headers=headers, params=params)
def get_object(self, account, container, obj, headers=None,
acceptable_statuses=(2,), params=None):
"""
Gets an object.
:param account: The object's account.
:param container: The object's container.
:param obj: The object name.
:param headers: Headers to send with request, defaults to empty dict.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:param params: A dict of params to be set in request query string,
defaults to None.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
:returns: A 3-tuple (status, headers, iterator of object body)
"""
headers = headers or {}
path = self.make_path(account, container, obj)
resp = self.make_request(
'GET', path, headers, acceptable_statuses, params=params)
return (resp.status_int, resp.headers, resp.app_iter)
def iter_object_lines(
self, account, container, obj, headers=None,
acceptable_statuses=(2,)):
"""
Returns an iterator of object lines from an uncompressed or compressed
text object.
        The object is uncompressed as it is read if its name ends with '.gz'.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = headers or {}
path = self.make_path(account, container, obj)
resp = self.make_request('GET', path, headers, acceptable_statuses)
if not resp.status_int // 100 == 2:
return
last_part = b''
compressed = obj.endswith('.gz')
# magic in the following zlib.decompressobj argument is courtesy of
# Python decompressing gzip chunk-by-chunk
# http://stackoverflow.com/questions/2423866
d = zlib.decompressobj(16 + zlib.MAX_WBITS)
for chunk in resp.app_iter:
if compressed:
chunk = d.decompress(chunk)
parts = chunk.split(b'\n')
if len(parts) == 1:
last_part = last_part + parts[0]
else:
parts[0] = last_part + parts[0]
for part in parts[:-1]:
yield part
last_part = parts[-1]
if last_part:
yield last_part
def set_object_metadata(
self, account, container, obj, metadata,
metadata_prefix='', acceptable_statuses=(2,)):
"""
Sets an object's metadata. The object's metadata will be overwritten
by the values in the metadata dict.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param metadata: Dict of metadata to set.
:param metadata_prefix: Prefix used to set metadata values in headers
of requests, used to prefix keys in metadata
when setting metadata, defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container, obj)
self._set_metadata(
path, metadata, metadata_prefix, acceptable_statuses)
def upload_object(
self, fobj, account, container, obj, headers=None,
acceptable_statuses=(2,), params=None):
"""
:param fobj: File object to read object's content from.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param headers: Headers to send with request, defaults to empty dict.
:param acceptable_statuses: List of acceptable statuses for request.
:param params: A dict of params to be set in request query string,
defaults to None.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = dict(headers or {})
if 'Content-Length' not in headers:
headers['Transfer-Encoding'] = 'chunked'
path = self.make_path(account, container, obj)
self.handle_request('PUT', path, headers, acceptable_statuses, fobj,
params=params)
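# Illustrative sketch, not part of the upstream module: a typical way a
# daemon drives the internal client defined above. The conf path, account
# name and metadata values are made-up placeholders, and `InternalClient`
# is assumed to be the client class defined earlier in this file.
def _example_internal_client_walk():
    ic = InternalClient('/etc/swift/internal-client.conf',
                        'example-daemon', request_tries=3)
    for container in ic.iter_containers('AUTH_test'):
        for obj in ic.iter_objects('AUTH_test', container['name']):
            ic.set_object_metadata(
                'AUTH_test', container['name'], obj['name'],
                {'X-Object-Meta-Example': 'yes'})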
def get_auth(url, user, key, auth_version='1.0', **kwargs):
if auth_version != '1.0':
exit('ERROR: swiftclient missing, only auth v1.0 supported')
req = urllib2.Request(url)
req.add_header('X-Auth-User', user)
req.add_header('X-Auth-Key', key)
conn = urllib2.urlopen(req)
headers = conn.info()
return (
headers.getheader('X-Storage-Url'),
headers.getheader('X-Auth-Token'))
class SimpleClient(object):
"""
Simple client that is used in bin/swift-dispersion-* and container sync
"""
def __init__(self, url=None, token=None, starting_backoff=1,
max_backoff=5, retries=5):
self.url = url
self.token = token
        self.attempts = 0  # needed in swift-dispersion-populate
self.starting_backoff = starting_backoff
self.max_backoff = max_backoff
self.retries = retries
def base_request(self, method, container=None, name=None, prefix=None,
headers=None, proxy=None, contents=None,
full_listing=None, logger=None, additional_info=None,
timeout=None, marker=None):
# Common request method
trans_start = time()
url = self.url
if full_listing:
info, body_data = self.base_request(
method, container, name, prefix, headers, proxy,
timeout=timeout, marker=marker)
listing = body_data
while listing:
marker = listing[-1]['name']
info, listing = self.base_request(
method, container, name, prefix, headers, proxy,
timeout=timeout, marker=marker)
if listing:
body_data.extend(listing)
return [info, body_data]
if headers is None:
headers = {}
if self.token:
headers['X-Auth-Token'] = self.token
if container:
url = '%s/%s' % (url.rstrip('/'), quote(container))
if name:
url = '%s/%s' % (url.rstrip('/'), quote(name))
else:
params = ['format=json']
if prefix:
params.append('prefix=%s' % prefix)
if marker:
params.append('marker=%s' % quote(marker))
url += '?' + '&'.join(params)
req = urllib2.Request(url, headers=headers, data=contents)
if proxy:
proxy = urllib.parse.urlparse(proxy)
req.set_proxy(proxy.netloc, proxy.scheme)
req.get_method = lambda: method
conn = urllib2.urlopen(req, timeout=timeout)
body = conn.read()
info = conn.info()
try:
body_data = json.loads(body)
except ValueError:
body_data = None
trans_stop = time()
if logger:
sent_content_length = 0
for n, v in headers.items():
nl = n.lower()
if nl == 'content-length':
try:
sent_content_length = int(v)
break
except ValueError:
pass
logger.debug("-> " + " ".join(
quote(str(x) if x else "-", ":/")
for x in (
strftime('%Y-%m-%dT%H:%M:%S', gmtime(trans_stop)),
method,
url,
conn.getcode(),
sent_content_length,
info['content-length'],
trans_start,
trans_stop,
trans_stop - trans_start,
additional_info
)))
return [info, body_data]
def retry_request(self, method, **kwargs):
retries = kwargs.pop('retries', self.retries)
self.attempts = 0
backoff = self.starting_backoff
while self.attempts <= retries:
self.attempts += 1
try:
return self.base_request(method, **kwargs)
except urllib2.HTTPError as err:
if is_client_error(err.getcode() or 500):
raise ClientException('Client error',
http_status=err.getcode())
elif self.attempts > retries:
                    raise ClientException('Too many retries',
                                          http_status=err.getcode())
except (socket.error, httplib.HTTPException, urllib2.URLError):
if self.attempts > retries:
raise
sleep(backoff)
backoff = min(backoff * 2, self.max_backoff)
def get_account(self, *args, **kwargs):
# Used in swift-dispersion-populate
return self.retry_request('GET', **kwargs)
def put_container(self, container, **kwargs):
# Used in swift-dispersion-populate
return self.retry_request('PUT', container=container, **kwargs)
def get_container(self, container, **kwargs):
# Used in swift-dispersion-populate
return self.retry_request('GET', container=container, **kwargs)
def put_object(self, container, name, contents, **kwargs):
# Used in swift-dispersion-populate
return self.retry_request('PUT', container=container, name=name,
contents=contents.read(), **kwargs)
def head_object(url, **kwargs):
"""For usage with container sync """
client = SimpleClient(url=url)
return client.retry_request('HEAD', **kwargs)
def put_object(url, **kwargs):
"""For usage with container sync """
client = SimpleClient(url=url)
client.retry_request('PUT', **kwargs)
def delete_object(url, **kwargs):
"""For usage with container sync """
client = SimpleClient(url=url)
client.retry_request('DELETE', **kwargs)
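# Illustrative sketch, not part of the upstream module: how the
# container-sync helpers above are typically called. The URL, token and
# names below are made-up placeholder values.
def _example_container_sync_calls():
    url = 'http://127.0.0.1:8080/v1/AUTH_test'
    token_headers = {'X-Auth-Token': 'AUTH_tk_example'}
    # HEAD the remote copy to compare timestamps/etags before syncing
    info, _body = head_object(url, container='synced', name='obj',
                              headers=token_headers)
    # push an updated object, then remove one that was deleted locally
    put_object(url, container='synced', name='obj',
               headers=token_headers, contents=b'payload')
    delete_object(url, container='synced', name='obj', headers=token_headers)
    return info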
| swift-master | swift/common/internal_client.py |
# Copyright (c) 2010-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous utility functions for use in generating responses.
Why not swift.common.utils, you ask? Because this way we can import things
from swob in here without creating circular imports.
"""
import itertools
import sys
import time
import six
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.constraints import AUTO_CREATE_ACCOUNT_PREFIX, \
CONTAINER_LISTING_LIMIT
from swift.common.storage_policy import POLICIES
from swift.common.exceptions import ListingIterError, SegmentError
from swift.common.http import is_success, is_server_error
from swift.common.swob import HTTPBadRequest, \
HTTPServiceUnavailable, Range, is_chunked, multi_range_iterator, \
HTTPPreconditionFailed, wsgi_to_bytes, wsgi_unquote, wsgi_to_str
from swift.common.utils import split_path, validate_device_partition, \
close_if_possible, maybe_multipart_byteranges_to_document_iters, \
multipart_byteranges_to_document_iters, parse_content_type, \
parse_content_range, csv_append, list_from_csv, Spliterator, quote, \
RESERVED, config_true_value, md5, CloseableChain, select_ip_port
from swift.common.wsgi import make_subrequest
OBJECT_TRANSIENT_SYSMETA_PREFIX = 'x-object-transient-sysmeta-'
OBJECT_SYSMETA_CONTAINER_UPDATE_OVERRIDE_PREFIX = \
'x-object-sysmeta-container-update-override-'
USE_REPLICATION_NETWORK_HEADER = 'x-backend-use-replication-network'
MISPLACED_OBJECTS_ACCOUNT = '.misplaced_objects'
if six.PY2:
import cgi
def html_escape(s, quote=True):
return cgi.escape(s, quote=quote)
else:
from html import escape as html_escape # noqa: F401
def get_param(req, name, default=None):
"""
Get a parameter from an HTTP request ensuring proper handling UTF-8
encoding.
:param req: request object
:param name: parameter name
:param default: result to return if the parameter is not found
:returns: HTTP request parameter value, as a native string
(in py2, as UTF-8 encoded str, not unicode object)
:raises HTTPBadRequest: if param not valid UTF-8 byte sequence
"""
value = req.params.get(name, default)
if six.PY2:
if value and not isinstance(value, six.text_type):
try:
value.decode('utf8') # Ensure UTF8ness
except UnicodeDecodeError:
raise HTTPBadRequest(
request=req, content_type='text/plain',
body='"%s" parameter not valid UTF-8' % name)
else:
if value:
# req.params is a dict of WSGI strings, so encoding will succeed
value = value.encode('latin1')
try:
# Ensure UTF8ness since we're at it
value = value.decode('utf8')
except UnicodeDecodeError:
raise HTTPBadRequest(
request=req, content_type='text/plain',
body='"%s" parameter not valid UTF-8' % name)
return value
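# A minimal sketch (not upstream code) of get_param() in action: query
# parameters arrive as WSGI strings and come back as native strings, with
# invalid UTF-8 rejected via HTTPBadRequest. The request path is made up.
def _example_get_param():
    from swift.common.swob import Request
    req = Request.blank('/v1/a/c?prefix=caf%C3%A9&limit=10')
    prefix = get_param(req, 'prefix')                # -> 'caf\xe9'
    missing = get_param(req, 'marker', default='')   # falls back to default
    return prefix, missing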
def validate_params(req, names):
"""
Get list of parameters from an HTTP request, validating the encoding of
each parameter.
:param req: request object
:param names: parameter names
:returns: a dict mapping parameter names to values for each name that
appears in the request parameters
:raises HTTPBadRequest: if any parameter value is not a valid UTF-8 byte
sequence
"""
params = {}
for name in names:
value = get_param(req, name)
if value is None:
continue
params[name] = value
return params
def constrain_req_limit(req, constrained_limit):
given_limit = get_param(req, 'limit')
limit = constrained_limit
if given_limit and given_limit.isdigit():
limit = int(given_limit)
if limit > constrained_limit:
raise HTTPPreconditionFailed(
request=req, body='Maximum limit is %d' % constrained_limit)
return limit
def validate_container_params(req):
params = validate_params(req, ('marker', 'end_marker', 'prefix',
'delimiter', 'path', 'format', 'reverse',
'states', 'includes'))
params['limit'] = constrain_req_limit(req, CONTAINER_LISTING_LIMIT)
return params
def _validate_internal_name(name, type_='name'):
if RESERVED in name and not name.startswith(RESERVED):
raise HTTPBadRequest(body='Invalid reserved-namespace %s' % (type_))
def validate_internal_account(account):
"""
Validate internal account name.
:raises: HTTPBadRequest
"""
_validate_internal_name(account, 'account')
def validate_internal_container(account, container):
"""
Validate internal account and container names.
:raises: HTTPBadRequest
"""
if not account:
raise ValueError('Account is required')
validate_internal_account(account)
if container:
_validate_internal_name(container, 'container')
def validate_internal_obj(account, container, obj):
"""
Validate internal account, container and object names.
:raises: HTTPBadRequest
"""
if not account:
raise ValueError('Account is required')
if not container:
raise ValueError('Container is required')
validate_internal_container(account, container)
if obj and not (account.startswith(AUTO_CREATE_ACCOUNT_PREFIX) or
account == MISPLACED_OBJECTS_ACCOUNT):
_validate_internal_name(obj, 'object')
if container.startswith(RESERVED) and not obj.startswith(RESERVED):
raise HTTPBadRequest(body='Invalid user-namespace object '
'in reserved-namespace container')
elif obj.startswith(RESERVED) and not container.startswith(RESERVED):
raise HTTPBadRequest(body='Invalid reserved-namespace object '
'in user-namespace container')
def get_name_and_placement(request, minsegs=1, maxsegs=None,
rest_with_last=False):
"""
Utility function to split and validate the request path and storage
policy. The storage policy index is extracted from the headers of
the request and converted to a StoragePolicy instance. The
remaining args are passed through to
:meth:`split_and_validate_path`.
:returns: a list, result of :meth:`split_and_validate_path` with
the BaseStoragePolicy instance appended on the end
:raises HTTPServiceUnavailable: if the path is invalid or no policy exists
with the extracted policy_index.
"""
policy_index = request.headers.get('X-Backend-Storage-Policy-Index')
policy = POLICIES.get_by_index(policy_index)
if not policy:
raise HTTPServiceUnavailable(
body="No policy with index %s" % policy_index,
request=request, content_type='text/plain')
results = split_and_validate_path(request, minsegs=minsegs,
maxsegs=maxsegs,
rest_with_last=rest_with_last)
results.append(policy)
return results
def split_and_validate_path(request, minsegs=1, maxsegs=None,
rest_with_last=False):
"""
Utility function to split and validate the request path.
:returns: result of :meth:`~swift.common.utils.split_path` if
everything's okay, as native strings
:raises HTTPBadRequest: if something's not okay
"""
try:
segs = request.split_path(minsegs, maxsegs, rest_with_last)
validate_device_partition(segs[0], segs[1])
return [wsgi_to_str(seg) for seg in segs]
except ValueError as err:
raise HTTPBadRequest(body=str(err), request=request,
content_type='text/plain')
def is_user_meta(server_type, key):
"""
Tests if a header key starts with and is longer than the user
metadata prefix for given server type.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: True if the key satisfies the test, False otherwise
"""
if len(key) <= 8 + len(server_type):
return False
return key.lower().startswith(get_user_meta_prefix(server_type))
def is_sys_meta(server_type, key):
"""
Tests if a header key starts with and is longer than the system
metadata prefix for given server type.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: True if the key satisfies the test, False otherwise
"""
if len(key) <= 11 + len(server_type):
return False
return key.lower().startswith(get_sys_meta_prefix(server_type))
def is_sys_or_user_meta(server_type, key):
"""
Tests if a header key starts with and is longer than the user or system
metadata prefix for given server type.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: True if the key satisfies the test, False otherwise
"""
return is_user_meta(server_type, key) or is_sys_meta(server_type, key)
def is_object_transient_sysmeta(key):
"""
Tests if a header key starts with and is longer than the prefix for object
transient system metadata.
:param key: header key
:returns: True if the key satisfies the test, False otherwise
"""
if len(key) <= len(OBJECT_TRANSIENT_SYSMETA_PREFIX):
return False
return key.lower().startswith(OBJECT_TRANSIENT_SYSMETA_PREFIX)
def strip_user_meta_prefix(server_type, key):
"""
Removes the user metadata prefix for a given server type from the start
of a header key.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: stripped header key
"""
if not is_user_meta(server_type, key):
raise ValueError('Key is not user meta')
return key[len(get_user_meta_prefix(server_type)):]
def strip_sys_meta_prefix(server_type, key):
"""
Removes the system metadata prefix for a given server type from the start
of a header key.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: stripped header key
"""
if not is_sys_meta(server_type, key):
raise ValueError('Key is not sysmeta')
return key[len(get_sys_meta_prefix(server_type)):]
def strip_object_transient_sysmeta_prefix(key):
"""
Removes the object transient system metadata prefix from the start of a
header key.
:param key: header key
:returns: stripped header key
"""
if not is_object_transient_sysmeta(key):
raise ValueError('Key is not object transient sysmeta')
return key[len(OBJECT_TRANSIENT_SYSMETA_PREFIX):]
def get_user_meta_prefix(server_type):
"""
Returns the prefix for user metadata headers for given server type.
This prefix defines the namespace for headers that will be persisted
by backend servers.
:param server_type: type of backend server i.e. [account|container|object]
:returns: prefix string for server type's user metadata headers
"""
return 'x-%s-%s-' % (server_type.lower(), 'meta')
def get_sys_meta_prefix(server_type):
"""
Returns the prefix for system metadata headers for given server type.
This prefix defines the namespace for headers that will be persisted
by backend servers.
:param server_type: type of backend server i.e. [account|container|object]
:returns: prefix string for server type's system metadata headers
"""
return 'x-%s-%s-' % (server_type.lower(), 'sysmeta')
def get_object_transient_sysmeta(key):
"""
Returns the Object Transient System Metadata header for key.
The Object Transient System Metadata namespace will be persisted by
backend object servers. These headers are treated in the same way as
object user metadata i.e. all headers in this namespace will be
replaced on every POST request.
:param key: metadata key
:returns: the entire object transient system metadata header for key
"""
return '%s%s' % (OBJECT_TRANSIENT_SYSMETA_PREFIX, key)
def get_container_update_override_key(key):
"""
Returns the full X-Object-Sysmeta-Container-Update-Override-* header key.
:param key: the key you want to override in the container update
:returns: the full header key
"""
header = '%s%s' % (OBJECT_SYSMETA_CONTAINER_UPDATE_OVERRIDE_PREFIX, key)
return header.title()
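# A small illustrative sketch (not upstream code) of how the metadata
# prefix helpers above fit together for the 'object' server type.
def _example_meta_prefixes():
    assert get_user_meta_prefix('object') == 'x-object-meta-'
    assert get_sys_meta_prefix('object') == 'x-object-sysmeta-'
    assert is_user_meta('object', 'X-Object-Meta-Color')
    assert strip_user_meta_prefix('object', 'x-object-meta-color') == 'color'
    assert get_container_update_override_key('etag') == \
        'X-Object-Sysmeta-Container-Update-Override-Etag'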
def get_reserved_name(*parts):
"""
Generate a valid reserved name that joins the component parts.
:returns: a string
"""
if any(RESERVED in p for p in parts):
raise ValueError('Invalid reserved part in components')
return RESERVED + RESERVED.join(parts)
def split_reserved_name(name):
"""
Separate a valid reserved name into the component parts.
:returns: a list of strings
"""
if not name.startswith(RESERVED):
raise ValueError('Invalid reserved name')
return name.split(RESERVED)[1:]
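# Illustrative only (not upstream code): round-tripping a reserved name.
# Because names containing RESERVED are rejected from client requests,
# reserved names cannot collide with user-visible names.
def _example_reserved_names():
    name = get_reserved_name('versions', 'obj')
    assert name == RESERVED + 'versions' + RESERVED + 'obj'
    assert split_reserved_name(name) == ['versions', 'obj']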
def remove_items(headers, condition):
"""
Removes items from a dict whose keys satisfy
the given condition.
:param headers: a dict of headers
:param condition: a function that will be passed the header key as a
single argument and should return True if the header
is to be removed.
:returns: a dict, possibly empty, of headers that have been removed
"""
removed = {}
keys = [key for key in headers if condition(key)]
removed.update((key, headers.pop(key)) for key in keys)
return removed
def copy_header_subset(from_r, to_r, condition):
"""
Will copy desired subset of headers from from_r to to_r.
:param from_r: a swob Request or Response
:param to_r: a swob Request or Response
:param condition: a function that will be passed the header key as a
single argument and should return True if the header
is to be copied.
"""
for k, v in from_r.headers.items():
if condition(k):
to_r.headers[k] = v
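# Illustrative only (not upstream code): splitting user metadata out of a
# header dict with remove_items(); the header values are made up.
def _example_header_filtering():
    headers = {'X-Object-Meta-Color': 'blue', 'Content-Length': '4'}
    removed = remove_items(headers, lambda k: is_user_meta('object', k))
    assert removed == {'X-Object-Meta-Color': 'blue'}
    assert headers == {'Content-Length': '4'}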
def check_path_header(req, name, length, error_msg):
"""
Validate that the value of path-like header is
well formatted. We assume the caller ensures that
specific header is present in req.headers.
:param req: HTTP request object
:param name: header name
:param length: length of path segment check
:param error_msg: error message for client
:returns: A tuple with path parts according to length
:raise: HTTPPreconditionFailed if header value
is not well formatted.
"""
hdr = wsgi_unquote(req.headers.get(name))
if not hdr.startswith('/'):
hdr = '/' + hdr
try:
return split_path(hdr, length, length, True)
except ValueError:
raise HTTPPreconditionFailed(
request=req,
body=error_msg)
class SegmentedIterable(object):
"""
Iterable that returns the object contents for a large object.
:param req: original request object
:param app: WSGI application from which segments will come
    :param listing_iter: iterable yielding the object segments to fetch,
        along with the byte subranges to fetch. Each item is a dict with the
        keys 'path', 'hash', 'bytes', 'first_byte' and 'last_byte' (or
        'raw_data' in place of 'path' for inlined data segments).
        If 'hash' is None, no MD5 verification will be done.
        If 'bytes' is None, no length verification will be done.
        If 'first_byte' and 'last_byte' are None, then the entire segment
        will be fetched.
:param max_get_time: maximum permitted duration of a GET request (seconds)
:param logger: logger object
:param swift_source: value of swift.source in subrequest environ
(just for logging)
:param ua_suffix: string to append to user-agent.
:param name: name of manifest (used in logging only)
:param response_body_length: optional response body length for
the response being sent to the client.
"""
def __init__(self, req, app, listing_iter, max_get_time,
logger, ua_suffix, swift_source,
name='<not specified>', response_body_length=None):
self.req = req
self.app = app
self.listing_iter = listing_iter
self.max_get_time = max_get_time
self.logger = logger
self.ua_suffix = " " + ua_suffix
self.swift_source = swift_source
self.name = name
self.response_body_length = response_body_length
self.peeked_chunk = None
self.app_iter = self._internal_iter()
self.validated_first_segment = False
self.current_resp = None
def _coalesce_requests(self):
pending_req = pending_etag = pending_size = None
try:
for seg_dict in self.listing_iter:
if 'raw_data' in seg_dict:
if pending_req:
yield pending_req, pending_etag, pending_size
to_yield = seg_dict['raw_data'][
seg_dict['first_byte']:seg_dict['last_byte'] + 1]
yield to_yield, None, len(seg_dict['raw_data'])
pending_req = pending_etag = pending_size = None
continue
seg_path, seg_etag, seg_size, first_byte, last_byte = (
seg_dict['path'], seg_dict.get('hash'),
seg_dict.get('bytes'),
seg_dict['first_byte'], seg_dict['last_byte'])
if seg_size is not None:
seg_size = int(seg_size)
first_byte = first_byte or 0
go_to_end = last_byte is None or (
seg_size is not None and last_byte == seg_size - 1)
# The "multipart-manifest=get" query param ensures that the
# segment is a plain old object, not some flavor of large
# object; therefore, its etag is its MD5sum and hence we can
# check it.
path = quote(seg_path) + '?multipart-manifest=get'
seg_req = make_subrequest(
self.req.environ, path=path, method='GET',
headers={'x-auth-token': self.req.headers.get(
'x-auth-token')},
agent=('%(orig)s ' + self.ua_suffix),
swift_source=self.swift_source)
seg_req_rangeval = None
if first_byte != 0 or not go_to_end:
seg_req_rangeval = "%s-%s" % (
first_byte, '' if go_to_end else last_byte)
seg_req.headers['Range'] = "bytes=" + seg_req_rangeval
# We can only coalesce if paths match and we know the segment
# size (so we can check that the ranges will be allowed)
if pending_req and pending_req.path == seg_req.path and \
seg_size is not None:
# Make a new Range object so that we don't goof up the
# existing one in case of invalid ranges. Note that a
# range set with too many individual byteranges is
# invalid, so we can combine N valid byteranges and 1
# valid byterange and get an invalid range set.
if pending_req.range:
new_range_str = str(pending_req.range)
else:
new_range_str = "bytes=0-%d" % (seg_size - 1)
if seg_req.range:
new_range_str += "," + seg_req_rangeval
else:
new_range_str += ",0-%d" % (seg_size - 1)
if Range(new_range_str).ranges_for_length(seg_size):
# Good news! We can coalesce the requests
pending_req.headers['Range'] = new_range_str
continue
# else, Too many ranges, or too much backtracking, or ...
if pending_req:
yield pending_req, pending_etag, pending_size
pending_req = seg_req
pending_etag = seg_etag
pending_size = seg_size
except ListingIterError:
e_type, e_value, e_traceback = sys.exc_info()
if pending_req:
yield pending_req, pending_etag, pending_size
six.reraise(e_type, e_value, e_traceback)
if pending_req:
yield pending_req, pending_etag, pending_size
def _requests_to_bytes_iter(self):
# Take the requests out of self._coalesce_requests, actually make
# the requests, and generate the bytes from the responses.
#
# Yields 2-tuples (segment-name, byte-chunk). The segment name is
# used for logging.
for data_or_req, seg_etag, seg_size in self._coalesce_requests():
if isinstance(data_or_req, bytes): # ugly, awful overloading
yield ('data segment', data_or_req)
continue
seg_req = data_or_req
seg_resp = seg_req.get_response(self.app)
if not is_success(seg_resp.status_int):
# Error body should be short
body = seg_resp.body
if not six.PY2:
body = body.decode('utf8')
msg = 'While processing manifest %s, got %d (%s) ' \
'while retrieving %s' % (
self.name, seg_resp.status_int,
body if len(body) <= 60 else body[:57] + '...',
seg_req.path)
if is_server_error(seg_resp.status_int):
self.logger.error(msg)
raise HTTPServiceUnavailable(
request=seg_req, content_type='text/plain')
raise SegmentError(msg)
elif ((seg_etag and (seg_resp.etag != seg_etag)) or
(seg_size and (seg_resp.content_length != seg_size) and
not seg_req.range)):
# The content-length check is for security reasons. Seems
# possible that an attacker could upload a >1mb object and
# then replace it with a much smaller object with same
# etag. Then create a big nested SLO that calls that
# object many times which would hammer our obj servers. If
# this is a range request, don't check content-length
# because it won't match.
close_if_possible(seg_resp.app_iter)
raise SegmentError(
'Object segment no longer valid: '
'%(path)s etag: %(r_etag)s != %(s_etag)s or '
'%(r_size)s != %(s_size)s.' %
{'path': seg_req.path, 'r_etag': seg_resp.etag,
'r_size': seg_resp.content_length,
's_etag': seg_etag,
's_size': seg_size})
else:
self.current_resp = seg_resp
resp_len = 0
seg_hash = None
if seg_resp.etag and not seg_req.headers.get('Range'):
                # Only calculate the MD5 if we can use it to validate
seg_hash = md5(usedforsecurity=False)
document_iters = maybe_multipart_byteranges_to_document_iters(
seg_resp.app_iter,
seg_resp.headers['Content-Type'])
for chunk in itertools.chain.from_iterable(document_iters):
if seg_hash:
seg_hash.update(chunk)
resp_len += len(chunk)
yield (seg_req.path, chunk)
close_if_possible(seg_resp.app_iter)
if seg_hash:
if resp_len != seg_resp.content_length:
raise SegmentError(
"Bad response length for %(seg)s as part of %(name)s: "
"headers had %(from_headers)s, but response length "
"was actually %(actual)s" %
{'seg': seg_req.path,
'from_headers': seg_resp.content_length,
'name': self.name, 'actual': resp_len})
if seg_hash.hexdigest() != seg_resp.etag:
raise SegmentError(
"Bad MD5 checksum for %(seg)s as part of %(name)s: "
"headers had %(etag)s, but object MD5 was actually "
"%(actual)s" %
{'seg': seg_req.path, 'etag': seg_resp.etag,
'name': self.name, 'actual': seg_hash.hexdigest()})
def _byte_counting_iter(self):
# Checks that we give the client the right number of bytes. Raises
# SegmentError if the number of bytes is wrong.
bytes_left = self.response_body_length
for seg_name, chunk in self._requests_to_bytes_iter():
if bytes_left is None:
yield chunk
elif bytes_left >= len(chunk):
yield chunk
bytes_left -= len(chunk)
else:
yield chunk[:bytes_left]
bytes_left -= len(chunk)
raise SegmentError(
'Too many bytes for %(name)s; truncating in '
'%(seg)s with %(left)d bytes left' %
{'name': self.name, 'seg': seg_name,
'left': -bytes_left})
if bytes_left:
raise SegmentError('Expected another %d bytes for %s; '
'closing connection' % (bytes_left, self.name))
def _time_limited_iter(self):
# Makes sure a GET response doesn't take more than self.max_get_time
# seconds to process. Raises an exception if things take too long.
start_time = time.time()
for chunk in self._byte_counting_iter():
now = time.time()
yield chunk
if now - start_time > self.max_get_time:
raise SegmentError(
'While processing manifest %s, '
'max LO GET time of %ds exceeded' %
(self.name, self.max_get_time))
def _internal_iter(self):
# Top level of our iterator stack: pass bytes through; catch and
# handle exceptions.
try:
for chunk in self._time_limited_iter():
yield chunk
except (ListingIterError, SegmentError) as err:
self.logger.error(err)
if not self.validated_first_segment:
raise
finally:
if self.current_resp:
close_if_possible(self.current_resp.app_iter)
def app_iter_range(self, *a, **kw):
"""
swob.Response will only respond with a 206 status in certain cases; one
of those is if the body iterator responds to .app_iter_range().
However, this object (or really, its listing iter) is smart enough to
handle the range stuff internally, so we just no-op this out for swob.
"""
return self
def app_iter_ranges(self, ranges, content_type, boundary, content_size):
"""
This method assumes that iter(self) yields all the data bytes that
go into the response, but none of the MIME stuff. For example, if
the response will contain three MIME docs with data "abcd", "efgh",
and "ijkl", then iter(self) will give out the bytes "abcdefghijkl".
This method inserts the MIME stuff around the data bytes.
"""
si = Spliterator(self)
mri = multi_range_iterator(
ranges, content_type, boundary, content_size,
lambda start, end_plus_one: si.take(end_plus_one - start))
try:
for x in mri:
yield x
finally:
self.close()
def validate_first_segment(self):
"""
Start fetching object data to ensure that the first segment (if any) is
valid. This is to catch cases like "first segment is missing" or
"first segment's etag doesn't match manifest".
Note: this does not validate that you have any segments. A
zero-segment large object is not erroneous; it is just empty.
"""
if self.validated_first_segment:
return
try:
self.peeked_chunk = next(self.app_iter)
except StopIteration:
pass
finally:
self.validated_first_segment = True
def __iter__(self):
if self.peeked_chunk is not None:
pc = self.peeked_chunk
self.peeked_chunk = None
return CloseableChain([pc], self.app_iter)
else:
return self.app_iter
def close(self):
"""
        Called when the client disconnects. Ensures that the connection to
        the backend server is closed.
"""
close_if_possible(self.app_iter)
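# Illustrative only (not upstream code): the shape of the items a
# SegmentedIterable expects from its listing_iter, roughly as the SLO/DLO
# middlewares produce them. All values below are made up.
_EXAMPLE_SEGMENT_LISTING = [
    {'path': '/v1/AUTH_test/segments/part-0001',
     'hash': '202cb962ac59075b964b07152d234b70',
     'bytes': 1048576, 'first_byte': None, 'last_byte': None},
    # an inlined "data" segment is served without any subrequest
    {'raw_data': b'glue-bytes', 'first_byte': 0, 'last_byte': 9},
]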
def http_response_to_document_iters(response, read_chunk_size=4096):
"""
Takes a successful object-GET HTTP response and turns it into an
iterator of (first-byte, last-byte, length, headers, body-file)
5-tuples.
The response must either be a 200 or a 206; if you feed in a 204 or
something similar, this probably won't work.
:param response: HTTP response, like from bufferedhttp.http_connect(),
not a swob.Response.
"""
chunked = is_chunked(dict(response.getheaders()))
if response.status == 200:
if chunked:
# Single "range" that's the whole object with an unknown length
return iter([(0, None, None, response.getheaders(),
response)])
# Single "range" that's the whole object
content_length = int(response.getheader('Content-Length'))
return iter([(0, content_length - 1, content_length,
response.getheaders(), response)])
content_type, params_list = parse_content_type(
response.getheader('Content-Type'))
if content_type != 'multipart/byteranges':
# Single range; no MIME framing, just the bytes. The start and end
# byte indices are in the Content-Range header.
start, end, length = parse_content_range(
response.getheader('Content-Range'))
return iter([(start, end, length, response.getheaders(), response)])
else:
# Multiple ranges; the response body is a multipart/byteranges MIME
# document, and we have to parse it using the MIME boundary
# extracted from the Content-Type header.
params = dict(params_list)
return multipart_byteranges_to_document_iters(
response, wsgi_to_bytes(params['boundary']), read_chunk_size)
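# A minimal consumer sketch (not upstream code): `response` is assumed to be
# a successful bufferedhttp object-GET response; each yielded body file
# supports read().
def _example_collect_ranges(response):
    collected = []
    for start, end, length, headers, body_file in \
            http_response_to_document_iters(response):
        collected.append((start, end, body_file.read()))
    return collected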
def update_etag_is_at_header(req, name):
"""
Helper function to update an X-Backend-Etag-Is-At header whose value is a
list of alternative header names at which the actual object etag may be
found. This informs the object server where to look for the actual object
etag when processing conditional requests.
Since the proxy server and/or middleware may set alternative etag header
names, the value of X-Backend-Etag-Is-At is a comma separated list which
the object server inspects in order until it finds an etag value.
:param req: a swob Request
:param name: name of a sysmeta where alternative etag may be found
"""
if ',' in name:
# HTTP header names should not have commas but we'll check anyway
raise ValueError('Header name must not contain commas')
existing = req.headers.get("X-Backend-Etag-Is-At")
req.headers["X-Backend-Etag-Is-At"] = csv_append(
existing, name)
def resolve_etag_is_at_header(req, metadata):
"""
Helper function to resolve an alternative etag value that may be stored in
metadata under an alternate name.
The value of the request's X-Backend-Etag-Is-At header (if it exists) is a
comma separated list of alternate names in the metadata at which an
alternate etag value may be found. This list is processed in order until an
alternate etag is found.
The left most value in X-Backend-Etag-Is-At will have been set by the left
most middleware, or if no middleware, by ECObjectController, if an EC
policy is in use. The left most middleware is assumed to be the authority
on what the etag value of the object content is.
The resolver will work from left to right in the list until it finds a
value that is a name in the given metadata. So the left most wins, IF it
exists in the metadata.
By way of example, assume the encrypter middleware is installed. If an
object is *not* encrypted then the resolver will not find the encrypter
middleware's alternate etag sysmeta (X-Object-Sysmeta-Crypto-Etag) but will
then find the EC alternate etag (if EC policy). But if the object *is*
encrypted then X-Object-Sysmeta-Crypto-Etag is found and used, which is
correct because it should be preferred over X-Object-Sysmeta-Ec-Etag.
:param req: a swob Request
:param metadata: a dict containing object metadata
:return: an alternate etag value if any is found, otherwise None
"""
alternate_etag = None
metadata = HeaderKeyDict(metadata)
if "X-Backend-Etag-Is-At" in req.headers:
names = list_from_csv(req.headers["X-Backend-Etag-Is-At"])
for name in names:
if name in metadata:
alternate_etag = metadata[name]
break
return alternate_etag
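# Illustrative only (not upstream code): a middleware advertises where its
# alternate etag lives, and the object server resolves it from the stored
# metadata. The header and metadata values are made up.
def _example_alternate_etag():
    from swift.common.swob import Request
    req = Request.blank('/v1/a/c/o')
    update_etag_is_at_header(req, 'X-Object-Sysmeta-Crypto-Etag')
    metadata = {'X-Object-Sysmeta-Crypto-Etag': 'cipher-etag',
                'ETag': 'plain-etag'}
    return resolve_etag_is_at_header(req, metadata)  # -> 'cipher-etag'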
def update_ignore_range_header(req, name):
"""
Helper function to update an X-Backend-Ignore-Range-If-Metadata-Present
header whose value is a list of header names which, if any are present
on an object, mean the object server should respond with a 200 instead
of a 206 or 416.
:param req: a swob Request
:param name: name of a header which, if found, indicates the proxy will
want the whole object
"""
if ',' in name:
# HTTP header names should not have commas but we'll check anyway
raise ValueError('Header name must not contain commas')
hdr = 'X-Backend-Ignore-Range-If-Metadata-Present'
req.headers[hdr] = csv_append(req.headers.get(hdr), name)
def is_use_replication_network(headers=None):
"""
Determine if replication network should be used.
:param headers: a dict of headers
:return: the value of the ``x-backend-use-replication-network`` item from
``headers``. If no ``headers`` are given or the item is not found then
False is returned.
"""
if headers:
for h, v in headers.items():
if h.lower() == USE_REPLICATION_NETWORK_HEADER:
return config_true_value(v)
return False
def get_ip_port(node, headers):
"""
Get the ip address and port that should be used for the given ``node``.
The normal ip address and port are returned unless the ``node`` or
``headers`` indicate that the replication ip address and port should be
used.
If the ``headers`` dict has an item with key
``x-backend-use-replication-network`` and a truthy value then the
replication ip address and port are returned. Otherwise if the ``node``
dict has an item with key ``use_replication`` and truthy value then the
replication ip address and port are returned. Otherwise the normal ip
address and port are returned.
:param node: a dict describing a node
:param headers: a dict of headers
:return: a tuple of (ip address, port)
"""
return select_ip_port(
node, use_replication=is_use_replication_network(headers))
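# Illustrative only (not upstream code): the node dict values are made up.
def _example_get_ip_port():
    node = {'ip': '10.0.0.1', 'port': 6200,
            'replication_ip': '10.1.0.1', 'replication_port': 6200}
    assert get_ip_port(node, {}) == ('10.0.0.1', 6200)
    assert get_ip_port(
        node, {USE_REPLICATION_NETWORK_HEADER: 'true'}) == ('10.1.0.1', 6200)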
| swift-master | swift/common/request_helpers.py |
# Copyright (c) 2022 NVIDIA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Used by get_swift_info and register_swift_info to store information about
# the swift cluster.
from copy import deepcopy
import six
_swift_info = {}
_swift_admin_info = {}
def get_swift_info(admin=False, disallowed_sections=None):
"""
Returns information about the swift cluster that has been previously
registered with the register_swift_info call.
:param admin: boolean value, if True will additionally return an 'admin'
section with information previously registered as admin
info.
:param disallowed_sections: list of section names to be withheld from the
information returned.
:returns: dictionary of information about the swift cluster.
"""
disallowed_sections = disallowed_sections or []
info = deepcopy(_swift_info)
for section in disallowed_sections:
key_to_pop = None
sub_section_dict = info
for sub_section in section.split('.'):
if key_to_pop:
sub_section_dict = sub_section_dict.get(key_to_pop, {})
if not isinstance(sub_section_dict, dict):
sub_section_dict = {}
break
key_to_pop = sub_section
sub_section_dict.pop(key_to_pop, None)
if admin:
info['admin'] = dict(_swift_admin_info)
info['admin']['disallowed_sections'] = list(disallowed_sections)
return info
def register_swift_info(name='swift', admin=False, **kwargs):
"""
Registers information about the swift cluster to be retrieved with calls
to get_swift_info.
NOTE: Do not use "." in the param: name or any keys in kwargs. "." is used
in the disallowed_sections to remove unwanted keys from /info.
:param name: string, the section name to place the information under.
:param admin: boolean, if True, information will be registered to an
admin section which can optionally be withheld when
requesting the information.
:param kwargs: key value arguments representing the information to be
added.
:raises ValueError: if name or any of the keys in kwargs has "." in it
"""
if name == 'admin' or name == 'disallowed_sections':
raise ValueError('\'{0}\' is reserved name.'.format(name))
if admin:
dict_to_use = _swift_admin_info
else:
dict_to_use = _swift_info
if name not in dict_to_use:
if "." in name:
raise ValueError('Cannot use "." in a swift_info key: %s' % name)
dict_to_use[name] = {}
for key, val in kwargs.items():
if "." in key:
raise ValueError('Cannot use "." in a swift_info key: %s' % key)
dict_to_use[name][key] = val
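# Illustrative only (not upstream code): how middleware typically registers
# /info data and how it is retrieved; the section names and values below are
# made up.
def _example_registry_usage():
    register_swift_info('example_mw', methods=['GET', 'HEAD'])
    register_swift_info(admin=True, max_example_size=1024)
    return get_swift_info(admin=True,
                          disallowed_sections=['example_mw.methods'])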
_sensitive_headers = set()
_sensitive_params = set()
def get_sensitive_headers():
"""
Returns the set of registered sensitive headers.
Used by :mod:`swift.common.middleware.proxy_logging` to perform redactions
prior to logging.
"""
return frozenset(_sensitive_headers)
def register_sensitive_header(header):
"""
Register a header as being "sensitive".
Sensitive headers are automatically redacted when logging. See the
``reveal_sensitive_prefix`` option in the proxy-server sample config
for more information.
:param header: The (case-insensitive) header name which, if present, may
contain sensitive information. Examples include ``X-Auth-Token`` and
(if s3api is enabled) ``Authorization``. Limited to ASCII characters.
"""
if not isinstance(header, str):
raise TypeError
if six.PY2:
header.decode('ascii')
else:
header.encode('ascii')
_sensitive_headers.add(header.lower())
def get_sensitive_params():
"""
Returns the set of registered sensitive query parameters.
Used by :mod:`swift.common.middleware.proxy_logging` to perform redactions
prior to logging.
"""
return frozenset(_sensitive_params)
def register_sensitive_param(query_param):
"""
Register a query parameter as being "sensitive".
Sensitive query parameters are automatically redacted when logging. See
the ``reveal_sensitive_prefix`` option in the proxy-server sample config
for more information.
:param query_param: The (case-sensitive) query parameter name which, if
present, may contain sensitive information. Examples include
``temp_url_signature`` and (if s3api is enabled) ``X-Amz-Signature``.
Limited to ASCII characters.
"""
if not isinstance(query_param, str):
raise TypeError
if six.PY2:
query_param.decode('ascii')
else:
query_param.encode('ascii')
_sensitive_params.add(query_param)
| swift-master | swift/common/registry.py |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import hashlib
import hmac
import os
import time
import six
from six.moves import configparser
from swift.common.utils import get_valid_utf8_str
class ContainerSyncRealms(object):
"""
Loads and parses the container-sync-realms.conf, occasionally
checking the file's mtime to see if it needs to be reloaded.
"""
def __init__(self, conf_path, logger):
self.conf_path = conf_path
self.logger = logger
self.next_mtime_check = 0
self.mtime_check_interval = 300
self.conf_path_mtime = 0
self.data = {}
self.reload()
def reload(self):
"""Forces a reload of the conf file."""
self.next_mtime_check = 0
self.conf_path_mtime = 0
self._reload()
def _reload(self):
now = time.time()
if now >= self.next_mtime_check:
self.next_mtime_check = now + self.mtime_check_interval
try:
mtime = os.path.getmtime(self.conf_path)
except OSError as err:
if err.errno == errno.ENOENT:
log_func = self.logger.debug
else:
log_func = self.logger.error
log_func('Could not load %(conf)r: %(error)s', {
'conf': self.conf_path, 'error': err})
else:
if mtime != self.conf_path_mtime:
self.conf_path_mtime = mtime
try:
conf = configparser.ConfigParser()
conf.read(self.conf_path)
except configparser.ParsingError as err:
self.logger.error(
'Could not load %(conf)r: %(error)s',
{'conf': self.conf_path, 'error': err})
else:
try:
self.mtime_check_interval = conf.getfloat(
'DEFAULT', 'mtime_check_interval')
self.next_mtime_check = \
now + self.mtime_check_interval
except configparser.NoOptionError:
self.mtime_check_interval = 300
self.next_mtime_check = \
now + self.mtime_check_interval
except (configparser.ParsingError, ValueError) as err:
self.logger.error(
'Error in %(conf)r with '
'mtime_check_interval: %(error)s',
{'conf': self.conf_path, 'error': err})
realms = {}
for section in conf.sections():
realm = {}
clusters = {}
for option, value in conf.items(section):
if option in ('key', 'key2'):
realm[option] = value
elif option.startswith('cluster_'):
clusters[option[8:].upper()] = value
realm['clusters'] = clusters
realms[section.upper()] = realm
self.data = realms
def realms(self):
"""Returns a list of realms."""
self._reload()
return list(self.data.keys())
def key(self, realm):
"""Returns the key for the realm."""
self._reload()
result = self.data.get(realm.upper())
if result:
result = result.get('key')
return result
def key2(self, realm):
"""Returns the key2 for the realm."""
self._reload()
result = self.data.get(realm.upper())
if result:
result = result.get('key2')
return result
def clusters(self, realm):
"""Returns a list of clusters for the realm."""
self._reload()
result = self.data.get(realm.upper())
if result:
result = result.get('clusters')
if result:
result = list(result.keys())
return result or []
def endpoint(self, realm, cluster):
"""Returns the endpoint for the cluster in the realm."""
self._reload()
result = None
realm_data = self.data.get(realm.upper())
if realm_data:
cluster_data = realm_data.get('clusters')
if cluster_data:
result = cluster_data.get(cluster.upper())
return result
def get_sig(self, request_method, path, x_timestamp, nonce, realm_key,
user_key):
"""
Returns the hexdigest string of the HMAC-SHA1 (RFC 2104) for
the information given.
:param request_method: HTTP method of the request.
:param path: The path to the resource (url-encoded).
:param x_timestamp: The X-Timestamp header value for the request.
:param nonce: A unique value for the request.
:param realm_key: Shared secret at the cluster operator level.
:param user_key: Shared secret at the user's container level.
:returns: hexdigest str of the HMAC-SHA1 for the request.
"""
nonce = get_valid_utf8_str(nonce)
realm_key = get_valid_utf8_str(realm_key)
user_key = get_valid_utf8_str(user_key)
        # XXX We don't yet know what the best approach is here; wait for
        # container sync to be tested.
if isinstance(path, six.text_type):
path = path.encode('utf-8')
return hmac.new(
realm_key,
b'%s\n%s\n%s\n%s\n%s' % (
request_method.encode('ascii'), path,
x_timestamp.encode('ascii'), nonce, user_key),
hashlib.sha1).hexdigest()
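# Editor's usage sketch (not part of the upstream module): how a caller might
# compute a container sync signature.  The conf path, keys, nonce and
# timestamp below are hypothetical; with a missing conf file the realms data
# is simply empty, but get_sig() needs no realm data to run.
if __name__ == '__main__':
    import logging
    realms = ContainerSyncRealms('/nonexistent/container-sync-realms.conf',
                                 logging.getLogger(__name__))
    sig = realms.get_sig('PUT', '/v1/AUTH_test/sync_container/obj',
                         '1525354848.00000', 'some-nonce',
                         'realm-key', 'user-key')
    print(sig)  # 40-character hex HMAC-SHA1 digest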
| swift-master | swift/common/container_sync_realms.py |
# Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ctypes
from ctypes.util import find_library
import six
__all__ = ['linkat']
class Linkat(object):
# From include/uapi/linux/fcntl.h
AT_FDCWD = -100
AT_SYMLINK_FOLLOW = 0x400
__slots__ = '_c_linkat'
def __init__(self):
libc = ctypes.CDLL(find_library('c'), use_errno=True)
try:
c_linkat = libc.linkat
except AttributeError:
self._c_linkat = None
return
c_linkat.argtypes = [ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_char_p,
ctypes.c_int]
c_linkat.restype = ctypes.c_int
def errcheck(result, func, arguments):
if result == -1:
                # ctypes.set_errno(0) returns the previous errno value while
                # clearing it, so this both reads and resets the error code
                errno = ctypes.set_errno(0)
raise IOError(errno, 'linkat: %s' % os.strerror(errno))
else:
return result
c_linkat.errcheck = errcheck
self._c_linkat = c_linkat
@property
def available(self):
return self._c_linkat is not None
def __call__(self, olddirfd, oldpath, newdirfd, newpath, flags):
"""
linkat() creates a new link (also known as a hard link)
to an existing file.
See `man 2 linkat` for more info.
"""
if not self.available:
raise EnvironmentError('linkat not available')
if not isinstance(olddirfd, int) or not isinstance(newdirfd, int):
raise TypeError("fd must be an integer.")
if isinstance(oldpath, six.text_type):
oldpath = oldpath.encode('utf8')
if isinstance(newpath, six.text_type):
newpath = newpath.encode('utf8')
return self._c_linkat(olddirfd, oldpath, newdirfd, newpath, flags)
linkat = Linkat()
del Linkat
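# Editor's usage sketch (not part of the upstream module): creating a hard
# link with the module-level linkat instance.  The /tmp paths are
# hypothetical, and this only works on Linux where libc exposes linkat().
if __name__ == '__main__':
    import errno
    if linkat.available:
        with open('/tmp/linkat_src', 'w') as f:
            f.write('hello')
        try:
            # flags=0: oldpath is used as-is; no AT_SYMLINK_FOLLOW needed here
            linkat(linkat.AT_FDCWD, '/tmp/linkat_src',
                   linkat.AT_FDCWD, '/tmp/linkat_dst', 0)
        except IOError as err:
            if err.errno != errno.EEXIST:
                raise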
| swift-master | swift/common/linkat.py |
# Copyright (c) 2010-2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from random import random
from eventlet import Timeout
import swift.common.db
from swift.common.utils import get_logger, audit_location_generator, \
config_true_value, dump_recon_cache, EventletRateLimiter
from swift.common.daemon import Daemon
from swift.common.exceptions import DatabaseAuditorException
from swift.common.recon import DEFAULT_RECON_CACHE_PATH, \
server_type_to_recon_file
class DatabaseAuditor(Daemon):
"""Base Database Auditor."""
@property
def rcache(self):
return os.path.join(
self.recon_cache_path,
server_type_to_recon_file(self.server_type))
@property
def server_type(self):
raise NotImplementedError
@property
def broker_class(self):
raise NotImplementedError
def __init__(self, conf, logger=None):
self.conf = conf
self.logger = logger or get_logger(conf, log_route='{}-auditor'.format(
self.server_type))
self.devices = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.interval = float(conf.get('interval', 1800))
self.logging_interval = 3600 # once an hour
self.passes = 0
self.failures = 0
self.max_dbs_per_second = \
float(conf.get('{}s_per_second'.format(self.server_type), 200))
self.rate_limiter = EventletRateLimiter(self.max_dbs_per_second)
swift.common.db.DB_PREALLOCATION = \
config_true_value(conf.get('db_preallocation', 'f'))
self.recon_cache_path = conf.get('recon_cache_path',
DEFAULT_RECON_CACHE_PATH)
self.datadir = '{}s'.format(self.server_type)
def _one_audit_pass(self, reported):
all_locs = audit_location_generator(self.devices, self.datadir, '.db',
mount_check=self.mount_check,
logger=self.logger)
for path, device, partition in all_locs:
self.audit(path)
if time.time() - reported >= self.logging_interval:
self.logger.info(
'Since %(time)s: %(server_type)s audits: %(pass)s '
'passed audit, %(fail)s failed audit',
{'time': time.ctime(reported),
'pass': self.passes,
'fail': self.failures,
'server_type': self.server_type})
dump_recon_cache(
{'{}_audits_since'.format(self.server_type): reported,
'{}_audits_passed'.format(self.server_type): self.passes,
'{}_audits_failed'.format(self.server_type):
self.failures},
self.rcache, self.logger)
reported = time.time()
self.passes = 0
self.failures = 0
self.rate_limiter.wait()
return reported
def run_forever(self, *args, **kwargs):
"""Run the database audit until stopped."""
reported = time.time()
time.sleep(random() * self.interval)
while True:
self.logger.info(
'Begin {} audit pass.'.format(self.server_type))
begin = time.time()
try:
reported = self._one_audit_pass(reported)
except (Exception, Timeout):
self.logger.increment('errors')
self.logger.exception('ERROR auditing')
elapsed = time.time() - begin
self.logger.info(
'%(server_type)s audit pass completed: %(elapsed).02fs',
{'elapsed': elapsed, 'server_type': self.server_type.title()})
dump_recon_cache({
'{}_auditor_pass_completed'.format(self.server_type): elapsed},
self.rcache, self.logger)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""Run the database audit once."""
self.logger.info(
'Begin {} audit "once" mode'.format(self.server_type))
begin = reported = time.time()
self._one_audit_pass(reported)
elapsed = time.time() - begin
self.logger.info(
'%(server_type)s audit "once" mode completed: %(elapsed).02fs',
{'elapsed': elapsed, 'server_type': self.server_type.title()})
dump_recon_cache(
{'{}_auditor_pass_completed'.format(self.server_type): elapsed},
self.rcache, self.logger)
def audit(self, path):
"""
Audits the given database path
:param path: the path to a db
"""
start_time = time.time()
try:
broker = self.broker_class(path, logger=self.logger)
if not broker.is_deleted():
info = broker.get_info()
err = self._audit(info, broker)
if err:
raise err
self.logger.increment('passes')
self.passes += 1
self.logger.debug('Audit passed for %s', broker)
except DatabaseAuditorException as e:
self.logger.increment('failures')
self.failures += 1
self.logger.error('Audit Failed for %(path)s: %(err)s',
{'path': path, 'err': str(e)})
except (Exception, Timeout):
self.logger.increment('failures')
self.failures += 1
self.logger.exception(
'ERROR Could not get %(server_type)s info %(path)s',
{'server_type': self.server_type, 'path': path})
self.logger.timing_since('timing', start_time)
def _audit(self, info, broker):
"""
Run any additional audit checks in sub auditor classes
:param info: The DB <account/container>_info
:param broker: The broker
:return: None on success, otherwise an exception to throw.
"""
raise NotImplementedError
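# Editor's sketch (not part of the upstream module): the minimal shape of a
# concrete auditor.  The broker import mirrors how an account auditor could
# plug in; the negative-count check is purely hypothetical.
class ExampleAccountAuditor(DatabaseAuditor):
    server_type = 'account'
    @property
    def broker_class(self):
        from swift.account.backend import AccountBroker
        return AccountBroker
    def _audit(self, info, broker):
        # Return None on success, or an exception instance to record failure
        if info.get('container_count', 0) < 0:
            return DatabaseAuditorException('negative container count')
        return None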
| swift-master | swift/common/db_auditor.py |
"""Code common to all of Swift."""
| swift-master | swift/common/__init__.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Monkey Patch httplib.HTTPResponse to buffer reads of headers. This can improve
performance when making large numbers of small HTTP requests. This module
also provides helper functions to make HTTP connections using
BufferedHTTPResponse.
.. warning::
If you use this, be sure that the libraries you are using do not access
the socket directly (xmlrpclib, I'm looking at you :/), and instead
make all calls through httplib.
"""
from swift.common import constraints
import logging
import time
import socket
import eventlet
from eventlet.green.httplib import CONTINUE, HTTPConnection, HTTPMessage, \
HTTPResponse, HTTPSConnection, _UNKNOWN, ImproperConnectionState
from six.moves.urllib.parse import quote, parse_qsl, urlencode
import six
if six.PY2:
httplib = eventlet.import_patched('httplib')
from eventlet.green import httplib as green_httplib
else:
httplib = eventlet.import_patched('http.client')
from eventlet.green.http import client as green_httplib
# Apparently http.server uses this to decide when/whether to send a 431.
# Give it some slack, so the app is more likely to get the chance to reject
# with a 400 instead.
httplib._MAXHEADERS = constraints.MAX_HEADER_COUNT * 1.6
green_httplib._MAXHEADERS = constraints.MAX_HEADER_COUNT * 1.6
class BufferedHTTPResponse(HTTPResponse):
"""HTTPResponse class that buffers reading of headers"""
def __init__(self, sock, debuglevel=0, strict=0,
method=None): # pragma: no cover
# sock should be an eventlet.greenio.GreenSocket
self.sock = sock
if sock is None:
# ...but it could be None if we close the connection as we're
# getting it wrapped up in a Response
self._real_socket = None
# No socket means no file-like -- set it to None like in
# HTTPResponse.close()
self.fp = None
elif six.PY2:
# sock.fd is a socket._socketobject
# sock.fd._sock is a _socket.socket object, which is what we want.
self._real_socket = sock.fd._sock
self.fp = sock.makefile('rb')
else:
# sock.fd is a socket.socket, which should have a _real_close
self._real_socket = sock.fd
self.fp = sock.makefile('rb')
self.debuglevel = debuglevel
self.strict = strict
self._method = method
self.headers = self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
self._readline_buffer = b''
if not six.PY2:
def begin(self):
HTTPResponse.begin(self)
header_payload = self.headers.get_payload()
if isinstance(header_payload, list) and len(header_payload) == 1:
header_payload = header_payload[0].get_payload()
if header_payload:
# This shouldn't be here. We must've bumped up against
# https://bugs.python.org/issue37093
for line in header_payload.rstrip('\r\n').split('\n'):
if ':' not in line or line[:1] in ' \t':
# Well, we're no more broken than we were before...
# Should we support line folding?
# How can/should we handle a bad header line?
break
header, value = line.split(':', 1)
value = value.strip(' \t\n\r')
self.headers.add_header(header, value)
def expect_response(self):
if self.fp:
self.fp.close()
self.fp = None
if not self.sock:
raise ImproperConnectionState('Socket already closed')
self.fp = self.sock.makefile('rb', 0)
version, status, reason = self._read_status()
if status != CONTINUE:
self._read_status = lambda: (version, status, reason)
self.begin()
else:
self.status = status
self.reason = reason.strip()
self.version = 11
if six.PY2:
# Under py2, HTTPMessage.__init__ reads the headers
# which advances fp
self.msg = HTTPMessage(self.fp, 0)
# immediately kill msg.fp to make sure it isn't read again
self.msg.fp = None
else:
# py3 has a separate helper for it
self.headers = self.msg = httplib.parse_headers(self.fp)
def read(self, amt=None):
if not self._readline_buffer:
return HTTPResponse.read(self, amt)
if amt is None:
# Unbounded read: send anything we have buffered plus whatever
# is left.
buffered = self._readline_buffer
self._readline_buffer = b''
return buffered + HTTPResponse.read(self, amt)
elif amt <= len(self._readline_buffer):
# Bounded read that we can satisfy entirely from our buffer
res = self._readline_buffer[:amt]
self._readline_buffer = self._readline_buffer[amt:]
return res
else:
# Bounded read that wants more bytes than we have
smaller_amt = amt - len(self._readline_buffer)
buf = self._readline_buffer
self._readline_buffer = b''
return buf + HTTPResponse.read(self, smaller_amt)
def readline(self, size=1024):
# You'd think Python's httplib would provide this, but it doesn't.
# It does, however, provide a comment in the HTTPResponse class:
#
# # XXX It would be nice to have readline and __iter__ for this,
# # too.
#
# Yes, it certainly would.
while (b'\n' not in self._readline_buffer
and len(self._readline_buffer) < size):
read_size = size - len(self._readline_buffer)
chunk = HTTPResponse.read(self, read_size)
if not chunk:
break
self._readline_buffer += chunk
line, newline, rest = self._readline_buffer.partition(b'\n')
self._readline_buffer = rest
return line + newline
def nuke_from_orbit(self):
"""
Terminate the socket with extreme prejudice.
Closes the underlying socket regardless of whether or not anyone else
has references to it. Use this when you are certain that nobody else
you care about has a reference to this socket.
"""
if self._real_socket:
if six.PY2:
# this is idempotent; see sock_close in Modules/socketmodule.c
# in the Python source for details.
self._real_socket.close()
else:
# Hopefully this is equivalent?
# TODO: verify that this does everything ^^^^ does for py2
self._real_socket._real_close()
self._real_socket = None
self.close()
def close(self):
HTTPResponse.close(self)
self.sock = None
self._real_socket = None
class BufferedHTTPConnection(HTTPConnection):
"""HTTPConnection class that uses BufferedHTTPResponse"""
response_class = BufferedHTTPResponse
def connect(self):
self._connected_time = time.time()
ret = HTTPConnection.connect(self)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
return ret
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
'''Send a request to the server.
:param method: specifies an HTTP request method, e.g. 'GET'.
:param url: specifies the object being requested, e.g. '/index.html'.
:param skip_host: if True does not add automatically a 'Host:' header
:param skip_accept_encoding: if True does not add automatically an
'Accept-Encoding:' header
'''
self._method = method
self._path = url
return HTTPConnection.putrequest(self, method, url, skip_host,
skip_accept_encoding)
def putheader(self, header, value):
if not isinstance(header, bytes):
header = header.encode('latin-1')
HTTPConnection.putheader(self, header, value)
def getexpect(self):
kwargs = {'method': self._method}
if hasattr(self, 'strict'):
kwargs['strict'] = self.strict
response = BufferedHTTPResponse(self.sock, **kwargs)
response.expect_response()
return response
def getresponse(self):
response = HTTPConnection.getresponse(self)
logging.debug("HTTP PERF: %(time).5f seconds to %(method)s "
"%(host)s:%(port)s %(path)s)",
{'time': time.time() - self._connected_time,
'method': self._method, 'host': self.host,
'port': self.port, 'path': self._path})
return response
def http_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
"""
    Helper function to create an HTTPConnection object. If ssl is set True,
    HTTPSConnection will be used. However, if ssl=False, a
    BufferedHTTPConnection will be used, which buffers header reads for
    backend Swift services.
:param ipaddr: IPv4 address to connect to
:param port: port to connect to
:param device: device of the node to query
:param partition: partition on the device
:param method: HTTP method to request ('GET', 'PUT', 'POST', etc.)
:param path: request path
:param headers: dictionary of headers
:param query_string: request query string
:param ssl: set True if SSL should be used (default: False)
:returns: HTTPConnection object
"""
if isinstance(path, six.text_type):
path = path.encode("utf-8")
if isinstance(device, six.text_type):
device = device.encode("utf-8")
if isinstance(partition, six.text_type):
partition = partition.encode('utf-8')
elif isinstance(partition, six.integer_types):
partition = str(partition).encode('ascii')
path = quote(b'/' + device + b'/' + partition + path)
return http_connect_raw(
ipaddr, port, method, path, headers, query_string, ssl)
def http_connect_raw(ipaddr, port, method, path, headers=None,
query_string=None, ssl=False):
"""
    Helper function to create an HTTPConnection object. If ssl is set True,
    HTTPSConnection will be used. However, if ssl=False, a
    BufferedHTTPConnection will be used, which buffers header reads for
    backend Swift services.
:param ipaddr: IPv4 address to connect to
:param port: port to connect to
:param method: HTTP method to request ('GET', 'PUT', 'POST', etc.)
:param path: request path
:param headers: dictionary of headers
:param query_string: request query string
:param ssl: set True if SSL should be used (default: False)
:returns: HTTPConnection object
"""
if not port:
port = 443 if ssl else 80
if ssl:
conn = HTTPSConnection('%s:%s' % (ipaddr, port))
else:
conn = BufferedHTTPConnection('%s:%s' % (ipaddr, port))
if query_string:
# Round trip to ensure proper quoting
if six.PY2:
query_string = urlencode(parse_qsl(
query_string, keep_blank_values=True))
else:
query_string = urlencode(
parse_qsl(query_string, keep_blank_values=True,
encoding='latin1'),
encoding='latin1')
path += '?' + query_string
conn.path = path
conn.putrequest(method, path, skip_host=(headers and 'Host' in headers))
if headers:
for header, value in headers.items():
conn.putheader(header, str(value))
conn.endheaders()
return conn
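# Editor's usage sketch (not part of the upstream module): issuing a backend
# GET with http_connect.  The address, device, partition and path below are
# hypothetical.
def _example_backend_get():
    conn = http_connect('127.0.0.1', 6200, 'sda1', 3,
                        'GET', '/AUTH_test/cont/obj',
                        headers={'X-Backend-Storage-Policy-Index': '0'})
    with eventlet.Timeout(15):
        resp = conn.getresponse()
        body = resp.read()
    return resp.status, body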
| swift-master | swift/common/bufferedhttp.py |
# Copyright (c) 2010-2022 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from eventlet import wsgi, websocket
import six
from swift.common.utils import generate_trans_id
from swift.common.http import HTTP_NO_CONTENT, HTTP_RESET_CONTENT, \
HTTP_NOT_MODIFIED
if six.PY2:
from eventlet.green import httplib as http_client
from cgi import escape
else:
from eventlet.green.http import client as http_client
from html import escape
class SwiftHttpProtocol(wsgi.HttpProtocol):
default_request_version = "HTTP/1.0"
def __init__(self, *args, **kwargs):
# See https://github.com/eventlet/eventlet/pull/590
self.pre_shutdown_bugfix_eventlet = not getattr(
websocket.WebSocketWSGI, '_WSGI_APP_ALWAYS_IDLE', None)
# Note this is not a new-style class, so super() won't work
wsgi.HttpProtocol.__init__(self, *args, **kwargs)
def log_request(self, *a):
"""
Turn off logging requests by the underlying WSGI software.
"""
pass
def log_message(self, f, *a):
"""
        Redirect logging of other messages by the underlying WSGI software.
"""
logger = getattr(self.server.app, 'logger', None)
if logger:
logger.error('ERROR WSGI: ' + f, *a)
else:
            # eventlet<=0.17.4 doesn't have an error method, and in newer
            # versions the output from error is the same as info anyway
self.server.log.info('ERROR WSGI: ' + f, *a)
class MessageClass(wsgi.HttpProtocol.MessageClass):
"""Subclass to see when the client didn't provide a Content-Type"""
# for py2:
def parsetype(self):
if self.typeheader is None:
self.typeheader = ''
wsgi.HttpProtocol.MessageClass.parsetype(self)
# for py3:
def get_default_type(self):
"""If the client didn't provide a content type, leave it blank."""
return ''
def parse_request(self):
"""Parse a request (inlined from cpython@7e293984).
The request should be stored in self.raw_requestline; the results
are in self.command, self.path, self.request_version and
self.headers.
Return True for success, False for failure; on failure, any relevant
error response has already been sent back.
"""
self.command = None # set in case of error on the first line
self.request_version = version = self.default_request_version
self.close_connection = True
requestline = self.raw_requestline
if not six.PY2:
requestline = requestline.decode('iso-8859-1')
requestline = requestline.rstrip('\r\n')
self.requestline = requestline
# Split off \x20 explicitly (see https://bugs.python.org/issue33973)
words = requestline.split(' ')
if len(words) == 0:
return False
if len(words) >= 3: # Enough to determine protocol version
version = words[-1]
try:
if not version.startswith('HTTP/'):
raise ValueError
base_version_number = version.split('/', 1)[1]
version_number = base_version_number.split(".")
# RFC 2145 section 3.1 says there can be only one "." and
# - major and minor numbers MUST be treated as
# separate integers;
# - HTTP/2.4 is a lower version than HTTP/2.13, which in
# turn is lower than HTTP/12.3;
# - Leading zeros MUST be ignored by recipients.
if len(version_number) != 2:
raise ValueError
version_number = int(version_number[0]), int(version_number[1])
except (ValueError, IndexError):
self.send_error(
400,
"Bad request version (%r)" % version)
return False
if version_number >= (1, 1) and \
self.protocol_version >= "HTTP/1.1":
self.close_connection = False
if version_number >= (2, 0):
self.send_error(
505,
"Invalid HTTP version (%s)" % base_version_number)
return False
self.request_version = version
if not 2 <= len(words) <= 3:
self.send_error(
400,
"Bad request syntax (%r)" % requestline)
return False
command, path = words[:2]
if len(words) == 2:
self.close_connection = True
if command != 'GET':
self.send_error(
400,
"Bad HTTP/0.9 request type (%r)" % command)
return False
if path.startswith(('http://', 'https://')):
host, sep, rest = path.partition('//')[2].partition('/')
if sep:
path = '/' + rest
self.command, self.path = command, path
# Examine the headers and look for a Connection directive.
if six.PY2:
self.headers = self.MessageClass(self.rfile, 0)
else:
try:
self.headers = http_client.parse_headers(
self.rfile,
_class=self.MessageClass)
except http_client.LineTooLong as err:
self.send_error(
431,
"Line too long",
str(err))
return False
except http_client.HTTPException as err:
self.send_error(
431,
"Too many headers",
str(err)
)
return False
conntype = self.headers.get('Connection', "")
if conntype.lower() == 'close':
self.close_connection = True
elif (conntype.lower() == 'keep-alive' and
self.protocol_version >= "HTTP/1.1"):
self.close_connection = False
# Examine the headers and look for an Expect directive
expect = self.headers.get('Expect', "")
if (expect.lower() == "100-continue" and
self.protocol_version >= "HTTP/1.1" and
self.request_version >= "HTTP/1.1"):
if not self.handle_expect_100():
return False
return True
if not six.PY2:
def get_environ(self, *args, **kwargs):
environ = wsgi.HttpProtocol.get_environ(self, *args, **kwargs)
header_payload = self.headers.get_payload()
if isinstance(header_payload, list) and len(header_payload) == 1:
header_payload = header_payload[0].get_payload()
if header_payload:
# This shouldn't be here. We must've bumped up against
# https://bugs.python.org/issue37093
headers_raw = list(environ['headers_raw'])
for line in header_payload.rstrip('\r\n').split('\n'):
if ':' not in line or line[:1] in ' \t':
# Well, we're no more broken than we were before...
# Should we support line folding?
# Should we 400 a bad header line?
break
header, value = line.split(':', 1)
value = value.strip(' \t\n\r')
# NB: Eventlet looks at the headers obj to figure out
# whether the client said the connection should close;
# see https://github.com/eventlet/eventlet/blob/v0.25.0/
# eventlet/wsgi.py#L504
self.headers.add_header(header, value)
headers_raw.append((header, value))
wsgi_key = 'HTTP_' + header.replace('-', '_').encode(
'latin1').upper().decode('latin1')
if wsgi_key in ('HTTP_CONTENT_LENGTH',
'HTTP_CONTENT_TYPE'):
wsgi_key = wsgi_key[5:]
environ[wsgi_key] = value
environ['headers_raw'] = tuple(headers_raw)
# Since we parsed some more headers, check to see if they
# change how our wsgi.input should behave
te = environ.get('HTTP_TRANSFER_ENCODING', '').lower()
if te.rsplit(',', 1)[-1].strip() == 'chunked':
environ['wsgi.input'].chunked_input = True
else:
length = environ.get('CONTENT_LENGTH')
if length:
length = int(length)
environ['wsgi.input'].content_length = length
if environ.get('HTTP_EXPECT', '').lower() == '100-continue':
environ['wsgi.input'].wfile = self.wfile
environ['wsgi.input'].wfile_line = \
b'HTTP/1.1 100 Continue\r\n'
return environ
def _read_request_line(self):
# Note this is not a new-style class, so super() won't work
got = wsgi.HttpProtocol._read_request_line(self)
# See https://github.com/eventlet/eventlet/pull/590
if self.pre_shutdown_bugfix_eventlet:
self.conn_state[2] = wsgi.STATE_REQUEST
return got
def handle_one_request(self):
# Note this is not a new-style class, so super() won't work
got = wsgi.HttpProtocol.handle_one_request(self)
# See https://github.com/eventlet/eventlet/pull/590
if self.pre_shutdown_bugfix_eventlet:
if self.conn_state[2] != wsgi.STATE_CLOSE:
self.conn_state[2] = wsgi.STATE_IDLE
return got
def send_error(self, code, message=None, explain=None):
"""Send and log an error reply, we are overriding the cpython parent
class method, so we can have logger generate txn_id's for error
response from wsgi since we are at the edge of the proxy server.
This sends an error response (so it must be called before any output
has been generated), logs the error, and finally sends a piece of HTML
explaining the error to the user.
:param code: an HTTP error code
3 digits
:param message: a simple optional 1 line reason phrase.
*( HTAB / SP / VCHAR / %x80-FF )
defaults to short entry matching the response code
:param explain: a detailed message defaults to the long entry
matching the response code.
"""
try:
shortmsg, longmsg = self.responses[code]
except KeyError:
shortmsg, longmsg = '???', '???'
if message is None:
message = shortmsg
if explain is None:
explain = longmsg
try:
# assume we have a LogAdapter
txn_id = self.server.app.logger.txn_id # just in case it was set
except AttributeError:
# turns out we don't have a LogAdapter, so go direct
txn_id = generate_trans_id('')
self.log_error("code %d, message %s, (txn: %s)", code,
message, txn_id)
else:
# we do have a LogAdapter, but likely not yet a txn_id
txn_id = txn_id or generate_trans_id('')
self.server.app.logger.txn_id = txn_id
self.log_error("code %d, message %s", code, message)
self.send_response(code, message)
self.send_header('Connection', 'close')
# Message body is omitted for cases described in:
# - RFC7230: 3.3. 1xx, 204(No Content), 304(Not Modified)
# - RFC7231: 6.3.6. 205(Reset Content)
body = None
exclude_status = (HTTP_NO_CONTENT,
HTTP_RESET_CONTENT,
HTTP_NOT_MODIFIED)
if (code >= 200 and
code not in exclude_status):
# HTML encode to prevent Cross Site Scripting attacks
# (see bug https://bugs.python.org/issue1100201)
content = (self.error_message_format % {
'code': code,
'message': escape(message, quote=False),
'explain': escape(explain, quote=False)
})
body = content.encode('UTF-8', 'replace')
self.send_header("Content-Type", self.error_content_type)
self.send_header('Content-Length', str(len(body)))
self.send_header('X-Trans-Id', txn_id)
self.send_header('X-Openstack-Request-Id', txn_id)
self.end_headers()
if self.command != 'HEAD' and body:
self.wfile.write(body)
class SwiftHttpProxiedProtocol(SwiftHttpProtocol):
"""
Protocol object that speaks HTTP, including multiple requests, but with
a single PROXY line as the very first thing coming in over the socket.
This is so we can learn what the client's IP address is when Swift is
behind a TLS terminator, like hitch, that does not understand HTTP and
so cannot add X-Forwarded-For or other similar headers.
See http://www.haproxy.org/download/1.7/doc/proxy-protocol.txt for
protocol details.
"""
def __init__(self, *a, **kw):
self.proxy_address = None
SwiftHttpProtocol.__init__(self, *a, **kw)
def handle_error(self, connection_line):
if not six.PY2:
connection_line = connection_line.decode('latin-1')
# No further processing will proceed on this connection under any
# circumstances. We always send the request into the superclass to
# handle any cleanup - this ensures that the request will not be
# processed.
self.rfile.close()
        # We don't really have any confidence that an HTTP Error will be
        # processable by the client, as our transmission has broken down
        # between ourselves and our gateway proxy before processing the
        # client protocol request. Hopefully the operator will know what
        # to do!
msg = 'Invalid PROXY line %r' % connection_line
        # Even assuming HTTP, we don't know what version of HTTP the
        # client is sending. This entire endeavor seems questionable.
self.request_version = self.default_request_version
# appease http.server
self.command = 'PROXY'
self.send_error(400, msg)
def handle(self):
"""Handle multiple requests if necessary."""
        # ensure the opening line for the connection is a valid PROXY protocol
# line; this is the only IO we do on this connection before any
# additional wrapping further pollutes the raw socket.
connection_line = self.rfile.readline(self.server.url_length_limit)
if not connection_line.startswith(b'PROXY '):
return self.handle_error(connection_line)
proxy_parts = connection_line.strip(b'\r\n').split(b' ')
if proxy_parts[1].startswith(b'UNKNOWN'):
# "UNKNOWN", in PROXY protocol version 1, means "not
# TCP4 or TCP6". This includes completely legitimate
# things like QUIC or Unix domain sockets. The PROXY
# protocol (section 2.1) states that the receiver
# (that's us) MUST ignore anything after "UNKNOWN" and
# before the CRLF, essentially discarding the first
# line.
pass
elif proxy_parts[1] in (b'TCP4', b'TCP6') and len(proxy_parts) == 6:
if six.PY2:
self.client_address = (proxy_parts[2], proxy_parts[4])
self.proxy_address = (proxy_parts[3], proxy_parts[5])
else:
self.client_address = (
proxy_parts[2].decode('latin-1'),
proxy_parts[4].decode('latin-1'))
self.proxy_address = (
proxy_parts[3].decode('latin-1'),
proxy_parts[5].decode('latin-1'))
else:
self.handle_error(connection_line)
return SwiftHttpProtocol.handle(self)
def get_environ(self, *args, **kwargs):
environ = SwiftHttpProtocol.get_environ(self, *args, **kwargs)
if self.proxy_address:
environ['SERVER_ADDR'] = self.proxy_address[0]
environ['SERVER_PORT'] = self.proxy_address[1]
if self.proxy_address[1] == '443':
environ['wsgi.url_scheme'] = 'https'
environ['HTTPS'] = 'on'
return environ
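# Editor's note (not part of the upstream module): an example PROXY protocol
# v1 line as parsed by SwiftHttpProxiedProtocol.handle(); the addresses and
# ports are hypothetical.
#   b'PROXY TCP4 203.0.113.7 10.0.0.5 51368 8080\r\n'
# yields client_address == ('203.0.113.7', '51368') and
# proxy_address == ('10.0.0.5', '8080').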
| swift-master | swift/common/http_protocol.py |
# Copyright (c) 2010-2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from swift import __version__ as swift_version
from swift.common.utils import public, timing_stats, config_true_value, \
LOG_LINE_DEFAULT_FORMAT
from swift.common.swob import Response
class BaseStorageServer(object):
"""
Implements common OPTIONS method for object, account, container servers.
"""
def __init__(self, conf, **kwargs):
self._allowed_methods = None
self.replication_server = config_true_value(
conf.get('replication_server', 'true'))
self.log_format = conf.get('log_format', LOG_LINE_DEFAULT_FORMAT)
self.anonymization_method = conf.get('log_anonymization_method', 'md5')
self.anonymization_salt = conf.get('log_anonymization_salt', '')
@property
def server_type(self):
raise NotImplementedError(
'Storage nodes have not implemented the Server type.')
@property
def allowed_methods(self):
if self._allowed_methods is None:
self._allowed_methods = []
all_methods = inspect.getmembers(self, predicate=callable)
for name, m in all_methods:
if not getattr(m, 'publicly_accessible', False):
continue
if getattr(m, 'replication', False) and \
not self.replication_server:
continue
self._allowed_methods.append(name)
self._allowed_methods.sort()
return self._allowed_methods
@public
@timing_stats()
def OPTIONS(self, req):
"""
Base handler for OPTIONS requests
:param req: swob.Request object
:returns: swob.Response object
"""
# Prepare the default response
headers = {'Allow': ', '.join(self.allowed_methods),
'Server': '%s/%s' % (self.server_type, swift_version)}
resp = Response(status=200, request=req, headers=headers)
return resp
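# Editor's sketch (not part of the upstream module): a toy subclass showing
# how allowed_methods is derived from @public handlers.  The server type and
# handler are hypothetical; e.g. ExampleServer({}).allowed_methods would be
# ['GET', 'OPTIONS'].
class ExampleServer(BaseStorageServer):
    server_type = 'example-server'
    @public
    @timing_stats()
    def GET(self, req):
        return Response(status=200, request=req, body=b'ok')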
| swift-master | swift/common/base_storage_server.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def is_informational(status):
"""
Check if HTTP status code is informational.
:param status: http status code
    :returns: True if status is informational, else False
"""
return 100 <= status <= 199
def is_success(status):
"""
Check if HTTP status code is successful.
:param status: http status code
:returns: True if status is successful, else False
"""
return 200 <= status <= 299
def is_redirection(status):
"""
Check if HTTP status code is redirection.
:param status: http status code
:returns: True if status is redirection, else False
"""
return 300 <= status <= 399
def is_client_error(status):
"""
Check if HTTP status code is client error.
:param status: http status code
:returns: True if status is client error, else False
"""
return 400 <= status <= 499
def is_server_error(status):
"""
Check if HTTP status code is server error.
:param status: http status code
:returns: True if status is server error, else False
"""
return 500 <= status <= 599
# List of HTTP status codes
###############################################################################
# 1xx Informational
###############################################################################
HTTP_CONTINUE = 100
HTTP_SWITCHING_PROTOCOLS = 101
HTTP_PROCESSING = 102 # WebDAV
HTTP_CHECKPOINT = 103
HTTP_REQUEST_URI_TOO_LONG = 122
###############################################################################
# 2xx Success
###############################################################################
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_ACCEPTED = 202
HTTP_NON_AUTHORITATIVE_INFORMATION = 203
HTTP_NO_CONTENT = 204
HTTP_RESET_CONTENT = 205
HTTP_PARTIAL_CONTENT = 206
HTTP_MULTI_STATUS = 207 # WebDAV
HTTP_IM_USED = 226
###############################################################################
# 3xx Redirection
###############################################################################
HTTP_MULTIPLE_CHOICES = 300
HTTP_MOVED_PERMANENTLY = 301
HTTP_FOUND = 302
HTTP_SEE_OTHER = 303
HTTP_NOT_MODIFIED = 304
HTTP_USE_PROXY = 305
HTTP_SWITCH_PROXY = 306
HTTP_TEMPORARY_REDIRECT = 307
HTTP_RESUME_INCOMPLETE = 308
###############################################################################
# 4xx Client Error
###############################################################################
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_PAYMENT_REQUIRED = 402
HTTP_FORBIDDEN = 403
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_NOT_ACCEPTABLE = 406
HTTP_PROXY_AUTHENTICATION_REQUIRED = 407
HTTP_REQUEST_TIMEOUT = 408
HTTP_CONFLICT = 409
HTTP_GONE = 410
HTTP_LENGTH_REQUIRED = 411
HTTP_PRECONDITION_FAILED = 412
HTTP_REQUEST_ENTITY_TOO_LARGE = 413
HTTP_REQUEST_URI_TOO_LONG = 414
HTTP_UNSUPPORTED_MEDIA_TYPE = 415
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE = 416
HTTP_EXPECTATION_FAILED = 417
HTTP_IM_A_TEAPOT = 418
HTTP_UNPROCESSABLE_ENTITY = 422 # WebDAV
HTTP_LOCKED = 423 # WebDAV
HTTP_FAILED_DEPENDENCY = 424 # WebDAV
HTTP_UNORDERED_COLLECTION = 425
HTTP_UPGRADE_REQUIED = 426
HTTP_PRECONDITION_REQUIRED = 428
HTTP_TOO_MANY_REQUESTS = 429
HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE = 431
HTTP_NO_RESPONSE = 444
HTTP_RETRY_WITH = 449
HTTP_BLOCKED_BY_WINDOWS_PARENTAL_CONTROLS = 450
HTTP_RATE_LIMITED = 498
HTTP_CLIENT_CLOSED_REQUEST = 499
###############################################################################
# 5xx Server Error
###############################################################################
HTTP_INTERNAL_SERVER_ERROR = 500
HTTP_NOT_IMPLEMENTED = 501
HTTP_BAD_GATEWAY = 502
HTTP_SERVICE_UNAVAILABLE = 503
HTTP_GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
HTTP_VARIANT_ALSO_NEGOTIATES = 506
HTTP_INSUFFICIENT_STORAGE = 507 # WebDAV
HTTP_BANDWIDTH_LIMIT_EXCEEDED = 509
HTTP_NOT_EXTENDED = 510
HTTP_NETWORK_AUTHENTICATION_REQUIRED = 511
HTTP_NETWORK_READ_TIMEOUT_ERROR = 598 # not used in RFC
HTTP_NETWORK_CONNECT_TIMEOUT_ERROR = 599 # not used in RFC
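# Editor's sketch (not part of the upstream module): the range helpers above
# classify the named constants by status-code century.
if __name__ == '__main__':
    assert is_informational(HTTP_CONTINUE)
    assert is_success(HTTP_NO_CONTENT)
    assert is_redirection(HTTP_NOT_MODIFIED)
    assert is_client_error(HTTP_NOT_FOUND)
    assert is_server_error(HTTP_SERVICE_UNAVAILABLE)
    assert not is_success(HTTP_NOT_FOUND)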
| swift-master | swift/common/http.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from eventlet import Timeout
import swift.common.utils
class MessageTimeout(Timeout):
def __init__(self, seconds=None, msg=None):
Timeout.__init__(self, seconds=seconds)
self.msg = msg
def __str__(self):
return '%s: %s' % (Timeout.__str__(self), self.msg)
class SwiftException(Exception):
pass
class PutterConnectError(Exception):
def __init__(self, status=None):
self.status = status
class InvalidTimestamp(SwiftException):
pass
class InsufficientStorage(SwiftException):
pass
class FooterNotSupported(SwiftException):
pass
class MultiphasePUTNotSupported(SwiftException):
pass
class SuffixSyncError(SwiftException):
pass
class RangeAlreadyComplete(SwiftException):
pass
class DiskFileError(SwiftException):
pass
class DiskFileNotOpen(DiskFileError):
pass
class DiskFileQuarantined(DiskFileError):
pass
class DiskFileCollision(DiskFileError):
pass
class DiskFileNotExist(DiskFileError):
pass
class DiskFileDeleted(DiskFileNotExist):
def __init__(self, metadata=None):
self.metadata = metadata or {}
self.timestamp = swift.common.utils.Timestamp(
self.metadata.get('X-Timestamp', 0))
class DiskFileExpired(DiskFileDeleted):
pass
class DiskFileNoSpace(DiskFileError):
pass
class DiskFileDeviceUnavailable(DiskFileError):
pass
class DiskFileXattrNotSupported(DiskFileError):
pass
class DiskFileBadMetadataChecksum(DiskFileError):
pass
class DeviceUnavailable(SwiftException):
pass
class DatabaseAuditorException(SwiftException):
pass
class InvalidAccountInfo(DatabaseAuditorException):
pass
class PathNotDir(OSError):
pass
class ChunkReadError(SwiftException):
pass
class ShortReadError(SwiftException):
pass
class ChunkReadTimeout(Timeout):
pass
class ChunkWriteTimeout(Timeout):
pass
class ConnectionTimeout(Timeout):
pass
class ResponseTimeout(Timeout):
pass
class DriveNotMounted(SwiftException):
pass
class LockTimeout(MessageTimeout):
pass
class RingLoadError(SwiftException):
pass
class RingBuilderError(SwiftException):
pass
class RingValidationError(RingBuilderError):
pass
class EmptyRingError(RingBuilderError):
pass
class DuplicateDeviceError(RingBuilderError):
pass
class UnPicklingError(SwiftException):
pass
class FileNotFoundError(SwiftException):
pass
class PermissionError(SwiftException):
pass
class ListingIterError(SwiftException):
pass
class ListingIterNotFound(ListingIterError):
pass
class ListingIterNotAuthorized(ListingIterError):
def __init__(self, aresp):
self.aresp = aresp
class SegmentError(SwiftException):
pass
class LinkIterError(SwiftException):
pass
class ReplicationException(Exception):
pass
class ReplicationLockTimeout(LockTimeout):
pass
class PartitionLockTimeout(LockTimeout):
pass
class MimeInvalid(SwiftException):
pass
class APIVersionError(SwiftException):
pass
class EncryptionException(SwiftException):
pass
class UnknownSecretIdError(EncryptionException):
pass
class QuarantineRequest(SwiftException):
pass
class ClientException(Exception):
def __init__(self, msg, http_scheme='', http_host='', http_port='',
http_path='', http_query='', http_status=None, http_reason='',
http_device='', http_response_content='', http_headers=None):
super(ClientException, self).__init__(msg)
self.msg = msg
self.http_scheme = http_scheme
self.http_host = http_host
self.http_port = http_port
self.http_path = http_path
self.http_query = http_query
self.http_status = http_status
self.http_reason = http_reason
self.http_device = http_device
self.http_response_content = http_response_content
self.http_headers = http_headers or {}
def __str__(self):
a = self.msg
b = ''
if self.http_scheme:
b += '%s://' % self.http_scheme
if self.http_host:
b += self.http_host
if self.http_port:
b += ':%s' % self.http_port
if self.http_path:
b += self.http_path
if self.http_query:
b += '?%s' % self.http_query
if self.http_status:
if b:
b = '%s %s' % (b, self.http_status)
else:
b = str(self.http_status)
if self.http_reason:
if b:
b = '%s %s' % (b, self.http_reason)
else:
b = '- %s' % self.http_reason
if self.http_device:
if b:
b = '%s: device %s' % (b, self.http_device)
else:
b = 'device %s' % self.http_device
if self.http_response_content:
if len(self.http_response_content) <= 60:
b += ' %s' % self.http_response_content
else:
b += ' [first 60 chars of response] %s' \
% self.http_response_content[:60]
return b and '%s: %s' % (a, b) or a
class InvalidPidFileException(Exception):
pass
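# Editor's usage sketch (not part of the upstream module): how ClientException
# composes its message.  The host, port and path are hypothetical.
if __name__ == '__main__':
    err = ClientException('Object GET failed', http_scheme='http',
                          http_host='127.0.0.1', http_port=6200,
                          http_path='/sda1/3/AUTH_test/c/o',
                          http_status=404, http_reason='Not Found')
    print(err)
    # -> Object GET failed: http://127.0.0.1:6200/sda1/3/AUTH_test/c/o
    #    404 Not Found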
| swift-master | swift/common/exceptions.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
from os.path import isdir # tighter scoped import for mocking
import six
from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError
from six.moves import urllib
from swift.common import utils, exceptions
from swift.common.swob import HTTPBadRequest, HTTPLengthRequired, \
HTTPRequestEntityTooLarge, HTTPPreconditionFailed, HTTPNotImplemented, \
HTTPException, wsgi_to_str, wsgi_to_bytes
MAX_FILE_SIZE = 5368709122
MAX_META_NAME_LENGTH = 128
MAX_META_VALUE_LENGTH = 256
MAX_META_COUNT = 90
MAX_META_OVERALL_SIZE = 4096
MAX_HEADER_SIZE = 8192
MAX_OBJECT_NAME_LENGTH = 1024
CONTAINER_LISTING_LIMIT = 10000
ACCOUNT_LISTING_LIMIT = 10000
MAX_ACCOUNT_NAME_LENGTH = 256
MAX_CONTAINER_NAME_LENGTH = 256
VALID_API_VERSIONS = ["v1", "v1.0"]
EXTRA_HEADER_COUNT = 0
AUTO_CREATE_ACCOUNT_PREFIX = '.'
# If adding an entry to DEFAULT_CONSTRAINTS, note that
# these constraints are automatically published by the
# proxy server in responses to /info requests, with values
# updated by reload_constraints()
DEFAULT_CONSTRAINTS = {
'max_file_size': MAX_FILE_SIZE,
'max_meta_name_length': MAX_META_NAME_LENGTH,
'max_meta_value_length': MAX_META_VALUE_LENGTH,
'max_meta_count': MAX_META_COUNT,
'max_meta_overall_size': MAX_META_OVERALL_SIZE,
'max_header_size': MAX_HEADER_SIZE,
'max_object_name_length': MAX_OBJECT_NAME_LENGTH,
'container_listing_limit': CONTAINER_LISTING_LIMIT,
'account_listing_limit': ACCOUNT_LISTING_LIMIT,
'max_account_name_length': MAX_ACCOUNT_NAME_LENGTH,
'max_container_name_length': MAX_CONTAINER_NAME_LENGTH,
'valid_api_versions': VALID_API_VERSIONS,
'extra_header_count': EXTRA_HEADER_COUNT,
'auto_create_account_prefix': AUTO_CREATE_ACCOUNT_PREFIX,
}
SWIFT_CONSTRAINTS_LOADED = False
OVERRIDE_CONSTRAINTS = {} # any constraints overridden by SWIFT_CONF_FILE
EFFECTIVE_CONSTRAINTS = {} # populated by reload_constraints
def reload_constraints():
"""
Parse SWIFT_CONF_FILE and reset module level global constraint attrs,
    populating OVERRIDE_CONSTRAINTS and EFFECTIVE_CONSTRAINTS along the way.
"""
global SWIFT_CONSTRAINTS_LOADED, OVERRIDE_CONSTRAINTS
SWIFT_CONSTRAINTS_LOADED = False
OVERRIDE_CONSTRAINTS = {}
constraints_conf = ConfigParser()
if constraints_conf.read(utils.SWIFT_CONF_FILE):
SWIFT_CONSTRAINTS_LOADED = True
for name, default in DEFAULT_CONSTRAINTS.items():
try:
value = constraints_conf.get('swift-constraints', name)
except NoOptionError:
pass
except NoSectionError:
# We are never going to find the section for another option
break
else:
if isinstance(default, int):
value = int(value) # Go ahead and let it error
elif isinstance(default, str):
pass # No translation needed, I guess
else:
# Hope we want a list!
value = utils.list_from_csv(value)
OVERRIDE_CONSTRAINTS[name] = value
for name, default in DEFAULT_CONSTRAINTS.items():
value = OVERRIDE_CONSTRAINTS.get(name, default)
EFFECTIVE_CONSTRAINTS[name] = value
# "globals" in this context is module level globals, always.
globals()[name.upper()] = value
reload_constraints()
# By default the maximum number of allowed headers depends on the number of max
# allowed metadata settings plus a default value of 36 for swift internally
# generated headers and regular http headers. If for some reason this is not
# enough (custom middleware for example) it can be increased with the
# extra_header_count constraint.
MAX_HEADER_COUNT = MAX_META_COUNT + 36 + max(EXTRA_HEADER_COUNT, 0)
def check_metadata(req, target_type):
"""
    Check metadata sent in the request headers. This should only check
    that the metadata given in the request is valid. Checks against
    account/container overall metadata should be forwarded on to the
    respective server to be checked.
:param req: request object
:param target_type: str: one of: object, container, or account: indicates
which type the target storage for the metadata is
:returns: HTTPBadRequest with bad metadata otherwise None
"""
target_type = target_type.lower()
prefix = 'x-%s-meta-' % target_type
meta_count = 0
meta_size = 0
for key, value in req.headers.items():
if (isinstance(value, six.string_types)
and len(value) > MAX_HEADER_SIZE):
return HTTPBadRequest(body=b'Header value too long: %s' %
wsgi_to_bytes(key[:MAX_META_NAME_LENGTH]),
request=req, content_type='text/plain')
if not key.lower().startswith(prefix):
continue
key = key[len(prefix):]
if not key:
return HTTPBadRequest(body='Metadata name cannot be empty',
request=req, content_type='text/plain')
bad_key = not check_utf8(wsgi_to_str(key))
bad_value = value and not check_utf8(wsgi_to_str(value))
if target_type in ('account', 'container') and (bad_key or bad_value):
return HTTPBadRequest(body='Metadata must be valid UTF-8',
request=req, content_type='text/plain')
meta_count += 1
meta_size += len(key) + len(value)
if len(key) > MAX_META_NAME_LENGTH:
return HTTPBadRequest(
body=wsgi_to_bytes('Metadata name too long: %s%s' % (
prefix, key)),
request=req, content_type='text/plain')
if len(value) > MAX_META_VALUE_LENGTH:
return HTTPBadRequest(
body=wsgi_to_bytes('Metadata value longer than %d: %s%s' % (
MAX_META_VALUE_LENGTH, prefix, key)),
request=req, content_type='text/plain')
if meta_count > MAX_META_COUNT:
return HTTPBadRequest(
body='Too many metadata items; max %d' % MAX_META_COUNT,
request=req, content_type='text/plain')
if meta_size > MAX_META_OVERALL_SIZE:
return HTTPBadRequest(
body='Total metadata too large; max %d'
% MAX_META_OVERALL_SIZE,
request=req, content_type='text/plain')
return None
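# Editor's usage sketch (not part of the upstream module): exercising
# check_metadata with a hypothetical over-long metadata name.
def _example_check_metadata():
    from swift.common.swob import Request
    req = Request.blank(
        '/v1/AUTH_test/c',
        headers={'X-Container-Meta-' + 'x' * 200: 'value'})
    resp = check_metadata(req, 'container')
    return resp.status if resp else 'metadata ok'  # '400 Bad Request'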
def check_object_creation(req, object_name):
"""
Check to ensure that everything is alright about an object to be created.
:param req: HTTP request object
:param object_name: name of object to be created
:returns: HTTPRequestEntityTooLarge -- the object is too large
:returns: HTTPLengthRequired -- missing content-length header and not
a chunked request
:returns: HTTPBadRequest -- missing or bad content-type header, or
bad metadata
:returns: HTTPNotImplemented -- unsupported transfer-encoding header value
"""
try:
ml = req.message_length()
except ValueError as e:
return HTTPBadRequest(request=req, content_type='text/plain',
body=str(e))
except AttributeError as e:
return HTTPNotImplemented(request=req, content_type='text/plain',
body=str(e))
if ml is not None and ml > MAX_FILE_SIZE:
return HTTPRequestEntityTooLarge(body='Your request is too large.',
request=req,
content_type='text/plain')
if req.content_length is None and \
req.headers.get('transfer-encoding') != 'chunked':
return HTTPLengthRequired(body='Missing Content-Length header.',
request=req,
content_type='text/plain')
if len(object_name) > MAX_OBJECT_NAME_LENGTH:
return HTTPBadRequest(body='Object name length of %d longer than %d' %
(len(object_name), MAX_OBJECT_NAME_LENGTH),
request=req, content_type='text/plain')
if 'Content-Type' not in req.headers:
return HTTPBadRequest(request=req, content_type='text/plain',
body=b'No content type')
try:
req = check_delete_headers(req)
except HTTPException as e:
return HTTPBadRequest(request=req, body=e.body,
content_type='text/plain')
if not check_utf8(wsgi_to_str(req.headers['Content-Type'])):
return HTTPBadRequest(request=req, body='Invalid Content-Type',
content_type='text/plain')
return check_metadata(req, 'object')
def check_dir(root, drive):
"""
    Verify that the path to the device is a directory. This is a lesser
    constraint than a full mount_check, and is used when mount_check isn't
    possible, for instance with a VM using loopback devices or partitions.
:param root: base path where the dir is
:param drive: drive name to be checked
:returns: full path to the device
:raises ValueError: if drive fails to validate
"""
return check_drive(root, drive, False)
def check_mount(root, drive):
"""
Verify that the path to the device is a mount point and mounted. This
allows us to fast fail on drives that have been unmounted because of
    issues, and also prevents us from accidentally filling up the root
partition.
:param root: base path where the devices are mounted
:param drive: drive name to be checked
:returns: full path to the device
:raises ValueError: if drive fails to validate
"""
return check_drive(root, drive, True)
def check_drive(root, drive, mount_check):
"""
Validate the path given by root and drive is a valid existing directory.
:param root: base path where the devices are mounted
:param drive: drive name to be checked
:param mount_check: additionally require path is mounted
:returns: full path to the device
:raises ValueError: if drive fails to validate
"""
if not (urllib.parse.quote_plus(drive) == drive):
raise ValueError('%s is not a valid drive name' % drive)
path = os.path.join(root, drive)
if mount_check:
if not utils.ismount(path):
raise ValueError('%s is not mounted' % path)
else:
if not isdir(path):
raise ValueError('%s is not a directory' % path)
return path
def check_float(string):
"""
Helper function for checking if a string can be converted to a float.
:param string: string to be verified as a float
:returns: True if the string can be converted to a float, False otherwise
"""
try:
float(string)
return True
except ValueError:
return False
def valid_timestamp(request):
"""
Helper function to extract a timestamp from requests that require one.
:param request: the swob request object
:returns: a valid Timestamp instance
:raises HTTPBadRequest: on missing or invalid X-Timestamp
"""
try:
return request.timestamp
except exceptions.InvalidTimestamp as e:
raise HTTPBadRequest(body=str(e), request=request,
content_type='text/plain')
def check_delete_headers(request):
"""
Check that 'x-delete-after' and 'x-delete-at' headers have valid values.
Values should be positive integers and correspond to a time greater than
the request timestamp.
If the 'x-delete-after' header is found then its value is used to compute
an 'x-delete-at' value which takes precedence over any existing
'x-delete-at' header.
:param request: the swob request object
:raises: HTTPBadRequest in case of invalid values
:returns: the swob request object
"""
now = float(valid_timestamp(request))
if 'x-delete-after' in request.headers:
try:
x_delete_after = int(request.headers['x-delete-after'])
except ValueError:
raise HTTPBadRequest(request=request,
content_type='text/plain',
body='Non-integer X-Delete-After')
actual_del_time = utils.normalize_delete_at_timestamp(
now + x_delete_after)
if int(actual_del_time) <= now:
raise HTTPBadRequest(request=request,
content_type='text/plain',
body='X-Delete-After in past')
request.headers['x-delete-at'] = actual_del_time
del request.headers['x-delete-after']
if 'x-delete-at' in request.headers:
try:
x_delete_at = int(utils.normalize_delete_at_timestamp(
int(request.headers['x-delete-at'])))
except ValueError:
raise HTTPBadRequest(request=request, content_type='text/plain',
body='Non-integer X-Delete-At')
if x_delete_at <= now and not utils.config_true_value(
request.headers.get('x-backend-replication', 'f')):
raise HTTPBadRequest(request=request, content_type='text/plain',
body='X-Delete-At in past')
return request
def check_utf8(string, internal=False):
"""
Validate if a string is valid UTF-8 str or unicode and that it
does not contain any reserved characters.
:param string: string to be validated
:param internal: boolean, allows reserved characters if True
    :returns: True if the string is valid utf-8 str or unicode and
              contains no reserved characters (reserved characters are
              allowed when internal is True), False otherwise
"""
if not string:
return False
try:
if isinstance(string, six.text_type):
encoded = string.encode('utf-8')
decoded = string
else:
encoded = string
decoded = string.decode('UTF-8')
if decoded.encode('UTF-8') != encoded:
return False
# A UTF-8 string with surrogates in it is invalid.
#
# Note: this check is only useful on Python 2. On Python 3, a
# bytestring with a UTF-8-encoded surrogate codepoint is (correctly)
# treated as invalid, so the decode() call above will fail.
#
# Note 2: this check requires us to use a wide build of Python 2. On
# narrow builds of Python 2, potato = u"\U0001F954" will have length
# 2, potato[0] == u"\ud83e" (surrogate), and potato[1] == u"\udda0"
# (also a surrogate), so even if it is correctly UTF-8 encoded as
# b'\xf0\x9f\xa6\xa0', it will not pass this check. Fortunately,
# most Linux distributions build Python 2 wide, and Python 3.3+
# removed the wide/narrow distinction entirely.
if any(0xD800 <= ord(codepoint) <= 0xDFFF
for codepoint in decoded):
return False
if b'\x00' != utils.RESERVED_BYTE and b'\x00' in encoded:
return False
return True if internal else utils.RESERVED_BYTE not in encoded
# If string is unicode, decode() will raise UnicodeEncodeError
# So, we should catch both UnicodeDecodeError & UnicodeEncodeError
except UnicodeError:
return False
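# Editor's note (not part of the upstream module): a few illustrative
# check_utf8 results, assuming utils.RESERVED_BYTE is the null byte:
#   check_utf8('caf\xe9')               -> True
#   check_utf8(b'\xff\xfe')             -> False (not valid UTF-8 bytes)
#   check_utf8('a\x00b')                -> False (reserved byte present)
#   check_utf8('a\x00b', internal=True) -> True
#   check_utf8('')                      -> False (empty string)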
def check_name_format(req, name, target_type):
"""
Validate that the header contains valid account or container name.
:param req: HTTP request object
:param name: header value to validate
:param target_type: which header is being validated (Account or Container)
:returns: A properly encoded account name or container name
:raise HTTPPreconditionFailed: if account header
is not well formatted.
"""
if not name:
raise HTTPPreconditionFailed(
request=req,
body='%s name cannot be empty' % target_type)
if six.PY2:
if isinstance(name, six.text_type):
name = name.encode('utf-8')
if '/' in name:
raise HTTPPreconditionFailed(
request=req,
body='%s name cannot contain slashes' % target_type)
return name
check_account_format = functools.partial(check_name_format,
target_type='Account')
check_container_format = functools.partial(check_name_format,
target_type='Container')
def valid_api_version(version):
"""
Checks if the requested version is valid.
Currently Swift only supports "v1" and "v1.0".
"""
global VALID_API_VERSIONS
if not isinstance(VALID_API_VERSIONS, list):
VALID_API_VERSIONS = [str(VALID_API_VERSIONS)]
return version in VALID_API_VERSIONS
| swift-master | swift/common/constraints.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Internal client library for making calls directly to the servers rather than
through the proxy.
"""
import json
import os
import socket
from eventlet import sleep, Timeout
import six
import six.moves.cPickle as pickle
from six.moves.http_client import HTTPException
from swift.common.bufferedhttp import http_connect, http_connect_raw
from swift.common.exceptions import ClientException
from swift.common.request_helpers import USE_REPLICATION_NETWORK_HEADER, \
get_ip_port
from swift.common.swob import normalize_etag
from swift.common.utils import Timestamp, FileLikeIter, quote
from swift.common.http import HTTP_NO_CONTENT, HTTP_INSUFFICIENT_STORAGE, \
is_success, is_server_error
from swift.common.header_key_dict import HeaderKeyDict
class DirectClientException(ClientException):
def __init__(self, stype, method, node, part, path, resp, host=None):
# host can be used to override the node ip and port reported in
# the exception
host = host if host is not None else node
if not isinstance(path, six.text_type):
path = path.decode("utf-8")
full_path = quote('/%s/%s%s' % (node['device'], part, path))
msg = '%s server %s:%s direct %s %r gave status %s' % (
stype, host['ip'], host['port'], method, full_path, resp.status)
headers = HeaderKeyDict(resp.getheaders())
super(DirectClientException, self).__init__(
msg, http_host=host['ip'], http_port=host['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason, http_headers=headers)
class DirectClientReconException(ClientException):
def __init__(self, method, node, path, resp):
if not isinstance(path, six.text_type):
path = path.decode("utf-8")
msg = 'server %s:%s direct %s %r gave status %s' % (
node['ip'], node['port'], method, path, resp.status)
headers = HeaderKeyDict(resp.getheaders())
super(DirectClientReconException, self).__init__(
msg, http_host=node['ip'], http_port=node['port'],
http_status=resp.status, http_reason=resp.reason,
http_headers=headers)
def _make_path(*components):
return u'/' + u'/'.join(
x.decode('utf-8') if isinstance(x, six.binary_type) else x
for x in components)
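# For example (hypothetical names), _make_path('AUTH_test', 'cont', b'obj')
# returns u'/AUTH_test/cont/obj'; bytes components are decoded as UTF-8.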
def _make_req(node, part, method, path, headers, stype,
conn_timeout=5, response_timeout=15, send_timeout=15,
contents=None, content_length=None, chunk_size=65535):
"""
    Make a request to a backend storage node.
:param node: a node dict from a ring
:param part: an integer, the partition number
:param method: a string, the HTTP method (e.g. 'PUT', 'DELETE', etc)
:param path: a string, the request path
:param headers: a dict, header name => value
    :param stype: a string describing the type of service
        (i.e. 'Account', 'Container', 'Object')
:param conn_timeout: timeout while waiting for connection; default is 5
seconds
:param response_timeout: timeout while waiting for response; default is 15
seconds
:param send_timeout: timeout for sending request body; default is 15
seconds
:param contents: an iterable or string to read object data from
:param content_length: value to send as content-length header
:param chunk_size: if defined, chunk size of data to send
:returns: an HTTPResponse object
:raises DirectClientException: if the response status is not 2xx
:raises eventlet.Timeout: if either conn_timeout or response_timeout is
exceeded
"""
if contents is not None:
if content_length is not None:
headers['Content-Length'] = str(content_length)
else:
for n, v in headers.items():
if n.lower() == 'content-length':
content_length = int(v)
if not contents:
headers['Content-Length'] = '0'
if isinstance(contents, six.string_types):
contents = [contents]
if content_length is None:
headers['Transfer-Encoding'] = 'chunked'
ip, port = get_ip_port(node, headers)
headers.setdefault('X-Backend-Allow-Reserved-Names', 'true')
with Timeout(conn_timeout):
conn = http_connect(ip, port, node['device'], part,
method, path, headers=headers)
if contents is not None:
contents_f = FileLikeIter(contents)
with Timeout(send_timeout):
if content_length is None:
chunk = contents_f.read(chunk_size)
while chunk:
conn.send(b'%x\r\n%s\r\n' % (len(chunk), chunk))
chunk = contents_f.read(chunk_size)
conn.send(b'0\r\n\r\n')
else:
left = content_length
while left > 0:
size = chunk_size
if size > left:
size = left
chunk = contents_f.read(size)
if not chunk:
break
conn.send(chunk)
left -= len(chunk)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise DirectClientException(stype, method, node, part, path, resp)
return resp
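# A request-body sketch for _make_req (hypothetical values): when `contents`
# is supplied without a known length (no `content_length` argument and no
# Content-Length header), the body is sent with Transfer-Encoding: chunked in
# `chunk_size` pieces; when the length is known, the body is streamed
# un-chunked, `chunk_size` bytes at a time, until `content_length` bytes have
# been sent or `contents` is exhausted.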
def _get_direct_account_container(path, stype, node, part,
marker=None, limit=None,
prefix=None, delimiter=None,
conn_timeout=5, response_timeout=15,
end_marker=None, reverse=None, headers=None,
extra_params=None):
"""Base function for get direct account and container.
Do not use directly use the direct_get_account or
direct_get_container instead.
"""
if headers is None:
headers = {}
params = {'format': 'json'}
if extra_params:
for key, value in extra_params.items():
if value is not None:
params[key] = value
if marker:
if 'marker' in params:
raise TypeError('duplicate values for keyword arg: marker')
params['marker'] = quote(marker)
if limit:
if 'limit' in params:
raise TypeError('duplicate values for keyword arg: limit')
params['limit'] = '%d' % limit
if prefix:
if 'prefix' in params:
raise TypeError('duplicate values for keyword arg: prefix')
params['prefix'] = quote(prefix)
if delimiter:
if 'delimiter' in params:
raise TypeError('duplicate values for keyword arg: delimiter')
params['delimiter'] = quote(delimiter)
if end_marker:
if 'end_marker' in params:
raise TypeError('duplicate values for keyword arg: end_marker')
params['end_marker'] = quote(end_marker)
if reverse:
if 'reverse' in params:
raise TypeError('duplicate values for keyword arg: reverse')
params['reverse'] = quote(reverse)
qs = '&'.join('%s=%s' % (k, v) for k, v in params.items())
ip, port = get_ip_port(node, headers)
with Timeout(conn_timeout):
conn = http_connect(ip, port, node['device'], part,
'GET', path, query_string=qs,
headers=gen_headers(hdrs_in=headers))
with Timeout(response_timeout):
resp = conn.getresponse()
if not is_success(resp.status):
resp.read()
raise DirectClientException(stype, 'GET', node, part, path, resp)
resp_headers = HeaderKeyDict()
for header, value in resp.getheaders():
resp_headers[header] = value
if resp.status == HTTP_NO_CONTENT:
resp.read()
return resp_headers, []
return resp_headers, json.loads(resp.read())
def gen_headers(hdrs_in=None, add_ts=True):
"""
Get the headers ready for a request. All requests should have a User-Agent
string, but if one is passed in don't over-write it. Not all requests will
need an X-Timestamp, but if one is passed in do not over-write it.
    :param hdrs_in: dict or None, base for HTTP headers
:param add_ts: boolean, should be True for any "unsafe" HTTP request
:returns: HeaderKeyDict based on headers and ready for the request
"""
hdrs_out = HeaderKeyDict(hdrs_in) if hdrs_in else HeaderKeyDict()
if add_ts and 'X-Timestamp' not in hdrs_out:
hdrs_out['X-Timestamp'] = Timestamp.now().internal
if 'user-agent' not in hdrs_out:
hdrs_out['User-Agent'] = 'direct-client %s' % os.getpid()
hdrs_out.setdefault('X-Backend-Allow-Reserved-Names', 'true')
return hdrs_out
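# For example, gen_headers({'X-Foo': 'bar'}) (hypothetical header) returns a
# HeaderKeyDict containing X-Foo, a fresh X-Timestamp (add_ts defaults to
# True and none was supplied), a 'direct-client <pid>' User-Agent, and
# X-Backend-Allow-Reserved-Names: true.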
def direct_get_account(node, part, account, marker=None, limit=None,
prefix=None, delimiter=None, conn_timeout=5,
response_timeout=15, end_marker=None, reverse=None,
headers=None):
"""
Get listings directly from the account server.
:param node: node dictionary from the ring
:param part: partition the account is on
:param account: account name
:param marker: marker query
:param limit: query limit
:param prefix: prefix query
:param delimiter: delimiter for the query
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param end_marker: end_marker query
:param reverse: reverse the returned listing
    :param headers: headers to be included in the request
    :returns: a tuple of (response headers, a list of containers). The
        response headers will be a HeaderKeyDict.
"""
path = _make_path(account)
return _get_direct_account_container(path, "Account", node, part,
headers=headers,
marker=marker,
limit=limit, prefix=prefix,
delimiter=delimiter,
end_marker=end_marker,
reverse=reverse,
conn_timeout=conn_timeout,
response_timeout=response_timeout)
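# A minimal usage sketch, assuming `account_ring` is a loaded account Ring
# and 'AUTH_test' is a hypothetical account:
#
#   part, nodes = account_ring.get_nodes('AUTH_test')
#   hdrs, containers = direct_get_account(nodes[0], part, 'AUTH_test',
#                                         prefix='img_', limit=100)
#   # `containers` is the JSON-decoded listing from the account server.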
def direct_delete_account(node, part, account, conn_timeout=5,
response_timeout=15, headers=None):
if headers is None:
headers = {}
path = _make_path(account)
_make_req(node, part, 'DELETE', path, gen_headers(headers, True),
'Account', conn_timeout, response_timeout)
def direct_head_container(node, part, account, container, conn_timeout=5,
response_timeout=15, headers=None):
"""
Request container information directly from the container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a dict containing the response's headers in a HeaderKeyDict
:raises ClientException: HTTP HEAD request failed
"""
if headers is None:
headers = {}
path = _make_path(account, container)
resp = _make_req(node, part, 'HEAD', path, gen_headers(headers),
'Container', conn_timeout, response_timeout)
resp_headers = HeaderKeyDict()
for header, value in resp.getheaders():
resp_headers[header] = value
return resp_headers
def direct_get_container(node, part, account, container, marker=None,
limit=None, prefix=None, delimiter=None,
conn_timeout=5, response_timeout=15, end_marker=None,
reverse=None, headers=None, extra_params=None):
"""
Get container listings directly from the container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param marker: marker query
:param limit: query limit
:param prefix: prefix query
:param delimiter: delimiter for the query
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param end_marker: end_marker query
:param reverse: reverse the returned listing
:param headers: headers to be included in the request
:param extra_params: a dict of extra parameters to be included in the
request. It can be used to pass additional parameters, e.g,
{'states':'updating'} can be used with shard_range/namespace listing.
    It can also be used to pass existing keyword args, like 'marker' or
    'limit', but if the same parameter is supplied both as a keyword arg
    (not None) and in extra_params, this function will raise TypeError.
    :returns: a tuple of (response headers, a list of objects). The response
        headers will be a HeaderKeyDict.
"""
path = _make_path(account, container)
return _get_direct_account_container(path, "Container", node,
part, marker=marker,
limit=limit, prefix=prefix,
delimiter=delimiter,
end_marker=end_marker,
reverse=reverse,
conn_timeout=conn_timeout,
response_timeout=response_timeout,
headers=headers,
extra_params=extra_params)
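# A sketch of the extra_params hook (hypothetical account/container names),
# following the {'states': 'updating'} example in the docstring above:
#
#   hdrs, listing = direct_get_container(
#       node, part, 'AUTH_test', 'cont',
#       extra_params={'states': 'updating'})
#
# Passing marker both as a keyword arg (not None) and inside extra_params
# would raise TypeError, as noted above.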
def direct_delete_container(node, part, account, container, conn_timeout=5,
response_timeout=15, headers=None):
"""
Delete container directly from the container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param headers: dict to be passed into HTTPConnection headers
:raises ClientException: HTTP DELETE request failed
"""
if headers is None:
headers = {}
path = _make_path(account, container)
add_timestamp = 'x-timestamp' not in (k.lower() for k in headers)
_make_req(node, part, 'DELETE', path, gen_headers(headers, add_timestamp),
'Container', conn_timeout, response_timeout)
def direct_put_container(node, part, account, container, conn_timeout=5,
response_timeout=15, headers=None, contents=None,
content_length=None, chunk_size=65535):
"""
Make a PUT request to a container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param headers: additional headers to include in the request
:param contents: an iterable or string to send in request body (optional)
:param content_length: value to send as content-length header (optional)
:param chunk_size: chunk size of data to send (optional)
:raises ClientException: HTTP PUT request failed
"""
if headers is None:
headers = {}
lower_headers = set(k.lower() for k in headers)
headers_out = gen_headers(headers,
add_ts='x-timestamp' not in lower_headers)
path = _make_path(account, container)
_make_req(node, part, 'PUT', path, headers_out, 'Container', conn_timeout,
response_timeout, contents=contents,
content_length=content_length, chunk_size=chunk_size)
def direct_post_container(node, part, account, container, conn_timeout=5,
response_timeout=15, headers=None):
"""
Make a POST request to a container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param headers: additional headers to include in the request
    :raises ClientException: HTTP POST request failed
"""
if headers is None:
headers = {}
lower_headers = set(k.lower() for k in headers)
headers_out = gen_headers(headers,
add_ts='x-timestamp' not in lower_headers)
path = _make_path(account, container)
return _make_req(node, part, 'POST', path, headers_out, 'Container',
conn_timeout, response_timeout)
def direct_put_container_object(node, part, account, container, obj,
conn_timeout=5, response_timeout=15,
headers=None):
if headers is None:
headers = {}
have_x_timestamp = 'x-timestamp' in (k.lower() for k in headers)
path = _make_path(account, container, obj)
_make_req(node, part, 'PUT', path,
gen_headers(headers, add_ts=(not have_x_timestamp)),
'Container', conn_timeout, response_timeout)
def direct_delete_container_object(node, part, account, container, obj,
conn_timeout=5, response_timeout=15,
headers=None):
if headers is None:
headers = {}
headers = gen_headers(headers, add_ts='x-timestamp' not in (
k.lower() for k in headers))
path = _make_path(account, container, obj)
_make_req(node, part, 'DELETE', path, headers,
'Container', conn_timeout, response_timeout)
def direct_head_object(node, part, account, container, obj, conn_timeout=5,
response_timeout=15, headers=None):
"""
Request object information directly from the object server.
:param node: node dictionary from the ring
    :param part: partition the object is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param headers: dict to be passed into HTTPConnection headers
:returns: a dict containing the response's headers in a HeaderKeyDict
:raises ClientException: HTTP HEAD request failed
"""
if headers is None:
headers = {}
headers = gen_headers(headers)
path = _make_path(account, container, obj)
resp = _make_req(node, part, 'HEAD', path, headers,
'Object', conn_timeout, response_timeout)
resp_headers = HeaderKeyDict()
for header, value in resp.getheaders():
resp_headers[header] = value
return resp_headers
def direct_get_object(node, part, account, container, obj, conn_timeout=5,
response_timeout=15, resp_chunk_size=None, headers=None):
"""
Get object directly from the object server.
:param node: node dictionary from the ring
    :param part: partition the object is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param resp_chunk_size: if defined, chunk size of data to read.
:param headers: dict to be passed into HTTPConnection headers
    :returns: a tuple of (response headers, the object's contents). The
        response
headers will be a HeaderKeyDict.
:raises ClientException: HTTP GET request failed
"""
if headers is None:
headers = {}
ip, port = get_ip_port(node, headers)
path = _make_path(account, container, obj)
with Timeout(conn_timeout):
conn = http_connect(ip, port, node['device'], part,
'GET', path, headers=gen_headers(headers))
with Timeout(response_timeout):
resp = conn.getresponse()
if not is_success(resp.status):
resp.read()
raise DirectClientException('Object', 'GET', node, part, path, resp)
if resp_chunk_size:
def _object_body():
buf = resp.read(resp_chunk_size)
while buf:
yield buf
buf = resp.read(resp_chunk_size)
object_body = _object_body()
else:
object_body = resp.read()
resp_headers = HeaderKeyDict()
for header, value in resp.getheaders():
resp_headers[header] = value
return resp_headers, object_body
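# A streaming usage sketch (hypothetical names), assuming `object_ring` is a
# loaded object Ring:
#
#   part, nodes = object_ring.get_nodes('AUTH_test', 'cont', 'obj')
#   hdrs, body = direct_get_object(nodes[0], part, 'AUTH_test', 'cont',
#                                  'obj', resp_chunk_size=65536)
#   for chunk in body:   # a generator when resp_chunk_size is set
#       pass             # consume the object data chunk by chunk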
def direct_put_object(node, part, account, container, name, contents,
content_length=None, etag=None, content_type=None,
headers=None, conn_timeout=5, response_timeout=15,
chunk_size=65535):
"""
    Put object directly to the object server.
:param node: node dictionary from the ring
    :param part: partition the object is on
:param account: account name
:param container: container name
:param name: object name
:param contents: an iterable or string to read object data from
:param content_length: value to send as content-length header
:param etag: etag of contents
:param content_type: value to send as content-type header
:param headers: additional headers to include in the request
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param chunk_size: if defined, chunk size of data to send.
:returns: etag from the server response
:raises ClientException: HTTP PUT request failed
"""
path = _make_path(account, container, name)
if headers is None:
headers = {}
if etag:
headers['ETag'] = normalize_etag(etag)
if content_type is not None:
headers['Content-Type'] = content_type
else:
headers['Content-Type'] = 'application/octet-stream'
    # In case the caller wants to insert an object with a specific timestamp
add_ts = 'X-Timestamp' not in headers
resp = _make_req(
node, part, 'PUT', path, gen_headers(headers, add_ts=add_ts),
'Object', conn_timeout, response_timeout, contents=contents,
content_length=content_length, chunk_size=chunk_size)
return normalize_etag(resp.getheader('etag'))
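# For example (hypothetical names and content), the return value is the etag
# reported by the object server, unquoted:
#
#   etag = direct_put_object(node, part, 'AUTH_test', 'cont', 'obj',
#                            contents=[b'some bytes'],
#                            content_type='text/plain')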
def direct_post_object(node, part, account, container, name, headers,
conn_timeout=5, response_timeout=15):
"""
Direct update to object metadata on object server.
:param node: node dictionary from the ring
    :param part: partition the object is on
:param account: account name
:param container: container name
:param name: object name
:param headers: headers to store as metadata
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:raises ClientException: HTTP POST request failed
"""
path = _make_path(account, container, name)
_make_req(node, part, 'POST', path, gen_headers(headers, True),
'Object', conn_timeout, response_timeout)
def direct_delete_object(node, part, account, container, obj,
conn_timeout=5, response_timeout=15, headers=None):
"""
Delete object directly from the object server.
:param node: node dictionary from the ring
    :param part: partition the object is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:raises ClientException: HTTP DELETE request failed
"""
if headers is None:
headers = {}
headers = gen_headers(headers, add_ts='x-timestamp' not in (
k.lower() for k in headers))
path = _make_path(account, container, obj)
_make_req(node, part, 'DELETE', path, headers,
'Object', conn_timeout, response_timeout)
def direct_get_suffix_hashes(node, part, suffixes, conn_timeout=5,
response_timeout=15, headers=None):
"""
Get suffix hashes directly from the object server.
Note that unlike other ``direct_client`` functions, this one defaults
to using the replication network to make requests.
:param node: node dictionary from the ring
    :param part: partition to get suffix hashes for
    :param suffixes: a list of suffix directories within the partition
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param headers: dict to be passed into HTTPConnection headers
:returns: dict of suffix hashes
:raises ClientException: HTTP REPLICATE request failed
"""
if headers is None:
headers = {}
headers.setdefault(USE_REPLICATION_NETWORK_HEADER, 'true')
ip, port = get_ip_port(node, headers)
path = '/%s' % '-'.join(suffixes)
with Timeout(conn_timeout):
conn = http_connect(ip, port,
node['device'], part, 'REPLICATE', path,
headers=gen_headers(headers))
with Timeout(response_timeout):
resp = conn.getresponse()
if not is_success(resp.status):
raise DirectClientException('Object', 'REPLICATE',
node, part, path, resp,
host={'ip': node['replication_ip'],
'port': node['replication_port']}
)
return pickle.loads(resp.read())
def retry(func, *args, **kwargs):
"""
Helper function to retry a given function a number of times.
:param func: callable to be called
:param retries: number of retries
:param error_log: logger for errors
:param args: arguments to send to func
    :param kwargs: keyword arguments to send to func (if retries or
error_log are sent, they will be deleted from kwargs
before sending on to func)
:returns: result of func
:raises ClientException: all retries failed
"""
retries = kwargs.pop('retries', 5)
error_log = kwargs.pop('error_log', None)
attempts = 0
backoff = 1
while attempts <= retries:
attempts += 1
try:
return attempts, func(*args, **kwargs)
except (socket.error, HTTPException, Timeout) as err:
if error_log:
error_log(err)
if attempts > retries:
raise
except ClientException as err:
if error_log:
error_log(err)
if attempts > retries or not is_server_error(err.http_status) or \
err.http_status == HTTP_INSUFFICIENT_STORAGE:
raise
sleep(backoff)
backoff *= 2
# Shouldn't actually get down here, but just in case.
if args and 'ip' in args[0]:
raise ClientException('Raise too many retries',
http_host=args[0]['ip'],
http_port=args[0]['port'],
http_device=args[0]['device'])
else:
raise ClientException('Raise too many retries')
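# A usage sketch (hypothetical arguments, `logger` assumed): retry returns a
# tuple of (attempts used, result of func), retrying on socket, HTTP and
# Timeout errors as well as retryable server errors:
#
#   attempts, hdrs = retry(direct_head_object, node, part,
#                          'AUTH_test', 'cont', 'obj',
#                          retries=3, error_log=logger.error)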
def direct_get_recon(node, recon_command, conn_timeout=5, response_timeout=15,
headers=None):
"""
Get recon json directly from the storage server.
:param node: node dictionary from the ring
    :param recon_command: recon string (the path component after /recon/)
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param headers: dict to be passed into HTTPConnection headers
:returns: deserialized json response
:raises DirectClientReconException: HTTP GET request failed
"""
if headers is None:
headers = {}
ip, port = get_ip_port(node, headers)
path = '/recon/%s' % recon_command
with Timeout(conn_timeout):
conn = http_connect_raw(ip, port, 'GET', path,
headers=gen_headers(headers))
with Timeout(response_timeout):
resp = conn.getresponse()
if not is_success(resp.status):
raise DirectClientReconException('GET', node, path, resp)
return json.loads(resp.read())
| swift-master | swift/common/direct_client.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implementation of WSGI Request and Response objects.
This library has a very similar API to Webob. It wraps WSGI request
environments and response values into objects that are more friendly to
interact with.
Why Swob and not just use WebOb?
By Michael Barton
We used webob for years. The main problem was that the interface
wasn't stable. For a while, each of our several test suites required
a slightly different version of webob to run, and none of them worked
with the then-current version. It was a huge headache, so we just
scrapped it.
This is kind of a ton of code, but it's also been a huge relief to
not have to scramble to add a bunch of code branches all over the
place to keep Swift working every time webob decides some interface
needs to change.
"""
from collections import defaultdict
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping # py2
import time
from functools import partial
from datetime import datetime
from email.utils import parsedate
import re
import random
import functools
from io import BytesIO
import six
from six import StringIO
from six.moves import urllib
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import UTC, reiterate, split_path, Timestamp, pairs, \
close_if_possible, closing_if_possible, config_true_value, drain_and_close
from swift.common.exceptions import InvalidTimestamp
RESPONSE_REASONS = {
100: ('Continue', ''),
200: ('OK', ''),
201: ('Created', ''),
202: ('Accepted', 'The request is accepted for processing.'),
204: ('No Content', ''),
206: ('Partial Content', ''),
301: ('Moved Permanently', 'The resource has moved permanently.'),
302: ('Found', 'The resource has moved temporarily.'),
303: ('See Other', 'The response to the request can be found under a '
'different URI.'),
304: ('Not Modified', ''),
307: ('Temporary Redirect', 'The resource has moved temporarily.'),
400: ('Bad Request', 'The server could not comply with the request since '
'it is either malformed or otherwise incorrect.'),
401: ('Unauthorized', 'This server could not verify that you are '
'authorized to access the document you requested.'),
402: ('Payment Required', 'Access was denied for financial reasons.'),
403: ('Forbidden', 'Access was denied to this resource.'),
404: ('Not Found', 'The resource could not be found.'),
405: ('Method Not Allowed', 'The method is not allowed for this '
'resource.'),
406: ('Not Acceptable', 'The resource is not available in a format '
'acceptable to your browser.'),
408: ('Request Timeout', 'The server has waited too long for the request '
'to be sent by the client.'),
409: ('Conflict', 'There was a conflict when trying to complete '
'your request.'),
410: ('Gone', 'This resource is no longer available.'),
411: ('Length Required', 'Content-Length header required.'),
412: ('Precondition Failed', 'A precondition for this request was not '
'met.'),
413: ('Request Entity Too Large', 'The body of your request was too '
'large for this server.'),
414: ('Request URI Too Long', 'The request URI was too long for this '
'server.'),
415: ('Unsupported Media Type', 'The request media type is not '
'supported by this server.'),
416: ('Requested Range Not Satisfiable', 'The Range requested is not '
'available.'),
417: ('Expectation Failed', 'Expectation failed.'),
422: ('Unprocessable Entity', 'Unable to process the contained '
'instructions'),
499: ('Client Disconnect', 'The client was disconnected during request.'),
500: ('Internal Error', 'The server has either erred or is incapable of '
'performing the requested operation.'),
501: ('Not Implemented', 'The requested method is not implemented by '
'this server.'),
502: ('Bad Gateway', 'Bad gateway.'),
503: ('Service Unavailable', 'The server is currently unavailable. '
'Please try again at a later time.'),
504: ('Gateway Timeout', 'A timeout has occurred speaking to a '
'backend server.'),
507: ('Insufficient Storage', 'There was not enough space to save the '
'resource. Drive: %(drive)s'),
529: ('Too Many Backend Requests', 'The server is incapable of performing '
'the requested operation due to too many requests. Slow down.')
}
MAX_RANGE_OVERLAPS = 2
MAX_NONASCENDING_RANGES = 8
MAX_RANGES = 50
class WsgiBytesIO(BytesIO):
"""
This class adds support for the additional wsgi.input methods defined on
eventlet.wsgi.Input to the BytesIO class which would otherwise be a fine
stand-in for the file-like object in the WSGI environment.
"""
def set_hundred_continue_response_headers(self, headers):
pass
def send_hundred_continue_response(self):
pass
def _datetime_property(header):
"""
Set and retrieve the datetime value of self.headers[header]
(Used by both request and response)
The header is parsed on retrieval and a datetime object is returned.
The header can be set using a datetime, numeric value, or str.
If a value of None is given, the header is deleted.
:param header: name of the header, e.g. "Content-Length"
"""
def getter(self):
value = self.headers.get(header, None)
if value is not None:
try:
parts = parsedate(self.headers[header])[:7]
return datetime(*(parts + (UTC,)))
except Exception:
return None
def setter(self, value):
if isinstance(value, (float,) + six.integer_types):
self.headers[header] = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT", time.gmtime(value))
elif isinstance(value, datetime):
self.headers[header] = value.strftime("%a, %d %b %Y %H:%M:%S GMT")
else:
self.headers[header] = value
return property(getter, setter,
doc=("Retrieve and set the %s header as a datetime, "
"set it with a datetime, int, or str") % header)
def _header_property(header):
"""
Set and retrieve the value of self.headers[header]
(Used by both request and response)
If a value of None is given, the header is deleted.
:param header: name of the header, e.g. "Transfer-Encoding"
"""
def getter(self):
return self.headers.get(header, None)
def setter(self, value):
self.headers[header] = value
return property(getter, setter,
doc="Retrieve and set the %s header" % header)
def _header_int_property(header):
"""
Set and retrieve the value of self.headers[header]
(Used by both request and response)
On retrieval, it converts values to integers.
If a value of None is given, the header is deleted.
:param header: name of the header, e.g. "Content-Length"
"""
def getter(self):
val = self.headers.get(header, None)
if val is not None:
val = int(val)
return val
def setter(self, value):
self.headers[header] = value
return property(getter, setter,
doc="Retrieve and set the %s header as an int" % header)
def header_to_environ_key(header_name):
# Why the to/from wsgi dance? Headers that include something like b'\xff'
# on the wire get translated to u'\u00ff' on py3, which gets upper()ed to
# u'\u0178', which is nonsense in a WSGI string.
# Note that we have to only get as far as bytes because something like
# b'\xc3\x9f' on the wire would be u'\u00df' as a native string on py3,
# which would upper() to 'SS'.
real_header = wsgi_to_bytes(header_name)
header_name = 'HTTP_' + bytes_to_wsgi(
real_header.upper()).replace('-', '_')
if header_name == 'HTTP_CONTENT_LENGTH':
return 'CONTENT_LENGTH'
if header_name == 'HTTP_CONTENT_TYPE':
return 'CONTENT_TYPE'
return header_name
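# For example: header_to_environ_key('Content-Range') -> 'HTTP_CONTENT_RANGE',
# while 'Content-Length' and 'Content-Type' map to the bare 'CONTENT_LENGTH'
# and 'CONTENT_TYPE' keys, per the WSGI spec.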
class HeaderEnvironProxy(MutableMapping):
"""
A dict-like object that proxies requests to a wsgi environ,
rewriting header keys to environ keys.
For example, headers['Content-Range'] sets and gets the value of
headers.environ['HTTP_CONTENT_RANGE']
"""
def __init__(self, environ):
self.environ = environ
def __iter__(self):
for k in self.keys():
yield k
def __len__(self):
return len(self.keys())
def __getitem__(self, key):
return self.environ[header_to_environ_key(key)]
def __setitem__(self, key, value):
if value is None:
self.environ.pop(header_to_environ_key(key), None)
elif six.PY2 and isinstance(value, six.text_type):
self.environ[header_to_environ_key(key)] = value.encode('utf-8')
elif not six.PY2 and isinstance(value, six.binary_type):
self.environ[header_to_environ_key(key)] = value.decode('latin1')
else:
self.environ[header_to_environ_key(key)] = str(value)
def __contains__(self, key):
return header_to_environ_key(key) in self.environ
def __delitem__(self, key):
del self.environ[header_to_environ_key(key)]
def keys(self):
# See the to/from WSGI comment in header_to_environ_key
keys = [
bytes_to_wsgi(wsgi_to_bytes(key[5:]).replace(b'_', b'-').title())
for key in self.environ if key.startswith('HTTP_')]
if 'CONTENT_LENGTH' in self.environ:
keys.append('Content-Length')
if 'CONTENT_TYPE' in self.environ:
keys.append('Content-Type')
return keys
def wsgi_to_bytes(wsgi_str):
if wsgi_str is None:
return None
if six.PY2:
return wsgi_str
return wsgi_str.encode('latin1')
def wsgi_to_str(wsgi_str):
if wsgi_str is None:
return None
if six.PY2:
return wsgi_str
return wsgi_to_bytes(wsgi_str).decode('utf8', errors='surrogateescape')
def bytes_to_wsgi(byte_str):
if six.PY2:
return byte_str
return byte_str.decode('latin1')
def str_to_wsgi(native_str):
if six.PY2:
return native_str
return bytes_to_wsgi(native_str.encode('utf8', errors='surrogateescape'))
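# On py3 these helpers round-trip the "WSGI string" encoding: a UTF-8 name
# such as u'\u2603' (SNOWMAN) becomes the WSGI string '\xe2\x98\x83' via
# str_to_wsgi() and is recovered by wsgi_to_str(); on py2 both are no-ops.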
def wsgi_quote(wsgi_str, safe='/'):
if six.PY2:
if not isinstance(wsgi_str, bytes):
raise TypeError('Expected a WSGI string; got %r' % wsgi_str)
return urllib.parse.quote(wsgi_str, safe=safe)
if not isinstance(wsgi_str, str) or any(ord(x) > 255 for x in wsgi_str):
raise TypeError('Expected a WSGI string; got %r' % wsgi_str)
return urllib.parse.quote(wsgi_str, safe=safe, encoding='latin-1')
def wsgi_unquote(wsgi_str):
if six.PY2:
if not isinstance(wsgi_str, bytes):
raise TypeError('Expected a WSGI string; got %r' % wsgi_str)
return urllib.parse.unquote(wsgi_str)
if not isinstance(wsgi_str, str) or any(ord(x) > 255 for x in wsgi_str):
raise TypeError('Expected a WSGI string; got %r' % wsgi_str)
return urllib.parse.unquote(wsgi_str, encoding='latin-1')
def wsgi_quote_plus(wsgi_str):
if six.PY2:
if not isinstance(wsgi_str, bytes):
raise TypeError('Expected a WSGI string; got %r' % wsgi_str)
return urllib.parse.quote_plus(wsgi_str)
if not isinstance(wsgi_str, str) or any(ord(x) > 255 for x in wsgi_str):
raise TypeError('Expected a WSGI string; got %r' % wsgi_str)
return urllib.parse.quote_plus(wsgi_str, encoding='latin-1')
def wsgi_unquote_plus(wsgi_str):
if six.PY2:
if not isinstance(wsgi_str, bytes):
raise TypeError('Expected a WSGI string; got %r' % wsgi_str)
return urllib.parse.unquote_plus(wsgi_str)
if not isinstance(wsgi_str, str) or any(ord(x) > 255 for x in wsgi_str):
raise TypeError('Expected a WSGI string; got %r' % wsgi_str)
return urllib.parse.unquote_plus(wsgi_str, encoding='latin-1')
def _resp_status_property():
"""
Set and retrieve the value of Response.status
On retrieval, it concatenates status_int and title.
When set to a str, it splits status_int and title apart.
When set to an integer, retrieves the correct title for that
response code from the RESPONSE_REASONS dict.
"""
def getter(self):
return '%s %s' % (self.status_int, self.title)
def setter(self, value):
if isinstance(value, six.integer_types):
self.status_int = value
self.explanation = self.title = RESPONSE_REASONS[value][0]
else:
self.status_int = int(value.split(' ', 1)[0])
self.explanation = self.title = value.split(' ', 1)[1]
return property(getter, setter,
doc="Retrieve and set the Response status, e.g. '200 OK'")
def _resp_body_property():
"""
Set and retrieve the value of Response.body
If necessary, it will consume Response.app_iter to create a body.
    On assignment, only bytes are accepted (a TypeError is raised for unicode
    values), and the content-length is set to the length of the bytes.
"""
def getter(self):
if not self._body:
if not self._app_iter:
return b''
with closing_if_possible(self._app_iter):
self._body = b''.join(self._app_iter)
self._app_iter = None
return self._body
def setter(self, value):
if isinstance(value, six.text_type):
raise TypeError('WSGI responses must be bytes')
if isinstance(value, six.binary_type):
self.content_length = len(value)
close_if_possible(self._app_iter)
self._app_iter = None
self._body = value
return property(getter, setter,
doc="Retrieve and set the Response body str")
def _resp_etag_property():
"""
Set and retrieve Response.etag
This may be broken for etag use cases other than Swift's.
Quotes strings when assigned and unquotes when read, for compatibility
with webob.
"""
def getter(self):
etag = self.headers.get('etag', None)
if etag:
etag = etag.replace('"', '')
return etag
def setter(self, value):
if value is None:
self.headers['etag'] = None
else:
self.headers['etag'] = '"%s"' % value
return property(getter, setter,
doc="Retrieve and set the response Etag header")
def _resp_content_type_property():
"""
Set and retrieve Response.content_type
Strips off any charset when retrieved -- that is accessible
via Response.charset.
"""
def getter(self):
if 'content-type' in self.headers:
return self.headers.get('content-type').split(';')[0]
def setter(self, value):
self.headers['content-type'] = value
return property(getter, setter,
doc="Retrieve and set the response Content-Type header")
def _resp_charset_property():
"""
Set and retrieve Response.charset
On retrieval, separates the charset from the content-type.
On assignment, removes any existing charset from the content-type and
appends the new one.
"""
def getter(self):
if '; charset=' in self.headers['content-type']:
return self.headers['content-type'].split('; charset=')[1]
def setter(self, value):
if 'content-type' in self.headers:
self.headers['content-type'] = self.headers['content-type'].split(
';')[0]
if value:
self.headers['content-type'] += '; charset=' + value
return property(getter, setter,
doc="Retrieve and set the response charset")
def _resp_app_iter_property():
"""
Set and retrieve Response.app_iter
Mostly a pass-through to Response._app_iter; it's a property so it can zero
out an existing content-length on assignment.
"""
def getter(self):
return self._app_iter
def setter(self, value):
if isinstance(value, (list, tuple)):
for i, item in enumerate(value):
if not isinstance(item, bytes):
raise TypeError('WSGI responses must be bytes; '
'got %s for item %d' % (type(item), i))
self.content_length = sum(map(len, value))
elif value is not None:
self.content_length = None
self._body = None
close_if_possible(self._app_iter)
self._app_iter = value
return property(getter, setter,
doc="Retrieve and set the response app_iter")
def _req_fancy_property(cls, header, even_if_nonexistent=False):
"""
Set and retrieve "fancy" properties.
On retrieval, these properties return a class that takes the value of the
header as the only argument to their constructor.
For assignment, those classes should implement a __str__ that converts them
back to their header values.
:param header: name of the header, e.g. "Accept"
:param even_if_nonexistent: Return a value even if the header does not
exist. Classes using this should be prepared to accept None as a
parameter.
"""
def getter(self):
try:
if header in self.headers or even_if_nonexistent:
return cls(self.headers.get(header))
except ValueError:
return None
def setter(self, value):
self.headers[header] = value
return property(getter, setter, doc=("Retrieve and set the %s "
"property in the WSGI environ, as a %s object") %
(header, cls.__name__))
class Range(object):
"""
Wraps a Request's Range header as a friendly object.
After initialization, "range.ranges" is populated with a list
of (start, end) tuples denoting the requested ranges.
If there were any syntactically-invalid byte-range-spec values, the
constructor will raise a ValueError, per the relevant RFC:
"The recipient of a byte-range-set that includes one or more syntactically
invalid byte-range-spec values MUST ignore the header field that includes
that byte-range-set."
    According to RFC 2616, the following cases are all considered
    syntactically invalid; if the range value contains at least one of them,
    the entire header is considered invalid and a ValueError is thrown so
    that the header will be ignored.
    1. the value does not start with bytes=
    2. a range's start is greater than its end, e.g. bytes=5-3
    3. a range has neither start nor end, e.g. bytes=-
    4. a range has no hyphen, e.g. bytes=45
    5. a range value is non-numeric
    6. any combination of the above
Every syntactically valid range will be added into the ranges list
even when some of the ranges may not be satisfied by underlying content.
:param headerval: value of the header as a str
"""
def __init__(self, headerval):
if not headerval:
raise ValueError('Invalid Range header: %r' % headerval)
headerval = headerval.replace(' ', '')
if not headerval.lower().startswith('bytes='):
raise ValueError('Invalid Range header: %s' % headerval)
self.ranges = []
for rng in headerval[6:].split(','):
# Check if the range has required hyphen.
if rng.find('-') == -1:
raise ValueError('Invalid Range header: %s' % headerval)
start, end = rng.split('-', 1)
if start:
# when start contains non numeric value, this also causes
# ValueError
start = int(start)
else:
start = None
if end:
# We could just rely on int() raising the ValueError, but
# this catches things like '--0'
if not end.isdigit():
raise ValueError('Invalid Range header: %s' % headerval)
end = int(end)
if end < 0:
raise ValueError('Invalid Range header: %s' % headerval)
elif start is not None and end < start:
raise ValueError('Invalid Range header: %s' % headerval)
else:
end = None
if start is None:
raise ValueError('Invalid Range header: %s' % headerval)
self.ranges.append((start, end))
def __str__(self):
string = 'bytes='
for i, (start, end) in enumerate(self.ranges):
if start is not None:
string += str(start)
string += '-'
if end is not None:
string += str(end)
if i < len(self.ranges) - 1:
string += ','
return string
def ranges_for_length(self, length):
"""
This method is used to return multiple ranges for a given length
which should represent the length of the underlying content.
        The constructor (__init__) already ensured that every range in the
        ranges list is syntactically valid. So if length is None or the
        ranges list is empty, the Range header should be ignored, which will
        eventually result in a 200 response.
        If this method returns an empty list, it indicates that only
        unsatisfiable ranges were found in the Range header, and a 416 will
        be returned.
        If the returned list has at least one element, at least one range is
        valid and the server should serve the request with a 206 status
        code.
The start value of each range represents the starting position in
the content, the end value represents the ending position. This
method purposely adds 1 to the end number because the spec defines
the Range to be inclusive.
The Range spec can be found at the following link:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.1
:param length: length of the underlying content
"""
# not syntactically valid ranges, must ignore
if length is None or not self.ranges or self.ranges == []:
return None
all_ranges = []
for single_range in self.ranges:
begin, end = single_range
# The possible values for begin and end are
# None, 0, or a positive numeric number
if begin is None:
if end == 0:
# this is the bytes=-0 case
continue
elif end > length:
# This is the case where the end is greater than the
# content length, as the RFC 2616 stated, the entire
# content should be returned.
all_ranges.append((0, length))
else:
all_ranges.append((length - end, length))
continue
# begin can only be 0 and numeric value from this point on
if end is None:
if begin < length:
all_ranges.append((begin, length))
else:
# the begin position is greater than or equal to the
# content length; skip and move on to the next range
continue
# end can only be 0 or numeric value
elif begin < length:
# the begin position is valid, take the min of end + 1 or
# the total length of the content
all_ranges.append((begin, min(end + 1, length)))
# RFC 7233 section 6.1 ("Denial-of-Service Attacks Using Range") says:
#
# Unconstrained multiple range requests are susceptible to denial-of-
# service attacks because the effort required to request many
# overlapping ranges of the same data is tiny compared to the time,
# memory, and bandwidth consumed by attempting to serve the requested
# data in many parts. Servers ought to ignore, coalesce, or reject
# egregious range requests, such as requests for more than two
# overlapping ranges or for many small ranges in a single set,
# particularly when the ranges are requested out of order for no
# apparent reason. Multipart range requests are not designed to
# support random access.
#
# We're defining "egregious" here as:
#
# * more than 50 requested ranges OR
# * more than 2 overlapping ranges OR
# * more than 8 non-ascending-order ranges
if len(all_ranges) > MAX_RANGES:
return []
overlaps = 0
for ((start1, end1), (start2, end2)) in pairs(all_ranges):
if ((start1 < start2 < end1) or (start1 < end2 < end1) or
(start2 < start1 < end2) or (start2 < end1 < end2)):
overlaps += 1
if overlaps > MAX_RANGE_OVERLAPS:
return []
ascending = True
for start1, start2 in zip(all_ranges, all_ranges[1:]):
if start1 > start2:
ascending = False
break
if not ascending and len(all_ranges) >= MAX_NONASCENDING_RANGES:
return []
return all_ranges
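# A worked example (hypothetical header): Range('bytes=0-99,-50') yields
# ranges == [(0, 99), (None, 50)]; ranges_for_length(200) returns
# [(0, 100), (150, 200)]: end values are exclusive here, and the suffix
# range -50 resolves to the last 50 bytes of the content.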
def normalize_etag(tag):
if tag and tag.startswith('"') and tag.endswith('"') and tag != '"':
return tag[1:-1]
return tag
class Match(object):
"""
Wraps a Request's If-[None-]Match header as a friendly object.
:param headerval: value of the header as a str
"""
def __init__(self, headerval):
self.tags = set()
for tag in headerval.split(','):
tag = tag.strip()
if not tag:
continue
self.tags.add(normalize_etag(tag))
def __contains__(self, val):
return '*' in self.tags or normalize_etag(val) in self.tags
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__, ', '.join(sorted(self.tags)))
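# For example (hypothetical etags): 'abc' in Match('"abc", "xyz"') is True,
# as is '"abc"' in Match('abc'); any value is contained in Match('*').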
class Accept(object):
"""
Wraps a Request's Accept header as a friendly object.
:param headerval: value of the header as a str
"""
# RFC 2616 section 2.2
token = r'[^()<>@,;:\"/\[\]?={}\x00-\x20\x7f]+'
qdtext = r'[^"]'
quoted_pair = r'(?:\\.)'
quoted_string = r'"(?:' + qdtext + r'|' + quoted_pair + r')*"'
extension = (r'(?:\s*;\s*(?:' + token + r")\s*=\s*" + r'(?:' + token +
r'|' + quoted_string + r'))')
acc = (r'^\s*(' + token + r')/(' + token +
r')(' + extension + r'*?\s*)$')
acc_pattern = re.compile(acc)
def __init__(self, headerval):
self.headerval = headerval
def _get_types(self):
types = []
if not self.headerval:
return []
for typ in self.headerval.split(','):
type_parms = self.acc_pattern.findall(typ)
if not type_parms:
raise ValueError('Invalid accept header')
typ, subtype, parms = type_parms[0]
parms = [p.strip() for p in parms.split(';') if p.strip()]
seen_q_already = False
quality = 1.0
for parm in parms:
name, value = parm.split('=')
name = name.strip()
value = value.strip()
if name == 'q':
if seen_q_already:
raise ValueError('Multiple "q" params')
seen_q_already = True
quality = float(value)
pattern = '^' + \
(self.token if typ == '*' else re.escape(typ)) + '/' + \
(self.token if subtype == '*' else re.escape(subtype)) + '$'
types.append((pattern, quality, '*' not in (typ, subtype)))
# sort candidates by quality, then whether or not there were globs
types.sort(reverse=True, key=lambda t: (t[1], t[2]))
return [t[0] for t in types]
def best_match(self, options):
"""
Returns the item from "options" that best matches the accept header.
Returns None if no available options are acceptable to the client.
:param options: a list of content-types the server can respond with
:raises ValueError: if the header is malformed
"""
types = self._get_types()
if not types and options:
return options[0]
for pattern in types:
for option in options:
if re.match(pattern, option):
return option
return None
def __repr__(self):
return self.headerval
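# For example (hypothetical header value):
#
#   Accept('text/plain; q=0.5, application/json').best_match(
#       ['text/plain', 'application/xml', 'application/json'])
#
# returns 'application/json', since it carries the higher (default 1.0)
# quality; with an empty Accept header the first option is returned.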
def _req_environ_property(environ_field, is_wsgi_string_field=True):
"""
Set and retrieve value of the environ_field entry in self.environ.
(Used by Request)
"""
def getter(self):
return self.environ.get(environ_field, None)
def setter(self, value):
if six.PY2:
if isinstance(value, six.text_type):
self.environ[environ_field] = value.encode('utf-8')
else:
self.environ[environ_field] = value
else:
if is_wsgi_string_field:
# Check that input is valid before setting
if isinstance(value, str):
value.encode('latin1').decode('utf-8')
if isinstance(value, bytes):
value = value.decode('latin1')
self.environ[environ_field] = value
return property(getter, setter, doc=("Get and set the %s property "
"in the WSGI environment") % environ_field)
def _req_body_property():
"""
Set and retrieve the Request.body parameter. It consumes wsgi.input and
returns the results. On assignment, uses a WsgiBytesIO to create a new
wsgi.input.
"""
def getter(self):
body = self.environ['wsgi.input'].read()
self.environ['wsgi.input'] = WsgiBytesIO(body)
return body
def setter(self, value):
if not isinstance(value, six.binary_type):
value = value.encode('utf8')
self.environ['wsgi.input'] = WsgiBytesIO(value)
self.environ['CONTENT_LENGTH'] = str(len(value))
return property(getter, setter, doc="Get and set the request body str")
def _host_url_property():
"""
Retrieves the best guess that can be made for an absolute location up to
the path, for example: https://host.com:1234
"""
def getter(self):
if 'HTTP_HOST' in self.environ:
host = self.environ['HTTP_HOST']
else:
host = '%s:%s' % (self.environ['SERVER_NAME'],
self.environ['SERVER_PORT'])
scheme = self.environ.get('wsgi.url_scheme', 'http')
if scheme == 'http' and host.endswith(':80'):
host, port = host.rsplit(':', 1)
elif scheme == 'https' and host.endswith(':443'):
host, port = host.rsplit(':', 1)
return '%s://%s' % (scheme, host)
return property(getter, doc="Get url for request/response up to path")
def is_chunked(headers):
te = None
for key in headers:
if key.lower() == 'transfer-encoding':
te = headers.get(key)
if te:
encodings = te.split(',')
if len(encodings) > 1:
raise AttributeError('Unsupported Transfer-Coding header'
' value specified in Transfer-Encoding'
' header')
# If there are more than one transfer encoding value, the last
# one must be chunked, see RFC 2616 Sec. 3.6
if encodings[-1].lower() == 'chunked':
return True
else:
raise ValueError('Invalid Transfer-Encoding header value')
else:
return False
class Request(object):
"""
WSGI Request object.
"""
range = _req_fancy_property(Range, 'range')
if_none_match = _req_fancy_property(Match, 'if-none-match')
accept = _req_fancy_property(Accept, 'accept', True)
method = _req_environ_property('REQUEST_METHOD')
referrer = referer = _req_environ_property('HTTP_REFERER')
script_name = _req_environ_property('SCRIPT_NAME')
path_info = _req_environ_property('PATH_INFO')
host = _req_environ_property('HTTP_HOST')
host_url = _host_url_property()
remote_addr = _req_environ_property('REMOTE_ADDR')
remote_user = _req_environ_property('REMOTE_USER')
user_agent = _req_environ_property('HTTP_USER_AGENT')
query_string = _req_environ_property('QUERY_STRING')
if_match = _req_fancy_property(Match, 'if-match')
body_file = _req_environ_property('wsgi.input',
is_wsgi_string_field=False)
content_length = _header_int_property('content-length')
if_modified_since = _datetime_property('if-modified-since')
if_unmodified_since = _datetime_property('if-unmodified-since')
body = _req_body_property()
charset = None
_params_cache = None
_timestamp = None
acl = _req_environ_property('swob.ACL', is_wsgi_string_field=False)
def __init__(self, environ):
self.environ = environ
self.headers = HeaderEnvironProxy(self.environ)
@classmethod
def blank(cls, path, environ=None, headers=None, body=None, **kwargs):
"""
Create a new request object with the given parameters, and an
environment otherwise filled in with non-surprising default values.
:param path: encoded, parsed, and unquoted into PATH_INFO
:param environ: WSGI environ dictionary
:param headers: HTTP headers
:param body: stuffed in a WsgiBytesIO and hung on wsgi.input
        :param kwargs: any environ key with a property setter
"""
headers = headers or {}
environ = environ or {}
if six.PY2:
if isinstance(path, six.text_type):
path = path.encode('utf-8')
else:
if isinstance(path, six.binary_type):
path = path.decode('latin1')
else:
# Check that the input is valid
path.encode('latin1')
parsed_path = urllib.parse.urlparse(path)
server_name = 'localhost'
if parsed_path.netloc:
server_name = parsed_path.netloc.split(':', 1)[0]
server_port = parsed_path.port
if server_port is None:
server_port = {'http': 80,
'https': 443}.get(parsed_path.scheme, 80)
if parsed_path.scheme and parsed_path.scheme not in ['http', 'https']:
raise TypeError('Invalid scheme: %s' % parsed_path.scheme)
env = {
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'QUERY_STRING': parsed_path.query,
'PATH_INFO': wsgi_unquote(parsed_path.path),
'SERVER_NAME': server_name,
'SERVER_PORT': str(server_port),
'HTTP_HOST': '%s:%d' % (server_name, server_port),
'SERVER_PROTOCOL': 'HTTP/1.0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': parsed_path.scheme or 'http',
'wsgi.errors': StringIO(),
'wsgi.multithread': False,
'wsgi.multiprocess': False
}
env.update(environ)
if body is not None:
if not isinstance(body, six.binary_type):
body = body.encode('utf8')
env['wsgi.input'] = WsgiBytesIO(body)
env['CONTENT_LENGTH'] = str(len(body))
elif 'wsgi.input' not in env:
env['wsgi.input'] = WsgiBytesIO()
req = Request(env)
for key, val in headers.items():
req.headers[key] = val
for key, val in kwargs.items():
prop = getattr(Request, key, None)
if prop and isinstance(prop, property):
try:
setattr(req, key, val)
except AttributeError:
pass
else:
continue
raise TypeError("got unexpected keyword argument %r" % key)
return req
@property
def params(self):
"Provides QUERY_STRING parameters as a dictionary"
if self._params_cache is None:
if 'QUERY_STRING' in self.environ:
if six.PY2:
self._params_cache = dict(urllib.parse.parse_qsl(
self.environ['QUERY_STRING'], True))
else:
self._params_cache = dict(urllib.parse.parse_qsl(
self.environ['QUERY_STRING'],
keep_blank_values=True, encoding='latin-1'))
else:
self._params_cache = {}
return self._params_cache
str_params = params
@params.setter
def params(self, param_pairs):
self._params_cache = None
if six.PY2:
self.query_string = urllib.parse.urlencode(param_pairs)
else:
self.query_string = urllib.parse.urlencode(param_pairs,
encoding='latin-1')
def ensure_x_timestamp(self):
"""
Similar to :attr:`timestamp`, but the ``X-Timestamp`` header will be
set if not present.
:raises HTTPBadRequest: if X-Timestamp is already set but not a valid
:class:`~swift.common.utils.Timestamp`
:returns: the request's X-Timestamp header,
as a :class:`~swift.common.utils.Timestamp`
"""
# The container sync feature includes an x-timestamp header with
# requests. If present this is checked and preserved, otherwise a fresh
# timestamp is added.
if 'HTTP_X_TIMESTAMP' in self.environ:
try:
self._timestamp = Timestamp(self.environ['HTTP_X_TIMESTAMP'])
except ValueError:
raise HTTPBadRequest(
request=self, content_type='text/plain',
body='X-Timestamp should be a UNIX timestamp float value; '
'was %r' % self.environ['HTTP_X_TIMESTAMP'])
else:
self._timestamp = Timestamp.now()
# Always normalize it to the internal form
self.environ['HTTP_X_TIMESTAMP'] = self._timestamp.internal
return self._timestamp
@property
def timestamp(self):
"""
Provides HTTP_X_TIMESTAMP as a :class:`~swift.common.utils.Timestamp`
"""
if self._timestamp is None:
try:
raw_timestamp = self.environ['HTTP_X_TIMESTAMP']
except KeyError:
raise InvalidTimestamp('Missing X-Timestamp header')
try:
self._timestamp = Timestamp(raw_timestamp)
except ValueError:
raise InvalidTimestamp('Invalid X-Timestamp header')
return self._timestamp
@property
def path_qs(self):
"""The path of the request, without host but with query string."""
path = self.path
if self.query_string:
path += '?' + self.query_string
return path
@property
def path(self):
"Provides the full path of the request, excluding the QUERY_STRING"
return wsgi_quote(self.environ.get('SCRIPT_NAME', '') +
self.environ['PATH_INFO'])
@property
def swift_entity_path(self):
"""
Provides the (native string) account/container/object path,
sans API version.
This can be useful when constructing a path to send to a backend
server, as that path will need everything after the "/v1".
"""
_ver, entity_path = self.split_path(1, 2, rest_with_last=True)
if entity_path is not None:
return '/' + wsgi_to_str(entity_path)
@property
def is_chunked(self):
return is_chunked(self.headers)
@property
def url(self):
"Provides the full url of the request"
return self.host_url + self.path_qs
@property
def allow_reserved_names(self):
return config_true_value(self.environ.get(
'HTTP_X_BACKEND_ALLOW_RESERVED_NAMES'))
def as_referer(self):
return self.method + ' ' + self.url
def path_info_pop(self):
"""
Takes one path portion (delineated by slashes) from the
path_info, and appends it to the script_name. Returns
the path segment.
"""
path_info = self.path_info
if not path_info or not path_info.startswith('/'):
return None
try:
slash_loc = path_info.index('/', 1)
except ValueError:
slash_loc = len(path_info)
self.script_name += path_info[:slash_loc]
self.path_info = path_info[slash_loc:]
return path_info[1:slash_loc]
def copy_get(self):
"""
Makes a copy of the request, converting it to a GET.
"""
env = self.environ.copy()
env.update({
'REQUEST_METHOD': 'GET',
'CONTENT_LENGTH': '0',
'wsgi.input': WsgiBytesIO(),
})
return Request(env)
def call_application(self, application):
"""
Calls the application with this request's environment. Returns the
status, headers, and app_iter for the response as a tuple.
:param application: the WSGI application to call
"""
output = []
captured = []
def start_response(status, headers, exc_info=None):
captured[:] = [status, headers, exc_info]
return output.append
app_iter = application(self.environ, start_response)
if not app_iter:
app_iter = output
if not captured:
app_iter = reiterate(app_iter)
if not captured:
raise RuntimeError('application never called start_response')
return (captured[0], captured[1], app_iter)
def get_response(self, application):
"""
Calls the application with this request's environment. Returns a
Response object that wraps up the application's result.
:param application: the WSGI application to call
"""
status, headers, app_iter = self.call_application(application)
return Response(status=status, headers=dict(headers),
app_iter=app_iter, request=self)
def split_path(self, minsegs=1, maxsegs=None, rest_with_last=False):
"""
Validate and split the Request's path.
**Examples**::
['a'] = split_path('/a')
['a', None] = split_path('/a', 1, 2)
['a', 'c'] = split_path('/a/c', 1, 2)
['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True)
:param minsegs: Minimum number of segments to be extracted
:param maxsegs: Maximum number of segments to be extracted
:param rest_with_last: If True, trailing data will be returned as part
of last segment. If False, and there is
trailing data, raises ValueError.
:returns: list of segments with a length of maxsegs (non-existent
segments will return as None)
:raises ValueError: if given an invalid path
"""
return split_path(
self.environ.get('SCRIPT_NAME', '') + self.environ['PATH_INFO'],
minsegs, maxsegs, rest_with_last)
def message_length(self):
"""
Properly determine the message length for this request. It will return
an integer if the headers explicitly contain the message length, or
None if the headers don't contain a length. The ValueError exception
will be raised if the headers are invalid.
:raises ValueError: if either transfer-encoding or content-length
headers have bad values
:raises AttributeError: if the last value of the transfer-encoding
header is not "chunked"
"""
if not is_chunked(self.headers):
# Because we are not using chunked transfer encoding we can pay
# attention to the content-length header.
fsize = self.headers.get('content-length', None)
if fsize is not None:
try:
fsize = int(fsize)
except ValueError:
raise ValueError('Invalid Content-Length header value')
else:
fsize = None
return fsize
def content_range_header_value(start, stop, size):
return 'bytes %s-%s/%s' % (start, (stop - 1), size)
def content_range_header(start, stop, size):
value = content_range_header_value(start, stop, size)
return b"Content-Range: " + value.encode('ascii')
def multi_range_iterator(ranges, content_type, boundary, size, sub_iter_gen):
for start, stop in ranges:
yield b''.join([b'--', boundary, b'\r\n',
b'Content-Type: ', content_type, b'\r\n'])
yield content_range_header(start, stop, size) + b'\r\n\r\n'
sub_iter = sub_iter_gen(start, stop)
for chunk in sub_iter:
yield chunk
yield b'\r\n'
yield b'--' + boundary + b'--'
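# Illustrative sketch (editor's addition, not part of the original module):
# content_range_header_value(0, 1024, 4096) evaluates to 'bytes 0-1023/4096',
# and multi_range_iterator() emits one such MIME section per range, e.g.
#
#     def _slicer(start, stop):
#         yield b'0123456789'[start:stop]
#     payload = b''.join(multi_range_iterator(
#         [(0, 3), (7, 10)], b'text/plain', b'deadbeef', 10, _slicer))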
class Response(object):
"""
WSGI Response object.
"""
content_length = _header_int_property('content-length')
content_type = _resp_content_type_property()
content_range = _header_property('content-range')
etag = _resp_etag_property()
status = _resp_status_property()
status_int = None
body = _resp_body_property()
host_url = _host_url_property()
last_modified = _datetime_property('last-modified')
location = _header_property('location')
accept_ranges = _header_property('accept-ranges')
charset = _resp_charset_property()
app_iter = _resp_app_iter_property()
def __init__(self, body=None, status=200, headers=None, app_iter=None,
request=None, conditional_response=False,
conditional_etag=None, **kw):
self.headers = HeaderKeyDict(
[('Content-Type', 'text/html; charset=UTF-8')])
self.conditional_response = conditional_response
self._conditional_etag = conditional_etag
self.request = request
self._app_iter = None
# Allow error messages to come as natural strings on py3.
if isinstance(body, six.text_type):
body = body.encode('utf8')
self.body = body
self.app_iter = app_iter
self.response_iter = None
self.status = status
self.boundary = b"%.32x" % random.randint(0, 256 ** 16)
if request:
self.environ = request.environ
else:
self.environ = {}
if headers:
if self._body and 'Content-Length' in headers:
# If body is not empty, prioritize actual body length over
# content_length in headers
del headers['Content-Length']
self.headers.update(headers)
if self.status_int == 401 and 'www-authenticate' not in self.headers:
self.headers.update({'www-authenticate': self.www_authenticate()})
for key, value in kw.items():
setattr(self, key, value)
# When specifying both 'content_type' and 'charset' in the kwargs,
# charset needs to be applied *after* content_type, otherwise charset
# can get wiped out when content_type sorts later in dict order.
if 'charset' in kw and 'content_type' in kw:
self.charset = kw['charset']
@property
def conditional_etag(self):
"""
The conditional_etag keyword argument for Response will allow the
        conditional match value of an If-Match request to be compared to a
        non-standard value.
        This is available for Storage Policies that do not store the client
        object data verbatim on the storage nodes, but still need to support
conditional requests.
It's most effectively used with X-Backend-Etag-Is-At which would
define the additional Metadata key(s) where the original ETag of the
clear-form client request data may be found.
"""
if self._conditional_etag is not None:
return self._conditional_etag
else:
return self.etag
def _prepare_for_ranges(self, ranges):
"""
Prepare the Response for multiple ranges.
"""
content_size = self.content_length
content_type = self.headers['content-type'].encode('utf8')
self.content_type = b''.join([b'multipart/byteranges;',
b'boundary=', self.boundary])
# This section calculates the total size of the response.
section_header_fixed_len = sum([
# --boundary\r\n
2, len(self.boundary), 2,
# Content-Type: <type>\r\n
len('Content-Type: '), len(content_type), 2,
# Content-Range: <value>\r\n; <value> accounted for later
len('Content-Range: '), 2,
# \r\n at end of headers
2])
body_size = 0
for start, end in ranges:
body_size += section_header_fixed_len
# length of the value of Content-Range, not including the \r\n
# since that's already accounted for
cr = content_range_header_value(start, end, content_size)
body_size += len(cr)
# the actual bytes (note: this range is half-open, i.e. begins
# with byte <start> and ends with byte <end - 1>, so there's no
# fencepost error here)
body_size += (end - start)
# \r\n prior to --boundary
body_size += 2
# --boundary-- terminates the message
body_size += len(self.boundary) + 4
self.content_length = body_size
self.content_range = None
return content_size, content_type
def _get_conditional_response_status(self):
"""Checks for a conditional response from an If-Match
or If-Modified. request. If so, returns the correct status code
(304 or 412).
:returns: conditional response status (304 or 412) or None
"""
if self.conditional_etag and self.request.if_none_match and \
self.conditional_etag in self.request.if_none_match:
return 304
if self.conditional_etag and self.request.if_match and \
self.conditional_etag not in self.request.if_match:
return 412
if self.status_int == 404 and self.request.if_match \
and '*' in self.request.if_match:
# If none of the entity tags match, or if "*" is given and no
# current entity exists, the server MUST NOT perform the
# requested method, and MUST return a 412 (Precondition
# Failed) response. [RFC 2616 section 14.24]
return 412
if self.last_modified and self.request.if_modified_since \
and self.last_modified <= self.request.if_modified_since:
return 304
if self.last_modified and self.request.if_unmodified_since \
and self.last_modified > self.request.if_unmodified_since:
return 412
return None
def _response_iter(self, app_iter, body):
if self.conditional_response and self.request:
empty_resp = self._get_conditional_response_status()
if empty_resp is not None:
self.status = empty_resp
self.content_length = 0
close_if_possible(app_iter)
return [b'']
if self.request and self.request.method == 'HEAD':
# We explicitly do NOT want to set self.content_length to 0 here
drain_and_close(app_iter) # be friendly to our app_iter
return [b'']
if self.conditional_response and self.request and \
self.request.range and self.request.range.ranges and \
not self.content_range:
ranges = self.request.range.ranges_for_length(self.content_length)
if ranges == []:
self.status = 416
close_if_possible(app_iter)
self.headers['Content-Range'] = \
'bytes */%d' % self.content_length
# Setting body + app_iter to None makes us emit the default
# body text from RESPONSE_REASONS.
body = None
app_iter = None
elif self.content_length == 0:
# If ranges_for_length found ranges but our content length
# is 0, then that means we got a suffix-byte-range request
# (e.g. "bytes=-512"). This is asking for *up to* the last N
# bytes of the file. If we had any bytes to send at all,
# we'd return a 206 with an appropriate Content-Range header,
# but we can't construct a Content-Range header because we
# have no byte indices because we have no bytes.
#
# The only reasonable thing to do is to return a 200 with
# the whole object (all zero bytes of it). This is also what
# Apache and Nginx do, so if we're wrong, at least we're in
# good company.
pass
elif ranges:
range_size = len(ranges)
if range_size > 0:
# There is at least one valid range in the request, so try
# to satisfy the request
if range_size == 1:
start, end = ranges[0]
if app_iter and hasattr(app_iter, 'app_iter_range'):
self.status = 206
self.content_range = content_range_header_value(
start, end, self.content_length)
self.content_length = (end - start)
return app_iter.app_iter_range(start, end)
elif body:
self.status = 206
self.content_range = content_range_header_value(
start, end, self.content_length)
self.content_length = (end - start)
return [body[start:end]]
elif range_size > 1:
if app_iter and hasattr(app_iter, 'app_iter_ranges'):
self.status = 206
content_size, content_type = \
self._prepare_for_ranges(ranges)
return app_iter.app_iter_ranges(ranges,
content_type,
self.boundary,
content_size)
elif body:
self.status = 206
content_size, content_type, = \
self._prepare_for_ranges(ranges)
def _body_slicer(start, stop):
yield body[start:stop]
return multi_range_iterator(ranges, content_type,
self.boundary,
content_size,
_body_slicer)
if app_iter:
return app_iter
if body is not None:
return [body]
if self.status_int in RESPONSE_REASONS:
title, exp = RESPONSE_REASONS[self.status_int]
if exp:
body = '<html><h1>%s</h1><p>%s</p></html>' % (
title,
exp % defaultdict(lambda: 'unknown', self.__dict__))
body = body.encode('utf8')
self.content_length = len(body)
return [body]
return [b'']
def fix_conditional_response(self):
"""
        Call this once you have set the content_length to the whole object
        length and the body or app_iter; it recomputes the conditional
        response properties (status, Content-Length, Content-Range).
        It is OK not to call this method; the conditional response will be
        maintained for you when you __call__ the response.
"""
self.response_iter = self._response_iter(self.app_iter, self._body)
def absolute_location(self):
"""
Attempt to construct an absolute location.
"""
if not self.location.startswith('/'):
return self.location
return self.host_url + self.location
def www_authenticate(self):
"""
        Construct a suitable value for the WWW-Authenticate response header.
        If we have a request and a valid-looking path, the realm
is the account; otherwise we set it to 'unknown'.
"""
try:
vrs, realm, rest = self.request.split_path(2, 3, True)
if realm in ('v1.0', 'auth'):
realm = 'unknown'
except (AttributeError, ValueError):
realm = 'unknown'
return 'Swift realm="%s"' % wsgi_quote(realm)
@property
def is_success(self):
return self.status_int // 100 == 2
def __call__(self, env, start_response):
"""
Respond to the WSGI request.
.. warning::
This will translate any relative Location header value to an
absolute URL using the WSGI environment's HOST_URL as a
prefix, as RFC 2616 specifies.
However, it is quite common to use relative redirects,
especially when it is difficult to know the exact HOST_URL
the browser would have used when behind several CNAMEs, CDN
services, etc. All modern browsers support relative
redirects.
To skip over RFC enforcement of the Location header value,
you may set ``env['swift.leave_relative_location'] = True``
in the WSGI environment.
"""
if not self.request:
self.request = Request(env)
self.environ = env
if not self.response_iter:
self.response_iter = self._response_iter(self.app_iter, self._body)
if 'location' in self.headers and \
not env.get('swift.leave_relative_location'):
self.location = self.absolute_location()
start_response(self.status, list(self.headers.items()))
return self.response_iter
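# Illustrative sketch (editor's addition, not part of the original module):
# a Response is itself a WSGI callable, so a conditional, range-aware reply
# can be built and fed back through call_application(); the request below is
# hypothetical:
#
#     req = Request.blank('/v1/a/c/o', headers={'Range': 'bytes=0-3'})
#     resp = Response(body=b'0123456789', request=req,
#                     conditional_response=True)
#     status, headers, body_iter = req.call_application(resp)
#     # status == '206 Partial Content'; body_iter yields b'0123'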
class HTTPException(Response, Exception):
def __init__(self, *args, **kwargs):
Response.__init__(self, *args, **kwargs)
Exception.__init__(self, self.status)
def wsgify(func):
"""
A decorator for translating functions which take a swob Request object and
return a Response object into WSGI callables. Also catches any raised
HTTPExceptions and treats them as a returned Response.
"""
@functools.wraps(func)
def _wsgify(*args):
env, start_response = args[-2:]
new_args = args[:-2] + (Request(env), )
try:
return func(*new_args)(env, start_response)
except HTTPException as err_resp:
return err_resp(env, start_response)
return _wsgify
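# Illustrative sketch (editor's addition, not part of the original module):
# the smallest wsgify-decorated application. The function is hypothetical and
# nothing registers it; it only shows the Request-in / Response-out contract
# that the decorator translates to WSGI.
@wsgify
def _example_echo_app(req):
    # Echo the quoted request path back to the caller.
    return Response(request=req, body='you asked for %s\n' % req.path,
                    content_type='text/plain')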
class StatusMap(object):
"""
A dict-like object that returns HTTPException subclasses/factory functions
where the given key is the status code.
"""
def __getitem__(self, key):
return partial(HTTPException, status=key)
status_map = StatusMap()
HTTPOk = status_map[200]
HTTPCreated = status_map[201]
HTTPAccepted = status_map[202]
HTTPNoContent = status_map[204]
HTTPPartialContent = status_map[206]
HTTPMovedPermanently = status_map[301]
HTTPFound = status_map[302]
HTTPSeeOther = status_map[303]
HTTPNotModified = status_map[304]
HTTPTemporaryRedirect = status_map[307]
HTTPBadRequest = status_map[400]
HTTPUnauthorized = status_map[401]
HTTPForbidden = status_map[403]
HTTPMethodNotAllowed = status_map[405]
HTTPNotFound = status_map[404]
HTTPNotAcceptable = status_map[406]
HTTPRequestTimeout = status_map[408]
HTTPConflict = status_map[409]
HTTPLengthRequired = status_map[411]
HTTPPreconditionFailed = status_map[412]
HTTPRequestEntityTooLarge = status_map[413]
HTTPRequestedRangeNotSatisfiable = status_map[416]
HTTPUnprocessableEntity = status_map[422]
HTTPClientDisconnect = status_map[499]
HTTPServerError = status_map[500]
HTTPInternalServerError = status_map[500]
HTTPNotImplemented = status_map[501]
HTTPBadGateway = status_map[502]
HTTPServiceUnavailable = status_map[503]
HTTPInsufficientStorage = status_map[507]
HTTPTooManyBackendRequests = status_map[529]
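# Illustrative sketch (editor's addition, not part of the original module):
# the names above are partials around HTTPException, so callers can both
# return and raise them, e.g.
#
#     if obj_name is None:
#         raise HTTPNotFound(request=req, body=b'no such object\n')
#     return HTTPNoContent(request=req)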
| swift-master | swift/common/swob.py |
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Bindings to the `tee` and `splice` system calls
'''
import os
import operator
import six
import ctypes
import ctypes.util
__all__ = ['tee', 'splice']
c_loff_t = ctypes.c_long
class Tee(object):
'''Binding to `tee`'''
__slots__ = '_c_tee',
def __init__(self):
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
try:
c_tee = libc.tee
except AttributeError:
self._c_tee = None
return
c_tee.argtypes = [
ctypes.c_int,
ctypes.c_int,
ctypes.c_size_t,
ctypes.c_uint
]
c_tee.restype = ctypes.c_ssize_t
def errcheck(result, func, arguments):
if result == -1:
errno = ctypes.set_errno(0)
raise IOError(errno, 'tee: %s' % os.strerror(errno))
else:
return result
c_tee.errcheck = errcheck
self._c_tee = c_tee
def __call__(self, fd_in, fd_out, len_, flags):
'''See `man 2 tee`
File-descriptors can be file-like objects with a `fileno` method, or
integers.
Flags can be an integer value, or a list of flags (exposed on
`splice`).
This function returns the number of bytes transferred (i.e. the actual
result of the call to `tee`).
Upon other errors, an `IOError` is raised with the proper `errno` set.
'''
if not self.available:
raise EnvironmentError('tee not available')
if not isinstance(flags, six.integer_types):
c_flags = six.moves.reduce(operator.or_, flags, 0)
else:
c_flags = flags
c_fd_in = getattr(fd_in, 'fileno', lambda: fd_in)()
c_fd_out = getattr(fd_out, 'fileno', lambda: fd_out)()
return self._c_tee(c_fd_in, c_fd_out, len_, c_flags)
@property
def available(self):
'''Availability of `tee`'''
return self._c_tee is not None
tee = Tee()
del Tee
class Splice(object):
'''Binding to `splice`'''
# From `bits/fcntl-linux.h`
SPLICE_F_MOVE = 1
SPLICE_F_NONBLOCK = 2
SPLICE_F_MORE = 4
SPLICE_F_GIFT = 8
__slots__ = '_c_splice',
def __init__(self):
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
try:
c_splice = libc.splice
except AttributeError:
self._c_splice = None
return
c_loff_t_p = ctypes.POINTER(c_loff_t)
c_splice.argtypes = [
ctypes.c_int, c_loff_t_p,
ctypes.c_int, c_loff_t_p,
ctypes.c_size_t,
ctypes.c_uint
]
c_splice.restype = ctypes.c_ssize_t
def errcheck(result, func, arguments):
if result == -1:
errno = ctypes.set_errno(0)
raise IOError(errno, 'splice: %s' % os.strerror(errno))
else:
off_in = arguments[1]
off_out = arguments[3]
return (
result,
off_in.contents.value if off_in is not None else None,
off_out.contents.value if off_out is not None else None)
c_splice.errcheck = errcheck
self._c_splice = c_splice
def __call__(self, fd_in, off_in, fd_out, off_out, len_, flags):
'''See `man 2 splice`
File-descriptors can be file-like objects with a `fileno` method, or
integers.
Flags can be an integer value, or a list of flags (exposed on this
object).
Returns a tuple of the result of the `splice` call, the output value of
`off_in` and the output value of `off_out` (or `None` for any of these
output values, if applicable).
Upon other errors, an `IOError` is raised with the proper `errno` set.
Note: if you want to pass `NULL` as value for `off_in` or `off_out` to
the system call, you must pass `None`, *not* 0!
'''
if not self.available:
raise EnvironmentError('splice not available')
if not isinstance(flags, six.integer_types):
c_flags = six.moves.reduce(operator.or_, flags, 0)
else:
c_flags = flags
c_fd_in = getattr(fd_in, 'fileno', lambda: fd_in)()
c_fd_out = getattr(fd_out, 'fileno', lambda: fd_out)()
c_off_in = \
ctypes.pointer(c_loff_t(off_in)) if off_in is not None else None
c_off_out = \
ctypes.pointer(c_loff_t(off_out)) if off_out is not None else None
return self._c_splice(
c_fd_in, c_off_in, c_fd_out, c_off_out, len_, c_flags)
@property
def available(self):
'''Availability of `splice`'''
return self._c_splice is not None
splice = Splice()
del Splice
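# Illustrative sketch (editor's addition, not part of the original module):
# zero-copying five bytes out of a pipe into a file. The pipe and temp file
# are hypothetical stand-ins; Swift's real callers splice between sockets and
# object files.
#
#     rfd, wfd = os.pipe()
#     os.write(wfd, b'hello')
#     with open('/tmp/spliced', 'wb') as fp:
#         if splice.available:
#             moved, _, _ = splice(rfd, None, fp, None, 5,
#                                  [splice.SPLICE_F_MOVE])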
| swift-master | swift/common/splice.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import functools
import errno
import os
import resource
import signal
import time
import subprocess
import re
import six
import tempfile
from distutils.spawn import find_executable
from swift.common.utils import search_tree, remove_file, write_file, readconf
from swift.common.exceptions import InvalidPidFileException
SWIFT_DIR = '/etc/swift'
RUN_DIR = '/var/run/swift'
PROC_DIR = '/proc'
ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
'container-replicator', 'container-reconciler',
'container-server', 'container-sharder', 'container-sync',
'container-updater', 'object-auditor', 'object-server',
'object-expirer', 'object-replicator',
'object-reconstructor', 'object-updater',
'proxy-server', 'account-replicator', 'account-reaper']
MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server',
'object-server']
REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS]
# aliases mapping
ALIASES = {'all': ALL_SERVERS, 'main': MAIN_SERVERS, 'rest': REST_SERVERS}
GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS
SEAMLESS_SHUTDOWN_SERVERS = MAIN_SERVERS
START_ONCE_SERVERS = REST_SERVERS
# These are servers that match a type (account-*, container-*, object-*) but
# don't use that type-server.conf file and instead use their own.
STANDALONE_SERVERS = ['container-reconciler']
KILL_WAIT = 15 # seconds to wait for servers to die (by default)
WARNING_WAIT = 3 # seconds to wait after message that may just be a warning
MAX_DESCRIPTORS = 32768
MAX_MEMORY = (1024 * 1024 * 1024) * 2 # 2 GB
MAX_PROCS = 8192 # workers * disks, can get high
def setup_env():
"""Try to increase resource limits of the OS. Move PYTHON_EGG_CACHE to /tmp
"""
try:
resource.setrlimit(resource.RLIMIT_NOFILE,
(MAX_DESCRIPTORS, MAX_DESCRIPTORS))
except ValueError:
print("WARNING: Unable to modify file descriptor limit. "
"Running as non-root?")
try:
resource.setrlimit(resource.RLIMIT_DATA,
(MAX_MEMORY, MAX_MEMORY))
except ValueError:
print("WARNING: Unable to modify memory limit. "
"Running as non-root?")
try:
resource.setrlimit(resource.RLIMIT_NPROC,
(MAX_PROCS, MAX_PROCS))
except ValueError:
print("WARNING: Unable to modify max process limit. "
"Running as non-root?")
# Set PYTHON_EGG_CACHE if it isn't already set
os.environ.setdefault('PYTHON_EGG_CACHE', tempfile.gettempdir())
def command(func):
"""
    Decorator to declare which methods are accessible as commands; commands
    always return 1 or 0, where 0 indicates success.
:param func: function to make public
"""
func.publicly_accessible = True
@functools.wraps(func)
def wrapped(self, *a, **kw):
rv = func(self, *a, **kw)
if len(self.servers) == 0:
return 1
return 1 if rv else 0
return wrapped
def watch_server_pids(server_pids, interval=1, **kwargs):
"""Monitor a collection of server pids yielding back those pids that
aren't responding to signals.
:param server_pids: a dict, lists of pids [int,...] keyed on
Server objects
"""
status = {}
start = time.time()
end = start + interval
server_pids = dict(server_pids) # make a copy
while True:
for server, pids in server_pids.items():
for pid in pids:
try:
# let pid stop if it wants to
os.waitpid(pid, os.WNOHANG)
except OSError as e:
if e.errno not in (errno.ECHILD, errno.ESRCH):
raise # else no such child/process
# check running pids for server
status[server] = server.get_running_pids(**kwargs)
for pid in pids:
# original pids no longer in running pids!
if pid not in status[server]:
yield server, pid
# update active pids list using running_pids
server_pids[server] = status[server]
if not [p for server, pids in status.items() for p in pids]:
# no more running pids
break
if time.time() > end:
break
else:
time.sleep(0.1)
def safe_kill(pid, sig, name):
"""Send signal to process and check process name
    :param pid: process id
    :param sig: signal to send
    :param name: expected name used to verify the target process
"""
# check process name for SIG_DFL
if sig == signal.SIG_DFL:
try:
proc_file = '%s/%d/cmdline' % (PROC_DIR, pid)
if os.path.exists(proc_file):
with open(proc_file, 'r') as fd:
if name not in fd.read():
# unknown process is using the pid
raise InvalidPidFileException()
except IOError:
pass
os.kill(pid, sig)
def kill_group(pid, sig):
"""Send signal to process group
    :param pid: process id
    :param sig: signal to send
"""
# Negative PID means process group
os.kill(-pid, sig)
def format_server_name(servername):
"""
    Formats a server name as a swift-compatible server name,
    e.g. swift-object-server
:param servername: server name
:returns: swift compatible server name and its binary name
"""
if '.' in servername:
servername = servername.split('.', 1)[0]
if '-' not in servername:
servername = '%s-server' % servername
cmd = 'swift-%s' % servername
return servername, cmd
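# Illustrative examples (editor's addition, not part of the original module):
#     format_server_name('object') == ('object-server', 'swift-object-server')
#     format_server_name('proxy-server.1') == ('proxy-server',
#                                              'swift-proxy-server')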
def verify_server(server):
"""
Check whether the server is among swift servers or not, and also
checks whether the server's binaries are installed or not.
:param server: name of the server
:returns: True, when the server name is valid and its binaries are found.
False, otherwise.
"""
if not server:
return False
_, cmd = format_server_name(server)
if find_executable(cmd) is None:
return False
return True
class UnknownCommandError(Exception):
pass
class Manager(object):
"""Main class for performing commands on groups of servers.
:param servers: list of server names as strings
"""
def __init__(self, servers, run_dir=RUN_DIR):
self.server_names = set()
self._default_strict = True
for server in servers:
if server in ALIASES:
self.server_names.update(ALIASES[server])
self._default_strict = False
elif '*' in server:
# convert glob to regex
self.server_names.update([
s for s in ALL_SERVERS if
re.match(server.replace('*', '.*'), s)])
self._default_strict = False
else:
self.server_names.add(server)
self.servers = set()
for name in self.server_names:
if verify_server(name):
self.servers.add(Server(name, run_dir))
def __iter__(self):
return iter(self.servers)
@command
def status(self, **kwargs):
"""display status of tracked pids for server
"""
status = 0
for server in self.servers:
status += server.status(**kwargs)
return status
@command
def start(self, **kwargs):
"""starts a server
"""
setup_env()
status = 0
strict = kwargs.get('strict')
# if strict not set explicitly
if strict is None:
strict = self._default_strict
for server in self.servers:
status += 0 if server.launch(**kwargs) else 1
if not strict:
status = 0
if not kwargs.get('daemon', True):
for server in self.servers:
try:
status += server.interact(**kwargs)
except KeyboardInterrupt:
print('\nuser quit')
self.stop(**kwargs)
break
elif kwargs.get('wait', True):
for server in self.servers:
status += server.wait(**kwargs)
return status
@command
def no_wait(self, **kwargs):
"""spawn server and return immediately
"""
kwargs['wait'] = False
return self.start(**kwargs)
@command
def no_daemon(self, **kwargs):
"""start a server interactively
"""
kwargs['daemon'] = False
return self.start(**kwargs)
@command
def once(self, **kwargs):
"""start server and run one pass on supporting daemons
"""
kwargs['once'] = True
return self.start(**kwargs)
@command
def stop(self, **kwargs):
"""stops a server
"""
server_pids = {}
for server in self.servers:
signaled_pids = server.stop(**kwargs)
if not signaled_pids:
print('No %s running' % server)
else:
server_pids[server] = signaled_pids
# all signaled_pids, i.e. list(itertools.chain(*server_pids.values()))
signaled_pids = [p for server, pids in server_pids.items()
for p in pids]
        # keep track of the pids yielded back as killed for all servers
killed_pids = set()
kill_wait = kwargs.get('kill_wait', KILL_WAIT)
for server, killed_pid in watch_server_pids(server_pids,
interval=kill_wait,
**kwargs):
print("%(server)s (%(pid)s) appears to have stopped" %
{'server': server, 'pid': killed_pid})
killed_pids.add(killed_pid)
if not killed_pids.symmetric_difference(signaled_pids):
# all processes have been stopped
return 0
        # reached the kill_wait interval in watch_server_pids without
        # killing all servers
kill_after_timeout = kwargs.get('kill_after_timeout', False)
for server, pids in server_pids.items():
if not killed_pids.issuperset(pids):
# some pids of this server were not killed
if kill_after_timeout:
print('Waited %(kill_wait)s seconds for %(server)s '
'to die; killing' %
{'kill_wait': kill_wait, 'server': server})
# Send SIGKILL to all remaining pids
for pid in set(pids.keys()) - killed_pids:
print('Signal %(server)s pid: %(pid)s signal: '
'%(signal)s' % {'server': server,
'pid': pid,
'signal': signal.SIGKILL})
# Send SIGKILL to process group
try:
kill_group(pid, signal.SIGKILL)
except OSError as e:
# PID died before kill_group can take action?
if e.errno != errno.ESRCH:
raise
else:
print('Waited %(kill_wait)s seconds for %(server)s '
'to die; giving up' %
{'kill_wait': kill_wait, 'server': server})
return 1
@command
def kill(self, **kwargs):
"""stop a server (no error if not running)
"""
status = self.stop(**kwargs)
kwargs['quiet'] = True
if status and not self.status(**kwargs):
# only exit error if the server is still running
return status
return 0
@command
def shutdown(self, **kwargs):
"""allow current requests to finish on supporting servers
"""
kwargs['graceful'] = True
status = 0
status += self.stop(**kwargs)
return status
@command
def restart(self, **kwargs):
"""stops then restarts server
"""
status = 0
status += self.stop(**kwargs)
status += self.start(**kwargs)
return status
@command
def reload(self, **kwargs):
"""graceful shutdown then restart on supporting servers
"""
kwargs['graceful'] = True
status = 0
for server in self.server_names:
m = Manager([server])
status += m.stop(**kwargs)
status += m.start(**kwargs)
return status
@command
def reload_seamless(self, **kwargs):
"""seamlessly re-exec, then shutdown of old listen sockets on
supporting servers
"""
kwargs.pop('graceful', None)
kwargs['seamless'] = True
status = 0
for server in self.servers:
signaled_pids = server.stop(**kwargs)
if not signaled_pids:
print('No %s running' % server)
status += 1
return status
def kill_child_pids(self, **kwargs):
"""kill child pids, optionally servicing accepted connections"""
status = 0
for server in self.servers:
signaled_pids = server.kill_child_pids(**kwargs)
if not signaled_pids:
print('No %s running' % server)
status += 1
return status
@command
def force_reload(self, **kwargs):
"""alias for reload
"""
return self.reload(**kwargs)
def get_command(self, cmd):
"""Find and return the decorated method named like cmd
:param cmd: the command to get, a string, if not found raises
UnknownCommandError
"""
cmd = cmd.lower().replace('-', '_')
f = getattr(self, cmd, None)
if f is None:
raise UnknownCommandError(cmd)
if not hasattr(f, 'publicly_accessible'):
raise UnknownCommandError(cmd)
return f
@classmethod
def list_commands(cls):
"""Get all publicly accessible commands
        :returns: a list of string tuples (cmd, help), the method names that
            are decorated as commands
"""
get_method = lambda cmd: getattr(cls, cmd)
return sorted([(x.replace('_', '-'), get_method(x).__doc__.strip())
for x in dir(cls) if
getattr(get_method(x), 'publicly_accessible', False)])
def run_command(self, cmd, **kwargs):
"""Find the named command and run it
:param cmd: the command name to run
"""
f = self.get_command(cmd)
return f(**kwargs)
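# Illustrative sketch (editor's addition, not part of the original module):
# roughly how swift-init style tooling drives the Manager API above. The
# helper is hypothetical and never called from this module.
def _example_restart_main_servers():
    manager = Manager(['main'])
    return manager.run_command('restart', kill_wait=KILL_WAIT)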
class Server(object):
"""Manage operations on a server or group of servers of similar type
:param server: name of server
"""
def __init__(self, server, run_dir=RUN_DIR):
self.server = server.lower()
if '.' in self.server:
self.server, self.conf = self.server.rsplit('.', 1)
else:
self.conf = None
self.server, self.cmd = format_server_name(self.server)
self.type = self.server.rsplit('-', 1)[0]
self.procs = []
self.run_dir = run_dir
def __str__(self):
return self.server
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(str(self)))
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
try:
return self.server == other.server
except AttributeError:
return False
def __ne__(self, other):
return not self.__eq__(other)
def get_pid_file_name(self, conf_file):
"""Translate conf_file to a corresponding pid_file
        :param conf_file: a conf_file for this server, a string
:returns: the pid_file for this conf_file
"""
return conf_file.replace(
os.path.normpath(SWIFT_DIR), self.run_dir, 1).replace(
'%s-server' % self.type, self.server, 1).replace(
'.conf', '.pid', 1)
def get_conf_file_name(self, pid_file):
"""Translate pid_file to a corresponding conf_file
:param pid_file: a pid_file for this server, a string
:returns: the conf_file for this pid_file
"""
if self.server in STANDALONE_SERVERS:
return pid_file.replace(
os.path.normpath(self.run_dir), SWIFT_DIR, 1).replace(
'.pid', '.conf', 1)
else:
return pid_file.replace(
os.path.normpath(self.run_dir), SWIFT_DIR, 1).replace(
self.server, '%s-server' % self.type, 1).replace(
'.pid', '.conf', 1)
def _find_conf_files(self, server_search):
if self.conf is not None:
return search_tree(SWIFT_DIR, server_search, self.conf + '.conf',
dir_ext=self.conf + '.conf.d')
else:
return search_tree(SWIFT_DIR, server_search + '*', '.conf',
dir_ext='.conf.d')
def conf_files(self, **kwargs):
"""Get conf files for this server
:param number: if supplied will only lookup the nth server
:returns: list of conf files
"""
if self.server == 'object-expirer':
def has_expirer_section(conf_path):
try:
readconf(conf_path, section_name="object-expirer")
except ValueError:
return False
else:
return True
            # config of expirer is preferentially read from the object-server
            # section. If no object-server.conf has an object-expirer
            # section, object-expirer.conf is used.
found_conf_files = [
conf for conf in self._find_conf_files("object-server")
if has_expirer_section(conf)
] or self._find_conf_files("object-expirer")
elif self.server in STANDALONE_SERVERS:
found_conf_files = self._find_conf_files(self.server)
else:
found_conf_files = self._find_conf_files("%s-server" % self.type)
number = kwargs.get('number')
if number:
try:
conf_files = [found_conf_files[number - 1]]
except IndexError:
conf_files = []
else:
conf_files = found_conf_files
def dump_found_configs():
if found_conf_files:
print('Found configs:')
for i, conf_file in enumerate(found_conf_files):
print(' %d) %s' % (i + 1, conf_file))
if not conf_files:
# maybe there's a config file(s) out there, but I couldn't find it!
if not kwargs.get('quiet'):
if number:
print('Unable to locate config number %(number)s for'
' %(server)s' %
{'number': number, 'server': self.server})
else:
print('Unable to locate config for %s' % self.server)
if kwargs.get('verbose') and not kwargs.get('quiet'):
dump_found_configs()
elif any(["object-expirer" in name for name in conf_files]) and \
not kwargs.get('quiet'):
print("WARNING: object-expirer.conf is deprecated. "
"Move object-expirers' configuration into "
"object-server.conf.")
if kwargs.get('verbose'):
dump_found_configs()
return conf_files
def pid_files(self, **kwargs):
"""Get pid files for this server
:param number: if supplied will only lookup the nth server
:returns: list of pid files
"""
if self.conf is not None:
pid_files = search_tree(self.run_dir, '%s*' % self.server,
exts=[self.conf + '.pid',
self.conf + '.pid.d'])
else:
pid_files = search_tree(self.run_dir, '%s*' % self.server)
if kwargs.get('number', 0):
conf_files = self.conf_files(**kwargs)
# filter pid_files to match the index of numbered conf_file
pid_files = [pid_file for pid_file in pid_files if
self.get_conf_file_name(pid_file) in conf_files]
return pid_files
def iter_pid_files(self, **kwargs):
"""Generator, yields (pid_file, pids)
"""
for pid_file in self.pid_files(**kwargs):
try:
pid = int(open(pid_file).read().strip())
except ValueError:
pid = None
yield pid_file, pid
def _signal_pid(self, sig, pid, pid_file, verbose):
try:
if sig != signal.SIG_DFL:
print('Signal %(server)s pid: %(pid)s signal: '
'%(signal)s' %
{'server': self.server, 'pid': pid, 'signal': sig})
safe_kill(pid, sig, 'swift-%s' % self.server)
except InvalidPidFileException:
if verbose:
print('Removing pid file %(pid_file)s with wrong pid '
'%(pid)d' % {'pid_file': pid_file, 'pid': pid})
remove_file(pid_file)
return False
except OSError as e:
if e.errno == errno.ESRCH:
# pid does not exist
if verbose:
print("Removing stale pid file %s" % pid_file)
remove_file(pid_file)
elif e.errno == errno.EPERM:
print("No permission to signal PID %d" % pid)
return False
else:
# process exists
return True
def signal_pids(self, sig, **kwargs):
"""Send a signal to pids for this server
:param sig: signal to send
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
pids = {}
for pid_file, pid in self.iter_pid_files(**kwargs):
if not pid: # Catches None and 0
print('Removing pid file %s with invalid pid' % pid_file)
remove_file(pid_file)
continue
if self._signal_pid(sig, pid, pid_file, kwargs.get('verbose')):
pids[pid] = pid_file
return pids
def signal_children(self, sig, **kwargs):
"""Send a signal to child pids for this server
:param sig: signal to send
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
pids = {}
for pid_file, pid in self.iter_pid_files(**kwargs):
if not pid: # Catches None and 0
print('Removing pid file %s with invalid pid' % pid_file)
remove_file(pid_file)
continue
ps_cmd = ['ps', '--ppid', str(pid), '--no-headers', '-o', 'pid']
for pid in subprocess.check_output(ps_cmd).split():
pid = int(pid)
if self._signal_pid(sig, pid, pid_file, kwargs.get('verbose')):
pids[pid] = pid_file
return pids
def get_running_pids(self, **kwargs):
"""Get running pids
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
return self.signal_pids(signal.SIG_DFL, **kwargs) # send noop
def kill_running_pids(self, **kwargs):
"""Kill running pids
:param graceful: if True, attempt SIGHUP on supporting servers
:param seamless: if True, attempt SIGUSR1 on supporting servers
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
graceful = kwargs.get('graceful')
seamless = kwargs.get('seamless')
if graceful and self.server in GRACEFUL_SHUTDOWN_SERVERS:
sig = signal.SIGHUP
elif seamless and self.server in SEAMLESS_SHUTDOWN_SERVERS:
sig = signal.SIGUSR1
else:
sig = signal.SIGTERM
return self.signal_pids(sig, **kwargs)
def kill_child_pids(self, **kwargs):
"""Kill child pids, leaving server overseer to respawn them
:param graceful: if True, attempt SIGHUP on supporting servers
:param seamless: if True, attempt SIGUSR1 on supporting servers
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
graceful = kwargs.get('graceful')
seamless = kwargs.get('seamless')
if graceful and self.server in GRACEFUL_SHUTDOWN_SERVERS:
sig = signal.SIGHUP
elif seamless and self.server in SEAMLESS_SHUTDOWN_SERVERS:
sig = signal.SIGUSR1
else:
sig = signal.SIGTERM
return self.signal_children(sig, **kwargs)
def status(self, pids=None, **kwargs):
"""Display status of server
:param pids: if not supplied pids will be populated automatically
:param number: if supplied will only lookup the nth server
:returns: 1 if server is not running, 0 otherwise
"""
if pids is None:
pids = self.get_running_pids(**kwargs)
if not pids:
number = kwargs.get('number', 0)
if number:
kwargs['quiet'] = True
conf_files = self.conf_files(**kwargs)
if conf_files:
print("%(server)s #%(number)d not running (%(conf)s)" %
{'server': self.server, 'number': number,
'conf': conf_files[0]})
else:
print("No %s running" % self.server)
return 1
for pid, pid_file in pids.items():
conf_file = self.get_conf_file_name(pid_file)
print("%(server)s running (%(pid)s - %(conf)s)" %
{'server': self.server, 'pid': pid, 'conf': conf_file})
return 0
def spawn(self, conf_file, once=False, wait=True, daemon=True,
additional_args=None, **kwargs):
"""Launch a subprocess for this server.
:param conf_file: path to conf_file to use as first arg
:param once: boolean, add once argument to command
:param wait: boolean, if true capture stdout with a pipe
:param daemon: boolean, if false ask server to log to console
:param additional_args: list of additional arguments to pass
on the command line
:returns: the pid of the spawned process
"""
args = [self.cmd, conf_file]
if once:
args.append('once')
if not daemon:
# ask the server to log to console
args.append('verbose')
if additional_args:
if isinstance(additional_args, str):
additional_args = [additional_args]
args.extend(additional_args)
# figure out what we're going to do with stdio
if not daemon:
# do nothing, this process is open until the spawns close anyway
re_out = None
re_err = None
else:
re_err = subprocess.STDOUT
if wait:
# we're going to need to block on this...
re_out = subprocess.PIPE
else:
re_out = open(os.devnull, 'w+b')
proc = subprocess.Popen(args, stdout=re_out, stderr=re_err)
pid_file = self.get_pid_file_name(conf_file)
write_file(pid_file, proc.pid)
self.procs.append(proc)
return proc.pid
def wait(self, **kwargs):
"""
wait on spawned procs to start
"""
status = 0
for proc in self.procs:
# wait for process to close its stdout (if we haven't done that)
if proc.stdout.closed:
output = ''
else:
output = proc.stdout.read()
proc.stdout.close()
if not six.PY2:
output = output.decode('utf8', 'backslashreplace')
if kwargs.get('once', False):
# if you don't want once to wait you can send it to the
# background on the command line, I generally just run with
# no-daemon anyway, but this is quieter
proc.wait()
if output:
print(output)
start = time.time()
# wait for process to die (output may just be a warning)
while time.time() - start < WARNING_WAIT:
time.sleep(0.1)
if proc.poll() is not None:
status += proc.returncode
break
return status
def interact(self, **kwargs):
"""
wait on spawned procs to terminate
"""
status = 0
for proc in self.procs:
# wait for process to terminate
proc.communicate() # should handle closing pipes
if proc.returncode:
status += 1
return status
def launch(self, **kwargs):
"""
Collect conf files and attempt to spawn the processes for this server
"""
conf_files = self.conf_files(**kwargs)
if not conf_files:
return {}
pids = self.get_running_pids(**kwargs)
already_started = False
for pid, pid_file in pids.items():
conf_file = self.get_conf_file_name(pid_file)
# for legacy compat you can't start other servers if one server is
# already running (unless -n specifies which one you want), this
# restriction could potentially be lifted, and launch could start
# any unstarted instances
if conf_file in conf_files:
already_started = True
print("%(server)s running (%(pid)s - %(conf)s)" %
{'server': self.server, 'pid': pid, 'conf': conf_file})
elif not kwargs.get('number', 0):
already_started = True
print("%(server)s running (%(pid)s - %(pid_file)s)" %
{'server': self.server, 'pid': pid,
'pid_file': pid_file})
if already_started:
print("%s already started..." % self.server)
return {}
if self.server not in START_ONCE_SERVERS:
kwargs['once'] = False
pids = {}
for conf_file in conf_files:
if kwargs.get('once'):
msg = 'Running %s once' % self.server
else:
msg = 'Starting %s' % self.server
print('%s...(%s)' % (msg, conf_file))
try:
pid = self.spawn(conf_file, **kwargs)
except OSError as e:
if e.errno == errno.ENOENT:
# TODO(clayg): should I check if self.cmd exists earlier?
print("%s does not exist" % self.cmd)
break
else:
raise
pids[pid] = conf_file
return pids
def stop(self, **kwargs):
"""Send stop signals to pids for this server
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
return self.kill_running_pids(**kwargs)
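# Illustrative sketch (editor's addition, not part of the original module):
# driving a single numbered object-server instance directly through Server;
# the config numbering is hypothetical:
#
#     server = Server('object-server')
#     server.launch(number=1, wait=True)
#     server.status(number=1)
#     server.stop(number=1)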
| swift-master | swift/common/manager.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import sys
import time
import signal
from re import sub
import eventlet
import eventlet.debug
from swift.common import utils
class Daemon(object):
"""
Daemon base class
A daemon has a run method that accepts a ``once`` kwarg and will dispatch
to :meth:`run_once` or :meth:`run_forever`.
A subclass of Daemon must implement :meth:`run_once` and
:meth:`run_forever`.
A subclass of Daemon may override :meth:`get_worker_args` to dispatch
arguments to individual child process workers and :meth:`is_healthy` to
perform context specific periodic wellness checks which can reset worker
arguments.
    Implementations of Daemon do not know *how* to daemonize, or execute
    multiple daemonized workers; they simply provide the behavior of the
    daemon and context-specific knowledge about how workers should be started.
"""
WORKERS_HEALTHCHECK_INTERVAL = 5.0
def __init__(self, conf):
self.conf = conf
self.logger = utils.get_logger(conf, log_route='daemon')
def run_once(self, *args, **kwargs):
"""Override this to run the script once"""
raise NotImplementedError('run_once not implemented')
def run_forever(self, *args, **kwargs):
"""Override this to run forever"""
raise NotImplementedError('run_forever not implemented')
def run(self, once=False, **kwargs):
if once:
self.run_once(**kwargs)
else:
self.run_forever(**kwargs)
def post_multiprocess_run(self):
"""
Override this to do something after running using multiple worker
processes. This method is called in the parent process.
This is probably only useful for run-once mode since there is no
"after running" in run-forever mode.
"""
pass
def get_worker_args(self, once=False, **kwargs):
"""
For each worker yield a (possibly empty) dict of kwargs to pass along
to the daemon's :meth:`run` method after fork. The length of elements
returned from this method will determine the number of processes
created.
If the returned iterable is empty, the Strategy will fallback to
run-inline strategy.
:param once: False if the worker(s) will be daemonized, True if the
worker(s) will be run once
:param kwargs: plumbed through via command line argparser
:returns: an iterable of dicts, each element represents the kwargs to
be passed to a single worker's :meth:`run` method after fork.
"""
return []
def is_healthy(self):
"""
This method is called very frequently on the instance of the daemon
held by the parent process. If it returns False, all child workers are
terminated, and new workers will be created.
:returns: a boolean, True only if all workers should continue to run
"""
return True
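# Illustrative sketch (editor's addition, not part of the original module):
# the smallest useful Daemon subclass. The class and its 30-second cadence are
# hypothetical; Swift's real daemons (replicators, auditors, ...) live in
# other modules and are considerably richer.
class _ExampleSweeper(Daemon):
    def run_once(self, *args, **kwargs):
        self.logger.info('sweep pass complete')

    def run_forever(self, *args, **kwargs):
        while True:
            self.run_once(*args, **kwargs)
            time.sleep(30)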
class DaemonStrategy(object):
"""
This is the execution strategy for using subclasses of Daemon. The default
behavior is to invoke the daemon's :meth:`Daemon.run` method from within
the parent process. When the :meth:`Daemon.run` method returns the parent
process will exit.
However, if the Daemon returns a non-empty iterable from
:meth:`Daemon.get_worker_args`, the daemon's :meth:`Daemon.run` method will
be invoked in child processes, with the arguments provided from the parent
process's instance of the daemon. If a child process exits it will be
restarted with the same options, unless it was executed in once mode.
:param daemon: an instance of a :class:`Daemon` (has a `run` method)
:param logger: a logger instance
"""
def __init__(self, daemon, logger):
self.daemon = daemon
self.logger = logger
self.running = False
# only used by multi-worker strategy
self.options_by_pid = {}
self.unspawned_worker_options = []
def setup(self, **kwargs):
utils.validate_configuration()
utils.drop_privileges(self.daemon.conf.get('user', 'swift'))
utils.clean_up_daemon_hygiene()
utils.capture_stdio(self.logger, **kwargs)
def kill_children(*args):
self.running = False
self.logger.notice('SIGTERM received (%s)', os.getpid())
signal.signal(signal.SIGTERM, signal.SIG_IGN)
os.killpg(0, signal.SIGTERM)
os._exit(0)
signal.signal(signal.SIGTERM, kill_children)
self.running = True
utils.systemd_notify(self.logger)
def _run_inline(self, once=False, **kwargs):
"""Run the daemon"""
self.daemon.run(once=once, **kwargs)
def run(self, once=False, **kwargs):
"""Daemonize and execute our strategy"""
self.setup(**kwargs)
try:
self._run(once=once, **kwargs)
except KeyboardInterrupt:
self.logger.notice('User quit')
finally:
self.cleanup()
self.running = False
def _fork(self, once, **kwargs):
pid = os.fork()
if pid == 0:
signal.signal(signal.SIGHUP, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.daemon.run(once, **kwargs)
self.logger.debug('Forked worker %s finished', os.getpid())
# do not return from this stack, nor execute any finally blocks
os._exit(0)
else:
self.register_worker_start(pid, kwargs)
return pid
def iter_unspawned_workers(self):
while True:
try:
per_worker_options = self.unspawned_worker_options.pop()
except IndexError:
return
yield per_worker_options
def spawned_pids(self):
return list(self.options_by_pid.keys())
def register_worker_start(self, pid, per_worker_options):
self.logger.debug('Spawned worker %s with %r', pid, per_worker_options)
self.options_by_pid[pid] = per_worker_options
def register_worker_exit(self, pid):
self.unspawned_worker_options.append(self.options_by_pid.pop(pid))
def ask_daemon_to_prepare_workers(self, once, **kwargs):
self.unspawned_worker_options = list(
self.daemon.get_worker_args(once=once, **kwargs))
def abort_workers_if_daemon_would_like(self):
if not self.daemon.is_healthy():
self.logger.debug(
'Daemon needs to change options, aborting workers')
self.cleanup()
return True
return False
def check_on_all_running_workers(self):
for p in self.spawned_pids():
try:
pid, status = os.waitpid(p, os.WNOHANG)
except OSError as err:
if err.errno not in (errno.EINTR, errno.ECHILD):
raise
self.logger.notice('Worker %s died', p)
else:
if pid == 0:
# child still running
continue
self.logger.debug('Worker %s exited', p)
self.register_worker_exit(p)
def _run(self, once, **kwargs):
self.ask_daemon_to_prepare_workers(once, **kwargs)
if not self.unspawned_worker_options:
return self._run_inline(once, **kwargs)
for per_worker_options in self.iter_unspawned_workers():
if self._fork(once, **per_worker_options) == 0:
return 0
while self.running:
if self.abort_workers_if_daemon_would_like():
self.ask_daemon_to_prepare_workers(once, **kwargs)
self.check_on_all_running_workers()
if not once:
for per_worker_options in self.iter_unspawned_workers():
if self._fork(once, **per_worker_options) == 0:
return 0
else:
if not self.spawned_pids():
self.logger.notice('Finished %s', os.getpid())
break
time.sleep(self.daemon.WORKERS_HEALTHCHECK_INTERVAL)
self.daemon.post_multiprocess_run()
return 0
def cleanup(self):
for p in self.spawned_pids():
try:
os.kill(p, signal.SIGTERM)
except OSError as err:
if err.errno not in (errno.ESRCH, errno.EINTR, errno.ECHILD):
raise
self.register_worker_exit(p)
self.logger.debug('Cleaned up worker %s', p)
def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
"""
Loads settings from conf, then instantiates daemon ``klass`` and runs the
daemon with the specified ``once`` kwarg. The section_name will be derived
from the daemon ``klass`` if not provided (e.g. ObjectReplicator =>
object-replicator).
:param klass: Class to instantiate, subclass of :class:`Daemon`
:param conf_file: Path to configuration file
:param section_name: Section name from conf file to load config from
:param once: Passed to daemon :meth:`Daemon.run` method
"""
# very often the config section_name is based on the class name
# the None singleton will be passed through to readconf as is
if section_name == '':
section_name = sub(r'([a-z])([A-Z])', r'\1-\2',
klass.__name__).lower()
try:
conf = utils.readconf(conf_file, section_name,
log_name=kwargs.get('log_name'))
except (ValueError, IOError) as e:
# The message will be printed to stderr
# and results in an exit code of 1.
sys.exit(e)
# patch eventlet/logging early
utils.monkey_patch()
eventlet.hubs.use_hub(utils.get_hub())
# once on command line (i.e. daemonize=false) will over-ride config
once = once or not utils.config_true_value(conf.get('daemonize', 'true'))
# pre-configure logger
if 'logger' in kwargs:
logger = kwargs.pop('logger')
else:
logger = utils.get_logger(conf, conf.get('log_name', section_name),
log_to_console=kwargs.pop('verbose', False),
log_route=section_name)
# optional nice/ionice priority scheduling
utils.modify_priority(conf, logger)
# disable fallocate if desired
if utils.config_true_value(conf.get('disable_fallocate', 'no')):
utils.disable_fallocate()
# set utils.FALLOCATE_RESERVE if desired
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value(conf.get('fallocate_reserve', '1%'))
# By default, disable eventlet printing stacktraces
eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no'))
eventlet.debug.hub_exceptions(eventlet_debug)
# Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
# some platforms. This locks in reported times to UTC.
os.environ['TZ'] = 'UTC+0'
time.tzset()
logger.notice('Starting %s', os.getpid())
try:
d = klass(conf)
DaemonStrategy(d, logger).run(once=once, **kwargs)
except KeyboardInterrupt:
logger.info('User quit')
logger.notice('Exited %s', os.getpid())
return d
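# Illustrative sketch (editor's addition, not part of the original module):
# a console script typically wires a Daemon subclass up like this (the class
# and conf path are only examples):
#
#     run_daemon(ObjectAuditor, '/etc/swift/object-server/1.conf',
#                section_name='object-auditor', once=True)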
| swift-master | swift/common/daemon.py |
# Copyright (c) 2010-2021 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
RECON_RELINKER_FILE = 'relinker.recon'
RECON_OBJECT_FILE = 'object.recon'
RECON_CONTAINER_FILE = 'container.recon'
RECON_ACCOUNT_FILE = 'account.recon'
RECON_DRIVE_FILE = 'drive.recon'
DEFAULT_RECON_CACHE_PATH = '/var/cache/swift'
def server_type_to_recon_file(server_type):
if not isinstance(server_type, six.string_types) or \
server_type.lower() not in ('account', 'container', 'object'):
raise ValueError('Invalid server_type')
return "%s.recon" % server_type.lower()
| swift-master | swift/common/recon.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WSGI tools for use with swift."""
from __future__ import print_function
import errno
import fcntl
import os
import signal
import sys
from textwrap import dedent
import time
import warnings
import eventlet
import eventlet.debug
from eventlet import greenio, GreenPool, sleep, wsgi, listen, Timeout
from paste.deploy import loadwsgi
from eventlet.green import socket, ssl, os as green_os
from io import BytesIO
import six
from six import StringIO
from swift.common import utils, constraints
from swift.common.http_protocol import SwiftHttpProtocol, \
SwiftHttpProxiedProtocol
from swift.common.storage_policy import BindPortsCache
from swift.common.swob import Request, wsgi_unquote
from swift.common.utils import capture_stdio, disable_fallocate, \
drop_privileges, get_logger, NullLogger, config_true_value, \
validate_configuration, get_hub, config_auto_int_value, \
reiterate, clean_up_daemon_hygiene, systemd_notify, NicerInterpolation
SIGNUM_TO_NAME = {getattr(signal, n): n for n in dir(signal)
if n.startswith('SIG') and '_' not in n}
NOTIFY_FD_ENV_KEY = '__SWIFT_SERVER_NOTIFY_FD'
# Set maximum line size of message headers to be accepted.
wsgi.MAX_HEADER_LINE = constraints.MAX_HEADER_SIZE
try:
import multiprocessing
CPU_COUNT = multiprocessing.cpu_count() or 1
except (ImportError, NotImplementedError):
CPU_COUNT = 1
class NamedConfigLoader(loadwsgi.ConfigLoader):
"""
Patch paste.deploy's ConfigLoader so each context object will know what
config section it came from.
"""
def get_context(self, object_type, name=None, global_conf=None):
if not six.PY2:
self.parser._interpolation = NicerInterpolation()
context = super(NamedConfigLoader, self).get_context(
object_type, name=name, global_conf=global_conf)
context.name = name
context.local_conf['__name__'] = name
return context
loadwsgi.ConfigLoader = NamedConfigLoader
class ConfigDirLoader(NamedConfigLoader):
"""
Read configuration from multiple files under the given path.
"""
def __init__(self, conf_dir):
# parent class uses filename attribute when building error messages
self.filename = conf_dir = conf_dir.strip()
defaults = {
'here': os.path.normpath(os.path.abspath(conf_dir)),
'__file__': os.path.abspath(conf_dir)
}
self.parser = loadwsgi.NicerConfigParser(conf_dir, defaults=defaults)
self.parser.optionxform = str # Don't lower-case keys
utils.read_conf_dir(self.parser, conf_dir)
def _loadconfigdir(object_type, uri, path, name, relative_to, global_conf):
if relative_to:
path = os.path.normpath(os.path.join(relative_to, path))
loader = ConfigDirLoader(path)
if global_conf:
loader.update_defaults(global_conf, overwrite=False)
return loader.get_context(object_type, name, global_conf)
# add config_dir parsing to paste.deploy
loadwsgi._loaders['config_dir'] = _loadconfigdir
class ConfigString(NamedConfigLoader):
"""
Wrap a raw config string up for paste.deploy.
If you give one of these to our loadcontext (e.g. give it to our
appconfig) we'll intercept it and get it routed to the right loader.
"""
def __init__(self, config_string):
self.contents = StringIO(dedent(config_string))
self.filename = "string"
defaults = {
'here': "string",
'__file__': self,
}
self.parser = loadwsgi.NicerConfigParser("string", defaults=defaults)
self.parser.optionxform = str # Don't lower-case keys
# Defaults don't need interpolation (crazy PasteDeploy...)
self.parser.defaults = lambda: dict(self.parser._defaults, **defaults)
if six.PY2:
self.parser.readfp(self.contents)
else:
self.parser.read_file(self.contents)
def readline(self, *args, **kwargs):
return self.contents.readline(*args, **kwargs)
def seek(self, *args, **kwargs):
return self.contents.seek(*args, **kwargs)
def __iter__(self):
return iter(self.contents)
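# Illustrative sketch (not part of the upstream module): a raw config string
# wrapped in ConfigString can be handed straight to loadapp() below, which
# detects ConfigLoader instances and skips URI parsing. The pipeline
# contents here are assumptions for the example only.
def _example_app_from_string():
    conf_body = """
    [DEFAULT]
    swift_dir = /etc/swift

    [pipeline:main]
    pipeline = catch_errors proxy-server

    [app:proxy-server]
    use = egg:swift#proxy

    [filter:catch_errors]
    use = egg:swift#catch_errors
    """
    return loadapp(ConfigString(conf_body))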
def wrap_conf_type(f):
"""
    Wrap a function whose first argument is a paste.deploy style config uri,
such that you can pass it an un-adorned raw filesystem path (or config
string) and the config directive (either config:, config_dir:, or
config_str:) will be added automatically based on the type of entity
(either a file or directory, or if no such entity on the file system -
just a string) before passing it through to the paste.deploy function.
"""
def wrapper(conf_path, *args, **kwargs):
if os.path.isdir(conf_path):
conf_type = 'config_dir'
else:
conf_type = 'config'
conf_uri = '%s:%s' % (conf_type, conf_path)
return f(conf_uri, *args, **kwargs)
return wrapper
appconfig = wrap_conf_type(loadwsgi.appconfig)
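# Illustrative sketch: because appconfig is wrapped by wrap_conf_type(),
# callers may pass a bare path (file or directory) and the appropriate
# config:/config_dir: scheme is prepended automatically. The path and
# section name below are placeholders.
def _example_appconfig(conf_path='/etc/swift/proxy-server.conf'):
    return appconfig(conf_path, name='proxy-server')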
def get_socket(conf):
"""Bind socket to bind ip:port in conf
:param conf: Configuration dict to read settings from
:returns: a socket object as returned from socket.listen or
ssl.wrap_socket if conf specifies cert_file
"""
try:
bind_port = int(conf['bind_port'])
except (ValueError, KeyError, TypeError):
raise ConfigFilePortError()
bind_addr = (conf.get('bind_ip', '0.0.0.0'), bind_port)
address_family = [addr[0] for addr in socket.getaddrinfo(
bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
sock = None
bind_timeout = int(conf.get('bind_timeout', 30))
retry_until = time.time() + bind_timeout
warn_ssl = False
try:
keepidle = int(conf.get('keep_idle', 600))
if keepidle <= 0 or keepidle >= 2 ** 15 - 1:
raise ValueError()
except (ValueError, KeyError, TypeError):
raise ConfigFileError()
while not sock and time.time() < retry_until:
try:
sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)),
family=address_family)
if 'cert_file' in conf:
warn_ssl = True
sock = ssl.wrap_socket(sock, certfile=conf['cert_file'],
keyfile=conf['key_file'])
except socket.error as err:
if err.args[0] != errno.EADDRINUSE:
raise
sleep(0.1)
if not sock:
raise Exception('Could not bind to %(addr)s:%(port)s '
'after trying for %(timeout)s seconds' % {
'addr': bind_addr[0], 'port': bind_addr[1],
'timeout': bind_timeout})
# in my experience, sockets can hang around forever without keepalive
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if hasattr(socket, 'TCP_KEEPIDLE'):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, keepidle)
if warn_ssl:
ssl_warning_message = ('WARNING: SSL should only be enabled for '
'testing purposes. Use external SSL '
'termination for a production deployment.')
get_logger(conf).warning(ssl_warning_message)
print(ssl_warning_message)
return sock
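# Illustrative sketch: get_socket() only needs a dict-like conf with at
# least bind_port; everything else falls back to defaults. The address and
# port below are placeholders, not requirements.
def _example_listen_socket():
    return get_socket({'bind_ip': '127.0.0.1', 'bind_port': '6200'})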
class RestrictedGreenPool(GreenPool):
"""
Works the same as GreenPool, but if the size is specified as one, then the
spawn_n() method will invoke waitall() before returning to prevent the
caller from doing any other work (like calling accept()).
"""
def __init__(self, size=1024):
super(RestrictedGreenPool, self).__init__(size=size)
self._rgp_do_wait = (size == 1)
def spawn_n(self, *args, **kwargs):
super(RestrictedGreenPool, self).spawn_n(*args, **kwargs)
if self._rgp_do_wait:
self.waitall()
class PipelineWrapper(object):
"""
This class provides a number of utility methods for
modifying the composition of a wsgi pipeline.
"""
def __init__(self, context):
self.context = context
def __contains__(self, entry_point_name):
try:
self.index(entry_point_name)
return True
except ValueError:
return False
def startswith(self, entry_point_name):
"""
Tests if the pipeline starts with the given entry point name.
:param entry_point_name: entry point of middleware or app (Swift only)
:returns: True if entry_point_name is first in pipeline, False
otherwise
"""
try:
first_ctx = self.context.filter_contexts[0]
except IndexError:
first_ctx = self.context.app_context
return first_ctx.entry_point_name == entry_point_name
def _format_for_display(self, ctx):
# Contexts specified by pipeline= have .name set in NamedConfigLoader.
if hasattr(ctx, 'name'):
return ctx.name
# This should not happen: a foreign context. Let's not crash.
return "<unknown>"
def __str__(self):
parts = [self._format_for_display(ctx)
for ctx in self.context.filter_contexts]
parts.append(self._format_for_display(self.context.app_context))
return " ".join(parts)
def create_filter(self, entry_point_name):
"""
Creates a context for a filter that can subsequently be added
to a pipeline context.
:param entry_point_name: entry point of the middleware (Swift only)
:returns: a filter context
"""
spec = 'egg:swift#' + entry_point_name
ctx = loadwsgi.loadcontext(loadwsgi.FILTER, spec,
global_conf=self.context.global_conf)
ctx.protocol = 'paste.filter_factory'
ctx.name = entry_point_name
return ctx
def index(self, entry_point_name):
"""
Returns the first index of the given entry point name in the pipeline.
Raises ValueError if the given module is not in the pipeline.
"""
for i, ctx in enumerate(self.context.filter_contexts):
if ctx.entry_point_name == entry_point_name:
return i
raise ValueError("%s is not in pipeline" % (entry_point_name,))
def insert_filter(self, ctx, index=0):
"""
Inserts a filter module into the pipeline context.
:param ctx: the context to be inserted
:param index: (optional) index at which filter should be
inserted in the list of pipeline filters. Default
is 0, which means the start of the pipeline.
"""
self.context.filter_contexts.insert(index, ctx)
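# Illustrative sketch (not part of the upstream module): this is the typical
# shape of an app's modify_wsgi_pipeline() hook -- check whether a filter is
# already in the pipeline by entry point name and, if not, create and insert
# it. 'catch_errors' is only an example entry point.
def _ensure_filter_at_front(pipe, entry_point='catch_errors'):
    if entry_point not in pipe:
        pipe.insert_filter(pipe.create_filter(entry_point), index=0)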
def loadcontext(object_type, uri, name=None, relative_to=None,
global_conf=None):
if isinstance(uri, loadwsgi.ConfigLoader):
# bypass loadcontext's uri parsing and loader routing and
# just directly return the context
if global_conf:
uri.update_defaults(global_conf, overwrite=False)
return uri.get_context(object_type, name, global_conf)
add_conf_type = wrap_conf_type(lambda x: x)
return loadwsgi.loadcontext(object_type, add_conf_type(uri), name=name,
relative_to=relative_to,
global_conf=global_conf)
def loadapp(conf_file, global_conf=None, allow_modify_pipeline=True):
"""
Loads a context from a config file, and if the context is a pipeline
then presents the app with the opportunity to modify the pipeline.
:param conf_file: path to a config file
:param global_conf: a dict of options to update the loaded config. Options
in ``global_conf`` will override those in ``conf_file`` except where
the ``conf_file`` option is preceded by ``set``.
:param allow_modify_pipeline: if True, and the context is a pipeline, and
the loaded app has a ``modify_wsgi_pipeline`` property, then that
property will be called before the pipeline is loaded.
:return: the loaded app
"""
global_conf = global_conf or {}
ctx = loadcontext(loadwsgi.APP, conf_file, global_conf=global_conf)
if ctx.object_type.name == 'pipeline':
# give app the opportunity to modify the pipeline context
ultimate_app = ctx.app_context.create()
func = getattr(ultimate_app, 'modify_wsgi_pipeline', None)
if func and allow_modify_pipeline:
func(PipelineWrapper(ctx))
filters = [c.create() for c in reversed(ctx.filter_contexts)]
pipeline = [ultimate_app]
request_logging_app = app = ultimate_app
for filter_app in filters:
app = filter_app(pipeline[0])
pipeline.insert(0, app)
if request_logging_app is ultimate_app and \
app.__class__.__name__ == 'ProxyLoggingMiddleware':
request_logging_app = filter_app(ultimate_app)
# Set some separate-pipeline attrs
request_logging_app._pipeline = [
request_logging_app, ultimate_app]
request_logging_app._pipeline_request_logging_app = \
request_logging_app
request_logging_app._pipeline_final_app = ultimate_app
for app in pipeline:
app._pipeline = pipeline
# For things like making (logged) backend requests for
# get_account_info and get_container_info
app._pipeline_request_logging_app = request_logging_app
# For getting proxy-server options like *_existence_skip_cache_pct
app._pipeline_final_app = ultimate_app
return pipeline[0]
return ctx.create()
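# Illustrative sketch: once loadapp() has built a pipeline, every element
# carries _pipeline, _pipeline_request_logging_app and _pipeline_final_app
# references, so middleware can reach proxy-server options directly. The
# conf path is a placeholder.
def _example_final_app(conf_file='/etc/swift/proxy-server.conf'):
    app = loadapp(conf_file)
    # Falls back to the app itself when the config is not a pipeline.
    return getattr(app, '_pipeline_final_app', app)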
def load_app_config(conf_file):
"""
Read the app config section from a config file.
:param conf_file: path to a config file
:return: a dict
"""
app_conf = {}
try:
ctx = loadcontext(loadwsgi.APP, conf_file)
except LookupError:
pass
else:
app_conf.update(ctx.app_context.global_conf)
app_conf.update(ctx.app_context.local_conf)
return app_conf
def run_server(conf, logger, sock, global_conf=None, ready_callback=None,
allow_modify_pipeline=True):
# Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
# some platforms. This locks in reported times to UTC.
os.environ['TZ'] = 'UTC+0'
time.tzset()
eventlet.hubs.use_hub(get_hub())
eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no'))
eventlet.debug.hub_exceptions(eventlet_debug)
wsgi_logger = NullLogger()
if eventlet_debug:
# let eventlet.wsgi.server log to stderr
wsgi_logger = None
    # utils.LogAdapter stashes name in server; fall back on unadapted loggers
if not global_conf:
if hasattr(logger, 'server'):
log_name = logger.server
else:
log_name = logger.name
global_conf = {'log_name': log_name}
app = loadapp(conf['__file__'], global_conf=global_conf,
allow_modify_pipeline=allow_modify_pipeline)
max_clients = int(conf.get('max_clients', '1024'))
pool = RestrictedGreenPool(size=max_clients)
# Select which protocol class to use (normal or one expecting PROXY
# protocol)
if config_true_value(conf.get('require_proxy_protocol', 'no')):
protocol_class = SwiftHttpProxiedProtocol
else:
protocol_class = SwiftHttpProtocol
server_kwargs = {
'custom_pool': pool,
'protocol': protocol_class,
'socket_timeout': float(conf.get('client_timeout') or 60),
# Disable capitalizing headers in Eventlet. This is necessary for
# the AWS SDK to work with s3api middleware (it needs an "ETag"
# header; "Etag" just won't do).
'capitalize_response_headers': False,
}
if conf.get('keepalive_timeout'):
server_kwargs['keepalive'] = float(conf['keepalive_timeout']) or False
if ready_callback:
ready_callback()
# Yes, eventlet, we know -- we have to support bad clients, though
warnings.filterwarnings(
'ignore', message='capitalize_response_headers is disabled')
try:
wsgi.server(sock, app, wsgi_logger, **server_kwargs)
except socket.error as err:
if err.errno != errno.EINVAL:
raise
pool.waitall()
class StrategyBase(object):
"""
Some operations common to all strategy classes.
"""
def __init__(self, conf, logger):
self.conf = conf
self.logger = logger
self.signaled_ready = False
# Each strategy is welcome to track data however it likes, but all
# socket refs should be somewhere in this dict. This allows forked-off
# children to easily drop refs to sibling sockets in post_fork_hook().
self.tracking_data = {}
def post_fork_hook(self):
"""
Called in each forked-off child process, prior to starting the actual
        wsgi server, to perform any initialization such as dropping privileges.
"""
if not self.signaled_ready:
capture_stdio(self.logger)
drop_privileges(self.conf.get('user', 'swift'))
del self.tracking_data # children don't need to track siblings
def shutdown_sockets(self):
"""
Shutdown any listen sockets.
"""
for sock in self.iter_sockets():
greenio.shutdown_safe(sock)
sock.close()
def set_close_on_exec_on_listen_sockets(self):
"""
Set the close-on-exec flag on any listen sockets.
"""
for sock in self.iter_sockets():
if six.PY2:
fcntl.fcntl(sock.fileno(), fcntl.F_SETFD, fcntl.FD_CLOEXEC)
else:
# Python 3.4 and later default to sockets having close-on-exec
# set (what PEP 0446 calls "non-inheritable"). This new method
# on socket objects is provided to toggle it.
sock.set_inheritable(False)
def signal_ready(self):
"""
Signal that the server is up and accepting connections.
"""
if self.signaled_ready:
return # Already did it
# Redirect errors to logger and close stdio. swift-init (for example)
# uses this to know that the service is ready to accept connections.
capture_stdio(self.logger)
# If necessary, signal an old copy of us that it's okay to shutdown
# its listen sockets now because ours are up and ready to receive
# connections. This is used for seamless reloading using SIGUSR1.
reexec_signal_fd = os.getenv(NOTIFY_FD_ENV_KEY)
if reexec_signal_fd:
reexec_signal_fd = int(reexec_signal_fd)
os.write(reexec_signal_fd, str(os.getpid()).encode('utf8'))
os.close(reexec_signal_fd)
# Finally, signal systemd (if appropriate) that process started
# properly.
systemd_notify(logger=self.logger)
self.signaled_ready = True
class WorkersStrategy(StrategyBase):
"""
WSGI server management strategy object for a single bind port and listen
socket shared by a configured number of forked-off workers.
Tracking data is a map of ``pid -> socket``.
Used in :py:func:`run_wsgi`.
:param dict conf: Server configuration dictionary.
    :param logger: The server's :py:class:`~swift.common.utils.LogAdapter`
object.
"""
def __init__(self, conf, logger):
super(WorkersStrategy, self).__init__(conf, logger)
self.worker_count = config_auto_int_value(conf.get('workers'),
CPU_COUNT)
def loop_timeout(self):
"""
We want to keep from busy-waiting, but we also need a non-None value so
the main loop gets a chance to tell whether it should keep running or
not (e.g. SIGHUP received).
So we return 0.5.
"""
return 0.5
def no_fork_sock(self):
"""
Return a server listen socket if the server should run in the
foreground (no fork).
"""
# Useful for profiling [no forks].
if self.worker_count == 0:
return get_socket(self.conf)
def new_worker_socks(self):
"""
        Yield a sequence of (socket, opaque_data) tuples for each server which
        should be forked-off and started.
        The opaque_data item for each socket will be passed into the
:py:meth:`log_sock_exit` and :py:meth:`register_worker_start` methods
where it will be ignored.
"""
while len(self.tracking_data) < self.worker_count:
yield get_socket(self.conf), None
def log_sock_exit(self, sock, _unused):
"""
Log a server's exit.
        :param socket sock: The listen socket for the worker that exited.
:param _unused: The socket's opaque_data yielded by
:py:meth:`new_worker_socks`.
"""
self.logger.notice('Child %d exiting normally' % os.getpid())
def register_worker_start(self, sock, _unused, pid):
"""
Called when a new worker is started.
:param socket sock: The listen socket for the worker just started.
:param _unused: The socket's opaque_data yielded by new_worker_socks().
:param int pid: The new worker process' PID
"""
self.logger.notice('Started child %s from parent %s',
pid, os.getpid())
self.tracking_data[pid] = sock
def register_worker_exit(self, pid):
"""
Called when a worker has exited.
NOTE: a re-exec'ed server can reap the dead worker PIDs from the old
server process that is being replaced as part of a service reload
(SIGUSR1). So we need to be robust to getting some unknown PID here.
:param int pid: The PID of the worker that exited.
"""
sock = self.tracking_data.pop(pid, None)
if sock is None:
self.logger.info('Ignoring wait() result from unknown PID %s', pid)
else:
self.logger.error('Removing dead child %s from parent %s',
pid, os.getpid())
greenio.shutdown_safe(sock)
sock.close()
def iter_sockets(self):
"""
Yields all known listen sockets.
"""
for sock in self.tracking_data.values():
yield sock
class ServersPerPortStrategy(StrategyBase):
"""
WSGI server management strategy object for an object-server with one listen
port per unique local port in the storage policy rings. The
`servers_per_port` integer config setting determines how many workers are
run per port.
Tracking data is a map like ``port -> [(pid, socket), ...]``.
Used in :py:func:`run_wsgi`.
:param dict conf: Server configuration dictionary.
    :param logger: The server's :py:class:`~swift.common.utils.LogAdapter`
object.
:param int servers_per_port: The number of workers to run per port.
"""
def __init__(self, conf, logger, servers_per_port):
super(ServersPerPortStrategy, self).__init__(conf, logger)
self.servers_per_port = servers_per_port
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.ring_check_interval = float(conf.get('ring_check_interval', 15))
# typically ring_ip will be the same as bind_ip, but in a container the
        # bind_ip might be different from the host IP address used to look up
# devices/ports in the ring
ring_ip = conf.get('ring_ip', conf.get('bind_ip', '0.0.0.0'))
self.cache = BindPortsCache(self.swift_dir, ring_ip)
def _reload_bind_ports(self):
self.bind_ports = self.cache.all_bind_ports_for_node()
def _bind_port(self, port):
new_conf = self.conf.copy()
new_conf['bind_port'] = port
return get_socket(new_conf)
def loop_timeout(self):
"""
Return timeout before checking for reloaded rings.
:returns: The time to wait for a child to exit before checking for
reloaded rings (new ports).
"""
return self.ring_check_interval
def no_fork_sock(self):
"""
This strategy does not support running in the foreground.
"""
pass
def new_worker_socks(self):
"""
Yield a sequence of (socket, (port, server_idx)) tuples for each server
which should be forked-off and started.
Any sockets for "orphaned" ports no longer in any ring will be closed
(causing their associated workers to gracefully exit) after all new
sockets have been yielded.
        The server_idx item for each socket will be passed into the
:py:meth:`log_sock_exit` and :py:meth:`register_worker_start` methods.
"""
self._reload_bind_ports()
desired_port_index_pairs = {
(p, i) for p in self.bind_ports
for i in range(self.servers_per_port)}
current_port_index_pairs = {
(p, i)
for p, port_data in self.tracking_data.items()
for i, (pid, sock) in enumerate(port_data)
if pid is not None}
if desired_port_index_pairs != current_port_index_pairs:
# Orphan ports are ports which had object-server processes running,
# but which no longer appear in the ring. We'll kill them after we
# start missing workers.
orphan_port_index_pairs = current_port_index_pairs - \
desired_port_index_pairs
# Fork off worker(s) for every port that's supposed to have
# worker(s) but doesn't
missing_port_index_pairs = desired_port_index_pairs - \
current_port_index_pairs
for port, server_idx in sorted(missing_port_index_pairs):
try:
sock = self._bind_port(port)
except Exception as e:
self.logger.critical('Unable to bind to port %d: %s',
port, e)
continue
yield sock, (port, server_idx)
for port, idx in orphan_port_index_pairs:
# For any port in orphan_port_index_pairs, it is guaranteed
# that there should be no listen socket for that port, so we
# can close and forget them.
pid, sock = self.tracking_data[port][idx]
greenio.shutdown_safe(sock)
sock.close()
self.logger.notice(
'Closing unnecessary sock for port %d (child pid %d)',
port, pid)
self.tracking_data[port][idx] = (None, None)
if all(sock is None
for _pid, sock in self.tracking_data[port]):
del self.tracking_data[port]
def log_sock_exit(self, sock, data):
"""
Log a server's exit.
"""
port, server_idx = data
self.logger.notice('Child %d (PID %d, port %d) exiting normally',
server_idx, os.getpid(), port)
def register_worker_start(self, sock, data, pid):
"""
Called when a new worker is started.
:param socket sock: The listen socket for the worker just started.
:param tuple data: The socket's (port, server_idx) as yielded by
:py:meth:`new_worker_socks`.
:param int pid: The new worker process' PID
"""
port, server_idx = data
self.logger.notice('Started child %d (PID %d) for port %d',
server_idx, pid, port)
if port not in self.tracking_data:
self.tracking_data[port] = [(None, None)] * self.servers_per_port
self.tracking_data[port][server_idx] = (pid, sock)
def register_worker_exit(self, pid):
"""
Called when a worker has exited.
:param int pid: The PID of the worker that exited.
"""
for port_data in self.tracking_data.values():
for idx, (child_pid, sock) in enumerate(port_data):
if child_pid == pid:
port_data[idx] = (None, None)
greenio.shutdown_safe(sock)
sock.close()
return
def iter_sockets(self):
"""
Yields all known listen sockets.
"""
for port_data in self.tracking_data.values():
for _pid, sock in port_data:
yield sock
def check_config(conf_path, app_section, *args, **kwargs):
    # Load configuration, set logger, and load request processor
(conf, logger, log_name) = \
_initrp(conf_path, app_section, *args, **kwargs)
# optional nice/ionice priority scheduling
utils.modify_priority(conf, logger)
servers_per_port = int(conf.get('servers_per_port', '0') or 0)
# NOTE: for now servers_per_port is object-server-only; future work could
# be done to test and allow it to be used for account and container
# servers, but that has not been done yet.
if servers_per_port and app_section == 'object-server':
strategy = ServersPerPortStrategy(
conf, logger, servers_per_port=servers_per_port)
else:
strategy = WorkersStrategy(conf, logger)
try:
# Quick sanity check
if not (1 <= int(conf['bind_port']) <= 2 ** 16 - 1):
raise ValueError
except (ValueError, KeyError, TypeError):
error_msg = 'bind_port wasn\'t properly set in the config file. ' \
'It must be explicitly set to a valid port number.'
logger.error(error_msg)
raise ConfigFileError(error_msg)
# patch event before loadapp
utils.monkey_patch()
# Ensure the configuration and application can be loaded before proceeding.
global_conf = {'log_name': log_name}
loadapp(conf_path, global_conf=global_conf)
if 'global_conf_callback' in kwargs:
kwargs['global_conf_callback'](conf, global_conf)
# set utils.FALLOCATE_RESERVE if desired
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value(conf.get('fallocate_reserve', '1%'))
return conf, logger, global_conf, strategy
def run_wsgi(conf_path, app_section, *args, **kwargs):
"""
Runs the server according to some strategy. The default strategy runs a
    specified number of workers in a pre-fork model. The object-server (only)
may use a servers-per-port strategy if its config has a servers_per_port
setting with a value greater than zero.
:param conf_path: Path to paste.deploy style configuration file/directory
:param app_section: App name from conf file to load config from
:param allow_modify_pipeline: Boolean for whether the server should have
an opportunity to change its own pipeline.
Defaults to True
:param test_config: if False (the default) then load and validate the
config and if successful then continue to run the server; if True then
load and validate the config but do not run the server.
:returns: 0 if successful, nonzero otherwise
"""
try:
conf, logger, global_conf, strategy = check_config(
conf_path, app_section, *args, **kwargs)
except ConfigFileError as err:
print(err)
return 1
if kwargs.get('test_config'):
return 0
    # Do some daemonization process hygiene before we fork any children or run a
# server without forking.
clean_up_daemon_hygiene()
allow_modify_pipeline = kwargs.get('allow_modify_pipeline', True)
no_fork_sock = strategy.no_fork_sock()
if no_fork_sock:
run_server(conf, logger, no_fork_sock, global_conf=global_conf,
ready_callback=strategy.signal_ready,
allow_modify_pipeline=allow_modify_pipeline)
return 0
def stop_with_signal(signum, *args):
"""Set running flag to False and capture the signum"""
running_context[0] = False
running_context[1] = signum
# context to hold boolean running state and stop signum
running_context = [True, None]
signal.signal(signal.SIGTERM, stop_with_signal)
signal.signal(signal.SIGHUP, stop_with_signal)
signal.signal(signal.SIGUSR1, stop_with_signal)
while running_context[0]:
new_workers = {} # pid -> status pipe
for sock, sock_info in strategy.new_worker_socks():
read_fd, write_fd = os.pipe()
pid = os.fork()
if pid == 0:
os.close(read_fd)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
def shutdown_my_listen_sock(signum, *args):
greenio.shutdown_safe(sock)
signal.signal(signal.SIGHUP, shutdown_my_listen_sock)
signal.signal(signal.SIGUSR1, shutdown_my_listen_sock)
strategy.post_fork_hook()
def notify():
os.write(write_fd, b'ready')
os.close(write_fd)
run_server(conf, logger, sock, ready_callback=notify,
allow_modify_pipeline=allow_modify_pipeline)
strategy.log_sock_exit(sock, sock_info)
return 0
else:
os.close(write_fd)
new_workers[pid] = read_fd
strategy.register_worker_start(sock, sock_info, pid)
for pid, read_fd in new_workers.items():
worker_status = os.read(read_fd, 30)
os.close(read_fd)
if worker_status != b'ready':
raise Exception(
'worker %d did not start normally: %r' %
(pid, worker_status))
# TODO: signal_ready() as soon as we have at least one new worker for
# each port, instead of waiting for all of them
strategy.signal_ready()
# The strategy may need to pay attention to something in addition to
# child process exits (like new ports showing up in a ring).
#
# NOTE: a timeout value of None will just instantiate the Timeout
# object and not actually schedule it, which is equivalent to no
# timeout for the green_os.wait().
loop_timeout = strategy.loop_timeout()
with Timeout(loop_timeout, exception=False):
try:
try:
pid, status = green_os.wait()
if os.WIFEXITED(status) or os.WIFSIGNALED(status):
strategy.register_worker_exit(pid)
except OSError as err:
if err.errno not in (errno.EINTR, errno.ECHILD):
raise
if err.errno == errno.ECHILD:
# If there are no children at all (ECHILD), then
# there's nothing to actually wait on. We sleep
# for a little bit to avoid a tight CPU spin
# and still are able to catch any KeyboardInterrupt
# events that happen. The value of 0.01 matches the
# value in eventlet's waitpid().
sleep(0.01)
except KeyboardInterrupt:
logger.notice('User quit')
running_context[0] = False
break
if running_context[1] is not None:
try:
signame = SIGNUM_TO_NAME[running_context[1]]
except KeyError:
logger.error('Stopping with unexpected signal %r' %
running_context[1])
else:
logger.notice('%s received (%s)', signame, os.getpid())
if running_context[1] == signal.SIGTERM:
os.killpg(0, signal.SIGTERM)
elif running_context[1] == signal.SIGUSR1:
# set up a pipe, fork off a child to handle cleanup later,
        # and re-exec ourselves with an environment variable set which will
# indicate which fd (one of the pipe ends) to write a byte to
# to indicate listen socket setup is complete. That will signal
# the forked-off child to complete its listen socket shutdown.
#
# NOTE: all strategies will now require the parent process to retain
        # superuser privileges so that the re-exec'ed process can bind a new
# socket to the configured IP & port(s). We can't just reuse existing
# listen sockets because then the bind IP couldn't be changed.
#
# NOTE: we need to set all our listen sockets close-on-exec so the only
# open reference to those file descriptors will be in the forked-off
# child here who waits to shutdown the old server's listen sockets. If
# the re-exec'ed server's old listen sockets aren't closed-on-exec,
# then the old server can't actually ever exit.
strategy.set_close_on_exec_on_listen_sockets()
read_fd, write_fd = os.pipe()
orig_server_pid = os.getpid()
child_pid = os.fork()
if child_pid:
# parent; set env var for fds and reexec ourselves
os.close(read_fd)
os.putenv(NOTIFY_FD_ENV_KEY, str(write_fd))
myself = os.path.realpath(sys.argv[0])
logger.info("Old server PID=%d re'execing as: %r",
orig_server_pid, [myself] + list(sys.argv))
if hasattr(os, 'set_inheritable'):
# See https://www.python.org/dev/peps/pep-0446/
os.set_inheritable(write_fd, True)
os.execv(myself, sys.argv)
logger.error('Somehow lived past os.execv()?!')
exit('Somehow lived past os.execv()?!')
elif child_pid == 0:
# child
os.close(write_fd)
logger.info('Old server temporary child PID=%d waiting for '
"re-exec'ed PID=%d to signal readiness...",
os.getpid(), orig_server_pid)
try:
got_pid = os.read(read_fd, 30)
except Exception:
logger.warning('Unexpected exception while reading from '
'pipe:', exc_info=True)
else:
got_pid = got_pid.decode('ascii')
if got_pid:
logger.info('Old server temporary child PID=%d notified '
'to shutdown old listen sockets by PID=%s',
os.getpid(), got_pid)
else:
logger.warning('Old server temporary child PID=%d *NOT* '
'notified to shutdown old listen sockets; '
'the pipe just *died*.', os.getpid())
try:
os.close(read_fd)
except Exception:
pass
strategy.shutdown_sockets()
signal.signal(signal.SIGTERM, signal.SIG_IGN)
logger.notice('Exited (%s)', os.getpid())
return 0
class ConfigFileError(Exception):
pass
class ConfigFilePortError(ConfigFileError):
pass
def _initrp(conf_path, app_section, *args, **kwargs):
try:
conf = appconfig(conf_path, name=app_section)
except Exception as e:
raise ConfigFileError("Error trying to load config from %s: %s" %
(conf_path, e))
validate_configuration()
# pre-configure logger
log_name = conf.get('log_name', app_section)
if 'logger' in kwargs:
logger = kwargs.pop('logger')
else:
logger = get_logger(conf, log_name,
log_to_console=kwargs.pop('verbose', False),
log_route='wsgi')
# disable fallocate if desired
if config_true_value(conf.get('disable_fallocate', 'no')):
disable_fallocate()
return (conf, logger, log_name)
def init_request_processor(conf_path, app_section, *args, **kwargs):
"""
Loads common settings from conf
Sets the logger
Loads the request processor
:param conf_path: Path to paste.deploy style configuration file/directory
:param app_section: App name from conf file to load config from
    :returns: a tuple of (app, conf, logger, log_name)
:raises ConfigFileError: Exception is raised for config file error
"""
(conf, logger, log_name) = _initrp(conf_path, app_section, *args, **kwargs)
app = loadapp(conf_path, global_conf={'log_name': log_name})
return (app, conf, logger, log_name)
class WSGIContext(object):
"""
This class provides a means to provide context (scope) for a middleware
filter to have access to the wsgi start_response results like the request
status and headers.
"""
def __init__(self, wsgi_app):
self.app = wsgi_app
def _start_response(self, status, headers, exc_info=None):
"""
Saves response info without sending it to the remote client.
Uses the same semantics as the usual WSGI start_response.
"""
self._response_status = status
self._response_headers = \
headers if isinstance(headers, list) else list(headers)
self._response_exc_info = exc_info
def _app_call(self, env):
"""
Ensures start_response has been called before returning.
"""
self._response_status = None
self._response_headers = None
self._response_exc_info = None
resp = self.app(env, self._start_response)
# if start_response has not been called, iterate until we've got a
# non-empty chunk, by which time the app *should* have called it
if self._response_status is None:
resp = reiterate(resp)
return resp
def _get_status_int(self):
"""
Returns the HTTP status int from the last called self._start_response
result.
"""
return int(self._response_status.split(' ', 1)[0])
def _response_header_value(self, key):
"Returns str of value for given header key or None"
for h_key, val in self._response_headers:
if h_key.lower() == key.lower():
return val
return None
def update_content_length(self, new_total_len):
self._response_headers = [
(h, v) for h, v in self._response_headers
if h.lower() != 'content-length']
self._response_headers.append(('Content-Length', str(new_total_len)))
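# Illustrative sketch (not part of the upstream module): middleware usually
# subclasses WSGIContext so it can inspect the captured status and headers
# before forwarding them to the real start_response. The header name is an
# example only.
class _ExampleContext(WSGIContext):
    def handle_request(self, env, start_response):
        body_iter = self._app_call(env)
        if self._get_status_int() == 200 and \
                self._response_header_value('X-Example') is None:
            self._response_headers.append(('X-Example', 'true'))
        start_response(self._response_status, self._response_headers,
                       self._response_exc_info)
        return body_iter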
def make_env(env, method=None, path=None, agent='Swift', query_string=None,
swift_source=None):
"""
Returns a new fresh WSGI environment.
:param env: The WSGI environment to base the new environment on.
:param method: The new REQUEST_METHOD or None to use the
original.
    :param path: The new path_info or None to use the original. path
should NOT be quoted. When building a url, a Webob
Request (in accordance with wsgi spec) will quote
env['PATH_INFO']. url += quote(environ['PATH_INFO'])
    :param query_string: The new query_string or None to use the original.
When building a url, a Webob Request will append
the query string directly to the url.
url += '?' + env['QUERY_STRING']
:param agent: The HTTP user agent to use; default 'Swift'. You
can put %(orig)s in the agent to have it replaced
with the original env's HTTP_USER_AGENT, such as
                  '%(orig)s StaticWeb'. You can also set agent to None to
use the original env's HTTP_USER_AGENT or '' to
have no HTTP_USER_AGENT.
:param swift_source: Used to mark the request as originating out of
middleware. Will be logged in proxy logs.
:returns: Fresh WSGI environment.
"""
newenv = {}
for name in ('HTTP_USER_AGENT', 'HTTP_HOST', 'PATH_INFO',
'QUERY_STRING', 'REMOTE_USER', 'REQUEST_METHOD',
'SCRIPT_NAME', 'SERVER_NAME', 'SERVER_PORT',
'HTTP_ORIGIN', 'HTTP_ACCESS_CONTROL_REQUEST_METHOD',
'SERVER_PROTOCOL', 'swift.cache', 'swift.source',
'swift.trans_id', 'swift.authorize_override',
'swift.authorize', 'HTTP_X_USER_ID', 'HTTP_X_PROJECT_ID',
'HTTP_REFERER', 'swift.infocache',
'swift.shard_listing_history'):
if name in env:
newenv[name] = env[name]
if method:
newenv['REQUEST_METHOD'] = method
if path:
newenv['PATH_INFO'] = path
newenv['SCRIPT_NAME'] = ''
if query_string is not None:
newenv['QUERY_STRING'] = query_string
if agent:
newenv['HTTP_USER_AGENT'] = (
agent % {'orig': env.get('HTTP_USER_AGENT', '')}).strip()
elif agent == '' and 'HTTP_USER_AGENT' in newenv:
del newenv['HTTP_USER_AGENT']
if swift_source:
newenv['swift.source'] = swift_source
newenv['wsgi.input'] = BytesIO()
if 'SCRIPT_NAME' not in newenv:
newenv['SCRIPT_NAME'] = ''
return newenv
def make_subrequest(env, method=None, path=None, body=None, headers=None,
agent='Swift', swift_source=None, make_env=make_env):
"""
Makes a new swob.Request based on the current env but with the
parameters specified.
:param env: The WSGI environment to base the new request on.
:param method: HTTP method of new request; default is from
the original env.
:param path: HTTP path of new request; default is from the
original env. path should be compatible with what you
would send to Request.blank. path should be quoted and it
                 can include a query string. For example:
'/a%20space?unicode_str%E8%AA%9E=y%20es'
:param body: HTTP body of new request; empty by default.
:param headers: Extra HTTP headers of new request; None by
default.
:param agent: The HTTP user agent to use; default 'Swift'. You
can put %(orig)s in the agent to have it replaced
with the original env's HTTP_USER_AGENT, such as
                  '%(orig)s StaticWeb'. You can also set agent to None to
use the original env's HTTP_USER_AGENT or '' to
have no HTTP_USER_AGENT.
:param swift_source: Used to mark the request as originating out of
middleware. Will be logged in proxy logs.
:param make_env: make_subrequest calls this make_env to help build the
swob.Request.
:returns: Fresh swob.Request object.
"""
query_string = None
path = path or ''
if path and '?' in path:
path, query_string = path.split('?', 1)
newenv = make_env(env, method, path=wsgi_unquote(path), agent=agent,
query_string=query_string, swift_source=swift_source)
if not headers:
headers = {}
if body:
return Request.blank(path, environ=newenv, body=body, headers=headers)
else:
return Request.blank(path, environ=newenv, headers=headers)
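# Illustrative sketch: middleware can issue an internal GET for another
# object in the same WSGI environment and run it through a given app. The
# account/container/object path and swift_source tag are placeholders.
def _example_get_subrequest(env, app):
    sub_req = make_subrequest(env, method='GET',
                              path='/v1/AUTH_test/c/o',
                              swift_source='EXAMPLE')
    return sub_req.get_response(app)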
def make_pre_authed_env(env, method=None, path=None, agent='Swift',
query_string=None, swift_source=None):
"""Same as :py:func:`make_env` but with preauthorization."""
newenv = make_env(
env, method=method, path=path, agent=agent, query_string=query_string,
swift_source=swift_source)
newenv['swift.authorize'] = lambda req: None
newenv['swift.authorize_override'] = True
newenv['REMOTE_USER'] = '.wsgi.pre_authed'
return newenv
def make_pre_authed_request(env, method=None, path=None, body=None,
headers=None, agent='Swift', swift_source=None):
"""Same as :py:func:`make_subrequest` but with preauthorization."""
return make_subrequest(
env, method=method, path=path, body=body, headers=headers, agent=agent,
swift_source=swift_source, make_env=make_pre_authed_env)
| swift-master | swift/common/wsgi.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
class HeaderKeyDict(dict):
"""
A dict that title-cases all keys on the way in, so as to be
case-insensitive.
Note that all keys and values are expected to be wsgi strings,
though some allowances are made when setting values.
"""
def __init__(self, base_headers=None, **kwargs):
if base_headers:
self.update(base_headers)
self.update(kwargs)
@staticmethod
def _title(s):
if six.PY2:
return s.title()
else:
return s.encode('latin1').title().decode('latin1')
def update(self, other):
if hasattr(other, 'keys'):
for key in other.keys():
self[self._title(key)] = other[key]
else:
for key, value in other:
self[self._title(key)] = value
def __getitem__(self, key):
return dict.get(self, self._title(key))
def __setitem__(self, key, value):
key = self._title(key)
if value is None:
self.pop(key, None)
elif six.PY2 and isinstance(value, six.text_type):
return dict.__setitem__(self, key, value.encode('utf-8'))
elif six.PY3 and isinstance(value, six.binary_type):
return dict.__setitem__(self, key, value.decode('latin-1'))
else:
return dict.__setitem__(self, key, str(value))
def __contains__(self, key):
return dict.__contains__(self, self._title(key))
def __delitem__(self, key):
return dict.__delitem__(self, self._title(key))
def get(self, key, default=None):
return dict.get(self, self._title(key), default)
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
def pop(self, key, default=None):
return dict.pop(self, self._title(key), default)
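# Illustrative usage sketch (not part of the upstream module): keys are
# title-cased on the way in, so lookups are case-insensitive, values are
# coerced to (wsgi) strings, and assigning None deletes a header.
def _header_key_dict_examples():
    headers = HeaderKeyDict({'content-length': 123})
    assert headers['CONTENT-LENGTH'] == '123'   # value coerced to str
    headers['etag'] = 'abc'
    assert 'ETag' in headers and headers.get('Etag') == 'abc'
    headers['Content-Length'] = None            # assigning None deletes
    assert 'content-length' not in headers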
| swift-master | swift/common/header_key_dict.py |