diff -ru a/ufo/bin/gluster-swift-gen-builders b/ufo/bin/gluster-swift-gen-builders
--- a/ufo/bin/gluster-swift-gen-builders	2012-12-07 12:24:00.000000000 -0500
+++ b/ufo/bin/gluster-swift-gen-builders	2013-04-29 15:16:22.748000000 -0400
@@ -1,9 +1,25 @@
 #!/bin/bash
 
+# Note that these port numbers must match the configured values for the
+# various servers in their configuration files.
+declare -A port=(["account.builder"]=6012 ["container.builder"]=6011 \
+    ["object.builder"]=6010)
+
+builder_files="account.builder container.builder object.builder"
+
 function create {
-    swift-ring-builder $1 create 0 1 1
-    swift-ring-builder $1 add z1-127.0.0.1:$2/$3_ 100.0
+    swift-ring-builder $1 create 1 1 1 >> /tmp/out
+}
+
+function add {
+    swift-ring-builder $1 add z$2-127.0.0.1:$3/$4_ 100.0
+}
+
+function rebalance {
     swift-ring-builder $1 rebalance
+}
+
+function build {
     swift-ring-builder $1
 }
 
@@ -12,8 +28,17 @@
     exit 1
 fi
 
-# Note that these port numbers must match the configured values for the
-# various servers in their configuration files.
-create account.builder 6012 $1
-create container.builder 6011 $1
-create object.builder 6010 $1
+for builder_file in $builder_files
+do
+    create $builder_file
+
+    zone=1
+    for volname in $@
+    do
+        add $builder_file $zone ${port[$builder_file]} $volname
+        zone=$(expr $zone + 1)
+    done
+
+    rebalance $builder_file
+    build $builder_file
+done
diff -ru a/ufo/etc/fs.conf-gluster b/ufo/etc/fs.conf-gluster
--- a/ufo/etc/fs.conf-gluster	2012-12-07 12:24:00.000000000 -0500
+++ b/ufo/etc/fs.conf-gluster	2013-04-29 15:16:22.752000000 -0400
@@ -3,10 +3,6 @@
 # local host.
 mount_ip = localhost
 
-# The GlusterFS server need not be local, a remote server can also be used
-# by setting "remote_cluster = yes".
-remote_cluster = no
-
 # By default it is assumed the Gluster volumes can be accessed using other
 # methods besides UFO (not object only), which disables a caching
 # optimizations in order to keep in sync with file system changes.
diff -ru a/ufo/gluster/swift/common/constraints.py b/ufo/gluster/swift/common/constraints.py
--- a/ufo/gluster/swift/common/constraints.py	2012-12-07 12:24:00.000000000 -0500
+++ b/ufo/gluster/swift/common/constraints.py	2013-04-29 15:16:22.749000000 -0400
@@ -16,7 +16,8 @@
 from webob.exc import HTTPBadRequest
 
 import swift.common.constraints
-from gluster.swift.common import Glusterfs
+import swift.common.ring as _ring
+from gluster.swift.common import Glusterfs, ring
 
 
 MAX_OBJECT_NAME_COMPONENT_LENGTH = swift.common.constraints.constraints_conf_int(
@@ -80,3 +81,9 @@
 
 # Replace the original check mount with ours
 swift.common.constraints.check_mount = gluster_check_mount
+
+# Save the original Ring class
+__Ring = _ring.Ring
+
+# Replace the original Ring class
+_ring.Ring = ring.Ring
diff -ru a/ufo/gluster/swift/common/Glusterfs.py b/ufo/gluster/swift/common/Glusterfs.py
--- a/ufo/gluster/swift/common/Glusterfs.py	2012-12-07 12:24:00.000000000 -0500
+++ b/ufo/gluster/swift/common/Glusterfs.py	2013-04-29 15:16:22.753000000 -0400
@@ -12,33 +12,35 @@
 # implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import logging
 import os, fcntl, time
-from ConfigParser import ConfigParser
-from swift.common.utils import TRUE_VALUES
+from ConfigParser import ConfigParser, NoSectionError, NoOptionError
+from swift.common.utils import TRUE_VALUES, search_tree
 from gluster.swift.common.fs_utils import mkdirs
 
-
 #
 # Read the fs.conf file once at startup (module load)
 #
 _fs_conf = ConfigParser()
 MOUNT_IP = 'localhost'
-REMOTE_CLUSTER = False
 OBJECT_ONLY = False
+RUN_DIR='/var/run/swift'
+SWIFT_DIR = '/etc/swift'
 if _fs_conf.read(os.path.join('/etc/swift', 'fs.conf')):
     try:
         MOUNT_IP = _fs_conf.get('DEFAULT', 'mount_ip', 'localhost')
     except (NoSectionError, NoOptionError):
         pass
     try:
-        REMOTE_CLUSTER = _fs_conf.get('DEFAULT', 'remote_cluster', False) in TRUE_VALUES
+        OBJECT_ONLY = _fs_conf.get('DEFAULT', 'object_only', "no") in TRUE_VALUES
     except (NoSectionError, NoOptionError):
         pass
     try:
-        OBJECT_ONLY = _fs_conf.get('DEFAULT', 'object_only', "no") in TRUE_VALUES
+        RUN_DIR = _fs_conf.get('DEFAULT', 'run_dir', '/var/run/swift')
     except (NoSectionError, NoOptionError):
         pass
+
 NAME = 'glusterfs'
 
 
@@ -60,7 +62,7 @@
         if drive == export:
             break
     else:
-        logging.error('No export found in %r matching drive %s', el, drive)
+        logging.error('No export found in %r matching drive, %s', el, drive)
         return False
 
     # NOTE: root is typically the default value of /mnt/gluster-object
@@ -68,13 +70,12 @@
     if not os.path.isdir(full_mount_path):
         mkdirs(full_mount_path)
 
-    pid_dir = "/var/lib/glusterd/vols/%s/run/" % drive
-    pid_file = os.path.join(pid_dir, 'swift.pid');
+    lck_file = os.path.join(RUN_DIR, '%s.lock' %drive);
 
-    if not os.path.exists(pid_dir):
-        mkdirs(pid_dir)
+    if not os.path.exists(RUN_DIR):
+        mkdirs(RUN_DIR)
 
-    fd = os.open(pid_file, os.O_CREAT|os.O_RDWR)
+    fd = os.open(lck_file, os.O_CREAT|os.O_RDWR)
     with os.fdopen(fd, 'r+b') as f:
         try:
             fcntl.lockf(f, fcntl.LOCK_EX|fcntl.LOCK_NB)
@@ -100,19 +101,12 @@
         logging.error('Unable to unmount %s %s' % (full_mount_path, NAME))
 
 def _get_export_list():
-    if REMOTE_CLUSTER:
-        cmnd = 'gluster --remote-host=%s volume info' % MOUNT_IP
-    else:
-        cmnd = 'gluster volume info'
+    cmnd = 'gluster --remote-host=%s volume info' % MOUNT_IP
 
     export_list = []
 
     if os.system(cmnd + ' >> /dev/null'):
-        if REMOTE_CLUSTER:
-            logging.error('Getting volume info failed for %s, make sure '\
-                          'gluster --remote-host=%s works', NAME, MOUNT_IP)
-        else:
-            logging.error('Getting volume info failed for %s', NAME)
+        logging.error('Getting volume info failed for %s', NAME)
     else:
         fp = os.popen(cmnd)
         while True:
@@ -124,3 +118,20 @@
             export_list.append(item.split(':')[1].strip(' '))
 
     return export_list
+
+def get_mnt_point(vol_name, conf_dir=SWIFT_DIR, conf_file="object-server*"):
+    """Read the object-server's configuration file and return
+       the device value"""
+
+    mnt_dir = ''
+    conf_files = search_tree(conf_dir, conf_file, '.conf')
+    if not conf_files:
+        raise Exception("Config file not found")
+
+    _conf = ConfigParser()
+    if _conf.read(conf_files[0]):
+        try:
+            mnt_dir = _conf.get('DEFAULT', 'devices', '')
+        except (NoSectionError, NoOptionError):
+            raise
+        return os.path.join(mnt_dir, vol_name)
diff -ru a/ufo/gluster/swift/common/ring.py b/ufo/gluster/swift/common/ring.py
--- a/ufo/gluster/swift/common/ring.py	2013-04-30 08:21:55.948000000 -0400
+++ b/ufo/gluster/swift/common/ring.py	2013-04-29 15:16:22.755000000 -0400
@@ -0,0 +1,111 @@
+# Copyright (c) 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ConfigParser import ConfigParser
+from swift.common.ring import ring
+from swift.common.utils import search_tree
+from gluster.swift.common.Glusterfs import SWIFT_DIR
+
+reseller_prefix = "AUTH_"
+conf_files = search_tree(SWIFT_DIR, "proxy-server*", 'conf')
+if conf_files:
+    conf_file = conf_files[0]
+
+_conf = ConfigParser()
+if conf_files and _conf.read(conf_file):
+    if _conf.defaults().get("reseller_prefix", None):
+        reseller_prefix = _conf.defaults().get("reseller_prefix")
+    else:
+        for key, value in _conf._sections.items():
+            if value.get("reseller_prefix", None):
+                reseller_prefix = value["reseller_prefix"]
+                break
+
+if not reseller_prefix.endswith('_'):
+    reseller_prefix = reseller_prefix + '_'
+
+class Ring(ring.Ring):
+    def _get_part_nodes(self, part):
+        seen_ids = set()
+        nodes = [dev for dev in self._devs \
+                     if dev['device'] == self.acc_name \
+                     and not (dev['id'] in seen_ids \
+                                  or seen_ids.add(dev['id']))]
+        if not nodes:
+            nodes = [self.false_node]
+        return nodes
+
+    def get_part_nodes(self, part):
+        """
+        Get the nodes that are responsible for the partition. If one
+        node is responsible for more than one replica of the same
+        partition, it will only appear in the output once.
+
+        :param part: partition to get nodes for
+        :returns: list of node dicts
+
+        See :func:`get_nodes` for a description of the node dicts.
+        """
+        return self._get_part_nodes(part)
+
+    def get_nodes(self, account, container=None, obj=None):
+        """
+        Get the partition and nodes for an account/container/object.
+        If a node is responsible for more than one replica, it will
+        only appear in the output once.
+        :param account: account name
+        :param container: container name
+        :param obj: object name
+        :returns: a tuple of (partition, list of node dicts)
+
+        Each node dict will have at least the following keys:
+        ====== ===============================================================
+        id     unique integer identifier amongst devices
+        weight a float of the relative weight of this device as compared to
+               others; this indicates how many partitions the builder will try
+               to assign to this device
+        zone   integer indicating which zone the device is in; a given
+               partition will not be assigned to multiple devices within the
+               same zone
+        ip     the ip address of the device
+        port   the tcp port of the device
+        device the device's name on disk (sdb1, for example)
+        meta   general use 'extra' field; for example: the online date, the
+               hardware description
+        ====== ===============================================================
+        """
+        self.false_node = {'zone': 1, 'weight': 100.0, 'ip': '127.0.0.1', 'id': 0, \
+                           'meta': '', 'device': 'volume_not_in_ring', \
+                           'port': 6012}
+        if account.startswith(reseller_prefix):
+            self.acc_name = account.replace(reseller_prefix, '', 1)
+        else:
+            self.acc_name = account
+
+        part = 0
+        return part, self._get_part_nodes(part)
+
+
+    def get_more_nodes(self, part):
+        """
+        Generator to get extra nodes for a partition for hinted handoff.
+
+        :param part: partition to get handoff nodes for
+        :returns: generator of node dicts
+
+        See :func:`get_nodes` for a description of the node dicts.
+        Should never be called in the swift UFO environment, so yield nothing
+        """
+        yield self.false_node
diff -ru a/ufo/test/unit/common/test_ring.py b/ufo/test/unit/common/test_ring.py
--- a/ufo/test/unit/common/test_ring.py	2013-04-30 08:22:08.975000000 -0400
+++ b/ufo/test/unit/common/test_ring.py	2013-04-29 15:16:22.756000000 -0400
@@ -0,0 +1,81 @@
+# Copyright (c) 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import gluster.swift.common.constraints
+from gluster.swift.common.ring import *
+from gluster.swift.common.Glusterfs import SWIFT_DIR
+
+def _mock_ring_data():
+    return [{'zone': 1, 'weight': 100.0, 'ip': '127.0.0.1', 'port': 6012, \
+             'meta': '', 'device': 'test', 'id': 0},
+            {'zone': 2, 'weight': 100.0, 'ip': '127.0.0.1', 'id': 1, \
+             'meta': '', 'device': 'iops', 'port': 6012}]
+
+class TestRing(unittest.TestCase):
+    """ Tests for common.utils """
+
+    def setUp(self):
+        self.ring = Ring(SWIFT_DIR, ring_name='object')
+
+    def test_first_device(self):
+        try:
+            __devs = self.ring._devs
+            self.ring._devs = _mock_ring_data()
+
+            part, node = self.ring.get_nodes('test')
+            assert node[0]['device'] == 'test'
+            node = self.ring.get_part_nodes(0)
+            assert node[0]['device'] == 'test'
+            for node in self.ring.get_more_nodes(0):
+                assert node['device'] == 'volume_not_in_ring'
+        finally:
+            self.ring._devs = __devs
+
+    def test_invalid_device(self):
+        try:
+            __devs = self.ring._devs
+            self.ring._devs = _mock_ring_data()
+
+            part, node = self.ring.get_nodes('test2')
+            assert node[0]['device'] == 'volume_not_in_ring'
+            node = self.ring.get_part_nodes(0)
+            assert node[0]['device'] == 'volume_not_in_ring'
+        finally:
+            self.ring._devs = __devs
+
+    def test_second_device(self):
+        try:
+            __devs = self.ring._devs
+            self.ring._devs = _mock_ring_data()
+
+            part, node = self.ring.get_nodes('iops')
+            assert node[0]['device'] == 'iops'
+            node = self.ring.get_part_nodes(0)
+            assert node[0]['device'] == 'iops'
+            for node in self.ring.get_more_nodes(0):
+                assert node['device'] == 'volume_not_in_ring'
+        finally:
+            self.ring._devs = __devs
+
+    def test_second_device_with_reseller_prefix(self):
+        try:
+            __devs = self.ring._devs
+            self.ring._devs = _mock_ring_data()
+
+            part, node = self.ring.get_nodes('AUTH_iops')
+            assert node[0]['device'] == 'iops'
+        finally:
+            self.ring._devs = __devs