glusterfs-3.3.1-14, added multi-volume fix/support for G4S/UFO

Kaleb S. KEITHLEY 2013-04-30 12:26:14 -04:00
parent 066251b41d
commit d7cec693e1
3 changed files with 475 additions and 198 deletions


@@ -0,0 +1,406 @@
diff -ru a/ufo/bin/gluster-swift-gen-builders b/ufo/bin/gluster-swift-gen-builders
--- a/ufo/bin/gluster-swift-gen-builders 2012-12-07 12:24:00.000000000 -0500
+++ b/ufo/bin/gluster-swift-gen-builders 2013-04-29 15:16:22.748000000 -0400
@@ -1,9 +1,25 @@
#!/bin/bash
+# Note that these port numbers must match the configured values for the
+# various servers in their configuration files.
+declare -A port=(["account.builder"]=6012 ["container.builder"]=6011 \
+ ["object.builder"]=6010)
+
+builder_files="account.builder container.builder object.builder"
+
function create {
- swift-ring-builder $1 create 0 1 1
- swift-ring-builder $1 add z1-127.0.0.1:$2/$3_ 100.0
+ swift-ring-builder $1 create 1 1 1 >> /tmp/out
+}
+
+function add {
+ swift-ring-builder $1 add z$2-127.0.0.1:$3/$4_ 100.0
+}
+
+function rebalance {
swift-ring-builder $1 rebalance
+}
+
+function build {
swift-ring-builder $1
}
@@ -12,8 +28,17 @@
exit 1
fi
-# Note that these port numbers must match the configured values for the
-# various servers in their configuration files.
-create account.builder 6012 $1
-create container.builder 6011 $1
-create object.builder 6010 $1
+for builder_file in $builder_files
+do
+ create $builder_file
+
+ zone=1
+ for volname in $@
+ do
+ add $builder_file $zone ${port[$builder_file]} $volname
+ zone=$(expr $zone + 1)
+ done
+
+ rebalance $builder_file
+ build $builder_file
+done
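
The rewritten gluster-swift-gen-builders now accepts any number of volume names and places each volume in its own zone of every ring, using the per-ring ports declared in the associative array at the top. Below is a minimal Python sketch (not part of the commit) of the core swift-ring-builder command sequence it produces; the volume names vol1 and vol2 are hypothetical arguments:

    ports = {"account.builder": 6012, "container.builder": 6011, "object.builder": 6010}
    volumes = ["vol1", "vol2"]  # hypothetical: the arguments passed to the script

    for builder_file, port in sorted(ports.items()):
        print("swift-ring-builder %s create 1 1 1" % builder_file)
        for zone, volname in enumerate(volumes, 1):
            # one device per Gluster volume, each in its own zone
            print("swift-ring-builder %s add z%d-127.0.0.1:%d/%s_ 100.0"
                  % (builder_file, zone, port, volname))
        print("swift-ring-builder %s rebalance" % builder_file)
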
diff -ru a/ufo/etc/fs.conf-gluster b/ufo/etc/fs.conf-gluster
--- a/ufo/etc/fs.conf-gluster 2012-12-07 12:24:00.000000000 -0500
+++ b/ufo/etc/fs.conf-gluster 2013-04-29 15:16:22.752000000 -0400
@@ -3,10 +3,6 @@
# local host.
mount_ip = localhost
-# The GlusterFS server need not be local, a remote server can also be used
-# by setting "remote_cluster = yes".
-remote_cluster = no
-
# By default it is assumed the Gluster volumes can be accessed using other
# methods besides UFO (not object only), which disables a caching
# optimizations in order to keep in sync with file system changes.
diff -ru a/ufo/gluster/swift/common/constraints.py b/ufo/gluster/swift/common/constraints.py
--- a/ufo/gluster/swift/common/constraints.py 2012-12-07 12:24:00.000000000 -0500
+++ b/ufo/gluster/swift/common/constraints.py 2013-04-29 15:16:22.749000000 -0400
@@ -16,7 +16,8 @@
from webob.exc import HTTPBadRequest
import swift.common.constraints
-from gluster.swift.common import Glusterfs
+import swift.common.ring as _ring
+from gluster.swift.common import Glusterfs, ring
MAX_OBJECT_NAME_COMPONENT_LENGTH = swift.common.constraints.constraints_conf_int(
@@ -80,3 +81,9 @@
# Replace the original check mount with ours
swift.common.constraints.check_mount = gluster_check_mount
+
+# Save the original Ring class
+__Ring = _ring.Ring
+
+# Replace the original Ring class
+_ring.Ring = ring.Ring
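
When the Gluster-specific servers import constraints.py at startup, the assignment above swaps the Ring attribute of swift.common.ring for the Gluster-aware class before the proxy resolves it, while keeping a reference to the original. A toy illustration of the same monkey-patch pattern; the module and class names below are invented for the example and are not Swift's:

    import types

    # stand-in for swift.common.ring (a hypothetical toy module, not the real one)
    swift_ring = types.ModuleType("swift_ring")

    class StockRing(object):
        """placeholder for the original hash-based Ring"""

    class VolumeRing(StockRing):
        """placeholder for the Gluster-aware replacement"""

    swift_ring.Ring = StockRing

    _saved_ring = swift_ring.Ring    # save the original class, as the patch does
    swift_ring.Ring = VolumeRing     # later lookups of swift_ring.Ring get ours

    assert swift_ring.Ring is VolumeRing
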
diff -ru a/ufo/gluster/swift/common/Glusterfs.py b/ufo/gluster/swift/common/Glusterfs.py
--- a/ufo/gluster/swift/common/Glusterfs.py 2012-12-07 12:24:00.000000000 -0500
+++ b/ufo/gluster/swift/common/Glusterfs.py 2013-04-29 15:16:22.753000000 -0400
@@ -12,33 +12,35 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
import logging
import os, fcntl, time
-from ConfigParser import ConfigParser
-from swift.common.utils import TRUE_VALUES
+from ConfigParser import ConfigParser, NoSectionError, NoOptionError
+from swift.common.utils import TRUE_VALUES, search_tree
from gluster.swift.common.fs_utils import mkdirs
-
#
# Read the fs.conf file once at startup (module load)
#
_fs_conf = ConfigParser()
MOUNT_IP = 'localhost'
-REMOTE_CLUSTER = False
OBJECT_ONLY = False
+RUN_DIR='/var/run/swift'
+SWIFT_DIR = '/etc/swift'
if _fs_conf.read(os.path.join('/etc/swift', 'fs.conf')):
try:
MOUNT_IP = _fs_conf.get('DEFAULT', 'mount_ip', 'localhost')
except (NoSectionError, NoOptionError):
pass
try:
- REMOTE_CLUSTER = _fs_conf.get('DEFAULT', 'remote_cluster', False) in TRUE_VALUES
+ OBJECT_ONLY = _fs_conf.get('DEFAULT', 'object_only', "no") in TRUE_VALUES
except (NoSectionError, NoOptionError):
pass
try:
- OBJECT_ONLY = _fs_conf.get('DEFAULT', 'object_only', "no") in TRUE_VALUES
+ RUN_DIR = _fs_conf.get('DEFAULT', 'run_dir', '/var/run/swift')
except (NoSectionError, NoOptionError):
pass
+
NAME = 'glusterfs'
@@ -60,7 +62,7 @@
if drive == export:
break
else:
- logging.error('No export found in %r matching drive %s', el, drive)
+ logging.error('No export found in %r matching drive, %s', el, drive)
return False
# NOTE: root is typically the default value of /mnt/gluster-object
@@ -68,13 +70,12 @@
if not os.path.isdir(full_mount_path):
mkdirs(full_mount_path)
- pid_dir = "/var/lib/glusterd/vols/%s/run/" % drive
- pid_file = os.path.join(pid_dir, 'swift.pid');
+ lck_file = os.path.join(RUN_DIR, '%s.lock' %drive);
- if not os.path.exists(pid_dir):
- mkdirs(pid_dir)
+ if not os.path.exists(RUN_DIR):
+ mkdirs(RUN_DIR)
- fd = os.open(pid_file, os.O_CREAT|os.O_RDWR)
+ fd = os.open(lck_file, os.O_CREAT|os.O_RDWR)
with os.fdopen(fd, 'r+b') as f:
try:
fcntl.lockf(f, fcntl.LOCK_EX|fcntl.LOCK_NB)
@@ -100,19 +101,12 @@
logging.error('Unable to unmount %s %s' % (full_mount_path, NAME))
def _get_export_list():
- if REMOTE_CLUSTER:
- cmnd = 'gluster --remote-host=%s volume info' % MOUNT_IP
- else:
- cmnd = 'gluster volume info'
+ cmnd = 'gluster --remote-host=%s volume info' % MOUNT_IP
export_list = []
if os.system(cmnd + ' >> /dev/null'):
- if REMOTE_CLUSTER:
- logging.error('Getting volume info failed for %s, make sure '\
- 'gluster --remote-host=%s works', NAME, MOUNT_IP)
- else:
- logging.error('Getting volume info failed for %s', NAME)
+ logging.error('Getting volume info failed for %s', NAME)
else:
fp = os.popen(cmnd)
while True:
@@ -124,3 +118,20 @@
export_list.append(item.split(':')[1].strip(' '))
return export_list
+
+def get_mnt_point(vol_name, conf_dir=SWIFT_DIR, conf_file="object-server*"):
+ """Read the object-server's configuration file and return
+ the device value"""
+
+ mnt_dir = ''
+ conf_files = search_tree(conf_dir, conf_file, '.conf')
+ if not conf_files:
+ raise Exception("Config file not found")
+
+ _conf = ConfigParser()
+ if _conf.read(conf_files[0]):
+ try:
+ mnt_dir = _conf.get('DEFAULT', 'devices', '')
+ except (NoSectionError, NoOptionError):
+ raise
+ return os.path.join(mnt_dir, vol_name)
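
get_mnt_point() finds the first object-server*.conf under SWIFT_DIR, reads its devices value, and joins the volume name onto it, so every Gluster volume gets its own mount point under the configured devices directory; _mount() now serializes mounting with a per-volume lock file in RUN_DIR rather than the old pid file under /var/lib/glusterd. A small sketch of the path computation, assuming a hypothetical object-server.conf whose DEFAULT section sets devices = /mnt/gluster-object:

    import os

    def sketch_get_mnt_point(vol_name, devices="/mnt/gluster-object"):
        # the real helper reads 'devices' from the first object-server*.conf
        # found by search_tree(); the default used here is only an assumption
        return os.path.join(devices, vol_name)

    print(sketch_get_mnt_point("myvolume"))   # -> /mnt/gluster-object/myvolume
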
diff -ru a/ufo/gluster/swift/common/ring.py b/ufo/gluster/swift/common/ring.py
--- a/ufo/gluster/swift/common/ring.py 2013-04-30 08:21:55.948000000 -0400
+++ b/ufo/gluster/swift/common/ring.py 2013-04-29 15:16:22.755000000 -0400
@@ -0,0 +1,111 @@
+# Copyright (c) 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ConfigParser import ConfigParser
+from swift.common.ring import ring
+from swift.common.utils import search_tree
+from gluster.swift.common.Glusterfs import SWIFT_DIR
+
+reseller_prefix = "AUTH_"
+conf_files = search_tree(SWIFT_DIR, "proxy-server*", 'conf')
+if conf_files:
+ conf_file = conf_files[0]
+
+_conf = ConfigParser()
+if conf_files and _conf.read(conf_file):
+ if _conf.defaults().get("reseller_prefix", None):
+ reseller_prefix = _conf.defaults().get("reseller_prefix")
+ else:
+ for key, value in _conf._sections.items():
+ if value.get("reseller_prefix", None):
+ reseller_prefix = value["reseller_prefix"]
+ break
+
+if not reseller_prefix.endswith('_'):
+ reseller_prefix = reseller_prefix + '_'
+
+class Ring(ring.Ring):
+ def _get_part_nodes(self, part):
+ seen_ids = set()
+ nodes = [dev for dev in self._devs \
+ if dev['device'] == self.acc_name \
+ and not (dev['id'] in seen_ids \
+ or seen_ids.add(dev['id']))]
+ if not nodes:
+ nodes = [self.false_node]
+ return nodes
+
+ def get_part_nodes(self, part):
+ """
+ Get the nodes that are responsible for the partition. If one
+ node is responsible for more than one replica of the same
+ partition, it will only appear in the output once.
+
+ :param part: partition to get nodes for
+ :returns: list of node dicts
+
+ See :func:`get_nodes` for a description of the node dicts.
+ """
+ return self._get_part_nodes(part)
+
+ def get_nodes(self, account, container=None, obj=None):
+ """
+ Get the partition and nodes for an account/container/object.
+ If a node is responsible for more than one replica, it will
+ only appear in the output once.
+ :param account: account name
+ :param container: container name
+ :param obj: object name
+ :returns: a tuple of (partition, list of node dicts)
+
+ Each node dict will have at least the following keys:
+ ====== ===============================================================
+ id unique integer identifier amongst devices
+ weight a float of the relative weight of this device as compared to
+ others; this indicates how many partitions the builder will try
+ to assign to this device
+ zone integer indicating which zone the device is in; a given
+ partition will not be assigned to multiple devices within the
+ same zone
+ ip the ip address of the device
+ port the tcp port of the device
+ device the device's name on disk (sdb1, for example)
+ meta general use 'extra' field; for example: the online date, the
+ hardware description
+ ====== ===============================================================
+ """
+ self.false_node = {'zone': 1, 'weight': 100.0, 'ip': '127.0.0.1', 'id': 0, \
+ 'meta': '', 'device': 'volume_not_in_ring', \
+ 'port': 6012}
+ if account.startswith(reseller_prefix):
+ self.acc_name = account.replace(reseller_prefix, '', 1)
+ else:
+ self.acc_name = account
+
+ part = 0
+ return part, self._get_part_nodes(part)
+
+
+ def get_more_nodes(self, part):
+ """
+ Generator to get extra nodes for a partition for hinted handoff.
+
+ :param part: partition to get handoff nodes for
+ :returns: generator of node dicts
+
+ See :func:`get_nodes` for a description of the node dicts.
+ Should never be called in the swift UFO environment, so yield nothing
+ """
+ yield self.false_node
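
This Ring subclass bypasses consistent hashing entirely: the account name, minus the reseller prefix, is matched against each device's 'device' field, so account X maps straight to Gluster volume X, and unknown accounts fall back to the 'volume_not_in_ring' sentinel node. A short sketch of that mapping with mock device entries; the volume names are hypothetical:

    reseller_prefix = "AUTH_"

    def account_to_volume(account):
        # mirrors get_nodes(): strip the reseller prefix once, keep the rest
        if account.startswith(reseller_prefix):
            return account.replace(reseller_prefix, '', 1)
        return account

    devs = [{'device': 'myvol', 'zone': 1}, {'device': 'othervol', 'zone': 2}]
    name = account_to_volume("AUTH_myvol")
    nodes = [d for d in devs if d['device'] == name]
    print(nodes or "no match: the real Ring would return the false_node sentinel")
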
diff -ru a/ufo/test/unit/common/test_ring.py b/ufo/test/unit/common/test_ring.py
--- a/ufo/test/unit/common/test_ring.py 2013-04-30 08:22:08.975000000 -0400
+++ b/ufo/test/unit/common/test_ring.py 2013-04-29 15:16:22.756000000 -0400
@@ -0,0 +1,81 @@
+# Copyright (c) 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import gluster.swift.common.constraints
+from gluster.swift.common.ring import *
+from gluster.swift.common.Glusterfs import SWIFT_DIR
+
+def _mock_ring_data():
+ return [{'zone': 1, 'weight': 100.0, 'ip': '127.0.0.1', 'port': 6012, \
+ 'meta': '', 'device': 'test', 'id': 0},
+ {'zone': 2, 'weight': 100.0, 'ip': '127.0.0.1', 'id': 1, \
+ 'meta': '', 'device': 'iops', 'port': 6012}]
+
+class TestRing(unittest.TestCase):
+ """ Tests for common.utils """
+
+ def setUp(self):
+ self.ring = Ring(SWIFT_DIR, ring_name='object')
+
+ def test_first_device(self):
+ try:
+ __devs = self.ring._devs
+ self.ring._devs = _mock_ring_data()
+
+ part, node = self.ring.get_nodes('test')
+ assert node[0]['device'] == 'test'
+ node = self.ring.get_part_nodes(0)
+ assert node[0]['device'] == 'test'
+ for node in self.ring.get_more_nodes(0):
+ assert node['device'] == 'volume_not_in_ring'
+ finally:
+ self.ring._devs = __devs
+
+ def test_invalid_device(self):
+ try:
+ __devs = self.ring._devs
+ self.ring._devs = _mock_ring_data()
+
+ part, node = self.ring.get_nodes('test2')
+ assert node[0]['device'] == 'volume_not_in_ring'
+ node = self.ring.get_part_nodes(0)
+ assert node[0]['device'] == 'volume_not_in_ring'
+ finally:
+ self.ring._devs = __devs
+
+ def test_second_device(self):
+ try:
+ __devs = self.ring._devs
+ self.ring._devs = _mock_ring_data()
+
+ part, node = self.ring.get_nodes('iops')
+ assert node[0]['device'] == 'iops'
+ node = self.ring.get_part_nodes(0)
+ assert node[0]['device'] == 'iops'
+ for node in self.ring.get_more_nodes(0):
+ assert node['device'] == 'volume_not_in_ring'
+ finally:
+ self.ring._devs = __devs
+
+ def test_second_device_with_reseller_prefix(self):
+ try:
+ __devs = self.ring._devs
+ self.ring._devs = _mock_ring_data()
+
+ part, node = self.ring.get_nodes('AUTH_iops')
+ assert node[0]['device'] == 'iops'
+ finally:
+ self.ring._devs = __devs
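
The new tests swap mock device lists into a real Ring instance, so setUp() must be able to load an object ring from SWIFT_DIR (/etc/swift) built beforehand, for example by gluster-swift-gen-builders. One way the suite could be driven with the standard-library runner; the import path assumes the working directory is the ufo tree, which is an assumption rather than something the commit dictates:

    import unittest

    # assumes cwd is the ufo/ tree so "test.unit.common.test_ring" is importable
    from test.unit.common.test_ring import TestRing

    suite = unittest.TestLoader().loadTestsFromTestCase(TestRing)
    unittest.TextTestRunner(verbosity=2).run(suite)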


@@ -1,8 +1,8 @@
%global _for_fedora_koji_builds 1
# use the prerel for pre-releases
%global prereltag alpha3
# uncomment and add '%' to use the prereltag for pre-releases
# global prereltag alpha
# if you wish to compile an rpm without rdma support, compile like this...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without rdma
@@ -17,33 +17,23 @@
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without epoll
%{?_without_epoll:%global _without_epoll --disable-epoll}
# if you wish to compile an rpm without fusermount...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without fusermount
%{?_without_fusermount:%global _without_fusermount --disable-fusermount}
# if you wish to compile an rpm with fusermount...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with fusermount
%{?_with_fusermount:%global _with_fusermount --enable-fusermount}
# if you wish to compile an rpm without geo-replication support, compile like this...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without georeplication
%{?_without_georeplication:%global _without_georeplication --disable-geo-replication}
# if you wish to compile an rpm without the OCF resource agents...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without ocf
%{?_without_ocf:%global _without_ocf --without-ocf}
# if you wish to build rpms without UFO, compile like this
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@tar.gz --without ufo
%{?_without_ufo:%global _without_ufo true}
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
%global SWIFTVER 1.8.0
%global SWIFTVER 1.7.4
%if ( 0%{_for_fedora_koji_builds} )
%global UFOVER 1.2
%global UFOVER 1.1
%else
%global UFOVER @PACKAGE_VERSION@
%endif
%if ! 0%{?_without_ufo:1}
%global _with_ufo true
%endif
%endif
%if ( 0%{?fedora} && 0%{?fedora} > 16 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
%global _with_systemd true
@@ -52,8 +42,8 @@
Summary: Cluster File System
%if ( 0%{_for_fedora_koji_builds} )
Name: glusterfs
Version: 3.4.0
Release: 0.3%{?prereltag:.%{prereltag}}%{?dist}
Version: 3.3.1
Release: 14%{?prereltag:.%{prereltag}}%{?dist}
%else
Name: @PACKAGE_NAME@
Version: @PACKAGE_VERSION@
@@ -66,7 +56,7 @@ Vendor: Red Hat
%endif
URL: http://www.gluster.org//docs/index.php/GlusterFS
%if ( 0%{_for_fedora_koji_builds} )
Source0: http://download.gluster.org/pub/gluster/glusterfs/qa-releases/%{PACKAGE_VERSION}%{?prereltag}/glusterfs-%{version}%{?prereltag}.tar.gz
Source0: http://download.gluster.org/pub/gluster/glusterfs/3.3/%{version}/glusterfs-%{version}%{?prereltag}.tar.gz
%else
Source0: @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz
%endif
@@ -102,10 +92,10 @@ Requires(postun): systemd-units
# can't seem to make a generic macro that works
%define _init_glusterd %{_unitdir}/glusterd.service
%define _init_glusterfsd %{_unitdir}/glusterfsd.service
%define _init_gluster_swift_account %{_unitdir}/gluster-swift-account.service
%define _init_gluster_swift_account %{_unitdir}/gluster-swift-account.service
%define _init_gluster_swift_object %{_unitdir}/gluster-swift-object.service
%define _init_gluster_swift_container %{_unitdir}/gluster-swift-container.service
%define _init_gluster_swift_proxy %{_unitdir}/gluster-swift-proxy.service
%define _init_gluster_swift_container %{_unitdir}/gluster-swift-container.service
%define _init_gluster_swift_proxy %{_unitdir}/gluster-swift-proxy.service
%else
%if ( 0%{_for_fedora_koji_builds} )
%global glusterd_service %{S:%{SOURCE12}}
@@ -123,10 +113,10 @@ Requires(postun): /sbin/service
# can't seem to make a generic macro that works
%define _init_glusterd %{_sysconfdir}/init.d/glusterd
%define _init_glusterfsd %{_sysconfdir}/init.d/glusterfsd
%define _init_gluster_swift_account %{_sysconfdir}/init.d/gluster-swift-account
%define _init_gluster_swift_object %{_sysconfdir}/init.d/gluster-swift-object
%define _init_gluster_swift_container %{_sysconfdir}/init.d/gluster-swift-container
%define _init_gluster_swift_proxy %{_sysconfdir}/init.d/gluster-swift-proxy
%define _init_gluster_swift_account %{_sysconfdir}/init.d/gluster-swift-account
%define _init_gluster_swift_object %{_sysconfdir}/init.d/gluster-swift-object
%define _init_gluster_swift_container %{_sysconfdir}/init.d/gluster-swift-container
%define _init_gluster_swift_proxy %{_sysconfdir}/init.d/gluster-swift-proxy
%endif
BuildRequires: bison flex
@@ -149,9 +139,13 @@ Provides: %{name}-libs = %{version}-%{release}
Provides: %{name}-common = %{version}-%{release}
Provides: %{name}-core = %{version}-%{release}
# We do not want to generate useless provides for xlator .so files
# We do not want to generate useless provides and requires for xlator .so files
# Filter all generated:
# - .so requires that do not start with 'lib'
# - all provides from xlators and private libraries not in the ldconfig path
#
# TODO: RHEL5 does not have a convenient solution
%if ( 0%{?rhel} == 6 )
%if 0%{?rhel} == 6
# filter_setup exists in RHEL6 only
%filter_provides_in %{_libdir}/glusterfs/%{version}/
%global __filter_from_req %{?__filter_from_req} | %{__grep} -v -P '^(?!lib).*\.so.*$'
@@ -220,9 +214,6 @@ Group: Applications/File
BuildRequires: fuse-devel
Requires: %{name} = %{version}-%{release}
%if ( ! 0%{?_without_fusermount} )
Requires: /usr/bin/fusermount
%endif
Obsoletes: %{name}-client < %{version}-%{release}
Provides: %{name}-client = %{version}-%{release}
@@ -261,36 +252,6 @@ is in user space and easily manageable.
This package provides the glusterfs server daemon.
%if ( 0%{!?_without_ocf:1} )
%package resource-agents
Summary: OCF Resource Agents for GlusterFS
License: GPLv3+
BuildArch: noarch
# this Group handling comes from the Fedora resource-agents package
%if ( 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} )
Group: System Environment/Base
%else
Group: Productivity/Clustering/HA
%endif
# for glusterd
Requires: glusterfs-server
# depending on the distribution, we need pacemaker or resource-agents
Requires: %{_prefix}/lib/ocf/resource.d
%description resource-agents
GlusterFS is a clustered file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
This package provides the resource agents which plug glusterd into
Open Cluster Framework (OCF) compliant cluster resource managers,
like Pacemaker.
%endif
%package devel
Summary: Development Libraries
License: GPLv2 or LGPLv3+
@@ -362,7 +323,6 @@ Source71: container-server.conf
Source72: object-server.conf
Source73: proxy-server.conf
Source74: swift.conf
Source75: object-expirer.conf
Patch20: 0001-Use-updated-parallel-install-versions-of-epel-packag.patch
Patch21: 0002-Add-fixes-for-building-the-doc-package.patch
@@ -383,6 +343,7 @@ Patch24: 0002-Add-fixes-for-building-the-doc-package.patch.180
%global glusterswiftproxy_service %{S:%{SOURCE43}}
%endif
BuildRequires: dos2unix
BuildRequires: python-devel
BuildRequires: python-setuptools
BuildRequires: python-netifaces
@@ -405,9 +366,6 @@ Requires: python-simplejson
Requires: pyxattr
Requires: python-setuptools
Requires: python-netifaces
%if "%{SWIFTVER}" != "1.7.4"
Requires: python-swiftclient
%endif
Conflicts: openstack-swift
@@ -516,11 +474,12 @@ Obsoletes: glusterfs-swift-ufo <= 3.3.1-4
Obsoletes: glusterfs-swift-ufo <= 3.3.1-4
%if ( 0%{_for_fedora_koji_builds} )
Source15: http://download.gluster.org/pub/gluster/glusterfs/qa-releases/%{version}%{?prereltag}/UFO/gluster-swift-ufo-%{UFOVER}%{?prereltag}.tar.gz
Source15: http://download.gluster.org/pub/gluster/glusterfs/3.3/%{version}/UFO/gluster-swift-ufo-%{UFOVER}%{?prereltag}.tar.gz
%else
Source15: gluster-swift-ufo-@PACKAGE_VERSION@.tar.gz
%endif
Patch15: %{name}-3.3.1.ufo.gluster.swift.common.DiskFile-1.7.4.patch
Patch16: %{name}-3.3.1.ufo.gluster.multi-volume.backport-1.1.patch
%description ufo
Gluster Unified File and Object Storage unifies NAS and object storage
@@ -532,19 +491,20 @@ storage costs.
%prep
%setup -q -n %{name}-%{version}%{?prereltag}
%if ( 0%{_for_fedora_koji_builds} )
#%patch0 -p0
%patch1 -p0
%if ( "%{version}" == "3.3.1" )
%patch2 -p1
%endif
%endif
%if ( 0%{?_with_ufo:1} )
# unpack swift-1.x.y
%setup -q -T -D -n %{name}-%{version}%{?prereltag} -a 20
# unpack gluster ufo
%setup -q -T -D -n %{name}-%{version}%{?prereltag} -a 15
%if ( 0%{_for_fedora_koji_builds} )
#%patch0 -p0
%patch1 -p0 -F4
%if ( "%{version}" == "3.3.1" )
%patch2 -p1
%endif
%endif
cd swift-%{SWIFTVER}
# apply Fedora openstack-swift patches to Swift as appropriate
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
%patch20 -p1
%if "%{SWIFTVER}" == "1.7.4"
@@ -553,22 +513,26 @@ cd swift-%{SWIFTVER}
%patch24 -p1
%endif
%endif
# apply our own patches to Swift, as appropriate
%if "%{SWIFTVER}" == "1.7.4"
%patch22 -p1
%else
%patch23 -p1
%endif
# apply our fix for UFO 1.1 (tarball snapshot circa 7 Dec, 2012)
%if ( 0%{_for_fedora_koji_builds} )
%if ( "%{UFOVER}" == "1.1" )
cd ../ufo
%patch15 -p1
cd ../
pwd
%patch15
%patch16 -p1
%endif
%endif
%endif
%build
./autogen.sh
%configure %{?_without_rdma} %{?_without_epoll} %{?_without_fusermount} %{?_without_georeplication} %{?_without_ocf}
%configure %{?_without_rdma} %{?_without_epoll} %{?_with_fusermount} %{?_without_georeplication}
# Remove rpath
sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool
@@ -633,11 +597,6 @@ find %{buildroot}%{_libdir} -name '*.la' -delete
# Remove installed docs, we include them ourselves as %%doc
%{__rm} -rf %{buildroot}%{_datadir}/doc/glusterfs/
# Remove benchmarking and other unpackaged files
%{__rm} -rf %{buildroot}/benchmarking
%{__rm} -f %{buildroot}/glusterfs-mode.el
%{__rm} -f %{buildroot}/glusterfs.vim
# Create working directory
%{__mkdir_p} %{buildroot}%{_sharedstatedir}/glusterd
@@ -769,7 +728,6 @@ cd swift-%{SWIFTVER}
#install -p -D -m 660 %{SOURCE72} %{buildroot}%{_sysconfdir}/swift/object-server.conf
#install -p -D -m 660 %{SOURCE73} %{buildroot}%{_sysconfdir}/swift/proxy-server.conf
#install -p -D -m 660 %{SOURCE74} %{buildroot}%{_sysconfdir}/swift/swift.conf
#install -p -D -m 660 %{SOURCE75} %{buildroot}%{_sysconfdir}/swift/object-expirer.conf
#%endif
# Install pid directory
%{__install} -d -m 755 %{buildroot}%{_localstatedir}/run/swift
@@ -782,8 +740,6 @@ cd swift-%{SWIFTVER}
%{__mkdir_p} %{buildroot}%{_sysconfdir}/tmpfiles.d
install -p -m 0644 %{SOURCE37} %{buildroot}%{_sysconfdir}/tmpfiles.d/gluster-swift.conf
%endif
# Install recon directory
install -d -m 755 %{buildroot}%{_localstatedir}/cache/swift
# man pages
install -d -m 755 %{buildroot}%{_mandir}/man5
for m in doc/manpages/*.5; do
@@ -868,13 +824,12 @@ fi
%if ( 0%{_for_fedora_koji_builds} )
/sbin/umount.glusterfs
%endif
%if ( 0%{?_without_fusermount:1} )
%if ( 0%{?_with_fusermount:1} )
%{_bindir}/fusermount-glusterfs
%endif
%files server
%defattr(-,root,root,-)
%doc extras/clear_xattrs.sh
%if ( 0%{_for_fedora_koji_builds} )
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterd
%endif
@@ -935,17 +890,10 @@ fi
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs/run
%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/run/nfs.pid
%if ( 0%{!?_without_ocf:1} )
%files resource-agents
%defattr(-,root,root)
# /usr/lib is the standard for OCF, also on x86_64
%{_prefix}/lib/ocf/resource.d/glusterfs
%endif
%files devel
%defattr(-,root,root,-)
%{_includedir}/glusterfs
%{_libdir}/pkgconfig/glusterfs-api.pc
%exclude %{_includedir}/glusterfs/y.tab.h
%{_libdir}/*.so
@@ -954,13 +902,20 @@ fi
%defattr(-,root,root,-)
%doc swift-%{SWIFTVER}/AUTHORS
%doc swift-%{SWIFTVER}/LICENSE
%doc swift-%{SWIFTVER}/README*
%doc swift-%{SWIFTVER}/README
%doc swift-%{SWIFTVER}/etc/dispersion.conf-sample
%doc swift-%{SWIFTVER}/etc/drive-audit.conf-sample
%doc swift-%{SWIFTVER}/etc/object-expirer.conf-sample
%doc swift-%{SWIFTVER}/etc/swift.conf-sample
%{_mandir}/man5/dispersion.conf.5*
%{_mandir}/man1/swift*.1*
%{_mandir}/man1/swift-dispersion-populate.1*
%{_mandir}/man1/swift-dispersion-report.1*
%{_mandir}/man1/swift.1*
%{_mandir}/man1/swift-get-nodes.1*
%{_mandir}/man1/swift-init.1*
%{_mandir}/man1/swift-orphans.1*
%{_mandir}/man1/swift-recon.1*
%{_mandir}/man1/swift-ring-builder.1*
%if ( 0%{?_with_systemd:1} )
%config(noreplace) %{_sysconfdir}/tmpfiles.d/gluster-swift.conf
%else
@@ -974,7 +929,6 @@ fi
%dir %{python_sitelib}/swift
%{_bindir}/swift-account-audit
%{_bindir}/swift-bench
%{_bindir}/swift-bench-client
%{_bindir}/swift-drive-audit
%{_bindir}/swift-get-nodes
%{_bindir}/swift-init
@@ -1001,10 +955,7 @@ fi
%{_mandir}/man1/swift-account-server.1*
%_init_gluster_swift_account
%if ( 0%{?_with_systemd:1} )
%{_unitdir}/gluster-swift-account@.service
%if ( 0%{?fedora} && 0%{?fedora} > 18 )
%{_unitdir}/gluster-swift-account-*.service
%endif
%{_unitdir}/gluster-swift-account*.service
%endif
%dir %attr(0755, swift, swift) %{_localstatedir}/run/swift/account-server
%dir %{_sysconfdir}/swift/account-server
@@ -1025,13 +976,9 @@ fi
%{_mandir}/man1/swift-container-updater.1*
%_init_gluster_swift_container
%if ( 0%{?_with_systemd:1} )
%{_unitdir}/gluster-swift-container@.service
%if ( 0%{?fedora} && 0%{?fedora} > 18 )
%{_unitdir}/gluster-swift-container-*.service
%endif
%{_unitdir}/gluster-swift-container*.service
%endif
%dir %attr(0755, swift, swift) %{_localstatedir}/run/swift/container-server
%dir %attr(0755, swift, swift) %{_localstatedir}/cache/swift
%dir %{_sysconfdir}/swift/container-server
%{_bindir}/swift-container-auditor
%{_bindir}/swift-container-server
@@ -1045,21 +992,18 @@ fi
%doc swift-%{SWIFTVER}/etc/object-server.conf-sample
%doc swift-%{SWIFTVER}/etc/rsyncd.conf-sample
%{_mandir}/man5/object-server.conf.5*
%{_mandir}/man5/object-expirer.conf.5*
%{_mandir}/man1/swift-object-auditor.1*
%{_mandir}/man1/swift-object-expirer.1*
%{_mandir}/man1/swift-object-info.1*
%{_mandir}/man1/swift-object-replicator.1*
%{_mandir}/man1/swift-object-server.1*
%{_mandir}/man1/swift-object-updater.1*
%_init_gluster_swift_object
%if ( 0%{?_with_systemd:1} )
%{_unitdir}/gluster-swift-object@.service
%if ( 0%{?fedora} && 0%{?fedora} > 18 )
%{_unitdir}/gluster-swift-object-*.service
%exclude %{_unitdir}/gluster-swift-object-expirer*.service
%endif
%{_unitdir}/gluster-swift-object*.service
%endif
%dir %attr(0755, swift, swift) %{_localstatedir}/run/swift/object-server
%dir %attr(0755, swift, swift) %{_localstatedir}/cache/swift
%dir %{_sysconfdir}/swift/object-server
%{_bindir}/swift-object-auditor
%{_bindir}/swift-object-info
@@ -1071,19 +1015,11 @@ fi
%files swift-proxy
%defattr(-,root,root,-)
%doc swift-%{SWIFTVER}/etc/proxy-server.conf-sample
%doc swift-%{SWIFTVER}/etc/object-expirer.conf-sample
%{_mandir}/man5/object-expirer.conf.5*
%{_mandir}/man5/proxy-server.conf.5*
%{_mandir}/man1/swift-object-expirer.1*
%{_mandir}/man1/swift-proxy-server.1*
%_init_gluster_swift_proxy
%if ( 0%{?fedora} && 0%{?fedora} > 18 )
%{_unitdir}/gluster-swift-object-expirer*.service
%endif
%dir %attr(0755, swift, swift) %{_localstatedir}/run/swift/proxy-server
%dir %attr(0755, swift, swift) %{_localstatedir}/cache/swift
%dir %{_sysconfdir}/swift/proxy-server
%{_bindir}/swift-object-expirer
%{_bindir}/swift-proxy-server
%{python_sitelib}/swift/proxy
@@ -1183,36 +1119,18 @@ fi
%post swift-account
%_init_enable gluster-swift-account
%if ( 0%{?fedora} && 0%{?fedora} > 18 )
%_init_enable gluster-swift-account-replicator
%_init_enable gluster-swift-account-auditor
%_init_enable gluster-swift-account-reaper
%endif
%preun swift-account
if [ $1 = 0 ] ; then
%_init_stop gluster-swift-account
%_init_disable gluster-swift-account
%if ( 0%{?fedora} && 0%{?fedora} > 18 )
%_init_stop gluster-swift-account-replicator
%_init_disable gluster-swift-account-replicator
%_init_stop gluster-swift-account-auditor
%_init_disable gluster-swift-account-auditor
%_init_stop gluster-swift-account-reaper
%_init_disable gluster-swift-account-reaper
%endif
fi
%postun swift-account
if [ $1 -ge 1 ] ; then
if [ "$1" -ge "1" ] ; then
%_init_restart gluster-swift-account
%if ( 0%{?fedora} && 0%{?fedora} > 18 )
%_init_restart gluster-swift-account-replicator
%_init_restart gluster-swift-account-auditor
%_init_restart gluster-swift-account-reaper
%endif
fi
@@ -1226,36 +1144,18 @@ fi
%post swift-container
%_init_enable gluster-swift-container
%if ( 0%{?fedora} && 0%{?fedora} > 18 )
%_init_enable gluster-swift-container-replicator
%_init_enable gluster-swift-container-auditor
%_init_enable gluster-swift-container-updater
%endif
%preun swift-container
if [ $1 = 0 ] ; then
%_init_stop gluster-swift-container
%_init_disable gluster-swift-container
%if ( 0%{?fedora} && 0%{?fedora} > 18 )
%_init_stop gluster-swift-container-replicator
%_init_disable gluster-swift-container-replicator
%_init_stop gluster-swift-container-auditor
%_init_disable gluster-swift-container-auditor
%_init_stop gluster-swift-container-updater
%_init_disable gluster-swift-container-updater
%endif
fi
%postun swift-container
if [ $1 -ge 1 ] ; then
if [ "$1" -ge "1" ] ; then
%_init_restart gluster-swift-container
%if ( 0%{?fedora} && 0%{?fedora} > 18 )
%_init_restart gluster-swift-container-replicator
%_init_restart gluster-swift-container-auditor
%_init_restart gluster-swift-container-updator
%endif
fi
@@ -1269,36 +1169,18 @@ fi
%post swift-object
%_init_enable gluster-swift-object
%if ( 0%{?fedora} && 0%{?fedora} > 18 )
%_init_enable gluster-swift-object-replicator
%_init_enable gluster-swift-object-auditor
%_init_enable gluster-swift-object-updater
%endif
%preun swift-object
if [ $1 = 0 ] ; then
%_init_stop gluster-swift-object
%_init_disable gluster-swift-object
%if ( 0%{?fedora} && 0%{?fedora} > 18 )
%_init_stop gluster-swift-object-replicator
%_init_disable gluster-swift-object-replicator
%_init_stop gluster-swift-object-auditor
%_init_disable gluster-swift-object-auditor
%_init_stop gluster-swift-object-updater
%_init_disable gluster-swift-object-updater
%endif
fi
%postun swift-object
if [ $1 -ge 1 ] ; then
if [ "$1" -ge "1" ] ; then
%_init_restart gluster-swift-object
%if ( 0%{?fedora} && 0%{?fedora} > 18 )
%_init_restart gluster-swift-object-replicator
%_init_restart gluster-swift-object-auditor
%_init_restart gluster-swift-object-updater
%endif
fi
@@ -1308,42 +1190,30 @@ if [ -f /etc/swift/proxy-server.conf ]; then
echo "warning: /etc/swift/proxy-server.conf saved as /etc/swift/proxy-server.conf.rpmsave"
cp /etc/swift/proxy-server.conf /etc/swift/proxy-server.conf.rpmsave
fi
%if ( 0%{?fedora} && 0%{?fedora} > 18 )
if [ -f /etc/swift/object-expirer.conf ]; then
echo "warning: /etc/swift/object-expirer.conf saved as /etc/swift/object-expirer.conf.rpmsave"
cp /etc/swift/object-expirer.conf /etc/swift/object-expirer.conf.rpmsave
fi
%endif
%post swift-proxy
%_init_enable gluster-swift-proxy
%if ( 0%{?fedora} && 0%{?fedora} > 18 )
%_init_enable gluster-swift-object-expirer
%endif
%preun swift-proxy
if [ $1 = 0 ] ; then
%_init_stop gluster-swift-proxy
%_init_disable gluster-swift-proxy
%if ( 0%{?fedora} && 0%{?fedora} > 18 )
%_init_stop gluster-swift-object-expirer
%_init_disable gluster-swift-object-expirer
%endif
fi
%postun swift-proxy
if [ $1 -ge 1 ] ; then
if [ "$1" -ge "1" ] ; then
%_init_restart gluster-swift-proxy
%if ( 0%{?fedora} && 0%{?fedora} > 18 )
%_init_restart gluster-swift-object-expirer
%endif
fi
%endif
%changelog
* Mon Apr 29 2013 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 3.3.1-14
- include backport of G4S/UFO multi-volume fix
* Fri Apr 19 2013 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 3.4.0-0.3alpha3
- #else -> %else, a twisty maze of passages, all alike
@@ -1356,7 +1226,7 @@ fi
- prerelease 3.4.0alpha3 for oVirt/vdsm dependencies in Fedora19
* Wed Apr 17 2013 Niels de Vos <devos@fedoraproject.org> - 3.3.1-13
- remove unused requires for xlator .so files and private libraries (RHBZ#95212
- remove unused requires for xlator .so files and private libraries (RHBZ#952122)
* Mon Apr 15 2013 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 3.3.1-12
- add glusterfs-3.3.1.rpc.rpcxprt.rdma.name.c.patch, BZ 920332
@@ -1365,6 +1235,7 @@ fi
* Mon Apr 15 2013 Niels de Vos <devos@fedoraproject.org>
- Remove useless provides for xlator .so files and private libraries
(3.4.x)
* Wed Apr 10 2013 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 3.4.0-0.1alpha2
- prerelease 3.4.0alpha2 for oVirt/vdsm dependencies in Fedora19


@@ -1,3 +1,3 @@
cfeb80809faead1bf75c3aed53b37fc5 glusterfs-3.4.0alpha3.tar.gz
0d2479c3cbb124fa3a6b001d0d4d5a8b swift-1.8.0.tar.gz
49358f9687473bd5aa1a4d4375eba259 gluster-swift-ufo-1.2alpha3.tar.gz
4c9f291de887b1193d5d1acac4003360 glusterfs-3.3.1.tar.gz
1cfb85335eafc4317414736477d47dfc swift-1.7.4.tar.gz
c1041b0551be722363f18f3a5664d0cf gluster-swift-ufo-1.1.tar.gz