From d37017bea7dc57dba7a3dd40a2ef4f7d5de32e9e Mon Sep 17 00:00:00 2001 From: Anoop C S Date: Wed, 11 Jan 2017 15:38:16 +0530 Subject: [PATCH] Remove older patch files Signed-off-by: Anoop C S --- ...e-for-serialization-in-memcache-but-.patch | 351 -------- ...llel-install-versions-of-epel-packag.patch | 53 -- ...d-fixes-for-building-the-doc-package.patch | 37 - ...xes-for-building-the-doc-package.patch.180 | 37 - ...here-serialization_format-is-ignored.patch | 70 -- glusterfs-3.2.5.configure.ac.patch | 11 - glusterfs-3.2.5.libglusterfs.Makefile.patch | 23 - ...ors.mgmt.glusterd.glusterd-rpc-ops.c.patch | 474 ----------- glusterfs-3.3.0.cli.cli-rpc-ops.c.patch | 20 - glusterfs-3.3.0.libglusterfs.Makefile.patch | 24 - glusterfs-3.3.0.swift.patch | 797 ------------------ ....3.0.xlator.mount.fuse.fuse-bridge.c.patch | 76 -- glusterfs-3.3.1.rpc.rpcxprt.rdma.name.c.patch | 11 - ...3.1.swift.constraints.backport-1.7.4.patch | 567 ------------- ...rfs-3.3.1.swift.constraints.backport.patch | 518 ------------ ...fo.gluster.multi-volume.backport-1.1.patch | 406 --------- ....gluster.swift.common.DiskFile-1.7.4.patch | 12 - glusterfs-3.3.2.libglusterfs.Makefile.patch | 24 - glusterfs-3.4.0.swift.egginfo-grizzly.patch | 9 - ...fs-3.4.1.add.base-port.config.option.patch | 156 ---- ...terfs-3.4.3.xlator.nfs.server.nlm4.c.patch | 13 - glusterfs-3.8.0.api.glfs-fops.patch | 23 - glusterfs-3.8.0rc1.VERSION.patch | 6 - glusterfs-3.8.0rc2.VERSION.patch | 6 - glusterfs-3.8.5.xlators.crypt.patch | 48 -- glusterfs-3.9.0rc2.xlators.crypt.patch | 48 -- openstack-swift-docmod.patch | 14 - openstack-swift-newdeps.patch | 36 - openstack-swift-nonet.patch | 11 - 29 files changed, 3881 deletions(-) delete mode 100644 0001-Do-not-use-pickle-for-serialization-in-memcache-but-.patch delete mode 100644 0001-Use-updated-parallel-install-versions-of-epel-packag.patch delete mode 100644 0002-Add-fixes-for-building-the-doc-package.patch delete mode 100644 0002-Add-fixes-for-building-the-doc-package.patch.180 delete mode 100644 0002-Fix-bug-where-serialization_format-is-ignored.patch delete mode 100644 glusterfs-3.2.5.configure.ac.patch delete mode 100644 glusterfs-3.2.5.libglusterfs.Makefile.patch delete mode 100644 glusterfs-3.2.7.xlators.mgmt.glusterd.glusterd-rpc-ops.c.patch delete mode 100644 glusterfs-3.3.0.cli.cli-rpc-ops.c.patch delete mode 100644 glusterfs-3.3.0.libglusterfs.Makefile.patch delete mode 100644 glusterfs-3.3.0.swift.patch delete mode 100644 glusterfs-3.3.0.xlator.mount.fuse.fuse-bridge.c.patch delete mode 100644 glusterfs-3.3.1.rpc.rpcxprt.rdma.name.c.patch delete mode 100644 glusterfs-3.3.1.swift.constraints.backport-1.7.4.patch delete mode 100644 glusterfs-3.3.1.swift.constraints.backport.patch delete mode 100644 glusterfs-3.3.1.ufo.gluster.multi-volume.backport-1.1.patch delete mode 100644 glusterfs-3.3.1.ufo.gluster.swift.common.DiskFile-1.7.4.patch delete mode 100644 glusterfs-3.3.2.libglusterfs.Makefile.patch delete mode 100644 glusterfs-3.4.0.swift.egginfo-grizzly.patch delete mode 100644 glusterfs-3.4.1.add.base-port.config.option.patch delete mode 100644 glusterfs-3.4.3.xlator.nfs.server.nlm4.c.patch delete mode 100644 glusterfs-3.8.0.api.glfs-fops.patch delete mode 100644 glusterfs-3.8.0rc1.VERSION.patch delete mode 100644 glusterfs-3.8.0rc2.VERSION.patch delete mode 100644 glusterfs-3.8.5.xlators.crypt.patch delete mode 100644 glusterfs-3.9.0rc2.xlators.crypt.patch delete mode 100644 openstack-swift-docmod.patch delete mode 100644 openstack-swift-newdeps.patch delete mode 100644 
openstack-swift-nonet.patch diff --git a/0001-Do-not-use-pickle-for-serialization-in-memcache-but-.patch b/0001-Do-not-use-pickle-for-serialization-in-memcache-but-.patch deleted file mode 100644 index 6179ced..0000000 --- a/0001-Do-not-use-pickle-for-serialization-in-memcache-but-.patch +++ /dev/null @@ -1,351 +0,0 @@ -From c0619bd0c5eeb3d2f8af8b37575e11847664272c Mon Sep 17 00:00:00 2001 -From: Vincent Untz -Date: Thu, 21 Jun 2012 14:37:41 +0200 -Subject: [PATCH] Do not use pickle for serialization in memcache, but JSON - -We don't want to use pickle as it can execute arbitrary code. JSON is -safer. However, note that it supports serialization for only some -specific subset of object types; this should be enough for what we need, -though. - -To avoid issues on upgrades (unability to read pickled values, and cache -poisoning for old servers not understanding JSON), we add a -memcache_serialization_support configuration option, with the following -values: - - 0 = older, insecure pickle serialization (compatible, default in this release) - 1 = json serialization but pickles can still be read (still insecure) - 2 = json serialization only (secure, suggested, and the future default) - -To avoid an instant full cache flush, existing installations should -upgrade with 0, then set to 1 and reload, then after some time (24 -hours) set to 2 and reload. Support for 0 and 1 will be removed in -future versions. - -Part of bug 1006414. - -Patch Set 2: Added Vincent Untz to AUTHORS - -Change-Id: Id7d6d547b103b4f23ebf5be98b88f09ec6027ce4 ---- - doc/manpages/proxy-server.conf.5 | 15 ++++++++ - etc/memcache.conf-sample | 10 +++++ - etc/proxy-server.conf-sample | 12 ++++++ - swift/common/memcached.py | 48 +++++++++++++++++++++----- - swift/common/middleware/memcache.py | 30 ++++++++++++---- - test/unit/common/middleware/test_memcache.py | 5 ++- - test/unit/common/test_memcached.py | 22 ++++++++++++ - 7 files changed, 125 insertions(+), 17 deletions(-) - -diff --git a/doc/manpages/proxy-server.conf.5 b/doc/manpages/proxy-server.conf.5 -index 4979e4d..5cf5a7e 100644 ---- a/doc/manpages/proxy-server.conf.5 -+++ b/doc/manpages/proxy-server.conf.5 -@@ -205,6 +205,21 @@ Enables the ability to log request headers. The default is False. - .IP \fBmemcache_servers\fR - The memcache servers that are available. This can be a list separated by commas. The default - is 127.0.0.1:11211. -+.IP \fBmemcache_serialization_support\fR -+This sets how memcache values are serialized and deserialized: -+.RE -+ -+.PD 0 -+.RS 10 -+.IP "0 = older, insecure pickle serialization (default)" -+.IP "1 = json serialization but pickles can still be read (still insecure)" -+.IP "2 = json serialization only (secure)" -+.RE -+ -+.RS 10 -+To avoid an instant full cache flush, existing installations should upgrade with 0, then set to 1 and reload, then after some time (24 hours) set to 2 and reload. In the future, the ability to use pickle serialization will be removed. -+ -+If not set in the configuration file, the value for memcache_serialization_support will be read from /etc/swift/memcache.conf if it exists (see memcache.conf-sample). Otherwise, the default value as indicated above will be used. - .RE - - -diff --git a/etc/memcache.conf-sample b/etc/memcache.conf-sample -index 580d94a..cedfc19 100644 ---- a/etc/memcache.conf-sample -+++ b/etc/memcache.conf-sample -@@ -3,3 +3,13 @@ - # several other conf files under [filter:cache] for example. 
You can specify - # multiple servers separated with commas, as in: 10.1.2.3:11211,10.1.2.4:11211 - # memcache_servers = 127.0.0.1:11211 -+# -+# Sets how memcache values are serialized and deserialized: -+# 0 = older, insecure pickle serialization (compatible, default in this release) -+# 1 = json serialization but pickles can still be read (still insecure) -+# 2 = json serialization only (secure, suggested, and the future default) -+# To avoid an instant full cache flush, existing installations should -+# upgrade with 0, then set to 1 and reload, then after some time (24 hours) -+# set to 2 and reload. -+# In the future, the ability to use pickle serialization will be removed. -+# memcache_serialization_support = 0 -diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample -index 148616b..18f711a 100644 ---- a/etc/proxy-server.conf-sample -+++ b/etc/proxy-server.conf-sample -@@ -122,6 +122,18 @@ use = egg:swift#memcache - # default to the value below. You can specify multiple servers separated with - # commas, as in: 10.1.2.3:11211,10.1.2.4:11211 - # memcache_servers = 127.0.0.1:11211 -+# -+# Sets how memcache values are serialized and deserialized: -+# 0 = older, insecure pickle serialization (compatible, default in this release) -+# 1 = json serialization but pickles can still be read (still insecure) -+# 2 = json serialization only (secure, suggested, and the future default) -+# If not set here, the value for memcache_serialization_support will be read -+# from /etc/swift/memcache.conf (see memcache.conf-sample). -+# To avoid an instant full cache flush, existing installations should -+# upgrade with 0, then set to 1 and reload, then after some time (24 hours) -+# set to 2 and reload. -+# In the future, the ability to use pickle serialization will be removed. 
-+# memcache_serialization_support = 0 - - [filter:ratelimit] - use = egg:swift#ratelimit -diff --git a/swift/common/memcached.py b/swift/common/memcached.py -index ecd9332..82ebb7a 100644 ---- a/swift/common/memcached.py -+++ b/swift/common/memcached.py -@@ -27,11 +27,17 @@ import time - from bisect import bisect - from hashlib import md5 - -+try: -+ import simplejson as json -+except ImportError: -+ import json -+ - DEFAULT_MEMCACHED_PORT = 11211 - - CONN_TIMEOUT = 0.3 - IO_TIMEOUT = 2.0 - PICKLE_FLAG = 1 -+JSON_FLAG = 2 - NODE_WEIGHT = 50 - PICKLE_PROTOCOL = 2 - TRY_COUNT = 3 -@@ -57,7 +63,8 @@ class MemcacheRing(object): - """ - - def __init__(self, servers, connect_timeout=CONN_TIMEOUT, -- io_timeout=IO_TIMEOUT, tries=TRY_COUNT): -+ io_timeout=IO_TIMEOUT, tries=TRY_COUNT, -+ allow_pickle=False, allow_unpickle=False): - self._ring = {} - self._errors = dict(((serv, []) for serv in servers)) - self._error_limited = dict(((serv, 0) for serv in servers)) -@@ -69,6 +76,8 @@ class MemcacheRing(object): - self._client_cache = dict(((server, []) for server in servers)) - self._connect_timeout = connect_timeout - self._io_timeout = io_timeout -+ self._allow_pickle = allow_pickle -+ self._allow_unpickle = allow_unpickle or allow_pickle - - def _exception_occurred(self, server, e, action='talking'): - if isinstance(e, socket.timeout): -@@ -130,16 +139,21 @@ class MemcacheRing(object): - - :param key: key - :param value: value -- :param serialize: if True, value is pickled before sending to memcache -+ :param serialize: if True, value is serialized with JSON before sending -+ to memcache, or with pickle if configured to use -+ pickle instead of JSON (to avoid cache poisoning) - :param timeout: ttl in memcache - """ - key = md5hash(key) - if timeout > 0: - timeout += time.time() - flags = 0 -- if serialize: -+ if serialize and self._allow_pickle: - value = pickle.dumps(value, PICKLE_PROTOCOL) - flags |= PICKLE_FLAG -+ elif serialize: -+ value = json.dumps(value) -+ flags |= JSON_FLAG - for (server, fp, sock) in self._get_conns(key): - try: - sock.sendall('set %s %d %d %s noreply\r\n%s\r\n' % \ -@@ -151,8 +165,9 @@ class MemcacheRing(object): - - def get(self, key): - """ -- Gets the object specified by key. It will also unpickle the object -- before returning if it is pickled in memcache. -+ Gets the object specified by key. It will also unserialize the object -+ before returning if it is serialized in memcache with JSON, or if it -+ is pickled and unpickling is allowed. 
- - :param key: key - :returns: value of the key in memcache -@@ -168,7 +183,12 @@ class MemcacheRing(object): - size = int(line[3]) - value = fp.read(size) - if int(line[2]) & PICKLE_FLAG: -- value = pickle.loads(value) -+ if self._allow_unpickle: -+ value = pickle.loads(value) -+ else: -+ value = None -+ elif int(line[2]) & JSON_FLAG: -+ value = json.loads(value) - fp.readline() - line = fp.readline().strip().split() - self._return_conn(server, fp, sock) -@@ -258,7 +278,9 @@ class MemcacheRing(object): - :param mapping: dictonary of keys and values to be set in memcache - :param servery_key: key to use in determining which server in the ring - is used -- :param serialize: if True, value is pickled before sending to memcache -+ :param serialize: if True, value is serialized with JSON before sending -+ to memcache, or with pickle if configured to use -+ pickle instead of JSON (to avoid cache poisoning) - :param timeout: ttl for memcache - """ - server_key = md5hash(server_key) -@@ -268,9 +290,12 @@ class MemcacheRing(object): - for key, value in mapping.iteritems(): - key = md5hash(key) - flags = 0 -- if serialize: -+ if serialize and self._allow_pickle: - value = pickle.dumps(value, PICKLE_PROTOCOL) - flags |= PICKLE_FLAG -+ elif serialize: -+ value = json.dumps(value) -+ flags |= JSON_FLAG - msg += ('set %s %d %d %s noreply\r\n%s\r\n' % - (key, flags, timeout, len(value), value)) - for (server, fp, sock) in self._get_conns(server_key): -@@ -302,7 +327,12 @@ class MemcacheRing(object): - size = int(line[3]) - value = fp.read(size) - if int(line[2]) & PICKLE_FLAG: -- value = pickle.loads(value) -+ if self._allow_unpickle: -+ value = pickle.loads(value) -+ else: -+ value = None -+ elif int(line[2]) & JSON_FLAG: -+ value = json.loads(value) - responses[line[1]] = value - fp.readline() - line = fp.readline().strip().split() -diff --git a/swift/common/middleware/memcache.py b/swift/common/middleware/memcache.py -index eb988bd..20121c9 100644 ---- a/swift/common/middleware/memcache.py -+++ b/swift/common/middleware/memcache.py -@@ -27,20 +27,36 @@ class MemcacheMiddleware(object): - def __init__(self, app, conf): - self.app = app - self.memcache_servers = conf.get('memcache_servers') -- if not self.memcache_servers: -+ serialization_format = conf.get('memcache_serialization_support') -+ -+ if not self.memcache_servers or serialization_format is None: - path = os.path.join(conf.get('swift_dir', '/etc/swift'), - 'memcache.conf') - memcache_conf = ConfigParser() - if memcache_conf.read(path): -- try: -- self.memcache_servers = \ -- memcache_conf.get('memcache', 'memcache_servers') -- except (NoSectionError, NoOptionError): -- pass -+ if not self.memcache_servers: -+ try: -+ self.memcache_servers = \ -+ memcache_conf.get('memcache', 'memcache_servers') -+ except (NoSectionError, NoOptionError): -+ pass -+ if serialization_format is None: -+ try: -+ serialization_format = \ -+ memcache_conf.get('memcache', -+ 'memcache_serialization_support') -+ except (NoSectionError, NoOptionError): -+ pass -+ - if not self.memcache_servers: - self.memcache_servers = '127.0.0.1:11211' -+ if serialization_format is None: -+ serialization_format = 0 -+ - self.memcache = MemcacheRing( -- [s.strip() for s in self.memcache_servers.split(',') if s.strip()]) -+ [s.strip() for s in self.memcache_servers.split(',') if s.strip()], -+ allow_pickle=(serialization_format == 0), -+ allow_unpickle=(serialization_format <= 1)) - - def __call__(self, env, start_response): - env['swift.cache'] = self.memcache -diff --git 
a/test/unit/common/middleware/test_memcache.py b/test/unit/common/middleware/test_memcache.py -index 6b94bd1..e217a96 100644 ---- a/test/unit/common/middleware/test_memcache.py -+++ b/test/unit/common/middleware/test_memcache.py -@@ -47,6 +47,8 @@ class SetConfigParser(object): - if section == 'memcache': - if option == 'memcache_servers': - return '1.2.3.4:5' -+ elif option == 'memcache_serialization_support': -+ return '2' - else: - raise NoOptionError(option) - else: -@@ -86,7 +88,8 @@ class TestCacheMiddleware(unittest.TestCase): - exc = None - try: - app = memcache.MemcacheMiddleware( -- FakeApp(), {'memcache_servers': '1.2.3.4:5'}) -+ FakeApp(), {'memcache_servers': '1.2.3.4:5', -+ 'memcache_serialization_support': '2'}) - except Exception, err: - exc = err - finally: -diff --git a/test/unit/common/test_memcached.py b/test/unit/common/test_memcached.py -index dff6e80..3016d10 100644 ---- a/test/unit/common/test_memcached.py -+++ b/test/unit/common/test_memcached.py -@@ -1,3 +1,4 @@ -+ # -*- coding: utf8 -*- - # Copyright (c) 2010-2012 OpenStack, LLC. - # - # Licensed under the Apache License, Version 2.0 (the "License"); -@@ -166,6 +167,9 @@ class TestMemcached(unittest.TestCase): - self.assertEquals(memcache_client.get('some_key'), [1, 2, 3]) - memcache_client.set('some_key', [4, 5, 6]) - self.assertEquals(memcache_client.get('some_key'), [4, 5, 6]) -+ memcache_client.set('some_key', ['simple str', 'utf8 str éà']) -+ # As per http://wiki.openstack.org/encoding, we should expect to have unicode -+ self.assertEquals(memcache_client.get('some_key'), ['simple str', u'utf8 str éà']) - self.assert_(float(mock.cache.values()[0][1]) == 0) - esttimeout = time.time() + 10 - memcache_client.set('some_key', [1, 2, 3], timeout=10) -@@ -244,6 +248,24 @@ class TestMemcached(unittest.TestCase): - self.assertEquals(memcache_client.get_multi(('some_key2', 'some_key1', - 'not_exists'), 'multi_key'), [[4, 5, 6], [1, 2, 3], None]) - -+ def test_serialization(self): -+ memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'], -+ allow_pickle=True) -+ mock = MockMemcached() -+ memcache_client._client_cache['1.2.3.4:11211'] = [(mock, mock)] * 2 -+ memcache_client.set('some_key', [1, 2, 3]) -+ self.assertEquals(memcache_client.get('some_key'), [1, 2, 3]) -+ memcache_client._allow_pickle = False -+ memcache_client._allow_unpickle = True -+ self.assertEquals(memcache_client.get('some_key'), [1, 2, 3]) -+ memcache_client._allow_unpickle = False -+ self.assertEquals(memcache_client.get('some_key'), None) -+ memcache_client.set('some_key', [1, 2, 3]) -+ self.assertEquals(memcache_client.get('some_key'), [1, 2, 3]) -+ memcache_client._allow_unpickle = True -+ self.assertEquals(memcache_client.get('some_key'), [1, 2, 3]) -+ memcache_client._allow_pickle = True -+ self.assertEquals(memcache_client.get('some_key'), [1, 2, 3]) - - if __name__ == '__main__': - unittest.main() diff --git a/0001-Use-updated-parallel-install-versions-of-epel-packag.patch b/0001-Use-updated-parallel-install-versions-of-epel-packag.patch deleted file mode 100644 index 309b08b..0000000 --- a/0001-Use-updated-parallel-install-versions-of-epel-packag.patch +++ /dev/null @@ -1,53 +0,0 @@ -From 43e8681d5cbd6c919e379fe25cccc189827e2d60 Mon Sep 17 00:00:00 2001 -From: Alan Pevec -Date: Wed, 4 Jan 2012 00:15:05 +0100 -Subject: [PATCH] Use updated parallel install versions of epel package -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Use WebOb >= 1.0 -and depend on the EPEL parallel installable 
versions of the package -to satisfy those requirements. -Based on Nova/Glance EPEL patch by Pádraig Brady ---- - swift/__init__.py | 29 +++++++++++++++++++++++++++++ - 1 file changed, 29 insertions(+) - -diff --git a/swift/__init__.py b/swift/__init__.py -index 9065801..9600d1e 100644 ---- a/swift/__init__.py -+++ b/swift/__init__.py -@@ -1,3 +1,32 @@ -+import sys -+import pkg_resources -+ -+# If there is a conflicting non egg module, -+# i.e. an older standard system module installed, -+# then replace it with this requirement -+def replace_dist(requirement): -+ try: -+ return pkg_resources.require(requirement) -+ except pkg_resources.VersionConflict: -+ e = sys.exc_info()[1] -+ dist=e.args[0] -+ req=e.args[1] -+ if dist.key == req.key and not dist.location.endswith('.egg'): -+ del pkg_resources.working_set.by_key[dist.key] -+ # We assume there is no need to adjust sys.path -+ # and the associated pkg_resources.working_set.entries -+ return pkg_resources.require(requirement) -+ -+replace_dist("WebOb >= 1.0") -+ -+replace_dist("PasteDeploy >= 1.5.0") -+# This hack is needed because replace_dist() results in -+# the standard paste module path being at the start of __path__. -+# TODO: See can we get pkg_resources to do the right thing directly -+import paste -+paste.__path__.insert(0, paste.__path__.pop(-1)) -+ -+ - import gettext - - diff --git a/0002-Add-fixes-for-building-the-doc-package.patch b/0002-Add-fixes-for-building-the-doc-package.patch deleted file mode 100644 index f8a173d..0000000 --- a/0002-Add-fixes-for-building-the-doc-package.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 7df10fb14d27e35faa590770594ea1b05552576f Mon Sep 17 00:00:00 2001 -From: Alan Pevec -Date: Thu, 5 Jan 2012 00:03:00 +0100 -Subject: [PATCH] Add fixes for building the doc package -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Don't access the net and always reference -the swift module from the package we're building -Based on Nova/Glance EPEL patch by Pádraig Brady ---- - doc/source/conf.py | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/doc/source/conf.py b/doc/source/conf.py -index e6a43b0..3734cd4 100644 ---- a/doc/source/conf.py -+++ b/doc/source/conf.py -@@ -33,14 +33,14 @@ import os - # If extensions (or modules to document with autodoc) are in another directory, - # add these directories to sys.path here. If the directory is relative to the - # documentation root, use os.path.abspath to make it absolute, like shown here. --sys.path.append([os.path.abspath('../swift'), os.path.abspath('..'), -- os.path.abspath('../bin')]) -+sys.path.extend([os.path.abspath('../../swift'), os.path.abspath('../..'), -+ os.path.abspath('../../bin')]) - - # -- General configuration ---------------------------------------------------- - - # Add any Sphinx extension module names here, as strings. They can be - # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
--extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', -+extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', - 'sphinx.ext.ifconfig'] - todo_include_todos = True diff --git a/0002-Add-fixes-for-building-the-doc-package.patch.180 b/0002-Add-fixes-for-building-the-doc-package.patch.180 deleted file mode 100644 index a3c1856..0000000 --- a/0002-Add-fixes-for-building-the-doc-package.patch.180 +++ /dev/null @@ -1,37 +0,0 @@ -From 7df10fb14d27e35faa590770594ea1b05552576f Mon Sep 17 00:00:00 2001 -From: Alan Pevec -Date: Thu, 5 Jan 2012 00:03:00 +0100 -Subject: [PATCH] Add fixes for building the doc package -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Don't access the net and always reference -the swift module from the package we're building -Based on Nova/Glance EPEL patch by Pádraig Brady ---- - doc/source/conf.py | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/doc/source/conf.py b/doc/source/conf.py -index e6a43b0..3734cd4 100644 ---- a/doc/source/conf.py -+++ b/doc/source/conf.py -@@ -20,14 +20,14 @@ import os - # If extensions (or modules to document with autodoc) are in another directory, - # add these directories to sys.path here. If the directory is relative to the - # documentation root, use os.path.abspath to make it absolute, like shown here. --sys.path.extend([os.path.abspath('../swift'), os.path.abspath('..'), -- os.path.abspath('../bin')]) -+sys.path.extend([os.path.abspath('../../swift'), os.path.abspath('../..'), -+ os.path.abspath('../../bin')]) - - # -- General configuration ---------------------------------------------------- - - # Add any Sphinx extension module names here, as strings. They can be - # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
--extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', -+extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', - 'sphinx.ext.ifconfig'] - todo_include_todos = True diff --git a/0002-Fix-bug-where-serialization_format-is-ignored.patch b/0002-Fix-bug-where-serialization_format-is-ignored.patch deleted file mode 100644 index 4f7e4b4..0000000 --- a/0002-Fix-bug-where-serialization_format-is-ignored.patch +++ /dev/null @@ -1,70 +0,0 @@ -From c38568f026853f64f2669f03bd56441b007f13be Mon Sep 17 00:00:00 2001 -From: gholt -Date: Tue, 18 Sep 2012 18:24:47 +0000 -Subject: [PATCH] Fix bug where serialization_format is ignored - -Change-Id: I5a5ac8b5f18e077105ab12e9b1f0ccafac3983f7 ---- - swift/common/middleware/memcache.py | 2 ++ - test/unit/common/middleware/test_memcache.py | 12 ++++++++++-- - 2 files changed, 12 insertions(+), 2 deletions(-) - -diff --git a/swift/common/middleware/memcache.py b/swift/common/middleware/memcache.py -index 20121c9..06678c4 100644 ---- a/swift/common/middleware/memcache.py -+++ b/swift/common/middleware/memcache.py -@@ -52,6 +52,8 @@ class MemcacheMiddleware(object): - self.memcache_servers = '127.0.0.1:11211' - if serialization_format is None: - serialization_format = 0 -+ else: -+ serialization_format = int(serialization_format) - - self.memcache = MemcacheRing( - [s.strip() for s in self.memcache_servers.split(',') if s.strip()], -diff --git a/test/unit/common/middleware/test_memcache.py b/test/unit/common/middleware/test_memcache.py -index e217a96..28c7b13 100644 ---- a/test/unit/common/middleware/test_memcache.py -+++ b/test/unit/common/middleware/test_memcache.py -@@ -48,7 +48,7 @@ class SetConfigParser(object): - if option == 'memcache_servers': - return '1.2.3.4:5' - elif option == 'memcache_serialization_support': -- return '2' -+ return '1' - else: - raise NoOptionError(option) - else: -@@ -104,6 +104,8 @@ class TestCacheMiddleware(unittest.TestCase): - finally: - memcache.ConfigParser = orig_parser - self.assertEquals(app.memcache_servers, '127.0.0.1:11211') -+ self.assertEquals(app.memcache._allow_pickle, True) -+ self.assertEquals(app.memcache._allow_unpickle, True) - - def test_conf_from_extra_conf(self): - orig_parser = memcache.ConfigParser -@@ -113,16 +115,22 @@ class TestCacheMiddleware(unittest.TestCase): - finally: - memcache.ConfigParser = orig_parser - self.assertEquals(app.memcache_servers, '1.2.3.4:5') -+ self.assertEquals(app.memcache._allow_pickle, False) -+ self.assertEquals(app.memcache._allow_unpickle, True) - - def test_conf_from_inline_conf(self): - orig_parser = memcache.ConfigParser - memcache.ConfigParser = SetConfigParser - try: - app = memcache.MemcacheMiddleware( -- FakeApp(), {'memcache_servers': '6.7.8.9:10'}) -+ FakeApp(), -+ {'memcache_servers': '6.7.8.9:10', -+ 'serialization_format': '0'}) - finally: - memcache.ConfigParser = orig_parser - self.assertEquals(app.memcache_servers, '6.7.8.9:10') -+ self.assertEquals(app.memcache._allow_pickle, False) -+ self.assertEquals(app.memcache._allow_unpickle, True) - - - if __name__ == '__main__': diff --git a/glusterfs-3.2.5.configure.ac.patch b/glusterfs-3.2.5.configure.ac.patch deleted file mode 100644 index 122af1a..0000000 --- a/glusterfs-3.2.5.configure.ac.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- configure.ac.orig 2012-01-16 13:38:53.020000114 -0500 -+++ configure.ac 2012-01-16 13:39:29.177000589 -0500 -@@ -431,7 +431,7 @@ - linux*) - dnl GF_LINUX_HOST_OS=1 - GF_HOST_OS="GF_LINUX_HOST_OS" -- 
GF_CFLAGS="${ARGP_STANDALONE_CPPFLAGS} -O0" -+ GF_CFLAGS="${ARGP_STANDALONE_CPPFLAGS} -O2" - GF_GLUSTERFS_CFLAGS="${GF_CFLAGS}" - GF_LDADD="${ARGP_STANDALONE_LDADD}" - GF_FUSE_CFLAGS="-DFUSERMOUNT_DIR=\\\"\$(bindir)\\\"" diff --git a/glusterfs-3.2.5.libglusterfs.Makefile.patch b/glusterfs-3.2.5.libglusterfs.Makefile.patch deleted file mode 100644 index 56a84b0..0000000 --- a/glusterfs-3.2.5.libglusterfs.Makefile.patch +++ /dev/null @@ -1,23 +0,0 @@ ---- libglusterfs/src/Makefile.am.orig 2011-11-23 14:04:41.810001717 -0500 -+++ libglusterfs/src/Makefile.am 2011-11-23 14:30:49.940000394 -0500 -@@ -16,6 +16,7 @@ - $(LEX) -t $(srcdir)/graph.l > $@ - - y.tab.c y.tab.h: graph.y -- $(YACC) -d $(srcdir)/graph.y -+ $(YACC) -d -b foo $(srcdir)/graph.y -+ mv foo.tab.h y.tab.h && mv foo.tab.c y.tab.c - - CLEANFILES = graph.lex.c y.tab.c y.tab.h ---- libglusterfs/src/Makefile.in.orig 2011-11-23 14:04:35.995001451 -0500 -+++ libglusterfs/src/Makefile.in 2011-11-23 14:31:01.730999353 -0500 -@@ -866,7 +866,8 @@ - $(LEX) -t $(srcdir)/graph.l > $@ - - y.tab.c y.tab.h: graph.y -- $(YACC) -d $(srcdir)/graph.y -+ $(YACC) -d -b foo $(srcdir)/graph.y -+ mv foo.tab.h y.tab.h && mv foo.tab.c y.tab.c - # Tell versions [3.59,3.63) of GNU make to not export all variables. - # Otherwise a system limit (for SysV at least) may be exceeded. - .NOEXPORT: diff --git a/glusterfs-3.2.7.xlators.mgmt.glusterd.glusterd-rpc-ops.c.patch b/glusterfs-3.2.7.xlators.mgmt.glusterd.glusterd-rpc-ops.c.patch deleted file mode 100644 index d19799b..0000000 --- a/glusterfs-3.2.7.xlators.mgmt.glusterd.glusterd-rpc-ops.c.patch +++ /dev/null @@ -1,474 +0,0 @@ ---- xlators/mgmt/glusterd/src/glusterd-rpc-ops.c -+++ xlators/mgmt/glusterd/src/glusterd-rpc-ops.c -@@ -51,9 +51,26 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret, - int32_t op_errno, rpcsvc_request_t *req, - void *op_ctx, char *op_errstr) - { -- int32_t ret = -1; -- gd_serialize_t sfunc = NULL; -- void *cli_rsp = NULL; -+ union { -+ gf1_cli_create_vol_rsp createv_rsp; -+ gf1_cli_start_vol_rsp startv_rsp; -+ gf1_cli_stop_vol_rsp stopv_rsp; -+ gf1_cli_delete_vol_rsp delv_rsp; -+ gf1_cli_defrag_vol_rsp defragv_rsp; -+ gf1_cli_set_vol_rsp setv_rsp; -+ gf1_cli_reset_vol_rsp resetv_rsp; -+ gf1_cli_sync_volume_rsp syncv_rsp; -+ gf1_cli_stats_volume_rsp statsv_rsp; -+ gf1_cli_add_brick_rsp addb_rsp; -+ gf1_cli_remove_brick_rsp rmb_rsp; -+ gf1_cli_replace_brick_rsp replb_rsp; -+ gf1_cli_log_filename_rsp logfn_rsp; -+ gf1_cli_log_rotate_rsp logrot_rsp; -+ gf1_cli_gsync_set_rsp gsyncs_rsp; -+ gf1_cli_quota_rsp quota_rsp; -+ } cli_rsp; -+ int32_t ret = -1; -+ gd_serialize_t sfunc = NULL; - dict_t *ctx = NULL; - char *free_ptr = NULL; - glusterd_conf_t *conf = NULL; -@@ -67,145 +84,103 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret, - switch (op) { - case GD_OP_CREATE_VOLUME: - { -- gf1_cli_create_vol_rsp rsp = {0,}; -- rsp.op_ret = op_ret; -- rsp.op_errno = op_errno; -- rsp.volname = ""; -- if (op_errstr) -- rsp.op_errstr = op_errstr; -- else -- rsp.op_errstr = ""; -- cli_rsp = &rsp; -+ cli_rsp.createv_rsp.op_ret = op_ret; -+ cli_rsp.createv_rsp.op_errno = op_errno; -+ cli_rsp.createv_rsp.volname = ""; -+ cli_rsp.createv_rsp.op_errstr = op_errstr ? 
op_errstr : ""; - sfunc = gf_xdr_serialize_cli_create_vol_rsp; - break; - } - - case GD_OP_START_VOLUME: - { -- gf1_cli_start_vol_rsp rsp = {0,}; -- rsp.op_ret = op_ret; -- rsp.op_errno = op_errno; -- rsp.volname = ""; -- if (op_errstr) -- rsp.op_errstr = op_errstr; -- else -- rsp.op_errstr = ""; -- cli_rsp = &rsp; -+ cli_rsp.startv_rsp.op_ret = op_ret; -+ cli_rsp.startv_rsp.op_errno = op_errno; -+ cli_rsp.startv_rsp.volname = ""; -+ cli_rsp.startv_rsp.op_errstr = op_errstr ? op_errstr : ""; - sfunc = gf_xdr_serialize_cli_start_vol_rsp; - break; - } - - case GD_OP_STOP_VOLUME: - { -- gf1_cli_stop_vol_rsp rsp = {0,}; -- rsp.op_ret = op_ret; -- rsp.op_errno = op_errno; -- rsp.volname = ""; -- if (op_errstr) -- rsp.op_errstr = op_errstr; -- else -- rsp.op_errstr = ""; -- cli_rsp = &rsp; -+ cli_rsp.stopv_rsp.op_ret = op_ret; -+ cli_rsp.stopv_rsp.op_errno = op_errno; -+ cli_rsp.stopv_rsp.volname = ""; -+ cli_rsp.stopv_rsp.op_errstr = op_errstr ? op_errstr : ""; - sfunc = gf_xdr_serialize_cli_stop_vol_rsp; - break; - } - - case GD_OP_DELETE_VOLUME: - { -- gf1_cli_delete_vol_rsp rsp = {0,}; -- rsp.op_ret = op_ret; -- rsp.op_errno = op_errno; -- rsp.volname = ""; -- if (op_errstr) -- rsp.op_errstr = op_errstr; -- else -- rsp.op_errstr = ""; -- cli_rsp = &rsp; -+ cli_rsp.delv_rsp.op_ret = op_ret; -+ cli_rsp.delv_rsp.op_errno = op_errno; -+ cli_rsp.delv_rsp.volname = ""; -+ cli_rsp.delv_rsp.op_errstr = op_errstr ? op_errstr : ""; - sfunc = gf_xdr_serialize_cli_delete_vol_rsp; - break; - } - - case GD_OP_DEFRAG_VOLUME: - { -- gf1_cli_defrag_vol_rsp rsp = {0,}; -- rsp.op_ret = op_ret; -- rsp.op_errno = op_errno; -- //rsp.volname = ""; -- cli_rsp = &rsp; -+ cli_rsp.defragv_rsp.op_ret = op_ret; -+ cli_rsp.defragv_rsp.op_errno = op_errno; -+ //cli_rsp.defragv_rsp.volname = ""; - sfunc = gf_xdr_serialize_cli_defrag_vol_rsp; - break; - } - - case GD_OP_ADD_BRICK: - { -- gf1_cli_add_brick_rsp rsp = {0,}; -- rsp.op_ret = op_ret; -- rsp.op_errno = op_errno; -- rsp.volname = ""; -- if (op_errstr) -- rsp.op_errstr = op_errstr; -- else -- rsp.op_errstr = ""; -- cli_rsp = &rsp; -+ cli_rsp.addb_rsp.op_ret = op_ret; -+ cli_rsp.addb_rsp.op_errno = op_errno; -+ cli_rsp.addb_rsp.volname = ""; -+ cli_rsp.addb_rsp.op_errstr = op_errstr ? op_errstr : ""; - sfunc = gf_xdr_serialize_cli_add_brick_rsp; - break; - } - - case GD_OP_REMOVE_BRICK: - { -- gf1_cli_remove_brick_rsp rsp = {0,}; - ctx = op_ctx; - if (ctx && -- dict_get_str (ctx, "errstr", &rsp.op_errstr)) -- rsp.op_errstr = ""; -- rsp.op_ret = op_ret; -- rsp.op_errno = op_errno; -- rsp.volname = ""; -- cli_rsp = &rsp; -+ dict_get_str (ctx, "errstr", &cli_rsp.rmb_rsp.op_errstr)) -+ cli_rsp.rmb_rsp.op_errstr = ""; -+ cli_rsp.rmb_rsp.op_ret = op_ret; -+ cli_rsp.rmb_rsp.op_errno = op_errno; -+ cli_rsp.rmb_rsp.volname = ""; - sfunc = gf_xdr_serialize_cli_remove_brick_rsp; - break; - } - - case GD_OP_REPLACE_BRICK: - { -- gf1_cli_replace_brick_rsp rsp = {0,}; - ctx = op_ctx; - if (ctx && -- dict_get_str (ctx, "status-reply", &rsp.status)) -- rsp.status = ""; -- rsp.op_ret = op_ret; -- rsp.op_errno = op_errno; -- if (op_errstr) -- rsp.op_errstr = op_errstr; -- else -- rsp.op_errstr = ""; -- rsp.volname = ""; -- cli_rsp = &rsp; -+ dict_get_str (ctx, "status-reply", &cli_rsp.replb_rsp.status)) -+ cli_rsp.replb_rsp.status = ""; -+ cli_rsp.replb_rsp.op_ret = op_ret; -+ cli_rsp.replb_rsp.op_errno = op_errno; -+ cli_rsp.replb_rsp.volname = ""; -+ cli_rsp.replb_rsp.op_errstr = op_errstr ? 
op_errstr : ""; - sfunc = gf_xdr_serialize_cli_replace_brick_rsp; - break; - } - - case GD_OP_SET_VOLUME: - { -- gf1_cli_set_vol_rsp rsp = {0,}; -- rsp.op_ret = op_ret; -- rsp.op_errno = op_errno; -- rsp.volname = ""; - ctx = op_ctx; -- -- if (op_errstr) -- rsp.op_errstr = op_errstr; -- else -- rsp.op_errstr = ""; - if (ctx) { - ret = dict_allocate_and_serialize (ctx, -- &rsp.dict.dict_val, -- (size_t*)&rsp.dict.dict_len); -+ &cli_rsp.setv_rsp.dict.dict_val, -+ (size_t*)&cli_rsp.setv_rsp.dict.dict_len); - if (ret == 0) -- free_ptr = rsp.dict.dict_val; -+ free_ptr = cli_rsp.setv_rsp.dict.dict_val; - } -- -- cli_rsp = &rsp; -+ cli_rsp.setv_rsp.op_errno = op_errno; -+ cli_rsp.setv_rsp.volname = ""; -+ cli_rsp.setv_rsp.op_errstr = op_errstr ? op_errstr : ""; - sfunc = gf_xdr_serialize_cli_set_vol_rsp; - break; - } -@@ -213,55 +188,35 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret, - case GD_OP_RESET_VOLUME: - { - gf_log ("", GF_LOG_DEBUG, "Return value to CLI"); -- gf1_cli_reset_vol_rsp rsp = {0,}; -- rsp.op_ret = op_ret; -- rsp.op_errno = 1; -- rsp.volname = ""; -- if (op_errstr) -- rsp.op_errstr = op_errstr; -- else -- rsp.op_errstr = "Error while resetting options"; -- cli_rsp = &rsp; -+ cli_rsp.resetv_rsp.op_ret = op_ret; -+ cli_rsp.resetv_rsp.op_errno = 1; -+ cli_rsp.resetv_rsp.volname = ""; -+ cli_rsp.resetv_rsp.op_errstr = op_errstr ? op_errstr : "Error while resetting options"; - sfunc = gf_xdr_serialize_cli_reset_vol_rsp; - break; - } - - case GD_OP_LOG_FILENAME: - { -- gf1_cli_log_filename_rsp rsp = {0,}; -- rsp.op_ret = op_ret; -- rsp.op_errno = op_errno; -- if (op_errstr) -- rsp.errstr = op_errstr; -- else -- rsp.errstr = ""; -- cli_rsp = &rsp; -+ cli_rsp.logfn_rsp.op_ret = op_ret; -+ cli_rsp.logfn_rsp.op_errno = op_errno; -+ cli_rsp.logfn_rsp.errstr = op_errstr ? op_errstr : ""; - sfunc = gf_xdr_serialize_cli_log_filename_rsp; - break; - } - case GD_OP_LOG_ROTATE: - { -- gf1_cli_log_rotate_rsp rsp = {0,}; -- rsp.op_ret = op_ret; -- rsp.op_errno = op_errno; -- if (op_errstr) -- rsp.errstr = op_errstr; -- else -- rsp.errstr = ""; -- cli_rsp = &rsp; -+ cli_rsp.logrot_rsp.op_ret = op_ret; -+ cli_rsp.logrot_rsp.op_errno = op_errno; -+ cli_rsp.logrot_rsp.errstr = op_errstr ? op_errstr : ""; - sfunc = gf_xdr_serialize_cli_log_rotate_rsp; - break; - } - case GD_OP_SYNC_VOLUME: - { -- gf1_cli_sync_volume_rsp rsp = {0,}; -- rsp.op_ret = op_ret; -- rsp.op_errno = op_errno; -- if (op_errstr) -- rsp.op_errstr = op_errstr; -- else -- rsp.op_errstr = ""; -- cli_rsp = &rsp; -+ cli_rsp.syncv_rsp.op_ret = op_ret; -+ cli_rsp.syncv_rsp.op_errno = op_errno; -+ cli_rsp.syncv_rsp.op_errstr = op_errstr ? op_errstr : ""; - sfunc = gf_xdr_from_cli_sync_volume_rsp; - break; - } -@@ -273,72 +228,56 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret, - char *slave = NULL; - char *op_name = NULL; - char *subop = NULL; -- gf1_cli_gsync_set_rsp rsp = {0,}; - -+ cli_rsp.gsyncs_rsp.op_ret = op_ret; -+ cli_rsp.gsyncs_rsp.op_errno = op_errno; -+ cli_rsp.gsyncs_rsp.op_errstr = op_errstr ? 
op_errstr : ""; -+ cli_rsp.gsyncs_rsp.op_name = ""; -+ cli_rsp.gsyncs_rsp.subop = ""; -+ cli_rsp.gsyncs_rsp.master = ""; -+ cli_rsp.gsyncs_rsp.slave = ""; -+ cli_rsp.gsyncs_rsp.glusterd_workdir = conf->workdir; -+ cli_rsp.gsyncs_rsp.gsync_prefix = GSYNCD_PREFIX; - ctx = op_ctx; -- rsp.op_ret = op_ret; -- rsp.op_errno = op_errno; -- rsp.op_errstr = ""; -- rsp.op_name = ""; -- rsp.subop = ""; -- rsp.master = ""; -- rsp.slave = ""; -- rsp.glusterd_workdir = conf->workdir; -- rsp.gsync_prefix = GSYNCD_PREFIX; - if (ctx) { - ret = dict_get_str (ctx, "errstr", &str); - if (ret == 0) -- rsp.op_errstr = str; -+ cli_rsp.gsyncs_rsp.op_errstr = str; - ret = dict_get_int32 (ctx, "type", &type); - if (ret == 0) -- rsp.type = type; -+ cli_rsp.gsyncs_rsp.type = type; - ret = dict_get_str (ctx, "master", &master); - if (ret == 0) -- rsp.master = master; -+ cli_rsp.gsyncs_rsp.master = master; - - ret = dict_get_str (ctx, "slave", &slave); - if (ret == 0) -- rsp.slave = slave; -+ cli_rsp.gsyncs_rsp.slave = slave; - - if (type == GF_GSYNC_OPTION_TYPE_CONFIG) { - if (dict_get_str (ctx, "op_name", &op_name) == 0) -- rsp.op_name = op_name; -+ cli_rsp.gsyncs_rsp.op_name = op_name; - if (dict_get_str (ctx, "subop", &subop) == 0) -- rsp.subop = subop; -+ cli_rsp.gsyncs_rsp.subop = subop; - } - - ret = dict_allocate_and_serialize (ctx, -- &rsp.status_dict.status_dict_val, -- (size_t*)&rsp.status_dict.status_dict_len); -+ &cli_rsp.gsyncs_rsp.status_dict.status_dict_val, -+ (size_t*)&cli_rsp.gsyncs_rsp.status_dict.status_dict_len); - - if (ret == 0) -- free_ptr = rsp.status_dict.status_dict_val; -+ free_ptr = cli_rsp.gsyncs_rsp.status_dict.status_dict_val; - - } -- if (op_errstr) -- rsp.op_errstr = op_errstr; -- cli_rsp = &rsp; - sfunc = gf_xdr_serialize_cli_gsync_set_rsp; - break; - } -- case GD_OP_RENAME_VOLUME: -- case GD_OP_START_BRICK: -- case GD_OP_STOP_BRICK: -- case GD_OP_LOG_LOCATE: -- { -- gf_log ("", GF_LOG_DEBUG, "not supported op %d", op); -- break; -- } - case GD_OP_PROFILE_VOLUME: - { -- gf1_cli_stats_volume_rsp rsp = {0,}; - int32_t count = 0; -- rsp.op_ret = op_ret; -- rsp.op_errno = op_errno; -- if (op_errstr) -- rsp.op_errstr = op_errstr; -- else -- rsp.op_errstr = ""; -+ cli_rsp.statsv_rsp.op_ret = op_ret; -+ cli_rsp.statsv_rsp.op_errno = op_errno; -+ cli_rsp.statsv_rsp.op_errstr = op_errstr ? 
op_errstr : ""; - ctx = op_ctx; - if (dict_get_int32 (ctx, "count", &count)) { - ret = dict_set_int32 (ctx, "count", 0); -@@ -347,10 +286,9 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret, - "to set brick count."); - } - dict_allocate_and_serialize (ctx, -- &rsp.stats_info.stats_info_val, -- (size_t*)&rsp.stats_info.stats_info_len); -- free_ptr = rsp.stats_info.stats_info_val; -- cli_rsp = &rsp; -+ &cli_rsp.statsv_rsp.stats_info.stats_info_val, -+ (size_t*)&cli_rsp.statsv_rsp.stats_info.stats_info_len); -+ free_ptr = cli_rsp.statsv_rsp.stats_info.stats_info_val; - sfunc = gf_xdr_from_cli_stats_volume_rsp; - break; - } -@@ -360,49 +298,56 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret, - int32_t type; - char *str = NULL; - char *errstr = NULL; -- gf1_cli_quota_rsp rsp = {0,}; - -- rsp.op_ret = op_ret; -- rsp.op_errno = op_errno; -- rsp.volname = ""; -+ cli_rsp.quota_rsp.op_ret = op_ret; -+ cli_rsp.quota_rsp.op_errno = op_errno; -+ cli_rsp.quota_rsp.volname = ""; - - ctx = op_ctx; - - if (op_errstr) -- rsp.op_errstr = op_errstr; -+ cli_rsp.quota_rsp.op_errstr = op_errstr; - else { - ret = dict_get_str (ctx, "errstr", &errstr); - if (ret == 0) -- rsp.op_errstr = errstr; -+ cli_rsp.quota_rsp.op_errstr = errstr; - else -- rsp.op_errstr = ""; -+ cli_rsp.quota_rsp.op_errstr = ""; - } - -- rsp.limit_list = ""; -+ cli_rsp.quota_rsp.limit_list = ""; - - if (op_ret == 0 && ctx) { - ret = dict_get_str (ctx, "volname", &str); - if (ret == 0) -- rsp.volname = str; -+ cli_rsp.quota_rsp.volname = str; - - ret = dict_get_int32 (ctx, "type", &type); - if (ret == 0) -- rsp.type = type; -+ cli_rsp.quota_rsp.type = type; - else -- rsp.type = 0; -+ cli_rsp.quota_rsp.type = 0; - - if (type == GF_QUOTA_OPTION_TYPE_LIST) { - ret = dict_get_str (ctx,"limit_list", &str); - - if (ret == 0) -- rsp.limit_list = str; -+ cli_rsp.quota_rsp.limit_list = str; - } - } -- cli_rsp = &rsp; - sfunc = gf_xdr_serialize_cli_quota_rsp; - break; - } - -+ case GD_OP_RENAME_VOLUME: -+ case GD_OP_START_BRICK: -+ case GD_OP_STOP_BRICK: -+ case GD_OP_LOG_LOCATE: -+ { -+ gf_log ("", GF_LOG_DEBUG, "not supported op %d", op); -+ break; -+ } -+ - case GD_OP_NONE: - case GD_OP_MAX: - { -@@ -411,7 +356,7 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret, - } - } - -- ret = glusterd_submit_reply (req, cli_rsp, NULL, 0, NULL, -+ ret = glusterd_submit_reply (req, &cli_rsp, NULL, 0, NULL, - sfunc); - - if (free_ptr) diff --git a/glusterfs-3.3.0.cli.cli-rpc-ops.c.patch b/glusterfs-3.3.0.cli.cli-rpc-ops.c.patch deleted file mode 100644 index 3d7ca10..0000000 --- a/glusterfs-3.3.0.cli.cli-rpc-ops.c.patch +++ /dev/null @@ -1,20 +0,0 @@ -*** cli/src/cli-rpc-ops.c.orig 2012-07-18 13:16:43.720998232 -0400 ---- cli/src/cli-rpc-ops.c 2012-07-18 13:19:52.463999495 -0400 -*************** -*** 5832,5838 **** - if (!time) { - cli_out ("%s", path); - } else { -! tm = localtime ((time_t*)(&time)); - strftime (timestr, sizeof (timestr), - "%Y-%m-%d %H:%M:%S", tm); - if (i ==0) { ---- 5832,5839 ---- - if (!time) { - cli_out ("%s", path); - } else { -! time_t hack = time; -! 
tm = localtime (&hack); - strftime (timestr, sizeof (timestr), - "%Y-%m-%d %H:%M:%S", tm); - if (i ==0) { diff --git a/glusterfs-3.3.0.libglusterfs.Makefile.patch b/glusterfs-3.3.0.libglusterfs.Makefile.patch deleted file mode 100644 index bd9bde7..0000000 --- a/glusterfs-3.3.0.libglusterfs.Makefile.patch +++ /dev/null @@ -1,24 +0,0 @@ ---- libglusterfs/src/Makefile.am.orig 2012-05-17 12:30:57.000000000 -0400 -+++ libglusterfs/src/Makefile.am 2012-05-18 08:52:55.469998306 -0400 -@@ -44,7 +44,8 @@ - $(LEX) -t $(srcdir)/graph.l > $@ - - y.tab.h: graph.y -- $(YACC) -d $(srcdir)/graph.y -+ $(YACC) -d -b foo $(srcdir)/graph.y -+ mv foo.tab.h y.tab.h && mv foo.tab.c y.tab.c - - CLEANFILES = graph.lex.c y.tab.c y.tab.h - CONFIG_CLEAN_FILES = $(CONTRIB_BUILDDIR)/uuid/uuid_types.h ---- libglusterfs/src/Makefile.in.orig 2012-05-17 12:31:12.000000000 -0400 -+++ libglusterfs/src/Makefile.in 2012-05-18 09:02:31.108002281 -0400 -@@ -941,7 +941,8 @@ - $(LEX) -t $(srcdir)/graph.l > $@ - - y.tab.h: graph.y -- $(YACC) -d $(srcdir)/graph.y -+ $(YACC) -d -b foo $(srcdir)/graph.y -+ mv foo.tab.h y.tab.h && mv foo.tab.c y.tab.c - # Tell versions [3.59,3.63) of GNU make to not export all variables. - # Otherwise a system limit (for SysV at least) may be exceeded. - .NOEXPORT: diff --git a/glusterfs-3.3.0.swift.patch b/glusterfs-3.3.0.swift.patch deleted file mode 100644 index 8ed5070..0000000 --- a/glusterfs-3.3.0.swift.patch +++ /dev/null @@ -1,797 +0,0 @@ -diff --git a/setup.py b/setup.py -index d195d34..b5b5ca2 100644 ---- a/setup.py -+++ b/setup.py -@@ -1,5 +1,6 @@ - #!/usr/bin/python - # Copyright (c) 2010-2012 OpenStack, LLC. -+# Copyright (c) 2011 Red Hat, Inc. - # - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. -@@ -94,6 +95,7 @@ setup( - 'tempurl=swift.common.middleware.tempurl:filter_factory', - 'formpost=swift.common.middleware.formpost:filter_factory', - 'name_check=swift.common.middleware.name_check:filter_factory', -+ 'gluster=swift.common.middleware.gluster:filter_factory', - ], - }, - ) -diff --git a/swift/account/server.py b/swift/account/server.py -index 800b3c0..cb17970 100644 ---- a/swift/account/server.py -+++ b/swift/account/server.py -@@ -1,4 +1,5 @@ - # Copyright (c) 2010-2012 OpenStack, LLC. -+# Copyright (c) 2011 Red Hat, Inc. - # - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. -@@ -31,7 +32,7 @@ import simplejson - - from swift.common.db import AccountBroker - from swift.common.utils import get_logger, get_param, hash_path, \ -- normalize_timestamp, split_path, storage_directory -+ normalize_timestamp, split_path, storage_directory, plugin_enabled - from swift.common.constraints import ACCOUNT_LISTING_LIMIT, \ - check_mount, check_float, check_utf8 - from swift.common.db_replicator import ReplicatorRpc -@@ -39,6 +40,8 @@ from swift.common.db_replicator import ReplicatorRpc - - DATADIR = 'accounts' - -+if plugin_enabled(): -+ from swift.plugins.DiskDir import DiskAccount - - class AccountController(object): - """WSGI controller for the account server.""" -@@ -52,8 +55,12 @@ class AccountController(object): - self.mount_check, logger=self.logger) - self.auto_create_account_prefix = \ - conf.get('auto_create_account_prefix') or '.' 
-+ self.fs_object = None - - def _get_account_broker(self, drive, part, account): -+ if self.fs_object: -+ return DiskAccount(self.root, account, self.fs_object); -+ - hsh = hash_path(account) - db_dir = storage_directory(DATADIR, part, hsh) - db_path = os.path.join(self.root, drive, db_dir, hsh + '.db') -@@ -121,9 +128,15 @@ class AccountController(object): - if broker.is_deleted(): - return HTTPConflict(request=req) - metadata = {} -- metadata.update((key, (value, timestamp)) -- for key, value in req.headers.iteritems() -- if key.lower().startswith('x-account-meta-')) -+ if not self.fs_object: -+ metadata.update((key, (value, timestamp)) -+ for key, value in req.headers.iteritems() -+ if key.lower().startswith('x-account-meta-')) -+ else: -+ metadata.update((key, value) -+ for key, value in req.headers.iteritems() -+ if key.lower().startswith('x-account-meta-')) -+ - if metadata: - broker.update_metadata(metadata) - if created: -@@ -153,6 +166,9 @@ class AccountController(object): - broker.stale_reads_ok = True - if broker.is_deleted(): - return HTTPNotFound(request=req) -+ if self.fs_object and not self.fs_object.object_only: -+ broker.list_containers_iter(None, None,None, -+ None, None) - info = broker.get_info() - headers = { - 'X-Account-Container-Count': info['container_count'], -@@ -164,9 +180,16 @@ class AccountController(object): - container_ts = broker.get_container_timestamp(container) - if container_ts is not None: - headers['X-Container-Timestamp'] = container_ts -- headers.update((key, value) -- for key, (value, timestamp) in broker.metadata.iteritems() -- if value != '') -+ if not self.fs_object: -+ headers.update((key, value) -+ for key, (value, timestamp) in broker.metadata.iteritems() -+ if value != '') -+ else: -+ headers.update((key, value) -+ for key, value in broker.metadata.iteritems() -+ if value != '') -+ -+ - return HTTPNoContent(request=req, headers=headers) - - def GET(self, req): -@@ -190,9 +213,15 @@ class AccountController(object): - 'X-Account-Bytes-Used': info['bytes_used'], - 'X-Timestamp': info['created_at'], - 'X-PUT-Timestamp': info['put_timestamp']} -- resp_headers.update((key, value) -- for key, (value, timestamp) in broker.metadata.iteritems() -- if value != '') -+ if not self.fs_object: -+ resp_headers.update((key, value) -+ for key, (value, timestamp) in broker.metadata.iteritems() -+ if value != '') -+ else: -+ resp_headers.update((key, value) -+ for key, value in broker.metadata.iteritems() -+ if value != '') -+ - try: - prefix = get_param(req, 'prefix') - delimiter = get_param(req, 'delimiter') -@@ -224,6 +253,7 @@ class AccountController(object): - content_type='text/plain', request=req) - account_list = broker.list_containers_iter(limit, marker, end_marker, - prefix, delimiter) -+ - if out_content_type == 'application/json': - json_pattern = ['"name":%s', '"count":%s', '"bytes":%s'] - json_pattern = '{' + ','.join(json_pattern) + '}' -@@ -298,15 +328,29 @@ class AccountController(object): - return HTTPNotFound(request=req) - timestamp = normalize_timestamp(req.headers['x-timestamp']) - metadata = {} -- metadata.update((key, (value, timestamp)) -- for key, value in req.headers.iteritems() -- if key.lower().startswith('x-account-meta-')) -+ if not self.fs_object: -+ metadata.update((key, (value, timestamp)) -+ for key, value in req.headers.iteritems() -+ if key.lower().startswith('x-account-meta-')) -+ else: -+ metadata.update((key, value) -+ for key, value in req.headers.iteritems() -+ if key.lower().startswith('x-account-meta-')) - if 
metadata: - broker.update_metadata(metadata) - return HTTPNoContent(request=req) - -+ def plugin(self, env): -+ if env.get('Gluster_enabled', False): -+ self.fs_object = env.get('fs_object') -+ self.root = env.get('root') -+ self.mount_check = False -+ else: -+ self.fs_object = None -+ - def __call__(self, env, start_response): - start_time = time.time() -+ self.plugin(env) - req = Request(env) - self.logger.txn_id = req.headers.get('x-trans-id', None) - if not check_utf8(req.path_info): -diff --git a/swift/common/middleware/gluster.py b/swift/common/middleware/gluster.py -new file mode 100644 -index 0000000..341285d ---- /dev/null -+++ b/swift/common/middleware/gluster.py -@@ -0,0 +1,55 @@ -+# Copyright (c) 2011 Red Hat, Inc. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); -+# you may not use this file except in compliance with the License. -+# You may obtain a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, -+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -+# implied. -+# See the License for the specific language governing permissions and -+# limitations under the License. -+ -+from swift.common.utils import get_logger, plugin_enabled -+from swift import plugins -+from ConfigParser import ConfigParser -+ -+class Gluster_plugin(object): -+ """ -+ Update the environment with keys that reflect Gluster_plugin enabled -+ """ -+ -+ def __init__(self, app, conf): -+ self.app = app -+ self.conf = conf -+ self.fs_name = 'Glusterfs' -+ self.logger = get_logger(conf, log_route='gluster') -+ -+ def __call__(self, env, start_response): -+ if not plugin_enabled(): -+ return self.app(env, start_response) -+ env['Gluster_enabled'] =True -+ fs_object = getattr(plugins, self.fs_name, False) -+ if not fs_object: -+ raise Exception('%s plugin not found', self.fs_name) -+ -+ env['fs_object'] = fs_object() -+ fs_conf = ConfigParser() -+ if fs_conf.read('/etc/swift/fs.conf'): -+ try: -+ env['root'] = fs_conf.get ('DEFAULT', 'mount_path') -+ except NoSectionError, NoOptionError: -+ self.logger.exception(_('ERROR mount_path not present')) -+ return self.app(env, start_response) -+ -+def filter_factory(global_conf, **local_conf): -+ """Returns a WSGI filter app for use with paste.deploy.""" -+ conf = global_conf.copy() -+ conf.update(local_conf) -+ -+ def gluster_filter(app): -+ return Gluster_plugin(app, conf) -+ return gluster_filter -diff --git a/swift/common/utils.py b/swift/common/utils.py -index 47edce8..03701ce 100644 ---- a/swift/common/utils.py -+++ b/swift/common/utils.py -@@ -1,4 +1,5 @@ - # Copyright (c) 2010-2012 OpenStack, LLC. -+# Copyright (c) 2011 Red Hat, Inc. - # - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. 
-@@ -1138,3 +1139,11 @@ def streq_const_time(s1, s2): - for (a, b) in zip(s1, s2): - result |= ord(a) ^ ord(b) - return result == 0 -+ -+def plugin_enabled(): -+ swift_conf = ConfigParser() -+ swift_conf.read(os.path.join('/etc/swift', 'swift.conf')) -+ try: -+ return swift_conf.get('DEFAULT', 'Enable_plugin', 'no') in TRUE_VALUES -+ except NoOptionError, NoSectionError: -+ return False -diff --git a/swift/container/server.py b/swift/container/server.py -index 8a18cfd..93943a3 100644 ---- a/swift/container/server.py -+++ b/swift/container/server.py -@@ -1,4 +1,5 @@ - # Copyright (c) 2010-2012 OpenStack, LLC. -+# Copyright (c) 2011 Red Hat, Inc. - # - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. -@@ -31,7 +32,8 @@ from webob.exc import HTTPAccepted, HTTPBadRequest, HTTPConflict, \ - - from swift.common.db import ContainerBroker - from swift.common.utils import get_logger, get_param, hash_path, \ -- normalize_timestamp, storage_directory, split_path, validate_sync_to -+ normalize_timestamp, storage_directory, split_path, validate_sync_to, \ -+ plugin_enabled - from swift.common.constraints import CONTAINER_LISTING_LIMIT, \ - check_mount, check_float, check_utf8 - from swift.common.bufferedhttp import http_connect -@@ -40,6 +42,9 @@ from swift.common.db_replicator import ReplicatorRpc - - DATADIR = 'containers' - -+if plugin_enabled(): -+ from swift.plugins.DiskDir import DiskDir -+ - - class ContainerController(object): - """WSGI Controller for the container server.""" -@@ -62,6 +67,7 @@ class ContainerController(object): - ContainerBroker, self.mount_check, logger=self.logger) - self.auto_create_account_prefix = \ - conf.get('auto_create_account_prefix') or '.' -+ self.fs_object = None - - def _get_container_broker(self, drive, part, account, container): - """ -@@ -73,6 +79,11 @@ class ContainerController(object): - :param container: container name - :returns: ContainerBroker object - """ -+ if self.fs_object: -+ return DiskDir(self.root, drive, part, account, -+ container, self.logger, -+ fs_object = self.fs_object) -+ - hsh = hash_path(account, container) - db_dir = storage_directory(DATADIR, part, hsh) - db_path = os.path.join(self.root, drive, db_dir, hsh + '.db') -@@ -211,10 +222,18 @@ class ContainerController(object): - if broker.is_deleted(): - return HTTPConflict(request=req) - metadata = {} -- metadata.update((key, (value, timestamp)) -- for key, value in req.headers.iteritems() -- if key.lower() in self.save_headers or -- key.lower().startswith('x-container-meta-')) -+ #Note: check the structure of req.headers -+ if not self.fs_object: -+ metadata.update((key, (value, timestamp)) -+ for key, value in req.headers.iteritems() -+ if key.lower() in self.save_headers or -+ key.lower().startswith('x-container-meta-')) -+ else: -+ metadata.update((key, value) -+ for key, value in req.headers.iteritems() -+ if key.lower() in self.save_headers or -+ key.lower().startswith('x-container-meta-')) -+ - if metadata: - if 'X-Container-Sync-To' in metadata: - if 'X-Container-Sync-To' not in broker.metadata or \ -@@ -222,6 +241,7 @@ class ContainerController(object): - broker.metadata['X-Container-Sync-To'][0]: - broker.set_x_container_sync_points(-1, -1) - broker.update_metadata(metadata) -+ - resp = self.account_update(req, account, container, broker) - if resp: - return resp -@@ -245,6 +265,11 @@ class ContainerController(object): - broker.stale_reads_ok = True - if broker.is_deleted(): - return 
HTTPNotFound(request=req) -+ -+ if self.fs_object and not self.fs_object.object_only: -+ broker.list_objects_iter(None, None, None, None, -+ None, None) -+ - info = broker.get_info() - headers = { - 'X-Container-Object-Count': info['object_count'], -@@ -252,10 +277,17 @@ class ContainerController(object): - 'X-Timestamp': info['created_at'], - 'X-PUT-Timestamp': info['put_timestamp'], - } -- headers.update((key, value) -- for key, (value, timestamp) in broker.metadata.iteritems() -- if value != '' and (key.lower() in self.save_headers or -- key.lower().startswith('x-container-meta-'))) -+ if not self.fs_object: -+ headers.update((key, value) -+ for key, (value, timestamp) in broker.metadata.iteritems() -+ if value != '' and (key.lower() in self.save_headers or -+ key.lower().startswith('x-container-meta-'))) -+ else: -+ headers.update((key, value) -+ for key, value in broker.metadata.iteritems() -+ if value != '' and (key.lower() in self.save_headers or -+ key.lower().startswith('x-container-meta-'))) -+ - return HTTPNoContent(request=req, headers=headers) - - def GET(self, req): -@@ -268,6 +300,7 @@ class ContainerController(object): - request=req) - if self.mount_check and not check_mount(self.root, drive): - return Response(status='507 %s is not mounted' % drive) -+ - broker = self._get_container_broker(drive, part, account, container) - broker.pending_timeout = 0.1 - broker.stale_reads_ok = True -@@ -280,10 +313,17 @@ class ContainerController(object): - 'X-Timestamp': info['created_at'], - 'X-PUT-Timestamp': info['put_timestamp'], - } -- resp_headers.update((key, value) -- for key, (value, timestamp) in broker.metadata.iteritems() -- if value != '' and (key.lower() in self.save_headers or -- key.lower().startswith('x-container-meta-'))) -+ if not self.fs_object: -+ resp_headers.update((key, value) -+ for key, (value, timestamp) in broker.metadata.iteritems() -+ if value != '' and (key.lower() in self.save_headers or -+ key.lower().startswith('x-container-meta-'))) -+ else: -+ resp_headers.update((key, value) -+ for key, value in broker.metadata.iteritems() -+ if value != '' and (key.lower() in self.save_headers or -+ key.lower().startswith('x-container-meta-'))) -+ - try: - path = get_param(req, 'path') - prefix = get_param(req, 'prefix') -@@ -414,10 +454,17 @@ class ContainerController(object): - return HTTPNotFound(request=req) - timestamp = normalize_timestamp(req.headers['x-timestamp']) - metadata = {} -- metadata.update((key, (value, timestamp)) -- for key, value in req.headers.iteritems() -- if key.lower() in self.save_headers or -- key.lower().startswith('x-container-meta-')) -+ if not self.fs_object: -+ metadata.update((key, (value, timestamp)) -+ for key, value in req.headers.iteritems() -+ if key.lower() in self.save_headers or -+ key.lower().startswith('x-container-meta-')) -+ else: -+ metadata.update((key, value) -+ for key, value in req.headers.iteritems() -+ if key.lower() in self.save_headers or -+ key.lower().startswith('x-container-meta-')) -+ - if metadata: - if 'X-Container-Sync-To' in metadata: - if 'X-Container-Sync-To' not in broker.metadata or \ -@@ -427,8 +474,19 @@ class ContainerController(object): - broker.update_metadata(metadata) - return HTTPNoContent(request=req) - -+ def plugin(self, env): -+ if env.get('Gluster_enabled', False): -+ self.fs_object = env.get('fs_object') -+ if not self.fs_object: -+ raise NoneTypeError -+ self.root = env.get('root') -+ self.mount_check = False -+ else: -+ self.fs_object = None -+ - def __call__(self, env, 
start_response): - start_time = time.time() -+ self.plugin(env) - req = Request(env) - self.logger.txn_id = req.headers.get('x-trans-id', None) - if not check_utf8(req.path_info): -diff --git a/swift/obj/server.py b/swift/obj/server.py -index 9cca16b..a45daff 100644 ---- a/swift/obj/server.py -+++ b/swift/obj/server.py -@@ -1,4 +1,5 @@ - # Copyright (c) 2010-2012 OpenStack, LLC. -+# Copyright (c) 2011 Red Hat, Inc. - # - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. -@@ -26,6 +27,7 @@ from hashlib import md5 - from tempfile import mkstemp - from urllib import unquote - from contextlib import contextmanager -+from ConfigParser import ConfigParser - - from webob import Request, Response, UTC - from webob.exc import HTTPAccepted, HTTPBadRequest, HTTPCreated, \ -@@ -37,16 +39,23 @@ from eventlet import sleep, Timeout, tpool - - from swift.common.utils import mkdirs, normalize_timestamp, \ - storage_directory, hash_path, renamer, fallocate, \ -- split_path, drop_buffer_cache, get_logger, write_pickle -+ split_path, drop_buffer_cache, get_logger, write_pickle, \ -+ plugin_enabled - from swift.common.bufferedhttp import http_connect --from swift.common.constraints import check_object_creation, check_mount, \ -- check_float, check_utf8 -+if plugin_enabled(): -+ from swift.plugins.constraints import check_object_creation -+ from swift.plugins.utils import X_TYPE, X_OBJECT_TYPE, FILE, DIR, MARKER_DIR, \ -+ OBJECT, DIR_TYPE, FILE_TYPE -+else: -+ from swift.common.constraints import check_object_creation -+ -+from swift.common.constraints import check_mount, check_float, check_utf8 -+ - from swift.common.exceptions import ConnectionTimeout, DiskFileError, \ - DiskFileNotExist - from swift.obj.replicator import tpooled_get_hashes, invalidate_hash, \ - quarantine_renamer - -- - DATADIR = 'objects' - ASYNCDIR = 'async_pending' - PICKLE_PROTOCOL = 2 -@@ -339,6 +348,9 @@ class DiskFile(object): - raise - raise DiskFileNotExist('Data File does not exist.') - -+if plugin_enabled(): -+ from swift.plugins.DiskFile import Gluster_DiskFile -+ - - class ObjectController(object): - """Implements the WSGI application for the Swift Object Server.""" -@@ -377,6 +389,17 @@ class ObjectController(object): - 'expiring_objects' - self.expiring_objects_container_divisor = \ - int(conf.get('expiring_objects_container_divisor') or 86400) -+ self.fs_object = None -+ -+ def get_DiskFile_obj(self, path, device, partition, account, container, obj, -+ logger, keep_data_fp=False, disk_chunk_size=65536): -+ if self.fs_object: -+ return Gluster_DiskFile(path, device, partition, account, container, -+ obj, logger, keep_data_fp, -+ disk_chunk_size, fs_object = self.fs_object); -+ else: -+ return DiskFile(path, device, partition, account, container, -+ obj, logger, keep_data_fp, disk_chunk_size) - - def async_update(self, op, account, container, obj, host, partition, - contdevice, headers_out, objdevice): -@@ -493,7 +516,7 @@ class ObjectController(object): - content_type='text/plain') - if self.mount_check and not check_mount(self.devices, device): - return Response(status='507 %s is not mounted' % device) -- file = DiskFile(self.devices, device, partition, account, container, -+ file = self.get_DiskFile_obj(self.devices, device, partition, account, container, - obj, self.logger, disk_chunk_size=self.disk_chunk_size) - - if 'X-Delete-At' in file.metadata and \ -@@ -548,7 +571,7 @@ class ObjectController(object): - if new_delete_at and 
new_delete_at < time.time(): - return HTTPBadRequest(body='X-Delete-At in past', request=request, - content_type='text/plain') -- file = DiskFile(self.devices, device, partition, account, container, -+ file = self.get_DiskFile_obj(self.devices, device, partition, account, container, - obj, self.logger, disk_chunk_size=self.disk_chunk_size) - orig_timestamp = file.metadata.get('X-Timestamp') - upload_expiration = time.time() + self.max_upload_time -@@ -580,12 +603,29 @@ class ObjectController(object): - if 'etag' in request.headers and \ - request.headers['etag'].lower() != etag: - return HTTPUnprocessableEntity(request=request) -- metadata = { -- 'X-Timestamp': request.headers['x-timestamp'], -- 'Content-Type': request.headers['content-type'], -- 'ETag': etag, -- 'Content-Length': str(os.fstat(fd).st_size), -- } -+ content_type = request.headers['content-type'] -+ if self.fs_object and not content_type: -+ content_type = FILE_TYPE -+ if not self.fs_object: -+ metadata = { -+ 'X-Timestamp': request.headers['x-timestamp'], -+ 'Content-Type': request.headers['content-type'], -+ 'ETag': etag, -+ 'Content-Length': str(os.fstat(fd).st_size), -+ } -+ else: -+ metadata = { -+ 'X-Timestamp': request.headers['x-timestamp'], -+ 'Content-Type': request.headers['content-type'], -+ 'ETag': etag, -+ 'Content-Length': str(os.fstat(fd).st_size), -+ X_TYPE: OBJECT, -+ X_OBJECT_TYPE: FILE, -+ } -+ -+ if self.fs_object and \ -+ request.headers['content-type'].lower() == DIR_TYPE: -+ metadata.update({X_OBJECT_TYPE: MARKER_DIR}) - metadata.update(val for val in request.headers.iteritems() - if val[0].lower().startswith('x-object-meta-') and - len(val[0]) > 14) -@@ -612,7 +652,7 @@ class ObjectController(object): - 'x-timestamp': file.metadata['X-Timestamp'], - 'x-etag': file.metadata['ETag'], - 'x-trans-id': request.headers.get('x-trans-id', '-')}, -- device) -+ (self.fs_object and account) or device) - resp = HTTPCreated(request=request, etag=etag) - return resp - -@@ -626,9 +666,9 @@ class ObjectController(object): - content_type='text/plain') - if self.mount_check and not check_mount(self.devices, device): - return Response(status='507 %s is not mounted' % device) -- file = DiskFile(self.devices, device, partition, account, container, -- obj, self.logger, keep_data_fp=True, -- disk_chunk_size=self.disk_chunk_size) -+ file = self.get_DiskFile_obj(self.devices, device, partition, account, container, -+ obj, self.logger, keep_data_fp=True, -+ disk_chunk_size=self.disk_chunk_size) - if file.is_deleted() or ('X-Delete-At' in file.metadata and - int(file.metadata['X-Delete-At']) <= time.time()): - if request.headers.get('if-match') == '*': -@@ -702,7 +742,7 @@ class ObjectController(object): - return resp - if self.mount_check and not check_mount(self.devices, device): - return Response(status='507 %s is not mounted' % device) -- file = DiskFile(self.devices, device, partition, account, container, -+ file = self.get_DiskFile_obj(self.devices, device, partition, account, container, - obj, self.logger, disk_chunk_size=self.disk_chunk_size) - if file.is_deleted() or ('X-Delete-At' in file.metadata and - int(file.metadata['X-Delete-At']) <= time.time()): -@@ -744,7 +784,7 @@ class ObjectController(object): - if self.mount_check and not check_mount(self.devices, device): - return Response(status='507 %s is not mounted' % device) - response_class = HTTPNoContent -- file = DiskFile(self.devices, device, partition, account, container, -+ file = self.get_DiskFile_obj(self.devices, device, partition, account, container, - 
obj, self.logger, disk_chunk_size=self.disk_chunk_size) - if 'x-if-delete-at' in request.headers and \ - int(request.headers['x-if-delete-at']) != \ -@@ -797,9 +837,18 @@ class ObjectController(object): - raise hashes - return Response(body=pickle.dumps(hashes)) - -+ def plugin(self, env): -+ if env.get('Gluster_enabled', False): -+ self.fs_object = env.get('fs_object') -+ self.devices = env.get('root') -+ self.mount_check = False -+ else: -+ self.fs_object = None -+ - def __call__(self, env, start_response): - """WSGI Application entry point for the Swift Object Server.""" - start_time = time.time() -+ self.plugin(env) - req = Request(env) - self.logger.txn_id = req.headers.get('x-trans-id', None) - if not check_utf8(req.path_info): -diff --git a/swift/proxy/server.py b/swift/proxy/server.py -index 17613b8..d277d28 100644 ---- a/swift/proxy/server.py -+++ b/swift/proxy/server.py -@@ -1,4 +1,5 @@ - # Copyright (c) 2010-2012 OpenStack, LLC. -+# Copyright (c) 2011 Red Hat, Inc. - # - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. -@@ -53,11 +54,20 @@ from webob import Request, Response - - from swift.common.ring import Ring - from swift.common.utils import cache_from_env, ContextPool, get_logger, \ -- get_remote_client, normalize_timestamp, split_path, TRUE_VALUES -+ get_remote_client, normalize_timestamp, split_path, TRUE_VALUES, \ -+ plugin_enabled - from swift.common.bufferedhttp import http_connect --from swift.common.constraints import check_metadata, check_object_creation, \ -- check_utf8, CONTAINER_LISTING_LIMIT, MAX_ACCOUNT_NAME_LENGTH, \ -- MAX_CONTAINER_NAME_LENGTH, MAX_FILE_SIZE -+ -+if plugin_enabled(): -+ from swift.plugins.constraints import check_object_creation, \ -+ MAX_ACCOUNT_NAME_LENGTH, MAX_CONTAINER_NAME_LENGTH, MAX_FILE_SIZE -+else: -+ from swift.common.constraints import check_object_creation, \ -+ MAX_ACCOUNT_NAME_LENGTH, MAX_CONTAINER_NAME_LENGTH, MAX_FILE_SIZE -+ -+from swift.common.constraints import check_metadata, check_utf8, \ -+ CONTAINER_LISTING_LIMIT -+ - from swift.common.exceptions import ChunkReadTimeout, \ - ChunkWriteTimeout, ConnectionTimeout - -diff --git a/test/__init__.py b/test/__init__.py -index ef2ce31..363a051 100644 ---- a/test/__init__.py -+++ b/test/__init__.py -@@ -6,8 +6,16 @@ import sys - import os - from ConfigParser import MissingSectionHeaderError - from StringIO import StringIO -- - from swift.common.utils import readconf -+from swift.common.utils import plugin_enabled -+if plugin_enabled(): -+ from swift.plugins.constraints import MAX_OBJECT_NAME_LENGTH, \ -+ MAX_CONTAINER_NAME_LENGTH, MAX_ACCOUNT_NAME_LENGTH, \ -+ MAX_FILE_SIZE -+else: -+ from swift.common.constraints import MAX_OBJECT_NAME_LENGTH, \ -+ MAX_CONTAINER_NAME_LENGTH, MAX_ACCOUNT_NAME_LENGTH, \ -+ MAX_FILE_SIZE - - setattr(__builtin__, '_', lambda x: x) - -diff --git a/test/functional/tests.py b/test/functional/tests.py -index b25b4fd..8d12f58 100644 ---- a/test/functional/tests.py -+++ b/test/functional/tests.py -@@ -31,6 +31,16 @@ import urllib - from test import get_config - from swift import Account, AuthenticationFailed, Connection, Container, \ - File, ResponseError -+from test import plugin_enabled -+if plugin_enabled(): -+ from test import MAX_OBJECT_NAME_LENGTH, \ -+ MAX_CONTAINER_NAME_LENGTH, MAX_ACCOUNT_NAME_LENGTH, \ -+ MAX_FILE_SIZE -+else: -+ from test import MAX_OBJECT_NAME_LENGTH, \ -+ MAX_CONTAINER_NAME_LENGTH, MAX_ACCOUNT_NAME_LENGTH, \ -+ MAX_FILE_SIZE -+ - - config 
= get_config() - -@@ -361,7 +371,7 @@ class TestContainer(Base): - set_up = False - - def testContainerNameLimit(self): -- limit = 256 -+ limit = MAX_CONTAINER_NAME_LENGTH - - for l in (limit-100, limit-10, limit-1, limit, - limit+1, limit+10, limit+100): -@@ -949,7 +959,7 @@ class TestFile(Base): - self.assert_status(404) - - def testNameLimit(self): -- limit = 1024 -+ limit = MAX_OBJECT_NAME_LENGTH - - for l in (1, 10, limit/2, limit-1, limit, limit+1, limit*2): - file = self.env.container.file('a'*l) -@@ -1093,7 +1103,7 @@ class TestFile(Base): - self.assert_(file.read(hdrs={'Range': r}) == data[0:1000]) - - def testFileSizeLimit(self): -- limit = 5*2**30 + 2 -+ limit = MAX_FILE_SIZE - tsecs = 3 - - for i in (limit-100, limit-10, limit-1, limit, limit+1, limit+10, -diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py -index 075700e..5b6f32d 100644 ---- a/test/unit/obj/test_server.py -+++ b/test/unit/obj/test_server.py -@@ -1355,7 +1355,7 @@ class TestObjectController(unittest.TestCase): - - def test_max_object_name_length(self): - timestamp = normalize_timestamp(time()) -- req = Request.blank('/sda1/p/a/c/' + ('1' * 1024), -+ req = Request.blank('/sda1/p/a/c/' + ('1' * MAX_OBJECT_NAME_LENGTH), - environ={'REQUEST_METHOD': 'PUT'}, - headers={'X-Timestamp': timestamp, - 'Content-Length': '4', -diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py -index 364370e..c17fe59 100644 ---- a/test/unit/proxy/test_server.py -+++ b/test/unit/proxy/test_server.py -@@ -21,7 +21,6 @@ import os - import sys - import unittest - from nose import SkipTest --from ConfigParser import ConfigParser - from contextlib import contextmanager - from cStringIO import StringIO - from gzip import GzipFile -@@ -44,8 +43,18 @@ from swift.account import server as account_server - from swift.container import server as container_server - from swift.obj import server as object_server - from swift.common import ring --from swift.common.constraints import MAX_META_NAME_LENGTH, \ -- MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE, MAX_FILE_SIZE -+from swift.common.utils import plugin_enabled -+if plugin_enabled(): -+ from swift.plugins.constraints import MAX_META_NAME_LENGTH, \ -+ MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE, \ -+ MAX_FILE_SIZE, MAX_ACCOUNT_NAME_LENGTH, MAX_CONTAINER_NAME_LENGTH, \ -+ MAX_OBJECT_NAME_LENGTH -+else: -+ from swift.plugins.constraints import MAX_META_NAME_LENGTH, \ -+ MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE, \ -+ MAX_FILE_SIZE, MAX_ACCOUNT_NAME_LENGTH, MAX_CONTAINER_NAME_LENGTH, \ -+ MAX_OBJECT_NAME_LENGTH -+ - from swift.common.utils import mkdirs, normalize_timestamp, NullLogger - from swift.common.wsgi import monkey_patch_mimetools - -@@ -3207,7 +3216,8 @@ class TestContainerController(unittest.TestCase): - def test_PUT_max_container_name_length(self): - with save_globals(): - controller = proxy_server.ContainerController(self.app, 'account', -- '1' * 256) -+ '1' * -+ MAX_CONTAINER_NAME_LENGTH,) - self.assert_status_map(controller.PUT, - (200, 200, 200, 201, 201, 201), 201, - missing_container=True) -@@ -3813,7 +3823,8 @@ class TestAccountController(unittest.TestCase): - def test_PUT_max_account_name_length(self): - with save_globals(): - self.app.allow_account_management = True -- controller = proxy_server.AccountController(self.app, '1' * 256) -+ controller = proxy_server.AccountController(self.app, '1' * -+ MAX_ACCOUNT_NAME_LENGTH) - self.assert_status_map(controller.PUT, (201, 201, 201), 201) - 
controller = proxy_server.AccountController(self.app, '2' * 257) - self.assert_status_map(controller.PUT, (201, 201, 201), 400) diff --git a/glusterfs-3.3.0.xlator.mount.fuse.fuse-bridge.c.patch b/glusterfs-3.3.0.xlator.mount.fuse.fuse-bridge.c.patch deleted file mode 100644 index 13fbb58..0000000 --- a/glusterfs-3.3.0.xlator.mount.fuse.fuse-bridge.c.patch +++ /dev/null @@ -1,76 +0,0 @@ ---- xlators/mount/fuse/src/fuse-bridge.c.orig -+++ xlators/mount/fuse/src/fuse-bridge.c -@@ -4198,13 +4198,11 @@ fuse_thread_proc (void *data) - finh->uid == priv->uid_map_root) - finh->uid = 0; - --#ifdef GF_DARWIN_HOST_OS - if (finh->opcode >= FUSE_OP_HIGH) - /* turn down MacFUSE specific messages */ - fuse_enosys (this, finh, msg); - else --#endif -- fuse_ops[finh->opcode] (this, finh, msg); -+ fuse_ops[finh->opcode] (this, finh, msg); - - iobuf_unref (iobuf); - continue; -@@ -4423,40 +4421,47 @@ mem_acct_init (xlator_t *this) - - - static fuse_handler_t *fuse_std_ops[FUSE_OP_HIGH] = { -- [FUSE_INIT] = fuse_init, -- [FUSE_DESTROY] = fuse_destroy, - [FUSE_LOOKUP] = fuse_lookup, - [FUSE_FORGET] = fuse_forget, - [FUSE_GETATTR] = fuse_getattr, - [FUSE_SETATTR] = fuse_setattr, -- [FUSE_OPENDIR] = fuse_opendir, -- [FUSE_READDIR] = fuse_readdir, -- [FUSE_RELEASEDIR] = fuse_releasedir, -- [FUSE_ACCESS] = fuse_access, - [FUSE_READLINK] = fuse_readlink, -+ [FUSE_SYMLINK] = fuse_symlink, - [FUSE_MKNOD] = fuse_mknod, - [FUSE_MKDIR] = fuse_mkdir, - [FUSE_UNLINK] = fuse_unlink, - [FUSE_RMDIR] = fuse_rmdir, -- [FUSE_SYMLINK] = fuse_symlink, - [FUSE_RENAME] = fuse_rename, - [FUSE_LINK] = fuse_link, -- [FUSE_CREATE] = fuse_create, - [FUSE_OPEN] = fuse_open, - [FUSE_READ] = fuse_readv, - [FUSE_WRITE] = fuse_write, -- [FUSE_FLUSH] = fuse_flush, -+ [FUSE_STATFS] = fuse_statfs, - [FUSE_RELEASE] = fuse_release, - [FUSE_FSYNC] = fuse_fsync, -- [FUSE_FSYNCDIR] = fuse_fsyncdir, -- [FUSE_STATFS] = fuse_statfs, - [FUSE_SETXATTR] = fuse_setxattr, - [FUSE_GETXATTR] = fuse_getxattr, - [FUSE_LISTXATTR] = fuse_listxattr, - [FUSE_REMOVEXATTR] = fuse_removexattr, -+ [FUSE_FLUSH] = fuse_flush, -+ [FUSE_INIT] = fuse_init, -+ [FUSE_OPENDIR] = fuse_opendir, -+ [FUSE_READDIR] = fuse_readdir, -+ [FUSE_RELEASEDIR] = fuse_releasedir, -+ [FUSE_FSYNCDIR] = fuse_fsyncdir, - [FUSE_GETLK] = fuse_getlk, - [FUSE_SETLK] = fuse_setlk, - [FUSE_SETLKW] = fuse_setlk, -+ [FUSE_ACCESS] = fuse_access, -+ [FUSE_CREATE] = fuse_create, -+ /* [FUSE_INTERRUPT] */ -+ /* [FUSE_BMAP] */ -+ [FUSE_DESTROY] = fuse_destroy, -+ /* [FUSE_IOCTL] */ -+ /* [FUSE_POLL] */ -+ /* [FUSE_NOTIFY_REPLY] */ -+ /* [FUSE_BATCH_FORGET] */ -+ /* [FUSE_FALLOCATE] */ - }; - - diff --git a/glusterfs-3.3.1.rpc.rpcxprt.rdma.name.c.patch b/glusterfs-3.3.1.rpc.rpcxprt.rdma.name.c.patch deleted file mode 100644 index 67367ae..0000000 --- a/glusterfs-3.3.1.rpc.rpcxprt.rdma.name.c.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- a/rpc/rpc-transport/rdma/src/name.c 2012-10-11 13:38:22.000000000 -0400 -+++ b/rpc/rpc-transport/rdma/src/name.c 2013-04-12 13:50:07.000000000 -0400 -@@ -352,6 +352,8 @@ - if (listen_port_data) { - listen_port = data_to_uint16 (listen_port_data); - } else { -+ listen_port = GF_DEFAULT_RDMA_LISTEN_PORT; -+ - if (addr->sa_family == AF_INET6) { - struct sockaddr_in6 *in = (struct sockaddr_in6 *) addr; - in->sin6_addr = in6addr_any; diff --git a/glusterfs-3.3.1.swift.constraints.backport-1.7.4.patch b/glusterfs-3.3.1.swift.constraints.backport-1.7.4.patch deleted file mode 100644 index 78e3220..0000000 --- a/glusterfs-3.3.1.swift.constraints.backport-1.7.4.patch +++ /dev/null @@ 
-1,567 +0,0 @@ -From 9ce581c9c548c6a843e682f79f9ae510121501ac Mon Sep 17 00:00:00 2001 -From: Peter Portante -Date: Thu, 4 Oct 2012 11:32:56 -0400 -Subject: [PATCH] Backport commit a2ac5efaa64f57fbbe059066c6c4636dfd0715c2, - 'swift constraints are now settable via config', excluding - PEP8 changes that did not involve the constraints. - ---- - etc/swift.conf-sample | 73 ++++++++++++++++++++++++++++++++++ - swift/common/constraints.py | 29 +++++++++++++ - swift/container/sync.py | 8 +++- - test/functional/tests.py | 63 ++++++++++++++++++++++++----- - test/sample.conf | 15 +++++++ - test/unit/common/test_constraints.py | 9 +++- - test/unit/obj/test_server.py | 6 ++- - test/unit/proxy/test_server.py | 50 ++++++++++++++--------- - 8 files changed, 218 insertions(+), 35 deletions(-) - -diff --git a/etc/swift.conf-sample b/etc/swift.conf-sample -index 7e1c31d..2f4192a 100644 ---- a/etc/swift.conf-sample -+++ b/etc/swift.conf-sample -@@ -1,3 +1,76 @@ - [swift-hash] -+ -+# swift_hash_path_suffix is used as part of the hashing algorithm -+# when determining data placement in the cluster. This value should -+# remain secret and MUST NOT change once a cluster has been deployed. -+ - swift_hash_path_suffix = changeme - -+ -+ -+# The swift-constraints section sets the basic constraints on data -+# saved in the swift cluster. -+ -+[swift-constraints] -+ -+# max_file_size is the largest "normal" object that can be saved in -+# the cluster. This is also the limit on the size of each segment of -+# a "large" object when using the large object manifest support. -+# This value is set in bytes. Setting it to lower than 1MiB will cause -+# some tests to fail. It is STRONGLY recommended to leave this value at -+# the default (5 * 2**30 + 2). -+ -+#max_file_size = 5368709122 -+ -+ -+# max_meta_name_length is the max number of bytes in the utf8 encoding -+# of the name portion of a metadata header. -+ -+#max_meta_name_length = 128 -+ -+ -+# max_meta_value_length is the max number of bytes in the utf8 encoding -+# of a metadata value -+ -+#max_meta_value_length = 256 -+ -+ -+# max_meta_count is the max number of metadata keys that can be stored -+# on a single account, container, or object -+ -+#max_meta_count = 90 -+ -+ -+# max_meta_overall_size is the max number of bytes in the utf8 encoding -+# of the metadata (keys + values) -+ -+#max_meta_overall_size = 4096 -+ -+ -+# max_object_name_length is the max number of bytes in the utf8 encoding -+# of an object name -+ -+#max_object_name_length = 1024 -+ -+ -+# container_listing_limit is the default (and max) number of items -+# returned for a container listing request -+ -+#container_listing_limit = 10000 -+ -+ -+# account_listing_limit is the default (and max) number of items returned -+# for an account listing request -+#account_listing_limit = 10000 -+ -+ -+# max_account_name_length is the max number of bytes in the utf8 encoding -+# of an account name -+ -+#max_account_name_length = 256 -+ -+ -+# max_container_name_length is the max number of bytes in the utf8 encoding -+# of a container name -+ -+#max_container_name_length = 256 -diff --git a/swift/common/constraints.py b/swift/common/constraints.py -index a797b8b..0083346 100644 ---- a/swift/common/constraints.py -+++ b/swift/common/constraints.py -@@ -14,29 +14,58 @@ - # limitations under the License. 
- - import os -+from ConfigParser import ConfigParser, NoSectionError, NoOptionError, \ -+ RawConfigParser - - from webob.exc import HTTPBadRequest, HTTPLengthRequired, \ - HTTPRequestEntityTooLarge - -+constraints_conf = ConfigParser() -+constraints_conf.read('/etc/swift/swift.conf') -+ -+ -+def constraints_conf_int(name, default): -+ try: -+ return int(constraints_conf.get('swift-constraints', name)) -+ except (NoSectionError, NoOptionError): -+ return default -+ - - #: Max file size allowed for objects - MAX_FILE_SIZE = 5 * 1024 * 1024 * 1024 + 2 -+MAX_FILE_SIZE = constraints_conf_int('max_file_size', -+ 5368709122) # 5 * 1024 * 1024 * 1024 + 2 - #: Max length of the name of a key for metadata - MAX_META_NAME_LENGTH = 128 -+MAX_META_NAME_LENGTH = constraints_conf_int('max_meta_name_length', 128) - #: Max length of the value of a key for metadata - MAX_META_VALUE_LENGTH = 256 -+MAX_META_VALUE_LENGTH = constraints_conf_int('max_meta_value_length', 256) - #: Max number of metadata items - MAX_META_COUNT = 90 -+MAX_META_COUNT = constraints_conf_int('max_meta_count', 90) - #: Max overall size of metadata - MAX_META_OVERALL_SIZE = 4096 -+MAX_META_OVERALL_SIZE = constraints_conf_int('max_meta_overall_size', 4096) - #: Max object name length - MAX_OBJECT_NAME_LENGTH = 1024 -+MAX_OBJECT_NAME_LENGTH = constraints_conf_int('max_object_name_length', 1024) - #: Max object list length of a get request for a container - CONTAINER_LISTING_LIMIT = 10000 -+CONTAINER_LISTING_LIMIT = constraints_conf_int('container_listing_limit', -+ 10000) - #: Max container list length of a get request for an account - ACCOUNT_LISTING_LIMIT = 10000 - MAX_ACCOUNT_NAME_LENGTH = 256 - MAX_CONTAINER_NAME_LENGTH = 256 -+ACCOUNT_LISTING_LIMIT = constraints_conf_int('account_listing_limit', 10000) -+#: Max account name length -+MAX_ACCOUNT_NAME_LENGTH = constraints_conf_int('max_account_name_length', 256) -+#: Max container name length -+MAX_CONTAINER_NAME_LENGTH = constraints_conf_int('max_container_name_length', -+ 256) -+ -+ - #: Query string format= values to their corresponding content-type values - FORMAT2CONTENT_TYPE = {'plain': 'text/plain', 'json': 'application/json', - 'xml': 'application/xml'} -diff --git a/swift/container/sync.py b/swift/container/sync.py -index d7152ac..472d33a 100644 ---- a/swift/container/sync.py -+++ b/swift/container/sync.py -@@ -21,8 +21,12 @@ from eventlet import sleep, Timeout - - import swift.common.db - from swift.container import server as container_server --from swiftclient import ClientException, delete_object, put_object, \ -- quote -+try: -+ from swiftclient import ClientException, delete_object, put_object, \ -+ quote -+except: -+ import sys -+ raise Exception("\n".join(sys.path)) - from swift.common.direct_client import direct_get_object - from swift.common.ring import Ring - from swift.common.db import ContainerBroker -diff --git a/test/functional/tests.py b/test/functional/tests.py -index a412f83..bcdd76f 100644 ---- a/test/functional/tests.py -+++ b/test/functional/tests.py -@@ -15,6 +15,7 @@ - # limitations under the License. 
- - from datetime import datetime -+from ConfigParser import ConfigParser - import locale - import random - import StringIO -@@ -26,8 +27,50 @@ from nose import SkipTest - - from test import get_config - from test.functional.swift import Account, Connection, File, ResponseError -- -+from swift.common.constraints import MAX_FILE_SIZE, MAX_META_NAME_LENGTH, \ -+ MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE, \ -+ MAX_OBJECT_NAME_LENGTH, CONTAINER_LISTING_LIMIT, ACCOUNT_LISTING_LIMIT, \ -+ MAX_ACCOUNT_NAME_LENGTH, MAX_CONTAINER_NAME_LENGTH -+ -+default_constraints = dict(( -+ ('max_file_size', MAX_FILE_SIZE), -+ ('max_meta_name_length', MAX_META_NAME_LENGTH), -+ ('max_meta_value_length', MAX_META_VALUE_LENGTH), -+ ('max_meta_count', MAX_META_COUNT), -+ ('max_meta_overall_size', MAX_META_OVERALL_SIZE), -+ ('max_object_name_length', MAX_OBJECT_NAME_LENGTH), -+ ('container_listing_limit', CONTAINER_LISTING_LIMIT), -+ ('account_listing_limit', ACCOUNT_LISTING_LIMIT), -+ ('max_account_name_length', MAX_ACCOUNT_NAME_LENGTH), -+ ('max_container_name_length', MAX_CONTAINER_NAME_LENGTH))) -+constraints_conf = ConfigParser() -+conf_exists = constraints_conf.read('/etc/swift/swift.conf') -+# Constraints are set first from the test config, then from -+# /etc/swift/swift.conf if it exists. If swift.conf doesn't exist, -+# then limit test coverage. This allows SAIO tests to work fine but -+# requires remote funtional testing to know something about the cluster -+# that is being tested. - config = get_config('func_test') -+for k in default_constraints: -+ if k in config: -+ # prefer what's in test.conf -+ config[k] = int(config[k]) -+ elif conf_exists: -+ # swift.conf exists, so use what's defined there (or swift defaults) -+ # This normally happens when the test is running locally to the cluster -+ # as in a SAIO. -+ config[k] = default_constraints[k] -+ else: -+ # .functests don't know what the constraints of the tested cluster are, -+ # so the tests can't reliably pass or fail. Therefore, skip those -+ # tests. 
-+ config[k] = '%s constraint is not defined' % k -+ -+def load_constraint(name): -+ c = config[name] -+ if not isinstance(c, int): -+ raise SkipTest(c) -+ return c - - locale.setlocale(locale.LC_COLLATE, config.get('collate', 'C')) - -@@ -225,8 +268,7 @@ class TestAccount(Base): - 'application/xml; charset=utf-8') - - def testListingLimit(self): -- limit = 10000 -- -+ limit = load_constraint('account_listing_limit') - for l in (1, 100, limit/2, limit-1, limit, limit+1, limit*2): - p = {'limit':l} - -@@ -353,7 +395,7 @@ class TestContainer(Base): - set_up = False - - def testContainerNameLimit(self): -- limit = 256 -+ limit = load_constraint('max_container_name_length') - - for l in (limit-100, limit-10, limit-1, limit, - limit+1, limit+10, limit+100): -@@ -398,6 +440,7 @@ class TestContainer(Base): - self.assert_(cont.files(parms={'prefix': f}) == [f]) - - def testPrefixAndLimit(self): -+ load_constraint('container_listing_limit') - cont = self.env.account.container(Utils.create_name()) - self.assert_(cont.create()) - -@@ -941,7 +984,7 @@ class TestFile(Base): - self.assert_status(404) - - def testNameLimit(self): -- limit = 1024 -+ limit = load_constraint('max_object_name_length') - - for l in (1, 10, limit/2, limit-1, limit, limit+1, limit*2): - file = self.env.container.file('a'*l) -@@ -989,13 +1032,12 @@ class TestFile(Base): - self.assert_status(400) - - def testMetadataNumberLimit(self): -- number_limit = 90 -+ number_limit = load_constraint('max_meta_count') -+ size_limit = load_constraint('max_meta_overall_size') - - for i in (number_limit-10, number_limit-1, number_limit, - number_limit+1, number_limit+10, number_limit+100): - -- size_limit = 4096 -- - j = size_limit/(i * 2) - - size = 0 -@@ -1096,7 +1138,7 @@ class TestFile(Base): - self.assert_(file.read(hdrs={'Range': r}) == data[0:1000]) - - def testFileSizeLimit(self): -- limit = 5*2**30 + 2 -+ limit = load_constraint('max_file_size') - tsecs = 3 - - for i in (limit-100, limit-10, limit-1, limit, limit+1, limit+10, -@@ -1150,7 +1192,8 @@ class TestFile(Base): - self.assert_status(200) - - def testMetadataLengthLimits(self): -- key_limit, value_limit = 128, 256 -+ key_limit = load_constraint('max_meta_name_length') -+ value_limit = load_constraint('max_meta_value_length') - lengths = [[key_limit, value_limit], [key_limit, value_limit+1], \ - [key_limit+1, value_limit], [key_limit, 0], \ - [key_limit, value_limit*10], [key_limit*10, value_limit]] -diff --git a/test/sample.conf b/test/sample.conf -index 7594c02..d3eced0 100644 ---- a/test/sample.conf -+++ b/test/sample.conf -@@ -19,6 +19,21 @@ password2 = testing2 - username3 = tester3 - password3 = testing3 - -+# Default constraints if not defined here, the test runner will try -+# to set them from /etc/swift/swift.conf. If that file isn't found, -+# the test runner will skip tests that depend on these values. -+# Note that the cluster must have "sane" values for the test suite to pass. 
-+#max_file_size = 5368709122 -+#max_meta_name_length = 128 -+#max_meta_value_length = 256 -+#max_meta_count = 90 -+#max_meta_overall_size = 4096 -+#max_object_name_length = 1024 -+#container_listing_limit = 10000 -+#account_listing_limit = 10000 -+#max_account_name_length = 256 -+#max_container_name_length = 256 -+ - collate = C - - [unit_test] -diff --git a/test/unit/common/test_constraints.py b/test/unit/common/test_constraints.py -index 37ed225..000a0b4 100644 ---- a/test/unit/common/test_constraints.py -+++ b/test/unit/common/test_constraints.py -@@ -84,8 +84,13 @@ class TestConstraints(unittest.TestCase): - x += 1 - self.assertEquals(constraints.check_metadata(Request.blank('/', - headers=headers), 'object'), None) -- headers['X-Object-Meta-9999%s' % -- ('a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \ -+ # add two more headers in case adding just one falls exactly on the -+ # limit (eg one header adds 1024 and the limit is 2048) -+ headers['X-Object-Meta-%04d%s' % -+ (x, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \ -+ 'v' * constraints.MAX_META_VALUE_LENGTH -+ headers['X-Object-Meta-%04d%s' % -+ (x + 1, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \ - 'v' * constraints.MAX_META_VALUE_LENGTH - self.assert_(isinstance(constraints.check_metadata(Request.blank('/', - headers=headers), 'object'), HTTPBadRequest)) -diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py -index 0d94dba..f78baa1 100644 ---- a/test/unit/obj/test_server.py -+++ b/test/unit/obj/test_server.py -@@ -35,6 +35,7 @@ from swift.common import utils - from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \ - NullLogger, storage_directory - from swift.common.exceptions import DiskFileNotExist -+from swift.common import constraints - from eventlet import tpool - - -@@ -1389,7 +1390,8 @@ class TestObjectController(unittest.TestCase): - - def test_max_object_name_length(self): - timestamp = normalize_timestamp(time()) -- req = Request.blank('/sda1/p/a/c/' + ('1' * 1024), -+ max_name_len = constraints.MAX_OBJECT_NAME_LENGTH -+ req = Request.blank('/sda1/p/a/c/' + ('1' * max_name_len), - environ={'REQUEST_METHOD': 'PUT'}, - headers={'X-Timestamp': timestamp, - 'Content-Length': '4', -@@ -1397,7 +1399,7 @@ class TestObjectController(unittest.TestCase): - req.body = 'DATA' - resp = self.object_controller.PUT(req) - self.assertEquals(resp.status_int, 201) -- req = Request.blank('/sda1/p/a/c/' + ('2' * 1025), -+ req = Request.blank('/sda1/p/a/c/' + ('2' * (max_name_len + 1)), - environ={'REQUEST_METHOD': 'PUT'}, - headers={'X-Timestamp': timestamp, - 'Content-Length': '4', -diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py -index 80ef9f7..0e3f30d 100644 ---- a/test/unit/proxy/test_server.py -+++ b/test/unit/proxy/test_server.py -@@ -46,7 +46,8 @@ from swift.obj import server as object_server - from swift.common import ring - from swift.common.exceptions import ChunkReadTimeout - from swift.common.constraints import MAX_META_NAME_LENGTH, \ -- MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE, MAX_FILE_SIZE -+ MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE, \ -+ MAX_FILE_SIZE, MAX_ACCOUNT_NAME_LENGTH, MAX_CONTAINER_NAME_LENGTH - from swift.common.utils import mkdirs, normalize_timestamp, NullLogger - from swift.common.wsgi import monkey_patch_mimetools - from swift.proxy.controllers.obj import SegmentedIterable -@@ -1060,47 +1061,50 @@ class TestObjectController(unittest.TestCase): - - def test_POST_meta_val_len(self): - with 
save_globals(): -+ limit = MAX_META_VALUE_LENGTH - self.app.object_post_as_copy = False - controller = proxy_server.ObjectController(self.app, 'account', - 'container', 'object') - set_http_connect(200, 200, 202, 202, 202) - # acct cont obj obj obj - req = Request.blank('/a/c/o', {}, headers={ -- 'Content-Type': 'foo/bar', -- 'X-Object-Meta-Foo': 'x' * 256}) -+ 'Content-Type': 'foo/bar', -+ 'X-Object-Meta-Foo': 'x' * limit}) - self.app.update_request(req) - res = controller.POST(req) - self.assertEquals(res.status_int, 202) - set_http_connect(202, 202, 202) - req = Request.blank('/a/c/o', {}, headers={ -- 'Content-Type': 'foo/bar', -- 'X-Object-Meta-Foo': 'x' * 257}) -+ 'Content-Type': 'foo/bar', -+ 'X-Object-Meta-Foo': 'x' * (limit + 1)}) - self.app.update_request(req) - res = controller.POST(req) - self.assertEquals(res.status_int, 400) - - def test_POST_as_copy_meta_val_len(self): - with save_globals(): -+ limit = MAX_META_VALUE_LENGTH - controller = proxy_server.ObjectController(self.app, 'account', - 'container', 'object') - set_http_connect(200, 200, 200, 200, 200, 202, 202, 202) - # acct cont objc objc objc obj obj obj - req = Request.blank('/a/c/o', {}, headers={ -- 'Content-Type': 'foo/bar', -- 'X-Object-Meta-Foo': 'x' * 256}) -+ 'Content-Type': 'foo/bar', -+ 'X-Object-Meta-Foo': 'x' * limit}) - self.app.update_request(req) - res = controller.POST(req) - self.assertEquals(res.status_int, 202) - set_http_connect(202, 202, 202) - req = Request.blank('/a/c/o', {}, headers={ -- 'Content-Type': 'foo/bar', -- 'X-Object-Meta-Foo': 'x' * 257}) -+ 'Content-Type': 'foo/bar', -+ 'X-Object-Meta-Foo': 'x' * (limit + 1)}) - self.app.update_request(req) - res = controller.POST(req) - self.assertEquals(res.status_int, 400) - - def test_POST_meta_key_len(self): - with save_globals(): -+ limit = MAX_META_NAME_LENGTH - self.app.object_post_as_copy = False - controller = proxy_server.ObjectController(self.app, 'account', - 'container', 'object') -@@ -1108,44 +1112,46 @@ class TestObjectController(unittest.TestCase): - # acct cont obj obj obj - req = Request.blank('/a/c/o', {}, headers={ - 'Content-Type': 'foo/bar', -- ('X-Object-Meta-' + 'x' * 128): 'x'}) -+ ('X-Object-Meta-' + 'x' * limit): 'x'}) - self.app.update_request(req) - res = controller.POST(req) - self.assertEquals(res.status_int, 202) - set_http_connect(202, 202, 202) - req = Request.blank('/a/c/o', {}, headers={ - 'Content-Type': 'foo/bar', -- ('X-Object-Meta-' + 'x' * 129): 'x'}) -+ ('X-Object-Meta-' + 'x' * (limit + 1)): 'x'}) - self.app.update_request(req) - res = controller.POST(req) - self.assertEquals(res.status_int, 400) - - def test_POST_as_copy_meta_key_len(self): - with save_globals(): -+ limit = MAX_META_NAME_LENGTH - controller = proxy_server.ObjectController(self.app, 'account', - 'container', 'object') - set_http_connect(200, 200, 200, 200, 200, 202, 202, 202) - # acct cont objc objc objc obj obj obj - req = Request.blank('/a/c/o', {}, headers={ - 'Content-Type': 'foo/bar', -- ('X-Object-Meta-' + 'x' * 128): 'x'}) -+ ('X-Object-Meta-' + 'x' * limit): 'x'}) - self.app.update_request(req) - res = controller.POST(req) - self.assertEquals(res.status_int, 202) - set_http_connect(202, 202, 202) - req = Request.blank('/a/c/o', {}, headers={ - 'Content-Type': 'foo/bar', -- ('X-Object-Meta-' + 'x' * 129): 'x'}) -+ ('X-Object-Meta-' + 'x' * (limit + 1)): 'x'}) - self.app.update_request(req) - res = controller.POST(req) - self.assertEquals(res.status_int, 400) - - def test_POST_meta_count(self): - with save_globals(): -+ limit = 
MAX_META_COUNT - controller = proxy_server.ObjectController(self.app, 'account', - 'container', 'object') - headers = dict( -- (('X-Object-Meta-' + str(i), 'a') for i in xrange(91))) -+ (('X-Object-Meta-' + str(i), 'a') for i in xrange(limit + 1))) - headers.update({'Content-Type': 'foo/bar'}) - set_http_connect(202, 202, 202) - req = Request.blank('/a/c/o', {}, headers=headers) -@@ -1155,10 +1161,13 @@ class TestObjectController(unittest.TestCase): - - def test_POST_meta_size(self): - with save_globals(): -+ limit = MAX_META_OVERALL_SIZE - controller = proxy_server.ObjectController(self.app, 'account', - 'container', 'object') -+ count = limit / 256 # enough to cause the limit to be reched - headers = dict( -- (('X-Object-Meta-' + str(i), 'a' * 256) for i in xrange(1000))) -+ (('X-Object-Meta-' + str(i), 'a' * 256) -+ for i in xrange(count + 1))) - headers.update({'Content-Type': 'foo/bar'}) - set_http_connect(202, 202, 202) - req = Request.blank('/a/c/o', {}, headers=headers) -@@ -3408,13 +3417,14 @@ class TestContainerController(unittest.TestCase): - - def test_PUT_max_container_name_length(self): - with save_globals(): -+ limit = MAX_CONTAINER_NAME_LENGTH - controller = proxy_server.ContainerController(self.app, 'account', -- '1' * 256) -+ '1' * limit) - self.assert_status_map(controller.PUT, - (200, 200, 200, 201, 201, 201), 201, - missing_container=True) - controller = proxy_server.ContainerController(self.app, 'account', -- '2' * 257) -+ '2' * (limit + 1)) - self.assert_status_map(controller.PUT, (201, 201, 201), 400, - missing_container=True) - -@@ -3961,9 +3971,11 @@ class TestAccountController(unittest.TestCase): - def test_PUT_max_account_name_length(self): - with save_globals(): - self.app.allow_account_management = True -- controller = proxy_server.AccountController(self.app, '1' * 256) -+ limit = MAX_ACCOUNT_NAME_LENGTH -+ controller = proxy_server.AccountController(self.app, '1' * limit) - self.assert_status_map(controller.PUT, (201, 201, 201), 201) -- controller = proxy_server.AccountController(self.app, '2' * 257) -+ controller = proxy_server.AccountController( -+ self.app, '2' * (limit + 1)) - self.assert_status_map(controller.PUT, (201, 201, 201), 400) - - def test_PUT_connect_exceptions(self): --- -1.7.7.6 - diff --git a/glusterfs-3.3.1.swift.constraints.backport.patch b/glusterfs-3.3.1.swift.constraints.backport.patch deleted file mode 100644 index bb61a67..0000000 --- a/glusterfs-3.3.1.swift.constraints.backport.patch +++ /dev/null @@ -1,518 +0,0 @@ -From fc2421b04022ac6bbe9d5014362ec5f99f94c5e0 Mon Sep 17 00:00:00 2001 -From: Peter Portante -Date: Tue, 25 Sep 2012 13:27:59 -0400 -Subject: [PATCH] Backport commit a2ac5efaa64f57fbbe059066c6c4636dfd0715c2, - 'swift constraints are now settable via config', excluding - PEP8 changes that did not involve the constraints. - ---- - etc/swift.conf-sample | 73 ++++++++++++++++++++++++++++++++++ - swift/common/constraints.py | 37 ++++++++++++----- - test/functional/tests.py | 62 ++++++++++++++++++++++++---- - test/unit/common/test_constraints.py | 9 +++- - test/unit/obj/test_server.py | 7 ++- - test/unit/proxy/test_server.py | 50 ++++++++++++++--------- - 6 files changed, 196 insertions(+), 42 deletions(-) - -diff --git a/etc/swift.conf-sample b/etc/swift.conf-sample -index 7e1c31d..2f4192a 100644 ---- a/etc/swift.conf-sample -+++ b/etc/swift.conf-sample -@@ -1,3 +1,76 @@ - [swift-hash] -+ -+# swift_hash_path_suffix is used as part of the hashing algorithm -+# when determining data placement in the cluster. 
This value should -+# remain secret and MUST NOT change once a cluster has been deployed. -+ - swift_hash_path_suffix = changeme - -+ -+ -+# The swift-constraints section sets the basic constraints on data -+# saved in the swift cluster. -+ -+[swift-constraints] -+ -+# max_file_size is the largest "normal" object that can be saved in -+# the cluster. This is also the limit on the size of each segment of -+# a "large" object when using the large object manifest support. -+# This value is set in bytes. Setting it to lower than 1MiB will cause -+# some tests to fail. It is STRONGLY recommended to leave this value at -+# the default (5 * 2**30 + 2). -+ -+#max_file_size = 5368709122 -+ -+ -+# max_meta_name_length is the max number of bytes in the utf8 encoding -+# of the name portion of a metadata header. -+ -+#max_meta_name_length = 128 -+ -+ -+# max_meta_value_length is the max number of bytes in the utf8 encoding -+# of a metadata value -+ -+#max_meta_value_length = 256 -+ -+ -+# max_meta_count is the max number of metadata keys that can be stored -+# on a single account, container, or object -+ -+#max_meta_count = 90 -+ -+ -+# max_meta_overall_size is the max number of bytes in the utf8 encoding -+# of the metadata (keys + values) -+ -+#max_meta_overall_size = 4096 -+ -+ -+# max_object_name_length is the max number of bytes in the utf8 encoding -+# of an object name -+ -+#max_object_name_length = 1024 -+ -+ -+# container_listing_limit is the default (and max) number of items -+# returned for a container listing request -+ -+#container_listing_limit = 10000 -+ -+ -+# account_listing_limit is the default (and max) number of items returned -+# for an account listing request -+#account_listing_limit = 10000 -+ -+ -+# max_account_name_length is the max number of bytes in the utf8 encoding -+# of an account name -+ -+#max_account_name_length = 256 -+ -+ -+# max_container_name_length is the max number of bytes in the utf8 encoding -+# of a container name -+ -+#max_container_name_length = 256 -diff --git a/swift/common/constraints.py b/swift/common/constraints.py -index 235dcca..0fe5078 100644 ---- a/swift/common/constraints.py -+++ b/swift/common/constraints.py -@@ -14,29 +14,46 @@ - # limitations under the License. 
- - import os -+from ConfigParser import ConfigParser, NoSectionError, NoOptionError, \ -+ RawConfigParser - - from webob.exc import HTTPBadRequest, HTTPLengthRequired, \ - HTTPRequestEntityTooLarge - -+constraints_conf = ConfigParser() -+constraints_conf.read('/etc/swift/swift.conf') -+ -+ -+def constraints_conf_int(name, default): -+ try: -+ return int(constraints_conf.get('swift-constraints', name)) -+ except (NoSectionError, NoOptionError): -+ return default -+ - - #: Max file size allowed for objects --MAX_FILE_SIZE = 5 * 1024 * 1024 * 1024 + 2 -+MAX_FILE_SIZE = constraints_conf_int('max_file_size', -+ 5368709122) # 5 * 1024 * 1024 * 1024 + 2 - #: Max length of the name of a key for metadata --MAX_META_NAME_LENGTH = 128 -+MAX_META_NAME_LENGTH = constraints_conf_int('max_meta_name_length', 128) - #: Max length of the value of a key for metadata --MAX_META_VALUE_LENGTH = 256 -+MAX_META_VALUE_LENGTH = constraints_conf_int('max_meta_value_length', 256) - #: Max number of metadata items --MAX_META_COUNT = 90 -+MAX_META_COUNT = constraints_conf_int('max_meta_count', 90) - #: Max overall size of metadata --MAX_META_OVERALL_SIZE = 4096 -+MAX_META_OVERALL_SIZE = constraints_conf_int('max_meta_overall_size', 4096) - #: Max object name length --MAX_OBJECT_NAME_LENGTH = 1024 -+MAX_OBJECT_NAME_LENGTH = constraints_conf_int('max_object_name_length', 1024) - #: Max object list length of a get request for a container --CONTAINER_LISTING_LIMIT = 10000 -+CONTAINER_LISTING_LIMIT = constraints_conf_int('container_listing_limit', -+ 10000) - #: Max container list length of a get request for an account --ACCOUNT_LISTING_LIMIT = 10000 --MAX_ACCOUNT_NAME_LENGTH = 256 --MAX_CONTAINER_NAME_LENGTH = 256 -+ACCOUNT_LISTING_LIMIT = constraints_conf_int('account_listing_limit', 10000) -+#: Max account name length -+MAX_ACCOUNT_NAME_LENGTH = constraints_conf_int('max_account_name_length', 256) -+#: Max container name length -+MAX_CONTAINER_NAME_LENGTH = constraints_conf_int('max_container_name_length', -+ 256) - - - def check_metadata(req, target_type): -diff --git a/test/functional/tests.py b/test/functional/tests.py -index b25b4fd..3b18fc4 100644 ---- a/test/functional/tests.py -+++ b/test/functional/tests.py -@@ -27,12 +27,55 @@ import threading - import uuid - import unittest - import urllib -+from ConfigParser import ConfigParser - - from test import get_config - from swift import Account, AuthenticationFailed, Connection, Container, \ - File, ResponseError -- -+from swift.common.constraints import MAX_FILE_SIZE, MAX_META_NAME_LENGTH, \ -+ MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE, \ -+ MAX_OBJECT_NAME_LENGTH, CONTAINER_LISTING_LIMIT, ACCOUNT_LISTING_LIMIT, \ -+ MAX_ACCOUNT_NAME_LENGTH, MAX_CONTAINER_NAME_LENGTH -+ -+default_constraints = dict(( -+ ('max_file_size', MAX_FILE_SIZE), -+ ('max_meta_name_length', MAX_META_NAME_LENGTH), -+ ('max_meta_value_length', MAX_META_VALUE_LENGTH), -+ ('max_meta_count', MAX_META_COUNT), -+ ('max_meta_overall_size', MAX_META_OVERALL_SIZE), -+ ('max_object_name_length', MAX_OBJECT_NAME_LENGTH), -+ ('container_listing_limit', CONTAINER_LISTING_LIMIT), -+ ('account_listing_limit', ACCOUNT_LISTING_LIMIT), -+ ('max_account_name_length', MAX_ACCOUNT_NAME_LENGTH), -+ ('max_container_name_length', MAX_CONTAINER_NAME_LENGTH))) -+constraints_conf = ConfigParser() -+conf_exists = constraints_conf.read('/etc/swift/swift.conf') -+# Constraints are set first from the test config, then from -+# /etc/swift/swift.conf if it exists. 
If swift.conf doesn't exist, -+# then limit test coverage. This allows SAIO tests to work fine but -+# requires remote funtional testing to know something about the cluster -+# that is being tested. - config = get_config() -+for k in default_constraints: -+ if k in config: -+ # prefer what's in test.conf -+ config[k] = int(config[k]) -+ elif conf_exists: -+ # swift.conf exists, so use what's defined there (or swift defaults) -+ # This normally happens when the test is running locally to the cluster -+ # as in a SAIO. -+ config[k] = default_constraints[k] -+ else: -+ # .functests don't know what the constraints of the tested cluster are, -+ # so the tests can't reliably pass or fail. Therefore, skip those -+ # tests. -+ config[k] = '%s constraint is not defined' % k -+ -+def load_constraint(name): -+ c = config[name] -+ if not isinstance(c, int): -+ raise SkipTest(c) -+ return c - - locale.setlocale(locale.LC_COLLATE, config.get('collate', 'C')) - -@@ -233,7 +276,7 @@ class TestAccount(Base): - 'application/xml; charset=utf-8') - - def testListingLimit(self): -- limit = 10000 -+ limit = load_constraint('account_listing_limit') - - for l in (1, 100, limit/2, limit-1, limit, limit+1, limit*2): - p = {'limit':l} -@@ -361,7 +404,7 @@ class TestContainer(Base): - set_up = False - - def testContainerNameLimit(self): -- limit = 256 -+ limit = load_constraint('max_container_name_length') - - for l in (limit-100, limit-10, limit-1, limit, - limit+1, limit+10, limit+100): -@@ -406,6 +449,7 @@ class TestContainer(Base): - self.assert_(cont.files(parms={'prefix': f}) == [f]) - - def testPrefixAndLimit(self): -+ load_constraint('container_listing_limit') - cont = self.env.account.container(Utils.create_name()) - self.assert_(cont.create()) - -@@ -949,7 +993,7 @@ class TestFile(Base): - self.assert_status(404) - - def testNameLimit(self): -- limit = 1024 -+ limit = load_constraint('max_object_name_length') - - for l in (1, 10, limit/2, limit-1, limit, limit+1, limit*2): - file = self.env.container.file('a'*l) -@@ -997,13 +1041,12 @@ class TestFile(Base): - self.assert_status(400) - - def testMetadataNumberLimit(self): -- number_limit = 90 -+ number_limit = load_constraint('max_meta_count') -+ size_limit = load_constraint('max_meta_overall_size') - - for i in (number_limit-10, number_limit-1, number_limit, - number_limit+1, number_limit+10, number_limit+100): - -- size_limit = 4096 -- - j = size_limit/(i * 2) - - size = 0 -@@ -1093,7 +1136,7 @@ class TestFile(Base): - self.assert_(file.read(hdrs={'Range': r}) == data[0:1000]) - - def testFileSizeLimit(self): -- limit = 5*2**30 + 2 -+ limit = load_constraint('max_file_size') - tsecs = 3 - - for i in (limit-100, limit-10, limit-1, limit, limit+1, limit+10, -@@ -1147,7 +1190,8 @@ class TestFile(Base): - self.assert_status(200) - - def testMetadataLengthLimits(self): -- key_limit, value_limit = 128, 256 -+ key_limit = load_constraint('max_meta_name_length') -+ value_limit = load_constraint('max_meta_value_length') - lengths = [[key_limit, value_limit], [key_limit, value_limit+1], \ - [key_limit+1, value_limit], [key_limit, 0], \ - [key_limit, value_limit*10], [key_limit*10, value_limit]] -diff --git a/test/unit/common/test_constraints.py b/test/unit/common/test_constraints.py -index 478b2a8..4b0c997 100644 ---- a/test/unit/common/test_constraints.py -+++ b/test/unit/common/test_constraints.py -@@ -84,8 +84,13 @@ class TestConstraints(unittest.TestCase): - x += 1 - self.assertEquals(constraints.check_metadata(Request.blank('/', - headers=headers), 'object'), 
None) -- headers['X-Object-Meta-9999%s' % -- ('a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \ -+ # add two more headers in case adding just one falls exactly on the -+ # limit (eg one header adds 1024 and the limit is 2048) -+ headers['X-Object-Meta-%04d%s' % -+ (x, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \ -+ 'v' * constraints.MAX_META_VALUE_LENGTH -+ headers['X-Object-Meta-%04d%s' % -+ (x + 1, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \ - 'v' * constraints.MAX_META_VALUE_LENGTH - self.assert_(isinstance(constraints.check_metadata(Request.blank('/', - headers=headers), 'object'), HTTPBadRequest)) -diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py -index 075700e..5160830 100644 ---- a/test/unit/obj/test_server.py -+++ b/test/unit/obj/test_server.py -@@ -38,6 +38,7 @@ from swift.common import utils - from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \ - NullLogger, storage_directory - from swift.common.exceptions import DiskFileNotExist -+from swift.common import constraints - from swift.obj import replicator - from eventlet import tpool - -@@ -1355,7 +1356,9 @@ class TestObjectController(unittest.TestCase): - - def test_max_object_name_length(self): - timestamp = normalize_timestamp(time()) -- req = Request.blank('/sda1/p/a/c/' + ('1' * 1024), -+ max_name_len = constraints.MAX_OBJECT_NAME_LENGTH -+ req = Request.blank('/sda1/p/a/c/' + ('1' * max_name_len), -+ - environ={'REQUEST_METHOD': 'PUT'}, - headers={'X-Timestamp': timestamp, - 'Content-Length': '4', -@@ -1363,7 +1366,7 @@ class TestObjectController(unittest.TestCase): - req.body = 'DATA' - resp = self.object_controller.PUT(req) - self.assertEquals(resp.status_int, 201) -- req = Request.blank('/sda1/p/a/c/' + ('2' * 1025), -+ req = Request.blank('/sda1/p/a/c/' + ('2' * (max_name_len + 1)), - environ={'REQUEST_METHOD': 'PUT'}, - headers={'X-Timestamp': timestamp, - 'Content-Length': '4', -diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py -index 364370e..d28f604 100644 ---- a/test/unit/proxy/test_server.py -+++ b/test/unit/proxy/test_server.py -@@ -45,7 +45,8 @@ from swift.container import server as container_server - from swift.obj import server as object_server - from swift.common import ring - from swift.common.constraints import MAX_META_NAME_LENGTH, \ -- MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE, MAX_FILE_SIZE -+ MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE, \ -+ MAX_FILE_SIZE, MAX_ACCOUNT_NAME_LENGTH, MAX_CONTAINER_NAME_LENGTH - from swift.common.utils import mkdirs, normalize_timestamp, NullLogger - from swift.common.wsgi import monkey_patch_mimetools - -@@ -1168,6 +1169,7 @@ class TestObjectController(unittest.TestCase): - - def test_POST_meta_val_len(self): - with save_globals(): -+ limit = MAX_META_VALUE_LENGTH - self.app.object_post_as_copy = False - controller = proxy_server.ObjectController(self.app, 'account', - 'container', 'object') -@@ -1175,42 +1177,44 @@ class TestObjectController(unittest.TestCase): - fake_http_connect(200, 200, 202, 202, 202) - # acct cont obj obj obj - req = Request.blank('/a/c/o', {}, headers={ -- 'Content-Type': 'foo/bar', -- 'X-Object-Meta-Foo': 'x' * 256}) -+ 'Content-Type': 'foo/bar', -+ 'X-Object-Meta-Foo': 'x' * limit}) - self.app.update_request(req) - res = controller.POST(req) - self.assertEquals(res.status_int, 202) - proxy_server.http_connect = fake_http_connect(202, 202, 202) - req = Request.blank('/a/c/o', {}, headers={ -- 'Content-Type': 'foo/bar', -- 
'X-Object-Meta-Foo': 'x' * 257}) -+ 'Content-Type': 'foo/bar', -+ 'X-Object-Meta-Foo': 'x' * (limit + 1)}) - self.app.update_request(req) - res = controller.POST(req) - self.assertEquals(res.status_int, 400) - - def test_POST_as_copy_meta_val_len(self): - with save_globals(): -+ limit = MAX_META_VALUE_LENGTH - controller = proxy_server.ObjectController(self.app, 'account', - 'container', 'object') - proxy_server.http_connect = \ - fake_http_connect(200, 200, 200, 200, 200, 202, 202, 202) - # acct cont objc objc objc obj obj obj - req = Request.blank('/a/c/o', {}, headers={ -- 'Content-Type': 'foo/bar', -- 'X-Object-Meta-Foo': 'x' * 256}) -+ 'Content-Type': 'foo/bar', -+ 'X-Object-Meta-Foo': 'x' * limit}) - self.app.update_request(req) - res = controller.POST(req) - self.assertEquals(res.status_int, 202) - proxy_server.http_connect = fake_http_connect(202, 202, 202) - req = Request.blank('/a/c/o', {}, headers={ -- 'Content-Type': 'foo/bar', -- 'X-Object-Meta-Foo': 'x' * 257}) -+ 'Content-Type': 'foo/bar', -+ 'X-Object-Meta-Foo': 'x' * (limit + 1)}) - self.app.update_request(req) - res = controller.POST(req) - self.assertEquals(res.status_int, 400) - - def test_POST_meta_key_len(self): - with save_globals(): -+ limit = MAX_META_NAME_LENGTH - self.app.object_post_as_copy = False - controller = proxy_server.ObjectController(self.app, 'account', - 'container', 'object') -@@ -1219,20 +1223,21 @@ class TestObjectController(unittest.TestCase): - # acct cont obj obj obj - req = Request.blank('/a/c/o', {}, headers={ - 'Content-Type': 'foo/bar', -- ('X-Object-Meta-' + 'x' * 128): 'x'}) -+ ('X-Object-Meta-' + 'x' * limit): 'x'}) - self.app.update_request(req) - res = controller.POST(req) - self.assertEquals(res.status_int, 202) - proxy_server.http_connect = fake_http_connect(202, 202, 202) - req = Request.blank('/a/c/o', {}, headers={ - 'Content-Type': 'foo/bar', -- ('X-Object-Meta-' + 'x' * 129): 'x'}) -+ ('X-Object-Meta-' + 'x' * (limit + 1)): 'x'}) - self.app.update_request(req) - res = controller.POST(req) - self.assertEquals(res.status_int, 400) - - def test_POST_as_copy_meta_key_len(self): - with save_globals(): -+ limit = MAX_META_NAME_LENGTH - controller = proxy_server.ObjectController(self.app, 'account', - 'container', 'object') - proxy_server.http_connect = \ -@@ -1240,24 +1245,25 @@ class TestObjectController(unittest.TestCase): - # acct cont objc objc objc obj obj obj - req = Request.blank('/a/c/o', {}, headers={ - 'Content-Type': 'foo/bar', -- ('X-Object-Meta-' + 'x' * 128): 'x'}) -+ ('X-Object-Meta-' + 'x' * limit): 'x'}) - self.app.update_request(req) - res = controller.POST(req) - self.assertEquals(res.status_int, 202) - proxy_server.http_connect = fake_http_connect(202, 202, 202) - req = Request.blank('/a/c/o', {}, headers={ - 'Content-Type': 'foo/bar', -- ('X-Object-Meta-' + 'x' * 129): 'x'}) -+ ('X-Object-Meta-' + 'x' * (limit + 1)): 'x'}) - self.app.update_request(req) - res = controller.POST(req) - self.assertEquals(res.status_int, 400) - - def test_POST_meta_count(self): - with save_globals(): -+ limit = MAX_META_COUNT - controller = proxy_server.ObjectController(self.app, 'account', - 'container', 'object') - headers = dict( -- (('X-Object-Meta-' + str(i), 'a') for i in xrange(91))) -+ (('X-Object-Meta-' + str(i), 'a') for i in xrange(limit + 1))) - headers.update({'Content-Type': 'foo/bar'}) - proxy_server.http_connect = fake_http_connect(202, 202, 202) - req = Request.blank('/a/c/o', {}, headers=headers) -@@ -1267,10 +1273,13 @@ class TestObjectController(unittest.TestCase): 
- - def test_POST_meta_size(self): - with save_globals(): -+ limit = MAX_META_OVERALL_SIZE - controller = proxy_server.ObjectController(self.app, 'account', - 'container', 'object') -+ count = limit / 256 # enough to cause the limit to be reched - headers = dict( -- (('X-Object-Meta-' + str(i), 'a' * 256) for i in xrange(1000))) -+ (('X-Object-Meta-' + str(i), 'a' * 256) -+ for i in xrange(count + 1))) - headers.update({'Content-Type': 'foo/bar'}) - proxy_server.http_connect = fake_http_connect(202, 202, 202) - req = Request.blank('/a/c/o', {}, headers=headers) -@@ -3206,13 +3215,14 @@ class TestContainerController(unittest.TestCase): - - def test_PUT_max_container_name_length(self): - with save_globals(): -+ limit = MAX_CONTAINER_NAME_LENGTH - controller = proxy_server.ContainerController(self.app, 'account', -- '1' * 256) -+ '1' * limit) - self.assert_status_map(controller.PUT, - (200, 200, 200, 201, 201, 201), 201, - missing_container=True) - controller = proxy_server.ContainerController(self.app, 'account', -- '2' * 257) -+ '2' * (limit + 1)) - self.assert_status_map(controller.PUT, (201, 201, 201), 400, - missing_container=True) - -@@ -3813,9 +3823,11 @@ class TestAccountController(unittest.TestCase): - def test_PUT_max_account_name_length(self): - with save_globals(): - self.app.allow_account_management = True -- controller = proxy_server.AccountController(self.app, '1' * 256) -+ limit = MAX_ACCOUNT_NAME_LENGTH -+ controller = proxy_server.AccountController(self.app, '1' * limit) - self.assert_status_map(controller.PUT, (201, 201, 201), 201) -- controller = proxy_server.AccountController(self.app, '2' * 257) -+ controller = proxy_server.AccountController( -+ self.app, '2' * (limit + 1)) - self.assert_status_map(controller.PUT, (201, 201, 201), 400) - - def test_PUT_connect_exceptions(self): --- -1.7.7.6 - diff --git a/glusterfs-3.3.1.ufo.gluster.multi-volume.backport-1.1.patch b/glusterfs-3.3.1.ufo.gluster.multi-volume.backport-1.1.patch deleted file mode 100644 index 6d7830b..0000000 --- a/glusterfs-3.3.1.ufo.gluster.multi-volume.backport-1.1.patch +++ /dev/null @@ -1,406 +0,0 @@ -diff -ru a/ufo/bin/gluster-swift-gen-builders b/ufo/bin/gluster-swift-gen-builders ---- a/ufo/bin/gluster-swift-gen-builders 2012-12-07 12:24:00.000000000 -0500 -+++ b/ufo/bin/gluster-swift-gen-builders 2013-04-29 15:16:22.748000000 -0400 -@@ -1,9 +1,25 @@ - #!/bin/bash - -+# Note that these port numbers must match the configured values for the -+# various servers in their configuration files. -+declare -A port=(["account.builder"]=6012 ["container.builder"]=6011 \ -+ ["object.builder"]=6010) -+ -+builder_files="account.builder container.builder object.builder" -+ - function create { -- swift-ring-builder $1 create 0 1 1 -- swift-ring-builder $1 add z1-127.0.0.1:$2/$3_ 100.0 -+ swift-ring-builder $1 create 1 1 1 >> /tmp/out -+} -+ -+function add { -+ swift-ring-builder $1 add z$2-127.0.0.1:$3/$4_ 100.0 -+} -+ -+function rebalance { - swift-ring-builder $1 rebalance -+} -+ -+function build { - swift-ring-builder $1 - } - -@@ -12,8 +28,17 @@ - exit 1 - fi - --# Note that these port numbers must match the configured values for the --# various servers in their configuration files. 
--create account.builder 6012 $1 --create container.builder 6011 $1 --create object.builder 6010 $1 -+for builder_file in $builder_files -+do -+ create $builder_file -+ -+ zone=1 -+ for volname in $@ -+ do -+ add $builder_file $zone ${port[$builder_file]} $volname -+ zone=$(expr $zone + 1) -+ done -+ -+ rebalance $builder_file -+ build $builder_file -+done -diff -ru a/ufo/etc/fs.conf-gluster b/ufo/etc/fs.conf-gluster ---- a/ufo/etc/fs.conf-gluster 2012-12-07 12:24:00.000000000 -0500 -+++ b/ufo/etc/fs.conf-gluster 2013-04-29 15:16:22.752000000 -0400 -@@ -3,10 +3,6 @@ - # local host. - mount_ip = localhost - --# The GlusterFS server need not be local, a remote server can also be used --# by setting "remote_cluster = yes". --remote_cluster = no -- - # By default it is assumed the Gluster volumes can be accessed using other - # methods besides UFO (not object only), which disables a caching - # optimization in order to keep in sync with file system changes. -diff -ru a/ufo/gluster/swift/common/constraints.py b/ufo/gluster/swift/common/constraints.py ---- a/ufo/gluster/swift/common/constraints.py 2012-12-07 12:24:00.000000000 -0500 -+++ b/ufo/gluster/swift/common/constraints.py 2013-04-29 15:16:22.749000000 -0400 -@@ -16,7 +16,8 @@ - from webob.exc import HTTPBadRequest - - import swift.common.constraints --from gluster.swift.common import Glusterfs -+import swift.common.ring as _ring -+from gluster.swift.common import Glusterfs, ring - - - MAX_OBJECT_NAME_COMPONENT_LENGTH = swift.common.constraints.constraints_conf_int( -@@ -80,3 +81,9 @@ - - # Replace the original check mount with ours - swift.common.constraints.check_mount = gluster_check_mount -+ -+# Save the original Ring class -+__Ring = _ring.Ring -+ -+# Replace the original Ring class -+_ring.Ring = ring.Ring -diff -ru a/ufo/gluster/swift/common/Glusterfs.py b/ufo/gluster/swift/common/Glusterfs.py ---- a/ufo/gluster/swift/common/Glusterfs.py 2012-12-07 12:24:00.000000000 -0500 -+++ b/ufo/gluster/swift/common/Glusterfs.py 2013-04-29 15:16:22.753000000 -0400 -@@ -12,33 +12,35 @@ - # implied. - # See the License for the specific language governing permissions and - # limitations under the License.
-+ - import logging - import os, fcntl, time --from ConfigParser import ConfigParser --from swift.common.utils import TRUE_VALUES -+from ConfigParser import ConfigParser, NoSectionError, NoOptionError -+from swift.common.utils import TRUE_VALUES, search_tree - from gluster.swift.common.fs_utils import mkdirs - -- - # - # Read the fs.conf file once at startup (module load) - # - _fs_conf = ConfigParser() - MOUNT_IP = 'localhost' --REMOTE_CLUSTER = False - OBJECT_ONLY = False -+RUN_DIR='/var/run/swift' -+SWIFT_DIR = '/etc/swift' - if _fs_conf.read(os.path.join('/etc/swift', 'fs.conf')): - try: - MOUNT_IP = _fs_conf.get('DEFAULT', 'mount_ip', 'localhost') - except (NoSectionError, NoOptionError): - pass - try: -- REMOTE_CLUSTER = _fs_conf.get('DEFAULT', 'remote_cluster', False) in TRUE_VALUES -+ OBJECT_ONLY = _fs_conf.get('DEFAULT', 'object_only', "no") in TRUE_VALUES - except (NoSectionError, NoOptionError): - pass - try: -- OBJECT_ONLY = _fs_conf.get('DEFAULT', 'object_only', "no") in TRUE_VALUES -+ RUN_DIR = _fs_conf.get('DEFAULT', 'run_dir', '/var/run/swift') - except (NoSectionError, NoOptionError): - pass -+ - NAME = 'glusterfs' - - -@@ -60,7 +62,7 @@ - if drive == export: - break - else: -- logging.error('No export found in %r matching drive %s', el, drive) -+ logging.error('No export found in %r matching drive, %s', el, drive) - return False - - # NOTE: root is typically the default value of /mnt/gluster-object -@@ -68,13 +70,12 @@ - if not os.path.isdir(full_mount_path): - mkdirs(full_mount_path) - -- pid_dir = "/var/lib/glusterd/vols/%s/run/" % drive -- pid_file = os.path.join(pid_dir, 'swift.pid'); -+ lck_file = os.path.join(RUN_DIR, '%s.lock' %drive); - -- if not os.path.exists(pid_dir): -- mkdirs(pid_dir) -+ if not os.path.exists(RUN_DIR): -+ mkdirs(RUN_DIR) - -- fd = os.open(pid_file, os.O_CREAT|os.O_RDWR) -+ fd = os.open(lck_file, os.O_CREAT|os.O_RDWR) - with os.fdopen(fd, 'r+b') as f: - try: - fcntl.lockf(f, fcntl.LOCK_EX|fcntl.LOCK_NB) -@@ -100,19 +101,12 @@ - logging.error('Unable to unmount %s %s' % (full_mount_path, NAME)) - - def _get_export_list(): -- if REMOTE_CLUSTER: -- cmnd = 'gluster --remote-host=%s volume info' % MOUNT_IP -- else: -- cmnd = 'gluster volume info' -+ cmnd = 'gluster --remote-host=%s volume info' % MOUNT_IP - - export_list = [] - - if os.system(cmnd + ' >> /dev/null'): -- if REMOTE_CLUSTER: -- logging.error('Getting volume info failed for %s, make sure '\ -- 'gluster --remote-host=%s works', NAME, MOUNT_IP) -- else: -- logging.error('Getting volume info failed for %s', NAME) -+ logging.error('Getting volume info failed for %s', NAME) - else: - fp = os.popen(cmnd) - while True: -@@ -124,3 +118,20 @@ - export_list.append(item.split(':')[1].strip(' ')) - - return export_list -+ -+def get_mnt_point(vol_name, conf_dir=SWIFT_DIR, conf_file="object-server*"): -+ """Read the object-server's configuration file and return -+ the device value""" -+ -+ mnt_dir = '' -+ conf_files = search_tree(conf_dir, conf_file, '.conf') -+ if not conf_files: -+ raise Exception("Config file not found") -+ -+ _conf = ConfigParser() -+ if _conf.read(conf_files[0]): -+ try: -+ mnt_dir = _conf.get('DEFAULT', 'devices', '') -+ except (NoSectionError, NoOptionError): -+ raise -+ return os.path.join(mnt_dir, vol_name) -diff -ru a/ufo/gluster/swift/common/ring.py b/ufo/gluster/swift/common/ring.py ---- a/ufo/gluster/swift/common/ring.py 2013-04-30 08:21:55.948000000 -0400 -+++ b/ufo/gluster/swift/common/ring.py 2013-04-29 15:16:22.755000000 -0400 -@@ -0,0 +1,111 @@ -+# Copyright (c) 
2013 Red Hat, Inc. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); -+# you may not use this file except in compliance with the License. -+# You may obtain a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, -+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -+# implied. -+# See the License for the specific language governing permissions and -+# limitations under the License. -+ -+from ConfigParser import ConfigParser -+from swift.common.ring import ring -+from swift.common.utils import search_tree -+from gluster.swift.common.Glusterfs import SWIFT_DIR -+ -+reseller_prefix = "AUTH_" -+conf_files = search_tree(SWIFT_DIR, "proxy-server*", 'conf') -+if conf_files: -+ conf_file = conf_files[0] -+ -+_conf = ConfigParser() -+if conf_files and _conf.read(conf_file): -+ if _conf.defaults().get("reseller_prefix", None): -+ reseller_prefix = _conf.defaults().get("reseller_prefix") -+ else: -+ for key, value in _conf._sections.items(): -+ if value.get("reseller_prefix", None): -+ reseller_prefix = value["reseller_prefix"] -+ break -+ -+if not reseller_prefix.endswith('_'): -+ reseller_prefix = reseller_prefix + '_' -+ -+class Ring(ring.Ring): -+ def _get_part_nodes(self, part): -+ seen_ids = set() -+ nodes = [dev for dev in self._devs \ -+ if dev['device'] == self.acc_name \ -+ and not (dev['id'] in seen_ids \ -+ or seen_ids.add(dev['id']))] -+ if not nodes: -+ nodes = [self.false_node] -+ return nodes -+ -+ def get_part_nodes(self, part): -+ """ -+ Get the nodes that are responsible for the partition. If one -+ node is responsible for more than one replica of the same -+ partition, it will only appear in the output once. -+ -+ :param part: partition to get nodes for -+ :returns: list of node dicts -+ -+ See :func:`get_nodes` for a description of the node dicts. -+ """ -+ return self._get_part_nodes(part) -+ -+ def get_nodes(self, account, container=None, obj=None): -+ """ -+ Get the partition and nodes for an account/container/object. -+ If a node is responsible for more than one replica, it will -+ only appear in the output once. 
-+ :param account: account name -+ :param container: container name -+ :param obj: object name -+ :returns: a tuple of (partition, list of node dicts) -+ -+ Each node dict will have at least the following keys: -+ ====== =============================================================== -+ id unique integer identifier amongst devices -+ weight a float of the relative weight of this device as compared to -+ others; this indicates how many partitions the builder will try -+ to assign to this device -+ zone integer indicating which zone the device is in; a given -+ partition will not be assigned to multiple devices within the -+ same zone -+ ip the ip address of the device -+ port the tcp port of the device -+ device the device's name on disk (sdb1, for example) -+ meta general use 'extra' field; for example: the online date, the -+ hardware description -+ ====== =============================================================== -+ """ -+ self.false_node = {'zone': 1, 'weight': 100.0, 'ip': '127.0.0.1', 'id': 0, \ -+ 'meta': '', 'device': 'volume_not_in_ring', \ -+ 'port': 6012} -+ if account.startswith(reseller_prefix): -+ self.acc_name = account.replace(reseller_prefix, '', 1) -+ else: -+ self.acc_name = account -+ -+ part = 0 -+ return part, self._get_part_nodes(part) -+ -+ -+ def get_more_nodes(self, part): -+ """ -+ Generator to get extra nodes for a partition for hinted handoff. -+ -+ :param part: partition to get handoff nodes for -+ :returns: generator of node dicts -+ -+ See :func:`get_nodes` for a description of the node dicts. -+ Should never be called in the swift UFO environment, so yield nothing -+ """ -+ yield self.false_node -diff -ru a/ufo/test/unit/common/test_ring.py b/ufo/test/unit/common/test_ring.py ---- a/ufo/test/unit/common/test_ring.py 2013-04-30 08:22:08.975000000 -0400 -+++ b/ufo/test/unit/common/test_ring.py 2013-04-29 15:16:22.756000000 -0400 -@@ -0,0 +1,81 @@ -+# Copyright (c) 2013 Red Hat, Inc. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); -+# you may not use this file except in compliance with the License. -+# You may obtain a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, -+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -+# implied. -+# See the License for the specific language governing permissions and -+# limitations under the License. 
-+ -+import unittest -+import gluster.swift.common.constraints -+from gluster.swift.common.ring import * -+from gluster.swift.common.Glusterfs import SWIFT_DIR -+ -+def _mock_ring_data(): -+ return [{'zone': 1, 'weight': 100.0, 'ip': '127.0.0.1', 'port': 6012, \ -+ 'meta': '', 'device': 'test', 'id': 0}, -+ {'zone': 2, 'weight': 100.0, 'ip': '127.0.0.1', 'id': 1, \ -+ 'meta': '', 'device': 'iops', 'port': 6012}] -+ -+class TestRing(unittest.TestCase): -+ """ Tests for common.utils """ -+ -+ def setUp(self): -+ self.ring = Ring(SWIFT_DIR, ring_name='object') -+ -+ def test_first_device(self): -+ try: -+ __devs = self.ring._devs -+ self.ring._devs = _mock_ring_data() -+ -+ part, node = self.ring.get_nodes('test') -+ assert node[0]['device'] == 'test' -+ node = self.ring.get_part_nodes(0) -+ assert node[0]['device'] == 'test' -+ for node in self.ring.get_more_nodes(0): -+ assert node['device'] == 'volume_not_in_ring' -+ finally: -+ self.ring._devs = __devs -+ -+ def test_invalid_device(self): -+ try: -+ __devs = self.ring._devs -+ self.ring._devs = _mock_ring_data() -+ -+ part, node = self.ring.get_nodes('test2') -+ assert node[0]['device'] == 'volume_not_in_ring' -+ node = self.ring.get_part_nodes(0) -+ assert node[0]['device'] == 'volume_not_in_ring' -+ finally: -+ self.ring._devs = __devs -+ -+ def test_second_device(self): -+ try: -+ __devs = self.ring._devs -+ self.ring._devs = _mock_ring_data() -+ -+ part, node = self.ring.get_nodes('iops') -+ assert node[0]['device'] == 'iops' -+ node = self.ring.get_part_nodes(0) -+ assert node[0]['device'] == 'iops' -+ for node in self.ring.get_more_nodes(0): -+ assert node['device'] == 'volume_not_in_ring' -+ finally: -+ self.ring._devs = __devs -+ -+ def test_second_device_with_reseller_prefix(self): -+ try: -+ __devs = self.ring._devs -+ self.ring._devs = _mock_ring_data() -+ -+ part, node = self.ring.get_nodes('AUTH_iops') -+ assert node[0]['device'] == 'iops' -+ finally: -+ self.ring._devs = __devs diff --git a/glusterfs-3.3.1.ufo.gluster.swift.common.DiskFile-1.7.4.patch b/glusterfs-3.3.1.ufo.gluster.swift.common.DiskFile-1.7.4.patch deleted file mode 100644 index 24af87f..0000000 --- a/glusterfs-3.3.1.ufo.gluster.swift.common.DiskFile-1.7.4.patch +++ /dev/null @@ -1,12 +0,0 @@ ---- ufo/gluster/swift/common/DiskFile.py.orig 2012-12-21 11:40:12.763297073 -0500 -+++ ufo/gluster/swift/common/DiskFile.py 2013-01-09 16:44:02.607838685 -0500 -@@ -55,7 +55,8 @@ - - def __init__(self, path, device, partition, account, container, obj, - logger, keep_data_fp=False, disk_chunk_size=65536, -- uid=DEFAULT_UID, gid=DEFAULT_GID): -+ uid=DEFAULT_UID, gid=DEFAULT_GID, iter_hook=None): -+ self.iter_hook = iter_hook - self.disk_chunk_size = disk_chunk_size - #Don't support obj_name ending/begining with '/', like /a, a/, /a/b/ etc - obj = obj.strip('/') diff --git a/glusterfs-3.3.2.libglusterfs.Makefile.patch b/glusterfs-3.3.2.libglusterfs.Makefile.patch deleted file mode 100644 index f539b91..0000000 --- a/glusterfs-3.3.2.libglusterfs.Makefile.patch +++ /dev/null @@ -1,24 +0,0 @@ ---- libglusterfs/src/Makefile.am.orig 2013-07-12 13:50:20.000000000 -0400 -+++ libglusterfs/src/Makefile.am 2013-07-12 20:10:12.156000000 -0400 -@@ -48,7 +48,8 @@ - $(LEX) -t $(srcdir)/graph.l > $@ - - y.tab.h: graph.y -- $(YACC) -d $(srcdir)/graph.y -+ $(YACC) -d -b foo $(srcdir)/graph.y -+ mv foo.tab.h y.tab.h && mv foo.tab.c y.tab.c - - CLEANFILES = graph.lex.c y.tab.c y.tab.h - CONFIG_CLEAN_FILES = $(CONTRIB_BUILDDIR)/uuid/uuid_types.h ---- libglusterfs/src/Makefile.in.orig 
2013-07-12 20:10:12.157000000 -0400 -+++ libglusterfs/src/Makefile.in 2013-07-12 20:12:43.022000000 -0400 -@@ -1101,7 +1101,8 @@ - $(LEX) -t $(srcdir)/graph.l > $@ - - y.tab.h: graph.y -- $(YACC) -d $(srcdir)/graph.y -+ $(YACC) -d -b foo $(srcdir)/graph.y -+ mv foo.tab.h y.tab.h && mv foo.tab.c y.tab.c - - # Tell versions [3.59,3.63) of GNU make to not export all variables. - # Otherwise a system limit (for SysV at least) may be exceeded. diff --git a/glusterfs-3.4.0.swift.egginfo-grizzly.patch b/glusterfs-3.4.0.swift.egginfo-grizzly.patch deleted file mode 100644 index ec9661b..0000000 --- a/glusterfs-3.4.0.swift.egginfo-grizzly.patch +++ /dev/null @@ -1,9 +0,0 @@ ---- a/tools/pip-requires 2013-04-05 10:55:21.988000000 -0400 -+++ b/tools/pip-requires 2013-04-05 10:55:28.649000000 -0400 -@@ -3,5 +3,5 @@ - netifaces>=0.5 - pastedeploy>=1.3.3 - simplejson>=2.0.9 --xattr>=0.4 -+pyxattr>=0.4 - python-swiftclient diff --git a/glusterfs-3.4.1.add.base-port.config.option.patch b/glusterfs-3.4.1.add.base-port.config.option.patch deleted file mode 100644 index 4282a03..0000000 --- a/glusterfs-3.4.1.add.base-port.config.option.patch +++ /dev/null @@ -1,156 +0,0 @@ -From http://review.gluster.org/6147 -From b1d288f6d24a3fe439730c2f4e28bcc7a9ae7ecd Mon Sep 17 00:00:00 2001 -From: Kaleb S. KEITHLEY -Date: Fri, 25 Oct 2013 09:05:18 -0400 -Subject: [PATCH] mgmt/glusterd: add option to specify a different base-port - -This is (arguably) a hack to work around a bug in libvirt which is not -well behaved wrt using TCP ports in the unreserved space between -49152-65535. (See RFC 6335) - -Normally glusterd starts and binds to the first available port in range, -usually 49152. libvirt's live migration also tries to use ports in this -range, but has no fallback to use (an)other port(s) when the one it wants -is already in use. - -libvirt cannot fix this in time for their impending release. This is -submitted to gerrit to provide some minimal visibility upstream to justify -hacking this change (as a temporary patch) into the glusterfs-3.4.1 RPMs -for Fedora 18-21 until libvirt can fix their implementation. - -Change-Id: Ie77b00ac60730d1e48907dd0b38ddae92f3ac345 -Signed-off-by: Kaleb S.
KEITHLEY ---- - doc/glusterd.vol | 1 + - xlators/mgmt/glusterd/src/glusterd-pmap.c | 10 +++++----- - xlators/mgmt/glusterd/src/glusterd-store.c | 5 ++--- - xlators/mgmt/glusterd/src/glusterd.c | 12 ++++++++++-- - xlators/mgmt/glusterd/src/glusterd.h | 1 + - 5 files changed, 19 insertions(+), 10 deletions(-) - -diff --git a/doc/glusterd.vol b/doc/glusterd.vol -index de17d8f..9bac52a 100644 ---- a/doc/glusterd.vol -+++ b/doc/glusterd.vol -@@ -5,4 +5,5 @@ volume management - option transport.socket.keepalive-time 10 - option transport.socket.keepalive-interval 2 - option transport.socket.read-fail-log off -+# option base-port 49152 - end-volume -diff --git a/xlators/mgmt/glusterd/src/glusterd-pmap.c b/xlators/mgmt/glusterd/src/glusterd-pmap.c -index aab6744..7dec27c 100644 ---- a/xlators/mgmt/glusterd/src/glusterd-pmap.c -+++ b/xlators/mgmt/glusterd/src/glusterd-pmap.c -@@ -52,8 +52,8 @@ pmap_port_isfree (int port) - } - - --struct pmap_registry * --pmap_registry_new (void) -+static struct pmap_registry * -+pmap_registry_new (xlator_t *this) - { - struct pmap_registry *pmap = NULL; - int i = 0; -@@ -69,8 +69,8 @@ pmap_registry_new (void) - pmap->ports[i].type = GF_PMAP_PORT_FOREIGN; - } - -- pmap->base_port = GF_IANA_PRIV_PORTS_START; -- pmap->last_alloc = GF_IANA_PRIV_PORTS_START; -+ pmap->base_port = ((glusterd_conf_t *)(this->private))->base_port; -+ pmap->last_alloc = ((glusterd_conf_t *)(this->private))->base_port; - - return pmap; - } -@@ -86,7 +86,7 @@ pmap_registry_get (xlator_t *this) - - pmap = priv->pmap; - if (!pmap) { -- pmap = pmap_registry_new (); -+ pmap = pmap_registry_new (this); - if (!pmap) - return NULL; - priv->pmap = pmap; -diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c -index ae0c4e8..1790c5a 100644 ---- a/xlators/mgmt/glusterd/src/glusterd-store.c -+++ b/xlators/mgmt/glusterd/src/glusterd-store.c -@@ -1484,7 +1484,7 @@ glusterd_store_retrieve_bricks (glusterd_volinfo_t *volinfo) - strlen (GLUSTERD_STORE_KEY_BRICK_PORT))) { - gf_string2int (value, &brickinfo->port); - -- if (brickinfo->port < GF_IANA_PRIV_PORTS_START){ -+ if (brickinfo->port < priv->base_port){ - /* This is required to adhere to the - IANA standards */ - brickinfo->port = 0; -@@ -1500,8 +1500,7 @@ glusterd_store_retrieve_bricks (glusterd_volinfo_t *volinfo) - strlen (GLUSTERD_STORE_KEY_BRICK_RDMA_PORT))) { - gf_string2int (value, &brickinfo->rdma_port); - -- if (brickinfo->rdma_port < -- GF_IANA_PRIV_PORTS_START){ -+ if (brickinfo->rdma_port < priv->base_port) { - /* This is required to adhere to the - IANA standards */ - brickinfo->rdma_port = 0; -diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c -index 785e67a..62c4a57 100644 ---- a/xlators/mgmt/glusterd/src/glusterd.c -+++ b/xlators/mgmt/glusterd/src/glusterd.c -@@ -916,7 +916,6 @@ init (xlator_t *this) - int first_time = 0; - char *mountbroker_root = NULL; - int i = 0; -- - #ifdef DEBUG - char *valgrind_str = NULL; - #endif -@@ -1101,6 +1100,12 @@ init (xlator_t *this) - if (ret) - goto out; - -+ conf->base_port = GF_IANA_PRIV_PORTS_START; -+ if (dict_get_uint32(this->options, "base-port", &conf->base_port) == 0) { -+ gf_log (this->name, GF_LOG_INFO, -+ "base-port override: %d", conf->base_port); -+ } -+ - /* Set option to run bricks on valgrind if enabled in glusterd.vol */ - #ifdef DEBUG - conf->valgrind = _gf_false; -@@ -1116,7 +1121,6 @@ init (xlator_t *this) - } - } - #endif -- - this->private = conf; - (void) glusterd_nodesvc_set_online_status 
("glustershd", _gf_false); - -@@ -1309,5 +1313,9 @@ struct volume_options options[] = { - .description = "Sets the quorum percentage for the trusted " - "storage pool." - }, -+ { .key = {"base-port"}, -+ .type = GF_OPTION_TYPE_INT, -+ .description = "Sets the base port for portmap query" -+ }, - { .key = {NULL} }, - }; -diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h -index 0033125..1054574 100644 ---- a/xlators/mgmt/glusterd/src/glusterd.h -+++ b/xlators/mgmt/glusterd/src/glusterd.h -@@ -148,6 +148,7 @@ typedef struct { - dict_t *opts; - synclock_t big_lock; - gf_boolean_t restart_done; -+ uint32_t base_port; - } glusterd_conf_t; - - --- -1.7.1 - diff --git a/glusterfs-3.4.3.xlator.nfs.server.nlm4.c.patch b/glusterfs-3.4.3.xlator.nfs.server.nlm4.c.patch deleted file mode 100644 index c49eed5..0000000 --- a/glusterfs-3.4.3.xlator.nfs.server.nlm4.c.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/xlators/nfs/server/src/nlm4.c b/xlators/nfs/server/src/nlm4.c -index e040aa5..eca107d 100644 ---- a/xlators/nfs/server/src/nlm4.c -+++ b/xlators/nfs/server/src/nlm4.c -@@ -941,7 +941,7 @@ nlm4_establish_callback (void *csarg) - case AF_INET: - inet_ntop (AF_INET, &sock_union.sin.sin_addr, peerip, - INET6_ADDRSTRLEN+1); -- inet_ntop (AF_INET, &(((struct sockaddr_in *)&cs->req->trans->myinfo.sockaddr)->sin_addr), -+ inet_ntop (AF_INET, &(((struct sockaddr_in *)&cs->trans->myinfo.sockaddr)->sin_addr), - myip, INET6_ADDRSTRLEN + 1); - - break; diff --git a/glusterfs-3.8.0.api.glfs-fops.patch b/glusterfs-3.8.0.api.glfs-fops.patch deleted file mode 100644 index b903562..0000000 --- a/glusterfs-3.8.0.api.glfs-fops.patch +++ /dev/null @@ -1,23 +0,0 @@ ---- glusterfs-3.8.0/api/src/glfs-fops.c.orig 2016-06-27 08:38:37.523161014 -0400 -+++ glusterfs-3.8.0/api/src/glfs-fops.c 2016-06-27 08:40:06.591161014 -0400 -@@ -803,7 +803,6 @@ - - GF_VALIDATE_OR_GOTO ("gfapi", frame, inval); - GF_VALIDATE_OR_GOTO ("gfapi", cookie, inval); -- GF_VALIDATE_OR_GOTO ("gfapi", iovec, inval); - - gio = frame->local; - frame->local = NULL; -@@ -817,6 +816,12 @@ - if (op_ret <= 0) { - goto out; - } else if (gio->op == GF_FOP_READ) { -+ if (!iovec) { -+ op_ret = -1; -+ op_errno = EINVAL; -+ goto out; -+ } -+ - op_ret = iov_copy (gio->iov, gio->count, iovec, count); - glfd->offset = gio->offset + op_ret; - } else if (gio->op == GF_FOP_WRITE) { diff --git a/glusterfs-3.8.0rc1.VERSION.patch b/glusterfs-3.8.0rc1.VERSION.patch deleted file mode 100644 index fb800c7..0000000 --- a/glusterfs-3.8.0rc1.VERSION.patch +++ /dev/null @@ -1,6 +0,0 @@ ---- glusterfs-3.8rc1/VERSION.orig 2016-05-16 10:59:02.691769734 -0400 -+++ glusterfs-3.8rc1/VERSION 2016-05-16 11:01:16.408769734 -0400 -@@ -1 +1 @@ --v3.8rc1-0 -\ No newline at end of file -+v3.8.0rc1 diff --git a/glusterfs-3.8.0rc2.VERSION.patch b/glusterfs-3.8.0rc2.VERSION.patch deleted file mode 100644 index 456d617..0000000 --- a/glusterfs-3.8.0rc2.VERSION.patch +++ /dev/null @@ -1,6 +0,0 @@ ---- glusterfs-3.8rc1/VERSION.orig 2016-05-16 10:59:02.691769734 -0400 -+++ glusterfs-3.8rc1/VERSION 2016-05-16 11:01:16.408769734 -0400 -@@ -1 +1 @@ --v3.8rc2-0 -\ No newline at end of file -+v3.8.0rc2 diff --git a/glusterfs-3.8.5.xlators.crypt.patch b/glusterfs-3.8.5.xlators.crypt.patch deleted file mode 100644 index abf3663..0000000 --- a/glusterfs-3.8.5.xlators.crypt.patch +++ /dev/null @@ -1,48 +0,0 @@ ---- glusterfs-3.8.5/xlators/encryption/crypt/src/keys.c.orig 2016-10-13 05:44:08.218131827 -0400 -+++ glusterfs-3.8.5/xlators/encryption/crypt/src/keys.c 
2016-10-13 07:56:39.783730111 -0400 -@@ -113,29 +113,41 @@ - static void kderive_update(struct kderive_context *ctx) - { - uint32_t i; -+#if (OPENSSL_VERSION_NUMBER < 0x1010002f) - HMAC_CTX hctx; -+#endif -+ HMAC_CTX *phctx = NULL; - unsigned char *pos = ctx->out; - uint32_t *p_iter = (uint32_t *)ctx->fid; - uint32_t num_iters = ctx->out_len / PRF_OUTPUT_SIZE; - - check_prf_iters(num_iters); - -+#if (OPENSSL_VERSION_NUMBER < 0x1010002f) - HMAC_CTX_init(&hctx); -+ phctx = &hctx; -+#else -+ phctx = HMAC_CTX_new(); -+#endif - for (i = 0; i < num_iters; i++) { - /* - * update the iteration number in the fid - */ - *p_iter = htobe32(i); -- HMAC_Init_ex(&hctx, -+ HMAC_Init_ex(phctx, - ctx->pkey, ctx->pkey_len >> 3, - EVP_sha256(), - NULL); -- HMAC_Update(&hctx, ctx->fid, ctx->fid_len); -- HMAC_Final(&hctx, pos, NULL); -+ HMAC_Update(phctx, ctx->fid, ctx->fid_len); -+ HMAC_Final(phctx, pos, NULL); - - pos += PRF_OUTPUT_SIZE; - } -- HMAC_CTX_cleanup(&hctx); -+#if (OPENSSL_VERSION_NUMBER < 0x1010002f) -+ HMAC_CTX_cleanup(phctx); -+#else -+ HMAC_CTX_free(phctx); -+#endif - } - - static void kderive_final(struct kderive_context *ctx, unsigned char *child) diff --git a/glusterfs-3.9.0rc2.xlators.crypt.patch b/glusterfs-3.9.0rc2.xlators.crypt.patch deleted file mode 100644 index 4429f9f..0000000 --- a/glusterfs-3.9.0rc2.xlators.crypt.patch +++ /dev/null @@ -1,48 +0,0 @@ ---- glusterfs-3.9.0rc2/xlators/encryption/crypt/src/keys.c.orig 2016-10-26 10:28:14.832787325 -0400 -+++ glusterfs-3.9.0rc2/xlators/encryption/crypt/src/keys.c 2016-10-26 12:04:34.175730111 -0400 -@@ -113,29 +113,41 @@ - static void kderive_update(struct kderive_context *ctx) - { - uint32_t i; -+#if (OPENSSL_VERSION_NUMBER < 0x1010002f) - HMAC_CTX hctx; -+#endif -+ HMAC_CTX *phctx = NULL; - unsigned char *pos = ctx->out; - uint32_t *p_iter = (uint32_t *)ctx->fid; - uint32_t num_iters = ctx->out_len / PRF_OUTPUT_SIZE; - - check_prf_iters(num_iters); - -+#if (OPENSSL_VERSION_NUMBER < 0x1010002f) - HMAC_CTX_init(&hctx); -+ phctx = &hctx; -+#else -+ phctx = HMAC_CTX_new(); -+#endif - for (i = 0; i < num_iters; i++) { - /* - * update the iteration number in the fid - */ - *p_iter = htobe32(i); -- HMAC_Init_ex(&hctx, -+ HMAC_Init_ex(phctx, - ctx->pkey, ctx->pkey_len >> 3, - EVP_sha256(), - NULL); -- HMAC_Update(&hctx, ctx->fid, ctx->fid_len); -- HMAC_Final(&hctx, pos, NULL); -+ HMAC_Update(phctx, ctx->fid, ctx->fid_len); -+ HMAC_Final(phctx, pos, NULL); - - pos += PRF_OUTPUT_SIZE; - } -- HMAC_CTX_cleanup(&hctx); -+#if (OPENSSL_VERSION_NUMBER < 0x1010002f) -+ HMAC_CTX_cleanup(phctx); -+#else -+ HMAC_CTX_free(phctx); -+#endif - } - - static void kderive_final(struct kderive_context *ctx, unsigned char *child) diff --git a/openstack-swift-docmod.patch b/openstack-swift-docmod.patch deleted file mode 100644 index 03f193c..0000000 --- a/openstack-swift-docmod.patch +++ /dev/null @@ -1,14 +0,0 @@ ---- swift-1.4.4/doc/source/conf.py.orig 2011-11-24 08:59:50.000000000 -0500 -+++ swift-1.4.4/doc/source/conf.py 2012-01-04 22:35:55.571492761 -0500 -@@ -31,7 +31,10 @@ - # If extensions (or modules to document with autodoc) are in another directory, - # add these directories to sys.path here. If the directory is relative to the - # documentation root, use os.path.abspath to make it absolute, like shown here. 
--sys.path.append([os.path.abspath('../swift'), os.path.abspath('..'), os.path.abspath('../bin')]) -+sys.path = [os.path.abspath('../../swift'), -+ os.path.abspath('../..'), -+ os.path.abspath('../../bin') -+ ] + sys.path - - # -- General configuration ----------------------------------------------------- - diff --git a/openstack-swift-newdeps.patch b/openstack-swift-newdeps.patch deleted file mode 100644 index 3b4222a..0000000 --- a/openstack-swift-newdeps.patch +++ /dev/null @@ -1,36 +0,0 @@ -diff -ru swift-1.4.4-ORIG/swift/__init__.py swift-1.4.4/swift/__init__.py ---- swift-1.4.4-ORIG/swift/__init__.py 2011-11-24 14:59:50.000000000 +0100 -+++ swift-1.4.4/swift/__init__.py 2012-01-04 00:09:10.122030579 +0100 -@@ -1,3 +1,32 @@ -+import sys -+import pkg_resources -+ -+# If there is a conflicting non egg module, -+# i.e. an older standard system module installed, -+# then replace it with this requirement -+def replace_dist(requirement): -+ try: -+ return pkg_resources.require(requirement) -+ except pkg_resources.VersionConflict: -+ e = sys.exc_info()[1] -+ dist=e.args[0] -+ req=e.args[1] -+ if dist.key == req.key and not dist.location.endswith('.egg'): -+ del pkg_resources.working_set.by_key[dist.key] -+ # We assume there is no need to adjust sys.path -+ # and the associated pkg_resources.working_set.entries -+ return pkg_resources.require(requirement) -+ -+replace_dist("WebOb >= 1.0") -+ -+replace_dist("PasteDeploy >= 1.5.0") -+# This hack is needed because replace_dist() results in -+# the standard paste module path being at the start of __path__. -+# TODO: See can we get pkg_resources to do the right thing directly -+import paste -+paste.__path__.insert(0, paste.__path__.pop(-1)) -+ -+ - import gettext - - diff --git a/openstack-swift-nonet.patch b/openstack-swift-nonet.patch deleted file mode 100644 index af8cad4..0000000 --- a/openstack-swift-nonet.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- swift-1.4.4/doc/source/conf.py.orig 2012-01-04 22:40:43.190300958 -0500 -+++ swift-1.4.4/doc/source/conf.py 2012-01-04 22:41:26.980492712 -0500 -@@ -40,7 +40,7 @@ - - # Add any Sphinx extension module names here, as strings. They can be extensions - # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. --extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig'] -+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig'] - todo_include_todos = True - - # Add any paths that contain templates here, relative to this directory.
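
With all of these stale patch files removed, nothing in the packaging should still reference them. One way to confirm that is to cross-check the spec's Patch: entries against the working tree. The sketch below is a hypothetical helper, not part of this commit: it assumes a spec file named glusterfs.spec sitting alongside the patches (the spec is not shown here), and uses only standard RPM spec syntax and POSIX tools:

    # Report any Patch: entry in the spec whose file is missing from the tree.
    grep -i '^Patch[0-9]*:' glusterfs.spec | awk '{print $2}' |
    while read -r p; do
        [ -e "$p" ] || echo "spec still references missing file: $p"
    done

An empty output means every patch the spec applies is still present, so a removal commit like this one leaves the package buildable.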