diff --git a/0017-gcc-12-omnibus.patch b/0017-gcc-12-omnibus.patch index 4440750..d725dc4 100644 --- a/0017-gcc-12-omnibus.patch +++ b/0017-gcc-12-omnibus.patch @@ -1,13 +1,3 @@ ---- ceph-16.2.7/src/include/buffer.h.orig 2022-01-17 12:17:19.193356237 -0500 -+++ ceph-16.2.7/src/include/buffer.h 2022-01-17 12:17:58.599639592 -0500 -@@ -38,6 +38,7 @@ - # include - #endif - -+#include - #include - #include - #include --- ceph-16.2.7/src/common/LogEntry.cc.orig 2022-01-17 13:52:10.799134159 -0500 +++ ceph-16.2.7/src/common/LogEntry.cc 2022-01-17 13:52:47.244469274 -0500 @@ -183,7 +183,7 @@ @@ -21,8 +11,8 @@ --- ceph-16.2.7/src/test/librados/tier_cxx.cc.orig 2022-01-19 09:30:47.209459506 -0500 +++ ceph-16.2.7/src/test/librados/tier_cxx.cc 2022-01-19 10:02:47.783240298 -0500 -@@ -114,7 +114,7 @@ - #include "rgw/rgw_common.h" +@@ -120,7 +120,7 @@ + } void check_fp_oid_refcount(librados::IoCtx& ioctx, std::string foid, uint64_t count, - std::string fp_algo = NULL) @@ -30,7 +20,7 @@ { bufferlist t; int size = foid.length(); -@@ -142,7 +142,7 @@ +@@ -148,7 +148,7 @@ ASSERT_LE(count, refs.count()); } @@ -39,17 +29,6 @@ { if (fp_algo == "sha1") { unsigned char fingerprint[CEPH_CRYPTO_SHA1_DIGESTSIZE + 1]; ---- ceph-16.2.7/src/test/test_trans.cc.orig 2022-01-19 13:24:33.460008897 -0500 -+++ ceph-16.2.7/src/test/test_trans.cc 2022-01-19 13:24:58.211554005 -0500 -@@ -51,7 +51,7 @@ - cout << "#dev " << filename << std::endl; - cout << "#mb " << mb << std::endl; - -- ObjectStore *fs = new FileStore(cct.get(), filename, NULL); -+ ObjectStore *fs = new FileStore(cct.get(), filename, ""); - if (fs->mount() < 0) { - cout << "mount failed" << std::endl; - return -1; --- ceph-17.0.0-10335-gfd206722/src/s3select/include/s3select_functions.h.orig 2022-02-11 17:21:40.268627997 -0500 +++ ceph-17.0.0-10335-gfd206722/src/s3select/include/s3select_functions.h 2022-02-11 17:21:57.155325437 -0500 @@ -466,7 +466,7 @@ diff --git a/0018-src-rgw-store-dbstore-CMakeLists.txt.patch 
b/0018-src-rgw-store-dbstore-CMakeLists.txt.patch index 1adc56a..874bbcd 100644 --- a/0018-src-rgw-store-dbstore-CMakeLists.txt.patch +++ b/0018-src-rgw-store-dbstore-CMakeLists.txt.patch @@ -1,24 +1,15 @@ ---- ceph-17.1.0/src/rgw/store/dbstore/sqlite/CMakeLists.txt.orig 2022-03-01 08:19:04.974902872 -0500 -+++ ceph-17.1.0/src/rgw/store/dbstore/sqlite/CMakeLists.txt 2022-03-11 07:55:16.236261471 -0500 -@@ -12,5 +12,5 @@ - set(SQLITE_COMPILE_FLAGS "-DSQLITE_THREADSAFE=1") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SQLITE_COMPILE_FLAGS}") - --add_library(sqlite_db ${sqlite_db_srcs}) -+add_library(sqlite_db STATIC ${sqlite_db_srcs}) - target_link_libraries(sqlite_db sqlite3 dbstore_lib rgw_common) ---- ceph-17.1.0/src/rgw/store/dbstore/CMakeLists.txt.orig 2022-02-28 14:11:49.987077811 -0500 -+++ ceph-17.1.0/src/rgw/store/dbstore/CMakeLists.txt 2022-03-11 08:40:13.409682698 -0500 -@@ -16,7 +16,7 @@ +--- ceph-18.0.0-3078-gc4847bf8/src/rgw/driver/dbstore/CMakeLists.txt.orig 2023-05-10 08:23:50.000000000 -0400 ++++ ceph-18.0.0-3078-gc4847bf8/src/rgw/driver/dbstore/CMakeLists.txt 2023-05-11 08:21:13.794152904 -0400 +@@ -24,7 +24,7 @@ dbstore_mgr.cc ) -add_library(dbstore_lib ${dbstore_srcs}) +add_library(dbstore_lib STATIC ${dbstore_srcs}) - target_include_directories(dbstore_lib PUBLIC "${CMAKE_SOURCE_DIR}/src/fmt/include") - target_include_directories(dbstore_lib PUBLIC "${CMAKE_SOURCE_DIR}/src/rgw") - set(link_targets spawn) -@@ -38,6 +38,7 @@ + target_include_directories(dbstore_lib + PUBLIC "${CMAKE_SOURCE_DIR}/src/rgw" + PUBLIC "${CMAKE_SOURCE_DIR}/src/rgw/store/rados" +@@ -49,6 +49,7 @@ # add pthread library set (CMAKE_LINK_LIBRARIES ${CMAKE_LINK_LIBRARIES} pthread) @@ -26,12 +17,3 @@ find_package(gtest QUIET) if(WITH_TESTS) -@@ -47,7 +48,7 @@ - endif() - - include_directories(${CMAKE_INCLUDE_DIR}) --add_library(dbstore ${dbstore_mgr_srcs}) -+add_library(dbstore STATIC ${dbstore_mgr_srcs}) - target_link_libraries(dbstore ${CMAKE_LINK_LIBRARIES}) - - # testing 
purpose diff --git a/0019-cmake-modules-CheckCxxAtomic.cmake.patch b/0019-cmake-modules-CheckCxxAtomic.cmake.patch deleted file mode 100644 index f353e3c..0000000 --- a/0019-cmake-modules-CheckCxxAtomic.cmake.patch +++ /dev/null @@ -1,43 +0,0 @@ ---- ceph-17.2.0-359-gb2fe9ec8/cmake/modules/CheckCxxAtomic.cmake.orig 2022-06-03 08:45:32.341075140 -0400 -+++ ceph-17.2.0-359-gb2fe9ec8/cmake/modules/CheckCxxAtomic.cmake 2022-06-03 08:46:47.195775813 -0400 -@@ -10,8 +10,9 @@ - check_cxx_source_compiles(" - #include - #include -+#include - --#if defined(__s390x__) || defined(__mips__) -+#if defined(__SIZEOF_INT128__) - // Boost needs 16-byte atomics for tagged pointers. - // These are implemented via inline instructions on the platform - // if 16-byte alignment can be proven, and are delegated to libatomic -@@ -21,13 +22,27 @@ - // We specifically test access via an otherwise unknown pointer here - // to ensure we get the most complex case. If this access can be - // done without libatomic, then all accesses can be done. 
--bool atomic16(std::atomic *ptr) -+struct tagged_ptr { -+ int* ptr; -+ std::size_t tag; -+}; -+ -+void atomic16(std::atomic *ptr) __attribute__ ((used)); -+void atomic16(std::atomic *ptr) - { -- return *ptr != 0; -+ tagged_ptr p{nullptr, 1}; -+ ptr->store(p); -+ tagged_ptr f = ptr->load(); -+ tagged_ptr new_tag{nullptr, 0}; -+ ptr->compare_exchange_strong(f, new_tag); - } - #endif - - int main() { -+#if defined(__SIZEOF_INT128__) -+ std::atomic ptr; -+ atomic16(&ptr); -+#endif - std::atomic w1; - std::atomic w2; - std::atomic w4; diff --git a/0021-cephfs-shell.patch b/0021-cephfs-shell.patch deleted file mode 100644 index f85a91e..0000000 --- a/0021-cephfs-shell.patch +++ /dev/null @@ -1,1756 +0,0 @@ ---- ceph-17.2.1/src/tools/cephfs/CMakeLists.txt.orig 2022-07-05 19:26:04.629170597 -0400 -+++ ceph-17.2.1/src/tools/cephfs/CMakeLists.txt 2022-07-05 19:26:40.710580427 -0400 -@@ -49,12 +49,7 @@ - - option(WITH_CEPHFS_SHELL "install cephfs-shell" OFF) - if(WITH_CEPHFS_SHELL) -- include(Distutils) -- distutils_install_module(cephfs-shell) -- if(WITH_TESTS) -- include(AddCephTest) -- add_tox_test(cephfs-shell) -- endif() -+ add_subdirectory(shell) - endif() - - option(WITH_CEPHFS_TOP "install cephfs-top utility" ON) ---- /dev/null 2022-06-30 09:45:32.996000000 -0400 -+++ ceph-17.2.1/src/tools/cephfs/shell/CMakeLists.txt 2022-07-05 19:27:58.983300150 -0400 -@@ -0,0 +1,7 @@ -+include(Distutils) -+distutils_install_module(cephfs-shell) -+ -+if(WITH_TESTS) -+ include(AddCephTest) -+ add_tox_test(cephfs-shell) -+endif() ---- /dev/null 2022-06-30 09:45:32.996000000 -0400 -+++ ceph-17.2.1/src/tools/cephfs/shell/cephfs-shell 2022-06-23 10:41:35.000000000 -0400 -@@ -0,0 +1,1687 @@ -+#!/usr/bin/python3 -+# coding = utf-8 -+ -+import argparse -+import os -+import os.path -+import sys -+import cephfs as libcephfs -+import shutil -+import traceback -+import colorama -+import fnmatch -+import math -+import re -+import shlex -+import stat -+import errno -+ -+from cmd2 import Cmd 
-+from cmd2 import __version__ as cmd2_version -+from distutils.version import LooseVersion -+ -+if sys.version_info.major < 3: -+ raise RuntimeError("cephfs-shell is only compatible with python3") -+ -+try: -+ from cmd2 import with_argparser -+except ImportError: -+ def with_argparser(argparser): -+ import functools -+ -+ def argparser_decorator(func): -+ @functools.wraps(func) -+ def wrapper(thiz, cmdline): -+ if isinstance(cmdline, list): -+ arglist = cmdline -+ else: -+ # do not split if it's already a list -+ arglist = shlex.split(cmdline, posix=False) -+ # in case user quotes the command args -+ arglist = [arg.strip('\'""') for arg in arglist] -+ try: -+ args = argparser.parse_args(arglist) -+ except SystemExit: -+ shell.exit_code = 1 -+ # argparse exits at seeing bad arguments -+ return -+ else: -+ return func(thiz, args) -+ argparser.prog = func.__name__[3:] -+ if argparser.description is None and func.__doc__: -+ argparser.description = func.__doc__ -+ -+ return wrapper -+ -+ return argparser_decorator -+ -+ -+cephfs = None # holds CephFS Python bindings -+shell = None # holds instance of class CephFSShell -+exit_codes = {'Misc': 1, -+ 'KeyboardInterrupt': 2, -+ errno.EPERM: 3, -+ errno.EACCES: 4, -+ errno.ENOENT: 5, -+ errno.EIO: 6, -+ errno.ENOSPC: 7, -+ errno.EEXIST: 8, -+ errno.ENODATA: 9, -+ errno.EINVAL: 10, -+ errno.EOPNOTSUPP: 11, -+ errno.ERANGE: 12, -+ errno.EWOULDBLOCK: 13, -+ errno.ENOTEMPTY: 14, -+ errno.ENOTDIR: 15, -+ errno.EDQUOT: 16, -+ errno.EPIPE: 17, -+ errno.ESHUTDOWN: 18, -+ errno.ECONNABORTED: 19, -+ errno.ECONNREFUSED: 20, -+ errno.ECONNRESET: 21, -+ errno.EINTR: 22} -+ -+ -+######################################################################### -+# -+# Following are methods are generically useful through class CephFSShell -+# -+####################################################################### -+ -+ -+def poutput(s, end='\n'): -+ shell.poutput(s, end=end) -+ -+ -+def perror(msg, **kwargs): -+ shell.perror(msg, **kwargs) -+ 
-+ -+def set_exit_code_msg(errcode='Misc', msg=''): -+ """ -+ Set exit code and print error message -+ """ -+ if isinstance(msg, libcephfs.Error): -+ shell.exit_code = exit_codes[msg.get_error_code()] -+ else: -+ shell.exit_code = exit_codes[errcode] -+ if msg: -+ perror(msg) -+ -+ -+def mode_notation(mode): -+ """ -+ """ -+ permission_bits = {'0': '---', -+ '1': '--x', -+ '2': '-w-', -+ '3': '-wx', -+ '4': 'r--', -+ '5': 'r-x', -+ '6': 'rw-', -+ '7': 'rwx'} -+ mode = str(oct(mode)) -+ notation = '-' -+ if mode[2] == '4': -+ notation = 'd' -+ elif mode[2:4] == '12': -+ notation = 'l' -+ for i in mode[-3:]: -+ notation += permission_bits[i] -+ return notation -+ -+ -+def get_chunks(file_size): -+ chunk_start = 0 -+ chunk_size = 0x20000 # 131072 bytes, default max ssl buffer size -+ while chunk_start + chunk_size < file_size: -+ yield(chunk_start, chunk_size) -+ chunk_start += chunk_size -+ final_chunk_size = file_size - chunk_start -+ yield(chunk_start, final_chunk_size) -+ -+ -+def to_bytes(param): -+ # don't convert as follows as it can lead unusable results like coverting -+ # [1, 2, 3, 4] to '[1, 2, 3, 4]' - -+ # str(param).encode('utf-8') -+ if isinstance(param, bytes): -+ return param -+ elif isinstance(param, str): -+ return bytes(param, encoding='utf-8') -+ elif isinstance(param, list): -+ return [i.encode('utf-8') if isinstance(i, str) else to_bytes(i) for -+ i in param] -+ elif isinstance(param, int) or isinstance(param, float): -+ return str(param).encode('utf-8') -+ elif param is None: -+ return None -+ -+ -+def ls(path, opts=''): -+ # opts tries to be like /bin/ls opts -+ almost_all = 'A' in opts -+ try: -+ with cephfs.opendir(path) as d: -+ while True: -+ dent = cephfs.readdir(d) -+ if dent is None: -+ return -+ elif almost_all and dent.d_name in (b'.', b'..'): -+ continue -+ yield dent -+ except libcephfs.ObjectNotFound as e: -+ set_exit_code_msg(msg=e) -+ -+ -+def glob(path, pattern): -+ paths = [] -+ parent_dir = os.path.dirname(path) -+ if 
parent_dir == b'': -+ parent_dir = b'/' -+ if path == b'/' or is_dir_exists(os.path.basename(path), parent_dir): -+ for i in ls(path, opts='A'): -+ if fnmatch.fnmatch(i.d_name, pattern): -+ paths.append(os.path.join(path, i.d_name)) -+ return paths -+ -+ -+def locate_file(name, case_sensitive=True): -+ dir_list = sorted(set(dirwalk(cephfs.getcwd()))) -+ if not case_sensitive: -+ return [dname for dname in dir_list if name.lower() in dname.lower()] -+ else: -+ return [dname for dname in dir_list if name in dname] -+ -+ -+def get_all_possible_paths(pattern): -+ complete_pattern = pattern[:] -+ paths = [] -+ is_rel_path = not os.path.isabs(pattern) -+ if is_rel_path: -+ dir_ = cephfs.getcwd() -+ else: -+ dir_ = b'/' -+ pattern = pattern[1:] -+ patterns = pattern.split(b'/') -+ paths.extend(glob(dir_, patterns[0])) -+ patterns.pop(0) -+ for pattern in patterns: -+ for path in paths: -+ paths.extend(glob(path, pattern)) -+ if is_rel_path: -+ complete_pattern = os.path.join(cephfs.getcwd(), complete_pattern) -+ return [path for path in paths if fnmatch.fnmatch(path, complete_pattern)] -+ -+ -+suffixes = ['B', 'K', 'M', 'G', 'T', 'P'] -+ -+ -+def humansize(nbytes): -+ i = 0 -+ while nbytes >= 1024 and i < len(suffixes) - 1: -+ nbytes /= 1024. 
-+ i += 1 -+ nbytes = math.ceil(nbytes) -+ f = ('%d' % nbytes).rstrip('.') -+ return '%s%s' % (f, suffixes[i]) -+ -+ -+def style_listing(path, is_dir, is_symlink, ls_long=False): -+ if not (is_dir or is_symlink): -+ return path -+ pretty = colorama.Style.BRIGHT -+ if is_symlink: -+ pretty += colorama.Fore.CYAN + path -+ if ls_long: -+ # Add target path -+ pretty += ' -> ' + cephfs.readlink(path, size=255).decode('utf-8') -+ elif is_dir: -+ pretty += colorama.Fore.BLUE + path + '/' -+ pretty += colorama.Style.RESET_ALL -+ return pretty -+ -+ -+def print_long(path, is_dir, is_symlink, human_readable): -+ info = cephfs.stat(path, follow_symlink=(not is_symlink)) -+ pretty = style_listing(os.path.basename(path.decode('utf-8')), is_dir, is_symlink, True) -+ if human_readable: -+ sizefmt = '\t {:10s}'.format(humansize(info.st_size)) -+ else: -+ sizefmt = '{:12d}'.format(info.st_size) -+ poutput(f'{mode_notation(info.st_mode)} {sizefmt} {info.st_uid} {info.st_gid} {info.st_mtime}' -+ f' {pretty}') -+ -+ -+def word_len(word): -+ """ -+ Returns the word length, minus any color codes. 
-+ """ -+ if word[0] == '\x1b': -+ return len(word) - 9 -+ return len(word) -+ -+ -+def is_dir_exists(path, dir_=b''): -+ path_to_stat = os.path.join(dir_, path) -+ try: -+ return ((cephfs.stat(path_to_stat).st_mode & 0o0040000) != 0) -+ except libcephfs.Error: -+ return False -+ -+ -+def is_file_exists(path, dir_=b''): -+ try: -+ # if its not a directory, then its a file -+ return ((cephfs.stat(os.path.join(dir_, path)).st_mode & 0o0040000) == 0) -+ except libcephfs.Error: -+ return False -+ -+ -+def print_list(words, termwidth=79): -+ if not words: -+ return -+ words = [word.decode('utf-8') if isinstance(word, bytes) else word for word in words] -+ width = max([word_len(word) for word in words]) + 2 -+ nwords = len(words) -+ ncols = max(1, (termwidth + 1) // (width + 1)) -+ nrows = (nwords + ncols - 1) // ncols -+ for row in range(nrows): -+ for i in range(row, nwords, nrows): -+ word = words[i] -+ print_width = width -+ if word[0] == '\x1b': -+ print_width = print_width + 10 -+ -+ poutput('%-*s' % (print_width, words[i]), -+ end='\n' if i + nrows >= nwords else '') -+ -+ -+def copy_from_local(local_path, remote_path): -+ stdin = -1 -+ file_ = None -+ fd = None -+ convert_to_bytes = False -+ if local_path == b'-': -+ file_ = sys.stdin -+ convert_to_bytes = True -+ else: -+ try: -+ file_ = open(local_path, 'rb') -+ except PermissionError as e: -+ set_exit_code_msg(e.errno, 'error: no permission to read local file {}'.format( -+ local_path.decode('utf-8'))) -+ return -+ stdin = 1 -+ try: -+ fd = cephfs.open(remote_path, 'w', 0o666) -+ except libcephfs.Error as e: -+ set_exit_code_msg(msg=e) -+ return -+ progress = 0 -+ while True: -+ data = file_.read(65536) -+ if not data or len(data) == 0: -+ break -+ if convert_to_bytes: -+ data = to_bytes(data) -+ wrote = cephfs.write(fd, data, progress) -+ if wrote < 0: -+ break -+ progress += wrote -+ cephfs.close(fd) -+ if stdin > 0: -+ file_.close() -+ poutput('') -+ -+ -+def copy_to_local(remote_path, local_path): -+ fd = 
None -+ if local_path != b'-': -+ local_dir = os.path.dirname(local_path) -+ dir_list = remote_path.rsplit(b'/', 1) -+ if not os.path.exists(local_dir): -+ os.makedirs(local_dir) -+ if len(dir_list) > 2 and dir_list[1] == b'': -+ return -+ fd = open(local_path, 'wb+') -+ file_ = cephfs.open(remote_path, 'r') -+ file_size = cephfs.stat(remote_path).st_size -+ if file_size <= 0: -+ return -+ progress = 0 -+ for chunk_start, chunk_size in get_chunks(file_size): -+ file_chunk = cephfs.read(file_, chunk_start, chunk_size) -+ progress += len(file_chunk) -+ if fd: -+ fd.write(file_chunk) -+ else: -+ poutput(file_chunk.decode('utf-8')) -+ cephfs.close(file_) -+ if fd: -+ fd.close() -+ -+ -+def dirwalk(path): -+ """ -+ walk a directory tree, using a generator -+ """ -+ path = os.path.normpath(path) -+ for item in ls(path, opts='A'): -+ fullpath = os.path.join(path, item.d_name) -+ src_path = fullpath.rsplit(b'/', 1)[0] -+ -+ yield os.path.normpath(fullpath) -+ if is_dir_exists(item.d_name, src_path): -+ for x in dirwalk(fullpath): -+ yield x -+ -+ -+################################################################## -+# -+# Following methods are implementation for CephFS Shell commands -+# -+################################################################# -+ -+class CephFSShell(Cmd): -+ -+ def __init__(self): -+ super().__init__() -+ self.working_dir = cephfs.getcwd().decode('utf-8') -+ self.set_prompt() -+ self.interactive = False -+ self.umask = '2' -+ -+ def default(self, line): -+ perror('Unrecognized command') -+ -+ def set_prompt(self): -+ self.prompt = ('\033[01;33mCephFS:~' + colorama.Fore.LIGHTCYAN_EX -+ + self.working_dir + colorama.Style.RESET_ALL -+ + '\033[01;33m>>>\033[00m ') -+ -+ def create_argparser(self, command): -+ try: -+ argparse_args = getattr(self, 'argparse_' + command) -+ except AttributeError: -+ set_exit_code_msg() -+ return None -+ doc_lines = getattr( -+ self, 'do_' + command).__doc__.expandtabs().splitlines() -+ if '' in doc_lines: -+ 
blank_idx = doc_lines.index('') -+ usage = doc_lines[:blank_idx] -+ description = doc_lines[blank_idx + 1:] -+ else: -+ usage = doc_lines -+ description = [] -+ parser = argparse.ArgumentParser( -+ prog=command, -+ usage='\n'.join(usage), -+ description='\n'.join(description), -+ formatter_class=argparse.ArgumentDefaultsHelpFormatter -+ ) -+ for args, kwargs in argparse_args: -+ parser.add_argument(*args, **kwargs) -+ return parser -+ -+ def complete_filenames(self, text, line, begidx, endidx): -+ if not text: -+ completions = [x.d_name.decode('utf-8') + '/' * int(x.is_dir()) -+ for x in ls(b".", opts='A')] -+ else: -+ if text.count('/') > 0: -+ completions = [text.rsplit('/', 1)[0] + '/' -+ + x.d_name.decode('utf-8') + '/' -+ * int(x.is_dir()) for x in ls('/' -+ + text.rsplit('/', 1)[0], opts='A') -+ if x.d_name.decode('utf-8').startswith( -+ text.rsplit('/', 1)[1])] -+ else: -+ completions = [x.d_name.decode('utf-8') + '/' -+ * int(x.is_dir()) for x in ls(b".", opts='A') -+ if x.d_name.decode('utf-8').startswith(text)] -+ if len(completions) == 1 and completions[0][-1] == '/': -+ dir_, file_ = completions[0].rsplit('/', 1) -+ completions.extend([dir_ + '/' + x.d_name.decode('utf-8') -+ + '/' * int(x.is_dir()) for x in -+ ls('/' + dir_, opts='A') -+ if x.d_name.decode('utf-8').startswith(file_)]) -+ return self.delimiter_complete(text, line, begidx, endidx, completions, '/') -+ return completions -+ -+ def onecmd(self, line, **kwargs): -+ """ -+ Global error catcher -+ """ -+ try: -+ res = Cmd.onecmd(self, line, **kwargs) -+ if self.interactive: -+ self.set_prompt() -+ return res -+ except ConnectionError as e: -+ set_exit_code_msg(e.errno, f'***\n{e}') -+ except KeyboardInterrupt: -+ set_exit_code_msg('KeyboardInterrupt', 'Command aborted') -+ except (libcephfs.Error, Exception) as e: -+ if shell.debug: -+ traceback.print_exc(file=sys.stdout) -+ set_exit_code_msg(msg=e) -+ -+ class path_to_bytes(argparse.Action): -+ def __call__(self, parser, namespace, values, 
option_string=None): -+ values = to_bytes(values) -+ setattr(namespace, self.dest, values) -+ -+ # TODO: move the necessary contents from here to `class path_to_bytes`. -+ class get_list_of_bytes_path(argparse.Action): -+ def __call__(self, parser, namespace, values, option_string=None): -+ values = to_bytes(values) -+ -+ if values == b'.': -+ values = cephfs.getcwd() -+ else: -+ for i in values: -+ if i == b'.': -+ values[values.index(i)] = cephfs.getcwd() -+ -+ setattr(namespace, self.dest, values) -+ -+ def complete_mkdir(self, text, line, begidx, endidx): -+ """ -+ auto complete of file name. -+ """ -+ return self.complete_filenames(text, line, begidx, endidx) -+ -+ class ModeAction(argparse.Action): -+ def __init__(self, option_strings, dest, nargs=None, **kwargs): -+ if nargs is not None and nargs != '?': -+ raise ValueError("more than one modes not allowed") -+ super().__init__(option_strings, dest, **kwargs) -+ -+ def __call__(self, parser, namespace, values, option_string=None): -+ o_mode = 0 -+ res = None -+ try: -+ o_mode = int(values, base=8) -+ except ValueError: -+ res = re.match('((u?g?o?)|(a?))(=)(r?w?x?)', values) -+ if res is None: -+ parser.error("invalid mode: %s\n" -+ "mode must be a numeric octal literal\n" -+ "or ((u?g?o?)|(a?))(=)(r?w?x?)" % -+ values) -+ else: -+ # we are supporting only assignment of mode and not + or - -+ # as is generally available with the chmod command -+ # eg. 
-+ # >>> res = re.match('((u?g?o?)|(a?))(=)(r?w?x?)', 'go=') -+ # >>> res.groups() -+ # ('go', 'go', None, '=', '') -+ val = res.groups() -+ -+ if val[3] != '=': -+ parser.error("need assignment operator between user " -+ "and mode specifiers") -+ if val[4] == '': -+ parser.error("invalid mode: %s\n" -+ "mode must be combination of: r | w | x" % -+ values) -+ users = '' -+ if val[2] is None: -+ users = val[1] -+ else: -+ users = val[2] -+ -+ t_mode = 0 -+ if users == 'a': -+ users = 'ugo' -+ -+ if 'r' in val[4]: -+ t_mode |= 4 -+ if 'w' in val[4]: -+ t_mode |= 2 -+ if 'x' in val[4]: -+ t_mode |= 1 -+ -+ if 'u' in users: -+ o_mode |= (t_mode << 6) -+ if 'g' in users: -+ o_mode |= (t_mode << 3) -+ if 'o' in users: -+ o_mode |= t_mode -+ -+ if o_mode < 0: -+ parser.error("invalid mode: %s\n" -+ "mode cannot be negative" % values) -+ if o_mode > 0o777: -+ parser.error("invalid mode: %s\n" -+ "mode cannot be greater than octal 0777" % values) -+ -+ setattr(namespace, self.dest, str(oct(o_mode))) -+ -+ mkdir_parser = argparse.ArgumentParser( -+ description='Create the directory(ies), if they do not already exist.') -+ mkdir_parser.add_argument('dirs', type=str, -+ action=path_to_bytes, -+ metavar='DIR_NAME', -+ help='Name of new_directory.', -+ nargs='+') -+ mkdir_parser.add_argument('-m', '--mode', type=str, -+ action=ModeAction, -+ help='Sets the access mode for the new directory.') -+ mkdir_parser.add_argument('-p', '--parent', action='store_true', -+ help='Create parent directories as necessary. ' -+ 'When this option is specified, no error is' -+ 'reported if a directory already exists.') -+ -+ @with_argparser(mkdir_parser) -+ def do_mkdir(self, args): -+ """ -+ Create directory. 
-+ """ -+ for path in args.dirs: -+ if args.mode: -+ permission = int(args.mode, 8) -+ else: -+ permission = 0o777 -+ if args.parent: -+ cephfs.mkdirs(path, permission) -+ else: -+ try: -+ cephfs.mkdir(path, permission) -+ except libcephfs.Error as e: -+ set_exit_code_msg(e) -+ -+ def complete_put(self, text, line, begidx, endidx): -+ """ -+ auto complete of file name. -+ """ -+ index_dict = {1: self.path_complete} -+ return self.index_based_complete(text, line, begidx, endidx, index_dict) -+ -+ put_parser = argparse.ArgumentParser( -+ description='Copy a file/directory to Ceph File System from Local File System.') -+ put_parser.add_argument('local_path', type=str, action=path_to_bytes, -+ help='Path of the file in the local system') -+ put_parser.add_argument('remote_path', type=str, action=path_to_bytes, -+ help='Path of the file in the remote system') -+ put_parser.add_argument('-f', '--force', action='store_true', -+ help='Overwrites the destination if it already exists.') -+ -+ @with_argparser(put_parser) -+ def do_put(self, args): -+ """ -+ Copy a local file/directory to CephFS. -+ """ -+ if args.local_path != b'-' and not os.path.isfile(args.local_path) \ -+ and not os.path.isdir(args.local_path): -+ set_exit_code_msg(errno.ENOENT, -+ msg=f"error: " -+ f"{args.local_path.decode('utf-8')}: " -+ f"No such file or directory") -+ return -+ -+ if (is_file_exists(args.remote_path) or is_dir_exists( -+ args.remote_path)) and not args.force: -+ set_exit_code_msg(msg=f"error: file/directory " -+ f"{args.remote_path.decode('utf-8')} " -+ f"exists, use --force to overwrite") -+ return -+ -+ root_src_dir = args.local_path -+ root_dst_dir = args.remote_path -+ if args.local_path == b'.' 
or args.local_path == b'./': -+ root_src_dir = os.getcwdb() -+ elif len(args.local_path.rsplit(b'/', 1)) < 2: -+ root_src_dir = os.path.join(os.getcwdb(), args.local_path) -+ else: -+ p = args.local_path.split(b'/') -+ if p[0] == b'.': -+ root_src_dir = os.getcwdb() -+ p.pop(0) -+ while len(p) > 0: -+ root_src_dir += b'/' + p.pop(0) -+ -+ if root_dst_dir == b'.': -+ if args.local_path != b'-': -+ root_dst_dir = root_src_dir.rsplit(b'/', 1)[1] -+ if root_dst_dir == b'': -+ root_dst_dir = root_src_dir.rsplit(b'/', 1)[0] -+ a = root_dst_dir.rsplit(b'/', 1) -+ if len(a) > 1: -+ root_dst_dir = a[1] -+ else: -+ root_dst_dir = a[0] -+ else: -+ set_exit_code_msg(errno.EINVAL, 'error: no filename specified ' -+ 'for destination') -+ return -+ -+ if root_dst_dir[-1] != b'/': -+ root_dst_dir += b'/' -+ -+ if args.local_path == b'-' or os.path.isfile(root_src_dir): -+ if args.local_path == b'-': -+ root_src_dir = b'-' -+ copy_from_local(root_src_dir, root_dst_dir) -+ else: -+ for src_dir, dirs, files in os.walk(root_src_dir): -+ if isinstance(src_dir, str): -+ src_dir = to_bytes(src_dir) -+ dst_dir = src_dir.replace(root_src_dir, root_dst_dir, 1) -+ dst_dir = re.sub(rb'\/+', b'/', cephfs.getcwd() -+ + dst_dir) -+ if args.force and dst_dir != b'/' and not is_dir_exists( -+ dst_dir[:-1]) and not locate_file(dst_dir): -+ try: -+ cephfs.mkdirs(dst_dir, 0o777) -+ except libcephfs.Error: -+ pass -+ if (not args.force) and dst_dir != b'/' and not is_dir_exists( -+ dst_dir) and not os.path.isfile(root_src_dir): -+ try: -+ cephfs.mkdirs(dst_dir, 0o777) -+ except libcephfs.Error: -+ # TODO: perhaps, set retval to 1? -+ pass -+ -+ for dir_ in dirs: -+ dir_name = os.path.join(dst_dir, dir_) -+ if not is_dir_exists(dir_name): -+ try: -+ cephfs.mkdirs(dir_name, 0o777) -+ except libcephfs.Error: -+ # TODO: perhaps, set retval to 1? 
-+ pass -+ -+ for file_ in files: -+ src_file = os.path.join(src_dir, file_) -+ dst_file = re.sub(rb'\/+', b'/', b'/' + dst_dir + b'/' + file_) -+ if (not args.force) and is_file_exists(dst_file): -+ return -+ copy_from_local(src_file, os.path.join(cephfs.getcwd(), -+ dst_file)) -+ -+ def complete_get(self, text, line, begidx, endidx): -+ """ -+ auto complete of file name. -+ """ -+ return self.complete_filenames(text, line, begidx, endidx) -+ -+ get_parser = argparse.ArgumentParser( -+ description='Copy a file from Ceph File System to Local Directory.') -+ get_parser.add_argument('remote_path', type=str, action=path_to_bytes, -+ help='Path of the file in the remote system') -+ get_parser.add_argument('local_path', type=str, action=path_to_bytes, -+ help='Path of the file in the local system') -+ get_parser.add_argument('-f', '--force', action='store_true', -+ help='Overwrites the destination if it already exists.') -+ -+ @with_argparser(get_parser) -+ def do_get(self, args): -+ """ -+ Copy a file/directory from CephFS to given path. -+ """ -+ if not is_file_exists(args.remote_path) and not \ -+ is_dir_exists(args.remote_path): -+ set_exit_code_msg(errno.ENOENT, "error: no file/directory" -+ " found at specified remote " -+ "path") -+ return -+ if (os.path.isfile(args.local_path) or os.path.isdir( -+ args.local_path)) and not args.force: -+ set_exit_code_msg(msg=f"error: file/directory " -+ f"{args.local_path.decode('utf-8')}" -+ f" already exists, use --force to " -+ f"overwrite") -+ return -+ root_src_dir = args.remote_path -+ root_dst_dir = args.local_path -+ fname = root_src_dir.rsplit(b'/', 1) -+ if args.local_path == b'.': -+ root_dst_dir = os.getcwdb() -+ if args.remote_path == b'.': -+ root_src_dir = cephfs.getcwd() -+ if args.local_path == b'-': -+ if args.remote_path == b'.' 
or args.remote_path == b'./': -+ set_exit_code_msg(errno.EINVAL, 'error: no remote file name specified') -+ return -+ copy_to_local(root_src_dir, b'-') -+ elif is_file_exists(args.remote_path): -+ copy_to_local(root_src_dir, root_dst_dir) -+ elif b'/' in root_src_dir and is_file_exists(fname[1], fname[0]): -+ copy_to_local(root_src_dir, root_dst_dir) -+ else: -+ files = list(reversed(sorted(dirwalk(root_src_dir)))) -+ for file_ in files: -+ dst_dirpath, dst_file = file_.rsplit(b'/', 1) -+ if dst_dirpath in files: -+ files.remove(dst_dirpath) -+ dst_path = os.path.join(root_dst_dir, dst_dirpath, dst_file) -+ dst_path = os.path.normpath(dst_path) -+ if is_dir_exists(file_): -+ try: -+ os.makedirs(dst_path) -+ except OSError: -+ pass -+ else: -+ copy_to_local(file_, dst_path) -+ -+ return 0 -+ -+ def complete_ls(self, text, line, begidx, endidx): -+ """ -+ auto complete of file name. -+ """ -+ return self.complete_filenames(text, line, begidx, endidx) -+ -+ ls_parser = argparse.ArgumentParser( -+ description='Copy a file from Ceph File System from Local Directory.') -+ ls_parser.add_argument('-l', '--long', action='store_true', -+ help='Detailed list of items in the directory.') -+ ls_parser.add_argument('-r', '--reverse', action='store_true', -+ help='Reverse order of listing items in the directory.') -+ ls_parser.add_argument('-H', action='store_true', help='Human Readable') -+ ls_parser.add_argument('-a', '--all', action='store_true', -+ help='Do not Ignore entries starting with .') -+ ls_parser.add_argument('-S', action='store_true', help='Sort by file_size') -+ ls_parser.add_argument('paths', help='Name of Directories', -+ action=path_to_bytes, nargs='*', default=['.']) -+ -+ @with_argparser(ls_parser) -+ def do_ls(self, args): -+ """ -+ List all the files and directories in the current working directory -+ """ -+ paths = args.paths -+ for path in paths: -+ values = [] -+ items = [] -+ try: -+ if path.count(b'*') > 0: -+ all_items = get_all_possible_paths(path) 
-+ if len(all_items) == 0: -+ continue -+ path = all_items[0].rsplit(b'/', 1)[0] -+ if path == b'': -+ path = b'/' -+ dirs = [] -+ for i in all_items: -+ for item in ls(path): -+ d_name = item.d_name -+ if os.path.basename(i) == d_name: -+ if item.is_dir(): -+ dirs.append(os.path.join(path, d_name)) -+ else: -+ items.append(item) -+ if dirs: -+ paths.extend(dirs) -+ else: -+ poutput(path.decode('utf-8'), end=':\n') -+ items = sorted(items, key=lambda item: item.d_name) -+ else: -+ if path != b'' and path != cephfs.getcwd() and len(paths) > 1: -+ poutput(path.decode('utf-8'), end=':\n') -+ items = sorted(ls(path), key=lambda item: item.d_name) -+ if not args.all: -+ items = [i for i in items if not i.d_name.startswith(b'.')] -+ if args.S: -+ items = sorted(items, key=lambda item: cephfs.stat( -+ path + b'/' + item.d_name, follow_symlink=( -+ not item.is_symbol_file())).st_size) -+ if args.reverse: -+ items = reversed(items) -+ for item in items: -+ filepath = item.d_name -+ is_dir = item.is_dir() -+ is_sym_lnk = item.is_symbol_file() -+ try: -+ if args.long and args.H: -+ print_long(os.path.join(cephfs.getcwd(), path, filepath), is_dir, -+ is_sym_lnk, True) -+ elif args.long: -+ print_long(os.path.join(cephfs.getcwd(), path, filepath), is_dir, -+ is_sym_lnk, False) -+ elif is_sym_lnk or is_dir: -+ values.append(style_listing(filepath.decode('utf-8'), is_dir, -+ is_sym_lnk)) -+ else: -+ values.append(filepath) -+ except libcephfs.Error as e: -+ set_exit_code_msg(msg=e) -+ if not args.long: -+ print_list(values, shutil.get_terminal_size().columns) -+ if path != paths[-1]: -+ poutput('') -+ except libcephfs.Error as e: -+ set_exit_code_msg(msg=e) -+ -+ def complete_rmdir(self, text, line, begidx, endidx): -+ """ -+ auto complete of file name. 
-+ """ -+ return self.complete_filenames(text, line, begidx, endidx) -+ -+ rmdir_parser = argparse.ArgumentParser(description='Remove Directory.') -+ rmdir_parser.add_argument('paths', help='Directory Path.', nargs='+', -+ action=path_to_bytes) -+ rmdir_parser.add_argument('-p', '--parent', action='store_true', -+ help='Remove parent directories as necessary. ' -+ 'When this option is specified, no error ' -+ 'is reported if a directory has any ' -+ 'sub-directories, files') -+ -+ @with_argparser(rmdir_parser) -+ def do_rmdir(self, args): -+ self.do_rmdir_helper(args) -+ -+ def do_rmdir_helper(self, args): -+ """ -+ Remove a specific Directory -+ """ -+ is_pattern = False -+ paths = args.paths -+ for path in paths: -+ if path.count(b'*') > 0: -+ is_pattern = True -+ all_items = get_all_possible_paths(path) -+ if len(all_items) > 0: -+ path = all_items[0].rsplit(b'/', 1)[0] -+ if path == b'': -+ path = b'/' -+ dirs = [] -+ for i in all_items: -+ for item in ls(path): -+ d_name = item.d_name -+ if os.path.basename(i) == d_name: -+ if item.is_dir(): -+ dirs.append(os.path.join(path, d_name)) -+ paths.extend(dirs) -+ continue -+ else: -+ is_pattern = False -+ -+ if args.parent: -+ path = os.path.join(cephfs.getcwd(), path.rsplit(b'/')[0]) -+ files = list(sorted(set(dirwalk(path)), reverse=True)) -+ if not files: -+ path = b'.' -+ for filepath in files: -+ try: -+ cephfs.rmdir(os.path.normpath(filepath)) -+ except libcephfs.Error as e: -+ perror(e) -+ path = b'.' -+ break -+ else: -+ path = os.path.normpath(os.path.join(cephfs.getcwd(), path)) -+ if not is_pattern and path != os.path.normpath(b''): -+ try: -+ cephfs.rmdir(path) -+ except libcephfs.Error as e: -+ set_exit_code_msg(msg=e) -+ -+ def complete_rm(self, text, line, begidx, endidx): -+ """ -+ auto complete of file name. 
-+ """ -+ return self.complete_filenames(text, line, begidx, endidx) -+ -+ rm_parser = argparse.ArgumentParser(description='Remove File.') -+ rm_parser.add_argument('paths', help='File Path.', nargs='+', -+ action=path_to_bytes) -+ -+ @with_argparser(rm_parser) -+ def do_rm(self, args): -+ """ -+ Remove a specific file -+ """ -+ file_paths = args.paths -+ for path in file_paths: -+ if path.count(b'*') > 0: -+ file_paths.extend([i for i in get_all_possible_paths( -+ path) if is_file_exists(i)]) -+ else: -+ try: -+ cephfs.unlink(path) -+ except libcephfs.Error as e: -+ # NOTE: perhaps we need a better msg here -+ set_exit_code_msg(msg=e) -+ -+ def complete_mv(self, text, line, begidx, endidx): -+ """ -+ auto complete of file name. -+ """ -+ return self.complete_filenames(text, line, begidx, endidx) -+ -+ mv_parser = argparse.ArgumentParser(description='Move File.') -+ mv_parser.add_argument('src_path', type=str, action=path_to_bytes, -+ help='Source File Path.') -+ mv_parser.add_argument('dest_path', type=str, action=path_to_bytes, -+ help='Destination File Path.') -+ -+ @with_argparser(mv_parser) -+ def do_mv(self, args): -+ """ -+ Rename a file or Move a file from source path to the destination -+ """ -+ cephfs.rename(args.src_path, args.dest_path) -+ -+ def complete_cd(self, text, line, begidx, endidx): -+ """ -+ auto complete of file name. -+ """ -+ return self.complete_filenames(text, line, begidx, endidx) -+ -+ cd_parser = argparse.ArgumentParser(description='Change working directory') -+ cd_parser.add_argument('path', type=str, help='Name of the directory.', -+ action=path_to_bytes, nargs='?', default='/') -+ -+ @with_argparser(cd_parser) -+ def do_cd(self, args): -+ """ -+ Change working directory -+ """ -+ cephfs.chdir(args.path) -+ self.working_dir = cephfs.getcwd().decode('utf-8') -+ self.set_prompt() -+ -+ def do_cwd(self, arglist): -+ """ -+ Get current working directory. 
-+ """ -+ poutput(cephfs.getcwd().decode('utf-8')) -+ -+ def complete_chmod(self, text, line, begidx, endidx): -+ """ -+ auto complete of file name. -+ """ -+ return self.complete_filenames(text, line, begidx, endidx) -+ -+ chmod_parser = argparse.ArgumentParser(description='Create Directory.') -+ chmod_parser.add_argument('mode', type=str, action=ModeAction, help='Mode') -+ chmod_parser.add_argument('paths', type=str, action=path_to_bytes, -+ help='Name of the file', nargs='+') -+ -+ @with_argparser(chmod_parser) -+ def do_chmod(self, args): -+ """ -+ Change permission of a file -+ """ -+ for path in args.paths: -+ mode = int(args.mode, base=8) -+ try: -+ cephfs.chmod(path, mode) -+ except libcephfs.Error as e: -+ set_exit_code_msg(msg=e) -+ -+ def complete_cat(self, text, line, begidx, endidx): -+ """ -+ auto complete of file name. -+ """ -+ return self.complete_filenames(text, line, begidx, endidx) -+ -+ cat_parser = argparse.ArgumentParser(description='') -+ cat_parser.add_argument('paths', help='Name of Files', action=path_to_bytes, -+ nargs='+') -+ -+ @with_argparser(cat_parser) -+ def do_cat(self, args): -+ """ -+ Print contents of a file -+ """ -+ for path in args.paths: -+ if is_file_exists(path): -+ copy_to_local(path, b'-') -+ else: -+ set_exit_code_msg(errno.ENOENT, '{}: no such file'.format( -+ path.decode('utf-8'))) -+ -+ umask_parser = argparse.ArgumentParser(description='Set umask value.') -+ umask_parser.add_argument('mode', help='Mode', type=str, action=ModeAction, -+ nargs='?', default='') -+ -+ @with_argparser(umask_parser) -+ def do_umask(self, args): -+ """ -+ Set Umask value. -+ """ -+ if args.mode == '': -+ poutput(self.umask.zfill(4)) -+ else: -+ mode = int(args.mode, 8) -+ self.umask = str(oct(cephfs.umask(mode))[2:]) -+ -+ def complete_write(self, text, line, begidx, endidx): -+ """ -+ auto complete of file name. 
-+ """ -+ return self.complete_filenames(text, line, begidx, endidx) -+ -+ write_parser = argparse.ArgumentParser(description='Writes data into a file') -+ write_parser.add_argument('path', type=str, action=path_to_bytes, -+ help='Name of File') -+ -+ @with_argparser(write_parser) -+ def do_write(self, args): -+ """ -+ Write data into a file. -+ """ -+ -+ copy_from_local(b'-', args.path) -+ -+ def complete_lcd(self, text, line, begidx, endidx): -+ """ -+ auto complete of file name. -+ """ -+ index_dict = {1: self.path_complete} -+ return self.index_based_complete(text, line, begidx, endidx, index_dict) -+ -+ lcd_parser = argparse.ArgumentParser(description='') -+ lcd_parser.add_argument('path', type=str, action=path_to_bytes, help='Path') -+ -+ @with_argparser(lcd_parser) -+ def do_lcd(self, args): -+ """ -+ Moves into the given local directory -+ """ -+ try: -+ os.chdir(os.path.expanduser(args.path)) -+ except OSError as e: -+ set_exit_code_msg(e.errno, "Cannot change to " -+ f"{e.filename.decode('utf-8')}: {e.strerror}") -+ -+ def complete_lls(self, text, line, begidx, endidx): -+ """ -+ auto complete of file name. -+ """ -+ index_dict = {1: self.path_complete} -+ return self.index_based_complete(text, line, begidx, endidx, index_dict) -+ -+ lls_parser = argparse.ArgumentParser( -+ description='List files in local system.') -+ lls_parser.add_argument('paths', help='Paths', action=path_to_bytes, -+ nargs='*') -+ -+ @with_argparser(lls_parser) -+ def do_lls(self, args): -+ """ -+ Lists all files and folders in the current local directory -+ """ -+ if not args.paths: -+ print_list(os.listdir(os.getcwdb())) -+ else: -+ for path in args.paths: -+ try: -+ items = os.listdir(path) -+ poutput("{}:".format(path.decode('utf-8'))) -+ print_list(items) -+ except OSError as e: -+ set_exit_code_msg(e.errno, f"{e.filename.decode('utf-8')}: " -+ f"{e.strerror}") -+ # Arguments to the with_argpaser decorator function are sticky. 
-+ # The items in args.path do not get overwritten in subsequent calls. -+ # The arguments remain in args.paths after the function exits and we -+ # neeed to clean it up to ensure the next call works as expected. -+ args.paths.clear() -+ -+ def do_lpwd(self, arglist): -+ """ -+ Prints the absolute path of the current local directory -+ """ -+ poutput(os.getcwd()) -+ -+ def complete_df(self, text, line, begidx, endidx): -+ """ -+ auto complete of file name. -+ """ -+ return self.complete_filenames(text, line, begidx, endidx) -+ -+ df_parser = argparse.ArgumentParser(description='Show information about\ -+ the amount of available disk space') -+ df_parser.add_argument('file', help='Name of the file', nargs='*', -+ default=['.'], action=path_to_bytes) -+ -+ @with_argparser(df_parser) -+ def do_df(self, arglist): -+ """ -+ Display the amount of available disk space for file systems -+ """ -+ header = True # Set to true for printing header only once -+ if b'.' == arglist.file[0]: -+ arglist.file = ls(b'.') -+ -+ for file in arglist.file: -+ if isinstance(file, libcephfs.DirEntry): -+ file = file.d_name -+ if file == b'.' 
or file == b'..': -+ continue -+ try: -+ statfs = cephfs.statfs(file) -+ stat = cephfs.stat(file) -+ block_size = (statfs['f_blocks'] * statfs['f_bsize']) // 1024 -+ available = block_size - stat.st_size -+ use = 0 -+ -+ if block_size > 0: -+ use = (stat.st_size * 100) // block_size -+ -+ if header: -+ header = False -+ poutput('{:25s}\t{:5s}\t{:15s}{:10s}{}'.format( -+ "1K-blocks", "Used", "Available", "Use%", -+ "Stored on")) -+ -+ poutput('{:d}\t{:18d}\t{:8d}\t{:10s} {}'.format(block_size, -+ stat.st_size, available, str(int(use)) + '%', -+ file.decode('utf-8'))) -+ except libcephfs.OSError as e: -+ set_exit_code_msg(e.get_error_code(), "could not statfs {}: {}".format( -+ file.decode('utf-8'), e.strerror)) -+ -+ locate_parser = argparse.ArgumentParser( -+ description='Find file within file system') -+ locate_parser.add_argument('name', help='name', type=str, -+ action=path_to_bytes) -+ locate_parser.add_argument('-c', '--count', action='store_true', -+ help='Count list of items located.') -+ locate_parser.add_argument( -+ '-i', '--ignorecase', action='store_true', help='Ignore case') -+ -+ @with_argparser(locate_parser) -+ def do_locate(self, args): -+ """ -+ Find a file within the File System -+ """ -+ if args.name.count(b'*') == 1: -+ if args.name[0] == b'*': -+ args.name += b'/' -+ elif args.name[-1] == '*': -+ args.name = b'/' + args.name -+ args.name = args.name.replace(b'*', b'') -+ if args.ignorecase: -+ locations = locate_file(args.name, False) -+ else: -+ locations = locate_file(args.name) -+ if args.count: -+ poutput(len(locations)) -+ else: -+ poutput((b'\n'.join(locations)).decode('utf-8')) -+ -+ def complete_du(self, text, line, begidx, endidx): -+ """ -+ auto complete of file name. 
-+ """ -+ return self.complete_filenames(text, line, begidx, endidx) -+ -+ du_parser = argparse.ArgumentParser( -+ description='Disk Usage of a Directory') -+ du_parser.add_argument('paths', type=str, action=get_list_of_bytes_path, -+ help='Name of the directory.', nargs='*', -+ default=[b'.']) -+ du_parser.add_argument('-r', action='store_true', -+ help='Recursive Disk usage of all directories.') -+ -+ @with_argparser(du_parser) -+ def do_du(self, args): -+ """ -+ Print disk usage of a given path(s). -+ """ -+ def print_disk_usage(files): -+ if isinstance(files, bytes): -+ files = (files, ) -+ -+ for f in files: -+ try: -+ st = cephfs.lstat(f) -+ -+ if stat.S_ISDIR(st.st_mode): -+ dusage = int(cephfs.getxattr(f, -+ 'ceph.dir.rbytes').decode('utf-8')) -+ else: -+ dusage = st.st_size -+ -+ # print path in local context -+ f = os.path.normpath(f) -+ if f[0] is ord('/'): -+ f = b'.' + f -+ poutput('{:10s} {}'.format(humansize(dusage), -+ f.decode('utf-8'))) -+ except libcephfs.Error as e: -+ set_exit_code_msg(msg=e) -+ continue -+ -+ for path in args.paths: -+ if args.r: -+ print_disk_usage(sorted(set(dirwalk(path)).union({path}))) -+ else: -+ print_disk_usage(path) -+ -+ quota_parser = argparse.ArgumentParser( -+ description='Quota management for a Directory') -+ quota_parser.add_argument('op', choices=['get', 'set'], -+ help='Quota operation type.') -+ quota_parser.add_argument('path', type=str, action=path_to_bytes, -+ help='Name of the directory.') -+ quota_parser.add_argument('--max_bytes', type=int, default=-1, nargs='?', -+ help='Max cumulative size of the data under ' -+ 'this directory.') -+ quota_parser.add_argument('--max_files', type=int, default=-1, nargs='?', -+ help='Total number of files under this ' -+ 'directory tree.') -+ -+ @with_argparser(quota_parser) -+ def do_quota(self, args): -+ """ -+ Quota management. 
-+ """ -+ if not is_dir_exists(args.path): -+ set_exit_code_msg(errno.ENOENT, 'error: no such directory {}'.format( -+ args.path.decode('utf-8'))) -+ return -+ -+ if args.op == 'set': -+ if (args.max_bytes == -1) and (args.max_files == -1): -+ set_exit_code_msg(errno.EINVAL, 'please specify either ' -+ '--max_bytes or --max_files or both') -+ return -+ -+ if args.max_bytes >= 0: -+ max_bytes = to_bytes(str(args.max_bytes)) -+ try: -+ cephfs.setxattr(args.path, 'ceph.quota.max_bytes', -+ max_bytes, os.XATTR_CREATE) -+ poutput('max_bytes set to %d' % args.max_bytes) -+ except libcephfs.Error as e: -+ cephfs.setxattr(args.path, 'ceph.quota.max_bytes', -+ max_bytes, os.XATTR_REPLACE) -+ set_exit_code_msg(e.get_error_code(), 'max_bytes reset to ' -+ f'{args.max_bytes}') -+ -+ if args.max_files >= 0: -+ max_files = to_bytes(str(args.max_files)) -+ try: -+ cephfs.setxattr(args.path, 'ceph.quota.max_files', -+ max_files, os.XATTR_CREATE) -+ poutput('max_files set to %d' % args.max_files) -+ except libcephfs.Error as e: -+ cephfs.setxattr(args.path, 'ceph.quota.max_files', -+ max_files, os.XATTR_REPLACE) -+ set_exit_code_msg(e.get_error_code(), 'max_files reset to ' -+ f'{args.max_files}') -+ elif args.op == 'get': -+ max_bytes = '0' -+ max_files = '0' -+ try: -+ max_bytes = cephfs.getxattr(args.path, 'ceph.quota.max_bytes') -+ poutput('max_bytes: {}'.format(max_bytes.decode('utf-8'))) -+ except libcephfs.Error as e: -+ set_exit_code_msg(e.get_error_code(), 'max_bytes is not set') -+ -+ try: -+ max_files = cephfs.getxattr(args.path, 'ceph.quota.max_files') -+ poutput('max_files: {}'.format(max_files.decode('utf-8'))) -+ except libcephfs.Error as e: -+ set_exit_code_msg(e.get_error_code(), 'max_files is not set') -+ -+ snap_parser = argparse.ArgumentParser(description='Snapshot Management') -+ snap_parser.add_argument('op', type=str, -+ help='Snapshot operation: create or delete') -+ snap_parser.add_argument('name', type=str, action=path_to_bytes, -+ help='Name of snapshot') 
-+ snap_parser.add_argument('dir', type=str, action=path_to_bytes, -+ help='Directory for which snapshot ' -+ 'needs to be created or deleted') -+ -+ @with_argparser(snap_parser) -+ def do_snap(self, args): -+ """ -+ Snapshot management for the volume -+ """ -+ # setting self.colors to None turns off colorizing and -+ # perror emits plain text -+ self.colors = None -+ -+ snapdir = '.snap' -+ conf_snapdir = cephfs.conf_get('client_snapdir') -+ if conf_snapdir is not None: -+ snapdir = conf_snapdir -+ snapdir = to_bytes(snapdir) -+ if args.op == 'create': -+ try: -+ if is_dir_exists(args.dir): -+ cephfs.mkdir(os.path.join(args.dir, snapdir, args.name), 0o755) -+ else: -+ set_exit_code_msg(errno.ENOENT, "'{}': no such directory".format( -+ args.dir.decode('utf-8'))) -+ except libcephfs.Error as e: -+ set_exit_code_msg(e.get_error_code(), -+ "snapshot '{}' already exists".format( -+ args.name.decode('utf-8'))) -+ elif args.op == 'delete': -+ snap_dir = os.path.join(args.dir, snapdir, args.name) -+ try: -+ if is_dir_exists(snap_dir): -+ newargs = argparse.Namespace(paths=[snap_dir], parent=False) -+ self.do_rmdir_helper(newargs) -+ else: -+ set_exit_code_msg(errno.ENOENT, "'{}': no such snapshot".format( -+ args.name.decode('utf-8'))) -+ except libcephfs.Error as e: -+ set_exit_code_msg(e.get_error_code(), "error while deleting " -+ "'{}'".format(snap_dir.decode('utf-8'))) -+ else: -+ set_exit_code_msg(errno.EINVAL, "snapshot can only be created or " -+ "deleted; check - help snap") -+ -+ def do_help(self, line): -+ """ -+ Get details about a command. -+ Usage: help - for a specific command -+ help all - for all the commands -+ """ -+ if line == 'all': -+ for k in dir(self): -+ if k.startswith('do_'): -+ poutput('-' * 80) -+ super().do_help(k[3:]) -+ return -+ parser = self.create_argparser(line) -+ if parser: -+ parser.print_help() -+ else: -+ super().do_help(line) -+ -+ def complete_stat(self, text, line, begidx, endidx): -+ """ -+ auto complete of file name. 
-+ """ -+ return self.complete_filenames(text, line, begidx, endidx) -+ -+ stat_parser = argparse.ArgumentParser( -+ description='Display file or file system status') -+ stat_parser.add_argument('paths', type=str, help='file paths', -+ action=path_to_bytes, nargs='+') -+ -+ @with_argparser(stat_parser) -+ def do_stat(self, args): -+ """ -+ Display file or file system status -+ """ -+ for path in args.paths: -+ try: -+ stat = cephfs.stat(path) -+ atime = stat.st_atime.isoformat(' ') -+ mtime = stat.st_mtime.isoformat(' ') -+ ctime = stat.st_mtime.isoformat(' ') -+ -+ poutput("File: {}\nSize: {:d}\nBlocks: {:d}\nIO Block: {:d}\n" -+ "Device: {:d}\tInode: {:d}\tLinks: {:d}\nPermission: " -+ "{:o}/{}\tUid: {:d}\tGid: {:d}\nAccess: {}\nModify: " -+ "{}\nChange: {}".format(path.decode('utf-8'), -+ stat.st_size, stat.st_blocks, -+ stat.st_blksize, stat.st_dev, -+ stat.st_ino, stat.st_nlink, -+ stat.st_mode, -+ mode_notation(stat.st_mode), -+ stat.st_uid, stat.st_gid, atime, -+ mtime, ctime)) -+ except libcephfs.Error as e: -+ set_exit_code_msg(msg=e) -+ -+ setxattr_parser = argparse.ArgumentParser( -+ description='Set extended attribute for a file') -+ setxattr_parser.add_argument('path', type=str, action=path_to_bytes, help='Name of the file') -+ setxattr_parser.add_argument('name', type=str, help='Extended attribute name') -+ setxattr_parser.add_argument('value', type=str, help='Extended attribute value') -+ -+ @with_argparser(setxattr_parser) -+ def do_setxattr(self, args): -+ """ -+ Set extended attribute for a file -+ """ -+ val_bytes = to_bytes(args.value) -+ name_bytes = to_bytes(args.name) -+ try: -+ cephfs.setxattr(args.path, name_bytes, val_bytes, os.XATTR_CREATE) -+ poutput('{} is successfully set to {}'.format(args.name, args.value)) -+ except libcephfs.ObjectExists: -+ cephfs.setxattr(args.path, name_bytes, val_bytes, os.XATTR_REPLACE) -+ poutput('{} is successfully reset to {}'.format(args.name, args.value)) -+ except libcephfs.Error as e: -+ 
set_exit_code_msg(msg=e) -+ -+ getxattr_parser = argparse.ArgumentParser( -+ description='Get extended attribute set for a file') -+ getxattr_parser.add_argument('path', type=str, action=path_to_bytes, -+ help='Name of the file') -+ getxattr_parser.add_argument('name', type=str, help='Extended attribute name') -+ -+ @with_argparser(getxattr_parser) -+ def do_getxattr(self, args): -+ """ -+ Get extended attribute for a file -+ """ -+ try: -+ poutput('{}'.format(cephfs.getxattr(args.path, -+ to_bytes(args.name)).decode('utf-8'))) -+ except libcephfs.Error as e: -+ set_exit_code_msg(msg=e) -+ -+ listxattr_parser = argparse.ArgumentParser( -+ description='List extended attributes set for a file') -+ listxattr_parser.add_argument('path', type=str, action=path_to_bytes, -+ help='Name of the file') -+ -+ @with_argparser(listxattr_parser) -+ def do_listxattr(self, args): -+ """ -+ List extended attributes for a file -+ """ -+ try: -+ size, xattr_list = cephfs.listxattr(args.path) -+ if size > 0: -+ poutput('{}'.format(xattr_list.replace(b'\x00', b' ').decode('utf-8'))) -+ else: -+ poutput('No extended attribute is set') -+ except libcephfs.Error as e: -+ set_exit_code_msg(msg=e) -+ -+ -+####################################################### -+# -+# Following are methods that get cephfs-shell started. -+# -+##################################################### -+ -+def setup_cephfs(args): -+ """ -+ Mounting a cephfs -+ """ -+ global cephfs -+ try: -+ cephfs = libcephfs.LibCephFS(conffile='') -+ cephfs.mount(filesystem_name=args.fs) -+ except libcephfs.ObjectNotFound as e: -+ print('couldn\'t find ceph configuration not found') -+ sys.exit(e.get_error_code()) -+ except libcephfs.Error as e: -+ print(e) -+ sys.exit(e.get_error_code()) -+ -+ -+def str_to_bool(val): -+ """ -+ Return corresponding bool values for strings like 'true' or 'false'. 
-+ """ -+ if not isinstance(val, str): -+ return val -+ -+ val = val.replace('\n', '') -+ if val.lower() in ['true', 'yes']: -+ return True -+ elif val.lower() in ['false', 'no']: -+ return False -+ else: -+ return val -+ -+ -+def read_shell_conf(shell, shell_conf_file): -+ import configparser -+ -+ sec = 'cephfs-shell' -+ opts = [] -+ if LooseVersion(cmd2_version) >= LooseVersion("0.10.0"): -+ for attr in shell.settables.keys(): -+ opts.append(attr) -+ else: -+ if LooseVersion(cmd2_version) <= LooseVersion("0.9.13"): -+ # hardcoding options for 0.7.9 because - -+ # 1. we use cmd2 v0.7.9 with teuthology and -+ # 2. there's no way distinguish between a shell setting and shell -+ # object attribute until v0.10.0 -+ opts = ['abbrev', 'autorun_on_edit', 'colors', -+ 'continuation_prompt', 'debug', 'echo', 'editor', -+ 'feedback_to_output', 'locals_in_py', 'prompt', 'quiet', -+ 'timing'] -+ elif LooseVersion(cmd2_version) >= LooseVersion("0.9.23"): -+ opts.append('allow_style') -+ # no equivalent option was defined by cmd2. -+ else: -+ pass -+ -+ # default and only section in our conf file. -+ cp = configparser.ConfigParser(default_section=sec, strict=False) -+ cp.read(shell_conf_file) -+ for opt in opts: -+ if cp.has_option(sec, opt): -+ setattr(shell, opt, str_to_bool(cp.get(sec, opt))) -+ -+ -+def get_shell_conffile_path(arg_conf=''): -+ conf_filename = 'cephfs-shell.conf' -+ env_var = 'CEPHFS_SHELL_CONF' -+ -+ arg_conf = '' if not arg_conf else arg_conf -+ home_dir_conf = os.path.expanduser('~/.' + conf_filename) -+ env_conf = os.environ[env_var] if env_var in os.environ else '' -+ -+ # here's the priority by which conf gets read. 
-+ for path in (arg_conf, env_conf, home_dir_conf): -+ if os.path.isfile(path): -+ return path -+ else: -+ return '' -+ -+ -+def manage_args(): -+ main_parser = argparse.ArgumentParser(description='') -+ main_parser.add_argument('-b', '--batch', action='store', -+ help='Path to CephFS shell script/batch file' -+ 'containing CephFS shell commands', -+ type=str) -+ main_parser.add_argument('-c', '--config', action='store', -+ help='Path to Ceph configuration file.', -+ type=str) -+ main_parser.add_argument('-f', '--fs', action='store', -+ help='Name of filesystem to mount.', -+ type=str) -+ main_parser.add_argument('-t', '--test', action='store', -+ help='Test against transcript(s) in FILE', -+ nargs='+') -+ main_parser.add_argument('commands', nargs='*', help='Comma delimited ' -+ 'commands. The shell executes the given command ' -+ 'and quits immediately with the return value of ' -+ 'command. In case no commands are provided, the ' -+ 'shell is launched.', default=[]) -+ -+ args = main_parser.parse_args() -+ args.exe_and_quit = False # Execute and quit, don't launch the shell. -+ -+ if args.batch: -+ if LooseVersion(cmd2_version) <= LooseVersion("0.9.13"): -+ args.commands = ['load ' + args.batch, ',quit'] -+ else: -+ args.commands = ['run_script ' + args.batch, ',quit'] -+ if args.test: -+ args.commands.extend(['-t,'] + [arg + ',' for arg in args.test]) -+ if not args.batch and len(args.commands) > 0: -+ args.exe_and_quit = True -+ -+ manage_sys_argv(args) -+ -+ return args -+ -+ -+def manage_sys_argv(args): -+ exe = sys.argv[0] -+ sys.argv.clear() -+ sys.argv.append(exe) -+ sys.argv.extend([i.strip() for i in ' '.join(args.commands).split(',')]) -+ -+ setup_cephfs(args) -+ -+ -+def execute_cmd_args(args): -+ """ -+ Launch a shell session if no arguments were passed, else just execute -+ the given argument as a shell command and exit the shell session -+ immediately at (last) command's termination with the (last) command's -+ return value. 
-+ """ -+ if not args.exe_and_quit: -+ return shell.cmdloop() -+ return execute_cmds_and_quit(args) -+ -+ -+def execute_cmds_and_quit(args): -+ """ -+ Multiple commands might be passed separated by commas, feed onecmd() -+ one command at a time. -+ """ -+ # do_* methods triggered by cephfs-shell commands return None when they -+ # complete running successfully. Until 0.9.6, shell.onecmd() returned this -+ # value to indicate whether the execution of the commands should stop, but -+ # since 0.9.7 it returns the return value of do_* methods only if it's -+ # not None. When it is None it returns False instead of None. -+ if LooseVersion(cmd2_version) <= LooseVersion("0.9.6"): -+ stop_exec_val = None -+ else: -+ stop_exec_val = False -+ -+ args_to_onecmd = '' -+ if len(args.commands) <= 1: -+ args.commands = args.commands[0].split(' ') -+ for cmdarg in args.commands: -+ if ',' in cmdarg: -+ args_to_onecmd += ' ' + cmdarg[0:-1] -+ onecmd_retval = shell.onecmd(args_to_onecmd) -+ # if the curent command failed, let's abort the execution of -+ # series of commands passed. -+ if onecmd_retval is not stop_exec_val: -+ return onecmd_retval -+ if shell.exit_code != 0: -+ return shell.exit_code -+ -+ args_to_onecmd = '' -+ continue -+ -+ args_to_onecmd += ' ' + cmdarg -+ return shell.onecmd(args_to_onecmd) -+ -+ -+if __name__ == '__main__': -+ args = manage_args() -+ -+ shell = CephFSShell() -+ # TODO: perhaps, we should add an option to pass ceph.conf? -+ read_shell_conf(shell, get_shell_conffile_path(args.config)) -+ # XXX: setting shell.exit_code to zero so that in case there are no errors -+ # and exceptions, it is not set by any method or function of cephfs-shell -+ # and return values from shell.cmdloop() or shell.onecmd() is not an -+ # integer, we can treat it as the return value of cephfs-shell. 
-+ shell.exit_code = 0 -+ -+ retval = execute_cmd_args(args) -+ sys.exit(retval if retval else shell.exit_code) ---- /dev/null 2022-06-30 09:45:32.996000000 -0400 -+++ ceph-17.2.1/src/tools/cephfs/shell/setup.py 2022-07-05 11:00:12.411260682 -0400 -@@ -0,0 +1,27 @@ -+# -*- coding: utf-8 -*- -+ -+from setuptools import setup -+ -+__version__ = '0.0.1' -+ -+setup( -+ name='cephfs-shell', -+ version=__version__, -+ description='Interactive shell for Ceph file system', -+ keywords='cephfs, shell', -+ scripts=['cephfs-shell'], -+ install_requires=[ -+ 'cephfs', -+ 'cmd2', -+ 'colorama', -+ ], -+ classifiers=[ -+ 'Development Status :: 3 - Alpha', -+ 'Environment :: Console', -+ 'Intended Audience :: System Administrators', -+ 'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)', -+ 'Operating System :: POSIX :: Linux', -+ 'Programming Language :: Python :: 3' -+ ], -+ license='LGPLv2+', -+) ---- /dev/null 2022-06-30 09:45:32.996000000 -0400 -+++ ceph-17.2.1/src/tools/cephfs/shell/tox.ini 2022-06-23 10:41:35.000000000 -0400 -@@ -0,0 +1,7 @@ -+[tox] -+envlist = py3 -+skipsdist = true -+ -+[testenv:py3] -+deps = flake8 -+commands = flake8 --ignore=W503 --max-line-length=100 cephfs-shell diff --git a/0022-mon-Replace-deprecated-use-of-format_to.patch b/0022-mon-Replace-deprecated-use-of-format_to.patch deleted file mode 100644 index 76edaff..0000000 --- a/0022-mon-Replace-deprecated-use-of-format_to.patch +++ /dev/null @@ -1,28 +0,0 @@ -From fff72cd14c58d06774cbd0274e6144b42448af03 Mon Sep 17 00:00:00 2001 -From: "Adam C. Emerson" -Date: Mon, 7 Mar 2022 18:54:30 -0500 -Subject: [PATCH] mon: Replace deprecated use of format_to - -The non-deprecated version takes an explicit OutputIterator. - -Signed-off-by: Adam C. 
Emerson ---- - src/mon/LogMonitor.cc | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/src/mon/LogMonitor.cc b/src/mon/LogMonitor.cc -index 9103ddf7c5b..c196e8429fb 100644 ---- a/src/mon/LogMonitor.cc -+++ b/src/mon/LogMonitor.cc -@@ -411,7 +411,7 @@ void LogMonitor::log_external(const LogEntry& le) - } - - if (fd >= 0) { -- fmt::format_to(file_log_buffer, "{}\n", le); -+ fmt::format_to(std::back_inserter(file_log_buffer), "{}\n", le); - int err = safe_write(fd, file_log_buffer.data(), file_log_buffer.size()); - file_log_buffer.clear(); - if (err < 0) { --- -2.36.1 - diff --git a/0023-src-s3select-include-s3select_parquet_intrf.h.patch b/0023-src-s3select-include-s3select_parquet_intrf.h.patch deleted file mode 100644 index b11d1da..0000000 --- a/0023-src-s3select-include-s3select_parquet_intrf.h.patch +++ /dev/null @@ -1,218 +0,0 @@ ---- ceph-17.2.3/src/s3select/include/s3select_parquet_intrf.h.orig 2022-01-11 15:47:52.000000000 -0500 -+++ ceph-17.2.3/src/s3select/include/s3select_parquet_intrf.h 2022-08-22 10:26:06.738082924 -0400 -@@ -26,6 +26,14 @@ - #include "internal_file_decryptor.h" - #include "encryption_internal.h" - -+#if ARROW_VERSION_MAJOR < 9 -+#define _ARROW_FD fd_ -+#define _ARROW_FD_TYPE int -+#else -+#define _ARROW_FD fd_.fd() -+#define _ARROW_FD_TYPE arrow::internal::FileDescriptor -+#endif -+ - /******************************************/ - /******************************************/ - class optional_yield; -@@ -164,7 +172,7 @@ - std::mutex lock_; - - // File descriptor -- int fd_; -+ _ARROW_FD_TYPE fd_; - - FileMode::type mode_; - -@@ -202,7 +210,7 @@ - mode_ = write_only ? 
FileMode::WRITE : FileMode::READWRITE; - - if (!truncate) { -- ARROW_ASSIGN_OR_RAISE(size_, ::arrow::internal::FileGetSize(fd_)); -+ ARROW_ASSIGN_OR_RAISE(size_, ::arrow::internal::FileGetSize(_ARROW_FD)); - } else { - size_ = 0; - } -@@ -222,7 +230,11 @@ - RETURN_NOT_OK(SetFileName(fd)); - is_open_ = true; - mode_ = FileMode::WRITE; -+ #if ARROW_VERSION_MAJOR < 9 - fd_ = fd; -+ #else -+ fd_ = arrow::internal::FileDescriptor{fd}; -+ #endif - return Status::OK(); - } - -@@ -230,7 +242,7 @@ - RETURN_NOT_OK(SetFileName(path)); - - ARROW_ASSIGN_OR_RAISE(fd_, ::arrow::internal::FileOpenReadable(file_name_)); -- ARROW_ASSIGN_OR_RAISE(size_, ::arrow::internal::FileGetSize(fd_)); -+ ARROW_ASSIGN_OR_RAISE(size_, ::arrow::internal::FileGetSize(_ARROW_FD)); - - is_open_ = true; - mode_ = FileMode::READ; -@@ -242,7 +254,11 @@ - RETURN_NOT_OK(SetFileName(fd)); - is_open_ = true; - mode_ = FileMode::READ; -+ #if ARROW_VERSION_MAJOR < 9 - fd_ = fd; -+ #else -+ fd_ = arrow::internal::FileDescriptor{fd}; -+ #endif - return Status::OK(); - } - -@@ -258,9 +274,13 @@ - // Even if closing fails, the fd will likely be closed (perhaps it's - // already closed). - is_open_ = false; -+ #if ARROW_VERSION_MAJOR < 9 - int fd = fd_; - fd_ = -1; - RETURN_NOT_OK(::arrow::internal::FileClose(fd)); -+ #else -+ RETURN_NOT_OK(fd_.Close()); -+ #endif - } - return Status::OK(); - } -@@ -268,7 +288,7 @@ - Result Read(int64_t nbytes, void* out) override { - RETURN_NOT_OK(CheckClosed()); - RETURN_NOT_OK(CheckPositioned()); -- return ::arrow::internal::FileRead(fd_, reinterpret_cast(out), nbytes); -+ return ::arrow::internal::FileRead(_ARROW_FD, reinterpret_cast(out), nbytes); - } - - Result ReadAt(int64_t position, int64_t nbytes, void* out) override { -@@ -277,7 +297,7 @@ - // ReadAt() leaves the file position undefined, so require that we seek - // before calling Read() or Write(). 
- need_seeking_.store(true); -- return ::arrow::internal::FileReadAt(fd_, reinterpret_cast(out), position, -+ return ::arrow::internal::FileReadAt(_ARROW_FD, reinterpret_cast(out), position, - nbytes); - } - -@@ -286,7 +306,7 @@ - if (pos < 0) { - return Status::Invalid("Invalid position"); - } -- Status st = ::arrow::internal::FileSeek(fd_, pos); -+ Status st = ::arrow::internal::FileSeek(_ARROW_FD, pos); - if (st.ok()) { - need_seeking_.store(false); - } -@@ -295,7 +315,7 @@ - - Result Tell() const override { - RETURN_NOT_OK(CheckClosed()); -- return ::arrow::internal::FileTell(fd_); -+ return ::arrow::internal::FileTell(_ARROW_FD); - } - - Status Write(const void* data, int64_t length) override { -@@ -306,11 +326,11 @@ - if (length < 0) { - return Status::IOError("Length must be non-negative"); - } -- return ::arrow::internal::FileWrite(fd_, reinterpret_cast(data), -+ return ::arrow::internal::FileWrite(_ARROW_FD, reinterpret_cast(data), - length); - } - -- int fd() const override { return fd_; } -+ int fd() const override { return _ARROW_FD; } - - bool is_open() const override { return is_open_; } - -@@ -345,7 +365,7 @@ - std::mutex lock_; - - // File descriptor -- int fd_; -+ _ARROW_FD_TYPE fd_; - - FileMode::type mode_; - -@@ -411,7 +431,11 @@ - // already closed). 
- is_open_ = false; - //int fd = fd_; -+ #if ARROW_VERSION_MAJOR < 9 - fd_ = -1; -+ #else -+ fd_.Close(); -+ #endif - //RETURN_NOT_OK(::arrow::internal::FileClose(fd)); - } - return Status::OK(); -@@ -421,7 +445,7 @@ - NOT_IMPLEMENT; - RETURN_NOT_OK(CheckClosed()); - RETURN_NOT_OK(CheckPositioned()); -- return ::arrow::internal::FileRead(fd_, reinterpret_cast(out), nbytes); -+ return ::arrow::internal::FileRead(_ARROW_FD, reinterpret_cast(out), nbytes); - } - - Result ReadAt(int64_t position, int64_t nbytes, void* out) { -@@ -443,7 +467,7 @@ - return Status::OK(); - } - -- int fd() const { return fd_; } -+ int fd() const { return _ARROW_FD; } - - bool is_open() const { return is_open_; } - -@@ -467,7 +491,7 @@ - std::mutex lock_; - - // File descriptor -- int fd_; -+ _ARROW_FD_TYPE fd_; - - FileMode::type mode_; - -@@ -609,7 +633,7 @@ - for (const auto& range : ranges) { - RETURN_NOT_OK(internal::ValidateRange(range.offset, range.length)); - #if defined(POSIX_FADV_WILLNEED) -- if (posix_fadvise(fd_, range.offset, range.length, POSIX_FADV_WILLNEED)) { -+ if (posix_fadvise(_ARROW_FD, range.offset, range.length, POSIX_FADV_WILLNEED)) { - return IOErrorFromErrno(errno, "posix_fadvise failed"); - } - #elif defined(F_RDADVISE) // macOS, BSD? -@@ -617,7 +641,7 @@ - off_t ra_offset; - int ra_count; - } radvisory{range.offset, static_cast(range.length)}; -- if (radvisory.ra_count > 0 && fcntl(fd_, F_RDADVISE, &radvisory) == -1) { -+ if (radvisory.ra_count > 0 && fcntl(_ARROW_FD, F_RDADVISE, &radvisory) == -1) { - return IOErrorFromErrno(errno, "fcntl(fd, F_RDADVISE, ...) 
failed"); - } - #endif -@@ -970,6 +994,9 @@ - CryptoContext ctx(col->has_dictionary_page(), row_group_ordinal_, - static_cast(i), meta_decryptor, data_decryptor); - return PageReader::Open(stream, col->num_values(), col->compression(), -+ #if ARROW_VERSION_MAJOR > 8 -+ false, -+ #endif - properties_.memory_pool(), &ctx); - } - -@@ -985,6 +1012,9 @@ - CryptoContext ctx(col->has_dictionary_page(), row_group_ordinal_, - static_cast(i), meta_decryptor, data_decryptor); - return PageReader::Open(stream, col->num_values(), col->compression(), -+ #if ARROW_VERSION_MAJOR > 8 -+ false, -+ #endif - properties_.memory_pool(), &ctx); - } - diff --git a/0024-gcc-13.patch b/0024-gcc-13.patch index 2e0e048..8682ab6 100644 --- a/0024-gcc-13.patch +++ b/0024-gcc-13.patch @@ -90,16 +90,6 @@ namespace librbd { ---- ceph-17.2.5/src/rocksdb/db/compaction/compaction_iteration_stats.h.orig 2023-01-26 17:05:20.605333926 -0500 -+++ ceph-17.2.5/src/rocksdb/db/compaction/compaction_iteration_stats.h 2023-01-26 17:05:46.376880846 -0500 -@@ -6,6 +6,7 @@ - #pragma once - - #include "rocksdb/rocksdb_namespace.h" -+#include - - struct CompactionIterationStats { - // Compaction statistics --- ceph-17.2.6/src/rocksdb/table/block_based/data_block_hash_index.h.orig 2023-04-21 17:46:42.186339184 -0400 +++ ceph-17.2.6/src/rocksdb/table/block_based/data_block_hash_index.h 2023-04-21 17:47:01.392005151 -0400 @@ -7,6 +7,7 @@ @@ -120,23 +110,3 @@ #include "rocksdb/rocksdb_namespace.h" ---- ceph-17.2.6/src/rocksdb/include/rocksdb/utilities/checkpoint.h.orig 2023-04-22 05:54:32.260798114 -0400 -+++ ceph-17.2.6/src/rocksdb/include/rocksdb/utilities/checkpoint.h 2023-04-22 05:55:42.997522143 -0400 -@@ -10,6 +10,7 @@ - - #include - #include -+#include - #include "rocksdb/status.h" - - namespace ROCKSDB_NAMESPACE { ---- ceph-17.2.6/src/rocksdb/third-party/folly/folly/synchronization/detail/ProxyLockable-inl.h.orig 2023-04-22 06:14:55.624679075 -0400 -+++ 
ceph-17.2.6/src/rocksdb/third-party/folly/folly/synchronization/detail/ProxyLockable-inl.h 2023-04-22 09:18:39.424624618 -0400 -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - #include - - namespace folly { diff --git a/0025-selinux-prepare-for-anon-inode-controls-enablement.patch b/0025-selinux-prepare-for-anon-inode-controls-enablement.patch deleted file mode 100644 index b989f91..0000000 --- a/0025-selinux-prepare-for-anon-inode-controls-enablement.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 73218e291ca68a927965bdffa7d43d0fc62c2718 Mon Sep 17 00:00:00 2001 -From: Ondrej Mosnacek -Date: Wed, 27 Jul 2022 17:14:25 +0200 -Subject: [PATCH] selinux: prepare for anon inode controls enablement - -We plan to start labeling anon inodes (userfaultfd and io_uring file -descriptors) properly in selinux-policy, which means that domains using -these will need new rules. - -See: https://github.com/fedora-selinux/selinux-policy/pull/1351 - -Since ceph may optionally use io_uring, this patch adds the necessary -interface call to its policy to avoid a regression. As the new interface -call is put under a conditional, the policy package will be buildable -against selinux-policy with or without the above PR merged, but it will -need to be rebuilt against the updated selinux-policy to actually pick -up the new rules. - -I tested this on a minimal ceph cluster with 'bdev_ioring = true' added -to ceph.conf. I got io_uring denials without this patch + with -selinux-policy with PR#1351 and no denials with ceph rebuilt with this -patch. 
- -Signed-off-by: Ondrej Mosnacek ---- - selinux/ceph.te | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/selinux/ceph.te b/selinux/ceph.te -index 77d35d9714b60..729bce1fc8589 100644 ---- a/selinux/ceph.te -+++ b/selinux/ceph.te -@@ -75,6 +75,9 @@ manage_lnk_files_pattern(ceph_t, ceph_var_run_t, ceph_var_run_t) - - kernel_read_system_state(ceph_t) - kernel_read_network_state(ceph_t) -+ifdef(`kernel_io_uring_use',` -+ kernel_io_uring_use(ceph_t) -+') - allow ceph_t kernel_t:system module_request; - - corenet_all_recvfrom_unlabeled(ceph_t) diff --git a/0025-src-osd-scrubber-scrub_backend.h.patch b/0025-src-osd-scrubber-scrub_backend.h.patch new file mode 100644 index 0000000..622daaa --- /dev/null +++ b/0025-src-osd-scrubber-scrub_backend.h.patch @@ -0,0 +1,32 @@ +--- ceph-18.0.0-1810-g728e8ac0/src/osd/scrubber/scrub_backend.h.orig 2023-01-18 16:35:03.398700052 -0500 ++++ ceph-18.0.0-1810-g728e8ac0/src/osd/scrubber/scrub_backend.h 2023-01-18 16:37:55.882677965 -0500 +@@ -183,20 +183,20 @@ + // note: 'if' chain, as hard to consistently (on all compilers) avoid some + // warnings for a switch plus multiple return paths + if (as_auth.possible_auth == shard_as_auth_t::usable_t::not_usable) { +- return format_to(ctx.out(), +- "{{shard-not-usable:{}}}", +- as_auth.error_text); ++ return fmt::format_to(ctx.out(), ++ "{{shard-not-usable:{}}}", ++ as_auth.error_text.c_str()); + } + if (as_auth.possible_auth == shard_as_auth_t::usable_t::not_found) { +- return format_to(ctx.out(), "{{shard-not-found}}"); ++ return fmt::format_to(ctx.out(), "{{shard-not-found}}"); + } +- return format_to(ctx.out(), +- "{{shard-usable: soid:{} {{txt:{}}} }}", +- as_auth.oi.soid, +- as_auth.error_text); ++ return fmt::format_to(ctx.out(), ++ "{{shard-usable: soid:{} {{txt:{}}} }}", ++ as_auth.oi.soid, ++ as_auth.error_text.c_str()); + + } else { +- return format_to( ++ return fmt::format_to( + ctx.out(), + "usable:{} soid:{} {{txt:{}}}", + (as_auth.possible_auth == 
shard_as_auth_t::usable_t::usable) ? "yes" diff --git a/0026-src-boost-libs-python-src-object.patch b/0026-src-boost-libs-python-src-object.patch deleted file mode 100644 index 3ed36b4..0000000 --- a/0026-src-boost-libs-python-src-object.patch +++ /dev/null @@ -1,99 +0,0 @@ ---- ceph-17.2.5/src/boost/libs/python/src/object/enum.cpp.orig 2023-02-23 08:45:36.498595122 -0500 -+++ ceph-17.2.5/src/boost/libs/python/src/object/enum.cpp 2023-02-23 08:46:11.277990890 -0500 -@@ -153,7 +153,7 @@ - { - if (enum_type_object.tp_dict == 0) - { -- Py_TYPE(&enum_type_object) = incref(&PyType_Type); -+ Py_SET_TYPE(&enum_type_object, incref(&PyType_Type)); - #if PY_VERSION_HEX >= 0x03000000 - enum_type_object.tp_base = &PyLong_Type; - #else ---- ceph-17.2.5/src/boost/libs/python/src/object/function.cpp.orig 2023-02-23 08:44:19.995920877 -0500 -+++ ceph-17.2.5/src/boost/libs/python/src/object/function.cpp 2023-02-23 08:45:26.426770100 -0500 -@@ -107,7 +107,7 @@ - PyObject* p = this; - if (Py_TYPE(&function_type) == 0) - { -- Py_TYPE(&function_type) = &PyType_Type; -+ Py_SET_TYPE(&function_type, &PyType_Type); - ::PyType_Ready(&function_type); - } - ---- ceph-17.2.5/src/boost/libs/python/src/object/life_support.cpp.orig 2023-02-23 08:43:37.511650115 -0500 -+++ ceph-17.2.5/src/boost/libs/python/src/object/life_support.cpp 2023-02-23 08:44:10.225088588 -0500 -@@ -93,7 +93,7 @@ - - if (Py_TYPE(&life_support_type) == 0) - { -- Py_TYPE(&life_support_type) = &PyType_Type; -+ Py_SET_TYPE(&life_support_type, &PyType_Type); - PyType_Ready(&life_support_type); - } - ---- ceph-17.2.5/src/boost/libs/python/src/object/class.cpp.orig 2023-02-23 08:46:22.394797757 -0500 -+++ ceph-17.2.5/src/boost/libs/python/src/object/class.cpp 2023-02-23 10:54:56.016527900 -0500 -@@ -21,6 +21,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -208,7 +209,7 @@ - { - if (static_data_object.tp_dict == 0) - { -- Py_TYPE(&static_data_object) = &PyType_Type; -+ 
Py_SET_TYPE(&static_data_object, &PyType_Type); - static_data_object.tp_base = &PyProperty_Type; - if (PyType_Ready(&static_data_object)) - return 0; -@@ -316,7 +317,7 @@ - { - if (class_metatype_object.tp_dict == 0) - { -- Py_TYPE(&class_metatype_object) = &PyType_Type; -+ Py_SET_TYPE(&class_metatype_object, &PyType_Type); - class_metatype_object.tp_base = &PyType_Type; - if (PyType_Ready(&class_metatype_object)) - return type_handle(); -@@ -374,12 +375,7 @@ - // like, so we'll store the total size of the object - // there. A negative number indicates that the extra - // instance memory is not yet allocated to any holders. --#if PY_VERSION_HEX >= 0x02060000 -- Py_SIZE(result) = --#else -- result->ob_size = --#endif -- -(static_cast(offsetof(instance<>,storage) + instance_size)); -+ Py_SET_SIZE(result,-static_cast(offsetof(instance<>,storage) + instance_size)); - } - return (PyObject*)result; - } -@@ -470,7 +466,7 @@ - { - if (class_type_object.tp_dict == 0) - { -- Py_TYPE(&class_type_object) = incref(class_metatype().get()); -+ Py_SET_TYPE(&class_type_object, incref(class_metatype().get())); - class_type_object.tp_base = &PyBaseObject_Type; - if (PyType_Ready(&class_type_object)) - return type_handle(); -@@ -738,8 +734,13 @@ - // holder_offset should at least point into the variable-sized part - assert(holder_offset >= offsetof(objects::instance<>,storage)); - -+ size_t allocated = holder_size + 8; -+ void* storage = (char*)self + holder_offset; -+ void* aligned_storage = ::boost::alignment::align(8, holder_size, storage, allocated); -+ - // Record the fact that the storage is occupied, noting where it starts -- Py_SIZE(self) = holder_offset; -+ const size_t offset = reinterpret_cast(aligned_storage) - reinterpret_cast(storage) + holder_offset; -+ Py_SET_SIZE(self, offset); - return (char*)self + holder_offset; - } - else diff --git a/0026-src-osd-scrubber-scrub_backend.cc.patch b/0026-src-osd-scrubber-scrub_backend.cc.patch new file mode 100644 index 
0000000..2fdc52a --- /dev/null +++ b/0026-src-osd-scrubber-scrub_backend.cc.patch @@ -0,0 +1,199 @@ +--- ceph-18.0.0-2148-g9754cafc/src/osd/scrubber/scrub_backend.cc.orig 2023-02-08 16:01:53.800709761 -0500 ++++ ceph-18.0.0-2148-g9754cafc/src/osd/scrubber/scrub_backend.cc 2023-02-11 05:06:14.954254050 -0500 +@@ -507,11 +507,11 @@ + } + } + +- dout(10) << fmt::format("{}: selecting osd {} for obj {} with oi {}", ++ dout(10) << fmt::format("{}: selecting osd {} for obj {} with oi {:p}", + __func__, + ret_auth.auth_shard, + ho, +- ret_auth.auth_oi) ++ (void*)&ret_auth.auth_oi) + << dendl; + + return ret_auth; +@@ -1171,23 +1171,23 @@ + + if (auth.digest_present && candidate.digest_present && + auth.digest != candidate.digest) { +- format_to(std::back_inserter(out), +- "data_digest {:#x} != data_digest {:#x} from shard {}", +- candidate.digest, +- auth.digest, +- auth_shard); ++ fmt::format_to(std::back_inserter(out), ++ "data_digest {:#x} != data_digest {:#x} from shard {}", ++ candidate.digest, ++ auth.digest, ++ auth_shard); + error = true; + obj_result.set_data_digest_mismatch(); + } + + if (auth.omap_digest_present && candidate.omap_digest_present && + auth.omap_digest != candidate.omap_digest) { +- format_to(std::back_inserter(out), +- "{}omap_digest {:#x} != omap_digest {:#x} from shard {}", +- sep(error), +- candidate.omap_digest, +- auth.omap_digest, +- auth_shard); ++ fmt::format_to(std::back_inserter(out), ++ "{}omap_digest {:#x} != omap_digest {:#x} from shard {}", ++ sep(error), ++ candidate.omap_digest, ++ auth.omap_digest, ++ auth_shard); + obj_result.set_omap_digest_mismatch(); + } + +@@ -1195,24 +1195,24 @@ + if (m_is_replicated) { + if (auth_oi.is_data_digest() && candidate.digest_present && + auth_oi.data_digest != candidate.digest) { +- format_to(std::back_inserter(out), +- "{}data_digest {:#x} != data_digest {:#x} from auth oi {}", +- sep(error), +- candidate.digest, +- auth_oi.data_digest, +- auth_oi); ++ fmt::format_to(std::back_inserter(out), ++ 
"{}data_digest {:#x} != data_digest {:#x} from auth oi {:p}", ++ sep(error), ++ candidate.digest, ++ auth_oi.data_digest, ++ (void*)&auth_oi); + shard_result.set_data_digest_mismatch_info(); + } + + // for replicated: + if (auth_oi.is_omap_digest() && candidate.omap_digest_present && + auth_oi.omap_digest != candidate.omap_digest) { +- format_to(std::back_inserter(out), +- "{}omap_digest {:#x} != omap_digest {:#x} from auth oi {}", +- sep(error), +- candidate.omap_digest, +- auth_oi.omap_digest, +- auth_oi); ++ fmt::format_to(std::back_inserter(out), ++ "{}omap_digest {:#x} != omap_digest {:#x} from auth oi {:p}", ++ sep(error), ++ candidate.omap_digest, ++ auth_oi.omap_digest, ++ (void*)&auth_oi); + shard_result.set_omap_digest_mismatch_info(); + } + } +@@ -1241,7 +1241,7 @@ + auth_bl.push_back(auth_attr->second); + + if (!can_bl.contents_equal(auth_bl)) { +- format_to(std::back_inserter(out), "{}object info inconsistent ", sep(error)); ++ fmt::format_to(std::back_inserter(out), "{}object info inconsistent ", sep(error)); + obj_result.set_object_info_inconsistency(); + } + } +@@ -1261,7 +1261,7 @@ + auth_bl.push_back(auth_attr->second); + + if (!can_bl.contents_equal(auth_bl)) { +- format_to(std::back_inserter(out), "{}snapset inconsistent ", sep(error)); ++ fmt::format_to(std::back_inserter(out), "{}snapset inconsistent ", sep(error)); + obj_result.set_snapset_inconsistency(); + } + } +@@ -1284,7 +1284,7 @@ + auth_bl.push_back(auth_hi->second); + + if (!can_bl.contents_equal(auth_bl)) { +- format_to(std::back_inserter(out), "{}hinfo inconsistent ", sep(error)); ++ fmt::format_to(std::back_inserter(out), "{}hinfo inconsistent ", sep(error)); + obj_result.set_hinfo_inconsistency(); + } + } +@@ -1296,22 +1296,22 @@ + + uint64_t oi_size = logical_to_ondisk_size(auth_oi.size); + if (oi_size != candidate.size) { +- format_to(std::back_inserter(out), +- "{}size {} != size {} from auth oi {}", +- sep(error), +- candidate.size, +- oi_size, +- auth_oi); ++ 
fmt::format_to(std::back_inserter(out), ++ "{}size {} != size {} from auth oi {:p}", ++ sep(error), ++ candidate.size, ++ oi_size, ++ (void*)&auth_oi); + shard_result.set_size_mismatch_info(); + } + + if (auth.size != candidate.size) { +- format_to(std::back_inserter(out), +- "{}size {} != size {} from shard {}", +- sep(error), +- candidate.size, +- auth.size, +- auth_shard); ++ fmt::format_to(std::back_inserter(out), ++ "{}size {} != size {} from shard {}", ++ sep(error), ++ candidate.size, ++ auth.size, ++ auth_shard); + obj_result.set_size_mismatch(); + } + +@@ -1320,11 +1320,11 @@ + if (candidate.size > m_conf->osd_max_object_size && + !obj_result.has_size_too_large()) { + +- format_to(std::back_inserter(out), +- "{}size {} > {} is too large", +- sep(error), +- candidate.size, +- m_conf->osd_max_object_size); ++ fmt::format_to(std::back_inserter(out), ++ "{}size {} > {} is too large", ++ sep(error), ++ candidate.size, ++ m_conf->osd_max_object_size); + obj_result.set_size_too_large(); + } + +@@ -1340,10 +1340,10 @@ + + auto cand = candidate.attrs.find(k); + if (cand == candidate.attrs.end()) { +- format_to(std::back_inserter(out), "{}attr name mismatch '{}'", sep(error), k); ++ fmt::format_to(std::back_inserter(out), "{}attr name mismatch '{}'", sep(error), k); + obj_result.set_attr_name_mismatch(); + } else if (cand->second.cmp(v)) { +- format_to(std::back_inserter(out), "{}attr value mismatch '{}'", sep(error), k); ++ fmt::format_to(std::back_inserter(out), "{}attr value mismatch '{}'", sep(error), k); + obj_result.set_attr_value_mismatch(); + } + } +@@ -1356,7 +1356,7 @@ + + auto in_auth = auth.attrs.find(k); + if (in_auth == auth.attrs.end()) { +- format_to(std::back_inserter(out), "{}attr name mismatch '{}'", sep(error), k); ++ fmt::format_to(std::back_inserter(out), "{}attr name mismatch '{}'", sep(error), k); + obj_result.set_attr_name_mismatch(); + } + } +@@ -1823,8 +1823,7 @@ + SnapMapReaderI& snaps_getter) + { + using result_t = 
Scrub::SnapMapReaderI::result_t; +- dout(15) << fmt::format("{}: obj:{} snapset:{}", __func__, hoid, snapset) +- << dendl; ++ // dout(15) << fmt::format("{}: obj:{} snapset:{}", __func__, hoid, snapset) << dendl; + + auto p = snapset.clone_snaps.find(hoid.snap); + if (p == snapset.clone_snaps.end()) { diff --git a/0027-src-kv-rocksdb_cache-ShardedCache.h.patch b/0027-src-kv-rocksdb_cache-ShardedCache.h.patch new file mode 100644 index 0000000..347624e --- /dev/null +++ b/0027-src-kv-rocksdb_cache-ShardedCache.h.patch @@ -0,0 +1,151 @@ +--- ceph-17.2.6/src/kv/rocksdb_cache/ShardedCache.h.orig 2023-04-05 11:09:51.000000000 -0400 ++++ ceph-17.2.6/src/kv/rocksdb_cache/ShardedCache.h 2023-04-21 16:22:26.665067333 -0400 +@@ -15,7 +15,7 @@ + #include + + #include "rocksdb/version.h" +-#include "rocksdb/cache.h" ++#include "rocksdb/advanced_cache.h" + #include "include/ceph_hash.h" + #include "common/PriorityCache.h" + //#include "hash.h" +@@ -26,7 +26,8 @@ + + namespace rocksdb_cache { + +-using DeleterFn = void (*)(const rocksdb::Slice& key, void* value); ++// using DeleterFn = void (*)(const rocksdb::Slice& key, void* value); ++using DeleterFn = void (*)(rocksdb::Cache::ObjectPtr obj, rocksdb::MemoryAllocator* allocator); + + // Single cache shard interface. 
+ class CacheShard { +@@ -34,11 +35,19 @@ + CacheShard() = default; + virtual ~CacheShard() = default; + +- virtual rocksdb::Status Insert(const rocksdb::Slice& key, uint32_t hash, void* value, +- size_t charge, +- DeleterFn deleter, +- rocksdb::Cache::Handle** handle, rocksdb::Cache::Priority priority) = 0; +- virtual rocksdb::Cache::Handle* Lookup(const rocksdb::Slice& key, uint32_t hash) = 0; ++ virtual rocksdb::Status Insert(const rocksdb::Slice& key, ++ rockdb::ObjectPtr obj, ++ const rocksdb::CacheItemHelper* helper, ++ size_t charge, ++ rocksdb:Handle** handle = nullptr, ++ Rocksdb::Priority priority = Rocksdb::Priority::LOW) ++ virtual rocksdb::Cache::Handle* Lookup(const rocksdb::Slice& key, ++ const rocksdb::CacheItemHelper* helper = nullptr, ++ rocksdb::CreateContext* create_context = nullptr, ++ rocksdb::Priority priority = rocksdb::Priority::LOW, ++ bool wait = true, ++ rocksdb::Statistics* stats = nullptr); ++ + virtual bool Ref(rocksdb::Cache::Handle* handle) = 0; + virtual bool Release(rocksdb::Cache::Handle* handle, bool force_erase = false) = 0; + virtual void Erase(const rocksdb::Slice& key, uint32_t hash) = 0; +@@ -68,8 +77,8 @@ + virtual const char* Name() const override = 0; + virtual rocksdb::Status Insert(const rocksdb::Slice& key, void* value, size_t charge, + DeleterFn, +- rocksdb::Cache::Handle** handle, Priority priority) override; +- virtual rocksdb::Cache::Handle* Lookup(const rocksdb::Slice& key, rocksdb::Statistics* stats) override; ++ rocksdb::Cache::Handle** handle, Priority priority); ++ virtual rocksdb::Cache::Handle* Lookup(const rocksdb::Slice& key, rocksdb::Statistics* stats); + virtual bool Ref(rocksdb::Cache::Handle* handle) override; + virtual bool Release(rocksdb::Cache::Handle* handle, bool force_erase = false) override; + virtual void* Value(Handle* handle) override = 0; +@@ -84,14 +93,17 @@ + virtual size_t GetPinnedUsage() const override; + virtual size_t GetCharge(Handle* handle) const = 0; + #if (ROCKSDB_MAJOR >= 7 
|| (ROCKSDB_MAJOR == 6 && ROCKSDB_MINOR >= 22)) +- virtual DeleterFn GetDeleter(Handle* handle) const override; ++ virtual DeleterFn GetDeleter(Handle* handle) const; + #endif + virtual void DisownData() override = 0; + #if (ROCKSDB_MAJOR >= 7 || (ROCKSDB_MAJOR == 6 && ROCKSDB_MINOR >= 22)) ++ virtual const rocksdb::CacheItemHelper* GetCacheItemHelper(rocksdb::Cache::Handle* handle) const; + virtual void ApplyToAllEntries( +- const std::function& callback, +- const ApplyToAllEntriesOptions& opts) override; ++ const std::function& callback, ++ const rocksdb::ApplyToAllEntriesOptions& opts); + #else + virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t), + bool thread_safe) override; +--- ceph-17.2.6/src/kv/RocksDBStore.cc.orig 2023-04-05 11:09:51.000000000 -0400 ++++ ceph-17.2.6/src/kv/RocksDBStore.cc 2023-04-20 16:19:29.280669881 -0400 +@@ -903,6 +903,19 @@ + // base_name - name of column without shard suffix: "-"+number + // options - additional options to apply + // cf_opt - column family options to update ++ ++rocksdb::Status GetColumnFamilyOptionsFromMap( ++ const rocksdb::ColumnFamilyOptions& base_options, ++ const std::unordered_map& opts_map, ++ rocksdb::ColumnFamilyOptions* new_options, bool input_strings_escaped, ++ bool ignore_unknown_options) { ++ rocksdb::ConfigOptions config_options; ++ config_options.ignore_unknown_options = ignore_unknown_options; ++ config_options.input_strings_escaped = input_strings_escaped; ++ return rocksdb::GetColumnFamilyOptionsFromMap(config_options, base_options, opts_map, ++ new_options); ++} ++ + int RocksDBStore::update_column_family_options(const std::string& base_name, + const std::string& more_options, + rocksdb::ColumnFamilyOptions* cf_opt) +@@ -916,7 +929,7 @@ + << " options=" << more_options << dendl; + return r; + } +- status = rocksdb::GetColumnFamilyOptionsFromMap(*cf_opt, options_map, cf_opt); ++ status = GetColumnFamilyOptionsFromMap(*cf_opt, options_map, cf_opt, false, false); + if 
(!status.ok()) { + dout(5) << __func__ << " invalid column family optionsp; column family=" + << base_name << " options=" << more_options << dendl; +@@ -937,6 +950,20 @@ + return 0; + } + ++rocksdb::Status GetBlockBasedTableOptionsFromMap( ++ const rocksdb::BlockBasedTableOptions& table_options, ++ const std::unordered_map& opts_map, ++ rocksdb::BlockBasedTableOptions* new_table_options, bool input_strings_escaped, ++ bool ignore_unknown_options) { ++ rocksdb::ConfigOptions config_options; ++ config_options.input_strings_escaped = input_strings_escaped; ++ config_options.ignore_unknown_options = ignore_unknown_options; ++ config_options.invoke_prepare_options = false; ++ ++ return rocksdb::GetBlockBasedTableOptionsFromMap(config_options, table_options, ++ opts_map, new_table_options); ++} ++ + int RocksDBStore::apply_block_cache_options(const std::string& column_name, + const std::string& block_cache_opt, + rocksdb::ColumnFamilyOptions* cf_opt) +@@ -981,7 +1008,7 @@ + } + + rocksdb::BlockBasedTableOptions column_bbt_opts; +- status = GetBlockBasedTableOptionsFromMap(bbt_opts, cache_options_map, &column_bbt_opts); ++ status = GetBlockBasedTableOptionsFromMap(bbt_opts, cache_options_map, &column_bbt_opts, false, false); + if (!status.ok()) { + dout(5) << __func__ << " invalid block cache options; column=" << column_name + << " options=" << block_cache_opt << dendl; +--- ceph-17.2.6/src/kv/rocksdb_cache/BinnedLRUCache.h.orig 2023-04-21 10:11:00.180387609 -0400 ++++ ceph-17.2.6/src/kv/rocksdb_cache/BinnedLRUCache.h 2023-04-21 10:17:15.527816193 -0400 +@@ -121,7 +121,7 @@ + void Free() { + ceph_assert((refs == 1 && InCache()) || (refs == 0 && !InCache())); + if (deleter) { +- (*deleter)(key(), value); ++ (*deleter)(this, nullptr); + } + delete[] key_data; + delete this; diff --git a/0027-src-rocksdb-table-block_based-data_block_hash_index.h.patch b/0027-src-rocksdb-table-block_based-data_block_hash_index.h.patch deleted file mode 100644 index d03276f..0000000 --- 
a/0027-src-rocksdb-table-block_based-data_block_hash_index.h.patch +++ /dev/null @@ -1,40 +0,0 @@ ---- ceph-17.2.6/src/rocksdb/table/block_based/data_block_hash_index.h.orig 2023-04-21 17:46:42.186339184 -0400 -+++ ceph-17.2.6/src/rocksdb/table/block_based/data_block_hash_index.h 2023-04-21 17:47:01.392005151 -0400 -@@ -7,6 +7,7 @@ - - #include - #include -+#include - - #include "rocksdb/slice.h" - ---- ceph-17.2.6/src/rocksdb/util/string_util.h.orig 2023-04-21 18:13:51.060496792 -0400 -+++ ceph-17.2.6/src/rocksdb/util/string_util.h 2023-04-21 18:14:06.496223220 -0400 -@@ -10,6 +10,7 @@ - #include - #include - #include -+#include - - #include "rocksdb/rocksdb_namespace.h" - ---- ceph-17.2.6/src/rocksdb/include/rocksdb/utilities/checkpoint.h.orig 2023-04-22 05:54:32.260798114 -0400 -+++ ceph-17.2.6/src/rocksdb/include/rocksdb/utilities/checkpoint.h 2023-04-22 05:55:42.997522143 -0400 -@@ -10,6 +10,7 @@ - - #include - #include -+#include - #include "rocksdb/status.h" - - namespace ROCKSDB_NAMESPACE { ---- ceph-17.2.6/src/rocksdb/third-party/folly/folly/synchronization/detail/ProxyLockable-inl.h.orig 2023-04-22 06:14:55.624679075 -0400 -+++ ceph-17.2.6/src/rocksdb/third-party/folly/folly/synchronization/detail/ProxyLockable-inl.h 2023-04-22 09:18:39.424624618 -0400 -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - #include - - namespace folly { diff --git a/0028-cmake-modules-BuildBoost.cmake.patch b/0028-cmake-modules-BuildBoost.cmake.patch deleted file mode 100644 index 631d072..0000000 --- a/0028-cmake-modules-BuildBoost.cmake.patch +++ /dev/null @@ -1,15 +0,0 @@ ---- ceph-17.2.6/cmake/modules/BuildBoost.cmake.orig 2023-04-27 14:00:28.239524778 -0400 -+++ ceph-17.2.6/cmake/modules/BuildBoost.cmake 2023-04-28 07:49:59.743342207 -0400 -@@ -63,7 +63,11 @@ - else() - list(APPEND boost_features "address-model=32") - endif() -- set(BOOST_CXXFLAGS "-fPIC -w") # check on arm, etc <---XXX -+ if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64") -+ set(BOOST_CXXFLAGS 
"-fPIC -w -fcf-protection") # check on arm, etc <---XXX -+ else() -+ set(BOOST_CXXFLAGS "-fPIC -w") # check on arm, etc <---XXX -+ endif() - list(APPEND boost_features "cxxflags=${BOOST_CXXFLAGS}") - - set(boost_with_libs) diff --git a/0029-src-rgw-rgw_amqp.cc.patch b/0029-src-rgw-rgw_amqp.cc.patch new file mode 100644 index 0000000..8cd8fa9 --- /dev/null +++ b/0029-src-rgw-rgw_amqp.cc.patch @@ -0,0 +1,17 @@ +--- ceph-18.0.0-2726-g7cea3740/src/rgw/rgw_amqp.cc.orig 2023-03-14 18:22:35.636864260 -0400 ++++ ceph-18.0.0-2726-g7cea3740/src/rgw/rgw_amqp.cc 2023-03-14 18:24:36.362756771 -0400 +@@ -2,10 +2,10 @@ + // vim: ts=8 sw=2 smarttab ft=cpp + + #include "rgw_amqp.h" +-#include +-#include +-#include +-#include ++#include ++#include ++#include ++#include + #include "include/ceph_assert.h" + #include + #include diff --git a/0030-src-CMakeLists.txt.patch b/0030-src-CMakeLists.txt.patch deleted file mode 100644 index bc363db..0000000 --- a/0030-src-CMakeLists.txt.patch +++ /dev/null @@ -1,10 +0,0 @@ ---- ceph-17.2.6/src/CMakeLists.txt.orig 2023-05-10 10:00:58.457793274 -0400 -+++ ceph-17.2.6/src/CMakeLists.txt 2023-05-10 10:01:31.553198698 -0400 -@@ -809,6 +809,7 @@ - if(WITH_KRBD) - add_library(krbd STATIC krbd.cc - $) -+ add_dependencies(krbd legacy-option-headers) - target_link_libraries(krbd keyutils::keyutils) - endif() - add_subdirectory(librbd) diff --git a/0030-src-rgw-rgw_asio_client.cc.patch b/0030-src-rgw-rgw_asio_client.cc.patch new file mode 100644 index 0000000..bcb5fcc --- /dev/null +++ b/0030-src-rgw-rgw_asio_client.cc.patch @@ -0,0 +1,61 @@ +--- ceph-18.0.0-2726-g7cea3740/src/rgw/rgw_asio_client.cc.orig 2023-03-14 18:46:02.037195570 -0400 ++++ ceph-18.0.0-2726-g7cea3740/src/rgw/rgw_asio_client.cc 2023-03-14 18:55:14.446438244 -0400 +@@ -39,11 +39,13 @@ + const auto& value = header->value(); + + if (field == beast::http::field::content_length) { +- env.set("CONTENT_LENGTH", value.to_string()); ++ std::string scratch{value.data(), value.size()}; ++ 
env.set("CONTENT_LENGTH", scratch.c_str()); + continue; + } + if (field == beast::http::field::content_type) { +- env.set("CONTENT_TYPE", value.to_string()); ++ std::string scratch{value.data(), value.size()}; ++ env.set("CONTENT_TYPE", scratch.c_str()); + continue; + } + +@@ -62,26 +64,37 @@ + } + *dest = '\0'; + +- env.set(buf, value.to_string()); ++ std::string scratch{value.data(), value.size()}; ++ env.set(buf, scratch.c_str()); + } + + int major = request.version() / 10; + int minor = request.version() % 10; + env.set("HTTP_VERSION", std::to_string(major) + '.' + std::to_string(minor)); + +- env.set("REQUEST_METHOD", request.method_string().to_string()); ++ { ++ std::string scratch {request.method_string().data(),request.method_string().size()}; ++ env.set("REQUEST_METHOD", scratch.c_str()); ++ } + + // split uri from query + auto uri = request.target(); + auto pos = uri.find('?'); + if (pos != uri.npos) { + auto query = uri.substr(pos + 1); +- env.set("QUERY_STRING", query.to_string()); ++ std::string scratch{query.data(), query.size()}; ++ env.set("QUERY_STRING", scratch.c_str()); + uri = uri.substr(0, pos); + } +- env.set("SCRIPT_URI", uri.to_string()); ++ { ++ std::string scratch {uri.data(), uri.size()}; ++ env.set("SCRIPT_URI", scratch.c_str()); ++ } + +- env.set("REQUEST_URI", request.target().to_string()); ++ { ++ std::string scratch {request.target().data(), request.target().size()}; ++ env.set("REQUEST_URI", scratch.c_str()); ++ } + + char port_buf[16]; + snprintf(port_buf, sizeof(port_buf), "%d", local_endpoint.port()); diff --git a/0032-cmake-modules-BuildBoost.cmake.patch b/0032-cmake-modules-BuildBoost.cmake.patch new file mode 100644 index 0000000..91c8d20 --- /dev/null +++ b/0032-cmake-modules-BuildBoost.cmake.patch @@ -0,0 +1,24 @@ +--- ceph-18.0.0-2950-g1c931bc4/cmake/modules/BuildBoost.cmake.orig 2023-04-28 18:30:19.133064577 -0400 ++++ ceph-18.0.0-2950-g1c931bc4/cmake/modules/BuildBoost.cmake 2023-04-28 18:31:55.290354383 -0400 +@@ -104,12 
+104,21 @@ + set(user_config ${CMAKE_BINARY_DIR}/user-config.jam) + # edit the user-config.jam so b2 will be able to use the specified + # toolset and python ++if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64") ++ file(WRITE ${user_config} ++ "using ${toolset}" ++ " : " ++ " : ${CMAKE_CXX_COMPILER}" ++ " : -fPIC -w -fcf-protection -Wno-everything" ++ " ;\n") ++else() + file(WRITE ${user_config} + "using ${toolset}" + " : " + " : ${CMAKE_CXX_COMPILER}" + " : -fPIC -w -Wno-everything" + " ;\n") ++endif() + if(with_python_version) + find_package(Python3 ${with_python_version} QUIET REQUIRED + COMPONENTS Development) diff --git a/0029-boost-asm.patch b/0033-boost-asm.patch similarity index 100% rename from 0029-boost-asm.patch rename to 0033-boost-asm.patch diff --git a/ceph.spec b/ceph.spec index d70991e..95a21b4 100644 --- a/ceph.spec +++ b/ceph.spec @@ -35,15 +35,6 @@ %else %bcond_with rbd_rwl_cache %endif -%if 0%{?rhel} -%bcond_with ld_mold -%else -%ifarch x86_64 aarch64 -%bcond_without ld_mold -%else -%bcond_with ld_mold -%endif -%endif %if 0%{?fedora} || 0%{?rhel} %ifarch s390x %{arm64} %bcond_with system_pmdk @@ -51,11 +42,7 @@ %bcond_without system_pmdk %endif %bcond_without selinux -%if 0%{?rhel} >= 8 -%bcond_with cephfs_java -%else %bcond_without cephfs_java -%endif %bcond_without amqp_endpoint %bcond_without kafka_endpoint %bcond_without lttng @@ -66,7 +53,11 @@ %global _remote_tarball_prefix https://download.ceph.com/tarballs/ %endif %if 0%{?suse_version} +%ifarch s390x +%bcond_with system_pmdk +%else %bcond_without system_pmdk +%endif %bcond_with amqp_endpoint %bcond_with cephfs_java %bcond_with kafka_endpoint @@ -99,7 +90,11 @@ %endif %endif %bcond_with seastar +%if 0%{?suse_version} %bcond_with jaeger +%else +%bcond_without jaeger +%endif %if 0%{?fedora} || 0%{?suse_version} >= 1500 || 0%{?rhel} >= 10 # distros that ship cmd2 and/or colorama %bcond_without cephfs_shell @@ -107,8 +102,15 @@ # distros that do _not_ ship cmd2/colorama %bcond_with cephfs_shell 
%endif +%if 0%{?fedora} || 0%{?rhel} >= 9 %bcond_without system_arrow %bcond_without system_utf8proc +%else +# for centos 8, utf8proc-devel comes from the subversion-devel module which isn't available in EPEL8 +# this is tracked in https://bugzilla.redhat.com/2152265 +%bcond_with system_arrow +%bcond_with system_utf8proc +%endif %if 0%{?fedora} || 0%{?suse_version} || 0%{?rhel} >= 8 %global weak_deps 1 %endif @@ -129,7 +131,6 @@ %{!?python3_version: %global python3_version 3} %{!?gts_prefix: %global gts_prefix gcc-toolset-11} - %if ! 0%{?suse_version} # use multi-threaded xz compression: xz level 7 using ncpus threads %global _source_payload w7T%{_smp_build_ncpus}.xzdio @@ -168,8 +169,8 @@ # main package definition ################################################################################# Name: ceph -Version: 17.2.6 -Release: 8%{?dist} +Version: 18.1.0 +Release: 0.1%{?dist} %if 0%{?fedora} || 0%{?rhel} Epoch: 2 %endif @@ -180,13 +181,13 @@ Epoch: 2 Summary: User space components of the Ceph file system #License: LGPL-2.1 and LGPL-3.0 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD-3-Clause and MIT -#License: (LGPLv2+ or LGPLv3) and CC-BY-SA-3.0 and GPLv2 and Boost and BSD and MIT -License: (LGPL-2.1-or-later OR LGPL-3.0-only) and CC-BY-SA-3.0 and GPL-2.0-only and BSL-1.0 and BSD-3-Clause and MIT +License: (LGPLv2+ or LGPLv3) and CC-BY-SA-3.0 and GPLv2 and Boost and BSD and MIT %if 0%{?suse_version} Group: System/Filesystems %endif URL: http://ceph.com/ Source0: https://download.ceph.com/tarballs/ceph-%{version}.tar.gz +#Source0: https://1.chacra.ceph.com/r/ceph/quincy/ Patch0001: 0001-src-common-crc32c_intel_fast.patch Patch0003: 0003-src-common-bitstr.h.patch Patch0008: 0008-cmake-modules-Finduring.cmake.patch @@ -196,15 +197,14 @@ Patch0012: 0012-spdk-isa-l-CET-Add-CET-marker-to-x86-64-crc32-assemb.patch Patch0016: 0016-src-tracing-patch Patch0017: 0017-gcc-12-omnibus.patch Patch0018: 0018-src-rgw-store-dbstore-CMakeLists.txt.patch -Patch0019: 
0019-cmake-modules-CheckCxxAtomic.cmake.patch Patch0020: 0020-src-arrow-cpp-cmake_modules-ThirdpartyToolchain.cmake.patch -Patch0023: 0023-src-s3select-include-s3select_parquet_intrf.h.patch Patch0024: 0024-gcc-13.patch -Patch0025: 0025-selinux-prepare-for-anon-inode-controls-enablement.patch -Patch0026: 0026-src-boost-libs-python-src-object.patch -Patch0028: 0028-cmake-modules-BuildBoost.cmake.patch -Patch0029: 0029-boost-asm.patch -Patch0030: 0030-src-CMakeLists.txt.patch +Patch0025: 0025-src-osd-scrubber-scrub_backend.h.patch +Patch0026: 0026-src-osd-scrubber-scrub_backend.cc.patch +Patch0029: 0029-src-rgw-rgw_amqp.cc.patch +Patch0030: 0030-src-rgw-rgw_asio_client.cc.patch +Patch0032: 0032-cmake-modules-BuildBoost.cmake.patch +Patch0033: 0033-boost-asm.patch # ceph 14.0.1 does not support 32-bit architectures, bugs #1727788, #1727787 ExcludeArch: i686 armv7hl %if 0%{?suse_version} @@ -221,6 +221,7 @@ Requires: ceph-mon = %{_epoch_prefix}%{version}-%{release} Requires(post): binutils %if 0%{with cephfs_java} BuildRequires: java-devel +BuildRequires: jpackage-utils BuildRequires: sharutils %endif %if 0%{with selinux} @@ -230,21 +231,31 @@ BuildRequires: selinux-policy-devel BuildRequires: gperf BuildRequires: cmake > 3.5 BuildRequires: fuse3-devel -%if 0%{?fedora} || 0%{?suse_version} || 0%{?rhel} == 9 +%if 0%{?fedora} || 0%{?suse_version} > 1500 || 0%{?rhel} >= 9 BuildRequires: gcc-c++ >= 11 %endif +%if 0%{?suse_version} == 1500 +BuildRequires: gcc11-c++ +%endif %if 0%{?rhel} == 8 BuildRequires: %{gts_prefix}-gcc-c++ BuildRequires: %{gts_prefix}-build -%ifarch aarch64 BuildRequires: %{gts_prefix}-libatomic-devel %endif -%endif -%if 0%{?fedora} || 0%{?rhel} == 9 -BuildRequires: libatomic +%if 0%{?fedora} || 0%{?rhel} >= 9 +BuildRequires: libatomic BuildRequires: gcc-c++ %endif BuildRequires: libatomic +%if 0%{?rhel} +%bcond_with ld_mold +%else +%ifarch x86_64 aarch64 +%bcond_without ld_mold +%else +%bcond_with ld_mold +%endif +%endif %if 0%{with ld_mold} 
BuildRequires: mold %endif @@ -265,8 +276,9 @@ BuildRequires: libaio-devel BuildRequires: libblkid-devel >= 2.17 BuildRequires: cryptsetup-devel BuildRequires: libcurl-devel +BuildRequires: libcap-devel BuildRequires: libcap-ng-devel -BuildRequires: fmt-devel >= 6.2.1 +#BuildRequires: fmt-devel >= 6.2.1 %if 0%{?fedora} || 0%{?rhel} >= 10 BuildRequires: rocksdb-devel Requires: rocksdb @@ -346,16 +358,18 @@ BuildRequires: nlohmann_json-devel BuildRequires: libevent-devel %endif %if 0%{with system_pmdk} +%if 0%{?suse_version} +BuildRequires: libndctl-devel >= 63 +%else +BuildRequires: ndctl-devel >= 63 +BuildRequires: daxctl-devel >= 63 +%endif BuildRequires: libpmem-devel -BuildRequires: libpmemobj-devel +BuildRequires: libpmemobj-devel >= 1.8 %endif %if 0%{with system_arrow} BuildRequires: libarrow-devel BuildRequires: parquet-libs-devel -%else -BuildRequires: xsimd-devel -%endif -%if 0%{with system_utf8proc} BuildRequires: utf8proc-devel %endif %if 0%{with seastar} @@ -371,10 +385,10 @@ BuildRequires: libubsan BuildRequires: libasan %endif %if 0%{?rhel} == 8 -BuildRequires: %{gts_prefix}-annobin -BuildRequires: %{gts_prefix}-annobin-plugin-gcc -BuildRequires: %{gts_prefix}-libubsan-devel -BuildRequires: %{gts_prefix}-libasan-devel +BuildRequires: %{gts_prefix}-annobin +BuildRequires: %{gts_prefix}-annobin-plugin-gcc +BuildRequires: %{gts_prefix}-libubsan-devel +BuildRequires: %{gts_prefix}-libasan-devel %endif %endif ################################################################################# @@ -413,6 +427,7 @@ BuildRequires: boost-devel BuildRequires: boost-random BuildRequires: nss-devel BuildRequires: keyutils-libs-devel +BuildRequires: libatomic BuildRequires: libibverbs-devel BuildRequires: librdmacm-devel BuildRequires: ninja-build @@ -511,7 +526,7 @@ Summary: Ceph Base Package %if 0%{?suse_version} Group: System/Filesystems %endif -Provides: ceph-test:/usr/bin/ceph-kvstore-tool = %{_epoch_prefix}%{version}-%{release} +Provides: 
ceph-test:/usr/bin/ceph-kvstore-tool Requires: ceph-common = %{_epoch_prefix}%{version}-%{release} Requires: librbd1 = %{_epoch_prefix}%{version}-%{release} Requires: librados2 = %{_epoch_prefix}%{version}-%{release} @@ -611,7 +626,7 @@ Summary: Ceph Monitor Daemon %if 0%{?suse_version} Group: System/Filesystems %endif -Provides: ceph-test:/usr/bin/ceph-monstore-tool = %{_epoch_prefix}%{version}-%{release} +Provides: ceph-test:/usr/bin/ceph-monstore-tool Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} %description mon ceph-mon is the cluster monitor daemon for the Ceph distributed file @@ -788,9 +803,9 @@ Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release} Daemon for mirroring CephFS snapshots between Ceph clusters. %package -n ceph-exporter -Summary: Daemon for exposing perf counters as Prometheus metrics +Summary: Daemon for exposing perf counters as Prometheus metrics %if 0%{?suse_version} -Group: System/Filesystems +Group: System/Filesystems %endif Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} %description -n ceph-exporter @@ -889,7 +904,7 @@ Summary: Ceph Object Storage Daemon %if 0%{?suse_version} Group: System/Filesystems %endif -Provides: ceph-test:/usr/bin/ceph-osdomap-tool = %{_epoch_prefix}%{version}-%{release} +Provides: ceph-test:/usr/bin/ceph-osdomap-tool Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} Requires: sudo Requires: libstoragemgmt @@ -1206,7 +1221,7 @@ Requires: python%{python3_pkgversion}-colorama Requires: python%{python3_pkgversion}-cephfs %description -n cephfs-shell This package contains an interactive tool that allows accessing a Ceph -file system without mounting it by providing a nice pseudo-shell which +file system without mounting it by providing a nice pseudo-shell which works like an FTP client. %endif @@ -1316,11 +1331,20 @@ Group: System/Monitoring %description prometheus-alerts This package provides Ceph default alerts for Prometheus. 
+%package mib +Summary: MIB for SNMP alerts +BuildArch: noarch +%if 0%{?suse_version} +Group: System/Monitoring +%endif +%description mib +This package provides a Ceph MIB for SNMP traps. + ################################################################################# # common ################################################################################# %prep -%autosetup -p1 +%autosetup -p1 -n %{name}-%{version} %build # Disable lto on systems that do not support symver attribute @@ -1354,7 +1378,10 @@ export CXXFLAGS="$RPM_OPT_FLAGS -DFMT_DEPRECATED_OSTREAM" %if 0%{with seastar} # seastar uses longjmp() to implement coroutine. and this annoys longjmp_chk() -%undefine _fortify_level +export CXXFLAGS=$(echo $RPM_OPT_FLAGS | sed -e 's/-Wp,-D_FORTIFY_SOURCE=2//g') +# remove from CFLAGS too because it causes the arrow submodule to fail with: +# warning _FORTIFY_SOURCE requires compiling with optimization (-O) +export CFLAGS=$(echo $RPM_OPT_FLAGS | sed -e 's/-Wp,-D_FORTIFY_SOURCE=2//g') %endif env | sort @@ -1378,6 +1405,8 @@ env | sort -DWITH_TESTS:BOOL=OFF \ %endif %if 0%{with cephfs_java} + -DJAVA_HOME=%{java_home} \ + -DJAVA_LIB_INSTALL_DIR=%{_jnidir} \ -DWITH_CEPHFS_JAVA:BOOL=ON \ %endif %if 0%{with selinux} @@ -1395,7 +1424,7 @@ env | sort -DWITH_OCF:BOOL=ON \ %endif %if 0%{?fedora} || 0%{?rhel} >= 10 - -DWITH_SYSTEM_ROCKSDB:BOOL=OFF \ + -DWITH_SYSTEM_ROCKSDB:BOOL=OFF\ %endif -DWITH_SYSTEM_LIBURING:BOOL=ON \ -DWITH_SYSTEM_BOOST:BOOL=OFF \ @@ -1435,8 +1464,8 @@ env | sort %if 0%{with system_pmdk} -DWITH_SYSTEM_PMDK:BOOL=ON \ %endif -%if 0%{with jaeger} - -DWITH_JAEGER:BOOL=ON \ +%if 0%{without jaeger} + -DWITH_JAEGER:BOOL=OFF \ %endif %if 0%{?suse_version} -DBOOST_J:STRING=%{jobs} \ @@ -1447,13 +1476,11 @@ env | sort -DWITH_SYSTEM_GTEST:BOOL=ON \ %endif -DWITH_SYSTEM_ZSTD:BOOL=ON \ -%if 0%{?rhel} +%if 0%{?fedora} || 0%{?rhel} -DWITH_FMT_HEADER_ONLY:BOOL=ON \ %endif %if 0%{with system_arrow} -DWITH_SYSTEM_ARROW:BOOL=ON \ -%endif -%if 0%{with system_utf8proc} 
-DWITH_SYSTEM_UTF8PROC:BOOL=ON \ %endif %if 0%{with ld_mold} @@ -1507,7 +1534,6 @@ install -m 0644 -D COPYING %{buildroot}%{_docdir}/ceph/COPYING install -m 0644 -D etc/sysctl/90-ceph-osd.conf %{buildroot}%{_sysctldir}/90-ceph-osd.conf install -m 0755 -D src/tools/rbd_nbd/rbd-nbd_quiesce %{buildroot}%{_libexecdir}/rbd-nbd/rbd-nbd_quiesce -install -m 0755 src/cephadm/cephadm %{buildroot}%{_sbindir}/cephadm mkdir -p %{buildroot}%{_sharedstatedir}/cephadm chmod 0700 %{buildroot}%{_sharedstatedir}/cephadm mkdir -p %{buildroot}%{_sharedstatedir}/cephadm/.ssh @@ -1516,7 +1542,7 @@ touch %{buildroot}%{_sharedstatedir}/cephadm/.ssh/authorized_keys chmod 0600 %{buildroot}%{_sharedstatedir}/cephadm/.ssh/authorized_keys # firewall templates and /sbin/mount.ceph symlink -%if 0%{?suse_version} && !0%{?usrmerged} +%if 0%{?suse_version} && 0%{?suse_version} < 1550 mkdir -p %{buildroot}/sbin ln -sf %{_sbindir}/mount.ceph %{buildroot}/sbin/mount.ceph %endif @@ -1554,6 +1580,9 @@ mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rbd-mirror # prometheus alerts install -m 644 -D monitoring/ceph-mixin/prometheus_alerts.yml %{buildroot}/etc/prometheus/ceph/ceph_default_alerts.yml +# SNMP MIB +install -m 644 -D -t %{buildroot}%{_datadir}/snmp/mibs monitoring/snmp/CEPH-MIB.txt + %if 0%{?suse_version} # create __pycache__ directories and their contents %py3_compile %{buildroot}%{python3_sitelib} @@ -1586,6 +1615,8 @@ install -m 644 -D monitoring/ceph-mixin/prometheus_alerts.yml %{buildroot}/etc/p %dir %{_libdir}/ceph %dir %{_libdir}/ceph/erasure-code %{_libdir}/ceph/erasure-code/libec_*.so* +%dir %{_libdir}/ceph/extblkdev +%{_libdir}/ceph/extblkdev/libceph_*.so* %dir %{_libdir}/ceph/compressor %{_libdir}/ceph/compressor/libceph_*.so* %{_unitdir}/ceph-crash.service @@ -1685,8 +1716,12 @@ exit 0 %{_bindir}/rbd-replay %{_bindir}/rbd-replay-many %{_bindir}/rbdmap +%{_bindir}/rgw-gap-list +%{_bindir}/rgw-gap-list-comparator +%{_bindir}/rgw-orphan-list 
+%{_bindir}/rgw-restore-bucket-index %{_sbindir}/mount.ceph -%if 0%{?suse_version} && !0%{?usrmerged} +%if 0%{?suse_version} && 0%{?suse_version} < 1550 /sbin/mount.ceph %endif %if %{with lttng} @@ -1899,6 +1934,7 @@ fi %{_datadir}/ceph/mgr/prometheus %{_datadir}/ceph/mgr/rbd_support %{_datadir}/ceph/mgr/restful +%{_datadir}/ceph/mgr/rgw %{_datadir}/ceph/mgr/selftest %{_datadir}/ceph/mgr/snap_schedule %{_datadir}/ceph/mgr/stats @@ -2141,17 +2177,14 @@ fi %{_bindir}/radosgw-token %{_bindir}/radosgw-es %{_bindir}/radosgw-object-expirer -%{_bindir}/rgw-gap-list -%{_bindir}/rgw-gap-list-comparator -%{_bindir}/rgw-orphan-list -%{_libdir}/libradosgw.so* +%{_bindir}/rgw-policy-check %{_mandir}/man8/radosgw.8* +%{_mandir}/man8/rgw-policy-check.8* %dir %{_localstatedir}/lib/ceph/radosgw %{_unitdir}/ceph-radosgw@.service %{_unitdir}/ceph-radosgw.target %post radosgw -%{?ldconfig} %if 0%{?suse_version} if [ $1 -eq 1 ] ; then /usr/bin/systemctl preset ceph-radosgw@\*.service ceph-radosgw.target >/dev/null 2>&1 || : @@ -2173,7 +2206,6 @@ fi %endif %postun radosgw -%{?ldconfig} %systemd_postun ceph-radosgw@\*.service ceph-radosgw.target if [ $1 -ge 1 ] ; then # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to @@ -2415,6 +2447,7 @@ fi %dir %{_includedir}/cephfs %{_includedir}/cephfs/libcephfs.h %{_includedir}/cephfs/ceph_ll_client.h +%{_includedir}/cephfs/types.h %dir %{_includedir}/cephfs/metrics %{_includedir}/cephfs/metrics/Types.h %{_libdir}/libcephfs.so @@ -2614,7 +2647,14 @@ exit 0 %attr(0755,root,root) %dir %{_sysconfdir}/prometheus/ceph %config %{_sysconfdir}/prometheus/ceph/ceph_default_alerts.yml +%files mib +%attr(0755,root,root) %dir %{_datadir}/snmp +%{_datadir}/snmp/mibs + %changelog +* Tue Jun 13 2023 Kaleb S. 
KEITHLEY - 2:18.1.0-0.1 +- ceph-18.1.0 RC1 + * Wed Jun 07 2023 Yaakov Selkowitz - 2:17.2.6-8 - Do not use mold in RHEL/ELN builds @@ -2714,9 +2754,6 @@ exit 0 * Sun Jul 17 2022 Robert-André Mauchin - 2:17.2.1-5 - Rebuild for new fmt -* Sun Jul 10 2022 Mamoru TASAKA - 2:17.2.1-4 -- Rebuild for new gtest - * Wed Jul 6 2022 Kaleb S. KEITHLEY - 2:17.2.1-3 - enable cephfs-shell @@ -2930,15 +2967,15 @@ exit 0 - Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild * Tue Jul 21 2020 Kaleb S. KEITHLEY - 2:15.2.4-9 -- %%cmake_build and %%cmake_install +- %%cmake_build and %%cmake_install * Mon Jul 20 2020 Kaleb S. KEITHLEY - 2:15.2.4-8 - see 15.2.4-4 (f33-java11) for real this time -- and use %%make_install macro +- and use %%make_install macro * Mon Jul 20 2020 Kaleb S. KEITHLEY - 2:15.2.4-7 - see 15.2.4-3, hopefully for real this time -- and use %%make_install macro +- and use %%make_install macro * Fri Jul 17 2020 Kaleb S. KEITHLEY - 2:15.2.4-6 - see 15.2.4-4 diff --git a/sources b/sources index 11254c3..09af4c1 100644 --- a/sources +++ b/sources @@ -1 +1 @@ -SHA512 (ceph-17.2.6.tar.gz) = dca9aea2ce210c15fcc34cb06a5dc5b4488ffa36d684166d47ebd87e48b54b6fee0882e1c67007a780e1c25754e9bc6e760cc10f60ea1183263f8504ef2dbd9b +SHA512 (ceph-18.1.0.tar.gz) = 9818b061da77e447143666c4be171cd3461b2bfeb4bad4fec5163b2b3b27bc3a5874426833465673204877b3c287b109b65099d9335544dde1323eb39da7f418