From c8dccd6468bbcb5b832e10d8e1dccc808ffadfc0 Mon Sep 17 00:00:00 2001 From: DistroBaker Date: Thu, 4 Feb 2021 07:40:18 +0000 Subject: [PATCH] Merged update from upstream sources This is an automated DistroBaker update from upstream sources. If you do not know what this is about or would like to opt out, contact the OSCI team. Source: https://src.fedoraproject.org/rpms/ceph.git#3d9c93f7ab21ee93893b67bda96a70e98a350cf1 --- 0006-src-blk-CMakeLists.txt.patch | 11 + 0006-src-rgw-rgw_asio_frontend.cc.patch | 64 - 0007-src-test-neorados-CMakeLists.txt.patch | 11 + 0007-src-tools-rbd-action-Bench.cc.patch | 10 - 0008-cmake-modules-Finduring.cmake.patch | 11 + ceph.spec | 1579 +++++++++++++++++-- 6 files changed, 1480 insertions(+), 206 deletions(-) create mode 100644 0006-src-blk-CMakeLists.txt.patch delete mode 100644 0006-src-rgw-rgw_asio_frontend.cc.patch create mode 100644 0007-src-test-neorados-CMakeLists.txt.patch delete mode 100644 0007-src-tools-rbd-action-Bench.cc.patch create mode 100644 0008-cmake-modules-Finduring.cmake.patch diff --git a/0006-src-blk-CMakeLists.txt.patch b/0006-src-blk-CMakeLists.txt.patch new file mode 100644 index 0000000..548f676 --- /dev/null +++ b/0006-src-blk-CMakeLists.txt.patch @@ -0,0 +1,11 @@ +--- ceph-16.1.0-43-g6b74fb5c/src/blk/CMakeLists.txt.orig 2021-02-01 08:16:26.719517641 -0500 ++++ ceph-16.1.0-43-g6b74fb5c/src/blk/CMakeLists.txt 2021-02-01 08:16:47.810092341 -0500 +@@ -25,7 +25,7 @@ + zoned/HMSMRDevice.cc) + endif() + +-add_library(blk ${libblk_srcs}) ++add_library(blk STATIC ${libblk_srcs}) + target_include_directories(blk PRIVATE "./") + + if(HAVE_LIBAIO) diff --git a/0006-src-rgw-rgw_asio_frontend.cc.patch b/0006-src-rgw-rgw_asio_frontend.cc.patch deleted file mode 100644 index de47ac6..0000000 --- a/0006-src-rgw-rgw_asio_frontend.cc.patch +++ /dev/null @@ -1,64 +0,0 @@ ---- ceph-15.2.8/src/rgw/rgw_asio_frontend.cc.orig 2020-12-16 12:29:50.000000000 -0500 -+++ ceph-15.2.8/src/rgw/rgw_asio_frontend.cc 2021-01-04 18:20:49.156104233 -0500 -@@ -2,6 +2,7 @@ - // vim: ts=8 sw=2 smarttab ft=cpp - - #include -+#include - #include - #include - -@@ -138,6 +139,29 @@ - return out << h.quote << p->value() << h.quote; - } - -+// log fractional seconds in milliseconds -+struct log_ms_remainder { -+ ceph::coarse_real_time t; -+ log_ms_remainder(ceph::coarse_real_time t) : t(t) {} -+}; -+std::ostream& operator<<(std::ostream& out, const log_ms_remainder& m) { -+ using namespace std::chrono; -+ return out << std::setfill('0') << std::setw(3) -+ << duration_cast(m.t.time_since_epoch()).count() % 1000; -+} -+ -+// log time in apache format: day/month/year:hour:minute:second zone -+struct log_apache_time { -+ ceph::coarse_real_time t; -+ log_apache_time(ceph::coarse_real_time t) : t(t) {} -+}; -+std::ostream& operator<<(std::ostream& out, const log_apache_time& a) { -+ const auto t = ceph::coarse_real_clock::to_time_t(a.t); -+ const auto local = std::localtime(&t); -+ return out << std::put_time(local, "%d/%b/%Y:%T.") << log_ms_remainder{a.t} -+ << std::put_time(local, " %z"); -+}; -+ - using SharedMutex = ceph::async::SharedMutex; - - template -@@ -228,16 +252,20 @@ - RGWRestfulIO client(cct, &real_client_io); - auto y = optional_yield{context, yield}; - int http_ret = 0; -+ string user = "-"; -+ const auto started = ceph::coarse_real_clock::now(); -+ - process_request(env.store, env.rest, &req, env.uri_prefix, - *env.auth_registry, &client, env.olog, y, - scheduler, &http_ret); - - if (cct->_conf->subsys.should_gather(dout_subsys, 1)) { - // access log line 
elements begin per Apache Combined Log Format with additions following -- const auto now = ceph::coarse_real_clock::now(); -- using ceph::operator<<; // for coarse_real_time -+ // const auto now = ceph::coarse_real_clock::now(); -+ // using ceph::operator<<; // for coarse_real_time - ldout(cct, 1) << "beast: " << hex << &req << dec << ": " -- << remote_endpoint.address() << " - - [" << now << "] \"" -+ // << remote_endpoint.address() << " - - [" << now << "] \"" -+ << remote_endpoint.address() << " - " << user << " [" << log_apache_time{started} << "] \"" - << message.method_string() << ' ' << message.target() << ' ' - << http_version{message.version()} << "\" " << http_ret << ' ' - << client.get_bytes_sent() + client.get_bytes_received() << ' ' diff --git a/0007-src-test-neorados-CMakeLists.txt.patch b/0007-src-test-neorados-CMakeLists.txt.patch new file mode 100644 index 0000000..a160f38 --- /dev/null +++ b/0007-src-test-neorados-CMakeLists.txt.patch @@ -0,0 +1,11 @@ +--- ceph-16.1.0-43-g6b74fb5c/src/test/neorados/CMakeLists.txt.orig 2021-02-01 08:25:18.006965821 -0500 ++++ ceph-16.1.0-43-g6b74fb5c/src/test/neorados/CMakeLists.txt 2021-02-01 08:25:34.244407147 -0500 +@@ -19,7 +19,7 @@ + target_link_libraries(ceph_test_neorados_op_speed + libneorados fmt::fmt ${unittest_libs}) + +-add_library(neoradostest-support common_tests.cc) ++add_library(neoradostest-support STATIC common_tests.cc) + target_link_libraries(neoradostest-support + libneorados fmt::fmt) + diff --git a/0007-src-tools-rbd-action-Bench.cc.patch b/0007-src-tools-rbd-action-Bench.cc.patch deleted file mode 100644 index d0ced55..0000000 --- a/0007-src-tools-rbd-action-Bench.cc.patch +++ /dev/null @@ -1,10 +0,0 @@ ---- ceph-15.2.8/src/tools/rbd/action/Bench.cc.orig 2021-01-25 17:08:06.189223814 -0500 -+++ ceph-15.2.8/src/tools/rbd/action/Bench.cc 2021-01-25 17:08:27.297805754 -0500 -@@ -10,6 +10,7 @@ - #include "include/types.h" - #include "global/signal_handler.h" - #include -+#include - #include - #include - #include diff --git a/0008-cmake-modules-Finduring.cmake.patch b/0008-cmake-modules-Finduring.cmake.patch new file mode 100644 index 0000000..61b93f7 --- /dev/null +++ b/0008-cmake-modules-Finduring.cmake.patch @@ -0,0 +1,11 @@ +--- ceph-16.1.0-43-g6b74fb5c/cmake/modules/Finduring.cmake.orig 2021-02-01 08:45:39.316108287 -0500 ++++ ceph-16.1.0-43-g6b74fb5c/cmake/modules/Finduring.cmake 2021-02-01 08:45:59.813665378 -0500 +@@ -5,7 +5,7 @@ + # uring_FOUND - True if uring found. 
+ + find_path(URING_INCLUDE_DIR liburing.h) +-find_library(URING_LIBRARIES liburing.a liburing) ++find_library(URING_LIBRARIES liburing.so liburing) + + include(FindPackageHandleStandardArgs) + find_package_handle_standard_args(uring DEFAULT_MSG URING_LIBRARIES URING_INCLUDE_DIR) diff --git a/ceph.spec b/ceph.spec index 8487e45..639d387 100644 --- a/ceph.spec +++ b/ceph.spec @@ -25,17 +25,44 @@ %bcond_with make_check %bcond_with zbd %bcond_with cmake_verbose_logging -%bcond_with ceph_test_package +%bcond_without ceph_test_package +%ifarch s390 s390x %bcond_with tcmalloc +%else +%bcond_without tcmalloc +%endif %if 0%{?fedora} || 0%{?rhel} -%bcond_with amqp_endpoint -%bcond_with kafka_endpoint -%bcond_with lttng +%bcond_without selinux +%if 0%{?rhel} >= 8 +%bcond_with cephfs_java +%else +%bcond_without cephfs_java +%endif +%bcond_without amqp_endpoint +%bcond_without kafka_endpoint +%bcond_without lttng %bcond_without libradosstriper -%bcond_with ocf +%bcond_without ocf %global _remote_tarball_prefix https://download.ceph.com/tarballs/ %endif +%if 0%{?suse_version} +%bcond_with amqp_endpoint +%bcond_with cephfs_java +%bcond_with kafka_endpoint +%ifarch x86_64 aarch64 ppc64le +%bcond_without lttng +%else +%bcond_with lttng +%endif +%bcond_with ocf +%bcond_with selinux +#Compat macro for _fillupdir macro introduced in Nov 2017 +%if ! %{defined _fillupdir} +%global _fillupdir /var/adm/fillup-templates +%endif +%endif +%bcond_with seastar %bcond_with jaeger %bcond_with rbd_rwl_cache %bcond_with rbd_ssd_cache @@ -49,6 +76,28 @@ %if 0%{?fedora} || 0%{?suse_version} || 0%{?rhel} >= 8 %global weak_deps 1 %endif +%if %{with selinux} +# get selinux policy version +# Force 0.0.0 policy version for centos builds to avoid repository sync issues between rhel and centos +%if 0%{?centos} +%global _selinux_policy_version 0.0.0 +%else +%{!?_selinux_policy_version: %global _selinux_policy_version 0.0.0} +%endif +%endif + +%if 0%{?suse_version} +%if !0%{?is_opensuse} +# SLE does not support luarocks +%bcond_with lua_packages +%else +%global luarocks_package_name lua53-luarocks +%bcond_without lua_packages +%endif +%else +%global luarocks_package_name luarocks +%bcond_without lua_packages +%endif %{!?_udevrulesdir: %global _udevrulesdir /lib/udev/rules.d} %{!?tmpfiles_create: %global tmpfiles_create systemd-tmpfiles --create} @@ -88,6 +137,9 @@ Patch0002: 0002-src-common-CMakeLists.txt.patch Patch0003: 0003-src-common-bitstr.h.patch Patch0004: 0004-src-CMakeLists.txt.patch Patch0005: 0005-src-librbd-migration.patch +Patch0006: 0006-src-blk-CMakeLists.txt.patch +Patch0007: 0007-src-test-neorados-CMakeLists.txt.patch +Patch0008: 0008-cmake-modules-Finduring.cmake.patch Source1: cmake-modules-BuildBoost.cmake.noautopatch # ceph 14.0.1 does not support 32-bit architectures, bugs #1727788, #1727787 ExcludeArch: i686 armv7hl @@ -103,12 +155,21 @@ Requires: ceph-mds = %{_epoch_prefix}%{version}-%{release} Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release} Requires: ceph-mon = %{_epoch_prefix}%{version}-%{release} Requires(post): binutils +%if 0%{with cephfs_java} +BuildRequires: java-devel +BuildRequires: sharutils +%endif +%if 0%{with selinux} +BuildRequires: checkpolicy +BuildRequires: selinux-policy-devel +%endif BuildRequires: gperf BuildRequires: cmake > 3.5 BuildRequires: cryptsetup BuildRequires: fuse3-devel BuildRequires: fmt-devel -BuildRequires: doxygen +BuildRequires: rocksdb-devel +BuildRequires: liburing-devel %if 0%{?rhel} == 7 # devtoolset offers newer make and valgrind-devel, but the old ones 
are good # enough. @@ -125,6 +186,7 @@ BuildRequires: gperftools-devel >= 2.6.1 BuildRequires: gperftools-devel >= 2.4 %endif %endif +BuildRequires: leveldb-devel > 1.2 BuildRequires: libaio-devel BuildRequires: libblkid-devel >= 2.17 BuildRequires: cryptsetup-devel @@ -193,6 +255,28 @@ BuildRequires: nlohmann_json-devel BuildRequires: libevent-devel BuildRequires: yaml-cpp-devel %endif +%if 0%{with seastar} +BuildRequires: c-ares-devel +BuildRequires: gnutls-devel +BuildRequires: hwloc-devel +BuildRequires: libpciaccess-devel +BuildRequires: lksctp-tools-devel +BuildRequires: protobuf-devel +BuildRequires: ragel +BuildRequires: systemtap-sdt-devel +BuildRequires: yaml-cpp-devel +%if 0%{?fedora} +BuildRequires: libubsan +BuildRequires: libasan +BuildRequires: libatomic +%endif +%if 0%{?rhel} +BuildRequires: gcc-toolset-9-annobin +BuildRequires: gcc-toolset-9-libubsan-devel +BuildRequires: gcc-toolset-9-libasan-devel +BuildRequires: gcc-toolset-9-libatomic-devel +%endif +%endif ################################################################################# # distro-conditional dependencies ################################################################################# @@ -293,6 +377,17 @@ BuildRequires: expat-devel %if 0%{?fedora} || 0%{?rhel} BuildRequires: redhat-rpm-config %endif +%if 0%{with seastar} +%if 0%{?fedora} || 0%{?rhel} +BuildRequires: cryptopp-devel +BuildRequires: numactl-devel +BuildRequires: protobuf-compiler +%endif +%if 0%{?suse_version} +BuildRequires: libcryptopp-devel +BuildRequires: libnuma-devel +%endif +%endif %if 0%{?rhel} >= 8 BuildRequires: /usr/bin/pathfix.py %endif @@ -305,6 +400,56 @@ on commodity hardware and delivers object, block and file system storage. ################################################################################# # subpackages ################################################################################# +%package base +Summary: Ceph Base Package +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Provides: ceph-test:/usr/bin/ceph-kvstore-tool +Requires: ceph-common = %{_epoch_prefix}%{version}-%{release} +Requires: librbd1 = %{_epoch_prefix}%{version}-%{release} +Requires: librados2 = %{_epoch_prefix}%{version}-%{release} +Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release} +Requires: librgw2 = %{_epoch_prefix}%{version}-%{release} +%if 0%{with selinux} +Requires: ceph-selinux = %{_epoch_prefix}%{version}-%{release} +%endif +Requires: cryptsetup +Requires: e2fsprogs +Requires: findutils +Requires: grep +Requires: logrotate +Requires: parted +Requires: psmisc +Requires: python%{python3_pkgversion}-setuptools +Requires: util-linux +Requires: xfsprogs +Requires: which +%if 0%{?fedora} || 0%{?rhel} +# The following is necessary due to tracker 36508 and can be removed once the +# associated upstream bugs are resolved. +%if 0%{with tcmalloc} +Requires: gperftools-libs >= 2.6.1 +%endif +%endif +%if 0%{?weak_deps} +Recommends: chrony +%endif +%description base +Base is the package that includes all the files shared amongst ceph servers + +%package -n cephadm +Summary: Utility to bootstrap Ceph clusters +BuildArch: noarch +Requires: lvm2 +Requires: python%{python3_pkgversion} +%if 0%{?weak_deps} +Recommends: podman +%endif +%description -n cephadm +Utility to bootstrap a Ceph cluster and manage Ceph daemons deployed +with systemd and podman. 
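+# Hedged usage sketch, not part of the spec itself: a new cluster is typically
+# brought up with a single bootstrap command (the monitor IP below is an
+# illustrative assumption, nothing this spec configures):
+#   sudo cephadm bootstrap --mon-ip 192.168.1.10
+#   sudo cephadm shell -- ceph status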
+ %package -n ceph-common Summary: Ceph Common %if 0%{?suse_version} @@ -339,6 +484,324 @@ Requires(pre): pwdutils Common utilities to mount and interact with a ceph storage cluster. Comprised of files that are common to Ceph clients and servers. +%package mds +Summary: Ceph Metadata Server Daemon +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} +%description mds +ceph-mds is the metadata server daemon for the Ceph distributed file system. +One or more instances of ceph-mds collectively manage the file system +namespace, coordinating access to the shared OSD cluster. + +%package mon +Summary: Ceph Monitor Daemon +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Provides: ceph-test:/usr/bin/ceph-monstore-tool +Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} +%if 0%{?weak_deps} +Recommends: nvme-cli +%if 0%{?suse_version} +Requires: smartmontools +%else +Recommends: smartmontools +%endif +%endif +%if 0%{with jaeger} +Requires: libjaeger = %{_epoch_prefix}%{version}-%{release} +%endif +%description mon +ceph-mon is the cluster monitor daemon for the Ceph distributed file +system. One or more instances of ceph-mon form a Paxos part-time +parliament cluster that provides extremely reliable and durable storage +of cluster membership, configuration, and state. + +%package mgr +Summary: Ceph Manager Daemon +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} +Requires: ceph-mgr-modules-core = %{_epoch_prefix}%{version}-%{release} +%if 0%{?weak_deps} +Recommends: ceph-mgr-dashboard = %{_epoch_prefix}%{version}-%{release} +Recommends: ceph-mgr-diskprediction-local = %{_epoch_prefix}%{version}-%{release} +Recommends: ceph-mgr-diskprediction-cloud = %{_epoch_prefix}%{version}-%{release} +Recommends: ceph-mgr-k8sevents = %{_epoch_prefix}%{version}-%{release} +Recommends: ceph-mgr-cephadm = %{_epoch_prefix}%{version}-%{release} +Recommends: python%{python3_pkgversion}-influxdb +%endif +%description mgr +ceph-mgr enables python modules that provide services (such as the REST +module derived from Calamari) and expose CLI hooks. ceph-mgr gathers +the cluster maps, the daemon metadata, and performance counters, and +exposes all these to the python modules. + +%package mgr-dashboard +Summary: Ceph Dashboard +BuildArch: noarch +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release} +Requires: ceph-grafana-dashboards = %{_epoch_prefix}%{version}-%{release} +Requires: ceph-prometheus-alerts = %{_epoch_prefix}%{version}-%{release} +%if 0%{?fedora} || 0%{?rhel} +Requires: python%{python3_pkgversion}-cherrypy +Requires: python%{python3_pkgversion}-jwt +Requires: python%{python3_pkgversion}-routes +Requires: python%{python3_pkgversion}-werkzeug +%if 0%{?weak_deps} +Recommends: python%{python3_pkgversion}-saml +%endif +%endif +%if 0%{?suse_version} +Requires: python%{python3_pkgversion}-CherryPy +Requires: python%{python3_pkgversion}-PyJWT +Requires: python%{python3_pkgversion}-Routes +Requires: python%{python3_pkgversion}-Werkzeug +Recommends: python%{python3_pkgversion}-python3-saml +%endif +%description mgr-dashboard +ceph-mgr-dashboard is a manager module, providing a web-based application +to monitor and manage many aspects of a Ceph cluster and related components. +See the Dashboard documentation at http://docs.ceph.com/ for details and a +detailed feature overview. 
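+# Hedged usage sketch, not part of the spec itself: the module is typically
+# enabled after install (the port value is an assumption, not a default set here):
+#   ceph mgr module enable dashboard
+#   ceph dashboard create-self-signed-cert
+#   ceph config set mgr mgr/dashboard/server_port 8443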
+ +%package mgr-diskprediction-local +Summary: Ceph Manager module for predicting disk failures +BuildArch: noarch +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-numpy +%if 0%{?fedora} || 0%{?suse_version} +Requires: python%{python3_pkgversion}-scikit-learn +%endif +Requires: python3-scipy +%description mgr-diskprediction-local +ceph-mgr-diskprediction-local is a ceph-mgr module that tries to predict +disk failures using local algorithms and machine-learning databases. + +%package mgr-modules-core +Summary: Ceph Manager modules which are always enabled +BuildArch: noarch +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: python%{python3_pkgversion}-bcrypt +Requires: python%{python3_pkgversion}-pecan +Requires: python%{python3_pkgversion}-pyOpenSSL +Requires: python%{python3_pkgversion}-requests +Requires: python%{python3_pkgversion}-dateutil +%if 0%{?fedora} || 0%{?rhel} >= 8 +Requires: python%{python3_pkgversion}-cherrypy +Requires: python%{python3_pkgversion}-pyyaml +Requires: python%{python3_pkgversion}-werkzeug +%endif +%if 0%{?suse_version} +Requires: python%{python3_pkgversion}-CherryPy +Requires: python%{python3_pkgversion}-PyYAML +Requires: python%{python3_pkgversion}-Werkzeug +%endif +%if 0%{?weak_deps} +Recommends: ceph-mgr-rook = %{_epoch_prefix}%{version}-%{release} +%endif +%description mgr-modules-core +ceph-mgr-modules-core provides a set of modules which are always +enabled by ceph-mgr. + +%package mgr-rook +BuildArch: noarch +Summary: Ceph Manager module for Rook-based orchestration +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-kubernetes +Requires: python%{python3_pkgversion}-jsonpatch +%description mgr-rook +ceph-mgr-rook is a ceph-mgr module for orchestration functions using +a Rook backend. + +%package mgr-k8sevents +BuildArch: noarch +Summary: Ceph Manager module to orchestrate ceph-events to kubernetes' events API +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-kubernetes +%description mgr-k8sevents +ceph-mgr-k8sevents is a ceph-mgr module that sends every ceph-events +to kubernetes' events API + +%package mgr-cephadm +Summary: Ceph Manager module for cephadm-based orchestration +BuildArch: noarch +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-remoto +Requires: cephadm = %{_epoch_prefix}%{version}-%{release} +%if 0%{?suse_version} +Requires: openssh +Requires: python%{python3_pkgversion}-Jinja2 +%endif +%if 0%{?rhel} || 0%{?fedora} +Requires: openssh-clients +Requires: python%{python3_pkgversion}-jinja2 +%endif +%description mgr-cephadm +ceph-mgr-cephadm is a ceph-mgr module for orchestration functions using +the integrated cephadm deployment tool management operations. 
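+# Hedged usage sketch, not part of the spec itself: selecting cephadm as the
+# active orchestrator backend after bootstrap:
+#   ceph mgr module enable cephadm
+#   ceph orch set backend cephadm
+#   ceph orch status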
+ +%package fuse +Summary: Ceph fuse-based client +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: fuse +Requires: python%{python3_pkgversion} +%description fuse +FUSE based client for Ceph distributed network file system + +%package -n cephfs-mirror +Summary: Ceph daemon for mirroring CephFS snapshots +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} +Requires: librados2 = %{_epoch_prefix}%{version}-%{release} +Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release} +%description -n cephfs-mirror +Daemon for mirroring CephFS snapshots between Ceph clusters. + +%package -n rbd-fuse +Summary: Ceph fuse-based client +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: librados2 = %{_epoch_prefix}%{version}-%{release} +Requires: librbd1 = %{_epoch_prefix}%{version}-%{release} +%description -n rbd-fuse +FUSE based client to map Ceph rbd images to files + +%package -n rbd-mirror +Summary: Ceph daemon for mirroring RBD images +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} +Requires: librados2 = %{_epoch_prefix}%{version}-%{release} +Requires: librbd1 = %{_epoch_prefix}%{version}-%{release} +%description -n rbd-mirror +Daemon for mirroring RBD images between Ceph clusters, streaming +changes asynchronously. + +%package immutable-object-cache +Summary: Ceph daemon for immutable object cache +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: librados2 = %{_epoch_prefix}%{version}-%{release} +%description immutable-object-cache +Daemon for immutable object cache. + +%package -n rbd-nbd +Summary: Ceph RBD client base on NBD +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: librados2 = %{_epoch_prefix}%{version}-%{release} +Requires: librbd1 = %{_epoch_prefix}%{version}-%{release} +%description -n rbd-nbd +NBD based client to map Ceph rbd images to local device + +%package radosgw +Summary: Rados REST gateway +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} +%if 0%{with selinux} +Requires: ceph-selinux = %{_epoch_prefix}%{version}-%{release} +%endif +Requires: librados2 = %{_epoch_prefix}%{version}-%{release} +Requires: librgw2 = %{_epoch_prefix}%{version}-%{release} +%if 0%{?rhel} || 0%{?fedora} +Requires: mailcap +%endif +%description radosgw +RADOS is a distributed object store used by the Ceph distributed +storage system. This package provides a REST gateway to the +object store that aims to implement a superset of Amazon's S3 +service as well as the OpenStack Object Storage ("Swift") API. + +%package -n cephfs-top +Summary: top(1) like utility for Ceph Filesystem +BuildArch: noarch +Requires: python%{python3_pkgversion}-rados +%description -n cephfs-top +This package provides a top(1) like utility to display Ceph Filesystem metrics +in realtime. + +%if %{with ocf} +%package resource-agents +Summary: OCF-compliant resource agents for Ceph daemons +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-base = %{_epoch_prefix}%{version} +Requires: resource-agents +%description resource-agents +Resource agents for monitoring and managing Ceph daemons +under Open Cluster Framework (OCF) compliant resource +managers such as Pacemaker. 
+%endif + +%package osd +Summary: Ceph Object Storage Daemon +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Provides: ceph-test:/usr/bin/ceph-osdomap-tool +Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} +Requires: lvm2 +Requires: sudo +Requires: libstoragemgmt +Requires: python%{python3_pkgversion}-ceph-common = %{_epoch_prefix}%{version}-%{release} +%if 0%{?weak_deps} +Recommends: nvme-cli +%if 0%{?suse_version} +Requires: smartmontools +%else +Recommends: smartmontools +%endif +%endif +%description osd +ceph-osd is the object storage daemon for the Ceph distributed file +system. It is responsible for storing objects on a local file system +and providing access to them over the network. + +%if 0%{with seastar} +%package crimson-osd +Summary: Ceph Object Storage Daemon (crimson) +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-osd = %{_epoch_prefix}%{version}-%{release} +%description crimson-osd +crimson-osd is the object storage daemon for the Ceph distributed file +system. It is responsible for storing objects on a local file system +and providing access to them over the network. +%endif + %package -n librados2 Summary: RADOS distributed object store client library %if 0%{?suse_version} @@ -377,6 +840,43 @@ Requires: librados-devel = %{_epoch_prefix}%{version}-%{release} This package contains C++ libraries and headers needed to develop programs that use RADOS object store. +%package -n librgw2 +Summary: RADOS gateway client library +%if 0%{?suse_version} +Group: System/Libraries +%endif +Requires: librados2 = %{_epoch_prefix}%{version}-%{release} +%description -n librgw2 +This package provides a library implementation of the RADOS gateway +(distributed object store with S3 and Swift personalities). + +%package -n librgw-devel +Summary: RADOS gateway client library +%if 0%{?suse_version} +Group: Development/Libraries/C and C++ +%endif +Requires: librados-devel = %{_epoch_prefix}%{version}-%{release} +Requires: librgw2 = %{_epoch_prefix}%{version}-%{release} +Provides: librgw2-devel = %{_epoch_prefix}%{version}-%{release} +Obsoletes: librgw2-devel < %{_epoch_prefix}%{version}-%{release} +%description -n librgw-devel +This package contains libraries and headers needed to develop programs +that use RADOS gateway client library. + +%package -n python%{python3_pkgversion}-rgw +Summary: Python 3 libraries for the RADOS gateway +%if 0%{?suse_version} +Group: Development/Libraries/Python +%endif +Requires: librgw2 = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release} +%{?python_provide:%python_provide python%{python3_pkgversion}-rgw} +Provides: python-rgw = %{_epoch_prefix}%{version}-%{release} +Obsoletes: python-rgw < %{_epoch_prefix}%{version}-%{release} +%description -n python%{python3_pkgversion}-rgw +This package contains Python 3 libraries for interacting with Ceph RADOS +gateway. + %package -n python%{python3_pkgversion}-rados Summary: Python 3 libraries for the RADOS object store %if 0%{?suse_version} @@ -496,6 +996,20 @@ Obsoletes: libcephfs2-devel < %{_epoch_prefix}%{version}-%{release} This package contains libraries and headers needed to develop programs that use Ceph distributed file system. 
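+# Hedged build sketch, not part of the spec itself: a client program links
+# against the installed library (the source file name is hypothetical):
+#   cc -o cephfs_client cephfs_client.c -lcephfs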
+%if 0%{with jaeger} +%package -n libjaeger +Summary: Ceph distributed file system tracing library +%if 0%{?suse_version} +Group: System/Libraries +%endif +Provides: libjaegertracing.so.0()(64bit) +Provides: libopentracing.so.1()(64bit) +Provides: libthrift.so.0.13.0()(64bit) +%description -n libjaeger +This package contains libraries needed to provide distributed +tracing for Ceph. +%endif + %package -n python%{python3_pkgversion}-cephfs Summary: Python 3 libraries for Ceph distributed file system %if 0%{?suse_version} @@ -539,6 +1053,18 @@ Group: Development/Libraries/Python This package contains data structures, classes and functions used by Ceph. It also contains utilities used for the cephadm orchestrator. +%if 0%{with cephfs_shell} +%package -n cephfs-shell +Summary: Interactive shell for Ceph file system +Requires: python%{python3_pkgversion}-cmd2 +Requires: python%{python3_pkgversion}-colorama +Requires: python%{python3_pkgversion}-cephfs +%description -n cephfs-shell +This package contains an interactive tool that allows accessing a Ceph +file system without mounting it by providing a nice pseudo-shell which +works like an FTP client. +%endif + %if 0%{with ceph_test_package} %package -n ceph-test Summary: Ceph benchmarks and test tools @@ -555,6 +1081,47 @@ BuildRequires: gmock-devel This package contains Ceph benchmarks and test tools. %endif +%if 0%{with cephfs_java} + +%package -n libcephfs_jni1 +Summary: Java Native Interface library for CephFS Java bindings +%if 0%{?suse_version} +Group: System/Libraries +%endif +Requires: java +Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release} +%description -n libcephfs_jni1 +This package contains the Java Native Interface library for CephFS Java +bindings. + +%package -n libcephfs_jni-devel +Summary: Development files for CephFS Java Native Interface library +%if 0%{?suse_version} +Group: Development/Libraries/Java +%endif +Requires: java +Requires: libcephfs_jni1 = %{_epoch_prefix}%{version}-%{release} +Obsoletes: ceph-devel < %{_epoch_prefix}%{version}-%{release} +Provides: libcephfs_jni1-devel = %{_epoch_prefix}%{version}-%{release} +Obsoletes: libcephfs_jni1-devel < %{_epoch_prefix}%{version}-%{release} +%description -n libcephfs_jni-devel +This package contains the development files for CephFS Java Native Interface +library. + +%package -n cephfs-java +Summary: Java libraries for the Ceph File System +%if 0%{?suse_version} +Group: System/Libraries +%endif +Requires: java +Requires: libcephfs_jni1 = %{_epoch_prefix}%{version}-%{release} +Requires: junit +BuildRequires: junit +%description -n cephfs-java +This package contains the Java libraries for the Ceph File System. + +%endif + %package -n rados-objclass-devel Summary: RADOS object class development kit %if 0%{?suse_version} @@ -565,6 +1132,45 @@ Requires: libradospp-devel = %{_epoch_prefix}%{version}-%{release} This package contains libraries and headers needed to develop RADOS object class plugins. +%if 0%{with selinux} + +%package selinux +Summary: SELinux support for Ceph MON, OSD and MDS +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} +Requires: policycoreutils, libselinux-utils +Requires(post): ceph-base = %{_epoch_prefix}%{version}-%{release} +Requires(post): selinux-policy-base >= %{_selinux_policy_version}, policycoreutils, gawk +Requires(postun): policycoreutils +%description selinux +This package contains SELinux support for Ceph MON, OSD and MDS. 
The package
+also performs file-system relabelling which can take a long time on heavily
+populated file-systems.
+
+%endif
+
+%package grafana-dashboards
+Summary: The set of Grafana dashboards for monitoring purposes
+BuildArch: noarch
+%if 0%{?suse_version}
+Group: System/Filesystems
+%endif
+%description grafana-dashboards
+This package provides a set of Grafana dashboards for monitoring of
+Ceph clusters. The dashboards require a Prometheus server setup
+collecting data from Ceph Manager "prometheus" module and Prometheus
+project "node_exporter" module. The dashboards are designed to be
+integrated with the Ceph Manager Dashboard web UI.
+
+%package prometheus-alerts
+Summary: Prometheus alerts for a Ceph deployment
+BuildArch: noarch
+Group: System/Monitoring
+%description prometheus-alerts
+This package provides Ceph default alerts for Prometheus.
+
#################################################################################
# common
#################################################################################
@@ -579,6 +1185,17 @@ patch -p1 < %{SOURCE1}
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=48200
%define _lto_cflags %{nil}

+%if 0%{with seastar} && 0%{?rhel}
+. /opt/rh/devtoolset-9/enable
+%endif
+
+%if 0%{with cephfs_java}
+# Find jni.h
+for i in /usr/{lib64,lib}/jvm/java/include{,/linux}; do
+  [ -d $i ] && java_inc="$java_inc -I$i"
+done
+%endif
+
%if 0%{?suse_version}
# the following setting fixed an OOM condition we once encountered in the OBS
RPM_OPT_FLAGS="$RPM_OPT_FLAGS --param ggc-min-expand=20 --param ggc-min-heapsize=32768"
@@ -589,6 +1206,11 @@ export CFLAGS="$RPM_OPT_FLAGS"
export CXXFLAGS="$RPM_OPT_FLAGS"
export LDFLAGS="$RPM_LD_FLAGS"

+%if 0%{with seastar}
+# seastar uses longjmp() to implement coroutines, and this annoys longjmp_chk()
+export CXXFLAGS=$(echo $RPM_OPT_FLAGS | sed -e 's/-Wp,-D_FORTIFY_SOURCE=2//g')
+%endif
+
# Parallel build settings ...
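# A hedged reading of the two lines below: %{?_smp_mflags} normally expands to
# "-jN", so stripping the "-j" leaves the CPU count, e.g. "-j8" -> "8"; that
# count can then be clamped on memory-constrained builders.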
CEPH_MFLAGS_JOBS="%{?_smp_mflags}" CEPH_SMP_NCPUS=$(echo "$CEPH_MFLAGS_JOBS" | sed 's/-j//') @@ -627,23 +1249,18 @@ cd build -DCMAKE_INSTALL_DOCDIR=%{_docdir}/ceph \ -DCMAKE_INSTALL_INCLUDEDIR=%{_includedir} \ -DCMAKE_INSTALL_SYSTEMD_SERVICEDIR=%{_unitdir} \ - -DWITH_MGR=OFF \ - -DWITH_EMBEDDED=OFF \ -DWITH_MANPAGE=ON \ -DWITH_PYTHON3=%{python3_version} \ -DWITH_MGR_DASHBOARD_FRONTEND=OFF \ - -DWITH_SYSTEMD=ON \ - -DWITH_SYSTEM_BOOST=ON \ - -DWITH_SPDK=OFF \ - -DWITH_PMEM=OFF \ - -DWITH_BOOST_CONTEXT=OFF \ - -DWITH_LEVELDB=OFF \ - -DWITH_RADOSGW=OFF \ - -DWITH_SELINUX=OFF \ - -DWITH_CEPHFS_JAVA=OFF \ %if 0%{without ceph_test_package} -DWITH_TESTS=OFF \ %endif +%if 0%{with cephfs_java} + -DWITH_CEPHFS_JAVA=ON \ +%endif +%if 0%{with selinux} + -DWITH_SELINUX=ON \ +%endif %if %{with lttng} -DWITH_LTTNG=ON \ -DWITH_BABELTRACE=ON \ @@ -656,6 +1273,8 @@ cd build -DWITH_OCF=ON \ %endif -DWITH_REENTRANT_STRSIGNAL=ON \ + -DWITH_SYSTEM_ROCKSDB=ON \ + -DWITH_SYSTEM_LIBURING=ON \ -DWITH_SYSTEM_BOOST=ON \ %if 0%{with cephfs_shell} -DWITH_CEPHFS_SHELL=ON \ @@ -676,7 +1295,7 @@ cd build -DWITH_RADOSGW_KAFKA_ENDPOINT=OFF \ %endif %if 0%{without lua_packages} - -DWITH_RADOSGW_LUA_PACKAGES=OFF \ + -DWITH_RADOSGW_LUA_PACKAGES=OFF %endif %if 0%{with zbd} -DWITH_ZBD=ON \ @@ -694,7 +1313,7 @@ cd build %if 0%{with ceph_test_package} -DWITH_SYSTEM_GTEST=ON \ %endif - -DWITH_GRAFANA=OFF + -DWITH_GRAFANA=ON %if %{with cmake_verbose_logging} cat ./CMakeFiles/CMakeOutput.log @@ -720,11 +1339,33 @@ pushd build # we have dropped sysvinit bits rm -f %{buildroot}/%{_sysconfdir}/init.d/ceph popd +%if 0%{with seastar} +# package crimson-osd with the name of ceph-osd +install -m 0755 %{buildroot}%{_bindir}/crimson-osd %{buildroot}%{_bindir}/ceph-osd +%endif install -m 0644 -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap +%if 0%{?fedora} || 0%{?rhel} +install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_sysconfdir}/sysconfig/ceph +%endif +%if 0%{?suse_version} +install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_fillupdir}/sysconfig.%{name} +%endif install -m 0644 -D systemd/ceph.tmpfiles.d %{buildroot}%{_tmpfilesdir}/ceph-common.conf +install -m 0644 -D systemd/50-ceph.preset %{buildroot}%{_presetdir}/50-ceph.preset mkdir -p %{buildroot}%{_sbindir} +install -m 0644 -D src/logrotate.conf %{buildroot}%{_sysconfdir}/logrotate.d/ceph chmod 0644 %{buildroot}%{_docdir}/ceph/sample.ceph.conf install -m 0644 -D COPYING %{buildroot}%{_docdir}/ceph/COPYING +install -m 0644 -D etc/sysctl/90-ceph-osd.conf %{buildroot}%{_sysctldir}/90-ceph-osd.conf +install -m 0755 -D src/tools/rbd_nbd/rbd-nbd_quiesce %{buildroot}%{_libexecdir}/rbd-nbd/rbd-nbd_quiesce + +install -m 0755 src/cephadm/cephadm %{buildroot}%{_sbindir}/cephadm +mkdir -p %{buildroot}%{_sharedstatedir}/cephadm +chmod 0700 %{buildroot}%{_sharedstatedir}/cephadm +mkdir -p %{buildroot}%{_sharedstatedir}/cephadm/.ssh +chmod 0700 %{buildroot}%{_sharedstatedir}/cephadm/.ssh +touch %{buildroot}%{_sharedstatedir}/cephadm/.ssh/authorized_keys +chmod 0600 %{buildroot}%{_sharedstatedir}/cephadm/.ssh/authorized_keys # firewall templates and /sbin/mount.ceph symlink %if 0%{?suse_version} @@ -735,6 +1376,9 @@ ln -sf %{_sbindir}/mount.ceph %{buildroot}/sbin/mount.ceph # udev rules install -m 0644 -D udev/50-rbd.rules %{buildroot}%{_udevrulesdir}/50-rbd.rules +# sudoers.d +install -m 0440 -D sudoers.d/ceph-osd-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-osd-smartctl + %if 0%{?rhel} >= 8 pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_bindir}/* pathfix.py -pni 
"%{__python3} %{py3_shbang_opts}" %{buildroot}%{_sbindir}/* @@ -744,116 +1388,23 @@ pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_sbindir}/* mkdir -p %{buildroot}%{_sysconfdir}/ceph mkdir -p %{buildroot}%{_localstatedir}/run/ceph mkdir -p %{buildroot}%{_localstatedir}/log/ceph -mkdir -p %{buildroot}%{_localstatedir}/lib/ceph +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/tmp +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mon +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/osd +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mds +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mgr +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/crash +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/crash/posted +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/radosgw +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-osd +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-mds +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rgw +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-mgr +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rbd +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rbd-mirror -# Remove the rbd/fuse bits -rm -f %{buildroot}%{_bindir}/ceph-fuse -rm -f %{buildroot}%{_mandir}/man8/ceph-fuse.8* -rm -f %{buildroot}%{_sbindir}/mount.fuse.ceph -rm -f %{buildroot}%{_mandir}/man8/mount.fuse.ceph.8* -rm -f %{buildroot}%{_unitdir}/ceph-fuse@.service -rm -f %{buildroot}%{_unitdir}/ceph-fuse.target -rm -f %{buildroot}%{_bindir}/rbd-fuse -rm -f %{buildroot}%{_mandir}/man8/rbd-fuse.8* - -# Remove the ceph-base package -rm -f %{buildroot}%{_bindir}/ceph-crash -rm -f %{buildroot}%{_bindir}/crushtool -rm -f %{buildroot}%{_bindir}/monmaptool -rm -f %{buildroot}%{_bindir}/osdmaptool -rm -f %{buildroot}%{_bindir}/ceph-kvstore-tool -rm -f %{buildroot}%{_bindir}/ceph-run -rm -f %{buildroot}%{_sbindir}/ceph-create-keys -rm -f %{buildroot}%{_sbindir}/ceph-volume -rm -f %{buildroot}%{_sbindir}/ceph-volume-systemd -rm -f %{buildroot}%{_libexecdir}/ceph/ceph_common.sh -rm -rf %{buildroot}%{_libdir}/rados-classes -rm -rf %{buildroot}%{_libdir}/ceph/erasure-code -rm -rf %{buildroot}%{_libdir}/ceph/compressor -rm -rf %{buildroot}%{_libdir}/ceph/crypto -rm -f %{buildroot}%{_unitdir}/ceph-crash.service -rm -f %{buildroot}%{_unitdir}/ceph-volume@.service -rm -f %{buildroot}%{_unitdir}/ceph.target -rm -rf %{buildroot}%{python3_sitelib}/ceph_volume/* -rm -rf %{buildroot}%{python3_sitelib}/ceph_volume-* -rm -f %{buildroot}%{_mandir}/man8/ceph-deploy.8* -rm -f %{buildroot}%{_mandir}/man8/ceph-create-keys.8* -rm -f %{buildroot}%{_mandir}/man8/ceph-volume.8* -rm -f %{buildroot}%{_mandir}/man8/ceph-volume-systemd.8* -rm -f %{buildroot}%{_mandir}/man8/ceph-run.8* -rm -f %{buildroot}%{_mandir}/man8/crushtool.8* -rm -f %{buildroot}%{_mandir}/man8/osdmaptool.8* -rm -f %{buildroot}%{_mandir}/man8/monmaptool.8* -rm -f %{buildroot}%{_mandir}/man8/ceph-kvstore-tool.8* - -# Remove the ceph-mds package -rm -f %{buildroot}%{_bindir}/ceph-mds -rm -f %{buildroot}%{_mandir}/man8/ceph-mds.8* -rm -f %{buildroot}%{_unitdir}/ceph-mds@.service -rm -f %{buildroot}%{_unitdir}/ceph-mds.target - -# Remove the ceph-mgr package -rm -f %{buildroot}%{_unitdir}/ceph-mgr@.service -rm -f %{buildroot}%{_unitdir}/ceph-mgr.target - -# Remove the ceph-mon package -rm -f %{buildroot}%{_bindir}/ceph-mon -rm -f %{buildroot}%{_bindir}/ceph-monstore-tool -rm -f %{buildroot}%{_mandir}/man8/ceph-mon.8* -rm -f %{buildroot}%{_unitdir}/ceph-mon@.service -rm -f 
%{buildroot}%{_unitdir}/ceph-mon.target - -# Remove the ceph-radosgw package -rm -f %{buildroot}%{_unitdir}/ceph-radosgw@.service -rm -f %{buildroot}%{_unitdir}/ceph-radosgw.target - -# Remove the ceph-osd package -rm -f %{buildroot}%{_bindir}/ceph-clsinfo -rm -f %{buildroot}%{_bindir}/ceph-bluestore-tool -rm -f %{buildroot}%{_bindir}/ceph-erasure-code-tool -rm -f %{buildroot}%{_bindir}/ceph-objectstore-tool -rm -f %{buildroot}%{_bindir}/ceph-osdomap-tool -rm -f %{buildroot}%{_bindir}/ceph-osd -rm -f %{buildroot}%{_libexecdir}/ceph/ceph-osd-prestart.sh -rm -f %{buildroot}%{_mandir}/man8/ceph-clsinfo.8* -rm -f %{buildroot}%{_mandir}/man8/ceph-osd.8* -rm -f %{buildroot}%{_mandir}/man8/ceph-bluestore-tool.8* -rm -f %{buildroot}%{_unitdir}/ceph-osd@.service -rm -f %{buildroot}%{_unitdir}/ceph-osd.target - -# Remove rbd-mirror -rm -f %{buildroot}%{_bindir}/rbd-mirror -rm -f %{buildroot}%{_mandir}/man8/rbd-mirror.8* -rm -f %{buildroot}%{_unitdir}/ceph-rbd-mirror@.service -rm -f %{buildroot}%{_unitdir}/ceph-rbd-mirror.target - -# Remove rbd-nbd -rm -f %{buildroot}%{_bindir}/rbd-nbd -rm -f %{buildroot}%{_mandir}/man8/rbd-nbd.8* - -# Remove cephfs-top -rm -rf %{buildroot}%{python3_sitelib}/cephfs_top-*.egg-info -rm -f %{buildroot}%{_bindir}/cephfs-top -rm -f %{buildroot}%{_mandir}/man8/cephfs-top.8* - -# Remove additional files -rm -f %{buildroot}%{_bindir}/ceph-diff-sorted -rm -f %{buildroot}%{_mandir}/man8/ceph-diff-sorted.8* - -# Remove immutable-object-cache -rm -f %{buildroot}%{_bindir}/ceph-immutable-object-cache -rm -f %{buildroot}%{_mandir}/man8/ceph-immutable-object-cache.8* -rm -f %{buildroot}%{_unitdir}/ceph-immutable-object-cache@.service -rm -f %{buildroot}%{_unitdir}/ceph-immutable-object-cache.target - -# Remove cephfs-mirror -rm -f %{buildroot}%{_bindir}/cephfs-mirror -rm -f %{buildroot}%{_mandir}/man8/cephfs-mirror.8* -rm -f %{buildroot}%{_unitdir}/cephfs-mirror@.service -rm -f %{buildroot}%{_unitdir}/cephfs-mirror.target - -# Remove cephadm -rm -f %{buildroot}%{_mandir}/man8/cephadm.8* +# prometheus alerts +install -m 644 -D monitoring/prometheus/alerts/ceph_default_alerts.yml %{buildroot}/etc/prometheus/ceph/ceph_default_alerts.yml %if 0%{?suse_version} # create __pycache__ directories and their contents @@ -871,6 +1422,104 @@ rm -f %{buildroot}%{_mandir}/man8/cephadm.8* ################################################################################# %files +%files base +%{_bindir}/ceph-crash +%{_bindir}/crushtool +%{_bindir}/monmaptool +%{_bindir}/osdmaptool +%{_bindir}/ceph-kvstore-tool +%{_bindir}/ceph-run +%{_presetdir}/50-ceph.preset +%{_sbindir}/ceph-create-keys +%dir %{_libexecdir}/ceph +%{_libexecdir}/ceph/ceph_common.sh +%dir %{_libdir}/rados-classes +%{_libdir}/rados-classes/* +%dir %{_libdir}/ceph +%dir %{_libdir}/ceph/erasure-code +%{_libdir}/ceph/erasure-code/libec_*.so* +%dir %{_libdir}/ceph/compressor +%{_libdir}/ceph/compressor/libceph_*.so* +%{_unitdir}/ceph-crash.service +%dir %{_libdir}/ceph/crypto +%{_libdir}/ceph/crypto/libceph_*.so* +%if %{with lttng} +%{_libdir}/libos_tp.so* +%{_libdir}/libosd_tp.so* +%endif +%config(noreplace) %{_sysconfdir}/logrotate.d/ceph +%if 0%{?fedora} || 0%{?rhel} +%config(noreplace) %{_sysconfdir}/sysconfig/ceph +%endif +%if 0%{?suse_version} +%{_fillupdir}/sysconfig.* +%endif +%{_unitdir}/ceph.target +%dir %{python3_sitelib}/ceph_volume +%{python3_sitelib}/ceph_volume/* +%{python3_sitelib}/ceph_volume-* +%{_mandir}/man8/ceph-deploy.8* +%{_mandir}/man8/ceph-create-keys.8* +%{_mandir}/man8/ceph-run.8* +%{_mandir}/man8/crushtool.8* 
+%{_mandir}/man8/osdmaptool.8* +%{_mandir}/man8/monmaptool.8* +%{_mandir}/man8/ceph-kvstore-tool.8* +#set up placeholder directories +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/crash +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/crash/posted +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/tmp +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-osd +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-mds +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rgw +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-mgr +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd-mirror + +%post base +/sbin/ldconfig +%if 0%{?suse_version} +%fillup_only +if [ $1 -eq 1 ] ; then +/usr/bin/systemctl preset ceph.target ceph-crash.service >/dev/null 2>&1 || : +fi +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_post ceph.target ceph-crash.service +%endif +if [ $1 -eq 1 ] ; then +/usr/bin/systemctl start ceph.target ceph-crash.service >/dev/null 2>&1 || : +fi + +%preun base +%if 0%{?suse_version} +%service_del_preun ceph.target ceph-crash.service +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_preun ceph.target ceph-crash.service +%endif + +%postun base +/sbin/ldconfig + +%pre -n cephadm +getent group cephadm >/dev/null || groupadd -r cephadm +getent passwd cephadm >/dev/null || useradd -r -g cephadm -s /bin/bash -c "cephadm user for mgr/cephadm" -d %{_sharedstatedir}/cephadm cephadm +exit 0 + +%if ! 0%{?suse_version} +%postun -n cephadm +userdel -r cephadm || true +exit 0 +%endif + +%files -n cephadm +%{_sbindir}/cephadm +%{_mandir}/man8/cephadm.8* +%attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm +%attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm/.ssh +%attr(0600,cephadm,cephadm) %{_sharedstatedir}/cephadm/.ssh/authorized_keys + %files common %dir %{_docdir}/ceph %doc %{_docdir}/ceph/sample.ceph.conf @@ -885,6 +1534,7 @@ rm -f %{buildroot}%{_mandir}/man8/cephadm.8* %{_bindir}/cephfs-journal-tool %{_bindir}/cephfs-table-tool %{_bindir}/rados +%{_bindir}/radosgw-admin %{_bindir}/rbd %{_bindir}/rbd-replay %{_bindir}/rbd-replay-many @@ -901,17 +1551,20 @@ rm -f %{buildroot}%{_mandir}/man8/cephadm.8* %{_mandir}/man8/ceph-authtool.8* %{_mandir}/man8/ceph-conf.8* %{_mandir}/man8/ceph-dencoder.8* +%{_mandir}/man8/ceph-diff-sorted.8* %{_mandir}/man8/ceph-rbdnamer.8* %{_mandir}/man8/ceph-syn.8* %{_mandir}/man8/ceph-post-file.8* %{_mandir}/man8/ceph.8* %{_mandir}/man8/mount.ceph.8* %{_mandir}/man8/rados.8* +%{_mandir}/man8/radosgw-admin.8* %{_mandir}/man8/rbd.8* %{_mandir}/man8/rbdmap.8* %{_mandir}/man8/rbd-replay.8* %{_mandir}/man8/rbd-replay-many.8* %{_mandir}/man8/rbd-replay-prep.8* +%{_mandir}/man8/rgw-orphan-list.8* %dir %{_datadir}/ceph/ %{_datadir}/ceph/known_hosts_drop.ceph.com %{_datadir}/ceph/id_rsa_drop.ceph.com @@ -920,6 +1573,7 @@ rm -f %{buildroot}%{_mandir}/man8/cephadm.8* %config %{_sysconfdir}/bash_completion.d/ceph %config %{_sysconfdir}/bash_completion.d/rados %config %{_sysconfdir}/bash_completion.d/rbd +%config %{_sysconfdir}/bash_completion.d/radosgw-admin %config(noreplace) %{_sysconfdir}/ceph/rbdmap %{_unitdir}/rbdmap.service %dir %{_udevrulesdir} @@ -963,6 +1617,498 @@ if [ "$1" -eq "0" ] ; then rm -rf %{_sysconfdir}/ceph fi +%files mds +%{_bindir}/ceph-mds +%{_mandir}/man8/ceph-mds.8* +%{_unitdir}/ceph-mds@.service +%{_unitdir}/ceph-mds.target +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mds + +%post mds +%if 
0%{?suse_version} +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl preset ceph-mds@\*.service ceph-mds.target >/dev/null 2>&1 || : +fi +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_post ceph-mds@\*.service ceph-mds.target +%endif +if [ $1 -eq 1 ] ; then +/usr/bin/systemctl start ceph-mds.target >/dev/null 2>&1 || : +fi + +%preun mds +%if 0%{?suse_version} +%service_del_preun ceph-mds@\*.service ceph-mds.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_preun ceph-mds@\*.service ceph-mds.target +%endif + +%postun mds +%systemd_postun ceph-mds@\*.service ceph-mds.target +if [ $1 -ge 1 ] ; then + # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to + # "yes". In any case: if units are not running, do not touch them. + SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then + source $SYSCONF_CEPH + fi + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl try-restart ceph-mds@\*.service > /dev/null 2>&1 || : + fi +fi + +%files mgr +%{_bindir}/ceph-mgr +%dir %{_datadir}/ceph/mgr +%{_datadir}/ceph/mgr/mgr_module.* +%{_datadir}/ceph/mgr/mgr_util.* +%{_unitdir}/ceph-mgr@.service +%{_unitdir}/ceph-mgr.target +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mgr + +%post mgr +%if 0%{?suse_version} +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl preset ceph-mgr@\*.service ceph-mgr.target >/dev/null 2>&1 || : +fi +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_post ceph-mgr@\*.service ceph-mgr.target +%endif +if [ $1 -eq 1 ] ; then +/usr/bin/systemctl start ceph-mgr.target >/dev/null 2>&1 || : +fi + +%preun mgr +%if 0%{?suse_version} +%service_del_preun ceph-mgr@\*.service ceph-mgr.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_preun ceph-mgr@\*.service ceph-mgr.target +%endif + +%postun mgr +%systemd_postun ceph-mgr@\*.service ceph-mgr.target +if [ $1 -ge 1 ] ; then + # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to + # "yes". In any case: if units are not running, do not touch them. 
+ SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then + source $SYSCONF_CEPH + fi + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl try-restart ceph-mgr@\*.service > /dev/null 2>&1 || : + fi +fi + +%files mgr-dashboard +%{_datadir}/ceph/mgr/dashboard + +%post mgr-dashboard +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : +fi + +%postun mgr-dashboard +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : +fi + +%files mgr-diskprediction-local +%{_datadir}/ceph/mgr/diskprediction_local + +%post mgr-diskprediction-local +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : +fi + +%postun mgr-diskprediction-local +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : +fi + +%files mgr-modules-core +%dir %{_datadir}/ceph/mgr +%{_datadir}/ceph/mgr/alerts +%{_datadir}/ceph/mgr/balancer +%{_datadir}/ceph/mgr/crash +%{_datadir}/ceph/mgr/devicehealth +%{_datadir}/ceph/mgr/influx +%{_datadir}/ceph/mgr/insights +%{_datadir}/ceph/mgr/iostat +%{_datadir}/ceph/mgr/localpool +%{_datadir}/ceph/mgr/mds_autoscaler +%{_datadir}/ceph/mgr/orchestrator +%{_datadir}/ceph/mgr/osd_perf_query +%{_datadir}/ceph/mgr/osd_support +%{_datadir}/ceph/mgr/pg_autoscaler +%{_datadir}/ceph/mgr/progress +%{_datadir}/ceph/mgr/prometheus +%{_datadir}/ceph/mgr/rbd_support +%{_datadir}/ceph/mgr/restful +%{_datadir}/ceph/mgr/selftest +%{_datadir}/ceph/mgr/snap_schedule +%{_datadir}/ceph/mgr/stats +%{_datadir}/ceph/mgr/status +%{_datadir}/ceph/mgr/telegraf +%{_datadir}/ceph/mgr/telemetry +%{_datadir}/ceph/mgr/test_orchestrator +%{_datadir}/ceph/mgr/volumes +%{_datadir}/ceph/mgr/zabbix + +%files mgr-rook +%{_datadir}/ceph/mgr/rook + +%post mgr-rook +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : +fi + +%postun mgr-rook +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : +fi + +%files mgr-k8sevents +%{_datadir}/ceph/mgr/k8sevents + +%post mgr-k8sevents +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : +fi + +%postun mgr-k8sevents +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : +fi + +%files mgr-cephadm +%{_datadir}/ceph/mgr/cephadm + +%post mgr-cephadm +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : +fi + +%postun mgr-cephadm +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : +fi + +%files mon +%{_bindir}/ceph-mon +%{_bindir}/ceph-monstore-tool +%{_mandir}/man8/ceph-mon.8* +%{_unitdir}/ceph-mon@.service +%{_unitdir}/ceph-mon.target +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mon + +%post mon +%if 0%{?suse_version} +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl preset ceph-mon@\*.service ceph-mon.target >/dev/null 2>&1 || : +fi +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_post ceph-mon@\*.service ceph-mon.target +%endif +if [ $1 -eq 1 ] ; then +/usr/bin/systemctl start ceph-mon.target >/dev/null 2>&1 || : +fi + +%preun mon +%if 0%{?suse_version} +%service_del_preun ceph-mon@\*.service ceph-mon.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_preun ceph-mon@\*.service ceph-mon.target +%endif + +%postun mon +%systemd_postun ceph-mon@\*.service ceph-mon.target +if [ $1 -ge 1 ] ; then + # Restart on upgrade, but only if 
"CEPH_AUTO_RESTART_ON_UPGRADE" is set to + # "yes". In any case: if units are not running, do not touch them. + SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then + source $SYSCONF_CEPH + fi + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl try-restart ceph-mon@\*.service > /dev/null 2>&1 || : + fi +fi + +%files fuse +%{_bindir}/ceph-fuse +%{_mandir}/man8/ceph-fuse.8* +%{_sbindir}/mount.fuse.ceph +%{_mandir}/man8/mount.fuse.ceph.8* +%{_unitdir}/ceph-fuse@.service +%{_unitdir}/ceph-fuse.target + +%files -n cephfs-mirror +%{_bindir}/cephfs-mirror +%{_mandir}/man8/cephfs-mirror.8* +%{_unitdir}/cephfs-mirror@.service +%{_unitdir}/cephfs-mirror.target + +%post -n cephfs-mirror +%if 0%{?suse_version} +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl preset cephfs-mirror@\*.service cephfs-mirror.target >/dev/null 2>&1 || : +fi +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_post cephfs-mirror@\*.service cephfs-mirror.target +%endif +if [ $1 -eq 1 ] ; then +/usr/bin/systemctl start cephfs-mirror.target >/dev/null 2>&1 || : +fi + +%preun -n cephfs-mirror +%if 0%{?suse_version} +%service_del_preun cephfs-mirror@\*.service cephfs-mirror.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_preun cephfs-mirror@\*.service cephfs-mirror.target +%endif + +%postun -n cephfs-mirror +%systemd_postun cephfs-mirror@\*.service cephfs-mirror.target +if [ $1 -ge 1 ] ; then + # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to + # "yes". In any case: if units are not running, do not touch them. + SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then + source $SYSCONF_CEPH + fi + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl try-restart cephfs-mirror@\*.service > /dev/null 2>&1 || : + fi +fi + +%files -n rbd-fuse +%{_bindir}/rbd-fuse +%{_mandir}/man8/rbd-fuse.8* + +%files -n rbd-mirror +%{_bindir}/rbd-mirror +%{_mandir}/man8/rbd-mirror.8* +%{_unitdir}/ceph-rbd-mirror@.service +%{_unitdir}/ceph-rbd-mirror.target + +%post -n rbd-mirror +%if 0%{?suse_version} +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl preset ceph-rbd-mirror@\*.service ceph-rbd-mirror.target >/dev/null 2>&1 || : +fi +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_post ceph-rbd-mirror@\*.service ceph-rbd-mirror.target +%endif +if [ $1 -eq 1 ] ; then +/usr/bin/systemctl start ceph-rbd-mirror.target >/dev/null 2>&1 || : +fi + +%preun -n rbd-mirror +%if 0%{?suse_version} +%service_del_preun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_preun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target +%endif + +%postun -n rbd-mirror +%systemd_postun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target +if [ $1 -ge 1 ] ; then + # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to + # "yes". In any case: if units are not running, do not touch them. 
+ SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then + source $SYSCONF_CEPH + fi + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl try-restart ceph-rbd-mirror@\*.service > /dev/null 2>&1 || : + fi +fi + +%files immutable-object-cache +%{_bindir}/ceph-immutable-object-cache +%{_mandir}/man8/ceph-immutable-object-cache.8* +%{_unitdir}/ceph-immutable-object-cache@.service +%{_unitdir}/ceph-immutable-object-cache.target + +%post immutable-object-cache +%if 0%{?suse_version} +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl preset ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target >/dev/null 2>&1 || : +fi +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_post ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target +%endif +if [ $1 -eq 1 ] ; then +/usr/bin/systemctl start ceph-immutable-object-cache.target >/dev/null 2>&1 || : +fi + +%preun immutable-object-cache +%if 0%{?suse_version} +%service_del_preun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_preun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target +%endif + +%postun immutable-object-cache +test -n "$FIRST_ARG" || FIRST_ARG=$1 +%systemd_postun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target +if [ $FIRST_ARG -ge 1 ] ; then + # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to + # "yes". In any case: if units are not running, do not touch them. + SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then + source $SYSCONF_CEPH + fi + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl try-restart ceph-immutable-object-cache@\*.service > /dev/null 2>&1 || : + fi +fi + +%files -n rbd-nbd +%{_bindir}/rbd-nbd +%{_mandir}/man8/rbd-nbd.8* +%dir %{_libexecdir}/rbd-nbd +%{_libexecdir}/rbd-nbd/rbd-nbd_quiesce + +%files radosgw +%{_bindir}/ceph-diff-sorted +%{_bindir}/radosgw +%{_bindir}/radosgw-token +%{_bindir}/radosgw-es +%{_bindir}/radosgw-object-expirer +%{_bindir}/rgw-orphan-list +%{_libdir}/libradosgw.so* +%{_mandir}/man8/radosgw.8* +%dir %{_localstatedir}/lib/ceph/radosgw +%{_unitdir}/ceph-radosgw@.service +%{_unitdir}/ceph-radosgw.target + +%post radosgw +/sbin/ldconfig +%if 0%{?suse_version} +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl preset ceph-radosgw@\*.service ceph-radosgw.target >/dev/null 2>&1 || : +fi +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_post ceph-radosgw@\*.service ceph-radosgw.target +%endif +if [ $1 -eq 1 ] ; then +/usr/bin/systemctl start ceph-radosgw.target >/dev/null 2>&1 || : +fi + +%preun radosgw +%if 0%{?suse_version} +%service_del_preun ceph-radosgw@\*.service ceph-radosgw.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_preun ceph-radosgw@\*.service ceph-radosgw.target +%endif + +%postun radosgw +/sbin/ldconfig +%systemd_postun ceph-radosgw@\*.service ceph-radosgw.target +if [ $1 -ge 1 ] ; then + # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to + # "yes". In any case: if units are not running, do not touch them. 
+ SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then + source $SYSCONF_CEPH + fi + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl try-restart ceph-radosgw@\*.service > /dev/null 2>&1 || : + fi +fi + +%files osd +%{_bindir}/ceph-clsinfo +%{_bindir}/ceph-bluestore-tool +%{_bindir}/ceph-erasure-code-tool +%{_bindir}/ceph-objectstore-tool +%{_bindir}/ceph-osdomap-tool +%{_bindir}/ceph-osd +%{_libexecdir}/ceph/ceph-osd-prestart.sh +%{_sbindir}/ceph-volume +%{_sbindir}/ceph-volume-systemd +%{_mandir}/man8/ceph-clsinfo.8* +%{_mandir}/man8/ceph-osd.8* +%{_mandir}/man8/ceph-bluestore-tool.8* +%{_mandir}/man8/ceph-volume.8* +%{_mandir}/man8/ceph-volume-systemd.8* +%{_unitdir}/ceph-osd@.service +%{_unitdir}/ceph-osd.target +%{_unitdir}/ceph-volume@.service +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/osd +%config(noreplace) %{_sysctldir}/90-ceph-osd.conf +%{_sysconfdir}/sudoers.d/ceph-osd-smartctl + +%post osd +%if 0%{?suse_version} +if [ $1 -eq 1 ] ; then + /usr/bin/systemctl preset ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target >/dev/null 2>&1 || : +fi +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_post ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target +%endif +if [ $1 -eq 1 ] ; then +/usr/bin/systemctl start ceph-osd.target >/dev/null 2>&1 || : +fi +%if 0%{?sysctl_apply} + %sysctl_apply 90-ceph-osd.conf +%else + /usr/lib/systemd/systemd-sysctl %{_sysctldir}/90-ceph-osd.conf > /dev/null 2>&1 || : +%endif + +%preun osd +%if 0%{?suse_version} +%service_del_preun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_preun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target +%endif + +%postun osd +%systemd_postun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target +if [ $1 -ge 1 ] ; then + # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to + # "yes". In any case: if units are not running, do not touch them. 
+  SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
+  if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
+    source $SYSCONF_CEPH
+  fi
+  if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
+    /usr/bin/systemctl try-restart ceph-osd@\*.service ceph-volume@\*.service > /dev/null 2>&1 || :
+  fi
+fi
+
+%if 0%{with seastar}
+%files crimson-osd
+%{_bindir}/crimson-osd
+%endif
+
+%if %{with ocf}
+
+%files resource-agents
+%dir %{_prefix}/lib/ocf
+%dir %{_prefix}/lib/ocf/resource.d
+%dir %{_prefix}/lib/ocf/resource.d/ceph
+%attr(0755,-,-) %{_prefix}/lib/ocf/resource.d/ceph/rbd
+
+%endif
+
 %files -n librados2
 %{_libdir}/librados.so.*
 %dir %{_libdir}/ceph
@@ -1039,6 +2185,31 @@ fi
 %{_libdir}/librbd_tp.so
 %endif
 
+%files -n librgw2
+%{_libdir}/librgw.so.*
+%if %{with lttng}
+%{_libdir}/librgw_op_tp.so.*
+%{_libdir}/librgw_rados_tp.so.*
+%endif
+
+%post -n librgw2 -p /sbin/ldconfig
+
+%postun -n librgw2 -p /sbin/ldconfig
+
+%files -n librgw-devel
+%dir %{_includedir}/rados
+%{_includedir}/rados/librgw.h
+%{_includedir}/rados/rgw_file.h
+%{_libdir}/librgw.so
+%if %{with lttng}
+%{_libdir}/librgw_op_tp.so
+%{_libdir}/librgw_rados_tp.so
+%endif
+
+%files -n python%{python3_pkgversion}-rgw
+%{python3_sitearch}/rgw.cpython*.so
+%{python3_sitearch}/rgw-*.egg-info
+
 %files -n python%{python3_pkgversion}-rbd
 %{python3_sitearch}/rbd.cpython*.so
 %{python3_sitearch}/rbd-*.egg-info
@@ -1059,6 +2230,15 @@ fi
 %{_includedir}/cephfs/metrics/Types.h
 %{_libdir}/libcephfs.so
 
+%if %{with jaeger}
+%files -n libjaeger
+%{_libdir}/libopentracing.so.*
+%{_libdir}/libthrift.so.*
+%{_libdir}/libjaegertracing.so.*
+%post -n libjaeger -p /sbin/ldconfig
+%postun -n libjaeger -p /sbin/ldconfig
+%endif
+
 %files -n python%{python3_pkgversion}-cephfs
 %{python3_sitearch}/cephfs.cpython*.so
 %{python3_sitearch}/cephfs-*.egg-info
@@ -1075,6 +2255,17 @@ fi
 %{python3_sitelib}/ceph
 %{python3_sitelib}/ceph-*.egg-info
 
+%if 0%{with cephfs_shell}
+%files -n cephfs-shell
+%{python3_sitelib}/cephfs_shell-*.egg-info
+%{_bindir}/cephfs-shell
+%endif
+
+%files -n cephfs-top
+%{python3_sitelib}/cephfs_top-*.egg-info
+%{_bindir}/cephfs-top
+%{_mandir}/man8/cephfs-top.8*
+
 %if 0%{with ceph_test_package}
 %files -n ceph-test
 %{_bindir}/ceph-client-debug
@@ -1098,20 +2289,144 @@ fi
 %{_bindir}/ceph-coverage
 %{_bindir}/ceph-debugpack
 %{_bindir}/ceph-dedup-tool
+%if 0%{with seastar}
+%{_bindir}/crimson-store-nbd
+%endif
 %{_mandir}/man8/ceph-debugpack.8*
 %dir %{_libdir}/ceph
 %{_libdir}/ceph/ceph-monstore-update-crush.sh
 %endif
 
+%if 0%{with cephfs_java}
+%files -n libcephfs_jni1
+%{_libdir}/libcephfs_jni.so.*
+
+%post -n libcephfs_jni1 -p /sbin/ldconfig
+
+%postun -n libcephfs_jni1 -p /sbin/ldconfig
+
+%files -n libcephfs_jni-devel
+%{_libdir}/libcephfs_jni.so
+
+%files -n cephfs-java
+%{_javadir}/libcephfs.jar
+%{_javadir}/libcephfs-test.jar
+%endif
+
 %files -n rados-objclass-devel
 %dir %{_includedir}/rados
 %{_includedir}/rados/objclass.h
 
+%if 0%{with selinux}
+%files selinux
+%attr(0600,root,root) %{_datadir}/selinux/packages/ceph.pp
+%{_datadir}/selinux/devel/include/contrib/ceph.if
+%{_mandir}/man8/ceph_selinux.8*
+
+%post selinux
+# backup file_contexts before update
+. /etc/selinux/config
+FILE_CONTEXT=/etc/selinux/${SELINUXTYPE}/contexts/files/file_contexts
+cp ${FILE_CONTEXT} ${FILE_CONTEXT}.pre
+
+# Install the policy
+/usr/sbin/semodule -i %{_datadir}/selinux/packages/ceph.pp
+
+# Load the policy if SELinux is enabled
+if ! /usr/sbin/selinuxenabled; then
+  # Do not relabel if SELinux is not enabled
+  exit 0
+fi
+
+if diff ${FILE_CONTEXT} ${FILE_CONTEXT}.pre > /dev/null 2>&1; then
+  # Do not relabel if file contexts did not change
+  exit 0
+fi
+
+# Check whether the daemons are running
+/usr/bin/systemctl status ceph.target > /dev/null 2>&1
+STATUS=$?
+
+# Stop the daemons if they were running
+if test $STATUS -eq 0; then
+  /usr/bin/systemctl stop ceph.target > /dev/null 2>&1
+fi
+
+# Relabel the files; fix for first package install
+/usr/sbin/fixfiles -C ${FILE_CONTEXT}.pre restore 2> /dev/null
+
+rm -f ${FILE_CONTEXT}.pre
+# The fixfiles command won't fix label for /var/run/ceph
+/usr/sbin/restorecon -R /var/run/ceph > /dev/null 2>&1
+
+# Start the daemons iff they were running before
+if test $STATUS -eq 0; then
+  /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || :
+fi
+exit 0
+
+%postun selinux
+if [ $1 -eq 0 ]; then
+  # backup file_contexts before update
+  . /etc/selinux/config
+  FILE_CONTEXT=/etc/selinux/${SELINUXTYPE}/contexts/files/file_contexts
+  cp ${FILE_CONTEXT} ${FILE_CONTEXT}.pre
+
+  # Remove the module
+  /usr/sbin/semodule -n -r ceph > /dev/null 2>&1
+
+  # Reload the policy if SELinux is enabled
+  if ! /usr/sbin/selinuxenabled ; then
+    # Do not relabel if SELinux is not enabled
+    exit 0
+  fi
+
+  # Check whether the daemons are running
+  /usr/bin/systemctl status ceph.target > /dev/null 2>&1
+  STATUS=$?
+
+  # Stop the daemons if they were running
+  if test $STATUS -eq 0; then
+    /usr/bin/systemctl stop ceph.target > /dev/null 2>&1
+  fi
+
+  /usr/sbin/fixfiles -C ${FILE_CONTEXT}.pre restore 2> /dev/null
+  rm -f ${FILE_CONTEXT}.pre
+  # The fixfiles command won't fix label for /var/run/ceph
+  /usr/sbin/restorecon -R /var/run/ceph > /dev/null 2>&1
+
+  # Start the daemons if they were running before
+  if test $STATUS -eq 0; then
+    /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || :
+  fi
+fi
+exit 0
+
+%endif
+
+%files grafana-dashboards
+%if 0%{?suse_version}
+%attr(0755,root,root) %dir %{_sysconfdir}/grafana
+%attr(0755,root,root) %dir %{_sysconfdir}/grafana/dashboards
+%endif
+%attr(0755,root,root) %dir %{_sysconfdir}/grafana/dashboards/ceph-dashboard
+%config %{_sysconfdir}/grafana/dashboards/ceph-dashboard/*
+%doc monitoring/grafana/dashboards/README
+%doc monitoring/grafana/README.md
+
+%files prometheus-alerts
+%if 0%{?suse_version}
+%attr(0755,root,root) %dir %{_sysconfdir}/prometheus
+%endif
+%attr(0755,root,root) %dir %{_sysconfdir}/prometheus/ceph
+%config %{_sysconfdir}/prometheus/ceph/ceph_default_alerts.yml
+
 %changelog
-* Fri Jan 29 2021 Boris Ranto - 2:16.1.0-0.2.snapshot
-- disable multiple build options
-- disable multiple packages
-- remove unnecessary files
+* Mon Feb 1 2021 Kaleb S. KEITHLEY - 2:16.1.0-0.2.snapshot
+- libblk.so -> libblk.a
+- libneoradostest-support.so -> libneoradostest-support.a
+- w/ liburing-devel, -DWITH_SYSTEM_LIBURING
+- w/ rocksdb-devel, -DWITH_SYSTEM_ROCKSDB
 
 * Fri Jan 29 2021 Kaleb S. KEITHLEY - 2:16.1.0-0.1.snapshot
 - ceph 16.1.0 RC (ceph-16.1.0-43-g6b74fb5c)