import libhugetlbfs-2.21-12.el8

This commit is contained in:
CentOS Sources 2020-04-28 05:40:39 -04:00 committed by Andrew Lukoshko
parent 651835c213
commit 68c86434a3
36 changed files with 2213 additions and 422 deletions

View File

@ -0,0 +1,100 @@
From d42f467a923dfc09309acb7a83b42e3285fbd8f4 Mon Sep 17 00:00:00 2001
Message-Id: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:28 +0530
Subject: [RHEL7 PATCH 01/31] tests: Add utility to check for a minimum number
of online cpus
This adds a test utility to check if a minimum number (N)
of online cpus are available. If available, this will also
provide a list of the first N online cpus.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/hugetests.h | 1 +
tests/testutils.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 49 insertions(+)
diff --git a/tests/hugetests.h b/tests/hugetests.h
index 8b1d8d9..e3179e6 100644
--- a/tests/hugetests.h
+++ b/tests/hugetests.h
@@ -43,6 +43,7 @@ extern char *test_name;
void check_free_huge_pages(int nr_pages_needed);
void check_must_be_root(void);
void check_hugetlb_shm_group(void);
+void check_online_cpus(int[], int);
void test_init(int argc, char *argv[]);
int test_addr_huge(void *p);
unsigned long long get_mapping_page_size(void *p);
diff --git a/tests/testutils.c b/tests/testutils.c
index 6298370..2b47547 100644
--- a/tests/testutils.c
+++ b/tests/testutils.c
@@ -33,6 +33,8 @@
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/stat.h>
+#include <sys/sysinfo.h>
+#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
@@ -80,6 +82,52 @@ void check_hugetlb_shm_group(void)
CONFIG("Do not have permission to use SHM_HUGETLB");
}
+#define SYSFS_CPU_ONLINE_FMT "/sys/devices/system/cpu/cpu%d/online"
+
+void check_online_cpus(int online_cpus[], int nr_cpus_needed)
+{
+ char cpu_state, path_buf[64];
+ int total_cpus, cpu_idx, fd, ret, i;
+
+ total_cpus = get_nprocs_conf();
+ cpu_idx = 0;
+
+ if (get_nprocs() < nr_cpus_needed)
+ CONFIG("Atleast online %d cpus are required", nr_cpus_needed);
+
+ for (i = 0; i < total_cpus && cpu_idx < nr_cpus_needed; i++) {
+ errno = 0;
+ sprintf(path_buf, SYSFS_CPU_ONLINE_FMT, i);
+ fd = open(path_buf, O_RDONLY);
+ if (fd < 0) {
+ /* If 'online' is absent, the cpu cannot be offlined */
+ if (errno == ENOENT) {
+ online_cpus[cpu_idx] = i;
+ cpu_idx++;
+ continue;
+ } else {
+ FAIL("Unable to open %s: %s", path_buf,
+ strerror(errno));
+ }
+ }
+
+ ret = read(fd, &cpu_state, 1);
+ if (ret < 1)
+ FAIL("Unable to read %s: %s", path_buf,
+ strerror(errno));
+
+ if (cpu_state == '1') {
+ online_cpus[cpu_idx] = i;
+ cpu_idx++;
+ }
+
+ close(fd);
+ }
+
+ if (cpu_idx < nr_cpus_needed)
+ CONFIG("Atleast %d online cpus were not found", nr_cpus_needed);
+}
+
void __attribute__((weak)) cleanup(void)
{
}
--
1.8.3.1

View File

@ -0,0 +1,74 @@
From 865d160eff7e6c69968d0196272030f206dd3430 Mon Sep 17 00:00:00 2001
Message-Id: <865d160eff7e6c69968d0196272030f206dd3430.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:29 +0530
Subject: [RHEL7 PATCH 02/31] tests: slbpacaflush: Use online cpus only
This ensures that the two cpus between which the thread is
migrated are online. For offline cpus, sched_setaffinity()
will always fail.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/slbpacaflush.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/tests/slbpacaflush.c b/tests/slbpacaflush.c
index 8893c4d..765e069 100644
--- a/tests/slbpacaflush.c
+++ b/tests/slbpacaflush.c
@@ -57,29 +57,32 @@ int main(int argc, char *argv[])
int fd;
void *p;
volatile unsigned long *q;
- int err;
+ int online_cpus[2], err;
cpu_set_t cpu0, cpu1;
test_init(argc, argv);
hpage_size = check_hugepagesize();
+ check_online_cpus(online_cpus, 2);
fd = hugetlbfs_unlinked_fd();
if (fd < 0)
FAIL("hugetlbfs_unlinked_fd()");
CPU_ZERO(&cpu0);
- CPU_SET(0, &cpu0);
+ CPU_SET(online_cpus[0], &cpu0);
CPU_ZERO(&cpu1);
- CPU_SET(1, &cpu1);
+ CPU_SET(online_cpus[1], &cpu1);
err = sched_setaffinity(getpid(), CPU_SETSIZE/8, &cpu0);
if (err != 0)
- CONFIG("sched_setaffinity(cpu0): %s", strerror(errno));
+ CONFIG("sched_setaffinity(cpu%d): %s", online_cpus[0],
+ strerror(errno));
err = sched_setaffinity(getpid(), CPU_SETSIZE/8, &cpu1);
if (err != 0)
- CONFIG("sched_setaffinity(): %s", strerror(errno));
+ CONFIG("sched_setaffinity(cpu%d): %s", online_cpus[1],
+ strerror(errno));
p = mmap(NULL, hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
if (p == MAP_FAILED)
@@ -87,7 +90,8 @@ int main(int argc, char *argv[])
err = sched_setaffinity(getpid(), CPU_SETSIZE/8, &cpu0);
if (err != 0)
- CONFIG("sched_setaffinity(cpu0): %s", strerror(errno));
+ CONFIG("sched_setaffinity(cpu%d): %s", online_cpus[0],
+ strerror(errno));
q = (volatile unsigned long *)(p + getpagesize());
*q = 0xdeadbeef;
--
1.8.3.1

View File

@ -0,0 +1,70 @@
From 4ba9722027d9aeec173866b5ca12282268594f35 Mon Sep 17 00:00:00 2001
Message-Id: <4ba9722027d9aeec173866b5ca12282268594f35.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:30 +0530
Subject: [RHEL7 PATCH 03/31] tests: alloc-instantiate-race: Use online cpus
only
This ensures that the two processes or threads between which
the race condition is introduced are always running on online
cpus. For offline cpus, sched_setaffinity() will always fail.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/alloc-instantiate-race.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/tests/alloc-instantiate-race.c b/tests/alloc-instantiate-race.c
index 7f84e8a..f55e2f7 100644
--- a/tests/alloc-instantiate-race.c
+++ b/tests/alloc-instantiate-race.c
@@ -121,7 +121,9 @@ static void run_race(void *syncarea, int race_type)
int fd;
void *p;
int status1, status2;
- int ret;
+ int online_cpus[2], ret;
+
+ check_online_cpus(online_cpus, 2);
memset(syncarea, 0, sizeof(*trigger1) + sizeof(*trigger2));
trigger1 = syncarea;
@@ -143,13 +145,13 @@ static void run_race(void *syncarea, int race_type)
if (child1 < 0)
FAIL("fork(): %s", strerror(errno));
if (child1 == 0)
- proc_racer(p, 0, trigger1, trigger2);
+ proc_racer(p, online_cpus[0], trigger1, trigger2);
child2 = fork();
if (child2 < 0)
FAIL("fork(): %s", strerror(errno));
if (child2 == 0)
- proc_racer(p, 1, trigger2, trigger1);
+ proc_racer(p, online_cpus[1], trigger2, trigger1);
/* wait() calls */
ret = waitpid(child1, &status1, 0);
@@ -175,13 +177,13 @@ static void run_race(void *syncarea, int race_type)
} else {
struct racer_info ri1 = {
.p = p,
- .cpu = 0,
+ .cpu = online_cpus[0],
.mytrigger = trigger1,
.othertrigger = trigger2,
};
struct racer_info ri2 = {
.p = p,
- .cpu = 1,
+ .cpu = online_cpus[1],
.mytrigger = trigger2,
.othertrigger = trigger1,
};
--
1.8.3.1

View File

@ -0,0 +1,52 @@
From 2f38664f81e1877f81b16ed327b540d69d175a5b Mon Sep 17 00:00:00 2001
Message-Id: <2f38664f81e1877f81b16ed327b540d69d175a5b.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:31 +0530
Subject: [RHEL7 PATCH 04/31] tests: task-size-overrun: Make test faster on
powerpc64
As of now, powerpc64 supports 64TB, 128TB, 512TB, 1PB, 2PB and
4PB user address space sizes with 4TB being the default for the
newer kernels. With the relatively conservative increments that
this test uses to find the task size, it takes a very long time
but this can be made faster by also increasing the increment
factor in steps of the different supported task sizes.
Fixes: 02df38e ("Defined task size value to be 512T if it is more that 64Tb.")
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/task-size-overrun.c | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/tests/task-size-overrun.c b/tests/task-size-overrun.c
index dc9ce0e..914ef65 100644
--- a/tests/task-size-overrun.c
+++ b/tests/task-size-overrun.c
@@ -83,8 +83,18 @@ static unsigned long find_task_size(void)
munmap(p, getpagesize());
addr += getpagesize();
#if defined(__powerpc64__)
- if (addr > (1UL << 46) && addr < (1UL << 49))
- addr = 1UL << 49;
+ if (addr > (1UL << 46) && addr < (1UL << 47))
+ addr = 1UL << 47; /* 64TB */
+ else if (addr > (1UL << 47) && addr < (1UL << 48))
+ addr = 1UL << 48; /* 128TB */
+ else if (addr > (1UL << 48) && addr < (1UL << 49))
+ addr = 1UL << 49; /* 512TB */
+ else if (addr > (1UL << 49) && addr < (1UL << 50))
+ addr = 1UL << 50; /* 1PB */
+ else if (addr > (1UL << 50) && addr < (1UL << 51))
+ addr = 1UL << 51; /* 2PB */
+ else if (addr > (1UL << 51) && addr < (1UL << 52))
+ addr = 1UL << 52; /* 4PB */
#endif
#if defined(__s390x__)
if (addr > (1UL << 42) && addr < (1UL << 53))
--
1.8.3.1

View File

@ -0,0 +1,50 @@
From 2a63852ac9358cdddce9944aade1d443f686246a Mon Sep 17 00:00:00 2001
Message-Id: <2a63852ac9358cdddce9944aade1d443f686246a.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:32 +0530
Subject: [RHEL7 PATCH 05/31] tests: truncate-above-4GB: Skip if truncation
point is not aligned
Attempting ftruncate() on a hugetlbfs file descriptor requires
the truncation point to be aligned to the huge page size. So,
this test is not applicable for huge page sizes that are
either greater than or not a factor of the truncation point.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/truncate_above_4GB.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/tests/truncate_above_4GB.c b/tests/truncate_above_4GB.c
index 4c427fc..2e29931 100644
--- a/tests/truncate_above_4GB.c
+++ b/tests/truncate_above_4GB.c
@@ -79,6 +79,13 @@ int main(int argc, char *argv[])
page_size = getpagesize();
hpage_size = check_hugepagesize();
+ truncate_point = FOURGIG;
+
+ if (hpage_size > truncate_point)
+ CONFIG("Huge page size is too large");
+
+ if (truncate_point % hpage_size > 0)
+ CONFIG("Truncation point is not aligned to huge page size");
check_free_huge_pages(3);
@@ -86,7 +93,6 @@ int main(int argc, char *argv[])
if (fd < 0)
FAIL("hugetlbfs_unlinked_fd()");
- truncate_point = FOURGIG;
buggy_offset = truncate_point / (hpage_size / page_size);
buggy_offset = ALIGN(buggy_offset, hpage_size);
--
1.8.3.1

View File

@ -0,0 +1,49 @@
From 65c07c0f64ef1c97f9aea80d0c8470417e377a6a Mon Sep 17 00:00:00 2001
Message-Id: <65c07c0f64ef1c97f9aea80d0c8470417e377a6a.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:33 +0530
Subject: [RHEL7 PATCH 06/31] tests: map-high-truncate-2: Skip if truncation
point is not aligned
Attempting ftruncate() on a hugetlbfs file descriptor requires
the truncation point to be aligned to the huge page size. So,
this test is not applicable for huge page sizes that are
either greater than or not a factor of the truncation point.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/map_high_truncate_2.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/tests/map_high_truncate_2.c b/tests/map_high_truncate_2.c
index 2a2560b..fc44a13 100644
--- a/tests/map_high_truncate_2.c
+++ b/tests/map_high_truncate_2.c
@@ -56,6 +56,7 @@
#define TRUNCATE_POINT 0x60000000UL
#endif
#define HIGH_ADDR 0xa0000000UL
+#define FOURGIG ((off64_t)0x100000000ULL)
int main(int argc, char *argv[])
{
@@ -69,6 +70,12 @@ int main(int argc, char *argv[])
hpage_size = check_hugepagesize();
+ if (hpage_size > TRUNCATE_POINT)
+ CONFIG("Huge page size is too large");
+
+ if (TRUNCATE_POINT % hpage_size)
+ CONFIG("Truncation point is not aligned to huge page size");
+
check_free_huge_pages(4);
fd = hugetlbfs_unlinked_fd();
--
1.8.3.1

View File

@ -0,0 +1,130 @@
From e472e326d31a125e21453d75cb46bba9cf387952 Mon Sep 17 00:00:00 2001
Message-Id: <e472e326d31a125e21453d75cb46bba9cf387952.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:34 +0530
Subject: [RHEL7 PATCH 07/31] morecore: tests: Run tests only for default huge
page size
The morecore tests (malloc, malloc-manysmall and heapshrink)
are not linked against libhugetlbfs and cannot invoke library
functions like gethugepagesize(). Hence, run these tests only
for the kernel's default huge page size.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 81 +++++++++++++++++++++++++++++++++---------------------
1 file changed, 49 insertions(+), 32 deletions(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 3c95a03..70c5a6a 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -362,6 +362,16 @@ def do_test_with_rlimit(rtype, limit, cmd, bits=None, **env):
do_test(cmd, bits, **env)
resource.setrlimit(rtype, oldlimit)
+def do_test_with_pagesize(pagesize, cmd, bits=None, **env):
+ """
+ Run a test case, testing with a specified huge page size and
+ each indicated word size.
+ """
+ if bits == None:
+ bits = wordsizes
+ for b in (set(bits) & wordsizes_by_pagesize[pagesize]):
+ run_test(pagesize, b, cmd, **env)
+
def do_elflink_test(cmd, **env):
"""
Run an elflink test case, skipping known-bad configurations.
@@ -563,15 +573,22 @@ def functional_tests():
do_test("private")
do_test("fork-cow")
do_test("direct")
- do_test("malloc")
- do_test("malloc", LD_PRELOAD="libhugetlbfs.so", HUGETLB_MORECORE="yes")
- do_test("malloc", LD_PRELOAD="libhugetlbfs.so", HUGETLB_MORECORE="yes",
- HUGETLB_RESTRICT_EXE="unknown:none")
- do_test("malloc", LD_PRELOAD="libhugetlbfs.so", HUGETLB_MORECORE="yes",
- HUGETLB_RESTRICT_EXE="unknown:malloc")
- do_test("malloc_manysmall")
- do_test("malloc_manysmall", LD_PRELOAD="libhugetlbfs.so",
- HUGETLB_MORECORE="yes")
+ do_test_with_pagesize(system_default_hpage_size, "malloc")
+ do_test_with_pagesize(system_default_hpage_size, "malloc",
+ LD_PRELOAD="libhugetlbfs.so",
+ HUGETLB_MORECORE="yes")
+ do_test_with_pagesize(system_default_hpage_size, "malloc",
+ LD_PRELOAD="libhugetlbfs.so",
+ HUGETLB_MORECORE="yes",
+ HUGETLB_RESTRICT_EXE="unknown:none")
+ do_test_with_pagesize(system_default_hpage_size, "malloc",
+ LD_PRELOAD="libhugetlbfs.so",
+ HUGETLB_MORECORE="yes",
+ HUGETLB_RESTRICT_EXE="unknown:malloc")
+ do_test_with_pagesize(system_default_hpage_size, "malloc_manysmall")
+ do_test_with_pagesize(system_default_hpage_size, "malloc_manysmall",
+ LD_PRELOAD="libhugetlbfs.so",
+ HUGETLB_MORECORE="yes")
# After upstream commit: (glibc-2.25.90-688-gd5c3fafc43) glibc has a
# new per-thread caching mechanism that will NOT allow heapshrink test to
@@ -584,29 +601,29 @@ def functional_tests():
# program context (not even with a constructor function), and the tunable
# is only evaluated during malloc() initialization.
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0")
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
- LD_PRELOAD="libheapshrink.so")
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
- LD_PRELOAD="libhugetlbfs.so",
- HUGETLB_MORECORE="yes")
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
- LD_PRELOAD="libhugetlbfs.so libheapshrink.so",
- HUGETLB_MORECORE="yes")
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
- LD_PRELOAD="libheapshrink.so",
- HUGETLB_MORECORE="yes",
- HUGETLB_MORECORE_SHRINK="yes")
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
- LD_PRELOAD="libhugetlbfs.so libheapshrink.so",
- HUGETLB_MORECORE="yes",
- HUGETLB_MORECORE_SHRINK="yes")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
+ LD_PRELOAD="libheapshrink.so")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
+ LD_PRELOAD="libhugetlbfs.so",
+ HUGETLB_MORECORE="yes")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
+ LD_PRELOAD="libhugetlbfs.so libheapshrink.so",
+ HUGETLB_MORECORE="yes")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
+ LD_PRELOAD="libheapshrink.so",
+ HUGETLB_MORECORE="yes",
+ HUGETLB_MORECORE_SHRINK="yes")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
+ LD_PRELOAD="libhugetlbfs.so libheapshrink.so",
+ HUGETLB_MORECORE="yes",
+ HUGETLB_MORECORE_SHRINK="yes")
do_test("heap-overflow", HUGETLB_VERBOSE="1", HUGETLB_MORECORE="yes")
--
1.8.3.1

View File

@ -0,0 +1,53 @@
From 4ba60a2f5c3f5405c599caddc5a124c5781c9beb Mon Sep 17 00:00:00 2001
Message-Id: <4ba60a2f5c3f5405c599caddc5a124c5781c9beb.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:35 +0530
Subject: [RHEL7 PATCH 08/31] hugeutils: Make writing a ulong to a file more
reliable
This makes file_write_ulong() more reliable in terms of error
detection for certain cases like writing an invalid value to
a file under procfs or sysfs. Also, using fprintf() does not
guarantee that errno would be set under such circumstances.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
hugeutils.c | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/hugeutils.c b/hugeutils.c
index 60488e8..fc64946 100644
--- a/hugeutils.c
+++ b/hugeutils.c
@@ -219,17 +219,18 @@ long file_read_ulong(char *file, const char *tag)
int file_write_ulong(char *file, unsigned long val)
{
- FILE *f;
- int ret;
+ int fd, ret, buflen;
+ char buf[20];
- f = fopen(file, "w");
- if (!f) {
+ fd = open(file, O_WRONLY);
+ if (fd < 0) {
ERROR("Couldn't open %s: %s\n", file, strerror(errno));
return -1;
}
- ret = fprintf(f, "%lu", val);
- fclose(f);
+ buflen = sprintf(buf, "%lu", val);
+ ret = write(fd, buf, buflen);
+ close(fd);
return ret > 0 ? 0 : -1;
}
--
1.8.3.1

View File

@ -0,0 +1,59 @@
From a4879cc4f88b560958950d9277ba0df487b145f4 Mon Sep 17 00:00:00 2001
Message-Id: <a4879cc4f88b560958950d9277ba0df487b145f4.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:36 +0530
Subject: [RHEL7 PATCH 09/31] tests: Add utility to check if huge pages are
gigantic
This adds a test utility to check if the currently selected
huge page size corresponds to that of a gigantic page.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/hugetests.h | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/tests/hugetests.h b/tests/hugetests.h
index e3179e6..bc4e16a 100644
--- a/tests/hugetests.h
+++ b/tests/hugetests.h
@@ -22,6 +22,7 @@
#include <errno.h>
#include <string.h>
+#include <unistd.h>
#include "libhugetlbfs_privutils.h"
#include "libhugetlbfs_testprobes.h"
@@ -136,6 +137,24 @@ static inline long check_hugepagesize()
return __hpage_size;
}
+static inline void check_if_gigantic_page(void)
+{
+ long page_size, hpage_size, max_order;
+ FILE *fp;
+
+ page_size = sysconf(_SC_PAGESIZE);
+ hpage_size = gethugepagesize();
+ fp = popen("cat /proc/pagetypeinfo | "
+ "awk '/Free pages count per migrate type at order/ "
+ "{print $NF}'", "r");
+ if (!fp || fscanf(fp, "%lu", &max_order) < 0)
+ FAIL("Couldn't determine max page allocation order");
+
+ pclose(fp);
+ if (hpage_size > ((1 << max_order) * page_size))
+ CONFIG("Gigantic pages are not supported");
+}
+
int using_system_hpage_size(const char *mount);
/* WARNING: Racy -- use for test cases only! */
--
1.8.3.1

View File

@ -0,0 +1,49 @@
From 2d41ec367199f9f9d4b7caf00c3be25030a7a873 Mon Sep 17 00:00:00 2001
Message-Id: <2d41ec367199f9f9d4b7caf00c3be25030a7a873.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:37 +0530
Subject: [RHEL7 PATCH 10/31] tests: counters: Skip if using gigantic huge
pages
The kernel does not allow setting an overcommit limit for
gigantic huge pages, i.e. any page size beyond the max page
allocation order. For such cases, nr_overcommit_hugepages
cannot be modified and is always zero. So, skip this test
as mmap() using a hugetlbfs file descriptor will fail when
both nr_hugepages and nr_overcommit_hugepages are zero.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/counters.c | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/tests/counters.c b/tests/counters.c
index 0284809..34b1ef3 100644
--- a/tests/counters.c
+++ b/tests/counters.c
@@ -83,7 +83,17 @@ void verify_dynamic_pool_support(void)
saved_oc_hugepages = get_huge_page_counter(hpage_size, HUGEPAGES_OC);
if (saved_oc_hugepages < 0)
FAIL("Kernel appears to lack dynamic hugetlb pool support");
- set_nr_overcommit_hugepages(hpage_size, 10);
+ if (set_nr_overcommit_hugepages(hpage_size, 10) < 0) {
+ /*
+ * In case writing to nr_overcommit_hugepages failed with the
+ * reason that it was an attempt to write an invalid argument,
+ * it might be because the page size corresponds to gigantic
+ * pages which do not support this feature.
+ */
+ if (errno == EINVAL)
+ check_if_gigantic_page();
+ FAIL("Couldn't set overcommit limit");
+ }
}
void bad_value(int line, const char *name, long expect, long actual)
--
1.8.3.1

View File

@ -0,0 +1,72 @@
From 8cc33a134681892a71a4f67397bb13a541bb463e Mon Sep 17 00:00:00 2001
Message-Id: <8cc33a134681892a71a4f67397bb13a541bb463e.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:38 +0530
Subject: [RHEL7 PATCH 11/31] hugeutils: Add utility to check if slices are
supported
This adds a utility to check if the current processor
architecture supports slices. Slices are used to divide
up a virtual address space and put certain restrictions
like on powerpc64 with Hash MMU where one can have only
one page size per slice.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
hugeutils.c | 21 +++++++++++++++++++++
libhugetlbfs_privutils.h | 3 +++
2 files changed, 24 insertions(+)
diff --git a/hugeutils.c b/hugeutils.c
index fc64946..e573622 100644
--- a/hugeutils.c
+++ b/hugeutils.c
@@ -800,6 +800,27 @@ int hpool_sizes(struct hpage_pool *pools, int pcnt)
return (which < pcnt) ? which : -1;
}
+int arch_has_slice_support(void)
+{
+#ifdef __powerpc64__
+ char mmu_type[16];
+ FILE *fp;
+
+ fp = popen("cat /proc/cpuinfo | grep MMU | awk '{ print $3}'", "r");
+ if (!fp || fscanf(fp, "%s", mmu_type) < 0) {
+ ERROR("Failed to determine MMU type\n");
+ abort();
+ }
+
+ pclose(fp);
+ return strcmp(mmu_type, "Hash") == 0;
+#elif defined(__powerpc__) && !defined(PPC_NO_SEGMENTS)
+ return 1;
+#else
+ return 0;
+#endif
+}
+
/*
* If we have a default page size then we support hugepages.
*/
diff --git a/libhugetlbfs_privutils.h b/libhugetlbfs_privutils.h
index 149e42f..8b12fed 100644
--- a/libhugetlbfs_privutils.h
+++ b/libhugetlbfs_privutils.h
@@ -53,6 +53,9 @@ int set_nr_hugepages(long pagesize, unsigned long val);
#define set_nr_overcommit_hugepages __pu_set_nr_overcommit_hugepages
int set_nr_overcommit_hugepages(long pagesize, unsigned long val);
+#define arch_has_slice_support __pu_arch_has_slice_support
+int arch_has_slice_support(void);
+
#define kernel_has_hugepages __pu_kernel_has_hugepages
int kernel_has_hugepages(void);
--
1.8.3.1

View File

@ -0,0 +1,38 @@
From 1329c4f5f4d201724d379d43dc5d516d1c9356dc Mon Sep 17 00:00:00 2001
Message-Id: <1329c4f5f4d201724d379d43dc5d516d1c9356dc.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:39 +0530
Subject: [RHEL7 PATCH 12/31] tests: brk-near-huge: Fix next chunk computation
for powerpc64
For powerpc64, the use of slices applies only to Hash MMU.
Hence, when determining the next chunk size, ensure that
the address is aligned to the slice size for Hash MMU and
the huge page size otherwise.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/brk_near_huge.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/tests/brk_near_huge.c b/tests/brk_near_huge.c
index f6d1e07..c9662f4 100644
--- a/tests/brk_near_huge.c
+++ b/tests/brk_near_huge.c
@@ -40,6 +40,9 @@
#ifdef __powerpc64__
void *next_chunk(void *addr)
{
+ if (!arch_has_slice_support())
+ return PALIGN(addr, gethugepagesize());
+
if ((unsigned long)addr < 0x100000000UL)
/* 256M segments below 4G */
return PALIGN(addr, 0x10000000UL);
--
1.8.3.1

View File

@ -0,0 +1,143 @@
From 9fe6594da91e86280c9d71877a91cee83aaedae6 Mon Sep 17 00:00:00 2001
Message-Id: <9fe6594da91e86280c9d71877a91cee83aaedae6.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Wed, 12 Jun 2019 12:34:30 +0530
Subject: [RHEL7 PATCH 13/31] elflink: Fix program header address calculation
This fixes the virtual address calculation for the ELF program
header. Based on the man page of dl_iterate_phdr(), the location
of a particular program header in virtual memory should be the
sum of the base address of the shared object and the segment's
virtual address.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
elflink.c | 40 ++++++++++++++++++++++++----------------
1 file changed, 24 insertions(+), 16 deletions(-)
diff --git a/elflink.c b/elflink.c
index ffc84dd..1150bde 100644
--- a/elflink.c
+++ b/elflink.c
@@ -374,7 +374,8 @@ static int get_shared_file_name(struct seg_info *htlb_seg_info, char *file_path)
}
/* Find the .dynamic program header */
-static int find_dynamic(Elf_Dyn **dyntab, const Elf_Phdr *phdr, int phnum)
+static int find_dynamic(Elf_Dyn **dyntab, const ElfW(Addr) addr,
+ const Elf_Phdr *phdr, int phnum)
{
int i = 1;
@@ -382,7 +383,7 @@ static int find_dynamic(Elf_Dyn **dyntab, const Elf_Phdr *phdr, int phnum)
++i;
}
if (phdr[i].p_type == PT_DYNAMIC) {
- *dyntab = (Elf_Dyn *)phdr[i].p_vaddr;
+ *dyntab = (Elf_Dyn *)(addr + phdr[i].p_vaddr);
return 0;
} else {
DEBUG("No dynamic segment found\n");
@@ -473,7 +474,8 @@ ElfW(Word) __attribute__ ((weak)) plt_extrasz(ElfW(Dyn) *dyntab)
* include these initialized variables in our copy.
*/
-static void get_extracopy(struct seg_info *seg, const Elf_Phdr *phdr, int phnum)
+static void get_extracopy(struct seg_info *seg, const ElfW(Addr) addr,
+ const Elf_Phdr *phdr, int phnum)
{
Elf_Dyn *dyntab; /* dynamic segment table */
Elf_Sym *symtab = NULL; /* dynamic symbol table */
@@ -492,7 +494,7 @@ static void get_extracopy(struct seg_info *seg, const Elf_Phdr *phdr, int phnum)
goto bail2;
/* Find dynamic program header */
- ret = find_dynamic(&dyntab, phdr, phnum);
+ ret = find_dynamic(&dyntab, addr, phdr, phnum);
if (ret < 0)
goto bail;
@@ -608,7 +610,8 @@ static unsigned long hugetlb_prev_slice_end(unsigned long addr)
/*
* Store a copy of the given program header
*/
-static int save_phdr(int table_idx, int phnum, const ElfW(Phdr) *phdr)
+static int save_phdr(int table_idx, int phnum, const ElfW(Addr) addr,
+ const ElfW(Phdr) *phdr)
{
int prot = 0;
@@ -626,7 +629,7 @@ static int save_phdr(int table_idx, int phnum, const ElfW(Phdr) *phdr)
if (phdr->p_flags & PF_X)
prot |= PROT_EXEC;
- htlb_seg_table[table_idx].vaddr = (void *) phdr->p_vaddr;
+ htlb_seg_table[table_idx].vaddr = (void *)(addr + phdr->p_vaddr);
htlb_seg_table[table_idx].filesz = phdr->p_filesz;
htlb_seg_table[table_idx].memsz = phdr->p_memsz;
htlb_seg_table[table_idx].prot = prot;
@@ -634,8 +637,8 @@ static int save_phdr(int table_idx, int phnum, const ElfW(Phdr) *phdr)
INFO("Segment %d (phdr %d): %#0lx-%#0lx (filesz=%#0lx) "
"(prot = %#0x)\n", table_idx, phnum,
- (unsigned long) phdr->p_vaddr,
- (unsigned long) phdr->p_vaddr + phdr->p_memsz,
+ (unsigned long) addr + phdr->p_vaddr,
+ (unsigned long) addr + phdr->p_vaddr + phdr->p_memsz,
(unsigned long) phdr->p_filesz, (unsigned int) prot);
return 0;
@@ -718,16 +721,19 @@ int parse_elf_normal(struct dl_phdr_info *info, size_t size, void *data)
seg_psize = segment_requested_page_size(&info->dlpi_phdr[i]);
if (seg_psize != page_size) {
- if (save_phdr(htlb_num_segs, i, &info->dlpi_phdr[i]))
+ if (save_phdr(htlb_num_segs, i, info->dlpi_addr,
+ &info->dlpi_phdr[i]))
return 1;
get_extracopy(&htlb_seg_table[htlb_num_segs],
- &info->dlpi_phdr[0], info->dlpi_phnum);
+ info->dlpi_addr, info->dlpi_phdr,
+ info->dlpi_phnum);
htlb_seg_table[htlb_num_segs].page_size = seg_psize;
htlb_num_segs++;
}
- start = ALIGN_DOWN(info->dlpi_phdr[i].p_vaddr, seg_psize);
- end = ALIGN(info->dlpi_phdr[i].p_vaddr +
- info->dlpi_phdr[i].p_memsz, seg_psize);
+ start = ALIGN_DOWN(info->dlpi_addr +
+ info->dlpi_phdr[i].p_vaddr, seg_psize);
+ end = ALIGN(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr +
+ info->dlpi_phdr[i].p_memsz, seg_psize);
segments[num_segs].page_size = seg_psize;
segments[num_segs].start = start;
@@ -771,8 +777,9 @@ int parse_elf_partial(struct dl_phdr_info *info, size_t size, void *data)
* in this forced way won't violate any contiguity
* constraints.
*/
- vaddr = hugetlb_next_slice_start(info->dlpi_phdr[i].p_vaddr);
- gap = vaddr - info->dlpi_phdr[i].p_vaddr;
+ vaddr = hugetlb_next_slice_start(info->dlpi_addr +
+ info->dlpi_phdr[i].p_vaddr);
+ gap = vaddr - (info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
slice_end = hugetlb_slice_end(vaddr);
/*
* we should stop remapping just before the slice
@@ -795,7 +802,8 @@ int parse_elf_partial(struct dl_phdr_info *info, size_t size, void *data)
}
memsz = hugetlb_prev_slice_end(vaddr + memsz) - vaddr;
- if (save_phdr(htlb_num_segs, i, &info->dlpi_phdr[i]))
+ if (save_phdr(htlb_num_segs, i, info->dlpi_addr,
+ &info->dlpi_phdr[i]))
return 1;
/*
--
1.8.3.1

View File

@ -0,0 +1,64 @@
From 5022d5f86d02882a11700825258ecdba8dee683c Mon Sep 17 00:00:00 2001
Message-Id: <5022d5f86d02882a11700825258ecdba8dee683c.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Wed, 12 Jun 2019 12:34:31 +0530
Subject: [RHEL7 PATCH 14/31] elflink: powerpc64: Use slices based on MMU type
For powerpc64, the concept of slices is not applicable to the
recently introduced Radix MMU. So, slice boundaries should be
calculated based on the MMU type.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
elflink.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/elflink.c b/elflink.c
index 1150bde..a6bd44c 100644
--- a/elflink.c
+++ b/elflink.c
@@ -569,6 +569,10 @@ bail2:
*/
static unsigned long hugetlb_slice_start(unsigned long addr)
{
+ if (!arch_has_slice_support()) {
+ return ALIGN_DOWN(addr, gethugepagesize());
+ }
+
#if defined(__powerpc64__)
if (addr < SLICE_LOW_TOP)
return ALIGN_DOWN(addr, SLICE_LOW_SIZE);
@@ -578,13 +582,15 @@ static unsigned long hugetlb_slice_start(unsigned long addr)
return ALIGN_DOWN(addr, SLICE_HIGH_SIZE);
#elif defined(__powerpc__) && !defined(PPC_NO_SEGMENTS)
return ALIGN_DOWN(addr, SLICE_LOW_SIZE);
-#else
- return ALIGN_DOWN(addr, gethugepagesize());
#endif
}
static unsigned long hugetlb_slice_end(unsigned long addr)
{
+ if (!arch_has_slice_support()) {
+ return ALIGN_UP(addr, gethugepagesize()) - 1;
+ }
+
#if defined(__powerpc64__)
if (addr < SLICE_LOW_TOP)
return ALIGN_UP(addr, SLICE_LOW_SIZE) - 1;
@@ -592,8 +598,6 @@ static unsigned long hugetlb_slice_end(unsigned long addr)
return ALIGN_UP(addr, SLICE_HIGH_SIZE) - 1;
#elif defined(__powerpc__) && !defined(PPC_NO_SEGMENTS)
return ALIGN_UP(addr, SLICE_LOW_SIZE) - 1;
-#else
- return ALIGN_UP(addr, gethugepagesize()) - 1;
#endif
}
--
1.8.3.1

View File

@ -0,0 +1,62 @@
From adb3feea5dde087d7bb8017e5b8da2da548473bf Mon Sep 17 00:00:00 2001
Message-Id: <adb3feea5dde087d7bb8017e5b8da2da548473bf.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Wed, 12 Jun 2019 12:34:32 +0530
Subject: [RHEL7 PATCH 15/31] ld.hugetlbfs: powerpc64: Add support for
different huge page sizes
This ensures that the page and slice sizes are determined by
looking at the default huge page size and MMU type rather than
having them hardcoded.
This is important because powerpc64 supports different huge
page sizes based on the MMU type. Hash MMU supports 16MB and
16GB whereas Radix MMU supports 2MB and 1GB.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
ld.hugetlbfs | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/ld.hugetlbfs b/ld.hugetlbfs
index 388f7b4..6ee8238 100755
--- a/ld.hugetlbfs
+++ b/ld.hugetlbfs
@@ -105,8 +105,16 @@ fi
MB=$((1024*1024))
case "$EMU" in
-elf32ppclinux|elf64ppc) HPAGE_SIZE=$((16*$MB)) SLICE_SIZE=$((256*$MB)) ;;
-elf64lppc) HPAGE_SIZE=$((16*$MB)) SLICE_SIZE=$((256*$MB)) ;;
+elf32ppclinux) HPAGE_SIZE=$((16*$MB)) SLICE_SIZE=$((256*$MB)) ;;
+elf64ppc|elf64lppc)
+ hpage_kb=$(cat /proc/meminfo | grep Hugepagesize: | awk '{print $2}')
+ MMU_TYPE=$(cat /proc/cpuinfo | grep MMU | awk '{ print $3}')
+ HPAGE_SIZE=$((hpage_kb * 1024))
+ if [ "$MMU_TYPE" == "Hash" ] ; then
+ SLICE_SIZE=$((256*$MB))
+ else
+ SLICE_SIZE=$HPAGE_SIZE
+ fi ;;
elf_i386|elf_x86_64) HPAGE_SIZE=$((4*$MB)) SLICE_SIZE=$HPAGE_SIZE ;;
elf_s390|elf64_s390) HPAGE_SIZE=$((1*$MB)) SLICE_SIZE=$HPAGE_SIZE ;;
armelf*_linux_eabi|aarch64elf*|aarch64linux*)
@@ -124,6 +132,11 @@ if [ "$HTLB_ALIGN" == "slice" ]; then
case "$EMU" in
armelf*_linux_eabi|aarch64elf*|aarch64linux*) HTLBOPTS="$HTLBOPTS -Ttext-segment=$SLICE_SIZE" ;;
elf_i386) HTLBOPTS="$HTLBOPTS -Ttext-segment=0x08000000" ;;
+ elf64ppc|elf64lppc)
+ if [ "$MMU_TYPE" == "Hash" ] ; then
+ printf -v TEXTADDR "%x" "$SLICE_SIZE"
+ HTLBOPTS="$HTLBOPTS -Ttext-segment=$TEXTADDR"
+ fi ;;
esac
fi
--
1.8.3.1

View File

@ -0,0 +1,102 @@
From 4dfdd96a6b4bd019210c9a44de42369aae772b98 Mon Sep 17 00:00:00 2001
Message-Id: <4dfdd96a6b4bd019210c9a44de42369aae772b98.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Wed, 12 Jun 2019 12:34:33 +0530
Subject: [RHEL7 PATCH 16/31] elflink: tests: Run tests only for default huge
page size
The elflink tests (linkhuge, linkhuge-nofd, linkhuge-rw and
linkshare) are usually linked in a way that ensures that the
ELF segment boundaries are aligned to the kernel's default
huge page size. Hence, run these tests only for the kernel's
default huge page size as the program segments will not be
remapped otherwise.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 41 ++++++++++++++++++++++++++---------------
1 file changed, 26 insertions(+), 15 deletions(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 70c5a6a..94000ea 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -376,10 +376,11 @@ def do_elflink_test(cmd, **env):
"""
Run an elflink test case, skipping known-bad configurations.
"""
- for p in pagesizes:
- for b in wordsizes_by_pagesize[p]:
- if b in linkhuge_wordsizes: run_test(p, b, cmd, **env)
- else: skip_test(p, b, cmd, **env)
+ for b in wordsizes_by_pagesize[system_default_hpage_size]:
+ if b in linkhuge_wordsizes:
+ run_test(system_default_hpage_size, b, cmd, **env)
+ else:
+ skip_test(system_default_hpage_size, b, cmd, **env)
def elflink_test(cmd, **env):
"""
@@ -388,9 +389,10 @@ def elflink_test(cmd, **env):
Test various combinations of: preloading libhugetlbfs, B vs. BDT link
modes, minimal copying on or off, and disabling segment remapping.
"""
- do_test(cmd, **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd, **env)
# Test we don't blow up if not linked for hugepage
- do_test(cmd, LD_PRELOAD="libhugetlbfs.so", **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ LD_PRELOAD="libhugetlbfs.so", **env)
# Only run custom ldscript tests when -l option is set
if not custom_ldscripts:
@@ -412,16 +414,23 @@ def elflink_rw_test(cmd, **env):
Test various combinations of: remapping modes and minimal copy on or off.
"""
# Basic tests: None, Read-only, Write-only, Read-Write, exlicit disable
- do_test(cmd, **env)
- do_test(cmd, HUGETLB_ELFMAP="R", **env)
- do_test(cmd, HUGETLB_ELFMAP="W", **env)
- do_test(cmd, HUGETLB_ELFMAP="RW", **env)
- do_test(cmd, HUGETLB_ELFMAP="no", **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd, **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ HUGETLB_ELFMAP="R", **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ HUGETLB_ELFMAP="W", **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ HUGETLB_ELFMAP="RW", **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ HUGETLB_ELFMAP="no", **env)
# Test we don't blow up if HUGETLB_MINIMAL_COPY is disabled
- do_test(cmd, HUGETLB_MINIMAL_COPY="no", HUGETLB_ELFMAP="R", **env)
- do_test(cmd, HUGETLB_MINIMAL_COPY="no", HUGETLB_ELFMAP="W", **env)
- do_test(cmd, HUGETLB_MINIMAL_COPY="no", HUGETLB_ELFMAP="RW", **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ HUGETLB_MINIMAL_COPY="no", HUGETLB_ELFMAP="R", **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ HUGETLB_MINIMAL_COPY="no", HUGETLB_ELFMAP="W", **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ HUGETLB_MINIMAL_COPY="no", HUGETLB_ELFMAP="RW", **env)
def elfshare_test(cmd, **env):
"""
@@ -458,7 +467,9 @@ def elflink_rw_and_share_test(cmd, **env):
clear_hpages()
for mode in ("R", "W", "RW"):
for i in range(2):
- do_test(cmd, HUGETLB_ELFMAP=mode, HUGETLB_SHARE=repr(i), **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ HUGETLB_ELFMAP=mode, HUGETLB_SHARE=repr(i),
+ **env)
clear_hpages()
def setup_shm_sysctl(limit):
--
1.8.3.1

View File

@ -0,0 +1,73 @@
From 421dbc6d9dfc66f249dde787a69327d22979ca74 Mon Sep 17 00:00:00 2001
Message-Id: <421dbc6d9dfc66f249dde787a69327d22979ca74.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Fri, 16 Aug 2019 11:45:07 +0530
Subject: [RHEL7 PATCH 17/31] tests: Update utility to get free and total huge
pages by size
This makes the utilities to get the number of free and total
huge pages multi-size aware. If a page size is specified, they
will return counts corresponding to that. Otherwise, they will
return counts for the kernel's default huge page size.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 30 ++++++++++++++++++------------
1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 94000ea..f19024f 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -172,26 +172,32 @@ def results_summary():
print_per_size("Strange test result", R["strange"])
print "**********"
-def free_hpages():
+def free_hpages(size=None):
"""
- Return the number of free huge pages.
+ Return the number of free huge pages for a given size. If size is not
+ passed, use the default huge page size.
- Parse /proc/meminfo to obtain the number of free huge pages for
- the default page size.
- XXX: This function is not multi-size aware yet.
+ Parse /sys/kernel/mm/hugepages/hugepages-<size-in-kB>/free_hugepages to
+ obtain the number of free huge pages for the given page size.
"""
- (rc, out) = bash("grep 'HugePages_Free:' /proc/meminfo | cut -f2 -d:")
+ if size == None: size = system_default_hpage_size
+ size_kb = size / 1024
+ cmd = "cat /sys/kernel/mm/hugepages/hugepages-%dkB/free_hugepages" % size_kb
+ (rc, out) = bash(cmd)
return (rc, int(out))
-def total_hpages():
+def total_hpages(size=None):
"""
- Return the total number of huge pages in the pool.
+ Return the total number of huge pages in the pool for a given size. If
+ size is not passed, use the default huge page size.
- Parse /proc/meminfo to obtain the number of huge pages for the default
- page size.
- XXX: This function is not multi-size aware yet.
+ Parse /sys/kernel/mm/hugepages/hugepages-<size-in-kB>/nr_hugepages to
+ obtain the number of huge pages for the given page size.
"""
- (rc, out) = bash("grep 'HugePages_Total:' /proc/meminfo | cut -f2 -d:")
+ if size == None: size = system_default_hpage_size
+ size_kb = size / 1024
+ cmd = "cat /sys/kernel/mm/hugepages/hugepages-%dkB/nr_hugepages" % size_kb
+ (rc, out) = bash(cmd)
return (rc, int(out))
def hpage_size():
--
1.8.3.1

View File

@ -0,0 +1,54 @@
From d228c0e688e7a0771d30457d21b38d745cea63bf Mon Sep 17 00:00:00 2001
Message-Id: <d228c0e688e7a0771d30457d21b38d745cea63bf.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Fri, 16 Aug 2019 11:45:08 +0530
Subject: [RHEL7 PATCH 18/31] mmap: tests: Run tests with correct huge page
count
This ensures that the mmap-gettest and mmap-cow tests are run
with the correct count of free huge pages. Previously, it was
always using the free page count for the default huge page size
for all huge page sizes. Since these counts can differ, trying
to get more pages via mmap() than what is available in the pool
can make these tests fail.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index f19024f..b132da2 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -697,14 +697,19 @@ def stress_tests():
# Don't update NRPAGES every time like above because we want to catch the
# failures that happen when the kernel doesn't release all of the huge pages
# after a stress test terminates
- (rc, nr_pages) = free_hpages()
+ nr_pages = {p: free_hpages(p)[1] for p in pagesizes}
- do_test(("mmap-gettest", repr(iterations), repr(nr_pages)))
+ for p in pagesizes:
+ cmd = ("mmap-gettest", repr(iterations), repr(nr_pages[p]))
+ do_test_with_pagesize(p, cmd)
- # mmap-cow needs a hugepages for each thread plus one extra
- do_test(("mmap-cow", repr(nr_pages-1), repr(nr_pages)))
+ for p in pagesizes:
+ # mmap-cow needs a hugepage for each thread plus one extra
+ cmd = ("mmap-cow", repr(nr_pages[p]-1), repr(nr_pages[p]))
+ do_test_with_pagesize(p, cmd)
(rc, tot_pages) = total_hpages()
+ nr_pages = nr_pages[system_default_hpage_size]
limit = system_default_hpage_size * tot_pages
threads = 10 # Number of threads for shm-fork
--
1.8.3.1

View File

@ -0,0 +1,32 @@
From 4326f49e3c3246443b52f319cefbc3d296e09e64 Mon Sep 17 00:00:00 2001
Message-Id: <4326f49e3c3246443b52f319cefbc3d296e09e64.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sat, 17 Aug 2019 20:59:43 +1000
Subject: [RHEL7 PATCH 19/31] Be explicit about using Python2 in the test
script
Since Python2 is now end-of-life, distros are increasingly not having bare
"python" refer to the Python2 interpreter.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index b132da2..721c1af 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/python2
import subprocess
import types
--
1.8.3.1

View File

@ -1,23 +1,33 @@
From 85b75e22bf685948f417044676de42f2da66a902 Mon Sep 17 00:00:00 2001
Message-Id: <85b75e22bf685948f417044676de42f2da66a902.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sat, 17 Aug 2019 20:59:44 +1000
Subject: [RHEL7 PATCH 20/31] Switch test runner script to print function
This is the more modern Python style, and reduces difference to Python3.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 84 ++++++++++++++++++++++++++++--------------------------
1 file changed, 44 insertions(+), 40 deletions(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 617ed93..036cef5 100755
index 721c1af..47eb183 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python3
@@ -1,5 +1,7 @@
#! /usr/bin/python2
+from __future__ import print_function
+
import subprocess
import types
@@ -47,7 +47,7 @@ def bash(cmd):
# Abort and mark this a strange test result
return (127, "")
out = p.stdout.read().strip()
- return (rc, out)
+ return (rc, out.decode())
def snapshot_pool_state():
l = []
@@ -60,7 +60,7 @@ def snapshot_pool_state():
import os
@@ -60,7 +62,7 @@ def snapshot_pool_state():
def run_test_prog(bits, pagesize, cmd, **env):
if paranoid_pool_check:
beforepool = snapshot_pool_state()
@ -26,16 +36,7 @@ index 617ed93..036cef5 100755
local_env = os.environ.copy()
local_env.update(env)
@@ -75,7 +75,7 @@ def run_test_prog(bits, pagesize, cmd, **env):
rc = p.wait()
except KeyboardInterrupt:
# Abort and mark this a strange test result
- return (None, "")
+ return (127, "")
except OSError as e:
return (-e.errno, "")
out = p.stdout.read().strip()
@@ -83,26 +83,26 @@ def run_test_prog(bits, pagesize, cmd, **env):
@@ -83,9 +85,9 @@ def run_test_prog(bits, pagesize, cmd, **env):
if paranoid_pool_check:
afterpool = snapshot_pool_state()
if afterpool != beforepool:
@ -47,37 +48,17 @@ index 617ed93..036cef5 100755
+ print("AFTER: %s" % str(afterpool), file=sys.stderr)
sys.exit(98)
- return (rc, out)
+ return (rc, out.decode())
def setup_env(override, defaults):
"""
Set up the environment for running commands in the shell.
"""
# All items in override are unconditionally set or unset
- for (var, val) in override.items():
+ for (var, val) in list(override.items()):
if val == None:
if var in os.environ:
del os.environ[var]
else:
os.environ[var] = val
# If not already set, these variables are given default values
- for (var, val) in defaults.items():
+ for (var, val) in list(defaults.items()):
if var not in os.environ or os.environ[var] == "":
os.environ[var] = val
@@ -143,22 +143,22 @@ def print_per_size(title, values):
return (rc, out)
@@ -143,22 +145,24 @@ def print_per_size(title, values):
Print the results of a given result type on one line. The results for all
page sizes and word sizes are written in a table format.
"""
- print "*%20s: " % title,
+ print("*%20s: " % title, end=' ')
+ print("*%20s: " % title, end=" ")
for sz in pagesizes:
- print "%4s %4s " % (values[sz][32], values[sz][64]),
- print
+ print("%4s %4s " % (values[sz][32], values[sz][64]), end=' ')
+ print("%4s %4s " % (values[sz][32], values[sz][64]), end="")
+ print()
def results_summary():
@ -92,40 +73,33 @@ index 617ed93..036cef5 100755
- for p in pagesizes: print "32-bit 64-bit ",
- print
+ print("********** TEST SUMMARY")
+ print("*%21s" % "", end=' ')
+ for p in pagesizes: print("%-13s " % pretty_page_size(p), end=' ')
+ print("*%21s" % "", end=" ")
+ for p in pagesizes:
+ print("%-13s " % pretty_page_size(p), end="")
+ print()
+ print("*%21s" % "", end=' ')
+ for p in pagesizes: print("32-bit 64-bit ", end=' ')
+ print("*%21s" % "", end=" ")
+ for p in pagesizes:
+ print("32-bit 64-bit ", end="")
+ print()
print_per_size("Total testcases", R["total"])
print_per_size("Skipped", R["skip"])
@@ -170,7 +170,7 @@ def results_summary():
@@ -170,7 +174,7 @@ def results_summary():
print_per_size("Unexpected PASS", R["xpass"])
print_per_size("Test not present", R["nofile"])
print_per_size("Strange test result", R["strange"])
- print "**********"
+ print("**********")
def free_hpages():
def free_hpages(size=None):
"""
@@ -216,7 +216,7 @@ def clear_hpages():
cleaned up automatically and must be removed to free up the huge pages.
"""
for mount in mounts:
- dir = mount + "/elflink-uid-" + `os.getuid()`
+ dir = mount + "/elflink-uid-" + repr(os.getuid())
for root, dirs, files in os.walk(dir, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
@@ -270,13 +270,13 @@ def check_hugetlbfs_path():
@@ -276,13 +280,13 @@ def check_hugetlbfs_path():
okbits.append(b)
mounts.append(out)
if len(okbits) == 0:
- print "run_tests.py: No mountpoints available for page size %s" % \
- pretty_page_size(p)
+ print("run_tests.py: No mountpoints available for page size %s" % \
+ print("run_tests.py: No mountpoints available for page size %s" %
+ pretty_page_size(p))
wordsizes_by_pagesize[p] = set()
continue
@ -137,21 +111,20 @@ index 617ed93..036cef5 100755
wordsizes_by_pagesize[p] = set(okbits)
def check_linkhuge_tests():
@@ -298,10 +298,9 @@ def check_linkhuge_tests():
@@ -304,10 +308,10 @@ def check_linkhuge_tests():
def print_cmd(pagesize, bits, cmd, env):
if env:
- print ' '.join(['%s=%s' % (k, v) for k, v in env.items()]),
- if type(cmd) != types.StringType:
- cmd = ' '.join(cmd)
+ print(' '.join(['%s=%s' % (k, v) for k, v in env.items()]), end=" ")
if type(cmd) != types.StringType:
cmd = ' '.join(cmd)
- print "%s (%s: %i):\t" % (cmd, pretty_page_size(pagesize), bits),
+ print(' '.join(['%s=%s' % (k, v) for k, v in list(env.items())]), end=' ')
+
+ print("%s (%s: %i):\t" % (cmd, pretty_page_size(pagesize), bits), end=' ')
+ print("%s (%s: %i):\t" % (cmd, pretty_page_size(pagesize), bits), end="")
sys.stdout.flush()
def run_test(pagesize, bits, cmd, **env):
@@ -321,7 +320,7 @@ def run_test(pagesize, bits, cmd, **env):
@@ -327,7 +331,7 @@ def run_test(pagesize, bits, cmd, **env):
print_cmd(pagesize, bits, cmd, env)
(rc, out) = run_test_prog(bits, pagesize, cmd, **env)
@ -160,7 +133,7 @@ index 617ed93..036cef5 100755
R["total"][pagesize][bits] += 1
if rc == 0: R["pass"][pagesize][bits] += 1
@@ -342,7 +341,7 @@ def skip_test(pagesize, bits, cmd, **env):
@@ -348,7 +352,7 @@ def skip_test(pagesize, bits, cmd, **env):
R["total"][pagesize][bits] += 1
R["skip"][pagesize][bits] += 1
print_cmd(pagesize, bits, cmd, env)
@ -169,28 +142,16 @@ index 617ed93..036cef5 100755
def do_test(cmd, bits=None, **env):
"""
@@ -466,9 +465,9 @@ def setup_shm_sysctl(limit):
sysctls[f] = fh.read()
fh.close()
@@ -495,7 +499,7 @@ def setup_shm_sysctl(limit):
fh = open(f, "w")
- fh.write(`limit`)
+ fh.write(repr(limit))
fh.write(`limit`)
fh.close()
- print "set shmmax limit to %s" % limit
+ print("set shmmax limit to %s" % limit)
return sysctls
def restore_shm_sysctl(sysctls):
@@ -476,7 +475,7 @@ def restore_shm_sysctl(sysctls):
Restore the sysctls named in 'sysctls' to the given values.
"""
if os.getuid() != 0: return
- for (file, val) in sysctls.items():
+ for (file, val) in list(sysctls.items()):
fh = open(file, "w")
fh.write(val)
fh.close()
@@ -659,17 +658,17 @@ def stress_tests():
@@ -725,17 +729,17 @@ def stress_tests():
do_test("fallocate_stress.sh")
def print_help():
@ -219,18 +180,16 @@ index 617ed93..036cef5 100755
sys.exit(0)
def main():
@@ -685,8 +684,8 @@ def main():
@@ -752,7 +756,7 @@ def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "vVft:b:p:c:lh")
- except getopt.GetoptError, err:
except getopt.GetoptError, err:
- print str(err)
+ except getopt.GetoptError as err:
+ print(str(err))
sys.exit(1)
for opt, arg in opts:
if opt == "-v":
@@ -715,8 +714,8 @@ def main():
@@ -781,8 +785,8 @@ def main():
if len(pagesizes) == 0: pagesizes = get_pagesizes()
if len(pagesizes) == 0:
@ -241,7 +200,7 @@ index 617ed93..036cef5 100755
return 1
setup_env(env_override, env_defaults)
@@ -724,8 +723,8 @@ def main():
@@ -790,8 +794,8 @@ def main():
(rc, system_default_hpage_size) = hpage_size()
if rc != 0:
@ -252,3 +211,6 @@ index 617ed93..036cef5 100755
return 1
check_hugetlbfs_path()
--
1.8.3.1

View File

@ -0,0 +1,43 @@
From 5246d996e621274a2cc22282451bb60c10d59227 Mon Sep 17 00:00:00 2001
Message-Id: <5246d996e621274a2cc22282451bb60c10d59227.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sat, 17 Aug 2019 20:59:45 +1000
Subject: [RHEL7 PATCH 21/31] Remove backtick operator from test runner script
The `` operator doesn't exist in Python3, so remove it to avoid future
porting problems.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 47eb183..13a404a 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -226,7 +226,7 @@ def clear_hpages():
cleaned up automatically and must be removed to free up the huge pages.
"""
for mount in mounts:
- dir = mount + "/elflink-uid-" + `os.getuid()`
+ dir = mount + "/elflink-uid-" + repr(os.getuid())
for root, dirs, files in os.walk(dir, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
@@ -497,7 +497,7 @@ def setup_shm_sysctl(limit):
sysctls[f] = fh.read()
fh.close()
fh = open(f, "w")
- fh.write(`limit`)
+ fh.write(repr(limit))
fh.close()
print("set shmmax limit to %s" % limit)
return sysctls
--
1.8.3.1

View File

@ -0,0 +1,35 @@
From 2f88d3a2b29f181e744cc59f5e0889588f67588f Mon Sep 17 00:00:00 2001
Message-Id: <2f88d3a2b29f181e744cc59f5e0889588f67588f.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sat, 17 Aug 2019 20:59:46 +1000
Subject: [RHEL7 PATCH 22/31] tests: Avoid old-style except syntax in the test
runner script
The "except Foo as bar" syntax is the modern style and will be easier to
port to Python3.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 13a404a..f812923 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -755,7 +755,7 @@ def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "vVft:b:p:c:lh")
- except getopt.GetoptError, err:
+ except getopt.GetoptError as err:
print(str(err))
sys.exit(1)
for opt, arg in opts:
--
1.8.3.1

View File

@ -0,0 +1,35 @@
From e5f91fcc3e6bd0a610e47e51891f4c1669d2f8b1 Mon Sep 17 00:00:00 2001
Message-Id: <e5f91fcc3e6bd0a610e47e51891f4c1669d2f8b1.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sat, 17 Aug 2019 20:59:47 +1000
Subject: [RHEL7 PATCH 23/31] tests: Avoid explicit type() comparison in runner
script
Using isinstance() is the more modern idiom, and won't cause complications
in porting to Python3.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index f812923..e2025fe 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -309,7 +309,7 @@ def check_linkhuge_tests():
def print_cmd(pagesize, bits, cmd, env):
if env:
print(' '.join(['%s=%s' % (k, v) for k, v in env.items()]), end=" ")
- if type(cmd) != types.StringType:
+ if not isinstance(cmd, str):
cmd = ' '.join(cmd)
print("%s (%s: %i):\t" % (cmd, pretty_page_size(pagesize), bits), end="")
sys.stdout.flush()
--
1.8.3.1

View File

@ -0,0 +1,62 @@
From 3482dcfe74102da1e2d95d8adbc29940c06b1fef Mon Sep 17 00:00:00 2001
Message-Id: <3482dcfe74102da1e2d95d8adbc29940c06b1fef.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sat, 17 Aug 2019 20:59:48 +1000
Subject: [RHEL7 PATCH 24/31] tests: Explicitly decode subprocess output
The output we get from subprocesses is logically a sequence of bytes, but
we want to treat it as Python strings, which means decoding it into Unicode
based on some encoding.
In Python2 we can get away with skipping that step, but in Python3 we won't
be able to. So, to get ready, add an explicit decode() step, using the
system default encoding (probably UTF-8 in most cases).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index e2025fe..79e0385 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -48,7 +48,7 @@ def bash(cmd):
except KeyboardInterrupt:
# Abort and mark this a strange test result
return (127, "")
- out = p.stdout.read().strip()
+ out = p.stdout.read().decode().strip()
return (rc, out)
def snapshot_pool_state():
@@ -80,7 +80,7 @@ def run_test_prog(bits, pagesize, cmd, **env):
return (None, "")
except OSError as e:
return (-e.errno, "")
- out = p.stdout.read().strip()
+ out = p.stdout.read().decode().strip()
if paranoid_pool_check:
afterpool = snapshot_pool_state()
@@ -247,9 +247,11 @@ def get_pagesizes():
sizes = set()
out = ""
(rc, out) = bash("../obj/hugeadm --page-sizes")
- if rc != 0 or out == "": return sizes
+ if rc != 0 or out == "":
+ return sizes
- for size in out.split("\n"): sizes.add(int(size))
+ for size in out.split("\n"):
+ sizes.add(int(size))
return sizes
def get_wordsizes():
--
1.8.3.1

View File

@ -0,0 +1,41 @@
From b9b3e12c7be2c5a9ff67b3cdaad8679dbd1fe938 Mon Sep 17 00:00:00 2001
Message-Id: <b9b3e12c7be2c5a9ff67b3cdaad8679dbd1fe938.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sat, 17 Aug 2019 20:59:49 +1000
Subject: [RHEL7 PATCH 25/31] tests: Use modern style division in runner script
This is the current norm and will reduce changes for moving to Python3.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 79e0385..2847417 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -1,6 +1,7 @@
#! /usr/bin/python2
from __future__ import print_function
+from __future__ import division
import subprocess
import types
@@ -723,7 +724,7 @@ def stress_tests():
# This is to catch off-by-ones or races in the kernel allocated that
# can make allocating all hugepages a problem
if nr_pages > 1:
- do_shm_test(("shm-fork", repr(threads), repr(nr_pages / 2)), limit)
+ do_shm_test(("shm-fork", repr(threads), repr(nr_pages // 2)), limit)
do_shm_test(("shm-fork", repr(threads), repr(nr_pages)), limit)
do_shm_test(("shm-getraw", repr(nr_pages), "/dev/full"), limit)
--
1.8.3.1

View File

@ -0,0 +1,34 @@
From 9380eba133bcc941437e2b0d664f550f6854d63b Mon Sep 17 00:00:00 2001
Message-Id: <9380eba133bcc941437e2b0d664f550f6854d63b.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sat, 17 Aug 2019 20:59:50 +1000
Subject: [RHEL7 PATCH 26/31] tests: Switch test runner to Python3
Python2 has been end-of-life for a while now, and some distros are no
longer installing it by default.
Previous cleanups mean the script is now both valid Python2 and Python3,
so we can simply change the interpreter.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 2847417..018264d 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -1,4 +1,4 @@
-#! /usr/bin/python2
+#! /usr/bin/python3
from __future__ import print_function
from __future__ import division
--
1.8.3.1

View File

@ -0,0 +1,115 @@
From 96efdf51429812ec9b09f5ddb6ff24c80719e628 Mon Sep 17 00:00:00 2001
Message-Id: <96efdf51429812ec9b09f5ddb6ff24c80719e628.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sun, 18 Aug 2019 16:03:21 +1000
Subject: [RHEL7 PATCH 27/31] tests: Improve TASK_SIZE detection in
task-size-overrun
task-size-overrun is designed to test kernel behaviour in some edge cases
involving making a hugepage mapping right near the address space limits.
In order to do that, it needs to know the TASK_SIZE of the kernel it's
running on.
Currently it does that with a linear search from the last extant mapping.
But with kernels supporting a very large address space that can take
prohibitively long. We've had problems with that before, resulting in some
hacks to skip a large chunk of address space.
Those hacks are dependent on platform, though, which is ugly and fragile.
Case in point, recent powerpc kernels now support a 4PiB address space,
so the logic we have there is insufficient to finish the search in
reasonable time.
To handle this in a more robust way, this replaces the linear search with
a binary search between the last extant mapping and (2^wordsize).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/task-size-overrun.c | 57 +++++++++++++++++++++++------------------------
1 file changed, 28 insertions(+), 29 deletions(-)
diff --git a/tests/task-size-overrun.c b/tests/task-size-overrun.c
index 914ef65..29b6045 100644
--- a/tests/task-size-overrun.c
+++ b/tests/task-size-overrun.c
@@ -64,45 +64,44 @@ static unsigned long find_last_mapped(void)
return end;
}
+#define ALIGN_DOWN(x,a) ((x) & ~((a) - 1))
+
static unsigned long find_task_size(void)
{
- unsigned long addr;
+ unsigned long low, high; /* PFNs */
void *p;
- addr = find_last_mapped();
- if (!addr || ((addr % getpagesize()) != 0))
- FAIL("Bogus stack end address, 0x%lx!?", addr);
+ low = find_last_mapped();
+ if (!low || ((low % getpagesize()) != 0))
+ FAIL("Bogus stack end address, 0x%lx!?", low);
+ low = low / getpagesize();
+
+ /* This sum should get us (2^(wordsize) - 2 pages) */
+ high = (unsigned long)(-2 * getpagesize()) / getpagesize();
+
+ verbose_printf("Binary searching for task size PFNs 0x%lx..0x%lx\n",
+ low, high);
+
+ while (high > low + 1) {
+ unsigned long pfn = (low + high) / 2;
+ unsigned long addr = pfn * getpagesize();
+
+ assert((pfn >= low) && (pfn <= high));
- while (addr) {
p = mmap64((void *)addr, getpagesize(), PROT_READ,
MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
if (p == MAP_FAILED) {
- verbose_printf("Searching map failed: %s\n", strerror(errno));
- return addr;
+ verbose_printf("Map failed at 0x%lx (%s)\n",
+ addr, strerror(errno));
+ high = pfn;
+ } else {
+ verbose_printf("Map succeeded at 0x%lx\n", addr);
+ munmap(p, getpagesize());
+ low = pfn;
}
- munmap(p, getpagesize());
- addr += getpagesize();
-#if defined(__powerpc64__)
- if (addr > (1UL << 46) && addr < (1UL << 47))
- addr = 1UL << 47; /* 64TB */
- else if (addr > (1UL << 47) && addr < (1UL << 48))
- addr = 1UL << 48; /* 128TB */
- else if (addr > (1UL << 48) && addr < (1UL << 49))
- addr = 1UL << 49; /* 512TB */
- else if (addr > (1UL << 49) && addr < (1UL << 50))
- addr = 1UL << 50; /* 1PB */
- else if (addr > (1UL << 50) && addr < (1UL << 51))
- addr = 1UL << 51; /* 2PB */
- else if (addr > (1UL << 51) && addr < (1UL << 52))
- addr = 1UL << 52; /* 4PB */
-#endif
-#if defined(__s390x__)
- if (addr > (1UL << 42) && addr < (1UL << 53))
- addr = 1UL << 53;
-#endif
}
- /* addr wrapped around */
- return 0;
+
+ return low * getpagesize();
}
int main(int argc, char *argv[])
--
1.8.3.1

View File

@ -0,0 +1,143 @@
From 0d29e25727e5e112de48ea2d4efbd99d378ba3ed Mon Sep 17 00:00:00 2001
Message-Id: <0d29e25727e5e112de48ea2d4efbd99d378ba3ed.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Eric B Munson <eric@munsonfam.org>
Date: Sat, 17 Aug 2019 13:59:58 -0400
Subject: [RHEL7 PATCH 28/31] Remove man page for cpupcstat
This script was deleted some time ago, remove the man page.
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
man/cpupcstat.8 | 117 --------------------------------------------------------
1 file changed, 117 deletions(-)
delete mode 100644 man/cpupcstat.8
diff --git a/man/cpupcstat.8 b/man/cpupcstat.8
deleted file mode 100644
index d84a726..0000000
--- a/man/cpupcstat.8
+++ /dev/null
@@ -1,117 +0,0 @@
-.\" Hey, EMACS: -*- nroff -*-
-.\" First parameter, NAME, should be all caps
-.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection
-.\" other parameters are allowed: see man(7), man(1)
-.TH CPUCPSTAT 8 "9 June, 2009"
-.\" Please adjust this date whenever revising the manpage.
-.\"
-.\" Some roff macros, for reference:
-.\" .nh disable hyphenation
-.\" .hy enable hyphenation
-.\" .ad l left justify
-.\" .ad b justify to both left and right margins
-.\" .nf disable filling
-.\" .fi enable filling
-.\" .br insert line break
-.\" .sp <n> insert n+1 empty lines
-.\" for manpage-specific macros, see man(7)
-.SH NAME
-cpupcstat \- Measure the DTLB miss rate
-.SH SYNOPSIS
-.B cpupcstat [options] [target]
-.SH DESCRIPTION
-\fBcpupcstat\fP uses oprofile to measure the DTLB miss rate of a
-specified application or the kernel. It configures oprofile to count the
-number of DTLB misses, optionally starts the \fBtarget\fP, and reports on the
-miss rate over a specified interval as \fBtarget\fP executes.
-
-The following options can be used to configure how \fBcpupcstat\fP works:
-
-.TP
-.B --vmlinux </path/to/vmlinux>
-
-This allows the user to specify where the appropriate vmlinux file is for their
-kernel. If this is not specified, /boot/vmlinux\-\`uname \-r\` will be used.
-
-.TP
-.B --delay <seconds>
-
-This allows the user to specify the reporting interval. The default is 10
-seconds.
-
-.TP
-.B --target-global
-
-Gather statistics for all processes and the kernel running in the system.
-
-.TP
-.B --target-pid <pid>
-
-This allows the user to specify the pid of a process already that is already
-running. If this option is specified, \fBtarget\fP will be ignored.
-
-.TP
-.B --real-target <real-target>
-
-Use this to specify the real name of the program to monitor if the \fBtarget\fP
-is a launcher script. When this is specified, \fBtarget\fP is executed but the
-report will be for \fBreal-target\fP.
-
-.TP
-.B --time-limit <sec>
-
-This option sets the time limit for monitoring. If this is specified the
-\fBtarget\fP or \fBpid\fP will only be monitored for \fBsec\fP seconds. The
-default continues monitoring while \fBtarget\fP or \fBpid\fP are still alive.
-
-.TP
-.B --kernel
-
-This allows the user to request DTLB miss rate data be collected for the kernel
-as well as the \fBtarget\fP.
-
-.TP
-.B --misses-per-instruction
-
-This option requests that the ratio of instructions retired per TLB miss.
-
-.TP
-.B --misses-per-cycle
-
-This option requests that the ratio of CPU cycles per TLB miss.
-
-.TP
-.B --time-servicing
-
-This option requests that the percentage of CPU cycles spent servicing TLB
-misses is displayed when \fBcpupcstat\fB exits. To use this option the cost
-in CPU cycles for a single TLB miss must be specified using either the
-\fB--cost-config\fB option or the \fBtlbmiss_cost.sh\fB script.
-
-.TP
-.B --cost-config </path/to/config>
-
-This option tells \fBcpupcstat\fB that the cost in CPU cycles of a TLB miss
-can be found in the specified file, it should be specified as:
-
-TLB_MISS_COST=XX
-
-Where XX is the cost in cycles. This option is only used with the
-\fB--time-servicing\fB option.
-
-.TP
-.B --force-oprofile
-
-\fBcpupcstat\fP prefers the perf tool for data collection, only using oprofile
-if perf is not present or supported. This option will force \fBcpupcstat\fP to
-use oprofile for data collection.
-
-.SH SEE ALSO
-.I oprofile(1)
-.I perf(1)
-.I tlbmiss_cost.sh(8)
-.br
-.SH AUTHORS
-Eric B Munson <ebmunson@us.ibm.com> is the primary author. See the documentation
-for other contributors.
-
--
1.8.3.1

View File

@ -0,0 +1,44 @@
From 413573f442f1abbea47e54683758281e2a770a68 Mon Sep 17 00:00:00 2001
Message-Id: <413573f442f1abbea47e54683758281e2a770a68.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Eric B Munson <eric@munsonfam.org>
Date: Sat, 17 Aug 2019 13:59:05 -0400
Subject: [RHEL7 PATCH 29/31] Fix spelling of khugepaged options in hugeadm
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
hugeadm.c | 2 +-
man/hugeadm.8 | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/hugeadm.c b/hugeadm.c
index fe4211d..62e13ec 100644
--- a/hugeadm.c
+++ b/hugeadm.c
@@ -112,7 +112,7 @@ void print_usage()
CONT("should scan on each pass");
OPTION("--thp-khugepaged-scan-sleep <milliseconds>", "Time in ms to sleep between");
CONT("khugepaged passes");
- OPTION("--thp-khugepages-alloc-sleep <milliseconds>", "Time in ms for khugepaged");
+ OPTION("--thp-khugepaged-alloc-sleep <milliseconds>", "Time in ms for khugepaged");
CONT("to wait if there was a huge page allocation failure");
OPTION("--pool-pages-max <size|DEFAULT>:[+|-]<pagecount|memsize<G|M|K>>", "");
CONT("Adjust pool 'size' upper bound");
diff --git a/man/hugeadm.8 b/man/hugeadm.8
index 28de91e..6f17800 100644
--- a/man/hugeadm.8
+++ b/man/hugeadm.8
@@ -266,7 +266,7 @@ Configure the number of pages that khugepaged should scan on each pass
Configure how many milliseconds khugepaged should wait between passes
.TP
-.B --thp-khugepages-alloc-sleep <milliseconds>
+.B --thp-khugepaged-alloc-sleep <milliseconds>
Configure how many milliseconds khugepaged should wait after failing to
allocate a huge page to throttle the next attempt.
--
1.8.3.1

View File

@ -0,0 +1,36 @@
From 1c69af9d9c53361f64c181d7b8ed7936299f9201 Mon Sep 17 00:00:00 2001
Message-Id: <1c69af9d9c53361f64c181d7b8ed7936299f9201.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Mon, 19 Aug 2019 11:46:25 +0530
Subject: [RHEL7 PATCH 30/31] Makefile: Remove cpupcstat from man page target
This fixes the man page installation target by removing
a reference to the man page for the deprecated cpupcstat
script.
Fixes: 0d29e25 ("Remove man page for cpupcstat")
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index 51e41f0..a107a62 100644
--- a/Makefile
+++ b/Makefile
@@ -14,7 +14,7 @@ INSTALL_MAN3 = get_huge_pages.3 get_hugepage_region.3 gethugepagesize.3 \
gethugepagesizes.3 getpagesizes.3 hugetlbfs_find_path.3 \
hugetlbfs_test_path.3 hugetlbfs_unlinked_fd.3
INSTALL_MAN7 = libhugetlbfs.7
-INSTALL_MAN8 = hugectl.8 hugeedit.8 hugeadm.8 cpupcstat.8
+INSTALL_MAN8 = hugectl.8 hugeedit.8 hugeadm.8
LDSCRIPT_TYPES = B BDT
LDSCRIPT_DIST_ELF = elf32ppclinux elf64ppc elf_i386 elf_x86_64
INSTALL_OBJSCRIPT = ld.hugetlbfs
--
1.8.3.1

View File

@ -0,0 +1,51 @@
From e9482399d7eee7199a4a31fe943c940f52a245ba Mon Sep 17 00:00:00 2001
Message-Id: <e9482399d7eee7199a4a31fe943c940f52a245ba.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Mon, 19 Aug 2019 14:48:38 +0530
Subject: [RHEL7 PATCH 31/31] tests: noresv-preserve-resv-page: Fix failure in
case of overcommit
This adds an additional check to see if the mapping created
with MAP_NORESERVE does not raise a SIGBUS upon being written
to because nr_overcommit_pages is set to a non-zero value and
surplus pages gets provisioned. In this case, the test should
pass.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/noresv-preserve-resv-page.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/tests/noresv-preserve-resv-page.c b/tests/noresv-preserve-resv-page.c
index b7b8043..b93bf36 100644
--- a/tests/noresv-preserve-resv-page.c
+++ b/tests/noresv-preserve-resv-page.c
@@ -66,6 +66,7 @@ int main(int argc, char *argv[])
{
long hpage_size;
int nr_hugepages;
+ int surp_hugepages;
int fd1, fd2, err;
char *p, *q;
struct sigaction sa = {
@@ -104,6 +105,13 @@ int main(int argc, char *argv[])
verbose_printf("Write to %p to steal reserved page\n", q);
+ surp_hugepages = get_huge_page_counter(hpage_size, HUGEPAGES_SURP);
test_write(q);
+
+ /* Provisioning succeeded because of overcommit */
+ if (get_huge_page_counter(hpage_size, HUGEPAGES_SURP) ==
+ surp_hugepages + 1)
+ PASS();
+
FAIL("Steal reserved page");
}
--
1.8.3.1

View File

@ -0,0 +1,59 @@
diff --git a/elflink.c b/elflink.c
index a6bd44c..953e843 100644
--- a/elflink.c
+++ b/elflink.c
@@ -569,36 +569,34 @@ bail2:
*/
static unsigned long hugetlb_slice_start(unsigned long addr)
{
- if (!arch_has_slice_support()) {
- return ALIGN_DOWN(addr, gethugepagesize());
- }
-
+ if (arch_has_slice_support()) {
#if defined(__powerpc64__)
- if (addr < SLICE_LOW_TOP)
- return ALIGN_DOWN(addr, SLICE_LOW_SIZE);
- else if (addr < SLICE_HIGH_SIZE)
- return SLICE_LOW_TOP;
- else
- return ALIGN_DOWN(addr, SLICE_HIGH_SIZE);
+ if (addr < SLICE_LOW_TOP)
+ return ALIGN_DOWN(addr, SLICE_LOW_SIZE);
+ else if (addr < SLICE_HIGH_SIZE)
+ return SLICE_LOW_TOP;
+ else
+ return ALIGN_DOWN(addr, SLICE_HIGH_SIZE);
#elif defined(__powerpc__) && !defined(PPC_NO_SEGMENTS)
- return ALIGN_DOWN(addr, SLICE_LOW_SIZE);
+ return ALIGN_DOWN(addr, SLICE_LOW_SIZE);
#endif
+ }
+ return ALIGN_DOWN(addr, gethugepagesize());
}
static unsigned long hugetlb_slice_end(unsigned long addr)
{
- if (!arch_has_slice_support()) {
- return ALIGN_UP(addr, gethugepagesize()) - 1;
- }
-
+ if (arch_has_slice_support()) {
#if defined(__powerpc64__)
- if (addr < SLICE_LOW_TOP)
- return ALIGN_UP(addr, SLICE_LOW_SIZE) - 1;
- else
- return ALIGN_UP(addr, SLICE_HIGH_SIZE) - 1;
+ if (addr < SLICE_LOW_TOP)
+ return ALIGN_UP(addr, SLICE_LOW_SIZE) - 1;
+ else
+ return ALIGN_UP(addr, SLICE_HIGH_SIZE) - 1;
#elif defined(__powerpc__) && !defined(PPC_NO_SEGMENTS)
- return ALIGN_UP(addr, SLICE_LOW_SIZE) - 1;
+ return ALIGN_UP(addr, SLICE_LOW_SIZE) - 1;
#endif
+ }
+ return ALIGN_UP(addr, gethugepagesize()) - 1;
}
static unsigned long hugetlb_next_slice_start(unsigned long addr)

View File

@ -1,304 +0,0 @@
diff --git a/huge_page_setup_helper.py b/huge_page_setup_helper.py
index 43c9916..7ba0c92 100755
--- a/huge_page_setup_helper.py
+++ b/huge_page_setup_helper.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
#
# Tool to set up Linux large page support with minimal effort
@@ -14,13 +14,13 @@ debug = False
# must be executed under the root to operate
if os.geteuid() != 0:
- print "You must be root to setup hugepages!"
+ print("You must be root to setup hugepages!")
os._exit(1)
# config files we need access to
sysctlConf = "/etc/sysctl.conf"
if not os.access(sysctlConf, os.W_OK):
- print "Cannot access %s" % sysctlConf
+ print("Cannot access %s" % sysctlConf)
if debug == False:
os._exit(1)
@@ -41,7 +41,7 @@ for line in hugeadmexplain:
break
if memTotal == 0:
- print "Your version of libhugetlbfs' hugeadm utility is too old!"
+ print("Your version of libhugetlbfs' hugeadm utility is too old!")
os._exit(1)
@@ -54,7 +54,7 @@ for line in poolList:
break
if hugePageSize == 0:
- print "Aborting, cannot determine system huge page size!"
+ print("Aborting, cannot determine system huge page size!")
os._exit(1)
# Get initial sysctl settings
@@ -83,22 +83,22 @@ for line in groupNames:
# dump system config as we see it before we start tweaking it
-print "Current configuration:"
-print " * Total System Memory......: %6d MB" % memTotal
-print " * Shared Mem Max Mapping...: %6d MB" % (shmmax / (1024 * 1024))
-print " * System Huge Page Size....: %6d MB" % (hugePageSize / (1024 * 1024))
-print " * Number of Huge Pages.....: %6d" % hugePages
-print " * Total size of Huge Pages.: %6d MB" % (hugePages * hugePageSize / (1024 * 1024))
-print " * Remaining System Memory..: %6d MB" % (memTotal - (hugePages * hugePageSize / (1024 * 1024)))
-print " * Huge Page User Group.....: %s (%d)" % (hugeGIDName, hugeGID)
-print
+print("Current configuration:")
+print(" * Total System Memory......: %6d MB" % memTotal)
+print(" * Shared Mem Max Mapping...: %6d MB" % (shmmax / (1024 * 1024)))
+print(" * System Huge Page Size....: %6d MB" % (hugePageSize / (1024 * 1024)))
+print(" * Number of Huge Pages.....: %6d" % hugePages)
+print(" * Total size of Huge Pages.: %6d MB" % (hugePages * hugePageSize / (1024 * 1024)))
+print(" * Remaining System Memory..: %6d MB" % (memTotal - (hugePages * hugePageSize / (1024 * 1024))))
+print(" * Huge Page User Group.....: %s (%d)" % (hugeGIDName, hugeGID))
+print()
# ask how memory they want to allocate for huge pages
userIn = None
while not userIn:
try:
- userIn = raw_input("How much memory would you like to allocate for huge pages? "
+ userIn = input("How much memory would you like to allocate for huge pages? "
"(input in MB, unless postfixed with GB): ")
if userIn[-2:] == "GB":
userHugePageReqMB = int(userIn[0:-2]) * 1024
@@ -113,19 +113,19 @@ while not userIn:
# As a sanity safeguard, require at least 128M not be allocated to huge pages
if userHugePageReqMB > (memTotal - 128):
userIn = None
- print "Refusing to allocate %d, you must leave at least 128MB for the system" % userHugePageReqMB
+ print("Refusing to allocate %d, you must leave at least 128MB for the system" % userHugePageReqMB)
elif userHugePageReqMB < (hugePageSize / (1024 * 1024)):
userIn = None
- print "Sorry, allocation must be at least a page's worth!"
+ print("Sorry, allocation must be at least a page's worth!")
else:
break
except ValueError:
userIn = None
- print "Input must be an integer, please try again!"
+ print("Input must be an integer, please try again!")
userHugePageReqKB = userHugePageReqMB * 1024
userHugePagesReq = userHugePageReqKB / (hugePageSize / 1024)
-print "Okay, we'll try to allocate %d MB for huge pages..." % userHugePageReqMB
-print
+print("Okay, we'll try to allocate %d MB for huge pages..." % userHugePageReqMB)
+print()
# some basic user input validation
@@ -134,24 +134,24 @@ inputIsValid = False
# ask for the name of the group allowed access to huge pages
while inputIsValid == False:
foundbad = False
- userGroupReq = raw_input("What group should have access to the huge pages?"
+ userGroupReq = input("What group should have access to the huge pages?"
"(The group will be created, if need be) [hugepages]: ")
if userGroupReq is '':
userGroupReq = 'hugepages'
if userGroupReq[0].isdigit() or userGroupReq[0] == "-":
foundbad = True
- print "Group names cannot start with a number or dash, please try again!"
+ print("Group names cannot start with a number or dash, please try again!")
for char in badchars:
if char in userGroupReq:
foundbad = True
- print "Illegal characters in group name, please try again!"
+ print("Illegal characters in group name, please try again!")
break
if len(userGroupReq) > 16:
foundbad = True
- print "Group names can't be more than 16 characaters, please try again!"
+ print("Group names can't be more than 16 characaters, please try again!")
if foundbad == False:
inputIsValid = True
-print "Okay, we'll give group %s access to the huge pages" % userGroupReq
+print("Okay, we'll give group %s access to the huge pages" % userGroupReq)
# see if group already exists, use it if it does, if not, create it
@@ -163,20 +163,20 @@ for line in groupNames:
break
if userGIDReq > -1:
- print "Group %s (gid %d) already exists, we'll use it" % (userGroupReq, userGIDReq)
+ print("Group %s (gid %d) already exists, we'll use it" % (userGroupReq, userGIDReq))
else:
if debug == False:
os.popen("/usr/sbin/groupadd %s" % userGroupReq)
else:
- print "/usr/sbin/groupadd %s" % userGroupReq
+ print("/usr/sbin/groupadd %s" % userGroupReq)
groupNames = os.popen("/usr/bin/getent group %s" % userGroupReq).readlines()
for line in groupNames:
curGroupName = line.split(":")[0]
if curGroupName == userGroupReq:
userGIDReq = int(line.split(":")[2])
break
- print "Created group %s (gid %d) for huge page use" % (userGroupReq, userGIDReq)
-print
+ print("Created group %s (gid %d) for huge page use" % (userGroupReq, userGIDReq))
+print()
# basic user input validation, take 2
@@ -186,20 +186,20 @@ inputIsValid = False
# ask for user(s) that should be in the huge page access group
while inputIsValid == False:
foundbad = False
- userUsersReq = raw_input("What user(s) should have access to the huge pages (space-delimited list, users created as needed)? ")
+ userUsersReq = input("What user(s) should have access to the huge pages (space-delimited list, users created as needed)? ")
for char in badchars:
if char in userUsersReq:
foundbad = True
- print "Illegal characters in user name(s) or invalid list format, please try again!"
+ print("Illegal characters in user name(s) or invalid list format, please try again!")
break
for n in userUsersReq.split():
if len(n) > 32:
foundbad = True
- print "User names can't be more than 32 characaters, please try again!"
+ print("User names can't be more than 32 characaters, please try again!")
break
if n[0] == "-":
foundbad = True
- print "User names cannot start with a dash, please try again!"
+ print("User names cannot start with a dash, please try again!")
break
if foundbad == False:
inputIsValid = True
@@ -211,24 +211,24 @@ for hugeUser in hugePageUserList:
for line in curUserList:
curUser = line.split(":")[0]
if curUser == hugeUser:
- print "Adding user %s to huge page group" % hugeUser
+ print("Adding user %s to huge page group" % hugeUser)
userExists = True
if debug == False:
os.popen("/usr/sbin/usermod -a -G %s %s" % (userGroupReq, hugeUser))
else:
- print "/usr/sbin/usermod -a -G %s %s" % (userGroupReq, hugeUser)
+ print("/usr/sbin/usermod -a -G %s %s" % (userGroupReq, hugeUser))
if userExists == True:
break
if userExists == False:
- print "Creating user %s with membership in huge page group" % hugeUser
+ print("Creating user %s with membership in huge page group" % hugeUser)
if debug == False:
if hugeUser == userGroupReq:
os.popen("/usr/sbin/useradd %s -g %s" % (hugeUser, userGroupReq))
else:
os.popen("/usr/sbin/useradd %s -G %s" % (hugeUser, userGroupReq))
else:
- print "/usr/sbin/useradd %s -G %s" % (hugeUser, userGroupReq)
-print
+ print("/usr/sbin/useradd %s -G %s" % (hugeUser, userGroupReq))
+print()
# set values for the current running environment
@@ -238,11 +238,11 @@ if debug == False:
os.popen("/usr/bin/hugeadm --set-shm-group %d" % userGIDReq)
os.popen("/usr/bin/hugeadm --set-recommended-shmmax")
else:
- print "/usr/bin/hugeadm --pool-pages-min DEFAULT:%sM" % userHugePageReqMB
- print "/usr/bin/hugeadm --pool-pages-max DEFAULT:%sM" % userHugePageReqMB
- print "/usr/bin/hugeadm --set-shm-group %d" % userGIDReq
- print "/usr/bin/hugeadm --set-recommended-shmmax"
- print
+ print("/usr/bin/hugeadm --pool-pages-min DEFAULT:%sM" % userHugePageReqMB)
+ print("/usr/bin/hugeadm --pool-pages-max DEFAULT:%sM" % userHugePageReqMB)
+ print("/usr/bin/hugeadm --set-shm-group %d" % userGIDReq)
+ print("/usr/bin/hugeadm --set-recommended-shmmax")
+ print()
# figure out what that shmmax value we just set was
hugeadmexplain = os.popen("/usr/bin/hugeadm --explain 2>/dev/null").readlines()
@@ -258,7 +258,7 @@ if debug == False:
try:
sysctlConfLines = open(sysctlConf).readlines()
os.rename(sysctlConf, sysctlConf + ".backup")
- print("Saved original %s as %s.backup" % (sysctlConf, sysctlConf))
+ print(("Saved original %s as %s.backup" % (sysctlConf, sysctlConf)))
except:
pass
@@ -279,11 +279,11 @@ if debug == False:
fd.close()
else:
- print "Add to %s:" % sysctlConf
- print "kernel.shmmax = %d" % shmmax
- print "vm.nr_hugepages = %d" % userHugePagesReq
- print "vm.hugetlb_shm_group = %d" % userGIDReq
- print
+ print("Add to %s:" % sysctlConf)
+ print("kernel.shmmax = %d" % shmmax)
+ print("vm.nr_hugepages = %d" % userHugePagesReq)
+ print("vm.hugetlb_shm_group = %d" % userGIDReq)
+ print()
# write out limits.conf changes to persist across reboot
@@ -293,7 +293,7 @@ if debug == False:
try:
limitsConfLines = open(limitsConf).readlines()
os.rename(limitsConf, limitsConf + ".backup")
- print("Saved original %s as %s.backup" % (limitsConf, limitsConf))
+ print(("Saved original %s as %s.backup" % (limitsConf, limitsConf)))
except:
pass
@@ -319,25 +319,25 @@ if debug == False:
fd.close()
else:
- print "Add to %s:" % limitsConf
+ print("Add to %s:" % limitsConf)
for hugeUser in hugePageUserList:
- print "%s soft memlock %d" % (hugeUser, userHugePageReqKB)
- print "%s hard memlock %d" % (hugeUser, userHugePageReqKB)
+ print("%s soft memlock %d" % (hugeUser, userHugePageReqKB))
+ print("%s hard memlock %d" % (hugeUser, userHugePageReqKB))
# dump the final configuration of things now that we're done tweaking
-print
-print "Final configuration:"
-print " * Total System Memory......: %6d MB" % memTotal
+print()
+print("Final configuration:")
+print(" * Total System Memory......: %6d MB" % memTotal)
if debug == False:
- print " * Shared Mem Max Mapping...: %6d MB" % (shmmax / (1024 * 1024))
+ print(" * Shared Mem Max Mapping...: %6d MB" % (shmmax / (1024 * 1024)))
else:
# This should be what we *would* have set it to, had we actually run hugeadm --set-recommended-shmmax
- print " * Shared Mem Max Mapping...: %6d MB" % (userHugePagesReq * hugePageSize / (1024 * 1024))
-print " * System Huge Page Size....: %6d MB" % (hugePageSize / (1024 * 1024))
-print " * Available Huge Pages.....: %6d" % userHugePagesReq
-print " * Total size of Huge Pages.: %6d MB" % (userHugePagesReq * hugePageSize / (1024 * 1024))
-print " * Remaining System Memory..: %6d MB" % (memTotal - userHugePageReqMB)
-print " * Huge Page User Group.....: %s (%d)" % (userGroupReq, userGIDReq)
-print
+ print(" * Shared Mem Max Mapping...: %6d MB" % (userHugePagesReq * hugePageSize / (1024 * 1024)))
+print(" * System Huge Page Size....: %6d MB" % (hugePageSize / (1024 * 1024)))
+print(" * Available Huge Pages.....: %6d" % userHugePagesReq)
+print(" * Total size of Huge Pages.: %6d MB" % (userHugePagesReq * hugePageSize / (1024 * 1024)))
+print(" * Remaining System Memory..: %6d MB" % (memTotal - userHugePageReqMB))
+print(" * Huge Page User Group.....: %s (%d)" % (userGroupReq, userGIDReq))
+print()

View File

@ -0,0 +1,12 @@
diff --git a/tests/Makefile b/tests/Makefile
index 508a6ec..9fd15eb 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -295,7 +295,6 @@ obj64/install:
$(INSTALL) -m 755 wrapper-utils.sh $(DESTDIR)$(INST_TESTSDIR64)/obj64
$(INSTALL) -m 755 $(HELPERS:%=obj64/%) $(DESTDIR)$(INST_TESTSDIR64)/obj64
$(INSTALL) -m 755 $(HELPER_LIBS:%=obj64/%) $(DESTDIR)$(INST_TESTSDIR64)/obj64
- $(INSTALL) -m 755 $(TESTS_64:%=obj64/%) $(DESTDIR)$(INST_TESTSDIR64)/obj64
$(INSTALL) -m 755 run_tests.py $(DESTDIR)$(INST_TESTSDIR64)
install: $(OBJDIRS:%=%/install)

View File

@ -0,0 +1,13 @@
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 5c10f6d..b6f73bb 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -247,7 +247,7 @@ def get_pagesizes():
"""
sizes = set()
out = ""
- (rc, out) = bash("../obj/hugeadm --page-sizes")
+ (rc, out) = bash("hugeadm --page-sizes")
if rc != 0 or out == "":
return sizes

View File

@ -1,30 +1,12 @@
Name: libhugetlbfs
Version: 2.21
Release: 3%{?dist}
Release: 12%{?dist}
Summary: A library which provides easy access to huge pages of memory
Group: System Environment/Libraries
License: LGPLv2+
URL: https://github.com/libhugetlbfs/libhugetlbfs
Source0: https://github.com/%{name}/%{name}/releases/download/%{version}/%{name}-%{version}.tar.gz
# Patch0: build flags adjusts to build in stricter RHEL-8 buildroots
Patch0: build_flags.patch
# Patch1: huge_page_setup_helper.py Python3 conversion
# Upstream tickets:
# Fedora: https://bugzilla.redhat.com/show_bug.cgi?id=1598570
# libhugetlbfs: https://github.com/libhugetlbfs/libhugetlbfs/issues/35
Patch1: huge_page_setup_helper-python3-convert.patch
# Downstream patch for further Python3 conversion RHBZ#1620250
Patch2: run_tests-python3-convert.patch
# Downstream patch testcases to avoid bogus annoying failures
# RHBZ#1611780 && RHBZ#1611782
Patch3: 0001-testutils-fix-range_is_mapped.patch
Patch4: 0002-stack_grow_into_huge-don-t-clobber-existing-mappings.patch
# RHBZ#1628794 undersized SHMMAX when running on aarch64
# https://github.com/libhugetlbfs/libhugetlbfs/issues/39
Patch5: tests_shm-perms_adjust_max_segment_size_for_bigger_hugepages.patch
BuildRequires: glibc-devel
BuildRequires: glibc-static
BuildRequires: python3-devel
@ -32,6 +14,55 @@ BuildRequires: execstack
%define ldscriptdir %{_datadir}/%{name}/ldscripts
# Patch0: build flags adjustments to build in stricter RHEL-8 buildroots
Patch0: build_flags.patch
# Downstream patch testcases to avoid bogus annoying failures
# RHBZ#1611780 && RHBZ#1611782
Patch1: 0001-testutils-fix-range_is_mapped.patch
Patch2: 0002-stack_grow_into_huge-don-t-clobber-existing-mappings.patch
# RHBZ#1628794 undersized SHMMAX when running on aarch64
# https://github.com/libhugetlbfs/libhugetlbfs/issues/39
Patch3: tests_shm-perms_adjust_max_segment_size_for_bigger_hugepages.patch
# Downstream fix for Covscan CI error.
Patch4: elflink-return-type-fix.patch
# Downstream patches to remove an IA-64 target leftover that breaks the
# tests install and fix run_tests.py path for hugeadm tool call
Patch5: tests-makefile-fix.patch
Patch6: tests-run_tests-fix-hugeadm-path.patch
# Upstream follow-ups for libhugetlbfs-2.21
Patch50: 0001-tests-Add-utility-to-check-for-a-minimum-number-of-o.patch
Patch51: 0002-tests-slbpacaflush-Use-online-cpus-only.patch
Patch52: 0003-tests-alloc-instantiate-race-Use-online-cpus-only.patch
Patch53: 0004-tests-task-size-overrun-Make-test-faster-on-powerpc6.patch
Patch54: 0005-tests-truncate-above-4GB-Skip-if-truncation-point-is.patch
Patch55: 0006-tests-map-high-truncate-2-Skip-if-truncation-point-i.patch
Patch56: 0007-morecore-tests-Run-tests-only-for-default-huge-page-.patch
Patch57: 0008-hugeutils-Make-writing-a-ulong-to-a-file-more-reliab.patch
Patch58: 0009-tests-Add-utility-to-check-if-huge-pages-are-giganti.patch
Patch59: 0010-tests-counters-Skip-if-using-gigantic-huge-pages.patch
Patch60: 0011-hugeutils-Add-utility-to-check-if-slices-are-support.patch
Patch61: 0012-tests-brk-near-huge-Fix-next-chunk-computation-for-p.patch
Patch62: 0013-elflink-Fix-program-header-address-calculation.patch
Patch63: 0014-elflink-powerpc64-Use-slices-based-on-MMU-type.patch
Patch64: 0015-ld.hugetlbfs-powerpc64-Add-support-for-different-hug.patch
Patch65: 0016-elflink-tests-Run-tests-only-for-default-huge-page-s.patch
Patch66: 0017-tests-Update-utility-to-get-free-and-total-huge-page.patch
Patch67: 0018-mmap-tests-Run-tests-with-correct-huge-page-count.patch
Patch68: 0019-Be-explicit-about-using-Python2-in-the-test-script.patch
Patch69: 0020-Switch-test-runner-script-to-print-function.patch
Patch70: 0021-Remove-backtick-operator-from-test-runner-script.patch
Patch71: 0022-tests-Avoid-old-style-except-syntax-in-the-test-runn.patch
Patch72: 0023-tests-Avoid-explicit-type-comparison-in-runner-scrip.patch
Patch73: 0024-tests-Explicitly-decode-subprocess-output.patch
Patch74: 0025-tests-Use-modern-style-division-in-runner-script.patch
Patch75: 0026-tests-Switch-test-runner-to-Python3.patch
Patch76: 0027-tests-Improve-TASK_SIZE-detection-in-task-size-overr.patch
Patch77: 0028-Remove-man-page-for-cpupcstat.patch
Patch78: 0029-Fix-spelling-of-khugepaged-options-in-hugeadm.patch
Patch79: 0030-Makefile-Remove-cpupcstat-from-man-page-target.patch
Patch80: 0031-tests-noresv-preserve-resv-page-Fix-failure-in-case-.patch
%description
libhugetlbfs is a library which provides easy access to huge pages of memory.
It is a wrapper for the hugetlbfs file system. Applications can use huge pages
@ -57,49 +88,91 @@ segment remapping behavior. hugectl sets environment variables for using huge
pages and then execs the target program. hugeadm gives easy access to huge page
pool size control. pagesize lists page sizes available on the machine.
%package tests
Summary: Test cases to help on validating the library environment
Group: Development/Libraries
Requires: %{name}-utils = %{version}-%{release}
%description tests
This package contains a number of test cases that help developers
to verify the libhugetlbfs functionality and validate the library.
%prep
%setup -q -n %{name}-%{version}
# apply upstream patchset first
%patch50 -p1
%patch51 -p1
%patch52 -p1
%patch53 -p1
%patch54 -p1
%patch55 -p1
%patch56 -p1
%patch57 -p1
%patch58 -p1
%patch59 -p1
%patch60 -p1
%patch61 -p1
%patch62 -p1
%patch63 -p1
%patch64 -p1
%patch65 -p1
%patch66 -p1
%patch67 -p1
%patch68 -p1
%patch69 -p1
%patch70 -p1
%patch71 -p1
%patch72 -p1
%patch73 -p1
%patch74 -p1
%patch75 -p1
%patch76 -p1
%patch77 -p1
%patch78 -p1
%patch79 -p1
%patch80 -p1
# downstream patches
%patch0 -p1
%patch1 -p1
%patch2 -p1
%patch3 -p1
%patch4 -p1
%patch5 -p1
%patch6 -p1
pathfix.py -i %{__python3} -pn huge_page_setup_helper.py \
tests/run_tests.py
%build
%set_build_flags
# Parallel builds are not reliable
make BUILDTYPE=NATIVEONLY V=1
make all BUILDTYPE=NATIVEONLY V=1
%install
make install PREFIX=%{_prefix} DESTDIR=$RPM_BUILD_ROOT LDSCRIPTDIR=%{ldscriptdir} BUILDTYPE=NATIVEONLY
make install-helper PREFIX=%{_prefix} DESTDIR=$RPM_BUILD_ROOT LDSCRIPTDIR=%{ldscriptdir} BUILDTYPE=NATIVEONLY
make install-tests PREFIX=%{_prefix} DESTDIR=$RPM_BUILD_ROOT LDSCRIPTDIR=%{ldscriptdir} BUILDTYPE=NATIVEONLY
mkdir -p -m755 $RPM_BUILD_ROOT%{_sysconfdir}/security/limits.d
touch $RPM_BUILD_ROOT%{_sysconfdir}/security/limits.d/hugepages.conf
# clear execstack flag
execstack --clear-execstack %{buildroot}/%{_libdir}/libhugetlbfs.so
execstack --clear-execstack %{buildroot}/%{_libdir}/libhugetlbfs_privutils.so
# remove statically built libraries:
rm -f $RPM_BUILD_ROOT/%{_libdir}/*.a
# remove unused sbin directory
rm -fr $RPM_BUILD_ROOT/%{_sbindir}/
# fix up dangling symlink warnings on RPMDiff
rm -f %{buildroot}/%{_libdir}/libhugetlbfs_privutils.so
%post -p /sbin/ldconfig
%postun -p /sbin/ldconfig
%files
%{_libdir}/libhugetlbfs.so*
%{_libdir}/libhugetlbfs_privutils.so*
%{_datadir}/%{name}/
%{_mandir}/man7/libhugetlbfs.7.gz
%ghost %config(noreplace) %{_sysconfdir}/security/limits.d/hugepages.conf
%exclude %{_libdir}/libhugetlbfs_privutils.so
%doc README HOWTO LGPL-2.1 NEWS
%files devel
@ -134,7 +207,18 @@ rm -f %{buildroot}/%{_libdir}/libhugetlbfs_privutils.so
%exclude %{_mandir}/man8/cpupcstat.8.gz
%exclude %{_libdir}/perl5/TLBC
%files tests
%{_libdir}/libhugetlbfs
%changelog
* Tue Oct 29 2019 Rafael Aquini <aquini@redhat.com> - 2.21-12
- Fix: Introduce libhugetlbfs-tests subpkg for CI tests (1688930)
- trim repetitive changelogs for interim debug builds
* Mon Oct 28 2019 Rafael Aquini <aquini@redhat.com> - 2.21-4
- Fix: task-size-overrun hung over 8 hours on ppc64le (1737370)
- Introduce libhugetlbfs-tests subpkg for CI tests (1688930)
* Tue Apr 2 2019 Rafael Aquini <aquini@redhat.com> - 2.21-3
- Fix: Adding CI gating basic infrastructure (1680621)