Compare commits


No commits in common. "c8" and "c10s" have entirely different histories.
c8 ... c10s

53 changed files with 107 additions and 1566 deletions

.gitignore vendored

@@ -1 +0,0 @@
SOURCES/libhugetlbfs-2.21.tar.gz

@@ -1 +0,0 @@
8ed79a12d07be1e858ef4e0148ab1f4115094ef6 SOURCES/libhugetlbfs-2.21.tar.gz

README.md Normal file

@@ -0,0 +1,3 @@
# Package Not Available
This package is not available on CentOS Stream 10.
It may be available on another branch.

@@ -1,100 +0,0 @@
From d42f467a923dfc09309acb7a83b42e3285fbd8f4 Mon Sep 17 00:00:00 2001
Message-Id: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:28 +0530
Subject: [RHEL7 PATCH 01/31] tests: Add utility to check for a minimum number
of online cpus
This adds a test utility to check whether a minimum number (N)
of online cpus is available. If so, it also provides a list
of the first N online cpus.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/hugetests.h | 1 +
tests/testutils.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 49 insertions(+)
diff --git a/tests/hugetests.h b/tests/hugetests.h
index 8b1d8d9..e3179e6 100644
--- a/tests/hugetests.h
+++ b/tests/hugetests.h
@@ -43,6 +43,7 @@ extern char *test_name;
void check_free_huge_pages(int nr_pages_needed);
void check_must_be_root(void);
void check_hugetlb_shm_group(void);
+void check_online_cpus(int[], int);
void test_init(int argc, char *argv[]);
int test_addr_huge(void *p);
unsigned long long get_mapping_page_size(void *p);
diff --git a/tests/testutils.c b/tests/testutils.c
index 6298370..2b47547 100644
--- a/tests/testutils.c
+++ b/tests/testutils.c
@@ -33,6 +33,8 @@
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/stat.h>
+#include <sys/sysinfo.h>
+#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
@@ -80,6 +82,52 @@ void check_hugetlb_shm_group(void)
CONFIG("Do not have permission to use SHM_HUGETLB");
}
+#define SYSFS_CPU_ONLINE_FMT "/sys/devices/system/cpu/cpu%d/online"
+
+void check_online_cpus(int online_cpus[], int nr_cpus_needed)
+{
+ char cpu_state, path_buf[64];
+ int total_cpus, cpu_idx, fd, ret, i;
+
+ total_cpus = get_nprocs_conf();
+ cpu_idx = 0;
+
+ if (get_nprocs() < nr_cpus_needed)
+ CONFIG("Atleast online %d cpus are required", nr_cpus_needed);
+
+ for (i = 0; i < total_cpus && cpu_idx < nr_cpus_needed; i++) {
+ errno = 0;
+ sprintf(path_buf, SYSFS_CPU_ONLINE_FMT, i);
+ fd = open(path_buf, O_RDONLY);
+ if (fd < 0) {
+ /* If 'online' is absent, the cpu cannot be offlined */
+ if (errno == ENOENT) {
+ online_cpus[cpu_idx] = i;
+ cpu_idx++;
+ continue;
+ } else {
+ FAIL("Unable to open %s: %s", path_buf,
+ strerror(errno));
+ }
+ }
+
+ ret = read(fd, &cpu_state, 1);
+ if (ret < 1)
+ FAIL("Unable to read %s: %s", path_buf,
+ strerror(errno));
+
+ if (cpu_state == '1') {
+ online_cpus[cpu_idx] = i;
+ cpu_idx++;
+ }
+
+ close(fd);
+ }
+
+ if (cpu_idx < nr_cpus_needed)
+ CONFIG("Atleast %d online cpus were not found", nr_cpus_needed);
+}
+
void __attribute__((weak)) cleanup(void)
{
}
--
1.8.3.1
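As a usage sketch (hypothetical caller, not part of the patch), a test that needs two cpus would call the new utility like this:

    int online_cpus[2];

    /* CONFIGs the test away unless at least 2 cpus are online, then
     * fills online_cpus[] with the ids of the first two online cpus */
    check_online_cpus(online_cpus, 2);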

@@ -1,68 +0,0 @@
From 192ac21a3c057c5dedca4cdd1bf700f38992030c Mon Sep 17 00:00:00 2001
Message-Id: <192ac21a3c057c5dedca4cdd1bf700f38992030c.1496667760.git.jstancek@redhat.com>
From: Jan Stancek <jstancek@redhat.com>
Date: Thu, 1 Jun 2017 09:48:41 +0200
Subject: [PATCH v2 1/2] testutils: fix range_is_mapped()
It doesn't return the correct value when the tested region is
completely inside an existing mapping:
+--------------------------------------+
^ start ^ end
+----------------+
^ low ^ high
Rather than testing for all combinations of 2 regions overlapping,
flip the condition and test if they don't overlap.
Signed-off-by: Jan Stancek <jstancek@redhat.com>
---
tests/testutils.c | 22 ++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)
This is a v2 series for:
https://groups.google.com/forum/#!topic/libhugetlbfs/tAsWjuJ7x8k
diff --git a/tests/testutils.c b/tests/testutils.c
index 629837045465..f42852e1938b 100644
--- a/tests/testutils.c
+++ b/tests/testutils.c
@@ -190,19 +190,29 @@ int range_is_mapped(unsigned long low, unsigned long high)
return -1;
}
- if ((start >= low) && (start < high)) {
+ /*
+ * (existing mapping) (tested region)
+ * +----------------+ +.......+
+ * ^start ^end ^ low ^high
+ */
+ if (low >= end) {
fclose(f);
- return 1;
+ return 0;
}
- if ((end >= low) && (end < high)) {
+
+ /*
+ * (tested region) (existing mapping)
+ * +.....+ +----------------+
+ * ^low ^high ^ start ^end
+ */
+ if (high <= start) {
fclose(f);
- return 1;
+ return 0;
}
-
}
fclose(f);
- return 0;
+ return 1;
}
/*
--
1.8.3.1
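The fix applies the standard interval-overlap identity: two half-open ranges [start, end) and [low, high) intersect exactly when neither lies entirely on one side of the other. As a standalone sketch of the predicate the patched loop now evaluates per mapping:

    /* Overlap iff NOT (one range ends before the other starts). */
    static int ranges_overlap(unsigned long start, unsigned long end,
                              unsigned long low, unsigned long high)
    {
            return !(low >= end || high <= start);
    }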

@@ -1,52 +0,0 @@
commit e7b3e6817421763eee37cb35ef8627bdd37a3690
Author: Chunyu Hu <chuhu@redhat.com>
Date: Wed May 6 18:59:43 2020 +0800
Wait child with os.wait()
os.popen() is asynchronous: it fork()s a child and exec()s the
given command in it. If the command is slow enough, the main
process can read back an incomplete result.
During our testing we found python3 to be faster than python2; after
converting to python3, 'groupadd' usually has not finished by the time
the following step iterates over the groups, so we get '-1' as the
group id, which leads to an error.
To reproduce it with python3:
/root/rpmbuild/BUILD/libhugetlbfs-2.21/huge_page_setup_helper.py <<EOF
128
hugepages
hugepages root
EOF
...
hugeadm:ERROR: Invalid group specification (-1)
...
Signed-off-by: Chunyu Hu <chuhu@redhat.com>
diff --git a/huge_page_setup_helper.py b/huge_page_setup_helper.py
index a9ba2bf..01fc8dc 100755
--- a/huge_page_setup_helper.py
+++ b/huge_page_setup_helper.py
@@ -169,6 +169,10 @@ else:
os.popen("/usr/sbin/groupadd %s" % userGroupReq)
else:
print("/usr/sbin/groupadd %s" % userGroupReq)
+
+ # wait for the groupadd finish
+ os.wait()
+
groupNames = os.popen("/usr/bin/getent group %s" % userGroupReq).readlines()
for line in groupNames:
curGroupName = line.split(":")[0]
@@ -244,6 +248,9 @@ else:
print("/usr/bin/hugeadm --set-recommended-shmmax")
print()
+# wait for the hugepage setups finish
+os.wait()
+
# figure out what that shmmax value we just set was
hugeadmexplain = os.popen("/usr/bin/hugeadm --explain 2>/dev/null").readlines()
for line in hugeadmexplain:
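The same pitfall exists in C, where popen(3) likewise returns as soon as the child is spawned and only pclose(3) reaps it. A minimal sketch (the groupadd command line is illustrative):

    #include <stdio.h>

    int main(void)
    {
            FILE *p = popen("/usr/sbin/groupadd hugepages", "r");
            if (!p)
                    return 1;
            /* the command may still be running here ... */
            if (pclose(p) != 0)     /* ... pclose() waits for it to finish */
                    return 1;
            /* only now is it safe to read the group back via getent */
            return 0;
    }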

@@ -1,173 +0,0 @@
From a329008ea54056f0ed9d85cc3d0d9129474f7cd5 Mon Sep 17 00:00:00 2001
Message-Id: <a329008ea54056f0ed9d85cc3d0d9129474f7cd5.1496667760.git.jstancek@redhat.com>
In-Reply-To: <192ac21a3c057c5dedca4cdd1bf700f38992030c.1496667760.git.jstancek@redhat.com>
References: <192ac21a3c057c5dedca4cdd1bf700f38992030c.1496667760.git.jstancek@redhat.com>
From: Jan Stancek <jstancek@redhat.com>
Date: Thu, 1 Jun 2017 10:00:47 +0200
Subject: [PATCH v2 2/2] stack_grow_into_huge: don't clobber existing mappings
This test allocates hugepages above the stack using MAP_FIXED and
then grows the stack while it can. If a MAP_FIXED request is
successful, the mapping established by mmap() replaces any previous
mappings of the process' pages in that range. If there's anything
important there (libc mappings), these can get clobbered as
described here:
http://marc.info/?l=linux-arm-kernel&m=149036535209519&w=2.
This patch creates an extra stack for the new child and maps one
hugepage above it. The search starts at the heap and continues
until it hits an existing mapping or manages to map a huge page
with the stack below it.
If no suitable place can be found, the test PASSes as inconclusive.
Signed-off-by: Jan Stancek <jstancek@redhat.com>
---
tests/stack_grow_into_huge.c | 101 ++++++++++++++++++++++++++++---------------
1 file changed, 67 insertions(+), 34 deletions(-)
This is a v2 series for:
https://groups.google.com/forum/#!topic/libhugetlbfs/tAsWjuJ7x8k
diff --git a/tests/stack_grow_into_huge.c b/tests/stack_grow_into_huge.c
index a380da063264..9b8ea8d74887 100644
--- a/tests/stack_grow_into_huge.c
+++ b/tests/stack_grow_into_huge.c
@@ -25,6 +25,7 @@
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/wait.h>
+#include <sched.h>
#include <hugetlbfs.h>
#include "hugetests.h"
@@ -54,7 +55,10 @@
#define STACK_ALLOCATION_SIZE (16*1024*1024)
#endif
-void do_child(void *stop_address)
+#define MIN_CHILD_STACK (2*1024*1024)
+#define STEP (STACK_ALLOCATION_SIZE)
+
+int do_child(void *stop_address)
{
struct rlimit r;
volatile int *x;
@@ -71,15 +75,68 @@ void do_child(void *stop_address)
x = alloca(STACK_ALLOCATION_SIZE);
*x = 1;
} while ((void *)x >= stop_address);
+
+ return 0;
+}
+
+void *try_setup_stack_and_huge(int fd, void *hint)
+{
+ void *mmap_address, *stack_start, *tmp;
+ long hpage_size = gethugepagesize();
+ void *stop = alloca(1);
+
+ /*
+ * Find a spot for huge page. We start at "hint" and
+ * keep going down in "STEP" increments until we find
+ * a place where we can mmap huge page.
+ */
+ mmap_address = PALIGN(hint, hpage_size);
+ do {
+ mmap_address += STEP;
+ if (mmap_address >= stop)
+ return NULL;
+ if (range_is_mapped((unsigned long)mmap_address,
+ (unsigned long)mmap_address + hpage_size))
+ continue;
+ tmp = mmap(mmap_address, hpage_size,
+ PROT_READ|PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
+ } while (tmp == MAP_FAILED);
+ verbose_printf("huge page is at: %p-%p\n",
+ mmap_address, mmap_address + hpage_size);
+
+ /*
+ * Find a spot for stack below huge page. We start at end of
+ * huge page we found above and keep trying to mmap stack
+ * below. Because stack needs to grow into hugepage, we
+ * also have to make sure nothing is mapped in gap between
+ * stack and huge page.
+ */
+ stack_start = mmap_address + hpage_size;
+ do {
+ if (range_is_mapped((unsigned long)stack_start,
+ (unsigned long)stack_start + STEP + MIN_CHILD_STACK)) {
+ verbose_printf("range is mapped: %p-%p\n", stack_start,
+ stack_start + STEP + MIN_CHILD_STACK);
+ munmap(mmap_address, hpage_size);
+ return NULL;
+ }
+ stack_start += STEP;
+ if (stack_start >= stop)
+ return NULL;
+ tmp = mmap(stack_start, MIN_CHILD_STACK, PROT_READ|PROT_WRITE,
+ MAP_GROWSDOWN|MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
+ } while (tmp == MAP_FAILED);
+
+ verbose_printf("Child stack is at %p-%p\n",
+ stack_start, stack_start + MIN_CHILD_STACK);
+ return stack_start + MIN_CHILD_STACK;
}
int main(int argc, char *argv[])
{
int fd, pid, s, ret;
struct rlimit r;
- char *b;
- long hpage_size = gethugepagesize();
- void *stack_address, *mmap_address, *heap_address;
+ void *stack_end;
test_init(argc, argv);
@@ -94,37 +151,13 @@ int main(int argc, char *argv[])
if (fd < 0)
CONFIG("Couldn't get hugepage fd");
- stack_address = alloca(0);
- heap_address = sbrk(0);
+ stack_end = try_setup_stack_and_huge(fd, sbrk(0));
+ if (!stack_end)
+ PASS_INCONCLUSIVE();
- /*
- * paranoia: start mapping two hugepages below the start of the stack,
- * in case the alignment would cause us to map over something if we
- * only used a gap of one hugepage.
- */
- mmap_address = PALIGN(stack_address - 2 * hpage_size, hpage_size);
-
- do {
- b = mmap(mmap_address, hpage_size, PROT_READ|PROT_WRITE,
- MAP_FIXED|MAP_SHARED, fd, 0);
- mmap_address -= hpage_size;
- /*
- * if we get all the way down to the heap, stop trying
- */
- if (mmap_address <= heap_address)
- break;
- } while (b == MAP_FAILED);
-
- if (b == MAP_FAILED)
- FAIL("mmap: %s", strerror(errno));
-
- if ((pid = fork()) < 0)
- FAIL("fork: %s", strerror(errno));
-
- if (pid == 0) {
- do_child(mmap_address);
- exit(0);
- }
+ pid = clone(do_child, stack_end, SIGCHLD, 0);
+ if (pid < 0)
+ FAIL("clone: %s", strerror(errno));
ret = waitpid(pid, &s, 0);
if (ret == -1)
--
1.8.3.1
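On kernels 4.17 and later, the manual probing done by try_setup_stack_and_huge() can also be expressed with MAP_FIXED_NOREPLACE, which fails with EEXIST rather than clobbering an existing mapping. A sketch under that assumption (not part of this patch, which targets older kernels):

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <errno.h>

    /* Try to place one huge page at 'addr' without overwriting anything. */
    static void *map_huge_noreplace(void *addr, long hpage_size, int fd)
    {
            void *p = mmap(addr, hpage_size, PROT_READ | PROT_WRITE,
                           MAP_SHARED | MAP_FIXED_NOREPLACE, fd, 0);

            if (p == MAP_FAILED)
                    return NULL;    /* EEXIST: occupied, caller keeps probing */
            return p;
    }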

@@ -1,74 +0,0 @@
From 865d160eff7e6c69968d0196272030f206dd3430 Mon Sep 17 00:00:00 2001
Message-Id: <865d160eff7e6c69968d0196272030f206dd3430.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:29 +0530
Subject: [RHEL7 PATCH 02/31] tests: slbpacaflush: Use online cpus only
This ensures that the two cpus between which the thread is
migrated are online. For offline cpus, sched_setaffinity()
will always fail.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/slbpacaflush.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/tests/slbpacaflush.c b/tests/slbpacaflush.c
index 8893c4d..765e069 100644
--- a/tests/slbpacaflush.c
+++ b/tests/slbpacaflush.c
@@ -57,29 +57,32 @@ int main(int argc, char *argv[])
int fd;
void *p;
volatile unsigned long *q;
- int err;
+ int online_cpus[2], err;
cpu_set_t cpu0, cpu1;
test_init(argc, argv);
hpage_size = check_hugepagesize();
+ check_online_cpus(online_cpus, 2);
fd = hugetlbfs_unlinked_fd();
if (fd < 0)
FAIL("hugetlbfs_unlinked_fd()");
CPU_ZERO(&cpu0);
- CPU_SET(0, &cpu0);
+ CPU_SET(online_cpus[0], &cpu0);
CPU_ZERO(&cpu1);
- CPU_SET(1, &cpu1);
+ CPU_SET(online_cpus[1], &cpu1);
err = sched_setaffinity(getpid(), CPU_SETSIZE/8, &cpu0);
if (err != 0)
- CONFIG("sched_setaffinity(cpu0): %s", strerror(errno));
+ CONFIG("sched_setaffinity(cpu%d): %s", online_cpus[0],
+ strerror(errno));
err = sched_setaffinity(getpid(), CPU_SETSIZE/8, &cpu1);
if (err != 0)
- CONFIG("sched_setaffinity(): %s", strerror(errno));
+ CONFIG("sched_setaffinity(cpu%d): %s", online_cpus[1],
+ strerror(errno));
p = mmap(NULL, hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
if (p == MAP_FAILED)
@@ -87,7 +90,8 @@ int main(int argc, char *argv[])
err = sched_setaffinity(getpid(), CPU_SETSIZE/8, &cpu0);
if (err != 0)
- CONFIG("sched_setaffinity(cpu0): %s", strerror(errno));
+ CONFIG("sched_setaffinity(cpu%d): %s", online_cpus[0],
+ strerror(errno));
q = (volatile unsigned long *)(p + getpagesize());
*q = 0xdeadbeef;
--
1.8.3.1
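For reference, the pinning pattern that the patch parameterizes, shown standalone (a sketch; the cpu id would come from check_online_cpus()):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <unistd.h>

    static int pin_self_to_cpu(int cpu)
    {
            cpu_set_t set;

            CPU_ZERO(&set);
            CPU_SET(cpu, &set);
            /* returns 0 on success; always fails for an offline cpu */
            return sched_setaffinity(getpid(), sizeof(set), &set);
    }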

@@ -1,70 +0,0 @@
From 4ba9722027d9aeec173866b5ca12282268594f35 Mon Sep 17 00:00:00 2001
Message-Id: <4ba9722027d9aeec173866b5ca12282268594f35.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:30 +0530
Subject: [RHEL7 PATCH 03/31] tests: alloc-instantiate-race: Use online cpus
only
This ensures that the two processes or threads between which
the race condition is introduced are always running on online
cpus. For offline cpus, sched_setaffinity() will always fail.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/alloc-instantiate-race.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/tests/alloc-instantiate-race.c b/tests/alloc-instantiate-race.c
index 7f84e8a..f55e2f7 100644
--- a/tests/alloc-instantiate-race.c
+++ b/tests/alloc-instantiate-race.c
@@ -121,7 +121,9 @@ static void run_race(void *syncarea, int race_type)
int fd;
void *p;
int status1, status2;
- int ret;
+ int online_cpus[2], ret;
+
+ check_online_cpus(online_cpus, 2);
memset(syncarea, 0, sizeof(*trigger1) + sizeof(*trigger2));
trigger1 = syncarea;
@@ -143,13 +145,13 @@ static void run_race(void *syncarea, int race_type)
if (child1 < 0)
FAIL("fork(): %s", strerror(errno));
if (child1 == 0)
- proc_racer(p, 0, trigger1, trigger2);
+ proc_racer(p, online_cpus[0], trigger1, trigger2);
child2 = fork();
if (child2 < 0)
FAIL("fork(): %s", strerror(errno));
if (child2 == 0)
- proc_racer(p, 1, trigger2, trigger1);
+ proc_racer(p, online_cpus[1], trigger2, trigger1);
/* wait() calls */
ret = waitpid(child1, &status1, 0);
@@ -175,13 +177,13 @@ static void run_race(void *syncarea, int race_type)
} else {
struct racer_info ri1 = {
.p = p,
- .cpu = 0,
+ .cpu = online_cpus[0],
.mytrigger = trigger1,
.othertrigger = trigger2,
};
struct racer_info ri2 = {
.p = p,
- .cpu = 1,
+ .cpu = online_cpus[1],
.mytrigger = trigger2,
.othertrigger = trigger1,
};
--
1.8.3.1

@@ -1,52 +0,0 @@
From 2f38664f81e1877f81b16ed327b540d69d175a5b Mon Sep 17 00:00:00 2001
Message-Id: <2f38664f81e1877f81b16ed327b540d69d175a5b.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:31 +0530
Subject: [RHEL7 PATCH 04/31] tests: task-size-overrun: Make test faster on
powerpc64
As of now, powerpc64 supports 64TB, 128TB, 512TB, 1PB, 2PB and
4PB user address space sizes, with 4TB being the default on newer
kernels. With the relatively conservative increments that this
test uses to find the task size, it takes a very long time; it can
be made faster by also increasing the increment in steps matching
the different supported task sizes.
Fixes: 02df38e ("Defined task size value to be 512T if it is more that 64Tb.")
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/task-size-overrun.c | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/tests/task-size-overrun.c b/tests/task-size-overrun.c
index dc9ce0e..914ef65 100644
--- a/tests/task-size-overrun.c
+++ b/tests/task-size-overrun.c
@@ -83,8 +83,18 @@ static unsigned long find_task_size(void)
munmap(p, getpagesize());
addr += getpagesize();
#if defined(__powerpc64__)
- if (addr > (1UL << 46) && addr < (1UL << 49))
- addr = 1UL << 49;
+ if (addr > (1UL << 46) && addr < (1UL << 47))
+ addr = 1UL << 47; /* 64TB */
+ else if (addr > (1UL << 47) && addr < (1UL << 48))
+ addr = 1UL << 48; /* 128TB */
+ else if (addr > (1UL << 48) && addr < (1UL << 49))
+ addr = 1UL << 49; /* 512TB */
+ else if (addr > (1UL << 49) && addr < (1UL << 50))
+ addr = 1UL << 50; /* 1PB */
+ else if (addr > (1UL << 50) && addr < (1UL << 51))
+ addr = 1UL << 51; /* 2PB */
+ else if (addr > (1UL << 51) && addr < (1UL << 52))
+ addr = 1UL << 52; /* 4PB */
#endif
#if defined(__s390x__)
if (addr > (1UL << 42) && addr < (1UL << 53))
--
1.8.3.1
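For context, the generic loop this patch is speeding up probes upward with MAP_FIXED until the kernel refuses the address; a simplified sketch (the real test also applies the per-architecture skips shown above):

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <errno.h>
    #include <unistd.h>

    static unsigned long find_task_size_sketch(void)
    {
            unsigned long addr = getpagesize();
            void *p;

            for (;;) {
                    p = mmap((void *)addr, getpagesize(), PROT_READ,
                             MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
                    if (p == MAP_FAILED && errno == ENOMEM)
                            return addr;    /* first refused address */
                    if (p != MAP_FAILED)
                            munmap(p, getpagesize());
                    addr += getpagesize();
            }
    }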

@@ -1,50 +0,0 @@
From 2a63852ac9358cdddce9944aade1d443f686246a Mon Sep 17 00:00:00 2001
Message-Id: <2a63852ac9358cdddce9944aade1d443f686246a.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:32 +0530
Subject: [RHEL7 PATCH 05/31] tests: truncate-above-4GB: Skip if truncation
point is not aligned
Attempting ftruncate() on a hugetlbfs file descriptor requires
the truncation point to be aligned to the huge page size. So,
this test is not applicable for huge page sizes that are either
greater than or not a factor of the truncation point.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/truncate_above_4GB.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/tests/truncate_above_4GB.c b/tests/truncate_above_4GB.c
index 4c427fc..2e29931 100644
--- a/tests/truncate_above_4GB.c
+++ b/tests/truncate_above_4GB.c
@@ -79,6 +79,13 @@ int main(int argc, char *argv[])
page_size = getpagesize();
hpage_size = check_hugepagesize();
+ truncate_point = FOURGIG;
+
+ if (hpage_size > truncate_point)
+ CONFIG("Huge page size is too large");
+
+ if (truncate_point % hpage_size > 0)
+ CONFIG("Truncation point is not aligned to huge page size");
check_free_huge_pages(3);
@@ -86,7 +93,6 @@ int main(int argc, char *argv[])
if (fd < 0)
FAIL("hugetlbfs_unlinked_fd()");
- truncate_point = FOURGIG;
buggy_offset = truncate_point / (hpage_size / page_size);
buggy_offset = ALIGN(buggy_offset, hpage_size);
--
1.8.3.1
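The precondition being added is plain divisibility: ftruncate() on hugetlbfs only accepts offsets that are whole multiples of the huge page size. As a standalone check mirroring the two CONFIG cases:

    /* usable only if the truncation point is a positive multiple
     * of the huge page size */
    static int truncate_point_ok(unsigned long long truncate_point,
                                 long hpage_size)
    {
            return (unsigned long long)hpage_size <= truncate_point &&
                   truncate_point % hpage_size == 0;
    }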

@@ -1,49 +0,0 @@
From 65c07c0f64ef1c97f9aea80d0c8470417e377a6a Mon Sep 17 00:00:00 2001
Message-Id: <65c07c0f64ef1c97f9aea80d0c8470417e377a6a.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:33 +0530
Subject: [RHEL7 PATCH 06/31] tests: map-high-truncate-2: Skip if truncation
point is not aligned
Attempting ftruncate() on a hugetlbfs file descriptor requires
the truncation point to be aligned to the huge page size. So,
this test is not applicable for huge page sizes that are either
greater than or not a factor of the truncation point.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/map_high_truncate_2.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/tests/map_high_truncate_2.c b/tests/map_high_truncate_2.c
index 2a2560b..fc44a13 100644
--- a/tests/map_high_truncate_2.c
+++ b/tests/map_high_truncate_2.c
@@ -56,6 +56,7 @@
#define TRUNCATE_POINT 0x60000000UL
#endif
#define HIGH_ADDR 0xa0000000UL
+#define FOURGIG ((off64_t)0x100000000ULL)
int main(int argc, char *argv[])
{
@@ -69,6 +70,12 @@ int main(int argc, char *argv[])
hpage_size = check_hugepagesize();
+ if (hpage_size > TRUNCATE_POINT)
+ CONFIG("Huge page size is too large");
+
+ if (TRUNCATE_POINT % hpage_size)
+ CONFIG("Truncation point is not aligned to huge page size");
+
check_free_huge_pages(4);
fd = hugetlbfs_unlinked_fd();
--
1.8.3.1

@@ -1,130 +0,0 @@
From e472e326d31a125e21453d75cb46bba9cf387952 Mon Sep 17 00:00:00 2001
Message-Id: <e472e326d31a125e21453d75cb46bba9cf387952.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:34 +0530
Subject: [RHEL7 PATCH 07/31] morecore: tests: Run tests only for default huge
page size
The morecore tests (malloc, malloc-manysmall and heapshrink)
are not linked against libhugetlbfs and cannot invoke library
functions like gethugepagesize(). Hence, run these tests only
for the kernel's default huge page size.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 81 +++++++++++++++++++++++++++++++++---------------------
1 file changed, 49 insertions(+), 32 deletions(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 3c95a03..70c5a6a 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -362,6 +362,16 @@ def do_test_with_rlimit(rtype, limit, cmd, bits=None, **env):
do_test(cmd, bits, **env)
resource.setrlimit(rtype, oldlimit)
+def do_test_with_pagesize(pagesize, cmd, bits=None, **env):
+ """
+ Run a test case, testing with a specified huge page size and
+ each indicated word size.
+ """
+ if bits == None:
+ bits = wordsizes
+ for b in (set(bits) & wordsizes_by_pagesize[pagesize]):
+ run_test(pagesize, b, cmd, **env)
+
def do_elflink_test(cmd, **env):
"""
Run an elflink test case, skipping known-bad configurations.
@@ -563,15 +573,22 @@ def functional_tests():
do_test("private")
do_test("fork-cow")
do_test("direct")
- do_test("malloc")
- do_test("malloc", LD_PRELOAD="libhugetlbfs.so", HUGETLB_MORECORE="yes")
- do_test("malloc", LD_PRELOAD="libhugetlbfs.so", HUGETLB_MORECORE="yes",
- HUGETLB_RESTRICT_EXE="unknown:none")
- do_test("malloc", LD_PRELOAD="libhugetlbfs.so", HUGETLB_MORECORE="yes",
- HUGETLB_RESTRICT_EXE="unknown:malloc")
- do_test("malloc_manysmall")
- do_test("malloc_manysmall", LD_PRELOAD="libhugetlbfs.so",
- HUGETLB_MORECORE="yes")
+ do_test_with_pagesize(system_default_hpage_size, "malloc")
+ do_test_with_pagesize(system_default_hpage_size, "malloc",
+ LD_PRELOAD="libhugetlbfs.so",
+ HUGETLB_MORECORE="yes")
+ do_test_with_pagesize(system_default_hpage_size, "malloc",
+ LD_PRELOAD="libhugetlbfs.so",
+ HUGETLB_MORECORE="yes",
+ HUGETLB_RESTRICT_EXE="unknown:none")
+ do_test_with_pagesize(system_default_hpage_size, "malloc",
+ LD_PRELOAD="libhugetlbfs.so",
+ HUGETLB_MORECORE="yes",
+ HUGETLB_RESTRICT_EXE="unknown:malloc")
+ do_test_with_pagesize(system_default_hpage_size, "malloc_manysmall")
+ do_test_with_pagesize(system_default_hpage_size, "malloc_manysmall",
+ LD_PRELOAD="libhugetlbfs.so",
+ HUGETLB_MORECORE="yes")
# After upstream commit: (glibc-2.25.90-688-gd5c3fafc43) glibc has a
# new per-thread caching mechanism that will NOT allow heapshrink test to
@@ -584,29 +601,29 @@ def functional_tests():
# program context (not even with a constructor function), and the tunable
# is only evaluated during malloc() initialization.
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0")
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
- LD_PRELOAD="libheapshrink.so")
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
- LD_PRELOAD="libhugetlbfs.so",
- HUGETLB_MORECORE="yes")
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
- LD_PRELOAD="libhugetlbfs.so libheapshrink.so",
- HUGETLB_MORECORE="yes")
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
- LD_PRELOAD="libheapshrink.so",
- HUGETLB_MORECORE="yes",
- HUGETLB_MORECORE_SHRINK="yes")
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
- LD_PRELOAD="libhugetlbfs.so libheapshrink.so",
- HUGETLB_MORECORE="yes",
- HUGETLB_MORECORE_SHRINK="yes")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
+ LD_PRELOAD="libheapshrink.so")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
+ LD_PRELOAD="libhugetlbfs.so",
+ HUGETLB_MORECORE="yes")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
+ LD_PRELOAD="libhugetlbfs.so libheapshrink.so",
+ HUGETLB_MORECORE="yes")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
+ LD_PRELOAD="libheapshrink.so",
+ HUGETLB_MORECORE="yes",
+ HUGETLB_MORECORE_SHRINK="yes")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
+ LD_PRELOAD="libhugetlbfs.so libheapshrink.so",
+ HUGETLB_MORECORE="yes",
+ HUGETLB_MORECORE_SHRINK="yes")
do_test("heap-overflow", HUGETLB_VERBOSE="1", HUGETLB_MORECORE="yes")
--
1.8.3.1

@@ -1,53 +0,0 @@
From 4ba60a2f5c3f5405c599caddc5a124c5781c9beb Mon Sep 17 00:00:00 2001
Message-Id: <4ba60a2f5c3f5405c599caddc5a124c5781c9beb.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:35 +0530
Subject: [RHEL7 PATCH 08/31] hugeutils: Make writing a ulong to a file more
reliable
This makes file_write_ulong() more reliable in terms of error
detection for certain cases like writing an invalid value to
a file under procfs or sysfs. Also, using fprintf() does not
guarantee that errno would be set under such circumstances.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
hugeutils.c | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/hugeutils.c b/hugeutils.c
index 60488e8..fc64946 100644
--- a/hugeutils.c
+++ b/hugeutils.c
@@ -219,17 +219,18 @@ long file_read_ulong(char *file, const char *tag)
int file_write_ulong(char *file, unsigned long val)
{
- FILE *f;
- int ret;
+ int fd, ret, buflen;
+ char buf[20];
- f = fopen(file, "w");
- if (!f) {
+ fd = open(file, O_WRONLY);
+ if (fd < 0) {
ERROR("Couldn't open %s: %s\n", file, strerror(errno));
return -1;
}
- ret = fprintf(f, "%lu", val);
- fclose(f);
+ buflen = sprintf(buf, "%lu", val);
+ ret = write(fd, buf, buflen);
+ close(fd);
return ret > 0 ? 0 : -1;
}
--
1.8.3.1
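A usage sketch of the hardened helper (path and value are illustrative); the point is that write(2) reports a rejected value directly via its return code, while fprintf()'s buffering could hide it:

    /* try to reserve 512 huge pages; returns -1 if the kernel rejects it */
    if (file_write_ulong("/proc/sys/vm/nr_hugepages", 512) < 0)
            perror("nr_hugepages");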

@@ -1,59 +0,0 @@
From a4879cc4f88b560958950d9277ba0df487b145f4 Mon Sep 17 00:00:00 2001
Message-Id: <a4879cc4f88b560958950d9277ba0df487b145f4.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:36 +0530
Subject: [RHEL7 PATCH 09/31] tests: Add utility to check if huge pages are
gigantic
This adds a test utility to check if the currently selected
huge page size corresponds to that of a gigantic page.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/hugetests.h | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/tests/hugetests.h b/tests/hugetests.h
index e3179e6..bc4e16a 100644
--- a/tests/hugetests.h
+++ b/tests/hugetests.h
@@ -22,6 +22,7 @@
#include <errno.h>
#include <string.h>
+#include <unistd.h>
#include "libhugetlbfs_privutils.h"
#include "libhugetlbfs_testprobes.h"
@@ -136,6 +137,24 @@ static inline long check_hugepagesize()
return __hpage_size;
}
+static inline void check_if_gigantic_page(void)
+{
+ long page_size, hpage_size, max_order;
+ FILE *fp;
+
+ page_size = sysconf(_SC_PAGESIZE);
+ hpage_size = gethugepagesize();
+ fp = popen("cat /proc/pagetypeinfo | "
+ "awk '/Free pages count per migrate type at order/ "
+ "{print $NF}'", "r");
+ if (!fp || fscanf(fp, "%lu", &max_order) < 0)
+ FAIL("Couldn't determine max page allocation order");
+
+ pclose(fp);
+ if (hpage_size > ((1 << max_order) * page_size))
+ CONFIG("Gigantic pages are not supported");
+}
+
int using_system_hpage_size(const char *mount);
/* WARNING: Racy -- use for test cases only! */
--
1.8.3.1
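To make the comparison concrete, a worked example (numbers are illustrative, not from the patch):

    /* Assume a typical x86_64 box: page_size = 4096 and pagetypeinfo
     * reports a maximum order of 10.  Then the threshold is
     *   (1 << 10) * 4096 = 4 MiB,
     * so 2 MiB huge pages are buddy-allocatable (not gigantic) while
     * 1 GiB huge pages exceed it and count as gigantic. */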

@@ -1,49 +0,0 @@
From 2d41ec367199f9f9d4b7caf00c3be25030a7a873 Mon Sep 17 00:00:00 2001
Message-Id: <2d41ec367199f9f9d4b7caf00c3be25030a7a873.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:37 +0530
Subject: [RHEL7 PATCH 10/31] tests: counters: Skip if using gigantic huge
pages
The kernel does not allow setting an overcommit limit for
gigantic huge pages, i.e. any page size beyond the max page
allocation order. For such cases, nr_overcommit_hugepages
cannot be modified and is always zero. So, skip this test
as mmap() using a hugetlbfs file descriptor will fail when
both nr_hugepages and nr_overcommit_hugepages are zero.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/counters.c | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/tests/counters.c b/tests/counters.c
index 0284809..34b1ef3 100644
--- a/tests/counters.c
+++ b/tests/counters.c
@@ -83,7 +83,17 @@ void verify_dynamic_pool_support(void)
saved_oc_hugepages = get_huge_page_counter(hpage_size, HUGEPAGES_OC);
if (saved_oc_hugepages < 0)
FAIL("Kernel appears to lack dynamic hugetlb pool support");
- set_nr_overcommit_hugepages(hpage_size, 10);
+ if (set_nr_overcommit_hugepages(hpage_size, 10) < 0) {
+ /*
+ * In case writing to nr_overcommit_hugepages failed with the
+ * reason that it was an attempt to write an invalid argument,
+ * it might be because the page size corresponds to gigantic
+ * pages which do not support this feature.
+ */
+ if (errno == EINVAL)
+ check_if_gigantic_page();
+ FAIL("Couldn't set overcommit limit");
+ }
}
void bad_value(int line, const char *name, long expect, long actual)
--
1.8.3.1

@@ -1,72 +0,0 @@
From 8cc33a134681892a71a4f67397bb13a541bb463e Mon Sep 17 00:00:00 2001
Message-Id: <8cc33a134681892a71a4f67397bb13a541bb463e.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:38 +0530
Subject: [RHEL7 PATCH 11/31] hugeutils: Add utility to check if slices are
supported
This adds a utility to check whether the current processor
architecture supports slices. Slices divide up a virtual address
space and impose restrictions such as, on powerpc64 with the
Hash MMU, allowing only one page size per slice.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
hugeutils.c | 21 +++++++++++++++++++++
libhugetlbfs_privutils.h | 3 +++
2 files changed, 24 insertions(+)
diff --git a/hugeutils.c b/hugeutils.c
index fc64946..e573622 100644
--- a/hugeutils.c
+++ b/hugeutils.c
@@ -800,6 +800,27 @@ int hpool_sizes(struct hpage_pool *pools, int pcnt)
return (which < pcnt) ? which : -1;
}
+int arch_has_slice_support(void)
+{
+#ifdef __powerpc64__
+ char mmu_type[16];
+ FILE *fp;
+
+ fp = popen("cat /proc/cpuinfo | grep MMU | awk '{ print $3}'", "r");
+ if (!fp || fscanf(fp, "%s", mmu_type) < 0) {
+ ERROR("Failed to determine MMU type\n");
+ abort();
+ }
+
+ pclose(fp);
+ return strcmp(mmu_type, "Hash") == 0;
+#elif defined(__powerpc__) && !defined(PPC_NO_SEGMENTS)
+ return 1;
+#else
+ return 0;
+#endif
+}
+
/*
* If we have a default page size then we support hugepages.
*/
diff --git a/libhugetlbfs_privutils.h b/libhugetlbfs_privutils.h
index 149e42f..8b12fed 100644
--- a/libhugetlbfs_privutils.h
+++ b/libhugetlbfs_privutils.h
@@ -53,6 +53,9 @@ int set_nr_hugepages(long pagesize, unsigned long val);
#define set_nr_overcommit_hugepages __pu_set_nr_overcommit_hugepages
int set_nr_overcommit_hugepages(long pagesize, unsigned long val);
+#define arch_has_slice_support __pu_arch_has_slice_support
+int arch_has_slice_support(void);
+
#define kernel_has_hugepages __pu_kernel_has_hugepages
int kernel_has_hugepages(void);
--
1.8.3.1

@@ -1,38 +0,0 @@
From 1329c4f5f4d201724d379d43dc5d516d1c9356dc Mon Sep 17 00:00:00 2001
Message-Id: <1329c4f5f4d201724d379d43dc5d516d1c9356dc.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:39 +0530
Subject: [RHEL7 PATCH 12/31] tests: brk-near-huge: Fix next chunk computation
for powerpc64
For powerpc64, the use of slices applies only to Hash MMU.
Hence, when determining the next chunk size, ensure that
the address is aligned to the slice size for Hash MMU and
the huge page size otherwise.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/brk_near_huge.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/tests/brk_near_huge.c b/tests/brk_near_huge.c
index f6d1e07..c9662f4 100644
--- a/tests/brk_near_huge.c
+++ b/tests/brk_near_huge.c
@@ -40,6 +40,9 @@
#ifdef __powerpc64__
void *next_chunk(void *addr)
{
+ if (!arch_has_slice_support())
+ return PALIGN(addr, gethugepagesize());
+
if ((unsigned long)addr < 0x100000000UL)
/* 256M segments below 4G */
return PALIGN(addr, 0x10000000UL);
--
1.8.3.1

@@ -1,143 +0,0 @@
From 9fe6594da91e86280c9d71877a91cee83aaedae6 Mon Sep 17 00:00:00 2001
Message-Id: <9fe6594da91e86280c9d71877a91cee83aaedae6.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Wed, 12 Jun 2019 12:34:30 +0530
Subject: [RHEL7 PATCH 13/31] elflink: Fix program header address calculation
This fixes the virtual address calculation for the ELF program
header. Based on the man page of dl_iterate_phdr(), the location
of a particular program header in virtual memory should be the
sum of the base address of the shared object and the segment's
virtual address.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
elflink.c | 40 ++++++++++++++++++++++++----------------
1 file changed, 24 insertions(+), 16 deletions(-)
diff --git a/elflink.c b/elflink.c
index ffc84dd..1150bde 100644
--- a/elflink.c
+++ b/elflink.c
@@ -374,7 +374,8 @@ static int get_shared_file_name(struct seg_info *htlb_seg_info, char *file_path)
}
/* Find the .dynamic program header */
-static int find_dynamic(Elf_Dyn **dyntab, const Elf_Phdr *phdr, int phnum)
+static int find_dynamic(Elf_Dyn **dyntab, const ElfW(Addr) addr,
+ const Elf_Phdr *phdr, int phnum)
{
int i = 1;
@@ -382,7 +383,7 @@ static int find_dynamic(Elf_Dyn **dyntab, const Elf_Phdr *phdr, int phnum)
++i;
}
if (phdr[i].p_type == PT_DYNAMIC) {
- *dyntab = (Elf_Dyn *)phdr[i].p_vaddr;
+ *dyntab = (Elf_Dyn *)(addr + phdr[i].p_vaddr);
return 0;
} else {
DEBUG("No dynamic segment found\n");
@@ -473,7 +474,8 @@ ElfW(Word) __attribute__ ((weak)) plt_extrasz(ElfW(Dyn) *dyntab)
* include these initialized variables in our copy.
*/
-static void get_extracopy(struct seg_info *seg, const Elf_Phdr *phdr, int phnum)
+static void get_extracopy(struct seg_info *seg, const ElfW(Addr) addr,
+ const Elf_Phdr *phdr, int phnum)
{
Elf_Dyn *dyntab; /* dynamic segment table */
Elf_Sym *symtab = NULL; /* dynamic symbol table */
@@ -492,7 +494,7 @@ static void get_extracopy(struct seg_info *seg, const Elf_Phdr *phdr, int phnum)
goto bail2;
/* Find dynamic program header */
- ret = find_dynamic(&dyntab, phdr, phnum);
+ ret = find_dynamic(&dyntab, addr, phdr, phnum);
if (ret < 0)
goto bail;
@@ -608,7 +610,8 @@ static unsigned long hugetlb_prev_slice_end(unsigned long addr)
/*
* Store a copy of the given program header
*/
-static int save_phdr(int table_idx, int phnum, const ElfW(Phdr) *phdr)
+static int save_phdr(int table_idx, int phnum, const ElfW(Addr) addr,
+ const ElfW(Phdr) *phdr)
{
int prot = 0;
@@ -626,7 +629,7 @@ static int save_phdr(int table_idx, int phnum, const ElfW(Phdr) *phdr)
if (phdr->p_flags & PF_X)
prot |= PROT_EXEC;
- htlb_seg_table[table_idx].vaddr = (void *) phdr->p_vaddr;
+ htlb_seg_table[table_idx].vaddr = (void *)(addr + phdr->p_vaddr);
htlb_seg_table[table_idx].filesz = phdr->p_filesz;
htlb_seg_table[table_idx].memsz = phdr->p_memsz;
htlb_seg_table[table_idx].prot = prot;
@@ -634,8 +637,8 @@ static int save_phdr(int table_idx, int phnum, const ElfW(Phdr) *phdr)
INFO("Segment %d (phdr %d): %#0lx-%#0lx (filesz=%#0lx) "
"(prot = %#0x)\n", table_idx, phnum,
- (unsigned long) phdr->p_vaddr,
- (unsigned long) phdr->p_vaddr + phdr->p_memsz,
+ (unsigned long) addr + phdr->p_vaddr,
+ (unsigned long) addr + phdr->p_vaddr + phdr->p_memsz,
(unsigned long) phdr->p_filesz, (unsigned int) prot);
return 0;
@@ -718,16 +721,19 @@ int parse_elf_normal(struct dl_phdr_info *info, size_t size, void *data)
seg_psize = segment_requested_page_size(&info->dlpi_phdr[i]);
if (seg_psize != page_size) {
- if (save_phdr(htlb_num_segs, i, &info->dlpi_phdr[i]))
+ if (save_phdr(htlb_num_segs, i, info->dlpi_addr,
+ &info->dlpi_phdr[i]))
return 1;
get_extracopy(&htlb_seg_table[htlb_num_segs],
- &info->dlpi_phdr[0], info->dlpi_phnum);
+ info->dlpi_addr, info->dlpi_phdr,
+ info->dlpi_phnum);
htlb_seg_table[htlb_num_segs].page_size = seg_psize;
htlb_num_segs++;
}
- start = ALIGN_DOWN(info->dlpi_phdr[i].p_vaddr, seg_psize);
- end = ALIGN(info->dlpi_phdr[i].p_vaddr +
- info->dlpi_phdr[i].p_memsz, seg_psize);
+ start = ALIGN_DOWN(info->dlpi_addr +
+ info->dlpi_phdr[i].p_vaddr, seg_psize);
+ end = ALIGN(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr +
+ info->dlpi_phdr[i].p_memsz, seg_psize);
segments[num_segs].page_size = seg_psize;
segments[num_segs].start = start;
@@ -771,8 +777,9 @@ int parse_elf_partial(struct dl_phdr_info *info, size_t size, void *data)
* in this forced way won't violate any contiguity
* constraints.
*/
- vaddr = hugetlb_next_slice_start(info->dlpi_phdr[i].p_vaddr);
- gap = vaddr - info->dlpi_phdr[i].p_vaddr;
+ vaddr = hugetlb_next_slice_start(info->dlpi_addr +
+ info->dlpi_phdr[i].p_vaddr);
+ gap = vaddr - (info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
slice_end = hugetlb_slice_end(vaddr);
/*
* we should stop remapping just before the slice
@@ -795,7 +802,8 @@ int parse_elf_partial(struct dl_phdr_info *info, size_t size, void *data)
}
memsz = hugetlb_prev_slice_end(vaddr + memsz) - vaddr;
- if (save_phdr(htlb_num_segs, i, &info->dlpi_phdr[i]))
+ if (save_phdr(htlb_num_segs, i, info->dlpi_addr,
+ &info->dlpi_phdr[i]))
return 1;
/*
--
1.8.3.1
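The rule the fix applies comes straight from dl_iterate_phdr(3): a loaded segment lives at dlpi_addr + p_vaddr, since p_vaddr alone is only an offset for position-independent objects. A minimal standalone illustration:

    #define _GNU_SOURCE
    #include <link.h>
    #include <stdio.h>

    static int show_segs(struct dl_phdr_info *info, size_t size, void *data)
    {
            for (int i = 0; i < info->dlpi_phnum; i++)
                    if (info->dlpi_phdr[i].p_type == PT_LOAD)
                            printf("%s: segment at %#lx\n", info->dlpi_name,
                                   (unsigned long)(info->dlpi_addr +
                                           info->dlpi_phdr[i].p_vaddr));
            return 0;
    }

    int main(void) { return dl_iterate_phdr(show_segs, NULL); }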

@@ -1,64 +0,0 @@
From 5022d5f86d02882a11700825258ecdba8dee683c Mon Sep 17 00:00:00 2001
Message-Id: <5022d5f86d02882a11700825258ecdba8dee683c.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Wed, 12 Jun 2019 12:34:31 +0530
Subject: [RHEL7 PATCH 14/31] elflink: powerpc64: Use slices based on MMU type
For powerpc64, the concept of slices is not applicable to the
recently introduced Radix MMU. So, slice boundaries should be
calculated based on the MMU type.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
elflink.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/elflink.c b/elflink.c
index 1150bde..a6bd44c 100644
--- a/elflink.c
+++ b/elflink.c
@@ -569,6 +569,10 @@ bail2:
*/
static unsigned long hugetlb_slice_start(unsigned long addr)
{
+ if (!arch_has_slice_support()) {
+ return ALIGN_DOWN(addr, gethugepagesize());
+ }
+
#if defined(__powerpc64__)
if (addr < SLICE_LOW_TOP)
return ALIGN_DOWN(addr, SLICE_LOW_SIZE);
@@ -578,13 +582,15 @@ static unsigned long hugetlb_slice_start(unsigned long addr)
return ALIGN_DOWN(addr, SLICE_HIGH_SIZE);
#elif defined(__powerpc__) && !defined(PPC_NO_SEGMENTS)
return ALIGN_DOWN(addr, SLICE_LOW_SIZE);
-#else
- return ALIGN_DOWN(addr, gethugepagesize());
#endif
}
static unsigned long hugetlb_slice_end(unsigned long addr)
{
+ if (!arch_has_slice_support()) {
+ return ALIGN_UP(addr, gethugepagesize()) - 1;
+ }
+
#if defined(__powerpc64__)
if (addr < SLICE_LOW_TOP)
return ALIGN_UP(addr, SLICE_LOW_SIZE) - 1;
@@ -592,8 +598,6 @@ static unsigned long hugetlb_slice_end(unsigned long addr)
return ALIGN_UP(addr, SLICE_HIGH_SIZE) - 1;
#elif defined(__powerpc__) && !defined(PPC_NO_SEGMENTS)
return ALIGN_UP(addr, SLICE_LOW_SIZE) - 1;
-#else
- return ALIGN_UP(addr, gethugepagesize()) - 1;
#endif
}
--
1.8.3.1

@@ -1,212 +0,0 @@
From 815072b9163cae73671baae448f974cc8f8a84be Mon Sep 17 00:00:00 2001
From: Rafael Aquini <aquini@redhat.com>
Date: Sun, 12 Apr 2020 21:08:01 -0400
Subject: [PATCH] tests: fix covscan SHELLCHECK_WARNING complaints
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/counters.sh | 2 +-
tests/fadvise_reserve.sh | 2 +-
tests/fallocate_align.sh | 2 +-
tests/fallocate_basic.sh | 2 +-
tests/fallocate_stress.sh | 2 +-
tests/madvise_reserve.sh | 2 +-
tests/mremap-expand-slice-collision.sh | 2 +-
tests/mremap-fixed-huge-near-normal.sh | 2 +-
tests/mremap-fixed-normal-near-huge.sh | 2 +-
tests/quota.sh | 2 +-
tests/readahead_reserve.sh | 2 +-
tests/wrapper-utils.sh | 18 +++++++++---------
12 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/tests/counters.sh b/tests/counters.sh
index e3ffabe..27bfca3 100755
--- a/tests/counters.sh
+++ b/tests/counters.sh
@@ -3,7 +3,7 @@
. wrapper-utils.sh
# Huge page overcommit was not available until 2.6.24
-compare_kvers `uname -r` "2.6.24"
+compare_kvers "$(uname -r)" "2.6.24"
if [ $? -eq 1 ]; then
EXP_RC=$RC_FAIL
else
diff --git a/tests/fadvise_reserve.sh b/tests/fadvise_reserve.sh
index 74496ec..ff96003 100755
--- a/tests/fadvise_reserve.sh
+++ b/tests/fadvise_reserve.sh
@@ -3,7 +3,7 @@
. wrapper-utils.sh
# fadvise is known broken before 2.6.30
-compare_kvers `uname -r` "2.6.30"
+compare_kvers "$(uname -r)" "2.6.30"
if [ $? -eq 1 ]; then
echo "FAIL (assumed) kernel bug"
exit $RC_FAIL
diff --git a/tests/fallocate_align.sh b/tests/fallocate_align.sh
index 5105151..4397cd3 100755
--- a/tests/fallocate_align.sh
+++ b/tests/fallocate_align.sh
@@ -5,7 +5,7 @@
#
# hugetlbfs fallocate support was not available until 4.3
#
-compare_kvers `uname -r` "4.3.0"
+compare_kvers "$(uname -r)" "4.3.0"
if [ $? -eq 1 ]; then
echo "FAIL no fallocate support in kernels before 4.3.0"
exit $RC_FAIL
diff --git a/tests/fallocate_basic.sh b/tests/fallocate_basic.sh
index 904dfd6..1af6196 100755
--- a/tests/fallocate_basic.sh
+++ b/tests/fallocate_basic.sh
@@ -5,7 +5,7 @@
#
# hugetlbfs fallocate support was not available until 4.3
#
-compare_kvers `uname -r` "4.3.0"
+compare_kvers "$(uname -r)" "4.3.0"
if [ $? -eq 1 ]; then
echo "FAIL no fallocate support in kernels before 4.3.0"
exit $RC_FAIL
diff --git a/tests/fallocate_stress.sh b/tests/fallocate_stress.sh
index 622084f..3b5b70a 100755
--- a/tests/fallocate_stress.sh
+++ b/tests/fallocate_stress.sh
@@ -5,7 +5,7 @@
#
# hugetlbfs fallocate support was not available until 4.3
#
-compare_kvers `uname -r` "4.3.0"
+compare_kvers "$(uname -r)" "4.3.0"
if [ $? -eq 1 ]; then
echo "FAIL no fallocate support in kernels before 4.3.0"
exit $RC_FAIL
diff --git a/tests/madvise_reserve.sh b/tests/madvise_reserve.sh
index cfe582d..eb289d6 100755
--- a/tests/madvise_reserve.sh
+++ b/tests/madvise_reserve.sh
@@ -3,7 +3,7 @@
. wrapper-utils.sh
# madvise is known broken before 2.6.30
-compare_kvers `uname -r` "2.6.30"
+compare_kvers "$(uname -r)" "2.6.30"
if [ $? -eq 1 ]; then
echo "FAIL (assumed) kernel bug"
exit $RC_FAIL
diff --git a/tests/mremap-expand-slice-collision.sh b/tests/mremap-expand-slice-collision.sh
index 8c9d98a..dd4eba3 100755
--- a/tests/mremap-expand-slice-collision.sh
+++ b/tests/mremap-expand-slice-collision.sh
@@ -3,7 +3,7 @@
. wrapper-utils.sh
# mremap-expand-slice-collision is known broken before 2.6.33
-compare_kvers `uname -r` "2.6.33"
+compare_kvers "$(uname -r)" "2.6.33"
if [ $? -eq 1 ]; then
echo "FAIL (assumed) kernel bug"
exit $RC_FAIL
diff --git a/tests/mremap-fixed-huge-near-normal.sh b/tests/mremap-fixed-huge-near-normal.sh
index 4b89c35..22fde79 100755
--- a/tests/mremap-fixed-huge-near-normal.sh
+++ b/tests/mremap-fixed-huge-near-normal.sh
@@ -3,7 +3,7 @@
. wrapper-utils.sh
# mremap-fixed-huge-near-normal is known broken before 2.6.33
-compare_kvers `uname -r` "2.6.33"
+compare_kvers "$(uname -r)" "2.6.33"
if [ $? -eq 1 ]; then
echo "FAIL (assumed) kernel bug"
exit $RC_FAIL
diff --git a/tests/mremap-fixed-normal-near-huge.sh b/tests/mremap-fixed-normal-near-huge.sh
index 9ed058f..45b8f26 100755
--- a/tests/mremap-fixed-normal-near-huge.sh
+++ b/tests/mremap-fixed-normal-near-huge.sh
@@ -3,7 +3,7 @@
. wrapper-utils.sh
# mremap-fixed-normal-near-huge is known broken before 2.6.33
-compare_kvers `uname -r` "2.6.33"
+compare_kvers "$(uname -r)" "2.6.33"
if [ $? -eq 1 ]; then
echo "FAIL (assumed) kernel bug"
exit $RC_FAIL
diff --git a/tests/quota.sh b/tests/quota.sh
index 398d442..55c764a 100755
--- a/tests/quota.sh
+++ b/tests/quota.sh
@@ -3,7 +3,7 @@
. wrapper-utils.sh
# There are known bugs in quota accounting prior to 2.6.24
-compare_kvers `uname -r` "2.6.24"
+compare_kvers "$(uname -r)" "2.6.24"
if [ $? -eq 1 ]; then
EXP_RC=$RC_FAIL
else
diff --git a/tests/readahead_reserve.sh b/tests/readahead_reserve.sh
index 5ab7400..861ef5a 100755
--- a/tests/readahead_reserve.sh
+++ b/tests/readahead_reserve.sh
@@ -3,7 +3,7 @@
. wrapper-utils.sh
# readahead is known broken before 2.6.30
-compare_kvers `uname -r` "2.6.30"
+compare_kvers "$(uname -r)" "2.6.30"
if [ $? -eq 1 ]; then
echo "FAIL (assumed) kernel bug"
exit $RC_FAIL
diff --git a/tests/wrapper-utils.sh b/tests/wrapper-utils.sh
index 2f6451d..79e7ed1 100644
--- a/tests/wrapper-utils.sh
+++ b/tests/wrapper-utils.sh
@@ -1,12 +1,12 @@
#!/bin/bash
# Standard return codes
-RC_PASS=0
-RC_CONFIG=1
-RC_FAIL=2
-RC_XFAIL=3
-RC_XPASS=4
-RC_BUG=99
+export RC_PASS=0
+export RC_CONFIG=1
+export RC_FAIL=2
+export RC_XFAIL=3
+export RC_XPASS=4
+export RC_BUG=99
function unexpected_pass()
{
@@ -28,10 +28,10 @@ function check_rc()
EXP_RC=$1
ACT_RC=$2
- if [ $ACT_RC -eq $RC_PASS -a $EXP_RC -ne $RC_PASS ]; then
+ if [[ ($ACT_RC -eq $RC_PASS) && ($EXP_RC -ne $RC_PASS) ]]; then
unexpected_pass
return $RC_XPASS
- elif [ $EXP_RC -ne $RC_PASS -a $EXP_RC -eq $ACT_RC ]; then
+ elif [[ ($EXP_RC -ne $RC_PASS) && ($EXP_RC -eq $ACT_RC) ]]; then
expected_fail
return $RC_XFAIL
else
@@ -47,7 +47,7 @@ function exec_and_check()
EXP_RC=$1
shift
- OUTPUT=`$@`
+ OUTPUT=$("$@")
check_rc $EXP_RC $?
RC=$?
echo $OUTPUT
--
2.25.2

@@ -1,56 +0,0 @@
From 112f4b7266cae313e5a7f3d720360cdb294db496 Mon Sep 17 00:00:00 2001
From: Rafael Aquini <aquini@redhat.com>
Date: Sun, 12 Apr 2020 22:59:32 -0400
Subject: [PATCH] tests: include missing LDFLAGS to make targets
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/Makefile | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/tests/Makefile b/tests/Makefile
index 9fd15eb..216942e 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -120,32 +120,32 @@ shmoverride_linked.c: shmoverride_unlinked.c
obj32/%.o: %.c
@$(VECHO) CC32 $@
@mkdir -p obj32
- $(CC32) $(CPPFLAGS) $(CFLAGS) -o $@ -c $<
+ $(CC32) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -o $@ -c $<
obj64/%.o: %.c
@$(VECHO) CC64 $@
@mkdir -p obj64
- $(CC64) $(CPPFLAGS) $(CFLAGS) -o $@ -c $<
+ $(CC64) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -o $@ -c $<
obj32/%-pic.o: %.c
@$(VECHO) CC32 $@
@mkdir -p obj32
- $(CC32) $(CPPFLAGS) $(CFLAGS) -fPIC -o $@ -c $<
+ $(CC32) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -fPIC -o $@ -c $<
obj64/%-pic.o: %.c
@$(VECHO) CC64 $@
@mkdir -p obj64
- $(CC64) $(CPPFLAGS) $(CFLAGS) -fPIC -o $@ -c $<
+ $(CC64) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -fPIC -o $@ -c $<
obj32/libheapshrink.so: obj32/heapshrink-helper-pic.o
@$(VECHO) LD32 "(shared)" $@
@mkdir -p obj32
- $(CC32) -Wl,-soname,$(notdir $@) -shared -o $@ $^
+ $(CC32) $(LDFLAGS) -Wl,-soname,$(notdir $@) -shared -o $@ $^
obj64/libheapshrink.so: obj64/heapshrink-helper-pic.o
@$(VECHO) LD64 "(shared)" $@
@mkdir -p obj64
- $(CC64) -Wl,-soname,$(notdir $@) -shared -o $@ $^
+ $(CC64) $(LDFLAGS) -Wl,-soname,$(notdir $@) -shared -o $@ $^
$(LIB_TESTS:%=obj32/%): %: %.o obj32/testutils.o obj32/libtestutils.o
@$(VECHO) LD32 "(lib test)" $@
--
2.25.2

dead.package Normal file

@@ -0,0 +1 @@
libhugetlbfs package is retired on branch c10s for CS-2551

gating.yaml Normal file

@@ -0,0 +1,6 @@
--- !Policy
product_versions:
- rhel-8
decision_context: osci_compose_gate
rules:
- !PassingTestCaseRule {test_case_name: osci.brew-build.tier0.functional}

sources Normal file

@@ -0,0 +1,8 @@
SHA512 (libhugetlbfs-2.21.tar.gz) = 1179778bc85e51412d4c85b0581258dbe23ce7eb69fdd257e2eb007ec203c295bd97e281045de778c6c8942368e5288387ac3fcbd4860ff0cd0134ca0e0a35f8
SHA512 (0001-testutils-fix-range_is_mapped.patch) = 6188d741ad89ba1ca5117221aa9bf16b054fb4dc1f779bb56cc29ad2be302344accc4d21e17feac92ff9079da0b0b6fdc02f65da3b12d849e1d38f7cbc14e345
SHA512 (0002-stack_grow_into_huge-don-t-clobber-existing-mappings.patch) = 8f235ce4cb9a5df3c72d5df1ceb9e2b3e420da9270ad76748186f50e844ca8b3fc0be1df8c0c4fa1dabeab45b7d6d542b21ae3f16a0e59cbff291e066c1a45e6
SHA512 (huge_page_setup_helper-python3-convert.patch) = 491c20e8a9e029caeae545fa2ce6ca9216bacb19c4e0c882564a40e40eec768d3c163de73e50cb44ecff9e56f2c4d81a9205a19b1838b7290fe903110b4e1137
SHA512 (build_flags.patch) = 7f83d9b717362bfb60d4ccb4ef6e8868c86a21eb6e17e61fa1e05d34fe62c1a212253b0575ad768babcbadb915c70cc24430a752d42146730b7a0582608b493d
SHA512 (tests_shm-perms_adjust_max_segment_size_for_bigger_hugepages.patch) = a2eeca8d63ae81efd42c903fb144775edbbb5059c23e792264c4c27b1152babefbf45484f9c916af9f436157a7f42af5f0f1bbee7f5e0d89995d8076d59b3417
SHA512 (tests-fix-covscan-SHELLCHECK_WARNING-complaints.patch) = e48f712627499f7eaf0ab4ab284c61bdf659385cbc0955786b1b6b50ee89d73262246dd5b4d9f25d1cc223b89d6744338ef7a81cbb2bc2d5ee69994a2fd33cdb
SHA512 (tests-include-missing-LDFLAGS-to-make-targets.patch) = dc94af4aa90fb4a064c44d8d00941298a6bb2db520762372c44f83329e7db8e8aab1c854966541ab2260586f4b6bbdce607d3694b4f457246b9ce4d85534aaae

tests/.fmf/version Normal file

@@ -0,0 +1 @@
1

tests/libhugetlbfs-test.sh Executable file

@@ -0,0 +1,67 @@
#!/bin/bash
SUCCESS=0;
FAILURE=1;
RETCODE=255;
TMPFILE=/tmp/libhugetlbfs-test.log
function check_mem_size {
# libhugetlbfs tests need to allocate 512 hugepages
# so the system needs to have enough memory to accommodate
# that request plus extra headroom to run the code without hiccups
# in order to provide room for that need, let's ask for, at least,
# 60% more memory than the hugepage pool size, given the size
# of the hugepages for this system.
#MIN_MEM_KB=1872732;
MIN_MEM_KB=$(awk '/Hugepagesize:/ {print int($2*512*1.6);}' /proc/meminfo);
SYS_MEM_KB=$(awk '/MemTotal:/ {print $2;}' /proc/meminfo);
if [[ $MIN_MEM_KB -ge $SYS_MEM_KB ]]; then
RETCODE=$FAILURE;
else
RETCODE=$SUCCESS;
fi
}
function setup_and_check_hugepages {
echo 3 > /proc/sys/vm/drop_caches;
echo 1 > /proc/sys/vm/compact_memory;
echo 512 > /proc/sys/vm/nr_hugepages;
sleep 15;
NR_FREE_HP=$(awk '/HugePages_Free:/ {print $2;}' /proc/meminfo);
if [ $NR_FREE_HP -lt 512 ]; then
RETCODE=$FAILURE;
else
RETCODE=$SUCCESS;
fi
}
check_mem_size;
if [ $RETCODE != $SUCCESS ]; then
echo "ERROR: system does not have enough RAM";
exit $RETCODE;
fi
setup_and_check_hugepages;
if [ $RETCODE != $SUCCESS ]; then
echo "ERROR: not possible to allocate enough hugepages for a complete test";
exit $RETCODE;
fi
pushd /usr/lib64/libhugetlbfs/tests/;
./run_tests.py &> $TMPFILE;
popd;
FAILCNT=$(awk '/ FAIL:/ {print $3+$4}' $TMPFILE);
if [ "$FAILCNT" != "0" ]; then
cat $TMPFILE
echo "FAIL";
exit $FAILURE;
else
echo "PASS";
exit $SUCCESS;
fi

tests/provision.fmf Normal file

@@ -0,0 +1,5 @@
---
standard-inventory-qcow2:
qemu:
m: 3G

tests/tests.yml Normal file

@@ -0,0 +1,16 @@
---
- hosts: localhost
roles:
- role: standard-test-basic
tags:
- classic
required_packages:
- libhugetlbfs
- libhugetlbfs-utils
- libhugetlbfs-tests
- gawk
- python3
tests:
- libhugetlbfs_test:
dir: .
run: ./libhugetlbfs-test.sh