- Backport: support: Add support_next_to_fault_before support function
- Backport: posix: Rewrite cpuset tests
Resolves: RHEL-61569
commit fa53723cdb6f0338558e57a2a0a6459c00a1bc5f
Author: Frédéric Bérat <fberat@redhat.com>
Date:   Fri Nov 29 14:48:43 2024 +0100

    support: Add support_next_to_fault_before support function

    Refactor support_next_to_fault and add the support_next_to_fault_before
    method, which returns a buffer with a protected page before it, so that
    buffer underflow accesses can be tested.

    Reviewed-by: Tulio Magno Quites Machado Filho <tuliom@redhat.com>

    # Conflicts:
    #	support/next_to_fault.c (copyright update)
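
As a usage illustration (not part of this patch), a glibc-style test could
request a buffer guarded against underflow with the new function roughly as
follows; the test body is a hypothetical sketch:

#include <string.h>
#include <support/check.h>
#include <support/next_to_fault.h>

static int
do_test (void)
{
  /* The buffer starts immediately after a PROT_NONE page, so any read or
     write before buffer[0] by the code under test faults and the test
     fails with a crash.  */
  struct support_next_to_fault ntf
    = support_next_to_fault_allocate_before (16);

  /* In-bounds accesses work as usual.  */
  memset (ntf.buffer, 'x', ntf.length);

  /* Call the function under test with ntf.buffer here; an underflowing
     access would hit the protected page.  */

  support_next_to_fault_free (&ntf);
  return 0;
}

#include <support/test-driver.c>
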
diff --git a/support/next_to_fault.c b/support/next_to_fault.c
index 6d9bcc8ea74c4a5f..b60d5a574484f8e7 100644
--- a/support/next_to_fault.c
+++ b/support/next_to_fault.c
@@ -1,5 +1,5 @@
-/* Memory allocation next to an unmapped page.
-   Copyright (C) 2017-2021 Free Software Foundation, Inc.
+/* Memory allocation either before or after an unmapped page.
+   Copyright (C) 2017-2025 Free Software Foundation, Inc.
    This file is part of the GNU C Library.

    The GNU C Library is free software; you can redistribute it and/or
@@ -16,34 +16,58 @@
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */

+#include <stdbool.h>
 #include <support/check.h>
 #include <support/next_to_fault.h>
 #include <support/xunistd.h>
 #include <sys/mman.h>
 #include <sys/param.h>

-struct support_next_to_fault
-support_next_to_fault_allocate (size_t size)
+static struct support_next_to_fault
+support_next_to_fault_allocate_any (size_t size, bool fault_after_alloc)
 {
   long page_size = sysconf (_SC_PAGE_SIZE);
+  long protect_offset = 0;
+  long buffer_offset = page_size;
+
   TEST_VERIFY_EXIT (page_size > 0);
   struct support_next_to_fault result;
   result.region_size = roundup (size, page_size) + page_size;
   if (size + page_size <= size || result.region_size <= size)
-    FAIL_EXIT1 ("support_next_to_fault_allocate (%zu): overflow", size);
+    FAIL_EXIT1 ("%s (%zu): overflow", __func__, size);
   result.region_start
     = xmmap (NULL, result.region_size, PROT_READ | PROT_WRITE,
              MAP_PRIVATE | MAP_ANONYMOUS, -1);
-  /* Unmap the page after the allocation.  */
-  xmprotect (result.region_start + (result.region_size - page_size),
-             page_size, PROT_NONE);
-  /* Align the allocation within the region so that it ends just
-     before the PROT_NONE page.  */
-  result.buffer = result.region_start + result.region_size - page_size - size;
+
+  if (fault_after_alloc)
+    {
+      protect_offset = result.region_size - page_size;
+      buffer_offset = protect_offset - size;
+    }
+
+  /* Unmap the page before or after the allocation.  */
+  xmprotect (result.region_start + protect_offset, page_size, PROT_NONE);
+  /* Align the allocation within the region so that it starts after or ends
+     just before the PROT_NONE page.  */
+  result.buffer = result.region_start + buffer_offset;
   result.length = size;
   return result;
 }

+/* Unmapped a page after the buffer */
+struct support_next_to_fault
+support_next_to_fault_allocate (size_t size)
+{
+  return support_next_to_fault_allocate_any (size, true);
+}
+
+/* Unmapped a page before the buffer */
+struct support_next_to_fault
+support_next_to_fault_allocate_before (size_t size)
+{
+  return support_next_to_fault_allocate_any (size, false);
+}
+
 void
 support_next_to_fault_free (struct support_next_to_fault *ntf)
 {
diff --git a/support/next_to_fault.h b/support/next_to_fault.h
index bd2a0cffedbdc5dc..eb8ff24412f8ae94 100644
--- a/support/next_to_fault.h
+++ b/support/next_to_fault.h
@@ -41,6 +41,11 @@ struct support_next_to_fault
    fault).  */
 struct support_next_to_fault support_next_to_fault_allocate (size_t size);

+/* Allocate a buffer of SIZE bytes just *after* a page which is mapped
+   with PROT_NONE (so that under-running the buffer will cause a
+   fault).  */
+struct support_next_to_fault support_next_to_fault_allocate_before (size_t size);
+
 /* Deallocate the memory region allocated by
    next_to_fault_allocate.  */
 void support_next_to_fault_free (struct support_next_to_fault *);
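
For reference, the placement of the buffer relative to the PROT_NONE page in
each variant can be checked from the region_start and region_size fields of
struct support_next_to_fault. The following stand-alone test is an
illustrative sketch, not part of the patch:

#include <support/check.h>
#include <support/next_to_fault.h>
#include <unistd.h>

static int
do_test (void)
{
  size_t page_size = sysconf (_SC_PAGE_SIZE);

  /* Overflow variant: the buffer ends right before the PROT_NONE page.  */
  struct support_next_to_fault after = support_next_to_fault_allocate (100);
  TEST_VERIFY (after.buffer + after.length
               == after.region_start + after.region_size - page_size);

  /* Underflow variant: the buffer starts right after the PROT_NONE page.  */
  struct support_next_to_fault before
    = support_next_to_fault_allocate_before (100);
  TEST_VERIFY (before.buffer == before.region_start + page_size);

  support_next_to_fault_free (&after);
  support_next_to_fault_free (&before);
  return 0;
}

#include <support/test-driver.c>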