Compare commits

...

No commits in common. "imports/c8s/gcc-toolset-10-valgrind-3.16.0-2.el8" and "c8" have entirely different histories.

7 changed files with 703 additions and 1 deletion

valgrind-3.16.0-387-float.patch

@@ -0,0 +1,64 @@
commit 6aa4f7e7e76b40c183fb29650540d119ce1b4a4a
Author: Julian Seward <jseward@acm.org>
Date: Thu Jun 11 09:01:52 2020 +0200
expr_is_guardable, stmt_is_guardable, add_guarded_stmt_to_end_of: handle GetI/PutI cases.
This fixes #422715.
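As an illustrative sketch (not part of the patch): the guarding transformation that the hunks below extend to GetI/PutI rewrites a conditional guest-state write into an unconditional write of an if-then-else value, expressed here in plain C over a stand-in state array:

int state[8];                      /* stand-in for the guest state slice a PutI targets */

void guarded_puti(int guard, int ix, int e)
{
    int t1 = state[ix];            /* t1 = GetI(descr, ix, bias) */
    int t2 = guard ? e : t1;       /* t2 = ITE(guard, e, t1)     */
    state[ix] = t2;                /* PutI(descr, ix, bias, t2)  */
}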
diff --git a/VEX/priv/guest_generic_bb_to_IR.c b/VEX/priv/guest_generic_bb_to_IR.c
index 2f204c5b0..0cee970e4 100644
--- a/VEX/priv/guest_generic_bb_to_IR.c
+++ b/VEX/priv/guest_generic_bb_to_IR.c
@@ -425,6 +425,7 @@ static Bool expr_is_guardable ( const IRExpr* e )
case Iex_ITE:
case Iex_CCall:
case Iex_Get:
+ case Iex_GetI:
case Iex_Const:
case Iex_RdTmp:
return True;
@@ -450,6 +451,7 @@ static Bool stmt_is_guardable ( const IRStmt* st )
case Ist_NoOp:
case Ist_IMark:
case Ist_Put:
+ case Ist_PutI:
return True;
// These are definitely not guardable, or at least it's way too much
// hassle to do so.
@@ -506,7 +508,7 @@ static void add_guarded_stmt_to_end_of ( /*MOD*/IRSB* bb,
// Put(offs, e) ==> Put(offs, ITE(guard, e, Get(offs, sizeof(e))))
// Which when flattened out is:
// t1 = Get(offs, sizeof(e))
- // t2 = ITE(guard, e, t2)
+ // t2 = ITE(guard, e, t1)
// Put(offs, t2)
Int offset = st->Ist.Put.offset;
IRExpr* e = st->Ist.Put.data;
@@ -519,6 +521,26 @@ static void add_guarded_stmt_to_end_of ( /*MOD*/IRSB* bb,
addStmtToIRSB(bb, IRStmt_Put(offset, IRExpr_RdTmp(t2)));
break;
}
+ case Ist_PutI: {
+ // PutI(descr,ix,bias, e) ==> Put(descr,ix,bias, ITE(guard, e, GetI(descr,ix,bias)))
+ // Which when flattened out is:
+ // t1 = GetI(descr,ix,bias)
+ // t2 = ITE(guard, e, t1)
+ // PutI(descr,ix,bias, t2)
+ IRPutI* details = st->Ist.PutI.details;
+ IRRegArray* descr = details->descr;
+ IRExpr* ix = details->ix;
+ Int bias = details->bias;
+ IRExpr* e = details->data;
+ IRType ty = typeOfIRExpr(bb->tyenv, e);
+ IRTemp t1 = newIRTemp(bb->tyenv, ty);
+ IRTemp t2 = newIRTemp(bb->tyenv, ty);
+ addStmtToIRSB(bb, IRStmt_WrTmp(t1, IRExpr_GetI(descr,ix,bias)));
+ addStmtToIRSB(bb, IRStmt_WrTmp(t2, IRExpr_ITE(IRExpr_RdTmp(guard),
+ e, IRExpr_RdTmp(t1))));
+ addStmtToIRSB(bb, IRStmt_PutI(mkIRPutI(descr,ix,bias, IRExpr_RdTmp(t2))));
+ break;
+ }
case Ist_Exit: {
// Exit(xguard, dst, jk, offsIP)
// ==> t1 = And1(xguard, guard)

valgrind-3.16.0-ppc-L-field.patch

@@ -0,0 +1,29 @@
commit fb6f7abcbc92506d302fb18a2c5fc853d2929248
Author: Carl Love <cel@us.ibm.com>
Date: Tue Jun 9 10:42:03 2020 -0500
Power PC Fix extraction of the L field for sync instruction
The L field is currently a two-bit [22:21] field in ISA 3.0. The size of the
L field has changed over time.
Currently the Valgrind support code for the ISA 3.0 sync instruction sets
flag_L, which holds the instruction's L field, to a five-bit value that
includes bits marked reserved for the sync instruction. This patch fixes
the issue for ISA 3.0 by setting flag_L to only the specified two bits.
Valgrind bugzilla: https://bugs.kde.org/show_bug.cgi?id=422677
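For reference, IFIELD in guest_ppc_toIR.c extracts len bits starting at bit idx, counting from the least significant bit. A minimal standalone sketch of the corrected extraction, using the standard lwsync encoding (sync with L=1) as a test value:

#include <stdio.h>

/* Same shape as VEX's IFIELD macro. */
static unsigned ifield(unsigned instr, unsigned idx, unsigned len)
{
    return (instr >> idx) & ((1u << len) - 1);
}

int main(void)
{
    unsigned lwsync = 0x7C2004AC;                    /* sync form with L=1 */
    printf("flag_L = %u\n", ifield(lwsync, 21, 2));  /* prints flag_L = 1 */
    return 0;
}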
diff --git a/VEX/priv/guest_ppc_toIR.c b/VEX/priv/guest_ppc_toIR.c
index 582c59ec0..c4965a19e 100644
--- a/VEX/priv/guest_ppc_toIR.c
+++ b/VEX/priv/guest_ppc_toIR.c
@@ -8777,7 +8777,7 @@ static Bool dis_memsync ( UInt theInstr )
/* X-Form, XL-Form */
UChar opc1 = ifieldOPC(theInstr);
UInt b11to25 = IFIELD(theInstr, 11, 15);
- UChar flag_L = ifieldRegDS(theInstr);
+ UChar flag_L = IFIELD(theInstr, 21, 2); //ISA 3.0
UInt b11to20 = IFIELD(theInstr, 11, 10);
UInt M0 = IFIELD(theInstr, 11, 5);
UChar rD_addr = ifieldRegDS(theInstr);

valgrind-3.16.1-REX-prefix-JMP.patch

@@ -0,0 +1,38 @@
commit e2dec0ff9b1e071779bee2c4e6fc82f8194b1c1d
Author: Mark Wielaard <mark@klomp.org>
Date: Sun Jul 26 21:17:23 2020 +0200
Handle REX prefixed JMP instruction.
The .NET Core runtime might generate a JMP with a REX prefix.
For Jv (32bit offset) and Jb (8bit offset) this is valid.
Prefixes that change operand size are ignored for such JMPs.
So remove the check for sz == 4 and force sz = 4 for Jv.
https://bugs.kde.org/show_bug.cgi?id=422174
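A sketch of the decode rule being enforced (our own illustration, not VEX code): in 64-bit mode, E9 always carries a 32-bit displacement that is sign-extended and added to the address of the next instruction, regardless of operand-size prefixes:

#include <stdint.h>
#include <string.h>

/* rip_next: guest RIP immediately after the whole jmp instruction;
   disp: the four displacement bytes that follow the E9 opcode. */
static uint64_t jmp_rel32_target(uint64_t rip_next, const uint8_t disp[4])
{
    int32_t d;
    memcpy(&d, disp, sizeof d);    /* little-endian 32-bit immediate */
    return rip_next + (int64_t)d;  /* sign-extended to 64 bits */
}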
diff --git a/VEX/priv/guest_amd64_toIR.c b/VEX/priv/guest_amd64_toIR.c
index fadf47d41..7888132eb 100644
--- a/VEX/priv/guest_amd64_toIR.c
+++ b/VEX/priv/guest_amd64_toIR.c
@@ -21392,8 +21392,8 @@ Long dis_ESC_NONE (
case 0xE9: /* Jv (jump, 16/32 offset) */
if (haveF3(pfx)) goto decode_failure;
- if (sz != 4)
- goto decode_failure; /* JRS added 2004 July 11 */
+ sz = 4; /* Prefixes that change operand size are ignored for this
+ instruction. Operand size is forced to 32bit. */
if (haveF2(pfx)) DIP("bnd ; "); /* MPX bnd prefix. */
d64 = (guest_RIP_bbstart+delta+sz) + getSDisp(sz,delta);
delta += sz;
@@ -21404,8 +21404,7 @@ Long dis_ESC_NONE (
case 0xEB: /* Jb (jump, byte offset) */
if (haveF3(pfx)) goto decode_failure;
- if (sz != 4)
- goto decode_failure; /* JRS added 2004 July 11 */
+ /* Prefixes that change operand size are ignored for this instruction. */
if (haveF2(pfx)) DIP("bnd ; "); /* MPX bnd prefix. */
d64 = (guest_RIP_bbstart+delta+1) + getSDisp8(delta);
delta++;

valgrind-3.16.1-dl_runtime_resolve.patch

@@ -0,0 +1,206 @@
commit f4abcc05fdba3f25890a9b30b71d511ccc906d46
Author: Mark Wielaard <mark@klomp.org>
Date: Mon Jul 27 22:43:28 2020 +0200
Incorrect call-graph tracking due to new _dl_runtime_resolve_xsave*
Newer glibc have alternate ld.so _dl_runtime_resolve functions,
namely _dl_runtime_resolve_xsave and _dl_runtime_resolve_xsavec.
This patch recognizes the xsave, xsavec and fxsave variants and
changes callgrind so that any variant counts as _dl_runtime_resolve.
Original patch by paulo.cesar.pereira.de.andrade@gmail.com
https://bugs.kde.org/show_bug.cgi?id=415293
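Recognition is done by byte-pattern matching. A minimal sketch of the chunk scheme check_code uses (the zeroed bytes in the arrays below, relocated addresses and call targets, fall in the gaps between chunks and are ignored):

#include <string.h>

struct chunk { int start, len; };   /* a run of bytes that must match exactly */

/* The chunk list ends with a {pattern_length, 0} entry, matching the
   pattern definitions below; bytes between chunks are wildcards. */
static int matches(const unsigned char *code, const unsigned char *bytes,
                   const struct chunk *chunks)
{
    for (int i = 0; chunks[i].len != 0; i++)
        if (memcmp(code + chunks[i].start, bytes + chunks[i].start,
                   chunks[i].len) != 0)
            return 0;
    return 1;
}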
diff --git a/callgrind/fn.c b/callgrind/fn.c
index e9d8dd214..7cce1a0c7 100644
--- a/callgrind/fn.c
+++ b/callgrind/fn.c
@@ -30,8 +30,11 @@
static fn_array current_fn_active;
-static Addr runtime_resolve_addr = 0;
-static int runtime_resolve_length = 0;
+/* x86_64 defines 4 variants. */
+#define MAX_RESOLVE_ADDRS 4
+static int runtime_resolve_addrs = 0;
+static Addr runtime_resolve_addr[MAX_RESOLVE_ADDRS];
+static int runtime_resolve_length[MAX_RESOLVE_ADDRS];
// a code pattern is a list of tuples (start offset, length)
struct chunk_t { int start, len; };
@@ -56,6 +59,9 @@ static Bool check_code(obj_node* obj,
/* first chunk of pattern should always start at offset 0 and
* have at least 3 bytes */
CLG_ASSERT((pat->chunk[0].start == 0) && (pat->chunk[0].len >2));
+
+ /* and we cannot be called more than MAX_RESOLVE_ADDRS times */
+ CLG_ASSERT(runtime_resolve_addrs < MAX_RESOLVE_ADDRS);
CLG_DEBUG(1, "check_code: %s, pattern %s, check %d bytes of [%x %x %x...]\n",
obj->name, pat->name, pat->chunk[0].len, code[0], code[1], code[2]);
@@ -93,8 +99,9 @@ static Bool check_code(obj_node* obj,
pat->name, obj->name + obj->last_slash_pos,
addr - obj->start, addr, pat->len);
- runtime_resolve_addr = addr;
- runtime_resolve_length = pat->len;
+ runtime_resolve_addr[runtime_resolve_addrs] = addr;
+ runtime_resolve_length[runtime_resolve_addrs] = pat->len;
+ runtime_resolve_addrs++;
return True;
}
}
@@ -138,8 +145,9 @@ static Bool search_runtime_resolve(obj_node* obj)
"x86-glibc2.8", 30, {{ 0,12 }, { 16,14 }, { 30,0}} };
if (VG_(strncmp)(obj->name, "/lib/ld", 7) != 0) return False;
- if (check_code(obj, code, &pat)) return True;
- if (check_code(obj, code_28, &pat_28)) return True;
+ Bool pat_p = check_code(obj, code, &pat);
+ Bool pat_28_p = check_code(obj, code_28, &pat_28);
+ if (pat_p || pat_28_p) return True;
return False;
#endif
@@ -186,9 +194,98 @@ static Bool search_runtime_resolve(obj_node* obj)
static struct pattern pat = {
"amd64-def", 110, {{ 0,62 }, { 66,44 }, { 110,0 }} };
+ static UChar code_xsavec[] = {
+ /* 0*/ 0x53, 0x48, 0x89, 0xe3, 0x48, 0x83, 0xe4, 0xc0,
+ /* 8*/ 0x48, 0x2b, 0x25, 0x00, 0x00, 0x00, 0x00, /* sub <i32>(%rip),%rsp */
+ /*15*/ 0x48,
+ /*16*/ 0x89, 0x04, 0x24, 0x48, 0x89, 0x4c, 0x24, 0x08,
+ /*24*/ 0x48, 0x89, 0x54, 0x24, 0x10, 0x48, 0x89, 0x74,
+ /*32*/ 0x24, 0x18, 0x48, 0x89, 0x7c, 0x24, 0x20, 0x4c,
+ /*40*/ 0x89, 0x44, 0x24, 0x28, 0x4c, 0x89, 0x4c, 0x24,
+ /*48*/ 0x30, 0xb8, 0xee, 0x00, 0x00, 0x00, 0x31, 0xd2,
+ /*56*/ 0x48, 0x89, 0x94, 0x24, 0x50, 0x02, 0x00, 0x00,
+ /*64*/ 0x48, 0x89, 0x94, 0x24, 0x58, 0x02, 0x00, 0x00,
+ /*72*/ 0x48, 0x89, 0x94, 0x24, 0x60, 0x02, 0x00, 0x00,
+ /*80*/ 0x48, 0x89, 0x94, 0x24, 0x68, 0x02, 0x00, 0x00,
+ /*88*/ 0x48, 0x89, 0x94, 0x24, 0x70, 0x02, 0x00, 0x00,
+ /*96*/ 0x48, 0x89, 0x94, 0x24, 0x78, 0x02, 0x00, 0x00,
+ /*104*/ 0x0f, 0xc7, 0x64, 0x24, 0x40, 0x48, 0x8b, 0x73,
+ /*112*/0x10, 0x48, 0x8b, 0x7b, 0x08,
+ /*117*/0xe8, 0x00, 0x00, 0x00, 0x00, /* callq <_dl_fixup> */
+ /*122*/0x49, 0x89, 0xc3, 0xb8, 0xee, 0x00,
+ /*128*/0x00, 0x00, 0x31, 0xd2, 0x0f, 0xae, 0x6c, 0x24,
+ /*136*/0x40, 0x4c, 0x8b, 0x4c, 0x24, 0x30, 0x4c, 0x8b,
+ /*144*/0x44, 0x24, 0x28, 0x48, 0x8b, 0x7c, 0x24, 0x20,
+ /*152*/0x48, 0x8b, 0x74, 0x24, 0x18, 0x48, 0x8b, 0x54,
+ /*160*/0x24, 0x10, 0x48, 0x8b, 0x4c, 0x24, 0x08, 0x48,
+ /*168*/0x8b, 0x04, 0x24, 0x48, 0x89, 0xdc, 0x48, 0x8b,
+ /*176*/0x1c, 0x24, 0x48, 0x83, 0xc4, 0x18, 0xf2, 0x41,
+ /*184*/0xff, 0xe3 };
+ static struct pattern pat_xsavec = {
+ "amd64-xsavec", 186, {{ 0,11 }, { 15,103 }, {122,64}, { 186,0 }} };
+
+ static UChar code_xsave[] = {
+ /* 0*/ 0x53, 0x48, 0x89, 0xe3, 0x48, 0x83, 0xe4, 0xc0,
+ /* 8*/ 0x48, 0x2b, 0x25, 0x00, 0x00, 0x00, 0x00, /* sub <i32>(%rip),%rsp */
+ /*15*/ 0x48,
+ /*16*/ 0x89, 0x04, 0x24, 0x48, 0x89, 0x4c, 0x24, 0x08,
+ /*24*/ 0x48, 0x89, 0x54, 0x24, 0x10, 0x48, 0x89, 0x74,
+ /*32*/ 0x24, 0x18, 0x48, 0x89, 0x7c, 0x24, 0x20, 0x4c,
+ /*40*/ 0x89, 0x44, 0x24, 0x28, 0x4c, 0x89, 0x4c, 0x24,
+ /*48*/ 0x30, 0xb8, 0xee, 0x00, 0x00, 0x00, 0x31, 0xd2,
+ /*56*/ 0x48, 0x89, 0x94, 0x24, 0x40, 0x02, 0x00, 0x00,
+ /*64*/ 0x48, 0x89, 0x94, 0x24, 0x48, 0x02, 0x00, 0x00,
+ /*72*/ 0x48, 0x89, 0x94, 0x24, 0x50, 0x02, 0x00, 0x00,
+ /*80*/ 0x48, 0x89, 0x94, 0x24, 0x58, 0x02, 0x00, 0x00,
+ /*88*/ 0x48, 0x89, 0x94, 0x24, 0x60, 0x02, 0x00, 0x00,
+ /*96*/ 0x48, 0x89, 0x94, 0x24, 0x68, 0x02, 0x00, 0x00,
+ /*104*/0x48, 0x89, 0x94, 0x24, 0x70, 0x02, 0x00, 0x00,
+ /*112*/0x48, 0x89, 0x94, 0x24, 0x78, 0x02, 0x00, 0x00,
+ /*120*/0x0f, 0xae, 0x64, 0x24, 0x40, 0x48, 0x8b, 0x73,
+ /*128*/0x10, 0x48, 0x8b, 0x7b, 0x08,
+ /*133*/0xe8, 0x00, 0x00, 0x00, 0x00, /* callq <_dl_fixup> */
+ /*138*/0x49, 0x89, 0xc3, 0xb8, 0xee, 0x00,
+ /*144*/0x00, 0x00, 0x31, 0xd2, 0x0f, 0xae, 0x6c, 0x24,
+ /*152*/0x40, 0x4c, 0x8b, 0x4c, 0x24, 0x30, 0x4c, 0x8b,
+ /*160*/0x44, 0x24, 0x28, 0x48, 0x8b, 0x7c, 0x24, 0x20,
+ /*168*/0x48, 0x8b, 0x74, 0x24, 0x18, 0x48, 0x8b, 0x54,
+ /*176*/0x24, 0x10, 0x48, 0x8b, 0x4c, 0x24, 0x08, 0x48,
+ /*184*/0x8b, 0x04, 0x24, 0x48, 0x89, 0xdc, 0x48, 0x8b,
+ /*192*/0x1c, 0x24, 0x48, 0x83, 0xc4, 0x18, 0xf2, 0x41,
+ /*200*/0xff, 0xe3 };
+ static struct pattern pat_xsave = {
+ "amd64-xsave", 202, {{ 0,11 }, { 15,119 }, {138,64}, { 202,0 }} };
+
+ static UChar code_fxsave[] = {
+ /* 0*/ 0x53, 0x48, 0x89, 0xe3, 0x48, 0x83, 0xe4, 0xf0,
+ /* 8*/ 0x48, 0x81, 0xec, 0x40, 0x02, 0x00, 0x00, 0x48,
+ /*16*/ 0x89, 0x04, 0x24, 0x48, 0x89, 0x4c, 0x24, 0x08,
+ /*24*/ 0x48, 0x89, 0x54, 0x24, 0x10, 0x48, 0x89, 0x74,
+ /*32*/ 0x24, 0x18, 0x48, 0x89, 0x7c, 0x24, 0x20, 0x4c,
+ /*40*/ 0x89, 0x44, 0x24, 0x28, 0x4c, 0x89, 0x4c, 0x24,
+ /*48*/ 0x30, 0x0f, 0xae, 0x44, 0x24, 0x40, 0x48, 0x8b,
+ /*56*/ 0x73, 0x10, 0x48, 0x8b, 0x7b, 0x08,
+ /*62*/ 0xe8, 0x00, 0x00, 0x00, 0x00, /* callq <_dl_fixup> */
+ /*67*/ 0x49, 0x89, 0xc3, 0x0f, 0xae,
+ /*72*/ 0x4c, 0x24, 0x40, 0x4c, 0x8b, 0x4c, 0x24, 0x30,
+ /*80*/ 0x4c, 0x8b, 0x44, 0x24, 0x28, 0x48, 0x8b, 0x7c,
+ /*88*/ 0x24, 0x20, 0x48, 0x8b, 0x74, 0x24, 0x18, 0x48,
+ /*96*/ 0x8b, 0x54, 0x24, 0x10, 0x48, 0x8b, 0x4c, 0x24,
+ /*104*/0x08, 0x48, 0x8b, 0x04, 0x24, 0x48, 0x89, 0xdc,
+ /*112*/0x48, 0x8b, 0x1c, 0x24, 0x48, 0x83, 0xc4, 0x18,
+ /*120*/0xf2, 0x41, 0xff, 0xe3 };
+ static struct pattern pat_fxsave = {
+ "amd64-fxsave", 124, {{ 0,63 }, { 67,57 }, { 124,0 }} };
+
if ((VG_(strncmp)(obj->name, "/lib/ld", 7) != 0) &&
- (VG_(strncmp)(obj->name, "/lib64/ld", 9) != 0)) return False;
- return check_code(obj, code, &pat);
+ (VG_(strncmp)(obj->name, "/lib64/ld", 9) != 0) &&
+ (VG_(strncmp)(obj->name, "/usr/lib/ld", 11) != 0) &&
+ (VG_(strncmp)(obj->name, "/usr/lib64/ld", 13) != 0)) return False;
+ Bool pat_p = check_code(obj, code, &pat);
+ Bool pat_xsavec_p = check_code(obj, code_xsavec, &pat_xsavec);
+ Bool pat_xsave_p = check_code(obj, code_xsave, &pat_xsave);
+ Bool pat_fxsave_p = check_code(obj, code_fxsave, &pat_fxsave);
+ if (pat_p || pat_xsavec_p || pat_xsave_p || pat_fxsave_p) return True;
#endif
/* For other platforms, no patterns known */
@@ -254,7 +351,7 @@ obj_node* new_obj_node(DebugInfo* di, obj_node* next)
i++;
}
- if (runtime_resolve_addr == 0) search_runtime_resolve(obj);
+ if (runtime_resolve_addrs == 0) search_runtime_resolve(obj);
return obj;
}
@@ -490,6 +587,7 @@ fn_node* CLG_(get_fn_node)(BB* bb)
DebugInfo* di;
UInt line_num;
fn_node* fn;
+ Int i;
/* fn from debug info is idempotent for a BB */
if (bb->fn) return bb->fn;
@@ -538,12 +636,14 @@ fn_node* CLG_(get_fn_node)(BB* bb)
}
if (0 == VG_(strcmp)(fnname, "_exit") && !exit_bb)
exit_bb = bb;
-
- if (runtime_resolve_addr &&
- (bb_addr(bb) >= runtime_resolve_addr) &&
- (bb_addr(bb) < runtime_resolve_addr + runtime_resolve_length)) {
- /* BB in runtime_resolve found by code check; use this name */
- fnname = "_dl_runtime_resolve";
+
+ for (i = 0; i < runtime_resolve_addrs; i++) {
+ if ((bb_addr(bb) >= runtime_resolve_addr[i]) &&
+ (bb_addr(bb) < runtime_resolve_addr[i] + runtime_resolve_length[i])) {
+ /* BB in runtime_resolve found by code check; use this name */
+ fnname = "_dl_runtime_resolve";
+ break;
+ }
}
/* get fn_node struct for this function */

valgrind-3.16.1-epoll.patch

@@ -0,0 +1,117 @@
commit f326d68d762edf4b0e9604daa446b6f8ca25725a
Author: Mark Wielaard <mark@klomp.org>
Date: Sun Jul 26 22:40:22 2020 +0200
epoll_ctl warns for uninitialized padding on non-amd64 64bit arches
struct vki_epoll_event is packed on x86_64, but not on other 64bit
arches. This means that on those arches there can be padding in the
epoll_event struct. Separately, the data field is only used by user
space (which might not set it if it doesn't need to).
So only check the events field on epoll_ctl, but assume both events
and data are written by epoll_[p]wait (excluding padding).
https://bugs.kde.org/show_bug.cgi?id=422623
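To make the padding concrete, a sketch of the two layouts (our own illustration, not the kernel headers): x86_64 packs the struct, while on other 64-bit arches the natural 8-byte alignment of data leaves four uninitialized bytes after events:

#include <stdio.h>
#include <stdint.h>

struct event_packed {          /* x86_64 layout */
    uint32_t events;
    uint64_t data;
} __attribute__((packed));     /* sizeof == 12, no padding */

struct event_natural {         /* other 64-bit arches */
    uint32_t events;           /* offset 0 */
                               /* 4 bytes of padding here */
    uint64_t data;             /* offset 8 */
};                             /* sizeof == 16 */

int main(void)
{
    printf("%zu %zu\n", sizeof(struct event_packed),
                        sizeof(struct event_natural));  /* prints: 12 16 */
    return 0;
}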
diff --git a/coregrind/m_syswrap/syswrap-linux.c b/coregrind/m_syswrap/syswrap-linux.c
index 5b5b7eee6..929a4d9af 100644
--- a/coregrind/m_syswrap/syswrap-linux.c
+++ b/coregrind/m_syswrap/syswrap-linux.c
@@ -2099,8 +2099,29 @@ PRE(sys_epoll_ctl)
SARG1, ( ARG2<3 ? epoll_ctl_s[ARG2] : "?" ), SARG3, ARG4);
PRE_REG_READ4(long, "epoll_ctl",
int, epfd, int, op, int, fd, struct vki_epoll_event *, event);
- if (ARG2 != VKI_EPOLL_CTL_DEL)
- PRE_MEM_READ( "epoll_ctl(event)", ARG4, sizeof(struct vki_epoll_event) );
+ if (ARG2 != VKI_EPOLL_CTL_DEL) {
+ /* Just check the events field, the data field is for user space and
+ unused by the kernel. */
+ struct vki_epoll_event *event = (struct vki_epoll_event *) ARG4;
+ PRE_MEM_READ( "epoll_ctl(event)", (Addr) &event->events,
+ sizeof(__vki_u32) );
+ }
+}
+
+/* RES event records have been written (exclude padding). */
+static void epoll_post_helper ( ThreadId tid, SyscallArgs* arrghs,
+ SyscallStatus* status )
+{
+ vg_assert(SUCCESS);
+ if (RES > 0) {
+ Int i;
+ struct vki_epoll_event **events = (struct vki_epoll_event**)(Addr)ARG2;
+ for (i = 0; i < RES; i++) {
+ /* Assume both events and data are set (data is user space only). */
+ POST_FIELD_WRITE(events[i]->events);
+ POST_FIELD_WRITE(events[i]->data);
+ }
+ }
}
PRE(sys_epoll_wait)
@@ -2111,13 +2132,12 @@ PRE(sys_epoll_wait)
PRE_REG_READ4(long, "epoll_wait",
int, epfd, struct vki_epoll_event *, events,
int, maxevents, int, timeout);
+ /* Assume all (maxevents) events records should be (fully) writable. */
PRE_MEM_WRITE( "epoll_wait(events)", ARG2, sizeof(struct vki_epoll_event)*ARG3);
}
POST(sys_epoll_wait)
{
- vg_assert(SUCCESS);
- if (RES > 0)
- POST_MEM_WRITE( ARG2, sizeof(struct vki_epoll_event)*RES ) ;
+ epoll_post_helper (tid, arrghs, status);
}
PRE(sys_epoll_pwait)
@@ -2130,15 +2150,14 @@ PRE(sys_epoll_pwait)
int, epfd, struct vki_epoll_event *, events,
int, maxevents, int, timeout, vki_sigset_t *, sigmask,
vki_size_t, sigsetsize);
+ /* Assume all (maxevents) events records should be (fully) writable. */
PRE_MEM_WRITE( "epoll_pwait(events)", ARG2, sizeof(struct vki_epoll_event)*ARG3);
if (ARG5)
PRE_MEM_READ( "epoll_pwait(sigmask)", ARG5, sizeof(vki_sigset_t) );
}
POST(sys_epoll_pwait)
{
- vg_assert(SUCCESS);
- if (RES > 0)
- POST_MEM_WRITE( ARG2, sizeof(struct vki_epoll_event)*RES ) ;
+ epoll_post_helper (tid, arrghs, status);
}
PRE(sys_eventfd)
commit b74f9f23c8758c77367f18368ea95baa858544cb
Author: Mark Wielaard <mark@klomp.org>
Date: Tue Aug 18 23:58:55 2020 +0200
Fix epoll_ctl setting of array event and data fields.
The fix for https://bugs.kde.org/show_bug.cgi?id=422623 in commit ecf5ba119,
"epoll_ctl warns for uninitialized padding on non-amd64 64bit arches",
contained a bug: a pointer to an array is not a pointer to a pointer to
an array. Found by a Fedora user:
https://bugzilla.redhat.com/show_bug.cgi?id=1844778#c10
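The bug in miniature (illustration only, hypothetical types): ARG2 is the address of the first element of an array of records, so the cast must yield an element pointer; the double-pointer version made events[i] load eight bytes of event payload and then dereference them as an address:

struct ev { unsigned events; unsigned long data; };

void post_helper(unsigned long arg2, long res)
{
    /* Wrong: struct ev **events = (struct ev **)arg2;
       then events[i]->events treats event payload bytes as a pointer
       and dereferences them. */
    struct ev *events = (struct ev *)arg2;   /* element pointer */
    for (long i = 0; i < res; i++) {
        (void)events[i].events;              /* plain field accesses */
        (void)events[i].data;
    }
}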
diff --git a/coregrind/m_syswrap/syswrap-linux.c b/coregrind/m_syswrap/syswrap-linux.c
index 0850487e9..3f488795a 100644
--- a/coregrind/m_syswrap/syswrap-linux.c
+++ b/coregrind/m_syswrap/syswrap-linux.c
@@ -2115,11 +2115,11 @@ static void epoll_post_helper ( ThreadId tid, SyscallArgs* arrghs,
vg_assert(SUCCESS);
if (RES > 0) {
Int i;
- struct vki_epoll_event **events = (struct vki_epoll_event**)(Addr)ARG2;
+ struct vki_epoll_event *events = (struct vki_epoll_event*)(Addr)ARG2;
for (i = 0; i < RES; i++) {
/* Assume both events and data are set (data is user space only). */
- POST_FIELD_WRITE(events[i]->events);
- POST_FIELD_WRITE(events[i]->data);
+ POST_FIELD_WRITE(events[i].events);
+ POST_FIELD_WRITE(events[i].data);
}
}
}

valgrind-3.16.1-sched_getsetattr.patch

@@ -0,0 +1,201 @@
commit a53adb79711ccfc76a4ee32b20253045cdab55c7
Author: Mark Wielaard <mark@klomp.org>
Date: Mon Jul 27 16:36:17 2020 +0200
Handle linux syscalls sched_getattr and sched_setattr
The only "special" thing about these syscalls is that the given
struct sched_attr determines its own size for future expansion.
Original fix by "ISHIKAWA,chiaki" <ishikawa@yk.rim.or.jp>
https://bugs.kde.org/show_bug.cgi?id=369029
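A sketch of the self-sizing convention the wrappers below rely on (our own illustration): read the leading 32-bit size field first, then treat exactly that many bytes as the kernel-visible struct, which is what lets struct sched_attr grow in future kernels:

#include <stdint.h>
#include <string.h>

struct attr_hdr { uint32_t size; };   /* leading field; the rest may grow */

static uint32_t attr_live_bytes(const void *user_attr)
{
    struct attr_hdr hdr;
    memcpy(&hdr, user_attr, sizeof hdr);  /* only the size field is
                                             guaranteed to be readable */
    return hdr.size;                      /* bytes the kernel reads or writes */
}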
diff --git a/coregrind/m_syswrap/priv_syswrap-linux.h b/coregrind/m_syswrap/priv_syswrap-linux.h
index cdc73c1e6..eb0b320ca 100644
--- a/coregrind/m_syswrap/priv_syswrap-linux.h
+++ b/coregrind/m_syswrap/priv_syswrap-linux.h
@@ -227,6 +227,8 @@ DECL_TEMPLATE(linux, sys_fremovexattr);
// syscalls.
DECL_TEMPLATE(linux, sys_sched_setparam);
DECL_TEMPLATE(linux, sys_sched_getparam);
+DECL_TEMPLATE(linux, sys_sched_setattr);
+DECL_TEMPLATE(linux, sys_sched_getattr);
DECL_TEMPLATE(linux, sys_sched_setscheduler);
DECL_TEMPLATE(linux, sys_sched_getscheduler);
DECL_TEMPLATE(linux, sys_sched_yield);
diff --git a/coregrind/m_syswrap/syswrap-amd64-linux.c b/coregrind/m_syswrap/syswrap-amd64-linux.c
index 28d90135a..d6f3eb910 100644
--- a/coregrind/m_syswrap/syswrap-amd64-linux.c
+++ b/coregrind/m_syswrap/syswrap-amd64-linux.c
@@ -846,9 +846,8 @@ static SyscallTableEntry syscall_table[] = {
LINX_(__NR_process_vm_writev, sys_process_vm_writev),// 311
LINX_(__NR_kcmp, sys_kcmp), // 312
LINX_(__NR_finit_module, sys_finit_module), // 313
-// LIN__(__NR_sched_setattr, sys_ni_syscall), // 314
-
-// LIN__(__NR_sched_getattr, sys_ni_syscall), // 315
+ LINX_(__NR_sched_setattr, sys_sched_setattr), // 314
+ LINXY(__NR_sched_getattr, sys_sched_getattr), // 315
LINX_(__NR_renameat2, sys_renameat2), // 316
// LIN__(__NR_seccomp, sys_ni_syscall), // 317
LINXY(__NR_getrandom, sys_getrandom), // 318
diff --git a/coregrind/m_syswrap/syswrap-arm-linux.c b/coregrind/m_syswrap/syswrap-arm-linux.c
index 579542785..70700e53f 100644
--- a/coregrind/m_syswrap/syswrap-arm-linux.c
+++ b/coregrind/m_syswrap/syswrap-arm-linux.c
@@ -1009,6 +1009,8 @@ static SyscallTableEntry syscall_main_table[] = {
LINXY(__NR_process_vm_readv, sys_process_vm_readv), // 376
LINX_(__NR_process_vm_writev, sys_process_vm_writev),// 377
+ LINX_(__NR_sched_setattr, sys_sched_setattr), // 380
+ LINXY(__NR_sched_getattr, sys_sched_getattr), // 381
LINX_(__NR_renameat2, sys_renameat2), // 382
LINXY(__NR_getrandom, sys_getrandom), // 384
diff --git a/coregrind/m_syswrap/syswrap-arm64-linux.c b/coregrind/m_syswrap/syswrap-arm64-linux.c
index 81e01456f..acca02442 100644
--- a/coregrind/m_syswrap/syswrap-arm64-linux.c
+++ b/coregrind/m_syswrap/syswrap-arm64-linux.c
@@ -806,8 +806,8 @@ static SyscallTableEntry syscall_main_table[] = {
LINX_(__NR_process_vm_writev, sys_process_vm_writev), // 271
LINX_(__NR_kcmp, sys_kcmp), // 272
LINX_(__NR_finit_module, sys_finit_module), // 273
- // (__NR_sched_setattr, sys_ni_syscall), // 274
- // (__NR_sched_getattr, sys_ni_syscall), // 275
+ LINX_(__NR_sched_setattr, sys_sched_setattr), // 274
+ LINXY(__NR_sched_getattr, sys_sched_getattr), // 275
LINX_(__NR_renameat2, sys_renameat2), // 276
// (__NR_seccomp, sys_ni_syscall), // 277
LINXY(__NR_getrandom, sys_getrandom), // 278
diff --git a/coregrind/m_syswrap/syswrap-linux.c b/coregrind/m_syswrap/syswrap-linux.c
index 5b5b7eee6..56be3032d 100644
--- a/coregrind/m_syswrap/syswrap-linux.c
+++ b/coregrind/m_syswrap/syswrap-linux.c
@@ -3677,6 +3677,41 @@ POST(sys_sched_getparam)
POST_MEM_WRITE( ARG2, sizeof(struct vki_sched_param) );
}
+PRE(sys_sched_setattr)
+{
+ struct vki_sched_attr *attr;
+ PRINT("sched_setattr ( %ld, %#" FMT_REGWORD "x, %#"
+ FMT_REGWORD "x )", SARG1, ARG2, ARG3 );
+ PRE_REG_READ3(long, "sched_setattr",
+ vki_pid_t, pid, struct sched_attr *, p, unsigned int, flags);
+ /* We need to be able to read at least the size field. */
+ PRE_MEM_READ( "sched_setattr(attr->size)", ARG2, sizeof(vki_uint32_t) );
+ attr = (struct vki_sched_attr *)(Addr)ARG2;
+ if (ML_(safe_to_deref)(attr,sizeof(vki_uint32_t)))
+ PRE_MEM_READ( "sched_setattr(attr)", (Addr)attr, attr->size);
+}
+
+PRE(sys_sched_getattr)
+{
+ struct vki_sched_attr *attr;
+ PRINT("sched_getattr ( %ld, %#" FMT_REGWORD "x, %ld, %#"
+ FMT_REGWORD "x )", SARG1, ARG2, ARG3, ARG4 );
+ PRE_REG_READ4(long, "sched_getattr",
+ vki_pid_t, pid, struct sched_attr *, p,
+ unsigned int, size, unsigned int, flags);
+ /* We need to be able to read at least the size field. */
+ PRE_MEM_READ( "sched_setattr(attr->size)", ARG2, sizeof(vki_uint32_t) );
+ /* And the kernel needs to be able to write to the whole struct size. */
+ attr = (struct vki_sched_attr *)(Addr)ARG2;
+ if (ML_(safe_to_deref)(attr,sizeof(vki_uint32_t)))
+ PRE_MEM_WRITE( "sched_setattr(attr)", (Addr)attr, attr->size);
+}
+POST(sys_sched_getattr)
+{
+ struct vki_sched_attr *attr = (struct vki_sched_attr *)(Addr)ARG2;
+ POST_MEM_WRITE( (Addr)attr, attr->size );
+}
+
PRE(sys_sched_getscheduler)
{
PRINT("sys_sched_getscheduler ( %ld )", SARG1);
diff --git a/coregrind/m_syswrap/syswrap-ppc32-linux.c b/coregrind/m_syswrap/syswrap-ppc32-linux.c
index eed12a1bc..c19cb9e0e 100644
--- a/coregrind/m_syswrap/syswrap-ppc32-linux.c
+++ b/coregrind/m_syswrap/syswrap-ppc32-linux.c
@@ -1016,6 +1016,9 @@ static SyscallTableEntry syscall_table[] = {
LINXY(__NR_process_vm_readv, sys_process_vm_readv), // 351
LINX_(__NR_process_vm_writev, sys_process_vm_writev),// 352
+ LINX_(__NR_sched_setattr, sys_sched_setattr), // 355
+ LINXY(__NR_sched_getattr, sys_sched_getattr), // 356
+
LINXY(__NR_getrandom, sys_getrandom), // 359
LINXY(__NR_memfd_create, sys_memfd_create), // 360
diff --git a/coregrind/m_syswrap/syswrap-ppc64-linux.c b/coregrind/m_syswrap/syswrap-ppc64-linux.c
index d58200b49..b6422a765 100644
--- a/coregrind/m_syswrap/syswrap-ppc64-linux.c
+++ b/coregrind/m_syswrap/syswrap-ppc64-linux.c
@@ -998,6 +998,8 @@ static SyscallTableEntry syscall_table[] = {
LINXY(__NR_process_vm_readv, sys_process_vm_readv), // 351
LINX_(__NR_process_vm_writev, sys_process_vm_writev),// 352
+ LINX_(__NR_sched_setattr, sys_sched_setattr), // 355
+ LINXY(__NR_sched_getattr, sys_sched_getattr), // 356
LINX_(__NR_renameat2, sys_renameat2), // 357
LINXY(__NR_getrandom, sys_getrandom), // 359
diff --git a/coregrind/m_syswrap/syswrap-s390x-linux.c b/coregrind/m_syswrap/syswrap-s390x-linux.c
index a0a330aa2..3427fee16 100644
--- a/coregrind/m_syswrap/syswrap-s390x-linux.c
+++ b/coregrind/m_syswrap/syswrap-s390x-linux.c
@@ -825,8 +825,8 @@ static SyscallTableEntry syscall_table[] = {
LINX_(__NR_kcmp, sys_kcmp), // 343
// ?????(__NR_finit_module, ), // 344
-// ?????(__NR_sched_setattr, ), // 345
-// ?????(__NR_sched_getattr, ), // 346
+ LINX_(__NR_sched_setattr, sys_sched_setattr), // 345
+ LINXY(__NR_sched_getattr, sys_sched_getattr), // 346
LINX_(__NR_renameat2, sys_renameat2), // 347
// ?????(__NR_seccomp, ), // 348
LINXY(__NR_getrandom, sys_getrandom), // 349
diff --git a/coregrind/m_syswrap/syswrap-x86-linux.c b/coregrind/m_syswrap/syswrap-x86-linux.c
index 332ed0bf2..b59d96f37 100644
--- a/coregrind/m_syswrap/syswrap-x86-linux.c
+++ b/coregrind/m_syswrap/syswrap-x86-linux.c
@@ -1580,8 +1580,8 @@ static SyscallTableEntry syscall_table[] = {
LINX_(__NR_kcmp, sys_kcmp), // 349
// LIN__(__NR_finit_module, sys_ni_syscall), // 350
-// LIN__(__NR_sched_setattr, sys_ni_syscall), // 351
-// LIN__(__NR_sched_getattr, sys_ni_syscall), // 352
+ LINX_(__NR_sched_setattr, sys_sched_setattr), // 351
+ LINXY(__NR_sched_getattr, sys_sched_getattr), // 352
LINX_(__NR_renameat2, sys_renameat2), // 353
// LIN__(__NR_seccomp, sys_ni_syscall), // 354
diff --git a/include/vki/vki-linux.h b/include/vki/vki-linux.h
index 75b583165..ef93b9258 100644
--- a/include/vki/vki-linux.h
+++ b/include/vki/vki-linux.h
@@ -410,6 +410,23 @@ struct vki_sched_param {
int sched_priority;
};
+struct vki_sched_attr {
+ vki_uint32_t size;
+ vki_uint32_t sched_policy;
+ vki_uint64_t sched_flags;
+
+ /* SCHED_NORMAL, SCHED_BATCH */
+ vki_int32_t sched_nice;
+
+ /* SCHED_FIFO, SCHED_RR */
+ vki_uint32_t sched_priority;
+
+ /* SCHED_DEADLINE */
+ vki_uint64_t sched_runtime;
+ vki_uint64_t sched_deadline;
+ vki_uint64_t sched_period;
+};
+
#define VKI_TASK_COMM_LEN 16
//----------------------------------------------------------------------

valgrind.spec

@@ -6,7 +6,7 @@
Summary: Tool for finding memory management bugs in programs
Name: %{?scl_prefix}valgrind
Version: 3.16.0
-Release: 2%{?dist}
+Release: 6%{?dist}
Epoch: 1
License: GPLv2+
URL: http://www.valgrind.org/
@@ -67,6 +67,12 @@ URL: http://www.valgrind.org/
%endif
%endif
+# Remove any pkgconfig provides for SCL devel packages.
+# These should only be provided by the base packages.
+%if %{is_scl}
+%define __provides_exclude_from ^%{_libdir}/pkgconfig/.*.pc$
+%endif
# Generating minisymtabs doesn't really work for the staticly linked
# tools. Note (below) that we don't strip the vgpreload libraries at all
# because valgrind might read and need the debuginfo in those (client)
@@ -96,6 +102,24 @@ Patch5: valgrind-3.16.0-some-stack-protector.patch
# Add some -Wl,z,now.
Patch6: valgrind-3.16.0-some-Wl-z-now.patch
+# KDE#422677 PPC sync instruction L field should only be 2 bits in ISA 3.0
+Patch7: valgrind-3.16.0-ppc-L-field.patch
+# KDE#422715 x86: vex: the `impossible' happened: expr_is_guardable
+Patch8: valgrind-3.16.0-387-float.patch
+# KDE#422174 unhandled instruction bytes: 0x48 0xE9 (REX prefix JMP instr)
+Patch9: valgrind-3.16.1-REX-prefix-JMP.patch
+# KDE#422623 epoll_ctl warns for uninit padding on non-amd64 64bit arches
+Patch10: valgrind-3.16.1-epoll.patch
+# KDE#369029 handle linux syscalls sched_getattr and sched_setattr
+Patch11: valgrind-3.16.1-sched_getsetattr.patch
+# KDE#415293 Incorrect call-graph tracking due to new _dl_runtime_resolve*
+Patch12: valgrind-3.16.1-dl_runtime_resolve.patch
BuildRequires: glibc-devel
%if %{build_openmpi}
@@ -227,6 +251,13 @@ Valgrind User Manual for details.
%patch6 -p1
%endif
+%patch7 -p1
+%patch8 -p1
+%patch9 -p1
+%patch10 -p1
+%patch11 -p1
+%patch12 -p1
%build
# Some patches (might) touch Makefile.am or configure.ac files.
@@ -446,6 +477,22 @@ fi
%endif
%changelog
+* Thu Jul 29 2021 Mark Wielaard <mjw@redhat.com> - 3.16.0-6
+- Use __provides_exclude_from instead of filter_from_provides
+* Tue Jun 29 2021 Mark Wielaard <mjw@redhat.com> - 3.16.0-5
+- Filter out pkgconfig provides
+* Wed Oct 21 2020 Mark Wielaard <mjw@redhat.com> - 3.16.0-4
+- Add valgrind-3.16.1-REX-prefix-JMP.patch
+- Add valgrind-3.16.1-epoll.patch
+- Add valgrind-3.16.1-sched_getsetattr.patch
+- Add valgrind-3.16.1-dl_runtime_resolve.patch
+* Wed Jun 24 2020 Mark Wielaard <mjw@redhat.com> - 3.16.0-3
+- Add valgrind-3.16.0-ppc-L-field.patch
+- Add valgrind-3.16.0-387-float.patch
* Thu May 28 2020 Mark Wielaard <mjw@redhat.com> - 3.16.0-2
- Apply stack-protector and -Wl,z,now patches.