Upgrade to valgrind 3.19.0

Resolves: #2067187 - fails with "VEX temporary storage exhausted" on ppc64le
Resolves: #2057629 - routine rebase of valgrind for rhel 9.1
Resolves: #2030626 - reports conditional jump depends on uninitialised value in res_send.c
Author: Mark Wielaard
Date:   2022-04-19 18:53:11 +02:00
Parent: a14089fc99
Commit: 9445078b8b
22 changed files with 16 additions and 5342 deletions

.gitignore

@@ -42,3 +42,4 @@
/valgrind-3.17.0.RC2.tar.bz2
/valgrind-3.17.0.tar.bz2
/valgrind-3.18.1.tar.bz2
+/valgrind-3.19.0.tar.bz2


@@ -10,6 +10,7 @@ annocheck:
# Note that all (default hardened) flags need to be repeated here, if
# you override some config flags it will completely overwrite the
# defaults (--ignore-unknown --verbose).
jobs:
- hardened: --ignore-unknown --verbose --skip-lto
# Ignore files built specially without hardening flags
ignore:


@@ -1 +1 @@
-SHA512 (valgrind-3.18.1.tar.bz2) = a03b5cd7eafab4a1cea07f46464c1546ae1cb3d106649626b1e55658badf90e58d1f3854a38a33d5dffd8237f5555ae7e1f27a4b40e06254f87825c7fc61b59b
+SHA512 (valgrind-3.19.0.tar.bz2) = f720a89dc4c4989cc5714bff9efe97529f71990bcfad7a92b889ce099c4326d6da07fa4d5fbab2e9125e20f352354f6178471e49e419b613a3c82c2a1c667ab2


@@ -1,105 +0,0 @@
commit 595341b150312d2407bd43304449bf39ec3e1fa8
Author: Julian Seward <jseward@acm.org>
Date: Sat Nov 13 19:59:07 2021 +0100
amd64 front end: add more spec rules:
S after SHRQ
Z after SHLQ
NZ after SHLQ
Z after SHLL
S after SHLL
The lack of at least one of these was observed to cause occasional false
positives in Memcheck.
Plus add commented-out cases so as to complete the set of 12 rules
{Z,NZ,S,NS} after {SHRQ,SHLQ,SHLL}. The commented-out ones are commented
out because I so far didn't find any use cases for them.
diff --git a/VEX/priv/guest_amd64_helpers.c b/VEX/priv/guest_amd64_helpers.c
index 9d61e7a0f..ba71c1b62 100644
--- a/VEX/priv/guest_amd64_helpers.c
+++ b/VEX/priv/guest_amd64_helpers.c
@@ -1823,16 +1823,26 @@ IRExpr* guest_amd64_spechelper ( const HChar* function_name,
/*---------------- SHRQ ----------------*/
if (isU64(cc_op, AMD64G_CC_OP_SHRQ) && isU64(cond, AMD64CondZ)) {
- /* SHRQ, then Z --> test dep1 == 0 */
+ /* SHRQ, then Z --> test result[63:0] == 0 */
return unop(Iop_1Uto64,
binop(Iop_CmpEQ64, cc_dep1, mkU64(0)));
}
if (isU64(cc_op, AMD64G_CC_OP_SHRQ) && isU64(cond, AMD64CondNZ)) {
- /* SHRQ, then NZ --> test dep1 != 0 */
+ /* SHRQ, then NZ --> test result[63:0] != 0 */
return unop(Iop_1Uto64,
binop(Iop_CmpNE64, cc_dep1, mkU64(0)));
}
+ if (isU64(cc_op, AMD64G_CC_OP_SHRQ) && isU64(cond, AMD64CondS)) {
+ /* SHRQ, then S --> (ULong)result[63] (result is in dep1) */
+ return binop(Iop_Shr64, cc_dep1, mkU8(63));
+ }
+ // No known test case for this, hence disabled:
+ //if (isU64(cc_op, AMD64G_CC_OP_SHRQ) && isU64(cond, AMD64CondNS)) {
+ // /* SHRQ, then NS --> (ULong) ~ result[63] */
+ // vassert(0);
+ //}
+
/*---------------- SHRL ----------------*/
if (isU64(cc_op, AMD64G_CC_OP_SHRL) && isU64(cond, AMD64CondZ)) {
@@ -1881,6 +1891,52 @@ IRExpr* guest_amd64_spechelper ( const HChar* function_name,
// mkU32(0)));
//}
+ /*---------------- SHLQ ----------------*/
+
+ if (isU64(cc_op, AMD64G_CC_OP_SHLQ) && isU64(cond, AMD64CondZ)) {
+ /* SHLQ, then Z --> test dep1 == 0 */
+ return unop(Iop_1Uto64,
+ binop(Iop_CmpEQ64, cc_dep1, mkU64(0)));
+ }
+ if (isU64(cc_op, AMD64G_CC_OP_SHLQ) && isU64(cond, AMD64CondNZ)) {
+ /* SHLQ, then NZ --> test dep1 != 0 */
+ return unop(Iop_1Uto64,
+ binop(Iop_CmpNE64, cc_dep1, mkU64(0)));
+ }
+
+ //if (isU64(cc_op, AMD64G_CC_OP_SHLQ) && isU64(cond, AMD64CondS)) {
+ // /* SHLQ, then S --> (ULong)result[63] */
+ // vassert(0);
+ //}
+ //if (isU64(cc_op, AMD64G_CC_OP_SHLQ) && isU64(cond, AMD64CondNS)) {
+ // /* SHLQ, then NS --> (ULong) ~ result[63] */
+ // vassert(0);
+ //}
+
+ /*---------------- SHLL ----------------*/
+
+ if (isU64(cc_op, AMD64G_CC_OP_SHLL) && isU64(cond, AMD64CondZ)) {
+ /* SHLL, then Z --> test result[31:0] == 0 */
+ return unop(Iop_1Uto64,
+ binop(Iop_CmpEQ32, unop(Iop_64to32, cc_dep1),
+ mkU32(0)));
+ }
+ //if (isU64(cc_op, AMD64G_CC_OP_SHLL) && isU64(cond, AMD64CondNZ)) {
+ // /* SHLL, then NZ --> test dep1 != 0 */
+ // vassert(0);
+ //}
+
+ if (isU64(cc_op, AMD64G_CC_OP_SHLL) && isU64(cond, AMD64CondS)) {
+ /* SHLL, then S --> (ULong)result[31] */
+ return binop(Iop_And64,
+ binop(Iop_Shr64, cc_dep1, mkU8(31)),
+ mkU64(1));
+ }
+ //if (isU64(cc_op, AMD64G_CC_OP_SHLL) && isU64(cond, AMD64CondNS)) {
+ // /* SHLL, then NS --> (ULong) ~ result[31] */
+ // vassert(0);
+ //}
+
/*---------------- COPY ----------------*/
/* This can happen, as a result of amd64 FP compares: "comisd ... ;
jbe" for example. */


@@ -1,163 +0,0 @@
commit 2be719921e700a9ac9b85f470ed87cb8adf8151b
Author: Julian Seward <jseward@acm.org>
Date: Sat Nov 13 09:27:01 2021 +0100
Bug 445415 - arm64 front end: alignment checks missing for atomic instructions.
For the arm64 front end, none of the atomic instructions have address
alignment checks included in their IR. They all should. The effect of
missing alignment checks in the IR is that, since this IR will in most cases
be translated back to atomic instructions in the back end, we will get
alignment traps (SIGBUS) on the host side and not on the guest side, which is
(very) incorrect behaviour of the simulation.
diff --git a/VEX/priv/guest_arm64_toIR.c b/VEX/priv/guest_arm64_toIR.c
index ee018c6a9..16a7e075f 100644
--- a/VEX/priv/guest_arm64_toIR.c
+++ b/VEX/priv/guest_arm64_toIR.c
@@ -4833,6 +4833,34 @@ static IRTemp gen_zwidening_load ( UInt szB, IRTemp addr )
}
+/* Generate a SIGBUS followed by a restart of the current instruction if
+ `effective_addr` is not `align`-aligned. This is required behaviour for
+ atomic instructions. This assumes that guest_RIP_curr_instr is set correctly!
+
+ This is hardwired to generate SIGBUS because so far the only supported arm64
+ (arm64-linux) does that. Should we need to later extend it to generate some
+ other signal, use the same scheme as with gen_SIGNAL_if_not_XX_aligned in
+ guest_amd64_toIR.c. */
+static
+void gen_SIGBUS_if_not_XX_aligned ( IRTemp effective_addr, ULong align )
+{
+ if (align == 1) {
+ return;
+ }
+ vassert(align == 16 || align == 8 || align == 4 || align == 2);
+ stmt(
+ IRStmt_Exit(
+ binop(Iop_CmpNE64,
+ binop(Iop_And64,mkexpr(effective_addr),mkU64(align-1)),
+ mkU64(0)),
+ Ijk_SigBUS,
+ IRConst_U64(guest_PC_curr_instr),
+ OFFB_PC
+ )
+ );
+}
+
+
/* Generate a "standard 7" name, from bitQ and size. But also
allow ".1d" since that's occasionally useful. */
static
@@ -6670,7 +6698,7 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn,
IRTemp ea = newTemp(Ity_I64);
assign(ea, getIReg64orSP(nn));
- /* FIXME generate check that ea is szB-aligned */
+ gen_SIGBUS_if_not_XX_aligned(ea, szB);
if (isLD && ss == BITS5(1,1,1,1,1)) {
IRTemp res = newTemp(ty);
@@ -6803,7 +6831,7 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn,
IRTemp ea = newTemp(Ity_I64);
assign(ea, getIReg64orSP(nn));
- /* FIXME generate check that ea is 2*elemSzB-aligned */
+ gen_SIGBUS_if_not_XX_aligned(ea, fullSzB);
if (isLD && ss == BITS5(1,1,1,1,1)) {
if (abiinfo->guest__use_fallback_LLSC) {
@@ -7044,7 +7072,7 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn,
IRTemp ea = newTemp(Ity_I64);
assign(ea, getIReg64orSP(nn));
- /* FIXME generate check that ea is szB-aligned */
+ gen_SIGBUS_if_not_XX_aligned(ea, szB);
if (isLD) {
IRTemp res = newTemp(ty);
@@ -7159,6 +7187,7 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn,
IRTemp ea = newTemp(Ity_I64);
assign(ea, getIReg64orSP(nn));
+ gen_SIGBUS_if_not_XX_aligned(ea, szB);
// Insert barrier before loading for acquire and acquire-release variants:
// A and AL.
@@ -7266,6 +7295,10 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn,
IRType ty = integerIRTypeOfSize(szB);
Bool is64 = szB == 8;
+ IRTemp ea = newTemp(Ity_I64);
+ assign(ea, getIReg64orSP(nn));
+ gen_SIGBUS_if_not_XX_aligned(ea, szB);
+
IRExpr *exp = narrowFrom64(ty, getIReg64orZR(ss));
IRExpr *new = narrowFrom64(ty, getIReg64orZR(tt));
@@ -7275,7 +7308,7 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn,
// Store the result back if LHS remains unchanged in memory.
IRTemp old = newTemp(ty);
stmt( IRStmt_CAS(mkIRCAS(/*oldHi*/IRTemp_INVALID, old,
- Iend_LE, getIReg64orSP(nn),
+ Iend_LE, mkexpr(ea),
/*expdHi*/NULL, exp,
/*dataHi*/NULL, new)) );
@@ -7307,6 +7340,10 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn,
if ((ss & 0x1) || (tt & 0x1)) {
/* undefined; fall through */
} else {
+ IRTemp ea = newTemp(Ity_I64);
+ assign(ea, getIReg64orSP(nn));
+ gen_SIGBUS_if_not_XX_aligned(ea, is64 ? 16 : 8);
+
IRExpr *expLo = getIRegOrZR(is64, ss);
IRExpr *expHi = getIRegOrZR(is64, ss + 1);
IRExpr *newLo = getIRegOrZR(is64, tt);
@@ -7318,7 +7355,7 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn,
stmt(IRStmt_MBE(Imbe_Fence));
stmt( IRStmt_CAS(mkIRCAS(oldHi, oldLo,
- Iend_LE, getIReg64orSP(nn),
+ Iend_LE, mkexpr(ea),
expHi, expLo,
newHi, newLo)) );
diff --git a/VEX/priv/host_arm64_defs.c b/VEX/priv/host_arm64_defs.c
index b65e27db4..39c6aaa46 100644
--- a/VEX/priv/host_arm64_defs.c
+++ b/VEX/priv/host_arm64_defs.c
@@ -4033,6 +4033,7 @@ Int emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc,
case Ijk_FlushDCache: trcval = VEX_TRC_JMP_FLUSHDCACHE; break;
case Ijk_NoRedir: trcval = VEX_TRC_JMP_NOREDIR; break;
case Ijk_SigTRAP: trcval = VEX_TRC_JMP_SIGTRAP; break;
+ case Ijk_SigBUS: trcval = VEX_TRC_JMP_SIGBUS; break;
//case Ijk_SigSEGV: trcval = VEX_TRC_JMP_SIGSEGV; break;
case Ijk_Boring: trcval = VEX_TRC_JMP_BORING; break;
/* We don't expect to see the following being assisted. */
diff --git a/VEX/priv/host_arm64_isel.c b/VEX/priv/host_arm64_isel.c
index 094e7e74b..82cb2d78c 100644
--- a/VEX/priv/host_arm64_isel.c
+++ b/VEX/priv/host_arm64_isel.c
@@ -4483,6 +4483,7 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt )
case Ijk_InvalICache:
case Ijk_FlushDCache:
case Ijk_SigTRAP:
+ case Ijk_SigBUS:
case Ijk_Yield: {
HReg r = iselIntExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst));
addInstr(env, ARM64Instr_XAssisted(r, amPC, cc,
@@ -4576,8 +4577,8 @@ static void iselNext ( ISelEnv* env,
case Ijk_InvalICache:
case Ijk_FlushDCache:
case Ijk_SigTRAP:
- case Ijk_Yield:
- {
+ case Ijk_SigBUS:
+ case Ijk_Yield: {
HReg r = iselIntExpr_R(env, next);
ARM64AMode* amPC = mk_baseblock_64bit_access_amode(offsIP);
addInstr(env, ARM64Instr_XAssisted(r, amPC, ARM64cc_AL, jk));
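To see the corrected behaviour from the guest's point of view, here is a hypothetical test program (not part of the patch): compiled with -march=armv8.1-a, the builtin below becomes a CAS instruction, and an 8-byte CAS on a misaligned address is an alignment fault. With the check now in the IR, the SIGBUS is raised on the guest side at the guest PC, instead of surfacing as a host-side trap.

#include <stdint.h>
#include <string.h>

int main(void)
{
    char buf[16] __attribute__((aligned(8)));
    memset(buf, 0, sizeof buf);

    /* Deliberately not 8-byte aligned: an 8-byte atomic CAS on this
       address must fault on arm64. */
    uint64_t *p = (uint64_t *)(buf + 1);
    uint64_t expected = 0;
    __atomic_compare_exchange_n(p, &expected, 1, 0,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return 0;
}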


@@ -1,16 +0,0 @@
diff --git a/VEX/priv/main_main.c b/VEX/priv/main_main.c
index 1253cf588..482047c7a 100644
--- a/VEX/priv/main_main.c
+++ b/VEX/priv/main_main.c
@@ -2163,11 +2163,6 @@ static void check_hwcaps ( VexArch arch, UInt hwcaps )
if (have_fp16 != have_vfp16)
invalid_hwcaps(arch, hwcaps,
"Mismatch detected between scalar and vector FP16 features.\n");
- Bool have_rdm = ((hwcaps & VEX_HWCAPS_ARM64_RDM) != 0);
- Bool have_atomics = ((hwcaps & VEX_HWCAPS_ARM64_ATOMICS) != 0);
- if (have_rdm != have_atomics)
- invalid_hwcaps(arch, hwcaps,
- "Mismatch detected between RDMA and atomics features.\n");
return;
}


@@ -1,121 +0,0 @@
commit 7dbe2fed72886874f2eaf57dc07929542ae55b58
Author: Julian Seward <jseward@acm.org>
Date: Fri Nov 12 10:40:48 2021 +0100
Bug 445354 - arm64 backend: incorrect code emitted for doubleword CAS.
The sequence of instructions emitted by the arm64 backend for doubleword
compare-and-swap is incorrect. This could lead to incorrect simulation of the
AArch8.1 atomic instructions (CASP, at least). It also causes failures in the
upcoming fix for v8.0 support for LD{,A}XP/ST{,L}XP in bug 444399, at least
when running with the fallback LL/SC implementation
(`--sim-hints=fallback-llsc`, or as autoselected at startup). In the worst
case it can cause a segfault in the generated code, because it could jump
backwards unexpectedly far.
The problem is the sequence emitted for ARM64in_CASP:
* the jump offsets are incorrect, both for `bne out` (x 2) and `cbnz w1, loop`.
* using w1 to hold the success indication of the stxp instruction trashes the
previous value in x1. But the value in x1 is an output of ARM64in_CASP,
hence one of the two output registers is corrupted. That confuses any code
downstream that wants to inspect those values to find out whether or not the
transaction succeeded.
The fixes are to
* fix the branch offsets
* use a different register to hold the stxp success indication. w3 is a
convenient choice.
diff --git a/VEX/priv/host_arm64_defs.c b/VEX/priv/host_arm64_defs.c
index 5dccc0495..5657bcab9 100644
--- a/VEX/priv/host_arm64_defs.c
+++ b/VEX/priv/host_arm64_defs.c
@@ -2271,6 +2271,7 @@ void getRegUsage_ARM64Instr ( HRegUsage* u, const ARM64Instr* i, Bool mode64 )
addHRegUse(u, HRmWrite, hregARM64_X1());
addHRegUse(u, HRmWrite, hregARM64_X9());
addHRegUse(u, HRmWrite, hregARM64_X8());
+ addHRegUse(u, HRmWrite, hregARM64_X3());
break;
case ARM64in_MFence:
return;
@@ -4254,16 +4255,16 @@ Int emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc,
-- always:
cmp x0, x8 // EB08001F
- bne out // 540000E1 (b.ne #28 <out>)
+ bne out // 540000A1
cmp x1, x9 // EB09003F
- bne out // 540000A1 (b.ne #20 <out>)
+ bne out // 54000061
-- one of:
- stxp w1, x6, x7, [x2] // C8211C46
- stxp w1, w6, w7, [x2] // 88211C46
+ stxp w3, x6, x7, [x2] // C8231C46
+ stxp w3, w6, w7, [x2] // 88231C46
-- always:
- cbnz w1, loop // 35FFFE81 (cbnz w1, #-48 <loop>)
+ cbnz w3, loop // 35FFFF03
out:
*/
switch (i->ARM64in.CASP.szB) {
@@ -4277,15 +4278,15 @@ Int emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc,
default: vassert(0);
}
*p++ = 0xEB08001F;
- *p++ = 0x540000E1;
- *p++ = 0xEB09003F;
*p++ = 0x540000A1;
+ *p++ = 0xEB09003F;
+ *p++ = 0x54000061;
switch (i->ARM64in.CASP.szB) {
- case 8: *p++ = 0xC8211C46; break;
- case 4: *p++ = 0x88211C46; break;
+ case 8: *p++ = 0xC8231C46; break;
+ case 4: *p++ = 0x88231C46; break;
default: vassert(0);
}
- *p++ = 0x35FFFE81;
+ *p++ = 0x35FFFF03;
goto done;
}
case ARM64in_MFence: {
diff --git a/VEX/priv/host_arm64_defs.h b/VEX/priv/host_arm64_defs.h
index f0737f2c6..01fb5708e 100644
--- a/VEX/priv/host_arm64_defs.h
+++ b/VEX/priv/host_arm64_defs.h
@@ -720,6 +720,7 @@ typedef
Int szB; /* 1, 2, 4 or 8 */
} StrEX;
/* x1 = CAS(x3(addr), x5(expected) -> x7(new)),
+ and trashes x8
where x1[8*szB-1 : 0] == x5[8*szB-1 : 0] indicates success,
x1[8*szB-1 : 0] != x5[8*szB-1 : 0] indicates failure.
Uses x8 as scratch (but that's not allocatable).
@@ -738,7 +739,7 @@ typedef
-- if branch taken, failure; x1[8*szB-1 : 0] holds old value
-- attempt to store
stxr w8, x7, [x3]
- -- if store successful, x1==0, so the eor is "x1 := x5"
+ -- if store successful, x8==0
-- if store failed, branch back and try again.
cbne w8, loop
after:
@@ -746,6 +747,12 @@ typedef
struct {
Int szB; /* 1, 2, 4 or 8 */
} CAS;
+ /* Doubleword CAS, 2 x 32 bit or 2 x 64 bit
+ x0(oldLSW),x1(oldMSW)
+ = DCAS(x2(addr), x4(expectedLSW),x5(expectedMSW)
+ -> x6(newLSW),x7(newMSW))
+ and trashes x8, x9 and x3
+ */
struct {
Int szB; /* 4 or 8 */
} CASP;
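The corrected branch offsets can be sanity-checked by decoding the imm19 field of the instruction words quoted in the comment block. The decoder below is only an illustration; it assumes the standard A64 layout for B.cond/CBNZ (imm19 in bits [23:5], byte offset = imm19 * 4).

#include <stdio.h>

static long imm19_offset(unsigned insn)
{
    long imm19 = (insn >> 5) & 0x7FFFF;
    if (imm19 & 0x40000)
        imm19 -= 0x80000;      /* sign-extend the 19-bit field */
    return imm19 * 4;          /* byte offset from this instruction */
}

int main(void)
{
    printf("0x540000E1 -> b.ne .%+ld (old: overshoots out)\n", imm19_offset(0x540000E1u));
    printf("0x540000A1 -> b.ne .%+ld (new first bne)\n",       imm19_offset(0x540000A1u));
    printf("0x54000061 -> b.ne .%+ld (new second bne)\n",      imm19_offset(0x54000061u));
    printf("0x35FFFF03 -> cbnz w3, .%+ld (new back-branch)\n", imm19_offset(0x35FFFF03u));
    return 0;
}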

File diff suppressed because it is too large.


@@ -1,284 +0,0 @@
commit 9abfed23c0d430aafb85de6397d171316c982792
Author: Paul Floyd <pjfloyd@wanadoo.fr>
Date: Fri Nov 19 08:34:53 2021 +0100
Bug 445504 - Using C++ condition_variable results in a bogus "mutex is locked simultaneously by two threads" warning
Add intercepts for pthread_cond_clockwait to DRD and Helgrind
Also add the testcase from bugzilla, done by Bart, with a configure check
diff --git a/configure.ac b/configure.ac
index e7381f205..cb836dbff 100755
--- a/configure.ac
+++ b/configure.ac
@@ -1989,6 +1989,27 @@ AC_LANG(C)
AM_CONDITIONAL(CXX_CAN_INCLUDE_THREAD_HEADER, test x$ac_cxx_can_include_thread_header = xyes)
+# Check whether compiler can process #include <condition_variable> without errors
+
+AC_MSG_CHECKING([that C++ compiler can include <condition_variable> header file])
+AC_LANG(C++)
+safe_CXXFLAGS=$CXXFLAGS
+CXXFLAGS=-std=c++0x
+
+AC_COMPILE_IFELSE([AC_LANG_SOURCE([
+#include <condition_variable>
+])],
+[
+ac_cxx_can_include_condition_variable_header=yes
+AC_MSG_RESULT([yes])
+], [
+ac_cxx_can_include_condition_variable_header=no
+AC_MSG_RESULT([no])
+])
+CXXFLAGS=$safe_CXXFLAGS
+AC_LANG(C)
+
+AM_CONDITIONAL(CXX_CAN_INCLUDE_CONDITION_VARIABLE_HEADER, test x$ac_cxx_can_include_condition_variable_header = xyes)
# On aarch64 before glibc 2.20 we would get the kernel user_pt_regs instead
# of the user_regs_struct from sys/user.h. They are structurally the same
diff --git a/drd/drd_pthread_intercepts.c b/drd/drd_pthread_intercepts.c
index 8b4454364..95127b42c 100644
--- a/drd/drd_pthread_intercepts.c
+++ b/drd/drd_pthread_intercepts.c
@@ -1175,6 +1175,30 @@ PTH_FUNCS(int, condZureltimedwait, pthread_cond_timedwait_intercept,
(cond, mutex, timeout));
#endif /* VGO_solaris */
+
+static __always_inline
+int pthread_cond_clockwait_intercept(pthread_cond_t *cond,
+ pthread_mutex_t *mutex,
+ clockid_t clockid,
+ const struct timespec* abstime)
+{
+ int ret;
+ OrigFn fn;
+ VALGRIND_GET_ORIG_FN(fn);
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_COND_WAIT,
+ cond, mutex, DRD_(mutex_type)(mutex), 0, 0);
+ CALL_FN_W_WWWW(ret, fn, cond, mutex, clockid, abstime);
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_COND_WAIT,
+ cond, mutex, 1, 0, 0);
+ return ret;
+}
+
+PTH_FUNCS(int, pthreadZucondZuclockwait, pthread_cond_clockwait_intercept,
+ (pthread_cond_t *cond, pthread_mutex_t *mutex,
+ clockid_t clockid, const struct timespec* abstime),
+ (cond, mutex, clockid, abstime));
+
+
// NOTE: be careful to intercept only pthread_cond_signal() and not Darwin's
// pthread_cond_signal_thread_np(). The former accepts one argument; the latter
// two. Intercepting all pthread_cond_signal* functions will cause only one
diff --git a/drd/tests/Makefile.am b/drd/tests/Makefile.am
index 4cb2f7f84..c804391e8 100755
--- a/drd/tests/Makefile.am
+++ b/drd/tests/Makefile.am
@@ -105,6 +105,8 @@ EXTRA_DIST = \
circular_buffer.vgtest \
concurrent_close.stderr.exp \
concurrent_close.vgtest \
+ condvar.stderr.exp \
+ condvar.vgtest \
custom_alloc.stderr.exp \
custom_alloc.vgtest \
custom_alloc_fiw.stderr.exp \
@@ -458,6 +460,11 @@ check_PROGRAMS += \
endif
endif
+if CXX_CAN_INCLUDE_CONDITION_VARIABLE_HEADER
+check_PROGRAMS += \
+ condvar
+endif
+
if HAVE_OPENMP
check_PROGRAMS += omp_matinv omp_prime omp_printf
endif
@@ -502,6 +509,8 @@ LDADD = -lpthread
bug322621_SOURCES = bug322621.cpp
+condvar_SOURCES = condvar.cpp
+condvar_CXXFLAGS = $(AM_CXXFLAGS) -std=c++0x
concurrent_close_SOURCES = concurrent_close.cpp
if !VGCONF_OS_IS_FREEBSD
dlopen_main_LDADD = -ldl
diff --git a/drd/tests/condvar.cpp b/drd/tests/condvar.cpp
new file mode 100644
index 000000000..18ecb3f8a
--- /dev/null
+++ b/drd/tests/condvar.cpp
@@ -0,0 +1,55 @@
+/* See also https://bugs.kde.org/show_bug.cgi?id=445504 */
+
+#include <condition_variable>
+#include <future>
+#include <iostream>
+#include <mutex>
+#include <thread>
+#include <vector>
+
+using lock_guard = std::lock_guard<std::mutex>;
+using unique_lock = std::unique_lock<std::mutex>;
+
+struct state {
+ std::mutex m;
+ std::vector<int> v;
+ std::condition_variable cv;
+
+ state() {
+ // Call pthread_cond_init() explicitly to let DRD know about 'cv'.
+ pthread_cond_init(cv.native_handle(), NULL);
+ }
+};
+
+void other_thread(state *sp) {
+ state &s = *sp;
+ std::cerr << "Other thread: waiting for notify\n";
+ unique_lock l{s.m};
+ while (true) {
+ if (s.cv.wait_for(l, std::chrono::seconds(3)) !=
+ std::cv_status::timeout) {
+ std::cerr << "Other thread: notified\n";
+ break;
+ }
+ }
+ return;
+}
+
+
+int main() {
+ state s;
+ auto future = std::async(std::launch::async, other_thread, &s);
+
+ if (future.wait_for(std::chrono::seconds(1)) != std::future_status::timeout) {
+ std::cerr << "Main: other thread returned too early!\n";
+ return 2;
+ }
+
+ {
+ std::lock_guard<std::mutex> g{s.m};
+ s.v.push_back(1);
+ s.v.push_back(2);
+ s.cv.notify_all();
+ }
+ return 0;
+}
diff --git a/drd/tests/condvar.stderr.exp b/drd/tests/condvar.stderr.exp
new file mode 100644
index 000000000..be1de9f97
--- /dev/null
+++ b/drd/tests/condvar.stderr.exp
@@ -0,0 +1,5 @@
+
+Other thread: waiting for notify
+Other thread: notified
+
+ERROR SUMMARY: 0 errors from 0 contexts (suppressed: 0 from 0)
diff --git a/drd/tests/condvar.vgtest b/drd/tests/condvar.vgtest
new file mode 100644
index 000000000..2e7d49f5a
--- /dev/null
+++ b/drd/tests/condvar.vgtest
@@ -0,0 +1,3 @@
+prereq: ./supported_libpthread && [ -e condvar ]
+vgopts: --check-stack-var=yes --read-var-info=yes
+prog: condvar
diff --git a/helgrind/hg_intercepts.c b/helgrind/hg_intercepts.c
index 866efdbaa..49c3ddcd9 100644
--- a/helgrind/hg_intercepts.c
+++ b/helgrind/hg_intercepts.c
@@ -1409,6 +1409,88 @@ static int pthread_cond_timedwait_WRK(pthread_cond_t* cond,
# error "Unsupported OS"
#endif
+//-----------------------------------------------------------
+// glibc: pthread_cond_clockwait
+//
+__attribute__((noinline))
+static int pthread_cond_clockwait_WRK(pthread_cond_t* cond,
+ pthread_mutex_t* mutex,
+ clockid_t clockid,
+ struct timespec* abstime,
+ int timeout_error)
+{
+ int ret;
+ OrigFn fn;
+ unsigned long mutex_is_valid;
+ Bool abstime_is_valid;
+ VALGRIND_GET_ORIG_FN(fn);
+
+ if (TRACE_PTH_FNS) {
+ fprintf(stderr, "<< pthread_cond_clockwait %p %p %p",
+ cond, mutex, abstime);
+ fflush(stderr);
+ }
+
+ /* Tell the tool a cond-wait is about to happen, so it can check
+ for bogus argument values. In return it tells us whether it
+ thinks the mutex is valid or not. */
+ DO_CREQ_W_WW(mutex_is_valid,
+ _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE,
+ pthread_cond_t*,cond, pthread_mutex_t*,mutex);
+ assert(mutex_is_valid == 1 || mutex_is_valid == 0);
+
+ abstime_is_valid = abstime->tv_nsec >= 0 && abstime->tv_nsec < 1000000000;
+
+ /* Tell the tool we're about to drop the mutex. This reflects the
+ fact that in a cond_wait, we show up holding the mutex, and the
+ call atomically drops the mutex and waits for the cv to be
+ signalled. */
+ if (mutex_is_valid && abstime_is_valid) {
+ DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,
+ pthread_mutex_t*,mutex);
+ }
+
+ CALL_FN_W_WWWW(ret, fn, cond,mutex,clockid,abstime);
+
+ if (mutex_is_valid && !abstime_is_valid && ret != EINVAL) {
+ DO_PthAPIerror("Bug in libpthread: pthread_cond_clockwait "
+ "invalid abstime did not cause"
+ " EINVAL", ret);
+ }
+
+ if (mutex_is_valid && abstime_is_valid) {
+ /* and now we have the mutex again if (ret == 0 || ret == timeout) */
+ DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,
+ pthread_mutex_t *, mutex,
+ long, (ret == 0 || ret == timeout_error) ? True : False);
+ }
+
+ DO_CREQ_v_WWWW(_VG_USERREQ__HG_PTHREAD_COND_WAIT_POST,
+ pthread_cond_t*,cond, pthread_mutex_t*,mutex,
+ long,ret == timeout_error,
+ long, (ret == 0 || ret == timeout_error) && mutex_is_valid
+ ? True : False);
+
+ if (ret != 0 && ret != timeout_error) {
+ DO_PthAPIerror( "pthread_cond_clockwait", ret );
+ }
+
+ if (TRACE_PTH_FNS) {
+ fprintf(stderr, " cotimedwait -> %d >>\n", ret);
+ }
+
+ return ret;
+}
+
+#if defined(VGO_linux)
+ PTH_FUNC(int, pthreadZucondZuclockwait, // pthread_cond_clockwait
+ pthread_cond_t* cond, pthread_mutex_t* mutex,
+ clockid_t clockid,
+ struct timespec* abstime) {
+ return pthread_cond_clockwait_WRK(cond, mutex, clockid, abstime, ETIMEDOUT);
+ }
+#endif
+
//-----------------------------------------------------------
// glibc: pthread_cond_signal@GLIBC_2.0
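For context, here is a minimal direct use of the newly intercepted function (glibc 2.30 or later; an illustration, not taken from the patch). Without the intercept, the tools never see the mutex being released and re-acquired inside the wait, which is what produced the bogus "locked simultaneously by two threads" report.

#define _GNU_SOURCE
#include <pthread.h>
#include <time.h>

int main(void)
{
    pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
    struct timespec deadline;

    clock_gettime(CLOCK_MONOTONIC, &deadline);
    deadline.tv_sec += 1;

    pthread_mutex_lock(&m);
    /* Atomically drops m, waits on c against CLOCK_MONOTONIC, and
       re-acquires m on return -- here after the ~1s timeout, since
       nothing ever signals c. */
    pthread_cond_clockwait(&c, &m, CLOCK_MONOTONIC, &deadline);
    pthread_mutex_unlock(&m);
    return 0;
}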


@@ -1,35 +0,0 @@
commit 542447d4708d4418a08e678dcf467af92b90b7ad
Author: Mark Wielaard <mark@klomp.org>
Date: Mon Nov 22 13:07:59 2021 +0100
readdwarf3.c (parse_inl_DIE) inlined_subroutine can appear in namespaces
This was broken by commit 75e3ef0f3 "readdwarf3: Skip units without
addresses when looking for inlined functions". Specifically by this
part: "Also use skip_DIE instead of read_DIE when not parsing
(skipping) children"
rustc puts concrete function instances in namespaces (which is
allowed in DWARF since there is no strict separation between type
declarations and program scope entries in a DIE tree), the inline
parser didn't expect this and so skipped any DIE under a namespace
entry. This wasn't an issue before because "skipping" a DIE tree was
done by reading it, so it wasn't actually skipped. But now that we
really skip the DIE (sub)tree (which is faster than actually parsing
it) some entries were missed in the rustc case.
https://bugs.kde.org/show_bug.cgi?id=445668
diff --git a/coregrind/m_debuginfo/readdwarf3.c b/coregrind/m_debuginfo/readdwarf3.c
index 18eecea9f..5489f8d13 100644
--- a/coregrind/m_debuginfo/readdwarf3.c
+++ b/coregrind/m_debuginfo/readdwarf3.c
@@ -3358,7 +3358,7 @@ static Bool parse_inl_DIE (
// might maybe contain a DW_TAG_inlined_subroutine:
Bool ret = (unit_has_addrs
|| dtag == DW_TAG_lexical_block || dtag == DW_TAG_subprogram
- || dtag == DW_TAG_inlined_subroutine);
+ || dtag == DW_TAG_inlined_subroutine || dtag == DW_TAG_namespace);
return ret;
bad_DIE:


@@ -1,20 +0,0 @@
commit 33aba8eef68b1745d3de96b609ff8296b70d9a1c
Author: Paul Floyd <pjfloyd@wanadoo.fr>
Date: Wed Oct 27 21:37:00 2021 +0200
Bug 444495 - dhat/tests/copy fails on s390x
Add -fno-builtin to ensure that the copy functions get called and so dhat
can intercept and count them.
diff --git a/dhat/tests/Makefile.am b/dhat/tests/Makefile.am
index 86a9b6d64..b86fc416d 100644
--- a/dhat/tests/Makefile.am
+++ b/dhat/tests/Makefile.am
@@ -29,3 +29,6 @@ AM_CXXFLAGS += $(AM_FLAG_M3264_PRI)
# We don't care about uninitialized or unused malloc results
basic_CFLAGS = $(AM_CFLAGS) -Wno-uninitialized
big_CFLAGS = $(AM_CFLAGS) -Wno-unused-result
+
+# Prevent the copying functions from being inlined
+copy_CFLAGS = $(AM_CFLAGS) -fno-builtin
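To illustrate the failure mode (a hypothetical example, not the actual test): with builtins enabled the compiler may expand a small fixed-size copy inline, so no call ever reaches the tool's interceptor and the copy goes uncounted; -fno-builtin forces a genuine call.

#include <string.h>

int main(void)
{
    char src[8] = "1234567", dst[8];
    /* With -fno-builtin this is a real call to memcpy() that DHAT can
       intercept and count; without it, GCC may lower the copy to a pair
       of moves and the interceptor never runs. */
    memcpy(dst, src, sizeof src);
    return dst[0] == '1' ? 0 : 1;
}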


@@ -1,25 +0,0 @@
commit 64ab89162906d5b9e2de6c3afe476fec861ef7ec
Author: Mark Wielaard <mark@klomp.org>
Date: Tue Nov 2 14:27:45 2021 +0100
gdbserver_tests: Filter out glibc hwcaps libc.so
On some systems the gdbserver_tests would fail because the filter
for the optimized hwcaps subdir didn't match because the file is
called slightly differently, with the version number before .so
instead of after. For example: /lib64/glibc-hwcaps/power9/libc-2.28.so
Add one extra filter for this pattern.
diff --git a/gdbserver_tests/filter_gdb.in b/gdbserver_tests/filter_gdb.in
index d0c94f3f1..b753e0168 100755
--- a/gdbserver_tests/filter_gdb.in
+++ b/gdbserver_tests/filter_gdb.in
@@ -134,6 +134,7 @@ s/in \(.__\)\{0,1\}select () from \/.*$/in syscall .../
/^ from \/lib\/libc.so.*$/d
/^ from \/lib64\/libc.so.*$/d
/^ from \/lib64\/.*\/libc.so.*$/d
+/^ from \/lib64\/.*\/libc-.*.so/d
# and yet another (gdb 7.0 way) to get a system call
s/in select ()$/in syscall .../


@@ -1,136 +0,0 @@
commit 9d1d6cd6acc612cd94261956a8a94a6403a5d528
Author: Will Schmidt <will_schmidt@vnet.ibm.com>
Date: Tue Jan 4 16:41:00 2022 -0600
Subject: Assorted changes to protect from side effects from the feature checking code.
This problem was initially reported by Tulio, who assisted me in
identifying the underlying issue here.
This was discovered on a Power10, and occurs since the ISA 3.1 support
check uses the brh instruction via a hardcoded ".long 0x7f1401b6" asm stanza.
That encoding writes to r20, and since the stanza does not contain a clobber
the compiler did not know to save or restore that register upon entry or exit.
The junk value remaining in r20 subsequently caused a segfault.
This patch adds clobber masks to the instruction stanzas, as well as
updates the associated comments to clarify which registers are being
used.
As part of this change I've also
- updated the .long for the cnttzw instruction to write to r20, and
zeroed the reserved bits from that instruction so it is properly
decoded by the disassembler.
- updated the .long for the dadd instruction to write to f0.
I've inspected the current codegen with these changes in place, and
confirm that r20 is now saved and restored on entry and exit from the
machine_get_hwcaps() function.
diff --git a/coregrind/m_machine.c b/coregrind/m_machine.c
index 0b60ecc0fd44..a860ed67a334 100644
--- a/coregrind/m_machine.c
+++ b/coregrind/m_machine.c
@@ -1244,11 +1244,11 @@ Bool VG_(machine_get_hwcaps)( void )
/* Check for ISA 3.0 support. */
have_isa_3_0 = True;
if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
have_isa_3_0 = False;
} else {
- __asm__ __volatile__(".long 0x7d205434"); /* cnttzw RT, RB */
+ __asm__ __volatile__(".long 00x7f140434"::"r20"); /* cnttzw r20,r24 */
}
// ISA 3.1 not supported on 32-bit systems
/* determine dcbz/dcbzl sizes while we still have the signal
@@ -1356,79 +1356,79 @@ Bool VG_(machine_get_hwcaps)( void )
/* Altivec insns */
have_V = True;
if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
have_V = False;
} else {
- __asm__ __volatile__(".long 0x10000484"); /*vor 0,0,0*/
+ __asm__ __volatile__(".long 0x10000484"); /* vor v0,v0,v0 */
}
/* General-Purpose optional (fsqrt, fsqrts) */
have_FX = True;
if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
have_FX = False;
} else {
- __asm__ __volatile__(".long 0xFC00002C"); /*fsqrt 0,0*/
+ __asm__ __volatile__(".long 0xFC00002C"); /* fsqrt f0,f0 */
}
/* Graphics optional (stfiwx, fres, frsqrte, fsel) */
have_GX = True;
if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
have_GX = False;
} else {
- __asm__ __volatile__(".long 0xFC000034"); /*frsqrte 0,0*/
+ __asm__ __volatile__(".long 0xFC000034"); /* frsqrte f0,f0 */
}
/* VSX support implies Power ISA 2.06 */
have_VX = True;
if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
have_VX = False;
} else {
- __asm__ __volatile__(".long 0xf0000564"); /* xsabsdp XT,XB */
+ __asm__ __volatile__(".long 0xf0000564"); /* xsabsdp vs0,vs0 */
}
/* Check for Decimal Floating Point (DFP) support. */
have_DFP = True;
if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
have_DFP = False;
} else {
- __asm__ __volatile__(".long 0xee4e8005"); /* dadd FRT,FRA, FRB */
+ __asm__ __volatile__(".long 0xec0e8005"); /* dadd f0,f14,f16 */
}
/* Check for ISA 2.07 support. */
have_isa_2_07 = True;
if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
have_isa_2_07 = False;
} else {
- __asm__ __volatile__(".long 0x7c000166"); /* mtvsrd XT,RA */
+ __asm__ __volatile__(".long 0x7c000166"); /* mtvsrd f0,r0 */
}
/* Check for ISA 3.0 support. */
have_isa_3_0 = True;
if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
have_isa_3_0 = False;
} else {
- __asm__ __volatile__(".long 0x7d205434"); /* cnttzw RT, RB */
+ __asm__ __volatile__(".long 0x7f140434":::"r20"); /* cnttzw r20,r24 */
}
/* Check for ISA 3.1 support. */
have_isa_3_1 = True;
if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
have_isa_3_1 = False;
} else {
- __asm__ __volatile__(".long 0x7f1401b6"); /* brh RA, RS */
+ __asm__ __volatile__(".long 0x7f1401b6":::"r20"); /* brh r20,r24 */
}
/* Check if Host supports scv instruction */
have_scv_support = True;
if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
have_scv_support = False;
} else {
/* Set r0 to 13 for the system time call. Don't want to make a random
system call. */
- __asm__ __volatile__(".long 0x7c000278"); /* clear r0 */
- __asm__ __volatile__(".long 0x6009000d"); /* set r0 to 13 */
- __asm__ __volatile__(".long 0x44000001"); /* scv */
+ __asm__ __volatile__(".long 0x7c000278"); /* clear r0 with xor r0,r0,r0 */
+ __asm__ __volatile__(".long 0x6009000d"); /* set r0 to 13 with ori r9,r0,13 */
+ __asm__ __volatile__(".long 0x44000001"); /* scv 0 */
}
/* determine dcbz/dcbzl sizes while we still have the signal
* handlers registered */
find_ppc_dcbz_sz(&vai);
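The general rule the patch applies, shown as a standalone sketch (ppc64le-only; the encoding and register come from the commit text): a ".long" stanza hides the instruction from the compiler, so the clobber list is the only thing telling it which registers the stanza writes.

int main(void)
{
    /* The encoded instruction writes r20; naming it in the clobber list
       is the only way the compiler can know to save and restore it.
       Without the clobber, a live value may be kept in r20 across the
       stanza -- the bug described above. */
    __asm__ __volatile__(".long 0x7f140434" : : : "r20"); /* cnttzw r20,r24 */
    return 0;
}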

File diff suppressed because it is too large.


@@ -1,47 +0,0 @@
commit ae8c6de01417023e78763de145b1c0e6ddd87277
Author: Carl Love <cel@us.ibm.com>
Date: Wed Oct 20 20:40:13 2021 +0000
Fix for the prefixed stq instruction in PC relative mode.
The pstq instruction for R=1 was not using the correct effective address.
The EA_hi and EA_lo should have been based on the value of EA as calculated
by the function calculate_prefix_EA. Unfortunately, the EA_hi and EA_lo
addresses were still using the previous code (not PC relative) to calculate
the address from the contents of RA plus the offset.
diff --git a/VEX/priv/guest_ppc_toIR.c b/VEX/priv/guest_ppc_toIR.c
index 8afd77490..543fa9574 100644
--- a/VEX/priv/guest_ppc_toIR.c
+++ b/VEX/priv/guest_ppc_toIR.c
@@ -9838,23 +9838,24 @@ static Bool dis_int_store_ds_prefix ( UInt prefix,
if (host_endness == VexEndnessBE) {
/* upper 64-bits */
- assign( EA_hi, ea_rAor0_simm( rA_addr, immediate_val ) );
+ assign( EA_hi, mkexpr(EA));
/* lower 64-bits */
- assign( EA_lo, ea_rAor0_simm( rA_addr, immediate_val+8 ) );
+ assign( EA_lo, binop(Iop_Add64, mkexpr(EA), mkU64(8)));
+
} else {
/* upper 64-bits */
- assign( EA_hi, ea_rAor0_simm( rA_addr, immediate_val+8 ) );
+ assign( EA_hi, binop(Iop_Add64, mkexpr(EA), mkU64(8)));
/* lower 64-bits */
- assign( EA_lo, ea_rAor0_simm( rA_addr, immediate_val ) );
+ assign( EA_lo, mkexpr(EA));
}
} else {
/* upper half of upper 64-bits */
- assign( EA_hi, ea_rAor0_simm( rA_addr, immediate_val+4 ) );
+ assign( EA_hi, binop(Iop_Add32, mkexpr(EA), mkU32(4)));
/* lower half of upper 64-bits */
- assign( EA_lo, ea_rAor0_simm( rA_addr, immediate_val+12 ) );
+ assign( EA_lo, binop(Iop_Add32, mkexpr(EA), mkU32(12)));
}
/* Note, the store order for stq instruction is the same for BE


@@ -1,60 +0,0 @@
commit 6e08ee95f7f1b1c3fd434fa380cc5b2cc3e3f7c7
Author: Carl Love <cel@us.ibm.com>
Date: Fri Oct 29 16:30:33 2021 -0500
Bug 444571 - PPC: fix the lxsibzx and lxsihzx so they only load data of their respective sizes.
The lxsibzx was doing a 64-bit load. The result was initializing
additional bytes in the register that should not have been initialized.
The memcheck/tests/linux/dlclose_leak test detected the issue. The
code generation uses lxsibzx and stxsibx with -mcpu=power9. Previously
the lbz and stb instructions were generated.
The same issue was noted and fixed with the lxsihzx instruction. The
memcheck/tests/linux/badrw test now passes as well.
https://bugs.kde.org/show_bug.cgi?id=444571
diff --git a/VEX/priv/guest_ppc_toIR.c b/VEX/priv/guest_ppc_toIR.c
index d90d566ed..8afd77490 100644
--- a/VEX/priv/guest_ppc_toIR.c
+++ b/VEX/priv/guest_ppc_toIR.c
@@ -25359,19 +25359,17 @@ dis_vx_load ( UInt prefix, UInt theInstr )
else
irx_addr = mkexpr( EA );
-
- byte = load( Ity_I64, irx_addr );
+ /* byte load */
+ byte = load( Ity_I8, irx_addr );
putVSReg( XT, binop( Iop_64HLtoV128,
- binop( Iop_And64,
- byte,
- mkU64( 0xFF ) ),
+ unop( Iop_8Uto64, byte ),
mkU64( 0 ) ) );
break;
}
case 0x32D: // lxsihzx
{
- IRExpr *byte;
+ IRExpr *hword;
IRExpr* irx_addr;
DIP("lxsihzx %u,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
@@ -25382,11 +25380,10 @@ dis_vx_load ( UInt prefix, UInt theInstr )
else
irx_addr = mkexpr( EA );
- byte = load( Ity_I64, irx_addr );
+ hword = load( Ity_I16, irx_addr );
putVSReg( XT, binop( Iop_64HLtoV128,
- binop( Iop_And64,
- byte,
- mkU64( 0xFFFF ) ),
+ unop( Iop_16Uto64,
+ hword ),
mkU64( 0 ) ) );
break;
}
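A sketch of why the over-wide load mattered (hypothetical example, not from the patch): when a 1-byte guest load is emulated as an 8-byte load, seven bytes beyond the allocation get read, and their undefinedness pollutes the result that Memcheck tracks.

#include <stdlib.h>

int main(void)
{
    /* p[0] is the only addressable, defined byte. An instruction that
       architecturally loads 1 byte but was emulated as an 8-byte load
       also read p[1..7] -- undefined heap bytes -- which is what made
       memcheck/tests/linux/dlclose_leak and badrw misbehave. */
    unsigned char *p = malloc(1);
    p[0] = 42;
    unsigned char v = p[0];
    free(p);
    return v == 42 ? 0 : 1;
}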


@@ -1,180 +0,0 @@
commit 67e7b20eb256baec225b3d7df1f03d731bf5e939
Author: Mark Wielaard <mark@klomp.org>
Date: Fri Dec 10 17:41:59 2021 +0100
Implement linux rseq syscall as ENOSYS
This implements rseq for amd64, arm, arm64, ppc32, ppc64,
s390x and x86 linux as ENOSYS (without warning).
glibc will start using rseq to accelerate sched_getcpu, if
available. This would cause a warning from valgrind every
time a new thread is started.
Real rseq (restartable sequences) support is pretty hard, so
for now just explicitly return ENOSYS (just like we do for clone3).
https://sourceware.org/pipermail/libc-alpha/2021-December/133656.html
diff --git a/coregrind/m_syswrap/syswrap-amd64-linux.c b/coregrind/m_syswrap/syswrap-amd64-linux.c
index 5062324a1..18b25f80a 100644
--- a/coregrind/m_syswrap/syswrap-amd64-linux.c
+++ b/coregrind/m_syswrap/syswrap-amd64-linux.c
@@ -862,6 +862,8 @@ static SyscallTableEntry syscall_table[] = {
LINXY(__NR_statx, sys_statx), // 332
+ GENX_(__NR_rseq, sys_ni_syscall), // 334
+
LINX_(__NR_membarrier, sys_membarrier), // 324
LINX_(__NR_copy_file_range, sys_copy_file_range), // 326
diff --git a/coregrind/m_syswrap/syswrap-arm-linux.c b/coregrind/m_syswrap/syswrap-arm-linux.c
index 556dd844b..d583cef0c 100644
--- a/coregrind/m_syswrap/syswrap-arm-linux.c
+++ b/coregrind/m_syswrap/syswrap-arm-linux.c
@@ -1024,6 +1024,7 @@ static SyscallTableEntry syscall_main_table[] = {
LINX_(__NR_pwritev2, sys_pwritev2), // 393
LINXY(__NR_statx, sys_statx), // 397
+ GENX_(__NR_rseq, sys_ni_syscall), // 398
LINXY(__NR_clock_gettime64, sys_clock_gettime64), // 403
LINX_(__NR_clock_settime64, sys_clock_settime64), // 404
diff --git a/coregrind/m_syswrap/syswrap-arm64-linux.c b/coregrind/m_syswrap/syswrap-arm64-linux.c
index b87107727..2066a38ea 100644
--- a/coregrind/m_syswrap/syswrap-arm64-linux.c
+++ b/coregrind/m_syswrap/syswrap-arm64-linux.c
@@ -823,8 +823,9 @@ static SyscallTableEntry syscall_main_table[] = {
// (__NR_pkey_mprotect, sys_ni_syscall), // 288
// (__NR_pkey_alloc, sys_ni_syscall), // 289
// (__NR_pkey_free, sys_ni_syscall), // 290
+ LINXY(__NR_statx, sys_statx), // 291
- LINXY(__NR_statx, sys_statx), // 397
+ GENX_(__NR_rseq, sys_ni_syscall), // 293
LINXY(__NR_io_uring_setup, sys_io_uring_setup), // 425
LINXY(__NR_io_uring_enter, sys_io_uring_enter), // 426
diff --git a/coregrind/m_syswrap/syswrap-ppc32-linux.c b/coregrind/m_syswrap/syswrap-ppc32-linux.c
index 6263ab845..637b2504e 100644
--- a/coregrind/m_syswrap/syswrap-ppc32-linux.c
+++ b/coregrind/m_syswrap/syswrap-ppc32-linux.c
@@ -1028,6 +1028,8 @@ static SyscallTableEntry syscall_table[] = {
LINXY(__NR_statx, sys_statx), // 383
+ GENX_(__NR_rseq, sys_ni_syscall), // 387
+
LINXY(__NR_clock_gettime64, sys_clock_gettime64), // 403
LINX_(__NR_clock_settime64, sys_clock_settime64), // 404
diff --git a/coregrind/m_syswrap/syswrap-ppc64-linux.c b/coregrind/m_syswrap/syswrap-ppc64-linux.c
index a26b41c32..93956d3cc 100644
--- a/coregrind/m_syswrap/syswrap-ppc64-linux.c
+++ b/coregrind/m_syswrap/syswrap-ppc64-linux.c
@@ -1019,6 +1019,8 @@ static SyscallTableEntry syscall_table[] = {
LINXY(__NR_statx, sys_statx), // 383
+ GENX_(__NR_rseq, sys_ni_syscall), // 387
+
LINXY(__NR_io_uring_setup, sys_io_uring_setup), // 425
LINXY(__NR_io_uring_enter, sys_io_uring_enter), // 426
LINXY(__NR_io_uring_register, sys_io_uring_register), // 427
diff --git a/coregrind/m_syswrap/syswrap-s390x-linux.c b/coregrind/m_syswrap/syswrap-s390x-linux.c
index 5c9209859..73f9684c4 100644
--- a/coregrind/m_syswrap/syswrap-s390x-linux.c
+++ b/coregrind/m_syswrap/syswrap-s390x-linux.c
@@ -860,6 +860,8 @@ static SyscallTableEntry syscall_table[] = {
LINXY(__NR_statx, sys_statx), // 379
+ GENX_(__NR_rseq, sys_ni_syscall), // 381
+
LINXY(__NR_io_uring_setup, sys_io_uring_setup), // 425
LINXY(__NR_io_uring_enter, sys_io_uring_enter), // 426
LINXY(__NR_io_uring_register, sys_io_uring_register), // 427
diff --git a/coregrind/m_syswrap/syswrap-x86-linux.c b/coregrind/m_syswrap/syswrap-x86-linux.c
index 1d8f45d33..8662ff501 100644
--- a/coregrind/m_syswrap/syswrap-x86-linux.c
+++ b/coregrind/m_syswrap/syswrap-x86-linux.c
@@ -1619,6 +1619,8 @@ static SyscallTableEntry syscall_table[] = {
/* Explicitly not supported on i386 yet. */
GENX_(__NR_arch_prctl, sys_ni_syscall), // 384
+ GENX_(__NR_rseq, sys_ni_syscall), // 386
+
LINXY(__NR_clock_gettime64, sys_clock_gettime64), // 403
LINX_(__NR_clock_settime64, sys_clock_settime64), // 404
diff --git a/include/vki/vki-scnums-arm-linux.h b/include/vki/vki-scnums-arm-linux.h
index ff560e19d..485db8b26 100644
--- a/include/vki/vki-scnums-arm-linux.h
+++ b/include/vki/vki-scnums-arm-linux.h
@@ -432,6 +432,7 @@
#define __NR_pkey_alloc 395
#define __NR_pkey_free 396
#define __NR_statx 397
+#define __NR_rseq 398
diff --git a/include/vki/vki-scnums-arm64-linux.h b/include/vki/vki-scnums-arm64-linux.h
index 9aa3b2b5f..acdfb39c6 100644
--- a/include/vki/vki-scnums-arm64-linux.h
+++ b/include/vki/vki-scnums-arm64-linux.h
@@ -323,9 +323,11 @@
#define __NR_pkey_alloc 289
#define __NR_pkey_free 290
#define __NR_statx 291
+#define __NR_io_pgetevents 292
+#define __NR_rseq 293
#undef __NR_syscalls
-#define __NR_syscalls 292
+#define __NR_syscalls 294
///*
// * All syscalls below here should go away really,
diff --git a/include/vki/vki-scnums-ppc32-linux.h b/include/vki/vki-scnums-ppc32-linux.h
index 6987ad941..08fa77df0 100644
--- a/include/vki/vki-scnums-ppc32-linux.h
+++ b/include/vki/vki-scnums-ppc32-linux.h
@@ -415,6 +415,7 @@
#define __NR_pkey_alloc 384
#define __NR_pkey_free 385
#define __NR_pkey_mprotect 386
+#define __NR_rseq 387
#endif /* __VKI_SCNUMS_PPC32_LINUX_H */
diff --git a/include/vki/vki-scnums-ppc64-linux.h b/include/vki/vki-scnums-ppc64-linux.h
index 6827964fd..a76fa6d32 100644
--- a/include/vki/vki-scnums-ppc64-linux.h
+++ b/include/vki/vki-scnums-ppc64-linux.h
@@ -407,6 +407,7 @@
#define __NR_pkey_alloc 384
#define __NR_pkey_free 385
#define __NR_pkey_mprotect 386
+#define __NR_rseq 387
#endif /* __VKI_SCNUMS_PPC64_LINUX_H */
diff --git a/include/vki/vki-scnums-s390x-linux.h b/include/vki/vki-scnums-s390x-linux.h
index 6487e20c9..869c04584 100644
--- a/include/vki/vki-scnums-s390x-linux.h
+++ b/include/vki/vki-scnums-s390x-linux.h
@@ -342,8 +342,11 @@
#define __NR_s390_guarded_storage 378
#define __NR_statx 379
#define __NR_s390_sthyi 380
+#define __NR_kexec_file_load 381
+#define __NR_io_pgetevents 382
+#define __NR_rseq 383
-#define NR_syscalls 381
+#define NR_syscalls 384
/*
* There are some system calls that are not present on 64 bit, some
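An illustrative probe of the new behaviour (with assumptions: the exact errno on a real kernel varies with the arguments and with glibc's own rseq registration). Under Valgrind with this change, the raw syscall always fails with ENOSYS, so callers fall back silently.

#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
#ifdef __NR_rseq
    /* NULL/0 arguments: we only care whether the syscall exists. On a
       real kernel this may fail with EINVAL instead; under Valgrind with
       this change it is always ENOSYS. */
    if (syscall(__NR_rseq, NULL, 0, 0, 0) == -1 && errno == ENOSYS)
        printf("rseq: ENOSYS (treated as not implemented)\n");
#endif
    return 0;
}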


@@ -1,137 +0,0 @@
commit 4831385c6706b377851284adc4c4545fff4c6564
Author: Nicholas Nethercote <nnethercote@apple.com>
Date: Tue Nov 9 12:30:07 2021 +1100
Fix Rust v0 demangling.
It's currently broken due to a silly test that prevents the v0
demangling code from even running.
The commit also adds a test, to avoid such problems in the future.
diff --git a/coregrind/m_demangle/demangle.c b/coregrind/m_demangle/demangle.c
index 16161da2a..3fd7cb75f 100644
--- a/coregrind/m_demangle/demangle.c
+++ b/coregrind/m_demangle/demangle.c
@@ -118,8 +118,13 @@ void VG_(demangle) ( Bool do_cxx_demangling, Bool do_z_demangling,
}
/* Possibly undo (1) */
+ // - C++ mangled symbols start with "_Z" (possibly with exceptions?)
+ // - Rust "legacy" mangled symbols start with "_Z".
+ // - Rust "v0" mangled symbols start with "_R".
+ // XXX: the Java/Rust/Ada demangling here probably doesn't work. See
+ // https://bugs.kde.org/show_bug.cgi?id=445235 for details.
if (do_cxx_demangling && VG_(clo_demangle)
- && orig != NULL && orig[0] == '_' && orig[1] == 'Z') {
+ && orig != NULL && orig[0] == '_' && (orig[1] == 'Z' || orig[1] == 'R')) {
/* !!! vvv STATIC vvv !!! */
static HChar* demangled = NULL;
/* !!! ^^^ STATIC ^^^ !!! */
diff --git a/memcheck/tests/demangle-rust.c b/memcheck/tests/demangle-rust.c
new file mode 100644
index 000000000..f2a458b2a
--- /dev/null
+++ b/memcheck/tests/demangle-rust.c
@@ -0,0 +1,31 @@
+// Valgrind supports demangling Rust symbols (both the "v0" and "legacy"
+// mangling schemes), but we don't want to add a dependency on the Rust
+// compiler for a single test. So this is a C program with function names that
+// are mangled Rust symbols. In the output, they become demangled Rust names.
+// It's a hack, but a useful one.
+
+#include <stdlib.h>
+
+// A v0 symbol that demangles to: <rustc_middle::ty::PredicateKind as rustc_middle::ty::fold::TypeFoldable>::fold_with::<rustc_infer::infer::resolve::OpportunisticVarResolver>
+int _RINvYNtNtCs4uGc65yWeeX_12rustc_middle2ty13PredicateKindNtNtB5_4fold12TypeFoldable9fold_withNtNtNtCsgI90OQiJWEs_11rustc_infer5infer7resolve24OpportunisticVarResolverECsdozMG8X9FIu_21rustc_trait_selection(int *p)
+{
+ return *p ? 1 : 2;
+}
+
+// A v0 symbol that demangles to: rustc_expand::mbe::macro_parser::parse_tt
+int _RNvNtNtCsaqSe1lZGvEL_12rustc_expand3mbe12macro_parser8parse_tt(int* p)
+{
+ return _RINvYNtNtCs4uGc65yWeeX_12rustc_middle2ty13PredicateKindNtNtB5_4fold12TypeFoldable9fold_withNtNtNtCsgI90OQiJWEs_11rustc_infer5infer7resolve24OpportunisticVarResolverECsdozMG8X9FIu_21rustc_trait_selection(p);
+}
+
+// A legacy symbol that demangles to: core::str::lossy::Utf8Lossy::from_bytes
+int _ZN4core3str5lossy9Utf8Lossy10from_bytes17heb1677c8cb728b0bE(int* p)
+{
+ return _RNvNtNtCsaqSe1lZGvEL_12rustc_expand3mbe12macro_parser8parse_tt(p);
+}
+
+int main(void)
+{
+ return _ZN4core3str5lossy9Utf8Lossy10from_bytes17heb1677c8cb728b0bE(malloc(sizeof(int)));
+}
+
diff --git a/memcheck/tests/demangle-rust.stderr.exp b/memcheck/tests/demangle-rust.stderr.exp
new file mode 100644
index 000000000..f04bb625b
--- /dev/null
+++ b/memcheck/tests/demangle-rust.stderr.exp
@@ -0,0 +1,6 @@
+Conditional jump or move depends on uninitialised value(s)
+ at 0x........: <rustc_middle::ty::PredicateKind as rustc_middle::ty::fold::TypeFoldable>::fold_with::<rustc_infer::infer::resolve::OpportunisticVarResolver> (demangle-rust.c:12)
+ by 0x........: rustc_expand::mbe::macro_parser::parse_tt (demangle-rust.c:18)
+ by 0x........: core::str::lossy::Utf8Lossy::from_bytes (demangle-rust.c:24)
+ by 0x........: main (demangle-rust.c:29)
+
diff --git a/memcheck/tests/demangle-rust.vgtest b/memcheck/tests/demangle-rust.vgtest
new file mode 100644
index 000000000..d726c6b2e
--- /dev/null
+++ b/memcheck/tests/demangle-rust.vgtest
@@ -0,0 +1,2 @@
+prog: demangle-rust
+vgopts: -q
commit c1bfa115f985633722f25922d2996c231e8c9d8d
Author: Mark Wielaard <mark@klomp.org>
Date: Wed Nov 10 09:02:36 2021 +0100
Add demangle-rust.vgtest demangle-rust.stderr.exp to EXTRA_DIST
diff --git a/memcheck/tests/Makefile.am b/memcheck/tests/Makefile.am
index 4d0476e2d..7837d87c7 100644
--- a/memcheck/tests/Makefile.am
+++ b/memcheck/tests/Makefile.am
@@ -281,6 +281,7 @@ EXTRA_DIST = \
realloc3.stderr.exp realloc3.vgtest \
recursive-merge.stderr.exp recursive-merge.vgtest \
resvn_stack.stderr.exp resvn_stack.vgtest \
+ demangle-rust.vgtest demangle-rust.stderr.exp \
sbfragment.stdout.exp sbfragment.stderr.exp sbfragment.vgtest \
sem.stderr.exp sem.vgtest \
sendmsg.stderr.exp sendmsg.stderr.exp-solaris sendmsg.vgtest \
commit d151907e5d8ff393f4fef126c8ae445ea8813661
Author: Mark Wielaard <mark@klomp.org>
Date: Thu Nov 11 18:02:09 2021 +0100
Add demangle-rust to check_PROGRAMS
The demangle-rust.vgtest would fail because the demangle-rust binary
wasn't built by default. Add it to check_PROGRAMS and define
demangle_rust_SOURCES to make sure it is always built.
diff --git a/memcheck/tests/Makefile.am b/memcheck/tests/Makefile.am
index 7837d87c7..449710020 100644
--- a/memcheck/tests/Makefile.am
+++ b/memcheck/tests/Makefile.am
@@ -392,6 +392,7 @@ check_PROGRAMS = \
custom_alloc \
custom-overlap \
demangle \
+ demangle-rust \
big_debuginfo_symbol \
deep-backtrace \
describe-block \
@@ -505,6 +506,7 @@ endif
leak_cpp_interior_SOURCES = leak_cpp_interior.cpp
demangle_SOURCES = demangle.cpp
+demangle_rust_SOURCES = demangle-rust.c
# Suppress various gcc warnings which are correct, but for things
# we are actually testing for at runtime.


@@ -1,549 +0,0 @@
commit b77dbefe72e4a5c7bcf1576a02c909010bd56991
Author: Andreas Arnez <arnez@linux.ibm.com>
Date: Fri Oct 22 19:55:12 2021 +0200
Bug 444242 - s390x: Sign-extend "relative long" offset in EXRL
In s390_irgen_EXRL, the offset is zero-extended instead of sign-extended,
typically causing Valgrind to crash when a negative offset occurs.
Fix this with a new helper function that calculates a "relative long"
address from a 32-bit offset. Replace other calculations of "relative
long" addresses by invocations of this function as well. And for
consistency, do the same with "relative" (short) addresses.
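The arithmetic at issue, as a standalone sketch: the 32-bit offset field must be sign-extended before the doubling, otherwise a negative "relative long" offset is treated as a huge positive one. (The constants below are made up for illustration.)

#include <stdio.h>

int main(void)
{
    unsigned int offset = 0xFFFFFFF0u;     /* offset field encoding -16 */
    unsigned long long ia = 0x100000ULL;   /* stand-in for guest_IA_curr_instr */

    /* Old, zero-extending computation vs. the fixed, sign-extending one. */
    unsigned long long wrong = ia + ((unsigned long long)offset << 1);
    unsigned long long right = ia + (unsigned long long)((long long)(int)offset << 1);

    printf("zero-extended: %#llx (jumps far away)\n", wrong);
    printf("sign-extended: %#llx (32 bytes back)\n", right);
    return 0;
}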
diff --git a/VEX/priv/guest_s390_toIR.c b/VEX/priv/guest_s390_toIR.c
index 72222ab04..fffc563d4 100644
--- a/VEX/priv/guest_s390_toIR.c
+++ b/VEX/priv/guest_s390_toIR.c
@@ -399,6 +399,22 @@ mkF64i(ULong value)
return IRExpr_Const(IRConst_F64i(value));
}
+/* Return the 64-bit address with the given 32-bit "relative long" offset from
+ the current guest instruction being translated. */
+static __inline__ Addr64
+addr_rel_long(UInt offset)
+{
+ return guest_IA_curr_instr + ((Addr64)(Long)(Int)offset << 1);
+}
+
+/* Return the 64-bit address with the given 16-bit "relative" offset from the
+ current guest instruction being translated. */
+static __inline__ Addr64
+addr_relative(UShort offset)
+{
+ return guest_IA_curr_instr + ((Addr64)(Long)(Short)offset << 1);
+}
+
/* Little helper function for my sanity. ITE = if-then-else */
static IRExpr *
mkite(IRExpr *condition, IRExpr *iftrue, IRExpr *iffalse)
@@ -5516,7 +5532,7 @@ static const HChar *
s390_irgen_BRAS(UChar r1, UShort i2)
{
put_gpr_dw0(r1, mkU64(guest_IA_curr_instr + 4ULL));
- call_function_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+ call_function_and_chase(addr_relative(i2));
return "bras";
}
@@ -5525,7 +5541,7 @@ static const HChar *
s390_irgen_BRASL(UChar r1, UInt i2)
{
put_gpr_dw0(r1, mkU64(guest_IA_curr_instr + 6ULL));
- call_function_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1));
+ call_function_and_chase(addr_rel_long(i2));
return "brasl";
}
@@ -5538,12 +5554,11 @@ s390_irgen_BRC(UChar r1, UShort i2)
if (r1 == 0) {
} else {
if (r1 == 15) {
- always_goto_and_chase(
- guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+ always_goto_and_chase(addr_relative(i2));
} else {
assign(cond, s390_call_calculate_cond(r1));
if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
- guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+ addr_relative(i2));
}
}
@@ -5561,11 +5576,11 @@ s390_irgen_BRCL(UChar r1, UInt i2)
if (r1 == 0) {
} else {
if (r1 == 15) {
- always_goto_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1));
+ always_goto_and_chase(addr_rel_long(i2));
} else {
assign(cond, s390_call_calculate_cond(r1));
if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
- guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1));
+ addr_rel_long(i2));
}
}
if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
@@ -5579,7 +5594,7 @@ s390_irgen_BRCT(UChar r1, UShort i2)
{
put_gpr_w1(r1, binop(Iop_Sub32, get_gpr_w1(r1), mkU32(1)));
if_condition_goto(binop(Iop_CmpNE32, get_gpr_w1(r1), mkU32(0)),
- guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+ addr_relative(i2));
return "brct";
}
@@ -5589,7 +5604,7 @@ s390_irgen_BRCTH(UChar r1, UInt i2)
{
put_gpr_w0(r1, binop(Iop_Sub32, get_gpr_w0(r1), mkU32(1)));
if_condition_goto(binop(Iop_CmpNE32, get_gpr_w0(r1), mkU32(0)),
- guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+ addr_relative(i2));
return "brcth";
}
@@ -5599,7 +5614,7 @@ s390_irgen_BRCTG(UChar r1, UShort i2)
{
put_gpr_dw0(r1, binop(Iop_Sub64, get_gpr_dw0(r1), mkU64(1)));
if_condition_goto(binop(Iop_CmpNE64, get_gpr_dw0(r1), mkU64(0)),
- guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+ addr_relative(i2));
return "brctg";
}
@@ -5612,7 +5627,7 @@ s390_irgen_BRXH(UChar r1, UChar r3, UShort i2)
assign(value, get_gpr_w1(r3 | 1));
put_gpr_w1(r1, binop(Iop_Add32, get_gpr_w1(r1), get_gpr_w1(r3)));
if_condition_goto(binop(Iop_CmpLT32S, mkexpr(value), get_gpr_w1(r1)),
- guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+ addr_relative(i2));
return "brxh";
}
@@ -5625,7 +5640,7 @@ s390_irgen_BRXHG(UChar r1, UChar r3, UShort i2)
assign(value, get_gpr_dw0(r3 | 1));
put_gpr_dw0(r1, binop(Iop_Add64, get_gpr_dw0(r1), get_gpr_dw0(r3)));
if_condition_goto(binop(Iop_CmpLT64S, mkexpr(value), get_gpr_dw0(r1)),
- guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+ addr_relative(i2));
return "brxhg";
}
@@ -5638,7 +5653,7 @@ s390_irgen_BRXLE(UChar r1, UChar r3, UShort i2)
assign(value, get_gpr_w1(r3 | 1));
put_gpr_w1(r1, binop(Iop_Add32, get_gpr_w1(r1), get_gpr_w1(r3)));
if_condition_goto(binop(Iop_CmpLE32S, get_gpr_w1(r1), mkexpr(value)),
- guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+ addr_relative(i2));
return "brxle";
}
@@ -5651,7 +5666,7 @@ s390_irgen_BRXLG(UChar r1, UChar r3, UShort i2)
assign(value, get_gpr_dw0(r3 | 1));
put_gpr_dw0(r1, binop(Iop_Add64, get_gpr_dw0(r1), get_gpr_dw0(r3)));
if_condition_goto(binop(Iop_CmpLE64S, get_gpr_dw0(r1), mkexpr(value)),
- guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+ addr_relative(i2));
return "brxlg";
}
@@ -5782,8 +5797,7 @@ s390_irgen_CRL(UChar r1, UInt i2)
IRTemp op2 = newTemp(Ity_I32);
assign(op1, get_gpr_w1(r1));
- assign(op2, load(Ity_I32, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)
- i2 << 1))));
+ assign(op2, load(Ity_I32, mkU64(addr_rel_long(i2))));
s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
return "crl";
@@ -5796,8 +5810,7 @@ s390_irgen_CGRL(UChar r1, UInt i2)
IRTemp op2 = newTemp(Ity_I64);
assign(op1, get_gpr_dw0(r1));
- assign(op2, load(Ity_I64, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)
- i2 << 1))));
+ assign(op2, load(Ity_I64, mkU64(addr_rel_long(i2))));
s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
return "cgrl";
@@ -5810,8 +5823,7 @@ s390_irgen_CGFRL(UChar r1, UInt i2)
IRTemp op2 = newTemp(Ity_I64);
assign(op1, get_gpr_dw0(r1));
- assign(op2, unop(Iop_32Sto64, load(Ity_I32, mkU64(guest_IA_curr_instr +
- ((ULong)(Long)(Int)i2 << 1)))));
+ assign(op2, unop(Iop_32Sto64, load(Ity_I32, mkU64(addr_rel_long(i2)))));
s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
return "cgfrl";
@@ -5875,15 +5887,14 @@ s390_irgen_CRJ(UChar r1, UChar r2, UShort i4, UChar m3)
if (m3 == 0) {
} else {
if (m3 == 14) {
- always_goto_and_chase(
- guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ always_goto_and_chase(addr_relative(i4));
} else {
assign(op1, get_gpr_w1(r1));
assign(op2, get_gpr_w1(r2));
assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_SIGNED_COMPARE,
op1, op2));
if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
- guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ addr_relative(i4));
}
}
@@ -5901,15 +5912,14 @@ s390_irgen_CGRJ(UChar r1, UChar r2, UShort i4, UChar m3)
if (m3 == 0) {
} else {
if (m3 == 14) {
- always_goto_and_chase(
- guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ always_goto_and_chase(addr_relative(i4));
} else {
assign(op1, get_gpr_dw0(r1));
assign(op2, get_gpr_dw0(r2));
assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_SIGNED_COMPARE,
op1, op2));
if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
- guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ addr_relative(i4));
}
}
@@ -5975,14 +5985,14 @@ s390_irgen_CIJ(UChar r1, UChar m3, UShort i4, UChar i2)
if (m3 == 0) {
} else {
if (m3 == 14) {
- always_goto_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ always_goto_and_chase(addr_relative(i4));
} else {
assign(op1, get_gpr_w1(r1));
op2 = (Int)(Char)i2;
assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_SIGNED_COMPARE, op1,
mktemp(Ity_I32, mkU32((UInt)op2))));
if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
- guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ addr_relative(i4));
}
}
@@ -6000,14 +6010,14 @@ s390_irgen_CGIJ(UChar r1, UChar m3, UShort i4, UChar i2)
if (m3 == 0) {
} else {
if (m3 == 14) {
- always_goto_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ always_goto_and_chase(addr_relative(i4));
} else {
assign(op1, get_gpr_dw0(r1));
op2 = (Long)(Char)i2;
assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_SIGNED_COMPARE, op1,
mktemp(Ity_I64, mkU64((ULong)op2))));
if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
- guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ addr_relative(i4));
}
}
@@ -6131,8 +6141,7 @@ s390_irgen_CHRL(UChar r1, UInt i2)
IRTemp op2 = newTemp(Ity_I32);
assign(op1, get_gpr_w1(r1));
- assign(op2, unop(Iop_16Sto32, load(Ity_I16, mkU64(guest_IA_curr_instr +
- ((ULong)(Long)(Int)i2 << 1)))));
+ assign(op2, unop(Iop_16Sto32, load(Ity_I16, mkU64(addr_rel_long(i2)))));
s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
return "chrl";
@@ -6145,8 +6154,7 @@ s390_irgen_CGHRL(UChar r1, UInt i2)
IRTemp op2 = newTemp(Ity_I64);
assign(op1, get_gpr_dw0(r1));
- assign(op2, unop(Iop_16Sto64, load(Ity_I16, mkU64(guest_IA_curr_instr +
- ((ULong)(Long)(Int)i2 << 1)))));
+ assign(op2, unop(Iop_16Sto64, load(Ity_I16, mkU64(addr_rel_long(i2)))));
s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
return "cghrl";
@@ -6401,8 +6409,7 @@ s390_irgen_CLRL(UChar r1, UInt i2)
IRTemp op2 = newTemp(Ity_I32);
assign(op1, get_gpr_w1(r1));
- assign(op2, load(Ity_I32, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)
- i2 << 1))));
+ assign(op2, load(Ity_I32, mkU64(addr_rel_long(i2))));
s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
return "clrl";
@@ -6415,8 +6422,7 @@ s390_irgen_CLGRL(UChar r1, UInt i2)
IRTemp op2 = newTemp(Ity_I64);
assign(op1, get_gpr_dw0(r1));
- assign(op2, load(Ity_I64, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)
- i2 << 1))));
+ assign(op2, load(Ity_I64, mkU64(addr_rel_long(i2))));
s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
return "clgrl";
@@ -6429,8 +6435,7 @@ s390_irgen_CLGFRL(UChar r1, UInt i2)
IRTemp op2 = newTemp(Ity_I64);
assign(op1, get_gpr_dw0(r1));
- assign(op2, unop(Iop_32Uto64, load(Ity_I32, mkU64(guest_IA_curr_instr +
- ((ULong)(Long)(Int)i2 << 1)))));
+ assign(op2, unop(Iop_32Uto64, load(Ity_I32, mkU64(addr_rel_long(i2)))));
s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
return "clgfrl";
@@ -6443,8 +6448,7 @@ s390_irgen_CLHRL(UChar r1, UInt i2)
IRTemp op2 = newTemp(Ity_I32);
assign(op1, get_gpr_w1(r1));
- assign(op2, unop(Iop_16Uto32, load(Ity_I16, mkU64(guest_IA_curr_instr +
- ((ULong)(Long)(Int)i2 << 1)))));
+ assign(op2, unop(Iop_16Uto32, load(Ity_I16, mkU64(addr_rel_long(i2)))));
s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
return "clhrl";
@@ -6457,8 +6461,7 @@ s390_irgen_CLGHRL(UChar r1, UInt i2)
IRTemp op2 = newTemp(Ity_I64);
assign(op1, get_gpr_dw0(r1));
- assign(op2, unop(Iop_16Uto64, load(Ity_I16, mkU64(guest_IA_curr_instr +
- ((ULong)(Long)(Int)i2 << 1)))));
+ assign(op2, unop(Iop_16Uto64, load(Ity_I16, mkU64(addr_rel_long(i2)))));
s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
return "clghrl";
@@ -6730,14 +6733,14 @@ s390_irgen_CLRJ(UChar r1, UChar r2, UShort i4, UChar m3)
if (m3 == 0) {
} else {
if (m3 == 14) {
- always_goto_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ always_goto_and_chase(addr_relative(i4));
} else {
assign(op1, get_gpr_w1(r1));
assign(op2, get_gpr_w1(r2));
assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_UNSIGNED_COMPARE,
op1, op2));
if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
- guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ addr_relative(i4));
}
}
@@ -6755,14 +6758,14 @@ s390_irgen_CLGRJ(UChar r1, UChar r2, UShort i4, UChar m3)
if (m3 == 0) {
} else {
if (m3 == 14) {
- always_goto_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ always_goto_and_chase(addr_relative(i4));
} else {
assign(op1, get_gpr_dw0(r1));
assign(op2, get_gpr_dw0(r2));
assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_UNSIGNED_COMPARE,
op1, op2));
if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
- guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ addr_relative(i4));
}
}
@@ -6828,14 +6831,14 @@ s390_irgen_CLIJ(UChar r1, UChar m3, UShort i4, UChar i2)
if (m3 == 0) {
} else {
if (m3 == 14) {
- always_goto_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ always_goto_and_chase(addr_relative(i4));
} else {
assign(op1, get_gpr_w1(r1));
op2 = (UInt)i2;
assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_UNSIGNED_COMPARE, op1,
mktemp(Ity_I32, mkU32(op2))));
if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
- guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ addr_relative(i4));
}
}
@@ -6853,14 +6856,14 @@ s390_irgen_CLGIJ(UChar r1, UChar m3, UShort i4, UChar i2)
if (m3 == 0) {
} else {
if (m3 == 14) {
- always_goto_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ always_goto_and_chase(addr_relative(i4));
} else {
assign(op1, get_gpr_dw0(r1));
op2 = (ULong)i2;
assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_UNSIGNED_COMPARE, op1,
mktemp(Ity_I64, mkU64(op2))));
if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
- guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ addr_relative(i4));
}
}
@@ -7539,8 +7542,7 @@ s390_irgen_LGFI(UChar r1, UInt i2)
static const HChar *
s390_irgen_LRL(UChar r1, UInt i2)
{
- put_gpr_w1(r1, load(Ity_I32, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)
- i2 << 1))));
+ put_gpr_w1(r1, load(Ity_I32, mkU64(addr_rel_long(i2))));
return "lrl";
}
@@ -7548,8 +7550,7 @@ s390_irgen_LRL(UChar r1, UInt i2)
static const HChar *
s390_irgen_LGRL(UChar r1, UInt i2)
{
- put_gpr_dw0(r1, load(Ity_I64, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)
- i2 << 1))));
+ put_gpr_dw0(r1, load(Ity_I64, mkU64(addr_rel_long(i2))));
return "lgrl";
}
@@ -7557,8 +7558,7 @@ s390_irgen_LGRL(UChar r1, UInt i2)
static const HChar *
s390_irgen_LGFRL(UChar r1, UInt i2)
{
- put_gpr_dw0(r1, unop(Iop_32Sto64, load(Ity_I32, mkU64(guest_IA_curr_instr +
- ((ULong)(Long)(Int)i2 << 1)))));
+ put_gpr_dw0(r1, unop(Iop_32Sto64, load(Ity_I32, mkU64(addr_rel_long(i2)))));
return "lgfrl";
}
@@ -7598,7 +7598,7 @@ s390_irgen_LAEY(UChar r1, IRTemp op2addr)
static const HChar *
s390_irgen_LARL(UChar r1, UInt i2)
{
- put_gpr_dw0(r1, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)));
+ put_gpr_dw0(r1, mkU64(addr_rel_long(i2)));
return "larl";
}
@@ -8038,8 +8038,7 @@ s390_irgen_LGHI(UChar r1, UShort i2)
static const HChar *
s390_irgen_LHRL(UChar r1, UInt i2)
{
- put_gpr_w1(r1, unop(Iop_16Sto32, load(Ity_I16, mkU64(guest_IA_curr_instr +
- ((ULong)(Long)(Int)i2 << 1)))));
+ put_gpr_w1(r1, unop(Iop_16Sto32, load(Ity_I16, mkU64(addr_rel_long(i2)))));
return "lhrl";
}
@@ -8047,8 +8046,7 @@ s390_irgen_LHRL(UChar r1, UInt i2)
static const HChar *
s390_irgen_LGHRL(UChar r1, UInt i2)
{
- put_gpr_dw0(r1, unop(Iop_16Sto64, load(Ity_I16, mkU64(guest_IA_curr_instr +
- ((ULong)(Long)(Int)i2 << 1)))));
+ put_gpr_dw0(r1, unop(Iop_16Sto64, load(Ity_I16, mkU64(addr_rel_long(i2)))));
return "lghrl";
}
@@ -8088,8 +8086,7 @@ s390_irgen_LLGF(UChar r1, IRTemp op2addr)
static const HChar *
s390_irgen_LLGFRL(UChar r1, UInt i2)
{
- put_gpr_dw0(r1, unop(Iop_32Uto64, load(Ity_I32, mkU64(guest_IA_curr_instr +
- ((ULong)(Long)(Int)i2 << 1)))));
+ put_gpr_dw0(r1, unop(Iop_32Uto64, load(Ity_I32, mkU64(addr_rel_long(i2)))));
return "llgfrl";
}
@@ -8169,8 +8166,7 @@ s390_irgen_LLGH(UChar r1, IRTemp op2addr)
static const HChar *
s390_irgen_LLHRL(UChar r1, UInt i2)
{
- put_gpr_w1(r1, unop(Iop_16Uto32, load(Ity_I16, mkU64(guest_IA_curr_instr +
- ((ULong)(Long)(Int)i2 << 1)))));
+ put_gpr_w1(r1, unop(Iop_16Uto32, load(Ity_I16, mkU64(addr_rel_long(i2)))));
return "llhrl";
}
@@ -8178,8 +8174,7 @@ s390_irgen_LLHRL(UChar r1, UInt i2)
static const HChar *
s390_irgen_LLGHRL(UChar r1, UInt i2)
{
- put_gpr_dw0(r1, unop(Iop_16Uto64, load(Ity_I16, mkU64(guest_IA_curr_instr +
- ((ULong)(Long)(Int)i2 << 1)))));
+ put_gpr_dw0(r1, unop(Iop_16Uto64, load(Ity_I16, mkU64(addr_rel_long(i2)))));
return "llghrl";
}
@@ -10064,8 +10059,7 @@ s390_irgen_STG(UChar r1, IRTemp op2addr)
static const HChar *
s390_irgen_STRL(UChar r1, UInt i2)
{
- store(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)),
- get_gpr_w1(r1));
+ store(mkU64(addr_rel_long(i2)), get_gpr_w1(r1));
return "strl";
}
@@ -10073,8 +10067,7 @@ s390_irgen_STRL(UChar r1, UInt i2)
static const HChar *
s390_irgen_STGRL(UChar r1, UInt i2)
{
- store(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)),
- get_gpr_dw0(r1));
+ store(mkU64(addr_rel_long(i2)), get_gpr_dw0(r1));
return "stgrl";
}
@@ -10203,8 +10196,7 @@ s390_irgen_STHY(UChar r1, IRTemp op2addr)
static const HChar *
s390_irgen_STHRL(UChar r1, UInt i2)
{
- store(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)),
- get_gpr_hw3(r1));
+ store(mkU64(addr_rel_long(i2)), get_gpr_hw3(r1));
return "sthrl";
}
@@ -13282,7 +13274,7 @@ static const HChar *
s390_irgen_EXRL(UChar r1, UInt offset)
{
IRTemp addr = newTemp(Ity_I64);
- Addr64 bytes_addr = guest_IA_curr_instr + offset * 2UL;
+ Addr64 bytes_addr = addr_rel_long(offset);
UChar *bytes = (UChar *)(HWord)bytes_addr;
/* we might save one round trip because we know the target */
if (!last_execute_target)
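
The refactor above replaces one repeated inline computation: relative-branch and relative-long immediates on s390x count halfwords, so they are sign-extended and doubled before being added to the current instruction address. A minimal sketch of the two helpers, reconstructed from the removed expressions rather than quoted from the patch (the real definitions live elsewhere in guest_s390_toIR.c):

/* Reconstructed sketch, not the patch's actual code. */
static ULong addr_relative(UShort i4)
{
   /* 16-bit branch offset: sign-extend, then scale by 2. */
   return guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1);
}

static ULong addr_rel_long(UInt i2)
{
   /* 32-bit "relative long" offset: sign-extend, then scale by 2. */
   return guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1);
}

The sign extension is the actual bug fix in the EXRL case: the old guest_IA_curr_instr + offset * 2UL zero-extended the 32-bit offset, so a negative offset such as 0xffffffff (-1 halfword) resolved to an address far beyond the instruction instead of two bytes before it.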
diff --git a/none/tests/s390x/exrl.c b/none/tests/s390x/exrl.c
index 2c99602d8..e669e484f 100644
--- a/none/tests/s390x/exrl.c
+++ b/none/tests/s390x/exrl.c
@@ -54,6 +54,17 @@ int main(void)
printf("|\n");
printf("\n");
+ printf("------- EXRL with negative offset\n");
+ asm volatile( "j 2f\n\t"
+ "1:\n\t"
+ "mvc 2(1,%0),0(%0)\n\t"
+ "2:\n\t"
+ "lghi 1,8\n\t"
+ ".insn ril,0xc60000000000,1,1b\n\t" // exrl 1, 1b
+ : : "a" (target)
+ : "1", "2", "3", "4");
+ printf(" target = |%s|\n", target);
+
return 0;
}
diff --git a/none/tests/s390x/exrl.stdout.exp b/none/tests/s390x/exrl.stdout.exp
index 520919e92..30dcde829 100644
--- a/none/tests/s390x/exrl.stdout.exp
+++ b/none/tests/s390x/exrl.stdout.exp
@@ -11,3 +11,5 @@ after: target = |0123456789aXXXXX|
------- EXRL to OR in the syscall number (writes out target)
target = |0123456789aXXXXX|
+------- EXRL with negative offset
+ target = |01010101010XXXXX|
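
The expected output follows from EXRL's execute semantics: bits 56-63 of the named register (here the value 8 loaded by lghi) are OR'ed into byte 1 of the target instruction, so the mvc's length code goes from 0 (one byte) to 8 (nine bytes), and MVC copies strictly byte by byte, letting the overlapping ranges propagate the leading pattern. A small C model of that effect, assuming target still holds "0123456789aXXXXX" from the earlier tests:

#include <stdio.h>

int main(void)
{
   char target[] = "0123456789aXXXXX";  /* stand-in for the test buffer */
   /* The executed instruction amounts to mvc 2(9,target),0(target):
    * a byte-wise overlapping copy of 9 bytes from offset 0 to offset 2. */
   for (int i = 0; i < 9; i++)
      target[2 + i] = target[i];
   printf(" target = |%s|\n", target);  /* prints |01010101010XXXXX| */
   return 0;
}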

@@ -1,28 +0,0 @@
commit 99bf5dabf7865aaea7f2192373633e026c6fb16e
Author: Andreas Arnez <arnez@linux.ibm.com>
Date: Thu Dec 9 15:27:41 2021 +0100
Bug 444481 - Don't unmap the vDSO on s390x

Newer Linux kernels on s390x may use the vDSO as a "trampoline" for
syscall restart. This means that the vDSO is no longer optional, and
unmapping it may lead to a segmentation fault when a system call restart
is performed.

So far Valgrind has been unmapping the vDSO on s390x. Just don't do this
anymore.
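
The hunk below does exactly that: on s390x it stops trashing the AT_SYSINFO_EHDR entry, so the client keeps the kernel-provided vDSO address. For context, this is the same auxiliary-vector value any program can query; a minimal sketch using plain glibc (nothing Valgrind-specific):

#include <stdio.h>
#include <sys/auxv.h>   /* getauxval, AT_SYSINFO_EHDR; glibc 2.16+ */

int main(void)
{
   /* The kernel passes the address of the vDSO's ELF header in the
    * auxiliary vector; 0 means no vDSO is mapped for this process. */
   unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
   printf("vDSO ELF header at %#lx\n", vdso);
   return 0;
}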
diff --git a/coregrind/m_initimg/initimg-linux.c b/coregrind/m_initimg/initimg-linux.c
index 7d02d5567..95508ad1e 100644
--- a/coregrind/m_initimg/initimg-linux.c
+++ b/coregrind/m_initimg/initimg-linux.c
@@ -892,7 +892,8 @@ Addr setup_client_stack( void* init_sp,
# if !defined(VGP_ppc32_linux) && !defined(VGP_ppc64be_linux) \
&& !defined(VGP_ppc64le_linux) \
&& !defined(VGP_mips32_linux) && !defined(VGP_mips64_linux) \
- && !defined(VGP_nanomips_linux)
+ && !defined(VGP_nanomips_linux) \
+ && !defined(VGP_s390x_linux)
case AT_SYSINFO_EHDR: {
/* Trash this, because we don't reproduce it */
const NSegment* ehdrseg = VG_(am_find_nsegment)((Addr)auxv->u.a_ptr);

@@ -1,33 +0,0 @@
From da3b331c63a6aec0ec3206b1d9ca0df9bced3338 Mon Sep 17 00:00:00 2001
From: Andreas Arnez <arnez@linux.ibm.com>
Date: Mon, 3 Jan 2022 18:15:05 +0100
Subject: [PATCH] s390: Fix VFLRX and WFLRX instructions

Due to a typo in s390_irgen_VFLR, the VFLR instruction behaves incorrectly
when its m3 field contains 4, meaning extended format. In that case VFLR
is also written as VFLRX (or WFLRX) and is supposed to round down from the
extended 128-bit format to the long 64-bit format. However, the typo
checks for m3 == 2 instead, so the value of 4 is unhandled, causing
Valgrind to throw a specification exception.

This fixes the typo.
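
The fix itself is the one-line assertion change below; the following self-contained snippet merely illustrates why m3 == 4 previously failed (s390_host_has_vxe is modeled as a plain flag here, an assumption for illustration only):

#include <stdio.h>

int main(void)
{
   /* VFLRX/WFLRX encode m3 == 4; assume the vector-enhancements
    * facility is present, as the real guard requires. */
   int m3 = 4, s390_host_has_vxe = 1;
   int before = (m3 == 3 || (s390_host_has_vxe && m3 == 2));  /* typo  */
   int after  = (m3 == 3 || (s390_host_has_vxe && m3 == 4));  /* fixed */
   /* before == 0: the assertion fails and a specification exception is
    * raised; after == 1: the extended-format case is accepted. */
   printf("before: %d, after: %d\n", before, after);
   return 0;
}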
---
VEX/priv/guest_s390_toIR.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/VEX/priv/guest_s390_toIR.c b/VEX/priv/guest_s390_toIR.c
index fffc563d4..3ef104fcd 100644
--- a/VEX/priv/guest_s390_toIR.c
+++ b/VEX/priv/guest_s390_toIR.c
@@ -19008,7 +19008,7 @@ s390_irgen_VFLL(UChar v1, UChar v2, UChar m3, UChar m4, UChar m5)
static const HChar *
s390_irgen_VFLR(UChar v1, UChar v2, UChar m3, UChar m4, UChar m5)
{
- s390_insn_assert("vflr", m3 == 3 || (s390_host_has_vxe && m3 == 2));
+ s390_insn_assert("vflr", m3 == 3 || (s390_host_has_vxe && m3 == 4));
if (m3 == 3)
s390_vector_fp_convert(Iop_F64toF32, Ity_F64, Ity_F32, True,
--
2.31.1

@@ -1,12 +1,12 @@
 %{?scl:%scl_package valgrind}
-Summary: Tool for finding memory management bugs in programs
+Summary: Dynamic analysis tools to detect memory or thread bugs and profile
 Name: %{?scl_prefix}valgrind
-Version: 3.18.1
-Release: 9%{?dist}
+Version: 3.19.0
+Release: 1%{?dist}
 Epoch: 1
 License: GPLv2+
-URL: http://www.valgrind.org/
+URL: https://www.valgrind.org/
 # Only necessary for RHEL, will be ignored on Fedora
@@ -77,71 +13,13 @@ Source0: https://sourceware.org/pub/valgrind/valgrind-%{version}.tar.bz2
 Patch1: valgrind-3.9.0-cachegrind-improvements.patch
 # Make ld.so supressions slightly less specific.
-Patch3: valgrind-3.9.0-ldso-supp.patch
+Patch2: valgrind-3.9.0-ldso-supp.patch
 # Add some stack-protector
-Patch4: valgrind-3.16.0-some-stack-protector.patch
+Patch3: valgrind-3.16.0-some-stack-protector.patch
 # Add some -Wl,z,now.
-Patch5: valgrind-3.16.0-some-Wl-z-now.patch
-# KDE#444495 dhat/tests/copy fails on s390x
-Patch6: valgrind-3.18.1-dhat-tests-copy.patch
-# KDE#444242 s390x: Sign-extend "relative long" offset in EXRL
-Patch7: valgrind-3.18.1-s390x-EXRL.patch
-# KDE#444571 - PPC, fix lxsibzx and lxsihzx
-Patch8: valgrind-3.18.1-ppc64-lxsibzx-lxsihzx.patch
-# commit ae8c6de01417023e78763de145b1c0e6ddd87277
-# commit 3950c5d661ee09526cddcf24daf5fc22bc83f70c
-# Fix for the prefixed stq instruction in PC relative mode.
-# KDE#444836 pstq instruction for R=1 is not storing to the correct address
-Patch9: valgrind-3.18.1-ppc-pstq.patch
-Patch10: valgrind-3.18.1-ppc-pstq-tests.patch
-# commit 64ab89162906d5b9e2de6c3afe476fec861ef7ec
-# gdbserver_tests: Filter out glibc hwcaps libc.so
-Patch11: valgrind-3.18.1-gdbserver_tests-hwcap.patch
-# KDE#445184 Rust v0 symbol demangling is broken
-Patch12: valgrind-3.18.1-rust-v0-demangle.patch
-# KDE#445354 arm64 backend: incorrect code emitted for doubleword CAS
-Patch13: valgrind-3.18.1-arm64-doubleword-cas.patch
-# KDE#444399 arm64: unhandled instruction LD{,A}XP and ST{,L}XP
-Patch14: valgrind-3.18.1-arm64-ldaxp-stlxp.patch
-# KDE#445415 arm64 front end: alignment checks missing for atomic instructions.
-Patch15: valgrind-3.18.1-arm64-atomic-align.patch
-# commit 595341b150312d2407bd43304449bf39ec3e1fa8
-# amd64 front end: add more spec rules
-Patch16: valgrind-3.18.1-amd64-more-spec-rules.patch
-# KDE#445504 Using C++ condition_variable results in bogus
-# "mutex is locked simultaneously by two threads" warning
-Patch17: valgrind-3.18.1-condvar.patch
-# KDE#445668 Inline stack frame generation is broken for Rust binaries
-Patch18: valgrind-3.18.1-demangle-namespace.patch
-# KDE#405377 Handle new Linux kernel feature: Restartable Sequences ("rseq")
-Patch19: valgrind-3.18.1-rseq-enosys.patch
-# KDE#444481 gdb_server test failures on s390x
-Patch20: valgrind-3.18.1-s390x-vdso.patch
-# KDE#447995 Valgrind segfault on power10 due to hwcap checking code
-Patch21: valgrind-3.18.1-ppc-hwcaps.patch
-# KDE#447991 s390x: Valgrind indicates illegal instruction on wflrx
-Patch22: valgrind-3.18.1-s390x-wflrx.patch
-# KDE#449494 arm64: Mismatch detected between RDMA and atomics features
-Patch23: valgrind-3.18.1-arm64-atomics-rdm.patch
+Patch4: valgrind-3.16.0-some-Wl-z-now.patch
BuildRequires: make
BuildRequires: glibc-devel
@@ -272,32 +214,14 @@ Valgrind User Manual for details.
 %setup -q -n %{?scl:%{pkg_name}}%{!?scl:%{name}}-%{version}
 %patch1 -p1
-%patch3 -p1
+%patch2 -p1
 # Old rhel gcc doesn't have -fstack-protector-strong.
 %if 0%{?fedora} || 0%{?rhel} >= 7
+%patch3 -p1
 %patch4 -p1
-%patch5 -p1
 %endif
-%patch6 -p1
-%patch7 -p1
-%patch8 -p1
-%patch9 -p1
-%patch10 -p1
-%patch11 -p1
-%patch12 -p1
-%patch13 -p1
-%patch14 -p1
-%patch15 -p1
-%patch16 -p1
-%patch17 -p1
-%patch18 -p1
-%patch19 -p1
-%patch20 -p1
-%patch21 -p1
-%patch22 -p1
-%patch23 -p1
%build
# LTO triggers undefined symbols in valgrind. Valgrind has a --enable-lto
@@ -527,6 +451,9 @@ fi
 %endif
 %changelog
+* Tue Apr 19 2022 Mark Wielaard <mjw@redhat.com> - 3.19.0-1
+- Upgrade to valgrind 3.19.0. Drop old patches.
+
 * Thu Feb 10 2022 Mark Wielaard <mjw@redhat.com> - 3.18.1-9
 - Add valgrind-3.18.1-arm64-atomics-rdm.patch