author      Edward Welbourne <edward.welbourne@qt.io>            2023-02-07 16:32:34 +0100
committer   Qt Cherry-pick Bot <cherrypick_bot@qt-project.org>   2023-02-20 19:58:24 +0000
commit      a5adab4aa811cb2d719584cc71b121a628b77488 (patch)
tree        8e1468963c7aae016a7f560fb0530923f50e3315 /src/testlib
parent      ac6ec5b1fab8c0a5c5984307f66de61b8d636e7e (diff)
download    qtbase-a5adab4aa811cb2d719584cc71b121a628b77488.tar.gz
Update Valgrind to its 3.20.0 release
Valgrind now generates its valgrind.h from a valgrind.h.in template, but
there were only two @...@ tokens to substitute, so regenerating the header
was easy enough.
From reading the diff, the important changes are:
* Added x86 support on FreeBSD (32- and 64-bit)
* Added nanoMIPS support on Linux
* Inline assembly fixes for S390
* Added the VALGRIND_CLO_CHANGE client request (usage sketch below)
Task-number: QTBUG-110999
Change-Id: I66990c958cf40c34e7d58dd60b1e11984303fb64
Reviewed-by: Thiago Macieira <thiago.macieira@intel.com>
(cherry picked from commit daceee6a9004565cee6ccd50c6a96216066b4f29)
Reviewed-by: Qt Cherry-pick Bot <cherrypick_bot@qt-project.org>
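For context, a minimal sketch of how a client program might use the new
VALGRIND_CLO_CHANGE request, assuming the public header is installed as
<valgrind/valgrind.h> (Qt's bundled copy, valgrind_p.h, is private to
testlib). Whether a particular option is dynamically changeable depends on
the tool, so the option string below is purely illustrative:

    #include <valgrind/valgrind.h>

    int main(void)
    {
        /* RUNNING_ON_VALGRIND expands to 0 when not running under
           Valgrind, so the request below is skipped in a normal run. */
        if (RUNNING_ON_VALGRIND) {
            /* Ask the running tool to change a dynamic command-line
               option at runtime; unknown or non-dynamic options only
               produce a warning from Valgrind. */
            VALGRIND_CLO_CHANGE("--stats=yes");
        }
        return 0;
    }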
Diffstat (limited to 'src/testlib')
-rw-r--r--  src/testlib/3rdparty/qt_attribution.json |   2
-rw-r--r--  src/testlib/3rdparty/valgrind_p.h        | 566
2 files changed, 543 insertions(+), 25 deletions(-)
diff --git a/src/testlib/3rdparty/qt_attribution.json b/src/testlib/3rdparty/qt_attribution.json index 4dd844c232..08a5bcf1ec 100644 --- a/src/testlib/3rdparty/qt_attribution.json +++ b/src/testlib/3rdparty/qt_attribution.json @@ -8,7 +8,7 @@ "Description": "An instrumentation framework for building dynamic analysis tools.", "Homepage": "http://valgrind.org/", - "Version": "3.14.0", + "Version": "3.20.0", "License": "BSD 4-clause \"Original\" or \"Old\" License", "LicenseId": "BSD-4-Clause", "LicenseFile": "VALGRIND_LICENSE.txt", diff --git a/src/testlib/3rdparty/valgrind_p.h b/src/testlib/3rdparty/valgrind_p.h index 577c8f05e5..3aaa76746c 100644 --- a/src/testlib/3rdparty/valgrind_p.h +++ b/src/testlib/3rdparty/valgrind_p.h @@ -89,7 +89,7 @@ || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6)) */ #define __VALGRIND_MAJOR__ 3 -#define __VALGRIND_MINOR__ 14 +#define __VALGRIND_MINOR__ 20 #include <stdarg.h> @@ -110,6 +110,8 @@ */ #undef PLAT_x86_darwin #undef PLAT_amd64_darwin +#undef PLAT_x86_freebsd +#undef PLAT_amd64_freebsd #undef PLAT_x86_win32 #undef PLAT_amd64_win64 #undef PLAT_x86_linux @@ -122,6 +124,7 @@ #undef PLAT_s390x_linux #undef PLAT_mips32_linux #undef PLAT_mips64_linux +#undef PLAT_nanomips_linux #undef PLAT_x86_solaris #undef PLAT_amd64_solaris @@ -130,12 +133,17 @@ # define PLAT_x86_darwin 1 #elif defined(__APPLE__) && defined(__x86_64__) # define PLAT_amd64_darwin 1 -#elif (defined(__MINGW32__) && !defined(__MINGW64__)) \ +#elif defined(__FreeBSD__) && defined(__i386__) +# define PLAT_x86_freebsd 1 +#elif defined(__FreeBSD__) && defined(__amd64__) +# define PLAT_amd64_freebsd 1 +#elif (defined(__MINGW32__) && defined(__i386__)) \ || defined(__CYGWIN32__) \ || (defined(_WIN32) && defined(_M_IX86)) # define PLAT_x86_win32 1 -#elif defined(__MINGW64__) \ - || (defined(_WIN64) && defined(_M_X64)) +#elif (defined(__MINGW32__) && defined(__x86_64__)) \ + || (defined(_WIN32) && defined(_M_X64)) +/* __MINGW32__ and _WIN32 are defined in 64 bit mode as well. */ # define PLAT_amd64_win64 1 #elif defined(__linux__) && defined(__i386__) # define PLAT_x86_linux 1 @@ -157,8 +165,10 @@ # define PLAT_s390x_linux 1 #elif defined(__linux__) && defined(__mips__) && (__mips==64) # define PLAT_mips64_linux 1 -#elif defined(__linux__) && defined(__mips__) && (__mips!=64) +#elif defined(__linux__) && defined(__mips__) && (__mips==32) # define PLAT_mips32_linux 1 +#elif defined(__linux__) && defined(__nanomips__) +# define PLAT_nanomips_linux 1 #elif defined(__sun) && defined(__i386__) # define PLAT_x86_solaris 1 #elif defined(__sun) && defined(__x86_64__) @@ -254,7 +264,7 @@ #if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \ || (defined(PLAT_x86_win32) && defined(__GNUC__)) \ - || defined(PLAT_x86_solaris) + || defined(PLAT_x86_solaris) || defined(PLAT_x86_freebsd) typedef struct { @@ -394,6 +404,7 @@ valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request, #if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \ || defined(PLAT_amd64_solaris) \ + || defined(PLAT_amd64_freebsd) \ || (defined(PLAT_amd64_win64) && defined(__GNUC__)) typedef @@ -872,7 +883,8 @@ typedef /* results = r3 */ \ "lgr %0, 3\n\t" \ : "=d" (_zzq_result) \ - : "a" (&_zzq_args[0]), "0" (_zzq_default) \ + : "a" (&_zzq_args[0]), \ + "0" ((unsigned long int)_zzq_default) \ : "cc", "2", "3", "memory" \ ); \ _zzq_result; \ @@ -1045,6 +1057,75 @@ typedef #endif /* PLAT_mips64_linux */ +#if defined(PLAT_nanomips_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; +/* + 8000 c04d srl zero, zero, 13 + 8000 c05d srl zero, zero, 29 + 8000 c043 srl zero, zero, 3 + 8000 c053 srl zero, zero, 19 +*/ + +#define __SPECIAL_INSTRUCTION_PREAMBLE "srl[32] $zero, $zero, 13 \n\t" \ + "srl[32] $zero, $zero, 29 \n\t" \ + "srl[32] $zero, $zero, 3 \n\t" \ + "srl[32] $zero, $zero, 19 \n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({ volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile("move $a7, %1\n\t" /* default */ \ + "move $t0, %2\n\t" /* ptr */ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* $a7 = client_request( $t0 ) */ \ + "or[32] $t0, $t0, $t0\n\t" \ + "move %0, $a7\n\t" /* result */ \ + : "=r" (_zzq_result) \ + : "r" (_zzq_default), "r" (&_zzq_args[0]) \ + : "$a7", "$t0", "memory"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* $a7 = guest_NRADDR */ \ + "or[32] $t1, $t1, $t1\n\t" \ + "move %0, $a7" /*result*/ \ + : "=r" (__addr) \ + : \ + : "$a7"); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_T9 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir $25 */ \ + "or[32] $t2, $t2, $t2\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or[32] $t3, $t3, $t3\n\t" \ + ); \ + } while (0) + +#endif /* Insert assembly code for other platforms here... */ #endif /* NVALGRIND */ @@ -1145,7 +1226,7 @@ typedef /* ----------------- x86-{linux,darwin,solaris} ---------------- */ #if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \ - || defined(PLAT_x86_solaris) + || defined(PLAT_x86_solaris) || defined(PLAT_x86_freebsd) /* These regs are trashed by the hidden call. No need to mention eax as gcc can already see that, plus causes gcc to bomb. */ @@ -1577,7 +1658,7 @@ typedef /* ---------------- amd64-{linux,darwin,solaris} --------------- */ #if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \ - || defined(PLAT_amd64_solaris) + || defined(PLAT_amd64_solaris) || defined(PLAT_amd64_freebsd) /* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */ @@ -4687,8 +4768,16 @@ typedef r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the function a proper return address. All others are ABI defined call clobbers. 
*/ -#define __CALLER_SAVED_REGS "0","1","2","3","4","5","14", \ - "f0","f1","f2","f3","f4","f5","f6","f7" +#if defined(__VX__) || defined(__S390_VX__) +#define __CALLER_SAVED_REGS "0", "1", "2", "3", "4", "5", "14", \ + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", \ + "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", \ + "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \ + "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" +#else +#define __CALLER_SAVED_REGS "0", "1", "2", "3", "4", "5", "14", \ + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7" +#endif /* Nb: Although r11 is modified in the asm snippets below (inside VALGRIND_CFI_PROLOGUE) it is not listed in the clobber section, for @@ -4710,9 +4799,9 @@ typedef "aghi 15,-160\n\t" \ "lg 1, 0(1)\n\t" /* target->r1 */ \ VALGRIND_CALL_NOREDIR_R1 \ - "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ : /*out*/ "=d" (_res) \ : /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ @@ -4734,9 +4823,9 @@ typedef "lg 2, 8(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ - "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ @@ -4759,9 +4848,9 @@ typedef "lg 3,16(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ - "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ @@ -4786,9 +4875,9 @@ typedef "lg 4,24(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ - "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ @@ -4815,9 +4904,9 @@ typedef "lg 5,32(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ - "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ @@ -4846,9 +4935,9 @@ typedef "lg 6,40(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ - "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ @@ -4880,9 +4969,9 @@ typedef "mvc 160(8,15), 48(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ - "lgr %0, 2\n\t" \ "aghi 15,168\n\t" \ VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ @@ -4916,9 +5005,9 @@ typedef "mvc 168(8,15), 56(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ - "lgr %0, 2\n\t" \ "aghi 15,176\n\t" \ VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ @@ -4954,9 +5043,9 @@ typedef "mvc 176(8,15), 64(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ - "lgr %0, 2\n\t" \ "aghi 15,184\n\t" \ VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ @@ -4994,9 +5083,9 @@ typedef "mvc 184(8,15), 72(1)\n\t" \ 
"lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ - "lgr %0, 2\n\t" \ "aghi 15,192\n\t" \ VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ @@ -5036,9 +5125,9 @@ typedef "mvc 192(8,15), 80(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ - "lgr %0, 2\n\t" \ "aghi 15,200\n\t" \ VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ @@ -5080,9 +5169,9 @@ typedef "mvc 200(8,15), 88(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ - "lgr %0, 2\n\t" \ "aghi 15,208\n\t" \ VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ @@ -5126,9 +5215,9 @@ typedef "mvc 208(8,15), 96(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ - "lgr %0, 2\n\t" \ "aghi 15,216\n\t" \ VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ @@ -5678,6 +5767,422 @@ typedef #endif /* PLAT_mips32_linux */ +/* ------------------------- nanomips-linux -------------------- */ + +#if defined(PLAT_nanomips_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS "$t4", "$t5", "$a0", "$a1", "$a2", \ +"$a3", "$a4", "$a5", "$a6", "$a7", "$t0", "$t1", "$t2", "$t3", \ +"$t8","$t9", "$at" + +/* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned + long) == 4. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] 
= (unsigned long)(arg3); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + "lw $a4,20(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + "lw $a4,20(%1)\n\t" \ + "lw $a5,24(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + "lw $a4,20(%1)\n\t" \ + "lw $a5,24(%1)\n\t" \ + "lw $a6,28(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : 
/*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + "lw $a4,20(%1)\n\t" \ + "lw $a5,24(%1)\n\t" \ + "lw $a6,28(%1)\n\t" \ + "lw $a7,32(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + "addiu $sp, $sp, -16 \n\t" \ + "lw $t9,36(%1) \n\t" \ + "sw $t9, 0($sp) \n\t" \ + "lw $t9, 0(%1) \n\t" \ + "lw $a0, 4(%1) \n\t" \ + "lw $a1, 8(%1) \n\t" \ + "lw $a2,12(%1) \n\t" \ + "lw $a3,16(%1) \n\t" \ + "lw $a4,20(%1) \n\t" \ + "lw $a5,24(%1) \n\t" \ + "lw $a6,28(%1) \n\t" \ + "lw $a7,32(%1) \n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0 \n\t" \ + "addiu $sp, $sp, 16 \n\t" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + "addiu $sp, $sp, -16 \n\t" \ + "lw $t9,36(%1) \n\t" \ + "sw $t9, 0($sp) \n\t" \ + "lw $t9,40(%1) \n\t" \ + "sw $t9, 4($sp) \n\t" \ + "lw $t9, 0(%1) \n\t" \ + "lw $a0, 4(%1) \n\t" \ + "lw $a1, 8(%1) \n\t" \ + "lw $a2,12(%1) \n\t" \ + "lw $a3,16(%1) \n\t" \ + "lw $a4,20(%1) \n\t" \ + "lw $a5,24(%1) \n\t" \ + "lw $a6,28(%1) \n\t" \ + "lw $a7,32(%1) \n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0 \n\t" \ + "addiu $sp, $sp, 16 \n\t" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ 
"memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + "addiu $sp, $sp, -16 \n\t" \ + "lw $t9,36(%1) \n\t" \ + "sw $t9, 0($sp) \n\t" \ + "lw $t9,40(%1) \n\t" \ + "sw $t9, 4($sp) \n\t" \ + "lw $t9,44(%1) \n\t" \ + "sw $t9, 8($sp) \n\t" \ + "lw $t9, 0(%1) \n\t" \ + "lw $a0, 4(%1) \n\t" \ + "lw $a1, 8(%1) \n\t" \ + "lw $a2,12(%1) \n\t" \ + "lw $a3,16(%1) \n\t" \ + "lw $a4,20(%1) \n\t" \ + "lw $a5,24(%1) \n\t" \ + "lw $a6,28(%1) \n\t" \ + "lw $a7,32(%1) \n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0 \n\t" \ + "addiu $sp, $sp, 16 \n\t" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + "addiu $sp, $sp, -16 \n\t" \ + "lw $t9,36(%1) \n\t" \ + "sw $t9, 0($sp) \n\t" \ + "lw $t9,40(%1) \n\t" \ + "sw $t9, 4($sp) \n\t" \ + "lw $t9,44(%1) \n\t" \ + "sw $t9, 8($sp) \n\t" \ + "lw $t9,48(%1) \n\t" \ + "sw $t9,12($sp) \n\t" \ + "lw $t9, 0(%1) \n\t" \ + "lw $a0, 4(%1) \n\t" \ + "lw $a1, 8(%1) \n\t" \ + "lw $a2,12(%1) \n\t" \ + "lw $a3,16(%1) \n\t" \ + "lw $a4,20(%1) \n\t" \ + "lw $a5,24(%1) \n\t" \ + "lw $a6,28(%1) \n\t" \ + "lw $a7,32(%1) \n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0 \n\t" \ + "addiu $sp, $sp, 16 \n\t" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_nanomips_linux */ + /* ------------------------- mips64-linux ------------------------- */ #if defined(PLAT_mips64_linux) @@ -6146,6 +6651,10 @@ typedef command. */ VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202, + /* Allows the client program to change a dynamic command line + option. */ + VG_USERREQ__CLO_CHANGE = 0x1203, + /* These are useful and can be interpreted by any tool that tracks malloc() et al, by using vg_replace_malloc.c. */ VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301, @@ -6628,6 +7137,14 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...) 
command, 0, 0, 0, 0) +/* Change the value of a dynamic command line option. + Note that unknown or not dynamically changeable options + will cause a warning message to be output. */ +#define VALGRIND_CLO_CHANGE(option) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CLO_CHANGE, \ + option, 0, 0, 0, 0) + + #undef PLAT_x86_darwin #undef PLAT_amd64_darwin #undef PLAT_x86_win32 @@ -6641,6 +7158,7 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...) #undef PLAT_s390x_linux #undef PLAT_mips32_linux #undef PLAT_mips64_linux +#undef PLAT_nanomips_linux #undef PLAT_x86_solaris #undef PLAT_amd64_solaris |
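Most of the nanoMIPS additions above are the CALL_FN_W_* call-through
macros used by Valgrind's function-wrapping machinery. As a reminder of how
they are consumed, a minimal wrapping sketch; the wrapped function foo and
its library are hypothetical, while the macro names are the standard ones
from valgrind.h:

    #include <stdio.h>
    #include <valgrind/valgrind.h>

    /* Wrapper for int foo(int) in an object with no soname ("NONE").
       Valgrind redirects calls to foo() into this function; CALL_FN_W_W
       then calls the original, unredirected code through the per-platform
       assembly (now including a nanoMIPS variant). */
    int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)(int x)
    {
        OrigFn fn;
        int result;
        VALGRIND_GET_ORIG_FN(fn);    /* fetch the real foo()'s address */
        CALL_FN_W_W(result, fn, x);  /* one word result, one word argument */
        fprintf(stderr, "foo(%d) -> %d\n", x, result);
        return result;
    }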