From 0f388ce8689f409a11434aa9707a04a44907b2ce Mon Sep 17 00:00:00 2001 From: Jakub Jelinek Date: Tue, 31 Jul 2007 18:24:22 +0000 Subject: [PATCH] 2.6.90-1 --- .cvsignore | 4 +- glibc-fedora.patch | 399 +- glibc-i386-rwlock.patch | 98 + glibc-ldconfig-speedup.patch | 1143 +++++ glibc-private-futex.patch | 7988 ++++++++++++++++++++++++++++++++++ glibc-warning-patrol.patch | 210 + glibc.spec | 8 + sources | 4 +- 8 files changed, 9589 insertions(+), 265 deletions(-) create mode 100644 glibc-i386-rwlock.patch create mode 100644 glibc-ldconfig-speedup.patch create mode 100644 glibc-private-futex.patch create mode 100644 glibc-warning-patrol.patch diff --git a/.cvsignore b/.cvsignore index 1c1c289..9587647 100644 --- a/.cvsignore +++ b/.cvsignore @@ -1,2 +1,2 @@ -glibc-20070515T2025.tar.bz2 -glibc-fedora-20070515T2025.tar.bz2 +glibc-20070731T1624.tar.bz2 +glibc-fedora-20070731T1624.tar.bz2 diff --git a/glibc-fedora.patch b/glibc-fedora.patch index 621a1b3..46ad081 100644 --- a/glibc-fedora.patch +++ b/glibc-fedora.patch @@ -1,6 +1,6 @@ ---- glibc-20070515T2025/ChangeLog 15 May 2007 20:24:57 -0000 1.10641 -+++ glibc-20070515T2025-fedora/ChangeLog 15 May 2007 20:34:27 -0000 1.8782.2.244 -@@ -71,6 +71,13 @@ +--- glibc-20070731T1624/ChangeLog 31 Jul 2007 12:33:25 -0000 1.10737 ++++ glibc-20070731T1624-fedora/ChangeLog 31 Jul 2007 17:45:45 -0000 1.8782.2.260 +@@ -1235,6 +1235,13 @@ * include/sys/cdefs.h: Redefine __nonnull so that test for incorrect parameters in the libc code itself are not omitted. @@ -14,7 +14,7 @@ 2007-05-09 Jakub Jelinek * sysdeps/ia64/fpu/fraiseexcpt.c (feraiseexcept): Don't raise overflow -@@ -366,6 +373,10 @@ +@@ -1530,6 +1537,10 @@ [BZ #4368] * stdlib/stdlib.h: Remove obsolete part of comment for realpath. @@ -25,7 +25,7 @@ 2007-04-16 Ulrich Drepper [BZ #4364] -@@ -1623,6 +1634,15 @@ +@@ -2787,6 +2798,15 @@ separators also if no non-zero digits found. * stdlib/Makefile (tests): Add tst-strtod3. @@ -41,8 +41,8 @@ 2006-12-09 Ulrich Drepper [BZ #3632] ---- glibc-20070515T2025/ChangeLog.15 16 Feb 2005 07:34:17 -0000 1.1 -+++ glibc-20070515T2025-fedora/ChangeLog.15 19 Dec 2006 19:05:40 -0000 1.1.6.3 +--- glibc-20070731T1624/ChangeLog.15 16 Feb 2005 07:34:17 -0000 1.1 ++++ glibc-20070731T1624-fedora/ChangeLog.15 19 Dec 2006 19:05:40 -0000 1.1.6.3 @@ -477,6 +477,14 @@ 2004-11-26 Jakub Jelinek @@ -108,8 +108,8 @@ 2004-08-30 Roland McGrath * scripts/extract-abilist.awk: If `lastversion' variable defined, omit ---- glibc-20070515T2025/ChangeLog.16 4 May 2006 16:05:24 -0000 1.1 -+++ glibc-20070515T2025-fedora/ChangeLog.16 5 May 2006 06:11:52 -0000 1.1.2.1 +--- glibc-20070731T1624/ChangeLog.16 4 May 2006 16:05:24 -0000 1.1 ++++ glibc-20070731T1624-fedora/ChangeLog.16 5 May 2006 06:11:52 -0000 1.1.2.1 @@ -171,6 +171,11 @@ [BZ #2611] * stdio-common/renameat.c (renameat): Fix typo. 
@@ -281,8 +281,8 @@ 2005-02-10 Roland McGrath [BZ #157] ---- glibc-20070515T2025/csu/Makefile 1 Mar 2006 10:35:47 -0000 1.79 -+++ glibc-20070515T2025-fedora/csu/Makefile 30 Nov 2006 17:07:37 -0000 1.74.2.6 +--- glibc-20070731T1624/csu/Makefile 1 Mar 2006 10:35:47 -0000 1.79 ++++ glibc-20070731T1624-fedora/csu/Makefile 30 Nov 2006 17:07:37 -0000 1.74.2.6 @@ -93,7 +93,8 @@ omit-deps += $(crtstuff) $(crtstuff:%=$(objpfx)%.o): %.o: %.S $(objpfx)defs.h $(compile.S) -g0 $(ASFLAGS-.os) -o $@ @@ -293,8 +293,8 @@ vpath initfini.c $(sysdirs) ---- glibc-20070515T2025/csu/elf-init.c 5 Nov 2005 17:41:38 -0000 1.8 -+++ glibc-20070515T2025-fedora/csu/elf-init.c 15 Nov 2005 09:54:10 -0000 1.3.2.6 +--- glibc-20070731T1624/csu/elf-init.c 5 Nov 2005 17:41:38 -0000 1.8 ++++ glibc-20070731T1624-fedora/csu/elf-init.c 15 Nov 2005 09:54:10 -0000 1.3.2.6 @@ -49,6 +49,23 @@ extern void (*__init_array_end []) (int, extern void (*__fini_array_start []) (void) attribute_hidden; extern void (*__fini_array_end []) (void) attribute_hidden; @@ -319,8 +319,8 @@ /* These function symbols are provided for the .init/.fini section entry points automagically by the linker. */ ---- glibc-20070515T2025/debug/tst-chk1.c 24 Apr 2006 17:00:18 -0000 1.15 -+++ glibc-20070515T2025-fedora/debug/tst-chk1.c 24 Apr 2006 20:31:49 -0000 1.1.2.16 +--- glibc-20070731T1624/debug/tst-chk1.c 24 Apr 2006 17:00:18 -0000 1.15 ++++ glibc-20070731T1624-fedora/debug/tst-chk1.c 24 Apr 2006 20:31:49 -0000 1.1.2.16 @@ -17,6 +17,9 @@ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ @@ -349,9 +349,9 @@ # define O 0 #else # define O 1 ---- glibc-20070515T2025/elf/ldconfig.c 13 Apr 2007 19:53:20 -0000 1.59 -+++ glibc-20070515T2025-fedora/elf/ldconfig.c 16 Apr 2007 23:59:03 -0000 1.47.2.14 -@@ -965,17 +965,19 @@ search_dirs (void) +--- glibc-20070731T1624/elf/ldconfig.c 16 Jul 2007 00:54:59 -0000 1.60 ++++ glibc-20070731T1624-fedora/elf/ldconfig.c 31 Jul 2007 17:45:49 -0000 1.47.2.15 +@@ -966,17 +966,19 @@ search_dirs (void) static void parse_conf_include (const char *config_file, unsigned int lineno, @@ -373,7 +373,7 @@ if (do_chroot && opt_chroot) { -@@ -1036,7 +1038,14 @@ parse_conf (const char *filename, bool d +@@ -1037,7 +1039,14 @@ parse_conf (const char *filename, bool d cp += 8; while ((dir = strsep (&cp, " \t")) != NULL) if (dir[0] != '\0') @@ -389,7 +389,7 @@ } else if (!strncasecmp (cp, "hwcap", 5) && isblank (cp[5])) { -@@ -1099,7 +1108,7 @@ parse_conf (const char *filename, bool d +@@ -1100,7 +1109,7 @@ parse_conf (const char *filename, bool d config files to read. */ static void parse_conf_include (const char *config_file, unsigned int lineno, @@ -398,7 +398,7 @@ { if (opt_chroot && pattern[0] != '/') error (EXIT_FAILURE, 0, -@@ -1129,7 +1138,7 @@ parse_conf_include (const char *config_f +@@ -1130,7 +1139,7 @@ parse_conf_include (const char *config_f { case 0: for (size_t i = 0; i < gl.gl_pathc; ++i) @@ -407,7 +407,7 @@ globfree64 (&gl); break; -@@ -1173,6 +1182,8 @@ main (int argc, char **argv) +@@ -1174,6 +1183,8 @@ main (int argc, char **argv) /* Set the text message domain. */ textdomain (_libc_intl_domainname); @@ -416,7 +416,7 @@ /* Parse and process arguments. 
*/ int remaining; argp_parse (&argp, argc, argv, 0, &remaining, NULL); -@@ -1284,12 +1295,14 @@ main (int argc, char **argv) +@@ -1285,12 +1296,14 @@ main (int argc, char **argv) if (!opt_only_cline) { @@ -432,8 +432,8 @@ } search_dirs (); ---- glibc-20070515T2025/elf/tst-stackguard1.c 26 Jun 2005 18:08:36 -0000 1.1 -+++ glibc-20070515T2025-fedora/elf/tst-stackguard1.c 8 Aug 2005 21:24:27 -0000 1.1.2.3 +--- glibc-20070731T1624/elf/tst-stackguard1.c 26 Jun 2005 18:08:36 -0000 1.1 ++++ glibc-20070731T1624-fedora/elf/tst-stackguard1.c 8 Aug 2005 21:24:27 -0000 1.1.2.3 @@ -160,17 +160,21 @@ do_test (void) the 16 runs, something is very wrong. */ int ndifferences = 0; @@ -458,87 +458,9 @@ { puts ("stack guard canaries are not randomized enough"); puts ("nor equal to the default canary value"); ---- glibc-20070515T2025/iconv/iconvconfig.c 3 Jan 2007 05:44:11 -0000 1.27 -+++ glibc-20070515T2025-fedora/iconv/iconvconfig.c 17 Jan 2007 10:42:36 -0000 1.19.2.8 -@@ -1011,6 +1011,34 @@ next_prime (uint32_t seed) - module name offset - (following last entry with step count 0) - */ -+ -+static struct hash_entry *hash_table; -+static size_t hash_size; -+ -+/* Function to insert the names. */ -+static void name_insert (const void *nodep, VISIT value, int level) -+{ -+ struct name *name; -+ unsigned int idx; -+ unsigned int hval2; -+ -+ if (value != leaf && value != postorder) -+ return; -+ -+ name = *(struct name **) nodep; -+ idx = name->hashval % hash_size; -+ hval2 = 1 + name->hashval % (hash_size - 2); -+ -+ while (hash_table[idx].string_offset != 0) -+ if ((idx += hval2) >= hash_size) -+ idx -= hash_size; -+ -+ hash_table[idx].string_offset = strtaboffset (name->strent); -+ -+ assert (name->module_idx != -1); -+ hash_table[idx].module_idx = name->module_idx; -+} -+ - static int - write_output (void) - { -@@ -1018,8 +1046,6 @@ write_output (void) - char *string_table; - size_t string_table_size; - struct gconvcache_header header; -- struct hash_entry *hash_table; -- size_t hash_size; - struct module_entry *module_table; - char *extra_table; - char *cur_extra_table; -@@ -1032,31 +1058,6 @@ write_output (void) - char tmpfname[(output_file == NULL ? sizeof finalname : output_file_len + 1) - + strlen (".XXXXXX")]; - -- /* Function to insert the names. */ -- auto void -- name_insert (const void *nodep, VISIT value, int level) -- { -- struct name *name; -- unsigned int idx; -- unsigned int hval2; -- -- if (value != leaf && value != postorder) -- return; -- -- name = *(struct name **) nodep; -- idx = name->hashval % hash_size; -- hval2 = 1 + name->hashval % (hash_size - 2); -- -- while (hash_table[idx].string_offset != 0) -- if ((idx += hval2) >= hash_size) -- idx -= hash_size; -- -- hash_table[idx].string_offset = strtaboffset (name->strent); -- -- assert (name->module_idx != -1); -- hash_table[idx].module_idx = name->module_idx; -- } -- - /* Open the output file. 
*/ - if (output_file == NULL) - { ---- glibc-20070515T2025/include/features.h 15 May 2007 06:48:32 -0000 1.46 -+++ glibc-20070515T2025-fedora/include/features.h 15 May 2007 20:34:29 -0000 1.35.2.15 -@@ -274,7 +274,13 @@ +--- glibc-20070731T1624/include/features.h 19 Jul 2007 17:27:30 -0000 1.47 ++++ glibc-20070731T1624-fedora/include/features.h 31 Jul 2007 17:45:50 -0000 1.35.2.16 +@@ -282,7 +282,13 @@ #endif #if defined _FORTIFY_SOURCE && _FORTIFY_SOURCE > 0 \ @@ -553,16 +475,16 @@ # if _FORTIFY_SOURCE > 1 # define __USE_FORTIFY_LEVEL 2 # else ---- glibc-20070515T2025/include/bits/stdlib-ldbl.h 1 Jan 1970 00:00:00 -0000 -+++ glibc-20070515T2025-fedora/include/bits/stdlib-ldbl.h 1 Feb 2006 09:30:43 -0000 1.1.2.1 +--- glibc-20070731T1624/include/bits/stdlib-ldbl.h 1 Jan 1970 00:00:00 -0000 ++++ glibc-20070731T1624-fedora/include/bits/stdlib-ldbl.h 1 Feb 2006 09:30:43 -0000 1.1.2.1 @@ -0,0 +1 @@ +#include ---- glibc-20070515T2025/include/bits/wchar-ldbl.h 1 Jan 1970 00:00:00 -0000 -+++ glibc-20070515T2025-fedora/include/bits/wchar-ldbl.h 1 Feb 2006 09:30:43 -0000 1.1.2.1 +--- glibc-20070731T1624/include/bits/wchar-ldbl.h 1 Jan 1970 00:00:00 -0000 ++++ glibc-20070731T1624-fedora/include/bits/wchar-ldbl.h 1 Feb 2006 09:30:43 -0000 1.1.2.1 @@ -0,0 +1 @@ +#include ---- glibc-20070515T2025/intl/locale.alias 4 Dec 2003 07:57:47 -0000 1.23 -+++ glibc-20070515T2025-fedora/intl/locale.alias 22 Sep 2004 21:20:53 -0000 1.23.2.1 +--- glibc-20070731T1624/intl/locale.alias 4 Dec 2003 07:57:47 -0000 1.23 ++++ glibc-20070731T1624-fedora/intl/locale.alias 22 Sep 2004 21:20:53 -0000 1.23.2.1 @@ -58,8 +58,6 @@ korean ko_KR.eucKR korean.euc ko_KR.eucKR ko_KR ko_KR.eucKR @@ -572,8 +494,8 @@ norwegian nb_NO.ISO-8859-1 nynorsk nn_NO.ISO-8859-1 polish pl_PL.ISO-8859-2 ---- glibc-20070515T2025/libio/stdio.h 17 Feb 2007 18:26:15 -0000 1.89 -+++ glibc-20070515T2025-fedora/libio/stdio.h 21 Feb 2007 11:15:50 -0000 1.78.2.11 +--- glibc-20070731T1624/libio/stdio.h 17 Feb 2007 18:26:15 -0000 1.89 ++++ glibc-20070731T1624-fedora/libio/stdio.h 21 Feb 2007 11:15:50 -0000 1.78.2.11 @@ -145,10 +145,12 @@ typedef _G_fpos64_t fpos64_t; extern struct _IO_FILE *stdin; /* Standard input stream. */ extern struct _IO_FILE *stdout; /* Standard output stream. */ @@ -587,8 +509,8 @@ __BEGIN_NAMESPACE_STD /* Remove file FILENAME. */ ---- glibc-20070515T2025/libio/bits/stdio2.h 17 Mar 2007 17:04:08 -0000 1.5 -+++ glibc-20070515T2025-fedora/libio/bits/stdio2.h 17 Mar 2007 21:52:49 -0000 1.1.2.7 +--- glibc-20070731T1624/libio/bits/stdio2.h 17 Mar 2007 17:04:08 -0000 1.5 ++++ glibc-20070731T1624-fedora/libio/bits/stdio2.h 17 Mar 2007 21:52:49 -0000 1.1.2.7 @@ -61,14 +61,25 @@ extern int __vfprintf_chk (FILE *__restr extern int __vprintf_chk (int __flag, __const char *__restrict __format, _G_va_list __ap); @@ -619,8 +541,8 @@ #endif ---- glibc-20070515T2025/locale/iso-4217.def 17 Feb 2007 07:46:20 -0000 1.20 -+++ glibc-20070515T2025-fedora/locale/iso-4217.def 21 Feb 2007 11:15:50 -0000 1.15.2.5 +--- glibc-20070731T1624/locale/iso-4217.def 17 Feb 2007 07:46:20 -0000 1.20 ++++ glibc-20070731T1624-fedora/locale/iso-4217.def 21 Feb 2007 11:15:50 -0000 1.15.2.5 @@ -8,6 +8,7 @@ * * !!! The list has to be sorted !!! 
@@ -712,54 +634,9 @@ DEFINE_INT_CURR("PYG") /* Paraguay Guarani */ DEFINE_INT_CURR("QAR") /* Qatar Rial */ DEFINE_INT_CURR("ROL") /* Romanian Leu */ ---- glibc-20070515T2025/locale/programs/3level.h 7 Dec 2005 05:47:27 -0000 1.6 -+++ glibc-20070515T2025-fedora/locale/programs/3level.h 19 Dec 2005 12:11:15 -0000 1.5.2.2 -@@ -202,6 +202,42 @@ CONCAT(TABLE,_iterate) (struct TABLE *t, - } - } - } -+ -+/* GCC ATM seems to do a poor job with pointers to nested functions passed -+ to inlined functions. Help it a little bit with this hack. */ -+#define wchead_table_iterate(tp, fn) \ -+do \ -+ { \ -+ struct wchead_table *t = (tp); \ -+ uint32_t index1; \ -+ for (index1 = 0; index1 < t->level1_size; index1++) \ -+ { \ -+ uint32_t lookup1 = t->level1[index1]; \ -+ if (lookup1 != ((uint32_t) ~0)) \ -+ { \ -+ uint32_t lookup1_shifted = lookup1 << t->q; \ -+ uint32_t index2; \ -+ for (index2 = 0; index2 < (1 << t->q); index2++) \ -+ { \ -+ uint32_t lookup2 = t->level2[index2 + lookup1_shifted]; \ -+ if (lookup2 != ((uint32_t) ~0)) \ -+ { \ -+ uint32_t lookup2_shifted = lookup2 << t->p; \ -+ uint32_t index3; \ -+ for (index3 = 0; index3 < (1 << t->p); index3++) \ -+ { \ -+ struct element_t *lookup3 \ -+ = t->level3[index3 + lookup2_shifted]; \ -+ if (lookup3 != NULL) \ -+ fn ((((index1 << t->q) + index2) << t->p) + index3, \ -+ lookup3); \ -+ } \ -+ } \ -+ } \ -+ } \ -+ } \ -+ } while (0) -+ - #endif - - #ifndef NO_FINALIZE ---- glibc-20070515T2025/locale/programs/locarchive.c 16 Apr 2007 23:30:58 -0000 1.24 -+++ glibc-20070515T2025-fedora/locale/programs/locarchive.c 16 Apr 2007 23:59:03 -0000 1.21.2.3 -@@ -221,9 +221,9 @@ oldlocrecentcmp (const void *a, const vo +--- glibc-20070731T1624/locale/programs/locarchive.c 16 Jul 2007 00:54:59 -0000 1.25 ++++ glibc-20070731T1624-fedora/locale/programs/locarchive.c 31 Jul 2007 17:45:53 -0000 1.21.2.4 +@@ -222,9 +222,9 @@ oldlocrecentcmp (const void *a, const vo /* forward decls for below */ static uint32_t add_locale (struct locarhandle *ah, const char *name, locale_data_t data, bool replace); @@ -772,7 +649,7 @@ static void enlarge_archive (struct locarhandle *ah, const struct locarhead *head) -@@ -541,7 +541,7 @@ close_archive (struct locarhandle *ah) +@@ -542,7 +542,7 @@ close_archive (struct locarhandle *ah) #include "../../intl/explodename.c" #include "../../intl/l10nflist.c" @@ -781,7 +658,7 @@ insert_name (struct locarhandle *ah, const char *name, size_t name_len, bool replace) { -@@ -599,7 +599,7 @@ insert_name (struct locarhandle *ah, +@@ -600,7 +600,7 @@ insert_name (struct locarhandle *ah, return &namehashtab[idx]; } @@ -790,8 +667,8 @@ add_alias (struct locarhandle *ah, const char *alias, bool replace, const char *oldname, uint32_t *locrec_offset_p) { ---- glibc-20070515T2025/localedata/Makefile 14 Mar 2007 22:32:43 -0000 1.105 -+++ glibc-20070515T2025-fedora/localedata/Makefile 17 Mar 2007 21:52:52 -0000 1.101.2.5 +--- glibc-20070731T1624/localedata/Makefile 10 Jul 2007 22:12:52 -0000 1.106 ++++ glibc-20070731T1624-fedora/localedata/Makefile 31 Jul 2007 17:45:53 -0000 1.101.2.6 @@ -223,6 +223,7 @@ $(INSTALL-SUPPORTED-LOCALES): install-lo echo -n '...'; \ input=`echo $$locale | sed 's/\([^.]*\)[^@]*\(.*\)/\1\2/'`; \ @@ -800,8 +677,8 @@ -i locales/$$input -c -f charmaps/$$charset \ $(addprefix --prefix=,$(install_root)) $$locale; \ echo ' done'; \ ---- glibc-20070515T2025/localedata/SUPPORTED 4 May 2007 20:52:45 -0000 1.103 -+++ glibc-20070515T2025-fedora/localedata/SUPPORTED 10 May 2007 06:44:26 -0000 1.71.2.17 +--- 
glibc-20070731T1624/localedata/SUPPORTED 4 May 2007 20:52:45 -0000 1.103 ++++ glibc-20070731T1624-fedora/localedata/SUPPORTED 10 May 2007 06:44:26 -0000 1.71.2.17 @@ -80,6 +80,7 @@ cy_GB.UTF-8/UTF-8 \ cy_GB/ISO-8859-14 \ da_DK.UTF-8/UTF-8 \ @@ -843,8 +720,8 @@ ta_IN/UTF-8 \ te_IN/UTF-8 \ tg_TJ.UTF-8/UTF-8 \ ---- glibc-20070515T2025/localedata/locales/cy_GB 28 Sep 2004 04:37:33 -0000 1.4 -+++ glibc-20070515T2025-fedora/localedata/locales/cy_GB 29 Sep 2004 08:48:23 -0000 1.3.2.2 +--- glibc-20070731T1624/localedata/locales/cy_GB 28 Sep 2004 04:37:33 -0000 1.4 ++++ glibc-20070731T1624-fedora/localedata/locales/cy_GB 29 Sep 2004 08:48:23 -0000 1.3.2.2 @@ -248,8 +248,11 @@ mon "" d_fmt "" @@ -859,8 +736,8 @@ END LC_TIME LC_MESSAGES ---- glibc-20070515T2025/localedata/locales/en_GB 1 Oct 2006 16:18:33 -0000 1.14 -+++ glibc-20070515T2025-fedora/localedata/locales/en_GB 2 Oct 2006 19:02:41 -0000 1.10.2.4 +--- glibc-20070731T1624/localedata/locales/en_GB 1 Oct 2006 16:18:33 -0000 1.14 ++++ glibc-20070731T1624-fedora/localedata/locales/en_GB 2 Oct 2006 19:02:41 -0000 1.10.2.4 @@ -116,8 +116,8 @@ mon "" d_fmt "" @@ -872,8 +749,8 @@ date_fmt "/ / " ---- glibc-20070515T2025/localedata/locales/no_NO 1 Jan 1970 00:00:00 -0000 -+++ glibc-20070515T2025-fedora/localedata/locales/no_NO 22 Sep 2004 21:21:01 -0000 1.11.2.1 +--- glibc-20070731T1624/localedata/locales/no_NO 1 Jan 1970 00:00:00 -0000 ++++ glibc-20070731T1624-fedora/localedata/locales/no_NO 22 Sep 2004 21:21:01 -0000 1.11.2.1 @@ -0,0 +1,69 @@ +escape_char / +comment_char % @@ -944,8 +821,8 @@ +LC_ADDRESS +copy "nb_NO" +END LC_ADDRESS ---- glibc-20070515T2025/localedata/locales/zh_TW 31 Oct 2004 23:42:28 -0000 1.7 -+++ glibc-20070515T2025-fedora/localedata/locales/zh_TW 2 Nov 2004 12:25:57 -0000 1.5.2.2 +--- glibc-20070731T1624/localedata/locales/zh_TW 31 Oct 2004 23:42:28 -0000 1.7 ++++ glibc-20070731T1624-fedora/localedata/locales/zh_TW 2 Nov 2004 12:25:57 -0000 1.5.2.2 @@ -1,7 +1,7 @@ comment_char % escape_char / @@ -973,8 +850,8 @@ revision "0.2" date "2000-08-02" % ---- glibc-20070515T2025/malloc/mcheck.c 8 Sep 2004 20:36:02 -0000 1.18 -+++ glibc-20070515T2025-fedora/malloc/mcheck.c 20 Dec 2005 10:59:21 -0000 1.18.2.1 +--- glibc-20070731T1624/malloc/mcheck.c 19 May 2007 04:27:20 -0000 1.20 ++++ glibc-20070731T1624-fedora/malloc/mcheck.c 21 May 2007 20:01:08 -0000 1.18.2.2 @@ -24,9 +24,25 @@ # include # include @@ -1028,7 +905,7 @@ __memalign_hook = memalignhook; if (block == NULL) return NULL; -@@ -294,8 +310,8 @@ reallochook (__ptr_t ptr, __malloc_size_ +@@ -300,8 +316,8 @@ reallochook (__ptr_t ptr, __malloc_size_ sizeof (struct hdr) + size + 1, caller); else @@ -1039,7 +916,7 @@ __free_hook = freehook; __malloc_hook = mallochook; __memalign_hook = memalignhook; -@@ -355,8 +371,8 @@ mcheck (func) +@@ -361,8 +377,8 @@ mcheck (func) if (__malloc_initialized <= 0 && !mcheck_used) { /* We call malloc() once here to ensure it is initialized. 
*/ @@ -1050,8 +927,8 @@ old_free_hook = __free_hook; __free_hook = freehook; ---- glibc-20070515T2025/malloc/mtrace.c 25 Jan 2007 00:43:38 -0000 1.43 -+++ glibc-20070515T2025-fedora/malloc/mtrace.c 20 Dec 2005 10:59:21 -0000 1.41.2.1 +--- glibc-20070731T1624/malloc/mtrace.c 25 Jan 2007 00:43:38 -0000 1.43 ++++ glibc-20070731T1624-fedora/malloc/mtrace.c 20 Dec 2005 10:59:21 -0000 1.41.2.1 @@ -40,6 +40,18 @@ # include # define setvbuf(s, b, f, l) INTUSE(_IO_setvbuf) (s, b, f, l) @@ -1107,28 +984,28 @@ __memalign_hook = tr_memalignhook; __malloc_hook = tr_mallochook; ---- glibc-20070515T2025/manual/libc.texinfo 12 Nov 2003 00:37:03 -0000 1.94 -+++ glibc-20070515T2025-fedora/manual/libc.texinfo 3 Nov 2006 16:31:21 -0000 1.94.2.1 +--- glibc-20070731T1624/manual/libc.texinfo 31 Jul 2007 01:53:58 -0000 1.95 ++++ glibc-20070731T1624-fedora/manual/libc.texinfo 31 Jul 2007 17:45:53 -0000 1.94.2.2 @@ -5,7 +5,7 @@ - @setchapternewpage odd + @c setchapternewpage odd @comment Tell install-info what to do. --@dircategory GNU libraries +-@dircategory Software libraries +@dircategory Libraries @direntry * Libc: (libc). C library. @end direntry ---- glibc-20070515T2025/nis/nss 28 Apr 2006 21:02:23 -0000 1.3 -+++ glibc-20070515T2025-fedora/nis/nss 1 May 2006 08:02:53 -0000 1.2.2.2 +--- glibc-20070731T1624/nis/nss 28 Apr 2006 21:02:23 -0000 1.3 ++++ glibc-20070731T1624-fedora/nis/nss 1 May 2006 08:02:53 -0000 1.2.2.2 @@ -25,4 +25,4 @@ # memory with every getXXent() call. Otherwise each getXXent() call # might result into a network communication with the server to get # the next entry. -#SETENT_BATCH_READ=TRUE +SETENT_BATCH_READ=TRUE ---- glibc-20070515T2025/nptl/ChangeLog 15 May 2007 06:32:02 -0000 1.970 -+++ glibc-20070515T2025-fedora/nptl/ChangeLog 15 May 2007 20:34:29 -0000 1.706.2.124 -@@ -1474,6 +1474,15 @@ +--- glibc-20070731T1624/nptl/ChangeLog 28 Jul 2007 20:32:13 -0000 1.1011 ++++ glibc-20070731T1624-fedora/nptl/ChangeLog 31 Jul 2007 17:45:54 -0000 1.706.2.131 +@@ -2057,6 +2057,15 @@ Use __sigfillset. Document that sigfillset does the right thing wrt to SIGSETXID. @@ -1144,7 +1021,7 @@ 2005-07-11 Jakub Jelinek [BZ #1102] -@@ -2210,6 +2219,11 @@ +@@ -2793,6 +2802,11 @@ Move definition inside libpthread, libc, librt check. Provide definition for rtld. @@ -1156,7 +1033,7 @@ 2004-09-02 Ulrich Drepper * sysdeps/alpha/jmpbuf-unwind.h: Define __libc_unwind_longjmp. -@@ -4284,6 +4298,11 @@ +@@ -4867,6 +4881,11 @@ * Makefile [$(build-shared) = yes] (tests): Depend on $(test-modules). @@ -1168,8 +1045,8 @@ 2003-07-25 Jakub Jelinek * tst-cancel17.c (do_test): Check if aio_cancel failed. ---- glibc-20070515T2025/nptl/Makefile 8 Sep 2006 10:40:49 -0000 1.188 -+++ glibc-20070515T2025-fedora/nptl/Makefile 28 Nov 2006 11:18:18 -0000 1.157.2.29 +--- glibc-20070731T1624/nptl/Makefile 26 May 2007 01:30:09 -0000 1.190 ++++ glibc-20070731T1624-fedora/nptl/Makefile 31 Jul 2007 17:45:54 -0000 1.157.2.32 @@ -340,7 +340,8 @@ endif extra-objs += $(crti-objs) $(crtn-objs) omit-deps += crti crtn @@ -1202,8 +1079,8 @@ else $(addprefix $(objpfx),$(tests) $(test-srcs)): $(objpfx)libpthread.a endif ---- glibc-20070515T2025/nptl/tst-stackguard1.c 26 Jun 2005 17:44:14 -0000 1.1 -+++ glibc-20070515T2025-fedora/nptl/tst-stackguard1.c 8 Aug 2005 21:24:28 -0000 1.1.2.3 +--- glibc-20070731T1624/nptl/tst-stackguard1.c 26 Jun 2005 17:44:14 -0000 1.1 ++++ glibc-20070731T1624-fedora/nptl/tst-stackguard1.c 8 Aug 2005 21:24:28 -0000 1.1.2.3 @@ -190,17 +190,21 @@ do_test (void) the 16 runs, something is very wrong. 
*/ int ndifferences = 0; @@ -1228,8 +1105,8 @@ { puts ("stack guard canaries are not randomized enough"); puts ("nor equal to the default canary value"); ---- glibc-20070515T2025/nptl/sysdeps/unix/sysv/linux/kernel-features.h 1 Jan 1970 00:00:00 -0000 -+++ glibc-20070515T2025-fedora/nptl/sysdeps/unix/sysv/linux/kernel-features.h 22 Sep 2004 21:21:02 -0000 1.1.2.1 +--- glibc-20070731T1624/nptl/sysdeps/unix/sysv/linux/kernel-features.h 1 Jan 1970 00:00:00 -0000 ++++ glibc-20070731T1624-fedora/nptl/sysdeps/unix/sysv/linux/kernel-features.h 22 Sep 2004 21:21:02 -0000 1.1.2.1 @@ -0,0 +1,6 @@ +#include_next + @@ -1237,9 +1114,9 @@ +#ifndef __ASSUME_CLONE_THREAD_FLAGS +# define __ASSUME_CLONE_THREAD_FLAGS 1 +#endif ---- glibc-20070515T2025/nscd/connections.c 1 Feb 2007 16:05:31 -0000 1.98 -+++ glibc-20070515T2025-fedora/nscd/connections.c 10 May 2007 17:07:30 -0000 1.55.2.29 -@@ -68,6 +68,7 @@ static gid_t *server_groups; +--- glibc-20070731T1624/nscd/connections.c 16 Jul 2007 00:54:58 -0000 1.99 ++++ glibc-20070731T1624-fedora/nscd/connections.c 31 Jul 2007 17:45:59 -0000 1.55.2.30 +@@ -69,6 +69,7 @@ static gid_t *server_groups; # define NGROUPS 32 #endif static int server_ngroups; @@ -1247,7 +1124,7 @@ static pthread_attr_t attr; -@@ -1363,6 +1364,10 @@ nscd_run (void *p) +@@ -1364,6 +1365,10 @@ nscd_run (void *p) if (readylist == NULL && to == ETIMEDOUT) { --nready; @@ -1258,7 +1135,7 @@ pthread_mutex_unlock (&readylist_lock); goto only_prune; } -@@ -1372,6 +1377,34 @@ nscd_run (void *p) +@@ -1373,6 +1378,34 @@ nscd_run (void *p) pthread_cond_wait (&readylist_cond, &readylist_lock); } @@ -1293,7 +1170,7 @@ struct fdlist *it = readylist->next; if (readylist->next == readylist) /* Just one entry on the list. */ -@@ -1947,3 +1980,10 @@ finish_drop_privileges (void) +@@ -1948,3 +1981,10 @@ finish_drop_privileges (void) install_real_capabilities (new_caps); #endif } @@ -1304,9 +1181,9 @@ +{ + sighup_pending = 1; +} ---- glibc-20070515T2025/nscd/nscd.c 16 Feb 2007 19:14:58 -0000 1.57 -+++ glibc-20070515T2025-fedora/nscd/nscd.c 21 Feb 2007 11:15:53 -0000 1.38.2.16 -@@ -119,6 +119,9 @@ static struct argp argp = +--- glibc-20070731T1624/nscd/nscd.c 16 Jul 2007 00:54:58 -0000 1.58 ++++ glibc-20070731T1624-fedora/nscd/nscd.c 31 Jul 2007 17:46:00 -0000 1.38.2.17 +@@ -120,6 +120,9 @@ static struct argp argp = options, parse_opt, NULL, doc, }; @@ -1316,7 +1193,7 @@ /* True if only statistics are requested. */ static bool get_stats; -@@ -263,6 +266,7 @@ main (int argc, char **argv) +@@ -264,6 +267,7 @@ main (int argc, char **argv) signal (SIGINT, termination_handler); signal (SIGQUIT, termination_handler); signal (SIGTERM, termination_handler); @@ -1324,8 +1201,8 @@ signal (SIGPIPE, SIG_IGN); /* Cleanup files created by a previous 'bind'. 
*/ ---- glibc-20070515T2025/nscd/nscd.conf 14 Jan 2007 05:24:04 -0000 1.14 -+++ glibc-20070515T2025-fedora/nscd/nscd.conf 17 Jan 2007 10:42:40 -0000 1.8.2.6 +--- glibc-20070731T1624/nscd/nscd.conf 14 Jan 2007 05:24:04 -0000 1.14 ++++ glibc-20070731T1624-fedora/nscd/nscd.conf 17 Jan 2007 10:42:40 -0000 1.8.2.6 @@ -33,8 +33,8 @@ # logfile /var/log/nscd.log # threads 6 @@ -1337,8 +1214,8 @@ debug-level 0 # reload-count 5 paranoia no ---- glibc-20070515T2025/nscd/nscd.init 1 Dec 2006 20:12:45 -0000 1.10 -+++ glibc-20070515T2025-fedora/nscd/nscd.init 5 Dec 2006 21:50:20 -0000 1.6.2.5 +--- glibc-20070731T1624/nscd/nscd.init 1 Dec 2006 20:12:45 -0000 1.10 ++++ glibc-20070731T1624-fedora/nscd/nscd.init 5 Dec 2006 21:50:20 -0000 1.6.2.5 @@ -9,6 +9,7 @@ # slow naming services like NIS, NIS+, LDAP, or hesiod. # processname: /usr/sbin/nscd @@ -1379,8 +1256,8 @@ RETVAL=$? echo [ $RETVAL -eq 0 ] && touch /var/lock/subsys/nscd ---- glibc-20070515T2025/posix/Makefile 3 Apr 2007 23:28:20 -0000 1.197 -+++ glibc-20070515T2025-fedora/posix/Makefile 16 Apr 2007 23:59:05 -0000 1.171.2.24 +--- glibc-20070731T1624/posix/Makefile 29 Jul 2007 22:24:34 -0000 1.199 ++++ glibc-20070731T1624-fedora/posix/Makefile 31 Jul 2007 17:46:00 -0000 1.171.2.25 @@ -110,7 +110,7 @@ generated := $(addprefix wordexp-test-re tst-rxspencer-mem tst-rxspencer.mtrace tst-getconf.out \ tst-pcre-mem tst-pcre.mtrace tst-boost-mem tst-boost.mtrace \ @@ -1413,8 +1290,8 @@ + | sed -n -e '/START_OF_STRINGS/,$${/POSIX_V6_/{s/^[^"]*"//;s/".*$$//;p}}' \ + > $@.new + mv -f $@.new $@ ---- glibc-20070515T2025/posix/getconf.speclist.h 1 Jan 1970 00:00:00 -0000 -+++ glibc-20070515T2025-fedora/posix/getconf.speclist.h 13 Dec 2004 23:32:37 -0000 1.1.2.2 +--- glibc-20070731T1624/posix/getconf.speclist.h 1 Jan 1970 00:00:00 -0000 ++++ glibc-20070731T1624-fedora/posix/getconf.speclist.h 13 Dec 2004 23:32:37 -0000 1.1.2.2 @@ -0,0 +1,15 @@ +#include +const char *START_OF_STRINGS = @@ -1431,8 +1308,8 @@ +"POSIX_V6_LPBIG_OFFBIG" +#endif +""; ---- glibc-20070515T2025/sysdeps/generic/dl-cache.h 25 Jun 2003 08:01:22 -0000 1.13 -+++ glibc-20070515T2025-fedora/sysdeps/generic/dl-cache.h 22 Sep 2004 21:21:07 -0000 1.13.2.1 +--- glibc-20070731T1624/sysdeps/generic/dl-cache.h 25 Jun 2003 08:01:22 -0000 1.13 ++++ glibc-20070731T1624-fedora/sysdeps/generic/dl-cache.h 22 Sep 2004 21:21:07 -0000 1.13.2.1 @@ -36,6 +36,14 @@ # define add_system_dir(dir) add_dir (dir) #endif @@ -1448,8 +1325,8 @@ #define CACHEMAGIC "ld.so-1.7.0" /* libc5 and glibc 2.0/2.1 use the same format. 
For glibc 2.2 another ---- glibc-20070515T2025/sysdeps/i386/Makefile 6 Mar 2005 00:18:16 -0000 1.20 -+++ glibc-20070515T2025-fedora/sysdeps/i386/Makefile 30 Jun 2006 09:16:34 -0000 1.16.2.4 +--- glibc-20070731T1624/sysdeps/i386/Makefile 6 Mar 2005 00:18:16 -0000 1.20 ++++ glibc-20070731T1624-fedora/sysdeps/i386/Makefile 30 Jun 2006 09:16:34 -0000 1.16.2.4 @@ -64,4 +64,12 @@ endif ifneq (,$(filter -mno-tls-direct-seg-refs,$(CFLAGS))) @@ -1463,8 +1340,8 @@ +CPPFLAGS-.oS += -DNO_TLS_DIRECT_SEG_REFS +CFLAGS-.oS += -mno-tls-direct-seg-refs endif ---- glibc-20070515T2025/sysdeps/ia64/Makefile 16 Aug 2004 06:46:14 -0000 1.10 -+++ glibc-20070515T2025-fedora/sysdeps/ia64/Makefile 22 Sep 2004 21:21:07 -0000 1.10.2.1 +--- glibc-20070731T1624/sysdeps/ia64/Makefile 16 Aug 2004 06:46:14 -0000 1.10 ++++ glibc-20070731T1624-fedora/sysdeps/ia64/Makefile 22 Sep 2004 21:21:07 -0000 1.10.2.1 @@ -12,8 +12,8 @@ elide-routines.os += hp-timing ifeq (yes,$(build-shared)) @@ -1476,8 +1353,8 @@ endif endif ---- glibc-20070515T2025/sysdeps/ia64/ia64libgcc.S 11 May 2002 05:12:35 -0000 1.2 -+++ glibc-20070515T2025-fedora/sysdeps/ia64/ia64libgcc.S 22 Sep 2004 21:21:07 -0000 1.2.2.1 +--- glibc-20070731T1624/sysdeps/ia64/ia64libgcc.S 11 May 2002 05:12:35 -0000 1.2 ++++ glibc-20070731T1624-fedora/sysdeps/ia64/ia64libgcc.S 22 Sep 2004 21:21:07 -0000 1.2.2.1 @@ -1,350 +0,0 @@ -/* From the Intel IA-64 Optimization Guide, choose the minimum latency - alternative. */ @@ -1829,8 +1706,8 @@ - .symver ___multi3, __multi3@GLIBC_2.2 - -#endif ---- glibc-20070515T2025/sysdeps/ia64/libgcc-compat.c 1 Jan 1970 00:00:00 -0000 -+++ glibc-20070515T2025-fedora/sysdeps/ia64/libgcc-compat.c 22 Sep 2004 21:21:08 -0000 1.1.2.1 +--- glibc-20070731T1624/sysdeps/ia64/libgcc-compat.c 1 Jan 1970 00:00:00 -0000 ++++ glibc-20070731T1624-fedora/sysdeps/ia64/libgcc-compat.c 22 Sep 2004 21:21:08 -0000 1.1.2.1 @@ -0,0 +1,84 @@ +/* pre-.hidden libgcc compatibility + Copyright (C) 2002 Free Software Foundation, Inc. @@ -1916,8 +1793,8 @@ +symbol_version (INTUSE (__multi3), __multi3, GLIBC_2.2); + +#endif ---- glibc-20070515T2025/sysdeps/powerpc/powerpc64/Makefile 2 Feb 2006 08:23:44 -0000 1.8 -+++ glibc-20070515T2025-fedora/sysdeps/powerpc/powerpc64/Makefile 30 Nov 2006 17:07:38 -0000 1.4.2.5 +--- glibc-20070731T1624/sysdeps/powerpc/powerpc64/Makefile 2 Feb 2006 08:23:44 -0000 1.8 ++++ glibc-20070731T1624-fedora/sysdeps/powerpc/powerpc64/Makefile 30 Nov 2006 17:07:38 -0000 1.4.2.5 @@ -30,6 +30,7 @@ ifneq ($(elf),no) # we use -fpic instead which is much better. 
CFLAGS-initfini.s += -fpic -O1 @@ -1926,8 +1803,8 @@ endif ifeq ($(subdir),elf) ---- glibc-20070515T2025/sysdeps/unix/nice.c 15 Aug 2006 05:24:45 -0000 1.7 -+++ glibc-20070515T2025-fedora/sysdeps/unix/nice.c 15 Aug 2006 05:53:50 -0000 1.6.2.2 +--- glibc-20070731T1624/sysdeps/unix/nice.c 15 Aug 2006 05:24:45 -0000 1.7 ++++ glibc-20070731T1624-fedora/sysdeps/unix/nice.c 15 Aug 2006 05:53:50 -0000 1.6.2.2 @@ -42,7 +42,12 @@ nice (int incr) __set_errno (save); } @@ -1942,8 +1819,8 @@ if (result == -1) { if (errno == EACCES) ---- glibc-20070515T2025/sysdeps/unix/sysv/linux/check_pf.c 25 Apr 2007 16:05:18 -0000 1.10 -+++ glibc-20070515T2025-fedora/sysdeps/unix/sysv/linux/check_pf.c 4 May 2007 10:05:57 -0000 1.3.2.7 +--- glibc-20070731T1624/sysdeps/unix/sysv/linux/check_pf.c 16 Jun 2007 16:54:40 -0000 1.11 ++++ glibc-20070731T1624-fedora/sysdeps/unix/sysv/linux/check_pf.c 31 Jul 2007 17:46:12 -0000 1.3.2.8 @@ -27,13 +27,10 @@ #include #include @@ -1959,8 +1836,8 @@ #ifndef IFA_F_TEMPORARY # define IFA_F_TEMPORARY IFA_F_SECONDARY ---- glibc-20070515T2025/sysdeps/unix/sysv/linux/dl-osinfo.h 1 Aug 2006 06:55:27 -0000 1.23 -+++ glibc-20070515T2025-fedora/sysdeps/unix/sysv/linux/dl-osinfo.h 2 Aug 2006 16:57:42 -0000 1.14.2.9 +--- glibc-20070731T1624/sysdeps/unix/sysv/linux/dl-osinfo.h 1 Aug 2006 06:55:27 -0000 1.23 ++++ glibc-20070731T1624-fedora/sysdeps/unix/sysv/linux/dl-osinfo.h 2 Aug 2006 16:57:42 -0000 1.14.2.9 @@ -18,11 +18,14 @@ 02111-1307 USA. */ @@ -2008,8 +1885,8 @@ + ret ^= stk; return ret; } ---- glibc-20070515T2025/sysdeps/unix/sysv/linux/futimesat.c 3 Feb 2006 05:26:34 -0000 1.6 -+++ glibc-20070515T2025-fedora/sysdeps/unix/sysv/linux/futimesat.c 3 Feb 2006 09:43:55 -0000 1.1.2.7 +--- glibc-20070731T1624/sysdeps/unix/sysv/linux/futimesat.c 3 Feb 2006 05:26:34 -0000 1.6 ++++ glibc-20070731T1624-fedora/sysdeps/unix/sysv/linux/futimesat.c 3 Feb 2006 09:43:55 -0000 1.1.2.7 @@ -37,14 +37,14 @@ futimesat (fd, file, tvp) { int result; @@ -2052,8 +1929,8 @@ { size_t filelen = strlen (file); static const char procfd[] = "/proc/self/fd/%d/%s"; ---- glibc-20070515T2025/sysdeps/unix/sysv/linux/netlinkaccess.h 8 Jan 2006 08:21:15 -0000 1.3 -+++ glibc-20070515T2025-fedora/sysdeps/unix/sysv/linux/netlinkaccess.h 10 Dec 2006 10:51:12 -0000 1.1.2.3 +--- glibc-20070731T1624/sysdeps/unix/sysv/linux/netlinkaccess.h 8 Jan 2006 08:21:15 -0000 1.3 ++++ glibc-20070731T1624-fedora/sysdeps/unix/sysv/linux/netlinkaccess.h 10 Dec 2006 10:51:12 -0000 1.1.2.3 @@ -25,6 +25,24 @@ #include @@ -2079,8 +1956,8 @@ struct netlink_res { ---- glibc-20070515T2025/sysdeps/unix/sysv/linux/paths.h 15 Nov 2000 23:06:47 -0000 1.11 -+++ glibc-20070515T2025-fedora/sysdeps/unix/sysv/linux/paths.h 22 Sep 2004 21:21:08 -0000 1.11.4.1 +--- glibc-20070731T1624/sysdeps/unix/sysv/linux/paths.h 15 Nov 2000 23:06:47 -0000 1.11 ++++ glibc-20070731T1624-fedora/sysdeps/unix/sysv/linux/paths.h 22 Sep 2004 21:21:08 -0000 1.11.4.1 @@ -61,7 +61,7 @@ #define _PATH_TTY "/dev/tty" #define _PATH_UNIX "/boot/vmlinux" @@ -2090,8 +1967,8 @@ #define _PATH_WTMP "/var/log/wtmp" /* Provide trailing slash, since mostly used for building pathnames. 
*/ ---- glibc-20070515T2025/sysdeps/unix/sysv/linux/tcsetattr.c 10 Sep 2003 19:16:07 -0000 1.16 -+++ glibc-20070515T2025-fedora/sysdeps/unix/sysv/linux/tcsetattr.c 22 Sep 2004 21:21:08 -0000 1.16.2.1 +--- glibc-20070731T1624/sysdeps/unix/sysv/linux/tcsetattr.c 10 Sep 2003 19:16:07 -0000 1.16 ++++ glibc-20070731T1624-fedora/sysdeps/unix/sysv/linux/tcsetattr.c 22 Sep 2004 21:21:08 -0000 1.16.2.1 @@ -49,6 +49,7 @@ tcsetattr (fd, optional_actions, termios { struct __kernel_termios k_termios; @@ -2137,8 +2014,8 @@ + return retval; } libc_hidden_def (tcsetattr) ---- glibc-20070515T2025/sysdeps/unix/sysv/linux/i386/clone.S 3 Dec 2006 23:12:36 -0000 1.27 -+++ glibc-20070515T2025-fedora/sysdeps/unix/sysv/linux/i386/clone.S 14 Dec 2006 09:06:34 -0000 1.22.2.6 +--- glibc-20070731T1624/sysdeps/unix/sysv/linux/i386/clone.S 3 Dec 2006 23:12:36 -0000 1.27 ++++ glibc-20070731T1624-fedora/sysdeps/unix/sysv/linux/i386/clone.S 14 Dec 2006 09:06:34 -0000 1.22.2.6 @@ -120,9 +120,6 @@ L(pseudo_end): ret @@ -2157,8 +2034,8 @@ cfi_startproc PSEUDO_END (BP_SYM (__clone)) ---- glibc-20070515T2025/sysdeps/unix/sysv/linux/i386/dl-cache.h 1 Jan 1970 00:00:00 -0000 -+++ glibc-20070515T2025-fedora/sysdeps/unix/sysv/linux/i386/dl-cache.h 22 Sep 2004 21:21:08 -0000 1.1.2.1 +--- glibc-20070731T1624/sysdeps/unix/sysv/linux/i386/dl-cache.h 1 Jan 1970 00:00:00 -0000 ++++ glibc-20070731T1624-fedora/sysdeps/unix/sysv/linux/i386/dl-cache.h 22 Sep 2004 21:21:08 -0000 1.1.2.1 @@ -0,0 +1,59 @@ +/* Support for reading /etc/ld.so.cache files written by Linux ldconfig. + Copyright (C) 2004 Free Software Foundation, Inc. @@ -2219,8 +2096,8 @@ + } while (0) + +#include_next ---- glibc-20070515T2025/sysdeps/unix/sysv/linux/ia64/dl-cache.h 6 Jul 2001 04:56:17 -0000 1.2 -+++ glibc-20070515T2025-fedora/sysdeps/unix/sysv/linux/ia64/dl-cache.h 22 Sep 2004 21:21:09 -0000 1.2.4.1 +--- glibc-20070731T1624/sysdeps/unix/sysv/linux/ia64/dl-cache.h 6 Jul 2001 04:56:17 -0000 1.2 ++++ glibc-20070731T1624-fedora/sysdeps/unix/sysv/linux/ia64/dl-cache.h 22 Sep 2004 21:21:09 -0000 1.2.4.1 @@ -22,4 +22,31 @@ #define _dl_cache_check_flags(flags) \ ((flags) == _DL_CACHE_DEFAULT_ID) @@ -2253,29 +2130,29 @@ + } while (0) + #include_next ---- glibc-20070515T2025/sysdeps/unix/sysv/linux/ia64/dl-procinfo.c 1 Jan 1970 00:00:00 -0000 -+++ glibc-20070515T2025-fedora/sysdeps/unix/sysv/linux/ia64/dl-procinfo.c 22 Sep 2004 21:21:09 -0000 1.1.2.1 +--- glibc-20070731T1624/sysdeps/unix/sysv/linux/ia64/dl-procinfo.c 1 Jan 1970 00:00:00 -0000 ++++ glibc-20070731T1624-fedora/sysdeps/unix/sysv/linux/ia64/dl-procinfo.c 22 Sep 2004 21:21:09 -0000 1.1.2.1 @@ -0,0 +1,5 @@ +#ifdef IS_IN_ldconfig +#include +#else +#include +#endif ---- glibc-20070515T2025/sysdeps/unix/sysv/linux/ia64/dl-procinfo.h 1 Jan 1970 00:00:00 -0000 -+++ glibc-20070515T2025-fedora/sysdeps/unix/sysv/linux/ia64/dl-procinfo.h 22 Sep 2004 21:21:09 -0000 1.1.2.1 +--- glibc-20070731T1624/sysdeps/unix/sysv/linux/ia64/dl-procinfo.h 1 Jan 1970 00:00:00 -0000 ++++ glibc-20070731T1624-fedora/sysdeps/unix/sysv/linux/ia64/dl-procinfo.h 22 Sep 2004 21:21:09 -0000 1.1.2.1 @@ -0,0 +1,5 @@ +#ifdef IS_IN_ldconfig +#include +#else +#include +#endif ---- glibc-20070515T2025/sysdeps/unix/sysv/linux/ia64/ldd-rewrite.sed 17 Jan 2002 06:49:28 -0000 1.2 -+++ glibc-20070515T2025-fedora/sysdeps/unix/sysv/linux/ia64/ldd-rewrite.sed 22 Sep 2004 21:21:09 -0000 1.2.2.1 +--- glibc-20070731T1624/sysdeps/unix/sysv/linux/ia64/ldd-rewrite.sed 17 Jan 2002 06:49:28 -0000 1.2 ++++ 
glibc-20070731T1624-fedora/sysdeps/unix/sysv/linux/ia64/ldd-rewrite.sed 22 Sep 2004 21:21:09 -0000 1.2.2.1 @@ -1 +1 @@ -s_^\(RTLDLIST=\)\([^ ]*\)-ia64\(\.so\.[0-9.]*\)[ ]*$_\1"\2-ia64\3 \2\3"_ +s_^\(RTLDLIST=\)\([^ ]*\)-ia64\(\.so\.[0-9.]*\)[ ]*$_\1"\2-ia64\3 /emul/ia32-linux\2\3"_ ---- glibc-20070515T2025/sysdeps/unix/sysv/linux/x86_64/clone.S 3 Dec 2006 23:12:36 -0000 1.7 -+++ glibc-20070515T2025-fedora/sysdeps/unix/sysv/linux/x86_64/clone.S 14 Dec 2006 09:06:34 -0000 1.4.2.4 +--- glibc-20070731T1624/sysdeps/unix/sysv/linux/x86_64/clone.S 3 Dec 2006 23:12:36 -0000 1.7 ++++ glibc-20070731T1624-fedora/sysdeps/unix/sysv/linux/x86_64/clone.S 14 Dec 2006 09:06:34 -0000 1.4.2.4 @@ -89,9 +89,6 @@ L(pseudo_end): ret diff --git a/glibc-i386-rwlock.patch b/glibc-i386-rwlock.patch new file mode 100644 index 0000000..c7b8816 --- /dev/null +++ b/glibc-i386-rwlock.patch @@ -0,0 +1,98 @@ +2007-07-30 Jakub Jelinek + + * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S + (pthread_rwlock_timedrdlock): Copy futex retval to %esi rather than + %ecx. + * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S + (pthread_rwlock_timedwrlock): Likewise. + * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S + (__pthread_rwlock_unlock): Fix MUTEX != 0 args to __lll_*. + +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S 2007-07-30 18:10:05.000000000 +0200 +@@ -124,7 +124,7 @@ pthread_rwlock_timedrdlock: + leal READERS_WAKEUP(%ebp), %ebx + movl $SYS_futex, %eax + ENTER_KERNEL +- movl %eax, %ecx ++ movl %eax, %esi + 17: + + /* Reget the lock. */ +@@ -139,7 +139,7 @@ pthread_rwlock_timedrdlock: + jnz 12f + + 13: subl $1, READERS_QUEUED(%ebp) +- cmpl $-ETIMEDOUT, %ecx ++ cmpl $-ETIMEDOUT, %esi + jne 2b + + 18: movl $ETIMEDOUT, %ecx +@@ -217,7 +217,7 @@ pthread_rwlock_timedrdlock: + call __lll_mutex_lock_wait + jmp 13b + +-16: movl $-ETIMEDOUT, %ecx ++16: movl $-ETIMEDOUT, %esi + jmp 17b + + 19: movl $EINVAL, %ecx +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S 2007-07-30 18:10:59.000000000 +0200 +@@ -122,7 +122,7 @@ pthread_rwlock_timedwrlock: + leal WRITERS_WAKEUP(%ebp), %ebx + movl $SYS_futex, %eax + ENTER_KERNEL +- movl %eax, %ecx ++ movl %eax, %esi + 17: + + /* Reget the lock. 
*/ +@@ -137,7 +137,7 @@ pthread_rwlock_timedwrlock: + jnz 12f + + 13: subl $1, WRITERS_QUEUED(%ebp) +- cmpl $-ETIMEDOUT, %ecx ++ cmpl $-ETIMEDOUT, %esi + jne 2b + + 18: movl $ETIMEDOUT, %ecx +@@ -210,7 +210,7 @@ pthread_rwlock_timedwrlock: + call __lll_mutex_lock_wait + jmp 13b + +-16: movl $-ETIMEDOUT, %ecx ++16: movl $-ETIMEDOUT, %esi + jmp 17b + + 19: movl $EINVAL, %ecx +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S 2007-07-30 18:19:01.000000000 +0200 +@@ -117,7 +117,7 @@ __pthread_rwlock_unlock: + #if MUTEX == 0 + movl %edi, %ecx + #else +- leal MUTEX(%edx), %ecx ++ leal MUTEX(%edi), %ecx + #endif + call __lll_mutex_lock_wait + jmp 2b +@@ -126,7 +126,7 @@ __pthread_rwlock_unlock: + #if MUTEX == 0 + movl %edi, %eax + #else +- leal MUTEX(%edx), %eax ++ leal MUTEX(%edi), %eax + #endif + call __lll_mutex_unlock_wake + jmp 4b +@@ -135,7 +135,7 @@ __pthread_rwlock_unlock: + #if MUTEX == 0 + movl %edi, %eax + #else +- leal MUTEX(%edx), %eax ++ leal MUTEX(%edi), %eax + #endif + call __lll_mutex_unlock_wake + jmp 8b + diff --git a/glibc-ldconfig-speedup.patch b/glibc-ldconfig-speedup.patch new file mode 100644 index 0000000..4d621af --- /dev/null +++ b/glibc-ldconfig-speedup.patch @@ -0,0 +1,1143 @@ +2007-07-27 Andreas Jaeger + Jakub Jelinek + + * elf/ldconfig.c (opt_ignore_aux_cache): Add new option. + (options): Add option. + (parse_opt): Handle option. + (manual_link): Adjust process_file caller. Call implicit_soname. + (search_dir): Formatting. Use and populate auxiliary cache. + (main): Load and save auxiliary cache. + * elf/readlib.c (process_file): Add stat_buf argument. Pass struct + stat64 from fstat64 to caller. + (implicit_soname): New function. + * elf/readelflib.c (process_elf_file): If DT_SONAME is not present, + leave *soname as NULL. + * elf/cache.c: Include libgen.h. + (print_entry, print_cache, compare, save_cache, add_to_cache): + Formatting and cleanups. + (aux_cache_entry_id, aux_cache_entry, aux_cache_file_entry, + aux_cache_file): New structures. + (AUX_CACHEMAGIC): Define. + (primes): New array. + (aux_hash_size, aux_hash): New variables. + (aux_cache_entry_id_hash, nextprime, init_aux_cache, + search_aux_cache, insert_to_aux_cache, add_to_aux_cache, + load_aux_cache, save_aux_cache): New functions. + * sysdeps/generic/ldconfig.h (_PATH_LDCONFIG_AUX_CACHE): Define. + (init_aux_cache, search_aux_cache, add_to_aux_cache, + load_aux_cache, save_aux_cache, implicit_soname): New prototypes. + (process_file): Adjust prototype. + +--- libc/elf/readlib.c.jj 2007-07-16 09:58:46.000000000 +0200 ++++ libc/elf/readlib.c 2007-07-27 17:45:36.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 1999-2003, 2005 Free Software Foundation, Inc. ++/* Copyright (C) 1999-2003, 2005, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Andreas Jaeger , 1999 and + Jakub Jelinek , 1999. +@@ -69,7 +69,7 @@ static struct known_names known_libs[] = + int + process_file (const char *real_file_name, const char *file_name, + const char *lib, int *flag, unsigned int *osversion, +- char **soname, int is_link) ++ char **soname, int is_link, struct stat64 *stat_buf) + { + FILE *file; + struct stat64 statbuf; +@@ -135,7 +135,7 @@ process_file (const char *real_file_name + ) + { + /* Aout files don't have a soname, just return the name +- including the major number. */ ++ including the major number. 
*/ + char *copy, *major, *dot; + copy = xstrdup (lib); + major = strstr (copy, ".so."); +@@ -175,8 +175,31 @@ process_file (const char *real_file_name + munmap (file_contents, statbuf.st_size); + fclose (file); + ++ *stat_buf = statbuf; + return ret; + } + ++/* Returns made up soname if lib doesn't have explicit DT_SONAME. */ ++ ++char * ++implicit_soname (const char *lib, int flag) ++{ ++ char *soname = xstrdup (lib); ++ ++ if ((flag & FLAG_TYPE_MASK) != FLAG_LIBC4) ++ return soname; ++ ++ /* Aout files don't have a soname, just return the name ++ including the major number. */ ++ char *major = strstr (soname, ".so."); ++ if (major) ++ { ++ char *dot = strstr (major + 4, "."); ++ if (dot) ++ *dot = '\0'; ++ } ++ return soname; ++} ++ + /* Get architecture specific version of process_elf_file. */ + #include +--- libc/elf/readelflib.c.jj 2007-07-03 12:36:59.000000000 +0200 ++++ libc/elf/readelflib.c 2007-07-27 15:24:36.000000000 +0200 +@@ -231,11 +231,5 @@ process_elf_file (const char *file_name, + } + } + +- /* We reach this point only if the file doesn't contain a DT_SONAME +- or if we can't classify the library. If it doesn't have a +- soname, return the name of the library. */ +- if (*soname == NULL) +- *soname = xstrdup (lib); +- + return 0; + } +--- libc/elf/ldconfig.c.jj 2007-07-16 09:58:46.000000000 +0200 ++++ libc/elf/ldconfig.c 2007-07-27 18:07:32.000000000 +0200 +@@ -112,6 +112,9 @@ static char *opt_chroot; + /* Manually link given shared libraries. */ + static int opt_manual_link; + ++/* Should we ignore an old auxiliary cache file? */ ++static int opt_ignore_aux_cache; ++ + /* Cache file to use. */ + static char *cache_file; + +@@ -142,6 +145,7 @@ static const struct argp_option options[ + { NULL, 'n', NULL, 0, N_("Only process directories specified on the command line. Don't build cache."), 0}, + { NULL, 'l', NULL, 0, N_("Manually link individual libraries."), 0}, + { "format", 'c', N_("FORMAT"), 0, N_("Format to use: new, old or compat (default)"), 0}, ++ { "ignore-aux-cache", 'i', NULL, 0, N_("Ignore auxiliary cache file"), 0}, + { NULL, 0, NULL, 0, NULL, 0 } + }; + +@@ -238,10 +242,15 @@ parse_opt (int key, char *arg, struct ar + { + case 'C': + cache_file = arg; ++ /* Ignore auxiliary cache since we use non-standard cache. */ ++ opt_ignore_aux_cache = 1; + break; + case 'f': + config_file = arg; + break; ++ case 'i': ++ opt_ignore_aux_cache = 1; ++ break; + case 'l': + opt_manual_link = 1; + break; +@@ -518,7 +527,7 @@ manual_link (char *library) + if (libname) + { + /* Successfully split names. Check if path is just "/" to avoid +- an empty path. */ ++ an empty path. 
*/ + if (libname == path) + { + libname = library + 1; +@@ -572,14 +581,17 @@ manual_link (char *library) + free (path); + return; + } ++ + if (process_file (real_library, library, libname, &flag, &osversion, +- &soname, 0)) ++ &soname, 0, &stat_buf)) + { + error (0, 0, _("No link created since soname could not be found for %s"), + library); + free (path); + return; + } ++ if (soname == NULL) ++ soname = implicit_soname (libname, flag); + create_links (real_path, path, libname, soname); + free (soname); + free (path); +@@ -625,23 +637,7 @@ struct dlib_entry + static void + search_dir (const struct dir_entry *entry) + { +- DIR *dir; +- struct dirent64 *direntry; +- char *file_name, *dir_name, *real_file_name, *real_name; +- int file_name_len, real_file_name_len, len; +- char *soname; +- struct dlib_entry *dlibs; +- struct dlib_entry *dlib_ptr; +- struct stat64 lstat_buf, stat_buf; +- int is_link, is_dir; + uint64_t hwcap = path_hwcap (entry->path); +- unsigned int osversion; +- +- file_name_len = PATH_MAX; +- file_name = alloca (file_name_len); +- +- dlibs = NULL; +- + if (opt_verbose) + { + if (hwcap != 0) +@@ -650,6 +646,11 @@ search_dir (const struct dir_entry *entr + printf ("%s:\n", entry->path); + } + ++ char *dir_name; ++ char *real_file_name; ++ size_t real_file_name_len; ++ size_t file_name_len = PATH_MAX; ++ char *file_name = alloca (file_name_len); + if (opt_chroot) + { + dir_name = chroot_canon (opt_chroot, entry->path); +@@ -663,6 +664,7 @@ search_dir (const struct dir_entry *entr + real_file_name = file_name; + } + ++ DIR *dir; + if (dir_name == NULL || (dir = opendir (dir_name)) == NULL) + { + if (opt_verbose) +@@ -672,6 +674,8 @@ search_dir (const struct dir_entry *entr + return; + } + ++ struct dirent64 *direntry; ++ struct dlib_entry *dlibs = NULL; + while ((direntry = readdir64 (dir)) != NULL) + { + int flag; +@@ -695,7 +699,8 @@ search_dir (const struct dir_entry *entr + #endif + !is_hwcap_platform (direntry->d_name))) + continue; +- len = strlen (direntry->d_name); ++ ++ size_t len = strlen (direntry->d_name); + /* Skip temporary files created by the prelink program. Files with + names like these are never really DSOs we want to look at. */ + if (len >= sizeof (".#prelink#") - 1) +@@ -727,7 +732,10 @@ search_dir (const struct dir_entry *entr + } + sprintf (real_file_name, "%s/%s", dir_name, direntry->d_name); + } ++ ++ struct stat64 lstat_buf; + #ifdef _DIRENT_HAVE_D_TYPE ++ /* We optimize and try to do the lstat call only if needed. */ + if (direntry->d_type != DT_UNKNOWN) + lstat_buf.st_mode = DTTOIF (direntry->d_type); + else +@@ -738,9 +746,11 @@ search_dir (const struct dir_entry *entr + continue; + } + +- is_link = S_ISLNK (lstat_buf.st_mode); ++ struct stat64 stat_buf; ++ int is_dir; ++ int is_link = S_ISLNK (lstat_buf.st_mode); + if (is_link) +- { ++ { + /* In case of symlink, we check if the symlink refers to + a directory. */ + if (__builtin_expect (stat64 (real_file_name, &stat_buf), 0)) +@@ -754,6 +764,12 @@ search_dir (const struct dir_entry *entr + continue; + } + is_dir = S_ISDIR (stat_buf.st_mode); ++ ++ /* lstat_buf is later stored, update contents. 
*/ ++ lstat_buf.st_dev = stat_buf.st_dev; ++ lstat_buf.st_ino = stat_buf.st_ino; ++ lstat_buf.st_size = stat_buf.st_size; ++ lstat_buf.st_ctime = stat_buf.st_ctime; + } + else + is_dir = S_ISDIR (lstat_buf.st_mode); +@@ -767,36 +783,28 @@ search_dir (const struct dir_entry *entr + new_entry->path = xstrdup (file_name); + new_entry->flag = entry->flag; + new_entry->next = NULL; +- if (is_link) ++#ifdef _DIRENT_HAVE_D_TYPE ++ /* We have filled in lstat only #ifndef ++ _DIRENT_HAVE_D_TYPE. Fill it in if needed. */ ++ if (!is_link ++ && direntry->d_type != DT_UNKNOWN ++ && __builtin_expect (lstat64 (real_file_name, &lstat_buf), 0)) + { +- new_entry->ino = stat_buf.st_ino; +- new_entry->dev = stat_buf.st_dev; ++ error (0, errno, _("Cannot lstat %s"), file_name); ++ free (new_entry->path); ++ free (new_entry); ++ continue; + } +- else +- { +-#ifdef _DIRENT_HAVE_D_TYPE +- /* We have filled in lstat only #ifndef +- _DIRENT_HAVE_D_TYPE. Fill it in if needed. */ +- if (direntry->d_type != DT_UNKNOWN +- && __builtin_expect (lstat64 (real_file_name, &lstat_buf), +- 0)) +- { +- error (0, errno, _("Cannot lstat %s"), file_name); +- free (new_entry->path); +- free (new_entry); +- continue; +- } + #endif +- +- new_entry->ino = lstat_buf.st_ino; +- new_entry->dev = lstat_buf.st_dev; +- } ++ new_entry->ino = lstat_buf.st_ino; ++ new_entry->dev = lstat_buf.st_dev; + add_single_dir (new_entry, 0); + continue; + } + else if (!S_ISREG (lstat_buf.st_mode) && !is_link) + continue; + ++ char *real_name; + if (opt_chroot && is_link) + { + real_name = chroot_canon (opt_chroot, file_name); +@@ -810,14 +818,36 @@ search_dir (const struct dir_entry *entr + else + real_name = real_file_name; + +- if (process_file (real_name, file_name, direntry->d_name, &flag, +- &osversion, &soname, is_link)) ++#ifdef _DIRENT_HAVE_D_TYPE ++ /* Call lstat64 if not done yet. */ ++ if (!is_link ++ && direntry->d_type != DT_UNKNOWN ++ && __builtin_expect (lstat64 (real_file_name, &lstat_buf), 0)) + { +- if (real_name != real_file_name) +- free (real_name); ++ error (0, errno, _("Cannot lstat %s"), file_name); + continue; + } ++#endif ++ ++ /* First search whether the auxiliary cache contains this ++ library already and it's not changed. */ ++ char *soname; ++ unsigned int osversion; ++ if (!search_aux_cache (&lstat_buf, &flag, &osversion, &soname)) ++ { ++ if (process_file (real_name, file_name, direntry->d_name, &flag, ++ &osversion, &soname, is_link, &lstat_buf)) ++ { ++ if (real_name != real_file_name) ++ free (real_name); ++ continue; ++ } ++ else if (opt_build_cache) ++ add_to_aux_cache (&lstat_buf, flag, osversion, soname); ++ } + ++ if (soname == NULL) ++ soname = implicit_soname (direntry->d_name, flag); + + /* A link may just point to itself. */ + if (is_link) +@@ -834,7 +864,7 @@ search_dir (const struct dir_entry *entr + || strncmp (real_base_name, soname, len) != 0) + is_link = 0; + } +- } ++ } + + if (real_name != real_file_name) + free (real_name); +@@ -849,6 +879,7 @@ search_dir (const struct dir_entry *entr + && (entry->flag == FLAG_ELF_LIBC5 + || entry->flag == FLAG_ELF_LIBC6)) + flag = entry->flag; ++ + /* Some sanity checks to print warnings. */ + if (opt_verbose) + { +@@ -864,6 +895,7 @@ search_dir (const struct dir_entry *entr + } + + /* Add library to list. */ ++ struct dlib_entry *dlib_ptr; + for (dlib_ptr = dlibs; dlib_ptr != NULL; dlib_ptr = dlib_ptr->next) + { + /* Is soname already in list? 
*/ +@@ -888,12 +920,13 @@ search_dir (const struct dir_entry *entr + dlib_ptr->flag = flag; + else + error (0, 0, _("libraries %s and %s in directory %s have same soname but different type."), +- dlib_ptr->name, direntry->d_name, entry->path); ++ dlib_ptr->name, direntry->d_name, ++ entry->path); + } + free (dlib_ptr->name); +- dlib_ptr->osversion = osversion; + dlib_ptr->name = xstrdup (direntry->d_name); + dlib_ptr->is_link = is_link; ++ dlib_ptr->osversion = osversion; + } + /* Don't add this library, abort loop. */ + /* Also free soname, since it's dynamically allocated. */ +@@ -906,10 +939,10 @@ search_dir (const struct dir_entry *entr + { + dlib_ptr = (struct dlib_entry *)xmalloc (sizeof (struct dlib_entry)); + dlib_ptr->name = xstrdup (direntry->d_name); +- dlib_ptr->flag = flag; +- dlib_ptr->osversion = osversion; + dlib_ptr->soname = soname; ++ dlib_ptr->flag = flag; + dlib_ptr->is_link = is_link; ++ dlib_ptr->osversion = osversion; + /* Add at head of list. */ + dlib_ptr->next = dlibs; + dlibs = dlib_ptr; +@@ -920,6 +953,7 @@ search_dir (const struct dir_entry *entr + + /* Now dlibs contains a list of all libs - add those to the cache + and created all symbolic links. */ ++ struct dlib_entry *dlib_ptr; + for (dlib_ptr = dlibs; dlib_ptr != NULL; dlib_ptr = dlib_ptr->next) + { + /* Don't create links to links. */ +@@ -1246,7 +1280,7 @@ main (int argc, char **argv) + if (opt_chroot) + { + /* Canonicalize the directory name of cache_file, not cache_file, +- because we'll rename a temporary cache file to it. */ ++ because we'll rename a temporary cache file to it. */ + char *p = strrchr (cache_file, '/'); + char *canon = chroot_canon (opt_chroot, + p ? (*p = '\0', cache_file) : "/"); +@@ -1293,10 +1327,18 @@ main (int argc, char **argv) + add_system_dir (LIBDIR); + } + ++ if (! opt_ignore_aux_cache) ++ load_aux_cache (_PATH_LDCONFIG_AUX_CACHE); ++ else ++ init_aux_cache (); ++ + search_dirs (); + + if (opt_build_cache) +- save_cache (cache_file); ++ { ++ save_cache (cache_file); ++ save_aux_cache (_PATH_LDCONFIG_AUX_CACHE); ++ } + + return 0; + } +--- libc/elf/cache.c.jj 2007-07-16 09:58:46.000000000 +0200 ++++ libc/elf/cache.c 2007-07-27 18:20:09.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 1999-2003,2005,2006 Free Software Foundation, Inc. ++/* Copyright (C) 1999-2003,2005,2006,2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Andreas Jaeger , 1999. 
+ +@@ -20,6 +20,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -80,16 +81,16 @@ print_entry (const char *lib, int flag, + fputs (",x86-64", stdout); + break; + case FLAG_S390_LIB64: +- fputs(",64bit", stdout); ++ fputs (",64bit", stdout); + break; + case FLAG_POWERPC_LIB64: +- fputs(",64bit", stdout); ++ fputs (",64bit", stdout); + break; + case FLAG_MIPS64_LIBN32: +- fputs(",N32", stdout); ++ fputs (",N32", stdout); + break; + case FLAG_MIPS64_LIBN64: +- fputs(",64bit", stdout); ++ fputs (",64bit", stdout); + case 0: + break; + default: +@@ -128,19 +129,11 @@ print_entry (const char *lib, int flag, + void + print_cache (const char *cache_name) + { +- size_t cache_size; +- struct stat64 st; +- int fd; +- unsigned int i; +- struct cache_file *cache; +- struct cache_file_new *cache_new = NULL; +- const char *cache_data; +- int format = 0; +- +- fd = open (cache_name, O_RDONLY); ++ int fd = open (cache_name, O_RDONLY); + if (fd < 0) + error (EXIT_FAILURE, errno, _("Can't open cache file %s\n"), cache_name); + ++ struct stat64 st; + if (fstat64 (fd, &st) < 0 + /* No need to map the file if it is empty. */ + || st.st_size == 0) +@@ -149,14 +142,19 @@ print_cache (const char *cache_name) + return; + } + +- cache = mmap (0, st.st_size, PROT_READ, MAP_SHARED, fd, 0); ++ struct cache_file *cache ++ = mmap (NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0); + if (cache == MAP_FAILED) + error (EXIT_FAILURE, errno, _("mmap of cache file failed.\n")); +- cache_size = st.st_size; + ++ size_t cache_size = st.st_size; + if (cache_size < sizeof (struct cache_file)) + error (EXIT_FAILURE, 0, _("File is not a cache file.\n")); + ++ struct cache_file_new *cache_new = NULL; ++ const char *cache_data; ++ int format = 0; ++ + if (memcmp (cache->magic, CACHEMAGIC, sizeof CACHEMAGIC - 1)) + { + /* This can only be the new format without the old one. */ +@@ -201,7 +199,7 @@ print_cache (const char *cache_name) + printf (_("%d libs found in cache `%s'\n"), cache->nlibs, cache_name); + + /* Print everything. */ +- for (i = 0; i < cache->nlibs; i++) ++ for (unsigned int i = 0; i < cache->nlibs; i++) + print_entry (cache_data + cache->libs[i].key, + cache->libs[i].flags, 0, 0, + cache_data + cache->libs[i].value); +@@ -212,7 +210,7 @@ print_cache (const char *cache_name) + cache_new->nlibs, cache_name); + + /* Print everything. */ +- for (i = 0; i < cache_new->nlibs; i++) ++ for (unsigned int i = 0; i < cache_new->nlibs; i++) + print_entry (cache_data + cache_new->libs[i].key, + cache_new->libs[i].flags, + cache_new->libs[i].osversion, +@@ -231,15 +229,11 @@ init_cache (void) + entries = NULL; + } + +- +- +-static +-int compare (const struct cache_entry *e1, const struct cache_entry *e2) ++static int ++compare (const struct cache_entry *e1, const struct cache_entry *e2) + { +- int res; +- + /* We need to swap entries here to get the correct sort order. */ +- res = _dl_cache_libcmp (e2->lib, e1->lib); ++ int res = _dl_cache_libcmp (e2->lib, e1->lib); + if (res == 0) + { + if (e1->flags < e2->flags) +@@ -267,29 +261,19 @@ int compare (const struct cache_entry *e + void + save_cache (const char *cache_name) + { +- struct cache_entry *entry; +- int fd, idx_old, idx_new; +- size_t total_strlen, len; +- char *strings, *str, *temp_name; +- struct cache_file *file_entries = NULL; +- struct cache_file_new *file_entries_new = NULL; +- size_t file_entries_size = 0; +- size_t file_entries_new_size = 0; +- unsigned int str_offset; +- /* Number of cache entries. 
*/ +- int cache_entry_count = 0; +- /* Number of normal cache entries. */ +- int cache_entry_old_count = 0; +- /* Pad for alignment of cache_file_new. */ +- size_t pad; +- + /* The cache entries are sorted already, save them in this order. */ + + /* Count the length of all strings. */ + /* The old format doesn't contain hwcap entries and doesn't contain + libraries in subdirectories with hwcaps entries. Count therefore + also all entries with hwcap == 0. */ +- total_strlen = 0; ++ size_t total_strlen = 0; ++ struct cache_entry *entry; ++ /* Number of cache entries. */ ++ int cache_entry_count = 0; ++ /* Number of normal cache entries. */ ++ int cache_entry_old_count = 0; ++ + for (entry = entries; entry != NULL; entry = entry->next) + { + /* Account the final NULs. */ +@@ -300,8 +284,8 @@ save_cache (const char *cache_name) + } + + /* Create the on disk cache structure. */ +- /* First an array for all strings. */ +- strings = (char *)xmalloc (total_strlen); ++ struct cache_file *file_entries = NULL; ++ size_t file_entries_size = 0; + + if (opt_format != 2) + { +@@ -315,25 +299,27 @@ save_cache (const char *cache_name) + /* And the list of all entries in the old format. */ + file_entries_size = sizeof (struct cache_file) + + cache_entry_old_count * sizeof (struct file_entry); +- file_entries = (struct cache_file *) xmalloc (file_entries_size); ++ file_entries = xmalloc (file_entries_size); + + /* Fill in the header. */ +- memset (file_entries, 0, sizeof (struct cache_file)); ++ memset (file_entries, '\0', sizeof (struct cache_file)); + memcpy (file_entries->magic, CACHEMAGIC, sizeof CACHEMAGIC - 1); + + file_entries->nlibs = cache_entry_old_count; + } + ++ struct cache_file_new *file_entries_new = NULL; ++ size_t file_entries_new_size = 0; ++ + if (opt_format != 0) + { + /* And the list of all entries in the new format. */ + file_entries_new_size = sizeof (struct cache_file_new) + + cache_entry_count * sizeof (struct file_entry_new); +- file_entries_new = +- (struct cache_file_new *) xmalloc (file_entries_new_size); ++ file_entries_new = xmalloc (file_entries_new_size); + + /* Fill in the header. */ +- memset (file_entries_new, 0, sizeof (struct cache_file_new)); ++ memset (file_entries_new, '\0', sizeof (struct cache_file_new)); + memcpy (file_entries_new->magic, CACHEMAGIC_NEW, + sizeof CACHEMAGIC_NEW - 1); + memcpy (file_entries_new->version, CACHE_VERSION, +@@ -343,17 +329,24 @@ save_cache (const char *cache_name) + file_entries_new->len_strings = total_strlen; + } + +- pad = ALIGN_CACHE (file_entries_size) - file_entries_size; ++ /* Pad for alignment of cache_file_new. */ ++ size_t pad = ALIGN_CACHE (file_entries_size) - file_entries_size; + + /* If we have both formats, we hide the new format in the strings + table, we have to adjust all string indices for this so that + old libc5/glibc 2 dynamic linkers just ignore them. */ ++ unsigned int str_offset; + if (opt_format != 0) + str_offset = file_entries_new_size; + else + str_offset = 0; + +- str = strings; ++ /* An array for all strings. */ ++ char *strings = xmalloc (total_strlen); ++ char *str = strings; ++ int idx_old; ++ int idx_new; ++ + for (idx_old = 0, idx_new = 0, entry = entries; entry != NULL; + entry = entry->next, ++idx_new) + { +@@ -375,21 +368,18 @@ save_cache (const char *cache_name) + file_entries_new->libs[idx_new].hwcap = entry->hwcap; + file_entries_new->libs[idx_new].key = str_offset; + } +- len = strlen (entry->lib); +- str = stpcpy (str, entry->lib); +- /* Account the final NUL. 
*/ +- ++str; +- str_offset += len + 1; ++ ++ size_t len = strlen (entry->lib) + 1; ++ str = mempcpy (str, entry->lib, len); ++ str_offset += len; + /* Then the path. */ + if (opt_format != 2 && entry->hwcap == 0) + file_entries->libs[idx_old].value = str_offset + pad; + if (opt_format != 0) + file_entries_new->libs[idx_new].value = str_offset; +- len = strlen (entry->path); +- str = stpcpy (str, entry->path); +- /* Account the final NUL. */ +- ++str; +- str_offset += len + 1; ++ len = strlen (entry->path) + 1; ++ str = mempcpy (str, entry->path, len); ++ str_offset += len; + /* Ignore entries with hwcap for old format. */ + if (entry->hwcap == 0) + ++idx_old; +@@ -403,16 +393,12 @@ save_cache (const char *cache_name) + /* Write out the cache. */ + + /* Write cache first to a temporary file and rename it later. */ +- temp_name = xmalloc (strlen (cache_name) + 2); ++ char *temp_name = xmalloc (strlen (cache_name) + 2); + sprintf (temp_name, "%s~", cache_name); +- /* First remove an old copy if it exists. */ +- if (unlink (temp_name) && errno != ENOENT) +- error (EXIT_FAILURE, errno, _("Can't remove old temporary cache file %s"), +- temp_name); + + /* Create file. */ +- fd = open (temp_name, O_CREAT|O_WRONLY|O_TRUNC|O_NOFOLLOW, +- S_IROTH|S_IRGRP|S_IRUSR|S_IWUSR); ++ int fd = open (temp_name, O_CREAT|O_WRONLY|O_TRUNC|O_NOFOLLOW, ++ S_IRUSR|S_IWUSR); + if (fd < 0) + error (EXIT_FAILURE, errno, _("Can't create temporary cache file %s"), + temp_name); +@@ -439,11 +425,10 @@ save_cache (const char *cache_name) + error (EXIT_FAILURE, errno, _("Writing of cache data failed")); + } + +- if (write (fd, strings, total_strlen) != (ssize_t) total_strlen) ++ if (write (fd, strings, total_strlen) != (ssize_t) total_strlen ++ || close (fd)) + error (EXIT_FAILURE, errno, _("Writing of cache data failed")); + +- close (fd); +- + /* Make sure user can always read cache file */ + if (chmod (temp_name, S_IROTH|S_IRGRP|S_IRUSR|S_IWUSR)) + error (EXIT_FAILURE, errno, +@@ -463,8 +448,6 @@ save_cache (const char *cache_name) + while (entries) + { + entry = entries; +- free (entry->path); +- free (entry->lib); + entries = entries->next; + free (entry); + } +@@ -476,33 +459,29 @@ void + add_to_cache (const char *path, const char *lib, int flags, + unsigned int osversion, uint64_t hwcap) + { +- struct cache_entry *new_entry, *ptr, *prev; +- char *full_path; +- size_t len, i; +- +- new_entry = (struct cache_entry *) xmalloc (sizeof (struct cache_entry)); +- +- len = strlen (lib) + strlen (path) + 2; +- +- full_path = (char *) xmalloc (len); +- snprintf (full_path, len, "%s/%s", path, lib); +- +- new_entry->lib = xstrdup (lib); +- new_entry->path = full_path; ++ size_t liblen = strlen (lib) + 1; ++ size_t len = liblen + strlen (path) + 1; ++ struct cache_entry *new_entry ++ = xmalloc (sizeof (struct cache_entry) + liblen + len); ++ ++ new_entry->lib = memcpy ((char *) (new_entry + 1), lib, liblen); ++ new_entry->path = new_entry->lib + liblen; ++ snprintf (new_entry->path, len, "%s/%s", path, lib); + new_entry->flags = flags; + new_entry->osversion = osversion; + new_entry->hwcap = hwcap; + new_entry->bits_hwcap = 0; + + /* Count the number of bits set in the masked value. */ +- for (i = 0; (~((1ULL << i) - 1) & hwcap) != 0 && i < 8 * sizeof (hwcap); ++i) ++ for (size_t i = 0; ++ (~((1ULL << i) - 1) & hwcap) != 0 && i < 8 * sizeof (hwcap); ++i) + if ((hwcap & (1ULL << i)) != 0) + ++new_entry->bits_hwcap; + + + /* Keep the list sorted - search for right place to insert. 
*/ +- ptr = entries; +- prev = entries; ++ struct cache_entry *ptr = entries; ++ struct cache_entry *prev = entries; + while (ptr != NULL) + { + if (compare (ptr, new_entry) > 0) +@@ -522,3 +501,304 @@ add_to_cache (const char *path, const ch + prev->next = new_entry; + } + } ++ ++ ++/* Auxiliary cache. */ ++ ++struct aux_cache_entry_id ++{ ++ uint64_t ino; ++ uint64_t ctime; ++ uint64_t size; ++ uint64_t dev; ++}; ++ ++struct aux_cache_entry ++{ ++ struct aux_cache_entry_id id; ++ int flags; ++ unsigned int osversion; ++ int used; ++ char *soname; ++ struct aux_cache_entry *next; ++}; ++ ++#define AUX_CACHEMAGIC "glibc-ld.so.auxcache-1.0" ++ ++struct aux_cache_file_entry ++{ ++ struct aux_cache_entry_id id; /* Unique id of entry. */ ++ int32_t flags; /* This is 1 for an ELF library. */ ++ uint32_t soname; /* String table indice. */ ++ uint32_t osversion; /* Required OS version. */ ++ int32_t pad; ++}; ++ ++/* ldconfig maintains an auxiliary cache file that allows ++ only reading those libraries that have changed since the last iteration. ++ For this for each library some information is cached in the auxiliary ++ cache. */ ++struct aux_cache_file ++{ ++ char magic[sizeof AUX_CACHEMAGIC - 1]; ++ uint32_t nlibs; /* Number of entries. */ ++ uint32_t len_strings; /* Size of string table. */ ++ struct aux_cache_file_entry libs[0]; /* Entries describing libraries. */ ++ /* After this the string table of size len_strings is found. */ ++}; ++ ++static unsigned int primes[] = ++{ ++ 1021, 2039, 4093, 8191, 16381, 32749, 65521, 131071, 262139, ++ 524287, 1048573, 2097143, 4194301, 8388593, 16777213, 33554393, ++ 67108859, 134217689, 268435399, 536870909, 1073741789, 2147483647 ++}; ++ ++static size_t aux_hash_size; ++static struct aux_cache_entry **aux_hash; ++ ++/* Simplistic hash function for aux_cache_entry_id. 
*/ ++static unsigned int ++aux_cache_entry_id_hash (struct aux_cache_entry_id *id) ++{ ++ uint64_t ret = ((id->ino * 11 + id->ctime) * 11 + id->size) * 11 + id->dev; ++ return ret ^ (ret >> 32); ++} ++ ++static size_t nextprime (size_t x) ++{ ++ for (unsigned int i = 0; i < sizeof (primes) / sizeof (primes[0]); ++i) ++ if (primes[i] >= x) ++ return primes[i]; ++ return x; ++} ++ ++void ++init_aux_cache (void) ++{ ++ aux_hash_size = primes[3]; ++ aux_hash = xcalloc (aux_hash_size, sizeof (struct aux_cache_entry *)); ++} ++ ++int ++search_aux_cache (struct stat64 *stat_buf, int *flags, ++ unsigned int *osversion, char **soname) ++{ ++ struct aux_cache_entry_id id; ++ id.ino = (uint64_t) stat_buf->st_ino; ++ id.ctime = (uint64_t) stat_buf->st_ctime; ++ id.size = (uint64_t) stat_buf->st_size; ++ id.dev = (uint64_t) stat_buf->st_dev; ++ ++ unsigned int hash = aux_cache_entry_id_hash (&id); ++ struct aux_cache_entry *entry; ++ for (entry = aux_hash[hash % aux_hash_size]; entry; entry = entry->next) ++ if (id.ino == entry->id.ino ++ && id.ctime == entry->id.ctime ++ && id.size == entry->id.size ++ && id.dev == entry->id.dev) ++ { ++ *flags = entry->flags; ++ *osversion = entry->osversion; ++ if (entry->soname != NULL) ++ *soname = xstrdup (entry->soname); ++ else ++ *soname = NULL; ++ entry->used = 1; ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static void ++insert_to_aux_cache (struct aux_cache_entry_id *id, int flags, ++ unsigned int osversion, const char *soname, int used) ++{ ++ size_t hash = aux_cache_entry_id_hash (id) % aux_hash_size; ++ struct aux_cache_entry *entry; ++ for (entry = aux_hash[hash]; entry; entry = entry->next) ++ if (id->ino == entry->id.ino ++ && id->ctime == entry->id.ctime ++ && id->size == entry->id.size ++ && id->dev == entry->id.dev) ++ abort (); ++ ++ size_t len = soname ? strlen (soname) + 1 : 0; ++ entry = xmalloc (sizeof (struct aux_cache_entry) + len); ++ entry->id = *id; ++ entry->flags = flags; ++ entry->osversion = osversion; ++ entry->used = used; ++ if (soname != NULL) ++ entry->soname = memcpy ((char *) (entry + 1), soname, len); ++ else ++ entry->soname = NULL; ++ entry->next = aux_hash[hash]; ++ aux_hash[hash] = entry; ++} ++ ++void ++add_to_aux_cache (struct stat64 *stat_buf, int flags, ++ unsigned int osversion, const char *soname) ++{ ++ struct aux_cache_entry_id id; ++ id.ino = (uint64_t) stat_buf->st_ino; ++ id.ctime = (uint64_t) stat_buf->st_ctime; ++ id.size = (uint64_t) stat_buf->st_size; ++ id.dev = (uint64_t) stat_buf->st_dev; ++ insert_to_aux_cache (&id, flags, osversion, soname, 1); ++} ++ ++/* Load auxiliary cache to search for unchanged entries. 
*/ ++void ++load_aux_cache (const char *aux_cache_name) ++{ ++ int fd = open (aux_cache_name, O_RDONLY); ++ if (fd < 0) ++ { ++ init_aux_cache (); ++ return; ++ } ++ ++ struct stat64 st; ++ if (fstat64 (fd, &st) < 0 || st.st_size < sizeof (struct aux_cache_file)) ++ { ++ close (fd); ++ init_aux_cache (); ++ return; ++ } ++ ++ size_t aux_cache_size = st.st_size; ++ struct aux_cache_file *aux_cache ++ = mmap (NULL, aux_cache_size, PROT_READ, MAP_PRIVATE, fd, 0); ++ if (aux_cache == MAP_FAILED ++ || aux_cache_size < sizeof (struct aux_cache_file) ++ || memcmp (aux_cache->magic, AUX_CACHEMAGIC, sizeof AUX_CACHEMAGIC - 1) ++ || aux_cache->nlibs < 0 ++ || aux_cache->nlibs >= aux_cache_size) ++ { ++ close (fd); ++ init_aux_cache (); ++ return; ++ } ++ ++ aux_hash_size = nextprime (aux_cache->nlibs); ++ aux_hash = xcalloc (aux_hash_size, sizeof (struct aux_cache_entry *)); ++ ++ const char *aux_cache_data ++ = (const char *) &aux_cache->libs[aux_cache->nlibs]; ++ for (unsigned int i = 0; i < aux_cache->nlibs; ++i) ++ insert_to_aux_cache (&aux_cache->libs[i].id, ++ aux_cache->libs[i].flags, ++ aux_cache->libs[i].osversion, ++ aux_cache->libs[i].soname == 0 ++ ? NULL : aux_cache_data + aux_cache->libs[i].soname, ++ 0); ++ ++ munmap (aux_cache, aux_cache_size); ++ close (fd); ++} ++ ++/* Save the contents of the auxiliary cache. */ ++void ++save_aux_cache (const char *aux_cache_name) ++{ ++ /* Count the length of all sonames. We start with empty string. */ ++ size_t total_strlen = 1; ++ /* Number of cache entries. */ ++ int cache_entry_count = 0; ++ ++ for (size_t i = 0; i < aux_hash_size; ++i) ++ for (struct aux_cache_entry *entry = aux_hash[i]; ++ entry != NULL; entry = entry->next) ++ if (entry->used) ++ { ++ ++cache_entry_count; ++ if (entry->soname != NULL) ++ total_strlen += strlen (entry->soname) + 1; ++ } ++ ++ /* Auxiliary cache. */ ++ size_t file_entries_size ++ = sizeof (struct aux_cache_file) ++ + cache_entry_count * sizeof (struct aux_cache_file_entry); ++ struct aux_cache_file *file_entries ++ = xmalloc (file_entries_size + total_strlen); ++ ++ /* Fill in the header of the auxiliary cache. */ ++ memset (file_entries, '\0', sizeof (struct aux_cache_file)); ++ memcpy (file_entries->magic, AUX_CACHEMAGIC, sizeof AUX_CACHEMAGIC - 1); ++ ++ file_entries->nlibs = cache_entry_count; ++ file_entries->len_strings = total_strlen; ++ ++ /* Initial String offset for auxiliary cache is always after the ++ special empty string. */ ++ unsigned int str_offset = 1; ++ ++ /* An array for all strings. */ ++ char *str = (char *) file_entries + file_entries_size; ++ *str++ = '\0'; ++ ++ size_t idx = 0; ++ for (size_t i = 0; i < aux_hash_size; ++i) ++ for (struct aux_cache_entry *entry = aux_hash[i]; ++ entry != NULL; entry = entry->next) ++ if (entry->used) ++ { ++ file_entries->libs[idx].id = entry->id; ++ file_entries->libs[idx].flags = entry->flags; ++ if (entry->soname == NULL) ++ file_entries->libs[idx].soname = 0; ++ else ++ { ++ file_entries->libs[idx].soname = str_offset; ++ ++ size_t len = strlen (entry->soname) + 1; ++ str = mempcpy (str, entry->soname, len); ++ str_offset += len; ++ } ++ file_entries->libs[idx].osversion = entry->osversion; ++ file_entries->libs[idx++].pad = 0; ++ } ++ ++ /* Write out auxiliary cache file. */ ++ /* Write auxiliary cache first to a temporary file and rename it later. */ ++ ++ char *temp_name = xmalloc (strlen (aux_cache_name) + 2); ++ sprintf (temp_name, "%s~", aux_cache_name); ++ ++ /* Check that directory exists and create if needed. 
*/ ++ char *dir = strdupa (aux_cache_name); ++ dir = dirname (dir); ++ ++ struct stat64 st; ++ if (stat64 (dir, &st) < 0) ++ { ++ if (mkdir (dir, 0700) < 0) ++ error (EXIT_FAILURE, errno, _("Cannot create directory %s"), dir); ++ } ++ ++ /* Create file. */ ++ int fd = open (temp_name, O_CREAT|O_WRONLY|O_TRUNC|O_NOFOLLOW, ++ S_IRUSR|S_IWUSR); ++ if (fd < 0) ++ error (EXIT_FAILURE, errno, ++ _("Can't create temporary auxiliary cache file %s"), ++ temp_name); ++ ++ if (write (fd, file_entries, file_entries_size + total_strlen) ++ != (ssize_t) (file_entries_size + total_strlen) ++ || close (fd)) ++ error (EXIT_FAILURE, errno, ++ _("Writing of auxiliary cache data failed")); ++ ++ /* Move temporary to its final location. */ ++ if (rename (temp_name, aux_cache_name)) ++ error (EXIT_FAILURE, errno, _("Renaming of %s to %s failed"), temp_name, ++ aux_cache_name); ++ ++ /* Free allocated memory. */ ++ free (file_entries); ++} +--- libc/sysdeps/generic/ldconfig.h.jj 2003-03-14 06:32:49.000000000 +0100 ++++ libc/sysdeps/generic/ldconfig.h 2007-07-27 17:39:46.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 1999, 2000, 2002, 2003 Free Software Foundation, Inc. ++/* Copyright (C) 1999, 2000, 2002, 2003, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Andreas Jaeger , 1999. + +@@ -35,6 +35,9 @@ + #define FLAG_MIPS64_LIBN32 0x0600 + #define FLAG_MIPS64_LIBN64 0x0700 + ++/* Name of auxiliary cache. */ ++#define _PATH_LDCONFIG_AUX_CACHE "/var/cache/ldconfig/aux-cache" ++ + /* Declared in cache.c. */ + extern void print_cache (const char *cache_name); + +@@ -45,10 +48,24 @@ extern void save_cache (const char *cach + extern void add_to_cache (const char *path, const char *lib, int flags, + unsigned int osversion, uint64_t hwcap); + ++extern void init_aux_cache (void); ++ ++extern void load_aux_cache (const char *aux_cache_name); ++ ++extern int search_aux_cache (struct stat64 *stat_buf, int *flags, ++ unsigned int *osversion, char **soname); ++ ++extern void add_to_aux_cache (struct stat64 *stat_buf, int flags, ++ unsigned int osversion, const char *soname); ++ ++extern void save_aux_cache (const char *aux_cache_name); ++ + /* Declared in readlib.c. */ + extern int process_file (const char *real_file_name, const char *file_name, + const char *lib, int *flag, unsigned int *osversion, +- char **soname, int is_link); ++ char **soname, int is_link, struct stat64 *stat_buf); ++ ++extern char *implicit_soname (const char *lib, int flag); + + /* Declared in readelflib.c. */ + extern int process_elf_file (const char *file_name, const char *lib, int *flag, + diff --git a/glibc-private-futex.patch b/glibc-private-futex.patch new file mode 100644 index 0000000..5a1fd14 --- /dev/null +++ b/glibc-private-futex.patch @@ -0,0 +1,7988 @@ +2007-07-31 Anton Blanchard + + * sysdeps/unix/sysv/linux/powerpc/sem_post.c (__new_sem_post): + Use __asm __volatile (__lll_acq_instr ::: "memory") instead of + atomic_full_barrier. + +2007-07-31 Jakub Jelinek + + * allocatestack.c (stack_cache_lock): Change type to int. + (get_cached_stack, allocate_stack, __deallocate_stack, + __make_stacks_executable, __find_thread_by_id, __nptl_setxid, + __pthread_init_static_tls, __wait_lookup_done): Add LLL_PRIVATE + as second argument to lll_lock and lll_unlock macros on + stack_cache_lock. + * pthread_create.c (__find_in_stack_list): Likewise. + (start_thread): Similarly with pd->lock. Use lll_robust_dead + macro instead of lll_robust_mutex_dead, pass LLL_SHARED to it + as second argument. 
+ * descr.h (struct pthread): Change lock and setxid_futex field + type to int. + * old_pthread_cond_broadcast.c (__pthread_cond_broadcast_2_0): Use + LLL_LOCK_INITIALIZER instead of LLL_MUTEX_LOCK_INITIALIZER. + * old_pthread_cond_signal.c (__pthread_cond_signal_2_0): Likewise. + * old_pthread_cond_timedwait.c (__pthread_cond_timedwait_2_0): + Likewise. + * old_pthread_cond_wait.c (__pthread_cond_wait_2_0): Likewise. + * pthread_cond_init.c (__pthread_cond_init): Likewise. + * pthreadP.h (__attr_list_lock): Change type to int. + * pthread_attr_init.c (__attr_list_lock): Likewise. + * pthread_barrier_destroy.c (pthread_barrier_destroy): Pass + ibarrier->private ^ FUTEX_PRIVATE_FLAG as second argument to + lll_{,un}lock. + * pthread_barrier_wait.c (pthread_barrier_wait): Likewise and + also for lll_futex_{wake,wait}. + * pthread_barrier_init.c (pthread_barrier_init): Make iattr + a pointer to const. + * pthread_cond_broadcast.c (__pthread_cond_broadcast): Pass + LLL_SHARED as second argument to lll_{,un}lock. + * pthread_cond_destroy.c (__pthread_cond_destroy): Likewise. + * pthread_cond_signal.c (__pthread_cond_singal): Likewise. + * pthread_cond_timedwait.c (__pthread_cond_timedwait): Likewise. + * pthread_cond_wait.c (__condvar_cleanup, __pthread_cond_wait): + Likewise. + * pthread_getattr_np.c (pthread_getattr_np): Add LLL_PRIVATE + as second argument to lll_{,un}lock macros on pd->lock. + * pthread_getschedparam.c (__pthread_getschedparam): Likewise. + * pthread_setschedparam.c (__pthread_setschedparam): Likewise. + * pthread_setschedprio.c (pthread_setschedprio): Likewise. + * tpp.c (__pthread_tpp_change_priority, __pthread_current_priority): + Likewise. + * sysdeps/pthread/createthread.c (do_clone, create_thread): + Likewise. + * pthread_once.c (once_lock): Change type to int. + (__pthread_once): Pass LLL_PRIVATE as second argument to + lll_{,un}lock macros on once_lock. + * pthread_rwlock_rdlock.c (__pthread_rwlock_rdlock): Use + lll_{,un}lock macros instead of lll_mutex_{,un}lock, pass + rwlock->__data.__shared as second argument to them and similarly + for lll_futex_w*. + * pthread_rwlock_timedrdlock.c (pthread_rwlock_timedrdlock): + Likewise. + * pthread_rwlock_timedwrlock.c (pthread_rwlock_timedwrlock): + Likewise. + * pthread_rwlock_tryrdlock.c (__pthread_rwlock_tryrdlock): Likewise. + * pthread_rwlock_trywrlock.c (__pthread_rwlock_trywrlock): Likewise. + * pthread_rwlock_unlock.c (__pthread_rwlock_unlock): Likewise. + * pthread_rwlock_wrlock.c (__pthread_rwlock_wrlock): Likewise. + * sem_close.c (sem_close): Pass LLL_PRIVATE as second argument + to lll_{,un}lock macros on __sem_mappings_lock. + * sem_open.c (check_add_mapping): Likewise. + (__sem_mappings_lock): Change type to int. + * semaphoreP.h (__sem_mappings_lock): Likewise. + * pthread_mutex_lock.c (LLL_MUTEX_LOCK, LLL_MUTEX_TRYLOCK, + LLL_ROBUST_MUTEX_LOCK): Use lll_{,try,robust_}lock macros + instead of lll_*mutex_*, pass LLL_SHARED as last + argument. + (__pthread_mutex_lock): Use lll_unlock instead of lll_mutex_unlock, + pass LLL_SHARED as last argument. + * sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c (LLL_MUTEX_LOCK, + LLL_MUTEX_TRYLOCK, LLL_ROBUST_MUTEX_LOCK): Use + lll_{cond_,cond_try,robust_cond}lock macros instead of lll_*mutex_*, + pass LLL_SHARED as last argument. + * pthread_mutex_timedlock.c (pthread_mutex_timedlock): Use + lll_{timed,try,robust_timed,un}lock instead of lll_*mutex*, pass + LLL_SHARED as last argument. + * pthread_mutex_trylock.c (__pthread_mutex_trylock): Similarly. 
+ * pthread_mutex_unlock.c (__pthread_mutex_unlock_usercnt): + Similarly. + * sysdeps/pthread/bits/libc-lock.h (__libc_lock_lock, + __libc_lock_lock_recursive, __libc_lock_unlock, + __libc_lock_unlock_recursive): Pass LLL_PRIVATE as second + argument to lll_{,un}lock. + * sysdeps/pthread/bits/stdio-lock.h (_IO_lock_lock, + _IO_lock_unlock): Likewise. + * sysdeps/unix/sysv/linux/fork.c (__libc_fork): Don't use + compound literal. + * sysdeps/unix/sysv/linux/unregister-atfork.c (__unregister_atfork): + Pass LLL_PRIVATE as second argument to lll_{,un}lock macros on + __fork_lock. + * sysdeps/unix/sysv/linux/register-atfork.c (__register_atfork, + free_mem): Likewise. + (__fork_lock): Change type to int. + * sysdeps/unix/sysv/linux/fork.h (__fork_lock): Likewise. + * sysdeps/unix/sysv/linux/sem_post.c (__new_sem_post): Pass + isem->private ^ FUTEX_PRIVATE_FLAG as second argument to + lll_futex_wake. + * sysdeps/unix/sysv/linux/sem_timedwait.c (sem_timedwait): Likewise. + * sysdeps/unix/sysv/linux/sem_wait.c (__new_sem_wait): Likewise. + * sysdeps/unix/sysv/linux/lowlevellock.c (__lll_lock_wait_private): + New function. + (__lll_lock_wait, __lll_timedlock_wait): Add private argument and + pass it through to lll_futex_*wait, only compile in when + IS_IN_libpthread. + * sysdeps/unix/sysv/linux/lowlevelrobustlock.c + (__lll_robust_lock_wait, __lll_robust_timedlock_wait): Add private + argument and pass it through to lll_futex_*wait. + * sysdeps/unix/sysv/linux/alpha/lowlevellock.h: Renamed all + lll_mutex_* resp. lll_robust_mutex_* macros to lll_* resp. + lll_robust_*. Renamed all __lll_mutex_* resp. __lll_robust_mutex_* + inline functions to __lll_* resp. __lll_robust_*. + (LLL_MUTEX_LOCK_INITIALIZER): Remove. + (lll_mutex_dead): Add private argument. + (__lll_lock_wait_private): New prototype. + (__lll_lock_wait, __lll_robust_lock_wait, __lll_lock_timedwait, + __lll_robust_lock_timedwait): Add private argument to prototypes. + (__lll_lock): Add private argument, if it is constant LLL_PRIVATE, + call __lll_lock_wait_private, otherwise pass private to + __lll_lock_wait. + (__lll_robust_lock, __lll_cond_lock, __lll_timedlock, + __lll_robust_timedlock): Add private argument, pass it to + __lll_*wait functions. + (__lll_unlock): Add private argument, if it is constant LLL_PRIVATE, + call __lll_unlock_wake_private, otherwise pass private to + __lll_unlock_wake. + (__lll_robust_unlock): Add private argument, pass it to + __lll_robust_unlock_wake. + (lll_lock, lll_robust_lock, lll_cond_lock, lll_timedlock, + lll_robust_timedlock, lll_unlock, lll_robust_unlock): Add private + argument, pass it through to __lll_* inline function. + (__lll_mutex_unlock_force, lll_mutex_unlock_force): Remove. + (lll_lock_t): Remove. + (__lll_cond_wait, __lll_cond_timedwait, __lll_cond_wake, + __lll_cond_broadcast, lll_cond_wait, lll_cond_timedwait, + lll_cond_wake, lll_cond_broadcast): Remove. + * sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise. + * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise. + * sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise. + * sysdeps/unix/sysv/linux/sparc/lowlevellock.h: Likewise. + * sysdeps/unix/sysv/linux/i386/lowlevellock.h: Allow including + the header from assembler. Renamed all lll_mutex_* resp. + lll_robust_mutex_* macros to lll_* resp. lll_robust_*. + (LOCK, FUTEX_CMP_REQUEUE, FUTEX_WAKE_OP, + FUTEX_OP_CLEAR_WAKE_IF_GT_ONE): Define. + (LLL_MUTEX_LOCK_INITIALIZER, LLL_MUTEX_LOCK_INITIALIZER_LOCKED, + LLL_MUTEX_LOCK_INITIALIZER_WAITERS): Remove. 
+ (__lll_mutex_lock_wait, __lll_mutex_timedlock_wait, + __lll_mutex_unlock_wake, __lll_lock_wait, __lll_unlock_wake): + Remove prototype. + (__lll_trylock_asm, __lll_lock_asm_start, __lll_unlock_asm): Define. + (lll_robust_trylock, lll_cond_trylock): Use LLL_LOCK_INITIALIZER* + rather than LLL_MUTEX_LOCK_INITIALIZER* macros. + (lll_trylock): Likewise, use __lll_trylock_asm, pass + MULTIPLE_THREADS_OFFSET as another asm operand. + (lll_lock): Add private argument, use __lll_lock_asm_start, pass + MULTIPLE_THREADS_OFFSET as last asm operand, call + __lll_lock_wait_private if private is constant LLL_PRIVATE, + otherwise pass private as another argument to __lll_lock_wait. + (lll_robust_lock, lll_cond_lock, lll_robust_cond_lock, + lll_timedlock, lll_robust_timedlock): Add private argument, pass + private as another argument to __lll_*lock_wait call. + (lll_unlock): Add private argument, use __lll_unlock_asm, pass + MULTIPLE_THREADS_OFFSET as another asm operand, call + __lll_unlock_wake_private if private is constant LLL_PRIVATE, + otherwise pass private as another argument to __lll_unlock_wake. + (lll_robust_unlock): Add private argument, pass private as another + argument to __lll_unlock_wake. + (lll_robust_dead): Add private argument, use __lll_private_flag + macro. + (lll_islocked): Use LLL_LOCK_INITIALIZER instead of + LLL_MUTEX_LOCK_INITIALIZER. + (lll_lock_t): Remove. + (LLL_LOCK_INITIALIZER_WAITERS): Define. + (__lll_cond_wait, __lll_cond_timedwait, __lll_cond_wake, + __lll_cond_broadcast, lll_cond_wait, lll_cond_timedwait, + lll_cond_wake, lll_cond_broadcast): Remove. + * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise. + * sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S: Revert + 2007-05-2{3,9} changes. + * sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Include + kernel-features.h and lowlevellock.h. + (LOAD_PRIVATE_FUTEX_WAIT): Define. + (LOAD_FUTEX_WAIT): Rewritten. + (LOCK, SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't + define. + (__lll_lock_wait_private, __lll_unlock_wake_private): New functions. + (__lll_mutex_lock_wait): Rename to ... + (__lll_lock_wait): ... this. Take futex addr from %edx instead of + %ecx, %ecx is now private argument. Don't compile in for libc.so. + (__lll_mutex_timedlock_wait): Rename to ... + (__lll_timedlock_wait): ... this. Use __NR_gettimeofday. %esi + contains private argument. Don't compile in for libc.so. + (__lll_mutex_unlock_wake): Rename to ... + (__lll_unlock_wake): ... this. %ecx contains private argument. + Don't compile in for libc.so. + (__lll_timedwait_tid): Use __NR_gettimeofday. + * sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S: Include + kernel-features.h and lowlevellock.h. + (LOAD_FUTEX_WAIT): Define. + (LOCK, SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't + define. + (__lll_robust_mutex_lock_wait): Rename to ... + (__lll_robust_lock_wait): ... this. Futex addr is now in %edx + argument, %ecx argument contains private. Use LOAD_FUTEX_WAIT + macro. + (__lll_robust_mutex_timedlock_wait): Rename to ... + (__lll_robust_timedlock_wait): ... this. Use __NR_gettimeofday. + %esi argument contains private, use LOAD_FUTEX_WAIT macro. + * sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S: Include + lowlevellock.h. + (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. 
+ (pthread_barrier_wait): Rename __lll_mutex_* to __lll_*, pass + PRIVATE(%ebx) ^ LLL_SHARED as private argument in %ecx to + __lll_lock_wait and __lll_unlock_wake, pass MUTEX(%ebx) address + to __lll_lock_wait in %edx. + * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S: + Include lowlevellock.h and pthread-errnos.h. + (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE, + FUTEX_CMP_REQUEUE, EINVAL, LOCK): Don't define. + (__pthread_cond_broadcast): Rename __lll_mutex_* to __lll_*, pass + cond_lock address in %edx rather than %ecx to __lll_lock_wait, + pass LLL_SHARED in %ecx to both __lll_lock_wait and + __lll_unlock_wake. + * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S: + Include lowlevellock.h and pthread-errnos.h. + (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_WAKE_OP, + FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, EINVAL, LOCK): Don't define. + (__pthread_cond_signal): Rename __lll_mutex_* to __lll_*, pass + cond_lock address in %edx rather than %ecx to __lll_lock_wait, + pass LLL_SHARED in %ecx to both __lll_lock_wait and + __lll_unlock_wake. + * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: + Include lowlevellock.h. + (SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK): + Don't define. + (__pthread_cond_timedwait): Rename __lll_mutex_* to __lll_*, pass + cond_lock address in %edx rather than %ecx to __lll_lock_wait, + pass LLL_SHARED in %ecx to both __lll_lock_wait and + __lll_unlock_wake. Use __NR_gettimeofday. + * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: + Include lowlevellock.h. + (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. + (__pthread_cond_wait, __condvar_w_cleanup): Rename __lll_mutex_* + to __lll_*, pass cond_lock address in %edx rather than %ecx to + __lll_lock_wait, pass LLL_SHARED in %ecx to both __lll_lock_wait + and __lll_unlock_wake. + * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S: + Include lowlevellock.h. + (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. + (__pthread_rwlock_rdlock): Rename __lll_mutex_* to __lll_*, pass + MUTEX(%ebx) address in %edx rather than %ecx to + __lll_lock_wait, pass PSHARED(%ebx) in %ecx to both __lll_lock_wait + and __lll_unlock_wake. Move return value from %ecx to %edx + register. + * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S: + Include lowlevellock.h. + (SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK): + Don't define. + (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass + MUTEX(%ebp) address in %edx rather than %ecx to + __lll_lock_wait, pass PSHARED(%ebp) in %ecx to both __lll_lock_wait + and __lll_unlock_wake. Move return value from %ecx to %edx + register. Use __NR_gettimeofday. + * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S: + Include lowlevellock.h. + (SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK): + Don't define. + (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass + MUTEX(%ebp) address in %edx rather than %ecx to + __lll_lock_wait, pass PSHARED(%ebp) in %ecx to both __lll_lock_wait + and __lll_unlock_wake. Move return value from %ecx to %edx + register. Use __NR_gettimeofday. + * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S: + Include lowlevellock.h. + (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. + (__pthread_rwlock_unlock): Rename __lll_mutex_* to __lll_*, pass + MUTEX(%edi) address in %edx rather than %ecx to + __lll_lock_wait, pass PSHARED(%edi) in %ecx to both __lll_lock_wait + and __lll_unlock_wake. 
+ * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S: + Include lowlevellock.h. + (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. + (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass + MUTEX(%ebx) address in %edx rather than %ecx to + __lll_lock_wait, pass PSHARED(%ebx) in %ecx to both __lll_lock_wait + and __lll_unlock_wake. Move return value from %ecx to %edx + register. + * sysdeps/unix/sysv/linux/i386/pthread_once.S: Include + lowlevellock.h. + (LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Don't + define. + * sysdeps/unix/sysv/linux/i386/i486/sem_post.S: Include lowlevellock.h. + (LOCK, SYS_futex, FUTEX_WAKE): Don't define. + * sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S: Include + lowlevellock.h. + (LOCK, SYS_futex, SYS_gettimeofday, FUTEX_WAIT): Don't define. + (sem_timedwait): Use __NR_gettimeofday. + * sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S: Include + lowlevellock.h. + (LOCK): Don't define. + * sysdeps/unix/sysv/linux/i386/i486/sem_wait.S: Include + lowlevellock.h. + (LOCK, SYS_futex, FUTEX_WAIT): Don't define. + * sysdeps/unix/sysv/linux/powerpc/sem_post.c: Wake only when there + are waiters. + * sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S: Revert + 2007-05-2{3,9} changes. + * sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Include + kernel-features.h and lowlevellock.h. + (LOAD_PRIVATE_FUTEX_WAIT): Define. + (LOAD_FUTEX_WAIT): Rewritten. + (LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define. + (__lll_lock_wait_private, __lll_unlock_wake_private): New functions. + (__lll_mutex_lock_wait): Rename to ... + (__lll_lock_wait): ... this. %esi is now private argument. + Don't compile in for libc.so. + (__lll_mutex_timedlock_wait): Rename to ... + (__lll_timedlock_wait): ... this. %esi contains private argument. + Don't compile in for libc.so. + (__lll_mutex_unlock_wake): Rename to ... + (__lll_unlock_wake): ... this. %esi contains private argument. + Don't compile in for libc.so. + * sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S: Include + kernel-features.h and lowlevellock.h. + (LOAD_FUTEX_WAIT): Define. + (LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define. + (__lll_robust_mutex_lock_wait): Rename to ... + (__lll_robust_lock_wait): ... this. %esi argument contains private. + Use LOAD_FUTEX_WAIT macro. + (__lll_robust_mutex_timedlock_wait): Rename to ... + (__lll_robust_timedlock_wait): ... this. %esi argument contains + private, use LOAD_FUTEX_WAIT macro. + * sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S: Include + lowlevellock.h. + (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. + (pthread_barrier_wait): Rename __lll_mutex_* to __lll_*, pass + PRIVATE(%rdi) ^ LLL_SHARED as private argument in %esi to + __lll_lock_wait and __lll_unlock_wake. + * sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S: + Include lowlevellock.h and pthread-errnos.h. + (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE, + FUTEX_CMP_REQUEUE, EINVAL, LOCK): Don't define. + (__pthread_cond_broadcast): Rename __lll_mutex_* to __lll_*, + pass LLL_SHARED in %esi to both __lll_lock_wait and + __lll_unlock_wake. + * sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S: + Include lowlevellock.h and pthread-errnos.h. + (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_WAKE_OP, + FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, EINVAL, LOCK): Don't define. + (__pthread_cond_signal): Rename __lll_mutex_* to __lll_*, + pass LLL_SHARED in %esi to both __lll_lock_wait and + __lll_unlock_wake. 
+ * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S: + Include lowlevellock.h. + (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. + (__pthread_cond_timedwait): Rename __lll_mutex_* to __lll_*, + pass LLL_SHARED in %esi to both __lll_lock_wait and + __lll_unlock_wake. + * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: + Include lowlevellock.h. + (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. + (__pthread_cond_wait, __condvar_cleanup): Rename __lll_mutex_* + to __lll_*, pass LLL_SHARED in %esi to both __lll_lock_wait + and __lll_unlock_wake. + * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S: + Include lowlevellock.h. + (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK): + Don't define. + (__pthread_rwlock_rdlock): Rename __lll_mutex_* to __lll_*, + pass PSHARED(%rdi) in %esi to both __lll_lock_wait + and __lll_unlock_wake. + * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S: + Include lowlevellock.h. + (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK): + Don't define. + (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, + pass PSHARED(%rdi) in %esi to both __lll_lock_wait + and __lll_unlock_wake. + * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S: + Include lowlevellock.h. + (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK): + Don't define. + (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, + pass PSHARED(%rdi) in %esi to both __lll_lock_wait + and __lll_unlock_wake. + * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S: + Include lowlevellock.h. + (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK): + Don't define. + (__pthread_rwlock_unlock): Rename __lll_mutex_* to __lll_*, + pass PSHARED(%rdi) in %esi to both __lll_lock_wait + and __lll_unlock_wake. + * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S: + Include lowlevellock.h. + (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK): + Don't define. + (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, + pass PSHARED(%rdi) in %ecx to both __lll_lock_wait + and __lll_unlock_wake. + * sysdeps/unix/sysv/linux/x86_64/pthread_once.S: Include + lowlevellock.h. + (LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Don't + define. + * sysdeps/unix/sysv/linux/x86_64/sem_post.S: Include lowlevellock.h. + (LOCK, SYS_futex, FUTEX_WAKE): Don't define. + * sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S: Include + lowlevellock.h. + (LOCK, SYS_futex, FUTEX_WAIT): Don't define. + * sysdeps/unix/sysv/linux/x86_64/sem_trywait.S: Include + lowlevellock.h. + (LOCK): Don't define. + * sysdeps/unix/sysv/linux/x86_64/sem_wait.S: Include + lowlevellock.h. + (LOCK, SYS_futex, FUTEX_WAIT): Don't define. + * sysdeps/unix/sysv/linux/sparc/internaltypes.h: New file. + * sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c: New file. + * sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c: New file. + * sysdeps/unix/sysv/linux/sparc/pthread_barrier_wait.c: New file. + * sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c + (__lll_lock_wait_private): New function. + (__lll_lock_wait, __lll_timedlock_wait): Add private argument, pass + it to lll_futex_*wait. Don't compile in for libc.so. + * sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_init.c: + Remove. + * sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c + (struct sparc_pthread_barrier): Remove. + (pthread_barrier_wait): Use union sparc_pthread_barrier instead of + struct sparc_pthread_barrier. Pass + ibarrier->s.pshared ? 
LLL_SHARED : LLL_PRIVATE to lll_{,un}lock + and lll_futex_wait macros. + * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_init.c: + Remove. + * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c: + Include sparc pthread_barrier_wait.c instead of generic one. + +--- libc/nptl/sem_open.c.jj 2007-06-04 08:42:05.000000000 +0200 ++++ libc/nptl/sem_open.c 2007-07-29 11:48:55.000000000 +0200 +@@ -147,7 +147,7 @@ __sem_search (const void *a, const void + void *__sem_mappings attribute_hidden; + + /* Lock to protect the search tree. */ +-lll_lock_t __sem_mappings_lock attribute_hidden = LLL_LOCK_INITIALIZER; ++int __sem_mappings_lock attribute_hidden = LLL_LOCK_INITIALIZER; + + + /* Search for existing mapping and if possible add the one provided. */ +@@ -161,7 +161,7 @@ check_add_mapping (const char *name, siz + if (__fxstat64 (_STAT_VER, fd, &st) == 0) + { + /* Get the lock. */ +- lll_lock (__sem_mappings_lock); ++ lll_lock (__sem_mappings_lock, LLL_PRIVATE); + + /* Search for an existing mapping given the information we have. */ + struct inuse_sem *fake; +@@ -210,7 +210,7 @@ check_add_mapping (const char *name, siz + } + + /* Release the lock. */ +- lll_unlock (__sem_mappings_lock); ++ lll_unlock (__sem_mappings_lock, LLL_PRIVATE); + } + + if (result != existing && existing != SEM_FAILED && existing != MAP_FAILED) +--- libc/nptl/pthread_mutex_timedlock.c.jj 2007-06-29 10:19:56.000000000 +0200 ++++ libc/nptl/pthread_mutex_timedlock.c 2007-07-29 11:48:55.000000000 +0200 +@@ -56,7 +56,8 @@ pthread_mutex_timedlock (mutex, abstime) + } + + /* We have to get the mutex. */ +- result = lll_mutex_timedlock (mutex->__data.__lock, abstime); ++ result = lll_timedlock (mutex->__data.__lock, abstime, ++ /* XYZ */ LLL_SHARED); + + if (result != 0) + goto out; +@@ -76,14 +77,15 @@ pthread_mutex_timedlock (mutex, abstime) + case PTHREAD_MUTEX_TIMED_NP: + simple: + /* Normal mutex. */ +- result = lll_mutex_timedlock (mutex->__data.__lock, abstime); ++ result = lll_timedlock (mutex->__data.__lock, abstime, ++ /* XYZ */ LLL_SHARED); + break; + + case PTHREAD_MUTEX_ADAPTIVE_NP: + if (! __is_smp) + goto simple; + +- if (lll_mutex_trylock (mutex->__data.__lock) != 0) ++ if (lll_trylock (mutex->__data.__lock) != 0) + { + int cnt = 0; + int max_cnt = MIN (MAX_ADAPTIVE_COUNT, +@@ -92,7 +94,8 @@ pthread_mutex_timedlock (mutex, abstime) + { + if (cnt++ >= max_cnt) + { +- result = lll_mutex_timedlock (mutex->__data.__lock, abstime); ++ result = lll_timedlock (mutex->__data.__lock, abstime, ++ /* XYZ */ LLL_SHARED); + break; + } + +@@ -100,7 +103,7 @@ pthread_mutex_timedlock (mutex, abstime) + BUSY_WAIT_NOP; + #endif + } +- while (lll_mutex_trylock (mutex->__data.__lock) != 0); ++ while (lll_trylock (mutex->__data.__lock) != 0); + + mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8; + } +@@ -174,15 +177,15 @@ pthread_mutex_timedlock (mutex, abstime) + } + } + +- result = lll_robust_mutex_timedlock (mutex->__data.__lock, abstime, +- id); ++ result = lll_robust_timedlock (mutex->__data.__lock, abstime, id, ++ /* XYZ */ LLL_SHARED); + + if (__builtin_expect (mutex->__data.__owner + == PTHREAD_MUTEX_NOTRECOVERABLE, 0)) + { + /* This mutex is now not recoverable. 
*/ + mutex->__data.__count = 0; +- lll_mutex_unlock (mutex->__data.__lock); ++ lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED); + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); + return ENOTRECOVERABLE; + } +--- libc/nptl/pthread_mutex_unlock.c.jj 2007-06-29 10:19:56.000000000 +0200 ++++ libc/nptl/pthread_mutex_unlock.c 2007-07-29 11:48:55.000000000 +0200 +@@ -47,7 +47,7 @@ __pthread_mutex_unlock_usercnt (mutex, d + case PTHREAD_MUTEX_ERRORCHECK_NP: + /* Error checking mutex. */ + if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid) +- || ! lll_mutex_islocked (mutex->__data.__lock)) ++ || ! lll_islocked (mutex->__data.__lock)) + return EPERM; + /* FALLTHROUGH */ + +@@ -61,7 +61,7 @@ __pthread_mutex_unlock_usercnt (mutex, d + --mutex->__data.__nusers; + + /* Unlock. */ +- lll_mutex_unlock (mutex->__data.__lock); ++ lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED); + break; + + case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP: +@@ -92,7 +92,7 @@ __pthread_mutex_unlock_usercnt (mutex, d + case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP: + if ((mutex->__data.__lock & FUTEX_TID_MASK) + != THREAD_GETMEM (THREAD_SELF, tid) +- || ! lll_mutex_islocked (mutex->__data.__lock)) ++ || ! lll_islocked (mutex->__data.__lock)) + return EPERM; + + /* If the previous owner died and the caller did not succeed in +@@ -115,7 +115,7 @@ __pthread_mutex_unlock_usercnt (mutex, d + --mutex->__data.__nusers; + + /* Unlock. */ +- lll_robust_mutex_unlock (mutex->__data.__lock); ++ lll_robust_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED); + + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); + break; +@@ -161,7 +161,7 @@ __pthread_mutex_unlock_usercnt (mutex, d + case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP: + if ((mutex->__data.__lock & FUTEX_TID_MASK) + != THREAD_GETMEM (THREAD_SELF, tid) +- || ! lll_mutex_islocked (mutex->__data.__lock)) ++ || ! lll_islocked (mutex->__data.__lock)) + return EPERM; + + /* If the previous owner died and the caller did not succeed in +--- libc/nptl/old_pthread_cond_signal.c.jj 2003-03-21 09:02:07.000000000 +0100 ++++ libc/nptl/old_pthread_cond_signal.c 2007-07-29 11:48:55.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc. ++/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + +@@ -33,7 +33,7 @@ __pthread_cond_signal_2_0 (cond) + { + pthread_cond_t *newcond; + +-#if LLL_MUTEX_LOCK_INITIALIZER == 0 ++#if LLL_LOCK_INITIALIZER == 0 + newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1); + if (newcond == NULL) + return ENOMEM; +--- libc/nptl/old_pthread_cond_timedwait.c.jj 2003-03-21 09:02:07.000000000 +0100 ++++ libc/nptl/old_pthread_cond_timedwait.c 2007-07-29 11:48:55.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc. ++/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. 
+ +@@ -35,7 +35,7 @@ __pthread_cond_timedwait_2_0 (cond, mute + { + pthread_cond_t *newcond; + +-#if LLL_MUTEX_LOCK_INITIALIZER == 0 ++#if LLL_LOCK_INITIALIZER == 0 + newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1); + if (newcond == NULL) + return ENOMEM; +--- libc/nptl/descr.h.jj 2007-06-04 08:42:05.000000000 +0200 ++++ libc/nptl/descr.h 2007-07-29 11:48:55.000000000 +0200 +@@ -309,10 +309,10 @@ struct pthread + int parent_cancelhandling; + + /* Lock to synchronize access to the descriptor. */ +- lll_lock_t lock; ++ int lock; + + /* Lock for synchronizing setxid calls. */ +- lll_lock_t setxid_futex; ++ int setxid_futex; + + #if HP_TIMING_AVAIL + /* Offset of the CPU clock at start thread start time. */ +--- libc/nptl/allocatestack.c.jj 2007-07-29 11:45:14.000000000 +0200 ++++ libc/nptl/allocatestack.c 2007-07-29 11:48:55.000000000 +0200 +@@ -103,7 +103,7 @@ static size_t stack_cache_maxsize = 40 * + static size_t stack_cache_actsize; + + /* Mutex protecting this variable. */ +-static lll_lock_t stack_cache_lock = LLL_LOCK_INITIALIZER; ++static int stack_cache_lock = LLL_LOCK_INITIALIZER; + + /* List of queued stack frames. */ + static LIST_HEAD (stack_cache); +@@ -139,7 +139,7 @@ get_cached_stack (size_t *sizep, void ** + struct pthread *result = NULL; + list_t *entry; + +- lll_lock (stack_cache_lock); ++ lll_lock (stack_cache_lock, LLL_PRIVATE); + + /* Search the cache for a matching entry. We search for the + smallest stack which has at least the required size. Note that +@@ -172,7 +172,7 @@ get_cached_stack (size_t *sizep, void ** + || __builtin_expect (result->stackblock_size > 4 * size, 0)) + { + /* Release the lock. */ +- lll_unlock (stack_cache_lock); ++ lll_unlock (stack_cache_lock, LLL_PRIVATE); + + return NULL; + } +@@ -187,7 +187,7 @@ get_cached_stack (size_t *sizep, void ** + stack_cache_actsize -= result->stackblock_size; + + /* Release the lock early. */ +- lll_unlock (stack_cache_lock); ++ lll_unlock (stack_cache_lock, LLL_PRIVATE); + + /* Report size and location of the stack to the caller. */ + *sizep = result->stackblock_size; +@@ -400,12 +400,12 @@ allocate_stack (const struct pthread_att + + + /* Prepare to modify global data. */ +- lll_lock (stack_cache_lock); ++ lll_lock (stack_cache_lock, LLL_PRIVATE); + + /* And add to the list of stacks in use. */ + list_add (&pd->list, &__stack_user); + +- lll_unlock (stack_cache_lock); ++ lll_unlock (stack_cache_lock, LLL_PRIVATE); + } + else + { +@@ -544,12 +544,12 @@ allocate_stack (const struct pthread_att + + + /* Prepare to modify global data. */ +- lll_lock (stack_cache_lock); ++ lll_lock (stack_cache_lock, LLL_PRIVATE); + + /* And add to the list of stacks in use. */ + list_add (&pd->list, &stack_used); + +- lll_unlock (stack_cache_lock); ++ lll_unlock (stack_cache_lock, LLL_PRIVATE); + + + /* There might have been a race. Another thread might have +@@ -598,12 +598,12 @@ allocate_stack (const struct pthread_att + mprot_error: + err = errno; + +- lll_lock (stack_cache_lock); ++ lll_lock (stack_cache_lock, LLL_PRIVATE); + + /* Remove the thread from the list. */ + list_del (&pd->list); + +- lll_unlock (stack_cache_lock); ++ lll_unlock (stack_cache_lock, LLL_PRIVATE); + + /* Get rid of the TLS block we allocated. */ + _dl_deallocate_tls (TLS_TPADJ (pd), false); +@@ -699,7 +699,7 @@ void + internal_function + __deallocate_stack (struct pthread *pd) + { +- lll_lock (stack_cache_lock); ++ lll_lock (stack_cache_lock, LLL_PRIVATE); + + /* Remove the thread from the list of threads with user defined + stacks. 
*/ +@@ -715,7 +715,7 @@ __deallocate_stack (struct pthread *pd) + /* Free the memory associated with the ELF TLS. */ + _dl_deallocate_tls (TLS_TPADJ (pd), false); + +- lll_unlock (stack_cache_lock); ++ lll_unlock (stack_cache_lock, LLL_PRIVATE); + } + + +@@ -732,7 +732,7 @@ __make_stacks_executable (void **stack_e + const size_t pagemask = ~(__getpagesize () - 1); + #endif + +- lll_lock (stack_cache_lock); ++ lll_lock (stack_cache_lock, LLL_PRIVATE); + + list_t *runp; + list_for_each (runp, &stack_used) +@@ -761,7 +761,7 @@ __make_stacks_executable (void **stack_e + break; + } + +- lll_unlock (stack_cache_lock); ++ lll_unlock (stack_cache_lock, LLL_PRIVATE); + + return err; + } +@@ -837,7 +837,7 @@ __find_thread_by_id (pid_t tid) + { + struct pthread *result = NULL; + +- lll_lock (stack_cache_lock); ++ lll_lock (stack_cache_lock, LLL_PRIVATE); + + /* Iterate over the list with system-allocated threads first. */ + list_t *runp; +@@ -869,7 +869,7 @@ __find_thread_by_id (pid_t tid) + } + + out: +- lll_unlock (stack_cache_lock); ++ lll_unlock (stack_cache_lock, LLL_PRIVATE); + + return result; + } +@@ -920,7 +920,7 @@ attribute_hidden + __nptl_setxid (struct xid_command *cmdp) + { + int result; +- lll_lock (stack_cache_lock); ++ lll_lock (stack_cache_lock, LLL_PRIVATE); + + __xidcmd = cmdp; + cmdp->cntr = 0; +@@ -966,7 +966,7 @@ __nptl_setxid (struct xid_command *cmdp) + result = -1; + } + +- lll_unlock (stack_cache_lock); ++ lll_unlock (stack_cache_lock, LLL_PRIVATE); + return result; + } + +@@ -995,7 +995,7 @@ void + attribute_hidden + __pthread_init_static_tls (struct link_map *map) + { +- lll_lock (stack_cache_lock); ++ lll_lock (stack_cache_lock, LLL_PRIVATE); + + /* Iterate over the list with system-allocated threads first. */ + list_t *runp; +@@ -1006,7 +1006,7 @@ __pthread_init_static_tls (struct link_m + list_for_each (runp, &__stack_user) + init_one_static_tls (list_entry (runp, struct pthread, list), map); + +- lll_unlock (stack_cache_lock); ++ lll_unlock (stack_cache_lock, LLL_PRIVATE); + } + + +@@ -1014,7 +1014,7 @@ void + attribute_hidden + __wait_lookup_done (void) + { +- lll_lock (stack_cache_lock); ++ lll_lock (stack_cache_lock, LLL_PRIVATE); + + struct pthread *self = THREAD_SELF; + +@@ -1063,5 +1063,5 @@ __wait_lookup_done (void) + while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT); + } + +- lll_unlock (stack_cache_lock); ++ lll_unlock (stack_cache_lock, LLL_PRIVATE); + } +--- libc/nptl/pthread_rwlock_tryrdlock.c.jj 2007-06-04 08:42:05.000000000 +0200 ++++ libc/nptl/pthread_rwlock_tryrdlock.c 2007-07-29 11:48:55.000000000 +0200 +@@ -28,7 +28,7 @@ __pthread_rwlock_tryrdlock (rwlock) + { + int result = EBUSY; + +- lll_mutex_lock (rwlock->__data.__lock); ++ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); + + if (rwlock->__data.__writer == 0 + && (rwlock->__data.__nr_writers_queued == 0 +@@ -43,7 +43,7 @@ __pthread_rwlock_tryrdlock (rwlock) + result = 0; + } + +- lll_mutex_unlock (rwlock->__data.__lock); ++ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); + + return result; + } +--- libc/nptl/pthread_rwlock_trywrlock.c.jj 2007-01-03 11:04:36.000000000 +0100 ++++ libc/nptl/pthread_rwlock_trywrlock.c 2007-07-29 11:48:55.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc. ++/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. 
+ +@@ -28,7 +28,7 @@ __pthread_rwlock_trywrlock (rwlock) + { + int result = EBUSY; + +- lll_mutex_lock (rwlock->__data.__lock); ++ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); + + if (rwlock->__data.__writer == 0 && rwlock->__data.__nr_readers == 0) + { +@@ -36,7 +36,7 @@ __pthread_rwlock_trywrlock (rwlock) + result = 0; + } + +- lll_mutex_unlock (rwlock->__data.__lock); ++ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); + + return result; + } +--- libc/nptl/pthread_getschedparam.c.jj 2007-06-04 08:42:05.000000000 +0200 ++++ libc/nptl/pthread_getschedparam.c 2007-07-29 11:48:55.000000000 +0200 +@@ -38,7 +38,7 @@ __pthread_getschedparam (threadid, polic + + int result = 0; + +- lll_lock (pd->lock); ++ lll_lock (pd->lock, LLL_PRIVATE); + + /* The library is responsible for maintaining the values at all + times. If the user uses a interface other than +@@ -68,7 +68,7 @@ __pthread_getschedparam (threadid, polic + memcpy (param, &pd->schedparam, sizeof (struct sched_param)); + } + +- lll_unlock (pd->lock); ++ lll_unlock (pd->lock, LLL_PRIVATE); + + return result; + } +--- libc/nptl/pthread_barrier_init.c.jj 2007-06-04 08:42:05.000000000 +0200 ++++ libc/nptl/pthread_barrier_init.c 2007-07-29 11:48:55.000000000 +0200 +@@ -40,7 +40,7 @@ pthread_barrier_init (barrier, attr, cou + if (__builtin_expect (count == 0, 0)) + return EINVAL; + +- struct pthread_barrierattr *iattr ++ const struct pthread_barrierattr *iattr + = (attr != NULL + ? iattr = (struct pthread_barrierattr *) attr + : &default_attr); +--- libc/nptl/old_pthread_cond_wait.c.jj 2003-03-21 09:02:07.000000000 +0100 ++++ libc/nptl/old_pthread_cond_wait.c 2007-07-29 11:48:55.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc. ++/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + +@@ -34,7 +34,7 @@ __pthread_cond_wait_2_0 (cond, mutex) + { + pthread_cond_t *newcond; + +-#if LLL_MUTEX_LOCK_INITIALIZER == 0 ++#if LLL_LOCK_INITIALIZER == 0 + newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1); + if (newcond == NULL) + return ENOMEM; +--- libc/nptl/pthread_cond_destroy.c.jj 2007-06-08 09:13:50.000000000 +0200 ++++ libc/nptl/pthread_cond_destroy.c 2007-07-29 11:48:55.000000000 +0200 +@@ -27,13 +27,13 @@ __pthread_cond_destroy (cond) + pthread_cond_t *cond; + { + /* Make sure we are alone. */ +- lll_mutex_lock (cond->__data.__lock); ++ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + + if (cond->__data.__total_seq > cond->__data.__wakeup_seq) + { + /* If there are still some waiters which have not been + woken up, this is an application bug. */ +- lll_mutex_unlock (cond->__data.__lock); ++ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + return EBUSY; + } + +@@ -66,13 +66,13 @@ __pthread_cond_destroy (cond) + + do + { +- lll_mutex_unlock (cond->__data.__lock); ++ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + + lll_futex_wait (&cond->__data.__nwaiters, nwaiters, + // XYZ check mutex flag + LLL_SHARED); + +- lll_mutex_lock (cond->__data.__lock); ++ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + + nwaiters = cond->__data.__nwaiters; + } +--- libc/nptl/pthread_rwlock_rdlock.c.jj 2007-07-24 10:50:54.000000000 +0200 ++++ libc/nptl/pthread_rwlock_rdlock.c 2007-07-29 11:48:55.000000000 +0200 +@@ -32,7 +32,7 @@ __pthread_rwlock_rdlock (rwlock) + int result = 0; + + /* Make sure we are along. 
*/ +- lll_mutex_lock (rwlock->__data.__lock); ++ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); + + while (1) + { +@@ -74,21 +74,20 @@ __pthread_rwlock_rdlock (rwlock) + int waitval = rwlock->__data.__readers_wakeup; + + /* Free the lock. */ +- lll_mutex_unlock (rwlock->__data.__lock); ++ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); + + /* Wait for the writer to finish. */ +- lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval, +- // XYZ check mutex flag +- LLL_SHARED); ++ lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval, ++ rwlock->__data.__shared); + + /* Get the lock. */ +- lll_mutex_lock (rwlock->__data.__lock); ++ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); + + --rwlock->__data.__nr_readers_queued; + } + + /* We are done, free the lock. */ +- lll_mutex_unlock (rwlock->__data.__lock); ++ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); + + return result; + } +--- libc/nptl/pthread_create.c.jj 2007-07-29 11:45:14.000000000 +0200 ++++ libc/nptl/pthread_create.c 2007-07-29 11:48:55.000000000 +0200 +@@ -63,7 +63,7 @@ __find_in_stack_list (pd) + list_t *entry; + struct pthread *result = NULL; + +- lll_lock (stack_cache_lock); ++ lll_lock (stack_cache_lock, LLL_PRIVATE); + + list_for_each (entry, &stack_used) + { +@@ -90,7 +90,7 @@ __find_in_stack_list (pd) + } + } + +- lll_unlock (stack_cache_lock); ++ lll_unlock (stack_cache_lock, LLL_PRIVATE); + + return result; + } +@@ -284,9 +284,9 @@ start_thread (void *arg) + int oldtype = CANCEL_ASYNC (); + + /* Get the lock the parent locked to force synchronization. */ +- lll_lock (pd->lock); ++ lll_lock (pd->lock, LLL_PRIVATE); + /* And give it up right away. */ +- lll_unlock (pd->lock); ++ lll_unlock (pd->lock, LLL_PRIVATE); + + CANCEL_RESET (oldtype); + } +@@ -370,7 +370,7 @@ start_thread (void *arg) + # endif + this->__list.__next = NULL; + +- lll_robust_mutex_dead (this->__lock); ++ lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED); + } + while (robust != (void *) &pd->robust_head); + } +--- libc/nptl/pthread_rwlock_wrlock.c.jj 2007-07-24 10:50:54.000000000 +0200 ++++ libc/nptl/pthread_rwlock_wrlock.c 2007-07-29 11:48:55.000000000 +0200 +@@ -32,7 +32,7 @@ __pthread_rwlock_wrlock (rwlock) + int result = 0; + + /* Make sure we are along. */ +- lll_mutex_lock (rwlock->__data.__lock); ++ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); + + while (1) + { +@@ -65,22 +65,21 @@ __pthread_rwlock_wrlock (rwlock) + int waitval = rwlock->__data.__writer_wakeup; + + /* Free the lock. */ +- lll_mutex_unlock (rwlock->__data.__lock); ++ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); + + /* Wait for the writer or reader(s) to finish. */ + lll_futex_wait (&rwlock->__data.__writer_wakeup, waitval, +- // XYZ check mutex flag +- LLL_SHARED); ++ rwlock->__data.__shared); + + /* Get the lock. */ +- lll_mutex_lock (rwlock->__data.__lock); ++ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); + + /* To start over again, remove the thread from the writer list. */ + --rwlock->__data.__nr_writers_queued; + } + + /* We are done, free the lock. 
*/ +- lll_mutex_unlock (rwlock->__data.__lock); ++ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); + + return result; + } +--- libc/nptl/pthread_rwlock_unlock.c.jj 2007-06-08 09:13:50.000000000 +0200 ++++ libc/nptl/pthread_rwlock_unlock.c 2007-07-29 11:48:55.000000000 +0200 +@@ -27,7 +27,7 @@ + int + __pthread_rwlock_unlock (pthread_rwlock_t *rwlock) + { +- lll_mutex_lock (rwlock->__data.__lock); ++ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); + if (rwlock->__data.__writer) + rwlock->__data.__writer = 0; + else +@@ -37,23 +37,21 @@ __pthread_rwlock_unlock (pthread_rwlock_ + if (rwlock->__data.__nr_writers_queued) + { + ++rwlock->__data.__writer_wakeup; +- lll_mutex_unlock (rwlock->__data.__lock); ++ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); + lll_futex_wake (&rwlock->__data.__writer_wakeup, 1, +- // XYZ check mutex flag +- LLL_SHARED); ++ rwlock->__data.__shared); + return 0; + } + else if (rwlock->__data.__nr_readers_queued) + { + ++rwlock->__data.__readers_wakeup; +- lll_mutex_unlock (rwlock->__data.__lock); ++ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); + lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX, +- // XYZ check mutex flag +- LLL_SHARED); ++ rwlock->__data.__shared); + return 0; + } + } +- lll_mutex_unlock (rwlock->__data.__lock); ++ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); + return 0; + } + +--- libc/nptl/pthread_rwlock_timedwrlock.c.jj 2007-06-08 09:13:50.000000000 +0200 ++++ libc/nptl/pthread_rwlock_timedwrlock.c 2007-07-29 11:48:55.000000000 +0200 +@@ -33,7 +33,7 @@ pthread_rwlock_timedwrlock (rwlock, abst + int result = 0; + + /* Make sure we are along. */ +- lll_mutex_lock (rwlock->__data.__lock); ++ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); + + while (1) + { +@@ -100,16 +100,14 @@ pthread_rwlock_timedwrlock (rwlock, abst + int waitval = rwlock->__data.__writer_wakeup; + + /* Free the lock. */ +- lll_mutex_unlock (rwlock->__data.__lock); ++ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); + + /* Wait for the writer or reader(s) to finish. */ + err = lll_futex_timed_wait (&rwlock->__data.__writer_wakeup, +- waitval, &rt, +- // XYZ check mutex flag +- LLL_SHARED); ++ waitval, &rt, rwlock->__data.__shared); + + /* Get the lock. */ +- lll_mutex_lock (rwlock->__data.__lock); ++ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); + + /* To start over again, remove the thread from the writer list. */ + --rwlock->__data.__nr_writers_queued; +@@ -123,7 +121,7 @@ pthread_rwlock_timedwrlock (rwlock, abst + } + + /* We are done, free the lock. */ +- lll_mutex_unlock (rwlock->__data.__lock); ++ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); + + return result; + } +--- libc/nptl/old_pthread_cond_broadcast.c.jj 2003-03-21 09:02:07.000000000 +0100 ++++ libc/nptl/old_pthread_cond_broadcast.c 2007-07-29 11:48:55.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc. ++/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. 
+ +@@ -33,7 +33,7 @@ __pthread_cond_broadcast_2_0 (cond) + { + pthread_cond_t *newcond; + +-#if LLL_MUTEX_LOCK_INITIALIZER == 0 ++#if LLL_LOCK_INITIALIZER == 0 + newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1); + if (newcond == NULL) + return ENOMEM; +--- libc/nptl/pthread_cond_wait.c.jj 2007-06-08 09:13:50.000000000 +0200 ++++ libc/nptl/pthread_cond_wait.c 2007-07-29 11:48:55.000000000 +0200 +@@ -45,7 +45,7 @@ __condvar_cleanup (void *arg) + unsigned int destroying; + + /* We are going to modify shared data. */ +- lll_mutex_lock (cbuffer->cond->__data.__lock); ++ lll_lock (cbuffer->cond->__data.__lock, /* XYZ */ LLL_SHARED); + + if (cbuffer->bc_seq == cbuffer->cond->__data.__broadcast_seq) + { +@@ -78,7 +78,7 @@ __condvar_cleanup (void *arg) + } + + /* We are done. */ +- lll_mutex_unlock (cbuffer->cond->__data.__lock); ++ lll_unlock (cbuffer->cond->__data.__lock, /* XYZ */ LLL_SHARED); + + /* Wake everybody to make sure no condvar signal gets lost. */ + if (! destroying) +@@ -102,13 +102,13 @@ __pthread_cond_wait (cond, mutex) + int err; + + /* Make sure we are along. */ +- lll_mutex_lock (cond->__data.__lock); ++ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + + /* Now we can release the mutex. */ + err = __pthread_mutex_unlock_usercnt (mutex, 0); + if (__builtin_expect (err, 0)) + { +- lll_mutex_unlock (cond->__data.__lock); ++ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + return err; + } + +@@ -144,7 +144,7 @@ __pthread_cond_wait (cond, mutex) + unsigned int futex_val = cond->__data.__futex; + + /* Prepare to wait. Release the condvar futex. */ +- lll_mutex_unlock (cond->__data.__lock); ++ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + + /* Enable asynchronous cancellation. Required by the standard. */ + cbuffer.oldtype = __pthread_enable_asynccancel (); +@@ -158,7 +158,7 @@ __pthread_cond_wait (cond, mutex) + __pthread_disable_asynccancel (cbuffer.oldtype); + + /* We are going to look at shared data again, so get the lock. */ +- lll_mutex_lock (cond->__data.__lock); ++ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + + /* If a broadcast happened, we are done. */ + if (cbuffer.bc_seq != cond->__data.__broadcast_seq) +@@ -186,7 +186,7 @@ __pthread_cond_wait (cond, mutex) + LLL_SHARED); + + /* We are done with the condvar. */ +- lll_mutex_unlock (cond->__data.__lock); ++ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + + /* The cancellation handling is back to normal, remove the handler. */ + __pthread_cleanup_pop (&buffer, 0); +--- libc/nptl/pthread_cond_init.c.jj 2007-06-04 08:42:05.000000000 +0200 ++++ libc/nptl/pthread_cond_init.c 2007-07-29 11:48:55.000000000 +0200 +@@ -28,7 +28,7 @@ __pthread_cond_init (cond, cond_attr) + { + struct pthread_condattr *icond_attr = (struct pthread_condattr *) cond_attr; + +- cond->__data.__lock = LLL_MUTEX_LOCK_INITIALIZER; ++ cond->__data.__lock = LLL_LOCK_INITIALIZER; + cond->__data.__futex = 0; + cond->__data.__nwaiters = (icond_attr != NULL + && ((icond_attr->value +--- libc/nptl/pthread_attr_init.c.jj 2004-03-19 00:56:31.000000000 +0100 ++++ libc/nptl/pthread_attr_init.c 2007-07-29 11:48:55.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. ++/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. 
+ +@@ -27,7 +27,7 @@ + + + struct pthread_attr *__attr_list; +-lll_lock_t __attr_list_lock = LLL_LOCK_INITIALIZER; ++int __attr_list_lock = LLL_LOCK_INITIALIZER; + + + int +--- libc/nptl/pthread_setschedparam.c.jj 2007-06-04 08:42:05.000000000 +0200 ++++ libc/nptl/pthread_setschedparam.c 2007-07-29 11:48:55.000000000 +0200 +@@ -39,7 +39,7 @@ __pthread_setschedparam (threadid, polic + + int result = 0; + +- lll_lock (pd->lock); ++ lll_lock (pd->lock, LLL_PRIVATE); + + struct sched_param p; + const struct sched_param *orig_param = param; +@@ -67,7 +67,7 @@ __pthread_setschedparam (threadid, polic + pd->flags |= ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET; + } + +- lll_unlock (pd->lock); ++ lll_unlock (pd->lock, LLL_PRIVATE); + + return result; + } +--- libc/nptl/pthread_cond_broadcast.c.jj 2007-06-08 09:13:50.000000000 +0200 ++++ libc/nptl/pthread_cond_broadcast.c 2007-07-29 11:48:55.000000000 +0200 +@@ -33,7 +33,7 @@ __pthread_cond_broadcast (cond) + pthread_cond_t *cond; + { + /* Make sure we are alone. */ +- lll_mutex_lock (cond->__data.__lock); ++ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + + /* Are there any waiters to be woken? */ + if (cond->__data.__total_seq > cond->__data.__wakeup_seq) +@@ -47,7 +47,7 @@ __pthread_cond_broadcast (cond) + ++cond->__data.__broadcast_seq; + + /* We are done. */ +- lll_mutex_unlock (cond->__data.__lock); ++ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + + /* Do not use requeue for pshared condvars. */ + if (cond->__data.__mutex == (void *) ~0l) +@@ -79,7 +79,7 @@ __pthread_cond_broadcast (cond) + } + + /* We are done. */ +- lll_mutex_unlock (cond->__data.__lock); ++ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + + return 0; + } +--- libc/nptl/pthread_barrier_destroy.c.jj 2002-11-26 23:49:50.000000000 +0100 ++++ libc/nptl/pthread_barrier_destroy.c 2007-07-29 11:48:55.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2002 Free Software Foundation, Inc. ++/* Copyright (C) 2002, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + +@@ -31,14 +31,14 @@ pthread_barrier_destroy (barrier) + + ibarrier = (struct pthread_barrier *) barrier; + +- lll_lock (ibarrier->lock); ++ lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG); + + if (__builtin_expect (ibarrier->left == ibarrier->init_count, 1)) + /* The barrier is not used anymore. */ + result = 0; + else + /* Still used, return with an error. */ +- lll_unlock (ibarrier->lock); ++ lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG); + + return result; + } +--- libc/nptl/sem_close.c.jj 2003-05-17 22:49:02.000000000 +0200 ++++ libc/nptl/sem_close.c 2007-07-29 11:48:55.000000000 +0200 +@@ -47,7 +47,7 @@ sem_close (sem) + int result = 0; + + /* Get the lock. */ +- lll_lock (__sem_mappings_lock); ++ lll_lock (__sem_mappings_lock, LLL_PRIVATE); + + /* Locate the entry for the mapping the caller provided. */ + rec = NULL; +@@ -75,7 +75,7 @@ sem_close (sem) + } + + /* Release the lock. */ +- lll_unlock (__sem_mappings_lock); ++ lll_unlock (__sem_mappings_lock, LLL_PRIVATE); + + return result; + } +--- libc/nptl/semaphoreP.h.jj 2007-06-04 08:42:05.000000000 +0200 ++++ libc/nptl/semaphoreP.h 2007-07-29 11:48:55.000000000 +0200 +@@ -48,7 +48,7 @@ extern pthread_once_t __namedsem_once at + extern void *__sem_mappings attribute_hidden; + + /* Lock to protect the search tree. 
*/ +-extern lll_lock_t __sem_mappings_lock attribute_hidden; ++extern int __sem_mappings_lock attribute_hidden; + + + /* Initializer for mountpoint. */ +--- libc/nptl/pthread_once.c.jj 2006-10-28 07:09:12.000000000 +0200 ++++ libc/nptl/pthread_once.c 2007-07-29 11:48:55.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2002 Free Software Foundation, Inc. ++/* Copyright (C) 2002, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + +@@ -22,7 +22,7 @@ + + + +-static lll_lock_t once_lock = LLL_LOCK_INITIALIZER; ++static int once_lock = LLL_LOCK_INITIALIZER; + + + int +@@ -35,7 +35,7 @@ __pthread_once (once_control, init_routi + object. */ + if (*once_control == PTHREAD_ONCE_INIT) + { +- lll_lock (once_lock); ++ lll_lock (once_lock, LLL_PRIVATE); + + /* XXX This implementation is not complete. It doesn't take + cancelation and fork into account. */ +@@ -46,7 +46,7 @@ __pthread_once (once_control, init_routi + *once_control = !PTHREAD_ONCE_INIT; + } + +- lll_unlock (once_lock); ++ lll_unlock (once_lock, LLL_PRIVATE); + } + + return 0; +--- libc/nptl/pthread_rwlock_timedrdlock.c.jj 2007-06-08 09:13:50.000000000 +0200 ++++ libc/nptl/pthread_rwlock_timedrdlock.c 2007-07-29 11:48:55.000000000 +0200 +@@ -33,7 +33,7 @@ pthread_rwlock_timedrdlock (rwlock, abst + int result = 0; + + /* Make sure we are along. */ +- lll_mutex_lock(rwlock->__data.__lock); ++ lll_lock(rwlock->__data.__lock, rwlock->__data.__shared); + + while (1) + { +@@ -110,16 +110,14 @@ pthread_rwlock_timedrdlock (rwlock, abst + int waitval = rwlock->__data.__readers_wakeup; + + /* Free the lock. */ +- lll_mutex_unlock (rwlock->__data.__lock); ++ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); + + /* Wait for the writer to finish. */ + err = lll_futex_timed_wait (&rwlock->__data.__readers_wakeup, +- waitval, &rt, +- // XYZ check mutex flag +- LLL_SHARED); ++ waitval, &rt, rwlock->__data.__shared); + + /* Get the lock. */ +- lll_mutex_lock (rwlock->__data.__lock); ++ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); + + --rwlock->__data.__nr_readers_queued; + +@@ -133,7 +131,7 @@ pthread_rwlock_timedrdlock (rwlock, abst + } + + /* We are done, free the lock. */ +- lll_mutex_unlock (rwlock->__data.__lock); ++ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); + + return result; + } +--- libc/nptl/pthreadP.h.jj 2007-06-29 10:19:56.000000000 +0200 ++++ libc/nptl/pthreadP.h 2007-07-29 11:48:55.000000000 +0200 +@@ -151,7 +151,7 @@ hidden_proto (__stack_user) + + /* Attribute handling. */ + extern struct pthread_attr *__attr_list attribute_hidden; +-extern lll_lock_t __attr_list_lock attribute_hidden; ++extern int __attr_list_lock attribute_hidden; + + /* First available RT signal. */ + extern int __current_sigrtmin attribute_hidden; +--- libc/nptl/pthread_cond_timedwait.c.jj 2007-06-08 09:13:50.000000000 +0200 ++++ libc/nptl/pthread_cond_timedwait.c 2007-07-29 11:48:55.000000000 +0200 +@@ -54,13 +54,13 @@ __pthread_cond_timedwait (cond, mutex, a + return EINVAL; + + /* Make sure we are along. */ +- lll_mutex_lock (cond->__data.__lock); ++ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + + /* Now we can release the mutex. 
*/ + int err = __pthread_mutex_unlock_usercnt (mutex, 0); + if (err) + { +- lll_mutex_unlock (cond->__data.__lock); ++ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + return err; + } + +@@ -146,7 +146,7 @@ __pthread_cond_timedwait (cond, mutex, a + unsigned int futex_val = cond->__data.__futex; + + /* Prepare to wait. Release the condvar futex. */ +- lll_mutex_unlock (cond->__data.__lock); ++ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + + /* Enable asynchronous cancellation. Required by the standard. */ + cbuffer.oldtype = __pthread_enable_asynccancel (); +@@ -161,7 +161,7 @@ __pthread_cond_timedwait (cond, mutex, a + __pthread_disable_asynccancel (cbuffer.oldtype); + + /* We are going to look at shared data again, so get the lock. */ +- lll_mutex_lock(cond->__data.__lock); ++ lll_lock(cond->__data.__lock, /* XYZ */ LLL_SHARED); + + /* If a broadcast happened, we are done. */ + if (cbuffer.bc_seq != cond->__data.__broadcast_seq) +@@ -203,7 +203,7 @@ __pthread_cond_timedwait (cond, mutex, a + LLL_SHARED); + + /* We are done with the condvar. */ +- lll_mutex_unlock (cond->__data.__lock); ++ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + + /* The cancellation handling is back to normal, remove the handler. */ + __pthread_cleanup_pop (&buffer, 0); +--- libc/nptl/pthread_setschedprio.c.jj 2007-06-04 08:42:05.000000000 +0200 ++++ libc/nptl/pthread_setschedprio.c 2007-07-29 11:48:55.000000000 +0200 +@@ -41,7 +41,7 @@ pthread_setschedprio (threadid, prio) + struct sched_param param; + param.sched_priority = prio; + +- lll_lock (pd->lock); ++ lll_lock (pd->lock, LLL_PRIVATE); + + /* If the thread should have higher priority because of some + PTHREAD_PRIO_PROTECT mutexes it holds, adjust the priority. */ +@@ -60,7 +60,7 @@ pthread_setschedprio (threadid, prio) + pd->flags |= ATTR_FLAG_SCHED_SET; + } + +- lll_unlock (pd->lock); ++ lll_unlock (pd->lock, LLL_PRIVATE); + + return result; + } +--- libc/nptl/pthread_cond_signal.c.jj 2007-06-08 09:13:50.000000000 +0200 ++++ libc/nptl/pthread_cond_signal.c 2007-07-29 11:48:55.000000000 +0200 +@@ -33,7 +33,7 @@ __pthread_cond_signal (cond) + pthread_cond_t *cond; + { + /* Make sure we are alone. */ +- lll_mutex_lock (cond->__data.__lock); ++ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + + /* Are there any waiters to be woken? */ + if (cond->__data.__total_seq > cond->__data.__wakeup_seq) +@@ -56,7 +56,7 @@ __pthread_cond_signal (cond) + } + + /* We are done. */ +- lll_mutex_unlock (cond->__data.__lock); ++ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); + + return 0; + } +--- libc/nptl/pthread_barrier_wait.c.jj 2007-06-08 09:13:50.000000000 +0200 ++++ libc/nptl/pthread_barrier_wait.c 2007-07-29 11:48:55.000000000 +0200 +@@ -32,7 +32,7 @@ pthread_barrier_wait (barrier) + int result = 0; + + /* Make sure we are alone. */ +- lll_lock (ibarrier->lock); ++ lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG); + + /* One more arrival. */ + --ibarrier->left; +@@ -46,8 +46,7 @@ pthread_barrier_wait (barrier) + + /* Wake up everybody. */ + lll_futex_wake (&ibarrier->curr_event, INT_MAX, +- // XYZ check mutex flag +- LLL_SHARED); ++ ibarrier->private ^ FUTEX_PRIVATE_FLAG); + + /* This is the thread which finished the serialization. */ + result = PTHREAD_BARRIER_SERIAL_THREAD; +@@ -59,13 +58,12 @@ pthread_barrier_wait (barrier) + unsigned int event = ibarrier->curr_event; + + /* Before suspending, make the barrier available to others. 
*/ +- lll_unlock (ibarrier->lock); ++ lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG); + + /* Wait for the event counter of the barrier to change. */ + do + lll_futex_wait (&ibarrier->curr_event, event, +- // XYZ check mutex flag +- LLL_SHARED); ++ ibarrier->private ^ FUTEX_PRIVATE_FLAG); + while (event == ibarrier->curr_event); + } + +@@ -75,7 +73,7 @@ pthread_barrier_wait (barrier) + /* If this was the last woken thread, unlock. */ + if (atomic_increment_val (&ibarrier->left) == init_count) + /* We are done. */ +- lll_unlock (ibarrier->lock); ++ lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG); + + return result; + } +--- libc/nptl/pthread_mutex_lock.c.jj 2007-06-29 10:19:56.000000000 +0200 ++++ libc/nptl/pthread_mutex_lock.c 2007-07-29 11:48:55.000000000 +0200 +@@ -27,9 +27,9 @@ + + + #ifndef LLL_MUTEX_LOCK +-# define LLL_MUTEX_LOCK(mutex) lll_mutex_lock (mutex) +-# define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_trylock (mutex) +-# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_lock (mutex, id) ++# define LLL_MUTEX_LOCK(mutex) lll_lock (mutex, /* XYZ */ LLL_SHARED) ++# define LLL_MUTEX_TRYLOCK(mutex) lll_trylock (mutex) ++# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_lock (mutex, id, /* XYZ */ LLL_SHARED) + #endif + + +@@ -198,7 +198,7 @@ __pthread_mutex_lock (mutex) + { + /* This mutex is now not recoverable. */ + mutex->__data.__count = 0; +- lll_mutex_unlock (mutex->__data.__lock); ++ lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED); + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); + return ENOTRECOVERABLE; + } +--- libc/nptl/pthread_mutex_trylock.c.jj 2007-06-29 10:19:56.000000000 +0200 ++++ libc/nptl/pthread_mutex_trylock.c 2007-07-29 11:48:55.000000000 +0200 +@@ -48,7 +48,7 @@ __pthread_mutex_trylock (mutex) + return 0; + } + +- if (lll_mutex_trylock (mutex->__data.__lock) == 0) ++ if (lll_trylock (mutex->__data.__lock) == 0) + { + /* Record the ownership. */ + mutex->__data.__owner = id; +@@ -62,7 +62,7 @@ __pthread_mutex_trylock (mutex) + case PTHREAD_MUTEX_TIMED_NP: + case PTHREAD_MUTEX_ADAPTIVE_NP: + /* Normal mutex. */ +- if (lll_mutex_trylock (mutex->__data.__lock) != 0) ++ if (lll_trylock (mutex->__data.__lock) != 0) + break; + + /* Record the ownership. */ +@@ -140,7 +140,7 @@ __pthread_mutex_trylock (mutex) + } + } + +- oldval = lll_robust_mutex_trylock (mutex->__data.__lock, id); ++ oldval = lll_robust_trylock (mutex->__data.__lock, id); + if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0) + { + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); +@@ -154,7 +154,7 @@ __pthread_mutex_trylock (mutex) + /* This mutex is now not recoverable. */ + mutex->__data.__count = 0; + if (oldval == id) +- lll_mutex_unlock (mutex->__data.__lock); ++ lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED); + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); + return ENOTRECOVERABLE; + } +--- libc/nptl/pthread_getattr_np.c.jj 2007-06-29 10:19:56.000000000 +0200 ++++ libc/nptl/pthread_getattr_np.c 2007-07-29 11:48:55.000000000 +0200 +@@ -39,7 +39,7 @@ pthread_getattr_np (thread_id, attr) + struct pthread_attr *iattr = (struct pthread_attr *) attr; + int ret = 0; + +- lll_lock (thread->lock); ++ lll_lock (thread->lock, LLL_PRIVATE); + + /* The thread library is responsible for keeping the values in the + thread desriptor up-to-date in case the user changes them. 
*/ +@@ -173,7 +173,7 @@ pthread_getattr_np (thread_id, attr) + } + } + +- lll_unlock (thread->lock); ++ lll_unlock (thread->lock, LLL_PRIVATE); + + return ret; + } +--- libc/nptl/sysdeps/pthread/createthread.c.jj 2006-09-05 19:13:14.000000000 +0200 ++++ libc/nptl/sysdeps/pthread/createthread.c 2007-07-29 11:48:55.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc. ++/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + +@@ -60,7 +60,7 @@ do_clone (struct pthread *pd, const stru + /* We Make sure the thread does not run far by forcing it to get a + lock. We lock it here too so that the new thread cannot continue + until we tell it to. */ +- lll_lock (pd->lock); ++ lll_lock (pd->lock, LLL_PRIVATE); + + /* One more thread. We cannot have the thread do this itself, since it + might exist but not have been scheduled yet by the time we've returned +@@ -223,7 +223,7 @@ create_thread (struct pthread *pd, const + __nptl_create_event (); + + /* And finally restart the new thread. */ +- lll_unlock (pd->lock); ++ lll_unlock (pd->lock, LLL_PRIVATE); + } + + return res; +@@ -250,7 +250,7 @@ create_thread (struct pthread *pd, const + + if (res == 0 && stopped) + /* And finally restart the new thread. */ +- lll_unlock (pd->lock); ++ lll_unlock (pd->lock, LLL_PRIVATE); + + return res; + } +--- libc/nptl/sysdeps/pthread/bits/stdio-lock.h.jj 2007-07-19 19:46:48.000000000 +0200 ++++ libc/nptl/sysdeps/pthread/bits/stdio-lock.h 2007-07-31 12:40:13.000000000 +0200 +@@ -42,7 +42,7 @@ typedef struct { int lock; int cnt; void + void *__self = THREAD_SELF; \ + if ((_name).owner != __self) \ + { \ +- lll_lock ((_name).lock); \ ++ lll_lock ((_name).lock, LLL_PRIVATE); \ + (_name).owner = __self; \ + } \ + ++(_name).cnt; \ +@@ -72,7 +72,7 @@ typedef struct { int lock; int cnt; void + if (--(_name).cnt == 0) \ + { \ + (_name).owner = NULL; \ +- lll_unlock ((_name).lock); \ ++ lll_unlock ((_name).lock, LLL_PRIVATE); \ + } \ + } while (0) + +--- libc/nptl/sysdeps/pthread/bits/libc-lock.h.jj 2007-03-21 21:22:17.000000000 +0100 ++++ libc/nptl/sysdeps/pthread/bits/libc-lock.h 2007-07-29 11:48:55.000000000 +0200 +@@ -228,7 +228,7 @@ typedef pthread_key_t __libc_key_t; + /* Lock the named lock variable. */ + #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread) + # define __libc_lock_lock(NAME) \ +- ({ lll_lock (NAME); 0; }) ++ ({ lll_lock (NAME, LLL_PRIVATE); 0; }) + #else + # define __libc_lock_lock(NAME) \ + __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0) +@@ -245,7 +245,7 @@ typedef pthread_key_t __libc_key_t; + void *self = THREAD_SELF; \ + if ((NAME).owner != self) \ + { \ +- lll_lock ((NAME).lock); \ ++ lll_lock ((NAME).lock, LLL_PRIVATE); \ + (NAME).owner = self; \ + } \ + ++(NAME).cnt; \ +@@ -299,7 +299,7 @@ typedef pthread_key_t __libc_key_t; + /* Unlock the named lock variable. 
*/ + #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread) + # define __libc_lock_unlock(NAME) \ +- lll_unlock (NAME) ++ lll_unlock (NAME, LLL_PRIVATE) + #else + # define __libc_lock_unlock(NAME) \ + __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0) +@@ -315,7 +315,7 @@ typedef pthread_key_t __libc_key_t; + if (--(NAME).cnt == 0) \ + { \ + (NAME).owner = NULL; \ +- lll_unlock ((NAME).lock); \ ++ lll_unlock ((NAME).lock, LLL_PRIVATE); \ + } \ + } while (0) + #else +--- libc/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h.jj 2007-07-29 11:45:14.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h 2007-07-30 23:01:58.000000000 +0200 +@@ -70,9 +70,6 @@ + #endif + + +-/* Initializer for compatibility lock. */ +-#define LLL_MUTEX_LOCK_INITIALIZER (0) +- + #define lll_futex_wait(futexp, val, private) \ + lll_futex_timed_wait (futexp, val, NULL, private) + +@@ -96,7 +93,7 @@ + INTERNAL_SYSCALL_ERROR_P (__ret, __err)? -__ret : __ret; \ + }) + +-#define lll_robust_mutex_dead(futexv) \ ++#define lll_robust_dead(futexv) \ + do \ + { \ + int *__futexp = &(futexv); \ +@@ -132,149 +129,130 @@ + + + static inline int __attribute__((always_inline)) +-__lll_mutex_trylock(int *futex) ++__lll_trylock(int *futex) + { + return atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0; + } +-#define lll_mutex_trylock(lock) __lll_mutex_trylock (&(lock)) ++#define lll_trylock(lock) __lll_trylock (&(lock)) + + + static inline int __attribute__((always_inline)) +-__lll_mutex_cond_trylock(int *futex) ++__lll_cond_trylock(int *futex) + { + return atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0; + } +-#define lll_mutex_cond_trylock(lock) __lll_mutex_cond_trylock (&(lock)) ++#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock)) + + + static inline int __attribute__((always_inline)) +-__lll_robust_mutex_trylock(int *futex, int id) ++__lll_robust_trylock(int *futex, int id) + { + return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0; + } +-#define lll_robust_mutex_trylock(lock, id) \ +- __lll_robust_mutex_trylock (&(lock), id) ++#define lll_robust_trylock(lock, id) \ ++ __lll_robust_trylock (&(lock), id) + +-extern void __lll_lock_wait (int *futex) attribute_hidden; +-extern int __lll_robust_lock_wait (int *futex) attribute_hidden; ++extern void __lll_lock_wait_private (int *futex) attribute_hidden; ++extern void __lll_lock_wait (int *futex, int private) attribute_hidden; ++extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden; + + static inline void __attribute__((always_inline)) +-__lll_mutex_lock(int *futex) ++__lll_lock(int *futex, int private) + { + if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0) +- __lll_lock_wait (futex); ++ { ++ if (__builtin_constant_p (private) && private == LLL_PRIVATE) ++ __lll_lock_wait_private (futex); ++ else ++ __lll_lock_wait (futex, private); ++ } + } +-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex)) ++#define lll_lock(futex, private) __lll_lock (&(futex), private) + + + static inline int __attribute__ ((always_inline)) +-__lll_robust_mutex_lock (int *futex, int id) ++__lll_robust_lock (int *futex, int id, int private) + { + int result = 0; + if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0) +- result = __lll_robust_lock_wait (futex); ++ result = __lll_robust_lock_wait (futex, private); + return result; + } +-#define lll_robust_mutex_lock(futex, id) \ +- __lll_robust_mutex_lock (&(futex), id) ++#define lll_robust_lock(futex, id, private) \ ++ __lll_robust_lock 
(&(futex), id, private) + + + static inline void __attribute__ ((always_inline)) +-__lll_mutex_cond_lock (int *futex) ++__lll_cond_lock (int *futex, int private) + { + if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0) +- __lll_lock_wait (futex); ++ __lll_lock_wait (futex, private); + } +-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex)) ++#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private) + + +-#define lll_robust_mutex_cond_lock(futex, id) \ +- __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS) ++#define lll_robust_cond_lock(futex, id, private) \ ++ __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private) + + +-extern int __lll_timedlock_wait (int *futex, const struct timespec *) +- attribute_hidden; +-extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *) +- attribute_hidden; ++extern int __lll_timedlock_wait (int *futex, const struct timespec *, ++ int private) attribute_hidden; ++extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *, ++ int private) attribute_hidden; + + static inline int __attribute__ ((always_inline)) +-__lll_mutex_timedlock (int *futex, const struct timespec *abstime) ++__lll_timedlock (int *futex, const struct timespec *abstime, int private) + { + int result = 0; + if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0) +- result = __lll_timedlock_wait (futex, abstime); ++ result = __lll_timedlock_wait (futex, abstime, private); + return result; + } +-#define lll_mutex_timedlock(futex, abstime) \ +- __lll_mutex_timedlock (&(futex), abstime) ++#define lll_timedlock(futex, abstime, private) \ ++ __lll_timedlock (&(futex), abstime, private) + + + static inline int __attribute__ ((always_inline)) +-__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime, +- int id) ++__lll_robust_timedlock (int *futex, const struct timespec *abstime, ++ int id, int private) + { + int result = 0; + if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0) + result = __lll_robust_timedlock_wait (futex, abstime); + return result; + } +-#define lll_robust_mutex_timedlock(futex, abstime, id) \ +- __lll_robust_mutex_timedlock (&(futex), abstime, id) ++#define lll_robust_timedlock(futex, abstime, id, private) \ ++ __lll_robust_timedlock (&(futex), abstime, id, private) + + + static inline void __attribute__ ((always_inline)) +-__lll_mutex_unlock (int *futex) ++__lll_unlock (int *futex, int private) + { + int val = atomic_exchange_rel (futex, 0); + if (__builtin_expect (val > 1, 0)) +- lll_futex_wake (futex, 1, LLL_SHARED); ++ lll_futex_wake (futex, 1, private); + } +-#define lll_mutex_unlock(futex) __lll_mutex_unlock(&(futex)) ++#define lll_unlock(futex, private) __lll_unlock(&(futex), private) + + + static inline void __attribute__ ((always_inline)) +-__lll_robust_mutex_unlock (int *futex, int mask) ++__lll_robust_unlock (int *futex, int private) + { + int val = atomic_exchange_rel (futex, 0); +- if (__builtin_expect (val & mask, 0)) +- lll_futex_wake (futex, 1, LLL_SHARED); ++ if (__builtin_expect (val & FUTEX_WAITERS, 0)) ++ lll_futex_wake (futex, 1, private); + } +-#define lll_robust_mutex_unlock(futex) \ +- __lll_robust_mutex_unlock(&(futex), FUTEX_WAITERS) +- ++#define lll_robust_unlock(futex, private) \ ++ __lll_robust_unlock(&(futex), private) + +-static inline void __attribute__ ((always_inline)) +-__lll_mutex_unlock_force (int *futex) +-{ +- (void) atomic_exchange_rel (futex, 0); +- lll_futex_wake (futex, 1, LLL_SHARED); +-} +-#define 
lll_mutex_unlock_force(futex) __lll_mutex_unlock_force(&(futex)) + +- +-#define lll_mutex_islocked(futex) \ ++#define lll_islocked(futex) \ + (futex != 0) + +- +-/* Our internal lock implementation is identical to the binary-compatible +- mutex implementation. */ +- +-/* Type for lock object. */ +-typedef int lll_lock_t; +- + /* Initializers for lock. */ + #define LLL_LOCK_INITIALIZER (0) + #define LLL_LOCK_INITIALIZER_LOCKED (1) + +-/* The states of a lock are: +- 0 - untaken +- 1 - taken by one user +- >1 - taken by more users */ +- +-#define lll_trylock(lock) lll_mutex_trylock (lock) +-#define lll_lock(lock) lll_mutex_lock (lock) +-#define lll_unlock(lock) lll_mutex_unlock (lock) +-#define lll_islocked(lock) lll_mutex_islocked (lock) + + /* The kernel notifies a process which uses CLONE_CLEARTID via futex + wakeup when the clone terminates. The memory location contains the +@@ -298,26 +276,4 @@ extern int __lll_timedwait_tid (int *, c + __res; \ + }) + +- +-/* Conditional variable handling. */ +- +-extern void __lll_cond_wait (pthread_cond_t *cond) +- attribute_hidden; +-extern int __lll_cond_timedwait (pthread_cond_t *cond, +- const struct timespec *abstime) +- attribute_hidden; +-extern void __lll_cond_wake (pthread_cond_t *cond) +- attribute_hidden; +-extern void __lll_cond_broadcast (pthread_cond_t *cond) +- attribute_hidden; +- +-#define lll_cond_wait(cond) \ +- __lll_cond_wait (cond) +-#define lll_cond_timedwait(cond, abstime) \ +- __lll_cond_timedwait (cond, abstime) +-#define lll_cond_wake(cond) \ +- __lll_cond_wake (cond) +-#define lll_cond_broadcast(cond) \ +- __lll_cond_broadcast (cond) +- + #endif /* lowlevellock.h */ +--- libc/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h.jj 2007-07-29 11:45:14.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h 2007-07-30 21:50:40.000000000 +0200 +@@ -73,9 +73,6 @@ + /* Delay in spinlock loop. */ + #define BUSY_WAIT_NOP asm ("hint @pause") + +-/* Initializer for compatibility lock. */ +-#define LLL_MUTEX_LOCK_INITIALIZER (0) +- + #define lll_futex_wait(futex, val, private) \ + lll_futex_timed_wait (futex, val, NULL, private) + +@@ -95,12 +92,13 @@ + _r10 == -1 ? 
-_retval : _retval; \ + }) + +-#define lll_robust_mutex_dead(futexv) \ ++#define lll_robust_dead(futexv, private) \ + do \ + { \ + int *__futexp = &(futexv); \ + atomic_or (__futexp, FUTEX_OWNER_DIED); \ +- DO_INLINE_SYSCALL(futex, 3, (long) __futexp, FUTEX_WAKE, 1); \ ++ DO_INLINE_SYSCALL(futex, 3, (long) __futexp, \ ++ __lll_private_flag (FUTEX_WAKE, private), 1); \ + } \ + while (0) + +@@ -123,156 +121,144 @@ while (0) + }) + + +-#define __lll_mutex_trylock(futex) \ ++#define __lll_trylock(futex) \ + (atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0) +-#define lll_mutex_trylock(futex) __lll_mutex_trylock (&(futex)) ++#define lll_trylock(futex) __lll_trylock (&(futex)) + + +-#define __lll_robust_mutex_trylock(futex, id) \ ++#define __lll_robust_trylock(futex, id) \ + (atomic_compare_and_exchange_val_acq (futex, id, 0) != 0) +-#define lll_robust_mutex_trylock(futex, id) \ +- __lll_robust_mutex_trylock (&(futex), id) ++#define lll_robust_trylock(futex, id) \ ++ __lll_robust_trylock (&(futex), id) + + +-#define __lll_mutex_cond_trylock(futex) \ ++#define __lll_cond_trylock(futex) \ + (atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0) +-#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex)) +- +- +-extern void __lll_lock_wait (int *futex) attribute_hidden; +-extern int __lll_robust_lock_wait (int *futex) attribute_hidden; ++#define lll_cond_trylock(futex) __lll_cond_trylock (&(futex)) + + +-#define __lll_mutex_lock(futex) \ +- ((void) ({ \ +- int *__futex = (futex); \ +- if (atomic_compare_and_exchange_bool_acq (__futex, 1, 0) != 0) \ +- __lll_lock_wait (__futex); \ ++extern void __lll_lock_wait_private (int *futex) attribute_hidden; ++extern void __lll_lock_wait (int *futex, int private) attribute_hidden; ++extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden; ++ ++ ++#define __lll_lock(futex, private) \ ++ ((void) ({ \ ++ int *__futex = (futex); \ ++ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, \ ++ 1, 0), 0)) \ ++ { \ ++ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \ ++ __lll_lock_wait_private (__futex); \ ++ else \ ++ __lll_lock_wait (__futex, private); \ + })) +-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex)) ++#define lll_lock(futex, private) __lll_lock (&(futex), private) + + +-#define __lll_robust_mutex_lock(futex, id) \ +- ({ \ +- int *__futex = (futex); \ +- int __val = 0; \ +- \ +- if (atomic_compare_and_exchange_bool_acq (__futex, id, 0) != 0) \ +- __val = __lll_robust_lock_wait (__futex); \ +- __val; \ ++#define __lll_robust_lock(futex, id, private) \ ++ ({ \ ++ int *__futex = (futex); \ ++ int __val = 0; \ ++ \ ++ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \ ++ 0), 0)) \ ++ __val = __lll_robust_lock_wait (__futex, private); \ ++ __val; \ + }) +-#define lll_robust_mutex_lock(futex, id) __lll_robust_mutex_lock (&(futex), id) ++#define lll_robust_lock(futex, id, private) \ ++ __lll_robust_lock (&(futex), id, private) + + +-#define __lll_mutex_cond_lock(futex) \ +- ((void) ({ \ +- int *__futex = (futex); \ +- if (atomic_compare_and_exchange_bool_acq (__futex, 2, 0) != 0) \ +- __lll_lock_wait (__futex); \ ++#define __lll_cond_lock(futex, private) \ ++ ((void) ({ \ ++ int *__futex = (futex); \ ++ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, 2, \ ++ 0), 0)) \ ++ __lll_lock_wait (__futex, private); \ + })) +-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex)) ++#define lll_cond_lock(futex, private) 
__lll_cond_lock (&(futex), private) + + +-#define __lll_robust_mutex_cond_lock(futex, id) \ +- ({ \ +- int *__futex = (futex); \ +- int __val = 0; \ +- int __id = (id) | FUTEX_WAITERS; \ +- \ +- if (atomic_compare_and_exchange_bool_acq (__futex, __id, 0) != 0) \ +- __val = __lll_robust_lock_wait (__futex); \ +- __val; \ ++#define __lll_robust_cond_lock(futex, id, private) \ ++ ({ \ ++ int *__futex = (futex); \ ++ int __val = 0; \ ++ int __id = (id) | FUTEX_WAITERS; \ ++ \ ++ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, \ ++ __id, 0), 0)) \ ++ __val = __lll_robust_lock_wait (__futex, private); \ ++ __val; \ + }) +-#define lll_robust_mutex_cond_lock(futex, id) \ +- __lll_robust_mutex_cond_lock (&(futex), id) +- +- +-extern int __lll_timedlock_wait (int *futex, const struct timespec *) +- attribute_hidden; +-extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *) +- attribute_hidden; ++#define lll_robust_cond_lock(futex, id, private) \ ++ __lll_robust_cond_lock (&(futex), id, private) + + +-#define __lll_mutex_timedlock(futex, abstime) \ +- ({ \ +- int *__futex = (futex); \ +- int __val = 0; \ +- \ +- if (atomic_compare_and_exchange_bool_acq (__futex, 1, 0) != 0) \ +- __val = __lll_timedlock_wait (__futex, abstime); \ +- __val; \ ++extern int __lll_timedlock_wait (int *futex, const struct timespec *, ++ int private) attribute_hidden; ++extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *, ++ int private) attribute_hidden; ++ ++ ++#define __lll_timedlock(futex, abstime, private) \ ++ ({ \ ++ int *__futex = (futex); \ ++ int __val = 0; \ ++ \ ++ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, 1, \ ++ 0), 0)) \ ++ __val = __lll_timedlock_wait (__futex, abstime, private); \ ++ __val; \ + }) +-#define lll_mutex_timedlock(futex, abstime) \ +- __lll_mutex_timedlock (&(futex), abstime) ++#define lll_timedlock(futex, abstime, private) \ ++ __lll_timedlock (&(futex), abstime, private) + + +-#define __lll_robust_mutex_timedlock(futex, abstime, id) \ +- ({ \ +- int *__futex = (futex); \ +- int __val = 0; \ +- \ +- if (atomic_compare_and_exchange_bool_acq (__futex, id, 0) != 0) \ +- __val = __lll_robust_timedlock_wait (__futex, abstime); \ +- __val; \ ++#define __lll_robust_timedlock(futex, abstime, id, private) \ ++ ({ \ ++ int *__futex = (futex); \ ++ int __val = 0; \ ++ \ ++ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \ ++ 0), 0)) \ ++ __val = __lll_robust_timedlock_wait (__futex, abstime, private); \ ++ __val; \ + }) +-#define lll_robust_mutex_timedlock(futex, abstime, id) \ +- __lll_robust_mutex_timedlock (&(futex), abstime, id) +- +- +-#define __lll_mutex_unlock(futex) \ +- ((void) ({ \ +- int *__futex = (futex); \ +- int __val = atomic_exchange_rel (__futex, 0); \ +- \ +- if (__builtin_expect (__val > 1, 0)) \ +- lll_futex_wake (__futex, 1, LLL_SHARED); \ +- })) +-#define lll_mutex_unlock(futex) \ +- __lll_mutex_unlock(&(futex)) ++#define lll_robust_timedlock(futex, abstime, id, private) \ ++ __lll_robust_timedlock (&(futex), abstime, id, private) + + +-#define __lll_robust_mutex_unlock(futex) \ +- ((void) ({ \ +- int *__futex = (futex); \ +- int __val = atomic_exchange_rel (__futex, 0); \ +- \ +- if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \ +- lll_futex_wake (__futex, 1, LLL_SHARED); \ ++#define __lll_unlock(futex, private) \ ++ ((void) ({ \ ++ int *__futex = (futex); \ ++ int __val = atomic_exchange_rel (__futex, 0); \ ++ \ ++ if (__builtin_expect (__val > 1, 0)) \ ++ 
lll_futex_wake (__futex, 1, private); \ + })) +-#define lll_robust_mutex_unlock(futex) \ +- __lll_robust_mutex_unlock(&(futex)) ++#define lll_unlock(futex, private) __lll_unlock(&(futex), private) + + +-#define __lll_mutex_unlock_force(futex) \ +- ((void) ({ \ +- int *__futex = (futex); \ +- (void) atomic_exchange_rel (__futex, 0); \ +- lll_futex_wake (__futex, 1, LLL_SHARED); \ ++#define __lll_robust_unlock(futex, private) \ ++ ((void) ({ \ ++ int *__futex = (futex); \ ++ int __val = atomic_exchange_rel (__futex, 0); \ ++ \ ++ if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \ ++ lll_futex_wake (__futex, 1, private); \ + })) +-#define lll_mutex_unlock_force(futex) \ +- __lll_mutex_unlock_force(&(futex)) ++#define lll_robust_unlock(futex, private) \ ++ __lll_robust_unlock(&(futex), private) + + +-#define lll_mutex_islocked(futex) \ ++#define lll_islocked(futex) \ + (futex != 0) + +- +-/* We have a separate internal lock implementation which is not tied +- to binary compatibility. We can use the lll_mutex_*. */ +- +-/* Type for lock object. */ +-typedef int lll_lock_t; +- + /* Initializers for lock. */ + #define LLL_LOCK_INITIALIZER (0) + #define LLL_LOCK_INITIALIZER_LOCKED (1) + +-#define lll_trylock(futex) lll_mutex_trylock (futex) +-#define lll_lock(futex) lll_mutex_lock (futex) +-#define lll_unlock(futex) lll_mutex_unlock (futex) +-#define lll_islocked(futex) lll_mutex_islocked (futex) +- +- + /* The kernel notifies a process with uses CLONE_CLEARTID via futex + wakeup when the clone terminates. The memory location contains the + thread ID while the clone is running and is reset to zero +@@ -297,26 +283,4 @@ extern int __lll_timedwait_tid (int *, c + __res; \ + }) + +- +-/* Conditional variable handling. */ +- +-extern void __lll_cond_wait (pthread_cond_t *cond) +- attribute_hidden; +-extern int __lll_cond_timedwait (pthread_cond_t *cond, +- const struct timespec *abstime) +- attribute_hidden; +-extern void __lll_cond_wake (pthread_cond_t *cond) +- attribute_hidden; +-extern void __lll_cond_broadcast (pthread_cond_t *cond) +- attribute_hidden; +- +-#define lll_cond_wait(cond) \ +- __lll_cond_wait (cond) +-#define lll_cond_timedwait(cond, abstime) \ +- __lll_cond_timedwait (cond, abstime) +-#define lll_cond_wake(cond) \ +- __lll_cond_wake (cond) +-#define lll_cond_broadcast(cond) \ +- __lll_cond_broadcast (cond) +- + #endif /* lowlevellock.h */ +--- libc/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c.jj 2007-07-29 11:45:14.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c 2007-07-29 11:48:55.000000000 +0200 +@@ -54,7 +54,7 @@ __unregister_atfork (dso_handle) + that there couldn't have been another thread deleting something. + The __unregister_atfork function is only called from the + dlclose() code which itself serializes the operations. */ +- lll_lock (__fork_lock); ++ lll_lock (__fork_lock, LLL_PRIVATE); + + /* We have to create a new list with all the entries we don't remove. */ + struct deleted_handler +@@ -89,7 +89,7 @@ __unregister_atfork (dso_handle) + while (runp != NULL); + + /* Release the lock. */ +- lll_unlock (__fork_lock); ++ lll_unlock (__fork_lock, LLL_PRIVATE); + + /* Walk the list of all entries which have to be deleted. */ + while (deleted != NULL) +--- libc/nptl/sysdeps/unix/sysv/linux/fork.h.jj 2006-05-15 22:19:43.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/fork.h 2007-07-29 11:48:55.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2002, 2003, 2006 Free Software Foundation, Inc. 
++/* Copyright (C) 2002, 2003, 2006, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + +@@ -26,7 +26,7 @@ extern unsigned long int __fork_generati + extern unsigned long int *__fork_generation_pointer attribute_hidden; + + /* Lock to protect allocation and deallocation of fork handlers. */ +-extern lll_lock_t __fork_lock attribute_hidden; ++extern int __fork_lock attribute_hidden; + + /* Elements of the fork handler lists. */ + struct fork_handler +--- libc/nptl/sysdeps/unix/sysv/linux/fork.c.jj 2007-07-29 11:45:14.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/fork.c 2007-07-29 11:48:55.000000000 +0200 +@@ -183,7 +183,7 @@ __libc_fork (void) + } + + /* Initialize the fork lock. */ +- __fork_lock = (lll_lock_t) LLL_LOCK_INITIALIZER; ++ __fork_lock = LLL_LOCK_INITIALIZER; + } + else + { +--- libc/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S.jj 2007-05-24 16:41:25.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S 2007-07-31 12:20:52.000000000 +0200 +@@ -20,19 +20,9 @@ + #include + #include + #include ++#include + + +-#ifndef UP +-# define LOCK lock +-#else +-# define LOCK +-#endif +- +-#define SYS_futex 240 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +-#define FUTEX_PRIVATE_FLAG 128 +- + .comm __fork_generation, 4, 4 + + .text +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S.jj 2007-07-30 18:10:05.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S 2007-07-30 18:08:45.000000000 +0200 +@@ -18,22 +18,11 @@ + 02111-1307 USA. */ + + #include ++#include + #include + #include + + +-#define SYS_gettimeofday __NR_gettimeofday +-#define SYS_futex 240 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +- +-#ifndef UP +-# define LOCK lock +-#else +-# define LOCK +-#endif +- +- + .text + + .globl pthread_rwlock_timedrdlock +@@ -88,7 +77,7 @@ pthread_rwlock_timedrdlock: + /* Get current time. */ + 11: movl %esp, %ebx + xorl %ecx, %ecx +- movl $SYS_gettimeofday, %eax ++ movl $__NR_gettimeofday, %eax + ENTER_KERNEL + + /* Compute relative timeout. */ +@@ -142,11 +131,11 @@ pthread_rwlock_timedrdlock: + cmpl $-ETIMEDOUT, %esi + jne 2b + +-18: movl $ETIMEDOUT, %ecx ++18: movl $ETIMEDOUT, %edx + jmp 9f + + +-5: xorl %ecx, %ecx ++5: xorl %edx, %edx + addl $1, NR_READERS(%ebp) + je 8f + 9: LOCK +@@ -157,7 +146,7 @@ pthread_rwlock_timedrdlock: + #endif + jne 6f + +-7: movl %ecx, %eax ++7: movl %edx, %eax + + addl $8, %esp + popl %ebp +@@ -168,16 +157,17 @@ pthread_rwlock_timedrdlock: + + 1: + #if MUTEX == 0 +- movl %ebp, %ecx ++ movl %ebp, %edx + #else +- leal MUTEX(%ebp), %ecx ++ leal MUTEX(%ebp), %edx + #endif +- call __lll_mutex_lock_wait ++ movl PSHARED(%ebp), %ecx ++ call __lll_lock_wait + jmp 2b + + 14: cmpl %gs:TID, %eax + jne 3b +- movl $EDEADLK, %ecx ++ movl $EDEADLK, %edx + jmp 9b + + 6: +@@ -186,17 +176,18 @@ pthread_rwlock_timedrdlock: + #else + leal MUTEX(%ebp), %eax + #endif +- call __lll_mutex_unlock_wake ++ movl PSHARED(%ebp), %ecx ++ call __lll_unlock_wake + jmp 7b + + /* Overflow. */ + 8: subl $1, NR_READERS(%ebp) +- movl $EAGAIN, %ecx ++ movl $EAGAIN, %edx + jmp 9b + + /* Overflow. 
*/ + 4: subl $1, READERS_QUEUED(%ebp) +- movl $EAGAIN, %ecx ++ movl $EAGAIN, %edx + jmp 9b + + 10: +@@ -205,21 +196,23 @@ pthread_rwlock_timedrdlock: + #else + leal MUTEX(%ebp), %eax + #endif +- call __lll_mutex_unlock_wake ++ movl PSHARED(%ebp), %ecx ++ call __lll_unlock_wake + jmp 11b + + 12: + #if MUTEX == 0 +- movl %ebp, %ecx ++ movl %ebp, %edx + #else +- leal MUTEX(%ebp), %ecx ++ leal MUTEX(%ebp), %edx + #endif +- call __lll_mutex_lock_wait ++ movl PSHARED(%ebp), %ecx ++ call __lll_lock_wait + jmp 13b + + 16: movl $-ETIMEDOUT, %esi + jmp 17b + +-19: movl $EINVAL, %ecx ++19: movl $EINVAL, %edx + jmp 9b + .size pthread_rwlock_timedrdlock,.-pthread_rwlock_timedrdlock +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S 2007-07-30 18:26:35.000000000 +0200 +@@ -18,21 +18,11 @@ + 02111-1307 USA. */ + + #include ++#include + #include + #include + + +-#define SYS_futex 240 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +- +-#ifndef UP +-# define LOCK lock +-#else +-# define LOCK +-#endif +- +- + .text + + .globl __pthread_rwlock_wrlock +@@ -106,7 +96,7 @@ __pthread_rwlock_wrlock: + 13: subl $1, WRITERS_QUEUED(%ebx) + jmp 2b + +-5: xorl %ecx, %ecx ++5: xorl %edx, %edx + movl %gs:TID, %eax + movl %eax, WRITER(%ebx) + 9: LOCK +@@ -118,23 +108,24 @@ __pthread_rwlock_wrlock: + jne 6f + 7: + +- movl %ecx, %eax ++ movl %edx, %eax + popl %ebx + popl %esi + ret + + 1: + #if MUTEX == 0 +- movl %ebx, %ecx ++ movl %ebx, %edx + #else +- leal MUTEX(%ebx), %ecx ++ leal MUTEX(%ebx), %edx + #endif +- call __lll_mutex_lock_wait ++ movl PSHARED(%ebx), %ecx ++ call __lll_lock_wait + jmp 2b + + 14: cmpl %gs:TID , %eax + jne 3b +- movl $EDEADLK, %ecx ++ movl $EDEADLK, %edx + jmp 9b + + 6: +@@ -143,11 +134,12 @@ __pthread_rwlock_wrlock: + #else + leal MUTEX(%ebx), %eax + #endif +- call __lll_mutex_unlock_wake ++ movl PSHARED(%ebx), %ecx ++ call __lll_unlock_wake + jmp 7b + + 4: subl $1, WRITERS_QUEUED(%ebx) +- movl $EAGAIN, %ecx ++ movl $EAGAIN, %edx + jmp 9b + + 10: +@@ -156,16 +148,18 @@ __pthread_rwlock_wrlock: + #else + leal MUTEX(%ebx), %eax + #endif +- call __lll_mutex_unlock_wake ++ movl PSHARED(%ebx), %ecx ++ call __lll_unlock_wake + jmp 11b + + 12: + #if MUTEX == 0 +- movl %ebx, %ecx ++ movl %ebx, %edx + #else +- leal MUTEX(%ebx), %ecx ++ leal MUTEX(%ebx), %edx + #endif +- call __lll_mutex_lock_wait ++ movl PSHARED(%ebx), %ecx ++ call __lll_lock_wait + jmp 13b + .size __pthread_rwlock_wrlock,.-__pthread_rwlock_wrlock + +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S.jj 2006-04-09 04:42:29.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S 2007-07-31 12:37:45.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc. ++/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + +@@ -20,12 +20,7 @@ + #include + #include + #include +- +-#ifndef UP +-# define LOCK lock +-#else +-# define +-#endif ++#include + + .text + +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S.jj 2006-09-05 16:46:43.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S 2007-07-30 15:56:10.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc. ++/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc. 
+ This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + +@@ -19,31 +19,36 @@ + + #include + #include ++#include + #include ++#include + + .text + +-#ifndef LOCK +-# ifdef UP +-# define LOCK +-# else +-# define LOCK lock +-# endif +-#endif +- +-#define SYS_gettimeofday __NR_gettimeofday +-#define SYS_futex 240 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 + #define FUTEX_WAITERS 0x80000000 + #define FUTEX_OWNER_DIED 0x40000000 + ++#ifdef __ASSUME_PRIVATE_FUTEX ++# define LOAD_FUTEX_WAIT(reg) \ ++ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg ++#else ++# if FUTEX_WAIT == 0 ++# define LOAD_FUTEX_WAIT(reg) \ ++ xorl $FUTEX_PRIVATE_FLAG, reg ; \ ++ andl %gs:PRIVATE_FUTEX, reg ++# else ++# define LOAD_FUTEX_WAIT(reg) \ ++ xorl $FUTEX_PRIVATE_FLAG, reg ; \ ++ andl %gs:PRIVATE_FUTEX, reg ; \ ++ orl $FUTEX_WAIT, reg ++# endif ++#endif + +- .globl __lll_robust_mutex_lock_wait +- .type __lll_robust_mutex_lock_wait,@function +- .hidden __lll_robust_mutex_lock_wait ++ .globl __lll_robust_lock_wait ++ .type __lll_robust_lock_wait,@function ++ .hidden __lll_robust_lock_wait + .align 16 +-__lll_robust_mutex_lock_wait: ++__lll_robust_lock_wait: + cfi_startproc + pushl %edx + cfi_adjust_cfa_offset(4) +@@ -55,9 +60,9 @@ __lll_robust_mutex_lock_wait: + cfi_offset(%ebx, -12) + cfi_offset(%esi, -16) + +- movl %ecx, %ebx ++ movl %edx, %ebx + xorl %esi, %esi /* No timeout. */ +- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */ ++ LOAD_FUTEX_WAIT (%ecx) + + 4: movl %eax, %edx + orl $FUTEX_WAITERS, %edx +@@ -98,14 +103,14 @@ __lll_robust_mutex_lock_wait: + cfi_restore(%edx) + ret + cfi_endproc +- .size __lll_robust_mutex_lock_wait,.-__lll_robust_mutex_lock_wait ++ .size __lll_robust_lock_wait,.-__lll_robust_lock_wait + + +- .globl __lll_robust_mutex_timedlock_wait +- .type __lll_robust_mutex_timedlock_wait,@function +- .hidden __lll_robust_mutex_timedlock_wait ++ .globl __lll_robust_timedlock_wait ++ .type __lll_robust_timedlock_wait,@function ++ .hidden __lll_robust_timedlock_wait + .align 16 +-__lll_robust_mutex_timedlock_wait: ++__lll_robust_timedlock_wait: + cfi_startproc + /* Check for a valid timeout value. */ + cmpl $1000000000, 4(%edx) +@@ -136,7 +141,7 @@ __lll_robust_mutex_timedlock_wait: + /* Get current time. */ + movl %esp, %ebx + xorl %ecx, %ecx +- movl $SYS_gettimeofday, %eax ++ movl $__NR_gettimeofday, %eax + ENTER_KERNEL + + /* Compute relative timeout. */ +@@ -177,7 +182,8 @@ __lll_robust_mutex_timedlock_wait: + 2: + /* Futex call. */ + movl %esp, %esi +- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */ ++ movl 20(%esp), %ecx ++ LOAD_FUTEX_WAIT (%ecx) + movl $SYS_futex, %eax + ENTER_KERNEL + movl %eax, %ecx +@@ -224,4 +230,4 @@ __lll_robust_mutex_timedlock_wait: + 8: movl $ETIMEDOUT, %eax + jmp 6b + cfi_endproc +- .size __lll_robust_mutex_timedlock_wait,.-__lll_robust_mutex_timedlock_wait ++ .size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S 2007-07-30 17:42:49.000000000 +0200 +@@ -18,21 +18,11 @@ + 02111-1307 USA. 
*/ + + #include ++#include + #include + #include + + +-#define SYS_futex 240 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +- +-#ifndef UP +-# define LOCK lock +-#else +-# define LOCK +-#endif +- +- + .text + + .globl __pthread_rwlock_rdlock +@@ -108,7 +98,7 @@ __pthread_rwlock_rdlock: + 13: subl $1, READERS_QUEUED(%ebx) + jmp 2b + +-5: xorl %ecx, %ecx ++5: xorl %edx, %edx + addl $1, NR_READERS(%ebx) + je 8f + 9: LOCK +@@ -120,24 +110,25 @@ __pthread_rwlock_rdlock: + jne 6f + 7: + +- movl %ecx, %eax ++ movl %edx, %eax + popl %ebx + popl %esi + ret + + 1: + #if MUTEX == 0 +- movl %ebx, %ecx ++ movl %ebx, %edx + #else +- leal MUTEX(%ebx), %ecx ++ leal MUTEX(%ebx), %edx + #endif +- call __lll_mutex_lock_wait ++ movl PSHARED(%ebx), %ecx ++ call __lll_lock_wait + jmp 2b + + 14: cmpl %gs:TID, %eax + jne 3b + /* Deadlock detected. */ +- movl $EDEADLK, %ecx ++ movl $EDEADLK, %edx + jmp 9b + + 6: +@@ -146,17 +137,18 @@ __pthread_rwlock_rdlock: + #else + leal MUTEX(%ebx), %eax + #endif +- call __lll_mutex_unlock_wake ++ movl PSHARED(%ebx), %ecx ++ call __lll_unlock_wake + jmp 7b + + /* Overflow. */ + 8: subl $1, NR_READERS(%ebx) +- movl $EAGAIN, %ecx ++ movl $EAGAIN, %edx + jmp 9b + + /* Overflow. */ + 4: subl $1, READERS_QUEUED(%ebx) +- movl $EAGAIN, %ecx ++ movl $EAGAIN, %edx + jmp 9b + + 10: +@@ -165,16 +157,18 @@ __pthread_rwlock_rdlock: + #else + leal MUTEX(%ebx), %eax + #endif +- call __lll_mutex_unlock_wake ++ movl PSHARED(%ebx), %ecx ++ call __lll_unlock_wake + jmp 11b + + 12: + #if MUTEX == 0 +- movl %ebx, %ecx ++ movl %ebx, %edx + #else +- leal MUTEX(%ebx), %ecx ++ leal MUTEX(%ebx), %edx + #endif +- call __lll_mutex_lock_wait ++ movl PSHARED(%ebx), %ecx ++ call __lll_lock_wait + jmp 13b + .size __pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock + +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S 2007-07-30 19:06:09.000000000 +0200 +@@ -19,42 +19,53 @@ + + #include + #include ++#include ++#include + + .text + +-#ifndef LOCK +-# ifdef UP +-# define LOCK ++#ifdef __ASSUME_PRIVATE_FUTEX ++# define LOAD_PRIVATE_FUTEX_WAIT(reg) \ ++ movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg ++# define LOAD_PRIVATE_FUTEX_WAKE(reg) \ ++ movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg ++# define LOAD_FUTEX_WAIT(reg) \ ++ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg ++# define LOAD_FUTEX_WAKE(reg) \ ++ xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg ++#else ++# if FUTEX_WAIT == 0 ++# define LOAD_PRIVATE_FUTEX_WAIT(reg) \ ++ movl %gs:PRIVATE_FUTEX, reg + # else +-# define LOCK lock ++# define LOAD_PRIVATE_FUTEX_WAIT(reg) \ ++ movl %gs:PRIVATE_FUTEX, reg ; \ ++ orl $FUTEX_WAIT, reg + # endif +-#endif +- +-#define SYS_gettimeofday __NR_gettimeofday +-#define SYS_futex 240 +-#ifndef FUTEX_WAIT +-# define FUTEX_WAIT 0 +-# define FUTEX_WAKE 1 +-#endif +- +-#ifndef LOAD_FUTEX_WAIT ++# define LOAD_PRIVATE_FUTEX_WAKE(reg) \ ++ movl %gs:PRIVATE_FUTEX, reg ; \ ++ orl $FUTEX_WAKE, reg + # if FUTEX_WAIT == 0 + # define LOAD_FUTEX_WAIT(reg) \ +- xorl reg, reg ++ xorl $FUTEX_PRIVATE_FLAG, reg ; \ ++ andl %gs:PRIVATE_FUTEX, reg + # else + # define LOAD_FUTEX_WAIT(reg) \ +- movl $FUTEX_WAIT, reg ++ xorl $FUTEX_PRIVATE_FLAG, reg ; \ ++ andl %gs:PRIVATE_FUTEX, reg ; \ ++ orl $FUTEX_WAIT, reg + # endif + # define LOAD_FUTEX_WAKE(reg) \ +- movl $FUTEX_WAKE, reg ++ xorl $FUTEX_PRIVATE_FLAG, reg ; \ ++ andl %gs:PRIVATE_FUTEX, reg ; \ ++ orl $FUTEX_WAKE, reg + #endif + +- +- .globl __lll_mutex_lock_wait +- .type 
__lll_mutex_lock_wait,@function +- .hidden __lll_mutex_lock_wait ++ .globl __lll_lock_wait_private ++ .type __lll_lock_wait_private,@function ++ .hidden __lll_lock_wait_private + .align 16 +-__lll_mutex_lock_wait: ++__lll_lock_wait_private: + cfi_startproc + pushl %edx + cfi_adjust_cfa_offset(4) +@@ -69,7 +80,7 @@ __lll_mutex_lock_wait: + movl $2, %edx + movl %ecx, %ebx + xorl %esi, %esi /* No timeout. */ +- LOAD_FUTEX_WAIT (%ecx) ++ LOAD_PRIVATE_FUTEX_WAIT (%ecx) + + cmpl %edx, %eax /* NB: %edx == 2 */ + jne 2f +@@ -94,15 +105,60 @@ __lll_mutex_lock_wait: + cfi_restore(%edx) + ret + cfi_endproc +- .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait +- ++ .size __lll_lock_wait_private,.-__lll_lock_wait_private + + #ifdef NOT_IN_libc +- .globl __lll_mutex_timedlock_wait +- .type __lll_mutex_timedlock_wait,@function +- .hidden __lll_mutex_timedlock_wait ++ .globl __lll_lock_wait ++ .type __lll_lock_wait,@function ++ .hidden __lll_lock_wait + .align 16 +-__lll_mutex_timedlock_wait: ++__lll_lock_wait: ++ cfi_startproc ++ pushl %edx ++ cfi_adjust_cfa_offset(4) ++ pushl %ebx ++ cfi_adjust_cfa_offset(4) ++ pushl %esi ++ cfi_adjust_cfa_offset(4) ++ cfi_offset(%edx, -8) ++ cfi_offset(%ebx, -12) ++ cfi_offset(%esi, -16) ++ ++ movl %edx, %ebx ++ movl $2, %edx ++ xorl %esi, %esi /* No timeout. */ ++ LOAD_FUTEX_WAIT (%ecx) ++ ++ cmpl %edx, %eax /* NB: %edx == 2 */ ++ jne 2f ++ ++1: movl $SYS_futex, %eax ++ ENTER_KERNEL ++ ++2: movl %edx, %eax ++ xchgl %eax, (%ebx) /* NB: lock is implied */ ++ ++ testl %eax, %eax ++ jnz 1b ++ ++ popl %esi ++ cfi_adjust_cfa_offset(-4) ++ cfi_restore(%esi) ++ popl %ebx ++ cfi_adjust_cfa_offset(-4) ++ cfi_restore(%ebx) ++ popl %edx ++ cfi_adjust_cfa_offset(-4) ++ cfi_restore(%edx) ++ ret ++ cfi_endproc ++ .size __lll_lock_wait,.-__lll_lock_wait ++ ++ .globl __lll_timedlock_wait ++ .type __lll_timedlock_wait,@function ++ .hidden __lll_timedlock_wait ++ .align 16 ++__lll_timedlock_wait: + cfi_startproc + /* Check for a valid timeout value. */ + cmpl $1000000000, 4(%edx) +@@ -132,7 +188,7 @@ __lll_mutex_timedlock_wait: + /* Get current time. */ + movl %esp, %ebx + xorl %ecx, %ecx +- movl $SYS_gettimeofday, %eax ++ movl $__NR_gettimeofday, %eax + ENTER_KERNEL + + /* Compute relative timeout. */ +@@ -165,6 +221,7 @@ __lll_mutex_timedlock_wait: + + /* Futex call. */ + movl %esp, %esi ++ movl 16(%esp), %ecx + LOAD_FUTEX_WAIT (%ecx) + movl $SYS_futex, %eax + ENTER_KERNEL +@@ -215,15 +272,51 @@ __lll_mutex_timedlock_wait: + 5: movl $ETIMEDOUT, %eax + jmp 6b + cfi_endproc +- .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait ++ .size __lll_timedlock_wait,.-__lll_timedlock_wait + #endif + ++ .globl __lll_unlock_wake_private ++ .type __lll_unlock_wake_private,@function ++ .hidden __lll_unlock_wake_private ++ .align 16 ++__lll_unlock_wake_private: ++ cfi_startproc ++ pushl %ebx ++ cfi_adjust_cfa_offset(4) ++ pushl %ecx ++ cfi_adjust_cfa_offset(4) ++ pushl %edx ++ cfi_adjust_cfa_offset(4) ++ cfi_offset(%ebx, -8) ++ cfi_offset(%ecx, -12) ++ cfi_offset(%edx, -16) ++ ++ movl %eax, %ebx ++ movl $0, (%eax) ++ LOAD_PRIVATE_FUTEX_WAKE (%ecx) ++ movl $1, %edx /* Wake one thread. 
*/ ++ movl $SYS_futex, %eax ++ ENTER_KERNEL ++ ++ popl %edx ++ cfi_adjust_cfa_offset(-4) ++ cfi_restore(%edx) ++ popl %ecx ++ cfi_adjust_cfa_offset(-4) ++ cfi_restore(%ecx) ++ popl %ebx ++ cfi_adjust_cfa_offset(-4) ++ cfi_restore(%ebx) ++ ret ++ cfi_endproc ++ .size __lll_unlock_wake_private,.-__lll_unlock_wake_private + +- .globl __lll_mutex_unlock_wake +- .type __lll_mutex_unlock_wake,@function +- .hidden __lll_mutex_unlock_wake ++#ifdef NOT_IN_libc ++ .globl __lll_unlock_wake ++ .type __lll_unlock_wake,@function ++ .hidden __lll_unlock_wake + .align 16 +-__lll_mutex_unlock_wake: ++__lll_unlock_wake: + cfi_startproc + pushl %ebx + cfi_adjust_cfa_offset(4) +@@ -253,10 +346,8 @@ __lll_mutex_unlock_wake: + cfi_restore(%ebx) + ret + cfi_endproc +- .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake ++ .size __lll_unlock_wake,.-__lll_unlock_wake + +- +-#ifdef NOT_IN_libc + .globl __lll_timedwait_tid + .type __lll_timedwait_tid,@function + .hidden __lll_timedwait_tid +@@ -274,7 +365,7 @@ __lll_timedwait_tid: + /* Get current time. */ + 2: movl %esp, %ebx + xorl %ecx, %ecx +- movl $SYS_gettimeofday, %eax ++ movl $__NR_gettimeofday, %eax + ENTER_KERNEL + + /* Compute relative timeout. */ +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S.jj 2006-07-29 06:31:49.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S 2007-07-30 16:47:51.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc. ++/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + +@@ -19,24 +19,11 @@ + + #include + #include ++#include + #include + #include + #include +- +-#ifdef UP +-# define LOCK +-#else +-# define LOCK lock +-#endif +- +-#define SYS_futex 240 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +-#define FUTEX_REQUEUE 3 +-#define FUTEX_CMP_REQUEUE 4 +- +-#define EINVAL 22 +- ++#include + + .text + +@@ -141,21 +128,27 @@ __pthread_cond_broadcast: + /* Initial locking failed. */ + 1: + #if cond_lock == 0 +- movl %ebx, %ecx ++ movl %ebx, %edx + #else +- leal cond_lock(%ebx), %ecx ++ leal cond_lock(%ebx), %edx + #endif +- call __lll_mutex_lock_wait ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_lock_wait + jmp 2b + + /* Unlock in loop requires waekup. */ + 5: leal cond_lock-cond_futex(%ebx), %eax +- call __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_unlock_wake + jmp 6b + + /* Unlock in loop requires waekup. */ + 7: leal cond_lock-cond_futex(%ebx), %eax +- call __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_unlock_wake + jmp 8b + + 9: /* The futex requeue functionality is not available. */ +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S 2007-07-31 12:22:18.000000000 +0200 +@@ -21,15 +21,7 @@ + #include + #include + #include +- +-#ifndef UP +-# define LOCK lock +-#else +-# define +-#endif +- +-#define SYS_futex 240 +-#define FUTEX_WAKE 1 ++#include + + + .text +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S.jj 2007-07-30 18:19:01.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S 2007-07-30 18:15:07.000000000 +0200 +@@ -18,20 +18,10 @@ + 02111-1307 USA. 
*/ + + #include ++#include + #include + + +-#define SYS_futex 240 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +- +-#ifndef UP +-# define LOCK lock +-#else +-# define LOCK +-#endif +- +- + .text + + .globl __pthread_rwlock_unlock +@@ -115,11 +105,12 @@ __pthread_rwlock_unlock: + + 1: + #if MUTEX == 0 +- movl %edi, %ecx ++ movl %edi, %edx + #else +- leal MUTEX(%edi), %ecx ++ leal MUTEX(%edi), %edx + #endif +- call __lll_mutex_lock_wait ++ movl PSHARED(%edi), %ecx ++ call __lll_lock_wait + jmp 2b + + 3: +@@ -128,7 +119,8 @@ __pthread_rwlock_unlock: + #else + leal MUTEX(%edi), %eax + #endif +- call __lll_mutex_unlock_wake ++ movl PSHARED(%edi), %ecx ++ call __lll_unlock_wake + jmp 4b + + 7: +@@ -137,7 +129,8 @@ __pthread_rwlock_unlock: + #else + leal MUTEX(%edi), %eax + #endif +- call __lll_mutex_unlock_wake ++ movl PSHARED(%edi), %ecx ++ call __lll_unlock_wake + jmp 8b + + .size __pthread_rwlock_unlock,.-__pthread_rwlock_unlock +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S 2007-07-30 14:23:04.000000000 +0200 +@@ -17,19 +17,4 @@ + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +-#include +- +-/* All locks in libc are private. Use the kernel feature if possible. */ +-#define FUTEX_PRIVATE_FLAG 128 +-#ifdef __ASSUME_PRIVATE_FUTEX +-# define FUTEX_WAIT (0 | FUTEX_PRIVATE_FLAG) +-# define FUTEX_WAKE (1 | FUTEX_PRIVATE_FLAG) +-#else +-# define LOAD_FUTEX_WAIT(reg) \ +- movl %gs:PRIVATE_FUTEX, reg +-# define LOAD_FUTEX_WAKE(reg) \ +- movl %gs:PRIVATE_FUTEX, reg ; \ +- orl $FUTEX_WAKE, reg +-#endif +- + #include "lowlevellock.S" +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S 2007-07-30 17:23:12.000000000 +0200 +@@ -19,19 +19,10 @@ + + #include + #include ++#include + #include + #include + +-#ifdef UP +-# define LOCK +-#else +-# define LOCK lock +-#endif +- +-#define SYS_futex 240 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +- + + .text + +@@ -202,11 +193,13 @@ __pthread_cond_wait: + 1: + .LSbl1: + #if cond_lock == 0 +- movl %ebx, %ecx ++ movl %ebx, %edx + #else +- leal cond_lock(%ebx), %ecx ++ leal cond_lock(%ebx), %edx + #endif +- call __lll_mutex_lock_wait ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_lock_wait + jmp 2b + + /* Unlock in loop requires waekup. */ +@@ -217,17 +210,21 @@ __pthread_cond_wait: + #else + leal cond_lock(%ebx), %eax + #endif +- call __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_unlock_wake + jmp 4b + + /* Locking in loop failed. */ + 5: + #if cond_lock == 0 +- movl %ebx, %ecx ++ movl %ebx, %edx + #else +- leal cond_lock(%ebx), %ecx ++ leal cond_lock(%ebx), %edx + #endif +- call __lll_mutex_lock_wait ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_lock_wait + jmp 6b + + /* Unlock after loop requires wakeup. */ +@@ -237,7 +234,9 @@ __pthread_cond_wait: + #else + leal cond_lock(%ebx), %eax + #endif +- call __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_unlock_wake + jmp 11b + + /* The initial unlocking of the mutex failed. 
*/ +@@ -257,7 +256,9 @@ __pthread_cond_wait: + #else + leal cond_lock(%ebx), %eax + #endif +- call __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_unlock_wake + + movl %esi, %eax + jmp 14b +@@ -287,11 +288,13 @@ __condvar_w_cleanup: + jz 1f + + #if cond_lock == 0 +- movl %ebx, %ecx ++ movl %ebx, %edx + #else +- leal cond_lock(%ebx), %ecx ++ leal cond_lock(%ebx), %edx + #endif +- call __lll_mutex_lock_wait ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_lock_wait + + 1: movl broadcast_seq(%ebx), %eax + cmpl 12(%esp), %eax +@@ -348,7 +351,9 @@ __condvar_w_cleanup: + #else + leal cond_lock(%ebx), %eax + #endif +- call __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_unlock_wake + + /* Wake up all waiters to make sure no signal gets lost. */ + 2: testl %edi, %edi +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S.jj 2005-09-08 19:40:52.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S 2007-07-30 16:47:18.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc. ++/* Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + +@@ -19,23 +19,10 @@ + + #include + #include ++#include + #include + #include +- +-#ifdef UP +-# define LOCK +-#else +-# define LOCK lock +-#endif +- +-#define SYS_futex 240 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +-#define FUTEX_WAKE_OP 5 +- +-#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1) +- +-#define EINVAL 22 ++#include + + + .text +@@ -119,17 +106,21 @@ __pthread_cond_signal: + + /* Unlock in loop requires wakeup. */ + 5: movl %edi, %eax +- call __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_unlock_wake + jmp 6b + + /* Initial locking failed. */ + 1: + #if cond_lock == 0 +- movl %edi, %ecx ++ movl %edi, %edx + #else +- leal cond_lock(%edi), %ecx ++ leal cond_lock(%edi), %edx + #endif +- call __lll_mutex_lock_wait ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_lock_wait + jmp 2b + + .size __pthread_cond_signal, .-__pthread_cond_signal +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S 2007-07-31 12:23:11.000000000 +0200 +@@ -21,16 +21,7 @@ + #include + #include + #include +- +-#ifndef UP +-# define LOCK lock +-#else +-# define +-#endif +- +-#define SYS_gettimeofday __NR_gettimeofday +-#define SYS_futex 240 +-#define FUTEX_WAIT 0 ++#include + + + #if VALUE != 0 +@@ -82,7 +73,7 @@ sem_timedwait: + 7: xorl %ecx, %ecx + movl %esp, %ebx + movl %ecx, %edx +- movl $SYS_gettimeofday, %eax ++ movl $__NR_gettimeofday, %eax + ENTER_KERNEL + + /* Compute relative timeout. */ +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S 2007-07-31 12:22:45.000000000 +0200 +@@ -21,15 +21,7 @@ + #include + #include + #include +- +-#ifndef UP +-# define LOCK lock +-#else +-# define +-#endif +- +-#define SYS_futex 240 +-#define FUTEX_WAIT 0 ++#include + + + #if VALUE != 0 +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S 2007-07-30 16:48:57.000000000 +0200 +@@ -18,19 +18,9 @@ + 02111-1307 USA. 
*/ + + #include ++#include + #include + +-#define SYS_futex 240 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +- +-#ifndef UP +-# define LOCK lock +-#else +-# define LOCK +-#endif +- +- + .text + + .globl pthread_barrier_wait +@@ -152,19 +142,27 @@ pthread_barrier_wait: + popl %ebx + ret + +-1: leal MUTEX(%ebx), %ecx +- call __lll_mutex_lock_wait ++1: movl PRIVATE(%ebx), %ecx ++ leal MUTEX(%ebx), %edx ++ xorl $LLL_SHARED, %ecx ++ call __lll_lock_wait + jmp 2b + +-4: leal MUTEX(%ebx), %eax +- call __lll_mutex_unlock_wake ++4: movl PRIVATE(%ebx), %ecx ++ leal MUTEX(%ebx), %eax ++ xorl $LLL_SHARED, %ecx ++ call __lll_unlock_wake + jmp 5b + +-6: leal MUTEX(%ebx), %eax +- call __lll_mutex_unlock_wake ++6: movl PRIVATE(%ebx), %ecx ++ leal MUTEX(%ebx), %eax ++ xorl $LLL_SHARED, %ecx ++ call __lll_unlock_wake + jmp 7b + +-9: leal MUTEX(%ebx), %eax +- call __lll_mutex_unlock_wake ++9: movl PRIVATE(%ebx), %ecx ++ leal MUTEX(%ebx), %eax ++ xorl $LLL_SHARED, %ecx ++ call __lll_unlock_wake + jmp 10b + .size pthread_barrier_wait,.-pthread_barrier_wait +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S 2007-07-30 17:15:28.000000000 +0200 +@@ -19,20 +19,10 @@ + + #include + #include ++#include + #include + #include + +-#ifdef UP +-# define LOCK +-#else +-# define LOCK lock +-#endif +- +-#define SYS_gettimeofday __NR_gettimeofday +-#define SYS_futex 240 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +- + + .text + +@@ -127,7 +117,7 @@ __pthread_cond_timedwait: + /* Get the current time. */ + leal 4(%esp), %ebx + xorl %ecx, %ecx +- movl $SYS_gettimeofday, %eax ++ movl $__NR_gettimeofday, %eax + ENTER_KERNEL + movl %edx, %ebx + +@@ -285,11 +275,13 @@ __pthread_cond_timedwait: + 1: + .LSbl1: + #if cond_lock == 0 +- movl %ebx, %ecx ++ movl %ebx, %edx + #else +- leal cond_lock(%ebx), %ecx ++ leal cond_lock(%ebx), %edx + #endif +- call __lll_mutex_lock_wait ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_lock_wait + jmp 2b + + /* Unlock in loop requires wakeup. */ +@@ -300,17 +292,21 @@ __pthread_cond_timedwait: + #else + leal cond_lock(%ebx), %eax + #endif +- call __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_unlock_wake + jmp 4b + + /* Locking in loop failed. */ + 5: + #if cond_lock == 0 +- movl %ebx, %ecx ++ movl %ebx, %edx + #else +- leal cond_lock(%ebx), %ecx ++ leal cond_lock(%ebx), %edx + #endif +- call __lll_mutex_lock_wait ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_lock_wait + jmp 6b + + /* Unlock after loop requires wakeup. */ +@@ -320,7 +316,9 @@ __pthread_cond_timedwait: + #else + leal cond_lock(%ebx), %eax + #endif +- call __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_unlock_wake + jmp 11b + + /* The initial unlocking of the mutex failed. 
*/ +@@ -340,7 +338,9 @@ __pthread_cond_timedwait: + #else + leal cond_lock(%ebx), %eax + #endif +- call __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_unlock_wake + + movl %esi, %eax + jmp 18b +@@ -350,7 +350,7 @@ __pthread_cond_timedwait: + .LSbl4: + 19: leal 4(%esp), %ebx + xorl %ecx, %ecx +- movl $SYS_gettimeofday, %eax ++ movl $__NR_gettimeofday, %eax + ENTER_KERNEL + movl %edx, %ebx + +@@ -396,11 +396,13 @@ __condvar_tw_cleanup: + jz 1f + + #if cond_lock == 0 +- movl %ebx, %ecx ++ movl %ebx, %edx + #else +- leal cond_lock(%ebx), %ecx ++ leal cond_lock(%ebx), %edx + #endif +- call __lll_mutex_lock_wait ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_lock_wait + + 1: movl broadcast_seq(%ebx), %eax + cmpl 20(%esp), %eax +@@ -457,7 +459,9 @@ __condvar_tw_cleanup: + #else + leal cond_lock(%ebx), %eax + #endif +- call __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %ecx ++ call __lll_unlock_wake + + /* Wake up all waiters to make sure no signal gets lost. */ + 2: testl %edi, %edi +--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S.jj 2007-07-30 18:10:59.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S 2007-07-30 18:06:27.000000000 +0200 +@@ -18,22 +18,11 @@ + 02111-1307 USA. */ + + #include ++#include + #include + #include + + +-#define SYS_gettimeofday __NR_gettimeofday +-#define SYS_futex 240 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +- +-#ifndef UP +-# define LOCK lock +-#else +-# define LOCK +-#endif +- +- + .text + + .globl pthread_rwlock_timedwrlock +@@ -86,7 +75,7 @@ pthread_rwlock_timedwrlock: + /* Get current time. */ + 11: movl %esp, %ebx + xorl %ecx, %ecx +- movl $SYS_gettimeofday, %eax ++ movl $__NR_gettimeofday, %eax + ENTER_KERNEL + + /* Compute relative timeout. */ +@@ -140,11 +129,11 @@ pthread_rwlock_timedwrlock: + cmpl $-ETIMEDOUT, %esi + jne 2b + +-18: movl $ETIMEDOUT, %ecx ++18: movl $ETIMEDOUT, %edx + jmp 9f + + +-5: xorl %ecx, %ecx ++5: xorl %edx, %edx + movl %gs:TID, %eax + movl %eax, WRITER(%ebp) + 9: LOCK +@@ -155,7 +144,7 @@ pthread_rwlock_timedwrlock: + #endif + jne 6f + +-7: movl %ecx, %eax ++7: movl %edx, %eax + + addl $8, %esp + popl %ebp +@@ -166,16 +155,17 @@ pthread_rwlock_timedwrlock: + + 1: + #if MUTEX == 0 +- movl %ebp, %ecx ++ movl %ebp, %edx + #else +- leal MUTEX(%ebp), %ecx ++ leal MUTEX(%ebp), %edx + #endif +- call __lll_mutex_lock_wait ++ movl PSHARED(%ebp), %ecx ++ call __lll_lock_wait + jmp 2b + + 14: cmpl %gs:TID, %eax + jne 3b +-20: movl $EDEADLK, %ecx ++20: movl $EDEADLK, %edx + jmp 9b + + 6: +@@ -184,12 +174,13 @@ pthread_rwlock_timedwrlock: + #else + leal MUTEX(%ebp), %eax + #endif +- call __lll_mutex_unlock_wake ++ movl PSHARED(%ebp), %ecx ++ call __lll_unlock_wake + jmp 7b + + /* Overflow. 
*/ + 4: subl $1, WRITERS_QUEUED(%ebp) +- movl $EAGAIN, %ecx ++ movl $EAGAIN, %edx + jmp 9b + + 10: +@@ -198,21 +189,23 @@ pthread_rwlock_timedwrlock: + #else + leal MUTEX(%ebp), %eax + #endif +- call __lll_mutex_unlock_wake ++ movl PSHARED(%ebp), %ecx ++ call __lll_unlock_wake + jmp 11b + + 12: + #if MUTEX == 0 +- movl %ebp, %ecx ++ movl %ebp, %edx + #else +- leal MUTEX(%ebp), %ecx ++ leal MUTEX(%ebp), %edx + #endif +- call __lll_mutex_lock_wait ++ movl PSHARED(%ebp), %ecx ++ call __lll_lock_wait + jmp 13b + + 16: movl $-ETIMEDOUT, %esi + jmp 17b + +-19: movl $EINVAL, %ecx ++19: movl $EINVAL, %edx + jmp 9b + .size pthread_rwlock_timedwrlock,.-pthread_rwlock_timedwrlock +--- libc/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h.jj 2007-07-29 12:06:52.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h 2007-07-30 19:02:17.000000000 +0200 +@@ -20,28 +20,41 @@ + #ifndef _LOWLEVELLOCK_H + #define _LOWLEVELLOCK_H 1 + +-#include +-#include +-#include +-#include +-#include +- +-#ifndef LOCK_INSTR +-# ifdef UP +-# define LOCK_INSTR /* nothing */ +-# else +-# define LOCK_INSTR "lock;" ++#ifndef __ASSEMBLER__ ++# include ++# include ++# include ++# include ++# include ++ ++# ifndef LOCK_INSTR ++# ifdef UP ++# define LOCK_INSTR /* nothing */ ++# else ++# define LOCK_INSTR "lock;" ++# endif ++# endif ++#else ++# ifndef LOCK ++# ifdef UP ++# define LOCK ++# else ++# define LOCK lock ++# endif + # endif + #endif + + #define SYS_futex 240 + #define FUTEX_WAIT 0 + #define FUTEX_WAKE 1 ++#define FUTEX_CMP_REQUEUE 4 ++#define FUTEX_WAKE_OP 5 + #define FUTEX_LOCK_PI 6 + #define FUTEX_UNLOCK_PI 7 + #define FUTEX_TRYLOCK_PI 8 + #define FUTEX_PRIVATE_FLAG 128 + ++#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1) + + /* Values for 'private' parameter of locking macros. Yes, the + definition seems to be backwards. But it is not. The bit will be +@@ -76,11 +89,12 @@ + # endif + #endif + ++#ifndef __ASSEMBLER__ + + /* Initializer for compatibility lock. */ +-#define LLL_MUTEX_LOCK_INITIALIZER (0) +-#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1) +-#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS (2) ++#define LLL_LOCK_INITIALIZER (0) ++#define LLL_LOCK_INITIALIZER_LOCKED (1) ++#define LLL_LOCK_INITIALIZER_WAITERS (2) + + + #ifdef PIC +@@ -102,7 +116,7 @@ + #endif + + /* Delay in spinlock loop. */ +-#define BUSY_WAIT_NOP asm ("rep; nop") ++#define BUSY_WAIT_NOP asm ("rep; nop") + + + #define LLL_STUB_UNWIND_INFO_START \ +@@ -217,332 +231,309 @@ LLL_STUB_UNWIND_INFO_END + } while (0) + + +-/* Does not preserve %eax and %ecx. */ +-extern int __lll_mutex_lock_wait (int val, int *__futex) +- __attribute ((regparm (2))) attribute_hidden; +-/* Does not preserve %eax, %ecx, and %edx. */ +-extern int __lll_mutex_timedlock_wait (int val, int *__futex, +- const struct timespec *abstime) +- __attribute ((regparm (3))) attribute_hidden; +-/* Preserves all registers but %eax. */ +-extern int __lll_mutex_unlock_wake (int *__futex) +- __attribute ((regparm (1))) attribute_hidden; +- +- +-/* NB: in the lll_mutex_trylock macro we simply return the value in %eax ++/* NB: in the lll_trylock macro we simply return the value in %eax + after the cmpxchg instruction. In case the operation succeded this + value is zero. In case the operation failed, the cmpxchg instruction + has loaded the current value of the memory work which is guaranteed + to be nonzero. 
*/ +-#define lll_mutex_trylock(futex) \ ++#if defined NOT_IN_libc || defined UP ++# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1" ++#else ++# define __lll_trylock_asm "cmpl $0, %%gs:%P5\n\t" \ ++ "je 0f\n\t" \ ++ "lock\n" \ ++ "0:\tcmpxchgl %2, %1" ++#endif ++ ++#define lll_trylock(futex) \ + ({ int ret; \ +- __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \ ++ __asm __volatile (__lll_trylock_asm \ + : "=a" (ret), "=m" (futex) \ +- : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\ +- "0" (LLL_MUTEX_LOCK_INITIALIZER) \ ++ : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \ ++ "0" (LLL_LOCK_INITIALIZER), \ ++ "i" (MULTIPLE_THREADS_OFFSET) \ + : "memory"); \ + ret; }) + +- +-#define lll_robust_mutex_trylock(futex, id) \ ++#define lll_robust_trylock(futex, id) \ + ({ int ret; \ + __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \ + : "=a" (ret), "=m" (futex) \ + : "r" (id), "m" (futex), \ +- "0" (LLL_MUTEX_LOCK_INITIALIZER) \ ++ "0" (LLL_LOCK_INITIALIZER) \ + : "memory"); \ + ret; }) + + +-#define lll_mutex_cond_trylock(futex) \ ++#define lll_cond_trylock(futex) \ + ({ int ret; \ + __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \ + : "=a" (ret), "=m" (futex) \ +- : "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS), \ +- "m" (futex), "0" (LLL_MUTEX_LOCK_INITIALIZER) \ ++ : "r" (LLL_LOCK_INITIALIZER_WAITERS), \ ++ "m" (futex), "0" (LLL_LOCK_INITIALIZER) \ + : "memory"); \ + ret; }) + ++#if defined NOT_IN_libc || defined UP ++# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %1, %2\n\t" ++#else ++# define __lll_lock_asm_start "cmpl $0, %%gs:%P6\n\t" \ ++ "je 0f\n\t" \ ++ "lock\n" \ ++ "0:\tcmpxchgl %1, %2\n\t" ++#endif + +-#define lll_mutex_lock(futex) \ +- (void) ({ int ignore1, ignore2; \ +- __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \ +- "jnz _L_mutex_lock_%=\n\t" \ +- ".subsection 1\n\t" \ +- ".type _L_mutex_lock_%=,@function\n" \ +- "_L_mutex_lock_%=:\n" \ +- "1:\tleal %2, %%ecx\n" \ +- "2:\tcall __lll_mutex_lock_wait\n" \ +- "3:\tjmp 18f\n" \ +- "4:\t.size _L_mutex_lock_%=, 4b-1b\n\t" \ +- ".previous\n" \ +- LLL_STUB_UNWIND_INFO_3 \ +- "18:" \ +- : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \ +- : "0" (0), "1" (1), "m" (futex) \ +- : "memory"); }) +- ++#define lll_lock(futex, private) \ ++ (void) \ ++ ({ int ignore1, ignore2; \ ++ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \ ++ __asm __volatile (__lll_lock_asm_start \ ++ "jnz _L_lock_%=\n\t" \ ++ ".subsection 1\n\t" \ ++ ".type _L_lock_%=,@function\n" \ ++ "_L_lock_%=:\n" \ ++ "1:\tleal %2, %%ecx\n" \ ++ "2:\tcall __lll_lock_wait_private\n" \ ++ "3:\tjmp 18f\n" \ ++ "4:\t.size _L_lock_%=, 4b-1b\n\t" \ ++ ".previous\n" \ ++ LLL_STUB_UNWIND_INFO_3 \ ++ "18:" \ ++ : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \ ++ : "0" (0), "1" (1), "m" (futex), \ ++ "i" (MULTIPLE_THREADS_OFFSET) \ ++ : "memory"); \ ++ else \ ++ { \ ++ int ignore3; \ ++ __asm __volatile (__lll_lock_asm_start \ ++ "jnz _L_lock_%=\n\t" \ ++ ".subsection 1\n\t" \ ++ ".type _L_lock_%=,@function\n" \ ++ "_L_lock_%=:\n" \ ++ "1:\tleal %2, %%edx\n" \ ++ "0:\tmovl %8, %%ecx\n" \ ++ "2:\tcall __lll_lock_wait\n" \ ++ "3:\tjmp 18f\n" \ ++ "4:\t.size _L_lock_%=, 4b-1b\n\t" \ ++ ".previous\n" \ ++ LLL_STUB_UNWIND_INFO_4 \ ++ "18:" \ ++ : "=a" (ignore1), "=c" (ignore2), \ ++ "=m" (futex), "=&d" (ignore3) \ ++ : "1" (1), "m" (futex), \ ++ "i" (MULTIPLE_THREADS_OFFSET), "0" (0), \ ++ "g" (private) \ ++ : "memory"); \ ++ } \ ++ }) + +-#define lll_robust_mutex_lock(futex, id) \ +- ({ int result, ignore; \ ++#define lll_robust_lock(futex, id, private) \ ++ ({ 
int result, ignore1, ignore2; \ + __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \ +- "jnz _L_robust_mutex_lock_%=\n\t" \ ++ "jnz _L_robust_lock_%=\n\t" \ + ".subsection 1\n\t" \ +- ".type _L_robust_mutex_lock_%=,@function\n" \ +- "_L_robust_mutex_lock_%=:\n" \ +- "1:\tleal %2, %%ecx\n" \ +- "2:\tcall __lll_robust_mutex_lock_wait\n" \ ++ ".type _L_robust_lock_%=,@function\n" \ ++ "_L_robust_lock_%=:\n" \ ++ "1:\tleal %2, %%edx\n" \ ++ "0:\tmovl %7, %%ecx\n" \ ++ "2:\tcall __lll_robust_lock_wait\n" \ + "3:\tjmp 18f\n" \ +- "4:\t.size _L_robust_mutex_lock_%=, 4b-1b\n\t" \ ++ "4:\t.size _L_robust_lock_%=, 4b-1b\n\t" \ + ".previous\n" \ +- LLL_STUB_UNWIND_INFO_3 \ ++ LLL_STUB_UNWIND_INFO_4 \ + "18:" \ +- : "=a" (result), "=c" (ignore), "=m" (futex) \ +- : "0" (0), "1" (id), "m" (futex) \ ++ : "=a" (result), "=c" (ignore1), "=m" (futex), \ ++ "=&d" (ignore2) \ ++ : "0" (0), "1" (id), "m" (futex), "g" (private) \ + : "memory"); \ + result; }) + + +-/* Special version of lll_mutex_lock which causes the unlock function to ++/* Special version of lll_lock which causes the unlock function to + always wakeup waiters. */ +-#define lll_mutex_cond_lock(futex) \ +- (void) ({ int ignore1, ignore2; \ +- __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \ +- "jnz _L_mutex_cond_lock_%=\n\t" \ +- ".subsection 1\n\t" \ +- ".type _L_mutex_cond_lock_%=,@function\n" \ +- "_L_mutex_cond_lock_%=:\n" \ +- "1:\tleal %2, %%ecx\n" \ +- "2:\tcall __lll_mutex_lock_wait\n" \ +- "3:\tjmp 18f\n" \ +- "4:\t.size _L_mutex_cond_lock_%=, 4b-1b\n\t" \ +- ".previous\n" \ +- LLL_STUB_UNWIND_INFO_3 \ +- "18:" \ +- : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \ +- : "0" (0), "1" (2), "m" (futex) \ +- : "memory"); }) ++#define lll_cond_lock(futex, private) \ ++ (void) \ ++ ({ int ignore1, ignore2, ignore3; \ ++ __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \ ++ "jnz _L_cond_lock_%=\n\t" \ ++ ".subsection 1\n\t" \ ++ ".type _L_cond_lock_%=,@function\n" \ ++ "_L_cond_lock_%=:\n" \ ++ "1:\tleal %2, %%edx\n" \ ++ "0:\tmovl %7, %%ecx\n" \ ++ "2:\tcall __lll_lock_wait\n" \ ++ "3:\tjmp 18f\n" \ ++ "4:\t.size _L_cond_lock_%=, 4b-1b\n\t" \ ++ ".previous\n" \ ++ LLL_STUB_UNWIND_INFO_4 \ ++ "18:" \ ++ : "=a" (ignore1), "=c" (ignore2), "=m" (futex), \ ++ "=&d" (ignore3) \ ++ : "0" (0), "1" (2), "m" (futex), "g" (private) \ ++ : "memory"); \ ++ }) + + +-#define lll_robust_mutex_cond_lock(futex, id) \ +- ({ int result, ignore; \ ++#define lll_robust_cond_lock(futex, id, private) \ ++ ({ int result, ignore1, ignore2; \ + __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \ +- "jnz _L_robust_mutex_cond_lock_%=\n\t" \ ++ "jnz _L_robust_cond_lock_%=\n\t" \ + ".subsection 1\n\t" \ +- ".type _L_robust_mutex_cond_lock_%=,@function\n" \ +- "_L_robust_mutex_cond_lock_%=:\n" \ +- "1:\tleal %2, %%ecx\n" \ +- "2:\tcall __lll_robust_mutex_lock_wait\n" \ ++ ".type _L_robust_cond_lock_%=,@function\n" \ ++ "_L_robust_cond_lock_%=:\n" \ ++ "1:\tleal %2, %%edx\n" \ ++ "0:\tmovl %7, %%ecx\n" \ ++ "2:\tcall __lll_robust_lock_wait\n" \ + "3:\tjmp 18f\n" \ +- "4:\t.size _L_robust_mutex_cond_lock_%=, 4b-1b\n\t" \ ++ "4:\t.size _L_robust_cond_lock_%=, 4b-1b\n\t" \ + ".previous\n" \ +- LLL_STUB_UNWIND_INFO_3 \ ++ LLL_STUB_UNWIND_INFO_4 \ + "18:" \ +- : "=a" (result), "=c" (ignore), "=m" (futex) \ +- : "0" (0), "1" (id | FUTEX_WAITERS), "m" (futex) \ ++ : "=a" (result), "=c" (ignore1), "=m" (futex), \ ++ "=&d" (ignore2) \ ++ : "0" (0), "1" (id | FUTEX_WAITERS), "m" (futex), \ ++ "g" (private) \ + : "memory"); \ + result; }) + + +-#define 
lll_mutex_timedlock(futex, timeout) \ +- ({ int result, ignore1, ignore2; \ ++#define lll_timedlock(futex, timeout, private) \ ++ ({ int result, ignore1, ignore2, ignore3; \ + __asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \ +- "jnz _L_mutex_timedlock_%=\n\t" \ ++ "jnz _L_timedlock_%=\n\t" \ + ".subsection 1\n\t" \ +- ".type _L_mutex_timedlock_%=,@function\n" \ +- "_L_mutex_timedlock_%=:\n" \ ++ ".type _L_timedlock_%=,@function\n" \ ++ "_L_timedlock_%=:\n" \ + "1:\tleal %3, %%ecx\n" \ +- "0:\tmovl %7, %%edx\n" \ +- "2:\tcall __lll_mutex_timedlock_wait\n" \ ++ "0:\tmovl %8, %%edx\n" \ ++ "2:\tcall __lll_timedlock_wait\n" \ + "3:\tjmp 18f\n" \ +- "4:\t.size _L_mutex_timedlock_%=, 4b-1b\n\t" \ ++ "4:\t.size _L_timedlock_%=, 4b-1b\n\t" \ + ".previous\n" \ + LLL_STUB_UNWIND_INFO_4 \ + "18:" \ + : "=a" (result), "=c" (ignore1), "=&d" (ignore2), \ +- "=m" (futex) \ +- : "0" (0), "1" (1), "m" (futex), "m" (timeout) \ ++ "=m" (futex), "=S" (ignore3) \ ++ : "0" (0), "1" (1), "m" (futex), "m" (timeout), \ ++ "4" (private) \ + : "memory"); \ + result; }) + + +-#define lll_robust_mutex_timedlock(futex, timeout, id) \ +- ({ int result, ignore1, ignore2; \ ++#define lll_robust_timedlock(futex, timeout, id, private) \ ++ ({ int result, ignore1, ignore2, ignore3; \ + __asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \ +- "jnz _L_robust_mutex_timedlock_%=\n\t" \ ++ "jnz _L_robust_timedlock_%=\n\t" \ + ".subsection 1\n\t" \ +- ".type _L_robust_mutex_timedlock_%=,@function\n" \ +- "_L_robust_mutex_timedlock_%=:\n" \ ++ ".type _L_robust_timedlock_%=,@function\n" \ ++ "_L_robust_timedlock_%=:\n" \ + "1:\tleal %3, %%ecx\n" \ +- "0:\tmovl %7, %%edx\n" \ +- "2:\tcall __lll_robust_mutex_timedlock_wait\n" \ ++ "0:\tmovl %8, %%edx\n" \ ++ "2:\tcall __lll_robust_timedlock_wait\n" \ + "3:\tjmp 18f\n" \ +- "4:\t.size _L_robust_mutex_timedlock_%=, 4b-1b\n\t" \ ++ "4:\t.size _L_robust_timedlock_%=, 4b-1b\n\t" \ + ".previous\n" \ + LLL_STUB_UNWIND_INFO_4 \ + "18:" \ + : "=a" (result), "=c" (ignore1), "=&d" (ignore2), \ +- "=m" (futex) \ +- : "0" (0), "1" (id), "m" (futex), "m" (timeout) \ ++ "=m" (futex), "=S" (ignore3) \ ++ : "0" (0), "1" (id), "m" (futex), "m" (timeout), \ ++ "4" (private) \ + : "memory"); \ + result; }) + +- +-#define lll_mutex_unlock(futex) \ +- (void) ({ int ignore; \ +- __asm __volatile (LOCK_INSTR "subl $1, %0\n\t" \ +- "jne _L_mutex_unlock_%=\n\t" \ +- ".subsection 1\n\t" \ +- ".type _L_mutex_unlock_%=,@function\n" \ +- "_L_mutex_unlock_%=:\n" \ +- "1:\tleal %0, %%eax\n" \ +- "2:\tcall __lll_mutex_unlock_wake\n" \ +- "3:\tjmp 18f\n" \ +- "4:\t.size _L_mutex_unlock_%=, 4b-1b\n\t" \ +- ".previous\n" \ +- LLL_STUB_UNWIND_INFO_3 \ +- "18:" \ +- : "=m" (futex), "=&a" (ignore) \ +- : "m" (futex) \ +- : "memory"); }) +- +- +-#define lll_robust_mutex_unlock(futex) \ +- (void) ({ int ignore; \ +- __asm __volatile (LOCK_INSTR "andl %2, %0\n\t" \ +- "jne _L_robust_mutex_unlock_%=\n\t" \ +- ".subsection 1\n\t" \ +- ".type _L_robust_mutex_unlock_%=,@function\n" \ +- "_L_robust_mutex_unlock_%=:\n\t" \ +- "1:\tleal %0, %%eax\n" \ +- "2:\tcall __lll_mutex_unlock_wake\n" \ +- "3:\tjmp 18f\n" \ +- "4:\t.size _L_robust_mutex_unlock_%=, 4b-1b\n\t"\ +- ".previous\n" \ +- LLL_STUB_UNWIND_INFO_3 \ +- "18:" \ +- : "=m" (futex), "=&a" (ignore) \ +- : "i" (FUTEX_WAITERS), "m" (futex) \ +- : "memory"); }) +- +- +-#define lll_robust_mutex_dead(futex) \ +- (void) ({ int __ignore; \ +- register int _nr asm ("edx") = 1; \ +- __asm __volatile (LOCK_INSTR "orl %5, (%2)\n\t" \ +- LLL_EBX_LOAD \ +- LLL_ENTER_KERNEL \ +- 
LLL_EBX_LOAD \ +- : "=a" (__ignore) \ +- : "0" (SYS_futex), LLL_EBX_REG (&(futex)), \ +- "c" (FUTEX_WAKE), "d" (_nr), \ +- "i" (FUTEX_OWNER_DIED), \ +- "i" (offsetof (tcbhead_t, sysinfo))); }) +- +- +-#define lll_mutex_islocked(futex) \ +- (futex != 0) +- +- +-/* We have a separate internal lock implementation which is not tied +- to binary compatibility. */ +- +-/* Type for lock object. */ +-typedef int lll_lock_t; +- +-/* Initializers for lock. */ +-#define LLL_LOCK_INITIALIZER (0) +-#define LLL_LOCK_INITIALIZER_LOCKED (1) +- +- +-extern int __lll_lock_wait (int val, int *__futex) +- __attribute ((regparm (2))) attribute_hidden; +-extern int __lll_unlock_wake (int *__futex) +- __attribute ((regparm (1))) attribute_hidden; +- +- +-/* The states of a lock are: +- 0 - untaken +- 1 - taken by one user +- 2 - taken by more users */ +- +- + #if defined NOT_IN_libc || defined UP +-# define lll_trylock(futex) lll_mutex_trylock (futex) +-# define lll_lock(futex) lll_mutex_lock (futex) +-# define lll_unlock(futex) lll_mutex_unlock (futex) ++# define __lll_unlock_asm LOCK_INSTR "subl $1, %0\n\t" + #else +-/* Special versions of the macros for use in libc itself. They avoid +- the lock prefix when the thread library is not used. +- +- XXX In future we might even want to avoid it on UP machines. */ +-# include +- +-# define lll_trylock(futex) \ +- ({ unsigned char ret; \ +- __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \ +- "je 0f\n\t" \ +- "lock\n" \ +- "0:\tcmpxchgl %2, %1; setne %0" \ +- : "=a" (ret), "=m" (futex) \ +- : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\ +- "0" (LLL_MUTEX_LOCK_INITIALIZER), \ +- "i" (offsetof (tcbhead_t, multiple_threads)) \ +- : "memory"); \ +- ret; }) +- +- +-# define lll_lock(futex) \ +- (void) ({ int ignore1, ignore2; \ +- __asm __volatile ("cmpl $0, %%gs:%P6\n\t" \ +- "je 0f\n\t" \ +- "lock\n" \ +- "0:\tcmpxchgl %1, %2\n\t" \ +- "jnz _L_lock_%=\n\t" \ +- ".subsection 1\n\t" \ +- ".type _L_lock_%=,@function\n" \ +- "_L_lock_%=:\n" \ +- "1:\tleal %2, %%ecx\n" \ +- "2:\tcall __lll_mutex_lock_wait\n" \ +- "3:\tjmp 18f\n" \ +- "4:\t.size _L_lock_%=, 4b-1b\n\t" \ +- ".previous\n" \ +- LLL_STUB_UNWIND_INFO_3 \ +- "18:" \ +- : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \ +- : "0" (0), "1" (1), "m" (futex), \ +- "i" (offsetof (tcbhead_t, multiple_threads)) \ +- : "memory"); }) +- +- +-# define lll_unlock(futex) \ +- (void) ({ int ignore; \ +- __asm __volatile ("cmpl $0, %%gs:%P3\n\t" \ +- "je 0f\n\t" \ +- "lock\n" \ +- "0:\tsubl $1,%0\n\t" \ +- "jne _L_unlock_%=\n\t" \ +- ".subsection 1\n\t" \ +- ".type _L_unlock_%=,@function\n" \ +- "_L_unlock_%=:\n" \ +- "1:\tleal %0, %%eax\n" \ +- "2:\tcall __lll_mutex_unlock_wake\n" \ +- "3:\tjmp 18f\n\t" \ +- "4:\t.size _L_unlock_%=, 4b-1b\n\t" \ +- ".previous\n" \ +- LLL_STUB_UNWIND_INFO_3 \ +- "18:" \ +- : "=m" (futex), "=&a" (ignore) \ +- : "m" (futex), \ +- "i" (offsetof (tcbhead_t, multiple_threads)) \ +- : "memory"); }) ++# define __lll_unlock_asm "cmpl $0, %%gs:%P3\n\t" \ ++ "je 0f\n\t" \ ++ "lock\n" \ ++ "0:\tsubl $1,%0\n\t" + #endif + ++#define lll_unlock(futex, private) \ ++ (void) \ ++ ({ int ignore; \ ++ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \ ++ __asm __volatile (__lll_unlock_asm \ ++ "jne _L_unlock_%=\n\t" \ ++ ".subsection 1\n\t" \ ++ ".type _L_unlock_%=,@function\n" \ ++ "_L_unlock_%=:\n" \ ++ "1:\tleal %0, %%eax\n" \ ++ "2:\tcall __lll_unlock_wake_private\n" \ ++ "3:\tjmp 18f\n" \ ++ "4:\t.size _L_unlock_%=, 4b-1b\n\t" \ ++ ".previous\n" \ ++ LLL_STUB_UNWIND_INFO_3 \ ++ "18:" \ ++ : 
"=m" (futex), "=&a" (ignore) \ ++ : "m" (futex), "i" (MULTIPLE_THREADS_OFFSET) \ ++ : "memory"); \ ++ else \ ++ { \ ++ int ignore2; \ ++ __asm __volatile (__lll_unlock_asm \ ++ "jne _L_unlock_%=\n\t" \ ++ ".subsection 1\n\t" \ ++ ".type _L_unlock_%=,@function\n" \ ++ "_L_unlock_%=:\n" \ ++ "1:\tleal %0, %%eax\n" \ ++ "0:\tmovl %5, %%ecx\n" \ ++ "2:\tcall __lll_unlock_wake\n" \ ++ "3:\tjmp 18f\n" \ ++ "4:\t.size _L_unlock_%=, 4b-1b\n\t" \ ++ ".previous\n" \ ++ LLL_STUB_UNWIND_INFO_4 \ ++ "18:" \ ++ : "=m" (futex), "=&a" (ignore), "=&c" (ignore2) \ ++ : "i" (MULTIPLE_THREADS_OFFSET), "m" (futex), \ ++ "g" (private) \ ++ : "memory"); \ ++ } \ ++ }) ++ ++#define lll_robust_unlock(futex, private) \ ++ (void) \ ++ ({ int ignore, ignore2; \ ++ __asm __volatile (LOCK_INSTR "andl %3, %0\n\t" \ ++ "jne _L_robust_unlock_%=\n\t" \ ++ ".subsection 1\n\t" \ ++ ".type _L_robust_unlock_%=,@function\n" \ ++ "_L_robust_unlock_%=:\n\t" \ ++ "1:\tleal %0, %%eax\n" \ ++ "0:\tmovl %5, %%ecx\n" \ ++ "2:\tcall __lll_unlock_wake\n" \ ++ "3:\tjmp 18f\n" \ ++ "4:\t.size _L_robust_unlock_%=, 4b-1b\n\t" \ ++ ".previous\n" \ ++ LLL_STUB_UNWIND_INFO_4 \ ++ "18:" \ ++ : "=m" (futex), "=&a" (ignore), "=&c" (ignore2) \ ++ : "i" (FUTEX_WAITERS), "m" (futex), "g" (private) \ ++ : "memory"); \ ++ }) ++ ++ ++#define lll_robust_dead(futex, private) \ ++ (void) \ ++ ({ int __ignore; \ ++ register int _nr asm ("edx") = 1; \ ++ __asm __volatile (LOCK_INSTR "orl %5, (%2)\n\t" \ ++ LLL_EBX_LOAD \ ++ LLL_ENTER_KERNEL \ ++ LLL_EBX_LOAD \ ++ : "=a" (__ignore) \ ++ : "0" (SYS_futex), LLL_EBX_REG (&(futex)), \ ++ "c" (__lll_private_flag (FUTEX_WAKE, private)), \ ++ "d" (_nr), "i" (FUTEX_OWNER_DIED), \ ++ "i" (offsetof (tcbhead_t, sysinfo))); \ ++ }) + + #define lll_islocked(futex) \ + (futex != LLL_LOCK_INITIALIZER) + +- + /* The kernel notifies a process with uses CLONE_CLEARTID via futex + wakeup when the clone terminates. The memory location contains the + thread ID while the clone is running and is reset to zero +@@ -581,28 +572,6 @@ extern int __lll_timedwait_tid (int *tid + } \ + __result; }) + +- +-/* Conditional variable handling. 
*/ +- +-extern void __lll_cond_wait (pthread_cond_t *cond) +- __attribute ((regparm (1))) attribute_hidden; +-extern int __lll_cond_timedwait (pthread_cond_t *cond, +- const struct timespec *abstime) +- __attribute ((regparm (2))) attribute_hidden; +-extern void __lll_cond_wake (pthread_cond_t *cond) +- __attribute ((regparm (1))) attribute_hidden; +-extern void __lll_cond_broadcast (pthread_cond_t *cond) +- __attribute ((regparm (1))) attribute_hidden; +- +- +-#define lll_cond_wait(cond) \ +- __lll_cond_wait (cond) +-#define lll_cond_timedwait(cond, abstime) \ +- __lll_cond_timedwait (cond, abstime) +-#define lll_cond_wake(cond) \ +- __lll_cond_wake (cond) +-#define lll_cond_broadcast(cond) \ +- __lll_cond_broadcast (cond) +- ++#endif /* !__ASSEMBLER__ */ + + #endif /* lowlevellock.h */ +--- libc/nptl/sysdeps/unix/sysv/linux/sem_post.c.jj 2007-06-08 09:13:52.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/sem_post.c 2007-07-29 11:48:55.000000000 +0200 +@@ -36,8 +36,7 @@ __new_sem_post (sem_t *sem) + if (isem->nwaiters > 0) + { + int err = lll_futex_wake (&isem->value, 1, +- // XYZ check mutex flag +- LLL_SHARED); ++ isem->private ^ FUTEX_PRIVATE_FLAG); + if (__builtin_expect (err, 0) < 0) + { + __set_errno (-err); +--- libc/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c.jj 2007-06-08 09:13:52.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c 2007-07-29 11:48:55.000000000 +0200 +@@ -25,7 +25,7 @@ + + + int +-__lll_robust_lock_wait (int *futex) ++__lll_robust_lock_wait (int *futex, int private) + { + int oldval = *futex; + int tid = THREAD_GETMEM (THREAD_SELF, tid); +@@ -44,9 +44,7 @@ __lll_robust_lock_wait (int *futex) + && atomic_compare_and_exchange_bool_acq (futex, newval, oldval)) + continue; + +- lll_futex_wait (futex, newval, +- // XYZ check mutex flag +- LLL_SHARED); ++ lll_futex_wait (futex, newval, private); + + try: + ; +@@ -59,7 +57,8 @@ __lll_robust_lock_wait (int *futex) + + + int +-__lll_robust_timedlock_wait (int *futex, const struct timespec *abstime) ++__lll_robust_timedlock_wait (int *futex, const struct timespec *abstime, ++ int private) + { + /* Reject invalid timeouts. */ + if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) +@@ -102,9 +101,7 @@ __lll_robust_timedlock_wait (int *futex, + && atomic_compare_and_exchange_bool_acq (futex, newval, oldval)) + continue; + +- lll_futex_timed_wait (futex, newval, &rt, +- // XYZ check mutex flag +- LLL_SHARED); ++ lll_futex_timed_wait (futex, newval, &rt, private); + + try: + ; +--- libc/nptl/sysdeps/unix/sysv/linux/register-atfork.c.jj 2005-12-21 23:17:21.000000000 +0100 ++++ libc/nptl/sysdeps/unix/sysv/linux/register-atfork.c 2007-07-29 11:48:55.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc. ++/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + +@@ -24,7 +24,7 @@ + + + /* Lock to protect allocation and deallocation of fork handlers. */ +-lll_lock_t __fork_lock = LLL_LOCK_INITIALIZER; ++int __fork_lock = LLL_LOCK_INITIALIZER; + + + /* Number of pre-allocated handler entries. */ +@@ -85,7 +85,7 @@ __register_atfork (prepare, parent, chil + void *dso_handle; + { + /* Get the lock to not conflict with other allocations. */ +- lll_lock (__fork_lock); ++ lll_lock (__fork_lock, LLL_PRIVATE); + + struct fork_handler *newp = fork_handler_alloc (); + +@@ -102,7 +102,7 @@ __register_atfork (prepare, parent, chil + } + + /* Release the lock. 
*/ +- lll_unlock (__fork_lock); ++ lll_unlock (__fork_lock, LLL_PRIVATE); + + return newp == NULL ? ENOMEM : 0; + } +@@ -112,7 +112,7 @@ libc_hidden_def (__register_atfork) + libc_freeres_fn (free_mem) + { + /* Get the lock to not conflict with running forks. */ +- lll_lock (__fork_lock); ++ lll_lock (__fork_lock, LLL_PRIVATE); + + /* No more fork handlers. */ + __fork_handlers = NULL; +@@ -123,7 +123,7 @@ libc_freeres_fn (free_mem) + memset (&fork_handler_pool, '\0', sizeof (fork_handler_pool)); + + /* Release the lock. */ +- lll_unlock (__fork_lock); ++ lll_unlock (__fork_lock, LLL_PRIVATE); + + /* We can free the memory after releasing the lock. */ + while (runp != NULL) +--- libc/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h.jj 2007-07-29 11:45:14.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h 2007-07-30 22:05:42.000000000 +0200 +@@ -68,9 +68,6 @@ + # endif + #endif + +-/* Initializer for compatibility lock. */ +-#define LLL_MUTEX_LOCK_INITIALIZER (0) +- + #define lll_futex_wait(futex, val, private) \ + lll_futex_timed_wait (futex, val, NULL, private) + +@@ -108,13 +105,13 @@ + }) + + +-#define lll_robust_mutex_dead(futexv) \ ++#define lll_robust_dead(futexv, private) \ + do \ + { \ + int *__futexp = &(futexv); \ + \ + atomic_or (__futexp, FUTEX_OWNER_DIED); \ +- lll_futex_wake (__futexp, 1, LLL_SHARED); \ ++ lll_futex_wake (__futexp, 1, private); \ + } \ + while (0) + +@@ -175,7 +172,7 @@ + + static inline int + __attribute__ ((always_inline)) +-__lll_mutex_trylock (int *futex) ++__lll_trylock (int *futex) + { + unsigned int old; + +@@ -184,12 +181,12 @@ __lll_mutex_trylock (int *futex) + : "0" (0), "d" (1), "m" (*futex) : "cc", "memory" ); + return old != 0; + } +-#define lll_mutex_trylock(futex) __lll_mutex_trylock (&(futex)) ++#define lll_trylock(futex) __lll_trylock (&(futex)) + + + static inline int + __attribute__ ((always_inline)) +-__lll_mutex_cond_trylock (int *futex) ++__lll_cond_trylock (int *futex) + { + unsigned int old; + +@@ -198,12 +195,12 @@ __lll_mutex_cond_trylock (int *futex) + : "0" (0), "d" (2), "m" (*futex) : "cc", "memory" ); + return old != 0; + } +-#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex)) ++#define lll_cond_trylock(futex) __lll_cond_trylock (&(futex)) + + + static inline int + __attribute__ ((always_inline)) +-__lll_robust_mutex_trylock (int *futex, int id) ++__lll_robust_trylock (int *futex, int id) + { + unsigned int old; + +@@ -212,141 +209,121 @@ __lll_robust_mutex_trylock (int *futex, + : "0" (0), "d" (id), "m" (*futex) : "cc", "memory" ); + return old != 0; + } +-#define lll_robust_mutex_trylock(futex, id) \ +- __lll_robust_mutex_trylock (&(futex), id) ++#define lll_robust_trylock(futex, id) \ ++ __lll_robust_trylock (&(futex), id) + + +-extern void __lll_lock_wait (int *futex) attribute_hidden; +-extern int __lll_robust_lock_wait (int *futex) attribute_hidden; ++extern void __lll_lock_wait_private (int *futex) attribute_hidden; ++extern void __lll_lock_wait (int *futex, int private) attribute_hidden; ++extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden; + + static inline void + __attribute__ ((always_inline)) +-__lll_mutex_lock (int *futex) ++__lll_lock (int *futex, int private) + { +- if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0) +- __lll_lock_wait (futex); ++ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 1, 0), 0)) ++ { ++ if (__builtin_constant_p (private) && private == LLL_PRIVATE) ++ __lll_lock_wait_private (futex); ++ else 
++ __lll_lock_wait (futex, private); ++ } + } +-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex)) ++#define lll_lock(futex, private) __lll_lock (&(futex), private) + + static inline int + __attribute__ ((always_inline)) +-__lll_robust_mutex_lock (int *futex, int id) ++__lll_robust_lock (int *futex, int id, int private) + { + int result = 0; +- if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0) +- result = __lll_robust_lock_wait (futex); ++ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, id, 0), ++ 0)) ++ result = __lll_robust_lock_wait (futex, private); + return result; + } +-#define lll_robust_mutex_lock(futex, id) __lll_robust_mutex_lock (&(futex), id) ++#define lll_robust_lock(futex, id, private) \ ++ __lll_robust_lock (&(futex), id, private) + + static inline void + __attribute__ ((always_inline)) +-__lll_mutex_cond_lock (int *futex) ++__lll_cond_lock (int *futex, int private) + { +- if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0) +- __lll_lock_wait (futex); ++ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 2, 0), 0)) ++ __lll_lock_wait (futex, private); + } +-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex)) ++#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private) + +-#define lll_robust_mutex_cond_lock(futex, id) \ +- __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS) ++#define lll_robust_cond_lock(futex, id, private) \ ++ __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private) + + extern int __lll_timedlock_wait +- (int *futex, const struct timespec *) attribute_hidden; ++ (int *futex, const struct timespec *, int private) attribute_hidden; + extern int __lll_robust_timedlock_wait +- (int *futex, const struct timespec *) attribute_hidden; ++ (int *futex, const struct timespec *, int private) attribute_hidden; + + static inline int + __attribute__ ((always_inline)) +-__lll_mutex_timedlock (int *futex, const struct timespec *abstime) ++__lll_timedlock (int *futex, const struct timespec *abstime, int private) + { + int result = 0; +- if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0) +- result = __lll_timedlock_wait (futex, abstime); ++ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 1, 0), 0)) ++ result = __lll_timedlock_wait (futex, abstime, private); + return result; + } +-#define lll_mutex_timedlock(futex, abstime) \ +- __lll_mutex_timedlock (&(futex), abstime) ++#define lll_timedlock(futex, abstime, private) \ ++ __lll_timedlock (&(futex), abstime, private) + + static inline int + __attribute__ ((always_inline)) +-__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime, +- int id) ++__lll_robust_timedlock (int *futex, const struct timespec *abstime, ++ int id, int private) + { + int result = 0; +- if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0) +- result = __lll_robust_timedlock_wait (futex, abstime); ++ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, id, 0), ++ 0)) ++ result = __lll_robust_timedlock_wait (futex, abstime, private); + return result; + } +-#define lll_robust_mutex_timedlock(futex, abstime, id) \ +- __lll_robust_mutex_timedlock (&(futex), abstime, id) ++#define lll_robust_timedlock(futex, abstime, id, private) \ ++ __lll_robust_timedlock (&(futex), abstime, id, private) + + + static inline void + __attribute__ ((always_inline)) +-__lll_mutex_unlock (int *futex) ++__lll_unlock (int *futex, int private) + { + int oldval; + int newval = 0; + + lll_compare_and_swap (futex, 
oldval, newval, "slr %2,%2"); +- if (oldval > 1) +- lll_futex_wake (futex, 1, LLL_SHARED); ++ if (__builtin_expect (oldval > 1, 0)) ++ lll_futex_wake (futex, 1, private); + } +-#define lll_mutex_unlock(futex) \ +- __lll_mutex_unlock(&(futex)) ++#define lll_unlock(futex, private) __lll_unlock(&(futex), private) + + + static inline void + __attribute__ ((always_inline)) +-__lll_robust_mutex_unlock (int *futex, int mask) ++__lll_robust_unlock (int *futex, int private) + { + int oldval; + int newval = 0; + + lll_compare_and_swap (futex, oldval, newval, "slr %2,%2"); +- if (oldval & mask) +- lll_futex_wake (futex, 1, LLL_SHARED); ++ if (__builtin_expect (oldval & FUTEX_WAITERS, 0)) ++ lll_futex_wake (futex, 1, private); + } +-#define lll_robust_mutex_unlock(futex) \ +- __lll_robust_mutex_unlock(&(futex), FUTEX_WAITERS) +- ++#define lll_robust_unlock(futex, private) \ ++ __lll_robust_unlock(&(futex), private) + +-static inline void +-__attribute__ ((always_inline)) +-__lll_mutex_unlock_force (int *futex) +-{ +- *futex = 0; +- lll_futex_wake (futex, 1, LLL_SHARED); +-} +-#define lll_mutex_unlock_force(futex) \ +- __lll_mutex_unlock_force(&(futex)) +- +-#define lll_mutex_islocked(futex) \ ++#define lll_islocked(futex) \ + (futex != 0) + + +-/* We have a separate internal lock implementation which is not tied +- to binary compatibility. We can use the lll_mutex_*. */ +- +-/* Type for lock object. */ +-typedef int lll_lock_t; +- + /* Initializers for lock. */ + #define LLL_LOCK_INITIALIZER (0) + #define LLL_LOCK_INITIALIZER_LOCKED (1) + +-#define lll_trylock(futex) lll_mutex_trylock (futex) +-#define lll_lock(futex) lll_mutex_lock (futex) +-#define lll_unlock(futex) lll_mutex_unlock (futex) +-#define lll_islocked(futex) lll_mutex_islocked (futex) +- +-/* The states of a lock are: +- 1 - untaken +- 0 - taken by one user +- <0 - taken by more users */ +- +- + /* The kernel notifies a process with uses CLONE_CLEARTID via futex + wakeup when the clone terminates. The memory location contains the + thread ID while the clone is running and is reset to zero +@@ -373,25 +350,4 @@ extern int __lll_timedwait_tid (int *, c + __res; \ + }) + +-/* Conditional variable handling. 
*/ +- +-extern void __lll_cond_wait (pthread_cond_t *cond) +- attribute_hidden; +-extern int __lll_cond_timedwait (pthread_cond_t *cond, +- const struct timespec *abstime) +- attribute_hidden; +-extern void __lll_cond_wake (pthread_cond_t *cond) +- attribute_hidden; +-extern void __lll_cond_broadcast (pthread_cond_t *cond) +- attribute_hidden; +- +-#define lll_cond_wait(cond) \ +- __lll_cond_wait (cond) +-#define lll_cond_timedwait(cond, abstime) \ +- __lll_cond_timedwait (cond, abstime) +-#define lll_cond_wake(cond) \ +- __lll_cond_wake (cond) +-#define lll_cond_broadcast(cond) \ +- __lll_cond_broadcast (cond) +- + #endif /* lowlevellock.h */ +--- libc/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c.jj 2007-07-24 10:50:54.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c 2007-07-29 11:48:55.000000000 +0200 +@@ -29,11 +29,37 @@ + int + __new_sem_post (sem_t *sem) + { ++ struct new_sem *isem = (struct new_sem *) sem; ++ ++ __asm __volatile (__lll_rel_instr ::: "memory"); ++ atomic_increment (&isem->value); ++ __asm __volatile (__lll_acq_instr ::: "memory"); ++ if (isem->nwaiters > 0) ++ { ++ int err = lll_futex_wake (&isem->value, 1, ++ isem->private ^ FUTEX_PRIVATE_FLAG); ++ if (__builtin_expect (err, 0) < 0) ++ { ++ __set_errno (-err); ++ return -1; ++ } ++ } ++ return 0; ++} ++versioned_symbol (libpthread, __new_sem_post, sem_post, GLIBC_2_1); ++ ++#if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1) ++ ++int ++attribute_compat_text_section ++__old_sem_post (sem_t *sem) ++{ + int *futex = (int *) sem; + + __asm __volatile (__lll_rel_instr ::: "memory"); + int nr = atomic_increment_val (futex); +- int err = lll_futex_wake (futex, nr, LLL_SHARED); ++ /* We always have to assume it is a shared semaphore. */ ++ int err = lll_futex_wake (futex, 1, LLL_SHARED); + if (__builtin_expect (err, 0) < 0) + { + __set_errno (-err); +@@ -41,8 +67,6 @@ __new_sem_post (sem_t *sem) + } + return 0; + } +-versioned_symbol (libpthread, __new_sem_post, sem_post, GLIBC_2_1); +-#if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1) +-strong_alias (__new_sem_post, __old_sem_post) ++ + compat_symbol (libpthread, __old_sem_post, sem_post, GLIBC_2_0); + #endif +--- libc/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h.jj 2007-07-29 11:45:14.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h 2007-07-29 11:48:55.000000000 +0200 +@@ -69,9 +69,6 @@ + # endif + #endif + +-/* Initializer for compatibility lock. */ +-#define LLL_MUTEX_LOCK_INITIALIZER (0) +- + #define lll_futex_wait(futexp, val, private) \ + lll_futex_timed_wait (futexp, val, NULL, private) + +@@ -97,14 +94,15 @@ + INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \ + }) + +-#define lll_robust_mutex_dead(futexv) \ ++#define lll_robust_dead(futexv, private) \ + do \ + { \ + INTERNAL_SYSCALL_DECL (__err); \ + int *__futexp = &(futexv); \ + \ + atomic_or (__futexp, FUTEX_OWNER_DIED); \ +- INTERNAL_SYSCALL (futex, __err, 4, __futexp, FUTEX_WAKE, 1, 0); \ ++ INTERNAL_SYSCALL (futex, __err, 4, __futexp, \ ++ __lll_private_flag (FUTEX_WAKE, private), 1, 0); \ + } \ + while (0) + +@@ -171,119 +169,111 @@ + __val; \ + }) + +-#define lll_robust_mutex_trylock(lock, id) __lll_robust_trylock (&(lock), id) ++#define lll_robust_trylock(lock, id) __lll_robust_trylock (&(lock), id) + + /* Set *futex to 1 if it is 0, atomically. 
Returns the old value */ + #define __lll_trylock(futex) __lll_robust_trylock (futex, 1) + +-#define lll_mutex_trylock(lock) __lll_trylock (&(lock)) ++#define lll_trylock(lock) __lll_trylock (&(lock)) + + /* Set *futex to 2 if it is 0, atomically. Returns the old value */ + #define __lll_cond_trylock(futex) __lll_robust_trylock (futex, 2) + +-#define lll_mutex_cond_trylock(lock) __lll_cond_trylock (&(lock)) ++#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock)) + + +-extern void __lll_lock_wait (int *futex) attribute_hidden; +-extern int __lll_robust_lock_wait (int *futex) attribute_hidden; ++extern void __lll_lock_wait_private (int *futex) attribute_hidden; ++extern void __lll_lock_wait (int *futex, int private) attribute_hidden; ++extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden; + +-#define lll_mutex_lock(lock) \ ++#define lll_lock(lock, private) \ + (void) ({ \ + int *__futex = &(lock); \ + if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\ + 0) != 0) \ +- __lll_lock_wait (__futex); \ ++ { \ ++ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \ ++ __lll_lock_wait_private (__futex); \ ++ else \ ++ __lll_lock_wait (__futex, private); \ ++ } \ + }) + +-#define lll_robust_mutex_lock(lock, id) \ ++#define lll_robust_lock(lock, id, private) \ + ({ \ + int *__futex = &(lock); \ + int __val = 0; \ + if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \ + 0), 0)) \ +- __val = __lll_robust_lock_wait (__futex); \ ++ __val = __lll_robust_lock_wait (__futex, private); \ + __val; \ + }) + +-#define lll_mutex_cond_lock(lock) \ ++#define lll_cond_lock(lock, private) \ + (void) ({ \ + int *__futex = &(lock); \ + if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 2, 0),\ + 0) != 0) \ +- __lll_lock_wait (__futex); \ ++ __lll_lock_wait (__futex, private); \ + }) + +-#define lll_robust_mutex_cond_lock(lock, id) \ ++#define lll_robust_cond_lock(lock, id, private) \ + ({ \ + int *__futex = &(lock); \ + int __val = 0; \ + int __id = id | FUTEX_WAITERS; \ + if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, __id,\ + 0), 0)) \ +- __val = __lll_robust_lock_wait (__futex); \ ++ __val = __lll_robust_lock_wait (__futex, private); \ + __val; \ + }) + + + extern int __lll_timedlock_wait +- (int *futex, const struct timespec *) attribute_hidden; ++ (int *futex, const struct timespec *, int private) attribute_hidden; + extern int __lll_robust_timedlock_wait +- (int *futex, const struct timespec *) attribute_hidden; ++ (int *futex, const struct timespec *, int private) attribute_hidden; + +-#define lll_mutex_timedlock(lock, abstime) \ ++#define lll_timedlock(lock, abstime, private) \ + ({ \ + int *__futex = &(lock); \ + int __val = 0; \ + if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\ + 0) != 0) \ +- __val = __lll_timedlock_wait (__futex, abstime); \ ++ __val = __lll_timedlock_wait (__futex, abstime, private); \ + __val; \ + }) + +-#define lll_robust_mutex_timedlock(lock, abstime, id) \ ++#define lll_robust_timedlock(lock, abstime, id, private) \ + ({ \ + int *__futex = &(lock); \ + int __val = 0; \ + if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \ + 0), 0)) \ +- __val = __lll_robust_timedlock_wait (__futex, abstime); \ ++ __val = __lll_robust_timedlock_wait (__futex, abstime, private); \ + __val; \ + }) + +-#define lll_mutex_unlock(lock) \ ++#define lll_unlock(lock, private) \ + ((void) ({ \ + int *__futex = &(lock); \ + int __val = 
atomic_exchange_rel (__futex, 0); \ + if (__builtin_expect (__val > 1, 0)) \ +- lll_futex_wake (__futex, 1, LLL_SHARED); \ ++ lll_futex_wake (__futex, 1, private); \ + })) + +-#define lll_robust_mutex_unlock(lock) \ ++#define lll_robust_unlock(lock, private) \ + ((void) ({ \ + int *__futex = &(lock); \ + int __val = atomic_exchange_rel (__futex, 0); \ + if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \ +- lll_futex_wake (__futex, 1, LLL_SHARED); \ +- })) +- +-#define lll_mutex_unlock_force(lock) \ +- ((void) ({ \ +- int *__futex = &(lock); \ +- *__futex = 0; \ +- __asm __volatile (__lll_rel_instr ::: "memory"); \ +- lll_futex_wake (__futex, 1, LLL_SHARED); \ ++ lll_futex_wake (__futex, 1, private); \ + })) + +-#define lll_mutex_islocked(futex) \ ++#define lll_islocked(futex) \ + (futex != 0) + + +-/* Our internal lock implementation is identical to the binary-compatible +- mutex implementation. */ +- +-/* Type for lock object. */ +-typedef int lll_lock_t; +- + /* Initializers for lock. */ + #define LLL_LOCK_INITIALIZER (0) + #define LLL_LOCK_INITIALIZER_LOCKED (1) +@@ -293,11 +283,6 @@ typedef int lll_lock_t; + 1 - taken by one user + >1 - taken by more users */ + +-#define lll_trylock(lock) lll_mutex_trylock (lock) +-#define lll_lock(lock) lll_mutex_lock (lock) +-#define lll_unlock(lock) lll_mutex_unlock (lock) +-#define lll_islocked(lock) lll_mutex_islocked (lock) +- + /* The kernel notifies a process which uses CLONE_CLEARTID via futex + wakeup when the clone terminates. The memory location contains the + thread ID while the clone is running and is reset to zero +@@ -320,26 +305,4 @@ extern int __lll_timedwait_tid (int *, c + __res; \ + }) + +- +-/* Conditional variable handling. */ +- +-extern void __lll_cond_wait (pthread_cond_t *cond) +- attribute_hidden; +-extern int __lll_cond_timedwait (pthread_cond_t *cond, +- const struct timespec *abstime) +- attribute_hidden; +-extern void __lll_cond_wake (pthread_cond_t *cond) +- attribute_hidden; +-extern void __lll_cond_broadcast (pthread_cond_t *cond) +- attribute_hidden; +- +-#define lll_cond_wait(cond) \ +- __lll_cond_wait (cond) +-#define lll_cond_timedwait(cond, abstime) \ +- __lll_cond_timedwait (cond, abstime) +-#define lll_cond_wake(cond) \ +- __lll_cond_wake (cond) +-#define lll_cond_broadcast(cond) \ +- __lll_cond_broadcast (cond) +- + #endif /* lowlevellock.h */ +--- libc/nptl/sysdeps/unix/sysv/linux/lowlevellock.c.jj 2007-06-08 09:13:52.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/lowlevellock.c 2007-07-29 11:48:55.000000000 +0200 +@@ -25,22 +25,35 @@ + + + void +-__lll_lock_wait (int *futex) ++__lll_lock_wait_private (int *futex) + { + do + { + int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1); + if (oldval != 0) +- lll_futex_wait (futex, 2, +- // XYZ check mutex flag +- LLL_SHARED); ++ lll_futex_wait (futex, 2, LLL_PRIVATE); ++ } ++ while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0); ++} ++ ++ ++/* These functions doesn't get included in libc.so */ ++#ifdef IS_IN_libpthread ++void ++__lll_lock_wait (int *futex, int private) ++{ ++ do ++ { ++ int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1); ++ if (oldval != 0) ++ lll_futex_wait (futex, 2, private); + } + while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0); + } + + + int +-__lll_timedlock_wait (int *futex, const struct timespec *abstime) ++__lll_timedlock_wait (int *futex, const struct timespec *abstime, int private) + { + /* Reject invalid timeouts. 
*/ + if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) +@@ -70,9 +83,7 @@ __lll_timedlock_wait (int *futex, const + /* Wait. */ + int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1); + if (oldval != 0) +- lll_futex_timed_wait (futex, 2, &rt, +- // XYZ check mutex flag +- LLL_SHARED); ++ lll_futex_timed_wait (futex, 2, &rt, private); + } + while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0); + +@@ -80,8 +91,6 @@ __lll_timedlock_wait (int *futex, const + } + + +-/* This function doesn't get included in libc.so */ +-#ifdef IS_IN_libpthread + int + __lll_timedwait_tid (int *tidp, const struct timespec *abstime) + { +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S.jj 2007-05-24 16:41:25.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S 2007-07-29 11:48:55.000000000 +0200 +@@ -19,17 +19,8 @@ + + #include + #include ++#include + +-#ifndef UP +-# define LOCK lock +-#else +-# define LOCK +-#endif +- +-#define SYS_futex 202 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +-#define FUTEX_PRIVATE_FLAG 128 + + .comm __fork_generation, 4, 4 + +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S.jj 2007-07-24 10:50:55.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S 2007-07-29 11:48:55.000000000 +0200 +@@ -18,27 +18,15 @@ + 02111-1307 USA. */ + + #include ++#include + #include + #include + #include + + +-#define SYS_futex 202 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +-#define FUTEX_PRIVATE_FLAG 128 +- + /* For the calculation see asm/vsyscall.h. */ + #define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000 + +- +-#ifndef UP +-# define LOCK lock +-#else +-# define LOCK +-#endif +- +- + .text + + .globl pthread_rwlock_timedrdlock +@@ -172,11 +160,11 @@ pthread_rwlock_timedrdlock: + popq %r12 + retq + +-1: ++1: movl PSHARED(%rdi), %esi + #if MUTEX != 0 + addq $MUTEX, %rdi + #endif +- callq __lll_mutex_lock_wait ++ callq __lll_lock_wait + jmp 2b + + 14: cmpl %fs:TID, %eax +@@ -184,13 +172,13 @@ pthread_rwlock_timedrdlock: + movl $EDEADLK, %edx + jmp 9b + +-6: ++6: movl PSHARED(%r12), %esi + #if MUTEX == 0 + movq %r12, %rdi + #else + leal MUTEX(%r12), %rdi + #endif +- callq __lll_mutex_unlock_wake ++ callq __lll_unlock_wake + jmp 7b + + /* Overflow. */ +@@ -203,22 +191,22 @@ pthread_rwlock_timedrdlock: + movl $EAGAIN, %edx + jmp 9b + +-10: ++10: movl PSHARED(%r12), %esi + #if MUTEX == 0 + movq %r12, %rdi + #else + leaq MUTEX(%r12), %rdi + #endif +- callq __lll_mutex_unlock_wake ++ callq __lll_unlock_wake + jmp 11b + +-12: ++12: movl PSHARED(%r12), %esi + #if MUTEX == 0 + movq %r12, %rdi + #else + leaq MUTEX(%r12), %rdi + #endif +- callq __lll_mutex_lock_wait ++ callq __lll_lock_wait + jmp 13b + + 16: movq $-ETIMEDOUT, %rdx +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S.jj 2007-07-24 10:50:55.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S 2007-07-29 11:48:55.000000000 +0200 +@@ -18,23 +18,12 @@ + 02111-1307 USA. 
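For reference, the absolute-to-relative timeout conversion that __lll_timedlock_wait performs before issuing FUTEX_WAIT looks roughly like the following in C. This is a sketch, not the glibc routine: the function name is invented, and clock_gettime is used where the glibc versions read the current time via gettimeofday (the assembly versions use the gettimeofday vsyscall).

#include <errno.h>
#include <time.h>

/* Convert an absolute CLOCK_REALTIME deadline into the relative timeout
   FUTEX_WAIT expects.  Returns 0 on success, EINVAL for a malformed
   timespec, or ETIMEDOUT if the deadline has already passed.  */
static int
toy_abstime_to_reltime (const struct timespec *abstime, struct timespec *rt)
{
  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
    return EINVAL;                      /* Reject invalid timeouts.  */

  struct timespec now;
  clock_gettime (CLOCK_REALTIME, &now);

  rt->tv_sec = abstime->tv_sec - now.tv_sec;
  rt->tv_nsec = abstime->tv_nsec - now.tv_nsec;
  if (rt->tv_nsec < 0)
    {
      rt->tv_nsec += 1000000000;
      --rt->tv_sec;
    }

  return rt->tv_sec < 0 ? ETIMEDOUT : 0;
}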
*/ + + #include ++#include + #include + #include + #include + + +-#define SYS_futex 202 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +-#define FUTEX_PRIVATE_FLAG 128 +- +-#ifndef UP +-# define LOCK lock +-#else +-# define LOCK +-#endif +- +- + .text + + .globl __pthread_rwlock_wrlock +@@ -121,11 +110,11 @@ __pthread_rwlock_wrlock: + movq %rdx, %rax + retq + +-1: ++1: movl PSHARED(%rdi), %esi + #if MUTEX != 0 + addq $MUTEX, %rdi + #endif +- callq __lll_mutex_lock_wait ++ callq __lll_lock_wait + #if MUTEX != 0 + subq $MUTEX, %rdi + #endif +@@ -136,32 +125,32 @@ __pthread_rwlock_wrlock: + movl $EDEADLK, %edx + jmp 9b + +-6: ++6: movl PSHARED(%rdi), %esi + #if MUTEX != 0 + addq $MUTEX, %rdi + #endif +- callq __lll_mutex_unlock_wake ++ callq __lll_unlock_wake + jmp 7b + + 4: decl WRITERS_QUEUED(%rdi) + movl $EAGAIN, %edx + jmp 9b + +-10: ++10: movl PSHARED(%rdi), %esi + #if MUTEX != 0 + addq $MUTEX, %rdi + #endif +- callq __lll_mutex_unlock_wake ++ callq __lll_unlock_wake + #if MUTEX != 0 + subq $MUTEX, %rdi + #endif + jmp 11b + +-12: ++12: movl PSHARED(%rdi), %esi + #if MUTEX != 0 + addq $MUTEX, %rdi + #endif +- callq __lll_mutex_lock_wait ++ callq __lll_lock_wait + #if MUTEX != 0 + subq $MUTEX, %rdi + #endif +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S 2007-07-29 11:48:55.000000000 +0200 +@@ -18,15 +18,10 @@ + 02111-1307 USA. */ + + #include ++#include + #include + #include + +-#ifndef UP +-# define LOCK lock +-#else +-# define +-#endif +- + .text + + .globl sem_trywait +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S.jj 2006-09-05 16:46:43.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S 2007-07-30 15:56:48.000000000 +0200 +@@ -1,4 +1,5 @@ +-/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. ++/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 ++ Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + +@@ -19,33 +20,40 @@ + + #include + #include ++#include + #include ++#include + + .text + +-#ifndef LOCK +-# ifdef UP +-# define LOCK ++#define FUTEX_WAITERS 0x80000000 ++#define FUTEX_OWNER_DIED 0x40000000 ++ ++#ifdef __ASSUME_PRIVATE_FUTEX ++# define LOAD_FUTEX_WAIT(reg) \ ++ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg ++#else ++# if FUTEX_WAIT == 0 ++# define LOAD_FUTEX_WAIT(reg) \ ++ xorl $FUTEX_PRIVATE_FLAG, reg ; \ ++ andl %fs:PRIVATE_FUTEX, reg + # else +-# define LOCK lock ++# define LOAD_FUTEX_WAIT(reg) \ ++ xorl $FUTEX_PRIVATE_FLAG, reg ; \ ++ andl %fs:PRIVATE_FUTEX, reg ; \ ++ orl $FUTEX_WAIT, reg + # endif + #endif + +-#define SYS_futex 202 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +-#define FUTEX_WAITERS 0x80000000 +-#define FUTEX_OWNER_DIED 0x40000000 +- + /* For the calculation see asm/vsyscall.h. */ + #define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000 + + +- .globl __lll_robust_mutex_lock_wait +- .type __lll_robust_mutex_lock_wait,@function +- .hidden __lll_robust_mutex_lock_wait ++ .globl __lll_robust_lock_wait ++ .type __lll_robust_lock_wait,@function ++ .hidden __lll_robust_lock_wait + .align 16 +-__lll_robust_mutex_lock_wait: ++__lll_robust_lock_wait: + cfi_startproc + pushq %r10 + cfi_adjust_cfa_offset(8) +@@ -55,11 +63,7 @@ __lll_robust_mutex_lock_wait: + cfi_offset(%rdx, -24) + + xorq %r10, %r10 /* No timeout. 
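The robust variants use a different lock-word convention than the 0/1/2 protocol: the futex carries the owner's TID, FUTEX_WAITERS is ORed in before a waiter sleeps, and the kernel sets FUTEX_OWNER_DIED if the owner exits without unlocking. A rough C rendering of that layout follows; it is a sketch only, the helper names are invented, FUTEX_WAITERS and FUTEX_OWNER_DIED match the constants in the assembly here, and FUTEX_TID_MASK is added as an assumption about how the remaining bits encode the TID.

#define FUTEX_WAITERS     0x80000000
#define FUTEX_OWNER_DIED  0x40000000
#define FUTEX_TID_MASK    0x3fffffff   /* assumption: low bits hold the TID */

/* Owner of a robust lock word, or 0 if the lock is free.  */
static inline int
robust_owner (int val)
{
  return val & FUTEX_TID_MASK;
}

static inline int
robust_has_waiters (int val)
{
  return (val & FUTEX_WAITERS) != 0;
}

/* Set by the kernel through the robust list when the owner died.  */
static inline int
robust_owner_died (int val)
{
  return (val & FUTEX_OWNER_DIED) != 0;
}

/* Mirrors lll_robust_trylock: try to install our TID and return the old
   word so the caller can inspect FUTEX_OWNER_DIED / FUTEX_WAITERS.  */
static inline int
toy_robust_trylock (int *futexp, int tid)
{
  int expected = 0;
  __atomic_compare_exchange_n (futexp, &expected, tid, 0,
                               __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
  return expected;   /* 0 on success, previous owner word otherwise.  */
}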
*/ +-#if FUTEX_WAIT == 0 +- xorl %esi, %esi +-#else +- movl $FUTEX_WAIT, %esi +-#endif ++ LOAD_FUTEX_WAIT (%esi) + + 4: movl %eax, %edx + orl $FUTEX_WAITERS, %edx +@@ -97,14 +101,14 @@ __lll_robust_mutex_lock_wait: + cfi_restore(%r10) + retq + cfi_endproc +- .size __lll_robust_mutex_lock_wait,.-__lll_robust_mutex_lock_wait ++ .size __lll_robust_lock_wait,.-__lll_robust_lock_wait + + +- .globl __lll_robust_mutex_timedlock_wait +- .type __lll_robust_mutex_timedlock_wait,@function +- .hidden __lll_robust_mutex_timedlock_wait ++ .globl __lll_robust_timedlock_wait ++ .type __lll_robust_timedlock_wait,@function ++ .hidden __lll_robust_timedlock_wait + .align 16 +-__lll_robust_mutex_timedlock_wait: ++__lll_robust_timedlock_wait: + cfi_startproc + /* Check for a valid timeout value. */ + cmpq $1000000000, 8(%rdx) +@@ -122,10 +126,12 @@ __lll_robust_mutex_timedlock_wait: + cfi_offset(%r9, -24) + cfi_offset(%r12, -32) + cfi_offset(%r13, -40) ++ pushq %rsi ++ cfi_adjust_cfa_offset(8) + + /* Stack frame for the timespec and timeval structs. */ +- subq $24, %rsp +- cfi_adjust_cfa_offset(24) ++ subq $32, %rsp ++ cfi_adjust_cfa_offset(32) + + movq %rdi, %r12 + movq %rdx, %r13 +@@ -174,11 +180,8 @@ __lll_robust_mutex_timedlock_wait: + jnz 5f + + 2: movq %rsp, %r10 +-#if FUTEX_WAIT == 0 +- xorl %esi, %esi +-#else +- movl $FUTEX_WAIT, %esi +-#endif ++ movl 32(%rsp), %esi ++ LOAD_FUTEX_WAIT (%esi) + movq %r12, %rdi + movl $SYS_futex, %eax + syscall +@@ -195,8 +198,8 @@ __lll_robust_mutex_timedlock_wait: + cmpxchgl %edx, (%r12) + jnz 7f + +-6: addq $24, %rsp +- cfi_adjust_cfa_offset(-24) ++6: addq $40, %rsp ++ cfi_adjust_cfa_offset(-40) + popq %r13 + cfi_adjust_cfa_offset(-8) + cfi_restore(%r13) +@@ -214,7 +217,7 @@ __lll_robust_mutex_timedlock_wait: + 3: movl $EINVAL, %eax + retq + +- cfi_adjust_cfa_offset(56) ++ cfi_adjust_cfa_offset(72) + cfi_offset(%r8, -16) + cfi_offset(%r9, -24) + cfi_offset(%r12, -32) +@@ -226,4 +229,4 @@ __lll_robust_mutex_timedlock_wait: + 8: movl $ETIMEDOUT, %eax + jmp 6b + cfi_endproc +- .size __lll_robust_mutex_timedlock_wait,.-__lll_robust_mutex_timedlock_wait ++ .size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S.jj 2007-07-24 10:50:55.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S 2007-07-29 11:48:55.000000000 +0200 +@@ -18,23 +18,12 @@ + 02111-1307 USA. 
*/ + + #include ++#include + #include + #include + #include + + +-#define SYS_futex 202 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +-#define FUTEX_PRIVATE_FLAG 128 +- +-#ifndef UP +-# define LOCK lock +-#else +-# define LOCK +-#endif +- +- + .text + + .globl __pthread_rwlock_rdlock +@@ -123,11 +112,11 @@ __pthread_rwlock_rdlock: + movq %rdx, %rax + retq + +-1: ++1: movl PSHARED(%rdi), %esi + #if MUTEX != 0 + addq $MUTEX, %rdi + #endif +- callq __lll_mutex_lock_wait ++ callq __lll_lock_wait + #if MUTEX != 0 + subq $MUTEX, %rdi + #endif +@@ -139,11 +128,11 @@ __pthread_rwlock_rdlock: + movl $EDEADLK, %edx + jmp 9b + +-6: ++6: movl PSHARED(%rdi), %esi + #if MUTEX != 0 + addq $MUTEX, %rdi + #endif +- callq __lll_mutex_unlock_wake ++ callq __lll_unlock_wake + #if MUTEX != 0 + subq $MUTEX, %rdi + #endif +@@ -159,21 +148,21 @@ __pthread_rwlock_rdlock: + movl $EAGAIN, %edx + jmp 9b + +-10: ++10: movl PSHARED(%rdi), %esi + #if MUTEX != 0 + addq $MUTEX, %rdi + #endif +- callq __lll_mutex_unlock_wake ++ callq __lll_unlock_wake + #if MUTEX != 0 + subq $MUTEX, %rdi + #endif + jmp 11b + +-12: ++12: movl PSHARED(%rdi), %esi + #if MUTEX == 0 + addq $MUTEX, %rdi + #endif +- callq __lll_mutex_lock_wait ++ callq __lll_lock_wait + #if MUTEX != 0 + subq $MUTEX, %rdi + #endif +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S 2007-07-31 12:40:13.000000000 +0200 +@@ -19,33 +19,46 @@ + + #include + #include ++#include ++#include + + .text + +-#ifndef LOCK +-# ifdef UP +-# define LOCK ++#ifdef __ASSUME_PRIVATE_FUTEX ++# define LOAD_PRIVATE_FUTEX_WAIT(reg) \ ++ movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg ++# define LOAD_PRIVATE_FUTEX_WAKE(reg) \ ++ movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg ++# define LOAD_FUTEX_WAIT(reg) \ ++ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg ++# define LOAD_FUTEX_WAKE(reg) \ ++ xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg ++#else ++# if FUTEX_WAIT == 0 ++# define LOAD_PRIVATE_FUTEX_WAIT(reg) \ ++ movl %fs:PRIVATE_FUTEX, reg + # else +-# define LOCK lock ++# define LOAD_PRIVATE_FUTEX_WAIT(reg) \ ++ movl %fs:PRIVATE_FUTEX, reg ; \ ++ orl $FUTEX_WAIT, reg + # endif +-#endif +- +-#define SYS_futex 202 +-#ifndef FUTEX_WAIT +-# define FUTEX_WAIT 0 +-# define FUTEX_WAKE 1 +-#endif +- +-#ifndef LOAD_FUTEX_WAIT ++# define LOAD_PRIVATE_FUTEX_WAKE(reg) \ ++ movl %fs:PRIVATE_FUTEX, reg ; \ ++ orl $FUTEX_WAKE, reg + # if FUTEX_WAIT == 0 + # define LOAD_FUTEX_WAIT(reg) \ +- xorl reg, reg ++ xorl $FUTEX_PRIVATE_FLAG, reg ; \ ++ andl %fs:PRIVATE_FUTEX, reg + # else + # define LOAD_FUTEX_WAIT(reg) \ +- movl $FUTEX_WAIT, reg ++ xorl $FUTEX_PRIVATE_FLAG, reg ; \ ++ andl %fs:PRIVATE_FUTEX, reg ; \ ++ orl $FUTEX_WAIT, reg + # endif + # define LOAD_FUTEX_WAKE(reg) \ +- movl $FUTEX_WAKE, reg ++ xorl $FUTEX_PRIVATE_FLAG, reg ; \ ++ andl %fs:PRIVATE_FUTEX, reg ; \ ++ orl $FUTEX_WAKE, reg + #endif + + +@@ -53,11 +66,11 @@ + #define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000 + + +- .globl __lll_mutex_lock_wait +- .type __lll_mutex_lock_wait,@function +- .hidden __lll_mutex_lock_wait ++ .globl __lll_lock_wait_private ++ .type __lll_lock_wait_private,@function ++ .hidden __lll_lock_wait_private + .align 16 +-__lll_mutex_lock_wait: ++__lll_lock_wait_private: + cfi_startproc + pushq %r10 + cfi_adjust_cfa_offset(8) +@@ -67,7 +80,7 @@ __lll_mutex_lock_wait: + cfi_offset(%rdx, -24) + xorq %r10, %r10 /* No timeout. 
*/ + movl $2, %edx +- LOAD_FUTEX_WAIT (%esi) ++ LOAD_PRIVATE_FUTEX_WAIT (%esi) + + cmpl %edx, %eax /* NB: %edx == 2 */ + jne 2f +@@ -89,15 +102,52 @@ __lll_mutex_lock_wait: + cfi_restore(%r10) + retq + cfi_endproc +- .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait +- ++ .size __lll_lock_wait_private,.-__lll_lock_wait_private + + #ifdef NOT_IN_libc +- .globl __lll_mutex_timedlock_wait +- .type __lll_mutex_timedlock_wait,@function +- .hidden __lll_mutex_timedlock_wait ++ .globl __lll_lock_wait ++ .type __lll_lock_wait,@function ++ .hidden __lll_lock_wait + .align 16 +-__lll_mutex_timedlock_wait: ++__lll_lock_wait: ++ cfi_startproc ++ pushq %r10 ++ cfi_adjust_cfa_offset(8) ++ pushq %rdx ++ cfi_adjust_cfa_offset(8) ++ cfi_offset(%r10, -16) ++ cfi_offset(%rdx, -24) ++ xorq %r10, %r10 /* No timeout. */ ++ movl $2, %edx ++ LOAD_FUTEX_WAIT (%esi) ++ ++ cmpl %edx, %eax /* NB: %edx == 2 */ ++ jne 2f ++ ++1: movl $SYS_futex, %eax ++ syscall ++ ++2: movl %edx, %eax ++ xchgl %eax, (%rdi) /* NB: lock is implied */ ++ ++ testl %eax, %eax ++ jnz 1b ++ ++ popq %rdx ++ cfi_adjust_cfa_offset(-8) ++ cfi_restore(%rdx) ++ popq %r10 ++ cfi_adjust_cfa_offset(-8) ++ cfi_restore(%r10) ++ retq ++ cfi_endproc ++ .size __lll_lock_wait,.-__lll_lock_wait ++ ++ .globl __lll_timedlock_wait ++ .type __lll_timedlock_wait,@function ++ .hidden __lll_timedlock_wait ++ .align 16 ++__lll_timedlock_wait: + cfi_startproc + /* Check for a valid timeout value. */ + cmpq $1000000000, 8(%rdx) +@@ -118,10 +168,12 @@ __lll_mutex_timedlock_wait: + cfi_offset(%r12, -32) + cfi_offset(%r13, -40) + cfi_offset(%r14, -48) ++ pushq %rsi ++ cfi_adjust_cfa_offset(8) + + /* Stack frame for the timespec and timeval structs. */ +- subq $16, %rsp +- cfi_adjust_cfa_offset(16) ++ subq $24, %rsp ++ cfi_adjust_cfa_offset(24) + + movq %rdi, %r12 + movq %rdx, %r13 +@@ -162,6 +214,7 @@ __lll_mutex_timedlock_wait: + je 8f + + movq %rsp, %r10 ++ movl 24(%rsp), %esi + LOAD_FUTEX_WAIT (%esi) + movq %r12, %rdi + movl $SYS_futex, %eax +@@ -174,8 +227,8 @@ __lll_mutex_timedlock_wait: + cmpxchgl %edx, (%r12) + jnz 7f + +-6: addq $16, %rsp +- cfi_adjust_cfa_offset(-16) ++6: addq $32, %rsp ++ cfi_adjust_cfa_offset(-32) + popq %r14 + cfi_adjust_cfa_offset(-8) + cfi_restore(%r14) +@@ -196,7 +249,7 @@ __lll_mutex_timedlock_wait: + 3: movl $EINVAL, %eax + retq + +- cfi_adjust_cfa_offset(56) ++ cfi_adjust_cfa_offset(72) + cfi_offset(%r8, -16) + cfi_offset(%r9, -24) + cfi_offset(%r12, -32) +@@ -216,15 +269,15 @@ __lll_mutex_timedlock_wait: + 5: movl $ETIMEDOUT, %eax + jmp 6b + cfi_endproc +- .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait ++ .size __lll_timedlock_wait,.-__lll_timedlock_wait + #endif + + +- .globl __lll_mutex_unlock_wake +- .type __lll_mutex_unlock_wake,@function +- .hidden __lll_mutex_unlock_wake ++ .globl __lll_unlock_wake_private ++ .type __lll_unlock_wake_private,@function ++ .hidden __lll_unlock_wake_private + .align 16 +-__lll_mutex_unlock_wake: ++__lll_unlock_wake_private: + cfi_startproc + pushq %rsi + cfi_adjust_cfa_offset(8) +@@ -234,7 +287,7 @@ __lll_mutex_unlock_wake: + cfi_offset(%rdx, -24) + + movl $0, (%rdi) +- LOAD_FUTEX_WAKE (%esi) ++ LOAD_PRIVATE_FUTEX_WAKE (%esi) + movl $1, %edx /* Wake one thread. 
*/ + movl $SYS_futex, %eax + syscall +@@ -247,10 +300,38 @@ __lll_mutex_unlock_wake: + cfi_restore(%rsi) + retq + cfi_endproc +- .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake +- ++ .size __lll_unlock_wake_private,.-__lll_unlock_wake_private + + #ifdef NOT_IN_libc ++ .globl __lll_unlock_wake ++ .type __lll_unlock_wake,@function ++ .hidden __lll_unlock_wake ++ .align 16 ++__lll_unlock_wake: ++ cfi_startproc ++ pushq %rsi ++ cfi_adjust_cfa_offset(8) ++ pushq %rdx ++ cfi_adjust_cfa_offset(8) ++ cfi_offset(%rsi, -16) ++ cfi_offset(%rdx, -24) ++ ++ movl $0, (%rdi) ++ LOAD_FUTEX_WAKE (%esi) ++ movl $1, %edx /* Wake one thread. */ ++ movl $SYS_futex, %eax ++ syscall ++ ++ popq %rdx ++ cfi_adjust_cfa_offset(-8) ++ cfi_restore(%rdx) ++ popq %rsi ++ cfi_adjust_cfa_offset(-8) ++ cfi_restore(%rsi) ++ retq ++ cfi_endproc ++ .size __lll_unlock_wake,.-__lll_unlock_wake ++ + .globl __lll_timedwait_tid + .type __lll_timedwait_tid,@function + .hidden __lll_timedwait_tid +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S.jj 2006-07-29 06:31:49.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S 2007-07-29 11:48:55.000000000 +0200 +@@ -1,4 +1,5 @@ +-/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. ++/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 ++ Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + +@@ -19,23 +20,11 @@ + + #include + #include ++#include + #include + #include + #include +- +-#ifdef UP +-# define LOCK +-#else +-# define LOCK lock +-#endif +- +-#define SYS_futex 202 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +-#define FUTEX_REQUEUE 3 +-#define FUTEX_CMP_REQUEUE 4 +- +-#define EINVAL 22 ++#include + + + .text +@@ -115,7 +104,9 @@ __pthread_cond_broadcast: + #if cond_lock != 0 + addq $cond_lock, %rdi + #endif +- callq __lll_mutex_lock_wait ++ /* XYZ */ ++ movl $LLL_SHARED, %esi ++ callq __lll_lock_wait + #if cond_lock != 0 + subq $cond_lock, %rdi + #endif +@@ -123,12 +114,16 @@ __pthread_cond_broadcast: + + /* Unlock in loop requires wakeup. */ + 5: addq $cond_lock-cond_futex, %rdi +- callq __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %esi ++ callq __lll_unlock_wake + jmp 6b + + /* Unlock in loop requires wakeup. */ + 7: addq $cond_lock-cond_futex, %rdi +- callq __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %esi ++ callq __lll_unlock_wake + subq $cond_lock-cond_futex, %rdi + jmp 8b + +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S 2007-07-29 11:48:55.000000000 +0200 +@@ -18,19 +18,11 @@ + 02111-1307 USA. */ + + #include ++#include + #include + #include + #include + +-#ifndef UP +-# define LOCK lock +-#else +-# define +-#endif +- +-#define SYS_futex 202 +-#define FUTEX_WAKE 1 +- + + .text + +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S.jj 2007-07-24 10:50:55.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S 2007-07-29 11:48:55.000000000 +0200 +@@ -18,22 +18,11 @@ + 02111-1307 USA. 
*/ + + #include ++#include + #include + #include + + +-#define SYS_futex 202 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +-#define FUTEX_PRIVATE_FLAG 128 +- +-#ifndef UP +-# define LOCK lock +-#else +-# define LOCK +-#endif +- +- + .text + + .globl __pthread_rwlock_unlock +@@ -107,28 +96,28 @@ __pthread_rwlock_unlock: + 4: xorl %eax, %eax + retq + +-1: ++1: movl PSHARED(%rdi), %esi + #if MUTEX != 0 + addq $MUTEX, %rdi + #endif +- callq __lll_mutex_lock_wait ++ callq __lll_lock_wait + #if MUTEX != 0 + subq $MUTEX, %rdi + #endif + jmp 2b + +-3: ++3: movl PSHARED(%rdi), %esi + #if MUTEX != 0 + addq $MUTEX, %rdi + #endif +- callq __lll_mutex_unlock_wake ++ callq __lll_unlock_wake + jmp 4b + +-7: ++7: movl PSHARED(%rdi), %esi + #if MUTEX != 0 + addq $MUTEX, %rdi + #endif +- callq __lll_mutex_unlock_wake ++ callq __lll_unlock_wake + jmp 8b + + .size __pthread_rwlock_unlock,.-__pthread_rwlock_unlock +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S 2007-07-29 11:48:55.000000000 +0200 +@@ -17,19 +17,4 @@ + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +-#include +- +-/* All locks in libc are private. Use the kernel feature if possible. */ +-#define FUTEX_PRIVATE_FLAG 128 +-#ifdef __ASSUME_PRIVATE_FUTEX +-# define FUTEX_WAIT (0 | FUTEX_PRIVATE_FLAG) +-# define FUTEX_WAKE (1 | FUTEX_PRIVATE_FLAG) +-#else +-# define LOAD_FUTEX_WAIT(reg) \ +- movl %fs:PRIVATE_FUTEX, reg +-# define LOAD_FUTEX_WAKE(reg) \ +- movl %fs:PRIVATE_FUTEX, reg ; \ +- orl $FUTEX_WAKE, reg +-#endif +- + #include "lowlevellock.S" +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S 2007-07-29 11:48:55.000000000 +0200 +@@ -19,19 +19,10 @@ + + #include + #include ++#include + #include + #include + +-#ifdef UP +-# define LOCK +-#else +-# define LOCK lock +-#endif +- +-#define SYS_futex 202 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +- + + .text + +@@ -58,7 +49,9 @@ __condvar_cleanup: + #if cond_lock != 0 + addq $cond_lock, %rdi + #endif +- callq __lll_mutex_lock_wait ++ /* XYZ */ ++ movl $LLL_SHARED, %esi ++ callq __lll_lock_wait + #if cond_lock != 0 + subq $cond_lock, %rdi + #endif +@@ -105,7 +98,9 @@ __condvar_cleanup: + #if cond_lock != 0 + addq $cond_lock, %rdi + #endif +- callq __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %esi ++ callq __lll_unlock_wake + + /* Wake up all waiters to make sure no signal gets lost. */ + 2: testq %r12, %r12 +@@ -307,7 +302,9 @@ __pthread_cond_wait: + #if cond_lock != 0 + addq $cond_lock, %rdi + #endif +- callq __lll_mutex_lock_wait ++ /* XYZ */ ++ movl $LLL_SHARED, %esi ++ callq __lll_lock_wait + jmp 2b + + /* Unlock in loop requires wakeup. */ +@@ -315,7 +312,9 @@ __pthread_cond_wait: + #if cond_lock != 0 + addq $cond_lock, %rdi + #endif +- callq __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %esi ++ callq __lll_unlock_wake + jmp 4b + + /* Locking in loop failed. 
*/ +@@ -323,7 +322,9 @@ __pthread_cond_wait: + #if cond_lock != 0 + addq $cond_lock, %rdi + #endif +- callq __lll_mutex_lock_wait ++ /* XYZ */ ++ movl $LLL_SHARED, %esi ++ callq __lll_lock_wait + #if cond_lock != 0 + subq $cond_lock, %rdi + #endif +@@ -334,7 +335,9 @@ __pthread_cond_wait: + #if cond_lock != 0 + addq $cond_lock, %rdi + #endif +- callq __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %esi ++ callq __lll_unlock_wake + jmp 11b + + /* The initial unlocking of the mutex failed. */ +@@ -351,7 +354,9 @@ __pthread_cond_wait: + #if cond_lock != 0 + addq $cond_lock, %rdi + #endif +- callq __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %esi ++ callq __lll_unlock_wake + + 13: movq %r10, %rax + jmp 14b +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S.jj 2005-09-08 19:40:52.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S 2007-07-29 11:48:55.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc. ++/* Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + +@@ -19,23 +19,10 @@ + + #include + #include ++#include + #include + #include +- +-#ifdef UP +-# define LOCK +-#else +-# define LOCK lock +-#endif +- +-#define SYS_futex 202 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +-#define FUTEX_WAKE_OP 5 +- +-#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1) +- +-#define EINVAL 22 ++#include + + + .text +@@ -111,7 +98,9 @@ __pthread_cond_signal: + #if cond_lock != 0 + addq $cond_lock, %rdi + #endif +- callq __lll_mutex_lock_wait ++ /* XYZ */ ++ movl $LLL_SHARED, %esi ++ callq __lll_lock_wait + #if cond_lock != 0 + subq $cond_lock, %rdi + #endif +@@ -120,7 +109,9 @@ __pthread_cond_signal: + /* Unlock in loop requires wakeup. */ + 5: + movq %r8, %rdi +- callq __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %esi ++ callq __lll_unlock_wake + jmp 6b + .size __pthread_cond_signal, .-__pthread_cond_signal + versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal, +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S 2007-07-29 11:48:55.000000000 +0200 +@@ -18,23 +18,15 @@ + 02111-1307 USA. */ + + #include ++#include + #include + #include + #include + +-#ifndef UP +-# define LOCK lock +-#else +-# define +-#endif +- +-#define SYS_futex 202 +-#define FUTEX_WAIT 0 + + /* For the calculation see asm/vsyscall.h. */ + #define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000 + +- + .text + + .globl sem_timedwait +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S 2007-07-29 11:48:55.000000000 +0200 +@@ -18,19 +18,11 @@ + 02111-1307 USA. */ + + #include ++#include + #include + #include + #include + +-#ifndef UP +-# define LOCK lock +-#else +-# define +-#endif +- +-#define SYS_futex 202 +-#define FUTEX_WAIT 0 +- + + .text + +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S 2007-07-29 11:48:55.000000000 +0200 +@@ -18,18 +18,9 @@ + 02111-1307 USA. 
*/ + + #include ++#include + #include + +-#define SYS_futex 202 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +- +-#ifndef UP +-# define LOCK lock +-#else +-# define LOCK +-#endif +- + + .text + +@@ -142,21 +133,29 @@ pthread_barrier_wait: + + retq + +-1: addq $MUTEX, %rdi +- callq __lll_mutex_lock_wait ++1: movl PRIVATE(%rdi), %esi ++ addq $MUTEX, %rdi ++ xorl $LLL_SHARED, %esi ++ callq __lll_lock_wait + subq $MUTEX, %rdi + jmp 2b + +-4: addq $MUTEX, %rdi +- callq __lll_mutex_unlock_wake ++4: movl PRIVATE(%rdi), %esi ++ addq $MUTEX, %rdi ++ xorl $LLL_SHARED, %esi ++ callq __lll_unlock_wake + jmp 5b + +-6: addq $MUTEX, %rdi +- callq __lll_mutex_unlock_wake ++6: movl PRIVATE(%rdi), %esi ++ addq $MUTEX, %rdi ++ xorl $LLL_SHARED, %esi ++ callq __lll_unlock_wake + subq $MUTEX, %rdi + jmp 7b + +-9: addq $MUTEX, %rdi +- callq __lll_mutex_unlock_wake ++9: movl PRIVATE(%rdi), %esi ++ addq $MUTEX, %rdi ++ xorl $LLL_SHARED, %esi ++ callq __lll_unlock_wake + jmp 10b + .size pthread_barrier_wait,.-pthread_barrier_wait +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h.jj 2007-07-29 11:45:14.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h 2007-07-31 12:40:13.000000000 +0200 +@@ -20,17 +20,27 @@ + #ifndef _LOWLEVELLOCK_H + #define _LOWLEVELLOCK_H 1 + +-#include +-#include +-#include +-#include +-#include +- +-#ifndef LOCK_INSTR +-# ifdef UP +-# define LOCK_INSTR /* nothing */ +-# else +-# define LOCK_INSTR "lock;" ++#ifndef __ASSEMBLER__ ++# include ++# include ++# include ++# include ++# include ++ ++# ifndef LOCK_INSTR ++# ifdef UP ++# define LOCK_INSTR /* nothing */ ++# else ++# define LOCK_INSTR "lock;" ++# endif ++# endif ++#else ++# ifndef LOCK ++# ifdef UP ++# define LOCK ++# else ++# define LOCK lock ++# endif + # endif + #endif + +@@ -38,11 +48,13 @@ + #define FUTEX_WAIT 0 + #define FUTEX_WAKE 1 + #define FUTEX_CMP_REQUEUE 4 ++#define FUTEX_WAKE_OP 5 + #define FUTEX_LOCK_PI 6 + #define FUTEX_UNLOCK_PI 7 + #define FUTEX_TRYLOCK_PI 8 + #define FUTEX_PRIVATE_FLAG 128 + ++#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1) + + /* Values for 'private' parameter of locking macros. Yes, the + definition seems to be backwards. But it is not. The bit will be +@@ -50,6 +62,8 @@ + #define LLL_PRIVATE 0 + #define LLL_SHARED FUTEX_PRIVATE_FLAG + ++#ifndef __ASSEMBLER__ ++ + #if !defined NOT_IN_libc || defined IS_IN_rtld + /* In libc.so or ld.so all futexes are private. */ + # ifdef __ASSUME_PRIVATE_FUTEX +@@ -76,13 +90,13 @@ + # endif + #endif + +-/* Initializer for compatibility lock. */ +-#define LLL_MUTEX_LOCK_INITIALIZER (0) +-#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1) +-#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS (2) ++/* Initializer for lock. */ ++#define LLL_LOCK_INITIALIZER (0) ++#define LLL_LOCK_INITIALIZER_LOCKED (1) ++#define LLL_LOCK_INITIALIZER_WAITERS (2) + + /* Delay in spinlock loop. */ +-#define BUSY_WAIT_NOP asm ("rep; nop") ++#define BUSY_WAIT_NOP asm ("rep; nop") + + + #define LLL_STUB_UNWIND_INFO_START \ +@@ -196,7 +210,7 @@ LLL_STUB_UNWIND_INFO_END + : "=a" (__status) \ + : "0" (SYS_futex), "D" (futex), \ + "S" (__lll_private_flag (FUTEX_WAIT, private)), \ +- "d" (_val), "r" (__to) \ ++ "d" (_val), "r" (__to) \ + : "memory", "cc", "r11", "cx"); \ + __status; \ + }) +@@ -215,242 +229,308 @@ LLL_STUB_UNWIND_INFO_END + } while (0) + + +- +-/* Does not preserve %eax and %ecx. */ +-extern int __lll_mutex_lock_wait (int *__futex, int __val) attribute_hidden; +-/* Does not preserver %eax, %ecx, and %edx. 
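The LLL_PRIVATE (0) / LLL_SHARED (FUTEX_PRIVATE_FLAG) encoding is deliberately inverted so that the LOAD_FUTEX_WAIT/LOAD_FUTEX_WAKE sequences can turn it into a futex operation with a single xor, plus an and against the per-thread PRIVATE_FUTEX word when the kernel may lack private-futex support. In C the computation amounts to the sketch below; futex_wait_op and tcb_private_futex are illustrative names, the latter standing in for %fs:PRIVATE_FUTEX, which holds FUTEX_PRIVATE_FLAG when private futexes are available and 0 otherwise.

#include <linux/futex.h>

#define LLL_PRIVATE 0
#define LLL_SHARED  FUTEX_PRIVATE_FLAG

static inline int
futex_wait_op (int private, int tcb_private_futex, int assume_private_futex)
{
  if (assume_private_futex)
    /* xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg  */
    return private ^ (FUTEX_WAIT | FUTEX_PRIVATE_FLAG);

  /* xorl $FUTEX_PRIVATE_FLAG, reg
     andl %fs:PRIVATE_FUTEX, reg
     orl  $FUTEX_WAIT, reg  */
  return ((private ^ FUTEX_PRIVATE_FLAG) & tcb_private_futex) | FUTEX_WAIT;
}

/* LLL_PRIVATE yields FUTEX_WAIT | FUTEX_PRIVATE_FLAG when the kernel
   supports it; LLL_SHARED always yields plain FUTEX_WAIT.  */

With that encoding, callers that pass a compile-time LLL_PRIVATE can be routed straight to the *_private entry points and skip the computation, which is what the __builtin_constant_p checks in the lll_lock and lll_unlock macros do.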
*/ +-extern int __lll_mutex_timedlock_wait (int *__futex, int __val, +- const struct timespec *__abstime) +- attribute_hidden; +-/* Preserves all registers but %eax. */ +-extern int __lll_mutex_unlock_wait (int *__futex) attribute_hidden; +- +- +-/* NB: in the lll_mutex_trylock macro we simply return the value in %eax ++/* NB: in the lll_trylock macro we simply return the value in %eax + after the cmpxchg instruction. In case the operation succeded this + value is zero. In case the operation failed, the cmpxchg instruction + has loaded the current value of the memory work which is guaranteed + to be nonzero. */ +-#define lll_mutex_trylock(futex) \ ++#if defined NOT_IN_libc || defined UP ++# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1" ++#else ++# define __lll_trylock_asm "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \ ++ "je 0f\n\t" \ ++ "lock; cmpxchgl %2, %1\n\t" \ ++ "jmp 1f\n\t" \ ++ "0:\tcmpxchgl %2, %1\n\t" \ ++ "1:" ++#endif ++ ++#define lll_trylock(futex) \ + ({ int ret; \ +- __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \ ++ __asm __volatile (__lll_trylock_asm \ + : "=a" (ret), "=m" (futex) \ +- : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\ +- "0" (LLL_MUTEX_LOCK_INITIALIZER) \ ++ : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \ ++ "0" (LLL_LOCK_INITIALIZER) \ + : "memory"); \ + ret; }) + +- +-#define lll_robust_mutex_trylock(futex, id) \ ++#define lll_robust_trylock(futex, id) \ + ({ int ret; \ + __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \ + : "=a" (ret), "=m" (futex) \ +- : "r" (id), "m" (futex), \ +- "0" (LLL_MUTEX_LOCK_INITIALIZER) \ ++ : "r" (id), "m" (futex), "0" (LLL_LOCK_INITIALIZER) \ + : "memory"); \ + ret; }) + +- +-#define lll_mutex_cond_trylock(futex) \ ++#define lll_cond_trylock(futex) \ + ({ int ret; \ + __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \ + : "=a" (ret), "=m" (futex) \ +- : "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS), \ +- "m" (futex), "0" (LLL_MUTEX_LOCK_INITIALIZER) \ ++ : "r" (LLL_LOCK_INITIALIZER_WAITERS), \ ++ "m" (futex), "0" (LLL_LOCK_INITIALIZER) \ + : "memory"); \ + ret; }) + +- +-#define lll_mutex_lock(futex) \ +- (void) ({ int ignore1, ignore2, ignore3; \ +- __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \ ++#if defined NOT_IN_libc || defined UP ++# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %4, %2\n\t" \ ++ "jnz 1f\n\t" ++#else ++# define __lll_lock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \ ++ "je 0f\n\t" \ ++ "lock; cmpxchgl %4, %2\n\t" \ + "jnz 1f\n\t" \ +- ".subsection 1\n\t" \ +- ".type _L_mutex_lock_%=, @function\n" \ +- "_L_mutex_lock_%=:\n" \ +- "1:\tleaq %2, %%rdi\n" \ +- "2:\tsubq $128, %%rsp\n" \ +- "3:\tcallq __lll_mutex_lock_wait\n" \ +- "4:\taddq $128, %%rsp\n" \ +- "5:\tjmp 24f\n" \ +- "6:\t.size _L_mutex_lock_%=, 6b-1b\n\t" \ +- ".previous\n" \ +- LLL_STUB_UNWIND_INFO_5 \ +- "24:" \ +- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\ +- "=a" (ignore3) \ +- : "0" (1), "m" (futex), "3" (0) \ +- : "cx", "r11", "cc", "memory"); }) ++ "jmp 24f\n" \ ++ "0:\tcmpxchgl %4, %2\n\t" \ ++ "jnz 1f\n\t" ++#endif + ++#define lll_lock(futex, private) \ ++ (void) \ ++ ({ int ignore1, ignore2, ignore3; \ ++ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \ ++ __asm __volatile (__lll_lock_asm_start \ ++ ".subsection 1\n\t" \ ++ ".type _L_lock_%=, @function\n" \ ++ "_L_lock_%=:\n" \ ++ "1:\tleaq %2, %%rdi\n" \ ++ "2:\tsubq $128, %%rsp\n" \ ++ "3:\tcallq __lll_lock_wait_private\n" \ ++ "4:\taddq $128, %%rsp\n" \ ++ "5:\tjmp 24f\n" \ ++ "6:\t.size _L_lock_%=, 6b-1b\n\t" \ ++ 
".previous\n" \ ++ LLL_STUB_UNWIND_INFO_5 \ ++ "24:" \ ++ : "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \ ++ "=a" (ignore3) \ ++ : "0" (1), "m" (futex), "3" (0) \ ++ : "cx", "r11", "cc", "memory"); \ ++ else \ ++ __asm __volatile (__lll_lock_asm_start \ ++ ".subsection 1\n\t" \ ++ ".type _L_lock_%=, @function\n" \ ++ "_L_lock_%=:\n" \ ++ "1:\tleaq %2, %%rdi\n" \ ++ "2:\tsubq $128, %%rsp\n" \ ++ "3:\tcallq __lll_lock_wait\n" \ ++ "4:\taddq $128, %%rsp\n" \ ++ "5:\tjmp 24f\n" \ ++ "6:\t.size _L_lock_%=, 6b-1b\n\t" \ ++ ".previous\n" \ ++ LLL_STUB_UNWIND_INFO_5 \ ++ "24:" \ ++ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \ ++ "=a" (ignore3) \ ++ : "1" (1), "m" (futex), "3" (0), "0" (private) \ ++ : "cx", "r11", "cc", "memory"); \ ++ }) \ + +-#define lll_robust_mutex_lock(futex, id) \ ++#define lll_robust_lock(futex, id, private) \ + ({ int result, ignore1, ignore2; \ +- __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \ ++ __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \ + "jnz 1f\n\t" \ + ".subsection 1\n\t" \ +- ".type _L_robust_mutex_lock_%=, @function\n" \ +- "_L_robust_mutex_lock_%=:\n" \ ++ ".type _L_robust_lock_%=, @function\n" \ ++ "_L_robust_lock_%=:\n" \ + "1:\tleaq %2, %%rdi\n" \ + "2:\tsubq $128, %%rsp\n" \ +- "3:\tcallq __lll_robust_mutex_lock_wait\n" \ ++ "3:\tcallq __lll_robust_lock_wait\n" \ + "4:\taddq $128, %%rsp\n" \ + "5:\tjmp 24f\n" \ +- "6:\t.size _L_robust_mutex_lock_%=, 6b-1b\n\t" \ ++ "6:\t.size _L_robust_lock_%=, 6b-1b\n\t" \ + ".previous\n" \ + LLL_STUB_UNWIND_INFO_5 \ + "24:" \ +- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \ ++ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \ + "=a" (result) \ +- : "0" (id), "m" (futex), "3" (0) \ ++ : "1" (id), "m" (futex), "3" (0), "0" (private) \ + : "cx", "r11", "cc", "memory"); \ + result; }) + ++#define lll_cond_lock(futex, private) \ ++ (void) \ ++ ({ int ignore1, ignore2, ignore3; \ ++ __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \ ++ "jnz 1f\n\t" \ ++ ".subsection 1\n\t" \ ++ ".type _L_cond_lock_%=, @function\n" \ ++ "_L_cond_lock_%=:\n" \ ++ "1:\tleaq %2, %%rdi\n" \ ++ "2:\tsubq $128, %%rsp\n" \ ++ "3:\tcallq __lll_lock_wait\n" \ ++ "4:\taddq $128, %%rsp\n" \ ++ "5:\tjmp 24f\n" \ ++ "6:\t.size _L_cond_lock_%=, 6b-1b\n\t" \ ++ ".previous\n" \ ++ LLL_STUB_UNWIND_INFO_5 \ ++ "24:" \ ++ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \ ++ "=a" (ignore3) \ ++ : "1" (2), "m" (futex), "3" (0), "0" (private) \ ++ : "cx", "r11", "cc", "memory"); \ ++ }) + +-#define lll_mutex_cond_lock(futex) \ +- (void) ({ int ignore1, ignore2, ignore3; \ +- __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \ +- "jnz 1f\n\t" \ +- ".subsection 1\n\t" \ +- ".type _L_mutex_cond_lock_%=, @function\n" \ +- "_L_mutex_cond_lock_%=:\n" \ +- "1:\tleaq %2, %%rdi\n" \ +- "2:\tsubq $128, %%rsp\n" \ +- "3:\tcallq __lll_mutex_lock_wait\n" \ +- "4:\taddq $128, %%rsp\n" \ +- "5:\tjmp 24f\n" \ +- "6:\t.size _L_mutex_cond_lock_%=, 6b-1b\n\t" \ +- ".previous\n" \ +- LLL_STUB_UNWIND_INFO_5 \ +- "24:" \ +- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\ +- "=a" (ignore3) \ +- : "0" (2), "m" (futex), "3" (0) \ +- : "cx", "r11", "cc", "memory"); }) +- +- +-#define lll_robust_mutex_cond_lock(futex, id) \ ++#define lll_robust_cond_lock(futex, id, private) \ + ({ int result, ignore1, ignore2; \ +- __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \ ++ __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \ + "jnz 1f\n\t" \ + ".subsection 1\n\t" \ +- ".type _L_robust_mutex_cond_lock_%=, @function\n" \ +- "_L_robust_mutex_cond_lock_%=:\n" \ ++ 
".type _L_robust_cond_lock_%=, @function\n" \ ++ "_L_robust_cond_lock_%=:\n" \ + "1:\tleaq %2, %%rdi\n" \ + "2:\tsubq $128, %%rsp\n" \ +- "3:\tcallq __lll_robust_mutex_lock_wait\n" \ ++ "3:\tcallq __lll_robust_lock_wait\n" \ + "4:\taddq $128, %%rsp\n" \ + "5:\tjmp 24f\n" \ +- "6:\t.size _L_robust_mutex_cond_lock_%=, 6b-1b\n\t" \ ++ "6:\t.size _L_robust_cond_lock_%=, 6b-1b\n\t" \ + ".previous\n" \ + LLL_STUB_UNWIND_INFO_5 \ + "24:" \ +- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \ ++ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \ + "=a" (result) \ +- : "0" (id | FUTEX_WAITERS), "m" (futex), "3" (0) \ ++ : "1" (id | FUTEX_WAITERS), "m" (futex), "3" (0), \ ++ "0" (private) \ + : "cx", "r11", "cc", "memory"); \ + result; }) + +- +-#define lll_mutex_timedlock(futex, timeout) \ ++#define lll_timedlock(futex, timeout, private) \ + ({ int result, ignore1, ignore2, ignore3; \ +- __asm __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t" \ ++ __asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \ + "jnz 1f\n\t" \ + ".subsection 1\n\t" \ +- ".type _L_mutex_timedlock_%=, @function\n" \ +- "_L_mutex_timedlock_%=:\n" \ ++ ".type _L_timedlock_%=, @function\n" \ ++ "_L_timedlock_%=:\n" \ + "1:\tleaq %4, %%rdi\n" \ + "0:\tmovq %8, %%rdx\n" \ + "2:\tsubq $128, %%rsp\n" \ +- "3:\tcallq __lll_mutex_timedlock_wait\n" \ ++ "3:\tcallq __lll_timedlock_wait\n" \ + "4:\taddq $128, %%rsp\n" \ + "5:\tjmp 24f\n" \ +- "6:\t.size _L_mutex_timedlock_%=, 6b-1b\n\t" \ ++ "6:\t.size _L_timedlock_%=, 6b-1b\n\t" \ + ".previous\n" \ + LLL_STUB_UNWIND_INFO_6 \ + "24:" \ +- : "=a" (result), "=&D" (ignore1), "=S" (ignore2), \ ++ : "=a" (result), "=D" (ignore1), "=S" (ignore2), \ + "=&d" (ignore3), "=m" (futex) \ +- : "0" (0), "2" (1), "m" (futex), "m" (timeout) \ ++ : "0" (0), "1" (1), "m" (futex), "m" (timeout), \ ++ "2" (private) \ + : "memory", "cx", "cc", "r10", "r11"); \ + result; }) + +- +-#define lll_robust_mutex_timedlock(futex, timeout, id) \ ++#define lll_robust_timedlock(futex, timeout, id, private) \ + ({ int result, ignore1, ignore2, ignore3; \ +- __asm __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t" \ ++ __asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \ + "jnz 1f\n\t" \ + ".subsection 1\n\t" \ +- ".type _L_robust_mutex_timedlock_%=, @function\n" \ +- "_L_robust_mutex_timedlock_%=:\n" \ ++ ".type _L_robust_timedlock_%=, @function\n" \ ++ "_L_robust_timedlock_%=:\n" \ + "1:\tleaq %4, %%rdi\n" \ + "0:\tmovq %8, %%rdx\n" \ + "2:\tsubq $128, %%rsp\n" \ +- "3:\tcallq __lll_robust_mutex_timedlock_wait\n" \ ++ "3:\tcallq __lll_robust_timedlock_wait\n" \ + "4:\taddq $128, %%rsp\n" \ + "5:\tjmp 24f\n" \ +- "6:\t.size _L_robust_mutex_timedlock_%=, 6b-1b\n\t" \ ++ "6:\t.size _L_robust_timedlock_%=, 6b-1b\n\t" \ + ".previous\n" \ + LLL_STUB_UNWIND_INFO_6 \ + "24:" \ +- : "=a" (result), "=&D" (ignore1), "=S" (ignore2), \ ++ : "=a" (result), "=D" (ignore1), "=S" (ignore2), \ + "=&d" (ignore3), "=m" (futex) \ +- : "0" (0), "2" (id), "m" (futex), "m" (timeout) \ ++ : "0" (0), "1" (id), "m" (futex), "m" (timeout), \ ++ "2" (private) \ + : "memory", "cx", "cc", "r10", "r11"); \ + result; }) + ++#if defined NOT_IN_libc || defined UP ++# define __lll_unlock_asm_start LOCK_INSTR "decl %0\n\t" \ ++ "jne 1f\n\t" ++#else ++# define __lll_unlock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \ ++ "je 0f\n\t" \ ++ "lock; decl %0\n\t" \ ++ "jne 1f\n\t" \ ++ "jmp 24f\n\t" \ ++ "0:\tdecl %0\n\t" \ ++ "jne 1f\n\t" ++#endif + +-#define lll_mutex_unlock(futex) \ +- (void) ({ int ignore; \ +- __asm __volatile (LOCK_INSTR "decl %0\n\t" \ +- 
"jne 1f\n\t" \ +- ".subsection 1\n\t" \ +- ".type _L_mutex_unlock_%=, @function\n" \ +- "_L_mutex_unlock_%=:\n" \ +- "1:\tleaq %0, %%rdi\n" \ +- "2:\tsubq $128, %%rsp\n" \ +- "3:\tcallq __lll_mutex_unlock_wake\n" \ +- "4:\taddq $128, %%rsp\n" \ +- "5:\tjmp 24f\n" \ +- "6:\t.size _L_mutex_unlock_%=, 6b-1b\n\t" \ +- ".previous\n" \ +- LLL_STUB_UNWIND_INFO_5 \ +- "24:" \ +- : "=m" (futex), "=&D" (ignore) \ +- : "m" (futex) \ +- : "ax", "cx", "r11", "cc", "memory"); }) +- +- +-#define lll_robust_mutex_unlock(futex) \ +- (void) ({ int ignore; \ +- __asm __volatile (LOCK_INSTR "andl %2, %0\n\t" \ +- "jne 1f\n\t" \ +- ".subsection 1\n\t" \ +- ".type _L_robust_mutex_unlock_%=, @function\n" \ +- "_L_robust_mutex_unlock_%=:\n" \ +- "1:\tleaq %0, %%rdi\n" \ +- "2:\tsubq $128, %%rsp\n" \ +- "3:\tcallq __lll_mutex_unlock_wake\n" \ +- "4:\taddq $128, %%rsp\n" \ +- "5:\tjmp 24f\n" \ +- "6:\t.size _L_robust_mutex_unlock_%=, 6b-1b\n\t"\ +- ".previous\n" \ +- LLL_STUB_UNWIND_INFO_5 \ +- "24:" \ +- : "=m" (futex), "=&D" (ignore) \ +- : "i" (FUTEX_WAITERS), "m" (futex) \ +- : "ax", "cx", "r11", "cc", "memory"); }) +- +- +-#define lll_robust_mutex_dead(futex) \ +- (void) ({ int ignore; \ +- __asm __volatile (LOCK_INSTR "orl %3, (%2)\n\t" \ +- "syscall" \ +- : "=m" (futex), "=a" (ignore) \ +- : "D" (&(futex)), "i" (FUTEX_OWNER_DIED), \ +- "S" (FUTEX_WAKE), "1" (__NR_futex), \ +- "d" (1) \ +- : "cx", "r11", "cc", "memory"); }) +- ++#define lll_unlock(futex, private) \ ++ (void) \ ++ ({ int ignore; \ ++ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \ ++ __asm __volatile (__lll_unlock_asm_start \ ++ ".subsection 1\n\t" \ ++ ".type _L_unlock_%=, @function\n" \ ++ "_L_unlock_%=:\n" \ ++ "1:\tleaq %0, %%rdi\n" \ ++ "2:\tsubq $128, %%rsp\n" \ ++ "3:\tcallq __lll_unlock_wake_private\n" \ ++ "4:\taddq $128, %%rsp\n" \ ++ "5:\tjmp 24f\n" \ ++ "6:\t.size _L_unlock_%=, 6b-1b\n\t" \ ++ ".previous\n" \ ++ LLL_STUB_UNWIND_INFO_5 \ ++ "24:" \ ++ : "=m" (futex), "=&D" (ignore) \ ++ : "m" (futex) \ ++ : "ax", "cx", "r11", "cc", "memory"); \ ++ else \ ++ __asm __volatile (__lll_unlock_asm_start \ ++ ".subsection 1\n\t" \ ++ ".type _L_unlock_%=, @function\n" \ ++ "_L_unlock_%=:\n" \ ++ "1:\tleaq %0, %%rdi\n" \ ++ "2:\tsubq $128, %%rsp\n" \ ++ "3:\tcallq __lll_unlock_wake\n" \ ++ "4:\taddq $128, %%rsp\n" \ ++ "5:\tjmp 24f\n" \ ++ "6:\t.size _L_unlock_%=, 6b-1b\n\t" \ ++ ".previous\n" \ ++ LLL_STUB_UNWIND_INFO_5 \ ++ "24:" \ ++ : "=m" (futex), "=&D" (ignore) \ ++ : "m" (futex), "S" (private) \ ++ : "ax", "cx", "r11", "cc", "memory"); \ ++ }) ++ ++#define lll_robust_unlock(futex, private) \ ++ do \ ++ { \ ++ int ignore; \ ++ __asm __volatile (LOCK_INSTR "andl %2, %0\n\t" \ ++ "jne 1f\n\t" \ ++ ".subsection 1\n\t" \ ++ ".type _L_robust_unlock_%=, @function\n" \ ++ "_L_robust_unlock_%=:\n" \ ++ "1:\tleaq %0, %%rdi\n" \ ++ "2:\tsubq $128, %%rsp\n" \ ++ "3:\tcallq __lll_unlock_wake\n" \ ++ "4:\taddq $128, %%rsp\n" \ ++ "5:\tjmp 24f\n" \ ++ "6:\t.size _L_robust_unlock_%=, 6b-1b\n\t" \ ++ ".previous\n" \ ++ LLL_STUB_UNWIND_INFO_5 \ ++ "24:" \ ++ : "=m" (futex), "=&D" (ignore) \ ++ : "i" (FUTEX_WAITERS), "m" (futex), \ ++ "S" (private) \ ++ : "ax", "cx", "r11", "cc", "memory"); \ ++ } \ ++ while (0) ++ ++#define lll_robust_dead(futex, private) \ ++ do \ ++ { \ ++ int ignore; \ ++ __asm __volatile (LOCK_INSTR "orl %3, (%2)\n\t" \ ++ "syscall" \ ++ : "=m" (futex), "=a" (ignore) \ ++ : "D" (&(futex)), "i" (FUTEX_OWNER_DIED), \ ++ "S" (__lll_private_flag (FUTEX_WAKE, private)), \ ++ "1" (__NR_futex), "d" (1) \ ++ : "cx", 
"r11", "cc", "memory"); \ ++ } \ ++ while (0) + + /* Returns non-zero if error happened, zero if success. */ + #define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val) \ +@@ -461,117 +541,13 @@ extern int __lll_mutex_unlock_wait (int + __asm __volatile ("syscall" \ + : "=a" (__res) \ + : "0" (__NR_futex), "D" ((void *) ftx), \ +- "S" (FUTEX_CMP_REQUEUE), "d" (nr_wake), \ +- "r" (__nr_move), "r" (__mutex), "r" (__val) \ ++ "S" (FUTEX_CMP_REQUEUE), "d" (nr_wake), \ ++ "r" (__nr_move), "r" (__mutex), "r" (__val) \ + : "cx", "r11", "cc", "memory"); \ + __res < 0; }) + +- +-#define lll_mutex_islocked(futex) \ +- (futex != LLL_MUTEX_LOCK_INITIALIZER) +- +- +-/* We have a separate internal lock implementation which is not tied +- to binary compatibility. */ +- +-/* Type for lock object. */ +-typedef int lll_lock_t; +- +-/* Initializers for lock. */ +-#define LLL_LOCK_INITIALIZER (0) +-#define LLL_LOCK_INITIALIZER_LOCKED (1) +- +- +-/* The states of a lock are: +- 0 - untaken +- 1 - taken by one user +- 2 - taken by more users */ +- +- +-#if defined NOT_IN_libc || defined UP +-# define lll_trylock(futex) lll_mutex_trylock (futex) +-# define lll_lock(futex) lll_mutex_lock (futex) +-# define lll_unlock(futex) lll_mutex_unlock (futex) +-#else +-/* Special versions of the macros for use in libc itself. They avoid +- the lock prefix when the thread library is not used. +- +- The code sequence to avoid unnecessary lock prefixes is what the AMD +- guys suggested. If you do not like it, bring it up with AMD. +- +- XXX In future we might even want to avoid it on UP machines. */ +- +-# define lll_trylock(futex) \ +- ({ unsigned char ret; \ +- __asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \ +- "je 0f\n\t" \ +- "lock; cmpxchgl %2, %1\n\t" \ +- "jmp 1f\n" \ +- "0:\tcmpxchgl %2, %1\n\t" \ +- "1:setne %0" \ +- : "=a" (ret), "=m" (futex) \ +- : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\ +- "0" (LLL_MUTEX_LOCK_INITIALIZER) \ +- : "memory"); \ +- ret; }) +- +- +-# define lll_lock(futex) \ +- (void) ({ int ignore1, ignore2, ignore3; \ +- __asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \ +- "je 0f\n\t" \ +- "lock; cmpxchgl %0, %2\n\t" \ +- "jnz 1f\n\t" \ +- "jmp 24f\n" \ +- "0:\tcmpxchgl %0, %2\n\t" \ +- "jnz 1f\n\t" \ +- ".subsection 1\n\t" \ +- ".type _L_lock_%=, @function\n" \ +- "_L_lock_%=:\n" \ +- "1:\tleaq %2, %%rdi\n" \ +- "2:\tsubq $128, %%rsp\n" \ +- "3:\tcallq __lll_mutex_lock_wait\n" \ +- "4:\taddq $128, %%rsp\n" \ +- "5:\tjmp 24f\n" \ +- "6:\t.size _L_lock_%=, 6b-1b\n\t" \ +- ".previous\n" \ +- LLL_STUB_UNWIND_INFO_5 \ +- "24:" \ +- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\ +- "=a" (ignore3) \ +- : "0" (1), "m" (futex), "3" (0) \ +- : "cx", "r11", "cc", "memory"); }) +- +- +-# define lll_unlock(futex) \ +- (void) ({ int ignore; \ +- __asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \ +- "je 0f\n\t" \ +- "lock; decl %0\n\t" \ +- "jne 1f\n\t" \ +- "jmp 24f\n" \ +- "0:\tdecl %0\n\t" \ +- "jne 1f\n\t" \ +- ".subsection 1\n\t" \ +- ".type _L_unlock_%=, @function\n" \ +- "_L_unlock_%=:\n" \ +- "1:\tleaq %0, %%rdi\n" \ +- "2:\tsubq $128, %%rsp\n" \ +- "3:\tcallq __lll_mutex_unlock_wake\n" \ +- "4:\taddq $128, %%rsp\n" \ +- "5:\tjmp 24f\n" \ +- "6:\t.size _L_unlock_%=, 6b-1b\n\t" \ +- ".previous\n" \ +- LLL_STUB_UNWIND_INFO_5 \ +- "24:" \ +- : "=m" (futex), "=&D" (ignore) \ +- : "m" (futex) \ +- : "ax", "cx", "r11", "cc", "memory"); }) +-#endif +- +- + #define lll_islocked(futex) \ +- (futex != LLL_MUTEX_LOCK_INITIALIZER) ++ (futex != 
LLL_LOCK_INITIALIZER) + + + /* The kernel notifies a process with uses CLONE_CLEARTID via futex +@@ -610,25 +586,6 @@ extern int __lll_timedwait_tid (int *tid + } \ + __result; }) + +- +-/* Conditional variable handling. */ +- +-extern void __lll_cond_wait (pthread_cond_t *cond) attribute_hidden; +-extern int __lll_cond_timedwait (pthread_cond_t *cond, +- const struct timespec *abstime) +- attribute_hidden; +-extern void __lll_cond_wake (pthread_cond_t *cond) attribute_hidden; +-extern void __lll_cond_broadcast (pthread_cond_t *cond) attribute_hidden; +- +- +-#define lll_cond_wait(cond) \ +- __lll_cond_wait (cond) +-#define lll_cond_timedwait(cond, abstime) \ +- __lll_cond_timedwait (cond, abstime) +-#define lll_cond_wake(cond) \ +- __lll_cond_wake (cond) +-#define lll_cond_broadcast(cond) \ +- __lll_cond_broadcast (cond) +- ++#endif /* !__ASSEMBLER__ */ + + #endif /* lowlevellock.h */ +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S 2007-07-29 11:48:55.000000000 +0200 +@@ -19,19 +19,10 @@ + + #include + #include ++#include + #include + #include + +-#ifdef UP +-# define LOCK +-#else +-# define LOCK lock +-#endif +- +-#define SYS_futex 202 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +- + /* For the calculation see asm/vsyscall.h. */ + #define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000 + +@@ -301,7 +292,9 @@ __pthread_cond_timedwait: + #if cond_lock != 0 + addq $cond_lock, %rdi + #endif +- callq __lll_mutex_lock_wait ++ /* XYZ */ ++ movl $LLL_SHARED, %esi ++ callq __lll_lock_wait + jmp 2b + + /* Unlock in loop requires wakeup. */ +@@ -309,7 +302,9 @@ __pthread_cond_timedwait: + #if cond_lock != 0 + addq $cond_lock, %rdi + #endif +- callq __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %esi ++ callq __lll_unlock_wake + jmp 4b + + /* Locking in loop failed. */ +@@ -317,7 +312,9 @@ __pthread_cond_timedwait: + #if cond_lock != 0 + addq $cond_lock, %rdi + #endif +- callq __lll_mutex_lock_wait ++ /* XYZ */ ++ movl $LLL_SHARED, %esi ++ callq __lll_lock_wait + #if cond_lock != 0 + subq $cond_lock, %rdi + #endif +@@ -328,7 +325,9 @@ __pthread_cond_timedwait: + #if cond_lock != 0 + addq $cond_lock, %rdi + #endif +- callq __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %esi ++ callq __lll_unlock_wake + jmp 11b + + /* The initial unlocking of the mutex failed. */ +@@ -345,7 +344,9 @@ __pthread_cond_timedwait: + #if cond_lock != 0 + addq $cond_lock, %rdi + #endif +- callq __lll_mutex_unlock_wake ++ /* XYZ */ ++ movl $LLL_SHARED, %esi ++ callq __lll_unlock_wake + + 17: movq (%rsp), %rax + jmp 18b +--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S.jj 2007-07-24 10:50:55.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S 2007-07-29 11:48:55.000000000 +0200 +@@ -18,26 +18,15 @@ + 02111-1307 USA. */ + + #include ++#include + #include + #include + #include + + +-#define SYS_futex 202 +-#define FUTEX_WAIT 0 +-#define FUTEX_WAKE 1 +-#define FUTEX_PRIVATE_FLAG 128 +- + /* For the calculation see asm/vsyscall.h. 
*/ + #define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000 + +-#ifndef UP +-# define LOCK lock +-#else +-# define LOCK +-#endif +- +- + .text + + .globl pthread_rwlock_timedwrlock +@@ -168,11 +157,11 @@ pthread_rwlock_timedwrlock: + popq %r12 + retq + +-1: ++1: movl PSHARED(%rdi), %esi + #if MUTEX != 0 + addq $MUTEX, %rdi + #endif +- callq __lll_mutex_lock_wait ++ callq __lll_lock_wait + jmp 2b + + 14: cmpl %fs:TID, %eax +@@ -180,13 +169,13 @@ pthread_rwlock_timedwrlock: + 20: movl $EDEADLK, %edx + jmp 9b + +-6: ++6: movl PSHARED(%r12), %esi + #if MUTEX == 0 + movq %r12, %rdi + #else + leal MUTEX(%r12), %rdi + #endif +- callq __lll_mutex_unlock_wake ++ callq __lll_unlock_wake + jmp 7b + + /* Overflow. */ +@@ -194,22 +183,22 @@ pthread_rwlock_timedwrlock: + movl $EAGAIN, %edx + jmp 9b + +-10: ++10: movl PSHARED(%r12), %esi + #if MUTEX == 0 + movq %r12, %rdi + #else + leaq MUTEX(%r12), %rdi + #endif +- callq __lll_mutex_unlock_wake ++ callq __lll_unlock_wake + jmp 11b + +-12: ++12: movl PSHARED(%r12), %esi + #if MUTEX == 0 + movq %r12, %rdi + #else + leaq MUTEX(%r12), %rdi + #endif +- callq __lll_mutex_lock_wait ++ callq __lll_lock_wait + jmp 13b + + 16: movq $-ETIMEDOUT, %rdx +--- libc/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c.jj 2007-06-08 09:13:52.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c 2007-07-29 11:48:55.000000000 +0200 +@@ -85,8 +85,7 @@ sem_timedwait (sem_t *sem, const struct + int oldtype = __pthread_enable_asynccancel (); + + err = lll_futex_timed_wait (&isem->value, 0, &rt, +- // XYZ check mutex flag +- LLL_SHARED); ++ isem->private ^ FUTEX_PRIVATE_FLAG); + + /* Disable asynchronous cancellation. */ + __pthread_disable_asynccancel (oldtype); +--- libc/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c.jj 2007-07-30 22:47:08.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c 2007-07-30 22:47:50.000000000 +0200 +@@ -0,0 +1,55 @@ ++/* Copyright (C) 2002, 2006, 2007 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ Contributed by Ulrich Drepper , 2002. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, write to the Free ++ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA ++ 02111-1307 USA. */ ++ ++#include ++#include "pthreadP.h" ++#include ++ ++int ++pthread_barrier_init (barrier, attr, count) ++ pthread_barrier_t *barrier; ++ const pthread_barrierattr_t *attr; ++ unsigned int count; ++{ ++ union sparc_pthread_barrier *ibarrier; ++ ++ if (__builtin_expect (count == 0, 0)) ++ return EINVAL; ++ ++ struct pthread_barrierattr *iattr = (struct pthread_barrierattr *) attr; ++ if (iattr != NULL) ++ { ++ if (iattr->pshared != PTHREAD_PROCESS_PRIVATE ++ && __builtin_expect (iattr->pshared != PTHREAD_PROCESS_SHARED, 0)) ++ /* Invalid attribute. */ ++ return EINVAL; ++ } ++ ++ ibarrier = (union sparc_pthread_barrier *) barrier; ++ ++ /* Initialize the individual fields. 
*/ ++ ibarrier->b.lock = LLL_LOCK_INITIALIZER; ++ ibarrier->b.left = count; ++ ibarrier->b.init_count = count; ++ ibarrier->b.curr_event = 0; ++ ibarrier->s.left_lock = 0; ++ ibarrier->s.pshared = (iattr && iattr->pshared == PTHREAD_PROCESS_SHARED); ++ ++ return 0; ++} +--- libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_init.c.jj 2006-01-04 00:46:19.000000000 +0100 ++++ libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_init.c 2007-07-30 22:46:57.000000000 +0200 +@@ -1,62 +0,0 @@ +-/* Copyright (C) 2002, 2006 Free Software Foundation, Inc. +- This file is part of the GNU C Library. +- Contributed by Ulrich Drepper , 2002. +- +- The GNU C Library is free software; you can redistribute it and/or +- modify it under the terms of the GNU Lesser General Public +- License as published by the Free Software Foundation; either +- version 2.1 of the License, or (at your option) any later version. +- +- The GNU C Library is distributed in the hope that it will be useful, +- but WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- Lesser General Public License for more details. +- +- You should have received a copy of the GNU Lesser General Public +- License along with the GNU C Library; if not, write to the Free +- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA +- 02111-1307 USA. */ +- +-#include +-#include "pthreadP.h" +-#include +- +-struct sparc_pthread_barrier +-{ +- struct pthread_barrier b; +- unsigned char left_lock; +- unsigned char pshared; +-}; +- +-int +-pthread_barrier_init (barrier, attr, count) +- pthread_barrier_t *barrier; +- const pthread_barrierattr_t *attr; +- unsigned int count; +-{ +- struct sparc_pthread_barrier *ibarrier; +- +- if (__builtin_expect (count == 0, 0)) +- return EINVAL; +- +- struct pthread_barrierattr *iattr = (struct pthread_barrierattr *) attr; +- if (iattr != NULL) +- { +- if (iattr->pshared != PTHREAD_PROCESS_PRIVATE +- && __builtin_expect (iattr->pshared != PTHREAD_PROCESS_SHARED, 0)) +- /* Invalid attribute. */ +- return EINVAL; +- } +- +- ibarrier = (struct sparc_pthread_barrier *) barrier; +- +- /* Initialize the individual fields. 
*/ +- ibarrier->b.lock = LLL_LOCK_INITIALIZER; +- ibarrier->b.left = count; +- ibarrier->b.init_count = count; +- ibarrier->b.curr_event = 0; +- ibarrier->left_lock = 0; +- ibarrier->pshared = (iattr && iattr->pshared == PTHREAD_PROCESS_SHARED); +- +- return 0; +-} +--- libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c.jj 2007-06-04 08:42:06.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c 2007-07-30 22:18:37.000000000 +0200 +@@ -25,20 +25,35 @@ + + + void +-__lll_lock_wait (int *futex) ++__lll_lock_wait_private (int *futex) + { + do + { + int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1); + if (oldval != 0) +- lll_futex_wait (futex, 2); ++ lll_futex_wait (futex, 2, LLL_PRIVATE); ++ } ++ while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0); ++} ++ ++#ifdef IS_IN_libpthread ++/* These functions don't get included in libc.so */ ++ ++void ++__lll_lock_wait (int *futex, int private) ++{ ++ do ++ { ++ int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1); ++ if (oldval != 0) ++ lll_futex_wait (futex, 2, private); + } + while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0); + } + + + int +-__lll_timedlock_wait (int *futex, const struct timespec *abstime) ++__lll_timedlock_wait (int *futex, const struct timespec *abstime, int private) + { + /* Reject invalid timeouts. */ + if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) +@@ -68,7 +83,7 @@ __lll_timedlock_wait (int *futex, const + /* Wait. */ + int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1); + if (oldval != 0) +- lll_futex_timed_wait (futex, 2, &rt); ++ lll_futex_timed_wait (futex, 2, &rt, private); + } + while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0); + +@@ -76,8 +91,6 @@ __lll_timedlock_wait (int *futex, const + } + + +-/* This function doesn't get included in libc.so */ +-#ifdef IS_IN_libpthread + int + __lll_timedwait_tid (int *tidp, const struct timespec *abstime) + { +--- libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_init.c.jj 2006-01-04 00:58:44.000000000 +0100 ++++ libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_init.c 2007-07-30 22:50:27.000000000 +0200 +@@ -1 +0,0 @@ +-#include "../../../../../../../pthread_barrier_init.c" +--- libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c.jj 2007-01-11 00:19:18.000000000 +0100 ++++ libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c 2007-07-30 22:50:42.000000000 +0200 +@@ -1 +1 @@ +-#include "../../../../../../../pthread_barrier_wait.c" ++#include "../../pthread_barrier_wait.c" +--- libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c.jj 2007-06-08 09:13:53.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c 2007-07-31 12:40:13.000000000 +0200 +@@ -22,24 +22,18 @@ + #include + #include + +-struct sparc_pthread_barrier +-{ +- struct pthread_barrier b; +- unsigned char left_lock; +- unsigned char pshared; +-}; +- + /* Wait on barrier. */ + int + pthread_barrier_wait (barrier) + pthread_barrier_t *barrier; + { +- struct sparc_pthread_barrier *ibarrier +- = (struct sparc_pthread_barrier *) barrier; ++ union sparc_pthread_barrier *ibarrier ++ = (union sparc_pthread_barrier *) barrier; + int result = 0; ++ int private = ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE; + + /* Make sure we are alone. */ +- lll_lock (ibarrier->b.lock); ++ lll_lock (ibarrier->b.lock, private); + + /* One more arrival. 
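The sparc barrier code derives its futex private argument from the pshared flag recorded at init time, so only PTHREAD_PROCESS_SHARED barriers pay for shared futex operations. A short usage example of the standard API that drives this path (plain pthreads calls, nothing glibc-internal; the helper name is invented, and for a process-shared barrier the object itself would normally live in memory mapped into both processes):

#include <pthread.h>

/* Initialize a process-shared barrier: pshared ends up non-zero in the
   object, so the lll_* calls take the LLL_SHARED path.  A barrier created
   without an attribute, or with PTHREAD_PROCESS_PRIVATE, takes the
   LLL_PRIVATE / private-futex path instead.  */
static int
init_shared_barrier (pthread_barrier_t *b, unsigned int count)
{
  pthread_barrierattr_t attr;
  int err = pthread_barrierattr_init (&attr);
  if (err == 0)
    {
      err = pthread_barrierattr_setpshared (&attr, PTHREAD_PROCESS_SHARED);
      if (err == 0)
        err = pthread_barrier_init (b, &attr, count);
      pthread_barrierattr_destroy (&attr);
    }
  return err;
}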
*/ + --ibarrier->b.left; +@@ -52,9 +46,7 @@ pthread_barrier_wait (barrier) + ++ibarrier->b.curr_event; + + /* Wake up everybody. */ +- lll_futex_wake (&ibarrier->b.curr_event, INT_MAX, +- // XYZ check mutex flag +- LLL_SHARED); ++ lll_futex_wake (&ibarrier->b.curr_event, INT_MAX, private); + + /* This is the thread which finished the serialization. */ + result = PTHREAD_BARRIER_SERIAL_THREAD; +@@ -66,13 +58,11 @@ pthread_barrier_wait (barrier) + unsigned int event = ibarrier->b.curr_event; + + /* Before suspending, make the barrier available to others. */ +- lll_unlock (ibarrier->b.lock); ++ lll_unlock (ibarrier->b.lock, private); + + /* Wait for the event counter of the barrier to change. */ + do +- lll_futex_wait (&ibarrier->b.curr_event, event, +- // XYZ check mutex flag +- LLL_SHARED); ++ lll_futex_wait (&ibarrier->b.curr_event, event, private); + while (event == ibarrier->b.curr_event); + } + +@@ -80,11 +70,11 @@ pthread_barrier_wait (barrier) + unsigned int init_count = ibarrier->b.init_count; + + /* If this was the last woken thread, unlock. */ +- if (__atomic_is_v9 || ibarrier->pshared == 0) ++ if (__atomic_is_v9 || ibarrier->s.pshared == 0) + { + if (atomic_increment_val (&ibarrier->b.left) == init_count) + /* We are done. */ +- lll_unlock (ibarrier->b.lock); ++ lll_unlock (ibarrier->b.lock, private); + } + else + { +@@ -92,12 +82,12 @@ pthread_barrier_wait (barrier) + /* Slightly more complicated. On pre-v9 CPUs, atomic_increment_val + is only atomic for threads within the same process, not for + multiple processes. */ +- __sparc32_atomic_do_lock24 (&ibarrier->left_lock); ++ __sparc32_atomic_do_lock24 (&ibarrier->s.left_lock); + left = ++ibarrier->b.left; +- __sparc32_atomic_do_unlock24 (&ibarrier->left_lock); ++ __sparc32_atomic_do_unlock24 (&ibarrier->s.left_lock); + if (left == init_count) + /* We are done. */ +- lll_unlock (ibarrier->b.lock); ++ lll_unlock (ibarrier->b.lock, private); + } + + return result; +--- libc/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c.jj 2007-07-30 22:30:29.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c 2007-07-30 22:46:38.000000000 +0200 +@@ -0,0 +1,45 @@ ++/* Copyright (C) 2002, 2007 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ Contributed by Ulrich Drepper , 2002. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, write to the Free ++ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA ++ 02111-1307 USA. */ ++ ++#include ++#include "pthreadP.h" ++#include ++ ++int ++pthread_barrier_destroy (barrier) ++ pthread_barrier_t *barrier; ++{ ++ union sparc_pthread_barrier *ibarrier; ++ int result = EBUSY; ++ ++ ibarrier = (union sparc_pthread_barrier *) barrier; ++ ++ int private = ibarrier->s.pshared ? 
LLL_SHARED : LLL_PRIVATE; ++ ++ lll_lock (ibarrier->b.lock, private); ++ ++ if (__builtin_expect (ibarrier->b.left == ibarrier->b.init_count, 1)) ++ /* The barrier is not used anymore. */ ++ result = 0; ++ else ++ /* Still used, return with an error. */ ++ lll_unlock (ibarrier->b.lock, private); ++ ++ return result; ++} +--- libc/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h.jj 2007-07-29 11:45:14.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h 2007-07-31 12:40:13.000000000 +0200 +@@ -70,9 +70,6 @@ + #endif + + +-/* Initializer for compatibility lock. */ +-#define LLL_MUTEX_LOCK_INITIALIZER (0) +- + #define lll_futex_wait(futexp, val, private) \ + lll_futex_timed_wait (futexp, val, NULL, private) + +@@ -110,12 +107,12 @@ + INTERNAL_SYSCALL_ERROR_P (__ret, __err); \ + }) + +-#define lll_robust_mutex_dead(futexv) \ ++#define lll_robust_dead(futexv, private) \ + do \ + { \ + int *__futexp = &(futexv); \ + atomic_or (__futexp, FUTEX_OWNER_DIED); \ +- lll_futex_wake (__futexp, 1, LLL_SHARED); \ ++ lll_futex_wake (__futexp, 1, private); \ + } \ + while (0) + +@@ -139,146 +136,132 @@ + + static inline int + __attribute__ ((always_inline)) +-__lll_mutex_trylock (int *futex) ++__lll_trylock (int *futex) + { + return atomic_compare_and_exchange_val_24_acq (futex, 1, 0) != 0; + } +-#define lll_mutex_trylock(futex) __lll_mutex_trylock (&(futex)) ++#define lll_trylock(futex) __lll_trylock (&(futex)) + + static inline int + __attribute__ ((always_inline)) +-__lll_mutex_cond_trylock (int *futex) ++__lll_cond_trylock (int *futex) + { + return atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0; + } +-#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex)) ++#define lll_cond_trylock(futex) __lll_cond_trylock (&(futex)) + + static inline int + __attribute__ ((always_inline)) +-__lll_robust_mutex_trylock (int *futex, int id) ++__lll_robust_trylock (int *futex, int id) + { + return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0; + } +-#define lll_robust_mutex_trylock(futex, id) \ +- __lll_robust_mutex_trylock (&(futex), id) ++#define lll_robust_trylock(futex, id) \ ++ __lll_robust_trylock (&(futex), id) + + +-extern void __lll_lock_wait (int *futex) attribute_hidden; +-extern int __lll_robust_lock_wait (int *futex) attribute_hidden; ++extern void __lll_lock_wait_private (int *futex) attribute_hidden; ++extern void __lll_lock_wait (int *futex, int private) attribute_hidden; ++extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden; + + static inline void + __attribute__ ((always_inline)) +-__lll_mutex_lock (int *futex) ++__lll_lock (int *futex, int private) + { + int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0); + + if (__builtin_expect (val != 0, 0)) +- __lll_lock_wait (futex); ++ { ++ if (__builtin_constant_p (private) && private == LLL_PRIVATE) ++ __lll_lock_wait_private (futex); ++ else ++ __lll_lock_wait (futex, private); ++ } + } +-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex)) ++#define lll_lock(futex, private) __lll_lock (&(futex), private) + + static inline int + __attribute__ ((always_inline)) +-__lll_robust_mutex_lock (int *futex, int id) ++__lll_robust_lock (int *futex, int id, int private) + { + int result = 0; + if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0) +- result = __lll_robust_lock_wait (futex); ++ result = __lll_robust_lock_wait (futex, private); + return result; + } +-#define lll_robust_mutex_lock(futex, id) \ +- __lll_robust_mutex_lock (&(futex), id) ++#define 
lll_robust_lock(futex, id, private) \ ++ __lll_robust_lock (&(futex), id, private) + + static inline void + __attribute__ ((always_inline)) +-__lll_mutex_cond_lock (int *futex) ++__lll_cond_lock (int *futex, int private) + { + int val = atomic_compare_and_exchange_val_24_acq (futex, 2, 0); + + if (__builtin_expect (val != 0, 0)) +- __lll_lock_wait (futex); ++ __lll_lock_wait (futex, private); + } +-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex)) ++#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private) + +-#define lll_robust_mutex_cond_lock(futex, id) \ +- __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS) ++#define lll_robust_cond_lock(futex, id, private) \ ++ __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private) + + +-extern int __lll_timedlock_wait (int *futex, const struct timespec *) +- attribute_hidden; +-extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *) +- attribute_hidden; ++extern int __lll_timedlock_wait (int *futex, const struct timespec *, ++ int private) attribute_hidden; ++extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *, ++ int private) attribute_hidden; + + static inline int + __attribute__ ((always_inline)) +-__lll_mutex_timedlock (int *futex, const struct timespec *abstime) ++__lll_timedlock (int *futex, const struct timespec *abstime, int private) + { + int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0); + int result = 0; + + if (__builtin_expect (val != 0, 0)) +- result = __lll_timedlock_wait (futex, abstime); ++ result = __lll_timedlock_wait (futex, abstime, private); + return result; + } +-#define lll_mutex_timedlock(futex, abstime) \ +- __lll_mutex_timedlock (&(futex), abstime) ++#define lll_timedlock(futex, abstime, private) \ ++ __lll_timedlock (&(futex), abstime, private) + + static inline int + __attribute__ ((always_inline)) +-__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime, +- int id) ++__lll_robust_timedlock (int *futex, const struct timespec *abstime, ++ int id, int private) + { + int result = 0; + if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0) +- result = __lll_robust_timedlock_wait (futex, abstime); ++ result = __lll_robust_timedlock_wait (futex, abstime, private); + return result; + } +-#define lll_robust_mutex_timedlock(futex, abstime, id) \ +- __lll_robust_mutex_timedlock (&(futex), abstime, id) ++#define lll_robust_timedlock(futex, abstime, id, private) \ ++ __lll_robust_timedlock (&(futex), abstime, id, private) + +-#define lll_mutex_unlock(lock) \ ++#define lll_unlock(lock, private) \ + ((void) ({ \ + int *__futex = &(lock); \ + int __val = atomic_exchange_24_rel (__futex, 0); \ + if (__builtin_expect (__val > 1, 0)) \ +- lll_futex_wake (__futex, 1, LLL_SHARED); \ ++ lll_futex_wake (__futex, 1, private); \ + })) + +-#define lll_robust_mutex_unlock(lock) \ ++#define lll_robust_unlock(lock, private) \ + ((void) ({ \ + int *__futex = &(lock); \ + int __val = atomic_exchange_rel (__futex, 0); \ + if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \ +- lll_futex_wake (__futex, 1, LLL_SHARED); \ +- })) +- +-#define lll_mutex_unlock_force(lock) \ +- ((void) ({ \ +- int *__futex = &(lock); \ +- (void) atomic_exchange_24_rel (__futex, 0); \ +- lll_futex_wake (__futex, 1, LLL_SHARED); \ ++ lll_futex_wake (__futex, 1, private); \ + })) + +-#define lll_mutex_islocked(futex) \ ++#define lll_islocked(futex) \ + (futex != 0) + +- +-/* We have a separate internal lock implementation which is not tied +- to binary 
compatibility. We can use the lll_mutex_*. */ +- +-/* Type for lock object. */ +-typedef int lll_lock_t; +- + /* Initializers for lock. */ + #define LLL_LOCK_INITIALIZER (0) + #define LLL_LOCK_INITIALIZER_LOCKED (1) + +-#define lll_trylock(futex) lll_mutex_trylock (futex) +-#define lll_lock(futex) lll_mutex_lock (futex) +-#define lll_unlock(futex) lll_mutex_unlock (futex) +-#define lll_islocked(futex) lll_mutex_islocked (futex) +- +- + /* The kernel notifies a process with uses CLONE_CLEARTID via futex + wakeup when the clone terminates. The memory location contains the + thread ID while the clone is running and is reset to zero +@@ -303,26 +286,4 @@ extern int __lll_timedwait_tid (int *, c + __res; \ + }) + +- +-/* Conditional variable handling. */ +- +-extern void __lll_cond_wait (pthread_cond_t *cond) +- attribute_hidden; +-extern int __lll_cond_timedwait (pthread_cond_t *cond, +- const struct timespec *abstime) +- attribute_hidden; +-extern void __lll_cond_wake (pthread_cond_t *cond) +- attribute_hidden; +-extern void __lll_cond_broadcast (pthread_cond_t *cond) +- attribute_hidden; +- +-#define lll_cond_wait(cond) \ +- __lll_cond_wait (cond) +-#define lll_cond_timedwait(cond, abstime) \ +- __lll_cond_timedwait (cond, abstime) +-#define lll_cond_wake(cond) \ +- __lll_cond_wake (cond) +-#define lll_cond_broadcast(cond) \ +- __lll_cond_broadcast (cond) +- + #endif /* lowlevellock.h */ +--- libc/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_wait.c.jj 2007-07-30 22:49:29.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_wait.c 2007-07-30 22:50:08.000000000 +0200 +@@ -0,0 +1,78 @@ ++/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ Contributed by Martin Schwidefsky , 2003. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, write to the Free ++ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA ++ 02111-1307 USA. */ ++ ++#include ++#include ++#include ++#include ++ ++/* Wait on barrier. */ ++int ++pthread_barrier_wait (barrier) ++ pthread_barrier_t *barrier; ++{ ++ union sparc_pthread_barrier *ibarrier ++ = (union sparc_pthread_barrier *) barrier; ++ int result = 0; ++ int private = ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE; ++ ++ /* Make sure we are alone. */ ++ lll_lock (ibarrier->b.lock, private); ++ ++ /* One more arrival. */ ++ --ibarrier->b.left; ++ ++ /* Are these all? */ ++ if (ibarrier->b.left == 0) ++ { ++ /* Yes. Increment the event counter to avoid invalid wake-ups and ++ tell the current waiters that it is their turn. */ ++ ++ibarrier->b.curr_event; ++ ++ /* Wake up everybody. */ ++ lll_futex_wake (&ibarrier->b.curr_event, INT_MAX, private); ++ ++ /* This is the thread which finished the serialization. */ ++ result = PTHREAD_BARRIER_SERIAL_THREAD; ++ } ++ else ++ { ++ /* The number of the event we are waiting for. 
The barrier's event ++ number must be bumped before we continue. */ ++ unsigned int event = ibarrier->b.curr_event; ++ ++ /* Before suspending, make the barrier available to others. */ ++ lll_unlock (ibarrier->b.lock, private); ++ ++ /* Wait for the event counter of the barrier to change. */ ++ do ++ lll_futex_wait (&ibarrier->b.curr_event, event, private); ++ while (event == ibarrier->b.curr_event); ++ } ++ ++ /* Make sure the init_count is stored locally or in a register. */ ++ unsigned int init_count = ibarrier->b.init_count; ++ ++ /* If this was the last woken thread, unlock. */ ++ if (atomic_increment_val (&ibarrier->b.left) == init_count) ++ /* We are done. */ ++ lll_unlock (ibarrier->b.lock, private); ++ ++ return result; ++} +--- libc/nptl/sysdeps/unix/sysv/linux/sparc/internaltypes.h.jj 2007-07-30 22:44:59.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/sparc/internaltypes.h 2007-07-30 22:45:46.000000000 +0200 +@@ -0,0 +1,18 @@ ++#ifndef _INTERNALTYPES_H ++#include "../internaltypes.h" ++ ++union sparc_pthread_barrier ++{ ++ struct pthread_barrier b; ++ struct sparc_pthread_barrier_s ++ { ++ unsigned int curr_event; ++ int lock; ++ unsigned int left; ++ unsigned int init_count; ++ unsigned char left_lock; ++ unsigned char pshared; ++ } s; ++}; ++ ++#endif +--- libc/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c.jj 2006-02-15 18:01:17.000000000 +0100 ++++ libc/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c 2007-07-29 11:48:55.000000000 +0200 +@@ -1,8 +1,8 @@ + #include + +-#define LLL_MUTEX_LOCK(mutex) lll_mutex_cond_lock (mutex) +-#define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_cond_trylock (mutex) +-#define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_cond_lock (mutex, id) ++#define LLL_MUTEX_LOCK(mutex) lll_cond_lock (mutex, /* XYZ */ LLL_SHARED) ++#define LLL_MUTEX_TRYLOCK(mutex) lll_cond_trylock (mutex) ++#define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_cond_lock (mutex, id, /* XYZ */ LLL_SHARED) + #define __pthread_mutex_lock __pthread_mutex_cond_lock + #define NO_INCR + +--- libc/nptl/sysdeps/unix/sysv/linux/sem_wait.c.jj 2007-06-08 09:13:52.000000000 +0200 ++++ libc/nptl/sysdeps/unix/sysv/linux/sem_wait.c 2007-07-29 11:48:55.000000000 +0200 +@@ -57,8 +57,7 @@ __new_sem_wait (sem_t *sem) + int oldtype = __pthread_enable_asynccancel (); + + err = lll_futex_wait (&isem->value, 0, +- // XYZ check mutex flag +- LLL_SHARED); ++ isem->private ^ FUTEX_PRIVATE_FLAG); + + /* Disable asynchronous cancellation. */ + __pthread_disable_asynccancel (oldtype); +--- libc/nptl/tpp.c.jj 2006-08-15 01:02:29.000000000 +0200 ++++ libc/nptl/tpp.c 2007-07-29 11:48:55.000000000 +0200 +@@ -1,5 +1,5 @@ + /* Thread Priority Protect helpers. +- Copyright (C) 2006 Free Software Foundation, Inc. ++ Copyright (C) 2006, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Jakub Jelinek , 2006. 
+ +@@ -93,7 +93,7 @@ __pthread_tpp_change_priority (int previ + if (priomax == newpriomax) + return 0; + +- lll_lock (self->lock); ++ lll_lock (self->lock, LLL_PRIVATE); + + tpp->priomax = newpriomax; + +@@ -129,7 +129,7 @@ __pthread_tpp_change_priority (int previ + } + } + +- lll_unlock (self->lock); ++ lll_unlock (self->lock, LLL_PRIVATE); + + return result; + } +@@ -144,7 +144,7 @@ __pthread_current_priority (void) + + int result = 0; + +- lll_lock (self->lock); ++ lll_lock (self->lock, LLL_PRIVATE); + + if ((self->flags & ATTR_FLAG_SCHED_SET) == 0) + { +@@ -166,7 +166,7 @@ __pthread_current_priority (void) + if (result != -1) + result = self->schedparam.sched_priority; + +- lll_unlock (self->lock); ++ lll_unlock (self->lock, LLL_PRIVATE); + + return result; + } diff --git a/glibc-warning-patrol.patch b/glibc-warning-patrol.patch new file mode 100644 index 0000000..0942e03 --- /dev/null +++ b/glibc-warning-patrol.patch @@ -0,0 +1,210 @@ +2007-07-30 Jakub Jelinek + + * stdlib/tst-strtod2.c (do_test): Use %tu in fmt string for ptrdiff_t + value. + + * stdio-common/tst-fmemopen2.c (do_test): Avoid fmt string warning + if off_t is different rank from size_t. + + * sysdeps/generic/unwind-dw2.c (extract_cie_info, execute_cfa_program, + uw_frame_state_for): Avoid type punning warnings. + * sysdeps/generic/unwind-dw2-fde-glibc.c + (_Unwind_IteratePhdrCallback): Likewise. + * sysdeps/generic/unwind-dw2-fde.c (_Unwind_Find_FDE): Likewise. + (binary_search_single_encoding_fdes, binary_search_mixed_encoding_fdes, + get_cie_encoding, linear_search_fdes): Don't mix char and unsigned char + pointers. +nptl/ + * tst-rwlock14.c (do_test): Avoid warnings on 32-bit arches. +localedata/ + * tst-strptime.c (do_test): Use %tu in fmt string for ptrdiff_t value. + +--- libc/stdlib/tst-strtod2.c.jj 2007-07-30 19:13:57.000000000 +0200 ++++ libc/stdlib/tst-strtod2.c 2007-07-30 19:13:57.000000000 +0200 +@@ -32,7 +32,7 @@ do_test (void) + } + if (ep != tests[i].str + tests[i].offset) + { +- printf ("test %zu strtod parsed %ju characters, expected %zu\n", ++ printf ("test %zu strtod parsed %tu characters, expected %zu\n", + i, ep - tests[i].str, tests[i].offset); + status = 1; + } +--- libc/localedata/tst-strptime.c.jj 2007-07-11 00:12:28.000000000 +0200 ++++ libc/localedata/tst-strptime.c 2007-07-30 19:13:23.000000000 +0200 +@@ -15,7 +15,7 @@ do_test (void) + static const char s[] = "\ + \x54\x68\xb8\x6e\x67\x20\x6d\xad\xea\x69\x20\x6d\xe9\x74"; + char *r = strptime (s, "%b", &tm); +- printf ("r = %p, r-s = %ju, tm.tm_mon = %d\n", r, r - s, tm.tm_mon); ++ printf ("r = %p, r-s = %tu, tm.tm_mon = %d\n", r, r - s, tm.tm_mon); + return r == NULL || r - s != 14 || tm.tm_mon != 10; + } + +--- libc/nptl/tst-rwlock14.c.jj 2007-07-30 19:23:34.000000000 +0200 ++++ libc/nptl/tst-rwlock14.c 2007-07-30 19:30:46.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2004 Free Software Foundation, Inc. ++/* Copyright (C) 2004, 2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2004. 
+ +@@ -130,8 +130,8 @@ do_test (void) + result = 1; + } + +- ts.tv_nsec = 0x100001000LL; +- if (ts.tv_nsec != 0x100001000LL) ++ ts.tv_nsec = (__typeof (ts.tv_nsec)) 0x100001000LL; ++ if ((__typeof (ts.tv_nsec)) 0x100001000LL != 0x100001000LL) + ts.tv_nsec = 2000000000; + + e = pthread_rwlock_timedrdlock (&r, &ts); +--- libc/stdio-common/tst-fmemopen2.c.jj 2007-07-30 19:16:50.000000000 +0200 ++++ libc/stdio-common/tst-fmemopen2.c 2007-07-30 19:16:50.000000000 +0200 +@@ -28,7 +28,7 @@ do_test (void) + o = ftello (fp); + if (o != 0) + { +- printf ("second ftello returned %ld, expected %zu\n", o, (off_t) 0); ++ printf ("second ftello returned %ld, expected 0\n", o); + result = 1; + } + if (fseeko (fp, 0, SEEK_END) != 0) +--- libc/sysdeps/generic/unwind-dw2.c.jj 2006-05-02 02:45:11.000000000 +0200 ++++ libc/sysdeps/generic/unwind-dw2.c 2007-07-30 19:45:31.000000000 +0200 +@@ -1,5 +1,5 @@ + /* DWARF2 exception handling and frame unwind runtime interface routines. +- Copyright (C) 1997,1998,1999,2000,2001,2002,2003,2005,2006 ++ Copyright (C) 1997,1998,1999,2000,2001,2002,2003,2005,2006,2007 + Free Software Foundation, Inc. + + This file is part of the GNU C Library. +@@ -309,8 +309,9 @@ extract_cie_info (struct dwarf_cie *cie, + /* "P" indicates a personality routine in the CIE augmentation. */ + else if (aug[0] == 'P') + { +- p = read_encoded_value (context, *p, p + 1, +- (_Unwind_Ptr *) &fs->personality); ++ _Unwind_Ptr personality; ++ p = read_encoded_value (context, *p, p + 1, &personality); ++ fs->personality = (_Unwind_Personality_Fn) personality; + aug += 1; + } + +@@ -771,8 +772,12 @@ execute_cfa_program (const unsigned char + else switch (insn) + { + case DW_CFA_set_loc: +- insn_ptr = read_encoded_value (context, fs->fde_encoding, +- insn_ptr, (_Unwind_Ptr *) &fs->pc); ++ { ++ _Unwind_Ptr pc; ++ insn_ptr = read_encoded_value (context, fs->fde_encoding, ++ insn_ptr, &pc); ++ fs->pc = (void *) pc; ++ } + break; + + case DW_CFA_advance_loc1: +@@ -992,8 +997,11 @@ uw_frame_state_for (struct _Unwind_Conte + insn = aug + i; + } + if (fs->lsda_encoding != DW_EH_PE_omit) +- aug = read_encoded_value (context, fs->lsda_encoding, aug, +- (_Unwind_Ptr *) &context->lsda); ++ { ++ _Unwind_Ptr lsda; ++ aug = read_encoded_value (context, fs->lsda_encoding, aug, &lsda); ++ context->lsda = (void *) lsda; ++ } + + /* Then the insns in the FDE up to our target PC. */ + if (insn == NULL) +--- libc/sysdeps/generic/unwind-dw2-fde-glibc.c.jj 2004-05-25 20:48:24.000000000 +0200 ++++ libc/sysdeps/generic/unwind-dw2-fde-glibc.c 2007-07-30 19:46:17.000000000 +0200 +@@ -1,4 +1,4 @@ +-/* Copyright (C) 2001, 2002 Free Software Foundation, Inc. ++/* Copyright (C) 2001, 2002, 2007 Free Software Foundation, Inc. + Contributed by Jakub Jelinek . + + This file is part of the GNU C Library. +@@ -235,10 +235,11 @@ _Unwind_IteratePhdrCallback (struct dl_p + if (data->ret != NULL) + { + unsigned int encoding = get_fde_encoding (data->ret); ++ _Unwind_Ptr func; + read_encoded_value_with_base (encoding, + base_from_cb_data (encoding, data), +- data->ret->pc_begin, +- (_Unwind_Ptr *)&data->func); ++ data->ret->pc_begin, &func); ++ data->func = (void *) func; + } + return 1; + } +--- libc/sysdeps/generic/unwind-dw2-fde.c.jj 2006-04-07 22:50:31.000000000 +0200 ++++ libc/sysdeps/generic/unwind-dw2-fde.c 2007-07-30 19:45:55.000000000 +0200 +@@ -1,5 +1,5 @@ + /* Subroutines needed for unwinding stack frames for exception handling. 
*/ +-/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2006 ++/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2006, 2007 + Free Software Foundation, Inc. + Contributed by Jason Merrill . + +@@ -301,7 +301,8 @@ get_cie_encoding (struct dwarf_cie *cie) + if (aug[0] != 'z') + return DW_EH_PE_absptr; + +- p = aug + strlen (aug) + 1; /* Skip the augmentation string. */ ++ /* Skip the augmentation string. */ ++ p = aug + strlen ((const char *) aug) + 1; + p = read_uleb128 (p, &utmp); /* Skip code alignment. */ + p = read_sleb128 (p, &stmp); /* Skip data alignment. */ + p++; /* Skip return address column. */ +@@ -838,7 +839,7 @@ linear_search_fdes (struct object *ob, f + else + { + _Unwind_Ptr mask; +- const char *p; ++ const unsigned char *p; + + p = read_encoded_value_with_base (encoding, base, + this_fde->pc_begin, &pc_begin); +@@ -908,7 +909,7 @@ binary_search_single_encoding_fdes (stru + size_t i = (lo + hi) / 2; + fde *f = vec->array[i]; + _Unwind_Ptr pc_begin, pc_range; +- const char *p; ++ const unsigned char *p; + + p = read_encoded_value_with_base (encoding, base, f->pc_begin, + &pc_begin); +@@ -936,7 +937,7 @@ binary_search_mixed_encoding_fdes (struc + size_t i = (lo + hi) / 2; + fde *f = vec->array[i]; + _Unwind_Ptr pc_begin, pc_range; +- const char *p; ++ const unsigned char *p; + int encoding; + + encoding = get_fde_encoding (f); +@@ -1046,6 +1047,7 @@ _Unwind_Find_FDE (void *pc, struct dwarf + if (f) + { + int encoding; ++ _Unwind_Ptr func; + + bases->tbase = ob->tbase; + bases->dbase = ob->dbase; +@@ -1054,7 +1056,8 @@ _Unwind_Find_FDE (void *pc, struct dwarf + if (ob->s.b.mixed_encoding) + encoding = get_fde_encoding (f); + read_encoded_value_with_base (encoding, base_from_object (encoding, ob), +- f->pc_begin, (_Unwind_Ptr *)&bases->func); ++ f->pc_begin, &func); ++ bases->func = (void *) func; + } + + return f; + diff --git a/glibc.spec b/glibc.spec index 0d909e9..9dbecc3 100644 --- a/glibc.spec +++ b/glibc.spec @@ -36,6 +36,10 @@ Source2: %(echo %{glibcsrcdir} | sed s/glibc-/glibc-libidn-/).tar.bz2 Source3: %{glibcname}-fedora-%{glibcdate}.tar.bz2 Patch0: %{glibcname}-fedora.patch Patch1: %{name}-ia64-lib64.patch +Patch2: glibc-warning-patrol.patch +Patch3: glibc-i386-rwlock.patch +Patch4: glibc-ldconfig-speedup.patch +Patch5: glibc-private-futex.patch Buildroot: %{_tmppath}/glibc-%{PACKAGE_VERSION}-root Obsoletes: zoneinfo, libc-static, libc-devel, libc-profile, libc-headers, Obsoletes: gencat, locale, ldconfig, locale-ja, glibc-profile @@ -247,6 +251,10 @@ package or when debugging this package. %patch1 -p1 %endif %endif +%patch2 -p1 +%patch3 -p1 +%patch4 -p1 +%patch5 -p1 # Hack till glibc-kernheaders get updated, argh mkdir -p override_headers/linux diff --git a/sources b/sources index f9b15dd..2cfb0c1 100644 --- a/sources +++ b/sources @@ -1,2 +1,2 @@ -1394656dacdf6b56f92b2922011e5763 glibc-20070515T2025.tar.bz2 -3d589fbdb896cec5c158cd8aaa442716 glibc-fedora-20070515T2025.tar.bz2 +42cd03e6b5608b1ca1444341a7c1027b glibc-20070731T1624.tar.bz2 +9c141a28ced6c0680889ac8069dee8b8 glibc-fedora-20070731T1624.tar.bz2
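A note on the convention glibc-private-futex.patch introduces above: every low-level lock primitive now takes an explicit private argument (LLL_PRIVATE or LLL_SHARED), and for process-private synchronization the futex syscall is issued with FUTEX_PRIVATE_FLAG, which lets the kernel hash the futex by address within the current mm rather than resolving the mapping. The fragment below is only a standalone illustration of that flag using raw syscalls, not the glibc-internal lll_* code; it assumes kernel headers new enough (2.6.22) to define FUTEX_PRIVATE_FLAG, with a fallback to the kernel's value 128 otherwise.

/* Illustration only: process-private vs. process-shared futex wake.  */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

#ifndef FUTEX_PRIVATE_FLAG
# define FUTEX_PRIVATE_FLAG 128
#endif

static long
futex_op (int *uaddr, int op, int val, int is_private)
{
  /* is_private plays the role of LLL_PRIVATE; process-shared locks must
     leave the flag clear so other processes can match the futex.  */
  if (is_private)
    op |= FUTEX_PRIVATE_FLAG;
  return syscall (SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

int
main (void)
{
  int futex_word = 1;

  /* No waiters are queued, so both calls report 0 woken threads.  */
  printf ("private wake: %ld\n", futex_op (&futex_word, FUTEX_WAKE, 1, 1));
  printf ("shared wake:  %ld\n", futex_op (&futex_word, FUTEX_WAKE, 1, 0));
  return 0;
}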
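The sparc pthread_barrier rewrite keys its choice between LLL_PRIVATE and LLL_SHARED off the pshared byte in union sparc_pthread_barrier, which stays zero for barriers initialized with a NULL attribute (or PTHREAD_PROCESS_PRIVATE). A minimal caller exercising that default, private path might look like the sketch below; the thread count and message are arbitrary.

#define _XOPEN_SOURCE 600
#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4

static pthread_barrier_t barrier;

static void *
worker (void *arg)
{
  /* Exactly one waiter per round is told it did the serialization.  */
  if (pthread_barrier_wait (&barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
    puts ("serializing thread released the barrier");
  return arg;
}

int
main (void)
{
  pthread_t th[NTHREADS];
  int i;

  /* NULL attribute == PTHREAD_PROCESS_PRIVATE, i.e. s.pshared == 0,
     so pthread_barrier_wait ends up using LLL_PRIVATE futex ops.  */
  if (pthread_barrier_init (&barrier, NULL, NTHREADS) != 0)
    return 1;

  for (i = 0; i < NTHREADS; ++i)
    if (pthread_create (&th[i], NULL, worker, NULL) != 0)
      return 1;
  for (i = 0; i < NTHREADS; ++i)
    pthread_join (th[i], NULL);

  return pthread_barrier_destroy (&barrier);
}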
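glibc-warning-patrol.patch applies two mechanical patterns: format ptrdiff_t values with the t length modifier (%tu/%td) instead of %ju, and decode into a temporary of the callee's type before converting, rather than casting the address of a differently typed member to _Unwind_Ptr *. The fragment below restates both patterns outside glibc; decode_ptr is a made-up stand-in for read_encoded_value, not a real API.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct state { void *pc; };

/* Hypothetical decoder in the shape of read_encoded_value: stores a
   pointer-sized integer and returns the advanced input position.  */
static const unsigned char *
decode_ptr (const unsigned char *p, uintptr_t *valp)
{
  memcpy (valp, p, sizeof *valp);
  return p + sizeof *valp;
}

int
main (void)
{
  const char str[] = "1234.5abc";
  const char *ep = strchr (str, 'a');
  unsigned char buf[sizeof (uintptr_t)];
  struct state fs;
  uintptr_t pc;   /* temporary with the decoder's own type */

  /* %tu matches the rank of ptrdiff_t, so -Wformat stays quiet on
     32-bit targets, unlike the earlier %ju.  */
  printf ("parsed %tu characters\n", ep - str);

  memset (buf, 0, sizeof buf);
  decode_ptr (buf, &pc);
  fs.pc = (void *) pc;   /* explicit conversion instead of type punning */
  printf ("pc = %p\n", fs.pc);
  return 0;
}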