Sync with upstream master
- glibc-bench-compare.patch: Merged upstream
- glibc-rh757881.patch: Fixed differently upstream
- glibc-revert-arena-threshold-fix.patch: Additional fixes on top of this
- glibc-rh841787.patch: Fixed differently upstream
- Set MODULE_NAME=librt for rtkaio
- Fix up glibc-rh741105.patch to continue to work with latest master
parent 706a051a42
commit 6223dbf32d
@@ -20,8 +20,8 @@ index ded5471..7d28496 100644
+ stp xzr, x30, [sp, #32+16*2]
+
mrs x4, tpidr_el0
ldr x1, [x0,#8]
ldr x0, [x4]
/* The ldar here happens after the load from [x0] at the call site
(that is generated by the compiler as part of the TLS access ABI),
@@ -169,6 +173,8 @@ _dl_tlsdesc_dynamic:
1:
ldp x1, x2, [sp, #32+16*0]
@@ -30,4 +30,4 @@ index ded5471..7d28496 100644
+ msr nzcv, x30

ldp x29, x30, [sp], #(32+16*NSAVEXREGPAIRS)
cfi_adjust_cfa_offset (32+16*NSAVEXREGPAIRS)
cfi_adjust_cfa_offset (-32-16*NSAVEXREGPAIRS)
@@ -1,412 +0,0 @@
diff -pruN a/benchtests/scripts/compare_bench.py b/benchtests/scripts/compare_bench.py
--- a/benchtests/scripts/compare_bench.py 1970-01-01 05:30:00.000000000 +0530
+++ b/benchtests/scripts/compare_bench.py 2015-05-07 15:32:41.843584024 +0530
@@ -0,0 +1,184 @@
+#!/usr/bin/python
+# Copyright (C) 2015 Free Software Foundation, Inc.
+# This file is part of the GNU C Library.
+#
+# The GNU C Library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# The GNU C Library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with the GNU C Library; if not, see
+# <http://www.gnu.org/licenses/>.
+"""Compare two benchmark results
+
+Given two benchmark result files and a threshold, this script compares the
+benchmark results and flags differences in performance beyond a given
+threshold.
+"""
+import sys
+import os
+import pylab
+import import_bench as bench
+
+def do_compare(func, var, tl1, tl2, par, threshold):
+ """Compare one of the aggregate measurements
+
+ Helper function to compare one of the aggregate measurements of a function
+ variant.
+
+ Args:
+ func: Function name
+ var: Function variant name
+ tl1: The first timings list
+ tl2: The second timings list
+ par: The aggregate to measure
+ threshold: The threshold for differences, beyond which the script should
+ print a warning.
+ """
+ d = abs(tl2[par] - tl1[par]) * 100 / tl1[str(par)]
+ if d > threshold:
+ if tl1[par] > tl2[par]:
+ ind = '+++'
+ else:
+ ind = '---'
+ print('%s %s(%s)[%s]: (%.2lf%%) from %g to %g' %
+ (ind, func, var, par, d, tl1[par], tl2[par]))
+
+
+def compare_runs(pts1, pts2, threshold):
+ """Compare two benchmark runs
+
+ Args:
+ pts1: Timing data from first machine
+ pts2: Timing data from second machine
+ """
+
+ # XXX We assume that the two benchmarks have identical functions and
+ # variants. We cannot compare two benchmarks that may have different
+ # functions or variants. Maybe that is something for the future.
+ for func in pts1['functions'].keys():
+ for var in pts1['functions'][func].keys():
+ tl1 = pts1['functions'][func][var]
+ tl2 = pts2['functions'][func][var]
+
+ # Compare the consolidated numbers
+ # do_compare(func, var, tl1, tl2, 'max', threshold)
+ do_compare(func, var, tl1, tl2, 'min', threshold)
+ do_compare(func, var, tl1, tl2, 'mean', threshold)
+
+ # Skip over to the next variant or function if there is no detailed
+ # timing info for the function variant.
+ if 'timings' not in pts1['functions'][func][var].keys() or \
+ 'timings' not in pts2['functions'][func][var].keys():
+ continue
+
+ # If two lists do not have the same length then it is likely that
+ # the performance characteristics of the function have changed.
+ # XXX: It is also likely that there was some measurement that
+ # strayed outside the usual range. Such ouiers should not
+ # happen on an idle machine with identical hardware and
+ # configuration, but ideal environments are hard to come by.
+ if len(tl1['timings']) != len(tl2['timings']):
+ print('* %s(%s): Timing characteristics changed' %
+ (func, var))
+ print('\tBefore: [%s]' %
+ ', '.join([str(x) for x in tl1['timings']]))
+ print('\tAfter: [%s]' %
+ ', '.join([str(x) for x in tl2['timings']]))
+ continue
+
+ # Collect numbers whose differences cross the threshold we have
+ # set.
+ issues = [(x, y) for x, y in zip(tl1['timings'], tl2['timings']) \
+ if abs(y - x) * 100 / x > threshold]
+
+ # Now print them.
+ for t1, t2 in issues:
+ d = abs(t2 - t1) * 100 / t1
+ if t2 > t1:
+ ind = '-'
+ else:
+ ind = '+'
+
+ print("%s %s(%s): (%.2lf%%) from %g to %g" %
+ (ind, func, var, d, t1, t2))
+
+
+def plot_graphs(bench1, bench2):
+ """Plot graphs for functions
+
+ Make scatter plots for the functions and their variants.
+
+ Args:
+ bench1: Set of points from the first machine
+ bench2: Set of points from the second machine.
+ """
+ for func in bench1['functions'].keys():
+ for var in bench1['functions'][func].keys():
+ # No point trying to print a graph if there are no detailed
+ # timings.
+ if u'timings' not in bench1['functions'][func][var].keys():
+ print('Skipping graph for %s(%s)' % (func, var))
+ continue
+
+ pylab.clf()
+ pylab.ylabel('Time (cycles)')
+
+ # First set of points
+ length = len(bench1['functions'][func][var]['timings'])
+ X = [float(x) for x in range(length)]
+ lines = pylab.scatter(X, bench1['functions'][func][var]['timings'],
+ 1.5 + 100 / length)
+ pylab.setp(lines, 'color', 'r')
+
+ # Second set of points
+ length = len(bench2['functions'][func][var]['timings'])
+ X = [float(x) for x in range(length)]
+ lines = pylab.scatter(X, bench2['functions'][func][var]['timings'],
+ 1.5 + 100 / length)
+ pylab.setp(lines, 'color', 'g')
+
+ if var:
+ filename = "%s-%s.png" % (func, var)
+ else:
+ filename = "%s.png" % func
+ print('Writing out %s' % filename)
+ pylab.savefig(filename)
+
+
+def main(args):
+ """Program Entry Point
+
+ Take two benchmark output files and compare their timings.
+ """
+ if len(args) > 4 or len(args) < 3:
+ print('Usage: %s <schema> <file1> <file2> [threshold in %%]' % sys.argv[0])
+ sys.exit(os.EX_USAGE)
+
+ bench1 = bench.parse_bench(args[1], args[0])
+ bench2 = bench.parse_bench(args[2], args[0])
+ if len(args) == 4:
+ threshold = float(args[3])
+ else:
+ threshold = 10.0
+
+ if (bench1['timing_type'] != bench2['timing_type']):
+ print('Cannot compare benchmark outputs: timing types are different')
+ return
+
+ plot_graphs(bench1, bench2)
+
+ bench.compress_timings(bench1)
+ bench.compress_timings(bench2)
+
+ compare_runs(bench1, bench2, threshold)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff -pruN a/benchtests/scripts/import_bench.py b/benchtests/scripts/import_bench.py
--- a/benchtests/scripts/import_bench.py 1970-01-01 05:30:00.000000000 +0530
+++ b/benchtests/scripts/import_bench.py 2015-05-07 15:32:41.844584032 +0530
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# Copyright (C) 2015 Free Software Foundation, Inc.
+# This file is part of the GNU C Library.
+#
+# The GNU C Library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# The GNU C Library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with the GNU C Library; if not, see
+# <http://www.gnu.org/licenses/>.
+"""Functions to import benchmark data and process it"""
+
+import json
+try:
+ import jsonschema as validator
+except ImportError:
+ print('Could not find jsonschema module.')
+ raise
+
+
+def mean(lst):
+ """Compute and return mean of numbers in a list
+
+ The numpy average function has horrible performance, so implement our
+ own mean function.
+
+ Args:
+ lst: The list of numbers to average.
+ Return:
+ The mean of members in the list.
+ """
+ return sum(lst) / len(lst)
+
+
+def split_list(bench, func, var):
+ """ Split the list into a smaller set of more distinct points
+
+ Group together points such that the difference between the smallest
+ point and the mean is less than 1/3rd of the mean. This means that
+ the mean is at most 1.5x the smallest member of that group.
+
+ mean - xmin < mean / 3
+ i.e. 2 * mean / 3 < xmin
+ i.e. mean < 3 * xmin / 2
+
+ For an evenly distributed group, the largest member will be less than
+ twice the smallest member of the group.
+ Derivation:
+
+ An evenly distributed series would be xmin, xmin + d, xmin + 2d...
+
+ mean = (2 * n * xmin + n * (n - 1) * d) / 2 * n
+ and max element is xmin + (n - 1) * d
+
+ Now, mean < 3 * xmin / 2
+
+ 3 * xmin > 2 * mean
+ 3 * xmin > (2 * n * xmin + n * (n - 1) * d) / n
+ 3 * n * xmin > 2 * n * xmin + n * (n - 1) * d
+ n * xmin > n * (n - 1) * d
+ xmin > (n - 1) * d
+ 2 * xmin > xmin + (n-1) * d
+ 2 * xmin > xmax
+
+ Hence, proved.
+
+ Similarly, it is trivial to prove that for a similar aggregation by using
+ the maximum element, the maximum element in the group must be at most 4/3
+ times the mean.
+
+ Args:
+ bench: The benchmark object
+ func: The function name
+ var: The function variant name
+ """
+ means = []
+ lst = bench['functions'][func][var]['timings']
+ last = len(lst) - 1
+ while lst:
+ for i in range(last + 1):
+ avg = mean(lst[i:])
+ if avg > 0.75 * lst[last]:
+ means.insert(0, avg)
+ lst = lst[:i]
+ last = i - 1
+ break
+ bench['functions'][func][var]['timings'] = means
+
+
+def do_for_all_timings(bench, callback):
+ """Call a function for all timing objects for each function and its
+ variants.
+
+ Args:
+ bench: The benchmark object
+ callback: The callback function
+ """
+ for func in bench['functions'].keys():
+ for k in bench['functions'][func].keys():
+ if 'timings' not in bench['functions'][func][k].keys():
+ continue
+
+ callback(bench, func, k)
+
+
+def compress_timings(points):
+ """Club points with close enough values into a single mean value
+
+ See split_list for details on how the clubbing is done.
+
+ Args:
+ points: The set of points.
+ """
+ do_for_all_timings(points, split_list)
+
+
+def parse_bench(filename, schema_filename):
+ """Parse the input file
+
+ Parse and validate the json file containing the benchmark outputs. Return
+ the resulting object.
+ Args:
+ filename: Name of the benchmark output file.
+ Return:
+ The bench dictionary.
+ """
+ with open(schema_filename, 'r') as schemafile:
+ schema = json.load(schemafile)
+ with open(filename, 'r') as benchfile:
+ bench = json.load(benchfile)
+ validator.validate(bench, schema)
+ do_for_all_timings(bench, lambda b, f, v:
+ b['functions'][f][v]['timings'].sort())
+ return bench
diff -pruN a/benchtests/scripts/validate_benchout.py b/benchtests/scripts/validate_benchout.py
--- a/benchtests/scripts/validate_benchout.py 2015-05-07 11:58:40.000000000 +0530
+++ b/benchtests/scripts/validate_benchout.py 2015-05-07 15:32:41.844584032 +0530
@@ -27,37 +27,26 @@ import sys
import os

try:
- import jsonschema
+ import import_bench as bench
except ImportError:
- print('Could not find jsonschema module. Output not validated.')
+ print('Import Error: Output will not be validated.')
# Return success because we don't want the bench target to fail just
# because the jsonschema module was not found.
sys.exit(os.EX_OK)


-def validate_bench(benchfile, schemafile):
- """Validate benchmark file
-
- Validate a benchmark output file against a JSON schema.
+def print_and_exit(message, exitcode):
+ """Prints message to stderr and returns the exit code.

Args:
- benchfile: The file name of the bench.out file.
- schemafile: The file name of the JSON schema file to validate
- bench.out against.
+ message: The message to print
+ exitcode: The exit code to return

- Exceptions:
- jsonschema.ValidationError: When bench.out is not valid
- jsonschema.SchemaError: When the JSON schema is not valid
- IOError: If any of the files are not found.
+ Returns:
+ The passed exit code
"""
- with open(benchfile, 'r') as bfile:
- with open(schemafile, 'r') as sfile:
- bench = json.load(bfile)
- schema = json.load(sfile)
- jsonschema.validate(bench, schema)
-
- # If we reach here, we're all good.
- print("Benchmark output in %s is valid." % benchfile)
+ print(message, file=sys.stderr)
+ return exitcode


def main(args):
@@ -73,11 +62,23 @@ def main(args):
Exceptions thrown by validate_bench
"""
if len(args) != 2:
- print("Usage: %s <bench.out file> <bench.out schema>" % sys.argv[0],
- file=sys.stderr)
- return os.EX_USAGE
+ return print_and_exit("Usage: %s <bench.out file> <bench.out schema>"
+ % sys.argv[0], os.EX_USAGE)
+
+ try:
+ bench.parse_bench(args[0], args[1])
+ except IOError as e:
+ return print_and_exit("IOError(%d): %s" % (e.errno, e.strerror),
+ os.EX_OSFILE)
+
+ except bench.validator.ValidationError as e:
+ return print_and_exit("Invalid benchmark output: %s" % e.message,
+ os.EX_DATAERR)
+
+ except bench.validator.SchemaError as e:
+ return print_and_exit("Invalid schema: %s" % e.message, os.EX_DATAERR)

- validate_bench(args[0], args[1])
+ print("Benchmark output in %s is valid." % args[0])
return os.EX_OK
@@ -1,44 +0,0 @@
Revert the following fix temporarily:

commit c26efef9798914e208329c0e8c3c73bb1135d9e3
Author: Mel Gorman <mgorman@suse.de>
Date: Thu Apr 2 12:14:14 2015 +0530

malloc: Consistently apply trim_threshold to all heaps [BZ #17195]

because it makes an openjdk bug (#1209451) more prominent, crashing java
commands.

diff --git a/malloc/arena.c b/malloc/arena.c
index 8af51f0..d85f371 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -658,7 +658,7 @@ heap_trim (heap_info *heap, size_t pad)
unsigned long pagesz = GLRO (dl_pagesize);
mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
heap_info *prev_heap;
- long new_size, top_size, extra, prev_size, misalign;
+ long new_size, top_size, top_area, extra, prev_size, misalign;

/* Can this heap go away completely? */
while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
@@ -694,9 +694,16 @@ heap_trim (heap_info *heap, size_t pad)
set_head (top_chunk, new_size | PREV_INUSE);
/*check_chunk(ar_ptr, top_chunk);*/
}
+
+ /* Uses similar logic for per-thread arenas as the main arena with systrim
+ by preserving the top pad and at least a page. */
top_size = chunksize (top_chunk);
- extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
- if (extra < (long) pagesz)
+ top_area = top_size - MINSIZE - 1;
+ if (top_area <= pad)
+ return 0;
+
+ extra = ALIGN_DOWN(top_area - pad, pagesz);
+ if ((unsigned long) extra < mp_.trim_threshold)
return 0;

/* Try to shrink. */
@@ -27,7 +27,7 @@ index 6078e2d..36fd50b 100644
+static-only-routines += libc_pthread_atfork
shared-only-routines = forward

libpthread-routines = nptl-init vars events version \
libpthread-routines = nptl-init vars events version pt-interp \
diff --git a/nptl/libc_pthread_atfork.c b/nptl/libc_pthread_atfork.c
new file mode 100644
index 0000000..667049a
@@ -1,6 +1,16 @@
diff -rup a/elf/dl-load.c b/elf/dl-load.c
--- a/elf/dl-load.c 2012-02-03 10:59:58.917870716 -0700
+++ b/elf/dl-load.c 2012-02-03 11:01:01.796580644 -0700
@@ -880,7 +880,8 @@ _dl_map_object_from_fd (const char *name, int fd, struct filebuf *fbp,

/* Get file information. */
struct r_file_id id;
- if (__glibc_unlikely (!_dl_get_file_id (fd, &id)))
+ struct stat64 st;
+ if (__glibc_unlikely (!_dl_get_file_id (fd, &id, &st)))
{
errstring = N_("cannot stat shared object");
call_lose_errno:
@@ -1130,6 +1130,16 @@ _dl_map_object_from_fd (const char *name
= N_("ELF load command address/offset not properly aligned");
goto call_lose;
@@ -18,3 +28,45 @@ diff -rup a/elf/dl-load.c b/elf/dl-load.c

struct loadcmd *c = &loadcmds[nloadcmds++];
c->mapstart = ph->p_vaddr & ~(GLRO(dl_pagesize) - 1);
diff --git a/sysdeps/generic/dl-fileid.h b/sysdeps/generic/dl-fileid.h
index 2cbd21d..9b7f410 100644
--- a/sysdeps/generic/dl-fileid.h
+++ b/sysdeps/generic/dl-fileid.h
@@ -29,7 +29,8 @@ struct r_file_id
On error, returns false, with errno set. */
static inline bool
_dl_get_file_id (int fd __attribute__ ((unused)),
- struct r_file_id *id __attribute__ ((unused)))
+ struct r_file_id *id __attribute__ ((unused)),
+ struct stat64_t *st __attribute__((unused)))
{
return true;
}
diff --git a/sysdeps/posix/dl-fileid.h b/sysdeps/posix/dl-fileid.h
index d0d5436..7115c3b 100644
--- a/sysdeps/posix/dl-fileid.h
+++ b/sysdeps/posix/dl-fileid.h
@@ -27,18 +27,16 @@ struct r_file_id
ino64_t ino;
};

-/* Sample FD to fill in *ID. Returns true on success.
+/* Sample FD to fill in *ID and *ST. Returns true on success.
On error, returns false, with errno set. */
static inline bool
-_dl_get_file_id (int fd, struct r_file_id *id)
+_dl_get_file_id (int fd, struct r_file_id *id, struct stat64 *st)
{
- struct stat64 st;
-
- if (__glibc_unlikely (__fxstat64 (_STAT_VER, fd, &st) < 0))
+ if (__glibc_unlikely (__fxstat64 (_STAT_VER, fd, st) < 0))
return false;

- id->dev = st.st_dev;
- id->ino = st.st_ino;
+ id->dev = st->st_dev;
+ id->ino = st->st_ino;
return true;
}
@@ -1,167 +0,0 @@
diff -pruN a/malloc/arena.c b/malloc/arena.c
--- a/malloc/arena.c 2014-01-07 07:07:47.000000000 +0530
+++ b/malloc/arena.c 2014-01-07 14:11:40.093628218 +0530
@@ -702,7 +702,7 @@ heap_trim (heap_info *heap, size_t pad)
if (!prev_inuse (p)) /* consolidate backward */
{
p = prev_chunk (p);
- unlink (p, bck, fwd);
+ unlink (ar_ptr, p, bck, fwd);
}
assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
assert (((char *) p + new_size) == ((char *) heap + heap->size));
diff -pruN a/malloc/hooks.c b/malloc/hooks.c
--- a/malloc/hooks.c 2014-01-07 07:07:47.000000000 +0530
+++ b/malloc/hooks.c 2014-01-07 14:12:41.804625603 +0530
@@ -237,7 +237,9 @@ top_check (void)
(char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
return 0;

+ mutex_unlock(&main_arena.mutex);
malloc_printerr (check_action, "malloc: top chunk is corrupt", t);
+ mutex_lock(&main_arena.mutex);

/* Try to set up a new top chunk. */
brk = MORECORE (0);
diff -pruN a/malloc/malloc.c b/malloc/malloc.c
--- a/malloc/malloc.c 2014-01-07 07:07:47.000000000 +0530
+++ b/malloc/malloc.c 2014-01-07 14:29:53.370581893 +0530
@@ -1404,11 +1404,15 @@ typedef struct malloc_chunk *mbinptr;
#define last(b) ((b)->bk)

/* Take a chunk off a bin list */
-#define unlink(P, BK, FD) { \
+#define unlink(AV, P, BK, FD) { \
FD = P->fd; \
BK = P->bk; \
if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
- malloc_printerr (check_action, "corrupted double-linked list", P); \
+ { \
+ mutex_unlock(&(AV)->mutex); \
+ malloc_printerr (check_action, "corrupted double-linked list", P); \
+ mutex_lock(&(AV)->mutex); \
+ } \
else { \
FD->bk = BK; \
BK->fd = FD; \
@@ -2524,7 +2528,9 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av
else if (contiguous (av) && old_size && brk < old_end)
{
/* Oops! Someone else killed our space.. Can't touch anything. */
+ mutex_unlock(&av->mutex);
malloc_printerr (3, "break adjusted to free malloc space", brk);
+ mutex_lock(&av->mutex);
}

/*
@@ -3353,7 +3359,9 @@ _int_malloc (mstate av, size_t bytes)
{
errstr = "malloc(): memory corruption (fast)";
errout:
+ mutex_unlock(&av->mutex);
malloc_printerr (check_action, errstr, chunk2mem (victim));
+ mutex_lock(&av->mutex);
return NULL;
}
check_remalloced_chunk (av, victim, nb);
@@ -3441,8 +3449,12 @@ _int_malloc (mstate av, size_t bytes)
bck = victim->bk;
if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
|| __builtin_expect (victim->size > av->system_mem, 0))
- malloc_printerr (check_action, "malloc(): memory corruption",
- chunk2mem (victim));
+ {
+ void *p = chunk2mem(victim);
+ mutex_unlock(&av->mutex);
+ malloc_printerr (check_action, "malloc(): memory corruption", p);
+ mutex_lock(&av->mutex);
+ }
size = chunksize (victim);

/*
@@ -3589,7 +3601,7 @@ _int_malloc (mstate av, size_t bytes)
victim = victim->fd;

remainder_size = size - nb;
- unlink (victim, bck, fwd);
+ unlink (av, victim, bck, fwd);

/* Exhaust */
if (remainder_size < MINSIZE)
@@ -3694,7 +3706,7 @@ _int_malloc (mstate av, size_t bytes)
remainder_size = size - nb;

/* unlink */
- unlink (victim, bck, fwd);
+ unlink (av, victim, bck, fwd);

/* Exhaust */
if (remainder_size < MINSIZE)
@@ -3832,9 +3844,11 @@ _int_free (mstate av, mchunkptr p, int h
{
errstr = "free(): invalid pointer";
errout:
- if (!have_lock && locked)
+ if (have_lock || locked)
(void) mutex_unlock (&av->mutex);
malloc_printerr (check_action, errstr, chunk2mem (p));
+ if (have_lock)
+ mutex_lock(&av->mutex);
return;
}
/* We know that each chunk is at least MINSIZE bytes in size or a
@@ -3981,7 +3995,7 @@ _int_free (mstate av, mchunkptr p, int h
prevsize = p->prev_size;
size += prevsize;
p = chunk_at_offset(p, -((long) prevsize));
- unlink(p, bck, fwd);
+ unlink(av, p, bck, fwd);
}

if (nextchunk != av->top) {
@@ -3990,7 +4004,7 @@ _int_free (mstate av, mchunkptr p, int h

/* consolidate forward */
if (!nextinuse) {
- unlink(nextchunk, bck, fwd);
+ unlink(av, nextchunk, bck, fwd);
size += nextsize;
} else
clear_inuse_bit_at_offset(nextchunk, 0);
@@ -4151,7 +4165,7 @@ static void malloc_consolidate(mstate av
prevsize = p->prev_size;
size += prevsize;
p = chunk_at_offset(p, -((long) prevsize));
- unlink(p, bck, fwd);
+ unlink(av, p, bck, fwd);
}

if (nextchunk != av->top) {
@@ -4159,7 +4173,7 @@ static void malloc_consolidate(mstate av

if (!nextinuse) {
size += nextsize;
- unlink(nextchunk, bck, fwd);
+ unlink(av, nextchunk, bck, fwd);
} else
clear_inuse_bit_at_offset(nextchunk, 0);

@@ -4228,7 +4242,9 @@ _int_realloc(mstate av, mchunkptr oldp,
{
errstr = "realloc(): invalid old size";
errout:
+ mutex_unlock(&av->mutex);
malloc_printerr (check_action, errstr, chunk2mem (oldp));
+ mutex_lock(&av->mutex);
return NULL;
}

@@ -4274,7 +4290,7 @@ _int_realloc(mstate av, mchunkptr oldp,
(unsigned long) (nb))
{
newp = oldp;
- unlink (next, bck, fwd);
+ unlink (av, next, bck, fwd);
}

/* allocate, copy, free */
@@ -1,33 +0,0 @@
diff -rup a/resolv/res_init.c b/resolv/res_init.c
--- a/resolv/res_init.c 2012-07-26 15:10:45.655638776 -0600
+++ b/resolv/res_init.c 2012-07-26 15:11:27.731423002 -0600
@@ -314,9 +314,9 @@ __res_vinit(res_state statp, int preinit
cp++;
if ((*cp != '\0') && (*cp != '\n')
&& __inet_aton(cp, &a)) {
- statp->nsaddr_list[nservall].sin_addr = a;
- statp->nsaddr_list[nservall].sin_family = AF_INET;
- statp->nsaddr_list[nservall].sin_port =
+ statp->nsaddr_list[nserv].sin_addr = a;
+ statp->nsaddr_list[nserv].sin_family = AF_INET;
+ statp->nsaddr_list[nserv].sin_port =
htons(NAMESERVER_PORT);
nserv++;
#ifdef _LIBC
diff -rup a/resolv/res_send.c b/resolv/res_send.c
--- a/resolv/res_send.c 2010-05-04 05:27:23.000000000 -0600
+++ b/resolv/res_send.c 2012-07-26 15:34:58.398261659 -0600
@@ -421,10 +421,10 @@ __libc_res_nsend(res_state statp, const
EXT(statp).nsmap[n] = MAXNS;
}
}
- n = statp->nscount;
- if (statp->nscount > EXT(statp).nscount)
+ n = statp->nscount - EXT(statp).nscount6;
+ if (n > EXT(statp).nscount)
for (n = EXT(statp).nscount, ns = 0;
- n < statp->nscount; n++) {
+ n < statp->nscount - EXT(statp).nscount6; n++) {
while (ns < MAXNS
&& EXT(statp).nsmap[ns] != MAXNS)
ns++;
glibc-rtkaio-libof.patch (new file, 26 lines)
@@ -0,0 +1,26 @@
diff -pruN glibc-2.21-649-gae5eae7/rtkaio/Makefile glibc-2.21-649-gae5eae7.new/rtkaio/Makefile
--- glibc-2.21-649-gae5eae7/rtkaio/Makefile 2015-07-27 22:57:05.742601066 +0530
+++ glibc-2.21-649-gae5eae7.new/rtkaio/Makefile 2015-07-27 23:33:09.892874337 +0530
@@ -66,7 +66,9 @@ CFLAGS-kaio_librt-cancellation.c = -fasy

LDFLAGS-rtkaio.so = -Wl,-soname=lib$(libprefix)rt.so$(librt.so-version) \
-Wl,--enable-new-dtags,-z,nodelete
-CPPFLAGS-librtkaio += -DIS_IN_librt=1 -I$(..)rt
+# Resort to this ugliness of undefining and defining MODULE_NAME because
+# setting libof-<> to librt has many more side-effects that we want to avoid.
+CPPFLAGS-librtkaio += -I$(..)rt -UMODULE_NAME -DMODULE_NAME=librt

rpath-dirs := $(patsubst rt,rtkaio,$(rpath-dirs))

diff -pruN glibc-2.21-649-gae5eae7/rtkaio/sysdeps/unix/sysv/linux/syscalls.list glibc-2.21-649-gae5eae7.new/rtkaio/sysdeps/unix/sysv/linux/syscalls.list
--- glibc-2.21-649-gae5eae7/rtkaio/sysdeps/unix/sysv/linux/syscalls.list 2015-07-27 22:47:23.073776396 +0530
+++ glibc-2.21-649-gae5eae7.new/rtkaio/sysdeps/unix/sysv/linux/syscalls.list 2015-07-27 23:33:09.892874337 +0530
@@ -1,5 +1,5 @@
# File name Caller Syscall name Args Strong name Weak names

-kaio_mq_timedsend - mq_timedsend Ci:ipiip __GI_mq_timedsend mq_timedsend
-kaio_mq_timedreceive - mq_timedreceive Ci:ipipp __GI_mq_timedreceive mq_timedreceive
-kaio_mq_setattr - mq_getsetattr i:ipp __GI_mq_setattr mq_setattr
+kaio_mq_timedsend - mq_timedsend Ci:ipiip __mq_timedsend mq_timedsend
+kaio_mq_timedreceive - mq_timedreceive Ci:ipipp __mq_timedreceive mq_timedreceive
+kaio_mq_setattr - mq_getsetattr i:ipp mq_setattr
glibc.spec (21 lines changed)
@@ -1,6 +1,6 @@
%define glibcsrcdir glibc-2.21-357-gb40a4e1
%define glibcsrcdir glibc-2.21-649-gae5eae7
%define glibcversion 2.21.90
%define glibcrelease 19%{?dist}
%define glibcrelease 20%{?dist}
# Pre-release tarballs are pulled in from git using a command that is
# effectively:
#
@@ -208,8 +208,7 @@ Patch0053: glibc-cs-path.patch
# Remove the clock_* functions and use the ones in libc like librt does.
Patch0054: glibc-rtkaio-clock.patch

# Temporarily revert a fix to work around bz #1209451.
Patch0055: glibc-revert-arena-threshold-fix.patch
Patch0055: glibc-rtkaio-libof.patch

##############################################################################
#
@@ -230,16 +229,11 @@ Patch0055: glibc-revert-arena-threshold-fix.patch
# http://sourceware.org/ml/libc-alpha/2012-12/msg00103.html
Patch2007: glibc-rh697421.patch

Patch2011: glibc-rh757881.patch

Patch2013: glibc-rh741105.patch

# Upstream BZ 14247
Patch2023: glibc-rh827510.patch

# Upstream BZ 13028
Patch2026: glibc-rh841787.patch

# Upstream BZ 14185
Patch2027: glibc-rh819430.patch

@@ -261,7 +255,6 @@ Patch2105: glibc-rh1238412-unicode-8.0.0-update.patch
# Benchmark comparison patches.
#
##############################################################################
Patch3001: glibc-bench-compare.patch
Patch3002: glibc-bench-build.patch

##############################################################################
@@ -584,7 +577,6 @@ microbenchmark tests on the system.
%patch0006 -p1
%patch2007 -p1
%patch0009 -p1
%patch2011 -p1
%patch0012 -p1
%patch2013 -p1
%patch0014 -p1
@@ -595,7 +587,6 @@ microbenchmark tests on the system.
%patch2023 -p1
%patch0024 -p1
%patch0025 -p1
%patch2026 -p1
%patch2027 -p1
%patch0028 -p1
%patch0030 -p1
@@ -615,8 +606,6 @@ microbenchmark tests on the system.
%patch0052 -p1
%patch0053 -p1
%patch0054 -p1
%patch0055 -p1 -R
%patch3001 -p1
%patch3002 -p1
%patch2035 -p1

@@ -625,6 +614,7 @@ microbenchmark tests on the system.
%patch2103 -p1
%patch2104 -p1
%patch2105 -p1
%patch0055 -p1

##############################################################################
# %%prep - Additional prep required...
@@ -1840,6 +1830,9 @@ rm -f *.filelist*
%endif

%changelog
* Tue Jul 28 2015 Siddhesh Poyarekar <siddhesh@redhat.com> - 2.21.90-20
- Sync with upstream master.

* Thu Jul 23 2015 Mike FABIAN <mfabian@redhat.com> - 2.21.90-19
- some more additions to the translit_neutral file by Marko Myllynen