-rw-r--r--  ChangeLog                              |  5
-rw-r--r--  benchtests/README                      | 11
-rwxr-xr-x  benchtests/scripts/compare_strings.py  | 37
3 files changed, 40 insertions, 13 deletions
diff --git a/ChangeLog b/ChangeLog
index 58acb54b0a..fd9cc0ce99 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2017-09-16  Siddhesh Poyarekar  <siddhesh@sourceware.org>
+
+	* benchtests/scripts/compare_strings.py: Use argparse.
+	* benchtests/README: Document existence of compare_strings.py.
+
2017-09-15  Joseph Myers  <joseph@codesourcery.com>

	* math/s_fma.c: Include <libm-alias-double.h>.
diff --git a/benchtests/README b/benchtests/README
index b015acfd53..9aa750a519 100644
--- a/benchtests/README
+++ b/benchtests/README
@@ -122,3 +122,14 @@ To add a benchset for `foo':
- Write your bench-foo.c that prints out the measurements to stdout.
- On execution, a bench-foo.out is created in $(objpfx) with the contents of
stdout.
+
+Reading String Benchmark Results:
+================================
+
+Some of the string benchmark results are now in JSON to make it easier to read
+in scripts. Use the benchtests/scripts/compare_strings.py script to show the
+results in a tabular format, generate graphs and more. Run
+
+ benchtests/scripts/compare_strings.py -h
+
+for usage information.
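As a purely illustrative example (the result file, schema file and attribute
names here are placeholders, not part of this patch), an invocation using the
options introduced by the compare_strings.py change below might look like:

  benchtests/scripts/compare_strings.py -i bench-memcpy.out \
      -s benchtests/scripts/benchout_strings.schema.json -a length,align1,align2

Here -a, -i and -s are the required arguments added by the argparse
conversion, while -b/--base can optionally name an IFUNC variant to use as the
comparison baseline.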
diff --git a/benchtests/scripts/compare_strings.py b/benchtests/scripts/compare_strings.py
index b3c57e2b34..3ca9429d04 100755
--- a/benchtests/scripts/compare_strings.py
+++ b/benchtests/scripts/compare_strings.py
@@ -28,6 +28,7 @@ import sys
import os
import json
import pylab
+import argparse

try:
    import jsonschema as validator
@@ -118,22 +119,32 @@ def main(args):
    Take a string benchmark output file and compare timings.
    """
-    if len(args) < 3:
-        print('Usage: %s <input file> <schema file> [-base=ifunc_name] attr1 [attr2 ...]' % sys.argv[0])
-        sys.exit(os.EX_USAGE)

    base_func = None
-    filename = args[0]
-    schema_filename = args[1]
-    if args[2].find('-base=') == 0:
-        base_func = args[2][6:]
-        attrs = args[3:]
-    else:
-        attrs = args[2:]
-
-    results = parse_file(filename, schema_filename)
+    filename = args.input
+    schema_filename = args.schema
+    base_func = args.base
+    attrs = args.attributes.split(',')
+
+    results = parse_file(args.input, args.schema)
    process_results(results, attrs, base_func)


if __name__ == '__main__':
-    main(sys.argv[1:])
+    parser = argparse.ArgumentParser()
+
+    # The required arguments.
+    req = parser.add_argument_group(title='required arguments')
+    req.add_argument('-a', '--attributes', required=True,
+                     help='Comma separated list of benchmark attributes.')
+    req.add_argument('-i', '--input', required=True,
+                     help='Input JSON benchmark result file.')
+    req.add_argument('-s', '--schema', required=True,
+                     help='Schema file to validate the result file.')
+
+    # Optional arguments.
+    parser.add_argument('-b', '--base',
+                        help='IFUNC variant to set as baseline.')
+
+    args = parser.parse_args()
+    main(args)
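For readers who want to see the new interface in isolation, the following is a
small self-contained sketch of the same argparse pattern used in the hunk
above; the argument values are made up for illustration and are not part of
the commit:

import argparse

# Mirror the structure used in compare_strings.py: a separate group for the
# required options plus an optional --base flag.
parser = argparse.ArgumentParser()
req = parser.add_argument_group(title='required arguments')
req.add_argument('-a', '--attributes', required=True,
                 help='Comma separated list of benchmark attributes.')
req.add_argument('-i', '--input', required=True,
                 help='Input JSON benchmark result file.')
req.add_argument('-s', '--schema', required=True,
                 help='Schema file to validate the result file.')
parser.add_argument('-b', '--base',
                    help='IFUNC variant to set as baseline.')

# Parse a made-up command line instead of sys.argv so the sketch can be run
# directly.
args = parser.parse_args(['-a', 'length,align1,align2',
                          '-i', 'bench-memcpy.out',
                          '-s', 'benchout_strings.schema.json'])

attrs = args.attributes.split(',')   # ['length', 'align1', 'align2']
print(attrs, args.input, args.base)  # args.base is None when -b is omitted

Because -a, -i and -s are declared with required=True, omitting any of them
makes parse_args() exit with a usage message, which replaces the hand-written
length check that the old code performed on sys.argv.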