author     Siddhesh Poyarekar <siddhesh@sourceware.org>   2017-09-16 11:47:32 +0530
committer  Siddhesh Poyarekar <siddhesh@sourceware.org>   2017-09-16 11:47:32 +0530
commit     06b1de237801402c7da327f0a36f4e6aa2f06cc2
tree       7b8bc690a5f7f10dd3683fce0e7b57c9aa5c8b22
parent     9ac44708881c086f27f86b36c20749052d079c8f
benchtests: Use argparse to parse arguments
Make the script more usable by adding proper command line options,
along with a way to query them. The script can already do a number of
things, such as choosing a base for comparison or generating graphs,
and these capabilities should be accessible via command line
switches.
* benchtests/scripts/compare_strings.py: Use argparse.
* benchtests/README: Document existence of compare_strings.py.
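
For reference, a hypothetical invocation of the new interface could look like
the following; the -a/-i/-s/-b switches are the ones added by this patch, but
the input file, schema file and ifunc names below are only illustrative:

  benchtests/scripts/compare_strings.py -i bench-memcpy.out \
      -s benchtests/scripts/benchout_strings.schema.json \
      -a length,align1,align2 -b __memcpy_avx_unaligned

The old interface instead took the positional form
<input file> <schema file> [-base=ifunc_name] attr1 [attr2 ...] and offered
no built-in help output.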
-rw-r--r--   ChangeLog                               |  5
-rw-r--r--   benchtests/README                       | 11
-rwxr-xr-x   benchtests/scripts/compare_strings.py   | 37
3 files changed, 40 insertions, 13 deletions
@@ -1,3 +1,8 @@
+2017-09-16  Siddhesh Poyarekar  <siddhesh@sourceware.org>
+
+	* benchtests/scripts/compare_strings.py: Use argparse.
+	* benchtests/README: Document existence of compare_strings.py.
+
 2017-09-15  Joseph Myers  <joseph@codesourcery.com>
 
 	* math/s_fma.c: Include <libm-alias-double.h>.
diff --git a/benchtests/README b/benchtests/README
index b015acfd53..9aa750a519 100644
--- a/benchtests/README
+++ b/benchtests/README
@@ -122,3 +122,14 @@ To add a benchset for `foo':
 - Write your bench-foo.c that prints out the measurements to stdout.
 - On execution, a bench-foo.out is created in $(objpfx) with the
   contents of stdout.
+
+Reading String Benchmark Results:
+================================
+
+Some of the string benchmark results are now in JSON to make it easier to read
+in scripts. Use the benchtests/compare_strings.py script to show the results
+in a tabular format, generate graphs and more. Run
+
+  benchtests/scripts/compare_strings.py -h
+
+for usage information.
diff --git a/benchtests/scripts/compare_strings.py b/benchtests/scripts/compare_strings.py
index b3c57e2b34..3ca9429d04 100755
--- a/benchtests/scripts/compare_strings.py
+++ b/benchtests/scripts/compare_strings.py
@@ -28,6 +28,7 @@ import sys
 import os
 import json
 import pylab
+import argparse
 
 try:
     import jsonschema as validator
@@ -118,22 +119,32 @@ def main(args):
 
     Take a string benchmark output file and compare timings.
     """
-    if len(args) < 3:
-        print('Usage: %s <input file> <schema file> [-base=ifunc_name] attr1 [attr2 ...]' % sys.argv[0])
-        sys.exit(os.EX_USAGE)
 
     base_func = None
-    filename = args[0]
-    schema_filename = args[1]
-    if args[2].find('-base=') == 0:
-        base_func = args[2][6:]
-        attrs = args[3:]
-    else:
-        attrs = args[2:]
-
-    results = parse_file(filename, schema_filename)
+    filename = args.input
+    schema_filename = args.schema
+    base_func = args.base
+    attrs = args.attributes.split(',')
+
+    results = parse_file(args.input, args.schema)
     process_results(results, attrs, base_func)
 
 
 if __name__ == '__main__':
-    main(sys.argv[1:])
+    parser = argparse.ArgumentParser()
+
+    # The required arguments.
+    req = parser.add_argument_group(title='required arguments')
+    req.add_argument('-a', '--attributes', required=True,
+                     help='Comma separated list of benchmark attributes.')
+    req.add_argument('-i', '--input', required=True,
+                     help='Input JSON benchmark result file.')
+    req.add_argument('-s', '--schema', required=True,
+                     help='Schema file to validate the result file.')
+
+    # Optional arguments.
+    parser.add_argument('-b', '--base',
+                        help='IFUNC variant to set as baseline.')
+
+    args = parser.parse_args()
+    main(args)
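
A note on the argparse pattern above: options declared with required=True are
normally listed by argparse under its default heading in --help output, so the
patch collects them in an explicit 'required arguments' group purely to make
the help text clearer. A minimal standalone sketch of that idea follows; it is
not glibc code and the flag names are made up:

  #!/usr/bin/env python
  # Standalone sketch (not part of the glibc patch): shows why required
  # options can be placed in their own argument group.  By default,
  # argparse lists required=True options together with the optional ones
  # in --help, which is misleading; a named group separates them clearly.
  import argparse

  parser = argparse.ArgumentParser(description='required vs. optional flags demo')

  # Required flags go into an explicitly titled group.
  req = parser.add_argument_group(title='required arguments')
  req.add_argument('-i', '--input', required=True,
                   help='Input file (must be given).')

  # A genuinely optional flag stays in the default group.
  parser.add_argument('-b', '--base',
                      help='Optional baseline name.')

  args = parser.parse_args()
  print(args.input, args.base)

Running the sketch with -h shows --input under 'required arguments' and --base
under the default group, mirroring what compare_strings.py -h prints after
this patch.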