author     Joseph Myers <joseph@codesourcery.com>  2015-10-01 15:24:54 +0000
committer  Joseph Myers <joseph@codesourcery.com>  2015-10-01 15:24:54 +0000
commit     e27fcd027cbdc2f13bf440177c0434eb9ec3dd28
tree       8b653db2c0bbc46e46589ed9c2853baf23659dff
parent     7cda516f5f23772fd37ca3a5e018fca5bf388435
Use type-specific precision when printing results in libm-test.inc.
When libm-test.inc prints the results of failing tests, the output can be
unhelpful for ldbl-128 and ldbl-128ibm because the precision used is
insufficient to distinguish values of those types, resulting in reported
values that look identical but differ by a large number of ulps.

This patch changes it to use a precision appropriate for the type, for
both decimal and hex output (so output for float is more compact, while
output for ldbl-128 and ldbl-128ibm is substantially wider).  The natural
precision to use for decimal output is given by the C11 <float.h> macros
such as FLT_DECIMAL_DIG.  GCC's <float.h> only defines those in C11 mode,
so this patch uses the predefines such as __FLT_DECIMAL_DIG__ (added in
GCC 4.6) instead; if we move to building with -std=gnu11 (or -std=gnu1x
if we can't get rid of GCC 4.6 support), those predefines could be
replaced by the standard macros.

Tested for powerpc and mips64.

	* math/libm-test.inc (TYPE_DECIMAL_DIG): New macro.
	(TYPE_HEX_DIG): Likewise.
	(print_float): Use TYPE_DECIMAL_DIG - 1 and TYPE_HEX_DIG - 1 as
	precisions when printing floating-point numbers.
	(check_float_internal): Likewise.
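[Editorial note, not part of the commit: a minimal standalone sketch of
the problem, assuming a target where long double is binary128 (113-bit
mantissa, LDBL_DECIMAL_DIG 36, e.g. aarch64), compiled with -std=c11 and
linked with -lm.  Two values one ulp apart agree in roughly their first
34 decimal digits, so the old fixed 20-digit precision prints them
identically, while a type-specific precision tells them apart.]

    #include <float.h>
    #include <math.h>
    #include <stdio.h>

    int
    main (void)
    {
      long double a = 1.0L / 3.0L;
      long double b = nextafterl (a, 1.0L);  /* exactly one ulp above a */

      /* Old behaviour: fixed 20-digit precision.  With a 113-bit
         mantissa these two lines come out identical.  */
      printf ("%.20Le\n%.20Le\n", a, b);

      /* New behaviour: LDBL_DECIMAL_DIG significant digits always
         distinguish distinct long double values.  (%e counts precision
         as digits after the point, hence the - 1, as in the patch.
         LDBL_DECIMAL_DIG is C11; the patch itself uses GCC's
         __DECIMAL_DIG__-style predefines to work in pre-C11 modes.)  */
      printf ("%.*Le\n%.*Le\n",
              LDBL_DECIMAL_DIG - 1, a, LDBL_DECIMAL_DIG - 1, b);
      return 0;
    }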
Diffstat (limited to 'math/libm-test.inc')
-rw-r--r--  math/libm-test.inc | 19 ++++++++++++++++---
1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/math/libm-test.inc b/math/libm-test.inc
index 8615957600..f627296d66 100644
--- a/math/libm-test.inc
+++ b/math/libm-test.inc
@@ -338,6 +338,18 @@ static FLOAT max_valid_error;
 			    (LDBL_MANT_DIG-1), (DBL_MANT_DIG-1), (FLT_MANT_DIG-1))
 #define MIN_EXP CHOOSE ((LDBL_MIN_EXP-1), (DBL_MIN_EXP-1), (FLT_MIN_EXP-1), \
 			(LDBL_MIN_EXP-1), (DBL_MIN_EXP-1), (FLT_MIN_EXP-1))
+/* Sufficient numbers of digits to represent any floating-point value
+   unambiguously (for any choice of the number of bits in the first
+   hex digit, in the case of TYPE_HEX_DIG).  When used with printf
+   formats where the precision counts only digits after the point, 1
+   is subtracted from these values.  */
+#define TYPE_DECIMAL_DIG CHOOSE (__DECIMAL_DIG__,	\
+				 __DBL_DECIMAL_DIG__,	\
+				 __FLT_DECIMAL_DIG__,	\
+				 __DECIMAL_DIG__,	\
+				 __DBL_DECIMAL_DIG__,	\
+				 __FLT_DECIMAL_DIG__)
+#define TYPE_HEX_DIG ((MANT_DIG + 7) / 4)
 
 /* Compare KEY (a string, with the name of a function) with ULP (a
    pointer to a struct ulp_data structure), returning a value less
@@ -419,7 +431,8 @@ print_float (FLOAT f)
   else if (isnan (f))
     printf ("qNaN\n");
   else
-    printf ("% .20" PRINTF_EXPR " % .20" PRINTF_XEXPR "\n", f, f);
+    printf ("% .*" PRINTF_EXPR " % .*" PRINTF_XEXPR "\n",
+	    TYPE_DECIMAL_DIG - 1, f, TYPE_HEX_DIG - 1, f);
 }
 
 /* Should the message print to screen?  This depends on the verbose flag,
@@ -837,8 +850,8 @@ check_float_internal (const char *test_name, FLOAT computed, FLOAT expected,
 	  print_float (expected);
 	  if (print_diff)
 	    {
-	      printf (" difference: % .20" PRINTF_EXPR " % .20" PRINTF_XEXPR
-		      "\n", diff, diff);
+	      printf (" difference: % .*" PRINTF_EXPR " % .*" PRINTF_XEXPR
+		      "\n", TYPE_DECIMAL_DIG - 1, diff, TYPE_HEX_DIG - 1, diff);
 	      printf (" ulp       : % .4" PRINTF_NEXPR "\n", ulps);
 	      printf (" max.ulp   : % .4" PRINTF_NEXPR "\n", max_ulp);
 	    }
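
[Editorial note, not part of the commit: a rough standalone sketch of
what the patched print_float output reduces to when FLOAT is double,
assuming PRINTF_EXPR and PRINTF_XEXPR expand to plain "e" and "a" as
they do for double in libm-test.inc on IEEE targets.  TYPE_DECIMAL_DIG
is mirrored via GCC's __DBL_DECIMAL_DIG__ predefine (17 for IEEE
double, available since GCC 4.6); TYPE_HEX_DIG is the patch's formula.]

    #include <float.h>
    #include <stdio.h>

    #define MANT_DIG DBL_MANT_DIG                 /* 53 for IEEE double */
    #define TYPE_DECIMAL_DIG __DBL_DECIMAL_DIG__  /* 17 */
    #define TYPE_HEX_DIG ((MANT_DIG + 7) / 4)     /* 15: enough hex digits
                                                     for any split of bits
                                                     into the leading digit */

    int
    main (void)
    {
      double f = 1.0 / 3.0;
      /* %e and %a count precision as digits after the point, hence -1.  */
      printf ("% .*e % .*a\n", TYPE_DECIMAL_DIG - 1, f, TYPE_HEX_DIG - 1, f);
      return 0;
    }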