/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005-2011 David Schultz <das@FreeBSD.ORG>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <fenv.h>
#include <float.h>
#include <math.h>

#include "math_private.h"
#ifdef USE_BUILTIN_FMA
double
fma(double x, double y, double z)
{
	return (__builtin_fma(x, y, z));
}
#else
/*
 * A struct dd represents a floating-point number with twice the precision
 * of a double.  We maintain the invariant that "hi" stores the 53 high-order
 * bits of the result.
 */
struct dd {
	double	hi;
	double	lo;
};
/*
 * Compute a+b exactly, returning the exact result in a struct dd.  We assume
 * that both a and b are finite, but make no assumptions about their relative
 * magnitudes.
 */
static inline struct dd
dd_add(double a, double b)
{
	struct dd ret;
	double s;

	ret.hi = a + b;
	s = ret.hi - a;
	ret.lo = (a - (ret.hi - s)) + (b - s);
	return (ret);
}
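/*
 * Illustrative example (not from the original source): with a = 1.0 and
 * b = 0x1p-60, the naive sum a + b rounds to 1.0 and the tail is lost.
 * dd_add() recovers it: ret.hi == 1.0 and ret.lo == 0x1p-60, so
 * ret.hi + ret.lo represents a + b exactly.
 */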
/*
 * Compute a+b, with a small tweak:  The least significant bit of the
 * result is adjusted into a sticky bit summarizing all the bits that
 * were lost to rounding.  This adjustment negates the effects of double
 * rounding when the result is added to another number with a higher
 * exponent.  For an explanation of round and sticky bits, see any reference
 * on FPU design, e.g.,
 *
 *     J. Coonen.  An Implementation Guide to a Proposed Standard for
 *     Floating-Point Arithmetic.  Computer, vol. 13, no. 1, Jan 1980.
 */
static inline double
add_adjusted(double a, double b)
{
	struct dd sum;
	uint64_t hibits, lobits;

	sum = dd_add(a, b);
	if (sum.lo != 0) {
		EXTRACT_WORD64(hibits, sum.hi);
		if ((hibits & 1) == 0) {
			/* hibits += (int)copysign(1.0, sum.hi * sum.lo) */
			EXTRACT_WORD64(lobits, sum.lo);
			hibits += 1 - ((hibits ^ lobits) >> 62);
			INSERT_WORD64(sum.hi, hibits);
		}
	}
	return (sum.hi);
}
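/*
 * Illustrative example (not from the original source):
 * add_adjusted(1.0, 0x1p-54) returns 0x1.0000000000001p+0.  The exact
 * sum rounds to 1.0, losing the 0x1p-54 tail, so the low bit of the
 * result is set as a sticky bit recording that the true value lies
 * strictly above 1.0.  The shift by 62 recovers the XOR of the sign
 * bits of sum.hi and sum.lo, turning the commented copysign() into
 * integer arithmetic; this is valid because both values have magnitude
 * below 2.0 here, so bit 62 of each word is clear.
 */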
/*
 * Compute ldexp(a+b, scale) with a single rounding error.  It is assumed
 * that the result will be subnormal, and care is taken to ensure that
 * double rounding does not occur.
 */
static inline double
add_and_denormalize(double a, double b, int scale)
{
	struct dd sum;
	uint64_t hibits, lobits;
	int bits_lost;

	sum = dd_add(a, b);

	/*
	 * If we are losing at least two bits of accuracy to denormalization,
	 * then the first lost bit becomes a round bit, and we adjust the
	 * lowest bit of sum.hi to make it a sticky bit summarizing all the
	 * bits in sum.lo.  With the sticky bit adjusted, the hardware will
	 * break any ties in the correct direction.
	 *
	 * If we are losing only one bit to denormalization, however, we must
	 * break the ties manually.
	 */
	if (sum.lo != 0) {
		EXTRACT_WORD64(hibits, sum.hi);
		bits_lost = -((int)(hibits >> 52) & 0x7ff) - scale + 1;
		if ((bits_lost != 1) ^ (int)(hibits & 1)) {
			/* hibits += (int)copysign(1.0, sum.hi * sum.lo) */
			EXTRACT_WORD64(lobits, sum.lo);
			hibits += 1 - (((hibits ^ lobits) >> 62) & 2);
			INSERT_WORD64(sum.hi, hibits);
		}
	}
	return (ldexp(sum.hi, scale));
}
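/*
 * Note (not from the original source): unlike add_adjusted(), the mask
 * with 2 keeps only the sign-difference bit of the XOR, so the trick
 * works even though sum.hi may be far above 2.0 here.  When two or more
 * bits will be shifted out by the final ldexp(), setting the sticky LSB
 * is enough for the hardware to round correctly; when exactly one bit
 * is lost there is no sticky position left, so the tie is broken
 * manually by moving sum.hi one ulp toward the true sum.
 */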
/*
 * Compute a*b exactly, returning the exact result in a struct dd.  We assume
 * that both a and b are normalized, so no underflow or overflow will occur.
 * The current rounding mode must be round-to-nearest.
 */
static inline struct dd
dd_mul(double a, double b)
{
	static const double split = 0x1p27 + 1.0;
	struct dd ret;
	double ha, hb, la, lb, p, q;

	p = a * split;
	ha = a - p;
	ha += p;
	la = a - ha;

	p = b * split;
	hb = b - p;
	hb += p;
	lb = b - hb;

	p = ha * hb;
	q = ha * lb + la * hb;

	ret.hi = p + q;
	ret.lo = p - ret.hi + q + la * lb;
	return (ret);
}
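/*
 * Note (not from the original source): multiplying by split = 2^27 + 1
 * and subtracting performs a Veltkamp split, dividing each 53-bit
 * operand into two 26-bit halves (ha + la, hb + lb), so every partial
 * product fits in 53 bits and is computed exactly.  Illustrative
 * example: dd_mul(1.0 + 0x1p-30, 1.0 + 0x1p-30) yields hi == 1.0 +
 * 0x1p-29 and lo == 0x1p-60, since the exact square 1 + 2^-29 + 2^-60
 * needs 61 significand bits.
 */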
/*
 * Fused multiply-add: Compute x * y + z with a single rounding error.
 *
 * We use scaling to avoid overflow/underflow, along with the
 * canonical precision-doubling technique adapted from:
 *
 *	Dekker, T.  A Floating-Point Technique for Extending the
 *	Available Precision.  Numer. Math. 18, 224-242 (1971).
 *
 * This algorithm is sensitive to the rounding precision.  FPUs such
 * as the i387 must be set in double-precision mode if variables are
 * to be stored in FP registers in order to avoid incorrect results.
 * This is the default on FreeBSD, but not on many other systems.
 *
 * Hardware instructions should be used on architectures that provide a
 * fused multiply-add, since this implementation will likely be several
 * times slower.
 */
double
fma(double x, double y, double z)
{
	double xs, ys, zs, adj;
	struct dd xy, r;
	int oround;
	int ex, ey, ez;
	int spread;

	/*
	 * Handle special cases.  The order of operations and the particular
	 * return values here are crucial in handling special cases involving
	 * infinities, NaNs, overflows, and signed zeroes correctly.
	 */
	if (x == 0.0 || y == 0.0)
		return (x * y + z);
	if (z == 0.0)
		return (x * y);
	if (!isfinite(x) || !isfinite(y))
		return (x * y + z);
	if (!isfinite(z))
		return (z);

	xs = frexp(x, &ex);
	ys = frexp(y, &ey);
	zs = frexp(z, &ez);
	oround = fegetround();
	spread = ex + ey - ez;
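	/*
	 * Note (not from the original source): after frexp(), xs, ys, and
	 * zs all lie in [0.5, 1), so xs * ys lies in [0.25, 1) and the
	 * double-double arithmetic below cannot overflow.  spread is the
	 * exponent gap between x * y and z; it determines how far z must
	 * be shifted to be added against the product.
	 */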
	/*
	 * If x * y and z are many orders of magnitude apart, the scaling
	 * will overflow, so we handle these cases specially.  Rounding
	 * modes other than FE_TONEAREST are painful.
	 */
	if (spread < -DBL_MANT_DIG) {
		feraiseexcept(FE_INEXACT);
		if (!isnormal(z))
			feraiseexcept(FE_UNDERFLOW);
		switch (oround) {
		case FE_TONEAREST:
			return (z);
		case FE_TOWARDZERO:
			if ((x > 0.0) ^ (y < 0.0) ^ (z < 0.0))
				return (z);
			else
				return (nextafter(z, 0));
		case FE_DOWNWARD:
			if ((x > 0.0) ^ (y < 0.0))
				return (z);
			else
				return (nextafter(z, -INFINITY));
		default:	/* FE_UPWARD */
			if ((x > 0.0) ^ (y < 0.0))
				return (nextafter(z, INFINITY));
			else
				return (z);
		}
	}
	if (spread <= DBL_MANT_DIG * 2)
		zs = ldexp(zs, -spread);
	else
		zs = copysign(DBL_MIN, zs);
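	/*
	 * Note (not from the original source): when z is more than
	 * 2 * DBL_MANT_DIG binades below x * y, ldexp(zs, -spread) would
	 * underflow to zero and lose z entirely.  Substituting
	 * copysign(DBL_MIN, zs) keeps a nonzero value of the correct sign
	 * that is too small to change the significand, preserving z's
	 * effect on the sticky bit and on the final rounding.
	 */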
	fesetround(FE_TONEAREST);
	/* work around clang bug 8100 */
	volatile double vxs = xs;
	/*
	 * Basic approach for round-to-nearest:
	 *
	 *     (xy.hi, xy.lo) = x * y           (exact)
	 *     (r.hi, r.lo)   = xy.hi + z       (exact)
	 *     adj = xy.lo + r.lo               (inexact; low bit is sticky)
	 *     result = r.hi + adj              (correctly rounded)
	 */
	xy = dd_mul(vxs, ys);
	r = dd_add(xy.hi, zs);

	spread = ex + ey;

	if (r.hi == 0.0 && xy.lo == 0) {
		/*
		 * When the addends cancel to 0, ensure that the result has
		 * the correct sign.
		 */
		fesetround(oround);
		volatile double vzs = zs; /* XXX gcc CSE bug workaround */
		return (xy.hi + vzs + ldexp(xy.lo, spread));
	}
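	/*
	 * Illustrative example (not from the original source):
	 * fma(1.0, 1.0, -1.0) reaches the branch above with xy.hi == 0.25
	 * and vzs == -0.25; evaluating xy.hi + vzs in the caller's
	 * rounding mode lets the hardware produce the correctly signed
	 * zero: +0.0 in FE_TONEAREST, -0.0 in FE_DOWNWARD.
	 */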
	if (oround != FE_TONEAREST) {
		/*
		 * There is no need to worry about double rounding in directed
		 * rounding modes.
		 */
		fesetround(oround);
		/* work around clang bug 8100 */
		volatile double vrlo = r.lo;
		adj = vrlo + xy.lo;
		return (ldexp(r.hi + adj, spread));
	}
	adj = add_adjusted(r.lo, xy.lo);
	if (spread + ilogb(r.hi) > -1023)
		return (ldexp(r.hi + adj, spread));
	else
		return (add_and_denormalize(r.hi, adj, spread));
}
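/*
 * Illustrative example (not from the original source): with
 * x = 1 + 0x1p-52, y = 1 - 0x1p-52, and z = -1.0, the exact result is
 * x * y + z == -0x1p-104.  fma(x, y, z) returns it exactly, whereas the
 * plain expression x * y + z evaluates to 0.0 because x * y rounds to
 * 1.0 before the addition.
 */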
#endif /* !USE_BUILTIN_FMA */
#if (LDBL_MANT_DIG == 53)
__weak_reference(fma, fmal);
#endif