2 * ====================================================
3 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
5 * Developed at SunSoft, a Sun Microsystems, Inc. business.
6 * Permission to use, copy, modify, and distribute this
 * software is freely granted, provided that this notice
 * is preserved.
9 * ====================================================
12 #include <sys/cdefs.h>
18 #include "math_private.h"
20 #define BIAS (LDBL_MAX_EXP - 1)
22 #if LDBL_MANL_SIZE > 32
23 typedef uint64_t manl_t;
25 typedef uint32_t manl_t;
28 #if LDBL_MANH_SIZE > 32
29 typedef uint64_t manh_t;
31 typedef uint32_t manh_t;
35 * These macros add and remove an explicit integer bit in front of the
36 * fractional mantissa, if the architecture doesn't have such a bit by
39 #ifdef LDBL_IMPLICIT_NBIT
40 #define SET_NBIT(hx) ((hx) | (1ULL << LDBL_MANH_SIZE))
41 #define HFRAC_BITS LDBL_MANH_SIZE
43 #define SET_NBIT(hx) (hx)
44 #define HFRAC_BITS (LDBL_MANH_SIZE - 1)
47 #define MANL_SHIFT (LDBL_MANL_SIZE - 1)
/* Signed zeros, indexed by the sign bit of x: Zero[0] = +0.0, Zero[1] = -0.0. */
static const long double Zero[2] = { 0.0L, -0.0L };
52 * Return the IEEE remainder and set *quo to the last n bits of the
53 * quotient, rounded to the nearest integer. We choose n=31 because
54 * we wind up computing all the integer bits of the quotient anyway as
55 * a side-effect of computing the remainder by the shift and subtract
56 * method. In practice, this is far more bits than are needed to use
57 * remquo in reduction algorithms.
 * Assumptions:
 * - The low part of the mantissa fits in a manl_t exactly.
61 * - The high part of the mantissa fits in an int64_t with enough room
62 * for an explicit integer bit in front of the fractional bits.
65 remquol(long double x, long double y, int *quo)
67 union IEEEl2bits ux, uy;
68 int64_t hx,hz; /* We need a carry bit even if LDBL_MANH_SIZE is 32. */
76 sxy = sx ^ uy.bits.sign;
77 ux.bits.sign = 0; /* |x| */
78 uy.bits.sign = 0; /* |y| */
80 /* purge off exception values */
81 if((uy.bits.exp|uy.bits.manh|uy.bits.manl)==0 || /* y=0 */
82 (ux.bits.exp == BIAS + LDBL_MAX_EXP) || /* or x not finite */
83 (uy.bits.exp == BIAS + LDBL_MAX_EXP &&
84 ((uy.bits.manh&~LDBL_NBIT)|uy.bits.manl)!=0)) /* or y is NaN */
85 return nan_mix_op(x, y, *)/nan_mix_op(x, y, *);
86 if(ux.bits.exp<=uy.bits.exp) {
87 if((ux.bits.exp<uy.bits.exp) ||
88 (ux.bits.manh<=uy.bits.manh &&
89 (ux.bits.manh<uy.bits.manh ||
90 ux.bits.manl<uy.bits.manl))) {
92 goto fixup; /* |x|<|y| return x or x-y */
94 if(ux.bits.manh==uy.bits.manh && ux.bits.manl==uy.bits.manl) {
95 *quo = (sxy ? -1 : 1);
96 return Zero[sx]; /* |x|=|y| return x*0*/
100 /* determine ix = ilogb(x) */
101 if(ux.bits.exp == 0) { /* subnormal x */
103 ix = ux.bits.exp - (BIAS + 512);
105 ix = ux.bits.exp - BIAS;
108 /* determine iy = ilogb(y) */
109 if(uy.bits.exp == 0) { /* subnormal y */
111 iy = uy.bits.exp - (BIAS + 512);
113 iy = uy.bits.exp - BIAS;
116 /* set up {hx,lx}, {hy,ly} and align y to x */
117 hx = SET_NBIT(ux.bits.manh);
118 hy = SET_NBIT(uy.bits.manh);
126 hz=hx-hy;lz=lx-ly; if(lx<ly) hz -= 1;
127 if(hz<0){hx = hx+hx+(lx>>MANL_SHIFT); lx = lx+lx;}
128 else {hx = hz+hz+(lz>>MANL_SHIFT); lx = lz+lz; q++;}
131 hz=hx-hy;lz=lx-ly; if(lx<ly) hz -= 1;
132 if(hz>=0) {hx=hz;lx=lz;q++;}
134 /* convert back to floating value and restore the sign */
135 if((hx|lx)==0) { /* return sign(x)*0 */
137 *quo = (sxy ? -q : q);
140 while(hx<(1ULL<<HFRAC_BITS)) { /* normalize x */
141 hx = hx+hx+(lx>>MANL_SHIFT); lx = lx+lx;
144 ux.bits.manh = hx; /* The integer bit is truncated here if needed. */
146 if (iy < LDBL_MIN_EXP) {
147 ux.bits.exp = iy + (BIAS + 512);
150 ux.bits.exp = iy + BIAS;
155 if (y < LDBL_MIN * 2) {
156 if (x+x>y || (x+x==y && (q & 1))) {
160 } else if (x>0.5*y || (x==0.5*y && (q & 1))) {
168 *quo = (sxy ? -q : q);