/* @(#)e_fmod.c 1.3 95/01/18 */
/*
 * ====================================================
 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
 *
 * Developed at SunSoft, a Sun Microsystems, Inc. business.
 * Permission to use, copy, modify, and distribute this
 * software is freely granted, provided that this notice
 * is preserved.
 * ====================================================
 */
13 #include <sys/cdefs.h>
14 __FBSDID("$FreeBSD$");
21 #include "math_private.h"
23 #define BIAS (LDBL_MAX_EXP - 1)
25 #if LDBL_MANL_SIZE > 32
26 typedef uint64_t manl_t;
28 typedef uint32_t manl_t;
31 #if LDBL_MANH_SIZE > 32
32 typedef uint64_t manh_t;
34 typedef uint32_t manh_t;
/*
 * These macros add and remove an explicit integer bit in front of the
 * fractional mantissa, if the architecture doesn't have such a bit by
 * default already.
 */
#ifdef LDBL_IMPLICIT_NBIT
#define	SET_NBIT(hx)	((hx) | (1ULL << LDBL_MANH_SIZE))
#define	HFRAC_BITS	LDBL_MANH_SIZE
#else
#define	SET_NBIT(hx)	(hx)
#define	HFRAC_BITS	(LDBL_MANH_SIZE - 1)
#endif

/* Shift distance that exposes the top bit of the low mantissa word. */
#define	MANL_SHIFT	(LDBL_MANL_SIZE - 1)
/* Signed zeros, indexed by the sign bit: Zero[0] = +0.0, Zero[1] = -0.0. */
static const long double Zero[] = {0.0L, -0.0L};
/*
 * Return the IEEE remainder and set *quo to the last n bits of the
 * quotient, rounded to the nearest integer.  We choose n=31 because
 * we wind up computing all the integer bits of the quotient anyway as
 * a side-effect of computing the remainder by the shift and subtract
 * method.  In practice, this is far more bits than are needed to use
 * remquo in reduction algorithms.
 *
 * Assumptions:
 * - The low part of the mantissa fits in a manl_t exactly.
 * - The high part of the mantissa fits in an int64_t with enough room
 *   for an explicit integer bit in front of the fractional bits.
 */
remquol(long double x, long double y, int *quo)
	/*
	 * Computes the IEEE-754 remainder x REM y by fixed-point shift and
	 * subtract on the raw mantissas, and stores the low 31 bits of the
	 * round-to-nearest integer quotient through *quo (negated when x and
	 * y have opposite signs).
	 *
	 * NOTE(review): this excerpt is missing several lines relative to the
	 * upstream source: the return-type line and opening brace, the
	 * declarations of hy, lx, ly, lz, ix, iy, n, q, sx, sxy, the loads of
	 * ux.e/uy.e from x/y, various loop headers, else branches and closing
	 * braces, the fixup: label and the final return.  Restore them from
	 * the original before building.
	 */
	union IEEEl2bits ux, uy;	/* bit-level views of |x| and |y| */
	int64_t hx,hz;	/* We need a carry bit even if LDBL_MANH_SIZE is 32. */

	/* sxy = sign of the quotient; sx is presumably x's saved sign bit
	 * (declaration/assignment elided above) -- TODO confirm. */
	sxy = sx ^ uy.bits.sign;
	ux.bits.sign = 0;	/* |x| */
	uy.bits.sign = 0;	/* |y| */

    /* purge off exception values */
	if((uy.bits.exp|uy.bits.manh|uy.bits.manl)==0 || /* y=0 */
	   (ux.bits.exp == BIAS + LDBL_MAX_EXP) ||	 /* or x not finite */
	   (uy.bits.exp == BIAS + LDBL_MAX_EXP &&
	    ((uy.bits.manh&~LDBL_NBIT)|uy.bits.manl)!=0)) /* or y is NaN */
	    /* Raise invalid and return a NaN that mixes both operands. */
	    return nan_mix_op(x, y, *)/nan_mix_op(x, y, *);

	/* Quick exits when the exponent of |x| does not exceed that of |y|. */
	if(ux.bits.exp<=uy.bits.exp) {
	    if((ux.bits.exp<uy.bits.exp) ||
	       (ux.bits.manh<=uy.bits.manh &&
		(ux.bits.manh<uy.bits.manh ||
		 ux.bits.manl<uy.bits.manl))) {
		/* NOTE(review): "q = 0;" is elided before the goto, and the
		 * brace closing this if is elided after it. */
		goto fixup;	/* |x|<|y| return x or x-y */
	    if(ux.bits.manh==uy.bits.manh && ux.bits.manl==uy.bits.manl) {
		*quo = (sxy ? -1 : 1);
		return Zero[sx];	/* |x|=|y| return x*0*/

    /* determine ix = ilogb(x) */
	if(ux.bits.exp == 0) {	/* subnormal x */
	    /* NOTE(review): the upscaling of ux.e by 2**512 that makes this
	     * exponent read meaningful is elided here. */
	    ix = ux.bits.exp - (BIAS + 512);
	    /* else branch (normal x); "} else {" elided: */
	    ix = ux.bits.exp - BIAS;

    /* determine iy = ilogb(y) */
	if(uy.bits.exp == 0) {	/* subnormal y */
	    iy = uy.bits.exp - (BIAS + 512);
	    /* else branch (normal y); "} else {" elided: */
	    iy = uy.bits.exp - BIAS;

    /* set up {hx,lx}, {hy,ly} and align y to x */
	hx = SET_NBIT(ux.bits.manh);
	hy = SET_NBIT(uy.bits.manh);
	/* NOTE(review): the assignments of lx/ly from the low mantissa words
	 * and the fixed-point loop header over n = ix - iy are elided. */

	/* One trial subtraction per quotient bit: on success record a 1 in q,
	 * then shift the 128-bit remainder {hx,lx} left by one. */
	    hz=hx-hy;lz=lx-ly; if(lx<ly) hz -= 1;	/* borrow from low word */
	    if(hz<0){hx = hx+hx+(lx>>MANL_SHIFT); lx = lx+lx;}
	    else {hx = hz+hz+(lz>>MANL_SHIFT); lx = lz+lz; q++;}
	/* Final trial subtraction for the last quotient bit. */
	hz=hx-hy;lz=lx-ly; if(lx<ly) hz -= 1;
	if(hz>=0) {hx=hz;lx=lz;q++;}

    /* convert back to floating value and restore the sign */
	if((hx|lx)==0) {			/* return sign(x)*0 */
	    /* NOTE(review): "q &= 0x7fffffff;" is elided before this store,
	     * and "return Zero[sx];" after it. */
	    *quo = (sxy ? -q : q);
	while(hx<(1ULL<<HFRAC_BITS)) {	/* normalize x */
	    hx = hx+hx+(lx>>MANL_SHIFT); lx = lx+lx;
	    /* NOTE(review): "iy -= 1;" (exponent adjustment) elided here. */
	ux.bits.manh = hx; /* The integer bit is truncated here if needed. */
	if (iy < LDBL_MIN_EXP) {
	    /* Subnormal result: bias the exponent up by 512 here; the
	     * compensating scale of ux.e by 2**-512 is elided below. */
	    ux.bits.exp = iy + (BIAS + 512);
	    /* else branch (normal result); "} else {" elided: */
	    ux.bits.exp = iy + BIAS;

	/* fixup: round the remainder to nearest (ties to even via q's low
	 * bit).  NOTE(review): the fixup: label, "x = ux.e;" and the
	 * y = fabsl(y) normalization are elided above this point. */
	if (y < LDBL_MIN * 2) {	/* tiny y: compare x+x with y so 0.5*y
				 * need not be formed (it could lose bits
				 * in the subnormal range) */
	    if (x+x>y || (x+x==y && (q & 1))) {
	} else if (x>0.5*y || (x==0.5*y && (q & 1))) {
	/* NOTE(review): the q++/x -= y bodies, sign restoration via
	 * ux.bits.sign ^= sx, and "q &= 0x7fffffff;" are elided. */
	*quo = (sxy ? -q : q);