1 /* mpf_ui_sub -- Subtract a float from an unsigned long int.
3 Copyright (C) 1993, 1994, 1995, 1996 Free Software Foundation, Inc.
5 This file is part of the GNU MP Library.
7 The GNU MP Library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Library General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or (at your
10 option) any later version.
12 The GNU MP Library is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
15 License for more details.
17 You should have received a copy of the GNU Library General Public License
18 along with the GNU MP Library; see the file COPYING.LIB. If not, write to
19 the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
20 MA 02111-1307, USA. */
/* NOTE(review): this is a fragmentary listing — the numbers at the start of
   each line are the original file's line numbers, and many intervening lines
   (includes, braces, declarations, if/else keywords, TMP_DECL/TMP_MARK
   scaffolding) are missing from this view.  Comments below describe only what
   the visible lines establish; gaps are flagged as assumptions to confirm
   against the complete file.  */

/* mpf_ui_sub: set R = U - V, where U is an unsigned long int and V is an
   mpf (per the file header: "Subtract a float from an unsigned long int"). */
27 mpf_ui_sub (mpf_ptr r, unsigned long int u, mpf_srcptr v)
/* Limb counts of the operands and of the result.  */
37 mp_size_t usize, vsize, rsize;
47 /* Handle special cases that don't work in generic code below. */
59 /* If signs of U and V are different, perform addition. */
/* Build a stack-local negated view of V (flip the sign of _mp_size, share
   the limb pointer and exponent) and delegate to mpf_add_ui: u - (-|v|)
   becomes |v| + u.  No limb data is copied.  */
62 __mpf_struct v_negated;
63 v_negated._mp_size = -vsize;
64 v_negated._mp_exp = v->_mp_exp;
65 v_negated._mp_d = v->_mp_d;
66 mpf_add_ui (r, &v_negated, u);
72 /* Signs are now known to be the same. */
75 /* Make U be the operand with the largest exponent. */
/* Work with one guard limb beyond the destination's precision.  */
84 prec = r->_mp_prec + 1;
/* Exponent difference between the operands; U (the unsigned long, one limb,
   exponent 1) has the larger exponent on this path.
   NOTE(review): the branch where V's exponent dominates is not visible here —
   confirm in the full file.  */
98 ediff = 1 - v->_mp_exp;
101 /* Ignore leading limbs in U and V that are equal. Doing
102 this helps increase the precision of the result. */
105 /* This loop normally exits immediately. Optimize for that. */
/* Stop stripping as soon as the most significant remaining limbs differ.
   NOTE(review): the decrements of usize/vsize driving this loop are in the
   missing lines — presumably up[usize]/vp[vsize] index the current leading
   limbs; verify against the complete source.  */
110 if (up[usize] != vp[vsize])
120 /* Note that either operand (but not both operands) might now have
121 leading zero limbs. It matters only that U is unnormalized if
122 vsize is now zero, and vice versa. And it is only in that case
123 that we have to adjust uexp. */
/* Strip high zero limbs left behind by the cancellation above.  */
126 while (usize != 0 && up[usize - 1] == 0)
130 while (vsize != 0 && vp[vsize - 1] == 0)
134 /* If U extends beyond PREC, ignore the part that does. */
141 /* If V extends beyond PREC, ignore the part that does.
142 Note that this may make vsize negative. */
/* Drop V's least significant limbs that fall below the precision window;
   vsize may legitimately go negative here (V entirely below the window).  */
143 if (vsize + ediff > prec)
145 vp += vsize + ediff - prec;
146 vsize = prec - ediff;
149 /* Allocate temp space for the result. Allocate
150 just vsize + ediff later??? */
151 tp = (mp_ptr) TMP_ALLOC (prec * BYTES_PER_MP_LIMB);
155 /* V completely cancelled. */
/* Result is just U's limbs, copied to the destination.  */
157 MPN_COPY (rp, up, usize);
162 /* Locate the least significant non-zero limb in (the needed
163 parts of) U and V, to simplify the code below. */
/* NOTE(review): the surrounding conditions selecting between these two
   copies are in the missing lines — presumably these handle the cases where
   one operand is entirely zero in the window.  */
168 MPN_COPY (rp, up, usize);
180 MPN_COPY (rp, vp, vsize);
/* Case analysis on how U's and V's limb ranges overlap, after alignment by
   ediff.  The pictures show U above V; the five columns are the five cases
   handled below.  */
190 /* uuuu | uuuu | uuuu | uuuu | uuuu */
191 /* vvvvvvv | vv | vvvvv | v | vv */
195 /* U and V partially overlaps. */
198 /* Have to compare the leading limbs of u and v
199 to determine whether to compute u - v or v - u. */
/* Compare the overlapping high parts to pick the larger operand.  */
205 cmp = mpn_cmp (up + usize - vsize, vp, vsize);
/* U >= V: low (usize - vsize) limbs of U pass through unchanged, the
   overlapping high part is U - V.  */
209 size = usize - vsize;
210 MPN_COPY (tp, up, size);
211 mpn_sub_n (tp + size, up + size, vp, vsize);
216 /* vv */ /* Swap U and V. */
/* V > U: compute V - U instead.  The low limbs come from negating U's low
   part (borrow propagation handled by the loop and the final sub_1).
   NOTE(review): the loop body and the i = 0 / negate bookkeeping are in the
   missing lines — confirm against the full file.  */
219 size = usize - vsize;
221 for (i = 1; i < size; i++)
223 mpn_sub_n (tp + size, vp, up + size, vsize);
/* Propagate the borrow generated by negating the low limbs.  */
224 mpn_sub_1 (tp + size, tp + size, vsize, (mp_limb_t) 1);
/* Mirror case: V has more limbs than U in the window.  */
229 else if (usize < vsize)
234 cmp = mpn_cmp (up, vp + vsize - usize, usize);
238 size = vsize - usize;
240 for (i = 1; i < size; i++)
242 mpn_sub_n (tp + size, up, vp + size, usize);
243 mpn_sub_1 (tp + size, tp + size, usize, (mp_limb_t) 1);
248 /* vvvvvvv */ /* Swap U and V. */
250 /* This is the only place we can get 0.0. */
/* V >= U with V wider: low limbs of V pass through, high part is V - U.  */
252 size = vsize - usize;
253 MPN_COPY (tp, vp, size);
254 mpn_sub_n (tp + size, vp + size, up, usize);
/* NOTE(review): equal-size overlap case — compare, then subtract the
   smaller from the larger; the selecting if/else lines are missing here.  */
264 cmp = mpn_cmp (up, vp + vsize - usize, usize);
267 mpn_sub_n (tp, up, vp, usize);
272 mpn_sub_n (tp, vp, up, usize);
/* Non-overlapping (or barely overlapping) operands: U's limbs sit entirely
   above V's after shifting by ediff.  */
281 if (vsize + ediff <= usize)
/* V fits below U's low end within U's span: copy U's untouched low limbs,
   then subtract V from the part of U it aligns with.  */
286 size = usize - ediff - vsize;
287 MPN_COPY (tp, up, size);
288 mpn_sub (tp + size, up + size, usize - size, vp, vsize);
/* V extends below U: the low limbs are the negation of V's low part
   (borrow carried into U via the final sub_1).  */
296 size = vsize + ediff - usize;
298 for (i = 1; i < size; i++)
300 mpn_sub (tp + size, up, usize, vp + size, usize - ediff);
301 mpn_sub_1 (tp + size, tp + size, usize, (mp_limb_t) 1);
302 rsize = vsize + ediff;
/* V entirely below U (a gap of zero limbs between them): negate V's limbs,
   fill the gap with all-ones limbs (the borrowed zeros), and borrow 1 out
   of U.  */
311 size = vsize + ediff - usize;
313 for (i = 1; i < vsize; i++)
315 for (i = vsize; i < size; i++)
316 tp[i] = ~(mp_limb_t) 0;
317 mpn_sub_1 (tp + size, up, usize, (mp_limb_t) 1);
318 rsize = size + usize;
321 /* Full normalize. Optimize later. */
/* Strip high zero limbs from the result of the subtraction.  */
322 while (rsize != 0 && tp[rsize - 1] == 0)
/* Copy the normalized result into the destination and set its signed size;
   `negate` records whether the true result is negative (i.e. V > U).  */
327 MPN_COPY (rp, tp, rsize);
331 r->_mp_size = negate ? -rsize : rsize;