Mercurial > hg > CbC > CbC_gcc
comparison gcc/double-int.c @ 63:b7f97abdc517 gcc-4.6-20100522
update gcc from gcc-4.5.0 to gcc-4.6
author | ryoma <e075725@ie.u-ryukyu.ac.jp> |
---|---|
date | Mon, 24 May 2010 12:47:05 +0900 |
parents | 77e2b8dfacca |
children | f6334be47118 |
comparison
equal
deleted
inserted
replaced
56:3c8a44c06a95 | 63:b7f97abdc517 |
---|---|
1 /* Operations with long integers. | 1 /* Operations with long integers. |
2 Copyright (C) 2006, 2007, 2009 Free Software Foundation, Inc. | 2 Copyright (C) 2006, 2007, 2009, 2010 Free Software Foundation, Inc. |
3 | 3 |
4 This file is part of GCC. | 4 This file is part of GCC. |
5 | 5 |
6 GCC is free software; you can redistribute it and/or modify it | 6 GCC is free software; you can redistribute it and/or modify it |
7 under the terms of the GNU General Public License as published by the | 7 under the terms of the GNU General Public License as published by the |
21 #include "system.h" | 21 #include "system.h" |
22 #include "coretypes.h" | 22 #include "coretypes.h" |
23 #include "tm.h" | 23 #include "tm.h" |
24 #include "tree.h" | 24 #include "tree.h" |
25 | 25 |
/* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
   overflow.  Suppose A, B and SUM have the same respective signs as A1, B1,
   and SUM1.  Then this yields nonzero if overflow occurred during the
   addition.

   Overflow occurs if A and B have the same sign, but A and SUM differ in
   sign.  Use `^' to test whether signs differ, and `< 0' to isolate the
   sign.  */
#define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)

/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer in 4 words, with only
   HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
   number.  The value of the word is LOWPART + HIGHPART * BASE.  */

#define LOWPART(x) \
  ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
#define HIGHPART(x) \
  ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)
47 /* Unpack a two-word integer into 4 words. | |
48 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces. | |
49 WORDS points to the array of HOST_WIDE_INTs. */ | |
50 | |
51 static void | |
52 encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi) | |
53 { | |
54 words[0] = LOWPART (low); | |
55 words[1] = HIGHPART (low); | |
56 words[2] = LOWPART (hi); | |
57 words[3] = HIGHPART (hi); | |
58 } | |
59 | |
60 /* Pack an array of 4 words into a two-word integer. | |
61 WORDS points to the array of words. | |
62 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */ | |
63 | |
64 static void | |
65 decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low, | |
66 HOST_WIDE_INT *hi) | |
67 { | |
68 *low = words[0] + words[1] * BASE; | |
69 *hi = words[2] + words[3] * BASE; | |
70 } | |
71 | |
72 /* Force the double-word integer L1, H1 to be within the range of the | |
73 integer type TYPE. Stores the properly truncated and sign-extended | |
74 double-word integer in *LV, *HV. Returns true if the operation | |
75 overflows, that is, argument and result are different. */ | |
76 | |
77 int | |
78 fit_double_type (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, | |
79 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, const_tree type) | |
80 { | |
81 unsigned HOST_WIDE_INT low0 = l1; | |
82 HOST_WIDE_INT high0 = h1; | |
83 unsigned int prec = TYPE_PRECISION (type); | |
84 int sign_extended_type; | |
85 | |
86 /* Size types *are* sign extended. */ | |
87 sign_extended_type = (!TYPE_UNSIGNED (type) | |
88 || (TREE_CODE (type) == INTEGER_TYPE | |
89 && TYPE_IS_SIZETYPE (type))); | |
90 | |
91 /* First clear all bits that are beyond the type's precision. */ | |
92 if (prec >= 2 * HOST_BITS_PER_WIDE_INT) | |
93 ; | |
94 else if (prec > HOST_BITS_PER_WIDE_INT) | |
95 h1 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT)); | |
96 else | |
97 { | |
98 h1 = 0; | |
99 if (prec < HOST_BITS_PER_WIDE_INT) | |
100 l1 &= ~((HOST_WIDE_INT) (-1) << prec); | |
101 } | |
102 | |
103 /* Then do sign extension if necessary. */ | |
104 if (!sign_extended_type) | |
105 /* No sign extension */; | |
106 else if (prec >= 2 * HOST_BITS_PER_WIDE_INT) | |
107 /* Correct width already. */; | |
108 else if (prec > HOST_BITS_PER_WIDE_INT) | |
109 { | |
110 /* Sign extend top half? */ | |
111 if (h1 & ((unsigned HOST_WIDE_INT)1 | |
112 << (prec - HOST_BITS_PER_WIDE_INT - 1))) | |
113 h1 |= (HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT); | |
114 } | |
115 else if (prec == HOST_BITS_PER_WIDE_INT) | |
116 { | |
117 if ((HOST_WIDE_INT)l1 < 0) | |
118 h1 = -1; | |
119 } | |
120 else | |
121 { | |
122 /* Sign extend bottom half? */ | |
123 if (l1 & ((unsigned HOST_WIDE_INT)1 << (prec - 1))) | |
124 { | |
125 h1 = -1; | |
126 l1 |= (HOST_WIDE_INT)(-1) << prec; | |
127 } | |
128 } | |
129 | |
130 *lv = l1; | |
131 *hv = h1; | |
132 | |
133 /* If the value didn't fit, signal overflow. */ | |
134 return l1 != low0 || h1 != high0; | |
135 } | |
136 | |
137 /* We force the double-int HIGH:LOW to the range of the type TYPE by | |
138 sign or zero extending it. | |
139 OVERFLOWABLE indicates if we are interested | |
140 in overflow of the value, when >0 we are only interested in signed | |
141 overflow, for <0 we are interested in any overflow. OVERFLOWED | |
142 indicates whether overflow has already occurred. CONST_OVERFLOWED | |
143 indicates whether constant overflow has already occurred. We force | |
144 T's value to be within range of T's type (by setting to 0 or 1 all | |
145 the bits outside the type's range). We set TREE_OVERFLOWED if, | |
146 OVERFLOWED is nonzero, | |
147 or OVERFLOWABLE is >0 and signed overflow occurs | |
148 or OVERFLOWABLE is <0 and any overflow occurs | |
149 We return a new tree node for the extended double-int. The node | |
150 is shared if no overflow flags are set. */ | |
151 | |
152 tree | |
153 force_fit_type_double (tree type, unsigned HOST_WIDE_INT low, | |
154 HOST_WIDE_INT high, int overflowable, | |
155 bool overflowed) | |
156 { | |
157 int sign_extended_type; | |
158 bool overflow; | |
159 | |
160 /* Size types *are* sign extended. */ | |
161 sign_extended_type = (!TYPE_UNSIGNED (type) | |
162 || (TREE_CODE (type) == INTEGER_TYPE | |
163 && TYPE_IS_SIZETYPE (type))); | |
164 | |
165 overflow = fit_double_type (low, high, &low, &high, type); | |
166 | |
167 /* If we need to set overflow flags, return a new unshared node. */ | |
168 if (overflowed || overflow) | |
169 { | |
170 if (overflowed | |
171 || overflowable < 0 | |
172 || (overflowable > 0 && sign_extended_type)) | |
173 { | |
174 tree t = make_node (INTEGER_CST); | |
175 TREE_INT_CST_LOW (t) = low; | |
176 TREE_INT_CST_HIGH (t) = high; | |
177 TREE_TYPE (t) = type; | |
178 TREE_OVERFLOW (t) = 1; | |
179 return t; | |
180 } | |
181 } | |
182 | |
183 /* Else build a shared node. */ | |
184 return build_int_cst_wide (type, low, high); | |
185 } | |
186 | |
187 /* Add two doubleword integers with doubleword result. | |
188 Return nonzero if the operation overflows according to UNSIGNED_P. | |
189 Each argument is given as two `HOST_WIDE_INT' pieces. | |
190 One argument is L1 and H1; the other, L2 and H2. | |
191 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */ | |
192 | |
193 int | |
194 add_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, | |
195 unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2, | |
196 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, | |
197 bool unsigned_p) | |
198 { | |
199 unsigned HOST_WIDE_INT l; | |
200 HOST_WIDE_INT h; | |
201 | |
202 l = l1 + l2; | |
203 h = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) h1 | |
204 + (unsigned HOST_WIDE_INT) h2 | |
205 + (l < l1)); | |
206 | |
207 *lv = l; | |
208 *hv = h; | |
209 | |
210 if (unsigned_p) | |
211 return ((unsigned HOST_WIDE_INT) h < (unsigned HOST_WIDE_INT) h1 | |
212 || (h == h1 | |
213 && l < l1)); | |
214 else | |
215 return OVERFLOW_SUM_SIGN (h1, h2, h); | |
216 } | |
217 | |
218 /* Negate a doubleword integer with doubleword result. | |
219 Return nonzero if the operation overflows, assuming it's signed. | |
220 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1. | |
221 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */ | |
222 | |
223 int | |
224 neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, | |
225 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv) | |
226 { | |
227 if (l1 == 0) | |
228 { | |
229 *lv = 0; | |
230 *hv = - h1; | |
231 return (*hv & h1) < 0; | |
232 } | |
233 else | |
234 { | |
235 *lv = -l1; | |
236 *hv = ~h1; | |
237 return 0; | |
238 } | |
239 } | |
240 | |
241 /* Multiply two doubleword integers with doubleword result. | |
242 Return nonzero if the operation overflows according to UNSIGNED_P. | |
243 Each argument is given as two `HOST_WIDE_INT' pieces. | |
244 One argument is L1 and H1; the other, L2 and H2. | |
245 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */ | |
246 | |
247 int | |
248 mul_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, | |
249 unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2, | |
250 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, | |
251 bool unsigned_p) | |
252 { | |
253 HOST_WIDE_INT arg1[4]; | |
254 HOST_WIDE_INT arg2[4]; | |
255 HOST_WIDE_INT prod[4 * 2]; | |
256 unsigned HOST_WIDE_INT carry; | |
257 int i, j, k; | |
258 unsigned HOST_WIDE_INT toplow, neglow; | |
259 HOST_WIDE_INT tophigh, neghigh; | |
260 | |
261 encode (arg1, l1, h1); | |
262 encode (arg2, l2, h2); | |
263 | |
264 memset (prod, 0, sizeof prod); | |
265 | |
266 for (i = 0; i < 4; i++) | |
267 { | |
268 carry = 0; | |
269 for (j = 0; j < 4; j++) | |
270 { | |
271 k = i + j; | |
272 /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */ | |
273 carry += arg1[i] * arg2[j]; | |
274 /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */ | |
275 carry += prod[k]; | |
276 prod[k] = LOWPART (carry); | |
277 carry = HIGHPART (carry); | |
278 } | |
279 prod[i + 4] = carry; | |
280 } | |
281 | |
282 decode (prod, lv, hv); | |
283 decode (prod + 4, &toplow, &tophigh); | |
284 | |
285 /* Unsigned overflow is immediate. */ | |
286 if (unsigned_p) | |
287 return (toplow | tophigh) != 0; | |
288 | |
289 /* Check for signed overflow by calculating the signed representation of the | |
290 top half of the result; it should agree with the low half's sign bit. */ | |
291 if (h1 < 0) | |
292 { | |
293 neg_double (l2, h2, &neglow, &neghigh); | |
294 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh); | |
295 } | |
296 if (h2 < 0) | |
297 { | |
298 neg_double (l1, h1, &neglow, &neghigh); | |
299 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh); | |
300 } | |
301 return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0; | |
302 } | |
303 | |
304 /* Shift the doubleword integer in L1, H1 left by COUNT places | |
305 keeping only PREC bits of result. | |
306 Shift right if COUNT is negative. | |
307 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift. | |
308 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */ | |
309 | |
310 void | |
311 lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, | |
312 HOST_WIDE_INT count, unsigned int prec, | |
313 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, bool arith) | |
314 { | |
315 unsigned HOST_WIDE_INT signmask; | |
316 | |
317 if (count < 0) | |
318 { | |
319 rshift_double (l1, h1, -count, prec, lv, hv, arith); | |
320 return; | |
321 } | |
322 | |
323 if (SHIFT_COUNT_TRUNCATED) | |
324 count %= prec; | |
325 | |
326 if (count >= 2 * HOST_BITS_PER_WIDE_INT) | |
327 { | |
328 /* Shifting by the host word size is undefined according to the | |
329 ANSI standard, so we must handle this as a special case. */ | |
330 *hv = 0; | |
331 *lv = 0; | |
332 } | |
333 else if (count >= HOST_BITS_PER_WIDE_INT) | |
334 { | |
335 *hv = l1 << (count - HOST_BITS_PER_WIDE_INT); | |
336 *lv = 0; | |
337 } | |
338 else | |
339 { | |
340 *hv = (((unsigned HOST_WIDE_INT) h1 << count) | |
341 | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1)); | |
342 *lv = l1 << count; | |
343 } | |
344 | |
345 /* Sign extend all bits that are beyond the precision. */ | |
346 | |
347 signmask = -((prec > HOST_BITS_PER_WIDE_INT | |
348 ? ((unsigned HOST_WIDE_INT) *hv | |
349 >> (prec - HOST_BITS_PER_WIDE_INT - 1)) | |
350 : (*lv >> (prec - 1))) & 1); | |
351 | |
352 if (prec >= 2 * HOST_BITS_PER_WIDE_INT) | |
353 ; | |
354 else if (prec >= HOST_BITS_PER_WIDE_INT) | |
355 { | |
356 *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT)); | |
357 *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT); | |
358 } | |
359 else | |
360 { | |
361 *hv = signmask; | |
362 *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec); | |
363 *lv |= signmask << prec; | |
364 } | |
365 } | |
366 | |
367 /* Shift the doubleword integer in L1, H1 right by COUNT places | |
368 keeping only PREC bits of result. Shift left if COUNT is negative. | |
369 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift. | |
370 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */ | |
371 | |
372 void | |
373 rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, | |
374 HOST_WIDE_INT count, unsigned int prec, | |
375 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, | |
376 bool arith) | |
377 { | |
378 unsigned HOST_WIDE_INT signmask; | |
379 | |
380 if (count < 0) | |
381 { | |
382 lshift_double (l1, h1, -count, prec, lv, hv, arith); | |
383 return; | |
384 } | |
385 | |
386 signmask = (arith | |
387 ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1)) | |
388 : 0); | |
389 | |
390 if (SHIFT_COUNT_TRUNCATED) | |
391 count %= prec; | |
392 | |
393 if (count >= 2 * HOST_BITS_PER_WIDE_INT) | |
394 { | |
395 /* Shifting by the host word size is undefined according to the | |
396 ANSI standard, so we must handle this as a special case. */ | |
397 *hv = 0; | |
398 *lv = 0; | |
399 } | |
400 else if (count >= HOST_BITS_PER_WIDE_INT) | |
401 { | |
402 *hv = 0; | |
403 *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT); | |
404 } | |
405 else | |
406 { | |
407 *hv = (unsigned HOST_WIDE_INT) h1 >> count; | |
408 *lv = ((l1 >> count) | |
409 | ((unsigned HOST_WIDE_INT) h1 | |
410 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1)); | |
411 } | |
412 | |
413 /* Zero / sign extend all bits that are beyond the precision. */ | |
414 | |
415 if (count >= (HOST_WIDE_INT)prec) | |
416 { | |
417 *hv = signmask; | |
418 *lv = signmask; | |
419 } | |
420 else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT) | |
421 ; | |
422 else if ((prec - count) >= HOST_BITS_PER_WIDE_INT) | |
423 { | |
424 *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT)); | |
425 *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT); | |
426 } | |
427 else | |
428 { | |
429 *hv = signmask; | |
430 *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count)); | |
431 *lv |= signmask << (prec - count); | |
432 } | |
433 } | |
434 | |
435 /* Rotate the doubleword integer in L1, H1 left by COUNT places | |
436 keeping only PREC bits of result. | |
437 Rotate right if COUNT is negative. | |
438 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */ | |
439 | |
440 void | |
441 lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, | |
442 HOST_WIDE_INT count, unsigned int prec, | |
443 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv) | |
444 { | |
445 unsigned HOST_WIDE_INT s1l, s2l; | |
446 HOST_WIDE_INT s1h, s2h; | |
447 | |
448 count %= prec; | |
449 if (count < 0) | |
450 count += prec; | |
451 | |
452 lshift_double (l1, h1, count, prec, &s1l, &s1h, 0); | |
453 rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0); | |
454 *lv = s1l | s2l; | |
455 *hv = s1h | s2h; | |
456 } | |
457 | |
458 /* Rotate the doubleword integer in L1, H1 left by COUNT places | |
459 keeping only PREC bits of result. COUNT must be positive. | |
460 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */ | |
461 | |
462 void | |
463 rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, | |
464 HOST_WIDE_INT count, unsigned int prec, | |
465 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv) | |
466 { | |
467 unsigned HOST_WIDE_INT s1l, s2l; | |
468 HOST_WIDE_INT s1h, s2h; | |
469 | |
470 count %= prec; | |
471 if (count < 0) | |
472 count += prec; | |
473 | |
474 rshift_double (l1, h1, count, prec, &s1l, &s1h, 0); | |
475 lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0); | |
476 *lv = s1l | s2l; | |
477 *hv = s1h | s2h; | |
478 } | |
479 | |
480 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN | |
481 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM). | |
482 CODE is a tree code for a kind of division, one of | |
483 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR | |
484 or EXACT_DIV_EXPR | |
485 It controls how the quotient is rounded to an integer. | |
486 Return nonzero if the operation overflows. | |
487 UNS nonzero says do unsigned division. */ | |
488 | |
489 int | |
490 div_and_round_double (unsigned code, int uns, | |
491 /* num == numerator == dividend */ | |
492 unsigned HOST_WIDE_INT lnum_orig, | |
493 HOST_WIDE_INT hnum_orig, | |
494 /* den == denominator == divisor */ | |
495 unsigned HOST_WIDE_INT lden_orig, | |
496 HOST_WIDE_INT hden_orig, | |
497 unsigned HOST_WIDE_INT *lquo, | |
498 HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem, | |
499 HOST_WIDE_INT *hrem) | |
500 { | |
501 int quo_neg = 0; | |
502 HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */ | |
503 HOST_WIDE_INT den[4], quo[4]; | |
504 int i, j; | |
505 unsigned HOST_WIDE_INT work; | |
506 unsigned HOST_WIDE_INT carry = 0; | |
507 unsigned HOST_WIDE_INT lnum = lnum_orig; | |
508 HOST_WIDE_INT hnum = hnum_orig; | |
509 unsigned HOST_WIDE_INT lden = lden_orig; | |
510 HOST_WIDE_INT hden = hden_orig; | |
511 int overflow = 0; | |
512 | |
513 if (hden == 0 && lden == 0) | |
514 overflow = 1, lden = 1; | |
515 | |
516 /* Calculate quotient sign and convert operands to unsigned. */ | |
517 if (!uns) | |
518 { | |
519 if (hnum < 0) | |
520 { | |
521 quo_neg = ~ quo_neg; | |
522 /* (minimum integer) / (-1) is the only overflow case. */ | |
523 if (neg_double (lnum, hnum, &lnum, &hnum) | |
524 && ((HOST_WIDE_INT) lden & hden) == -1) | |
525 overflow = 1; | |
526 } | |
527 if (hden < 0) | |
528 { | |
529 quo_neg = ~ quo_neg; | |
530 neg_double (lden, hden, &lden, &hden); | |
531 } | |
532 } | |
533 | |
534 if (hnum == 0 && hden == 0) | |
535 { /* single precision */ | |
536 *hquo = *hrem = 0; | |
537 /* This unsigned division rounds toward zero. */ | |
538 *lquo = lnum / lden; | |
539 goto finish_up; | |
540 } | |
541 | |
542 if (hnum == 0) | |
543 { /* trivial case: dividend < divisor */ | |
544 /* hden != 0 already checked. */ | |
545 *hquo = *lquo = 0; | |
546 *hrem = hnum; | |
547 *lrem = lnum; | |
548 goto finish_up; | |
549 } | |
550 | |
551 memset (quo, 0, sizeof quo); | |
552 | |
553 memset (num, 0, sizeof num); /* to zero 9th element */ | |
554 memset (den, 0, sizeof den); | |
555 | |
556 encode (num, lnum, hnum); | |
557 encode (den, lden, hden); | |
558 | |
559 /* Special code for when the divisor < BASE. */ | |
560 if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE) | |
561 { | |
562 /* hnum != 0 already checked. */ | |
563 for (i = 4 - 1; i >= 0; i--) | |
564 { | |
565 work = num[i] + carry * BASE; | |
566 quo[i] = work / lden; | |
567 carry = work % lden; | |
568 } | |
569 } | |
570 else | |
571 { | |
572 /* Full double precision division, | |
573 with thanks to Don Knuth's "Seminumerical Algorithms". */ | |
574 int num_hi_sig, den_hi_sig; | |
575 unsigned HOST_WIDE_INT quo_est, scale; | |
576 | |
577 /* Find the highest nonzero divisor digit. */ | |
578 for (i = 4 - 1;; i--) | |
579 if (den[i] != 0) | |
580 { | |
581 den_hi_sig = i; | |
582 break; | |
583 } | |
584 | |
585 /* Insure that the first digit of the divisor is at least BASE/2. | |
586 This is required by the quotient digit estimation algorithm. */ | |
587 | |
588 scale = BASE / (den[den_hi_sig] + 1); | |
589 if (scale > 1) | |
590 { /* scale divisor and dividend */ | |
591 carry = 0; | |
592 for (i = 0; i <= 4 - 1; i++) | |
593 { | |
594 work = (num[i] * scale) + carry; | |
595 num[i] = LOWPART (work); | |
596 carry = HIGHPART (work); | |
597 } | |
598 | |
599 num[4] = carry; | |
600 carry = 0; | |
601 for (i = 0; i <= 4 - 1; i++) | |
602 { | |
603 work = (den[i] * scale) + carry; | |
604 den[i] = LOWPART (work); | |
605 carry = HIGHPART (work); | |
606 if (den[i] != 0) den_hi_sig = i; | |
607 } | |
608 } | |
609 | |
610 num_hi_sig = 4; | |
611 | |
612 /* Main loop */ | |
613 for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) | |
614 { | |
615 /* Guess the next quotient digit, quo_est, by dividing the first | |
616 two remaining dividend digits by the high order quotient digit. | |
617 quo_est is never low and is at most 2 high. */ | |
618 unsigned HOST_WIDE_INT tmp; | |
619 | |
620 num_hi_sig = i + den_hi_sig + 1; | |
621 work = num[num_hi_sig] * BASE + num[num_hi_sig - 1]; | |
622 if (num[num_hi_sig] != den[den_hi_sig]) | |
623 quo_est = work / den[den_hi_sig]; | |
624 else | |
625 quo_est = BASE - 1; | |
626 | |
627 /* Refine quo_est so it's usually correct, and at most one high. */ | |
628 tmp = work - quo_est * den[den_hi_sig]; | |
629 if (tmp < BASE | |
630 && (den[den_hi_sig - 1] * quo_est | |
631 > (tmp * BASE + num[num_hi_sig - 2]))) | |
632 quo_est--; | |
633 | |
634 /* Try QUO_EST as the quotient digit, by multiplying the | |
635 divisor by QUO_EST and subtracting from the remaining dividend. | |
636 Keep in mind that QUO_EST is the I - 1st digit. */ | |
637 | |
638 carry = 0; | |
639 for (j = 0; j <= den_hi_sig; j++) | |
640 { | |
641 work = quo_est * den[j] + carry; | |
642 carry = HIGHPART (work); | |
643 work = num[i + j] - LOWPART (work); | |
644 num[i + j] = LOWPART (work); | |
645 carry += HIGHPART (work) != 0; | |
646 } | |
647 | |
648 /* If quo_est was high by one, then num[i] went negative and | |
649 we need to correct things. */ | |
650 if (num[num_hi_sig] < (HOST_WIDE_INT) carry) | |
651 { | |
652 quo_est--; | |
653 carry = 0; /* add divisor back in */ | |
654 for (j = 0; j <= den_hi_sig; j++) | |
655 { | |
656 work = num[i + j] + den[j] + carry; | |
657 carry = HIGHPART (work); | |
658 num[i + j] = LOWPART (work); | |
659 } | |
660 | |
661 num [num_hi_sig] += carry; | |
662 } | |
663 | |
664 /* Store the quotient digit. */ | |
665 quo[i] = quo_est; | |
666 } | |
667 } | |
668 | |
669 decode (quo, lquo, hquo); | |
670 | |
671 finish_up: | |
672 /* If result is negative, make it so. */ | |
673 if (quo_neg) | |
674 neg_double (*lquo, *hquo, lquo, hquo); | |
675 | |
676 /* Compute trial remainder: rem = num - (quo * den) */ | |
677 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem); | |
678 neg_double (*lrem, *hrem, lrem, hrem); | |
679 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem); | |
680 | |
681 switch (code) | |
682 { | |
683 case TRUNC_DIV_EXPR: | |
684 case TRUNC_MOD_EXPR: /* round toward zero */ | |
685 case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */ | |
686 return overflow; | |
687 | |
688 case FLOOR_DIV_EXPR: | |
689 case FLOOR_MOD_EXPR: /* round toward negative infinity */ | |
690 if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */ | |
691 { | |
692 /* quo = quo - 1; */ | |
693 add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, | |
694 lquo, hquo); | |
695 } | |
696 else | |
697 return overflow; | |
698 break; | |
699 | |
700 case CEIL_DIV_EXPR: | |
701 case CEIL_MOD_EXPR: /* round toward positive infinity */ | |
702 if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */ | |
703 { | |
704 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0, | |
705 lquo, hquo); | |
706 } | |
707 else | |
708 return overflow; | |
709 break; | |
710 | |
711 case ROUND_DIV_EXPR: | |
712 case ROUND_MOD_EXPR: /* round to closest integer */ | |
713 { | |
714 unsigned HOST_WIDE_INT labs_rem = *lrem; | |
715 HOST_WIDE_INT habs_rem = *hrem; | |
716 unsigned HOST_WIDE_INT labs_den = lden, ltwice; | |
717 HOST_WIDE_INT habs_den = hden, htwice; | |
718 | |
719 /* Get absolute values. */ | |
720 if (*hrem < 0) | |
721 neg_double (*lrem, *hrem, &labs_rem, &habs_rem); | |
722 if (hden < 0) | |
723 neg_double (lden, hden, &labs_den, &habs_den); | |
724 | |
725 /* If (2 * abs (lrem) >= abs (lden)), adjust the quotient. */ | |
726 mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0, | |
727 labs_rem, habs_rem, <wice, &htwice); | |
728 | |
729 if (((unsigned HOST_WIDE_INT) habs_den | |
730 < (unsigned HOST_WIDE_INT) htwice) | |
731 || (((unsigned HOST_WIDE_INT) habs_den | |
732 == (unsigned HOST_WIDE_INT) htwice) | |
733 && (labs_den <= ltwice))) | |
734 { | |
735 if (*hquo < 0) | |
736 /* quo = quo - 1; */ | |
737 add_double (*lquo, *hquo, | |
738 (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo); | |
739 else | |
740 /* quo = quo + 1; */ | |
741 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0, | |
742 lquo, hquo); | |
743 } | |
744 else | |
745 return overflow; | |
746 } | |
747 break; | |
748 | |
749 default: | |
750 gcc_unreachable (); | |
751 } | |
752 | |
753 /* Compute true remainder: rem = num - (quo * den) */ | |
754 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem); | |
755 neg_double (*lrem, *hrem, lrem, hrem); | |
756 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem); | |
757 return overflow; | |
758 } | |
759 | |
760 | |
26 /* Returns mask for PREC bits. */ | 761 /* Returns mask for PREC bits. */ |
27 | 762 |
28 double_int | 763 double_int |
29 double_int_mask (unsigned prec) | 764 double_int_mask (unsigned prec) |
30 { | 765 { |
105 } | 840 } |
106 | 841 |
107 return r; | 842 return r; |
108 } | 843 } |
109 | 844 |
110 /* Constructs long integer from tree CST. The extra bits over the precision of | |
111 the number are filled with sign bit if CST is signed, and with zeros if it | |
112 is unsigned. */ | |
113 | |
114 double_int | |
115 tree_to_double_int (const_tree cst) | |
116 { | |
117 /* We do not need to call double_int_restrict here to ensure the semantics as | |
118 described, as this is the default one for trees. */ | |
119 return TREE_INT_CST (cst); | |
120 } | |
121 | |
122 /* Returns true if CST fits in unsigned HOST_WIDE_INT. */ | 845 /* Returns true if CST fits in unsigned HOST_WIDE_INT. */ |
123 | 846 |
124 bool | 847 bool |
125 double_int_fits_in_uhwi_p (double_int cst) | 848 double_int_fits_in_uhwi_p (double_int cst) |
126 { | 849 { |
209 double_int_divmod (double_int a, double_int b, bool uns, unsigned code, | 932 double_int_divmod (double_int a, double_int b, bool uns, unsigned code, |
210 double_int *mod) | 933 double_int *mod) |
211 { | 934 { |
212 double_int ret; | 935 double_int ret; |
213 | 936 |
214 div_and_round_double ((enum tree_code) code, uns, a.low, a.high, | 937 div_and_round_double (code, uns, a.low, a.high, |
215 b.low, b.high, &ret.low, &ret.high, | 938 b.low, b.high, &ret.low, &ret.high, |
216 &mod->low, &mod->high); | 939 &mod->low, &mod->high); |
217 return ret; | 940 return ret; |
218 } | 941 } |
219 | 942 |
288 double_int_umod (double_int a, double_int b, unsigned code) | 1011 double_int_umod (double_int a, double_int b, unsigned code) |
289 { | 1012 { |
290 return double_int_mod (a, b, true, code); | 1013 return double_int_mod (a, b, true, code); |
291 } | 1014 } |
292 | 1015 |
293 /* Constructs tree in type TYPE from with value given by CST. Signedness of CST | 1016 /* Set BITPOS bit in A. */ |
294 is assumed to be the same as the signedness of TYPE. */ | 1017 double_int |
295 | 1018 double_int_setbit (double_int a, unsigned bitpos) |
296 tree | 1019 { |
297 double_int_to_tree (tree type, double_int cst) | 1020 if (bitpos < HOST_BITS_PER_WIDE_INT) |
298 { | 1021 a.low |= (unsigned HOST_WIDE_INT) 1 << bitpos; |
299 cst = double_int_ext (cst, TYPE_PRECISION (type), TYPE_UNSIGNED (type)); | 1022 else |
300 | 1023 a.high |= (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT); |
301 return build_int_cst_wide (type, cst.low, cst.high); | 1024 |
302 } | 1025 return a; |
303 | 1026 } |
304 /* Returns true if CST fits into range of TYPE. Signedness of CST is assumed | 1027 |
305 to be the same as the signedness of TYPE. */ | 1028 /* Shift A left by COUNT places keeping only PREC bits of result. Shift |
306 | 1029 right if COUNT is negative. ARITH true specifies arithmetic shifting; |
307 bool | 1030 otherwise use logical shift. */ |
308 double_int_fits_to_tree_p (const_tree type, double_int cst) | 1031 |
309 { | 1032 double_int |
310 double_int ext = double_int_ext (cst, | 1033 double_int_lshift (double_int a, HOST_WIDE_INT count, unsigned int prec, bool arith) |
311 TYPE_PRECISION (type), | 1034 { |
312 TYPE_UNSIGNED (type)); | 1035 double_int ret; |
313 | 1036 lshift_double (a.low, a.high, count, prec, &ret.low, &ret.high, arith); |
314 return double_int_equal_p (cst, ext); | 1037 return ret; |
315 } | 1038 } |
316 | 1039 |
317 /* Returns true if CST is negative. Of course, CST is considered to | 1040 /* Shift A right by COUNT places keeping only PREC bits of result. Shift |
318 be signed. */ | 1041 left if COUNT is negative. ARITH true specifies arithmetic shifting; |
319 | 1042 otherwise use logical shift. */ |
320 bool | 1043 |
321 double_int_negative_p (double_int cst) | 1044 double_int |
322 { | 1045 double_int_rshift (double_int a, HOST_WIDE_INT count, unsigned int prec, bool arith) |
323 return cst.high < 0; | 1046 { |
1047 double_int ret; | |
1048 rshift_double (a.low, a.high, count, prec, &ret.low, &ret.high, arith); | |
1049 return ret; | |
324 } | 1050 } |
325 | 1051 |
326 /* Returns -1 if A < B, 0 if A == B and 1 if A > B. Signedness of the | 1052 /* Returns -1 if A < B, 0 if A == B and 1 if A > B. Signedness of the |
327 comparison is given by UNS. */ | 1053 comparison is given by UNS. */ |
328 | 1054 |