/* mpihelp-div.c  -  MPI helper functions
 * Modified by No Such Labs. (C) 2015. See README.
 *
 * This file was originally part of Gnu Privacy Guard (GPG), ver. 1.4.10,
 * SHA256(gnupg-1.4.10.tar.gz):
 * 0bfd74660a2f6cedcf7d8256db4a63c996ffebbcdc2cf54397bfb72878c5a85a
 * (C) 1994-2005 Free Software Foundation, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

ch1_mpi 23 #include <stdio.h>
ch1_mpi 24 #include <stdlib.h>
ch1_mpi 25
ch1_mpi 26 #include "knobs.h"
ch1_mpi 27 #include "mpi-internal.h"
ch1_mpi 28 #include "longlong.h"
ch1_mpi 29
ch1_mpi 30 #ifndef UMUL_TIME
ch1_mpi 31 #define UMUL_TIME 1
ch1_mpi 32 #endif
ch1_mpi 33 #ifndef UDIV_TIME
ch1_mpi 34 #define UDIV_TIME UMUL_TIME
ch1_mpi 35 #endif

/* FIXME: We should be using invert_limb (or invert_normalized_limb)
 * here (not udiv_qrnnd).
 */

ch1_mpi 41 mpi_limb_t
ch1_mpi 42 mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
ch1_mpi 43 mpi_limb_t divisor_limb)
ch1_mpi 44 {
ch1_mpi 45 mpi_size_t i;
ch1_mpi 46 mpi_limb_t n1, n0, r;
ch1_mpi 47 int dummy;
ch1_mpi 48
ch1_mpi 49 /* Botch: Should this be handled at all? Rely on callers? */
ch1_mpi 50 if( !dividend_size )
ch1_mpi 51 return 0;
ch1_mpi 52
ch1_mpi 53 /* If multiplication is much faster than division, and the
ch1_mpi 54 * dividend is large, pre-invert the divisor, and use
ch1_mpi 55 * only multiplications in the inner loop.
ch1_mpi 56 *
ch1_mpi 57 * This test should be read:
ch1_mpi 58 * Does it ever help to use udiv_qrnnd_preinv?
ch1_mpi 59 * && Does what we save compensate for the inversion overhead?
ch1_mpi 60 */
ch1_mpi 61 if( UDIV_TIME > (2 * UMUL_TIME + 6)
ch1_mpi 62 && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME ) {
ch1_mpi 63 int normalization_steps;
ch1_mpi 64
ch1_mpi 65 count_leading_zeros( normalization_steps, divisor_limb );
ch1_mpi 66 if( normalization_steps ) {
ch1_mpi 67 mpi_limb_t divisor_limb_inverted;
ch1_mpi 68
ch1_mpi 69 divisor_limb <<= normalization_steps;
ch1_mpi 70
ch1_mpi 71 /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
ch1_mpi 72 * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
ch1_mpi 73 * most significant bit (with weight 2**N) implicit.
ch1_mpi 74 *
ch1_mpi 75 * Special case for DIVISOR_LIMB == 100...000.
ch1_mpi 76 */
ch1_mpi 77 if( !(divisor_limb << 1) )
ch1_mpi 78 divisor_limb_inverted = ~(mpi_limb_t)0;
ch1_mpi 79 else
ch1_mpi 80 udiv_qrnnd(divisor_limb_inverted, dummy,
ch1_mpi 81 -divisor_limb, 0, divisor_limb);
ch1_mpi 82
ch1_mpi 83 n1 = dividend_ptr[dividend_size - 1];
ch1_mpi 84 r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
ch1_mpi 85
ch1_mpi 86 /* Possible optimization:
ch1_mpi 87 * if (r == 0
ch1_mpi 88 * && divisor_limb > ((n1 << normalization_steps)
ch1_mpi 89 * | (dividend_ptr[dividend_size - 2] >> ...)))
ch1_mpi 90 * ...one division less...
ch1_mpi 91 */
ch1_mpi 92 for( i = dividend_size - 2; i >= 0; i--) {
ch1_mpi 93 n0 = dividend_ptr[i];
ch1_mpi 94 UDIV_QRNND_PREINV(dummy, r, r,
ch1_mpi 95 ((n1 << normalization_steps)
ch1_mpi 96 | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
ch1_mpi 97 divisor_limb, divisor_limb_inverted);
ch1_mpi 98 n1 = n0;
ch1_mpi 99 }
ch1_mpi 100 UDIV_QRNND_PREINV(dummy, r, r,
ch1_mpi 101 n1 << normalization_steps,
ch1_mpi 102 divisor_limb, divisor_limb_inverted);
ch1_mpi 103 return r >> normalization_steps;
ch1_mpi 104 }
ch1_mpi 105 else {
ch1_mpi 106 mpi_limb_t divisor_limb_inverted;
ch1_mpi 107
ch1_mpi 108 /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
ch1_mpi 109 * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
ch1_mpi 110 * most significant bit (with weight 2**N) implicit.
ch1_mpi 111 *
ch1_mpi 112 * Special case for DIVISOR_LIMB == 100...000.
ch1_mpi 113 */
ch1_mpi 114 if( !(divisor_limb << 1) )
ch1_mpi 115 divisor_limb_inverted = ~(mpi_limb_t)0;
ch1_mpi 116 else
ch1_mpi 117 udiv_qrnnd(divisor_limb_inverted, dummy,
ch1_mpi 118 -divisor_limb, 0, divisor_limb);
ch1_mpi 119
ch1_mpi 120 i = dividend_size - 1;
ch1_mpi 121 r = dividend_ptr[i];
ch1_mpi 122
ch1_mpi 123 if( r >= divisor_limb )
ch1_mpi 124 r = 0;
ch1_mpi 125 else
ch1_mpi 126 i--;
ch1_mpi 127
ch1_mpi 128 for( ; i >= 0; i--) {
ch1_mpi 129 n0 = dividend_ptr[i];
ch1_mpi 130 UDIV_QRNND_PREINV(dummy, r, r,
ch1_mpi 131 n0, divisor_limb, divisor_limb_inverted);
ch1_mpi 132 }
ch1_mpi 133 return r;
ch1_mpi 134 }
ch1_mpi 135 }
ch1_mpi 136 else {
ch1_mpi 137 if( UDIV_NEEDS_NORMALIZATION ) {
ch1_mpi 138 int normalization_steps;
ch1_mpi 139
ch1_mpi 140 count_leading_zeros(normalization_steps, divisor_limb);
ch1_mpi 141 if( normalization_steps ) {
ch1_mpi 142 divisor_limb <<= normalization_steps;
ch1_mpi 143
ch1_mpi 144 n1 = dividend_ptr[dividend_size - 1];
ch1_mpi 145 r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
ch1_mpi 146
ch1_mpi 147 /* Possible optimization:
ch1_mpi 148 * if (r == 0
ch1_mpi 149 * && divisor_limb > ((n1 << normalization_steps)
ch1_mpi 150 * | (dividend_ptr[dividend_size - 2] >> ...)))
ch1_mpi 151 * ...one division less...
ch1_mpi 152 */
ch1_mpi 153 for(i = dividend_size - 2; i >= 0; i--) {
ch1_mpi 154 n0 = dividend_ptr[i];
ch1_mpi 155 udiv_qrnnd (dummy, r, r,
ch1_mpi 156 ((n1 << normalization_steps)
ch1_mpi 157 | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
ch1_mpi 158 divisor_limb);
ch1_mpi 159 n1 = n0;
ch1_mpi 160 }
ch1_mpi 161 udiv_qrnnd (dummy, r, r,
ch1_mpi 162 n1 << normalization_steps,
ch1_mpi 163 divisor_limb);
ch1_mpi 164 return r >> normalization_steps;
ch1_mpi 165 }
ch1_mpi 166 }
ch1_mpi 167 /* No normalization needed, either because udiv_qrnnd doesn't require
ch1_mpi 168 * it, or because DIVISOR_LIMB is already normalized. */
ch1_mpi 169 i = dividend_size - 1;
ch1_mpi 170 r = dividend_ptr[i];
ch1_mpi 171
ch1_mpi 172 if(r >= divisor_limb)
ch1_mpi 173 r = 0;
ch1_mpi 174 else
ch1_mpi 175 i--;
ch1_mpi 176
ch1_mpi 177 for(; i >= 0; i--) {
ch1_mpi 178 n0 = dividend_ptr[i];
ch1_mpi 179 udiv_qrnnd (dummy, r, r, n0, divisor_limb);
ch1_mpi 180 }
ch1_mpi 181 return r;
ch1_mpi 182 }
ch1_mpi 183 }

/* Divide num (NP/NSIZE) by den (DP/DSIZE) and write
 * the NSIZE-DSIZE least significant quotient limbs at QP
 * and the DSIZE long remainder at NP.	If QEXTRA_LIMBS is
 * non-zero, generate that many fraction bits and append them after the
 * other quotient limbs.
 * Return the most significant limb of the quotient, this is always 0 or 1.
 *
 * Preconditions:
 * 0. NSIZE >= DSIZE.
 * 1. The most significant bit of the divisor must be set.
 * 2. QP must either not overlap with the input operands at all, or
 *    QP + DSIZE >= NP must hold true.	(This means that it's
 *    possible to put the quotient in the high part of NUM, right after the
 *    remainder in NUM.)
 * 3. NSIZE >= DSIZE, even if QEXTRA_LIMBS is non-zero.
 */

ch1_mpi 202 mpi_limb_t
ch1_mpi 203 mpihelp_divrem( mpi_ptr_t qp, mpi_size_t qextra_limbs,
ch1_mpi 204 mpi_ptr_t np, mpi_size_t nsize,
ch1_mpi 205 mpi_ptr_t dp, mpi_size_t dsize)
ch1_mpi 206 {
ch1_mpi 207 mpi_limb_t most_significant_q_limb = 0;
ch1_mpi 208
ch1_mpi 209 switch(dsize) {
ch1_mpi 210 case 0:
ch1_mpi 211 /* We are asked to divide by zero, so go ahead and do it! (To make
ch1_mpi 212 the compiler not remove this statement, return the value.) */
ch1_mpi 213 return 1 / dsize;
ch1_mpi 214
ch1_mpi 215 case 1:
ch1_mpi 216 {
ch1_mpi 217 mpi_size_t i;
ch1_mpi 218 mpi_limb_t n1;
ch1_mpi 219 mpi_limb_t d;
ch1_mpi 220
ch1_mpi 221 d = dp[0];
ch1_mpi 222 n1 = np[nsize - 1];
ch1_mpi 223
ch1_mpi 224 if( n1 >= d ) {
ch1_mpi 225 n1 -= d;
ch1_mpi 226 most_significant_q_limb = 1;
ch1_mpi 227 }
ch1_mpi 228
ch1_mpi 229 qp += qextra_limbs;
ch1_mpi 230 for( i = nsize - 2; i >= 0; i--)
ch1_mpi 231 udiv_qrnnd( qp[i], n1, n1, np[i], d );
ch1_mpi 232 qp -= qextra_limbs;
ch1_mpi 233
ch1_mpi 234 for( i = qextra_limbs - 1; i >= 0; i-- )
ch1_mpi 235 udiv_qrnnd (qp[i], n1, n1, 0, d);
ch1_mpi 236
ch1_mpi 237 np[0] = n1;
ch1_mpi 238 }
ch1_mpi 239 break;
ch1_mpi 240
ch1_mpi 241 case 2:
ch1_mpi 242 {
ch1_mpi 243 mpi_size_t i;
ch1_mpi 244 mpi_limb_t n1, n0, n2;
ch1_mpi 245 mpi_limb_t d1, d0;
ch1_mpi 246
ch1_mpi 247 np += nsize - 2;
ch1_mpi 248 d1 = dp[1];
ch1_mpi 249 d0 = dp[0];
ch1_mpi 250 n1 = np[1];
ch1_mpi 251 n0 = np[0];
ch1_mpi 252
ch1_mpi 253 if( n1 >= d1 && (n1 > d1 || n0 >= d0) ) {
ch1_mpi 254 sub_ddmmss (n1, n0, n1, n0, d1, d0);
ch1_mpi 255 most_significant_q_limb = 1;
ch1_mpi 256 }
ch1_mpi 257
ch1_mpi 258 for( i = qextra_limbs + nsize - 2 - 1; i >= 0; i-- ) {
ch1_mpi 259 mpi_limb_t q;
ch1_mpi 260 mpi_limb_t r;
ch1_mpi 261
ch1_mpi 262 if( i >= qextra_limbs )
ch1_mpi 263 np--;
ch1_mpi 264 else
ch1_mpi 265 np[0] = 0;
ch1_mpi 266
ch1_mpi 267 if( n1 == d1 ) {
ch1_mpi 268 /* Q should be either 111..111 or 111..110. Need special
ch1_mpi 269 * treatment of this rare case as normal division would
ch1_mpi 270 * give overflow. */
ch1_mpi 271 q = ~(mpi_limb_t)0;
ch1_mpi 272
ch1_mpi 273 r = n0 + d1;
ch1_mpi 274 if( r < d1 ) { /* Carry in the addition? */
ch1_mpi 275 add_ssaaaa( n1, n0, r - d0, np[0], 0, d0 );
ch1_mpi 276 qp[i] = q;
ch1_mpi 277 continue;
ch1_mpi 278 }
ch1_mpi 279 n1 = d0 - (d0 != 0?1:0);
ch1_mpi 280 n0 = -d0;
ch1_mpi 281 }
ch1_mpi 282 else {
ch1_mpi 283 udiv_qrnnd (q, r, n1, n0, d1);
ch1_mpi 284 umul_ppmm (n1, n0, d0, q);
ch1_mpi 285 }
ch1_mpi 286
ch1_mpi 287 n2 = np[0];
ch1_mpi 288 q_test:
ch1_mpi 289 if( n1 > r || (n1 == r && n0 > n2) ) {
ch1_mpi 290 /* The estimated Q was too large. */
ch1_mpi 291 q--;
ch1_mpi 292 sub_ddmmss (n1, n0, n1, n0, 0, d0);
ch1_mpi 293 r += d1;
ch1_mpi 294 if( r >= d1 ) /* If not carry, test Q again. */
ch1_mpi 295 goto q_test;
ch1_mpi 296 }
ch1_mpi 297
ch1_mpi 298 qp[i] = q;
ch1_mpi 299 sub_ddmmss (n1, n0, r, n2, n1, n0);
ch1_mpi 300 }
ch1_mpi 301 np[1] = n1;
ch1_mpi 302 np[0] = n0;
ch1_mpi 303 }
ch1_mpi 304 break;
ch1_mpi 305
ch1_mpi 306 default:
ch1_mpi 307 {
ch1_mpi 308 mpi_size_t i;
ch1_mpi 309 mpi_limb_t dX, d1, n0;
ch1_mpi 310
ch1_mpi 311 np += nsize - dsize;
ch1_mpi 312 dX = dp[dsize - 1];
ch1_mpi 313 d1 = dp[dsize - 2];
ch1_mpi 314 n0 = np[dsize - 1];
ch1_mpi 315
ch1_mpi 316 if( n0 >= dX ) {
ch1_mpi 317 if(n0 > dX || mpihelp_cmp(np, dp, dsize - 1) >= 0 ) {
ch1_mpi 318 mpihelp_sub_n(np, np, dp, dsize);
ch1_mpi 319 n0 = np[dsize - 1];
ch1_mpi 320 most_significant_q_limb = 1;
ch1_mpi 321 }
ch1_mpi 322 }
ch1_mpi 323
ch1_mpi 324 for( i = qextra_limbs + nsize - dsize - 1; i >= 0; i--) {
ch1_mpi 325 mpi_limb_t q;
ch1_mpi 326 mpi_limb_t n1, n2;
ch1_mpi 327 mpi_limb_t cy_limb;
ch1_mpi 328
ch1_mpi 329 if( i >= qextra_limbs ) {
ch1_mpi 330 np--;
ch1_mpi 331 n2 = np[dsize];
ch1_mpi 332 }
ch1_mpi 333 else {
ch1_mpi 334 n2 = np[dsize - 1];
ch1_mpi 335 MPN_COPY_DECR (np + 1, np, dsize - 1);
ch1_mpi 336 np[0] = 0;
ch1_mpi 337 }
ch1_mpi 338
ch1_mpi 339 if( n0 == dX ) {
ch1_mpi 340 /* This might over-estimate q, but it's probably not worth
ch1_mpi 341 * the extra code here to find out. */
ch1_mpi 342 q = ~(mpi_limb_t)0;
ch1_mpi 343 }
ch1_mpi 344 else {
ch1_mpi 345 mpi_limb_t r;
ch1_mpi 346
ch1_mpi 347 udiv_qrnnd(q, r, n0, np[dsize - 1], dX);
ch1_mpi 348 umul_ppmm(n1, n0, d1, q);
ch1_mpi 349
ch1_mpi 350 while( n1 > r || (n1 == r && n0 > np[dsize - 2])) {
ch1_mpi 351 q--;
ch1_mpi 352 r += dX;
ch1_mpi 353 if( r < dX ) /* I.e. "carry in previous addition?" */
ch1_mpi 354 break;
ch1_mpi 355 n1 -= n0 < d1;
ch1_mpi 356 n0 -= d1;
ch1_mpi 357 }
ch1_mpi 358 }
ch1_mpi 359
ch1_mpi 360 /* Possible optimization: We already have (q * n0) and (1 * n1)
ch1_mpi 361 * after the calculation of q. Taking advantage of that, we
ch1_mpi 362 * could make this loop make two iterations less. */
ch1_mpi 363 cy_limb = mpihelp_submul_1(np, dp, dsize, q);
ch1_mpi 364
ch1_mpi 365 if( n2 != cy_limb ) {
ch1_mpi 366 mpihelp_add_n(np, np, dp, dsize);
ch1_mpi 367 q--;
ch1_mpi 368 }
ch1_mpi 369
ch1_mpi 370 qp[i] = q;
ch1_mpi 371 n0 = np[dsize - 1];
ch1_mpi 372 }
ch1_mpi 373 }
ch1_mpi 374 }
ch1_mpi 375
ch1_mpi 376 return most_significant_q_limb;
ch1_mpi 377 }


/****************
 * Divide (DIVIDEND_PTR,,DIVIDEND_SIZE) by DIVISOR_LIMB.
 * Write DIVIDEND_SIZE limbs of quotient at QUOT_PTR.
 * Return the single-limb remainder.
 * There are no constraints on the value of the divisor.
 *
 * QUOT_PTR and DIVIDEND_PTR might point to the same limb.
 */

ch1_mpi 389 mpi_limb_t
ch1_mpi 390 mpihelp_divmod_1( mpi_ptr_t quot_ptr,
ch1_mpi 391 mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
ch1_mpi 392 mpi_limb_t divisor_limb)
ch1_mpi 393 {
ch1_mpi 394 mpi_size_t i;
ch1_mpi 395 mpi_limb_t n1, n0, r;
ch1_mpi 396 int dummy;
ch1_mpi 397
ch1_mpi 398 if( !dividend_size )
ch1_mpi 399 return 0;
ch1_mpi 400
ch1_mpi 401 /* If multiplication is much faster than division, and the
ch1_mpi 402 * dividend is large, pre-invert the divisor, and use
ch1_mpi 403 * only multiplications in the inner loop.
ch1_mpi 404 *
ch1_mpi 405 * This test should be read:
ch1_mpi 406 * Does it ever help to use udiv_qrnnd_preinv?
ch1_mpi 407 * && Does what we save compensate for the inversion overhead?
ch1_mpi 408 */
ch1_mpi 409 if( UDIV_TIME > (2 * UMUL_TIME + 6)
ch1_mpi 410 && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME ) {
ch1_mpi 411 int normalization_steps;
ch1_mpi 412
ch1_mpi 413 count_leading_zeros( normalization_steps, divisor_limb );
ch1_mpi 414 if( normalization_steps ) {
ch1_mpi 415 mpi_limb_t divisor_limb_inverted;
ch1_mpi 416
ch1_mpi 417 divisor_limb <<= normalization_steps;
ch1_mpi 418
ch1_mpi 419 /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
ch1_mpi 420 * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
ch1_mpi 421 * most significant bit (with weight 2**N) implicit.
ch1_mpi 422 */
ch1_mpi 423 /* Special case for DIVISOR_LIMB == 100...000. */
ch1_mpi 424 if( !(divisor_limb << 1) )
ch1_mpi 425 divisor_limb_inverted = ~(mpi_limb_t)0;
ch1_mpi 426 else
ch1_mpi 427 udiv_qrnnd(divisor_limb_inverted, dummy,
ch1_mpi 428 -divisor_limb, 0, divisor_limb);
ch1_mpi 429
ch1_mpi 430 n1 = dividend_ptr[dividend_size - 1];
ch1_mpi 431 r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
ch1_mpi 432
ch1_mpi 433 /* Possible optimization:
ch1_mpi 434 * if (r == 0
ch1_mpi 435 * && divisor_limb > ((n1 << normalization_steps)
ch1_mpi 436 * | (dividend_ptr[dividend_size - 2] >> ...)))
ch1_mpi 437 * ...one division less...
ch1_mpi 438 */
ch1_mpi 439 for( i = dividend_size - 2; i >= 0; i--) {
ch1_mpi 440 n0 = dividend_ptr[i];
ch1_mpi 441 UDIV_QRNND_PREINV( quot_ptr[i + 1], r, r,
ch1_mpi 442 ((n1 << normalization_steps)
ch1_mpi 443 | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
ch1_mpi 444 divisor_limb, divisor_limb_inverted);
ch1_mpi 445 n1 = n0;
ch1_mpi 446 }
ch1_mpi 447 UDIV_QRNND_PREINV( quot_ptr[0], r, r,
ch1_mpi 448 n1 << normalization_steps,
ch1_mpi 449 divisor_limb, divisor_limb_inverted);
ch1_mpi 450 return r >> normalization_steps;
ch1_mpi 451 }
ch1_mpi 452 else {
ch1_mpi 453 mpi_limb_t divisor_limb_inverted;
ch1_mpi 454
ch1_mpi 455 /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
ch1_mpi 456 * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
ch1_mpi 457 * most significant bit (with weight 2**N) implicit.
ch1_mpi 458 */
ch1_mpi 459 /* Special case for DIVISOR_LIMB == 100...000. */
ch1_mpi 460 if( !(divisor_limb << 1) )
ch1_mpi 461 divisor_limb_inverted = ~(mpi_limb_t) 0;
ch1_mpi 462 else
ch1_mpi 463 udiv_qrnnd(divisor_limb_inverted, dummy,
ch1_mpi 464 -divisor_limb, 0, divisor_limb);
ch1_mpi 465
ch1_mpi 466 i = dividend_size - 1;
ch1_mpi 467 r = dividend_ptr[i];
ch1_mpi 468
ch1_mpi 469 if( r >= divisor_limb )
ch1_mpi 470 r = 0;
ch1_mpi 471 else
ch1_mpi 472 quot_ptr[i--] = 0;
ch1_mpi 473
ch1_mpi 474 for( ; i >= 0; i-- ) {
ch1_mpi 475 n0 = dividend_ptr[i];
ch1_mpi 476 UDIV_QRNND_PREINV( quot_ptr[i], r, r,
ch1_mpi 477 n0, divisor_limb, divisor_limb_inverted);
ch1_mpi 478 }
ch1_mpi 479 return r;
ch1_mpi 480 }
ch1_mpi 481 }
ch1_mpi 482 else {
ch1_mpi 483 if(UDIV_NEEDS_NORMALIZATION) {
ch1_mpi 484 int normalization_steps;
ch1_mpi 485
ch1_mpi 486 count_leading_zeros (normalization_steps, divisor_limb);
ch1_mpi 487 if( normalization_steps ) {
ch1_mpi 488 divisor_limb <<= normalization_steps;
ch1_mpi 489
ch1_mpi 490 n1 = dividend_ptr[dividend_size - 1];
ch1_mpi 491 r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
ch1_mpi 492
ch1_mpi 493 /* Possible optimization:
ch1_mpi 494 * if (r == 0
ch1_mpi 495 * && divisor_limb > ((n1 << normalization_steps)
ch1_mpi 496 * | (dividend_ptr[dividend_size - 2] >> ...)))
ch1_mpi 497 * ...one division less...
ch1_mpi 498 */
ch1_mpi 499 for( i = dividend_size - 2; i >= 0; i--) {
ch1_mpi 500 n0 = dividend_ptr[i];
ch1_mpi 501 udiv_qrnnd (quot_ptr[i + 1], r, r,
ch1_mpi 502 ((n1 << normalization_steps)
ch1_mpi 503 | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
ch1_mpi 504 divisor_limb);
ch1_mpi 505 n1 = n0;
ch1_mpi 506 }
ch1_mpi 507 udiv_qrnnd (quot_ptr[0], r, r,
ch1_mpi 508 n1 << normalization_steps,
ch1_mpi 509 divisor_limb);
ch1_mpi 510 return r >> normalization_steps;
ch1_mpi 511 }
ch1_mpi 512 }
ch1_mpi 513 /* No normalization needed, either because udiv_qrnnd doesn't require
ch1_mpi 514 * it, or because DIVISOR_LIMB is already normalized. */
ch1_mpi 515 i = dividend_size - 1;
ch1_mpi 516 r = dividend_ptr[i];
ch1_mpi 517
ch1_mpi 518 if(r >= divisor_limb)
ch1_mpi 519 r = 0;
ch1_mpi 520 else
ch1_mpi 521 quot_ptr[i--] = 0;
ch1_mpi 522
ch1_mpi 523 for(; i >= 0; i--) {
ch1_mpi 524 n0 = dividend_ptr[i];
ch1_mpi 525 udiv_qrnnd( quot_ptr[i], r, r, n0, divisor_limb );
ch1_mpi 526 }
ch1_mpi 527 return r;
ch1_mpi 528 }
ch1_mpi 529 }