int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                const BN_ULONG *np, const BN_ULONG *n0, int num)
{
-    int bn_mul_mont_fpu64(BN_ULONG *rp, const BN_ULONG *ap,
-                          const BN_ULONG *bp, const BN_ULONG *np,
-                          const BN_ULONG *n0, int num);
    int bn_mul_mont_int(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                        const BN_ULONG *np, const BN_ULONG *n0, int num);
+    int bn_mul4x_mont_int(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
+                          const BN_ULONG *np, const BN_ULONG *n0, int num);
-    if (sizeof(size_t) == 4) {
-# if 1 || (defined(__APPLE__) && defined(__MACH__))
-        if (num >= 8 && (num & 3) == 0 && (OPENSSL_ppccap_P & PPC_FPU64))
-            return bn_mul_mont_fpu64(rp, ap, bp, np, n0, num);
-# else
-        /*
-         * boundary of 32 was experimentally determined on Linux 2.6.22,
-         * might have to be adjusted on AIX...
-         */
-        if (num >= 32 && (num & 3) == 0 && (OPENSSL_ppccap_P & PPC_FPU64)) {
-            sigset_t oset;
-            int ret;
-
-            sigprocmask(SIG_SETMASK, &all_masked, &oset);
-            ret = bn_mul_mont_fpu64(rp, ap, bp, np, n0, num);
-            sigprocmask(SIG_SETMASK, &oset, NULL);
-
-            return ret;
-        }
-# endif
-    } else if ((OPENSSL_ppccap_P & PPC_FPU64))
-        /*
-         * this is a "must" on POWER6, but run-time detection is not
-         * implemented yet...
-         */
-        return bn_mul_mont_fpu64(rp, ap, bp, np, n0, num);
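+    /* returning zero tells the caller to fall back to the generic Montgomery code */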
+    if (num < 4)
+        return 0;
+
+    if ((num & 3) == 0)
+        return bn_mul4x_mont_int(rp, ap, bp, np, n0, num);
+
+    /*
+     * There used to be an optional call to bn_mul_mont_fpu64 here, but
+     * the subroutine above is faster on contemporary processors. There
+     * might still be older processors, POWER6 perhaps, on which the FPU
+     * code path would win, but there was no opportunity to find out...
+     */
    return bn_mul_mont_int(rp, ap, bp, np, n0, num);
}
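
For context, the zero/non-zero return convention above follows the usual OpenSSL assembly-glue contract: a non-zero return means the assembly path performed the multiplication, while zero tells the caller (the Montgomery code in crypto/bn/bn_mont.c) to fall back to its portable implementation. The sketch below shows a caller honoring that contract; generic_mont_mul() is a hypothetical placeholder for the portable fallback, not an OpenSSL function.

/*
 * Caller-side sketch of the bn_mul_mont() contract described above.
 * generic_mont_mul() is a hypothetical stand-in for the portable
 * fallback and is not part of OpenSSL.
 */
static int mont_mul_dispatch(BN_ULONG *rp, const BN_ULONG *ap,
                             const BN_ULONG *bp, const BN_ULONG *np,
                             const BN_ULONG *n0, int num)
{
    if (bn_mul_mont(rp, ap, bp, np, n0, num))
        return 1;               /* assembly path accepted this length */

    /* num < 4, or the assembly otherwise declined: use the generic code */
    return generic_mont_mul(rp, ap, bp, np, n0, num);
}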