From: Andy Polyakov Date: Mon, 20 May 2013 13:48:13 +0000 (+0200) Subject: Add BN support for SPARC VIS3 and T4 [from master]. X-Git-Tag: OpenSSL_1_0_2-beta1~362 X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=c92989d2b7e19daa976189be2cb51191506637d3;p=openssl Add BN support for SPARC VIS3 and T4 [from master]. --- diff --git a/Configure b/Configure index 7d9ef8e79a..6e6852f391 100755 --- a/Configure +++ b/Configure @@ -130,7 +130,7 @@ my $x86_elf_asm="$x86_asm:elf"; my $x86_64_asm="x86_64cpuid.o:x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o::aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o::md5-x86_64.o:sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o::rc4-x86_64.o rc4-md5-x86_64.o:::wp-x86_64.o:cmll-x86_64.o cmll_misc.o:ghash-x86_64.o aesni-gcm-x86_64.o:"; my $ia64_asm="ia64cpuid.o:bn-ia64.o ia64-mont.o::aes_core.o aes_cbc.o aes-ia64.o::md5-ia64.o:sha1-ia64.o sha256-ia64.o sha512-ia64.o::rc4-ia64.o rc4_skey.o:::::ghash-ia64.o::void"; -my $sparcv9_asm="sparcv9cap.o sparccpuid.o:bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o:des_enc-sparc.o fcrypt_b.o dest4-sparcv9.o:aes_core.o aes_cbc.o aes-sparcv9.o aest4-sparcv9.o::md5-sparcv9.o:sha1-sparcv9.o sha256-sparcv9.o sha512-sparcv9.o::::::camellia.o cmll_misc.o cmll_cbc.o cmllt4-sparcv9.o:ghash-sparcv9.o::void"; +my $sparcv9_asm="sparcv9cap.o sparccpuid.o:bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o vis3-mont.o sparct4-mont.o sparcv9-gf2m.o:des_enc-sparc.o fcrypt_b.o dest4-sparcv9.o:aes_core.o aes_cbc.o aes-sparcv9.o aest4-sparcv9.o::md5-sparcv9.o:sha1-sparcv9.o sha256-sparcv9.o sha512-sparcv9.o::::::camellia.o cmll_misc.o cmll_cbc.o cmllt4-sparcv9.o:ghash-sparcv9.o::void"; my $sparcv8_asm=":sparcv8.o:des_enc-sparc.o fcrypt_b.o:::::::::::::void"; my $alpha_asm="alphacpuid.o:bn_asm.o alpha-mont.o:::::sha1-alpha.o:::::::ghash-alpha.o::void"; my $mips64_asm=":bn-mips.o mips-mont.o::aes_cbc.o aes-mips.o:::sha1-mips.o sha256-mips.o sha512-mips.o::::::::"; diff --git a/TABLE b/TABLE index d0e857c0eb..6e4e16cde5 100644 --- a/TABLE +++ b/TABLE @@ -174,7 +174,7 @@ $sys_id = $lflags = $bn_ops = BN_LLONG RC2_CHAR RC4_CHUNK DES_INT DES_PTR DES_RISC2 BF_PTR $cpuid_obj = sparcv9cap.o sparccpuid.o -$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o +$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o vis3-mont.o sparct4-mont.o sparcv9-gf2m.o $des_obj = des_enc-sparc.o fcrypt_b.o dest4-sparcv9.o $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o aest4-sparcv9.o $bf_obj = @@ -2715,7 +2715,7 @@ $sys_id = ULTRASPARC $lflags = -lsocket -lnsl -ldl $bn_ops = BN_LLONG RC4_CHAR RC4_CHUNK_LL DES_PTR DES_RISC1 DES_UNROLL BF_PTR $cpuid_obj = sparcv9cap.o sparccpuid.o -$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o +$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o vis3-mont.o sparct4-mont.o sparcv9-gf2m.o $des_obj = des_enc-sparc.o fcrypt_b.o dest4-sparcv9.o $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o aest4-sparcv9.o $bf_obj = @@ -2748,7 +2748,7 @@ $sys_id = ULTRASPARC $lflags = -lsocket -lnsl -ldl $bn_ops = BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR $cpuid_obj = sparcv9cap.o sparccpuid.o -$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o +$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o vis3-mont.o sparct4-mont.o sparcv9-gf2m.o $des_obj = des_enc-sparc.o fcrypt_b.o dest4-sparcv9.o $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o aest4-sparcv9.o $bf_obj = @@ -4431,7 +4431,7 @@ $sys_id = ULTRASPARC $lflags = -ldl $bn_ops = BN_LLONG RC4_CHAR RC4_CHUNK 
DES_UNROLL BF_PTR $cpuid_obj = sparcv9cap.o sparccpuid.o -$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o +$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o vis3-mont.o sparct4-mont.o sparcv9-gf2m.o $des_obj = des_enc-sparc.o fcrypt_b.o dest4-sparcv9.o $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o aest4-sparcv9.o $bf_obj = @@ -4662,7 +4662,7 @@ $sys_id = ULTRASPARC $lflags = -ldl $bn_ops = BN_LLONG RC4_CHAR RC4_CHUNK DES_INT DES_PTR DES_RISC1 DES_UNROLL BF_PTR $cpuid_obj = sparcv9cap.o sparccpuid.o -$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o +$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o vis3-mont.o sparct4-mont.o sparcv9-gf2m.o $des_obj = des_enc-sparc.o fcrypt_b.o dest4-sparcv9.o $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o aest4-sparcv9.o $bf_obj = @@ -5520,7 +5520,7 @@ $sys_id = ULTRASPARC $lflags = -lsocket -lnsl -ldl $bn_ops = BN_LLONG RC4_CHAR RC4_CHUNK_LL DES_PTR DES_RISC1 DES_UNROLL BF_PTR $cpuid_obj = sparcv9cap.o sparccpuid.o -$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o +$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o vis3-mont.o sparct4-mont.o sparcv9-gf2m.o $des_obj = des_enc-sparc.o fcrypt_b.o dest4-sparcv9.o $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o aest4-sparcv9.o $bf_obj = @@ -5553,7 +5553,7 @@ $sys_id = ULTRASPARC $lflags = -lsocket -lnsl -ldl $bn_ops = BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR $cpuid_obj = sparcv9cap.o sparccpuid.o -$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o +$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o vis3-mont.o sparct4-mont.o sparcv9-gf2m.o $des_obj = des_enc-sparc.o fcrypt_b.o dest4-sparcv9.o $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o aest4-sparcv9.o $bf_obj = @@ -5652,7 +5652,7 @@ $sys_id = ULTRASPARC $lflags = -lsocket -lnsl -ldl $bn_ops = BN_LLONG RC4_CHAR RC4_CHUNK DES_INT DES_PTR DES_RISC1 DES_UNROLL BF_PTR $cpuid_obj = sparcv9cap.o sparccpuid.o -$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o +$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o vis3-mont.o sparct4-mont.o sparcv9-gf2m.o $des_obj = des_enc-sparc.o fcrypt_b.o dest4-sparcv9.o $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o aest4-sparcv9.o $bf_obj = @@ -5685,7 +5685,7 @@ $sys_id = ULTRASPARC $lflags = -lsocket -lnsl -ldl $bn_ops = BN_LLONG RC4_CHAR RC4_CHUNK DES_INT DES_PTR DES_RISC1 DES_UNROLL BF_PTR $cpuid_obj = sparcv9cap.o sparccpuid.o -$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o +$bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o vis3-mont.o sparct4-mont.o sparcv9-gf2m.o $des_obj = des_enc-sparc.o fcrypt_b.o dest4-sparcv9.o $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o aest4-sparcv9.o $bf_obj = diff --git a/crypto/bn/Makefile b/crypto/bn/Makefile index 672773454c..9ab4a2f0b7 100644 --- a/crypto/bn/Makefile +++ b/crypto/bn/Makefile @@ -77,6 +77,12 @@ sparcv9a-mont.s: asm/sparcv9a-mont.pl $(PERL) asm/sparcv9a-mont.pl $(CFLAGS) > $@ sparcv9-mont.s: asm/sparcv9-mont.pl $(PERL) asm/sparcv9-mont.pl $(CFLAGS) > $@ +vis3-mont.s: asm/vis3-mont.pl + $(PERL) asm/vis3-mont.pl $(CFLAGS) > $@ +sparct4-mont.S: asm/sparct4-mont.pl + $(PERL) asm/sparct4-mont.pl $(CFLAGS) > $@ +sparcv9-gf2m.S: asm/sparcv9-gf2m.pl + $(PERL) asm/sparcv9-gf2m.pl $(CFLAGS) > $@ bn-mips3.o: asm/mips3.s @if [ "$(CC)" = "gcc" ]; then \ diff --git a/crypto/bn/asm/sparct4-mont.pl b/crypto/bn/asm/sparct4-mont.pl new file mode 100755 index 0000000000..d8e544fd56 --- /dev/null +++ b/crypto/bn/asm/sparct4-mont.pl @@ -0,0 +1,1201 @@ +#!/usr/bin/env perl + +# ==================================================================== +# 
Written by David S. Miller and Andy Polyakov. The module is
+# licensed under 2-clause BSD license. November 2012. All rights
+# reserved.
+# ====================================================================
+
+######################################################################
+# Montgomery squaring-n-multiplication module for SPARC T4.
+#
+# The module consists of three parts:
+#
+# 1) collection of "single-op" subroutines that perform a single
+#    operation, Montgomery squaring or multiplication, on 512-,
+#    1024-, 1536- and 2048-bit operands;
+# 2) collection of "multi-op" subroutines that perform 5 squaring and
+#    1 multiplication operations on operands of the above lengths;
+# 3) fall-back and helper VIS3 subroutines.
+#
+# RSA sign is dominated by the multi-op subroutines, while RSA verify
+# and DSA are dominated by the single-op ones. A special note about
+# the 4096-bit RSA verify result: those operands are too long for the
+# dedicated hardware instructions and are handled by the VIS3 code
+# instead, which is why no improvement shows up there. It can surely
+# be improved [by deploying the 'mpmul' instruction], maybe in the
+# future...
+#
+# Performance improvement.
+#
+# 64-bit process, VIS3:
+#                     sign      verify    sign/s verify/s
+# rsa 1024 bits   0.000633s  0.000033s   1578.9  30513.3
+# rsa 2048 bits   0.003297s  0.000116s    303.3   8585.8
+# rsa 4096 bits   0.026000s  0.000387s     38.5   2587.0
+# dsa 1024 bits   0.000301s  0.000332s   3323.7   3013.9
+# dsa 2048 bits   0.001056s  0.001233s    946.9    810.8
+#
+# 64-bit process, this module:
+#                     sign      verify    sign/s verify/s
+# rsa 1024 bits   0.000341s  0.000021s   2931.5  46873.8
+# rsa 2048 bits   0.001244s  0.000044s    803.9  22569.1
+# rsa 4096 bits   0.006203s  0.000387s    161.2   2586.3
+# dsa 1024 bits   0.000179s  0.000195s   5573.9   5115.6
+# dsa 2048 bits   0.000311s  0.000350s   3212.3   2856.6
+#
+######################################################################
+# 32-bit process, VIS3:
+#                     sign      verify    sign/s verify/s
+# rsa 1024 bits   0.000675s  0.000033s   1480.9  30159.0
+# rsa 2048 bits   0.003383s  0.000118s    295.6   8499.9
+# rsa 4096 bits   0.026178s  0.000394s     38.2   2541.3
+# dsa 1024 bits   0.000326s  0.000343s   3070.0   2918.8
+# dsa 2048 bits   0.001121s  0.001291s    891.9    774.4
+#
+# 32-bit process, this module:
+#                     sign      verify    sign/s verify/s
+# rsa 1024 bits   0.000386s  0.000022s   2589.6  45704.9
+# rsa 2048 bits   0.001335s  0.000046s    749.3  21766.8
+# rsa 4096 bits   0.006390s  0.000393s    156.5   2544.8
+# dsa 1024 bits   0.000208s  0.000204s   4817.6   4896.6
+# dsa 2048 bits   0.000345s  0.000364s   2898.8   2747.3
+#
+# 32-bit code is prone to performance degradation as the interrupt
+# rate dispatched to the CPU executing it grows. This is because in
+# the standard handling of an interrupt in a 32-bit process context
+# the upper halves of most integer registers used as input or output
+# are zeroed. This renders the result invalid, and the operation has
+# to be re-run. If the CPU is "bothered" by timer interrupts only,
+# the penalty is hardly measurable. But to mitigate this problem for
+# higher interrupt rates, contemporary Linux kernels recognize the
+# biased stack even in a 32-bit process context and preserve full
+# register contents. See
+# http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;h=517ffce4e1a03aea979fe3a18a3dd1761a24fafb
+# for details.
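The fall-back contract described above is worth spelling out before the assembly: every T4 kernel works on a fixed operand size (8, 16, 24 or 32 64-bit limbs), and it returns failure if a 32-bit-process interrupt clobbered the upper register halves, in which case the caller re-runs it once and only then drops to the VIS3 routine. The C sketch below condenses that dispatch; it mirrors the logic this commit adds to crypto/sparcv9cap.c near the end of the diff, and the wrapper name is illustrative, not part of the patch. Note that num counts 32-bit BN_ULONG words (SPARC configurations use BN_LLONG, see the TABLE hunks above), so num/16-1 selects the 8/16/24/32-limb kernel.

    /* Illustrative sketch only -- the real dispatch lives in
     * crypto/sparcv9cap.c (end of this diff).  It relies on symbols this
     * patch introduces: bn_mul_mont_t4_{8,16,24,32}, bn_mul_mont_vis3 and
     * OPENSSL_sparcv9cap_P[]. */
    static int mont_mul_dispatch(BN_ULONG *rp, const BN_ULONG *ap,
                                 const BN_ULONG *bp, const BN_ULONG *np,
                                 const BN_ULONG *n0, int num)
    {
        typedef int (*bn_mul_mont_f)(BN_ULONG *, const BN_ULONG *,
                                     const BN_ULONG *, const BN_ULONG *,
                                     const BN_ULONG *);
        static const bn_mul_mont_f funcs[4] = {
            bn_mul_mont_t4_8,  bn_mul_mont_t4_16,
            bn_mul_mont_t4_24, bn_mul_mont_t4_32 };

        if ((num & 15) == 0 && num <= 64 &&        /* 512..2048-bit moduli */
            (OPENSSL_sparcv9cap_P[1] & (CFR_MONTMUL | CFR_MONTSQR)) ==
                                       (CFR_MONTMUL | CFR_MONTSQR)) {
            bn_mul_mont_f worker = funcs[num / 16 - 1];

            if ((*worker)(rp, ap, bp, np, n0)) return 1;
            if ((*worker)(rp, ap, bp, np, n0)) return 1; /* retry once */
        }
        return bn_mul_mont_vis3(rp, ap, bp, np, n0, num); /* VIS3 fall-back */
    }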
+ +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +push(@INC,"${dir}","${dir}../../perlasm"); +require "sparcv9_modes.pl"; + +$code.=<<___; +#include "sparc_arch.h" + +#ifdef __arch64__ +.register %g2,#scratch +.register %g3,#scratch +#endif + +.section ".text",#alloc,#execinstr + +#ifdef __PIC__ +SPARC_PIC_THUNK(%g1) +#endif +___ + +######################################################################## +# Register layout for mont[mul|sqr] instructions. +# For details see "Oracle SPARC Architecture 2011" manual at +# http://www.oracle.com/technetwork/server-storage/sun-sparc-enterprise/documentation/. +# +my @R=map("%f".2*$_,(0..11,30,31,12..29)); +my @N=(map("%l$_",(0..7)),map("%o$_",(0..5))); @N=(@N,@N,@N[0..3]); +my @B=(map("%o$_",(0..5)),@N[0..13],@N[0..11]); +my @A=(@N[0..13],@R[14..31]); + +######################################################################## +# int bn_mul_mont_t4_$NUM(u64 *rp,const u64 *ap,const u64 *bp, +# const u64 *np,const BN_ULONG *n0); +# +sub generate_bn_mul_mont_t4() { +my $NUM=shift; +my ($rp,$ap,$bp,$np,$sentinel)=map("%g$_",(1..5)); + +$code.=<<___; +.globl bn_mul_mont_t4_$NUM +.align 32 +bn_mul_mont_t4_$NUM: +#ifdef __arch64__ + mov 0,$sentinel + mov -128,%g4 +#elif defined(SPARCV9_64BIT_STACK) + SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5) + ld [%g1+0],%g1 ! OPENSSL_sparcv9_P[0] + mov -2047,%g4 + and %g1,SPARCV9_64BIT_STACK,%g1 + movrz %g1,0,%g4 + mov -1,$sentinel + add %g4,-128,%g4 +#else + mov -1,$sentinel + mov -128,%g4 +#endif + sllx $sentinel,32,$sentinel + save %sp,%g4,%sp +#ifndef __arch64__ + save %sp,-128,%sp ! warm it up + save %sp,-128,%sp + save %sp,-128,%sp + save %sp,-128,%sp + save %sp,-128,%sp + save %sp,-128,%sp + restore + restore + restore + restore + restore + restore +#endif + and %sp,1,%g4 + or $sentinel,%fp,%fp + or %g4,$sentinel,$sentinel + + ! copy arguments to global registers + mov %i0,$rp + mov %i1,$ap + mov %i2,$bp + mov %i3,$np + ld [%i4+0],%f1 ! 
load *n0 + ld [%i4+4],%f0 + fsrc2 %f0,%f60 +___ + +# load ap[$NUM] ######################################################## +$code.=<<___; + save %sp,-128,%sp; or $sentinel,%fp,%fp +___ +for($i=0; $i<14 && $i<$NUM; $i++) { +my $lo=$i<13?@A[$i+1]:"%o7"; +$code.=<<___; + ld [$ap+$i*8+0],$lo + ld [$ap+$i*8+4],@A[$i] + sllx @A[$i],32,@A[$i] + or $lo,@A[$i],@A[$i] +___ +} +for(; $i<$NUM; $i++) { +my ($hi,$lo)=("%f".2*($i%4),"%f".(2*($i%4)+1)); +$code.=<<___; + ld [$ap+$i*8+0],$lo + ld [$ap+$i*8+4],$hi + fsrc2 $hi,@A[$i] +___ +} +# load np[$NUM] ######################################################## +$code.=<<___; + save %sp,-128,%sp; or $sentinel,%fp,%fp +___ +for($i=0; $i<14 && $i<$NUM; $i++) { +my $lo=$i<13?@N[$i+1]:"%o7"; +$code.=<<___; + ld [$np+$i*8+0],$lo + ld [$np+$i*8+4],@N[$i] + sllx @N[$i],32,@N[$i] + or $lo,@N[$i],@N[$i] +___ +} +$code.=<<___; + save %sp,-128,%sp; or $sentinel,%fp,%fp +___ +for(; $i<28 && $i<$NUM; $i++) { +my $lo=$i<27?@N[$i+1]:"%o7"; +$code.=<<___; + ld [$np+$i*8+0],$lo + ld [$np+$i*8+4],@N[$i] + sllx @N[$i],32,@N[$i] + or $lo,@N[$i],@N[$i] +___ +} +$code.=<<___; + save %sp,-128,%sp; or $sentinel,%fp,%fp +___ +for(; $i<$NUM; $i++) { +my $lo=($i<$NUM-1)?@N[$i+1]:"%o7"; +$code.=<<___; + ld [$np+$i*8+0],$lo + ld [$np+$i*8+4],@N[$i] + sllx @N[$i],32,@N[$i] + or $lo,@N[$i],@N[$i] +___ +} +$code.=<<___; + cmp $ap,$bp + be SIZE_T_CC,.Lmsquare_$NUM + nop +___ + +# load bp[$NUM] ######################################################## +for($i=0; $i<6 && $i<$NUM; $i++) { +my $lo=$i<5?@B[$i+1]:"%o7"; +$code.=<<___; + ld [$bp+$i*8+0],$lo + ld [$bp+$i*8+4],@B[$i] + sllx @B[$i],32,@B[$i] + or $lo,@B[$i],@B[$i] +___ +} +$code.=<<___; + save %sp,-128,%sp; or $sentinel,%fp,%fp +___ +for(; $i<20 && $i<$NUM; $i++) { +my $lo=$i<19?@B[$i+1]:"%o7"; +$code.=<<___; + ld [$bp+$i*8+0],$lo + ld [$bp+$i*8+4],@B[$i] + sllx @B[$i],32,@B[$i] + or $lo,@B[$i],@B[$i] +___ +} +$code.=<<___; + save %sp,-128,%sp; or $sentinel,%fp,%fp +___ +for(; $i<$NUM; $i++) { +my $lo=($i<$NUM-1)?@B[$i+1]:"%o7"; +$code.=<<___; + ld [$bp+$i*8+0],$lo + ld [$bp+$i*8+4],@B[$i] + sllx @B[$i],32,@B[$i] + or $lo,@B[$i],@B[$i] +___ +} +# magic ################################################################ +$code.=<<___; + .word 0x81b02920+$NUM-1 ! montmul $NUM-1 +.Lmresume_$NUM: + fbu,pn %fcc3,.Lmabort_$NUM +#ifndef __arch64__ + and %fp,$sentinel,$sentinel + brz,pn $sentinel,.Lmabort_$NUM +#endif + nop +#ifdef __arch64__ + restore + restore + restore + restore + restore +#else + restore; and %fp,$sentinel,$sentinel + restore; and %fp,$sentinel,$sentinel + restore; and %fp,$sentinel,$sentinel + restore; and %fp,$sentinel,$sentinel + brz,pn $sentinel,.Lmabort1_$NUM + restore +#endif +___ + +# save tp[$NUM] ######################################################## +for($i=0; $i<14 && $i<$NUM; $i++) { +$code.=<<___; + movxtod @A[$i],@R[$i] +___ +} +$code.=<<___; +#ifdef __arch64__ + restore +#else + and %fp,$sentinel,$sentinel + restore + and $sentinel,1,%o7 + and %fp,$sentinel,$sentinel + srl %fp,0,%fp ! just in case? + or %o7,$sentinel,$sentinel + brz,a,pn $sentinel,.Lmdone_$NUM + mov 0,%i0 ! return failure +#endif +___ +for($i=0; $i<12 && $i<$NUM; $i++) { +@R[$i] =~ /%f([0-9]+)/; +my $lo = "%f".($1+1); +$code.=<<___; + st $lo,[$rp+$i*8+0] + st @R[$i],[$rp+$i*8+4] +___ +} +for(; $i<$NUM; $i++) { +my ($hi,$lo)=("%f".2*($i%4),"%f".(2*($i%4)+1)); +$code.=<<___; + fsrc2 @R[$i],$hi + st $lo,[$rp+$i*8+0] + st $hi,[$rp+$i*8+4] +___ +} +$code.=<<___; + mov 1,%i0 ! 
return success +.Lmdone_$NUM: + ret + restore + +.Lmabort_$NUM: + restore + restore + restore + restore + restore +.Lmabort1_$NUM: + restore + + mov 0,%i0 ! return failure + ret + restore + +.align 32 +.Lmsquare_$NUM: + save %sp,-128,%sp; or $sentinel,%fp,%fp + save %sp,-128,%sp; or $sentinel,%fp,%fp + .word 0x81b02940+$NUM-1 ! montsqr $NUM-1 + ba .Lmresume_$NUM + nop +.type bn_mul_mont_t4_$NUM, #function +.size bn_mul_mont_t4_$NUM, .-bn_mul_mont_t4_$NUM +___ +} + +for ($i=8;$i<=32;$i+=8) { + &generate_bn_mul_mont_t4($i); +} + +######################################################################## +# +sub load_fcc() { +my ($ptbl,$pwr,$tmp)=@_; +$code.=<<___; + sethi %hi(.Lmagic-1f),$tmp +1: call .+8 + add %o7, $tmp, %o7 + inc %lo(.Lmagic-1b),%o7 + and $pwr, 7<<2, $tmp ! offset within "magic table" + add $tmp, %o7, %o7 + and $pwr, 3, $tmp + sll $tmp, 3, $tmp ! offset within first cache line + add $tmp, $ptbl, $ptbl ! of the pwrtbl + + ! "magic table" is organized in such way that below comparisons + ! make %fcc3:%fcc2:%fcc1:%fcc0 form a byte of 1s with one 0, + ! e.g. 0b11011111, with 0 denoting relevant cache line. + ld [%o7+0], %f0 ! load column + ld [%o7+32], %f1 + ld [%o7+64], %f2 + fcmps %fcc0, %f0, %f1 + ld [%o7+96], %f3 + fcmps %fcc1, %f1, %f2 + fcmps %fcc2, %f2, %f3 + fcmps %fcc3, %f3, %f0 +___ +} +sub load_f16() { +my $ptbl=shift; +$code.=<<___; + ldd [$ptbl+0*32],%f0 ! load all cache lines + ldd [$ptbl+1*32],%f2 + ldd [$ptbl+2*32],%f4 + fmovdg %fcc0,%f0,%f16 ! pick one value + ldd [$ptbl+3*32],%f6 + fmovdl %fcc0,%f2,%f16 + ldd [$ptbl+4*32],%f8 + fmovdg %fcc1,%f4,%f16 + ldd [$ptbl+5*32],%f10 + fmovdl %fcc1,%f6,%f16 + ldd [$ptbl+6*32],%f12 + fmovdg %fcc2,%f8,%f16 + ldd [$ptbl+7*32],%f14 + fmovdl %fcc2,%f10,%f16 + fmovdg %fcc3,%f12,%f16 + fmovdl %fcc3,%f14,%f16 + add $ptbl,8*32,$ptbl +___ +} + +######################################################################## +# int bn_pwr5_mont_t4_$NUM(u64 *tp,const u64 *np,const BN_ULONG *n0, +# const u64 *pwrtbl,int pwr); +# +sub generate_bn_pwr5_mont_t4() { +my $NUM=shift; +my ($tp,$np,$pwrtbl,$pwr,$sentinel)=map("%g$_",(1..5)); + +$code.=<<___; +.globl bn_pwr5_mont_t4_$NUM +.align 32 +bn_pwr5_mont_t4_$NUM: +#ifdef __arch64__ + mov 0,$sentinel + mov -128,%g4 +#elif defined(SPARCV9_64BIT_STACK) + SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5) + ld [%g1+0],%g1 ! OPENSSL_sparcv9_P[0] + mov -2047,%g4 + and %g1,SPARCV9_64BIT_STACK,%g1 + movrz %g1,0,%g4 + mov -1,$sentinel + add %g4,-128,%g4 +#else + mov -1,$sentinel + mov -128,%g4 +#endif + sllx $sentinel,32,$sentinel + save %sp,%g4,%sp +#ifndef __arch64__ + save %sp,-128,%sp ! warm it up + save %sp,-128,%sp + save %sp,-128,%sp + save %sp,-128,%sp + save %sp,-128,%sp + save %sp,-128,%sp + restore + restore + restore + restore + restore + restore +#endif + and %sp,1,%g4 + or $sentinel,%fp,%fp + or %g4,$sentinel,$sentinel + + ! copy arguments to global registers + mov %i0,$tp + mov %i1,$np + ld [%i2+0],%f1 ! 
load *n0 + ld [%i2+4],%f0 + mov %i3,$pwrtbl + mov %i4,$pwr + fsrc2 %f0,%f60 +___ + +# load tp[$NUM] ######################################################## +$code.=<<___; + save %sp,-128,%sp; or $sentinel,%fp,%fp +___ +for($i=0; $i<14 && $i<$NUM; $i++) { +$code.=<<___; + ldx [$tp+$i*8],@A[$i] +___ +} +for(; $i<$NUM; $i++) { +$code.=<<___; + ldd [$tp+$i*8],@A[$i] +___ +} +# load np[$NUM] ######################################################## +$code.=<<___; + save %sp,-128,%sp; or $sentinel,%fp,%fp +___ +for($i=0; $i<14 && $i<$NUM; $i++) { +$code.=<<___; + ldx [$np+$i*8],@N[$i] +___ +} +$code.=<<___; + save %sp,-128,%sp; or $sentinel,%fp,%fp +___ +for(; $i<28 && $i<$NUM; $i++) { +$code.=<<___; + ldx [$np+$i*8],@N[$i] +___ +} +$code.=<<___; + save %sp,-128,%sp; or $sentinel,%fp,%fp +___ +for(; $i<$NUM; $i++) { +$code.=<<___; + ldx [$np+$i*8],@N[$i] +___ +} +# load pwrtbl[pwr] ######################################################## + &load_fcc($pwrtbl,$pwr,@B[0]); +for($i=0; $i<6 && $i<$NUM; $i++) { + &load_f16($pwrtbl); +$code.=<<___; + movdtox %f16,@B[$i] +___ +} +$code.=<<___; + save %sp,-128,%sp; or $sentinel,%fp,%fp +___ +for(; $i<20 && $i<$NUM; $i++) { + &load_f16($pwrtbl); +$code.=<<___; + movdtox %f16,@B[$i] +___ +} +$code.=<<___; + save %sp,-128,%sp; or $sentinel,%fp,%fp +___ +for(; $i<$NUM; $i++) { + &load_f16($pwrtbl); +$code.=<<___; + movdtox %f16,@B[$i] +___ +} + +# magic ################################################################ +for($i=0; $i<5; $i++) { +$code.=<<___; + .word 0x81b02940+$NUM-1 ! montsqr $NUM-1 + fbu,pn %fcc3,.Labort_$NUM +#ifndef __arch64__ + and %fp,$sentinel,$sentinel + brz,pn $sentinel,.Labort_$NUM +#endif + nop +___ +} +$code.=<<___; + .word 0x81b02920+$NUM-1 ! montmul $NUM-1 + fbu,pn %fcc3,.Labort_$NUM +#ifndef __arch64__ + and %fp,$sentinel,$sentinel + brz,pn $sentinel,.Labort_$NUM +#endif + nop + +#ifdef __arch64__ + restore + restore + restore + restore + restore +#else + restore; and %fp,$sentinel,$sentinel + restore; and %fp,$sentinel,$sentinel + restore; and %fp,$sentinel,$sentinel + restore; and %fp,$sentinel,$sentinel + brz,pn $sentinel,.Labort1_$NUM + restore +#endif +___ + +# save tp[$NUM] ######################################################## +for($i=0; $i<14 && $i<$NUM; $i++) { +$code.=<<___; + movxtod @A[$i],@R[$i] +___ +} +$code.=<<___; +#ifdef __arch64__ + restore +#else + and %fp,$sentinel,$sentinel + restore + and $sentinel,1,%o7 + and %fp,$sentinel,$sentinel + srl %fp,0,%fp ! just in case? + or %o7,$sentinel,$sentinel + brz,a,pn $sentinel,.Ldone_$NUM + mov 0,%i0 ! return failure +#endif +___ +for($i=0; $i<$NUM; $i++) { +$code.=<<___; + std @R[$i],[$tp+$i*8] +___ +} +$code.=<<___; + mov 1,%i0 ! return success +.Ldone_$NUM: + ret + restore + +.Labort_$NUM: + restore + restore + restore + restore + restore +.Labort1_$NUM: + restore + + mov 0,%i0 ! 
return failure + ret + restore +.type bn_pwr5_mont_t4_$NUM, #function +.size bn_pwr5_mont_t4_$NUM, .-bn_pwr5_mont_t4_$NUM +___ +} + +for ($i=8;$i<=32;$i+=8) { + &generate_bn_pwr5_mont_t4($i); +} + +{ +######################################################################## +# Fall-back subroutines +# +# copy of bn_mul_mont_vis3 adjusted for vectors of 64-bit values +# +($n0,$m0,$m1,$lo0,$hi0, $lo1,$hi1,$aj,$alo,$nj,$nlo,$tj)= + (map("%g$_",(1..5)),map("%o$_",(0..5,7))); + +# int bn_mul_mont( +$rp="%o0"; # u64 *rp, +$ap="%o1"; # const u64 *ap, +$bp="%o2"; # const u64 *bp, +$np="%o3"; # const u64 *np, +$n0p="%o4"; # const BN_ULONG *n0, +$num="%o5"; # int num); # caller ensures that num is >=3 +$code.=<<___; +.globl bn_mul_mont_t4 +.align 32 +bn_mul_mont_t4: + add %sp, STACK_BIAS, %g4 ! real top of stack + sll $num, 3, $num ! size in bytes + add $num, 63, %g1 + andn %g1, 63, %g1 ! buffer size rounded up to 64 bytes + sub %g4, %g1, %g1 + andn %g1, 63, %g1 ! align at 64 byte + sub %g1, STACK_FRAME, %g1 ! new top of stack + sub %g1, %g4, %g1 + + save %sp, %g1, %sp +___ +# +-------------------------------+<----- %sp +# . . +# +-------------------------------+<----- aligned at 64 bytes +# | __int64 tmp[0] | +# +-------------------------------+ +# . . +# . . +# +-------------------------------+<----- aligned at 64 bytes +# . . +($rp,$ap,$bp,$np,$n0p,$num)=map("%i$_",(0..5)); +($t0,$t1,$t2,$t3,$cnt,$tp,$bufsz)=map("%l$_",(0..7)); +($ovf,$i)=($t0,$t1); +$code.=<<___; + ld [$n0p+0], $t0 ! pull n0[0..1] value + ld [$n0p+4], $t1 + add %sp, STACK_BIAS+STACK_FRAME, $tp + ldx [$bp+0], $m0 ! m0=bp[0] + sllx $t1, 32, $n0 + add $bp, 8, $bp + or $t0, $n0, $n0 + + ldx [$ap+0], $aj ! ap[0] + + mulx $aj, $m0, $lo0 ! ap[0]*bp[0] + umulxhi $aj, $m0, $hi0 + + ldx [$ap+8], $aj ! ap[1] + add $ap, 16, $ap + ldx [$np+0], $nj ! np[0] + + mulx $lo0, $n0, $m1 ! "tp[0]"*n0 + + mulx $aj, $m0, $alo ! ap[1]*bp[0] + umulxhi $aj, $m0, $aj ! ahi=aj + + mulx $nj, $m1, $lo1 ! np[0]*m1 + umulxhi $nj, $m1, $hi1 + + ldx [$np+8], $nj ! np[1] + + addcc $lo0, $lo1, $lo1 + add $np, 16, $np + addxc %g0, $hi1, $hi1 + + mulx $nj, $m1, $nlo ! np[1]*m1 + umulxhi $nj, $m1, $nj ! nhi=nj + + ba .L1st + sub $num, 24, $cnt ! cnt=num-3 + +.align 16 +.L1st: + addcc $alo, $hi0, $lo0 + addxc $aj, %g0, $hi0 + + ldx [$ap+0], $aj ! ap[j] + addcc $nlo, $hi1, $lo1 + add $ap, 8, $ap + addxc $nj, %g0, $hi1 ! nhi=nj + + ldx [$np+0], $nj ! np[j] + mulx $aj, $m0, $alo ! ap[j]*bp[0] + add $np, 8, $np + umulxhi $aj, $m0, $aj ! ahi=aj + + mulx $nj, $m1, $nlo ! np[j]*m1 + addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0] + umulxhi $nj, $m1, $nj ! nhi=nj + addxc %g0, $hi1, $hi1 + stxa $lo1, [$tp]0xe2 ! tp[j-1] + add $tp, 8, $tp ! tp++ + + brnz,pt $cnt, .L1st + sub $cnt, 8, $cnt ! j-- +!.L1st + addcc $alo, $hi0, $lo0 + addxc $aj, %g0, $hi0 ! ahi=aj + + addcc $nlo, $hi1, $lo1 + addxc $nj, %g0, $hi1 + addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0] + addxc %g0, $hi1, $hi1 + stxa $lo1, [$tp]0xe2 ! tp[j-1] + add $tp, 8, $tp + + addcc $hi0, $hi1, $hi1 + addxc %g0, %g0, $ovf ! upmost overflow bit + stxa $hi1, [$tp]0xe2 + add $tp, 8, $tp + + ba .Louter + sub $num, 16, $i ! i=num-2 + +.align 16 +.Louter: + ldx [$bp+0], $m0 ! m0=bp[i] + add $bp, 8, $bp + + sub $ap, $num, $ap ! rewind + sub $np, $num, $np + sub $tp, $num, $tp + + ldx [$ap+0], $aj ! ap[0] + ldx [$np+0], $nj ! np[0] + + mulx $aj, $m0, $lo0 ! ap[0]*bp[i] + ldx [$tp], $tj ! tp[0] + umulxhi $aj, $m0, $hi0 + ldx [$ap+8], $aj ! ap[1] + addcc $lo0, $tj, $lo0 ! ap[0]*bp[i]+tp[0] + mulx $aj, $m0, $alo ! 
ap[1]*bp[i] + addxc %g0, $hi0, $hi0 + mulx $lo0, $n0, $m1 ! tp[0]*n0 + umulxhi $aj, $m0, $aj ! ahi=aj + mulx $nj, $m1, $lo1 ! np[0]*m1 + add $ap, 16, $ap + umulxhi $nj, $m1, $hi1 + ldx [$np+8], $nj ! np[1] + add $np, 16, $np + addcc $lo1, $lo0, $lo1 + mulx $nj, $m1, $nlo ! np[1]*m1 + addxc %g0, $hi1, $hi1 + umulxhi $nj, $m1, $nj ! nhi=nj + + ba .Linner + sub $num, 24, $cnt ! cnt=num-3 +.align 16 +.Linner: + addcc $alo, $hi0, $lo0 + ldx [$tp+8], $tj ! tp[j] + addxc $aj, %g0, $hi0 ! ahi=aj + ldx [$ap+0], $aj ! ap[j] + add $ap, 8, $ap + addcc $nlo, $hi1, $lo1 + mulx $aj, $m0, $alo ! ap[j]*bp[i] + addxc $nj, %g0, $hi1 ! nhi=nj + ldx [$np+0], $nj ! np[j] + add $np, 8, $np + umulxhi $aj, $m0, $aj ! ahi=aj + addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j] + mulx $nj, $m1, $nlo ! np[j]*m1 + addxc %g0, $hi0, $hi0 + umulxhi $nj, $m1, $nj ! nhi=nj + addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j] + addxc %g0, $hi1, $hi1 + stx $lo1, [$tp] ! tp[j-1] + add $tp, 8, $tp + brnz,pt $cnt, .Linner + sub $cnt, 8, $cnt +!.Linner + ldx [$tp+8], $tj ! tp[j] + addcc $alo, $hi0, $lo0 + addxc $aj, %g0, $hi0 ! ahi=aj + addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j] + addxc %g0, $hi0, $hi0 + + addcc $nlo, $hi1, $lo1 + addxc $nj, %g0, $hi1 ! nhi=nj + addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j] + addxc %g0, $hi1, $hi1 + stx $lo1, [$tp] ! tp[j-1] + + subcc %g0, $ovf, %g0 ! move upmost overflow to CCR.xcc + addxccc $hi1, $hi0, $hi1 + addxc %g0, %g0, $ovf + stx $hi1, [$tp+8] + add $tp, 16, $tp + + brnz,pt $i, .Louter + sub $i, 8, $i + + sub $ap, $num, $ap ! rewind + sub $np, $num, $np + sub $tp, $num, $tp + ba .Lsub + subcc $num, 8, $cnt ! cnt=num-1 and clear CCR.xcc + +.align 16 +.Lsub: + ldx [$tp], $tj + add $tp, 8, $tp + ldx [$np+0], $nj + add $np, 8, $np + subccc $tj, $nj, $t2 ! tp[j]-np[j] + srlx $tj, 32, $tj + srlx $nj, 32, $nj + subccc $tj, $nj, $t3 + add $rp, 8, $rp + st $t2, [$rp-4] ! reverse order + st $t3, [$rp-8] + brnz,pt $cnt, .Lsub + sub $cnt, 8, $cnt + + sub $np, $num, $np ! rewind + sub $tp, $num, $tp + sub $rp, $num, $rp + + subc $ovf, %g0, $ovf ! handle upmost overflow bit + and $tp, $ovf, $ap + andn $rp, $ovf, $np + or $np, $ap, $ap ! ap=borrow?tp:rp + ba .Lcopy + sub $num, 8, $cnt + +.align 16 +.Lcopy: ! copy or in-place refresh + ldx [$ap+0], $t2 + add $ap, 8, $ap + stx %g0, [$tp] ! zap + add $tp, 8, $tp + stx $t2, [$rp+0] + add $rp, 8, $rp + brnz $cnt, .Lcopy + sub $cnt, 8, $cnt + + mov 1, %o0 + ret + restore +.type bn_mul_mont_t4, #function +.size bn_mul_mont_t4, .-bn_mul_mont_t4 +___ + +# int bn_mul_mont_gather5( +$rp="%o0"; # u64 *rp, +$ap="%o1"; # const u64 *ap, +$bp="%o2"; # const u64 *pwrtbl, +$np="%o3"; # const u64 *np, +$n0p="%o4"; # const BN_ULONG *n0, +$num="%o5"; # int num, # caller ensures that num is >=3 + # int power); +$code.=<<___; +.globl bn_mul_mont_gather5_t4 +.align 32 +bn_mul_mont_gather5_t4: + add %sp, STACK_BIAS, %g4 ! real top of stack + sll $num, 3, $num ! size in bytes + add $num, 63, %g1 + andn %g1, 63, %g1 ! buffer size rounded up to 64 bytes + sub %g4, %g1, %g1 + andn %g1, 63, %g1 ! align at 64 byte + sub %g1, STACK_FRAME, %g1 ! new top of stack + sub %g1, %g4, %g1 + LDPTR [%sp+STACK_7thARG], %g4 ! load power, 7th argument + + save %sp, %g1, %sp +___ +# +-------------------------------+<----- %sp +# . . +# +-------------------------------+<----- aligned at 64 bytes +# | __int64 tmp[0] | +# +-------------------------------+ +# . . +# . . +# +-------------------------------+<----- aligned at 64 bytes +# . . 
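For readers tracing the carry chains, the .L1st/.Louter/.Linner loops above (and the near-identical ones in bn_mul_mont_gather5_t4 and vis3-mont.pl below) are a word-level Montgomery multiplication: mulx/umulxhi produce the low and high halves of each 64x64-bit product, and addcc/addxc[cc] propagate the carries. The stand-alone C model below is a rough reference only, assuming a compiler with unsigned __int128; it is not the code this patch installs.

    /* Reference model (illustration only) of the loop structure used above:
     * rp[] = ap[]*bp[]*2^(-64*num) mod np[], n0 = -np[0]^(-1) mod 2^64,
     * operands as little-endian 64-bit limbs. */
    typedef unsigned long long u64;
    typedef unsigned __int128 u128;

    static void mont_mul_ref(u64 *rp, const u64 *ap, const u64 *bp,
                             const u64 *np, u64 n0, int num)
    {
        u64 tp[34] = {0};                   /* num+2 limbs; assumes num <= 32 */
        u64 sub[32], borrow = 0;
        int i, j;

        for (i = 0; i < num; i++) {
            u128 acc = 0;
            for (j = 0; j < num; j++) {     /* tp[] += ap[] * bp[i]           */
                acc += (u128)ap[j] * bp[i] + tp[j];
                tp[j] = (u64)acc;
                acc >>= 64;
            }
            acc += tp[num];
            tp[num]     = (u64)acc;
            tp[num + 1] = (u64)(acc >> 64);

            u64 m1 = tp[0] * n0;            /* "tp[0]"*n0, cf. the comments   */
            acc = (u128)np[0] * m1 + tp[0]; /* low limb becomes zero, so ...  */
            acc >>= 64;
            for (j = 1; j < num; j++) {     /* ... store shifted down by one  */
                acc += (u128)np[j] * m1 + tp[j];
                tp[j - 1] = (u64)acc;
                acc >>= 64;
            }
            acc += tp[num];
            tp[num - 1] = (u64)acc;
            tp[num]     = tp[num + 1] + (u64)(acc >> 64);
        }

        for (j = 0; j < num; j++) {         /* conditional final subtraction  */
            u128 d = (u128)tp[j] - np[j] - borrow;
            sub[j] = (u64)d;
            borrow = (u64)(d >> 64) & 1;
        }
        for (j = 0; j < num; j++)           /* .Lsub/.Lcopy do this select    */
            rp[j] = (tp[num] | !borrow) ? sub[j] : tp[j];  /* with a mask     */
    }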
+($rp,$ap,$bp,$np,$n0p,$num)=map("%i$_",(0..5)); +($t0,$t1,$t2,$t3,$cnt,$tp,$bufsz)=map("%l$_",(0..7)); +($ovf,$i)=($t0,$t1); + &load_fcc($bp,"%g4","%g1"); + &load_f16($bp); +$code.=<<___; + movdtox %f16, $m0 ! m0=bp[0] + + ld [$n0p+0], $t0 ! pull n0[0..1] value + ld [$n0p+4], $t1 + add %sp, STACK_BIAS+STACK_FRAME, $tp + sllx $t1, 32, $n0 + or $t0, $n0, $n0 + + ldx [$ap+0], $aj ! ap[0] + + mulx $aj, $m0, $lo0 ! ap[0]*bp[0] + umulxhi $aj, $m0, $hi0 + + ldx [$ap+8], $aj ! ap[1] + add $ap, 16, $ap + ldx [$np+0], $nj ! np[0] + + mulx $lo0, $n0, $m1 ! "tp[0]"*n0 + + mulx $aj, $m0, $alo ! ap[1]*bp[0] + umulxhi $aj, $m0, $aj ! ahi=aj + + mulx $nj, $m1, $lo1 ! np[0]*m1 + umulxhi $nj, $m1, $hi1 + + ldx [$np+8], $nj ! np[1] + + addcc $lo0, $lo1, $lo1 + add $np, 16, $np + addxc %g0, $hi1, $hi1 + + mulx $nj, $m1, $nlo ! np[1]*m1 + umulxhi $nj, $m1, $nj ! nhi=nj + + ba .L1st_g5 + sub $num, 24, $cnt ! cnt=num-3 + +.align 16 +.L1st_g5: + addcc $alo, $hi0, $lo0 + addxc $aj, %g0, $hi0 + + ldx [$ap+0], $aj ! ap[j] + addcc $nlo, $hi1, $lo1 + add $ap, 8, $ap + addxc $nj, %g0, $hi1 ! nhi=nj + + ldx [$np+0], $nj ! np[j] + mulx $aj, $m0, $alo ! ap[j]*bp[0] + add $np, 8, $np + umulxhi $aj, $m0, $aj ! ahi=aj + + mulx $nj, $m1, $nlo ! np[j]*m1 + addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0] + umulxhi $nj, $m1, $nj ! nhi=nj + addxc %g0, $hi1, $hi1 + stxa $lo1, [$tp]0xe2 ! tp[j-1] + add $tp, 8, $tp ! tp++ + + brnz,pt $cnt, .L1st_g5 + sub $cnt, 8, $cnt ! j-- +!.L1st_g5 + addcc $alo, $hi0, $lo0 + addxc $aj, %g0, $hi0 ! ahi=aj + + addcc $nlo, $hi1, $lo1 + addxc $nj, %g0, $hi1 + addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0] + addxc %g0, $hi1, $hi1 + stxa $lo1, [$tp]0xe2 ! tp[j-1] + add $tp, 8, $tp + + addcc $hi0, $hi1, $hi1 + addxc %g0, %g0, $ovf ! upmost overflow bit + stxa $hi1, [$tp]0xe2 + add $tp, 8, $tp + + ba .Louter_g5 + sub $num, 16, $i ! i=num-2 + +.align 16 +.Louter_g5: +___ + &load_f16($bp); +$code.=<<___; + movdtox %f16, $m0 ! m0=bp[i] + + sub $ap, $num, $ap ! rewind + sub $np, $num, $np + sub $tp, $num, $tp + + ldx [$ap+0], $aj ! ap[0] + ldx [$np+0], $nj ! np[0] + + mulx $aj, $m0, $lo0 ! ap[0]*bp[i] + ldx [$tp], $tj ! tp[0] + umulxhi $aj, $m0, $hi0 + ldx [$ap+8], $aj ! ap[1] + addcc $lo0, $tj, $lo0 ! ap[0]*bp[i]+tp[0] + mulx $aj, $m0, $alo ! ap[1]*bp[i] + addxc %g0, $hi0, $hi0 + mulx $lo0, $n0, $m1 ! tp[0]*n0 + umulxhi $aj, $m0, $aj ! ahi=aj + mulx $nj, $m1, $lo1 ! np[0]*m1 + add $ap, 16, $ap + umulxhi $nj, $m1, $hi1 + ldx [$np+8], $nj ! np[1] + add $np, 16, $np + addcc $lo1, $lo0, $lo1 + mulx $nj, $m1, $nlo ! np[1]*m1 + addxc %g0, $hi1, $hi1 + umulxhi $nj, $m1, $nj ! nhi=nj + + ba .Linner_g5 + sub $num, 24, $cnt ! cnt=num-3 +.align 16 +.Linner_g5: + addcc $alo, $hi0, $lo0 + ldx [$tp+8], $tj ! tp[j] + addxc $aj, %g0, $hi0 ! ahi=aj + ldx [$ap+0], $aj ! ap[j] + add $ap, 8, $ap + addcc $nlo, $hi1, $lo1 + mulx $aj, $m0, $alo ! ap[j]*bp[i] + addxc $nj, %g0, $hi1 ! nhi=nj + ldx [$np+0], $nj ! np[j] + add $np, 8, $np + umulxhi $aj, $m0, $aj ! ahi=aj + addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j] + mulx $nj, $m1, $nlo ! np[j]*m1 + addxc %g0, $hi0, $hi0 + umulxhi $nj, $m1, $nj ! nhi=nj + addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j] + addxc %g0, $hi1, $hi1 + stx $lo1, [$tp] ! tp[j-1] + add $tp, 8, $tp + brnz,pt $cnt, .Linner_g5 + sub $cnt, 8, $cnt +!.Linner_g5 + ldx [$tp+8], $tj ! tp[j] + addcc $alo, $hi0, $lo0 + addxc $aj, %g0, $hi0 ! ahi=aj + addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j] + addxc %g0, $hi0, $hi0 + + addcc $nlo, $hi1, $lo1 + addxc $nj, %g0, $hi1 ! nhi=nj + addcc $lo1, $lo0, $lo1 ! 
np[j]*m1+ap[j]*bp[i]+tp[j] + addxc %g0, $hi1, $hi1 + stx $lo1, [$tp] ! tp[j-1] + + subcc %g0, $ovf, %g0 ! move upmost overflow to CCR.xcc + addxccc $hi1, $hi0, $hi1 + addxc %g0, %g0, $ovf + stx $hi1, [$tp+8] + add $tp, 16, $tp + + brnz,pt $i, .Louter_g5 + sub $i, 8, $i + + sub $ap, $num, $ap ! rewind + sub $np, $num, $np + sub $tp, $num, $tp + ba .Lsub_g5 + subcc $num, 8, $cnt ! cnt=num-1 and clear CCR.xcc + +.align 16 +.Lsub_g5: + ldx [$tp], $tj + add $tp, 8, $tp + ldx [$np+0], $nj + add $np, 8, $np + subccc $tj, $nj, $t2 ! tp[j]-np[j] + srlx $tj, 32, $tj + srlx $nj, 32, $nj + subccc $tj, $nj, $t3 + add $rp, 8, $rp + st $t2, [$rp-4] ! reverse order + st $t3, [$rp-8] + brnz,pt $cnt, .Lsub_g5 + sub $cnt, 8, $cnt + + sub $np, $num, $np ! rewind + sub $tp, $num, $tp + sub $rp, $num, $rp + + subc $ovf, %g0, $ovf ! handle upmost overflow bit + and $tp, $ovf, $ap + andn $rp, $ovf, $np + or $np, $ap, $ap ! ap=borrow?tp:rp + ba .Lcopy_g5 + sub $num, 8, $cnt + +.align 16 +.Lcopy_g5: ! copy or in-place refresh + ldx [$ap+0], $t2 + add $ap, 8, $ap + stx %g0, [$tp] ! zap + add $tp, 8, $tp + stx $t2, [$rp+0] + add $rp, 8, $rp + brnz $cnt, .Lcopy_g5 + sub $cnt, 8, $cnt + + mov 1, %o0 + ret + restore +.type bn_mul_mont_gather5_t4, #function +.size bn_mul_mont_gather5_t4, .-bn_mul_mont_gather5_t4 +___ +} + +$code.=<<___; +.globl bn_flip_t4 +.align 32 +bn_flip_t4: +.Loop_flip: + ld [%o1+0], %o4 + sub %o2, 1, %o2 + ld [%o1+4], %o5 + add %o1, 8, %o1 + st %o5, [%o0+0] + st %o4, [%o0+4] + brnz %o2, .Loop_flip + add %o0, 8, %o0 + retl + nop +.type bn_flip_t4, #function +.size bn_flip_t4, .-bn_flip_t4 + +.globl bn_scatter5_t4 +.align 32 +bn_scatter5_t4: + sll %o3, 3, %o3 + sub %o1, 1, %o1 + add %o3, %o2, %o2 ! &pwrtbl[pwr] + nop +.Loop_scatter5: + ldx [%o0], %g1 ! inp[i] + add %o0, 8, %o0 + stx %g1, [%o2] + add %o2, 32*8, %o2 + brnz %o1, .Loop_scatter5 + sub %o1, 1, %o1 + retl + nop +.type bn_scatter5_t4, #function +.size bn_scatter5_t4, .-bn_scatter5_t4 + +.globl bn_gather5_t4 +.align 32 +bn_gather5_t4: + mov %o7, %o5 +___ + &load_fcc("%o2","%o3","%o4"); +$code.=<<___; + mov %o5, %o7 + sub %o1, 1, %o1 +.Loop_gather5: +___ + &load_f16("%o2"); +$code.=<<___; + std %f16, [%o0] + add %o0, 8, %o0 + brnz %o1, .Loop_gather5 + sub %o1, 1, %o1 + + retl + nop +.type bn_gather5_t4, #function +.size bn_gather5_t4, .-bn_gather5_t4 +___ + +$code.=<<___; +#define ONE 0x3f800000 +#define NUL 0x00000000 +#define NaN 0xffffffff + +.align 64 +.Lmagic: + .long ONE,NUL,NaN,NaN,NaN,NaN,NUL,ONE + .long NUL,ONE,ONE,NUL,NaN,NaN,NaN,NaN + .long NaN,NaN,NUL,ONE,ONE,NUL,NaN,NaN + .long NaN,NaN,NaN,NaN,NUL,ONE,ONE,NUL +.asciz "Montgomery Multiplication for SPARC T4, David S. Miller, Andy Polyakov" +.align 4 +___ + +&emit_assembler(); + +close STDOUT; diff --git a/crypto/bn/asm/sparcv9-gf2m.pl b/crypto/bn/asm/sparcv9-gf2m.pl new file mode 100644 index 0000000000..ab94cd917c --- /dev/null +++ b/crypto/bn/asm/sparcv9-gf2m.pl @@ -0,0 +1,190 @@ +#!/usr/bin/env perl +# +# ==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# October 2012 +# +# The module implements bn_GF2m_mul_2x2 polynomial multiplication used +# in bn_gf2m.c. It's kind of low-hanging mechanical port from C for +# the time being... 
Except that it has two code paths: one suitable +# for all SPARCv9 processors and one for VIS3-capable ones. Former +# delivers ~25-45% more, more for longer keys, heaviest DH and DSA +# verify operations on venerable UltraSPARC II. On T4 VIS3 code is +# ~100-230% faster than gcc-generated code and ~35-90% faster than +# the pure SPARCv9 code path. + +$locals=16*8; + +$tab="%l0"; + +@T=("%g2","%g3"); +@i=("%g4","%g5"); + +($a1,$a2,$a4,$a8,$a12,$a48)=map("%o$_",(0..5)); +($lo,$hi,$b)=("%g1",$a8,"%o7"); $a=$lo; + +$code.=<<___; +#include + +#ifdef __arch64__ +.register %g2,#scratch +.register %g3,#scratch +#endif + +#ifdef __PIC__ +SPARC_PIC_THUNK(%g1) +#endif + +.globl bn_GF2m_mul_2x2 +.align 16 +bn_GF2m_mul_2x2: + SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5) + ld [%g1+0],%g1 ! OPENSSL_sparcv9cap_P[0] + + andcc %g1, SPARCV9_VIS3, %g0 + bz,pn %icc,.Lsoftware + nop + + sllx %o1, 32, %o1 + sllx %o3, 32, %o3 + or %o2, %o1, %o1 + or %o4, %o3, %o3 + .word 0x95b262ab ! xmulx %o1, %o3, %o2 + .word 0x99b262cb ! xmulxhi %o1, %o3, %o4 + srlx %o2, 32, %o1 ! 13 cycles later + st %o2, [%o0+0] + st %o1, [%o0+4] + srlx %o4, 32, %o3 + st %o4, [%o0+8] + retl + st %o3, [%o0+12] + +.align 16 +.Lsoftware: + save %sp,-STACK_FRAME-$locals,%sp + + sllx %i1,32,$a + mov -1,$a12 + sllx %i3,32,$b + or %i2,$a,$a + srlx $a12,1,$a48 ! 0x7fff... + or %i4,$b,$b + srlx $a12,2,$a12 ! 0x3fff... + add %sp,STACK_BIAS+STACK_FRAME,$tab + + sllx $a,2,$a4 + mov $a,$a1 + sllx $a,1,$a2 + + srax $a4,63,@i[1] ! broadcast 61st bit + and $a48,$a4,$a4 ! (a<<2)&0x7fff... + srlx $a48,2,$a48 + srax $a2,63,@i[0] ! broadcast 62nd bit + and $a12,$a2,$a2 ! (a<<1)&0x3fff... + srax $a1,63,$lo ! broadcast 63rd bit + and $a48,$a1,$a1 ! (a<<0)&0x1fff... + + sllx $a1,3,$a8 + and $b,$lo,$lo + and $b,@i[0],@i[0] + and $b,@i[1],@i[1] + + stx %g0,[$tab+0*8] ! tab[0]=0 + xor $a1,$a2,$a12 + stx $a1,[$tab+1*8] ! tab[1]=a1 + stx $a2,[$tab+2*8] ! tab[2]=a2 + xor $a4,$a8,$a48 + stx $a12,[$tab+3*8] ! tab[3]=a1^a2 + xor $a4,$a1,$a1 + + stx $a4,[$tab+4*8] ! tab[4]=a4 + xor $a4,$a2,$a2 + stx $a1,[$tab+5*8] ! tab[5]=a1^a4 + xor $a4,$a12,$a12 + stx $a2,[$tab+6*8] ! tab[6]=a2^a4 + xor $a48,$a1,$a1 + stx $a12,[$tab+7*8] ! tab[7]=a1^a2^a4 + xor $a48,$a2,$a2 + + stx $a8,[$tab+8*8] ! tab[8]=a8 + xor $a48,$a12,$a12 + stx $a1,[$tab+9*8] ! tab[9]=a1^a8 + xor $a4,$a1,$a1 + stx $a2,[$tab+10*8] ! tab[10]=a2^a8 + xor $a4,$a2,$a2 + stx $a12,[$tab+11*8] ! tab[11]=a1^a2^a8 + + xor $a4,$a12,$a12 + stx $a48,[$tab+12*8] ! tab[12]=a4^a8 + srlx $lo,1,$hi + stx $a1,[$tab+13*8] ! tab[13]=a1^a4^a8 + sllx $lo,63,$lo + stx $a2,[$tab+14*8] ! tab[14]=a2^a4^a8 + srlx @i[0],2,@T[0] + stx $a12,[$tab+15*8] ! 
tab[15]=a1^a2^a4^a8 + + sllx @i[0],62,$a1 + sllx $b,3,@i[0] + srlx @i[1],3,@T[1] + and @i[0],`0xf<<3`,@i[0] + sllx @i[1],61,$a2 + ldx [$tab+@i[0]],@i[0] + srlx $b,4-3,@i[1] + xor @T[0],$hi,$hi + and @i[1],`0xf<<3`,@i[1] + xor $a1,$lo,$lo + ldx [$tab+@i[1]],@i[1] + xor @T[1],$hi,$hi + + xor @i[0],$lo,$lo + srlx $b,8-3,@i[0] + xor $a2,$lo,$lo + and @i[0],`0xf<<3`,@i[0] +___ +for($n=1;$n<14;$n++) { +$code.=<<___; + sllx @i[1],`$n*4`,@T[0] + ldx [$tab+@i[0]],@i[0] + srlx @i[1],`64-$n*4`,@T[1] + xor @T[0],$lo,$lo + srlx $b,`($n+2)*4`-3,@i[1] + xor @T[1],$hi,$hi + and @i[1],`0xf<<3`,@i[1] +___ + push(@i,shift(@i)); push(@T,shift(@T)); +} +$code.=<<___; + sllx @i[1],`$n*4`,@T[0] + ldx [$tab+@i[0]],@i[0] + srlx @i[1],`64-$n*4`,@T[1] + xor @T[0],$lo,$lo + + sllx @i[0],`($n+1)*4`,@T[0] + xor @T[1],$hi,$hi + srlx @i[0],`64-($n+1)*4`,@T[1] + xor @T[0],$lo,$lo + xor @T[1],$hi,$hi + + srlx $lo,32,%i1 + st $lo,[%i0+0] + st %i1,[%i0+4] + srlx $hi,32,%i2 + st $hi,[%i0+8] + st %i2,[%i0+12] + + ret + restore +.type bn_GF2m_mul_2x2,#function +.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2 +.asciz "GF(2^m) Multiplication for SPARCv9, CRYPTOGAMS by " +.align 4 +___ + +$code =~ s/\`([^\`]*)\`/eval($1)/gem; +print $code; +close STDOUT; diff --git a/crypto/bn/asm/vis3-mont.pl b/crypto/bn/asm/vis3-mont.pl new file mode 100644 index 0000000000..a1357de0e9 --- /dev/null +++ b/crypto/bn/asm/vis3-mont.pl @@ -0,0 +1,373 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# October 2012. +# +# SPARCv9 VIS3 Montgomery multiplicaion procedure suitable for T3 and +# onward. There are three new instructions used here: umulxhi, +# addxc[cc] and initializing store. On T3 RSA private key operations +# are 1.54/1.87/2.11/2.26 times faster for 512/1024/2048/4096-bit key +# lengths. This is without dedicated squaring procedure. On T4 +# corresponding coefficients are 1.47/2.10/2.80/2.90x, which is mostly +# for reference purposes, because T4 has dedicated Montgomery +# multiplication and squaring *instructions* that deliver even more. + +$bits=32; +for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); } +if ($bits==64) { $bias=2047; $frame=192; } +else { $bias=0; $frame=112; } + +$code.=<<___ if ($bits==64); +.register %g2,#scratch +.register %g3,#scratch +___ +$code.=<<___; +.section ".text",#alloc,#execinstr +___ + +($n0,$m0,$m1,$lo0,$hi0, $lo1,$hi1,$aj,$alo,$nj,$nlo,$tj)= + (map("%g$_",(1..5)),map("%o$_",(0..5,7))); + +# int bn_mul_mont( +$rp="%o0"; # BN_ULONG *rp, +$ap="%o1"; # const BN_ULONG *ap, +$bp="%o2"; # const BN_ULONG *bp, +$np="%o3"; # const BN_ULONG *np, +$n0p="%o4"; # const BN_ULONG *n0, +$num="%o5"; # int num); # caller ensures that num is even + # and >=6 +$code.=<<___; +.globl bn_mul_mont_vis3 +.align 32 +bn_mul_mont_vis3: + add %sp, $bias, %g4 ! real top of stack + sll $num, 2, $num ! size in bytes + add $num, 63, %g5 + andn %g5, 63, %g5 ! buffer size rounded up to 64 bytes + add %g5, %g5, %g1 + add %g5, %g1, %g1 ! 3*buffer size + sub %g4, %g1, %g1 + andn %g1, 63, %g1 ! align at 64 byte + sub %g1, $frame, %g1 ! new top of stack + sub %g1, %g4, %g1 + + save %sp, %g1, %sp +___ + +# +-------------------------------+<----- %sp +# . . 
+# +-------------------------------+<----- aligned at 64 bytes +# | __int64 tmp[0] | +# +-------------------------------+ +# . . +# . . +# +-------------------------------+<----- aligned at 64 bytes +# | __int64 ap[1..0] | converted ap[] +# +-------------------------------+ +# | __int64 np[1..0] | converted np[] +# +-------------------------------+ +# | __int64 ap[3..2] | +# . . +# . . +# +-------------------------------+ +($rp,$ap,$bp,$np,$n0p,$num)=map("%i$_",(0..5)); +($t0,$t1,$t2,$t3,$cnt,$tp,$bufsz,$anp)=map("%l$_",(0..7)); +($ovf,$i)=($t0,$t1); +$code.=<<___; + ld [$n0p+0], $t0 ! pull n0[0..1] value + add %sp, $bias+$frame, $tp + ld [$n0p+4], $t1 + add $tp, %g5, $anp + ld [$bp+0], $t2 ! m0=bp[0] + sllx $t1, 32, $n0 + ld [$bp+4], $t3 + or $t0, $n0, $n0 + add $bp, 8, $bp + + ld [$ap+0], $t0 ! ap[0] + sllx $t3, 32, $m0 + ld [$ap+4], $t1 + or $t2, $m0, $m0 + + ld [$ap+8], $t2 ! ap[1] + sllx $t1, 32, $aj + ld [$ap+12], $t3 + or $t0, $aj, $aj + add $ap, 16, $ap + stxa $aj, [$anp]0xe2 ! converted ap[0] + + mulx $aj, $m0, $lo0 ! ap[0]*bp[0] + umulxhi $aj, $m0, $hi0 + + ld [$np+0], $t0 ! np[0] + sllx $t3, 32, $aj + ld [$np+4], $t1 + or $t2, $aj, $aj + + ld [$np+8], $t2 ! np[1] + sllx $t1, 32, $nj + ld [$np+12], $t3 + or $t0, $nj, $nj + add $np, 16, $np + stx $nj, [$anp+8] ! converted np[0] + + mulx $lo0, $n0, $m1 ! "tp[0]"*n0 + stx $aj, [$anp+16] ! converted ap[1] + + mulx $aj, $m0, $alo ! ap[1]*bp[0] + umulxhi $aj, $m0, $aj ! ahi=aj + + mulx $nj, $m1, $lo1 ! np[0]*m1 + umulxhi $nj, $m1, $hi1 + + sllx $t3, 32, $nj + or $t2, $nj, $nj + stx $nj, [$anp+24] ! converted np[1] + add $anp, 32, $anp + + addcc $lo0, $lo1, $lo1 + addxc %g0, $hi1, $hi1 + + mulx $nj, $m1, $nlo ! np[1]*m1 + umulxhi $nj, $m1, $nj ! nhi=nj + + ba .L1st + sub $num, 24, $cnt ! cnt=num-3 + +.align 16 +.L1st: + ld [$ap+0], $t0 ! ap[j] + addcc $alo, $hi0, $lo0 + ld [$ap+4], $t1 + addxc $aj, %g0, $hi0 + + sllx $t1, 32, $aj + add $ap, 8, $ap + or $t0, $aj, $aj + stxa $aj, [$anp]0xe2 ! converted ap[j] + + ld [$np+0], $t2 ! np[j] + addcc $nlo, $hi1, $lo1 + ld [$np+4], $t3 + addxc $nj, %g0, $hi1 ! nhi=nj + + sllx $t3, 32, $nj + add $np, 8, $np + mulx $aj, $m0, $alo ! ap[j]*bp[0] + or $t2, $nj, $nj + umulxhi $aj, $m0, $aj ! ahi=aj + stx $nj, [$anp+8] ! converted np[j] + add $anp, 16, $anp ! anp++ + + mulx $nj, $m1, $nlo ! np[j]*m1 + addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0] + umulxhi $nj, $m1, $nj ! nhi=nj + addxc %g0, $hi1, $hi1 + stxa $lo1, [$tp]0xe2 ! tp[j-1] + add $tp, 8, $tp ! tp++ + + brnz,pt $cnt, .L1st + sub $cnt, 8, $cnt ! j-- +!.L1st + addcc $alo, $hi0, $lo0 + addxc $aj, %g0, $hi0 ! ahi=aj + + addcc $nlo, $hi1, $lo1 + addxc $nj, %g0, $hi1 + addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0] + addxc %g0, $hi1, $hi1 + stxa $lo1, [$tp]0xe2 ! tp[j-1] + add $tp, 8, $tp + + addcc $hi0, $hi1, $hi1 + addxc %g0, %g0, $ovf ! upmost overflow bit + stxa $hi1, [$tp]0xe2 + add $tp, 8, $tp + + ba .Louter + sub $num, 16, $i ! i=num-2 + +.align 16 +.Louter: + ld [$bp+0], $t2 ! m0=bp[i] + ld [$bp+4], $t3 + + sub $anp, $num, $anp ! rewind + sub $tp, $num, $tp + sub $anp, $num, $anp + + add $bp, 8, $bp + sllx $t3, 32, $m0 + ldx [$anp+0], $aj ! ap[0] + or $t2, $m0, $m0 + ldx [$anp+8], $nj ! np[0] + + mulx $aj, $m0, $lo0 ! ap[0]*bp[i] + ldx [$tp], $tj ! tp[0] + umulxhi $aj, $m0, $hi0 + ldx [$anp+16], $aj ! ap[1] + addcc $lo0, $tj, $lo0 ! ap[0]*bp[i]+tp[0] + mulx $aj, $m0, $alo ! ap[1]*bp[i] + addxc %g0, $hi0, $hi0 + mulx $lo0, $n0, $m1 ! tp[0]*n0 + umulxhi $aj, $m0, $aj ! ahi=aj + mulx $nj, $m1, $lo1 ! 
np[0]*m1 + umulxhi $nj, $m1, $hi1 + ldx [$anp+24], $nj ! np[1] + add $anp, 32, $anp + addcc $lo1, $lo0, $lo1 + mulx $nj, $m1, $nlo ! np[1]*m1 + addxc %g0, $hi1, $hi1 + umulxhi $nj, $m1, $nj ! nhi=nj + + ba .Linner + sub $num, 24, $cnt ! cnt=num-3 +.align 16 +.Linner: + addcc $alo, $hi0, $lo0 + ldx [$tp+8], $tj ! tp[j] + addxc $aj, %g0, $hi0 ! ahi=aj + ldx [$anp+0], $aj ! ap[j] + addcc $nlo, $hi1, $lo1 + mulx $aj, $m0, $alo ! ap[j]*bp[i] + addxc $nj, %g0, $hi1 ! nhi=nj + ldx [$anp+8], $nj ! np[j] + add $anp, 16, $anp + umulxhi $aj, $m0, $aj ! ahi=aj + addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j] + mulx $nj, $m1, $nlo ! np[j]*m1 + addxc %g0, $hi0, $hi0 + umulxhi $nj, $m1, $nj ! nhi=nj + addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j] + addxc %g0, $hi1, $hi1 + stx $lo1, [$tp] ! tp[j-1] + add $tp, 8, $tp + brnz,pt $cnt, .Linner + sub $cnt, 8, $cnt +!.Linner + ldx [$tp+8], $tj ! tp[j] + addcc $alo, $hi0, $lo0 + addxc $aj, %g0, $hi0 ! ahi=aj + addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j] + addxc %g0, $hi0, $hi0 + + addcc $nlo, $hi1, $lo1 + addxc $nj, %g0, $hi1 ! nhi=nj + addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j] + addxc %g0, $hi1, $hi1 + stx $lo1, [$tp] ! tp[j-1] + + subcc %g0, $ovf, %g0 ! move upmost overflow to CCR.xcc + addxccc $hi1, $hi0, $hi1 + addxc %g0, %g0, $ovf + stx $hi1, [$tp+8] + add $tp, 16, $tp + + brnz,pt $i, .Louter + sub $i, 8, $i + + sub $anp, $num, $anp ! rewind + sub $tp, $num, $tp + sub $anp, $num, $anp + ba .Lsub + subcc $num, 8, $cnt ! cnt=num-1 and clear CCR.xcc + +.align 16 +.Lsub: + ldx [$tp], $tj + add $tp, 8, $tp + ldx [$anp+8], $nj + add $anp, 16, $anp + subccc $tj, $nj, $t2 ! tp[j]-np[j] + srlx $tj, 32, $tj + srlx $nj, 32, $nj + subccc $tj, $nj, $t3 + add $rp, 8, $rp + st $t2, [$rp-4] ! reverse order + st $t3, [$rp-8] + brnz,pt $cnt, .Lsub + sub $cnt, 8, $cnt + + sub $anp, $num, $anp ! rewind + sub $tp, $num, $tp + sub $anp, $num, $anp + sub $rp, $num, $rp + + subc $ovf, %g0, $ovf ! handle upmost overflow bit + and $tp, $ovf, $ap + andn $rp, $ovf, $np + or $np, $ap, $ap ! ap=borrow?tp:rp + ba .Lcopy + sub $num, 8, $cnt + +.align 16 +.Lcopy: ! copy or in-place refresh + ld [$ap+0], $t2 + ld [$ap+4], $t3 + add $ap, 8, $ap + stx %g0, [$tp] ! zap + add $tp, 8, $tp + stx %g0, [$anp] ! zap + stx %g0, [$anp+8] + add $anp, 16, $anp + st $t3, [$rp+0] ! flip order + st $t2, [$rp+4] + add $rp, 8, $rp + brnz $cnt, .Lcopy + sub $cnt, 8, $cnt + + mov 1, %o0 + ret + restore +.type bn_mul_mont_vis3, #function +.size bn_mul_mont_vis3, .-bn_mul_mont_vis3 +.asciz "Montgomery Multiplication for SPARCv9 VIS3, CRYPTOGAMS by " +.align 4 +___ + +# Purpose of these subroutines is to explicitly encode VIS instructions, +# so that one can compile the module without having to specify VIS +# extentions on compiler command line, e.g. -xarch=v9 vs. -xarch=v9a. +# Idea is to reserve for option to produce "universal" binary and let +# programmer detect if current CPU is VIS capable at run-time. 
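The note above is why the VIS3 arithmetic ops appear as raw .word values in the generated assembly. The Perl helper unvis3() right below does the packing; the C rendering that follows is only an illustration of the same bit layout for readers who prefer C, using the opf values and the %g/%o/%l/%i register bias (0/8/16/24) taken from that helper.

    /* Illustration of what unvis3() emits; not part of the patch. */
    #include <stdio.h>

    static unsigned int vis3_word(unsigned int opf, unsigned int rs1,
                                  unsigned int rs2, unsigned int rd)
    {
        /* opf: addxc = 0x011, addxccc = 0x013, umulxhi = 0x016 */
        return 0x81b00000u | (rd << 25) | (rs1 << 14) | (opf << 5) | rs2;
    }

    int main(void)
    {
        /* umulxhi %o1,%o3,%o4: rs1 = 8+1, rs2 = 8+3, rd = 8+4 */
        printf(".word\t0x%08x\t! umulxhi %%o1,%%o3,%%o4\n",
               vis3_word(0x016, 9, 11, 12));
        return 0;
    }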
+sub unvis3 { +my ($mnemonic,$rs1,$rs2,$rd)=@_; +my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 ); +my ($ref,$opf); +my %visopf = ( "addxc" => 0x011, + "addxccc" => 0x013, + "umulxhi" => 0x016 ); + + $ref = "$mnemonic\t$rs1,$rs2,$rd"; + + if ($opf=$visopf{$mnemonic}) { + foreach ($rs1,$rs2,$rd) { + return $ref if (!/%([goli])([0-9])/); + $_=$bias{$1}+$2; + } + + return sprintf ".word\t0x%08x !%s", + 0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2, + $ref; + } else { + return $ref; + } +} + +foreach (split("\n",$code)) { + s/\`([^\`]*)\`/eval $1/ge; + + s/\b(umulxhi|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/ + &unvis3($1,$2,$3,$4) + /ge; + + print $_,"\n"; +} + +close STDOUT; diff --git a/crypto/bn/bn_exp.c b/crypto/bn/bn_exp.c index 2abf6fd678..86ccf00e7f 100644 --- a/crypto/bn/bn_exp.c +++ b/crypto/bn/bn_exp.c @@ -125,6 +125,11 @@ # endif #endif +#if defined(OPENSSL_BN_ASM_MONT) && defined(__sparc__) +# include "sparc_arch.h" +extern unsigned int OPENSSL_sparcv9cap_P[]; +#endif + /* maximum precomputation table size for *variable* sliding windows */ #define TABLE_SIZE 32 @@ -587,6 +592,9 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, int powerbufLen = 0; unsigned char *powerbuf=NULL; BIGNUM tmp, am; +#if defined(OPENSSL_BN_ASM_MONT) && defined(__sparc__) + unsigned int t4=0; +#endif bn_check_top(a); bn_check_top(p); @@ -621,9 +629,18 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, /* Get the window size to use with size of p. */ window = BN_window_bits_for_ctime_exponent_size(bits); +#if defined(OPENSSL_BN_ASM_MONT) && defined(__sparc__) + if (window>=5 && (top&15)==0 && top<=64 && + (OPENSSL_sparcv9cap_P[1]&(CFR_MONTMUL|CFR_MONTSQR))== + (CFR_MONTMUL|CFR_MONTSQR) && + (t4=OPENSSL_sparcv9cap_P[0])) + window=5; + else +#endif #if defined(OPENSSL_BN_ASM_MONT5) if (window==6 && bits<=1024) window=5; /* ~5% improvement of 2048-bit RSA sign */ #endif + (void)0; /* Allocate a buffer large enough to hold all of the pre-computed * powers of am, am itself and tmp. 
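The hunk above pins the constant-time window at 5 bits whenever the T4 kernels are usable ((top&15)==0 and top<=64, i.e. 512- to 2048-bit moduli), because a multi-op bn_pwr5_mont_t4_* kernel fuses exactly one 5-bit window step: five Montgomery squarings followed by one multiplication by a gathered table entry. The toy model below, on plain 64-bit integers rather than BIGNUMs, shows the window bookkeeping performed by the next hunk; every name in it is illustrative.

    /* Runnable toy model of 5-bit fixed-window exponentiation; not OpenSSL
     * code.  ebits is the number of significant bits of exp (>= 1). */
    typedef unsigned long long u64;
    typedef unsigned __int128 u128;

    static u64 mulmod(u64 a, u64 b, u64 m) { return (u64)((u128)a * b % m); }

    static u64 pow_win5(u64 base, u64 exp, u64 m, int ebits)
    {
        u64 tbl[32], acc, wvalue = 0;
        int i, j, bits = ebits - 1;

        tbl[0] = 1 % m;
        for (i = 1; i < 32; i++)                 /* base^0 .. base^31        */
            tbl[i] = mulmod(tbl[i - 1], base, m);

        /* the leading (ebits % 5) bits seed the accumulator, as in the code */
        for (i = bits % 5; i >= 0; i--, bits--)
            wvalue = (wvalue << 1) | ((exp >> bits) & 1);
        acc = tbl[wvalue];

        while (bits >= 0) {
            for (wvalue = 0, i = 0; i < 5; i++, bits--)
                wvalue = (wvalue << 1) | ((exp >> bits) & 1);
            for (j = 0; j < 5; j++)              /* five squarings ...       */
                acc = mulmod(acc, acc, m);
            acc = mulmod(acc, tbl[wvalue], m);   /* ... then one table mult  */
        }
        return acc;
    }

A 4096-bit modulus fails the top<=64 test and keeps using the generic OPENSSL_BN_ASM_MONT5/VIS3 paths, which matches the "no improvement for 4096-bit RSA verify" note in sparct4-mont.pl.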
@@ -673,6 +690,94 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
 		}
 	else if (!BN_to_montgomery(&am,a,mont,ctx))	goto err;
 
+#if defined(OPENSSL_BN_ASM_MONT) && defined(__sparc__)
+	if (t4)
+		{
+		typedef int (*bn_pwr5_mont_f)(BN_ULONG *tp,const BN_ULONG *np,
+			const BN_ULONG *n0,const void *table,int power);
+		int bn_pwr5_mont_t4_8(BN_ULONG *tp,const BN_ULONG *np,
+			const BN_ULONG *n0,const void *table,int power);
+		int bn_pwr5_mont_t4_16(BN_ULONG *tp,const BN_ULONG *np,
+			const BN_ULONG *n0,const void *table,int power);
+		int bn_pwr5_mont_t4_24(BN_ULONG *tp,const BN_ULONG *np,
+			const BN_ULONG *n0,const void *table,int power);
+		int bn_pwr5_mont_t4_32(BN_ULONG *tp,const BN_ULONG *np,
+			const BN_ULONG *n0,const void *table,int power);
+		static const bn_pwr5_mont_f funcs[4] = {
+			bn_pwr5_mont_t4_8,	bn_pwr5_mont_t4_16,
+			bn_pwr5_mont_t4_24,	bn_pwr5_mont_t4_32 };
+		bn_pwr5_mont_f worker = funcs[top/16-1];
+
+		void bn_mul_mont_t4(BN_ULONG *rp,const BN_ULONG *ap,
+			const void *bp,const BN_ULONG *np,
+			const BN_ULONG *n0,int num);
+		void bn_mul_mont_gather5_t4(BN_ULONG *rp,const BN_ULONG *ap,
+			const void *table,const BN_ULONG *np,
+			const BN_ULONG *n0,int num,int power);
+		void bn_scatter5_t4(const BN_ULONG *inp,size_t num,
+			void *table,size_t power);
+		void bn_gather5_t4(BN_ULONG *out,size_t num,
+			void *table,size_t power);
+		void bn_flip_t4(BN_ULONG *dst,BN_ULONG *src,size_t num);
+
+		BN_ULONG *np=alloca(top*sizeof(BN_ULONG)), *n0=mont->n0;
+
+		/* BN_to_montgomery can contaminate words above .top
+		 * [in BN_DEBUG[_DEBUG] build]... */
+		for (i=am.top; i<top; i++)	am.d[i]=0;
+		for (i=tmp.top; i<top; i++)	tmp.d[i]=0;
+
+		bn_flip_t4(np,mont->N.d,top);
+		bn_flip_t4(tmp.d,tmp.d,top);
+		bn_flip_t4(am.d,am.d,top);
+
+		bn_scatter5_t4(tmp.d,top,powerbuf,0);
+		bn_scatter5_t4(am.d,top,powerbuf,1);
+		bn_mul_mont_t4(tmp.d,am.d,am.d,np,n0,top);
+		bn_scatter5_t4(tmp.d,top,powerbuf,2);
+
+		for (i=3; i<32; i++)
+			{
+			/* Calculate a^i = a^(i-1) * a */
+			bn_mul_mont_gather5_t4(tmp.d,am.d,powerbuf,np,n0,top,i-1);
+			bn_scatter5_t4(tmp.d,top,powerbuf,i);
+			}
+
+		bits--;
+		for (wvalue=0, i=bits%5; i>=0; i--,bits--)
+			wvalue = (wvalue<<1)+BN_is_bit_set(p,bits);
+		bn_gather5_t4(tmp.d,top,powerbuf,wvalue);
+
+		/* Scan the exponent one window at a time starting from the most
+		 * significant bits.
+ */ + while (bits >= 0) + { + for (wvalue=0, i=0; i<5; i++,bits--) + wvalue = (wvalue<<1)+BN_is_bit_set(p,bits); + + if ((*worker)(tmp.d,np,n0,powerbuf,wvalue)) continue; + /* retry once and fall back */ + if ((*worker)(tmp.d,np,n0,powerbuf,wvalue)) continue; + bn_mul_mont_t4(tmp.d,tmp.d,tmp.d,np,n0,top); + bn_mul_mont_t4(tmp.d,tmp.d,tmp.d,np,n0,top); + bn_mul_mont_t4(tmp.d,tmp.d,tmp.d,np,n0,top); + bn_mul_mont_t4(tmp.d,tmp.d,tmp.d,np,n0,top); + bn_mul_mont_t4(tmp.d,tmp.d,tmp.d,np,n0,top); + bn_mul_mont_gather5_t4(tmp.d,tmp.d,powerbuf,np,n0,top,wvalue); + } + + bn_flip_t4(tmp.d,tmp.d,top); + top *= 2; + /* back to 32-bit domain */ + tmp.top=top; + bn_correct_top(&tmp); + OPENSSL_cleanse(np,top*sizeof(BN_ULONG)); + } + else +#endif #if defined(OPENSSL_BN_ASM_MONT5) /* This optimization uses ideas from http://eprint.iacr.org/2011/239, * specifically optimization of cache-timing attack countermeasures diff --git a/crypto/sparcv9cap.c b/crypto/sparcv9cap.c index 70a581992d..38f33bee75 100644 --- a/crypto/sparcv9cap.c +++ b/crypto/sparcv9cap.c @@ -16,15 +16,39 @@ unsigned int OPENSSL_sparcv9cap_P[2]={SPARCV9_TICK_PRIVILEGED,0}; int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num) { + int bn_mul_mont_vis3(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num); int bn_mul_mont_fpu(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num); int bn_mul_mont_int(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num); - if (num>=8 && !(num&1) && - (OPENSSL_sparcv9cap_P[0]&(SPARCV9_PREFER_FPU|SPARCV9_VIS1)) == - (SPARCV9_PREFER_FPU|SPARCV9_VIS1)) - return bn_mul_mont_fpu(rp,ap,bp,np,n0,num); - else - return bn_mul_mont_int(rp,ap,bp,np,n0,num); + if (!(num&1) && num>=6) + { + if ((num&15)==0 && num<=64 && + (OPENSSL_sparcv9cap_P[1]&(CFR_MONTMUL|CFR_MONTSQR))== + (CFR_MONTMUL|CFR_MONTSQR)) + { + typedef int (*bn_mul_mont_f)(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0); + int bn_mul_mont_t4_8(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0); + int bn_mul_mont_t4_16(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0); + int bn_mul_mont_t4_24(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0); + int bn_mul_mont_t4_32(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0); + static const bn_mul_mont_f funcs[4] = { + bn_mul_mont_t4_8, bn_mul_mont_t4_16, + bn_mul_mont_t4_24, bn_mul_mont_t4_32 }; + bn_mul_mont_f worker = funcs[num/16-1]; + + if ((*worker)(rp,ap,bp,np,n0)) return 1; + /* retry once and fall back */ + if ((*worker)(rp,ap,bp,np,n0)) return 1; + return bn_mul_mont_vis3(rp,ap,bp,np,n0,num); + } + if ((OPENSSL_sparcv9cap_P[0]&SPARCV9_VIS3)) + return bn_mul_mont_vis3(rp,ap,bp,np,n0,num); + else if (num>=8 && + (OPENSSL_sparcv9cap_P[0]&(SPARCV9_PREFER_FPU|SPARCV9_VIS1)) == + (SPARCV9_PREFER_FPU|SPARCV9_VIS1)) + return bn_mul_mont_fpu(rp,ap,bp,np,n0,num); + } + return bn_mul_mont_int(rp,ap,bp,np,n0,num); } unsigned long _sparcv9_rdtick(void);
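One detail of the constant-time table handling is easy to miss in the assembly: bn_scatter5_t4() stores limb j of power p at a stride of 32 limbs, and bn_gather5_t4()/load_f16() read every candidate slot and pick one with the %fcc-driven fmovd moves prepared by load_fcc(), so the lookups made by the BN_mod_exp_mont_consttime code above do not leak the power index through the access pattern. The C fragment below models only the table layout, under the stated assumptions; the branch-free selection itself is not modelled and none of the names are OpenSSL functions.

    /* Layout model only -- illustrative.  powerbuf holds 32 precomputed
     * powers of 'a'; limb j of power p lives at tbl[p + 32*j], matching the
     * pwr*8 byte offset and 32*8-byte stride in bn_scatter5_t4. */
    #include <stddef.h>

    typedef unsigned long long u64;

    static void scatter5_model(const u64 *inp, size_t num, u64 *tbl, size_t power)
    {
        for (size_t j = 0; j < num; j++)
            tbl[power + 32 * j] = inp[j];
    }

    static void gather5_model(u64 *out, size_t num, const u64 *tbl, size_t power)
    {
        for (size_t j = 0; j < num; j++)    /* real code selects without branches */
            out[j] = tbl[power + 32 * j];
    }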