Commit 964ed946 authored by Andy Polyakov

parisc-mont.pl: PA-RISC 2.0 code path optimization based on instruction-

level profiling data resulted in an almost 50% performance improvement.
The PA-RISC 1.1 code path is reordered in the same manner, mostly for
consistency, as no gain was observed, at least not on PA-7100LC.
parent cab6de03
+129 −122
@@ -20,39 +20,45 @@
# for PA-RISC 1.1, but the "baseline" is far from optimal. The actual
# improvement coefficient was never collected on PA-7100LC, or any
# other 1.1 CPU, because I don't have access to such a machine with
# vendor compiler. But to give you a taste, PA-RISC 1.1 code path
# reportedly outperformed code generated by cc +DA1.1 +O3 by factor
# of ~5x on PA-8600.
#
# On PA-RISC 2.0 it has to compete with pa-risc2[W].s, which is
# reportedly ~2x faster than vendor compiler generated code [according
# to a comment in pa-risc2[W].s]. Here comes a catch. The execution
# core of this implementation is actually a 32-bit one, in the sense
# that it operates on 32-bit values. But pa-risc2[W].s operates on
# arrays of 64-bit BN_LONGs... How do they interoperate then? No
# problem. This module picks halves of 64-bit values in reverse order
# and pretends they were 32-bit BN_LONGs. But can a 32-bit core
# compete with "pure" 64-bit code such as pa-risc2[W].s? Well, the
# thing is that 32x32=64-bit multiplication is the best even PA-RISC
# 2.0 can do, i.e. there is no "wider" multiplication as on most
# other 64-bit platforms. This means that even being effectively
# 32-bit, this implementation performs the "64-bit" computational
# task in the same number of arithmetic operations, most notably
# multiplications. It requires more memory references, most notably
# to tp[num], but this doesn't seem to exhaust memory port capacity.
# And indeed, the dedicated PA-RISC 2.0 code path provides virtually
# the same performance as pa-risc2[W].s: ~10% better for the shortest
# key length and ~10% worse for the longest one.
#
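To make that concrete, here is a minimal C sketch of the word step the PA-RISC 2.0 assembly below implements with xmpyu/extrd,u/addl. This is hypothetical code with invented names (mont_word0, n0inv), not part of the module. It shows why the 64-bit additions can be carry-free (a 32x32 product plus two 32-bit carries is at most (2^32-1)^2 + 2*(2^32-1) = 2^64-1, which still fits in 64 bits), and how a big-endian array of 64-bit BN_LONGs is read as 32-bit limbs "in reverse order":

#include <stdint.h>

/* One pass of the j-loop for bp[0] (cf. L$1st); n0inv = -np[0]^-1 mod 2^32. */
static void mont_word0(uint32_t *tp, const uint32_t *ap, const uint32_t *np,
                       uint32_t b0, uint32_t n0inv, int num)
{
	uint64_t ab = (uint64_t)ap[0]*b0;		/* ap[0]*bp[0]	*/
	uint32_t m  = (uint32_t)ab*n0inv;		/* Montgomery m	*/
	uint64_t nm = (uint64_t)np[0]*m + (uint32_t)ab;	/* low 32 bits are 0 */
	uint64_t hi0 = ab>>32, hi1 = nm>>32;

	for (int j = 1; j < num; j++) {
		/* neither sum can overflow 64 bits, hence plain addl
		 * with no carry propagation in the assembly */
		ab  = (uint64_t)ap[j]*b0 + hi0;		/* ap[j]*bp[0]	*/
		nm  = (uint64_t)np[j]*m + hi1 + (uint32_t)ab; /* np[j]*m */
		hi0 = ab>>32;
		hi1 = nm>>32;
		tp[j-1] = (uint32_t)nm;			/* tp[j-1]	*/
	}
	uint64_t t = hi0 + hi1;
	tp[num-1] = (uint32_t)t;
	tp[num]   = (uint32_t)(t>>32);			/* tp[num]	*/
}

/* Halves in reverse order: on big-endian PA-RISC the less significant
 * half of each 64-bit BN_LONG sits at the higher address, so 32-bit
 * limb j of the number is half j^1 of the 64-bit array. */
static uint32_t limb32(const uint64_t *a, int j)
{
	return ((const uint32_t *)a)[j^1];
}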
# In case it wasn't clear, the module has two distinct code paths:
# PA-RISC 1.1 and PA-RISC 2.0 ones. The latter features carry-free 64-bit
# additions and 64-bit integer loads, not to mention specific
# instruction scheduling. In a 64-bit build, naturally, only the 2.0
# code path is assembled. In a 32-bit application context both code
# paths are assembled, a PA-RISC 2.0 CPU is detected at run time, and
# the proper path is taken automatically. Also, in a 32-bit build the
# module imposes a couple of limitations: vector lengths have to be
# even and vector addresses have to be 64-bit aligned. Normally
# neither is a problem: most common key lengths are even and vectors
# are commonly malloc-ed, which ensures alignment.
#
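Those two limitations follow directly from the code: the loops below consume two 32-bit words per iteration (the "j++++" comments) via 64-bit flddx/ldd loads. Here is a hedged C analogue of the entry check, with an invented helper name; the actual test is performed in the assembler (note the L$abort path in the diff below), and the run-time 2.0 CPU detection is not modelled here:

#include <stdint.h>

/* Nonzero if a 32-bit-build call may proceed; otherwise bail out the
 * way the assembler branches to L$abort.  Illustrative only. */
static int mont_args_ok(const uint32_t *rp, const uint32_t *ap,
                        const uint32_t *bp, const uint32_t *np, int num)
{
	if (num & 1)			/* 2x-unrolled loops: even length */
		return 0;
	uintptr_t v = (uintptr_t)rp | (uintptr_t)ap |
	              (uintptr_t)bp | (uintptr_t)np;
	return (v & 7) == 0;		/* 64-bit alignment for fldd/ldd  */
}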
# Special thanks to polarhome.com for providing an HP-UX account on a
# PA-RISC 1.1 machine, and to a correspondent who chose to remain
# anonymous for testing the code on a PA-RISC 2.0 machine.

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;

@@ -134,7 +140,7 @@ $code=<<___;
	.SUBSPA	\$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY

	.EXPORT	bn_mul_mont,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR
	.ALIGN	64
bn_mul_mont
	.PROC
	.CALLINFO	FRAME=`$FRAME-8*$SIZE_T`,NO_CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=6
@@ -168,6 +174,7 @@ $code.=<<___ if ($BN_SZ==4);
	b		L\$abort
	nop
	nop					; alignment
	nop

	fldws		0($n0),${fn0}
	fldws,ma	4($bp),${fbi}		; bp[0]
@@ -219,58 +226,58 @@ $code.=<<___ if ($BN_SZ==4);
	nop
___
$code.=<<___;					# PA-RISC 2.0 code-path
	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[0]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	ldd		-16($xfer),$ab0
	ldd		-8($xfer),$nm0
	fstds		${fab0},-16($xfer)

	extrd,u		$ab0,31,32,$hi0
	extrd,u		$ab0,63,32,$ab0
	ldd		-8($xfer),$nm0
	fstds		${fnm0},-8($xfer)
	 ldo		8($idx),$idx		; j++++
	 addl		$ab0,$nm0,$nm0		; low part is discarded
	 extrd,u	$nm0,31,32,$hi1
	ldd		0($xfer),$ab1

L\$1st
	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[0]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	 ldd		8($xfer),$nm1
	fstds		${fab0},-16($xfer)
	xmpyu		${fai}R,${fbi},${fab1}	; ap[j+1]*bp[0]
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j+1]*m
	fstds		${fnm0},-8($xfer)
	 addl		$hi0,$ab1,$ab1
	ldd		0($xfer),$ab1
	fstds		${fab1},0($xfer)
	 addl		$hi0,$ab1,$ab1
	 extrd,u	$ab1,31,32,$hi0
	ldd		8($xfer),$nm1
	fstds		${fnm1},8($xfer)
	 extrd,u	$ab1,63,32,$ab1
	 addl		$hi1,$nm1,$nm1
	ldd		-16($xfer),$ab0
	flddx		$idx($ap),${fai}	; ap[j,j+1]
	flddx		$idx($np),${fni}	; np[j,j+1]
	 addl		$ab1,$nm1,$nm1
	ldd		-8($xfer),$nm0
	 extrd,u	$nm1,31,32,$hi1

	flddx		$idx($ap),${fai}	; ap[j,j+1]
	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[0]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	ldd		-16($xfer),$ab0
	fstds		${fab0},-16($xfer)
	 addl		$hi0,$ab0,$ab0
	flddx		$idx($np),${fni}	; np[j,j+1]
	 extrd,u	$ab0,31,32,$hi0
	stw		$nm1,-4($tp)		; tp[j-1]
	ldd		-8($xfer),$nm0
	fstds		${fnm0},-8($xfer)
	 extrd,u	$ab0,63,32,$ab0
	 addl		$hi1,$nm0,$nm0
	stw		$nm1,-4($tp)		; tp[j-1]
	 addl		$ab0,$nm0,$nm0
	ldd		0($xfer),$ab1
	 stw,ma		$nm0,8($tp)		; tp[j-1]
	addib,<>	8,$idx,L\$1st		; j++++
	 extrd,u	$nm0,31,32,$hi1

	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[0]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	 ldd		8($xfer),$nm1
	fstds		${fab0},-16($xfer)
	xmpyu		${fai}R,${fbi},${fab1}	; ap[j]*bp[0]
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j]*m
	fstds		${fnm0},-8($xfer)
	 addl		$hi0,$ab1,$ab1
	ldd		0($xfer),$ab1
	fstds		${fab1},0($xfer)
	 addl		$hi0,$ab1,$ab1
	 extrd,u	$ab1,31,32,$hi0
	ldd		8($xfer),$nm1
	fstds		${fnm1},8($xfer)
	 extrd,u	$ab1,63,32,$ab1
	 addl		$hi1,$nm1,$nm1
@@ -340,6 +347,8 @@ L\$outer
	ldd		-8($xfer),$nm0
	ldw		0($xfer),$hi0		; high part

	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[i]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	 extrd,u	$ab0,31,32,$ti0		; carry bit
	 extrd,u	$ab0,63,32,$ab0
	fstds		${fab1},0($xfer)
@@ -348,61 +357,59 @@ L\$outer
	 addl		$ab0,$nm0,$nm0		; low part is discarded
	ldw		0($tp),$ti1		; tp[1]
	 extrd,u	$nm0,31,32,$hi1
	ldd		0($xfer),$ab1
	fstds		${fab0},-16($xfer)
	fstds		${fnm0},-8($xfer)

L\$inner
	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[i]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	 ldd		8($xfer),$nm1
	fstds		${fab0},-16($xfer)
	xmpyu		${fai}R,${fbi},${fab1}	; ap[j+1]*bp[i]
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j+1]*m
	fstds		${fnm0},-8($xfer)
	ldw		4($tp),$ti0		; tp[j]
	 addl		$hi0,$ab1,$ab1
	ldd		0($xfer),$ab1
	fstds		${fab1},0($xfer)
	 addl		$hi0,$ti1,$ti1
	 addl		$ti1,$ab1,$ab1
	 extrd,u	$ab1,31,32,$hi0
	ldd		8($xfer),$nm1
	fstds		${fnm1},8($xfer)
	 extrd,u	$ab1,31,32,$hi0
	 extrd,u	$ab1,63,32,$ab1
	flddx		$idx($ap),${fai}	; ap[j,j+1]
	flddx		$idx($np),${fni}	; np[j,j+1]
	 addl		$hi1,$nm1,$nm1
	ldd		-16($xfer),$ab0
	 addl		$ab1,$nm1,$nm1
	ldd		-8($xfer),$nm0
	 extrd,u	$nm1,31,32,$hi1
	ldw		4($tp),$ti0		; tp[j]
	stw		$nm1,-4($tp)		; tp[j-1]

	flddx		$idx($ap),${fai}	; ap[j,j+1]
	 addl		$hi0,$ab0,$ab0
	flddx		$idx($np),${fni}	; np[j,j+1]
	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[i]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	ldd		-16($xfer),$ab0
	fstds		${fab0},-16($xfer)
	 addl		$hi0,$ti0,$ti0
	 addl		$ti0,$ab0,$ab0
	stw		$nm1,-4($tp)		; tp[j-1]
	ldd		-8($xfer),$nm0
	fstds		${fnm0},-8($xfer)
	 extrd,u	$ab0,31,32,$hi0
	 extrd,u	$nm1,31,32,$hi1
	ldw		8($tp),$ti1		; tp[j]
	 extrd,u	$ab0,63,32,$ab0
	 addl		$hi1,$nm0,$nm0
	 addl		$ab0,$nm0,$nm0
	ldd		0($xfer),$ab1
	 stw,ma		$nm0,8($tp)		; tp[j-1]
	addib,<>	8,$idx,L\$inner		; j++++
	 extrd,u	$nm0,31,32,$hi1

	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[i]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	 ldd		8($xfer),$nm1
	fstds		${fab0},-16($xfer)
	xmpyu		${fai}R,${fbi},${fab1}	; ap[j]*bp[i]
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j]*m
	fstds		${fnm0},-8($xfer)
	ldw		4($tp),$ti0		; tp[j]
	 addl		$hi0,$ab1,$ab1
	ldd		0($xfer),$ab1
	fstds		${fab1},0($xfer)
	 addl		$hi0,$ti1,$ti1
	 addl		$ti1,$ab1,$ab1
	 extrd,u	$ab1,31,32,$hi0
	ldd		8($xfer),$nm1
	fstds		${fnm1},8($xfer)
	 extrd,u	$ab1,31,32,$hi0
	 extrd,u	$ab1,63,32,$ab1
	ldw		4($tp),$ti0		; tp[j]
	 addl		$hi1,$nm1,$nm1
	ldd		-16($xfer),$ab0
	 addl		$ab1,$nm1,$nm1
	ldd		-16($xfer),$ab0
	ldd		-8($xfer),$nm0
	 extrd,u	$nm1,31,32,$hi1

@@ -549,46 +556,50 @@ $code.=<<___;

	.ALIGN		8
L\$parisc11
	ldw		-16($xfer),$hi0
	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[0]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	ldw		-12($xfer),$ablo
	ldw		-8($xfer),$nmhi0
	ldw		-16($xfer),$hi0
	ldw		-4($xfer),$nmlo0
	ldw		-8($xfer),$nmhi0
	fstds		${fab0},-16($xfer)
	fstds		${fnm0},-8($xfer)

	 ldo		8($idx),$idx		; j++++
	 add		$ablo,$nmlo0,$nmlo0	; discarded
	 addc		%r0,$nmhi0,$hi1
	ldw		0($xfer),$abhi
	ldw		4($xfer),$ablo
	ldw		0($xfer),$abhi
	nop

L\$1st_pa11
	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[0]
	 ldw		8($xfer),$nmhi1
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	 ldw		12($xfer),$nmlo1
	xmpyu		${fai}R,${fbi},${fab1}	; ap[j+1]*bp[0]
	fstds		${fab0},-16($xfer)
	flddx		$idx($ap),${fai}	; ap[j,j+1]
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j+1]*m
	fstds		${fnm0},-8($xfer)
	flddx		$idx($np),${fni}	; np[j,j+1]
	 add		$hi0,$ablo,$ablo
	fstds		${fab1},0($xfer)
	ldw		12($xfer),$nmlo1
	 addc		%r0,$abhi,$hi0
	fstds		${fnm1},8($xfer)
	ldw		8($xfer),$nmhi1
	 add		$ablo,$nmlo1,$nmlo1
	ldw		-16($xfer),$abhi
	fstds		${fab1},0($xfer)
	 addc		%r0,$nmhi1,$nmhi1
	ldw		-12($xfer),$ablo
	fstds		${fnm1},8($xfer)
	 add		$hi1,$nmlo1,$nmlo1
	ldw		-8($xfer),$nmhi0
	ldw		-12($xfer),$ablo
	 addc		%r0,$nmhi1,$hi1
	ldw		-4($xfer),$nmlo0
	ldw		-16($xfer),$abhi

	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[0]
	ldw		-4($xfer),$nmlo0
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	ldw		-8($xfer),$nmhi0
	 add		$hi0,$ablo,$ablo
	flddx		$idx($ap),${fai}	; ap[j,j+1]
	stw		$nmlo1,-4($tp)		; tp[j-1]
	 addc		%r0,$abhi,$hi0
	flddx		$idx($np),${fni}	; np[j,j+1]
	fstds		${fab0},-16($xfer)
	 add		$ablo,$nmlo0,$nmlo0
	stw		$nmlo1,-4($tp)		; tp[j-1]
	fstds		${fnm0},-8($xfer)
	 addc		%r0,$nmhi0,$nmhi0
	ldw		0($xfer),$abhi
	 add		$hi1,$nmlo0,$nmlo0
@@ -597,14 +608,10 @@ L\$1st_pa11
	addib,<>	8,$idx,L\$1st_pa11	; j++++
	 addc		%r0,$nmhi0,$hi1

	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[0]
	 ldw		8($xfer),$nmhi1
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	 ldw		12($xfer),$nmlo1
	xmpyu		${fai}R,${fbi},${fab1}	; ap[j]*bp[0]
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j]*m
	fstds		${fab0},-16($xfer)
	fstds		${fnm0},-8($xfer)
	 add		$hi0,$ablo,$ablo
	fstds		${fab1},0($xfer)
	 addc		%r0,$abhi,$hi0
@@ -677,65 +684,65 @@ L\$outer_pa11
	ldw		-4($xfer),$nmlo0
	ldw		0($xfer),$hi0		; high part

	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[i]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	fstds		${fab1},0($xfer)
	 addl		$abhi,$hi0,$hi0		; account carry bit
	fstds		${fnm1},8($xfer)
	 add		$ablo,$nmlo0,$nmlo0	; discarded
	ldw		0($tp),$ti1		; tp[1]
	 addc		%r0,$nmhi0,$hi1
	ldw		0($xfer),$abhi
	fstds		${fab0},-16($xfer)
	fstds		${fnm0},-8($xfer)
	ldw		4($xfer),$ablo
	ldw		0($xfer),$abhi

L\$inner_pa11
	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[i]
	 ldw		8($xfer),$nmhi1
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	 ldw		12($xfer),$nmlo1
	xmpyu		${fai}R,${fbi},${fab1}	; ap[j+1]*bp[i]
	fstds		${fab0},-16($xfer)
	flddx		$idx($ap),${fai}	; ap[j,j+1]
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j+1]*m
	fstds		${fnm0},-8($xfer)
	flddx		$idx($np),${fni}	; np[j,j+1]
	 add		$hi0,$ablo,$ablo
	ldw		4($tp),$ti0		; tp[j]
	 addc		%r0,$abhi,$abhi
	fstds		${fab1},0($xfer)
	ldw		12($xfer),$nmlo1
	 add		$ti1,$ablo,$ablo
	fstds		${fnm1},8($xfer)
	ldw		8($xfer),$nmhi1
	 addc		%r0,$abhi,$hi0
	ldw		-16($xfer),$abhi
	fstds		${fab1},0($xfer)
	 add		$ablo,$nmlo1,$nmlo1
	ldw		-12($xfer),$ablo
	fstds		${fnm1},8($xfer)
	 addc		%r0,$nmhi1,$nmhi1
	ldw		-8($xfer),$nmhi0
	ldw		-12($xfer),$ablo
	 add		$hi1,$nmlo1,$nmlo1
	ldw		-4($xfer),$nmlo0
	ldw		-16($xfer),$abhi
	 addc		%r0,$nmhi1,$hi1

	flddx		$idx($ap),${fai}	; ap[j,j+1]
	 addl,nuv	$hi0,$ablo,$ablo
	 addi		1,$abhi,$abhi
	flddx		$idx($np),${fni}	; np[j,j+1]
	 add		$ti0,$ablo,$ablo
	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[i]
	ldw		8($tp),$ti1		; tp[j]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	ldw		-4($xfer),$nmlo0
	 add		$hi0,$ablo,$ablo
	ldw		-8($xfer),$nmhi0
	 addc		%r0,$abhi,$abhi
	stw		$nmlo1,-4($tp)		; tp[j-1]
	 add		$ti0,$ablo,$ablo
	fstds		${fab0},-16($xfer)
	 addc		%r0,$abhi,$hi0
	ldw		8($tp),$ti1		; tp[j]
	 addl,nuv	$ablo,$nmlo0,$nmlo0
	 addi		1,$nmhi0,$nmhi0
	fstds		${fnm0},-8($xfer)
	 add		$ablo,$nmlo0,$nmlo0
	ldw		4($xfer),$ablo
	 addc		%r0,$nmhi0,$nmhi0
	ldw		0($xfer),$abhi
	 add		$hi1,$nmlo0,$nmlo0
	ldw		4($xfer),$ablo
	 stws,ma	$nmlo0,8($tp)		; tp[j-1]
	addib,<>	8,$idx,L\$inner_pa11	; j++++
	 addc		%r0,$nmhi0,$hi1

	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[i]
	 ldw		8($xfer),$nmhi1
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	 ldw		12($xfer),$nmlo1
	xmpyu		${fai}R,${fbi},${fab1}	; ap[j]*bp[i]
	fstds		${fab0},-16($xfer)
	ldw		12($xfer),$nmlo1
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j]*m
	fstds		${fnm0},-8($xfer)
	ldw		8($xfer),$nmhi1
	 add		$hi0,$ablo,$ablo
	ldw		4($tp),$ti0		; tp[j]
	 addc		%r0,$abhi,$abhi