#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. Rights for redistribution and usage in source and binary
# forms are granted according to the OpenSSL license.
# ====================================================================
#
#
# You might fail to appreciate this module's performance on the first
# try. If compared to "vanilla" linux-ia32-icc target, i.e. considered
# to be *the* best Intel C compiler without -KPIC, performance appears
# to be virtually identical... But try to re-configure with shared
# library support... Aha! Intel compiler "suddenly" lags behind by 30%
# [on P4, more on others]:-) And if compared to position-independent
# code generated by GNU C, this code performs *more* than *twice* as
# fast! Yes, all this buzz about PIC means that unlike other hand-
# coded implementations, this one was explicitly designed to be safe
# to use even in shared library context... This also means that this
# code isn't necessarily absolutely fastest "ever," because in order
# to achieve position independence an extra register has to be
# off-loaded to stack, which affects the benchmark result.
#
# Special note about instruction choice. Do you recall RC4_INT code
# performing poorly on P4? It might be the time to figure out why.
# RC4_INT code implies effective address calculations in base+offset*4
# form. The trouble is that offset scaling seems to have turned out to be
# the critical path... At least eliminating scaling resulted in a 2.8x RC4
# performance improvement [as you might recall]. As AES code is hungry
# for scaling too, I [try to] avoid the latter by favoring off-by-2
# shifts and masking the result with 0xFF<<2 instead of "boring" 0xFF.
#
# As was shown by Dean Gaudet <dean@arctic.org>, the above note turned
# out to be void. The performance improvement with off-by-2 shifts was
# observed on an intermediate implementation, which was spilling yet
# another register to the stack... The final offset*4 code below runs just
# a tad faster on P4, but exhibits up to 10% improvement on other cores.
#
# The second version is a "monolithic" replacement for aes_core.c, which in
# addition to AES_[de|en]crypt implements AES_set_[de|en]crypt_key. This
# made it possible to implement the little-endian variant of the
# algorithm without modifying the base C code. The motivating factor for
# the undertaken effort was that it appeared that in the tight IA-32
# register window the little-endian flavor could achieve slightly higher
# Instruction Level Parallelism, and it indeed resulted in up to 15%
# better performance on the most recent µ-archs...
#
# Third version adds AES_cbc_encrypt implementation, which resulted in
# up to 40% performance improvement of CBC benchmark results. 40% was
# observed on P4 core, where "overall" improvement coefficient, i.e. if
# compared to PIC generated by GCC and in CBC mode, was observed to be
# as large as 4x:-) CBC performance is virtually identical to ECB now
# and on some platforms even better, e.g. 17.6 "small" cycles/byte on
# Opteron, because certain function prologues and epilogues are
# effectively taken out of the loop...
#
# Version 3.2 implements compressed tables and prefetch of these tables
# in CBC[!] mode. The former means that 3/4 of table references are now
# misaligned, which unfortunately has a negative impact on elder IA-32
# implementations: Pentium suffered a 30% penalty, PIII - 10%.
#
# Version 3.3 avoids L1 cache aliasing between the stack frame and the
# S-boxes, and 3.4 avoids L1 cache aliasing even with the key schedule. The
# latter is achieved by copying the key schedule to a controlled place on
# the stack. This unfortunately has a rather strong impact on small-block
# CBC performance, ~2x deterioration on a 16-byte block if compared to 3.3.
#
# Version 3.5 checks if there is L1 cache aliasing between the user-supplied
# key schedule and the S-boxes and abstains from copying the former if
# there is none. This allows the end user to consciously retain small-block
# performance by aligning the key schedule in a specific manner.
#
# Version 3.6 compresses Td4 to 256 bytes and prefetches it in ECB.
#
# Current ECB performance numbers for 128-bit key in CPU cycles per
# processed byte [measure commonly used by AES benchmarkers] are:
#
#		small footprint		fully unrolled
# P4		24			22
# AMD K8	20			19
# PIII		25			23
# Pentium	81			78
#
# Version 3.7 reimplements outer rounds as "compact," meaning that the
# first and last rounds reference the compact 256-byte S-box. This means
# that the first round consumes a lot more CPU cycles and that encrypt
# and decrypt performance become asymmetric. Encrypt performance
# drops by 10-12%, while decrypt drops by 20-25%:-( The 256-byte S-box is
# aggressively pre-fetched.
#
# Version 4.0 effectively rolls back to 3.6 and instead implements an
# additional set of functions, _[x86|mmx]_AES_[en|de]crypt_compact,
# which use the 256-byte S-box exclusively. These functions are to be
# called in modes not concealing plain text, such as ECB, or when
# we're asked to process a smaller amount of data [or unconditionally
# on a hyper-threading CPU]. Currently they are called unconditionally from
# AES_[en|de]crypt, which affects all modes but CBC. The CBC routine
# still needs to be modified to switch between the slower and faster
# modes when appropriate... But in either case the benchmark landscape
# changes dramatically and the numbers below are CPU cycles per processed
# byte for a 128-bit key.
#
#		ECB encrypt	ECB decrypt	CBC large chunk
# P4		56[60]		84[100]		23
# AMD K8	48[44]		70[79]		18
# PIII		41[50]		61[91]		24
# Pentium	120		160		77
#
# Version 4.1 switches to compact S-box even in key schedule setup.
push(@INC,"perlasm","../../perlasm");
require "x86asm.pl";
&asm_init($ARGV[0],"aes-586.pl",$ARGV[$#ARGV] eq "386");
$s0="eax";
$s1="ebx";
$s2="ecx";
$s3="edx";
$key="edi";
$acc="esi";
$tbl="ebp";
sub _data_word() { my $i; while(defined($i=shift)) { &data_word($i,$i); } }
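# Note on _data_word(): each table entry is deliberately emitted *twice*.
# This appears to be the "compressed tables" layout mentioned in the
# version 3.2 note above: with 8-byte stride and a duplicated entry, a
# single [possibly misaligned] dword load at byte offset 0..3 returns a
# byte-rotated view of the entry, so no separate pre-rotated copies of
# Te/Td need to be kept. E.g. the idiom used throughout the round code:
#
#	&mov	($out,&DWP(0,$te,$idx,8));	# Te0[idx]
#	&xor	($out,&DWP(3,$te,$idx,8));	# byte-rotated Te0[idx]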
$compromise=0; # $compromise=128 abstains from copying key
# schedule to stack when encrypting inputs
# shorter than 128 bytes at the cost of
# risking aliasing with S-boxes. In return
# you get way better, up to +70%, small block
# performance.
$small_footprint=1; # $small_footprint=1 code is ~5% slower [on
# recent µ-archs], but ~5 times smaller!
# I favor compact code to minimize cache
# contention and in hope to "collect" 5% back
# in real-life applications...
$vertical_spin=0; # shift "vertically" defaults to 0, because of
# its proof-of-concept status...
# Note that there is no decvert(), and the last encryption round is
# performed with "horizontal" shifts. This is because this "vertical"
# implementation [one which groups shifts on a given $s[i] to form a
# "column," unlike the "horizontal" one, which groups shifts on different
# $s[i] to form a "row"] is work in progress. It was observed to run a
# few percent faster on Intel cores, but not on AMD. On the AMD K8 core
# it's a whole 12% slower:-( So we face a trade-off... Shall it be resolved
# some day? Till then the code is considered experimental and by
# default remains dormant...
sub encvert()
{ my ($te,@s) = @_;
my $v0 = $acc, $v1 = $key;
&mov ($v0,$s[3]); # copy s3
&mov (&DWP(4,"esp"),$s[2]); # save s2
&mov ($v1,$s[0]); # copy s0
&mov (&DWP(8,"esp"),$s[1]); # save s1
&movz ($s[2],&HB($s[0]));
&and ($s[0],0xFF);
&mov ($s[0],&DWP(0,$te,$s[0],8)); # s0>>0
&shr ($v1,16);
&mov ($s[3],&DWP(3,$te,$s[2],8)); # s0>>8
&movz ($s[1],&HB($v1));
&and ($v1,0xFF);
&mov ($s[2],&DWP(2,$te,$v1,8)); # s0>>16
&mov ($v1,$v0);
&mov ($s[1],&DWP(1,$te,$s[1],8)); # s0>>24
&and ($v0,0xFF);
&xor ($s[3],&DWP(0,$te,$v0,8)); # s3>>0
&movz ($v0,&HB($v1));
&shr ($v1,16);
&xor ($s[2],&DWP(3,$te,$v0,8)); # s3>>8
&movz ($v0,&HB($v1));
&and ($v1,0xFF);
&xor ($s[1],&DWP(2,$te,$v1,8)); # s3>>16
&mov ($v1,&DWP(4,"esp")); # restore s2
&xor ($s[0],&DWP(1,$te,$v0,8)); # s3>>24
&mov ($v0,$v1);
&and ($v1,0xFF);
&xor ($s[2],&DWP(0,$te,$v1,8)); # s2>>0
&movz ($v1,&HB($v0));
&shr ($v0,16);
&xor ($s[1],&DWP(3,$te,$v1,8)); # s2>>8
&movz ($v1,&HB($v0));
&and ($v0,0xFF);
&xor ($s[0],&DWP(2,$te,$v0,8)); # s2>>16
&mov ($v0,&DWP(8,"esp")); # restore s1
&xor ($s[3],&DWP(1,$te,$v1,8)); # s2>>24
&mov ($v1,$v0);
&and ($v0,0xFF);
&xor ($s[1],&DWP(0,$te,$v0,8)); # s1>>0
&movz ($v0,&HB($v1));
&shr ($v1,16);
&xor ($s[0],&DWP(3,$te,$v0,8)); # s1>>8
&movz ($v0,&HB($v1));
&and ($v1,0xFF);
&xor ($s[3],&DWP(2,$te,$v1,8)); # s1>>16
&mov ($key,&DWP(20,"esp")); # reincarnate v1 as key
&xor ($s[2],&DWP(1,$te,$v0,8)); # s1>>24
}
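# With only $acc and $key available as scratch, encvert() parks s2 and s1
# in the 4(%esp) and 8(%esp) slots of the caller-provided frame and pulls
# them back as it works through the columns; 20(%esp) is still the saved
# key pointer, reloaded at the very end.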
# Another experimental routine, which features "horizontal spin," but
# eliminates one reference to stack. Strangely enough runs slower...
sub enchoriz()
{ my $v0 = $key, $v1 = $acc;
&movz ($v0,&LB($s0)); # 3, 2, 1, 0*
&rotr ($s2,8); # 8,11,10, 9
&mov ($v1,&DWP(0,$te,$v0,8)); # 0
&movz ($v0,&HB($s1)); # 7, 6, 5*, 4
&rotr ($s3,16); # 13,12,15,14
&xor ($v1,&DWP(3,$te,$v0,8)); # 5
&movz ($v0,&HB($s2)); # 8,11,10*, 9
&rotr ($s0,16); # 1, 0, 3, 2
&xor ($v1,&DWP(2,$te,$v0,8)); # 10
&movz ($v0,&HB($s3)); # 13,12,15*,14
&xor ($v1,&DWP(1,$te,$v0,8)); # 15, t[0] collected
&mov (&DWP(4,"esp"),$v1); # t[0] saved
&movz ($v0,&LB($s1)); # 7, 6, 5, 4*
&shr ($s1,16); # -, -, 7, 6
&mov ($v1,&DWP(0,$te,$v0,8)); # 4
&movz ($v0,&LB($s3)); # 13,12,15,14*
&xor ($v1,&DWP(2,$te,$v0,8)); # 14
&movz ($v0,&HB($s0)); # 1, 0, 3*, 2
&and ($s3,0xffff0000); # 13,12, -, -
&xor ($v1,&DWP(1,$te,$v0,8)); # 3
&movz ($v0,&LB($s2)); # 8,11,10, 9*
&or ($s3,$s1); # 13,12, 7, 6
&xor ($v1,&DWP(3,$te,$v0,8)); # 9, t[1] collected
&mov ($s1,$v1); # s[1]=t[1]
&movz ($v0,&LB($s0)); # 1, 0, 3, 2*
&shr ($s2,16); # -, -, 8,11
&mov ($v1,&DWP(2,$te,$v0,8)); # 2
&movz ($v0,&HB($s3)); # 13,12, 7*, 6
&xor ($v1,&DWP(1,$te,$v0,8)); # 7
&movz ($v0,&HB($s2)); # -, -, 8*,11
&xor ($v1,&DWP(0,$te,$v0,8)); # 8
&mov ($v0,$s3);
&shr ($v0,24); # 13
&xor ($v1,&DWP(3,$te,$v0,8)); # 13, t[2] collected
&movz ($v0,&LB($s2)); # -, -, 8,11*
&shr ($s0,24); # 1*
&mov ($s2,&DWP(1,$te,$v0,8)); # 11
&xor ($s2,&DWP(3,$te,$s0,8)); # 1
&mov ($s0,&DWP(4,"esp")); # s[0]=t[0]
&movz ($v0,&LB($s3)); # 13,12, 7, 6*
&shr ($s3,16); # , ,13,12
&xor ($s2,&DWP(2,$te,$v0,8)); # 6
&mov ($key,&DWP(20,"esp")); # reincarnate v0 as key
&and ($s3,0xff); # , ,13,12*
&mov ($s3,&DWP(0,$te,$s3,8)); # 12
&xor ($s3,$s2); # s[2]=t[3] collected
&mov ($s2,$v1); # s[2]=t[2]
}
# More experimental code... MMX one... Even though this one eliminates
# *all* references to stack, it's not faster...
sub mmx_encbody()
{
&movz ($acc,&LB("eax")); # 0
&mov ("ecx",&DWP(0,$tbl,$acc,8)); # 0
&pshufw ("mm2","mm0",0x0d); # 7, 6, 3, 2
&movz ("edx",&HB("eax")); # 1
&mov ("edx",&DWP(3,$tbl,"edx",8)); # 1
&shr ("eax",16); # 5, 4
&movz ($acc,&LB("ebx")); # 10
&xor ("ecx",&DWP(2,$tbl,$acc,8)); # 10
&pshufw ("mm6","mm4",0x08); # 13,12, 9, 8
&movz ($acc,&HB("ebx")); # 11
&xor ("edx",&DWP(1,$tbl,$acc,8)); # 11
&shr ("ebx",16); # 15,14
&movz ($acc,&HB("eax")); # 5
&xor ("ecx",&DWP(3,$tbl,$acc,8)); # 5
&movq ("mm3",QWP(16,$key));
&movz ($acc,&HB("ebx")); # 15
&xor ("ecx",&DWP(1,$tbl,$acc,8)); # 15
&movd ("mm0","ecx"); # t[0] collected
&movz ($acc,&LB("eax")); # 4
&mov ("ecx",&DWP(0,$tbl,$acc,8)); # 4
&movd ("eax","mm2"); # 7, 6, 3, 2
&movz ($acc,&LB("ebx")); # 14
&xor ("ecx",&DWP(2,$tbl,$acc,8)); # 14
&movd ("ebx","mm6"); # 13,12, 9, 8
&movz ($acc,&HB("eax")); # 3
&xor ("ecx",&DWP(1,$tbl,$acc,8)); # 3
&movz ($acc,&HB("ebx")); # 9
&xor ("ecx",&DWP(3,$tbl,$acc,8)); # 9
&movd ("mm1","ecx"); # t[1] collected
&movz ($acc,&LB("eax")); # 2
&mov ("ecx",&DWP(2,$tbl,$acc,8)); # 2
&shr ("eax",16); # 7, 6
&punpckldq ("mm0","mm1"); # t[0,1] collected
&movz ($acc,&LB("ebx")); # 8
&xor ("ecx",&DWP(0,$tbl,$acc,8)); # 8
&shr ("ebx",16); # 13,12
&movz ($acc,&HB("eax")); # 7
&xor ("ecx",&DWP(1,$tbl,$acc,8)); # 7
&pxor ("mm0","mm3");
&movz ("eax",&LB("eax")); # 6
&xor ("edx",&DWP(2,$tbl,"eax",8)); # 6
&pshufw ("mm1","mm0",0x08); # 5, 4, 1, 0
&movz ($acc,&HB("ebx")); # 13
&xor ("ecx",&DWP(3,$tbl,$acc,8)); # 13
&xor ("ecx",&DWP(24,$key)); # t[2]
&movd ("mm4","ecx"); # t[2] collected
&movz ("ebx",&LB("ebx")); # 12
&xor ("edx",&DWP(0,$tbl,"ebx",8)); # 12
&shr ("ecx",16);
&movd ("eax","mm1"); # 5, 4, 1, 0
&mov ("ebx",&DWP(28,$key)); # t[3]
&xor ("ebx","edx");
&movd ("mm5","ebx"); # t[3] collected
&and ("ebx",0xffff0000);
&or ("ebx","ecx");
&punpckldq ("mm4","mm5"); # t[2,3] collected
}
######################################################################
# "Compact" block function
######################################################################
sub enccompact()
{ my $Fn = \&mov;
while ($#_>5) { pop(@_); $Fn=sub{}; }
my ($i,$te,@s)=@_;
my $tmp = $key;
my $out = $i==3?$s[0]:$acc;
# $Fn is used in first compact round and its purpose is to
# void restoration of some values from stack, so that after
# 4xenccompact with extra argument $key value is left there...
if ($i==3) { &$Fn ($key,&DWP(20,"esp")); }##%edx
else { &mov ($out,$s[0]); }
&and ($out,0xFF);
if ($i==1) { &shr ($s[0],16); }#%ebx[1]
if ($i==2) { &shr ($s[0],24); }#%ecx[2]
&movz ($out,&BP(-128,$te,$out,1));
if ($i==3) { $tmp=$s[1]; }##%eax
&movz ($tmp,&HB($s[1]));
&movz ($tmp,&BP(-128,$te,$tmp,1));
&shl ($tmp,8);
&xor ($out,$tmp);
if ($i==3) { $tmp=$s[2]; &mov ($s[1],&DWP(4,"esp")); }##%ebx
else { &mov ($tmp,$s[2]);
&shr ($tmp,16); }
if ($i==2) { &and ($s[1],0xFF); }#%edx[2]
&and ($tmp,0xFF);
&movz ($tmp,&BP(-128,$te,$tmp,1));
&shl ($tmp,16);
&xor ($out,$tmp);
if ($i==3) { $tmp=$s[3]; &mov ($s[2],&DWP(8,"esp")); }##%ecx
elsif($i==2){ &movz ($tmp,&HB($s[3])); }#%ebx[2]
else { &mov ($tmp,$s[3]);
&shr ($tmp,24); }
&movz ($tmp,&BP(-128,$te,$tmp,1));
&shl ($tmp,24);
&xor ($out,$tmp);
if ($i<2) { &mov (&DWP(4+4*$i,"esp"),$out); }
if ($i==3) { &mov ($s[3],$acc); }
&comment();
}
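# A rough reading of what one enccompact() call emits [pseudo-code, not
# part of the generated output]: a quarter of a round that uses only the
# 256-byte Te4 table. $tbl is expected to point 128 bytes into Te4, so
# every &BP(-128,$te,$idx,1) reference fits into a signed 8-bit
# displacement. For $i==0 the assembled word is essentially
#
#	out  =  Te4[ s0        & 0xff]
#	out ^= (Te4[(s1 >>  8) & 0xff]) <<  8
#	out ^= (Te4[(s2 >> 16) & 0xff]) << 16
#	out ^= (Te4[(s3 >> 24) & 0xff]) << 24
#
# i.e. SubBytes+ShiftRows only; MixColumns is applied separately by
# enctransform() in the inner rounds.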
sub enctransform()
{ my @s = ($s0,$s1,$s2,$s3);
my $i = shift;
my $tmp = $tbl;
my $r2 = $key ;
&mov ($acc,$s[$i]);
&and ($acc,0x80808080);
&mov ($tmp,$acc);
&mov ($r2,$s[$i]);
&shr ($tmp,7);
&and ($r2,0x7f7f7f7f);
&sub ($acc,$tmp);
&lea ($r2,&DWP(0,$r2,$r2));
&and ($acc,0x1b1b1b1b);
&mov ($tmp,$s[$i]);
&xor ($acc,$r2); # r2
&xor ($s[$i],$acc); # r0 ^ r2
&rotl ($s[$i],24);
&xor ($s[$i],$acc); # ROTATE(r2^r0,24) ^ r2
&rotr ($tmp,16);
&xor ($s[$i],$tmp);
&rotr ($tmp,8);
&xor ($s[$i],$tmp);
}
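# What enctransform() boils down to [a paraphrase of the code above, with
# ROTATE meaning rotate left as in &rotl]: MixColumns on one packed column
# a = $s[$i], with the per-byte GF(2^8) doubling done SIMD-within-a-register:
#
#	hi = a & 0x80808080
#	r2 = ((a & 0x7f7f7f7f) << 1) ^ ((hi - (hi >> 7)) & 0x1b1b1b1b)
#	$s[$i] = r2 ^ ROTATE(a ^ r2, 24) ^ ROTATE(a, 16) ^ ROTATE(a, 8)
#
# which is the standard rotation form of MixColumns.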
&public_label("AES_Te");
&function_begin_B("_x86_AES_encrypt_compact");
# note that caller is expected to allocate stack frame for me!
&mov (&DWP(20,"esp"),$key); # save key
&xor ($s0,&DWP(0,$key)); # xor with key
&xor ($s1,&DWP(4,$key));
&xor ($s2,&DWP(8,$key));
&xor ($s3,&DWP(12,$key));
&mov ($acc,&DWP(240,$key)); # load key->rounds
&lea ($acc,&DWP(-2,$acc,$acc));
&lea ($acc,&DWP(0,$key,$acc,8));
&mov (&DWP(24,"esp"),$acc); # end of key schedule
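# i.e. 24(%esp) = key + 16*(rounds-1): $acc becomes 2*rounds-2 and is then
# scaled by 8 in the second lea. The loop below therefore covers the inner
# rounds only and leaves the final round to the straight-line code after it.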
# prefetch Te4
&mov ($key,&DWP(0-128,$tbl));
&mov ($acc,&DWP(32-128,$tbl));
&mov ($key,&DWP(64-128,$tbl));
&mov ($acc,&DWP(96-128,$tbl));
&mov ($key,&DWP(128-128,$tbl));
&mov ($acc,&DWP(160-128,$tbl));
&mov ($key,&DWP(192-128,$tbl));
&mov ($acc,&DWP(224-128,$tbl));
&set_label("loop",16);
&enccompact(0,$tbl,$s0,$s1,$s2,$s3,1);
&enccompact(1,$tbl,$s1,$s2,$s3,$s0,1);
&enccompact(2,$tbl,$s2,$s3,$s0,$s1,1);
&enccompact(3,$tbl,$s3,$s0,$s1,$s2,1);
&enctransform(2);
&enctransform(3);
&enctransform(0);
&enctransform(1);
&mov ($key,&DWP(20,"esp"));
&mov ($tbl,&DWP(28,"esp"));
&add ($key,16); # advance rd_key
&xor ($s0,&DWP(0,$key));
&xor ($s1,&DWP(4,$key));
&xor ($s2,&DWP(8,$key));
&xor ($s3,&DWP(12,$key));
&cmp ($key,&DWP(24,"esp"));
&mov (&DWP(20,"esp"),$key);
&jb (&label("loop"));
&enccompact(0,$tbl,$s0,$s1,$s2,$s3);
&enccompact(1,$tbl,$s1,$s2,$s3,$s0);
&enccompact(2,$tbl,$s2,$s3,$s0,$s1);
&enccompact(3,$tbl,$s3,$s0,$s1,$s2);
&xor ($s0,&DWP(16,$key));
&xor ($s1,&DWP(20,$key));
&xor ($s2,&DWP(24,$key));
&xor ($s3,&DWP(28,$key));
&ret ();
&function_end_B("_x86_AES_encrypt_compact");
######################################################################
# "Compact" MMX block function.
######################################################################
#
# Performance is not actually extraordinary in comparison to pure
# x86 code. In particular encrypt performance is virtually the same.
# Decrypt performance on the other hand is 15-20% better on newer
# µ-archs [but we're thankful for *any* improvement here], and ~50%
# better on PIII:-) And additionally on the pros side this code
# eliminates redundant references to stack and thus relieves/
# minimizes the pressure on the memory bus.
#
# MMX register layout                           lsb
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |          mm4          |          mm0          |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |     s3    |     s2    |     s1    |     s0    |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
#
# Indexes translate as s[N/4]>>(8*(N%4)), e.g. 5 means s1>>8.
# In these terms the encryption and decryption "compact" permutation
# matrices can be depicted as follows:
#
# encryption lsb # decryption lsb
# +----++----+----+----+----+ # +----++----+----+----+----+
# | t0 || 15 | 10 | 5 | 0 | # | t0 || 7 | 10 | 13 | 0 |
# +----++----+----+----+----+ # +----++----+----+----+----+
# | t1 || 3 | 14 | 9 | 4 | # | t1 || 11 | 14 | 1 | 4 |
# +----++----+----+----+----+ # +----++----+----+----+----+
# | t2 || 7 | 2 | 13 | 8 | # | t2 || 15 | 2 | 5 | 8 |
# +----++----+----+----+----+ # +----++----+----+----+----+
# | t3 || 11 | 6 | 1 | 12 | # | t3 || 3 | 6 | 9 | 12 |
# +----++----+----+----+----+ # +----++----+----+----+----+
#
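# Reading the encryption matrix row by row: t0 is assembled from bytes
# 0, 5, 10, 15, i.e. (s0>>0, s1>>8, s2>>16, s3>>24) & 0xff, which is just
# ShiftRows expressed on the packed little-endian words; the decryption
# matrix [0, 13, 10, 7 for t0] is the inverse shift. The pshufw-based code
# below merely reshuffles 16-bit lanes so that the bytes of each output
# word can be picked up with cheap &LB/&HB accesses.
#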
######################################################################
# Why not xmm registers? Short answer: it was actually tested and
# was not any faster, but rather the contrary, most notably on Intel CPUs.
# Longer answer: the main advantage of using mm registers is that movd
# latency is lower, especially on Intel P4. While there are twice as many
# arithmetic instructions, they can be scheduled every cycle and not every
# second one as when they operate on xmm registers, so that "arithmetic
# throughput" remains virtually the same. And finally the code can be
# executed even on elder MMX-only CPUs:-)
sub mmx_enccompact()
{
&pshufw ("mm1","mm0",0x08); # 5, 4, 1, 0
&pshufw ("mm5","mm4",0x0d); # 15,14,11,10
&movd ("eax","mm1"); # 5, 4, 1, 0
&movd ("ebx","mm5"); # 15,14,11,10
&movz ($acc,&LB("eax")); # 0
&movz ("ecx",&BP(-128,$tbl,$acc,1)); # 0
&pshufw ("mm2","mm0",0x0d); # 7, 6, 3, 2
&movz ("edx",&HB("eax")); # 1
&movz ("edx",&BP(-128,$tbl,"edx",1)); # 1
&shl ("edx",8); # 1
&shr ("eax",16); # 5, 4
&movz ($acc,&LB("ebx")); # 10
&movz ($acc,&BP(-128,$tbl,$acc,1)); # 10
&shl ($acc,16); # 10
&or ("ecx",$acc); # 10
&pshufw ("mm6","mm4",0x08); # 13,12, 9, 8
&movz ($acc,&HB("ebx")); # 11
&movz ($acc,&BP(-128,$tbl,$acc,1)); # 11
&shl ($acc,24); # 11
&or ("edx",$acc); # 11
&shr ("ebx",16); # 15,14
&movz ($acc,&HB("eax")); # 5
&movz ($acc,&BP(-128,$tbl,$acc,1)); # 5
&shl ($acc,8); # 5
&or ("ecx",$acc); # 5
&movz ($acc,&HB("ebx")); # 15
&movz ($acc,&BP(-128,$tbl,$acc,1)); # 15
&shl ($acc,24); # 15
&or ("ecx",$acc); # 15
&movd ("mm0","ecx"); # t[0] collected
&movz ($acc,&LB("eax")); # 4
&movz ("ecx",&BP(-128,$tbl,$acc,1)); # 4
&movd ("eax","mm2"); # 7, 6, 3, 2
&movz ($acc,&LB("ebx")); # 14
&movz ($acc,&BP(-128,$tbl,$acc,1)); # 14
&shl ($acc,16); # 14
&or ("ecx",$acc); # 14
&movd ("ebx","mm6"); # 13,12, 9, 8
&movz ($acc,&HB("eax")); # 3
&movz ($acc,&BP(-128,$tbl,$acc,1)); # 3
&shl ($acc,24); # 3
&or ("ecx",$acc); # 3
&movz ($acc,&HB("ebx")); # 9
&movz ($acc,&BP(-128,$tbl,$acc,1)); # 9
&shl ($acc,8); # 9
&or ("ecx",$acc); # 9
&movd ("mm1","ecx"); # t[1] collected
&movz ($acc,&LB("ebx")); # 8
&movz ("ecx",&BP(-128,$tbl,$acc,1)); # 8
&shr ("ebx",16); # 13,12
&movz ($acc,&LB("eax")); # 2
&movz ($acc,&BP(-128,$tbl,$acc,1)); # 2
&shl ($acc,16); # 2
&or ("ecx",$acc); # 2
&shr ("eax",16); # 7, 6
&punpckldq ("mm0","mm1"); # t[0,1] collected
&movz ($acc,&HB("eax")); # 7
&movz ($acc,&BP(-128,$tbl,$acc,1)); # 7
&shl ($acc,24); # 7
&or ("ecx",$acc); # 7
&and ("eax",0xff); # 6
&movz ("eax",&BP(-128,$tbl,"eax",1)); # 6
&shl ("eax",16); # 6
&or ("edx","eax"); # 6
&movz ($acc,&HB("ebx")); # 13
&movz ($acc,&BP(-128,$tbl,$acc,1)); # 13
&shl ($acc,8); # 13
&or ("ecx",$acc); # 13
&movd ("mm4","ecx"); # t[2] collected
&and ("ebx",0xff); # 12
&movz ("ebx",&BP(-128,$tbl,"ebx",1)); # 12
&or ("edx","ebx"); # 12
&movd ("mm5","edx"); # t[3] collected
&punpckldq ("mm4","mm5"); # t[2,3] collected
}
&public_label("AES_Te");
&function_begin_B("_mmx_AES_encrypt_compact");
&pxor ("mm0",&QWP(0,$key)); # 7, 6, 5, 4, 3, 2, 1, 0
&pxor ("mm4",&QWP(8,$key)); # 15,14,13,12,11,10, 9, 8
# note that caller is expected to allocate stack frame for me!
&mov ($acc,&DWP(240,$key)); # load key->rounds
&lea ($acc,&DWP(-2,$acc,$acc));
&lea ($acc,&DWP(0,$key,$acc,8));
&mov (&DWP(24,"esp"),$acc); # end of key schedule
&mov ($s0,0x1b1b1b1b); # magic constant
&mov (&DWP(8,"esp"),$s0);
&mov (&DWP(12,"esp"),$s0);
# prefetch Te4
&mov ($s0,&DWP(0-128,$tbl));
&mov ($s1,&DWP(32-128,$tbl));
&mov ($s2,&DWP(64-128,$tbl));
&mov ($s3,&DWP(96-128,$tbl));
&mov ($s0,&DWP(128-128,$tbl));
&mov ($s1,&DWP(160-128,$tbl));
&mov ($s2,&DWP(192-128,$tbl));
&mov ($s3,&DWP(224-128,$tbl));
&set_label("loop",16);
&mmx_enccompact();
&add ($key,16);
&cmp ($key,&DWP(24,"esp"));
&ja (&label("out"));
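# The block below is the MMX counterpart of enctransform(): the same
# MixColumns identity applied to the two columns in mm0 and the two in mm4
# at once. pcmpgtb against zero rebuilds the "high bit set" byte mask,
# which is ANDed with the 0x1b1b1b1b constant parked at 8(%esp), paddb
# performs the per-byte doubling, and the rotations are done with
# pshufw/pslld/psrld instead of rotl/rotr.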
&movq ("mm2",&QWP(8,"esp"));
&pxor ("mm3","mm3"); &pxor ("mm7","mm7");
&movq ("mm1","mm0"); &movq ("mm5","mm4"); # r0
&pcmpgtb("mm3","mm0"); &pcmpgtb("mm7","mm4");
&pand ("mm3","mm2"); &pand ("mm7","mm2");
&pshufw ("mm2","mm0",0xb1); &pshufw ("mm6","mm4",0xb1);# ROTATE(r0,16)
&paddb ("mm0","mm0"); &paddb ("mm4","mm4");
&pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # = r2
&pshufw ("mm3","mm2",0xb1); &pshufw ("mm7","mm6",0xb1);# r0
&pxor ("mm1","mm0"); &pxor ("mm5","mm4"); # r0^r2
&pxor ("mm0","mm2"); &pxor ("mm4","mm6"); # ^= ROTATE(r0,16)
&movq ("mm2","mm3"); &movq ("mm6","mm7");
&pslld ("mm3",8); &pslld ("mm7",8);
&psrld ("mm2",24); &psrld ("mm6",24);
&pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= r0<<8
&pxor ("mm0","mm2"); &pxor ("mm4","mm6"); # ^= r0>>24
&movq ("mm3","mm1"); &movq ("mm7","mm5");
&movq ("mm2",&QWP(0,$key)); &movq ("mm6",&QWP(8,$key));
&psrld ("mm1",8); &psrld ("mm5",8);
&pslld ("mm3",24); &pslld ("mm7",24);
&pxor ("mm0","mm1"); &pxor ("mm4","mm5"); # ^= (r2^r0)<<8
&pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= (r2^r0)>>24
&pxor ("mm0","mm2"); &pxor ("mm4","mm6");
&jmp (&label("loop"));
&set_label("out",16);
&pxor ("mm0",&QWP(0,$key));
&pxor ("mm4",&QWP(8,$key));
&ret ();
&function_end_B("_mmx_AES_encrypt_compact");
######################################################################
# Vanilla block function.
######################################################################
sub encstep()
{ my ($i,$te,@s) = @_;
my $tmp = $key;
my $out = $i==3?$s[0]:$acc;
# lines marked with #%e?x[i] denote "reordered" instructions...
if ($i==3) { &mov ($key,&DWP(20,"esp")); }##%edx
else { &mov ($out,$s[0]);
&and ($out,0xFF); }
if ($i==1) { &shr ($s[0],16); }#%ebx[1]
if ($i==2) { &shr ($s[0],24); }#%ecx[2]
&mov ($out,&DWP(0,$te,$out,8));
if ($i==3) { $tmp=$s[1]; }##%eax
&movz ($tmp,&HB($s[1]));
&xor ($out,&DWP(3,$te,$tmp,8));
if ($i==3) { $tmp=$s[2]; &mov ($s[1],&DWP(4,"esp")); }##%ebx
else { &mov ($tmp,$s[2]);
&shr ($tmp,16); }
if ($i==2) { &and ($s[1],0xFF); }#%edx[2]
&and ($tmp,0xFF);
&xor ($out,&DWP(2,$te,$tmp,8));
if ($i==3) { $tmp=$s[3]; &mov ($s[2],&DWP(8,"esp")); }##%ecx
elsif($i==2){ &movz ($tmp,&HB($s[3])); }#%ebx[2]
else { &mov ($tmp,$s[3]);
&shr ($tmp,24) }
&xor ($out,&DWP(1,$te,$tmp,8));
if ($i<2) { &mov (&DWP(4+4*$i,"esp"),$out); }
if ($i==3) { &mov ($s[3],$acc); }
&comment();
}
sub enclast()
{ my ($i,$te,@s)=@_;
my $tmp = $key;
my $out = $i==3?$s[0]:$acc;
if ($i==3) { &mov ($key,&DWP(20,"esp")); }##%edx
else { &mov ($out,$s[0]); }
&and ($out,0xFF);
if ($i==1) { &shr ($s[0],16); }#%ebx[1]
if ($i==2) { &shr ($s[0],24); }#%ecx[2]
&mov ($out,&DWP(2,$te,$out,8));
&and ($out,0x000000ff);
if ($i==3) { $tmp=$s[1]; }##%eax
&movz ($tmp,&HB($s[1]));
&mov ($tmp,&DWP(0,$te,$tmp,8));
&and ($tmp,0x0000ff00);
&xor ($out,$tmp);
if ($i==3) { $tmp=$s[2]; &mov ($s[1],&DWP(4,"esp")); }##%ebx
else { &mov ($tmp,$s[2]);
&shr ($tmp,16); }
if ($i==2) { &and ($s[1],0xFF); }#%edx[2]
&and ($tmp,0xFF);
&mov ($tmp,&DWP(0,$te,$tmp,8));
&and ($tmp,0x00ff0000);
&xor ($out,$tmp);
if ($i==3) { $tmp=$s[3]; &mov ($s[2],&DWP(8,"esp")); }##%ecx
elsif($i==2){ &movz ($tmp,&HB($s[3])); }#%ebx[2]
else { &mov ($tmp,$s[3]);
&shr ($tmp,24); }
&mov ($tmp,&DWP(2,$te,$tmp,8));
&and ($tmp,0xff000000);
&xor ($out,$tmp);
if ($i<2) { &mov (&DWP(4+4*$i,"esp"),$out); }
if ($i==3) { &mov ($s[3],$acc); }
}
&function_begin_B("_x86_AES_encrypt");
if ($vertical_spin) {
# I need high parts of volatile registers to be accessible...
&exch ($s1="edi",$key="ebx");
&mov ($s2="esi",$acc="ecx");
}
# note that caller is expected to allocate stack frame for me!
&mov (&DWP(20,"esp"),$key); # save key
&xor ($s0,&DWP(0,$key)); # xor with key
&xor ($s1,&DWP(4,$key));
&xor ($s2,&DWP(8,$key));
&xor ($s3,&DWP(12,$key));
&mov ($acc,&DWP(240,$key)); # load key->rounds
if ($small_footprint) {
&lea ($acc,&DWP(-2,$acc,$acc));
&lea ($acc,&DWP(0,$key,$acc,8));
&mov (&DWP(24,"esp"),$acc); # end of key schedule
&set_label("loop",16);
if ($vertical_spin) {
&encvert($tbl,$s0,$s1,$s2,$s3);
} else {
&encstep(0,$tbl,$s0,$s1,$s2,$s3);
&encstep(1,$tbl,$s1,$s2,$s3,$s0);
&encstep(2,$tbl,$s2,$s3,$s0,$s1);
&encstep(3,$tbl,$s3,$s0,$s1,$s2);
}
&add ($key,16); # advance rd_key
&xor ($s0,&DWP(0,$key));
&xor ($s1,&DWP(4,$key));
&xor ($s2,&DWP(8,$key));
&xor ($s3,&DWP(12,$key));
&cmp ($key,&DWP(24,"esp"));
&mov (&DWP(20,"esp"),$key);
&jb (&label("loop"));
}
else {
&cmp ($acc,10);
&jle (&label("10rounds"));
&cmp ($acc,12);
&jle (&label("12rounds"));
&set_label("14rounds",4);
for ($i=1;$i<3;$i++) {
if ($vertical_spin) {
&encvert($tbl,$s0,$s1,$s2,$s3);
} else {
&encstep(0,$tbl,$s0,$s1,$s2,$s3);
&encstep(1,$tbl,$s1,$s2,$s3,$s0);
&encstep(2,$tbl,$s2,$s3,$s0,$s1);
&encstep(3,$tbl,$s3,$s0,$s1,$s2);
}
&xor ($s0,&DWP(16*$i+0,$key));
&xor ($s1,&DWP(16*$i+4,$key));
&xor ($s2,&DWP(16*$i+8,$key));
&xor ($s3,&DWP(16*$i+12,$key));
}
&add ($key,32);
&mov (&DWP(20,"esp"),$key); # advance rd_key
&set_label("12rounds",4);
for ($i=1;$i<3;$i++) {
if ($vertical_spin) {
&encvert($tbl,$s0,$s1,$s2,$s3);
} else {
&encstep(0,$tbl,$s0,$s1,$s2,$s3);
&encstep(1,$tbl,$s1,$s2,$s3,$s0);
&encstep(2,$tbl,$s2,$s3,$s0,$s1);
&encstep(3,$tbl,$s3,$s0,$s1,$s2);
}
&xor ($s0,&DWP(16*$i+0,$key));
&xor ($s1,&DWP(16*$i+4,$key));
&xor ($s2,&DWP(16*$i+8,$key));
&xor ($s3,&DWP(16*$i+12,$key));
}
&add ($key,32);
&mov (&DWP(20,"esp"),$key); # advance rd_key
&set_label("10rounds",4);
for ($i=1;$i<10;$i++) {
if ($vertical_spin) {
&encvert($tbl,$s0,$s1,$s2,$s3);
} else {
&encstep(0,$tbl,$s0,$s1,$s2,$s3);
&encstep(1,$tbl,$s1,$s2,$s3,$s0);
&encstep(2,$tbl,$s2,$s3,$s0,$s1);
&encstep(3,$tbl,$s3,$s0,$s1,$s2);
}
&xor ($s0,&DWP(16*$i+0,$key));
&xor ($s1,&DWP(16*$i+4,$key));
&xor ($s2,&DWP(16*$i+8,$key));
&xor ($s3,&DWP(16*$i+12,$key));
}
}
if ($vertical_spin) {
# "reincarnate" some registers for "horizontal" spin...
&mov ($s1="ebx",$key="edi");
&mov ($s2="ecx",$acc="esi");
}
&enclast(0,$tbl,$s0,$s1,$s2,$s3);
&enclast(1,$tbl,$s1,$s2,$s3,$s0);
&enclast(2,$tbl,$s2,$s3,$s0,$s1);
&enclast(3,$tbl,$s3,$s0,$s1,$s2);
&add ($key,$small_footprint?16:160);
&xor ($s0,&DWP(0,$key));
&xor ($s1,&DWP(4,$key));
&xor ($s2,&DWP(8,$key));
&xor ($s3,&DWP(12,$key));
&ret ();
&set_label("AES_Te",1024); # Yes! I keep it in the code segment!
&_data_word(0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6);
&_data_word(0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591);
&_data_word(0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56);
&_data_word(0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec);
&_data_word(0x45caca8f, 0x9d82821f, 0x40c9c989, 0x877d7dfa);
&_data_word(0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb);
&_data_word(0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45);
&_data_word(0xbf9c9c23, 0xf7a4a453, 0x967272e4, 0x5bc0c09b);
&_data_word(0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c);
&_data_word(0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83);
&_data_word(0x5c343468, 0xf4a5a551, 0x34e5e5d1, 0x08f1f1f9);
&_data_word(0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a);
&_data_word(0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d);
&_data_word(0x28181830, 0xa1969637, 0x0f05050a, 0xb59a9a2f);
&_data_word(0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df);
&_data_word(0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea);
&_data_word(0x1b090912, 0x9e83831d, 0x742c2c58, 0x2e1a1a34);
&_data_word(0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b);
&_data_word(0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d);
&_data_word(0x7b292952, 0x3ee3e3dd, 0x712f2f5e, 0x97848413);
&_data_word(0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1);
&_data_word(0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6);
&_data_word(0xbe6a6ad4, 0x46cbcb8d, 0xd9bebe67, 0x4b393972);
&_data_word(0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85);
&_data_word(0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed);
&_data_word(0xc5434386, 0xd74d4d9a, 0x55333366, 0x94858511);
&_data_word(0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe);
&_data_word(0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b);
&_data_word(0xf35151a2, 0xfea3a35d, 0xc0404080, 0x8a8f8f05);
&_data_word(0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1);
&_data_word(0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142);
&_data_word(0x30101020, 0x1affffe5, 0x0ef3f3fd, 0x6dd2d2bf);
&_data_word(0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3);
&_data_word(0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e);
&_data_word(0x57c4c493, 0xf2a7a755, 0x827e7efc, 0x473d3d7a);
&_data_word(0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6);
&_data_word(0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3);
&_data_word(0x66222244, 0x7e2a2a54, 0xab90903b, 0x8388880b);
&_data_word(0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428);
&_data_word(0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad);
&_data_word(0x3be0e0db, 0x56323264, 0x4e3a3a74, 0x1e0a0a14);
&_data_word(0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8);
&_data_word(0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4);
&_data_word(0xa8919139, 0xa4959531, 0x37e4e4d3, 0x8b7979f2);
&_data_word(0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda);
&_data_word(0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949);
&_data_word(0xb46c6cd8, 0xfa5656ac, 0x07f4f4f3, 0x25eaeacf);
&_data_word(0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810);
&_data_word(0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c);
&_data_word(0x241c1c38, 0xf1a6a657, 0xc7b4b473, 0x51c6c697);
&_data_word(0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e);
&_data_word(0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f);
&_data_word(0x907070e0, 0x423e3e7c, 0xc4b5b571, 0xaa6666cc);
&_data_word(0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c);
&_data_word(0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969);
&_data_word(0x91868617, 0x58c1c199, 0x271d1d3a, 0xb99e9e27);
&_data_word(0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122);
&_data_word(0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433);
&_data_word(0xb69b9b2d, 0x221e1e3c, 0x92878715, 0x20e9e9c9);
&_data_word(0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5);
&_data_word(0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a);
&_data_word(0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0);
&_data_word(0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e);
&_data_word(0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c);
#Te4 # four copies of Te4 to choose from to avoid L1 aliasing
&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);