12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930 |
- #!/usr/bin/env perl
- #
- # ====================================================================
- # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
- # project. The module is, however, dual licensed under OpenSSL and
- # CRYPTOGAMS licenses depending on where you obtain it. For further
- # details see http://www.openssl.org/~appro/cryptogams/.
- # ====================================================================
- #
- # This module implements support for AES instructions as per PowerISA
- # specification version 2.07, first implemented by POWER8 processor.
- # The module is endian-agnostic in sense that it supports both big-
- # and little-endian cases. Data alignment in parallelizable modes is
- # handled with VSX loads and stores, which implies MSR.VSX flag being
- # set. It should also be noted that ISA specification doesn't prohibit
- # alignment exceptions for these instructions on page boundaries.
- # Initially alignment was handled in pure AltiVec/VMX way [when data
- # is aligned programmatically, which in turn guarantees exception-
- # free execution], but it turned out to hamper performance when vcipher
- # instructions are interleaved. It's reckoned that eventual
- # misalignment penalties at page boundaries are on average lower
- # than the additional overhead of the pure AltiVec approach.
# The flavour argument (e.g. "linux64le", "linux32") selects the ABI:
# register width, link-register save offset, and the size-dependent
# mnemonics used throughout the module.
$flavour = shift;

if ($flavour =~ /64/) {
	$SIZE_T	=8;
	$LRSAVE	=2*$SIZE_T;
	$STU	="stdu";
	$POP	="ld";
	$PUSH	="std";
	$UCMP	="cmpld";
	$SHL	="sldi";
} elsif ($flavour =~ /32/) {
	$SIZE_T	=4;
	$LRSAVE	=$SIZE_T;
	$STU	="stwu";
	$POP	="lwz";
	$PUSH	="stw";
	$UCMP	="cmplw";
	$SHL	="slwi";
} else { die "nonsense $flavour"; }

# Non-zero iff target is little-endian; consumed by le?-prefixed ops.
$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0;

# Locate the ppc-xlate.pl translator next to this script or in the
# perlasm directory.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";

# Pipe all generated code through the translator.
# BUGFIX: must use low-precedence "or" here.  With "||" the expression
# parsed as  open STDOUT, ("…".shift || die …)  -- the concatenated
# string is always true, so "die" was unreachable and a failed open()
# went completely undetected.
open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";

$FRAME=8*$SIZE_T;
$prefix="aes_p8";

$sp="r1";
$vrsave="r12";
#########################################################################
{{{	# Key setup procedures						#
# Emits aes_p8_set_encrypt_key and aes_p8_set_decrypt_key.
# r3 return value: 0 on success, -1 on NULL input/output pointer,
# -2 on unsupported key length (accepted: 128, 192, 256 bits).
my ($inp,$bits,$out,$ptr,$cnt,$rounds)=map("r$_",(3..8));
my ($zero,$in0,$in1,$key,$rcon,$mask,$tmp)=map("v$_",(0..6));
my ($stage,$outperm,$outmask,$outhead,$outtail)=map("v$_",(7..11));

$code.=<<___;
.machine	"any"

.text

.align	7
rcon:
.long	0x01000000, 0x01000000, 0x01000000, 0x01000000	?rev
.long	0x1b000000, 0x1b000000, 0x1b000000, 0x1b000000	?rev
.long	0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c	?rev
.long	0,0,0,0						?asis
Lconsts:
	mflr	r0
	bcl	20,31,\$+4
	mflr	$ptr		# distance between . and rcon
	addi	$ptr,$ptr,-0x48
	mtlr	r0
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,0,0
.asciz	"AES for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"

.globl	.${prefix}_set_encrypt_key
Lset_encrypt_key:
	mflr	r11
	$PUSH	r11,$LRSAVE($sp)

	li	$ptr,-1			# error code: NULL pointer
	${UCMP}i $inp,0
	beq-	Lenc_key_abort		# if ($inp==0) return -1;
	${UCMP}i $out,0
	beq-	Lenc_key_abort		# if ($out==0) return -1;
	li	$ptr,-2			# error code: bad key length
	cmpwi	$bits,128
	blt-	Lenc_key_abort
	cmpwi	$bits,256
	bgt-	Lenc_key_abort
	andi.	r0,$bits,0x3f		# must be a multiple of 64 bits
	bne-	Lenc_key_abort

	lis	r0,0xfff0
	mfspr	$vrsave,256		# save vrsave, enable needed VRs
	mtspr	256,r0

	bl	Lconsts
	mtlr	r11

	neg	r9,$inp
	lvx	$in0,0,$inp
	addi	$inp,$inp,15		# 15 is not typo
	lvsr	$key,0,r9		# borrow $key
	li	r8,0x20
	cmpwi	$bits,192
	lvx	$in1,0,$inp
	le?vspltisb	$mask,0x0f	# borrow $mask
	lvx	$rcon,0,$ptr
	le?vxor	$key,$key,$mask		# adjust for byte swap
	lvx	$mask,r8,$ptr
	addi	$ptr,$ptr,0x10
	vperm	$in0,$in0,$in1,$key	# align [and byte swap in LE]
	li	$cnt,8
	vxor	$zero,$zero,$zero
	mtctr	$cnt

	?lvsr	$outperm,0,$out
	vspltisb	$outmask,-1
	lvx	$outhead,0,$out
	?vperm	$outmask,$zero,$outmask,$outperm

	blt	Loop128			# dispatch on key length
	addi	$inp,$inp,8
	beq	L192
	addi	$inp,$inp,8
	b	L256

.align	4
Loop128:
	# 128-bit schedule: one round key per iteration, 8 iterations
	# here plus two unrolled rounds below (10 rounds total).
	vperm	$key,$in0,$in0,$mask	# rotate-n-splat
	vsldoi	$tmp,$zero,$in0,12	# >>32
	vperm	$outtail,$in0,$in0,$outperm	# rotate
	vsel	$stage,$outhead,$outtail,$outmask
	vmr	$outhead,$outtail
	vcipherlast	$key,$key,$rcon
	stvx	$stage,0,$out
	addi	$out,$out,16

	vxor	$in0,$in0,$tmp
	vsldoi	$tmp,$zero,$tmp,12	# >>32
	vxor	$in0,$in0,$tmp
	vsldoi	$tmp,$zero,$tmp,12	# >>32
	vxor	$in0,$in0,$tmp
	vadduwm	$rcon,$rcon,$rcon	# next round constant
	vxor	$in0,$in0,$key
	bdnz	Loop128

	lvx	$rcon,0,$ptr		# last two round keys

	vperm	$key,$in0,$in0,$mask	# rotate-n-splat
	vsldoi	$tmp,$zero,$in0,12	# >>32
	vperm	$outtail,$in0,$in0,$outperm	# rotate
	vsel	$stage,$outhead,$outtail,$outmask
	vmr	$outhead,$outtail
	vcipherlast	$key,$key,$rcon
	stvx	$stage,0,$out
	addi	$out,$out,16

	vxor	$in0,$in0,$tmp
	vsldoi	$tmp,$zero,$tmp,12	# >>32
	vxor	$in0,$in0,$tmp
	vsldoi	$tmp,$zero,$tmp,12	# >>32
	vxor	$in0,$in0,$tmp
	vadduwm	$rcon,$rcon,$rcon
	vxor	$in0,$in0,$key

	vperm	$key,$in0,$in0,$mask	# rotate-n-splat
	vsldoi	$tmp,$zero,$in0,12	# >>32
	vperm	$outtail,$in0,$in0,$outperm	# rotate
	vsel	$stage,$outhead,$outtail,$outmask
	vmr	$outhead,$outtail
	vcipherlast	$key,$key,$rcon
	stvx	$stage,0,$out
	addi	$out,$out,16

	vxor	$in0,$in0,$tmp
	vsldoi	$tmp,$zero,$tmp,12	# >>32
	vxor	$in0,$in0,$tmp
	vsldoi	$tmp,$zero,$tmp,12	# >>32
	vxor	$in0,$in0,$tmp
	vxor	$in0,$in0,$key
	vperm	$outtail,$in0,$in0,$outperm	# rotate
	vsel	$stage,$outhead,$outtail,$outmask
	vmr	$outhead,$outtail
	stvx	$stage,0,$out

	addi	$inp,$out,15		# 15 is not typo
	addi	$out,$out,0x50

	li	$rounds,10
	b	Ldone

.align	4
L192:
	# 192-bit schedule: 12 rounds, 4 Loop192 iterations producing
	# three round keys per iteration.
	lvx	$tmp,0,$inp
	li	$cnt,4
	vperm	$outtail,$in0,$in0,$outperm	# rotate
	vsel	$stage,$outhead,$outtail,$outmask
	vmr	$outhead,$outtail
	stvx	$stage,0,$out
	addi	$out,$out,16
	vperm	$in1,$in1,$tmp,$key	# align [and byte swap in LE]
	vspltisb	$key,8		# borrow $key
	mtctr	$cnt
	vsububm	$mask,$mask,$key	# adjust the mask

Loop192:
	vperm	$key,$in1,$in1,$mask	# rotate-n-splat
	vsldoi	$tmp,$zero,$in0,12	# >>32
	vcipherlast	$key,$key,$rcon

	vxor	$in0,$in0,$tmp
	vsldoi	$tmp,$zero,$tmp,12	# >>32
	vxor	$in0,$in0,$tmp
	vsldoi	$tmp,$zero,$tmp,12	# >>32
	vxor	$in0,$in0,$tmp

	vsldoi	$stage,$zero,$in1,8
	vspltw	$tmp,$in0,3
	vxor	$tmp,$tmp,$in1
	vsldoi	$in1,$zero,$in1,12	# >>32
	vadduwm	$rcon,$rcon,$rcon
	vxor	$in1,$in1,$tmp
	vxor	$in0,$in0,$key
	vxor	$in1,$in1,$key
	vsldoi	$stage,$stage,$in0,8

	vperm	$key,$in1,$in1,$mask	# rotate-n-splat
	vsldoi	$tmp,$zero,$in0,12	# >>32
	vperm	$outtail,$stage,$stage,$outperm	# rotate
	vsel	$stage,$outhead,$outtail,$outmask
	vmr	$outhead,$outtail
	vcipherlast	$key,$key,$rcon
	stvx	$stage,0,$out
	addi	$out,$out,16

	vsldoi	$stage,$in0,$in1,8
	vxor	$in0,$in0,$tmp
	vsldoi	$tmp,$zero,$tmp,12	# >>32
	vperm	$outtail,$stage,$stage,$outperm	# rotate
	vsel	$stage,$outhead,$outtail,$outmask
	vmr	$outhead,$outtail
	vxor	$in0,$in0,$tmp
	vsldoi	$tmp,$zero,$tmp,12	# >>32
	vxor	$in0,$in0,$tmp
	stvx	$stage,0,$out
	addi	$out,$out,16

	vspltw	$tmp,$in0,3
	vxor	$tmp,$tmp,$in1
	vsldoi	$in1,$zero,$in1,12	# >>32
	vadduwm	$rcon,$rcon,$rcon
	vxor	$in1,$in1,$tmp
	vxor	$in0,$in0,$key
	vxor	$in1,$in1,$key
	vperm	$outtail,$in0,$in0,$outperm	# rotate
	vsel	$stage,$outhead,$outtail,$outmask
	vmr	$outhead,$outtail
	stvx	$stage,0,$out
	addi	$inp,$out,15		# 15 is not typo
	addi	$out,$out,16
	bdnz	Loop192

	li	$rounds,12
	addi	$out,$out,0x20
	b	Ldone

.align	4
L256:
	# 256-bit schedule: 14 rounds, Loop256 alternates between the
	# two 128-bit key halves ($in0/$in1), using vsbox on odd steps.
	lvx	$tmp,0,$inp
	li	$cnt,7
	li	$rounds,14
	vperm	$outtail,$in0,$in0,$outperm	# rotate
	vsel	$stage,$outhead,$outtail,$outmask
	vmr	$outhead,$outtail
	stvx	$stage,0,$out
	addi	$out,$out,16
	vperm	$in1,$in1,$tmp,$key	# align [and byte swap in LE]
	mtctr	$cnt

Loop256:
	vperm	$key,$in1,$in1,$mask	# rotate-n-splat
	vsldoi	$tmp,$zero,$in0,12	# >>32
	vperm	$outtail,$in1,$in1,$outperm	# rotate
	vsel	$stage,$outhead,$outtail,$outmask
	vmr	$outhead,$outtail
	vcipherlast	$key,$key,$rcon
	stvx	$stage,0,$out
	addi	$out,$out,16

	vxor	$in0,$in0,$tmp
	vsldoi	$tmp,$zero,$tmp,12	# >>32
	vxor	$in0,$in0,$tmp
	vsldoi	$tmp,$zero,$tmp,12	# >>32
	vxor	$in0,$in0,$tmp
	vadduwm	$rcon,$rcon,$rcon
	vxor	$in0,$in0,$key
	vperm	$outtail,$in0,$in0,$outperm	# rotate
	vsel	$stage,$outhead,$outtail,$outmask
	vmr	$outhead,$outtail
	stvx	$stage,0,$out
	addi	$inp,$out,15		# 15 is not typo
	addi	$out,$out,16
	bdz	Ldone

	vspltw	$key,$in0,3		# just splat
	vsldoi	$tmp,$zero,$in1,12	# >>32
	vsbox	$key,$key

	vxor	$in1,$in1,$tmp
	vsldoi	$tmp,$zero,$tmp,12	# >>32
	vxor	$in1,$in1,$tmp
	vsldoi	$tmp,$zero,$tmp,12	# >>32
	vxor	$in1,$in1,$tmp

	vxor	$in1,$in1,$key
	b	Loop256

.align	4
Ldone:
	lvx	$in1,0,$inp		# redundant in aligned case
	vsel	$in1,$outhead,$in1,$outmask
	stvx	$in1,0,$inp
	li	$ptr,0			# success
	mtspr	256,$vrsave
	stw	$rounds,0($out)		# store round count for users

Lenc_key_abort:
	mr	r3,$ptr			# return 0/-1/-2
	blr
	.long	0
	.byte	0,12,0x14,1,0,0,3,0
	.long	0
.size	.${prefix}_set_encrypt_key,.-.${prefix}_set_encrypt_key

.globl	.${prefix}_set_decrypt_key
	$STU	$sp,-$FRAME($sp)
	mflr	r10
	$PUSH	r10,$FRAME+$LRSAVE($sp)
	bl	Lset_encrypt_key	# build encrypt schedule first
	mtlr	r10

	cmpwi	r3,0
	bne-	Ldec_key_abort		# propagate -1/-2 from key setup

	# Reverse the order of the round keys in place by swapping
	# 16-byte entries end-for-end, word by word.
	slwi	$cnt,$rounds,4
	subi	$inp,$out,240		# first round key
	srwi	$rounds,$rounds,1
	add	$out,$inp,$cnt		# last round key
	mtctr	$rounds

Ldeckey:
	lwz	r0, 0($inp)
	lwz	r6, 4($inp)
	lwz	r7, 8($inp)
	lwz	r8, 12($inp)
	addi	$inp,$inp,16
	lwz	r9, 0($out)
	lwz	r10,4($out)
	lwz	r11,8($out)
	lwz	r12,12($out)
	stw	r0, 0($out)
	stw	r6, 4($out)
	stw	r7, 8($out)
	stw	r8, 12($out)
	subi	$out,$out,16
	stw	r9, -16($inp)
	stw	r10,-12($inp)
	stw	r11,-8($inp)
	stw	r12,-4($inp)
	bdnz	Ldeckey

	xor	r3,r3,r3		# return value
Ldec_key_abort:
	addi	$sp,$sp,$FRAME
	blr
	.long	0
	.byte	0,12,4,1,0x80,0,3,0
	.long	0
.size	.${prefix}_set_decrypt_key,.-.${prefix}_set_decrypt_key
___
}}}
#########################################################################
{{{	# Single block en- and decrypt procedures			#
# gen_block("en"|"de") emits aes_p8_encrypt / aes_p8_decrypt: a single
# 16-byte block transform with unaligned-safe input/output handling.
# The only difference between the two is the "n" infix selecting the
# vcipher vs. vncipher instruction family.
sub gen_block () {
my $dir = shift;
my $n   = $dir eq "de" ? "n" : "";	# "n" => vncipher* for decrypt
my ($inp,$out,$key,$rounds,$idx)=map("r$_",(3..7));

$code.=<<___;
.globl	.${prefix}_${dir}crypt
	lwz	$rounds,240($key)	# round count stored by key setup
	lis	r0,0xfc00
	mfspr	$vrsave,256
	li	$idx,15			# 15 is not typo
	mtspr	256,r0

	lvx	v0,0,$inp
	neg	r11,$out
	lvx	v1,$idx,$inp
	lvsl	v2,0,$inp		# inpperm
	le?vspltisb	v4,0x0f
	?lvsl	v3,0,r11		# outperm
	le?vxor	v2,v2,v4
	li	$idx,16
	vperm	v0,v0,v1,v2		# align [and byte swap in LE]
	lvx	v1,0,$key
	?lvsl	v5,0,$key		# keyperm
	srwi	$rounds,$rounds,1	# two rounds per loop iteration
	lvx	v2,$idx,$key
	addi	$idx,$idx,16
	subi	$rounds,$rounds,1
	?vperm	v1,v1,v2,v5		# align round key

	vxor	v0,v0,v1		# initial whitening
	lvx	v1,$idx,$key
	addi	$idx,$idx,16
	mtctr	$rounds

Loop_${dir}c:
	?vperm	v2,v2,v1,v5
	v${n}cipher	v0,v0,v2
	lvx	v2,$idx,$key
	addi	$idx,$idx,16
	?vperm	v1,v1,v2,v5
	v${n}cipher	v0,v0,v1
	lvx	v1,$idx,$key
	addi	$idx,$idx,16
	bdnz	Loop_${dir}c

	?vperm	v2,v2,v1,v5
	v${n}cipher	v0,v0,v2
	lvx	v2,$idx,$key
	?vperm	v1,v1,v2,v5
	v${n}cipherlast	v0,v0,v1	# final round

	vspltisb	v2,-1
	vxor	v1,v1,v1
	li	$idx,15			# 15 is not typo
	?vperm	v2,v1,v2,v3		# outmask
	le?vxor	v3,v3,v4
	lvx	v1,0,$out		# outhead
	vperm	v0,v0,v0,v3		# rotate [and byte swap in LE]
	vsel	v1,v1,v0,v2
	lvx	v4,$idx,$out
	stvx	v1,0,$out
	vsel	v0,v0,v4,v2
	stvx	v0,$idx,$out		# unaligned-safe two-part store

	mtspr	256,$vrsave
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,3,0
	.long	0
.size	.${prefix}_${dir}crypt,.-.${prefix}_${dir}crypt
___
}
&gen_block("en");
&gen_block("de");
}}}
- #########################################################################
- {{{ # CBC en- and decrypt procedures #
- my ($inp,$out,$len,$key,$ivp,$enc,$rounds,$idx)=map("r$_",(3..10));
- my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3));
- my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm)=
- map("v$_",(4..10));
- $code.=<<___;
- .globl .${prefix}_cbc_encrypt
- ${UCMP}i $len,16
- bltlr-
- cmpwi $enc,0 # test direction
- lis r0,0xffe0
- mfspr $vrsave,256
- mtspr 256,r0
- li $idx,15
- vxor $rndkey0,$rndkey0,$rndkey0
- le?vspltisb $tmp,0x0f
- lvx $ivec,0,$ivp # load [unaligned] iv
- lvsl $inpperm,0,$ivp
- lvx $inptail,$idx,$ivp
- le?vxor $inpperm,$inpperm,$tmp
- vperm $ivec,$ivec,$inptail,$inpperm
- neg r11,$inp
- ?lvsl $keyperm,0,$key # prepare for unaligned key
- lwz $rounds,240($key)
- lvsr $inpperm,0,r11 # prepare for unaligned load
- lvx $inptail,0,$inp
- addi $inp,$inp,15 # 15 is not typo
- le?vxor $inpperm,$inpperm,$tmp
- ?lvsr $outperm,0,$out # prepare for unaligned store
- vspltisb $outmask,-1
- lvx $outhead,0,$out
- ?vperm $outmask,$rndkey0,$outmask,$outperm
- le?vxor $outperm,$outperm,$tmp
- srwi $rounds,$rounds,1
- li $idx,16
- subi $rounds,$rounds,1
- beq Lcbc_dec
- Lcbc_enc:
- vmr $inout,$inptail
- lvx $inptail,0,$inp
- addi $inp,$inp,16
- mtctr $rounds
- subi $len,$len,16 # len-=16
- lvx $rndkey0,0,$key
- vperm $inout,$inout,$inptail,$inpperm
- lvx $rndkey1,$idx,$key
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vxor $inout,$inout,$rndkey0
- lvx $rndkey0,$idx,$key
- addi $idx,$idx,16
- vxor $inout,$inout,$ivec
- Loop_cbc_enc:
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vcipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vcipher $inout,$inout,$rndkey0
- lvx $rndkey0,$idx,$key
- addi $idx,$idx,16
- bdnz Loop_cbc_enc
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vcipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key
- li $idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vcipherlast $ivec,$inout,$rndkey0
- ${UCMP}i $len,16
- vperm $tmp,$ivec,$ivec,$outperm
- vsel $inout,$outhead,$tmp,$outmask
- vmr $outhead,$tmp
- stvx $inout,0,$out
- addi $out,$out,16
- bge Lcbc_enc
- b Lcbc_done
- .align 4
- Lcbc_dec:
- ${UCMP}i $len,128
- bge _aesp8_cbc_decrypt8x
- vmr $tmp,$inptail
- lvx $inptail,0,$inp
- addi $inp,$inp,16
- mtctr $rounds
- subi $len,$len,16 # len-=16
- lvx $rndkey0,0,$key
- vperm $tmp,$tmp,$inptail,$inpperm
- lvx $rndkey1,$idx,$key
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vxor $inout,$tmp,$rndkey0
- lvx $rndkey0,$idx,$key
- addi $idx,$idx,16
- Loop_cbc_dec:
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vncipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vncipher $inout,$inout,$rndkey0
- lvx $rndkey0,$idx,$key
- addi $idx,$idx,16
- bdnz Loop_cbc_dec
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vncipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key
- li $idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vncipherlast $inout,$inout,$rndkey0
- ${UCMP}i $len,16
- vxor $inout,$inout,$ivec
- vmr $ivec,$tmp
- vperm $tmp,$inout,$inout,$outperm
- vsel $inout,$outhead,$tmp,$outmask
- vmr $outhead,$tmp
- stvx $inout,0,$out
- addi $out,$out,16
- bge Lcbc_dec
- Lcbc_done:
- addi $out,$out,-1
- lvx $inout,0,$out # redundant in aligned case
- vsel $inout,$outhead,$inout,$outmask
- stvx $inout,0,$out
- neg $enc,$ivp # write [unaligned] iv
- li $idx,15 # 15 is not typo
- vxor $rndkey0,$rndkey0,$rndkey0
- vspltisb $outmask,-1
- le?vspltisb $tmp,0x0f
- ?lvsl $outperm,0,$enc
- ?vperm $outmask,$rndkey0,$outmask,$outperm
- le?vxor $outperm,$outperm,$tmp
- lvx $outhead,0,$ivp
- vperm $ivec,$ivec,$ivec,$outperm
- vsel $inout,$outhead,$ivec,$outmask
- lvx $inptail,$idx,$ivp
- stvx $inout,0,$ivp
- vsel $inout,$ivec,$inptail,$outmask
- stvx $inout,$idx,$ivp
- mtspr 256,$vrsave
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,6,0
- .long 0
- ___
- #########################################################################
- {{ # Optimized CBC decrypt procedure #
- my $key_="r11";
- my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31));
- my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10..13));
- my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(14..21));
- my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
- # v26-v31 last 6 round keys
- my ($tmp,$keyperm)=($in3,$in4); # aliases with "caller", redundant assignment
- $code.=<<___;
- .align 5
- _aesp8_cbc_decrypt8x:
- $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
- li r10,`$FRAME+8*16+15`
- li r11,`$FRAME+8*16+31`
- stvx v20,r10,$sp # ABI says so
- addi r10,r10,32
- stvx v21,r11,$sp
- addi r11,r11,32
- stvx v22,r10,$sp
- addi r10,r10,32
- stvx v23,r11,$sp
- addi r11,r11,32
- stvx v24,r10,$sp
- addi r10,r10,32
- stvx v25,r11,$sp
- addi r11,r11,32
- stvx v26,r10,$sp
- addi r10,r10,32
- stvx v27,r11,$sp
- addi r11,r11,32
- stvx v28,r10,$sp
- addi r10,r10,32
- stvx v29,r11,$sp
- addi r11,r11,32
- stvx v30,r10,$sp
- stvx v31,r11,$sp
- li r0,-1
- stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
- li $x10,0x10
- $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
- li $x20,0x20
- $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
- li $x30,0x30
- $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
- li $x40,0x40
- $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
- li $x50,0x50
- $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
- li $x60,0x60
- $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
- li $x70,0x70
- mtspr 256,r0
- subi $rounds,$rounds,3 # -4 in total
- subi $len,$len,128 # bias
- lvx $rndkey0,$x00,$key # load key schedule
- lvx v30,$x10,$key
- addi $key,$key,0x20
- lvx v31,$x00,$key
- ?vperm $rndkey0,$rndkey0,v30,$keyperm
- addi $key_,$sp,$FRAME+15
- mtctr $rounds
- Load_cbc_dec_key:
- ?vperm v24,v30,v31,$keyperm
- lvx v30,$x10,$key
- addi $key,$key,0x20
- stvx v24,$x00,$key_ # off-load round[1]
- ?vperm v25,v31,v30,$keyperm
- lvx v31,$x00,$key
- stvx v25,$x10,$key_ # off-load round[2]
- addi $key_,$key_,0x20
- bdnz Load_cbc_dec_key
- lvx v26,$x10,$key
- ?vperm v24,v30,v31,$keyperm
- lvx v27,$x20,$key
- stvx v24,$x00,$key_ # off-load round[3]
- ?vperm v25,v31,v26,$keyperm
- lvx v28,$x30,$key
- stvx v25,$x10,$key_ # off-load round[4]
- addi $key_,$sp,$FRAME+15 # rewind $key_
- ?vperm v26,v26,v27,$keyperm
- lvx v29,$x40,$key
- ?vperm v27,v27,v28,$keyperm
- lvx v30,$x50,$key
- ?vperm v28,v28,v29,$keyperm
- lvx v31,$x60,$key
- ?vperm v29,v29,v30,$keyperm
- lvx $out0,$x70,$key # borrow $out0
- ?vperm v30,v30,v31,$keyperm
- lvx v24,$x00,$key_ # pre-load round[1]
- ?vperm v31,v31,$out0,$keyperm
- lvx v25,$x10,$key_ # pre-load round[2]
- #lvx $inptail,0,$inp # "caller" already did this
- #addi $inp,$inp,15 # 15 is not typo
- subi $inp,$inp,15 # undo "caller"
- le?li $idx,8
- lvx_u $in0,$x00,$inp # load first 8 "words"
- le?lvsl $inpperm,0,$idx
- le?vspltisb $tmp,0x0f
- lvx_u $in1,$x10,$inp
- le?vxor $inpperm,$inpperm,$tmp # transform for lvx_u/stvx_u
- lvx_u $in2,$x20,$inp
- le?vperm $in0,$in0,$in0,$inpperm
- lvx_u $in3,$x30,$inp
- le?vperm $in1,$in1,$in1,$inpperm
- lvx_u $in4,$x40,$inp
- le?vperm $in2,$in2,$in2,$inpperm
- vxor $out0,$in0,$rndkey0
- lvx_u $in5,$x50,$inp
- le?vperm $in3,$in3,$in3,$inpperm
- vxor $out1,$in1,$rndkey0
- lvx_u $in6,$x60,$inp
- le?vperm $in4,$in4,$in4,$inpperm
- vxor $out2,$in2,$rndkey0
- lvx_u $in7,$x70,$inp
- addi $inp,$inp,0x80
- le?vperm $in5,$in5,$in5,$inpperm
- vxor $out3,$in3,$rndkey0
- le?vperm $in6,$in6,$in6,$inpperm
- vxor $out4,$in4,$rndkey0
- le?vperm $in7,$in7,$in7,$inpperm
- vxor $out5,$in5,$rndkey0
- vxor $out6,$in6,$rndkey0
- vxor $out7,$in7,$rndkey0
- mtctr $rounds
- b Loop_cbc_dec8x
- .align 5
- Loop_cbc_dec8x:
- vncipher $out0,$out0,v24
- vncipher $out1,$out1,v24
- vncipher $out2,$out2,v24
- vncipher $out3,$out3,v24
- vncipher $out4,$out4,v24
- vncipher $out5,$out5,v24
- vncipher $out6,$out6,v24
- vncipher $out7,$out7,v24
- lvx v24,$x20,$key_ # round[3]
- addi $key_,$key_,0x20
- vncipher $out0,$out0,v25
- vncipher $out1,$out1,v25
- vncipher $out2,$out2,v25
- vncipher $out3,$out3,v25
- vncipher $out4,$out4,v25
- vncipher $out5,$out5,v25
- vncipher $out6,$out6,v25
- vncipher $out7,$out7,v25
- lvx v25,$x10,$key_ # round[4]
- bdnz Loop_cbc_dec8x
- subic $len,$len,128 # $len-=128
- vncipher $out0,$out0,v24
- vncipher $out1,$out1,v24
- vncipher $out2,$out2,v24
- vncipher $out3,$out3,v24
- vncipher $out4,$out4,v24
- vncipher $out5,$out5,v24
- vncipher $out6,$out6,v24
- vncipher $out7,$out7,v24
- subfe. r0,r0,r0 # borrow?-1:0
- vncipher $out0,$out0,v25
- vncipher $out1,$out1,v25
- vncipher $out2,$out2,v25
- vncipher $out3,$out3,v25
- vncipher $out4,$out4,v25
- vncipher $out5,$out5,v25
- vncipher $out6,$out6,v25
- vncipher $out7,$out7,v25
- and r0,r0,$len
- vncipher $out0,$out0,v26
- vncipher $out1,$out1,v26
- vncipher $out2,$out2,v26
- vncipher $out3,$out3,v26
- vncipher $out4,$out4,v26
- vncipher $out5,$out5,v26
- vncipher $out6,$out6,v26
- vncipher $out7,$out7,v26
- add $inp,$inp,r0 # $inp is adjusted in such
- # way that at exit from the
- # loop inX-in7 are loaded
- # with last "words"
- vncipher $out0,$out0,v27
- vncipher $out1,$out1,v27
- vncipher $out2,$out2,v27
- vncipher $out3,$out3,v27
- vncipher $out4,$out4,v27
- vncipher $out5,$out5,v27
- vncipher $out6,$out6,v27
- vncipher $out7,$out7,v27
- addi $key_,$sp,$FRAME+15 # rewind $key_
- vncipher $out0,$out0,v28
- vncipher $out1,$out1,v28
- vncipher $out2,$out2,v28
- vncipher $out3,$out3,v28
- vncipher $out4,$out4,v28
- vncipher $out5,$out5,v28
- vncipher $out6,$out6,v28
- vncipher $out7,$out7,v28
- lvx v24,$x00,$key_ # re-pre-load round[1]
- vncipher $out0,$out0,v29
- vncipher $out1,$out1,v29
- vncipher $out2,$out2,v29
- vncipher $out3,$out3,v29
- vncipher $out4,$out4,v29
- vncipher $out5,$out5,v29
- vncipher $out6,$out6,v29
- vncipher $out7,$out7,v29
- lvx v25,$x10,$key_ # re-pre-load round[2]
- vncipher $out0,$out0,v30
- vxor $ivec,$ivec,v31 # xor with last round key
- vncipher $out1,$out1,v30
- vxor $in0,$in0,v31
- vncipher $out2,$out2,v30
- vxor $in1,$in1,v31
- vncipher $out3,$out3,v30
- vxor $in2,$in2,v31
- vncipher $out4,$out4,v30
- vxor $in3,$in3,v31
- vncipher $out5,$out5,v30
- vxor $in4,$in4,v31
- vncipher $out6,$out6,v30
- vxor $in5,$in5,v31
- vncipher $out7,$out7,v30
- vxor $in6,$in6,v31
- vncipherlast $out0,$out0,$ivec
- vncipherlast $out1,$out1,$in0
- lvx_u $in0,$x00,$inp # load next input block
- vncipherlast $out2,$out2,$in1
- lvx_u $in1,$x10,$inp
- vncipherlast $out3,$out3,$in2
- le?vperm $in0,$in0,$in0,$inpperm
- lvx_u $in2,$x20,$inp
- vncipherlast $out4,$out4,$in3
- le?vperm $in1,$in1,$in1,$inpperm
- lvx_u $in3,$x30,$inp
- vncipherlast $out5,$out5,$in4
- le?vperm $in2,$in2,$in2,$inpperm
- lvx_u $in4,$x40,$inp
- vncipherlast $out6,$out6,$in5
- le?vperm $in3,$in3,$in3,$inpperm
- lvx_u $in5,$x50,$inp
- vncipherlast $out7,$out7,$in6
- le?vperm $in4,$in4,$in4,$inpperm
- lvx_u $in6,$x60,$inp
- vmr $ivec,$in7
- le?vperm $in5,$in5,$in5,$inpperm
- lvx_u $in7,$x70,$inp
- addi $inp,$inp,0x80
- le?vperm $out0,$out0,$out0,$inpperm
- le?vperm $out1,$out1,$out1,$inpperm
- stvx_u $out0,$x00,$out
- le?vperm $in6,$in6,$in6,$inpperm
- vxor $out0,$in0,$rndkey0
- le?vperm $out2,$out2,$out2,$inpperm
- stvx_u $out1,$x10,$out
- le?vperm $in7,$in7,$in7,$inpperm
- vxor $out1,$in1,$rndkey0
- le?vperm $out3,$out3,$out3,$inpperm
- stvx_u $out2,$x20,$out
- vxor $out2,$in2,$rndkey0
- le?vperm $out4,$out4,$out4,$inpperm
- stvx_u $out3,$x30,$out
- vxor $out3,$in3,$rndkey0
- le?vperm $out5,$out5,$out5,$inpperm
- stvx_u $out4,$x40,$out
- vxor $out4,$in4,$rndkey0
- le?vperm $out6,$out6,$out6,$inpperm
- stvx_u $out5,$x50,$out
- vxor $out5,$in5,$rndkey0
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out6,$x60,$out
- vxor $out6,$in6,$rndkey0
- stvx_u $out7,$x70,$out
- addi $out,$out,0x80
- vxor $out7,$in7,$rndkey0
- mtctr $rounds
- beq Loop_cbc_dec8x # did $len-=128 borrow?
- addic. $len,$len,128
- beq Lcbc_dec8x_done
- nop
- nop
- Loop_cbc_dec8x_tail: # up to 7 "words" tail...
- vncipher $out1,$out1,v24
- vncipher $out2,$out2,v24
- vncipher $out3,$out3,v24
- vncipher $out4,$out4,v24
- vncipher $out5,$out5,v24
- vncipher $out6,$out6,v24
- vncipher $out7,$out7,v24
- lvx v24,$x20,$key_ # round[3]
- addi $key_,$key_,0x20
- vncipher $out1,$out1,v25
- vncipher $out2,$out2,v25
- vncipher $out3,$out3,v25
- vncipher $out4,$out4,v25
- vncipher $out5,$out5,v25
- vncipher $out6,$out6,v25
- vncipher $out7,$out7,v25
- lvx v25,$x10,$key_ # round[4]
- bdnz Loop_cbc_dec8x_tail
- vncipher $out1,$out1,v24
- vncipher $out2,$out2,v24
- vncipher $out3,$out3,v24
- vncipher $out4,$out4,v24
- vncipher $out5,$out5,v24
- vncipher $out6,$out6,v24
- vncipher $out7,$out7,v24
- vncipher $out1,$out1,v25
- vncipher $out2,$out2,v25
- vncipher $out3,$out3,v25
- vncipher $out4,$out4,v25
- vncipher $out5,$out5,v25
- vncipher $out6,$out6,v25
- vncipher $out7,$out7,v25
- vncipher $out1,$out1,v26
- vncipher $out2,$out2,v26
- vncipher $out3,$out3,v26
- vncipher $out4,$out4,v26
- vncipher $out5,$out5,v26
- vncipher $out6,$out6,v26
- vncipher $out7,$out7,v26
- vncipher $out1,$out1,v27
- vncipher $out2,$out2,v27
- vncipher $out3,$out3,v27
- vncipher $out4,$out4,v27
- vncipher $out5,$out5,v27
- vncipher $out6,$out6,v27
- vncipher $out7,$out7,v27
- vncipher $out1,$out1,v28
- vncipher $out2,$out2,v28
- vncipher $out3,$out3,v28
- vncipher $out4,$out4,v28
- vncipher $out5,$out5,v28
- vncipher $out6,$out6,v28
- vncipher $out7,$out7,v28
- vncipher $out1,$out1,v29
- vncipher $out2,$out2,v29
- vncipher $out3,$out3,v29
- vncipher $out4,$out4,v29
- vncipher $out5,$out5,v29
- vncipher $out6,$out6,v29
- vncipher $out7,$out7,v29
- vncipher $out1,$out1,v30
- vxor $ivec,$ivec,v31 # last round key
- vncipher $out2,$out2,v30
- vxor $in1,$in1,v31
- vncipher $out3,$out3,v30
- vxor $in2,$in2,v31
- vncipher $out4,$out4,v30
- vxor $in3,$in3,v31
- vncipher $out5,$out5,v30
- vxor $in4,$in4,v31
- vncipher $out6,$out6,v30
- vxor $in5,$in5,v31
- vncipher $out7,$out7,v30
- vxor $in6,$in6,v31
- cmplwi $len,32 # switch($len)
- blt Lcbc_dec8x_one
- nop
- beq Lcbc_dec8x_two
- cmplwi $len,64
- blt Lcbc_dec8x_three
- nop
- beq Lcbc_dec8x_four
- cmplwi $len,96
- blt Lcbc_dec8x_five
- nop
- beq Lcbc_dec8x_six
- Lcbc_dec8x_seven:
- vncipherlast $out1,$out1,$ivec
- vncipherlast $out2,$out2,$in1
- vncipherlast $out3,$out3,$in2
- vncipherlast $out4,$out4,$in3
- vncipherlast $out5,$out5,$in4
- vncipherlast $out6,$out6,$in5
- vncipherlast $out7,$out7,$in6
- vmr $ivec,$in7
- le?vperm $out1,$out1,$out1,$inpperm
- le?vperm $out2,$out2,$out2,$inpperm
- stvx_u $out1,$x00,$out
- le?vperm $out3,$out3,$out3,$inpperm
- stvx_u $out2,$x10,$out
- le?vperm $out4,$out4,$out4,$inpperm
- stvx_u $out3,$x20,$out
- le?vperm $out5,$out5,$out5,$inpperm
- stvx_u $out4,$x30,$out
- le?vperm $out6,$out6,$out6,$inpperm
- stvx_u $out5,$x40,$out
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out6,$x50,$out
- stvx_u $out7,$x60,$out
- addi $out,$out,0x70
- b Lcbc_dec8x_done
- .align 5
- Lcbc_dec8x_six:
- vncipherlast $out2,$out2,$ivec
- vncipherlast $out3,$out3,$in2
- vncipherlast $out4,$out4,$in3
- vncipherlast $out5,$out5,$in4
- vncipherlast $out6,$out6,$in5
- vncipherlast $out7,$out7,$in6
- vmr $ivec,$in7
- le?vperm $out2,$out2,$out2,$inpperm
- le?vperm $out3,$out3,$out3,$inpperm
- stvx_u $out2,$x00,$out
- le?vperm $out4,$out4,$out4,$inpperm
- stvx_u $out3,$x10,$out
- le?vperm $out5,$out5,$out5,$inpperm
- stvx_u $out4,$x20,$out
- le?vperm $out6,$out6,$out6,$inpperm
- stvx_u $out5,$x30,$out
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out6,$x40,$out
- stvx_u $out7,$x50,$out
- addi $out,$out,0x60
- b Lcbc_dec8x_done
- .align 5
- Lcbc_dec8x_five:
- vncipherlast $out3,$out3,$ivec
- vncipherlast $out4,$out4,$in3
- vncipherlast $out5,$out5,$in4
- vncipherlast $out6,$out6,$in5
- vncipherlast $out7,$out7,$in6
- vmr $ivec,$in7
- le?vperm $out3,$out3,$out3,$inpperm
- le?vperm $out4,$out4,$out4,$inpperm
- stvx_u $out3,$x00,$out
- le?vperm $out5,$out5,$out5,$inpperm
- stvx_u $out4,$x10,$out
- le?vperm $out6,$out6,$out6,$inpperm
- stvx_u $out5,$x20,$out
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out6,$x30,$out
- stvx_u $out7,$x40,$out
- addi $out,$out,0x50
- b Lcbc_dec8x_done
- .align 5
- Lcbc_dec8x_four:
- vncipherlast $out4,$out4,$ivec
- vncipherlast $out5,$out5,$in4
- vncipherlast $out6,$out6,$in5
- vncipherlast $out7,$out7,$in6
- vmr $ivec,$in7
- le?vperm $out4,$out4,$out4,$inpperm
- le?vperm $out5,$out5,$out5,$inpperm
- stvx_u $out4,$x00,$out
- le?vperm $out6,$out6,$out6,$inpperm
- stvx_u $out5,$x10,$out
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out6,$x20,$out
- stvx_u $out7,$x30,$out
- addi $out,$out,0x40
- b Lcbc_dec8x_done
- .align 5
- Lcbc_dec8x_three:
- vncipherlast $out5,$out5,$ivec
- vncipherlast $out6,$out6,$in5
- vncipherlast $out7,$out7,$in6
- vmr $ivec,$in7
- le?vperm $out5,$out5,$out5,$inpperm
- le?vperm $out6,$out6,$out6,$inpperm
- stvx_u $out5,$x00,$out
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out6,$x10,$out
- stvx_u $out7,$x20,$out
- addi $out,$out,0x30
- b Lcbc_dec8x_done
- .align 5
- Lcbc_dec8x_two:
- vncipherlast $out6,$out6,$ivec
- vncipherlast $out7,$out7,$in6
- vmr $ivec,$in7
- le?vperm $out6,$out6,$out6,$inpperm
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out6,$x00,$out
- stvx_u $out7,$x10,$out
- addi $out,$out,0x20
- b Lcbc_dec8x_done
- .align 5
- Lcbc_dec8x_one:
- vncipherlast $out7,$out7,$ivec
- vmr $ivec,$in7
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out7,0,$out
- addi $out,$out,0x10
- Lcbc_dec8x_done:
- le?vperm $ivec,$ivec,$ivec,$inpperm
- stvx_u $ivec,0,$ivp # write [unaligned] iv
- li r10,`$FRAME+15`
- li r11,`$FRAME+31`
- stvx $inpperm,r10,$sp # wipe copies of round keys
- addi r10,r10,32
- stvx $inpperm,r11,$sp
- addi r11,r11,32
- stvx $inpperm,r10,$sp
- addi r10,r10,32
- stvx $inpperm,r11,$sp
- addi r11,r11,32
- stvx $inpperm,r10,$sp
- addi r10,r10,32
- stvx $inpperm,r11,$sp
- addi r11,r11,32
- stvx $inpperm,r10,$sp
- addi r10,r10,32
- stvx $inpperm,r11,$sp
- addi r11,r11,32
- mtspr 256,$vrsave
- lvx v20,r10,$sp # ABI says so
- addi r10,r10,32
- lvx v21,r11,$sp
- addi r11,r11,32
- lvx v22,r10,$sp
- addi r10,r10,32
- lvx v23,r11,$sp
- addi r11,r11,32
- lvx v24,r10,$sp
- addi r10,r10,32
- lvx v25,r11,$sp
- addi r11,r11,32
- lvx v26,r10,$sp
- addi r10,r10,32
- lvx v27,r11,$sp
- addi r11,r11,32
- lvx v28,r10,$sp
- addi r10,r10,32
- lvx v29,r11,$sp
- addi r11,r11,32
- lvx v30,r10,$sp
- lvx v31,r11,$sp
- $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
- $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
- $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
- $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
- $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
- $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
- addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
- blr
- .long 0
- .byte 0,12,0x14,0,0x80,6,6,0
- .long 0
- .size .${prefix}_cbc_encrypt,.-.${prefix}_cbc_encrypt
- ___
- }} }}}
- #########################################################################
- {{{ # CTR procedure[s] #
- my ($inp,$out,$len,$key,$ivp,$x10,$rounds,$idx)=map("r$_",(3..10));
- my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3));
- my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm,$one)=
- map("v$_",(4..11));
- my $dat=$tmp;
- $code.=<<___;
- .globl .${prefix}_ctr32_encrypt_blocks
- ${UCMP}i $len,1
- bltlr-
- lis r0,0xfff0
- mfspr $vrsave,256
- mtspr 256,r0
- li $idx,15
- vxor $rndkey0,$rndkey0,$rndkey0
- le?vspltisb $tmp,0x0f
- lvx $ivec,0,$ivp # load [unaligned] iv
- lvsl $inpperm,0,$ivp
- lvx $inptail,$idx,$ivp
- vspltisb $one,1
- le?vxor $inpperm,$inpperm,$tmp
- vperm $ivec,$ivec,$inptail,$inpperm
- vsldoi $one,$rndkey0,$one,1
- neg r11,$inp
- ?lvsl $keyperm,0,$key # prepare for unaligned key
- lwz $rounds,240($key)
- lvsr $inpperm,0,r11 # prepare for unaligned load
- lvx $inptail,0,$inp
- addi $inp,$inp,15 # 15 is not typo
- le?vxor $inpperm,$inpperm,$tmp
- srwi $rounds,$rounds,1
- li $idx,16
- subi $rounds,$rounds,1
- ${UCMP}i $len,8
- bge _aesp8_ctr32_encrypt8x
- ?lvsr $outperm,0,$out # prepare for unaligned store
- vspltisb $outmask,-1
- lvx $outhead,0,$out
- ?vperm $outmask,$rndkey0,$outmask,$outperm
- le?vxor $outperm,$outperm,$tmp
- lvx $rndkey0,0,$key
- mtctr $rounds
- lvx $rndkey1,$idx,$key
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vxor $inout,$ivec,$rndkey0
- lvx $rndkey0,$idx,$key
- addi $idx,$idx,16
- b Loop_ctr32_enc
- .align 5
- Loop_ctr32_enc:
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vcipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vcipher $inout,$inout,$rndkey0
- lvx $rndkey0,$idx,$key
- addi $idx,$idx,16
- bdnz Loop_ctr32_enc
- vadduwm $ivec,$ivec,$one
- vmr $dat,$inptail
- lvx $inptail,0,$inp
- addi $inp,$inp,16
- subic. $len,$len,1 # blocks--
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vcipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key
- vperm $dat,$dat,$inptail,$inpperm
- li $idx,16
- ?vperm $rndkey1,$rndkey0,$rndkey1,$keyperm
- lvx $rndkey0,0,$key
- vxor $dat,$dat,$rndkey1 # last round key
- vcipherlast $inout,$inout,$dat
- lvx $rndkey1,$idx,$key
- addi $idx,$idx,16
- vperm $inout,$inout,$inout,$outperm
- vsel $dat,$outhead,$inout,$outmask
- mtctr $rounds
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vmr $outhead,$inout
- vxor $inout,$ivec,$rndkey0
- lvx $rndkey0,$idx,$key
- addi $idx,$idx,16
- stvx $dat,0,$out
- addi $out,$out,16
- bne Loop_ctr32_enc
- addi $out,$out,-1
- lvx $inout,0,$out # redundant in aligned case
- vsel $inout,$outhead,$inout,$outmask
- stvx $inout,0,$out
- mtspr 256,$vrsave
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,6,0
- .long 0
- ___
- #########################################################################
- {{ # Optimized CTR procedure #
- my $key_="r11";
- my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31));
- my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10,12..14));
- my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(15..22));
- my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
- # v26-v31 last 6 round keys
- my ($tmp,$keyperm)=($in3,$in4); # aliases with "caller", redundant assignment
- my ($two,$three,$four)=($outhead,$outperm,$outmask);
- $code.=<<___;
- .align 5
- _aesp8_ctr32_encrypt8x:
- $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
- li r10,`$FRAME+8*16+15`
- li r11,`$FRAME+8*16+31`
- stvx v20,r10,$sp # ABI says so
- addi r10,r10,32
- stvx v21,r11,$sp
- addi r11,r11,32
- stvx v22,r10,$sp
- addi r10,r10,32
- stvx v23,r11,$sp
- addi r11,r11,32
- stvx v24,r10,$sp
- addi r10,r10,32
- stvx v25,r11,$sp
- addi r11,r11,32
- stvx v26,r10,$sp
- addi r10,r10,32
- stvx v27,r11,$sp
- addi r11,r11,32
- stvx v28,r10,$sp
- addi r10,r10,32
- stvx v29,r11,$sp
- addi r11,r11,32
- stvx v30,r10,$sp
- stvx v31,r11,$sp
- li r0,-1
- stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
- li $x10,0x10
- $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
- li $x20,0x20
- $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
- li $x30,0x30
- $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
- li $x40,0x40
- $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
- li $x50,0x50
- $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
- li $x60,0x60
- $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
- li $x70,0x70
- mtspr 256,r0
- subi $rounds,$rounds,3 # -4 in total
- lvx $rndkey0,$x00,$key # load key schedule
- lvx v30,$x10,$key
- addi $key,$key,0x20
- lvx v31,$x00,$key
- ?vperm $rndkey0,$rndkey0,v30,$keyperm
- addi $key_,$sp,$FRAME+15
- mtctr $rounds
- Load_ctr32_enc_key:
- ?vperm v24,v30,v31,$keyperm
- lvx v30,$x10,$key
- addi $key,$key,0x20
- stvx v24,$x00,$key_ # off-load round[1]
- ?vperm v25,v31,v30,$keyperm
- lvx v31,$x00,$key
- stvx v25,$x10,$key_ # off-load round[2]
- addi $key_,$key_,0x20
- bdnz Load_ctr32_enc_key
- lvx v26,$x10,$key
- ?vperm v24,v30,v31,$keyperm
- lvx v27,$x20,$key
- stvx v24,$x00,$key_ # off-load round[3]
- ?vperm v25,v31,v26,$keyperm
- lvx v28,$x30,$key
- stvx v25,$x10,$key_ # off-load round[4]
- addi $key_,$sp,$FRAME+15 # rewind $key_
- ?vperm v26,v26,v27,$keyperm
- lvx v29,$x40,$key
- ?vperm v27,v27,v28,$keyperm
- lvx v30,$x50,$key
- ?vperm v28,v28,v29,$keyperm
- lvx v31,$x60,$key
- ?vperm v29,v29,v30,$keyperm
- lvx $out0,$x70,$key # borrow $out0
- ?vperm v30,v30,v31,$keyperm
- lvx v24,$x00,$key_ # pre-load round[1]
- ?vperm v31,v31,$out0,$keyperm
- lvx v25,$x10,$key_ # pre-load round[2]
- vadduqm $two,$one,$one
- subi $inp,$inp,15 # undo "caller"
- $SHL $len,$len,4
- vadduqm $out1,$ivec,$one # counter values ...
- vadduqm $out2,$ivec,$two
- vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0]
- le?li $idx,8
- vadduqm $out3,$out1,$two
- vxor $out1,$out1,$rndkey0
- le?lvsl $inpperm,0,$idx
- vadduqm $out4,$out2,$two
- vxor $out2,$out2,$rndkey0
- le?vspltisb $tmp,0x0f
- vadduqm $out5,$out3,$two
- vxor $out3,$out3,$rndkey0
- le?vxor $inpperm,$inpperm,$tmp # transform for lvx_u/stvx_u
- vadduqm $out6,$out4,$two
- vxor $out4,$out4,$rndkey0
- vadduqm $out7,$out5,$two
- vxor $out5,$out5,$rndkey0
- vadduqm $ivec,$out6,$two # next counter value
- vxor $out6,$out6,$rndkey0
- vxor $out7,$out7,$rndkey0
- mtctr $rounds
- b Loop_ctr32_enc8x
- .align 5
- Loop_ctr32_enc8x:
- vcipher $out0,$out0,v24
- vcipher $out1,$out1,v24
- vcipher $out2,$out2,v24
- vcipher $out3,$out3,v24
- vcipher $out4,$out4,v24
- vcipher $out5,$out5,v24
- vcipher $out6,$out6,v24
- vcipher $out7,$out7,v24
- Loop_ctr32_enc8x_middle:
- lvx v24,$x20,$key_ # round[3]
- addi $key_,$key_,0x20
- vcipher $out0,$out0,v25
- vcipher $out1,$out1,v25
- vcipher $out2,$out2,v25
- vcipher $out3,$out3,v25
- vcipher $out4,$out4,v25
- vcipher $out5,$out5,v25
- vcipher $out6,$out6,v25
- vcipher $out7,$out7,v25
- lvx v25,$x10,$key_ # round[4]
- bdnz Loop_ctr32_enc8x
- subic r11,$len,256 # $len-256, borrow $key_
- vcipher $out0,$out0,v24
- vcipher $out1,$out1,v24
- vcipher $out2,$out2,v24
- vcipher $out3,$out3,v24
- vcipher $out4,$out4,v24
- vcipher $out5,$out5,v24
- vcipher $out6,$out6,v24
- vcipher $out7,$out7,v24
- subfe r0,r0,r0 # borrow?-1:0
- vcipher $out0,$out0,v25
- vcipher $out1,$out1,v25
- vcipher $out2,$out2,v25
- vcipher $out3,$out3,v25
- vcipher $out4,$out4,v25
- vcipher $out5,$out5,v25
- vcipher $out6,$out6,v25
- vcipher $out7,$out7,v25
- and r0,r0,r11
- addi $key_,$sp,$FRAME+15 # rewind $key_
- vcipher $out0,$out0,v26
- vcipher $out1,$out1,v26
- vcipher $out2,$out2,v26
- vcipher $out3,$out3,v26
- vcipher $out4,$out4,v26
- vcipher $out5,$out5,v26
- vcipher $out6,$out6,v26
- vcipher $out7,$out7,v26
- lvx v24,$x00,$key_ # re-pre-load round[1]
- subic $len,$len,129 # $len-=129
- vcipher $out0,$out0,v27
- addi $len,$len,1 # $len-=128 really
- vcipher $out1,$out1,v27
- vcipher $out2,$out2,v27
- vcipher $out3,$out3,v27
- vcipher $out4,$out4,v27
- vcipher $out5,$out5,v27
- vcipher $out6,$out6,v27
- vcipher $out7,$out7,v27
- lvx v25,$x10,$key_ # re-pre-load round[2]
- vcipher $out0,$out0,v28
- lvx_u $in0,$x00,$inp # load input
- vcipher $out1,$out1,v28
- lvx_u $in1,$x10,$inp
- vcipher $out2,$out2,v28
- lvx_u $in2,$x20,$inp
- vcipher $out3,$out3,v28
- lvx_u $in3,$x30,$inp
- vcipher $out4,$out4,v28
- lvx_u $in4,$x40,$inp
- vcipher $out5,$out5,v28
- lvx_u $in5,$x50,$inp
- vcipher $out6,$out6,v28
- lvx_u $in6,$x60,$inp
- vcipher $out7,$out7,v28
- lvx_u $in7,$x70,$inp
- addi $inp,$inp,0x80
- vcipher $out0,$out0,v29
- le?vperm $in0,$in0,$in0,$inpperm
- vcipher $out1,$out1,v29
- le?vperm $in1,$in1,$in1,$inpperm
- vcipher $out2,$out2,v29
- le?vperm $in2,$in2,$in2,$inpperm
- vcipher $out3,$out3,v29
- le?vperm $in3,$in3,$in3,$inpperm
- vcipher $out4,$out4,v29
- le?vperm $in4,$in4,$in4,$inpperm
- vcipher $out5,$out5,v29
- le?vperm $in5,$in5,$in5,$inpperm
- vcipher $out6,$out6,v29
- le?vperm $in6,$in6,$in6,$inpperm
- vcipher $out7,$out7,v29
- le?vperm $in7,$in7,$in7,$inpperm
- add $inp,$inp,r0 # $inp is adjusted in such
- # way that at exit from the
- # loop inX-in7 are loaded
- # with last "words"
- subfe. r0,r0,r0 # borrow?-1:0
- vcipher $out0,$out0,v30
- vxor $in0,$in0,v31 # xor with last round key
- vcipher $out1,$out1,v30
- vxor $in1,$in1,v31
- vcipher $out2,$out2,v30
- vxor $in2,$in2,v31
- vcipher $out3,$out3,v30
- vxor $in3,$in3,v31
- vcipher $out4,$out4,v30
- vxor $in4,$in4,v31
- vcipher $out5,$out5,v30
- vxor $in5,$in5,v31
- vcipher $out6,$out6,v30
- vxor $in6,$in6,v31
- vcipher $out7,$out7,v30
- vxor $in7,$in7,v31
- bne Lctr32_enc8x_break # did $len-129 borrow?
- vcipherlast $in0,$out0,$in0
- vcipherlast $in1,$out1,$in1
- vadduqm $out1,$ivec,$one # counter values ...
- vcipherlast $in2,$out2,$in2
- vadduqm $out2,$ivec,$two
- vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0]
- vcipherlast $in3,$out3,$in3
- vadduqm $out3,$out1,$two
- vxor $out1,$out1,$rndkey0
- vcipherlast $in4,$out4,$in4
- vadduqm $out4,$out2,$two
- vxor $out2,$out2,$rndkey0
- vcipherlast $in5,$out5,$in5
- vadduqm $out5,$out3,$two
- vxor $out3,$out3,$rndkey0
- vcipherlast $in6,$out6,$in6
- vadduqm $out6,$out4,$two
- vxor $out4,$out4,$rndkey0
- vcipherlast $in7,$out7,$in7
- vadduqm $out7,$out5,$two
- vxor $out5,$out5,$rndkey0
- le?vperm $in0,$in0,$in0,$inpperm
- vadduqm $ivec,$out6,$two # next counter value
- vxor $out6,$out6,$rndkey0
- le?vperm $in1,$in1,$in1,$inpperm
- vxor $out7,$out7,$rndkey0
- mtctr $rounds
- vcipher $out0,$out0,v24
- stvx_u $in0,$x00,$out
- le?vperm $in2,$in2,$in2,$inpperm
- vcipher $out1,$out1,v24
- stvx_u $in1,$x10,$out
- le?vperm $in3,$in3,$in3,$inpperm
- vcipher $out2,$out2,v24
- stvx_u $in2,$x20,$out
- le?vperm $in4,$in4,$in4,$inpperm
- vcipher $out3,$out3,v24
- stvx_u $in3,$x30,$out
- le?vperm $in5,$in5,$in5,$inpperm
- vcipher $out4,$out4,v24
- stvx_u $in4,$x40,$out
- le?vperm $in6,$in6,$in6,$inpperm
- vcipher $out5,$out5,v24
- stvx_u $in5,$x50,$out
- le?vperm $in7,$in7,$in7,$inpperm
- vcipher $out6,$out6,v24
- stvx_u $in6,$x60,$out
- vcipher $out7,$out7,v24
- stvx_u $in7,$x70,$out
- addi $out,$out,0x80
- b Loop_ctr32_enc8x_middle
- .align 5
- # Tail dispatch. On entry $len holds (remaining_bytes - 128), i.e. a value
- # in [-0x80,-0x10] encoding how many of the eight pipelined blocks are real:
- # -0x70 -> one block, -0x60 -> two, ... 0x00 would mean all eight.
- # The nops keep the branch targets on favorable alignment.
- Lctr32_enc8x_break:
- cmpwi $len,-0x60
- blt Lctr32_enc8x_one
- nop
- beq Lctr32_enc8x_two
- cmpwi $len,-0x40
- blt Lctr32_enc8x_three
- nop
- beq Lctr32_enc8x_four
- cmpwi $len,-0x20
- blt Lctr32_enc8x_five
- nop
- beq Lctr32_enc8x_six
- cmpwi $len,0x00
- blt Lctr32_enc8x_seven
- Lctr32_enc8x_eight:
- vcipherlast $out0,$out0,$in0
- vcipherlast $out1,$out1,$in1
- vcipherlast $out2,$out2,$in2
- vcipherlast $out3,$out3,$in3
- vcipherlast $out4,$out4,$in4
- vcipherlast $out5,$out5,$in5
- vcipherlast $out6,$out6,$in6
- vcipherlast $out7,$out7,$in7
- le?vperm $out0,$out0,$out0,$inpperm
- le?vperm $out1,$out1,$out1,$inpperm
- stvx_u $out0,$x00,$out
- le?vperm $out2,$out2,$out2,$inpperm
- stvx_u $out1,$x10,$out
- le?vperm $out3,$out3,$out3,$inpperm
- stvx_u $out2,$x20,$out
- le?vperm $out4,$out4,$out4,$inpperm
- stvx_u $out3,$x30,$out
- le?vperm $out5,$out5,$out5,$inpperm
- stvx_u $out4,$x40,$out
- le?vperm $out6,$out6,$out6,$inpperm
- stvx_u $out5,$x50,$out
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out6,$x60,$out
- stvx_u $out7,$x70,$out
- addi $out,$out,0x80
- b Lctr32_enc8x_done
- .align 5
- Lctr32_enc8x_seven:
- vcipherlast $out0,$out0,$in1
- vcipherlast $out1,$out1,$in2
- vcipherlast $out2,$out2,$in3
- vcipherlast $out3,$out3,$in4
- vcipherlast $out4,$out4,$in5
- vcipherlast $out5,$out5,$in6
- vcipherlast $out6,$out6,$in7
- le?vperm $out0,$out0,$out0,$inpperm
- le?vperm $out1,$out1,$out1,$inpperm
- stvx_u $out0,$x00,$out
- le?vperm $out2,$out2,$out2,$inpperm
- stvx_u $out1,$x10,$out
- le?vperm $out3,$out3,$out3,$inpperm
- stvx_u $out2,$x20,$out
- le?vperm $out4,$out4,$out4,$inpperm
- stvx_u $out3,$x30,$out
- le?vperm $out5,$out5,$out5,$inpperm
- stvx_u $out4,$x40,$out
- le?vperm $out6,$out6,$out6,$inpperm
- stvx_u $out5,$x50,$out
- stvx_u $out6,$x60,$out
- addi $out,$out,0x70
- b Lctr32_enc8x_done
- .align 5
- Lctr32_enc8x_six:
- vcipherlast $out0,$out0,$in2
- vcipherlast $out1,$out1,$in3
- vcipherlast $out2,$out2,$in4
- vcipherlast $out3,$out3,$in5
- vcipherlast $out4,$out4,$in6
- vcipherlast $out5,$out5,$in7
- le?vperm $out0,$out0,$out0,$inpperm
- le?vperm $out1,$out1,$out1,$inpperm
- stvx_u $out0,$x00,$out
- le?vperm $out2,$out2,$out2,$inpperm
- stvx_u $out1,$x10,$out
- le?vperm $out3,$out3,$out3,$inpperm
- stvx_u $out2,$x20,$out
- le?vperm $out4,$out4,$out4,$inpperm
- stvx_u $out3,$x30,$out
- le?vperm $out5,$out5,$out5,$inpperm
- stvx_u $out4,$x40,$out
- stvx_u $out5,$x50,$out
- addi $out,$out,0x60
- b Lctr32_enc8x_done
- .align 5
- Lctr32_enc8x_five:
- vcipherlast $out0,$out0,$in3
- vcipherlast $out1,$out1,$in4
- vcipherlast $out2,$out2,$in5
- vcipherlast $out3,$out3,$in6
- vcipherlast $out4,$out4,$in7
- le?vperm $out0,$out0,$out0,$inpperm
- le?vperm $out1,$out1,$out1,$inpperm
- stvx_u $out0,$x00,$out
- le?vperm $out2,$out2,$out2,$inpperm
- stvx_u $out1,$x10,$out
- le?vperm $out3,$out3,$out3,$inpperm
- stvx_u $out2,$x20,$out
- le?vperm $out4,$out4,$out4,$inpperm
- stvx_u $out3,$x30,$out
- stvx_u $out4,$x40,$out
- addi $out,$out,0x50
- b Lctr32_enc8x_done
- .align 5
- Lctr32_enc8x_four:
- vcipherlast $out0,$out0,$in4
- vcipherlast $out1,$out1,$in5
- vcipherlast $out2,$out2,$in6
- vcipherlast $out3,$out3,$in7
- le?vperm $out0,$out0,$out0,$inpperm
- le?vperm $out1,$out1,$out1,$inpperm
- stvx_u $out0,$x00,$out
- le?vperm $out2,$out2,$out2,$inpperm
- stvx_u $out1,$x10,$out
- le?vperm $out3,$out3,$out3,$inpperm
- stvx_u $out2,$x20,$out
- stvx_u $out3,$x30,$out
- addi $out,$out,0x40
- b Lctr32_enc8x_done
- .align 5
- # Tail: exactly three blocks remain. Finalize the three freshest counter
- # streams ($out0-$out2) against the last three inputs loaded by the main
- # loop ($in5-$in7, already xored with the final round key), then store.
- Lctr32_enc8x_three:
- vcipherlast $out0,$out0,$in5
- vcipherlast $out1,$out1,$in6
- vcipherlast $out2,$out2,$in7
- le?vperm $out0,$out0,$out0,$inpperm
- le?vperm $out1,$out1,$out1,$inpperm
- stvx_u $out0,$x00,$out
- le?vperm $out2,$out2,$out2,$inpperm
- stvx_u $out1,$x10,$out
- stvx_u $out2,$x20,$out
- addi $out,$out,0x30
- b Lctr32_enc8x_done
- # FIX: previously branched to "Lcbc_dec8x_done", a copy-paste remnant of
- # the CBC-decrypt tail. That epilogue additionally writes $ivec back
- # through $ivp ("write [unaligned] iv"), which the CTR entry point must
- # not do; the key-wipe/restore sequences of the two epilogues are
- # otherwise identical, which is why the bug went unnoticed.
- .align 5
- # Tail: exactly two blocks remain ($out0-$out1 vs. inputs $in6-$in7).
- Lctr32_enc8x_two:
- vcipherlast $out0,$out0,$in6
- vcipherlast $out1,$out1,$in7
- le?vperm $out0,$out0,$out0,$inpperm
- le?vperm $out1,$out1,$out1,$inpperm
- stvx_u $out0,$x00,$out
- stvx_u $out1,$x10,$out
- addi $out,$out,0x20
- b Lctr32_enc8x_done
- # FIX: previously branched to "Lcbc_dec8x_done" (copy-paste from the CBC
- # decrypt path), whose epilogue also stores $ivec through $ivp — a write
- # CTR mode must not perform.
- .align 5
- Lctr32_enc8x_one:
- vcipherlast $out0,$out0,$in7
- le?vperm $out0,$out0,$out0,$inpperm
- stvx_u $out0,0,$out
- addi $out,$out,0x10
- Lctr32_enc8x_done:
- li r10,`$FRAME+15`
- li r11,`$FRAME+31`
- stvx $inpperm,r10,$sp # wipe copies of round keys
- addi r10,r10,32
- stvx $inpperm,r11,$sp
- addi r11,r11,32
- stvx $inpperm,r10,$sp
- addi r10,r10,32
- stvx $inpperm,r11,$sp
- addi r11,r11,32
- stvx $inpperm,r10,$sp
- addi r10,r10,32
- stvx $inpperm,r11,$sp
- addi r11,r11,32
- stvx $inpperm,r10,$sp
- addi r10,r10,32
- stvx $inpperm,r11,$sp
- addi r11,r11,32
- mtspr 256,$vrsave
- lvx v20,r10,$sp # ABI says so
- addi r10,r10,32
- lvx v21,r11,$sp
- addi r11,r11,32
- lvx v22,r10,$sp
- addi r10,r10,32
- lvx v23,r11,$sp
- addi r11,r11,32
- lvx v24,r10,$sp
- addi r10,r10,32
- lvx v25,r11,$sp
- addi r11,r11,32
- lvx v26,r10,$sp
- addi r10,r10,32
- lvx v27,r11,$sp
- addi r11,r11,32
- lvx v28,r10,$sp
- addi r10,r10,32
- lvx v29,r11,$sp
- addi r11,r11,32
- lvx v30,r10,$sp
- lvx v31,r11,$sp
- $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
- $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
- $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
- $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
- $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
- $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
- addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
- blr
- .long 0
- .byte 0,12,0x14,0,0x80,6,6,0
- .long 0
- .size .${prefix}_ctr32_encrypt_blocks,.-.${prefix}_ctr32_encrypt_blocks
- ___
- }} }}}
- # Post-process the generated assembly line by line:
- #  * evaluate `...` arithmetic placeholders,
- #  * expand the leading constants table into endian-correct .byte rows,
- #  * resolve the le?/be?/?-prefixed endian-specific instructions.
- my $consts=1;
- foreach(split("\n",$code)) {
- s/\`([^\`]*)\`/eval($1)/geo;
- # constants table endian-specific conversion
- if ($consts && m/\.(long|byte)\s+(.+)\s+(\?[a-z]*)$/o) {
- my $conv=$3;
- my @bytes=();
- # convert to endian-agnostic format
- if ($1 eq "long") {
- foreach (split(/,\s*/,$2)) {
- my $l = /^0/?oct:int;  # leading 0 => octal/hex via oct(), else decimal
- push @bytes,($l>>24)&0xff,($l>>16)&0xff,($l>>8)&0xff,$l&0xff;
- }
- } else {
- @bytes = map(/^0/?oct:int,split(/,\s*/,$2));
- }
- # little-endian conversion: "?inv" flips each nibble index (x^0xf),
- # "?rev" mirrors the whole 16-byte row
- if ($flavour =~ /le$/o) {
- SWITCH: for($conv) {
- /\?inv/ && do { @bytes=map($_^0xf,@bytes); last; };
- /\?rev/ && do { @bytes=reverse(@bytes); last; };
- }
- }
- #emit
- print ".byte\t",join(',',map (sprintf("0x%02x",$_),@bytes)),"\n";
- next;
- }
- $consts=0 if (m/Lconsts:/o); # end of table
- # instructions prefixed with '?' are endian-specific and need
- # to be adjusted accordingly...
- if ($flavour =~ /le$/o) { # little-endian
- s/le\?//o or
- s/be\?/#be#/o or
- s/\?lvsr/lvsl/o or
- s/\?lvsl/lvsr/o or
- s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/o or
- s/\?(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/o or
- s/\?(vspltw\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9])/$1$2 3-$3/o;
- } else { # big-endian: keep be? lines, drop the '?' marker verbatim
- s/le\?/#le#/o or
- s/be\?//o or
- s/\?([a-z]+)/$1/o;
- }
- print $_,"\n";
- }
- close STDOUT;
|