Adding in curl and openssl repos

2025-08-14 12:09:30 -04:00
parent af2117b574
commit 0ace93e303
21174 changed files with 3607720 additions and 2 deletions

(3 file diffs suppressed: too large to display)

@@ -0,0 +1,925 @@
#! /usr/bin/env perl
# Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# ChaCha20 for C64x+.
#
# October 2015
#
# Performance is 3.54 cycles per processed byte, which is ~4.3 times
# faster than code generated by the TI compiler. The compiler also
# disables interrupts for some reason, making interrupt response time
# dependent on input length. This module, on the other hand, is free
# from that limitation.
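#
# For reference, the schedule below implements the standard ChaCha
# quarter-round (RFC 8439). A minimal scalar model in Perl, added here
# purely for illustration; the helper name is ours and the generator
# never calls it:
#
sub _quarter_round_reference {
    my ($a, $b, $c, $d) = @_;
    my $rotl = sub { (($_[0] << $_[1]) | ($_[0] >> (32 - $_[1]))) & 0xffffffff };
    $a = ($a + $b) & 0xffffffff;  $d = $rotl->($d ^ $a, 16);
    $c = ($c + $d) & 0xffffffff;  $b = $rotl->($b ^ $c, 12);
    $a = ($a + $b) & 0xffffffff;  $d = $rotl->($d ^ $a,  8);
    $c = ($c + $d) & 0xffffffff;  $b = $rotl->($b ^ $c,  7);
    return ($a, $b, $c, $d);
}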
$output=pop and open STDOUT,">$output";
($OUT,$INP,$LEN,$KEYB,$COUNTERA)=("A4","B4","A6","B6","A8");
($KEYA,$COUNTERB,$STEP)=("A7","B7","A3");
@X= ("A16","B16","A17","B17","A18","B18","A19","B19",
"A20","B20","A21","B21","A22","B22","A23","B23");
@Y= ("A24","B24","A25","B25","A26","B26","A27","B27",
"A28","B28","A29","B29","A30","B30","A31","B31");
@DAT=("A6", "A7", "B6", "B7", "A8", "A9", "B8", "B9",
"A10","A11","B10","B11","A12","A13","B12","B13");
# yes, overlaps with @DAT, used only in 2x interleave code path...
@K2x=("A6", "B6", "A7", "B7", "A8", "B8", "A9", "B9",
"A10","B10","A11","B11","A2", "B2", "A13","B13");
$code.=<<___;
.text
.if .ASSEMBLER_VERSION<7000000
.asg 0,__TI_EABI__
.endif
.if __TI_EABI__
.asg ChaCha20_ctr32,_ChaCha20_ctr32
.endif
.asg B3,RA
.asg A15,FP
.asg B15,SP
.global _ChaCha20_ctr32
.align 32
_ChaCha20_ctr32:
.asmfunc stack_usage(40+64)
MV $LEN,A0 ; reassign
[!A0] BNOP RA ; no data
|| [A0] STW FP,*SP--(40+64) ; save frame pointer and alloca(40+64)
|| [A0] MV SP,FP
[A0] STDW B13:B12,*SP[4+8] ; ABI says so
|| [A0] MV $KEYB,$KEYA
|| [A0] MV $COUNTERA,$COUNTERB
[A0] STDW B11:B10,*SP[3+8]
|| [A0] STDW A13:A12,*FP[-3]
[A0] STDW A11:A10,*FP[-4]
|| [A0] MVK 128,$STEP ; 2 * input block size
[A0] LDW *${KEYA}[0],@Y[4] ; load key
|| [A0] LDW *${KEYB}[1],@Y[5]
|| [A0] MVK 0x00007865,@Y[0] ; synthesize sigma
|| [A0] MVK 0x0000646e,@Y[1]
[A0] LDW *${KEYA}[2],@Y[6]
|| [A0] LDW *${KEYB}[3],@Y[7]
|| [A0] MVKH 0x61700000,@Y[0]
|| [A0] MVKH 0x33200000,@Y[1]
LDW *${KEYA}[4],@Y[8]
|| LDW *${KEYB}[5],@Y[9]
|| MVK 0x00002d32,@Y[2]
|| MVK 0x00006574,@Y[3]
LDW *${KEYA}[6],@Y[10]
|| LDW *${KEYB}[7],@Y[11]
|| MVKH 0x79620000,@Y[2]
|| MVKH 0x6b200000,@Y[3]
LDW *${COUNTERA}[0],@Y[12] ; load counter||nonce
|| LDW *${COUNTERB}[1],@Y[13]
|| CMPLTU A0,$STEP,A1 ; is length < 2*blocks?
LDW *${COUNTERA}[2],@Y[14]
|| LDW *${COUNTERB}[3],@Y[15]
|| [A1] BNOP top1x?
[A1] MVK 64,$STEP ; input block size
|| MVK 10,B0 ; inner loop counter
DMV @Y[2],@Y[0],@X[2]:@X[0] ; copy block
|| DMV @Y[3],@Y[1],@X[3]:@X[1]
||[!A1] STDW @Y[2]:@Y[0],*FP[-12] ; offload key material to stack
||[!A1] STDW @Y[3]:@Y[1],*SP[2]
DMV @Y[6],@Y[4],@X[6]:@X[4]
|| DMV @Y[7],@Y[5],@X[7]:@X[5]
||[!A1] STDW @Y[6]:@Y[4],*FP[-10]
||[!A1] STDW @Y[7]:@Y[5],*SP[4]
DMV @Y[10],@Y[8],@X[10]:@X[8]
|| DMV @Y[11],@Y[9],@X[11]:@X[9]
||[!A1] STDW @Y[10]:@Y[8],*FP[-8]
||[!A1] STDW @Y[11]:@Y[9],*SP[6]
DMV @Y[14],@Y[12],@X[14]:@X[12]
|| DMV @Y[15],@Y[13],@X[15]:@X[13]
||[!A1] MV @Y[12],@K2x[12] ; counter
||[!A1] MV @Y[13],@K2x[13]
||[!A1] STW @Y[14],*FP[-6*2]
||[!A1] STW @Y[15],*SP[8*2]
___
{ ################################################################
# 2x interleave gives 50% performance improvement
#
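# The idea, as the register map above suggests: @X and @Y hold two
# independent 64-byte blocks (their counters differ by one), and their
# quarter-rounds are issued in alternating ||-bundled instructions, so
# the eight C64x+ functional units stay busy instead of waiting on a
# single block's dependency chain.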
my ($a0,$a1,$a2,$a3) = (0..3);
my ($b0,$b1,$b2,$b3) = (4..7);
my ($c0,$c1,$c2,$c3) = (8..11);
my ($d0,$d1,$d2,$d3) = (12..15);
$code.=<<___;
outer2x?:
ADD @X[$b1],@X[$a1],@X[$a1]
|| ADD @X[$b2],@X[$a2],@X[$a2]
|| ADD @X[$b0],@X[$a0],@X[$a0]
|| ADD @X[$b3],@X[$a3],@X[$a3]
|| DMV @Y[2],@Y[0],@K2x[2]:@K2x[0]
|| DMV @Y[3],@Y[1],@K2x[3]:@K2x[1]
XOR @X[$a1],@X[$d1],@X[$d1]
|| XOR @X[$a2],@X[$d2],@X[$d2]
|| XOR @X[$a0],@X[$d0],@X[$d0]
|| XOR @X[$a3],@X[$d3],@X[$d3]
|| DMV @Y[6],@Y[4],@K2x[6]:@K2x[4]
|| DMV @Y[7],@Y[5],@K2x[7]:@K2x[5]
SWAP2 @X[$d1],@X[$d1] ; rotate by 16
|| SWAP2 @X[$d2],@X[$d2]
|| SWAP2 @X[$d0],@X[$d0]
|| SWAP2 @X[$d3],@X[$d3]
ADD @X[$d1],@X[$c1],@X[$c1]
|| ADD @X[$d2],@X[$c2],@X[$c2]
|| ADD @X[$d0],@X[$c0],@X[$c0]
|| ADD @X[$d3],@X[$c3],@X[$c3]
|| DMV @Y[10],@Y[8],@K2x[10]:@K2x[8]
|| DMV @Y[11],@Y[9],@K2x[11]:@K2x[9]
XOR @X[$c1],@X[$b1],@X[$b1]
|| XOR @X[$c2],@X[$b2],@X[$b2]
|| XOR @X[$c0],@X[$b0],@X[$b0]
|| XOR @X[$c3],@X[$b3],@X[$b3]
|| ADD 1,@Y[12],@Y[12] ; adjust counter for 2nd block
ROTL @X[$b1],12,@X[$b1]
|| ROTL @X[$b2],12,@X[$b2]
|| MV @Y[14],@K2x[14]
|| MV @Y[15],@K2x[15]
top2x?:
ROTL @X[$b0],12,@X[$b0]
|| ROTL @X[$b3],12,@X[$b3]
|| ADD @Y[$b1],@Y[$a1],@Y[$a1]
|| ADD @Y[$b2],@Y[$a2],@Y[$a2]
ADD @Y[$b0],@Y[$a0],@Y[$a0]
|| ADD @Y[$b3],@Y[$a3],@Y[$a3]
|| ADD @X[$b1],@X[$a1],@X[$a1]
|| ADD @X[$b2],@X[$a2],@X[$a2]
|| XOR @Y[$a1],@Y[$d1],@Y[$d1]
|| XOR @Y[$a2],@Y[$d2],@Y[$d2]
XOR @Y[$a0],@Y[$d0],@Y[$d0]
|| XOR @Y[$a3],@Y[$d3],@Y[$d3]
|| ADD @X[$b0],@X[$a0],@X[$a0]
|| ADD @X[$b3],@X[$a3],@X[$a3]
|| XOR @X[$a1],@X[$d1],@X[$d1]
|| XOR @X[$a2],@X[$d2],@X[$d2]
XOR @X[$a0],@X[$d0],@X[$d0]
|| XOR @X[$a3],@X[$d3],@X[$d3]
|| ROTL @X[$d1],8,@X[$d1]
|| ROTL @X[$d2],8,@X[$d2]
|| SWAP2 @Y[$d1],@Y[$d1] ; rotate by 16
|| SWAP2 @Y[$d2],@Y[$d2]
|| SWAP2 @Y[$d0],@Y[$d0]
|| SWAP2 @Y[$d3],@Y[$d3]
ROTL @X[$d0],8,@X[$d0]
|| ROTL @X[$d3],8,@X[$d3]
|| ADD @Y[$d1],@Y[$c1],@Y[$c1]
|| ADD @Y[$d2],@Y[$c2],@Y[$c2]
|| ADD @Y[$d0],@Y[$c0],@Y[$c0]
|| ADD @Y[$d3],@Y[$c3],@Y[$c3]
|| BNOP middle2x1? ; protect from interrupt
ADD @X[$d1],@X[$c1],@X[$c1]
|| ADD @X[$d2],@X[$c2],@X[$c2]
|| XOR @Y[$c1],@Y[$b1],@Y[$b1]
|| XOR @Y[$c2],@Y[$b2],@Y[$b2]
|| XOR @Y[$c0],@Y[$b0],@Y[$b0]
|| XOR @Y[$c3],@Y[$b3],@Y[$b3]
ADD @X[$d0],@X[$c0],@X[$c0]
|| ADD @X[$d3],@X[$c3],@X[$c3]
|| XOR @X[$c1],@X[$b1],@X[$b1]
|| XOR @X[$c2],@X[$b2],@X[$b2]
|| ROTL @X[$d1],0,@X[$d2] ; moved to avoid cross-path stall
|| ROTL @X[$d2],0,@X[$d3]
XOR @X[$c0],@X[$b0],@X[$b0]
|| XOR @X[$c3],@X[$b3],@X[$b3]
|| MV @X[$d0],@X[$d1]
|| MV @X[$d3],@X[$d0]
|| ROTL @Y[$b1],12,@Y[$b1]
|| ROTL @Y[$b2],12,@Y[$b2]
ROTL @X[$b1],7,@X[$b0] ; avoided cross-path stall
|| ROTL @X[$b2],7,@X[$b1]
ROTL @X[$b0],7,@X[$b3]
|| ROTL @X[$b3],7,@X[$b2]
middle2x1?:
ROTL @Y[$b0],12,@Y[$b0]
|| ROTL @Y[$b3],12,@Y[$b3]
|| ADD @X[$b0],@X[$a0],@X[$a0]
|| ADD @X[$b1],@X[$a1],@X[$a1]
ADD @X[$b2],@X[$a2],@X[$a2]
|| ADD @X[$b3],@X[$a3],@X[$a3]
|| ADD @Y[$b1],@Y[$a1],@Y[$a1]
|| ADD @Y[$b2],@Y[$a2],@Y[$a2]
|| XOR @X[$a0],@X[$d0],@X[$d0]
|| XOR @X[$a1],@X[$d1],@X[$d1]
XOR @X[$a2],@X[$d2],@X[$d2]
|| XOR @X[$a3],@X[$d3],@X[$d3]
|| ADD @Y[$b0],@Y[$a0],@Y[$a0]
|| ADD @Y[$b3],@Y[$a3],@Y[$a3]
|| XOR @Y[$a1],@Y[$d1],@Y[$d1]
|| XOR @Y[$a2],@Y[$d2],@Y[$d2]
XOR @Y[$a0],@Y[$d0],@Y[$d0]
|| XOR @Y[$a3],@Y[$d3],@Y[$d3]
|| ROTL @Y[$d1],8,@Y[$d1]
|| ROTL @Y[$d2],8,@Y[$d2]
|| SWAP2 @X[$d0],@X[$d0] ; rotate by 16
|| SWAP2 @X[$d1],@X[$d1]
|| SWAP2 @X[$d2],@X[$d2]
|| SWAP2 @X[$d3],@X[$d3]
ROTL @Y[$d0],8,@Y[$d0]
|| ROTL @Y[$d3],8,@Y[$d3]
|| ADD @X[$d0],@X[$c2],@X[$c2]
|| ADD @X[$d1],@X[$c3],@X[$c3]
|| ADD @X[$d2],@X[$c0],@X[$c0]
|| ADD @X[$d3],@X[$c1],@X[$c1]
|| BNOP middle2x2? ; protect from interrupt
ADD @Y[$d1],@Y[$c1],@Y[$c1]
|| ADD @Y[$d2],@Y[$c2],@Y[$c2]
|| XOR @X[$c2],@X[$b0],@X[$b0]
|| XOR @X[$c3],@X[$b1],@X[$b1]
|| XOR @X[$c0],@X[$b2],@X[$b2]
|| XOR @X[$c1],@X[$b3],@X[$b3]
ADD @Y[$d0],@Y[$c0],@Y[$c0]
|| ADD @Y[$d3],@Y[$c3],@Y[$c3]
|| XOR @Y[$c1],@Y[$b1],@Y[$b1]
|| XOR @Y[$c2],@Y[$b2],@Y[$b2]
|| ROTL @Y[$d1],0,@Y[$d2] ; moved to avoid cross-path stall
|| ROTL @Y[$d2],0,@Y[$d3]
XOR @Y[$c0],@Y[$b0],@Y[$b0]
|| XOR @Y[$c3],@Y[$b3],@Y[$b3]
|| MV @Y[$d0],@Y[$d1]
|| MV @Y[$d3],@Y[$d0]
|| ROTL @X[$b0],12,@X[$b0]
|| ROTL @X[$b1],12,@X[$b1]
ROTL @Y[$b1],7,@Y[$b0] ; avoided cross-path stall
|| ROTL @Y[$b2],7,@Y[$b1]
ROTL @Y[$b0],7,@Y[$b3]
|| ROTL @Y[$b3],7,@Y[$b2]
middle2x2?:
ROTL @X[$b2],12,@X[$b2]
|| ROTL @X[$b3],12,@X[$b3]
|| ADD @Y[$b0],@Y[$a0],@Y[$a0]
|| ADD @Y[$b1],@Y[$a1],@Y[$a1]
ADD @Y[$b2],@Y[$a2],@Y[$a2]
|| ADD @Y[$b3],@Y[$a3],@Y[$a3]
|| ADD @X[$b0],@X[$a0],@X[$a0]
|| ADD @X[$b1],@X[$a1],@X[$a1]
|| XOR @Y[$a0],@Y[$d0],@Y[$d0]
|| XOR @Y[$a1],@Y[$d1],@Y[$d1]
XOR @Y[$a2],@Y[$d2],@Y[$d2]
|| XOR @Y[$a3],@Y[$d3],@Y[$d3]
|| ADD @X[$b2],@X[$a2],@X[$a2]
|| ADD @X[$b3],@X[$a3],@X[$a3]
|| XOR @X[$a0],@X[$d0],@X[$d0]
|| XOR @X[$a1],@X[$d1],@X[$d1]
XOR @X[$a2],@X[$d2],@X[$d2]
|| XOR @X[$a3],@X[$d3],@X[$d3]
|| ROTL @X[$d0],8,@X[$d0]
|| ROTL @X[$d1],8,@X[$d1]
|| SWAP2 @Y[$d0],@Y[$d0] ; rotate by 16
|| SWAP2 @Y[$d1],@Y[$d1]
|| SWAP2 @Y[$d2],@Y[$d2]
|| SWAP2 @Y[$d3],@Y[$d3]
ROTL @X[$d2],8,@X[$d2]
|| ROTL @X[$d3],8,@X[$d3]
|| ADD @Y[$d0],@Y[$c2],@Y[$c2]
|| ADD @Y[$d1],@Y[$c3],@Y[$c3]
|| ADD @Y[$d2],@Y[$c0],@Y[$c0]
|| ADD @Y[$d3],@Y[$c1],@Y[$c1]
|| BNOP bottom2x1? ; protect from interrupt
ADD @X[$d0],@X[$c2],@X[$c2]
|| ADD @X[$d1],@X[$c3],@X[$c3]
|| XOR @Y[$c2],@Y[$b0],@Y[$b0]
|| XOR @Y[$c3],@Y[$b1],@Y[$b1]
|| XOR @Y[$c0],@Y[$b2],@Y[$b2]
|| XOR @Y[$c1],@Y[$b3],@Y[$b3]
ADD @X[$d2],@X[$c0],@X[$c0]
|| ADD @X[$d3],@X[$c1],@X[$c1]
|| XOR @X[$c2],@X[$b0],@X[$b0]
|| XOR @X[$c3],@X[$b1],@X[$b1]
|| ROTL @X[$d0],0,@X[$d3] ; moved to avoid cross-path stall
|| ROTL @X[$d1],0,@X[$d0]
XOR @X[$c0],@X[$b2],@X[$b2]
|| XOR @X[$c1],@X[$b3],@X[$b3]
|| MV @X[$d2],@X[$d1]
|| MV @X[$d3],@X[$d2]
|| ROTL @Y[$b0],12,@Y[$b0]
|| ROTL @Y[$b1],12,@Y[$b1]
ROTL @X[$b0],7,@X[$b1] ; avoided cross-path stall
|| ROTL @X[$b1],7,@X[$b2]
ROTL @X[$b2],7,@X[$b3]
|| ROTL @X[$b3],7,@X[$b0]
|| [B0] SUB B0,1,B0 ; decrement inner loop counter
bottom2x1?:
ROTL @Y[$b2],12,@Y[$b2]
|| ROTL @Y[$b3],12,@Y[$b3]
|| [B0] ADD @X[$b1],@X[$a1],@X[$a1] ; modulo-scheduled
|| [B0] ADD @X[$b2],@X[$a2],@X[$a2]
[B0] ADD @X[$b0],@X[$a0],@X[$a0]
|| [B0] ADD @X[$b3],@X[$a3],@X[$a3]
|| ADD @Y[$b0],@Y[$a0],@Y[$a0]
|| ADD @Y[$b1],@Y[$a1],@Y[$a1]
|| [B0] XOR @X[$a1],@X[$d1],@X[$d1]
|| [B0] XOR @X[$a2],@X[$d2],@X[$d2]
[B0] XOR @X[$a0],@X[$d0],@X[$d0]
|| [B0] XOR @X[$a3],@X[$d3],@X[$d3]
|| ADD @Y[$b2],@Y[$a2],@Y[$a2]
|| ADD @Y[$b3],@Y[$a3],@Y[$a3]
|| XOR @Y[$a0],@Y[$d0],@Y[$d0]
|| XOR @Y[$a1],@Y[$d1],@Y[$d1]
XOR @Y[$a2],@Y[$d2],@Y[$d2]
|| XOR @Y[$a3],@Y[$d3],@Y[$d3]
|| ROTL @Y[$d0],8,@Y[$d0]
|| ROTL @Y[$d1],8,@Y[$d1]
|| [B0] SWAP2 @X[$d1],@X[$d1] ; rotate by 16
|| [B0] SWAP2 @X[$d2],@X[$d2]
|| [B0] SWAP2 @X[$d0],@X[$d0]
|| [B0] SWAP2 @X[$d3],@X[$d3]
ROTL @Y[$d2],8,@Y[$d2]
|| ROTL @Y[$d3],8,@Y[$d3]
|| [B0] ADD @X[$d1],@X[$c1],@X[$c1]
|| [B0] ADD @X[$d2],@X[$c2],@X[$c2]
|| [B0] ADD @X[$d0],@X[$c0],@X[$c0]
|| [B0] ADD @X[$d3],@X[$c3],@X[$c3]
|| [B0] BNOP top2x? ; even protects from interrupt
ADD @Y[$d0],@Y[$c2],@Y[$c2]
|| ADD @Y[$d1],@Y[$c3],@Y[$c3]
|| [B0] XOR @X[$c1],@X[$b1],@X[$b1]
|| [B0] XOR @X[$c2],@X[$b2],@X[$b2]
|| [B0] XOR @X[$c0],@X[$b0],@X[$b0]
|| [B0] XOR @X[$c3],@X[$b3],@X[$b3]
ADD @Y[$d2],@Y[$c0],@Y[$c0]
|| ADD @Y[$d3],@Y[$c1],@Y[$c1]
|| XOR @Y[$c2],@Y[$b0],@Y[$b0]
|| XOR @Y[$c3],@Y[$b1],@Y[$b1]
|| ROTL @Y[$d0],0,@Y[$d3] ; moved to avoid cross-path stall
|| ROTL @Y[$d1],0,@Y[$d0]
XOR @Y[$c0],@Y[$b2],@Y[$b2]
|| XOR @Y[$c1],@Y[$b3],@Y[$b3]
|| MV @Y[$d2],@Y[$d1]
|| MV @Y[$d3],@Y[$d2]
|| [B0] ROTL @X[$b1],12,@X[$b1]
|| [B0] ROTL @X[$b2],12,@X[$b2]
ROTL @Y[$b0],7,@Y[$b1] ; avoided cross-path stall
|| ROTL @Y[$b1],7,@Y[$b2]
ROTL @Y[$b2],7,@Y[$b3]
|| ROTL @Y[$b3],7,@Y[$b0]
bottom2x2?:
___
}
$code.=<<___;
ADD @K2x[0],@X[0],@X[0] ; accumulate key material
|| ADD @K2x[1],@X[1],@X[1]
|| ADD @K2x[2],@X[2],@X[2]
|| ADD @K2x[3],@X[3],@X[3]
ADD @K2x[0],@Y[0],@Y[0]
|| ADD @K2x[1],@Y[1],@Y[1]
|| ADD @K2x[2],@Y[2],@Y[2]
|| ADD @K2x[3],@Y[3],@Y[3]
|| LDNDW *${INP}++[8],@DAT[1]:@DAT[0]
ADD @K2x[4],@X[4],@X[4]
|| ADD @K2x[5],@X[5],@X[5]
|| ADD @K2x[6],@X[6],@X[6]
|| ADD @K2x[7],@X[7],@X[7]
|| LDNDW *${INP}[-7],@DAT[3]:@DAT[2]
ADD @K2x[4],@Y[4],@Y[4]
|| ADD @K2x[5],@Y[5],@Y[5]
|| ADD @K2x[6],@Y[6],@Y[6]
|| ADD @K2x[7],@Y[7],@Y[7]
|| LDNDW *${INP}[-6],@DAT[5]:@DAT[4]
ADD @K2x[8],@X[8],@X[8]
|| ADD @K2x[9],@X[9],@X[9]
|| ADD @K2x[10],@X[10],@X[10]
|| ADD @K2x[11],@X[11],@X[11]
|| LDNDW *${INP}[-5],@DAT[7]:@DAT[6]
ADD @K2x[8],@Y[8],@Y[8]
|| ADD @K2x[9],@Y[9],@Y[9]
|| ADD @K2x[10],@Y[10],@Y[10]
|| ADD @K2x[11],@Y[11],@Y[11]
|| LDNDW *${INP}[-4],@DAT[9]:@DAT[8]
ADD @K2x[12],@X[12],@X[12]
|| ADD @K2x[13],@X[13],@X[13]
|| ADD @K2x[14],@X[14],@X[14]
|| ADD @K2x[15],@X[15],@X[15]
|| LDNDW *${INP}[-3],@DAT[11]:@DAT[10]
ADD @K2x[12],@Y[12],@Y[12]
|| ADD @K2x[13],@Y[13],@Y[13]
|| ADD @K2x[14],@Y[14],@Y[14]
|| ADD @K2x[15],@Y[15],@Y[15]
|| LDNDW *${INP}[-2],@DAT[13]:@DAT[12]
ADD 1,@Y[12],@Y[12] ; adjust counter for 2nd block
|| ADD 2,@K2x[12],@K2x[12] ; increment counter
|| LDNDW *${INP}[-1],@DAT[15]:@DAT[14]
.if .BIG_ENDIAN
SWAP2 @X[0],@X[0]
|| SWAP2 @X[1],@X[1]
|| SWAP2 @X[2],@X[2]
|| SWAP2 @X[3],@X[3]
SWAP2 @X[4],@X[4]
|| SWAP2 @X[5],@X[5]
|| SWAP2 @X[6],@X[6]
|| SWAP2 @X[7],@X[7]
SWAP2 @X[8],@X[8]
|| SWAP2 @X[9],@X[9]
|| SWAP4 @X[0],@X[1]
|| SWAP4 @X[1],@X[0]
SWAP2 @X[10],@X[10]
|| SWAP2 @X[11],@X[11]
|| SWAP4 @X[2],@X[3]
|| SWAP4 @X[3],@X[2]
SWAP2 @X[12],@X[12]
|| SWAP2 @X[13],@X[13]
|| SWAP4 @X[4],@X[5]
|| SWAP4 @X[5],@X[4]
SWAP2 @X[14],@X[14]
|| SWAP2 @X[15],@X[15]
|| SWAP4 @X[6],@X[7]
|| SWAP4 @X[7],@X[6]
SWAP4 @X[8],@X[9]
|| SWAP4 @X[9],@X[8]
|| SWAP2 @Y[0],@Y[0]
|| SWAP2 @Y[1],@Y[1]
SWAP4 @X[10],@X[11]
|| SWAP4 @X[11],@X[10]
|| SWAP2 @Y[2],@Y[2]
|| SWAP2 @Y[3],@Y[3]
SWAP4 @X[12],@X[13]
|| SWAP4 @X[13],@X[12]
|| SWAP2 @Y[4],@Y[4]
|| SWAP2 @Y[5],@Y[5]
SWAP4 @X[14],@X[15]
|| SWAP4 @X[15],@X[14]
|| SWAP2 @Y[6],@Y[6]
|| SWAP2 @Y[7],@Y[7]
SWAP2 @Y[8],@Y[8]
|| SWAP2 @Y[9],@Y[9]
|| SWAP4 @Y[0],@Y[1]
|| SWAP4 @Y[1],@Y[0]
SWAP2 @Y[10],@Y[10]
|| SWAP2 @Y[11],@Y[11]
|| SWAP4 @Y[2],@Y[3]
|| SWAP4 @Y[3],@Y[2]
SWAP2 @Y[12],@Y[12]
|| SWAP2 @Y[13],@Y[13]
|| SWAP4 @Y[4],@Y[5]
|| SWAP4 @Y[5],@Y[4]
SWAP2 @Y[14],@Y[14]
|| SWAP2 @Y[15],@Y[15]
|| SWAP4 @Y[6],@Y[7]
|| SWAP4 @Y[7],@Y[6]
SWAP4 @Y[8],@Y[9]
|| SWAP4 @Y[9],@Y[8]
SWAP4 @Y[10],@Y[11]
|| SWAP4 @Y[11],@Y[10]
SWAP4 @Y[12],@Y[13]
|| SWAP4 @Y[13],@Y[12]
SWAP4 @Y[14],@Y[15]
|| SWAP4 @Y[15],@Y[14]
.endif
XOR @DAT[0],@X[0],@X[0] ; xor 1st block
|| XOR @DAT[3],@X[3],@X[3]
|| XOR @DAT[2],@X[2],@X[1]
|| XOR @DAT[1],@X[1],@X[2]
|| LDNDW *${INP}++[8],@DAT[1]:@DAT[0]
XOR @DAT[4],@X[4],@X[4]
|| XOR @DAT[7],@X[7],@X[7]
|| LDNDW *${INP}[-7],@DAT[3]:@DAT[2]
XOR @DAT[6],@X[6],@X[5]
|| XOR @DAT[5],@X[5],@X[6]
|| LDNDW *${INP}[-6],@DAT[5]:@DAT[4]
XOR @DAT[8],@X[8],@X[8]
|| XOR @DAT[11],@X[11],@X[11]
|| LDNDW *${INP}[-5],@DAT[7]:@DAT[6]
XOR @DAT[10],@X[10],@X[9]
|| XOR @DAT[9],@X[9],@X[10]
|| LDNDW *${INP}[-4],@DAT[9]:@DAT[8]
XOR @DAT[12],@X[12],@X[12]
|| XOR @DAT[15],@X[15],@X[15]
|| LDNDW *${INP}[-3],@DAT[11]:@DAT[10]
XOR @DAT[14],@X[14],@X[13]
|| XOR @DAT[13],@X[13],@X[14]
|| LDNDW *${INP}[-2],@DAT[13]:@DAT[12]
[A0] SUB A0,$STEP,A0 ; SUB A0,128,A0
|| LDNDW *${INP}[-1],@DAT[15]:@DAT[14]
XOR @Y[0],@DAT[0],@DAT[0] ; xor 2nd block
|| XOR @Y[1],@DAT[1],@DAT[1]
|| STNDW @X[2]:@X[0],*${OUT}++[8]
XOR @Y[2],@DAT[2],@DAT[2]
|| XOR @Y[3],@DAT[3],@DAT[3]
|| STNDW @X[3]:@X[1],*${OUT}[-7]
XOR @Y[4],@DAT[4],@DAT[4]
|| [A0] LDDW *FP[-12],@X[2]:@X[0] ; re-load key material from stack
|| [A0] LDDW *SP[2], @X[3]:@X[1]
XOR @Y[5],@DAT[5],@DAT[5]
|| STNDW @X[6]:@X[4],*${OUT}[-6]
XOR @Y[6],@DAT[6],@DAT[6]
|| XOR @Y[7],@DAT[7],@DAT[7]
|| STNDW @X[7]:@X[5],*${OUT}[-5]
XOR @Y[8],@DAT[8],@DAT[8]
|| [A0] LDDW *FP[-10],@X[6]:@X[4]
|| [A0] LDDW *SP[4], @X[7]:@X[5]
XOR @Y[9],@DAT[9],@DAT[9]
|| STNDW @X[10]:@X[8],*${OUT}[-4]
XOR @Y[10],@DAT[10],@DAT[10]
|| XOR @Y[11],@DAT[11],@DAT[11]
|| STNDW @X[11]:@X[9],*${OUT}[-3]
XOR @Y[12],@DAT[12],@DAT[12]
|| [A0] LDDW *FP[-8], @X[10]:@X[8]
|| [A0] LDDW *SP[6], @X[11]:@X[9]
XOR @Y[13],@DAT[13],@DAT[13]
|| STNDW @X[14]:@X[12],*${OUT}[-2]
XOR @Y[14],@DAT[14],@DAT[14]
|| XOR @Y[15],@DAT[15],@DAT[15]
|| STNDW @X[15]:@X[13],*${OUT}[-1]
[A0] MV @K2x[12],@X[12]
|| [A0] MV @K2x[13],@X[13]
|| [A0] LDW *FP[-6*2], @X[14]
|| [A0] LDW *SP[8*2], @X[15]
[A0] DMV @X[2],@X[0],@Y[2]:@Y[0] ; duplicate key material
|| STNDW @DAT[1]:@DAT[0],*${OUT}++[8]
[A0] DMV @X[3],@X[1],@Y[3]:@Y[1]
|| STNDW @DAT[3]:@DAT[2],*${OUT}[-7]
[A0] DMV @X[6],@X[4],@Y[6]:@Y[4]
|| STNDW @DAT[5]:@DAT[4],*${OUT}[-6]
|| CMPLTU A0,$STEP,A1 ; is remaining length < 2*blocks?
||[!A0] BNOP epilogue?
[A0] DMV @X[7],@X[5],@Y[7]:@Y[5]
|| STNDW @DAT[7]:@DAT[6],*${OUT}[-5]
||[!A1] BNOP outer2x?
[A0] DMV @X[10],@X[8],@Y[10]:@Y[8]
|| STNDW @DAT[9]:@DAT[8],*${OUT}[-4]
[A0] DMV @X[11],@X[9],@Y[11]:@Y[9]
|| STNDW @DAT[11]:@DAT[10],*${OUT}[-3]
[A0] DMV @X[14],@X[12],@Y[14]:@Y[12]
|| STNDW @DAT[13]:@DAT[12],*${OUT}[-2]
[A0] DMV @X[15],@X[13],@Y[15]:@Y[13]
|| STNDW @DAT[15]:@DAT[14],*${OUT}[-1]
;;===== branch to epilogue? is taken here
[A1] MVK 64,$STEP
|| [A0] MVK 10,B0 ; inner loop counter
;;===== branch to outer2x? is taken here
___
{
my ($a0,$a1,$a2,$a3) = (0..3);
my ($b0,$b1,$b2,$b3) = (4..7);
my ($c0,$c1,$c2,$c3) = (8..11);
my ($d0,$d1,$d2,$d3) = (12..15);
$code.=<<___;
top1x?:
ADD @X[$b1],@X[$a1],@X[$a1]
|| ADD @X[$b2],@X[$a2],@X[$a2]
ADD @X[$b0],@X[$a0],@X[$a0]
|| ADD @X[$b3],@X[$a3],@X[$a3]
|| XOR @X[$a1],@X[$d1],@X[$d1]
|| XOR @X[$a2],@X[$d2],@X[$d2]
XOR @X[$a0],@X[$d0],@X[$d0]
|| XOR @X[$a3],@X[$d3],@X[$d3]
|| SWAP2 @X[$d1],@X[$d1] ; rotate by 16
|| SWAP2 @X[$d2],@X[$d2]
SWAP2 @X[$d0],@X[$d0]
|| SWAP2 @X[$d3],@X[$d3]
|| ADD @X[$d1],@X[$c1],@X[$c1]
|| ADD @X[$d2],@X[$c2],@X[$c2]
ADD @X[$d0],@X[$c0],@X[$c0]
|| ADD @X[$d3],@X[$c3],@X[$c3]
|| XOR @X[$c1],@X[$b1],@X[$b1]
|| XOR @X[$c2],@X[$b2],@X[$b2]
XOR @X[$c0],@X[$b0],@X[$b0]
|| XOR @X[$c3],@X[$b3],@X[$b3]
|| ROTL @X[$b1],12,@X[$b1]
|| ROTL @X[$b2],12,@X[$b2]
ROTL @X[$b0],12,@X[$b0]
|| ROTL @X[$b3],12,@X[$b3]
ADD @X[$b1],@X[$a1],@X[$a1]
|| ADD @X[$b2],@X[$a2],@X[$a2]
ADD @X[$b0],@X[$a0],@X[$a0]
|| ADD @X[$b3],@X[$a3],@X[$a3]
|| XOR @X[$a1],@X[$d1],@X[$d1]
|| XOR @X[$a2],@X[$d2],@X[$d2]
XOR @X[$a0],@X[$d0],@X[$d0]
|| XOR @X[$a3],@X[$d3],@X[$d3]
|| ROTL @X[$d1],8,@X[$d1]
|| ROTL @X[$d2],8,@X[$d2]
ROTL @X[$d0],8,@X[$d0]
|| ROTL @X[$d3],8,@X[$d3]
|| BNOP middle1x? ; protect from interrupt
ADD @X[$d1],@X[$c1],@X[$c1]
|| ADD @X[$d2],@X[$c2],@X[$c2]
ADD @X[$d0],@X[$c0],@X[$c0]
|| ADD @X[$d3],@X[$c3],@X[$c3]
|| XOR @X[$c1],@X[$b1],@X[$b1]
|| XOR @X[$c2],@X[$b2],@X[$b2]
|| ROTL @X[$d1],0,@X[$d2] ; moved to avoid cross-path stall
|| ROTL @X[$d2],0,@X[$d3]
XOR @X[$c0],@X[$b0],@X[$b0]
|| XOR @X[$c3],@X[$b3],@X[$b3]
|| ROTL @X[$d0],0,@X[$d1]
|| ROTL @X[$d3],0,@X[$d0]
ROTL @X[$b1],7,@X[$b0] ; avoided cross-path stall
|| ROTL @X[$b2],7,@X[$b1]
ROTL @X[$b0],7,@X[$b3]
|| ROTL @X[$b3],7,@X[$b2]
middle1x?:
ADD @X[$b0],@X[$a0],@X[$a0]
|| ADD @X[$b1],@X[$a1],@X[$a1]
ADD @X[$b2],@X[$a2],@X[$a2]
|| ADD @X[$b3],@X[$a3],@X[$a3]
|| XOR @X[$a0],@X[$d0],@X[$d0]
|| XOR @X[$a1],@X[$d1],@X[$d1]
XOR @X[$a2],@X[$d2],@X[$d2]
|| XOR @X[$a3],@X[$d3],@X[$d3]
|| SWAP2 @X[$d0],@X[$d0] ; rotate by 16
|| SWAP2 @X[$d1],@X[$d1]
SWAP2 @X[$d2],@X[$d2]
|| SWAP2 @X[$d3],@X[$d3]
|| ADD @X[$d0],@X[$c2],@X[$c2]
|| ADD @X[$d1],@X[$c3],@X[$c3]
ADD @X[$d2],@X[$c0],@X[$c0]
|| ADD @X[$d3],@X[$c1],@X[$c1]
|| XOR @X[$c2],@X[$b0],@X[$b0]
|| XOR @X[$c3],@X[$b1],@X[$b1]
XOR @X[$c0],@X[$b2],@X[$b2]
|| XOR @X[$c1],@X[$b3],@X[$b3]
|| ROTL @X[$b0],12,@X[$b0]
|| ROTL @X[$b1],12,@X[$b1]
ROTL @X[$b2],12,@X[$b2]
|| ROTL @X[$b3],12,@X[$b3]
ADD @X[$b0],@X[$a0],@X[$a0]
|| ADD @X[$b1],@X[$a1],@X[$a1]
|| [B0] SUB B0,1,B0 ; decrement inner loop counter
ADD @X[$b2],@X[$a2],@X[$a2]
|| ADD @X[$b3],@X[$a3],@X[$a3]
|| XOR @X[$a0],@X[$d0],@X[$d0]
|| XOR @X[$a1],@X[$d1],@X[$d1]
XOR @X[$a2],@X[$d2],@X[$d2]
|| XOR @X[$a3],@X[$d3],@X[$d3]
|| ROTL @X[$d0],8,@X[$d0]
|| ROTL @X[$d1],8,@X[$d1]
ROTL @X[$d2],8,@X[$d2]
|| ROTL @X[$d3],8,@X[$d3]
|| [B0] BNOP top1x? ; even protects from interrupt
ADD @X[$d0],@X[$c2],@X[$c2]
|| ADD @X[$d1],@X[$c3],@X[$c3]
ADD @X[$d2],@X[$c0],@X[$c0]
|| ADD @X[$d3],@X[$c1],@X[$c1]
|| XOR @X[$c2],@X[$b0],@X[$b0]
|| XOR @X[$c3],@X[$b1],@X[$b1]
|| ROTL @X[$d0],0,@X[$d3] ; moved to avoid cross-path stall
|| ROTL @X[$d1],0,@X[$d0]
XOR @X[$c0],@X[$b2],@X[$b2]
|| XOR @X[$c1],@X[$b3],@X[$b3]
|| ROTL @X[$d2],0,@X[$d1]
|| ROTL @X[$d3],0,@X[$d2]
ROTL @X[$b0],7,@X[$b1] ; avoided cross-path stall
|| ROTL @X[$b1],7,@X[$b2]
ROTL @X[$b2],7,@X[$b3]
|| ROTL @X[$b3],7,@X[$b0]
||[!B0] CMPLTU A0,$STEP,A1 ; less than 64 bytes left?
bottom1x?:
___
}
$code.=<<___;
ADD @Y[0],@X[0],@X[0] ; accumulate key material
|| ADD @Y[1],@X[1],@X[1]
|| ADD @Y[2],@X[2],@X[2]
|| ADD @Y[3],@X[3],@X[3]
||[!A1] LDNDW *${INP}++[8],@DAT[1]:@DAT[0]
|| [A1] BNOP tail?
ADD @Y[4],@X[4],@X[4]
|| ADD @Y[5],@X[5],@X[5]
|| ADD @Y[6],@X[6],@X[6]
|| ADD @Y[7],@X[7],@X[7]
||[!A1] LDNDW *${INP}[-7],@DAT[3]:@DAT[2]
ADD @Y[8],@X[8],@X[8]
|| ADD @Y[9],@X[9],@X[9]
|| ADD @Y[10],@X[10],@X[10]
|| ADD @Y[11],@X[11],@X[11]
||[!A1] LDNDW *${INP}[-6],@DAT[5]:@DAT[4]
ADD @Y[12],@X[12],@X[12]
|| ADD @Y[13],@X[13],@X[13]
|| ADD @Y[14],@X[14],@X[14]
|| ADD @Y[15],@X[15],@X[15]
||[!A1] LDNDW *${INP}[-5],@DAT[7]:@DAT[6]
[!A1] LDNDW *${INP}[-4],@DAT[9]:@DAT[8]
[!A1] LDNDW *${INP}[-3],@DAT[11]:@DAT[10]
LDNDW *${INP}[-2],@DAT[13]:@DAT[12]
LDNDW *${INP}[-1],@DAT[15]:@DAT[14]
.if .BIG_ENDIAN
SWAP2 @X[0],@X[0]
|| SWAP2 @X[1],@X[1]
|| SWAP2 @X[2],@X[2]
|| SWAP2 @X[3],@X[3]
SWAP2 @X[4],@X[4]
|| SWAP2 @X[5],@X[5]
|| SWAP2 @X[6],@X[6]
|| SWAP2 @X[7],@X[7]
SWAP2 @X[8],@X[8]
|| SWAP2 @X[9],@X[9]
|| SWAP4 @X[0],@X[1]
|| SWAP4 @X[1],@X[0]
SWAP2 @X[10],@X[10]
|| SWAP2 @X[11],@X[11]
|| SWAP4 @X[2],@X[3]
|| SWAP4 @X[3],@X[2]
SWAP2 @X[12],@X[12]
|| SWAP2 @X[13],@X[13]
|| SWAP4 @X[4],@X[5]
|| SWAP4 @X[5],@X[4]
SWAP2 @X[14],@X[14]
|| SWAP2 @X[15],@X[15]
|| SWAP4 @X[6],@X[7]
|| SWAP4 @X[7],@X[6]
SWAP4 @X[8],@X[9]
|| SWAP4 @X[9],@X[8]
SWAP4 @X[10],@X[11]
|| SWAP4 @X[11],@X[10]
SWAP4 @X[12],@X[13]
|| SWAP4 @X[13],@X[12]
SWAP4 @X[14],@X[15]
|| SWAP4 @X[15],@X[14]
.else
NOP 1
.endif
XOR @X[0],@DAT[0],@DAT[0] ; xor with input
|| XOR @X[1],@DAT[1],@DAT[1]
|| XOR @X[2],@DAT[2],@DAT[2]
|| XOR @X[3],@DAT[3],@DAT[3]
|| [A0] SUB A0,$STEP,A0 ; SUB A0,64,A0
XOR @X[4],@DAT[4],@DAT[4]
|| XOR @X[5],@DAT[5],@DAT[5]
|| XOR @X[6],@DAT[6],@DAT[6]
|| XOR @X[7],@DAT[7],@DAT[7]
|| STNDW @DAT[1]:@DAT[0],*${OUT}++[8]
XOR @X[8],@DAT[8],@DAT[8]
|| XOR @X[9],@DAT[9],@DAT[9]
|| XOR @X[10],@DAT[10],@DAT[10]
|| XOR @X[11],@DAT[11],@DAT[11]
|| STNDW @DAT[3]:@DAT[2],*${OUT}[-7]
XOR @X[12],@DAT[12],@DAT[12]
|| XOR @X[13],@DAT[13],@DAT[13]
|| XOR @X[14],@DAT[14],@DAT[14]
|| XOR @X[15],@DAT[15],@DAT[15]
|| STNDW @DAT[5]:@DAT[4],*${OUT}[-6]
|| [A0] BNOP top1x?
[A0] DMV @Y[2],@Y[0],@X[2]:@X[0] ; duplicate key material
|| [A0] DMV @Y[3],@Y[1],@X[3]:@X[1]
|| STNDW @DAT[7]:@DAT[6],*${OUT}[-5]
[A0] DMV @Y[6],@Y[4],@X[6]:@X[4]
|| [A0] DMV @Y[7],@Y[5],@X[7]:@X[5]
|| STNDW @DAT[9]:@DAT[8],*${OUT}[-4]
[A0] DMV @Y[10],@Y[8],@X[10]:@X[8]
|| [A0] DMV @Y[11],@Y[9],@X[11]:@X[9]
|| [A0] ADD 1,@Y[12],@Y[12] ; increment counter
|| STNDW @DAT[11]:@DAT[10],*${OUT}[-3]
[A0] DMV @Y[14],@Y[12],@X[14]:@X[12]
|| [A0] DMV @Y[15],@Y[13],@X[15]:@X[13]
|| STNDW @DAT[13]:@DAT[12],*${OUT}[-2]
[A0] MVK 10,B0 ; inner loop counter
|| STNDW @DAT[15]:@DAT[14],*${OUT}[-1]
;;===== branch to top1x? is taken here
epilogue?:
LDDW *FP[-4],A11:A10 ; ABI says so
LDDW *FP[-3],A13:A12
|| LDDW *SP[3+8],B11:B10
LDDW *SP[4+8],B13:B12
|| BNOP RA
LDW *++SP(40+64),FP ; restore frame pointer
NOP 4
tail?:
LDBU *${INP}++[1],B24 ; load byte by byte
|| SUB A0,1,A0
|| SUB A0,1,B1
[!B1] BNOP epilogue? ; interrupts are disabled for whole time
|| [A0] LDBU *${INP}++[1],B24
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
[!B1] BNOP epilogue?
|| [A0] LDBU *${INP}++[1],B24
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
[!B1] BNOP epilogue?
|| ROTL @X[0],0,A24
|| [A0] LDBU *${INP}++[1],B24
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
[!B1] BNOP epilogue?
|| ROTL @X[0],24,A24
|| [A0] LDBU *${INP}++[1],A24
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
[!B1] BNOP epilogue?
|| ROTL @X[0],16,A24
|| [A0] LDBU *${INP}++[1],A24
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
|| XOR A24,B24,B25
STB B25,*${OUT}++[1] ; store byte by byte
||[!B1] BNOP epilogue?
|| ROTL @X[0],8,A24
|| [A0] LDBU *${INP}++[1],A24
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
|| XOR A24,B24,B25
STB B25,*${OUT}++[1]
___
sub TAIL_STEP {
my $Xi= shift;
my $T = ($Xi=~/^B/?"B24":"A24"); # match @X[i] to avoid cross path
my $D = $T; $D=~tr/AB/BA/;
my $O = $D; $O=~s/24/25/;
$code.=<<___;
||[!B1] BNOP epilogue?
|| ROTL $Xi,0,$T
|| [A0] LDBU *${INP}++[1],$D
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
|| XOR A24,B24,$O
STB $O,*${OUT}++[1]
||[!B1] BNOP epilogue?
|| ROTL $Xi,24,$T
|| [A0] LDBU *${INP}++[1],$T
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
|| XOR A24,B24,$O
STB $O,*${OUT}++[1]
||[!B1] BNOP epilogue?
|| ROTL $Xi,16,$T
|| [A0] LDBU *${INP}++[1],$T
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
|| XOR A24,B24,$O
STB $O,*${OUT}++[1]
||[!B1] BNOP epilogue?
|| ROTL $Xi,8,$T
|| [A0] LDBU *${INP}++[1],$T
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
|| XOR A24,B24,$O
STB $O,*${OUT}++[1]
___
}
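# A byte-wise model of what the tail path computes, added for illustration
# only (the helper name is ours and the generator never calls it): $ks is
# the final keystream block as 16 little-endian 32-bit words, $in holds the
# remaining input bytes.
sub _tail_reference {
    my ($ks, $in) = @_;
    my $out = '';
    for my $i (0 .. length($in) - 1) {
        my $byte = ($ks->[$i >> 2] >> (8 * ($i & 3))) & 0xff;  # i-th keystream byte
        $out .= chr(ord(substr($in, $i, 1)) ^ $byte);          # xor with input byte
    }
    return $out;
}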
foreach (1..14) { TAIL_STEP(@X[$_]); }
$code.=<<___;
||[!B1] BNOP epilogue?
|| ROTL @X[15],0,B24
|| XOR A24,B24,A25
STB A25,*${OUT}++[1]
|| ROTL @X[15],24,B24
|| XOR A24,B24,A25
STB A25,*${OUT}++[1]
|| ROTL @X[15],16,B24
|| XOR A24,B24,A25
STB A25,*${OUT}++[1]
|| XOR A24,B24,A25
STB A25,*${OUT}++[1]
|| XOR A24,B24,B25
STB B25,*${OUT}++[1]
.endasmfunc
.sect .const
.cstring "ChaCha20 for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
.align 4
___
print $code;
close STDOUT or die "error closing STDOUT: $!";

@@ -0,0 +1,293 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov, @dot-asm, initially for use with OpenSSL.
# ====================================================================
#
# ChaCha20 for Itanium.
#
# March 2019
#
# Itanium 9xxx, which has a pair of shifters, manages to process one
# byte in 9.3 cycles, which aligns perfectly with the theoretical
# estimate. Pre-9000 CPUs, on the other hand, have a single shifter,
# so each extr/dep pair below takes an additional cycle, and the final
# input->xor->output pass runs slower than expected... The overall
# result is 15.6 cpb, two cycles more than the theoretical estimate.
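#
# IA64 has no 32-bit rotate instruction, so the rotations below are
# synthesized from extr.u/dep pairs (plus an OR for the non-16-bit
# amounts). A rough scalar model of the dep.z+or variant, in Perl and
# purely for illustration; the helper name is ours and the generator
# never calls it:
#
sub _rotl32_via_extr_dep {
    my ($x, $n) = @_;
    my $hi = ($x >> (32 - $n)) & ((1 << $n) - 1);  # extr.u: the bits that wrap around
    my $lo = ($x << $n) & 0xffffffff;              # dep.z:  low part shifted into place
    return $lo | $hi;                              # or
}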
$output = pop and open STDOUT, ">$output";
my @k = map("r$_",(16..31));
my @x = map("r$_",(38..53));
my @y = map("r$_",(8..11));
my @z = map("r$_",(15,35..37));
my ($out,$inp,$len,$key,$counter) = map("r$_",(32..36));
$code.=<<___;
#if defined(_HPUX_SOURCE)
# if !defined(_LP64)
# define ADDP addp4
# else
# define ADDP add
# endif
#else
# define ADDP add
#endif
.text
.global ChaCha20_ctr32#
.proc ChaCha20_ctr32#
.align 32
ChaCha20_ctr32:
.prologue
.save ar.pfs,r2
{ .mmi; alloc r2=ar.pfs,5,17,0,0
ADDP @k[11]=4,$key
.save ar.lc,r3
mov r3=ar.lc }
{ .mmi; ADDP $out=0,$out
ADDP $inp=0,$inp }
{ .mmi; ADDP $key=0,$key
ADDP $counter=0,$counter
.save pr,r14
mov r14=pr };;
.body
{ .mlx; ld4 @k[4]=[$key],8
movl @k[0]=0x61707865 }
{ .mlx; ld4 @k[5]=[@k[11]],8
movl @k[1]=0x3320646e };;
{ .mlx; ld4 @k[6]=[$key],8
movl @k[2]=0x79622d32 }
{ .mlx; ld4 @k[7]=[@k[11]],8
movl @k[3]=0x6b206574 };;
{ .mmi; ld4 @k[8]=[$key],8
ld4 @k[9]=[@k[11]],8
add @k[15]=4,$counter };;
{ .mmi; ld4 @k[10]=[$key]
ld4 @k[11]=[@k[11]]
mov @x[0]=@k[0] };;
{ .mmi; ld4 @k[12]=[$counter],8
ld4 @k[13]=[@k[15]],8
mov @x[1]=@k[1] };;
{ .mmi; ld4 @k[14]=[$counter]
ld4 @k[15]=[@k[15]]
mov @x[2]=@k[2] }
{ .mmi; mov @x[3]=@k[3]
mov @x[4]=@k[4]
mov @x[5]=@k[5] };;
{ .mmi; mov @x[6]=@k[6]
mov @x[7]=@k[7]
mov @x[8]=@k[8] }
{ .mmi; mov @x[9]=@k[9]
mov @x[10]=@k[10]
mov @x[11]=@k[11] }
{ .mmi; mov @x[12]=@k[12]
mov @x[13]=@k[13]
mov @x[14]=@k[14] };;
.Loop_outer:
{ .mii; mov @x[15]=@k[15]
mov ar.lc=9
mov ar.ec=1 }
{ .mmb; cmp.geu p6,p0=64,$len
sub @z[1]=64,$len
brp.loop.imp .Loop_top,.Loop_end-16 };;
.Loop_top:
___
sub ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
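# The maps above advance each index by one within its group of four, so
# ROUND(0,4,8,12) expands to the four column quarter-rounds
# (0,4,8,12)..(3,7,11,15), and ROUND(0,5,10,15) to the four diagonal ones
# (0,5,10,15), (1,6,11,12), (2,7,8,13), (3,4,9,14).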
$code.=<<___;
{ .mmi; add @x[$a0]=@x[$a0],@x[$b0]
add @x[$a1]=@x[$a1],@x[$b1]
add @x[$a2]=@x[$a2],@x[$b2] };;
{ .mmi; add @x[$a3]=@x[$a3],@x[$b3]
xor @x[$d0]=@x[$d0],@x[$a0]
xor @x[$d1]=@x[$d1],@x[$a1] };;
{ .mmi; xor @x[$d2]=@x[$d2],@x[$a2]
xor @x[$d3]=@x[$d3],@x[$a3]
extr.u @y[0]=@x[$d0],16,16 };;
{ .mii; extr.u @y[1]=@x[$d1],16,16
dep @x[$d0]=@x[$d0],@y[0],16,16 };;
{ .mii; add @x[$c0]=@x[$c0],@x[$d0]
extr.u @y[2]=@x[$d2],16,16
dep @x[$d1]=@x[$d1],@y[1],16,16 };;
{ .mii; add @x[$c1]=@x[$c1],@x[$d1]
xor @x[$b0]=@x[$b0],@x[$c0]
extr.u @y[3]=@x[$d3],16,16 };;
{ .mii; xor @x[$b1]=@x[$b1],@x[$c1]
dep @x[$d2]=@x[$d2],@y[2],16,16
dep @x[$d3]=@x[$d3],@y[3],16,16 };;
{ .mmi; add @x[$c2]=@x[$c2],@x[$d2]
add @x[$c3]=@x[$c3],@x[$d3]
extr.u @y[0]=@x[$b0],20,12 };;
{ .mmi; xor @x[$b2]=@x[$b2],@x[$c2]
xor @x[$b3]=@x[$b3],@x[$c3]
dep.z @x[$b0]=@x[$b0],12,20 };;
{ .mii; or @x[$b0]=@x[$b0],@y[0]
extr.u @y[1]=@x[$b1],20,12
dep.z @x[$b1]=@x[$b1],12,20 };;
{ .mii; add @x[$a0]=@x[$a0],@x[$b0]
extr.u @y[2]=@x[$b2],20,12
extr.u @y[3]=@x[$b3],20,12 }
{ .mii; or @x[$b1]=@x[$b1],@y[1]
dep.z @x[$b2]=@x[$b2],12,20
dep.z @x[$b3]=@x[$b3],12,20 };;
{ .mmi; or @x[$b2]=@x[$b2],@y[2]
or @x[$b3]=@x[$b3],@y[3]
add @x[$a1]=@x[$a1],@x[$b1] };;
{ .mmi; add @x[$a2]=@x[$a2],@x[$b2]
add @x[$a3]=@x[$a3],@x[$b3]
xor @x[$d0]=@x[$d0],@x[$a0] };;
{ .mii; xor @x[$d1]=@x[$d1],@x[$a1]
extr.u @y[0]=@x[$d0],24,8
dep.z @x[$d0]=@x[$d0],8,24 };;
{ .mii; or @x[$d0]=@x[$d0],@y[0]
extr.u @y[1]=@x[$d1],24,8
dep.z @x[$d1]=@x[$d1],8,24 };;
{ .mmi; or @x[$d1]=@x[$d1],@y[1]
xor @x[$d2]=@x[$d2],@x[$a2]
xor @x[$d3]=@x[$d3],@x[$a3] };;
{ .mii; add @x[$c0]=@x[$c0],@x[$d0]
extr.u @y[2]=@x[$d2],24,8
dep.z @x[$d2]=@x[$d2],8,24 };;
{ .mii; xor @x[$b0]=@x[$b0],@x[$c0]
extr.u @y[3]=@x[$d3],24,8
dep.z @x[$d3]=@x[$d3],8,24 };;
{ .mmi; or @x[$d2]=@x[$d2],@y[2]
or @x[$d3]=@x[$d3],@y[3]
extr.u @y[0]=@x[$b0],25,7 };;
{ .mmi; add @x[$c1]=@x[$c1],@x[$d1]
add @x[$c2]=@x[$c2],@x[$d2]
dep.z @x[$b0]=@x[$b0],7,25 };;
{ .mmi; xor @x[$b1]=@x[$b1],@x[$c1]
xor @x[$b2]=@x[$b2],@x[$c2]
add @x[$c3]=@x[$c3],@x[$d3] };;
{ .mii; xor @x[$b3]=@x[$b3],@x[$c3]
extr.u @y[1]=@x[$b1],25,7
dep.z @x[$b1]=@x[$b1],7,25 };;
{ .mii; or @x[$b0]=@x[$b0],@y[0]
extr.u @y[2]=@x[$b2],25,7
dep.z @x[$b2]=@x[$b2],7,25 };;
{ .mii; or @x[$b1]=@x[$b1],@y[1]
extr.u @y[3]=@x[$b3],25,7
dep.z @x[$b3]=@x[$b3],7,25 };;
___
$code.=<<___ if ($d0 == 12);
{ .mmi; or @x[$b2]=@x[$b2],@y[2]
or @x[$b3]=@x[$b3],@y[3]
mov @z[0]=-1 };;
___
$code.=<<___ if ($d0 == 15);
{ .mmb; or @x[$b2]=@x[$b2],@y[2]
or @x[$b3]=@x[$b3],@y[3]
br.ctop.sptk .Loop_top };;
___
}
&ROUND(0, 4, 8, 12);
&ROUND(0, 5, 10, 15);
$code.=<<___;
.Loop_end:
{ .mmi; add @x[0]=@x[0],@k[0]
add @x[1]=@x[1],@k[1]
(p6) shr.u @z[0]=@z[0],@z[1] }
{ .mmb; add @x[2]=@x[2],@k[2]
add @x[3]=@x[3],@k[3]
clrrrb.pr };;
{ .mmi; add @x[4]=@x[4],@k[4]
add @x[5]=@x[5],@k[5]
add @x[6]=@x[6],@k[6] }
{ .mmi; add @x[7]=@x[7],@k[7]
add @x[8]=@x[8],@k[8]
add @x[9]=@x[9],@k[9] }
{ .mmi; add @x[10]=@x[10],@k[10]
add @x[11]=@x[11],@k[11]
add @x[12]=@x[12],@k[12] }
{ .mmi; add @x[13]=@x[13],@k[13]
add @x[14]=@x[14],@k[14]
add @x[15]=@x[15],@k[15] }
{ .mmi; add @k[12]=1,@k[12] // next counter
mov pr=@z[0],0x1ffff };;
//////////////////////////////////////////////////////////////////
// Each predicate bit corresponds to a byte to be processed. Note
// that p0 is hard-wired to 1, but that works out, because there is
// always at least one byte to process...
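// (The mask itself was prepared at .Loop_end: @z[0] starts out as -1 and,
// when no more than 64 bytes remain, is shifted right by 64-$len, leaving
// exactly $len low bits set before being copied into pr.)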
{ .mmi; (p0) ld1 @z[0]=[$inp],1
shr.u @y[1]=@x[0],8 };;
{ .mmi; (p1) ld1 @z[1]=[$inp],1
(p2) shr.u @y[2]=@x[0],16 };;
{ .mmi; (p2) ld1 @z[2]=[$inp],1
(p0) xor @z[0]=@z[0],@x[0]
(p3) shr.u @y[3]=@x[0],24 };;
___
for(my $i0=0; $i0<60; $i0+=4) {
my ($i1, $i2, $i3, $i4, $i5, $i6, $i7) = map($i0+$_,(1..7));
my $k = $i0/4+1;
$code.=<<___;
{ .mmi; (p$i3) ld1 @z[3]=[$inp],1
(p$i0) st1 [$out]=@z[0],1
(p$i1) xor @z[1]=@z[1],@y[1] };;
{ .mmi; (p$i4) ld1 @z[0]=[$inp],1
(p$i5) shr.u @y[1]=@x[$k],8 }
{ .mmi; (p$i1) st1 [$out]=@z[1],1
(p$i2) xor @z[2]=@z[2],@y[2]
(p1) mov @x[$k-1]=@k[$k-1] };;
{ .mfi; (p$i5) ld1 @z[1]=[$inp],1
(p$i6) shr.u @y[2]=@x[$k],16 }
{ .mfi; (p$i2) st1 [$out]=@z[2],1
(p$i3) xor @z[3]=@z[3],@y[3] };;
{ .mfi; (p$i6) ld1 @z[2]=[$inp],1
(p$i7) shr.u @y[3]=@x[$k],24 }
___
$code.=<<___ if ($i0==0); # p1,p2 are available for reuse in first round
{ .mmi; (p$i3) st1 [$out]=@z[3],1
(p$i4) xor @z[0]=@z[0],@x[$k]
cmp.ltu p1,p2=64,$len };;
___
$code.=<<___ if ($i0>0);
{ .mfi; (p$i3) st1 [$out]=@z[3],1
(p$i4) xor @z[0]=@z[0],@x[$k] };;
___
}
$code.=<<___;
{ .mmi; (p63) ld1 @z[3]=[$inp],1
(p60) st1 [$out]=@z[0],1
(p61) xor @z[1]=@z[1],@y[1] };;
{ .mmi; (p61) st1 [$out]=@z[1],1
(p62) xor @z[2]=@z[2],@y[2] };;
{ .mmi; (p62) st1 [$out]=@z[2],1
(p63) xor @z[3]=@z[3],@y[3]
(p2) mov ar.lc=r3 };;
{ .mib; (p63) st1 [$out]=@z[3],1
(p1) add $len=-64,$len
(p1) br.dptk.many .Loop_outer };;
{ .mmi; mov @k[4]=0 // wipe key material
mov @k[5]=0
mov @k[6]=0 }
{ .mmi; mov @k[7]=0
mov @k[8]=0
mov @k[9]=0 }
{ .mmi; mov @k[10]=0
mov @k[11]=0
mov @k[12]=0 }
{ .mmi; mov @k[13]=0
mov @k[14]=0
mov @k[15]=0 }
{ .mib; mov pr=r14,0x1ffff
br.ret.sptk.many b0 };;
.endp ChaCha20_ctr32#
stringz "ChaCha20 for IA64, CRYPTOGAMS by \@dot-asm"
___
print $code;
close STDOUT or die "error closing STDOUT: $!";

(2 file diffs suppressed: too large to display)

@@ -0,0 +1,518 @@
#! /usr/bin/env perl
# This file is dual-licensed, meaning that you can use it under your
# choice of either of the following two licenses:
#
# Copyright 2023-2024 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# or
#
# Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The generated code of this file depends on the following RISC-V extensions:
# - RV64I
# - RISC-V Vector ('V') with VLEN >= 128
# - RISC-V Basic Bit-manipulation extension ('Zbb')
# - RISC-V Zicclsm (main memory supports misaligned loads/stores)
# Optional:
# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
use strict;
use warnings;
use FindBin qw($Bin);
use lib "$Bin";
use lib "$Bin/../../perlasm";
use riscv;
# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
my $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
my $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
my $use_zvkb = $flavour && $flavour =~ /zvkb/i ? 1 : 0;
my $isaext = "_v_zbb" . ( $use_zvkb ? "_zvkb" : "" );
$output and open STDOUT, ">$output";
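# For example (illustrative invocation; the file names are assumptions, not
# part of this module):
#   perl chacha-riscv.pl zvkb chacha_zvkb.S
# sets $use_zvkb to 1 and emits ChaCha20_ctr32_v_zbb_zvkb into chacha_zvkb.S,
# while omitting the flavour argument emits the plain ChaCha20_ctr32_v_zbb.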
my $code = <<___;
.text
___
# void ChaCha20_ctr32@{[$isaext]}(unsigned char *out, const unsigned char *inp,
# size_t len, const unsigned int key[8],
# const unsigned int counter[4]);
################################################################################
my ( $OUTPUT, $INPUT, $LEN, $KEY, $COUNTER ) = ( "a0", "a1", "a2", "a3", "a4" );
my ( $CONST_DATA0, $CONST_DATA1, $CONST_DATA2, $CONST_DATA3 ) = ( "a5", "a6",
"a7", "s0" );
my ( $KEY0, $KEY1, $KEY2, $KEY3, $KEY4, $KEY5, $KEY6, $KEY7, $COUNTER0,
$COUNTER1, $NONCE0, $NONCE1) = ( "s1", "s2", "s3", "s4", "s5", "s6", "s7",
"s8", "s9", "s10", "s11", "t0" );
my ( $STATE0, $STATE1, $STATE2, $STATE3,
$STATE4, $STATE5, $STATE6, $STATE7,
$STATE8, $STATE9, $STATE10, $STATE11,
$STATE12, $STATE13, $STATE14, $STATE15) = (
$CONST_DATA0, $CONST_DATA1, $CONST_DATA2, $CONST_DATA3,
$KEY0, $KEY1, $KEY2, $KEY3,
$KEY4, $KEY5, $KEY6, $KEY7,
$COUNTER0, $COUNTER1, $NONCE0, $NONCE1 );
my ( $VL ) = ( "t1" );
my ( $CURRENT_COUNTER ) = ( "t2" );
my ( $T0 ) = ( "t3" );
my ( $T1 ) = ( "t4" );
my ( $T2 ) = ( "t5" );
my ( $T3 ) = ( "t6" );
my (
$V0, $V1, $V2, $V3, $V4, $V5, $V6, $V7, $V8, $V9, $V10,
$V11, $V12, $V13, $V14, $V15, $V16, $V17, $V18, $V19, $V20, $V21,
$V22, $V23, $V24, $V25, $V26, $V27, $V28, $V29, $V30, $V31,
) = map( "v$_", ( 0 .. 31 ) );
sub chacha_sub_round {
my (
$A0, $B0, $C0,
$A1, $B1, $C1,
$A2, $B2, $C2,
$A3, $B3, $C3,
$S_A0, $S_B0, $S_C0,
$S_A1, $S_B1, $S_C1,
$S_A2, $S_B2, $S_C2,
$S_A3, $S_B3, $S_C3,
$ROL_SHIFT,
$V_T0, $V_T1, $V_T2, $V_T3,
) = @_;
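# Each step is issued four times for the vector column groups and, in
# between those, once more for the scalar registers ($S_*) carrying the
# one block assigned to the scalar ALU, so the scalar and vector
# pipelines advance together.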
# a += b; c ^= a;
my $code = <<___;
@{[vadd_vv $A0, $A0, $B0]}
add $S_A0, $S_A0, $S_B0
@{[vadd_vv $A1, $A1, $B1]}
add $S_A1, $S_A1, $S_B1
@{[vadd_vv $A2, $A2, $B2]}
add $S_A2, $S_A2, $S_B2
@{[vadd_vv $A3, $A3, $B3]}
add $S_A3, $S_A3, $S_B3
@{[vxor_vv $C0, $C0, $A0]}
xor $S_C0, $S_C0, $S_A0
@{[vxor_vv $C1, $C1, $A1]}
xor $S_C1, $S_C1, $S_A1
@{[vxor_vv $C2, $C2, $A2]}
xor $S_C2, $S_C2, $S_A2
@{[vxor_vv $C3, $C3, $A3]}
xor $S_C3, $S_C3, $S_A3
___
# c <<<= $ROL_SHIFT;
if ($use_zvkb) {
my $ror_part = <<___;
@{[vror_vi $C0, $C0, 32 - $ROL_SHIFT]}
@{[roriw $S_C0, $S_C0, 32 - $ROL_SHIFT]}
@{[vror_vi $C1, $C1, 32 - $ROL_SHIFT]}
@{[roriw $S_C1, $S_C1, 32 - $ROL_SHIFT]}
@{[vror_vi $C2, $C2, 32 - $ROL_SHIFT]}
@{[roriw $S_C2, $S_C2, 32 - $ROL_SHIFT]}
@{[vror_vi $C3, $C3, 32 - $ROL_SHIFT]}
@{[roriw $S_C3, $S_C3, 32 - $ROL_SHIFT]}
___
$code .= $ror_part;
} else {
my $ror_part = <<___;
@{[vsll_vi $V_T0, $C0, $ROL_SHIFT]}
@{[vsll_vi $V_T1, $C1, $ROL_SHIFT]}
@{[vsll_vi $V_T2, $C2, $ROL_SHIFT]}
@{[vsll_vi $V_T3, $C3, $ROL_SHIFT]}
@{[vsrl_vi $C0, $C0, 32 - $ROL_SHIFT]}
@{[vsrl_vi $C1, $C1, 32 - $ROL_SHIFT]}
@{[vsrl_vi $C2, $C2, 32 - $ROL_SHIFT]}
@{[vsrl_vi $C3, $C3, 32 - $ROL_SHIFT]}
@{[vor_vv $C0, $C0, $V_T0]}
@{[roriw $S_C0, $S_C0, 32 - $ROL_SHIFT]}
@{[vor_vv $C1, $C1, $V_T1]}
@{[roriw $S_C1, $S_C1, 32 - $ROL_SHIFT]}
@{[vor_vv $C2, $C2, $V_T2]}
@{[roriw $S_C2, $S_C2, 32 - $ROL_SHIFT]}
@{[vor_vv $C3, $C3, $V_T3]}
@{[roriw $S_C3, $S_C3, 32 - $ROL_SHIFT]}
___
$code .= $ror_part;
}
return $code;
}
sub chacha_quad_round_group {
my (
$A0, $B0, $C0, $D0,
$A1, $B1, $C1, $D1,
$A2, $B2, $C2, $D2,
$A3, $B3, $C3, $D3,
$S_A0, $S_B0, $S_C0, $S_D0,
$S_A1, $S_B1, $S_C1, $S_D1,
$S_A2, $S_B2, $S_C2, $S_D2,
$S_A3, $S_B3, $S_C3, $S_D3,
$V_T0, $V_T1, $V_T2, $V_T3,
) = @_;
my $code = <<___;
# a += b; d ^= a; d <<<= 16;
@{[chacha_sub_round
$A0, $B0, $D0,
$A1, $B1, $D1,
$A2, $B2, $D2,
$A3, $B3, $D3,
$S_A0, $S_B0, $S_D0,
$S_A1, $S_B1, $S_D1,
$S_A2, $S_B2, $S_D2,
$S_A3, $S_B3, $S_D3,
16,
$V_T0, $V_T1, $V_T2, $V_T3]}
# c += d; b ^= c; b <<<= 12;
@{[chacha_sub_round
$C0, $D0, $B0,
$C1, $D1, $B1,
$C2, $D2, $B2,
$C3, $D3, $B3,
$S_C0, $S_D0, $S_B0,
$S_C1, $S_D1, $S_B1,
$S_C2, $S_D2, $S_B2,
$S_C3, $S_D3, $S_B3,
12,
$V_T0, $V_T1, $V_T2, $V_T3]}
# a += b; d ^= a; d <<<= 8;
@{[chacha_sub_round
$A0, $B0, $D0,
$A1, $B1, $D1,
$A2, $B2, $D2,
$A3, $B3, $D3,
$S_A0, $S_B0, $S_D0,
$S_A1, $S_B1, $S_D1,
$S_A2, $S_B2, $S_D2,
$S_A3, $S_B3, $S_D3,
8,
$V_T0, $V_T1, $V_T2, $V_T3]}
# c += d; b ^= c; b <<<= 7;
@{[chacha_sub_round
$C0, $D0, $B0,
$C1, $D1, $B1,
$C2, $D2, $B2,
$C3, $D3, $B3,
$S_C0, $S_D0, $S_B0,
$S_C1, $S_D1, $S_B1,
$S_C2, $S_D2, $S_B2,
$S_C3, $S_D3, $S_B3,
7,
$V_T0, $V_T1, $V_T2, $V_T3]}
___
return $code;
}
$code .= <<___;
.p2align 3
.globl ChaCha20_ctr32@{[$isaext]}
.type ChaCha20_ctr32@{[$isaext]},\@function
ChaCha20_ctr32@{[$isaext]}:
addi sp, sp, -96
sd s0, 0(sp)
sd s1, 8(sp)
sd s2, 16(sp)
sd s3, 24(sp)
sd s4, 32(sp)
sd s5, 40(sp)
sd s6, 48(sp)
sd s7, 56(sp)
sd s8, 64(sp)
sd s9, 72(sp)
sd s10, 80(sp)
sd s11, 88(sp)
addi sp, sp, -64
lw $CURRENT_COUNTER, 0($COUNTER)
.Lblock_loop:
# We will use the scalar ALU for 1 chacha block.
srli $T0, $LEN, 6
@{[vsetvli $VL, $T0, "e32", "m1", "ta", "ma"]}
slli $T1, $VL, 6
bltu $T1, $LEN, 1f
# The vector ALU would otherwise cover all remaining data, so carve one
# block out of its share for the scalar ALU below.
addi $T1, $VL, -1
@{[vsetvli $VL, $T1, "e32", "m1", "ta", "ma"]}
1:
#### chacha block data
# init chacha const states into $V0~$V3
# "expa" little endian
li $CONST_DATA0, 0x61707865
@{[vmv_v_x $V0, $CONST_DATA0]}
# "nd 3" little endian
li $CONST_DATA1, 0x3320646e
@{[vmv_v_x $V1, $CONST_DATA1]}
# "2-by" little endian
li $CONST_DATA2, 0x79622d32
@{[vmv_v_x $V2, $CONST_DATA2]}
# "te k" little endian
li $CONST_DATA3, 0x6b206574
lw $KEY0, 0($KEY)
@{[vmv_v_x $V3, $CONST_DATA3]}
# init chacha key states into $V4~$V11
lw $KEY1, 4($KEY)
@{[vmv_v_x $V4, $KEY0]}
lw $KEY2, 8($KEY)
@{[vmv_v_x $V5, $KEY1]}
lw $KEY3, 12($KEY)
@{[vmv_v_x $V6, $KEY2]}
lw $KEY4, 16($KEY)
@{[vmv_v_x $V7, $KEY3]}
lw $KEY5, 20($KEY)
@{[vmv_v_x $V8, $KEY4]}
lw $KEY6, 24($KEY)
@{[vmv_v_x $V9, $KEY5]}
lw $KEY7, 28($KEY)
@{[vmv_v_x $V10, $KEY6]}
@{[vmv_v_x $V11, $KEY7]}
# init chacha key states into $V12~$V13
lw $COUNTER1, 4($COUNTER)
@{[vid_v $V12]}
lw $NONCE0, 8($COUNTER)
@{[vadd_vx $V12, $V12, $CURRENT_COUNTER]}
lw $NONCE1, 12($COUNTER)
@{[vmv_v_x $V13, $COUNTER1]}
add $COUNTER0, $CURRENT_COUNTER, $VL
# init chacha nonce states into $V14~$V15
@{[vmv_v_x $V14, $NONCE0]}
@{[vmv_v_x $V15, $NONCE1]}
li $T0, 64
# load the top-half of input data into $V16~$V23
@{[vlsseg_nf_e32_v 8, $V16, $INPUT, $T0]}
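# (the strided segment load above, nf=8/e32 with a 64-byte stride,
# de-interleaves the data: element i of $V16~$V23 receives words 0~7 of
# block i, so the first 32 bytes of every 64-byte block land one block per
# lane, matching the one-block-per-lane state layout in $V0~$V15)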
# till now in block_loop, we used:
# - $V0~$V15 for chacha states.
# - $V16~$V23 for top-half of input data.
# - $V24~$V31 haven't been used yet.
# 20 round groups
li $T0, 10
.Lround_loop:
# we can use $V24~$V31 as temporary registers in round_loop.
addi $T0, $T0, -1
@{[chacha_quad_round_group
$V0, $V4, $V8, $V12,
$V1, $V5, $V9, $V13,
$V2, $V6, $V10, $V14,
$V3, $V7, $V11, $V15,
$STATE0, $STATE4, $STATE8, $STATE12,
$STATE1, $STATE5, $STATE9, $STATE13,
$STATE2, $STATE6, $STATE10, $STATE14,
$STATE3, $STATE7, $STATE11, $STATE15,
$V24, $V25, $V26, $V27]}
@{[chacha_quad_round_group
$V3, $V4, $V9, $V14,
$V0, $V5, $V10, $V15,
$V1, $V6, $V11, $V12,
$V2, $V7, $V8, $V13,
$STATE3, $STATE4, $STATE9, $STATE14,
$STATE0, $STATE5, $STATE10, $STATE15,
$STATE1, $STATE6, $STATE11, $STATE12,
$STATE2, $STATE7, $STATE8, $STATE13,
$V24, $V25, $V26, $V27]}
bnez $T0, .Lround_loop
li $T0, 64
# load the bottom-half of input data into $V24~$V31
addi $T1, $INPUT, 32
@{[vlsseg_nf_e32_v 8, $V24, $T1, $T0]}
# these loads could not be issued earlier: no vector registers are free
# until the round_loop exits.
# add chacha top-half initial block states
# "expa" little endian
li $T0, 0x61707865
@{[vadd_vx $V0, $V0, $T0]}
add $STATE0, $STATE0, $T0
# "nd 3" little endian
li $T1, 0x3320646e
@{[vadd_vx $V1, $V1, $T1]}
add $STATE1, $STATE1, $T1
lw $T0, 0($KEY)
# "2-by" little endian
li $T2, 0x79622d32
@{[vadd_vx $V2, $V2, $T2]}
add $STATE2, $STATE2, $T2
lw $T1, 4($KEY)
# "te k" little endian
li $T3, 0x6b206574
@{[vadd_vx $V3, $V3, $T3]}
add $STATE3, $STATE3, $T3
lw $T2, 8($KEY)
@{[vadd_vx $V4, $V4, $T0]}
add $STATE4, $STATE4, $T0
lw $T3, 12($KEY)
@{[vadd_vx $V5, $V5, $T1]}
add $STATE5, $STATE5, $T1
@{[vadd_vx $V6, $V6, $T2]}
add $STATE6, $STATE6, $T2
@{[vadd_vx $V7, $V7, $T3]}
add $STATE7, $STATE7, $T3
# xor with the top-half input
@{[vxor_vv $V16, $V16, $V0]}
sw $STATE0, 0(sp)
sw $STATE1, 4(sp)
@{[vxor_vv $V17, $V17, $V1]}
sw $STATE2, 8(sp)
sw $STATE3, 12(sp)
@{[vxor_vv $V18, $V18, $V2]}
sw $STATE4, 16(sp)
sw $STATE5, 20(sp)
@{[vxor_vv $V19, $V19, $V3]}
sw $STATE6, 24(sp)
sw $STATE7, 28(sp)
@{[vxor_vv $V20, $V20, $V4]}
lw $T0, 16($KEY)
@{[vxor_vv $V21, $V21, $V5]}
lw $T1, 20($KEY)
@{[vxor_vv $V22, $V22, $V6]}
lw $T2, 24($KEY)
@{[vxor_vv $V23, $V23, $V7]}
# save the top-half of output from $V16~$V23
li $T3, 64
@{[vssseg_nf_e32_v 8, $V16, $OUTPUT, $T3]}
# add chacha bottom-half initial block states
@{[vadd_vx $V8, $V8, $T0]}
add $STATE8, $STATE8, $T0
lw $T3, 28($KEY)
@{[vadd_vx $V9, $V9, $T1]}
add $STATE9, $STATE9, $T1
lw $T0, 4($COUNTER)
@{[vadd_vx $V10, $V10, $T2]}
add $STATE10, $STATE10, $T2
lw $T1, 8($COUNTER)
@{[vadd_vx $V11, $V11, $T3]}
add $STATE11, $STATE11, $T3
lw $T2, 12($COUNTER)
@{[vid_v $V0]}
add $STATE12, $STATE12, $CURRENT_COUNTER
@{[vadd_vx $V12, $V12, $CURRENT_COUNTER]}
add $STATE12, $STATE12, $VL
@{[vadd_vx $V13, $V13, $T0]}
add $STATE13, $STATE13, $T0
@{[vadd_vx $V14, $V14, $T1]}
add $STATE14, $STATE14, $T1
@{[vadd_vx $V15, $V15, $T2]}
add $STATE15, $STATE15, $T2
@{[vadd_vv $V12, $V12, $V0]}
# xor with the bottom-half input
@{[vxor_vv $V24, $V24, $V8]}
sw $STATE8, 32(sp)
@{[vxor_vv $V25, $V25, $V9]}
sw $STATE9, 36(sp)
@{[vxor_vv $V26, $V26, $V10]}
sw $STATE10, 40(sp)
@{[vxor_vv $V27, $V27, $V11]}
sw $STATE11, 44(sp)
@{[vxor_vv $V29, $V29, $V13]}
sw $STATE12, 48(sp)
@{[vxor_vv $V28, $V28, $V12]}
sw $STATE13, 52(sp)
@{[vxor_vv $V30, $V30, $V14]}
sw $STATE14, 56(sp)
@{[vxor_vv $V31, $V31, $V15]}
sw $STATE15, 60(sp)
# save the bottom-half of output from $V24~$V31
li $T0, 64
addi $T1, $OUTPUT, 32
@{[vssseg_nf_e32_v 8, $V24, $T1, $T0]}
# the computed vector parts: `64 * VL`
slli $T0, $VL, 6
add $INPUT, $INPUT, $T0
add $OUTPUT, $OUTPUT, $T0
sub $LEN, $LEN, $T0
add $CURRENT_COUNTER, $CURRENT_COUNTER, $VL
# process the scalar data block
addi $CURRENT_COUNTER, $CURRENT_COUNTER, 1
li $T0, 64
@{[minu $T1, $LEN, $T0]}
sub $LEN, $LEN, $T1
mv $T2, sp
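# sp points at the 64-byte keystream block computed by the scalar ALU above;
# the loop below xors the next (at most 64) input bytes against it using
# byte-wide vector operations.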
.Lscalar_data_loop:
@{[vsetvli $VL, $T1, "e8", "m8", "ta", "ma"]}
# from this on, vector registers are grouped with lmul = 8
@{[vle8_v $V8, $INPUT]}
@{[vle8_v $V16, $T2]}
@{[vxor_vv $V8, $V8, $V16]}
@{[vse8_v $V8, $OUTPUT]}
add $INPUT, $INPUT, $VL
add $OUTPUT, $OUTPUT, $VL
add $T2, $T2, $VL
sub $T1, $T1, $VL
bnez $T1, .Lscalar_data_loop
bnez $LEN, .Lblock_loop
addi sp, sp, 64
ld s0, 0(sp)
ld s1, 8(sp)
ld s2, 16(sp)
ld s3, 24(sp)
ld s4, 32(sp)
ld s5, 40(sp)
ld s6, 48(sp)
ld s7, 56(sp)
ld s8, 64(sp)
ld s9, 72(sp)
ld s10, 80(sp)
ld s11, 88(sp)
addi sp, sp, 96
ret
.size ChaCha20_ctr32@{[$isaext]},.-ChaCha20_ctr32@{[$isaext]}
___
print $code;
close STDOUT or die "error closing STDOUT: $!";

(4 file diffs suppressed: too large to display)