___
################################################################################
-# gcm_init_clmul_rv64i_zbb_zbc(u128 Htable[16], const u64 Xi[2])
-# Initialization function for clmul-based implementation of GMULT
-# This function is used in tandem with gcm_gmult_clmul_rv64i_zbb_zbc
-################################################################################
+# void gcm_init_rv64i_zbc(u128 Htable[16], const u64 H[2]);
+# void gcm_init_rv64i_zbc__zbb(u128 Htable[16], const u64 H[2]);
+# void gcm_init_rv64i_zbc__zbkb(u128 Htable[16], const u64 H[2]);
+#
+# input: H: 128-bit secret parameter E(K, 0^128)
+# output: Htable: Preprocessed key data for gcm_gmult_rv64i_zbc* and
+# gcm_ghash_rv64i_zbc*
+#
+# All callers of this function reverse the byte order unconditionally
+# on little-endian machines. So we need to reverse the byte order back.
+# Additionally we reverse the bits of each byte.
+
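A minimal C sketch of this preprocessing, for review purposes: `gcm_init_model` and `brev8_model` are illustrative names (a `brev8_model` sketch accompanies the `Lbrev8_const` pool at the end of this file), `__builtin_bswap64` is the GCC/Clang builtin, and `sd_rev8_rv64i` is assumed to store its operand byte-swapped, as its name suggests.

```c
#include <stdint.h>

uint64_t brev8_model(uint64_t x);   /* bit-reverse each byte; sketch below */

/* Sketch of what gcm_init_rv64i_zbc* store: undo the callers' byte swap
 * and bit-reverse every byte of H. Note that byte-swapping a brev8'd
 * word is the same as reversing all 64 of its bits. */
static void gcm_init_model(uint64_t Htable[2], const uint64_t H[2])
{
    Htable[0] = __builtin_bswap64(brev8_model(H[0]));
    Htable[1] = __builtin_bswap64(brev8_model(H[1]));
}
```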
{
-my ($Haddr,$Xi,$TEMP) = ("a0","a1","a2");
+my ($Htable,$H,$VAL0,$VAL1,$TMP0,$TMP1,$TMP2) = ("a0","a1","a2","a3","t0","t1","t2");
$code .= <<___;
-.balign 16
-.globl gcm_init_clmul_rv64i_zbb_zbc
-.type gcm_init_clmul_rv64i_zbb_zbc,\@function
-# Initialize clmul-based implementation of galois field multiplication routine.
-# gcm_init_clmul_rv64i_zbb_zbc(ctx->Htable, ctx->H.u)
-gcm_init_clmul_rv64i_zbb_zbc:
- # argument 0 = ctx->Htable (store H here)
- # argument 1 = H.u[] (2x 64-bit words) [H_high64, H_low64]
-
- # Simply store [H_high64, H_low64] for later
- ld $TEMP,0($Xi)
- sd $TEMP,0($Haddr)
- ld $TEMP,8($Xi)
- sd $TEMP,8($Haddr)
-
+.p2align 3
+.globl gcm_init_rv64i_zbc
+.type gcm_init_rv64i_zbc,\@function
+gcm_init_rv64i_zbc:
+ ld $VAL0,0($H)
+ ld $VAL1,8($H)
+ @{[brev8_rv64i $VAL0, $TMP0, $TMP1, $TMP2]}
+ @{[brev8_rv64i $VAL1, $TMP0, $TMP1, $TMP2]}
+ @{[sd_rev8_rv64i $VAL0, $Htable, 0, $TMP0]}
+ @{[sd_rev8_rv64i $VAL1, $Htable, 8, $TMP0]}
ret
-
+.size gcm_init_rv64i_zbc,.-gcm_init_rv64i_zbc
___
-
}
-################################################################################
-# gcm_gmult_clmul_rv64i_zbb_zbc(u64 Xi[2], const u128 Htable[16])
-# Compute GMULT (X*H mod f) using the Zbc (clmul) and Zbb (basic bit manip)
-# extensions, and the Modified Barrett Reduction technique
-################################################################################
{
-my ($Xi,$Haddr,$A1,$A0,$B1,$B0,$C1,$C0,$D1,$D0,$E1,$E0,$TEMP,$TEMP2,$qp_low) =
- ("a0","a1","a2","a3","a4","a5","a6","a7","t0","t1","t2","t3","t4","t5","t6");
+my ($Htable,$H,$VAL0,$VAL1,$TMP0,$TMP1,$TMP2) = ("a0","a1","a2","a3","t0","t1","t2");
$code .= <<___;
-.balign 16
-.globl gcm_gmult_clmul_rv64i_zbb_zbc
-.type gcm_gmult_clmul_rv64i_zbb_zbc,\@function
-# static void gcm_gmult_clmul_rv64i_zbb_zbc(u64 Xi[2], const u128 Htable[16])
-# Computes product of X*H mod f
-gcm_gmult_clmul_rv64i_zbb_zbc:
-
- # Load X and H (H is saved previously in gcm_init_clmul_rv64i_zbb_zbc)
- ld $A1,0($Xi)
- ld $A0,8($Xi)
-
- ld $B1,0($Haddr)
- ld $B0,8($Haddr)
-
- li $qp_low,0xe100000000000000
-
- # Perform Katratsuba Multiplication to generate a 255-bit intermediate
- # A = [A1:A0]
- # B = [B1:B0]
- # Let:
- # [C1:C0] = A1*B1
- # [D1:D0] = A0*B0
- # [E1:E0] = (A0+A1)*(B0+B1)
- # Then:
- # A*B = [C1:C0+C1+D1+E1:D1+C0+D0+E0:D0]
-
- @{[rev8 $A1, $A1]}
- @{[clmul $C0,$A1,$B1]}
- @{[clmulh $C1,$A1,$B1]}
-
- @{[rev8 $A0,$A0]}
- @{[clmul $D0,$A0,$B0]}
- @{[clmulh $D1,$A0,$B0]}
-
- xor $TEMP,$A0,$A1
- xor $TEMP2,$B0,$B1
-
- @{[clmul $E0,$TEMP,$TEMP2]}
- @{[clmulh $E1,$TEMP,$TEMP2]}
-
- # 0th term is just C1
-
- # Construct term 1 in E1 (E1 only appears in dword 1)
- xor $E1,$E1,$D1
- xor $E1,$E1,$C1
- xor $E1,$E1,$C0
-
- # Term 1 is E1
-
- # Construct term 2 in E0 (E0 only appears in dword 2)
- xor $E0,$E0,$D0
- xor $E0,$E0,$C0
- xor $E0,$E0,$D1
-
- # Term 2 is E0
-
- # final term is just D0
-
- # X*H is now stored in [C1,E1,E0,D0]
-
- # Left-justify
- slli $C1,$C1,1
- # Or in the high bit of E1
- srli $TEMP,$E1,63
- or $C1,$C1,$TEMP
-
- slli $E1,$E1,1
- # Or in the high bit of E0
- srli $TEMP2,$E0,63
- or $E1,$E1,$TEMP2
-
- slli $E0,$E0,1
- # Or in the high bit of D0
- srli $TEMP,$D0,63
- or $E0,$E0,$TEMP
-
- slli $D0,$D0,1
-
- # Barrett Reduction
- # c = [E0, D0]
- # We want the top 128 bits of the result of c*f
- # We'll get this by computing the low-half (most significant 128 bits in
- # the reflected domain) of clmul(c,fs)<<1 first, then
- # xor in c to complete the calculation
-
- # AA = [AA1:AA0] = [E0,D0] = c
- # BB = [BB1:BB0] = [qp_low,0]
- # [CC1:CC0] = AA1*BB1
- # [DD1:DD0] = AA0*BB0
- # [EE1:EE0] = (AA0+AA1)*(BB0+BB1)
- # Then:
- # AA*BB = [CC1:CC0+CC1+DD1+EE1:DD1+CC0+DD0+EE0:DD0]
- # We only need CC0,DD1,DD0,EE0 to compute the low 128 bits of c * qp_low
+.p2align 3
+.globl gcm_init_rv64i_zbc__zbb
+.type gcm_init_rv64i_zbc__zbb,\@function
+gcm_init_rv64i_zbc__zbb:
+ ld $VAL0,0($H)
+ ld $VAL1,8($H)
+ @{[brev8_rv64i $VAL0, $TMP0, $TMP1, $TMP2]}
+ @{[brev8_rv64i $VAL1, $TMP0, $TMP1, $TMP2]}
+ @{[rev8 $VAL0, $VAL0]}
+ @{[rev8 $VAL1, $VAL1]}
+ sd $VAL0,0($Htable)
+ sd $VAL1,8($Htable)
+ ret
+.size gcm_init_rv64i_zbc__zbb,.-gcm_init_rv64i_zbc__zbb
___
+}
-my ($CC0,$EE0,$AA1,$AA0,$BB1) = ($A0,$B1,$E0,$D0,$qp_low);
+{
+my ($Htable,$H,$TMP0,$TMP1) = ("a0","a1","t0","t1");
$code .= <<___;
-
- @{[clmul $CC0,$AA1,$BB1]}
- #clmul DD0,AA0,BB0 # BB0 is 0, so DD0 = 0
- #clmulh DD1,AA0,BB0 # BB0 is 0, so DD1 = 0
- xor $TEMP,$AA0,$AA1
- #xor TEMP2,BB0,BB1 # TEMP2 = BB1 = qp_low
- @{[clmul $EE0,$TEMP,$BB1]}
-
- # Result is [N/A:N/A:DD1+CC0+DD0+EE0:DD0]
- # Simplifying: [CC0+EE0:0]
- xor $TEMP2,$CC0,$EE0
- # Shift left by 1 to correct for bit reflection
- slli $TEMP2,$TEMP2,1
-
- # xor into c = [E0,D0]
- # Note that only E0 is affected
- xor $E0,$E0,$TEMP2
-
- # Now, q = [E0,D0]
-
- # The final step is to compute clmul(q,[qp_low:0])<<1
- # The leftmost 128 bits are the reduced result.
- # Once again, we use Karatsuba multiplication, but many of the terms
- # simplify or cancel out.
- # AA = [AA1:AA0] = [E0,D0] = c
- # BB = [BB1:BB0] = [qp_low,0]
- # [CC1:CC0] = AA1*BB1
- # [DD1:DD0] = AA0*BB0
- # [EE1:EE0] = (AA0+AA1)*(BB0+BB1)
- # Then:
- # AA*BB = [CC1:CC0+CC1+DD1+EE1:DD1+CC0+DD0+EE0:DD0]
- # We need CC1,CC0,DD0,DD1,EE1,EE0 to compute the leftmost 128 bits of AA*BB
-
+.p2align 3
+.globl gcm_init_rv64i_zbc__zbkb
+.type gcm_init_rv64i_zbc__zbkb,\@function
+gcm_init_rv64i_zbc__zbkb:
+ ld $TMP0,0($H)
+ ld $TMP1,8($H)
+ @{[brev8 $TMP0, $TMP0]}
+ @{[brev8 $TMP1, $TMP1]}
+ @{[rev8 $TMP0, $TMP0]}
+ @{[rev8 $TMP1, $TMP1]}
+ sd $TMP0,0($Htable)
+ sd $TMP1,8($Htable)
+ ret
+.size gcm_init_rv64i_zbc__zbkb,.-gcm_init_rv64i_zbc__zbkb
___
+}
-my ($AA1,$AA0,$BB1,$CC1,$CC0,$EE1,$EE0) = ($E0,$D0,$qp_low,$A0,$A1,$C0,$B0);
+################################################################################
+# void gcm_gmult_rv64i_zbc(u64 Xi[2], const u128 Htable[16]);
+# void gcm_gmult_rv64i_zbc__zbkb(u64 Xi[2], const u128 Htable[16]);
+#
+# input: Xi: current hash value
+# Htable: preprocessed H, as produced by gcm_init_rv64i_zbc*
+# output: Xi: next hash value
+#
+# Compute GMULT (Xi*H mod f) using the Zbc (clmul) and Zbb (basic bit manip)
+# extensions. The multiplication is schoolbook (no Karatsuba) and the final
+# reduction uses clmul, which minimizes the instruction count. HW with clmul
+# latencies higher than 2 cycles might observe a performance improvement with
+# Karatsuba. HW with clmul latencies higher than 6 cycles might observe a
+# further improvement from converting the reduction to shift&xor. For a full
+# discussion of these estimates see
+# https://github.com/riscv/riscv-crypto/blob/master/doc/supp/gcm-mode-cmul.adoc
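A self-contained C model of the exact clmul sequence below may help review; this is a sketch, not a drop-in replacement. `gmult_model`, `clmul_soft`, and `clmulh_soft` are illustrative names, the latter two standing in for the Zbc clmul/clmulh instructions, and the operands are assumed to be in the brev8'd representation described above.

```c
#include <stdint.h>

/* Software stand-ins for the Zbc clmul/clmulh instructions: the low
 * and high 64 bits of the 128-bit carry-less product of a and b. */
static uint64_t clmul_soft(uint64_t a, uint64_t b)
{
    uint64_t r = 0;
    for (int i = 0; i < 64; i++)
        if ((b >> i) & 1)
            r ^= a << i;
    return r;
}

static uint64_t clmulh_soft(uint64_t a, uint64_t b)
{
    uint64_t r = 0;
    for (int i = 1; i < 64; i++)
        if ((b >> i) & 1)
            r ^= a >> (64 - i);
    return r;
}

/* Mirror of the instruction sequence below: schoolbook 128x128
 * carry-less multiply into z3:z2:z1:z0, then two clmul-based folds
 * by 0x87. x[0]/x[1] are the words loaded from Xi+0/Xi+8, y[0]/y[1]
 * the (already bit-reversed) key words from Htable. */
static void gmult_model(uint64_t x[2], const uint64_t y[2])
{
    uint64_t z3 = clmulh_soft(x[1], y[1]);
    uint64_t z2 = clmul_soft(x[1], y[1]) ^ clmulh_soft(x[0], y[1])
                                         ^ clmulh_soft(x[1], y[0]);
    uint64_t z1 = clmul_soft(x[0], y[1]) ^ clmul_soft(x[1], y[0])
                                         ^ clmulh_soft(x[0], y[0]);
    uint64_t z0 = clmul_soft(x[0], y[0]);

    const uint64_t polymod = 0x87;          /* x^7 + x^2 + x + 1 */
    z2 ^= clmulh_soft(z3, polymod);         /* fold z3 into z2:z1 */
    z1 ^= clmul_soft(z3, polymod);
    x[1] = z1 ^ clmulh_soft(z2, polymod);   /* fold z2 into z1:z0 */
    x[0] = z0 ^ clmul_soft(z2, polymod);
}
```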
+{
+my ($Xi,$Htable,$x0,$x1,$y0,$y1) = ("a0","a1","a4","a5","a6","a7");
+my ($z0,$z1,$z2,$z3,$t0,$t1,$polymod) = ("t0","t1","t2","t3","t4","t5","t6");
$code .= <<___;
+.p2align 3
+.globl gcm_gmult_rv64i_zbc
+.type gcm_gmult_rv64i_zbc,\@function
+gcm_gmult_rv64i_zbc:
+ # Load Xi and bit-reverse it
+ ld $x0, 0($Xi)
+ ld $x1, 8($Xi)
+ @{[brev8_rv64i $x0, $z0, $z1, $z2]}
+ @{[brev8_rv64i $x1, $z0, $z1, $z2]}
+
+ # Load the key (already bit-reversed)
+ ld $y0, 0($Htable)
+ ld $y1, 8($Htable)
+
+ # Load the reduction constant
+ la $polymod, Lpolymod
+ lbu $polymod, 0($polymod)
+
+ # Multiplication (without Karatsuba)
+ @{[clmulh $z3, $x1, $y1]}
+ @{[clmul $z2, $x1, $y1]}
+ @{[clmulh $t1, $x0, $y1]}
+ @{[clmul $z1, $x0, $y1]}
+ xor $z2, $z2, $t1
+ @{[clmulh $t1, $x1, $y0]}
+ @{[clmul $t0, $x1, $y0]}
+ xor $z2, $z2, $t1
+ xor $z1, $z1, $t0
+ @{[clmulh $t1, $x0, $y0]}
+ @{[clmul $z0, $x0, $y0]}
+ xor $z1, $z1, $t1
+
+ # Reduction with clmul
+ @{[clmulh $t1, $z3, $polymod]}
+ @{[clmul $t0, $z3, $polymod]}
+ xor $z2, $z2, $t1
+ xor $z1, $z1, $t0
+ @{[clmulh $t1, $z2, $polymod]}
+ @{[clmul $t0, $z2, $polymod]}
+ xor $x1, $z1, $t1
+ xor $x0, $z0, $t0
+
+ # Bit-reverse Xi back and store it
+ @{[brev8_rv64i $x0, $z0, $z1, $z2]}
+ @{[brev8_rv64i $x1, $z0, $z1, $z2]}
+ sd $x0, 0($Xi)
+ sd $x1, 8($Xi)
+ ret
+.size gcm_gmult_rv64i_zbc,.-gcm_gmult_rv64i_zbc
+___
+}
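As a sketch of why the two folds above suffice: reduction modulo f(x) = x^128 + x^7 + x^2 + x + 1 replaces x^128 by x^7 + x^2 + x + 1, which is exactly the Lpolymod byte 0x87 = 0b10000111 (as I read it, the brev8 preprocessing puts the operands into a representation where this plain-polynomial reduction applies). With z3 and z2 denoting the coefficient words at x^192 and x^128:

$$
z_3 x^{192} + z_2 x^{128} \equiv (z_3 x^{64} + z_2)\,(x^7 + x^2 + x + 1) \pmod{f(x)}
$$

Since z3 * 0x87 has degree at most 70, the first fold spills at most seven bits into the z2 word, and the second fold then absorbs them.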
- @{[clmul $CC0,$AA1,$BB1]}
- @{[clmulh $CC1,$AA1,$BB1]}
-
- #clmul DD0,AA0,BB0 # BB0 = 0 so DD0 = 0
- #clmulh DD1,AA0,BB0 # BB0 = 0 so DD1 = 0
-
- xor $TEMP,$AA0,$AA1
- #xor TEMP2,BB0,BB1 # BB0 = 0 to TEMP2 == BB1 == qp_low
-
- @{[clmul $EE0,$TEMP,$BB1]}
- @{[clmulh $EE1,$TEMP,$BB1]}
-
- # Need the DD1+CC0+DD0+EE0 term to shift its leftmost bit into the
- # intermediate result.
- # This is just CC0+EE0, store it in TEMP
- xor $TEMP,$CC0,$EE0
-
- # Result is [CC1:CC0+CC1+EE1:(a single bit)]<<1
- # Combine into [CC1:CC0]
- xor $CC0,$CC0,$CC1
- xor $CC0,$CC0,$EE1
-
- # Shift 128-bit quantity, xor in [C1,E1] and store
- slli $CC1,$CC1,1
- srli $TEMP2,$CC0,63
- or $CC1,$CC1,$TEMP2
- # xor in C1
- xor $CC1,$CC1,$C1
- @{[rev8 $CC1,$CC1]}
-
- slli $CC0,$CC0,1
- srli $TEMP,$TEMP,63
- or $CC0,$CC0,$TEMP
- # xor in E1
- xor $CC0,$CC0,$E1
- @{[rev8 $CC0,$CC0]}
- sd $CC1,0(a0)
- sd $CC0,8(a0)
+{
+my ($Xi,$Htable,$x0,$x1,$y0,$y1) = ("a0","a1","a4","a5","a6","a7");
+my ($z0,$z1,$z2,$z3,$t0,$t1,$polymod) = ("t0","t1","t2","t3","t4","t5","t6");
+$code .= <<___;
+.p2align 3
+.globl gcm_gmult_rv64i_zbc__zbkb
+.type gcm_gmult_rv64i_zbc__zbkb,\@function
+gcm_gmult_rv64i_zbc__zbkb:
+ # Load Xi and bit-reverse it
+ ld $x0, 0($Xi)
+ ld $x1, 8($Xi)
+ @{[brev8 $x0, $x0]}
+ @{[brev8 $x1, $x1]}
+
+ # Load the key (already bit-reversed)
+ ld $y0, 0($Htable)
+ ld $y1, 8($Htable)
+
+ # Load the reduction constant
+ la $polymod, Lpolymod
+ lbu $polymod, 0($polymod)
+
+ # Multiplication (without Karatsuba)
+ @{[clmulh $z3, $x1, $y1]}
+ @{[clmul $z2, $x1, $y1]}
+ @{[clmulh $t1, $x0, $y1]}
+ @{[clmul $z1, $x0, $y1]}
+ xor $z2, $z2, $t1
+ @{[clmulh $t1, $x1, $y0]}
+ @{[clmul $t0, $x1, $y0]}
+ xor $z2, $z2, $t1
+ xor $z1, $z1, $t0
+ @{[clmulh $t1, $x0, $y0]}
+ @{[clmul $z0, $x0, $y0]}
+ xor $z1, $z1, $t1
+
+ # Reduction with clmul
+ @{[clmulh $t1, $z3, $polymod]}
+ @{[clmul $t0, $z3, $polymod]}
+ xor $z2, $z2, $t1
+ xor $z1, $z1, $t0
+ @{[clmulh $t1, $z2, $polymod]}
+ @{[clmul $t0, $z2, $polymod]}
+ xor $x1, $z1, $t1
+ xor $x0, $z0, $t0
+
+ # Bit-reverse Xi back and store it
+ @{[brev8 $x0, $x0]}
+ @{[brev8 $x1, $x1]}
+ sd $x0, 0($Xi)
+ sd $x1, 8($Xi)
ret
+.size gcm_gmult_rv64i_zbc__zbkb,.-gcm_gmult_rv64i_zbc__zbkb
___
-
}
+$code .= <<___;
+.p2align 3
+Lbrev8_const:
+ .dword 0xAAAAAAAAAAAAAAAA
+ .dword 0xCCCCCCCCCCCCCCCC
+ .dword 0xF0F0F0F0F0F0F0F0
+.size Lbrev8_const,.-Lbrev8_const
+
+Lpolymod:
+ .byte 0x87
+.size Lpolymod,.-Lpolymod
+___
+
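For reference, the three Lbrev8_const masks are the classic swap-adjacent-groups constants. Below is a C sketch of the per-byte bit reversal that the brev8_rv64i helper presumably expands to (brev8_model is the illustrative name used in the init sketch above):

```c
#include <stdint.h>

/* Reverse the bits within each byte of x: swap adjacent bits, then
 * adjacent bit pairs, then nibbles, using the Lbrev8_const masks.
 * Bytes themselves stay in place. */
uint64_t brev8_model(uint64_t x)
{
    x = ((x & 0xAAAAAAAAAAAAAAAAULL) >> 1) | ((x << 1) & 0xAAAAAAAAAAAAAAAAULL);
    x = ((x & 0xCCCCCCCCCCCCCCCCULL) >> 2) | ((x << 2) & 0xCCCCCCCCCCCCCCCCULL);
    x = ((x & 0xF0F0F0F0F0F0F0F0ULL) >> 4) | ((x << 4) & 0xF0F0F0F0F0F0F0F0ULL);
    return x;
}
```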
print $code;
close STDOUT or die "error closing STDOUT: $!";