# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# - RV64I
-# - RISC-V vector ('V') with VLEN >= 128
-# - Vector Bit-manipulation used in Cryptography ('Zvbb')
-# - Vector Carryless Multiplication ('Zvbc')
+# - RISC-V Vector ('V') with VLEN >= 128
+# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
+# - RISC-V Vector Carryless Multiplication extension ('Zvbc')
use strict;
use warnings;
___
################################################################################
-# void gcm_init_rv64i_zvbb_zvbc(u128 Htable[16], const u64 H[2]);
+# void gcm_init_rv64i_zvkb_zvbc(u128 Htable[16], const u64 H[2]);
#
# input: H: 128-bit H - secret parameter E(K, 0^128)
-# output: Htable: Preprocessed key data for gcm_gmult_rv64i_zvbb_zvbc and
-# gcm_ghash_rv64i_zvbb_zvbc
+# output: Htable: Preprocessed key data for gcm_gmult_rv64i_zvkb_zvbc and
+# gcm_ghash_rv64i_zvkb_zvbc
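+# note:   only Htable[0] appears to be used by these routines; the final
+#         vse64.v below stores a single 128-bit preprocessed value.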
{
my ($Htable,$H,$TMP0,$TMP1,$TMP2) = ("a0","a1","t0","t1","t2");
my ($V0,$V1,$V2,$V3,$V4,$V5,$V6) = ("v0","v1","v2","v3","v4","v5","v6");
$code .= <<___;
.p2align 3
-.globl gcm_init_rv64i_zvbb_zvbc
-.type gcm_init_rv64i_zvbb_zvbc,\@function
-gcm_init_rv64i_zvbb_zvbc:
+.globl gcm_init_rv64i_zvkb_zvbc
+.type gcm_init_rv64i_zvkb_zvbc,\@function
+gcm_init_rv64i_zvkb_zvbc:
# Load/store data in reverse order.
# This is needed as part of the endianness swap.
add $H, $H, 8
@{[vse64_v $V1, $Htable]} # vse64.v v1, (a0)
ret
-.size gcm_init_rv64i_zvbb_zvbc,.-gcm_init_rv64i_zvbb_zvbc
+.size gcm_init_rv64i_zvkb_zvbc,.-gcm_init_rv64i_zvkb_zvbc
___
}
################################################################################
-# void gcm_gmult_rv64i_zvbb_zvbc(u64 Xi[2], const u128 Htable[16]);
+# void gcm_gmult_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16]);
#
# input: Xi: current hash value
# Htable: preprocessed H
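# note:   computes Xi = Xi * H in GF(2^128) (one GHASH multiplication),
#         using the Zvbc carryless multiply (vclmul) followed by reduction
#         modulo the GHASH polynomial x^128 + x^7 + x^2 + x + 1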
$code .= <<___;
.text
.p2align 3
-.globl gcm_gmult_rv64i_zvbb_zvbc
-.type gcm_gmult_rv64i_zvbb_zvbc,\@function
-gcm_gmult_rv64i_zvbb_zvbc:
+.globl gcm_gmult_rv64i_zvkb_zvbc
+.type gcm_gmult_rv64i_zvkb_zvbc,\@function
+gcm_gmult_rv64i_zvkb_zvbc:
ld $TMP0, ($Htable)
ld $TMP1, 8($Htable)
li $TMP2, 63
@{[vrev8_v $V2, $V2]} # vrev8.v v2, v2
@{[vsse64_v $V2, $Xi, $TMP4]} # vsse64.v v2, (a0), t4
ret
-.size gcm_gmult_rv64i_zvbb_zvbc,.-gcm_gmult_rv64i_zvbb_zvbc
+.size gcm_gmult_rv64i_zvkb_zvbc,.-gcm_gmult_rv64i_zvkb_zvbc
___
}
################################################################################
-# void gcm_ghash_rv64i_zvbb_zvbc(u64 Xi[2], const u128 Htable[16],
+# void gcm_ghash_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16],
# const u8 *inp, size_t len);
#
# input: Xi: current hash value
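# note:   folds len bytes of input into Xi one 16-byte block at a time,
#         i.e. Xi = (Xi ^ block) * H per block, so bulk data needs a single
#         call rather than one gmult invocation per block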
$code .= <<___;
.p2align 3
-.globl gcm_ghash_rv64i_zvbb_zvbc
-.type gcm_ghash_rv64i_zvbb_zvbc,\@function
-gcm_ghash_rv64i_zvbb_zvbc:
+.globl gcm_ghash_rv64i_zvkb_zvbc
+.type gcm_ghash_rv64i_zvkb_zvbc,\@function
+gcm_ghash_rv64i_zvkb_zvbc:
ld $TMP0, ($Htable)
ld $TMP1, 8($Htable)
li $TMP2, 63
@{[vsse64_v $V5, $Xi, $M8]} # vsse64.v v5, (a0), t4
ret
-.size gcm_ghash_rv64i_zvbb_zvbc,.-gcm_ghash_rv64i_zvbb_zvbc
+.size gcm_ghash_rv64i_zvkb_zvbc,.-gcm_ghash_rv64i_zvkb_zvbc
___
}
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# - RV64I
-# - RISC-V vector ('V') with VLEN >= 128
-# - RISC-V vector crypto GHASH extension ('Zvkg')
+# - RISC-V Vector ('V') with VLEN >= 128
+# - RISC-V Vector GCM/GMAC extension ('Zvkg')
+#
+# Optional:
+# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
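+#   When Zvkb is available, gcm_init_rv64i_zvkg_zvkb below uses vrev8.v for
+#   the byte-order swap; otherwise the plain gcm_init_rv64i_zvkg variant is
+#   selected (see the runtime dispatch later in this change).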
use strict;
use warnings;
################################################################################
# void gcm_init_rv64i_zvkg(u128 Htable[16], const u64 H[2]);
-# void gcm_init_rv64i_zvkg_zvbb(u128 Htable[16], const u64 H[2]);
+# void gcm_init_rv64i_zvkg_zvkb(u128 Htable[16], const u64 H[2]);
#
# input: H: 128-bit H - secret parameter E(K, 0^128)
# output: Htable: Copy of secret parameter (in normalized byte order)
$code .= <<___;
.p2align 3
-.globl gcm_init_rv64i_zvkg_zvbb
-.type gcm_init_rv64i_zvkg_zvbb,\@function
-gcm_init_rv64i_zvkg_zvbb:
- @{[vsetivli__x0_2_e64_m1_tu_mu]} # vsetivli x0, 2, e64, m1, tu, mu
+.globl gcm_init_rv64i_zvkg_zvkb
+.type gcm_init_rv64i_zvkg_zvkb,\@function
+gcm_init_rv64i_zvkg_zvkb:
+ @{[vsetivli__x0_2_e64_m1_tu_mu]} # vsetivli x0, 2, e64, m1, tu, mu
@{[vle64_v $V0, $H]} # vle64.v v0, (a1)
@{[vrev8_v $V0, $V0]} # vrev8.v v0, v0
@{[vse64_v $V0, $Htable]} # vse64.v v0, (a0)
ret
-.size gcm_init_rv64i_zvkg_zvbb,.-gcm_init_rv64i_zvkg_zvbb
+.size gcm_init_rv64i_zvkg_zvkb,.-gcm_init_rv64i_zvkg_zvkb
___
}
$MODESASM_c64xplus=ghash-c64xplus.s
$MODESDEF_c64xplus=GHASH_ASM
- $MODESASM_riscv64=ghash-riscv64.s ghash-riscv64-zvbb-zvbc.s ghash-riscv64-zvkg.s
+ $MODESASM_riscv64=ghash-riscv64.s ghash-riscv64-zvkb-zvbc.s ghash-riscv64-zvkg.s
$MODESDEF_riscv64=GHASH_ASM
# Now that we have defined all the arch specific variables, use the
INCLUDE[ghash-s390x.o]=..
GENERATE[ghash-c64xplus.S]=asm/ghash-c64xplus.pl
GENERATE[ghash-riscv64.s]=asm/ghash-riscv64.pl
-GENERATE[ghash-riscv64-zvbb-zvbc.s]=asm/ghash-riscv64-zvbb-zvbc.pl
+GENERATE[ghash-riscv64-zvkb-zvbc.s]=asm/ghash-riscv64-zvkb-zvbc.pl
GENERATE[ghash-riscv64-zvkg.s]=asm/ghash-riscv64-zvkg.pl
const u8 *inp, size_t len);
void gcm_ghash_rv64i_zbc__zbkb(u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len);
-/* Zvbb/Zvbc (vector crypto with vclmul) based routines. */
-void gcm_init_rv64i_zvbb_zvbc(u128 Htable[16], const u64 Xi[2]);
-void gcm_gmult_rv64i_zvbb_zvbc(u64 Xi[2], const u128 Htable[16]);
-void gcm_ghash_rv64i_zvbb_zvbc(u64 Xi[2], const u128 Htable[16],
+/* Zvkb/Zvbc (vector crypto with vclmul) based routines. */
+void gcm_init_rv64i_zvkb_zvbc(u128 Htable[16], const u64 Xi[2]);
+void gcm_gmult_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16]);
+void gcm_ghash_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len);
/* Zvkg (vector crypto with vgmul.vv and vghsh.vv). */
void gcm_init_rv64i_zvkg(u128 Htable[16], const u64 Xi[2]);
-void gcm_init_rv64i_zvkg_zvbb(u128 Htable[16], const u64 Xi[2]);
+void gcm_init_rv64i_zvkg_zvkb(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_rv64i_zvkg(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_rv64i_zvkg(u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len);
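/*
 * Minimal usage sketch (illustrative only, not part of this change): the
 * helper name and the assumption that len is a whole number of 16-byte
 * blocks are hypothetical; u8/u64/u128 are OpenSSL's internal typedefs.
 * These routines must only be called after the corresponding capability
 * checks, as in the dispatch code below.
 */
static void ghash_zvkb_zvbc_sketch(u64 Xi[2], const u64 H[2],
                                   const u8 *inp, size_t len)
{
    u128 Htable[16];

    gcm_init_rv64i_zvkb_zvbc(Htable, H);             /* preprocess H once      */
    Xi[0] = Xi[1] = 0;                               /* GHASH starts from zero */
    gcm_ghash_rv64i_zvkb_zvbc(Xi, Htable, inp, len); /* fold len bytes into Xi */
}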
ctx->ghash = gcm_ghash_4bit;
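/*
 * Preference order: the dedicated Zvkg GHASH instructions first, then the
 * Zvkb+Zvbc carryless-multiply routines, then the scalar Zbc/Zbkb code.
 */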
if (RISCV_HAS_ZVKG() && riscv_vlen() >= 128) {
- if (RISCV_HAS_ZVBB())
- ctx->ginit = gcm_init_rv64i_zvkg_zvbb;
+ if (RISCV_HAS_ZVKB())
+ ctx->ginit = gcm_init_rv64i_zvkg_zvkb;
else
ctx->ginit = gcm_init_rv64i_zvkg;
ctx->gmult = gcm_gmult_rv64i_zvkg;
ctx->ghash = gcm_ghash_rv64i_zvkg;
- } else if (RISCV_HAS_ZVBB() && RISCV_HAS_ZVBC() && riscv_vlen() >= 128) {
- ctx->ginit = gcm_init_rv64i_zvbb_zvbc;
- ctx->gmult = gcm_gmult_rv64i_zvbb_zvbc;
- ctx->ghash = gcm_ghash_rv64i_zvbb_zvbc;
+ } else if (RISCV_HAS_ZVKB() && RISCV_HAS_ZVBC() && riscv_vlen() >= 128) {
+ ctx->ginit = gcm_init_rv64i_zvkb_zvbc;
+ ctx->gmult = gcm_gmult_rv64i_zvkb_zvbc;
+ ctx->ghash = gcm_ghash_rv64i_zvkb_zvbc;
} else if (RISCV_HAS_ZBC()) {
if (RISCV_HAS_ZBKB()) {
ctx->ginit = gcm_init_rv64i_zbc__zbkb;
# Vector crypto instructions
-## Zvbb instructions
+## Zvbb and Zvkb instructions
+##
+## vandn (also in zvkb)
+## vbrev
+## vbrev8 (also in zvkb)
+## vrev8 (also in zvkb)
+## vclz
+## vctz
+## vcpop
+## vrol (also in zvkb)
+## vror (also in zvkb)
+## vwsll
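##
## Zvkb is the subset of Zvbb made up of the entries marked "(also in zvkb)"
## above; code restricted to those instructions runs with either extension.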
sub vrev8_v {
# vrev8.v vd, vs2, vm
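# Reverses the byte order within each element of vs2 into vd (used for the
# endianness swaps in the GHASH code above).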
# The generated code of this file depends on the following RISC-V extensions:
# - RV64I
-# - RISC-V vector ('V') with VLEN >= 128
-# - Vector Bit-manipulation used in Cryptography ('Zvbb')
-# - Vector ShangMi Suite: SM4 Block Cipher ('Zvksed')
+# - RISC-V Vector ('V') with VLEN >= 128
+# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
+# - RISC-V Vector SM4 Block Cipher extension ('Zvksed')
use strict;
use warnings;
RISCV_DEFINE_CAP(V, 0, 14)
RISCV_DEFINE_CAP(ZVBB, 0, 15)
RISCV_DEFINE_CAP(ZVBC, 0, 16)
-RISCV_DEFINE_CAP(ZVKG, 0, 17)
-RISCV_DEFINE_CAP(ZVKNED, 0, 18)
-RISCV_DEFINE_CAP(ZVKNHA, 0, 19)
-RISCV_DEFINE_CAP(ZVKNHB, 0, 20)
-RISCV_DEFINE_CAP(ZVKSED, 0, 21)
-RISCV_DEFINE_CAP(ZVKSH, 0, 22)
+RISCV_DEFINE_CAP(ZVKB, 0, 17)
+RISCV_DEFINE_CAP(ZVKG, 0, 18)
+RISCV_DEFINE_CAP(ZVKNED, 0, 19)
+RISCV_DEFINE_CAP(ZVKNHA, 0, 20)
+RISCV_DEFINE_CAP(ZVKNHB, 0, 21)
+RISCV_DEFINE_CAP(ZVKSED, 0, 22)
+RISCV_DEFINE_CAP(ZVKSH, 0, 23)
/*
* In the future ...
#define RISCV_HAS_ZBB_AND_ZBC() (RISCV_HAS_ZBB() && RISCV_HAS_ZBC())
#define RISCV_HAS_ZBKB_AND_ZKND_AND_ZKNE() (RISCV_HAS_ZBKB() && RISCV_HAS_ZKND() && RISCV_HAS_ZKNE())
#define RISCV_HAS_ZKND_AND_ZKNE() (RISCV_HAS_ZKND() && RISCV_HAS_ZKNE())
-#define RISCV_HAS_ZVBB_AND_ZVKNHA() (RISCV_HAS_ZVBB() && RISCV_HAS_ZVKNHA())
-#define RISCV_HAS_ZVBB_AND_ZVKNHB() (RISCV_HAS_ZVBB() && RISCV_HAS_ZVKNHB())
-#define RISCV_HAS_ZVBB_AND_ZVKSED() (RISCV_HAS_ZVBB() && RISCV_HAS_ZVKSED())
-#define RISCV_HAS_ZVBB_AND_ZVKSH() (RISCV_HAS_ZVBB() && RISCV_HAS_ZVKSH())
+/*
+ * Zvbb is a superset of the Zvkb extension. This macro makes
+ * RISCV_HAS_ZVKB() evaluate to RISCV_HAS_ZVBB() || RISCV_HAS_ZVKB(), so
+ * either extension satisfies the check.
+ */
+#define RISCV_HAS_ZVKB() (RISCV_HAS_ZVBB() || RISCV_HAS_ZVKB())
+#define RISCV_HAS_ZVKB_AND_ZVKNHA() (RISCV_HAS_ZVKB() && RISCV_HAS_ZVKNHA())
+#define RISCV_HAS_ZVKB_AND_ZVKNHB() (RISCV_HAS_ZVKB() && RISCV_HAS_ZVKNHB())
+#define RISCV_HAS_ZVKB_AND_ZVKSED() (RISCV_HAS_ZVKB() && RISCV_HAS_ZVKSED())
+#define RISCV_HAS_ZVKB_AND_ZVKSH() (RISCV_HAS_ZVKB() && RISCV_HAS_ZVKSH())
/*
* Get the size of a vector register in bits (VLEN).
const PROV_CCM_HW *ossl_prov_sm4_hw_ccm(size_t keybits)
{
- if (RISCV_HAS_ZVBB_AND_ZVKSED() && riscv_vlen() >= 128)
+ if (RISCV_HAS_ZVKB_AND_ZVKSED() && riscv_vlen() >= 128)
return &rv64i_zvksed_sm4_ccm;
else
return &ccm_sm4;
const PROV_GCM_HW *ossl_prov_sm4_hw_gcm(size_t keybits)
{
- if (RISCV_HAS_ZVBB_AND_ZVKSED() && riscv_vlen() >= 128)
+ if (RISCV_HAS_ZVKB_AND_ZVKSED() && riscv_vlen() >= 128)
return &rv64i_zvksed_sm4_gcm;
else
return &sm4_gcm;
cipher_hw_sm4_copyctx \
};
#define PROV_CIPHER_HW_select(mode) \
-if (RISCV_HAS_ZVBB_AND_ZVKSED() && riscv_vlen() >= 128) \
+if (RISCV_HAS_ZVKB_AND_ZVKSED() && riscv_vlen() >= 128) \
return &rv64i_zvksed_sm4_##mode;
const PROV_CIPHER_HW *ossl_prov_cipher_hw_sm4_xts(size_t keybits)
{
- if (RISCV_HAS_ZVBB_AND_ZVKSED() && riscv_vlen() >= 128)
+ if (RISCV_HAS_ZVKB_AND_ZVKSED() && riscv_vlen() >= 128)
return &rv64i_zvksed_sm4_xts;
else
return &sm4_generic_xts;