/*
 * Copyright 2010-2022 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
10 /* This header can move into provider when legacy support is removed */
11 #include <openssl/modes.h>
13 #if (defined(_WIN32) || defined(_WIN64)) && !defined(__MINGW32__)
15 typedef unsigned __int64 u64;
16 # define U64(C) C##UI64
17 #elif defined(__arch64__)
19 typedef unsigned long u64;
22 typedef long long i64;
23 typedef unsigned long long u64;
24 # define U64(C) C##ULL
/* Minimal fixed-width helper types used by the mode implementations. */
typedef unsigned int u32;
typedef unsigned char u8;
/*
 * Assume the target requires naturally aligned memory accesses by default;
 * the whitelist below clears the flag on architectures known to handle
 * misaligned word loads/stores correctly and efficiently, enabling the
 * word-at-a-time fast paths elsewhere in this header.
 */
#define STRICT_ALIGNMENT 1
# if defined(__i386) || defined(__i386__) || \
defined(__x86_64) || defined(__x86_64__) || \
defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64) || \
defined(__aarch64__) || \
defined(__s390__) || defined(__s390x__)
# undef STRICT_ALIGNMENT
41 #if !defined(PEDANTIC) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
42 # if defined(__GNUC__) && __GNUC__>=2
43 # if defined(__x86_64) || defined(__x86_64__)
44 # define BSWAP8(x) ({ u64 ret_=(x); \
46 : "+r"(ret_)); ret_; })
47 # define BSWAP4(x) ({ u32 ret_=(x); \
49 : "+r"(ret_)); ret_; })
50 # elif (defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)
51 # define BSWAP8(x) ({ u32 lo_=(u64)(x)>>32,hi_=(x); \
52 asm ("bswapl %0; bswapl %1" \
53 : "+r"(hi_),"+r"(lo_)); \
55 # define BSWAP4(x) ({ u32 ret_=(x); \
57 : "+r"(ret_)); ret_; })
58 # elif defined(__aarch64__)
59 # if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
60 __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__
61 # define BSWAP8(x) ({ u64 ret_; \
63 : "=r"(ret_) : "r"(x)); ret_; })
64 # define BSWAP4(x) ({ u32 ret_; \
66 : "=r"(ret_) : "r"(x)); ret_; })
68 # elif (defined(__arm__) || defined(__arm)) && !defined(STRICT_ALIGNMENT)
69 # define BSWAP8(x) ({ u32 lo_=(u64)(x)>>32,hi_=(x); \
70 asm ("rev %0,%0; rev %1,%1" \
71 : "+r"(hi_),"+r"(lo_)); \
73 # define BSWAP4(x) ({ u32 ret_; \
75 : "=r"(ret_) : "r"((u32)(x))); \
77 # elif (defined(__riscv_zbb) || defined(__riscv_zbkb)) && __riscv_xlen == 64
78 # define BSWAP8(x) ({ u64 ret_=(x); \
80 : "+r"(ret_)); ret_; })
81 # define BSWAP4(x) ({ u32 ret_=(x); \
82 asm ("rev8 %0,%0; srli %0,%0,32"\
83 : "+&r"(ret_)); ret_; })
85 # elif defined(_MSC_VER)
88 # pragma intrinsic(_byteswap_uint64,_byteswap_ulong)
89 # define BSWAP8(x) _byteswap_uint64((u64)(x))
90 # define BSWAP4(x) _byteswap_ulong((u32)(x))
91 # elif defined(_M_IX86)
92 __inline u32 _bswap4(u32 val)
94 _asm mov eax, val _asm bswap eax}
95 # define BSWAP4(x) _bswap4(x)
/*
 * Fast path: when a 32-bit byte-swap primitive exists and the platform
 * tolerates misaligned accesses, a big-endian load/store is a single
 * swapped word access.
 * NOTE(review): the u32-pointer cast relies on the alignment whitelist
 * above; on other targets it would be a misaligned/aliasing violation.
 */
#if defined(BSWAP4) && !defined(STRICT_ALIGNMENT)
# define GETU32(p) BSWAP4(*(const u32 *)(p))
# define PUTU32(p,v) *(u32 *)(p) = BSWAP4(v)
/* Portable fallback: assemble/scatter the big-endian word one byte at a time. */
# define GETU32(p) ((u32)(p)[0]<<24|(u32)(p)[1]<<16|(u32)(p)[2]<<8|(u32)(p)[3])
# define PUTU32(p,v) ((p)[0]=(u8)((v)>>24),(p)[1]=(u8)((v)>>16),(p)[2]=(u8)((v)>>8),(p)[3]=(u8)(v))
106 /*- GCM definitions */ typedef struct {
/*
 * Function-pointer types for the GHASH implementation selected at run time:
 *  - gcm_init_fn:  precompute the 16-entry Htable from the hash key H
 *  - gcm_ghash_fn: fold len bytes at inp into the running digest Xi
 *  - gcm_gmult_fn: multiply Xi by H (via Htable) in GF(2^128)
 */
typedef void (*gcm_init_fn)(u128 Htable[16], const u64 H[2]);
typedef void (*gcm_ghash_fn)(u64 Xi[2], const u128 Htable[16], const u8 *inp, size_t len);
typedef void (*gcm_gmult_fn)(u64 Xi[2], const u128 Htable[16]);
113 struct gcm_funcs_st {
119 struct gcm128_context {
120 /* Following 6 names follow names in GCM specification */
125 size_t t[16 / sizeof(size_t)];
126 } Yi, EKi, EK0, len, Xi, H;
/*
 * Relative position of Yi, EKi, EK0, len, Xi, H and pre-computed Htable is
 * used in some assembler modules, i.e. don't change the order!
 */
132 struct gcm_funcs_st funcs;
133 unsigned int mres, ares;
136 #if !defined(OPENSSL_SMALL_FOOTPRINT)
137 unsigned char Xn[48];
/* GHASH functions */
/*
 * Generic 4-bit table-driven GHASH primitives matching the gcm_*_fn
 * typedefs above; used when no platform-specific implementation applies.
 */
void ossl_gcm_init_4bit(u128 Htable[16], const u64 H[2]);
void ossl_gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len);
void ossl_gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]);
/*
 * The maximum permitted number of cipher blocks per data unit in XTS mode.
 * Reference IEEE Std 1619-2018.
 */
#define XTS_MAX_BLOCKS_PER_DATA_UNIT (1<<20) /* 2^20 blocks per data unit */
153 struct xts128_context {
155 block128_f block1, block2;
/* XTS mode for SM4 algorithm specified by GB/T 17964-2021 */
/*
 * Process len bytes from inp to out using the GB/T 17964-2021 XTS variant.
 * iv carries the 16-byte tweak for this data unit; enc is non-zero for
 * encryption, zero for decryption.
 * NOTE(review): return convention (0 on success, -1 on bad length) is
 * presumed to match the sibling xts128 routines — confirm in the
 * implementation.
 */
int ossl_crypto_xts128gb_encrypt(const XTS128_CONTEXT *ctx,
const unsigned char iv[16],
const unsigned char *inp, unsigned char *out,
size_t len, int enc);
164 struct ccm128_context {
174 #ifndef OPENSSL_NO_OCB
/*
 * XOR two 16-byte OCB blocks lane-by-lane via the block's word-array
 * member `a`. Expands to a comma expression (not a statement), so it can
 * appear anywhere an expression is required. Requires properly aligned
 * operands; see ocb_block16_xor_misaligned below for the general case.
 */
# define ocb_block16_xor(in1,in2,out) \
( (out)->a[0]=(in1)->a[0]^(in2)->a[0], \
(out)->a[1]=(in1)->a[1]^(in2)->a[1] )
/*
 * On strict-alignment targets, potentially misaligned blocks must be
 * XORed byte-wise through ocb_block_xor (via the byte-array member `c`);
 * otherwise the word-wise macro above is safe for any pointer.
 */
# if STRICT_ALIGNMENT
# define ocb_block16_xor_misaligned(in1,in2,out) \
ocb_block_xor((in1)->c,(in2)->c,16,(out)->c)
# define ocb_block16_xor_misaligned ocb_block16_xor
190 struct ocb128_context {
191 /* Need both encrypt and decrypt key schedules for decryption */
196 ocb128_f stream; /* direction dependent */
197 /* Key dependent variables. Can be reused if key remains the same */
203 /* Must be reset for each session */
206 u64 blocks_processed;
207 OCB_BLOCK offset_aad;
213 #endif /* OPENSSL_NO_OCB */
215 #ifndef OPENSSL_NO_SIV
219 typedef union siv_block_u {
220 uint64_t word[SIV_LEN/sizeof(uint64_t)];
221 unsigned char byte[SIV_LEN];
224 struct siv128_context {
225 /* d stores intermediate results of S2V; it corresponds to D from the
226 pseudocode in section 2.4 of RFC 5297. */
229 EVP_CIPHER_CTX *cipher_ctx;
231 EVP_MAC_CTX *mac_ctx_init;
236 #endif /* OPENSSL_NO_SIV */