# define GCM_MUL(ctx) gcm_gmult_4bit(ctx->Xi.u,ctx->Htable)
# if defined(GHASH_ASM) || !defined(OPENSSL_SMALL_FOOTPRINT)
-# define GHASH(ctx,in,len) gcm_ghash_4bit((ctx)->Xi.u,(ctx)->Htable,in,len)
+# define GHASH(ctx,in,len) ctx->funcs.ghash((ctx)->Xi.u,(ctx)->Htable,in,len)
/*
 * GHASH_CHUNK is a "stride parameter" whose purpose is to mitigate cache
 * thrashing. In other words, the idea is to hash data while it's still in L1 cache
#ifdef GCM_FUNCREF_4BIT
# undef GCM_MUL
# define GCM_MUL(ctx) (*gcm_gmult_p)(ctx->Xi.u,ctx->Htable)
-# ifdef GHASH
-# undef GHASH
-# define GHASH(ctx,in,len) (*gcm_ghash_p)(ctx->Xi.u,ctx->Htable,in,len)
-# endif
#endif
static void gcm_get_funcs(struct gcm_funcs_st *ctx)
u64 alen = ctx->len.u[0];
#ifdef GCM_FUNCREF_4BIT
gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult;
-# ifdef GHASH
- gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash;
-# endif
#endif
if (ctx->len.u[1])
void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult;
-# if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
- gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash;
-# endif
#endif
mlen += len;
void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult;
-# if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
- gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash;
-# endif
#endif
mlen += len;
void *key = ctx->key;
# ifdef GCM_FUNCREF_4BIT
gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult;
-# ifdef GHASH
- gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash;
-# endif
# endif
mlen += len;
void *key = ctx->key;
# ifdef GCM_FUNCREF_4BIT
gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult;
-# ifdef GHASH
- gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash;
-# endif
# endif
mlen += len;
u64 clen = ctx->len.u[1] << 3;
#ifdef GCM_FUNCREF_4BIT
gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult;
-# if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
- gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash;
-# endif
#endif
#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)