Compile ccm128.c, move some structures to modes_lcl.h, and add prototypes.
[openssl.git] / crypto / modes / modes_lcl.h
index 12368fb..4dab6a6 100644 (file)
@@ -29,7 +29,10 @@ typedef unsigned char u8;
 #if defined(__i386)    || defined(__i386__)    || \
     defined(__x86_64)  || defined(__x86_64__)  || \
     defined(_M_IX86)   || defined(_M_AMD64)    || defined(_M_X64) || \
-    defined(__s390__)  || defined(__s390x__)
+    defined(__s390__)  || defined(__s390x__)   || \
+    ( (defined(__arm__)        || defined(__arm)) && \
+      (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
+       defined(__ARM_ARCH_7R__)        || defined(__ARM_ARCH_7M__)) )
 # undef STRICT_ALIGNMENT
 #endif
 
@@ -37,19 +40,28 @@ typedef unsigned char u8;
 #if defined(__GNUC__) && __GNUC__>=2
 # if defined(__x86_64) || defined(__x86_64__)
 #  define BSWAP8(x) ({ u64 ret=(x);                    \
-                       asm volatile ("bswapq %0"       \
+                       asm ("bswapq %0"                \
                        : "+r"(ret));   ret;            })
 #  define BSWAP4(x) ({ u32 ret=(x);                    \
-                       asm volatile ("bswapl %0"       \
+                       asm ("bswapl %0"                \
                        : "+r"(ret));   ret;            })
 # elif (defined(__i386) || defined(__i386__))
 #  define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x);     \
-                       asm volatile ("bswapl %0; bswapl %1"    \
+                       asm ("bswapl %0; bswapl %1"     \
                        : "+r"(hi),"+r"(lo));           \
                        (u64)hi<<32|lo;                 })
 #  define BSWAP4(x) ({ u32 ret=(x);                    \
-                       asm volatile ("bswapl %0"       \
+                       asm ("bswapl %0"                \
                        : "+r"(ret));   ret;            })
+# elif (defined(__arm__) || defined(__arm)) && !defined(STRICT_ALIGNMENT)
+#  define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x);     \
+                       asm ("rev %0,%0; rev %1,%1"     \
+                       : "+r"(hi),"+r"(lo));           \
+                       (u64)hi<<32|lo;                 })
+#  define BSWAP4(x) ({ u32 ret;                        \
+                       asm ("rev %0,%1"                \
+                       : "=r"(ret) : "r"((u32)(x)));   \
+                       ret;                            })
 # endif
 #elif defined(_MSC_VER)
 # if _MSC_VER>=1300
@@ -73,3 +85,47 @@ typedef unsigned char u8;
 #define GETU32(p)      ((u32)(p)[0]<<24|(u32)(p)[1]<<16|(u32)(p)[2]<<8|(u32)(p)[3])
 #define PUTU32(p,v)    ((p)[0]=(u8)((v)>>24),(p)[1]=(u8)((v)>>16),(p)[2]=(u8)((v)>>8),(p)[3]=(u8)(v))
 #endif
+
+/* GCM definitions */
+
+typedef struct { u64 hi,lo; } u128;
+
+#ifdef TABLE_BITS
+#undef TABLE_BITS
+#endif
+/*
+ * Even though permitted values for TABLE_BITS are 8, 4 and 1, it should
+ * never be set to 8 [or 1]. For further information see gcm128.c.
+ */
+#define        TABLE_BITS 4
+
+struct gcm128_context {
+       /* Following 6 names follow names in GCM specification */
+       union { u64 u[2]; u32 d[4]; u8 c[16]; } Yi,EKi,EK0,len,
+                                               Xi,H;
+       /* Relative position of Xi, H and pre-computed Htable is used
+        * in some assembler modules, i.e. don't change the order! */
+#if TABLE_BITS==8
+       u128 Htable[256];
+#else
+       u128 Htable[16];
+       void (*gmult)(u64 Xi[2],const u128 Htable[16]);
+       void (*ghash)(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
+#endif
+       unsigned int mres, ares;
+       block128_f block;
+       void *key;
+};
+
+struct xts128_context {
+       void      *key1, *key2;
+       block128_f block1,block2;
+};
+
+struct ccm128_context {
+       union { u64 u[2]; u8 c[16]; } nonce, cmac;
+       u64 blocks;
+       block128_f block;
+       void *key;
+};
+