X-Git-Url: https://git.openssl.org/gitweb/?p=openssl.git;a=blobdiff_plain;f=crypto%2Faes%2Fasm%2Faes-armv4.pl;h=55b6e04b676d65cc6eb992721a2d1e30230b68e5;hp=b4049d5d72ea1b88179e66092effabe71a3e1795;hb=8ca28da0a798d2289b71b750dad191066600c166;hpb=14b1d089b683fb45fc2ecb90d02ad7b8d163a536 diff --git a/crypto/aes/asm/aes-armv4.pl b/crypto/aes/asm/aes-armv4.pl index b4049d5d72..55b6e04b67 100644 --- a/crypto/aes/asm/aes-armv4.pl +++ b/crypto/aes/asm/aes-armv4.pl @@ -10,13 +10,30 @@ # AES for ARMv4 # January 2007. - -# Code uses single 1K S-box and >2 times faster than code compiled -# with gcc-3.4.1. This is thanks to unique feature of ARMv4 ISA, which +# +# Code uses single 1K S-box and is >2 times faster than code generated +# by gcc-3.4.1. This is thanks to unique feature of ARMv4 ISA, which # allows to merge logical or arithmetic operation with shift or rotate # in one instruction and emit combined result every cycle. The module # is endian-neutral. The performance is ~42 cycles/byte for 128-bit -# key. +# key [on single-issue Xscale PXA250 core]. + +# May 2007. +# +# AES_set_[en|de]crypt_key is added. + +# July 2010. +# +# Rescheduling for dual-issue pipeline resulted in 12% improvement on +# Cortex A8 core and ~25 cycles per byte processed with 128-bit key. + +# February 2011. +# +# Profiler-assisted and platform-specific optimization resulted in 16% +# improvement on Cortex A8 core and ~21.5 cycles per byte. + +while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} +open STDOUT,">$output"; $s0="r0"; $s1="r1"; @@ -34,6 +51,7 @@ $key="r11"; $rounds="r12"; $code=<<___; +#include "arm_arch.h" .text .code 32 @@ -104,6 +122,43 @@ AES_Te: .word 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8 .word 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11 .word 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a +@ Te4[256] +.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5 +.byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76 +.byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0 +.byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0 +.byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc +.byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15 +.byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a +.byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75 +.byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0 +.byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84 +.byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b +.byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf +.byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85 +.byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8 +.byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5 +.byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2 +.byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17 +.byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73 +.byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88 +.byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb +.byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c +.byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79 +.byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9 +.byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08 +.byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6 +.byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a +.byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e +.byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e +.byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94 +.byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf +.byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68 +.byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 
0xbb, 0x16 +@ rcon[] +.word 0x01000000, 0x02000000, 0x04000000, 0x08000000 +.word 0x10000000, 0x20000000, 0x40000000, 0x80000000 +.word 0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0 .size AES_Te,.-AES_Te @ void AES_encrypt(const unsigned char *in, unsigned char *out, @@ -116,215 +171,609 @@ AES_encrypt: stmdb sp!,{r1,r4-r12,lr} mov $rounds,r0 @ inp mov $key,r2 - sub $tbl,r3,#1024 @ Te + sub $tbl,r3,#AES_encrypt-AES_Te @ Te +#if __ARM_ARCH__<7 ldrb $s0,[$rounds,#3] @ load input data in endian-neutral ldrb $t1,[$rounds,#2] @ manner... ldrb $t2,[$rounds,#1] ldrb $t3,[$rounds,#0] orr $s0,$s0,$t1,lsl#8 - orr $s0,$s0,$t2,lsl#16 - orr $s0,$s0,$t3,lsl#24 ldrb $s1,[$rounds,#7] + orr $s0,$s0,$t2,lsl#16 ldrb $t1,[$rounds,#6] + orr $s0,$s0,$t3,lsl#24 ldrb $t2,[$rounds,#5] ldrb $t3,[$rounds,#4] orr $s1,$s1,$t1,lsl#8 - orr $s1,$s1,$t2,lsl#16 - orr $s1,$s1,$t3,lsl#24 ldrb $s2,[$rounds,#11] + orr $s1,$s1,$t2,lsl#16 ldrb $t1,[$rounds,#10] + orr $s1,$s1,$t3,lsl#24 ldrb $t2,[$rounds,#9] ldrb $t3,[$rounds,#8] orr $s2,$s2,$t1,lsl#8 - orr $s2,$s2,$t2,lsl#16 - orr $s2,$s2,$t3,lsl#24 ldrb $s3,[$rounds,#15] + orr $s2,$s2,$t2,lsl#16 ldrb $t1,[$rounds,#14] + orr $s2,$s2,$t3,lsl#24 ldrb $t2,[$rounds,#13] ldrb $t3,[$rounds,#12] orr $s3,$s3,$t1,lsl#8 orr $s3,$s3,$t2,lsl#16 orr $s3,$s3,$t3,lsl#24 - +#else + ldr $s0,[$rounds,#0] + ldr $s1,[$rounds,#4] + ldr $s2,[$rounds,#8] + ldr $s3,[$rounds,#12] +#ifdef __ARMEL__ + rev $s0,$s0 + rev $s1,$s1 + rev $s2,$s2 + rev $s3,$s3 +#endif +#endif bl _armv4_AES_encrypt ldr $rounds,[sp],#4 @ pop out +#if __ARM_ARCH__>=7 +#ifdef __ARMEL__ + rev $s0,$s0 + rev $s1,$s1 + rev $s2,$s2 + rev $s3,$s3 +#endif + str $s0,[$rounds,#0] + str $s1,[$rounds,#4] + str $s2,[$rounds,#8] + str $s3,[$rounds,#12] +#else mov $t1,$s0,lsr#24 @ write output in endian-neutral mov $t2,$s0,lsr#16 @ manner... mov $t3,$s0,lsr#8 strb $t1,[$rounds,#0] strb $t2,[$rounds,#1] - strb $t3,[$rounds,#2] - strb $s0,[$rounds,#3] mov $t1,$s1,lsr#24 + strb $t3,[$rounds,#2] mov $t2,$s1,lsr#16 + strb $s0,[$rounds,#3] mov $t3,$s1,lsr#8 strb $t1,[$rounds,#4] strb $t2,[$rounds,#5] - strb $t3,[$rounds,#6] - strb $s1,[$rounds,#7] mov $t1,$s2,lsr#24 + strb $t3,[$rounds,#6] mov $t2,$s2,lsr#16 + strb $s1,[$rounds,#7] mov $t3,$s2,lsr#8 strb $t1,[$rounds,#8] strb $t2,[$rounds,#9] - strb $t3,[$rounds,#10] - strb $s2,[$rounds,#11] mov $t1,$s3,lsr#24 + strb $t3,[$rounds,#10] mov $t2,$s3,lsr#16 + strb $s2,[$rounds,#11] mov $t3,$s3,lsr#8 strb $t1,[$rounds,#12] strb $t2,[$rounds,#13] strb $t3,[$rounds,#14] strb $s3,[$rounds,#15] - +#endif +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r12,pc} +#else ldmia sp!,{r4-r12,lr} tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet bx lr @ interoperable with Thumb ISA:-) +#endif .size AES_encrypt,.-AES_encrypt .type _armv4_AES_encrypt,%function .align 2 _armv4_AES_encrypt: str lr,[sp,#-4]! 
@ push lr - ldr $t1,[$key],#16 - ldr $t2,[$key,#-12] - ldr $t3,[$key,#-8] - ldr $i1,[$key,#-4] - ldr $rounds,[$key,#240-16] + ldmia $key!,{$t1-$i1} eor $s0,$s0,$t1 + ldr $rounds,[$key,#240-16] eor $s1,$s1,$t2 eor $s2,$s2,$t3 eor $s3,$s3,$i1 sub $rounds,$rounds,#1 mov lr,#255 -.Lenc_loop: + and $i1,lr,$s0 and $i2,lr,$s0,lsr#8 and $i3,lr,$s0,lsr#16 - and $i1,lr,$s0 mov $s0,$s0,lsr#24 +.Lenc_loop: ldr $t1,[$tbl,$i1,lsl#2] @ Te3[s0>>0] - ldr $s0,[$tbl,$s0,lsl#2] @ Te0[s0>>24] - ldr $t2,[$tbl,$i2,lsl#2] @ Te2[s0>>8] - ldr $t3,[$tbl,$i3,lsl#2] @ Te1[s0>>16] - and $i1,lr,$s1,lsr#16 @ i0 + ldr $t2,[$tbl,$i2,lsl#2] @ Te2[s0>>8] and $i2,lr,$s1 + ldr $t3,[$tbl,$i3,lsl#2] @ Te1[s0>>16] and $i3,lr,$s1,lsr#8 + ldr $s0,[$tbl,$s0,lsl#2] @ Te0[s0>>24] mov $s1,$s1,lsr#24 + ldr $i1,[$tbl,$i1,lsl#2] @ Te1[s1>>16] - ldr $s1,[$tbl,$s1,lsl#2] @ Te0[s1>>24] ldr $i2,[$tbl,$i2,lsl#2] @ Te3[s1>>0] ldr $i3,[$tbl,$i3,lsl#2] @ Te2[s1>>8] eor $s0,$s0,$i1,ror#8 - eor $s1,$s1,$t1,ror#24 - eor $t2,$t2,$i2,ror#8 - eor $t3,$t3,$i3,ror#8 - + ldr $s1,[$tbl,$s1,lsl#2] @ Te0[s1>>24] and $i1,lr,$s2,lsr#8 @ i0 + eor $t2,$t2,$i2,ror#8 and $i2,lr,$s2,lsr#16 @ i1 + eor $t3,$t3,$i3,ror#8 and $i3,lr,$s2 - mov $s2,$s2,lsr#24 ldr $i1,[$tbl,$i1,lsl#2] @ Te2[s2>>8] + eor $s1,$s1,$t1,ror#24 ldr $i2,[$tbl,$i2,lsl#2] @ Te1[s2>>16] - ldr $s2,[$tbl,$s2,lsl#2] @ Te0[s2>>24] + mov $s2,$s2,lsr#24 + ldr $i3,[$tbl,$i3,lsl#2] @ Te3[s2>>0] eor $s0,$s0,$i1,ror#16 - eor $s1,$s1,$i2,ror#8 - eor $s2,$s2,$t2,ror#16 - eor $t3,$t3,$i3,ror#16 - + ldr $s2,[$tbl,$s2,lsl#2] @ Te0[s2>>24] and $i1,lr,$s3 @ i0 + eor $s1,$s1,$i2,ror#8 and $i2,lr,$s3,lsr#8 @ i1 + eor $t3,$t3,$i3,ror#16 and $i3,lr,$s3,lsr#16 @ i2 - mov $s3,$s3,lsr#24 ldr $i1,[$tbl,$i1,lsl#2] @ Te3[s3>>0] + eor $s2,$s2,$t2,ror#16 ldr $i2,[$tbl,$i2,lsl#2] @ Te2[s3>>8] + mov $s3,$s3,lsr#24 + ldr $i3,[$tbl,$i3,lsl#2] @ Te1[s3>>16] - ldr $s3,[$tbl,$s3,lsl#2] @ Te0[s3>>24] eor $s0,$s0,$i1,ror#24 + ldr $i1,[$key],#16 eor $s1,$s1,$i2,ror#16 + ldr $s3,[$tbl,$s3,lsl#2] @ Te0[s3>>24] eor $s2,$s2,$i3,ror#8 + ldr $t1,[$key,#-12] eor $s3,$s3,$t3,ror#8 - ldr $t1,[$key],#16 - ldr $t2,[$key,#-12] - ldr $t3,[$key,#-8] - ldr $i1,[$key,#-4] - eor $s0,$s0,$t1 - eor $s1,$s1,$t2 - eor $s2,$s2,$t3 - eor $s3,$s3,$i1 + ldr $t2,[$key,#-8] + eor $s0,$s0,$i1 + ldr $t3,[$key,#-4] + and $i1,lr,$s0 + eor $s1,$s1,$t1 + and $i2,lr,$s0,lsr#8 + eor $s2,$s2,$t2 + and $i3,lr,$s0,lsr#16 + eor $s3,$s3,$t3 + mov $s0,$s0,lsr#24 subs $rounds,$rounds,#1 bne .Lenc_loop add $tbl,$tbl,#2 - and $i1,lr,$s0 - and $i2,lr,$s0,lsr#8 - and $i3,lr,$s0,lsr#16 - mov $s0,$s0,lsr#24 ldrb $t1,[$tbl,$i1,lsl#2] @ Te4[s0>>0] - ldrb $s0,[$tbl,$s0,lsl#2] @ Te4[s0>>24] - ldrb $t2,[$tbl,$i2,lsl#2] @ Te4[s0>>8] - ldrb $t3,[$tbl,$i3,lsl#2] @ Te4[s0>>16] - and $i1,lr,$s1,lsr#16 @ i0 + ldrb $t2,[$tbl,$i2,lsl#2] @ Te4[s0>>8] and $i2,lr,$s1 + ldrb $t3,[$tbl,$i3,lsl#2] @ Te4[s0>>16] and $i3,lr,$s1,lsr#8 + ldrb $s0,[$tbl,$s0,lsl#2] @ Te4[s0>>24] mov $s1,$s1,lsr#24 + ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s1>>16] - ldrb $s1,[$tbl,$s1,lsl#2] @ Te4[s1>>24] ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s1>>0] ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s1>>8] eor $s0,$i1,$s0,lsl#8 - eor $s1,$t1,$s1,lsl#24 - eor $t2,$i2,$t2,lsl#8 - eor $t3,$i3,$t3,lsl#8 - + ldrb $s1,[$tbl,$s1,lsl#2] @ Te4[s1>>24] and $i1,lr,$s2,lsr#8 @ i0 + eor $t2,$i2,$t2,lsl#8 and $i2,lr,$s2,lsr#16 @ i1 + eor $t3,$i3,$t3,lsl#8 and $i3,lr,$s2 - mov $s2,$s2,lsr#24 ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s2>>8] + eor $s1,$t1,$s1,lsl#24 ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s2>>16] - ldrb $s2,[$tbl,$s2,lsl#2] @ Te4[s2>>24] + mov $s2,$s2,lsr#24 + ldrb 
$i3,[$tbl,$i3,lsl#2] @ Te4[s2>>0] eor $s0,$i1,$s0,lsl#8 - eor $s1,$s1,$i2,lsl#16 - eor $s2,$t2,$s2,lsl#24 - eor $t3,$i3,$t3,lsl#8 - + ldrb $s2,[$tbl,$s2,lsl#2] @ Te4[s2>>24] and $i1,lr,$s3 @ i0 + eor $s1,$s1,$i2,lsl#16 and $i2,lr,$s3,lsr#8 @ i1 + eor $t3,$i3,$t3,lsl#8 and $i3,lr,$s3,lsr#16 @ i2 - mov $s3,$s3,lsr#24 ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s3>>0] + eor $s2,$t2,$s2,lsl#24 ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s3>>8] + mov $s3,$s3,lsr#24 + ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s3>>16] - ldrb $s3,[$tbl,$s3,lsl#2] @ Te4[s3>>24] eor $s0,$i1,$s0,lsl#8 + ldr $i1,[$key,#0] + ldrb $s3,[$tbl,$s3,lsl#2] @ Te4[s3>>24] eor $s1,$s1,$i2,lsl#8 + ldr $t1,[$key,#4] eor $s2,$s2,$i3,lsl#16 + ldr $t2,[$key,#8] eor $s3,$t3,$s3,lsl#24 + ldr $t3,[$key,#12] - ldr lr,[sp],#4 @ pop lr - ldr $t1,[$key,#0] - ldr $t2,[$key,#4] - ldr $t3,[$key,#8] - ldr $i1,[$key,#12] - eor $s0,$s0,$t1 - eor $s1,$s1,$t2 - eor $s2,$s2,$t3 - eor $s3,$s3,$i1 + eor $s0,$s0,$i1 + eor $s1,$s1,$t1 + eor $s2,$s2,$t2 + eor $s3,$s3,$t3 sub $tbl,$tbl,#2 - mov pc,lr @ return + ldr pc,[sp],#4 @ pop and return .size _armv4_AES_encrypt,.-_armv4_AES_encrypt +.global AES_set_encrypt_key +.type AES_set_encrypt_key,%function +.align 5 +AES_set_encrypt_key: + sub r3,pc,#8 @ AES_set_encrypt_key + teq r0,#0 + moveq r0,#-1 + beq .Labrt + teq r2,#0 + moveq r0,#-1 + beq .Labrt + + teq r1,#128 + beq .Lok + teq r1,#192 + beq .Lok + teq r1,#256 + movne r0,#-1 + bne .Labrt + +.Lok: stmdb sp!,{r4-r12,lr} + sub $tbl,r3,#AES_set_encrypt_key-AES_Te-1024 @ Te4 + + mov $rounds,r0 @ inp + mov lr,r1 @ bits + mov $key,r2 @ key + +#if __ARM_ARCH__<7 + ldrb $s0,[$rounds,#3] @ load input data in endian-neutral + ldrb $t1,[$rounds,#2] @ manner... + ldrb $t2,[$rounds,#1] + ldrb $t3,[$rounds,#0] + orr $s0,$s0,$t1,lsl#8 + ldrb $s1,[$rounds,#7] + orr $s0,$s0,$t2,lsl#16 + ldrb $t1,[$rounds,#6] + orr $s0,$s0,$t3,lsl#24 + ldrb $t2,[$rounds,#5] + ldrb $t3,[$rounds,#4] + orr $s1,$s1,$t1,lsl#8 + ldrb $s2,[$rounds,#11] + orr $s1,$s1,$t2,lsl#16 + ldrb $t1,[$rounds,#10] + orr $s1,$s1,$t3,lsl#24 + ldrb $t2,[$rounds,#9] + ldrb $t3,[$rounds,#8] + orr $s2,$s2,$t1,lsl#8 + ldrb $s3,[$rounds,#15] + orr $s2,$s2,$t2,lsl#16 + ldrb $t1,[$rounds,#14] + orr $s2,$s2,$t3,lsl#24 + ldrb $t2,[$rounds,#13] + ldrb $t3,[$rounds,#12] + orr $s3,$s3,$t1,lsl#8 + str $s0,[$key],#16 + orr $s3,$s3,$t2,lsl#16 + str $s1,[$key,#-12] + orr $s3,$s3,$t3,lsl#24 + str $s2,[$key,#-8] + str $s3,[$key,#-4] +#else + ldr $s0,[$rounds,#0] + ldr $s1,[$rounds,#4] + ldr $s2,[$rounds,#8] + ldr $s3,[$rounds,#12] +#ifdef __ARMEL__ + rev $s0,$s0 + rev $s1,$s1 + rev $s2,$s2 + rev $s3,$s3 +#endif + str $s0,[$key],#16 + str $s1,[$key,#-12] + str $s2,[$key,#-8] + str $s3,[$key,#-4] +#endif + + teq lr,#128 + bne .Lnot128 + mov $rounds,#10 + str $rounds,[$key,#240-16] + add $t3,$tbl,#256 @ rcon + mov lr,#255 + +.L128_loop: + and $t2,lr,$s3,lsr#24 + and $i1,lr,$s3,lsr#16 + ldrb $t2,[$tbl,$t2] + and $i2,lr,$s3,lsr#8 + ldrb $i1,[$tbl,$i1] + and $i3,lr,$s3 + ldrb $i2,[$tbl,$i2] + orr $t2,$t2,$i1,lsl#24 + ldrb $i3,[$tbl,$i3] + orr $t2,$t2,$i2,lsl#16 + ldr $t1,[$t3],#4 @ rcon[i++] + orr $t2,$t2,$i3,lsl#8 + eor $t2,$t2,$t1 + eor $s0,$s0,$t2 @ rk[4]=rk[0]^... 
+ eor $s1,$s1,$s0 @ rk[5]=rk[1]^rk[4] + str $s0,[$key],#16 + eor $s2,$s2,$s1 @ rk[6]=rk[2]^rk[5] + str $s1,[$key,#-12] + eor $s3,$s3,$s2 @ rk[7]=rk[3]^rk[6] + str $s2,[$key,#-8] + subs $rounds,$rounds,#1 + str $s3,[$key,#-4] + bne .L128_loop + sub r2,$key,#176 + b .Ldone + +.Lnot128: +#if __ARM_ARCH__<7 + ldrb $i2,[$rounds,#19] + ldrb $t1,[$rounds,#18] + ldrb $t2,[$rounds,#17] + ldrb $t3,[$rounds,#16] + orr $i2,$i2,$t1,lsl#8 + ldrb $i3,[$rounds,#23] + orr $i2,$i2,$t2,lsl#16 + ldrb $t1,[$rounds,#22] + orr $i2,$i2,$t3,lsl#24 + ldrb $t2,[$rounds,#21] + ldrb $t3,[$rounds,#20] + orr $i3,$i3,$t1,lsl#8 + orr $i3,$i3,$t2,lsl#16 + str $i2,[$key],#8 + orr $i3,$i3,$t3,lsl#24 + str $i3,[$key,#-4] +#else + ldr $i2,[$rounds,#16] + ldr $i3,[$rounds,#20] +#ifdef __ARMEL__ + rev $i2,$i2 + rev $i3,$i3 +#endif + str $i2,[$key],#8 + str $i3,[$key,#-4] +#endif + + teq lr,#192 + bne .Lnot192 + mov $rounds,#12 + str $rounds,[$key,#240-24] + add $t3,$tbl,#256 @ rcon + mov lr,#255 + mov $rounds,#8 + +.L192_loop: + and $t2,lr,$i3,lsr#24 + and $i1,lr,$i3,lsr#16 + ldrb $t2,[$tbl,$t2] + and $i2,lr,$i3,lsr#8 + ldrb $i1,[$tbl,$i1] + and $i3,lr,$i3 + ldrb $i2,[$tbl,$i2] + orr $t2,$t2,$i1,lsl#24 + ldrb $i3,[$tbl,$i3] + orr $t2,$t2,$i2,lsl#16 + ldr $t1,[$t3],#4 @ rcon[i++] + orr $t2,$t2,$i3,lsl#8 + eor $i3,$t2,$t1 + eor $s0,$s0,$i3 @ rk[6]=rk[0]^... + eor $s1,$s1,$s0 @ rk[7]=rk[1]^rk[6] + str $s0,[$key],#24 + eor $s2,$s2,$s1 @ rk[8]=rk[2]^rk[7] + str $s1,[$key,#-20] + eor $s3,$s3,$s2 @ rk[9]=rk[3]^rk[8] + str $s2,[$key,#-16] + subs $rounds,$rounds,#1 + str $s3,[$key,#-12] + subeq r2,$key,#216 + beq .Ldone + + ldr $i1,[$key,#-32] + ldr $i2,[$key,#-28] + eor $i1,$i1,$s3 @ rk[10]=rk[4]^rk[9] + eor $i3,$i2,$i1 @ rk[11]=rk[5]^rk[10] + str $i1,[$key,#-8] + str $i3,[$key,#-4] + b .L192_loop + +.Lnot192: +#if __ARM_ARCH__<7 + ldrb $i2,[$rounds,#27] + ldrb $t1,[$rounds,#26] + ldrb $t2,[$rounds,#25] + ldrb $t3,[$rounds,#24] + orr $i2,$i2,$t1,lsl#8 + ldrb $i3,[$rounds,#31] + orr $i2,$i2,$t2,lsl#16 + ldrb $t1,[$rounds,#30] + orr $i2,$i2,$t3,lsl#24 + ldrb $t2,[$rounds,#29] + ldrb $t3,[$rounds,#28] + orr $i3,$i3,$t1,lsl#8 + orr $i3,$i3,$t2,lsl#16 + str $i2,[$key],#8 + orr $i3,$i3,$t3,lsl#24 + str $i3,[$key,#-4] +#else + ldr $i2,[$rounds,#24] + ldr $i3,[$rounds,#28] +#ifdef __ARMEL__ + rev $i2,$i2 + rev $i3,$i3 +#endif + str $i2,[$key],#8 + str $i3,[$key,#-4] +#endif + + mov $rounds,#14 + str $rounds,[$key,#240-32] + add $t3,$tbl,#256 @ rcon + mov lr,#255 + mov $rounds,#7 + +.L256_loop: + and $t2,lr,$i3,lsr#24 + and $i1,lr,$i3,lsr#16 + ldrb $t2,[$tbl,$t2] + and $i2,lr,$i3,lsr#8 + ldrb $i1,[$tbl,$i1] + and $i3,lr,$i3 + ldrb $i2,[$tbl,$i2] + orr $t2,$t2,$i1,lsl#24 + ldrb $i3,[$tbl,$i3] + orr $t2,$t2,$i2,lsl#16 + ldr $t1,[$t3],#4 @ rcon[i++] + orr $t2,$t2,$i3,lsl#8 + eor $i3,$t2,$t1 + eor $s0,$s0,$i3 @ rk[8]=rk[0]^... + eor $s1,$s1,$s0 @ rk[9]=rk[1]^rk[8] + str $s0,[$key],#32 + eor $s2,$s2,$s1 @ rk[10]=rk[2]^rk[9] + str $s1,[$key,#-28] + eor $s3,$s3,$s2 @ rk[11]=rk[3]^rk[10] + str $s2,[$key,#-24] + subs $rounds,$rounds,#1 + str $s3,[$key,#-20] + subeq r2,$key,#256 + beq .Ldone + + and $t2,lr,$s3 + and $i1,lr,$s3,lsr#8 + ldrb $t2,[$tbl,$t2] + and $i2,lr,$s3,lsr#16 + ldrb $i1,[$tbl,$i1] + and $i3,lr,$s3,lsr#24 + ldrb $i2,[$tbl,$i2] + orr $t2,$t2,$i1,lsl#8 + ldrb $i3,[$tbl,$i3] + orr $t2,$t2,$i2,lsl#16 + ldr $t1,[$key,#-48] + orr $t2,$t2,$i3,lsl#24 + + ldr $i1,[$key,#-44] + ldr $i2,[$key,#-40] + eor $t1,$t1,$t2 @ rk[12]=rk[4]^... 
+ ldr $i3,[$key,#-36] + eor $i1,$i1,$t1 @ rk[13]=rk[5]^rk[12] + str $t1,[$key,#-16] + eor $i2,$i2,$i1 @ rk[14]=rk[6]^rk[13] + str $i1,[$key,#-12] + eor $i3,$i3,$i2 @ rk[15]=rk[7]^rk[14] + str $i2,[$key,#-8] + str $i3,[$key,#-4] + b .L256_loop + +.Ldone: mov r0,#0 + ldmia sp!,{r4-r12,lr} +.Labrt: tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + bx lr @ interoperable with Thumb ISA:-) +.size AES_set_encrypt_key,.-AES_set_encrypt_key + +.global AES_set_decrypt_key +.type AES_set_decrypt_key,%function +.align 5 +AES_set_decrypt_key: + str lr,[sp,#-4]! @ push lr + bl AES_set_encrypt_key + teq r0,#0 + ldrne lr,[sp],#4 @ pop lr + bne .Labrt + + stmdb sp!,{r4-r12} + + ldr $rounds,[r2,#240] @ AES_set_encrypt_key preserves r2, + mov $key,r2 @ which is AES_KEY *key + mov $i1,r2 + add $i2,r2,$rounds,lsl#4 + +.Linv: ldr $s0,[$i1] + ldr $s1,[$i1,#4] + ldr $s2,[$i1,#8] + ldr $s3,[$i1,#12] + ldr $t1,[$i2] + ldr $t2,[$i2,#4] + ldr $t3,[$i2,#8] + ldr $i3,[$i2,#12] + str $s0,[$i2],#-16 + str $s1,[$i2,#16+4] + str $s2,[$i2,#16+8] + str $s3,[$i2,#16+12] + str $t1,[$i1],#16 + str $t2,[$i1,#-12] + str $t3,[$i1,#-8] + str $i3,[$i1,#-4] + teq $i1,$i2 + bne .Linv +___ +$mask80=$i1; +$mask1b=$i2; +$mask7f=$i3; +$code.=<<___; + ldr $s0,[$key,#16]! @ prefetch tp1 + mov $mask80,#0x80 + mov $mask1b,#0x1b + orr $mask80,$mask80,#0x8000 + orr $mask1b,$mask1b,#0x1b00 + orr $mask80,$mask80,$mask80,lsl#16 + orr $mask1b,$mask1b,$mask1b,lsl#16 + sub $rounds,$rounds,#1 + mvn $mask7f,$mask80 + mov $rounds,$rounds,lsl#2 @ (rounds-1)*4 + +.Lmix: and $t1,$s0,$mask80 + and $s1,$s0,$mask7f + sub $t1,$t1,$t1,lsr#7 + and $t1,$t1,$mask1b + eor $s1,$t1,$s1,lsl#1 @ tp2 + + and $t1,$s1,$mask80 + and $s2,$s1,$mask7f + sub $t1,$t1,$t1,lsr#7 + and $t1,$t1,$mask1b + eor $s2,$t1,$s2,lsl#1 @ tp4 + + and $t1,$s2,$mask80 + and $s3,$s2,$mask7f + sub $t1,$t1,$t1,lsr#7 + and $t1,$t1,$mask1b + eor $s3,$t1,$s3,lsl#1 @ tp8 + + eor $t1,$s1,$s2 + eor $t2,$s0,$s3 @ tp9 + eor $t1,$t1,$s3 @ tpe + eor $t1,$t1,$s1,ror#24 + eor $t1,$t1,$t2,ror#24 @ ^= ROTATE(tpb=tp9^tp2,8) + eor $t1,$t1,$s2,ror#16 + eor $t1,$t1,$t2,ror#16 @ ^= ROTATE(tpd=tp9^tp4,16) + eor $t1,$t1,$t2,ror#8 @ ^= ROTATE(tp9,24) + + ldr $s0,[$key,#4] @ prefetch tp1 + str $t1,[$key],#4 + subs $rounds,$rounds,#1 + bne .Lmix + + mov r0,#0 +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r12,pc} +#else + ldmia sp!,{r4-r12,lr} + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + bx lr @ interoperable with Thumb ISA:-) +#endif +.size AES_set_decrypt_key,.-AES_set_decrypt_key + .type AES_Td,%object .align 5 AES_Td: @@ -392,7 +841,7 @@ AES_Td: .word 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541 .word 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190 .word 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742 - +@ Td4[256] .byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38 .byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb .byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87 @@ -437,220 +886,248 @@ AES_decrypt: stmdb sp!,{r1,r4-r12,lr} mov $rounds,r0 @ inp mov $key,r2 - sub $tbl,r3,#1280 @ Td + sub $tbl,r3,#AES_decrypt-AES_Td @ Td +#if __ARM_ARCH__<7 ldrb $s0,[$rounds,#3] @ load input data in endian-neutral ldrb $t1,[$rounds,#2] @ manner... 
ldrb $t2,[$rounds,#1] ldrb $t3,[$rounds,#0] orr $s0,$s0,$t1,lsl#8 - orr $s0,$s0,$t2,lsl#16 - orr $s0,$s0,$t3,lsl#24 ldrb $s1,[$rounds,#7] + orr $s0,$s0,$t2,lsl#16 ldrb $t1,[$rounds,#6] + orr $s0,$s0,$t3,lsl#24 ldrb $t2,[$rounds,#5] ldrb $t3,[$rounds,#4] orr $s1,$s1,$t1,lsl#8 - orr $s1,$s1,$t2,lsl#16 - orr $s1,$s1,$t3,lsl#24 ldrb $s2,[$rounds,#11] + orr $s1,$s1,$t2,lsl#16 ldrb $t1,[$rounds,#10] + orr $s1,$s1,$t3,lsl#24 ldrb $t2,[$rounds,#9] ldrb $t3,[$rounds,#8] orr $s2,$s2,$t1,lsl#8 - orr $s2,$s2,$t2,lsl#16 - orr $s2,$s2,$t3,lsl#24 ldrb $s3,[$rounds,#15] + orr $s2,$s2,$t2,lsl#16 ldrb $t1,[$rounds,#14] + orr $s2,$s2,$t3,lsl#24 ldrb $t2,[$rounds,#13] ldrb $t3,[$rounds,#12] orr $s3,$s3,$t1,lsl#8 orr $s3,$s3,$t2,lsl#16 orr $s3,$s3,$t3,lsl#24 - +#else + ldr $s0,[$rounds,#0] + ldr $s1,[$rounds,#4] + ldr $s2,[$rounds,#8] + ldr $s3,[$rounds,#12] +#ifdef __ARMEL__ + rev $s0,$s0 + rev $s1,$s1 + rev $s2,$s2 + rev $s3,$s3 +#endif +#endif bl _armv4_AES_decrypt ldr $rounds,[sp],#4 @ pop out +#if __ARM_ARCH__>=7 +#ifdef __ARMEL__ + rev $s0,$s0 + rev $s1,$s1 + rev $s2,$s2 + rev $s3,$s3 +#endif + str $s0,[$rounds,#0] + str $s1,[$rounds,#4] + str $s2,[$rounds,#8] + str $s3,[$rounds,#12] +#else mov $t1,$s0,lsr#24 @ write output in endian-neutral mov $t2,$s0,lsr#16 @ manner... mov $t3,$s0,lsr#8 strb $t1,[$rounds,#0] strb $t2,[$rounds,#1] - strb $t3,[$rounds,#2] - strb $s0,[$rounds,#3] mov $t1,$s1,lsr#24 + strb $t3,[$rounds,#2] mov $t2,$s1,lsr#16 + strb $s0,[$rounds,#3] mov $t3,$s1,lsr#8 strb $t1,[$rounds,#4] strb $t2,[$rounds,#5] - strb $t3,[$rounds,#6] - strb $s1,[$rounds,#7] mov $t1,$s2,lsr#24 + strb $t3,[$rounds,#6] mov $t2,$s2,lsr#16 + strb $s1,[$rounds,#7] mov $t3,$s2,lsr#8 strb $t1,[$rounds,#8] strb $t2,[$rounds,#9] - strb $t3,[$rounds,#10] - strb $s2,[$rounds,#11] mov $t1,$s3,lsr#24 + strb $t3,[$rounds,#10] mov $t2,$s3,lsr#16 + strb $s2,[$rounds,#11] mov $t3,$s3,lsr#8 strb $t1,[$rounds,#12] strb $t2,[$rounds,#13] strb $t3,[$rounds,#14] strb $s3,[$rounds,#15] - +#endif +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r12,pc} +#else ldmia sp!,{r4-r12,lr} tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet bx lr @ interoperable with Thumb ISA:-) +#endif .size AES_decrypt,.-AES_decrypt .type _armv4_AES_decrypt,%function .align 2 _armv4_AES_decrypt: str lr,[sp,#-4]! 
@ push lr - ldr $t1,[$key],#16 - ldr $t2,[$key,#-12] - ldr $t3,[$key,#-8] - ldr $i1,[$key,#-4] - ldr $rounds,[$key,#240-16] + ldmia $key!,{$t1-$i1} eor $s0,$s0,$t1 + ldr $rounds,[$key,#240-16] eor $s1,$s1,$t2 eor $s2,$s2,$t3 eor $s3,$s3,$i1 sub $rounds,$rounds,#1 mov lr,#255 -.Ldec_loop: and $i1,lr,$s0,lsr#16 and $i2,lr,$s0,lsr#8 and $i3,lr,$s0 mov $s0,$s0,lsr#24 +.Ldec_loop: ldr $t1,[$tbl,$i1,lsl#2] @ Td1[s0>>16] - ldr $s0,[$tbl,$s0,lsl#2] @ Td0[s0>>24] - ldr $t2,[$tbl,$i2,lsl#2] @ Td2[s0>>8] - ldr $t3,[$tbl,$i3,lsl#2] @ Td3[s0>>0] - and $i1,lr,$s1 @ i0 + ldr $t2,[$tbl,$i2,lsl#2] @ Td2[s0>>8] and $i2,lr,$s1,lsr#16 + ldr $t3,[$tbl,$i3,lsl#2] @ Td3[s0>>0] and $i3,lr,$s1,lsr#8 + ldr $s0,[$tbl,$s0,lsl#2] @ Td0[s0>>24] mov $s1,$s1,lsr#24 + ldr $i1,[$tbl,$i1,lsl#2] @ Td3[s1>>0] - ldr $s1,[$tbl,$s1,lsl#2] @ Td0[s1>>24] ldr $i2,[$tbl,$i2,lsl#2] @ Td1[s1>>16] ldr $i3,[$tbl,$i3,lsl#2] @ Td2[s1>>8] eor $s0,$s0,$i1,ror#24 - eor $s1,$s1,$t1,ror#8 - eor $t2,$i2,$t2,ror#8 - eor $t3,$i3,$t3,ror#8 - + ldr $s1,[$tbl,$s1,lsl#2] @ Td0[s1>>24] and $i1,lr,$s2,lsr#8 @ i0 + eor $t2,$i2,$t2,ror#8 and $i2,lr,$s2 @ i1 + eor $t3,$i3,$t3,ror#8 and $i3,lr,$s2,lsr#16 - mov $s2,$s2,lsr#24 ldr $i1,[$tbl,$i1,lsl#2] @ Td2[s2>>8] + eor $s1,$s1,$t1,ror#8 ldr $i2,[$tbl,$i2,lsl#2] @ Td3[s2>>0] - ldr $s2,[$tbl,$s2,lsl#2] @ Td0[s2>>24] + mov $s2,$s2,lsr#24 + ldr $i3,[$tbl,$i3,lsl#2] @ Td1[s2>>16] eor $s0,$s0,$i1,ror#16 - eor $s1,$s1,$i2,ror#24 - eor $s2,$s2,$t2,ror#8 - eor $t3,$i3,$t3,ror#8 - + ldr $s2,[$tbl,$s2,lsl#2] @ Td0[s2>>24] and $i1,lr,$s3,lsr#16 @ i0 + eor $s1,$s1,$i2,ror#24 and $i2,lr,$s3,lsr#8 @ i1 + eor $t3,$i3,$t3,ror#8 and $i3,lr,$s3 @ i2 - mov $s3,$s3,lsr#24 ldr $i1,[$tbl,$i1,lsl#2] @ Td1[s3>>16] + eor $s2,$s2,$t2,ror#8 ldr $i2,[$tbl,$i2,lsl#2] @ Td2[s3>>8] + mov $s3,$s3,lsr#24 + ldr $i3,[$tbl,$i3,lsl#2] @ Td3[s3>>0] - ldr $s3,[$tbl,$s3,lsl#2] @ Td0[s3>>24] eor $s0,$s0,$i1,ror#8 + ldr $i1,[$key],#16 eor $s1,$s1,$i2,ror#16 + ldr $s3,[$tbl,$s3,lsl#2] @ Td0[s3>>24] eor $s2,$s2,$i3,ror#24 - eor $s3,$s3,$t3,ror#8 - ldr $t1,[$key],#16 - ldr $t2,[$key,#-12] - ldr $t3,[$key,#-8] - ldr $i1,[$key,#-4] - eor $s0,$s0,$t1 - eor $s1,$s1,$t2 - eor $s2,$s2,$t3 - eor $s3,$s3,$i1 + ldr $t1,[$key,#-12] + eor $s0,$s0,$i1 + ldr $t2,[$key,#-8] + eor $s3,$s3,$t3,ror#8 + ldr $t3,[$key,#-4] + and $i1,lr,$s0,lsr#16 + eor $s1,$s1,$t1 + and $i2,lr,$s0,lsr#8 + eor $s2,$s2,$t2 + and $i3,lr,$s0 + eor $s3,$s3,$t3 + mov $s0,$s0,lsr#24 subs $rounds,$rounds,#1 bne .Ldec_loop add $tbl,$tbl,#1024 - ldr $t1,[$tbl,#0] @ prefetch Td4 - ldr $t2,[$tbl,#32] - ldr $t3,[$tbl,#64] - ldr $i1,[$tbl,#96] - ldr $i2,[$tbl,#128] - ldr $i3,[$tbl,#160] - ldr $t1,[$tbl,#192] - ldr $t2,[$tbl,#224] + ldr $t2,[$tbl,#0] @ prefetch Td4 + ldr $t3,[$tbl,#32] + ldr $t1,[$tbl,#64] + ldr $t2,[$tbl,#96] + ldr $t3,[$tbl,#128] + ldr $t1,[$tbl,#160] + ldr $t2,[$tbl,#192] + ldr $t3,[$tbl,#224] - and $i1,lr,$s0,lsr#16 - and $i2,lr,$s0,lsr#8 - and $i3,lr,$s0 - ldrb $s0,[$tbl,$s0,lsr#24] @ Td4[s0>>24] + ldrb $s0,[$tbl,$s0] @ Td4[s0>>24] ldrb $t1,[$tbl,$i1] @ Td4[s0>>16] - ldrb $t2,[$tbl,$i2] @ Td4[s0>>8] - ldrb $t3,[$tbl,$i3] @ Td4[s0>>0] - and $i1,lr,$s1 @ i0 + ldrb $t2,[$tbl,$i2] @ Td4[s0>>8] and $i2,lr,$s1,lsr#16 + ldrb $t3,[$tbl,$i3] @ Td4[s0>>0] and $i3,lr,$s1,lsr#8 + ldrb $i1,[$tbl,$i1] @ Td4[s1>>0] ldrb $s1,[$tbl,$s1,lsr#24] @ Td4[s1>>24] ldrb $i2,[$tbl,$i2] @ Td4[s1>>16] - ldrb $i3,[$tbl,$i3] @ Td4[s1>>8] eor $s0,$i1,$s0,lsl#24 + ldrb $i3,[$tbl,$i3] @ Td4[s1>>8] eor $s1,$t1,$s1,lsl#8 - eor $t2,$t2,$i2,lsl#8 - eor $t3,$t3,$i3,lsl#8 - and $i1,lr,$s2,lsr#8 @ i0 + eor 
$t2,$t2,$i2,lsl#8
 and $i2,lr,$s2 @ i1
- and $i3,lr,$s2,lsr#16
 ldrb $i1,[$tbl,$i1] @ Td4[s2>>8]
+ eor $t3,$t3,$i3,lsl#8
 ldrb $i2,[$tbl,$i2] @ Td4[s2>>0]
+ and $i3,lr,$s2,lsr#16
+
 ldrb $s2,[$tbl,$s2,lsr#24] @ Td4[s2>>24]
- ldrb $i3,[$tbl,$i3] @ Td4[s2>>16]
 eor $s0,$s0,$i1,lsl#8
+ ldrb $i3,[$tbl,$i3] @ Td4[s2>>16]
 eor $s1,$i2,$s1,lsl#16
- eor $s2,$t2,$s2,lsl#16
- eor $t3,$t3,$i3,lsl#16
-
 and $i1,lr,$s3,lsr#16 @ i0
+ eor $s2,$t2,$s2,lsl#16
 and $i2,lr,$s3,lsr#8 @ i1
- and $i3,lr,$s3 @ i2
 ldrb $i1,[$tbl,$i1] @ Td4[s3>>16]
+ eor $t3,$t3,$i3,lsl#16
 ldrb $i2,[$tbl,$i2] @ Td4[s3>>8]
+ and $i3,lr,$s3 @ i2
+
 ldrb $i3,[$tbl,$i3] @ Td4[s3>>0]
 ldrb $s3,[$tbl,$s3,lsr#24] @ Td4[s3>>24]
 eor $s0,$s0,$i1,lsl#16
+ ldr $i1,[$key,#0]
 eor $s1,$s1,$i2,lsl#8
+ ldr $t1,[$key,#4]
 eor $s2,$i3,$s2,lsl#8
+ ldr $t2,[$key,#8]
 eor $s3,$t3,$s3,lsl#24
+ ldr $t3,[$key,#12]
- ldr lr,[sp],#4 @ pop lr
- ldr $t1,[$key,#0]
- ldr $t2,[$key,#4]
- ldr $t3,[$key,#8]
- ldr $i1,[$key,#12]
- eor $s0,$s0,$t1
- eor $s1,$s1,$t2
- eor $s2,$s2,$t3
- eor $s3,$s3,$i1
+ eor $s0,$s0,$i1
+ eor $s1,$s1,$t1
+ eor $s2,$s2,$t2
+ eor $s3,$s3,$t3
 sub $tbl,$tbl,#1024
- mov pc,lr @ return
+ ldr pc,[sp],#4 @ pop and return
.size _armv4_AES_decrypt,.-_armv4_AES_decrypt
.asciz "AES for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
___
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
print $code;
+close STDOUT; # enforce flush
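
The endian-neutral input and output handling in the patch (four ldrb plus three orr per word before ARMv7, a plain ldr followed by rev on little-endian ARMv7) amounts to a big-endian 32-bit load. Below is a minimal C sketch of the equivalent operation; load_be32 is an illustrative name, not a function of the module:

#include <stdint.h>

/* Same value as the pre-ARMv7 path computes with ldrb/orr:
 * p[3] | p[2]<<8 | p[1]<<16 | p[0]<<24, i.e. the word read
 * big-endian, identical on any host.  On little-endian ARMv7
 * the module reaches the same result with ldr + rev. */
static uint32_t load_be32(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
}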
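
The January 2007 commentary credits the speed to a single 1K S-box table combined with the ARM barrel shifter: a rotate rides along with almost any data-processing instruction at no extra cost, so .Lenc_loop keeps only Te0 in memory and synthesizes Te1..Te3 with the ror#8/16/24 operands seen above. A minimal C model of one output column of a middle round under that scheme, assuming Te0 is the standard AES encryption T-table (Te0[x] packs S[x]*{02,01,01,03} most-significant byte first); enc_column is an illustrative name:

#include <stdint.h>

/* Te0 stands for the standard 256-entry (1KB) AES encryption
 * T-table; only the declaration is sketched here. */
extern const uint32_t Te0[256];

static uint32_t ror32(uint32_t x, unsigned n)
{
    return (x >> n) | (x << ((32 - n) & 31));
}

/* One output column, with big-endian state words s0..s3 and
 * round-key word rk; Te1..Te3 are derived from Te0 by rotation
 * exactly as the ror#8/16/24 operands do in .Lenc_loop. */
static uint32_t enc_column(uint32_t s0, uint32_t s1, uint32_t s2,
                           uint32_t s3, uint32_t rk)
{
    return Te0[ s0 >> 24        ] ^
           ror32(Te0[(s1 >> 16) & 0xff],  8) ^  /* Te1 */
           ror32(Te0[(s2 >>  8) & 0xff], 16) ^  /* Te2 */
           ror32(Te0[ s3        & 0xff], 24) ^  /* Te3 */
           rk;
}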
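
The .L128_loop of the added AES_set_encrypt_key is the textbook FIPS-197 schedule: each group of four round-key words starts with rk[4] = rk[0] ^ SubWord(RotWord(rk[3])) ^ rcon[i], followed by three chained XORs, with SubWord done through the Te4 byte table embedded after AES_Te. A minimal C sketch of the 128-bit case, using the same big-endian word convention as the module; sbox stands for that byte table and expand128 is an illustrative name:

#include <stdint.h>

/* sbox stands for the 256-byte Te4 table above. */
extern const uint8_t sbox[256];

static const uint32_t rcon[10] = {
    0x01000000, 0x02000000, 0x04000000, 0x08000000, 0x10000000,
    0x20000000, 0x40000000, 0x80000000, 0x1B000000, 0x36000000
};

static void expand128(const uint32_t key[4], uint32_t rk[44])
{
    rk[0] = key[0]; rk[1] = key[1]; rk[2] = key[2]; rk[3] = key[3];
    for (int i = 0; i < 10; i++) {
        uint32_t t = rk[4*i + 3];
        /* RotWord + SubWord, matching the four ldrb lookups and
         * orr merges in .L128_loop */
        t = ((uint32_t)sbox[(t >> 16) & 0xff] << 24) |
            ((uint32_t)sbox[(t >>  8) & 0xff] << 16) |
            ((uint32_t)sbox[ t        & 0xff] <<  8) |
             (uint32_t)sbox[(t >> 24) & 0xff];
        rk[4*i + 4] = rk[4*i]     ^ t ^ rcon[i]; /* rk[4]=rk[0]^... */
        rk[4*i + 5] = rk[4*i + 1] ^ rk[4*i + 4]; /* rk[5]=rk[1]^rk[4] */
        rk[4*i + 6] = rk[4*i + 2] ^ rk[4*i + 5]; /* rk[6]=rk[2]^rk[5] */
        rk[4*i + 7] = rk[4*i + 3] ^ rk[4*i + 6]; /* rk[7]=rk[3]^rk[6] */
    }
}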
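
AES_set_decrypt_key reverses the round-key order in .Linv and then, in .Lmix, applies InvMixColumns to every word in place. The $mask80/$mask7f/$mask1b constants implement a branch-free xtime (doubling in GF(2^8)) on all four bytes of a word at once, and the tp2/tp4/tp8 doublings plus rotations reconstruct the {0e,0b,0d,09} column multipliers. A minimal C sketch of the same computation; the function names are illustrative:

#include <stdint.h>

/* Branch-free xtime on all four bytes of a word at once, with the
 * masks the assembly keeps in $mask80/$mask7f/$mask1b. */
static uint32_t xtime4(uint32_t x)
{
    uint32_t hi = x & 0x80808080u;        /* high bit of each byte */
    hi = (hi - (hi >> 7)) & 0x1b1b1b1bu;  /* 0x1b where it was set */
    return hi ^ ((x & 0x7f7f7f7fu) << 1);
}

static uint32_t ror32(uint32_t x, unsigned n)
{
    return (x >> n) | (x << ((32 - n) & 31));
}

/* InvMixColumns of one round-key word, mirroring .Lmix: tp2/tp4/tp8
 * are successive doublings of tp1, and the result is
 * tpe ^ (tpb ror 24) ^ (tpd ror 16) ^ (tp9 ror 8); ror 24 here is
 * the ROTATE(...,8) of the assembly comments. */
static uint32_t inv_mix_word(uint32_t tp1)
{
    uint32_t tp2 = xtime4(tp1);
    uint32_t tp4 = xtime4(tp2);
    uint32_t tp8 = xtime4(tp4);
    uint32_t tp9 = tp1 ^ tp8;             /* tp1 * 0x09 */
    uint32_t tpe = tp2 ^ tp4 ^ tp8;       /* tp1 * 0x0e */
    return tpe ^ ror32(tp9 ^ tp2, 24)     /* tpb = tp1 * 0x0b */
               ^ ror32(tp9 ^ tp4, 16)     /* tpd = tp1 * 0x0d */
               ^ ror32(tp9, 8);
}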