author     Michael Niedermayer <michaelni@gmx.at>  2012-08-01 23:33:06 +0200
committer  Michael Niedermayer <michaelni@gmx.at>  2012-08-01 23:33:06 +0200
commit     ec7ecb88117fd2d086f0be45ded9743c94100ef4 (patch)
tree       8536e70dd413e0dc1c5cd65dec3c37928ef3b8cc /libavcodec/arm
parent     4c8fc6a2a41d1b8a4d3eb4452bb5d728253273f4 (diff)
parent     19cf7163c1576e7b03ea33d7bf633e14d7516db8 (diff)
Merge remote-tracking branch 'qatar/master'
* qatar/master:
  dca: Switch dca_sample_rates to avpriv_ prefix; it is used across libs
  ARM: use =const syntax instead of explicit literal pools
  ARM: use standard syntax for all LDRD/STRD instructions
  fft: port FFT/IMDCT 3dnow functions to yasm, and disable on x86-64.
  dct-test: allow to compile without HAVE_INLINE_ASM.
  x86/dsputilenc: bury inline asm under HAVE_INLINE_ASM.
  dca: Move tables used outside of dcadec.c to a separate file.
  dca: Rename dca.c ---> dcadec.c
  x86: h264dsp: Remove unused variable ff_pb_3_1
  apetag: change a forgotten return to return 0

Conflicts:
	libavcodec/Makefile
	libavcodec/dca.c
	libavcodec/x86/fft_3dn.c
	libavcodec/x86/fft_3dn2.c
	libavcodec/x86/fft_mmx.asm

Merged-by: Michael Niedermayer <michaelni@gmx.at>
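Most of the libavcodec/arm churn comes from the two ARM items above. The LDRD/STRD change is purely syntactic: the old shorthand names only the first register of a pair, while standard UAL syntax names both. A minimal sketch of the two spellings (the store operands here are illustrative, not taken from the patch):

    @ old shorthand: the second register of the pair (r5) is implied
        ldrd    r4, [sp, #20]

    @ standard UAL syntax: both registers of the pair are explicit
        ldrd    r4, r5, [sp, #20]
        strd    r4, r5, [r0, #8]

Both spellings access the same r4/r5 pair; the explicit form is the one stricter assemblers (for example clang's integrated assembler) accept.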
Diffstat (limited to 'libavcodec/arm')
-rw-r--r--  libavcodec/arm/h264cmc_neon.S        |  4
-rw-r--r--  libavcodec/arm/h264dsp_neon.S        | 12
-rw-r--r--  libavcodec/arm/mpegvideo_armv5te_s.S |  6
-rw-r--r--  libavcodec/arm/simple_idct_arm.S     | 56
-rw-r--r--  libavcodec/arm/simple_idct_armv5te.S | 34
-rw-r--r--  libavcodec/arm/simple_idct_armv6.S   | 33
-rw-r--r--  libavcodec/arm/simple_idct_neon.S    |  8
7 files changed, 57 insertions, 96 deletions
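The simple_idct_* hunks that follow also drop the hand-maintained literal pools (the __constant_ptr__ table and the w13/w26/w57 words) in favour of the ldr rd, =const pseudo-instruction, which makes the assembler emit and address the literal itself. A rough before/after sketch, using the W1 value (22725) defined in simple_idct_arm.S:

    @ before: constants sit in an explicit table addressed through r12
            adr     r12, __constant_ptr__
            ldr     r8, [r12, #0]        @ R8 = W1
            @ ...
    __constant_ptr__:
            .word   22725                @ W1

    @ after: the assembler places 22725 in a literal pool and rewrites the
    @ load as a PC-relative ldr; .ltorg (or the end of the section) marks
    @ where that pool may be dumped
            ldr     r8, =22725           @ R8 = W1
            .ltorg

The only constraint is that the literal pool must stay within the range of the PC-relative load; the assembler errors out otherwise.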
diff --git a/libavcodec/arm/h264cmc_neon.S b/libavcodec/arm/h264cmc_neon.S
index e82394d899..c7e54605bb 100644
--- a/libavcodec/arm/h264cmc_neon.S
+++ b/libavcodec/arm/h264cmc_neon.S
@@ -24,7 +24,7 @@
.macro h264_chroma_mc8 type, codec=h264
function ff_\type\()_\codec\()_chroma_mc8_neon, export=1
push {r4-r7, lr}
- ldrd r4, [sp, #20]
+ ldrd r4, r5, [sp, #20]
.ifc \type,avg
mov lr, r0
.endif
@@ -182,7 +182,7 @@ endfunc
.macro h264_chroma_mc4 type, codec=h264
function ff_\type\()_\codec\()_chroma_mc4_neon, export=1
push {r4-r7, lr}
- ldrd r4, [sp, #20]
+ ldrd r4, r5, [sp, #20]
.ifc \type,avg
mov lr, r0
.endif
diff --git a/libavcodec/arm/h264dsp_neon.S b/libavcodec/arm/h264dsp_neon.S
index 341863ef40..be0d2ec0bb 100644
--- a/libavcodec/arm/h264dsp_neon.S
+++ b/libavcodec/arm/h264dsp_neon.S
@@ -886,7 +886,7 @@ T mov sp, r0
mov r12, #8
vpush {d8-d15}
bl put_h264_qpel8_h_lowpass_neon
- ldrd r0, [r11], #8
+ ldrd r0, r1, [r11], #8
mov r3, r2
add r12, sp, #64
sub r1, r1, r2, lsl #1
@@ -913,7 +913,7 @@ T mov sp, r0
vpush {d8-d15}
bl put_h264_qpel8_h_lowpass_neon
mov r4, r0
- ldrd r0, [r11], #8
+ ldrd r0, r1, [r11], #8
sub r1, r1, r2, lsl #1
sub r1, r1, #2
mov r3, r2
@@ -958,7 +958,7 @@ T mov sp, r0
vpush {d8-d15}
bl put_h264_qpel8_v_lowpass_neon
mov r4, r0
- ldrd r0, [r11], #8
+ ldrd r0, r1, [r11], #8
sub r1, r1, r3, lsl #1
sub r1, r1, #2
sub r2, r4, #64
@@ -1071,7 +1071,7 @@ T mov sp, r0
mov r3, #16
vpush {d8-d15}
bl put_h264_qpel16_h_lowpass_neon
- ldrd r0, [r11], #8
+ ldrd r0, r1, [r11], #8
mov r3, r2
add r12, sp, #64
sub r1, r1, r2, lsl #1
@@ -1096,7 +1096,7 @@ T mov sp, r0
vpush {d8-d15}
bl put_h264_qpel16_h_lowpass_neon_packed
mov r4, r0
- ldrd r0, [r11], #8
+ ldrd r0, r1, [r11], #8
sub r1, r1, r2, lsl #1
sub r1, r1, #2
mov r3, r2
@@ -1139,7 +1139,7 @@ T mov sp, r0
vpush {d8-d15}
bl put_h264_qpel16_v_lowpass_neon_packed
mov r4, r0
- ldrd r0, [r11], #8
+ ldrd r0, r1, [r11], #8
sub r1, r1, r3, lsl #1
sub r1, r1, #2
mov r2, r3
diff --git a/libavcodec/arm/mpegvideo_armv5te_s.S b/libavcodec/arm/mpegvideo_armv5te_s.S
index 8f9dd42b22..8687d6b31c 100644
--- a/libavcodec/arm/mpegvideo_armv5te_s.S
+++ b/libavcodec/arm/mpegvideo_armv5te_s.S
@@ -61,9 +61,9 @@ function ff_dct_unquantize_h263_armv5te, export=1
mov ip, #0
subs r3, r3, #2
ble 2f
- ldrd r4, [r0, #0]
+ ldrd r4, r5, [r0, #0]
1:
- ldrd r6, [r0, #8]
+ ldrd r6, r7, [r0, #8]
dequant_t r9, r4, r1, r2, r9
dequant_t lr, r5, r1, r2, lr
@@ -87,7 +87,7 @@ function ff_dct_unquantize_h263_armv5te, export=1
subs r3, r3, #8
it gt
- ldrdgt r4, [r0, #0] /* load data early to avoid load/use pipeline stall */
+ ldrdgt r4, r5, [r0, #0] /* load data early to avoid load/use pipeline stall */
bgt 1b
adds r3, r3, #2
diff --git a/libavcodec/arm/simple_idct_arm.S b/libavcodec/arm/simple_idct_arm.S
index 42741a07a1..dd1c815104 100644
--- a/libavcodec/arm/simple_idct_arm.S
+++ b/libavcodec/arm/simple_idct_arm.S
@@ -25,8 +25,7 @@
#include "libavutil/arm/asm.S"
-/* useful constants for the algorithm, they are save in __constant_ptr__ at */
-/* the end of the source code.*/
+/* useful constants for the algorithm */
#define W1 22725
#define W2 21407
#define W3 19266
@@ -36,16 +35,6 @@
#define W7 4520
#define MASK_MSHW 0xFFFF0000
-/* offsets of the constants in the vector */
-#define offW1 0
-#define offW2 4
-#define offW3 8
-#define offW4 12
-#define offW5 16
-#define offW6 20
-#define offW7 24
-#define offMASK_MSHW 28
-
#define ROW_SHIFT 11
#define ROW_SHIFT2MSHW (16-11)
#define COL_SHIFT 20
@@ -63,7 +52,6 @@ function ff_simple_idct_arm, export=1
stmfd sp!, {r4-r11, r14} @ R14 is also called LR
@@ at this point, R0=block, other registers are free.
add r14, r0, #112 @ R14=&block[8*7], better start from the last row, and decrease the value until row=0, i.e. R12=block.
- adr r12, __constant_ptr__ @ R12=__constant_ptr__, the vector containing the constants, probably not necessary to reserve a register for it
@@ add 2 temporary variables in the stack: R0 and R14
sub sp, sp, #8 @ allow 2 local variables
str r0, [sp, #0] @ save block in sp[0]
@@ -109,13 +97,13 @@ __b_evaluation:
@@ MAC16(b1, -W7, row[3]);
@@ MAC16(b2, -W1, row[3]);
@@ MAC16(b3, -W5, row[3]);
- ldr r8, [r12, #offW1] @ R8=W1
+ ldr r8, =W1 @ R8=W1
mov r2, r2, asr #16 @ R2=ROWr16[3]
mul r0, r8, r7 @ R0=W1*ROWr16[1]=b0 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
- ldr r9, [r12, #offW3] @ R9=W3
- ldr r10, [r12, #offW5] @ R10=W5
+ ldr r9, =W3 @ R9=W3
+ ldr r10, =W5 @ R10=W5
mul r1, r9, r7 @ R1=W3*ROWr16[1]=b1 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
- ldr r11, [r12, #offW7] @ R11=W7
+ ldr r11, =W7 @ R11=W7
mul r5, r10, r7 @ R5=W5*ROWr16[1]=b2 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
mul r7, r11, r7 @ R7=W7*ROWr16[1]=b3 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
teq r2, #0 @ if null avoid muls
@@ -177,14 +165,14 @@ __a_evaluation:
@@ a2 = a0 - W6 * row[2];
@@ a3 = a0 - W2 * row[2];
@@ a0 = a0 + W2 * row[2];
- ldr r9, [r12, #offW4] @ R9=W4
+ ldr r9, =W4 @ R9=W4
mul r6, r9, r6 @ R6=W4*ROWr16[0]
- ldr r10, [r12, #offW6] @ R10=W6
+ ldr r10, =W6 @ R10=W6
ldrsh r4, [r14, #4] @ R4=ROWr16[2] (a3 not defined yet)
add r6, r6, #ROW_SHIFTED_1 @ R6=W4*ROWr16[0] + 1<<(ROW_SHIFT-1) (a0)
mul r11, r10, r4 @ R11=W6*ROWr16[2]
- ldr r8, [r12, #offW2] @ R8=W2
+ ldr r8, =W2 @ R8=W2
sub r3, r6, r11 @ R3=a0-W6*ROWr16[2] (a2)
@@ temp = ((uint32_t*)row)[2] | ((uint32_t*)row)[3];
@@ if (temp != 0) {}
@@ -248,7 +236,7 @@ __end_a_evaluation:
add r9, r2, r1 @ R9=a1+b1
@@ put 2 16 bits half-words in a 32bits word
@@ ROWr32[0]=ROWr16[0] | (ROWr16[1]<<16) (only Little Endian compliant then!!!)
- ldr r10, [r12, #offMASK_MSHW] @ R10=0xFFFF0000
+ ldr r10, =MASK_MSHW @ R10=0xFFFF0000
and r9, r10, r9, lsl #ROW_SHIFT2MSHW @ R9=0xFFFF0000 & ((a1+b1)<<5)
mvn r11, r10 @ R11= NOT R10= 0x0000FFFF
and r8, r11, r8, asr #ROW_SHIFT @ R8=0x0000FFFF & ((a0+b0)>>11)
@@ -319,13 +307,13 @@ __b_evaluation2:
@@ MAC16(b1, -W7, col[8x3]);
@@ MAC16(b2, -W1, col[8x3]);
@@ MAC16(b3, -W5, col[8x3]);
- ldr r8, [r12, #offW1] @ R8=W1
+ ldr r8, =W1 @ R8=W1
ldrsh r7, [r14, #16]
mul r0, r8, r7 @ R0=W1*ROWr16[1]=b0 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
- ldr r9, [r12, #offW3] @ R9=W3
- ldr r10, [r12, #offW5] @ R10=W5
+ ldr r9, =W3 @ R9=W3
+ ldr r10, =W5 @ R10=W5
mul r1, r9, r7 @ R1=W3*ROWr16[1]=b1 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
- ldr r11, [r12, #offW7] @ R11=W7
+ ldr r11, =W7 @ R11=W7
mul r5, r10, r7 @ R5=W5*ROWr16[1]=b2 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
ldrsh r2, [r14, #48]
mul r7, r11, r7 @ R7=W7*ROWr16[1]=b3 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
@@ -381,13 +369,13 @@ __a_evaluation2:
@@ a3 = a0 - W2 * row[2];
@@ a0 = a0 + W2 * row[2];
ldrsh r6, [r14, #0]
- ldr r9, [r12, #offW4] @ R9=W4
+ ldr r9, =W4 @ R9=W4
mul r6, r9, r6 @ R6=W4*ROWr16[0]
- ldr r10, [r12, #offW6] @ R10=W6
+ ldr r10, =W6 @ R10=W6
ldrsh r4, [r14, #32] @ R4=ROWr16[2] (a3 not defined yet)
add r6, r6, #COL_SHIFTED_1 @ R6=W4*ROWr16[0] + 1<<(COL_SHIFT-1) (a0)
mul r11, r10, r4 @ R11=W6*ROWr16[2]
- ldr r8, [r12, #offW2] @ R8=W2
+ ldr r8, =W2 @ R8=W2
add r2, r6, r11 @ R2=a0+W6*ROWr16[2] (a1)
sub r3, r6, r11 @ R3=a0-W6*ROWr16[2] (a2)
mul r11, r8, r4 @ R11=W2*ROWr16[2]
@@ -489,15 +477,3 @@ __end_bef_a_evaluation:
sub r4, r6, r11 @ R4=a0-W2*ROWr16[2] (a3)
add r6, r6, r11 @ R6=a0+W2*ROWr16[2] (a0)
bal __end_a_evaluation
-
-
- .align
-__constant_ptr__: @@ see #defines at the beginning of the source code for values.
- .word W1
- .word W2
- .word W3
- .word W4
- .word W5
- .word W6
- .word W7
- .word MASK_MSHW
diff --git a/libavcodec/arm/simple_idct_armv5te.S b/libavcodec/arm/simple_idct_armv5te.S
index 20a2305b65..d1f10b75cb 100644
--- a/libavcodec/arm/simple_idct_armv5te.S
+++ b/libavcodec/arm/simple_idct_armv5te.S
@@ -37,17 +37,11 @@
#define W26 (W2 | (W6 << 16))
#define W57 (W5 | (W7 << 16))
- .text
- .align
-w13: .long W13
-w26: .long W26
-w57: .long W57
-
function idct_row_armv5te
str lr, [sp, #-4]!
- ldrd v1, [a1, #8]
- ldrd a3, [a1] /* a3 = row[1:0], a4 = row[3:2] */
+ ldrd v1, v2, [a1, #8]
+ ldrd a3, a4, [a1] /* a3 = row[1:0], a4 = row[3:2] */
orrs v1, v1, v2
itt eq
cmpeq v1, a4
@@ -58,7 +52,7 @@ function idct_row_armv5te
mov ip, #16384
sub ip, ip, #1 /* ip = W4 */
smlabb v1, ip, a3, v1 /* v1 = W4*row[0]+(1<<(RS-1)) */
- ldr ip, w26 /* ip = W2 | (W6 << 16) */
+ ldr ip, =W26 /* ip = W2 | (W6 << 16) */
smultb a2, ip, a4
smulbb lr, ip, a4
add v2, v1, a2
@@ -66,8 +60,8 @@ function idct_row_armv5te
sub v4, v1, lr
add v1, v1, lr
- ldr ip, w13 /* ip = W1 | (W3 << 16) */
- ldr lr, w57 /* lr = W5 | (W7 << 16) */
+ ldr ip, =W13 /* ip = W1 | (W3 << 16) */
+ ldr lr, =W57 /* lr = W5 | (W7 << 16) */
smulbt v5, ip, a3
smultt v6, lr, a4
smlatt v5, ip, a4, v5
@@ -78,7 +72,7 @@ function idct_row_armv5te
smultt fp, lr, a3
sub v7, v7, a2
smulbt a2, lr, a4
- ldrd a3, [a1, #8] /* a3=row[5:4] a4=row[7:6] */
+ ldrd a3, a4, [a1, #8] /* a3=row[5:4] a4=row[7:6] */
sub fp, fp, a2
orrs a2, a3, a4
@@ -94,7 +88,7 @@ function idct_row_armv5te
smlatt v7, ip, a4, v7
sub fp, fp, a2
- ldr ip, w26 /* ip = W2 | (W6 << 16) */
+ ldr ip, =W26 /* ip = W2 | (W6 << 16) */
mov a2, #16384
sub a2, a2, #1 /* a2 = W4 */
smulbb a2, a2, a3 /* a2 = W4*row[4] */
@@ -121,7 +115,7 @@ function idct_row_armv5te
add a2, v4, fp
mov a2, a2, lsr #11
add a4, a4, a2, lsl #16
- strd a3, [a1]
+ strd a3, a4, [a1]
sub a2, v4, fp
mov a3, a2, lsr #11
@@ -135,7 +129,7 @@ function idct_row_armv5te
sub a2, v1, v5
mov a2, a2, lsr #11
add a4, a4, a2, lsl #16
- strd a3, [a1, #8]
+ strd a3, a4, [a1, #8]
ldr pc, [sp], #4
@@ -144,8 +138,8 @@ row_dc_only:
bic a3, a3, #0xe000
mov a3, a3, lsl #3
mov a4, a3
- strd a3, [a1]
- strd a3, [a1, #8]
+ strd a3, a4, [a1]
+ strd a3, a4, [a1, #8]
ldr pc, [sp], #4
endfunc
@@ -178,7 +172,7 @@ endfunc
sub v4, v2, a3
sub v6, v2, a3
add fp, v2, a3
- ldr ip, w26
+ ldr ip, =W26
ldr a4, [a1, #(16*2)]
add v2, v2, a3
@@ -211,9 +205,9 @@ endfunc
stmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp}
- ldr ip, w13
+ ldr ip, =W13
ldr a4, [a1, #(16*1)]
- ldr lr, w57
+ ldr lr, =W57
smulbb v1, ip, a4
smultb v3, ip, a4
smulbb v5, lr, a4
diff --git a/libavcodec/arm/simple_idct_armv6.S b/libavcodec/arm/simple_idct_armv6.S
index 25393bfd27..0c19d267a8 100644
--- a/libavcodec/arm/simple_idct_armv6.S
+++ b/libavcodec/arm/simple_idct_armv6.S
@@ -40,15 +40,6 @@
#define W46 (W4 | (W6 << 16))
#define W57 (W5 | (W7 << 16))
- .text
- .align
-w13: .long W13
-w26: .long W26
-w42: .long W42
-w42n: .long W42n
-w46: .long W46
-w57: .long W57
-
/*
Compute partial IDCT of single row.
shift = left-shift amount
@@ -60,12 +51,12 @@ w57: .long W57
Output in registers r4--r11
*/
.macro idct_row shift
- ldr lr, w46 /* lr = W4 | (W6 << 16) */
+ ldr lr, =W46 /* lr = W4 | (W6 << 16) */
mov r1, #(1<<(\shift-1))
smlad r4, r2, ip, r1
smlsd r7, r2, ip, r1
- ldr ip, w13 /* ip = W1 | (W3 << 16) */
- ldr r10,w57 /* r10 = W5 | (W7 << 16) */
+ ldr ip, =W13 /* ip = W1 | (W3 << 16) */
+ ldr r10,=W57 /* r10 = W5 | (W7 << 16) */
smlad r5, r2, lr, r1
smlsd r6, r2, lr, r1
@@ -78,11 +69,11 @@ w57: .long W57
smlad r8, lr, r10,r8 /* B0 += W5*row[5] + W7*row[7] */
smusdx r10,r3, r1 /* r10 = B2 = W5*row[1] - W1*row[3] */
- ldr r3, w42n /* r3 = -W4 | (-W2 << 16) */
+ ldr r3, =W42n /* r3 = -W4 | (-W2 << 16) */
smlad r10,lr, r2, r10 /* B2 += W7*row[5] + W3*row[7] */
ldr r2, [r0, #4] /* r2 = row[6,4] */
smlsdx r11,lr, ip, r11 /* B3 += W3*row[5] - W1*row[7] */
- ldr ip, w46 /* ip = W4 | (W6 << 16) */
+ ldr ip, =W46 /* ip = W4 | (W6 << 16) */
smlad r9, lr, r1, r9 /* B1 -= W1*row[5] + W5*row[7] */
smlad r5, r2, r3, r5 /* A1 += -W4*row[4] - W2*row[6] */
@@ -101,12 +92,12 @@ w57: .long W57
Output in registers r4--r11
*/
.macro idct_row4 shift
- ldr lr, w46 /* lr = W4 | (W6 << 16) */
- ldr r10,w57 /* r10 = W5 | (W7 << 16) */
+ ldr lr, =W46 /* lr = W4 | (W6 << 16) */
+ ldr r10,=W57 /* r10 = W5 | (W7 << 16) */
mov r1, #(1<<(\shift-1))
smlad r4, r2, ip, r1
smlsd r7, r2, ip, r1
- ldr ip, w13 /* ip = W1 | (W3 << 16) */
+ ldr ip, =W13 /* ip = W1 | (W3 << 16) */
smlad r5, r2, lr, r1
smlsd r6, r2, lr, r1
smusdx r11,r3, r10 /* r11 = B3 = W7*row[1] - W5*row[3] */
@@ -205,7 +196,7 @@ function idct_row_armv6
cmpeq lr, r2, lsr #16
beq 1f
push {r1}
- ldr ip, w42 /* ip = W4 | (W2 << 16) */
+ ldr ip, =W42 /* ip = W4 | (W2 << 16) */
cmp lr, #0
beq 2f
@@ -249,7 +240,7 @@ function idct_col_armv6
push {r1, lr}
ldr r2, [r0] /* r2 = row[2,0] */
- ldr ip, w42 /* ip = W4 | (W2 << 16) */
+ ldr ip, =W42 /* ip = W4 | (W2 << 16) */
ldr r3, [r0, #8] /* r3 = row[3,1] */
idct_row COL_SHIFT
pop {r1}
@@ -277,7 +268,7 @@ function idct_col_put_armv6
push {r1, r2, lr}
ldr r2, [r0] /* r2 = row[2,0] */
- ldr ip, w42 /* ip = W4 | (W2 << 16) */
+ ldr ip, =W42 /* ip = W4 | (W2 << 16) */
ldr r3, [r0, #8] /* r3 = row[3,1] */
idct_row COL_SHIFT
pop {r1, r2}
@@ -307,7 +298,7 @@ function idct_col_add_armv6
push {r1, r2, lr}
ldr r2, [r0] /* r2 = row[2,0] */
- ldr ip, w42 /* ip = W4 | (W2 << 16) */
+ ldr ip, =W42 /* ip = W4 | (W2 << 16) */
ldr r3, [r0, #8] /* r3 = row[3,1] */
idct_row COL_SHIFT
pop {r1, r2}
diff --git a/libavcodec/arm/simple_idct_neon.S b/libavcodec/arm/simple_idct_neon.S
index 3c048b0d56..a8fc13768e 100644
--- a/libavcodec/arm/simple_idct_neon.S
+++ b/libavcodec/arm/simple_idct_neon.S
@@ -159,8 +159,8 @@ function idct_col4_neon
vmull.s16 q15, d30, w4 /* q15 = W4*(col[0]+(1<<COL_SHIFT-1)/W4)*/
vld1.64 {d8}, [r2,:64], ip /* d5 = col[3] */
- ldrd r4, [r2]
- ldrd r6, [r2, #16]
+ ldrd r4, r5, [r2]
+ ldrd r6, r7, [r2, #16]
orrs r4, r4, r5
idct_col4_top
@@ -176,7 +176,7 @@ function idct_col4_neon
vadd.i32 q14, q14, q7
1: orrs r6, r6, r7
- ldrd r4, [r2, #16]
+ ldrd r4, r5, [r2, #16]
it eq
addeq r2, r2, #16
beq 2f
@@ -188,7 +188,7 @@ function idct_col4_neon
vmlal.s16 q6, d5, w3 /* q6 += W3 * col[5] */
2: orrs r4, r4, r5
- ldrd r4, [r2, #16]
+ ldrd r4, r5, [r2, #16]
it eq
addeq r2, r2, #16
beq 3f