author      James Zern <jzern@google.com>    2016-06-22 12:18:49 -0700
committer   James Zern <jzern@google.com>    2016-06-27 19:46:57 -0700
commit      f51f67602e19cc086e29aa8e503d05b919f0a7b0 (patch)
tree        8e9412123f927e9a446e493438b357b0a7117493
parent      32ac7cabdff90ce4c91e88d220d6c419b092b973 (diff)
download    libvpx-f51f67602e19cc086e29aa8e503d05b919f0a7b0.tar.gz
*.asm: normalize label format
Add a trailing ':'. Though it's optional with the tools we support, it's
more common to use it to mark a label. This also quiets the
orphan-labels warning with nasm/yasm.
BUG=b/29583530
Change-Id: I46e95255e12026dd542d9838e2dd3fbddf7b56e2
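
For context, a minimal sketch of the warning being silenced (a hypothetical demo.asm, not part of this change). nasm parses a bare identifier alone on a line as a label, but flags it because it could just as well be a misspelled instruction mnemonic; the trailing colon makes the intent explicit:

; demo.asm - hypothetical example illustrating the orphan-labels warning
section .text
global count_to_four
count_to_four:
    xor     eax, eax        ; eax = 0
.loop                       ; bare local label: assembles, but nasm warns
                            ; "label alone on a line without a colon
                            ;  might be in error [-w+orphan-labels]"
    inc     eax
    cmp     eax, 4
    jne     .loop           ; loop until eax == 4
.done:                      ; with the trailing ':' no warning is issued
    ret

Assembling with, e.g., nasm -f elf64 demo.asm reports the warning for .loop (orphan-labels is enabled by default); rewriting it as .loop: matches the patch below and produces identical object code.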
 vp8/common/x86/mfqe_sse2.asm           | 6 +++---
 vp8/common/x86/postproc_mmx.asm        | 6 +++---
 vp8/common/x86/postproc_sse2.asm       | 8 ++++----
 vp9/common/x86/vp9_mfqe_sse2.asm       | 6 +++---
 vpx_dsp/x86/intrapred_sse2.asm         | 2 +-
 vpx_dsp/x86/vpx_convolve_copy_sse2.asm | 2 +-
 vpx_dsp/x86/vpx_subpixel_8t_ssse3.asm  | 2 +-
 7 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/vp8/common/x86/mfqe_sse2.asm b/vp8/common/x86/mfqe_sse2.asm
index a8a7f568d..8177b7922 100644
--- a/vp8/common/x86/mfqe_sse2.asm
+++ b/vp8/common/x86/mfqe_sse2.asm
@@ -45,7 +45,7 @@ sym(vp8_filter_by_weight16x16_sse2):
         mov         rcx, 16                 ; loop count
         pxor        xmm6, xmm6
 
-.combine
+.combine:
         movdqa      xmm2, [rax]
         movdqa      xmm4, [rdx]
         add         rax, rsi
@@ -122,7 +122,7 @@ sym(vp8_filter_by_weight8x8_sse2):
         mov         rcx, 8                  ; loop count
         pxor        xmm4, xmm4
 
-.combine
+.combine:
         movq        xmm2, [rax]
         movq        xmm3, [rdx]
         add         rax, rsi
@@ -189,7 +189,7 @@ sym(vp8_variance_and_sad_16x16_sse2):
         ; Because we're working with the actual output frames
         ; we can't depend on any kind of data alignment.
 
-.accumulate
+.accumulate:
         movdqa      xmm0, [rax]             ; src1
         movdqa      xmm1, [rdx]             ; src2
         add         rax, rcx                ; src1 + stride1
diff --git a/vp8/common/x86/postproc_mmx.asm b/vp8/common/x86/postproc_mmx.asm
index 1a89e7ead..61fe8854d 100644
--- a/vp8/common/x86/postproc_mmx.asm
+++ b/vp8/common/x86/postproc_mmx.asm
@@ -59,7 +59,7 @@ sym(vp8_mbpost_proc_down_mmx):
         lea         rdi,        [rdi+rdx]
         movq        mm1,        QWORD ptr[rdi]  ; first row
         mov         rcx,        8
-.init_borderd                               ; initialize borders
+.init_borderd:                              ; initialize borders
         lea         rdi,        [rdi + rax]
         movq        [rdi],      mm1
 
@@ -72,7 +72,7 @@ sym(vp8_mbpost_proc_down_mmx):
         mov         rdi,        rsi
         movq        mm1,        QWORD ptr[rdi]  ; first row
         mov         rcx,        8
-.init_border                                ; initialize borders
+.init_border:                               ; initialize borders
         lea         rdi,        [rdi + rax]
         movq        [rdi],      mm1
 
@@ -213,7 +213,7 @@ sym(vp8_mbpost_proc_down_mmx):
         movd        mm1,        DWORD PTR [rsp+rcx*4] ;d[rcx*4]
         movd        [rsi],      mm1
 
-.skip_assignment
+.skip_assignment:
         lea         rsi,        [rsi+rax]
         lea         rdi,        [rdi+rax]
 
diff --git a/vp8/common/x86/postproc_sse2.asm b/vp8/common/x86/postproc_sse2.asm
index de17afa5c..508b5e887 100644
--- a/vp8/common/x86/postproc_sse2.asm
+++ b/vp8/common/x86/postproc_sse2.asm
@@ -198,7 +198,7 @@ sym(vp8_post_proc_down_and_across_mb_row_sse2):
         UPDATE_FLIMIT
         jmp         .acrossnextcol
 
-.acrossdone
+.acrossdone:
         ; last 16 pixels
         movq        QWORD PTR [rdi+rdx-16], mm0
 
@@ -278,7 +278,7 @@ sym(vp8_mbpost_proc_down_xmm):
         lea         rdi,        [rdi+rdx]
         movq        xmm1,       QWORD ptr[rdi]  ; first row
         mov         rcx,        8
-.init_borderd                               ; initialize borders
+.init_borderd:                              ; initialize borders
         lea         rdi,        [rdi + rax]
         movq        [rdi],      xmm1
 
@@ -291,7 +291,7 @@ sym(vp8_mbpost_proc_down_xmm):
         mov         rdi,        rsi
         movq        xmm1,       QWORD ptr[rdi]  ; first row
         mov         rcx,        8
-.init_border                                ; initialize borders
+.init_border:                               ; initialize borders
         lea         rdi,        [rdi + rax]
         movq        [rdi],      xmm1
 
@@ -434,7 +434,7 @@ sym(vp8_mbpost_proc_down_xmm):
         movq        mm0,        [rsp + rcx*8]   ;d[rcx*8]
         movq        [rsi],      mm0
 
-.skip_assignment
+.skip_assignment:
         lea         rsi,        [rsi+rax]
         lea         rdi,        [rdi+rax]
 
diff --git a/vp9/common/x86/vp9_mfqe_sse2.asm b/vp9/common/x86/vp9_mfqe_sse2.asm
index 6029420d1..30852049b 100644
--- a/vp9/common/x86/vp9_mfqe_sse2.asm
+++ b/vp9/common/x86/vp9_mfqe_sse2.asm
@@ -46,7 +46,7 @@ sym(vp9_filter_by_weight16x16_sse2):
         mov         rcx, 16                 ; loop count
         pxor        xmm6, xmm6
 
-.combine
+.combine:
         movdqa      xmm2, [rax]
         movdqa      xmm4, [rdx]
         add         rax, rsi
@@ -123,7 +123,7 @@ sym(vp9_filter_by_weight8x8_sse2):
         mov         rcx, 8                  ; loop count
         pxor        xmm4, xmm4
 
-.combine
+.combine:
         movq        xmm2, [rax]
         movq        xmm3, [rdx]
         add         rax, rsi
@@ -190,7 +190,7 @@ sym(vp9_variance_and_sad_16x16_sse2):
         ; Because we're working with the actual output frames
         ; we can't depend on any kind of data alignment.
 
-.accumulate
+.accumulate:
         movdqa      xmm0, [rax]             ; src1
         movdqa      xmm1, [rdx]             ; src2
         add         rax, rcx                ; src1 + stride1
diff --git a/vpx_dsp/x86/intrapred_sse2.asm b/vpx_dsp/x86/intrapred_sse2.asm
index cd6a6ae98..c18095c28 100644
--- a/vpx_dsp/x86/intrapred_sse2.asm
+++ b/vpx_dsp/x86/intrapred_sse2.asm
@@ -756,7 +756,7 @@ cglobal tm_predictor_8x8, 4, 4, 5, dst, stride, above, left
   psubw                m0, m2 ; t1-tl t2-tl ... t8-tl [word]
   movq                 m2, [leftq]
   punpcklbw            m2, m1 ; l1 l2 l3 l4 l5 l6 l7 l8 [word]
-.loop
+.loop:
   pshuflw              m4, m2, 0x0  ; [63:0] l1 l1 l1 l1 [word]
   pshuflw              m3, m2, 0x55 ; [63:0] l2 l2 l2 l2 [word]
   punpcklqdq           m4, m4       ; l1 l1 l1 l1 l1 l1 l1 l1 [word]
diff --git a/vpx_dsp/x86/vpx_convolve_copy_sse2.asm b/vpx_dsp/x86/vpx_convolve_copy_sse2.asm
index abc027065..e2311c116 100644
--- a/vpx_dsp/x86/vpx_convolve_copy_sse2.asm
+++ b/vpx_dsp/x86/vpx_convolve_copy_sse2.asm
@@ -87,7 +87,7 @@ cglobal convolve_%1, 4, 7, 4+AUX_XMM_REGS, src, src_stride, \
   RET
 %endif
 
-.w64
+.w64:
   mov                  r4d, dword hm
 .loop64:
   movu                 m0, [srcq]
diff --git a/vpx_dsp/x86/vpx_subpixel_8t_ssse3.asm b/vpx_dsp/x86/vpx_subpixel_8t_ssse3.asm
index d2cb8ea29..9cc4f9d7d 100644
--- a/vpx_dsp/x86/vpx_subpixel_8t_ssse3.asm
+++ b/vpx_dsp/x86/vpx_subpixel_8t_ssse3.asm
@@ -211,7 +211,7 @@ cglobal filter_block1d4_%1, 6, 6+(ARCH_X86_64*2), 11, LOCAL_VARS_SIZE, \
     pavgb                m1, m0
 %endif
     movd                 [dstq], m1
-.done
+.done:
     RET
 %endm