Commit 6be7944e authored by Diego Biurrun

x86: Add missing colons after assembly labels

This fixes many warnings of the sort
warning: label alone on a line without a colon might be in error
parent 2816f8a8
...@@ -153,7 +153,7 @@ cglobal vector_clipf, 3, 3, 6, dst, src, len, min, max ...@@ -153,7 +153,7 @@ cglobal vector_clipf, 3, 3, 6, dst, src, len, min, max
movsxdifnidn lenq, lend movsxdifnidn lenq, lend
.loop .loop:
mova m2, [srcq + 4 * lenq - 4 * mmsize] mova m2, [srcq + 4 * lenq - 4 * mmsize]
mova m3, [srcq + 4 * lenq - 3 * mmsize] mova m3, [srcq + 4 * lenq - 3 * mmsize]
mova m4, [srcq + 4 * lenq - 2 * mmsize] mova m4, [srcq + 4 * lenq - 2 * mmsize]
......
...@@ -238,7 +238,7 @@ cglobal synth_filter_inner, 0, 6 + 4 * ARCH_X86_64, 7 + 6 * ARCH_X86_64, \ ...@@ -238,7 +238,7 @@ cglobal synth_filter_inner, 0, 6 + 4 * ARCH_X86_64, 7 + 6 * ARCH_X86_64, \
%if ARCH_X86_32 %if ARCH_X86_32
mov buf2, synth_buf2mp mov buf2, synth_buf2mp
%endif %endif
.mainloop .mainloop:
; m1 = a m2 = b m3 = c m4 = d ; m1 = a m2 = b m3 = c m4 = d
SETZERO m3 SETZERO m3
SETZERO m4 SETZERO m4
......
...@@ -386,7 +386,7 @@ MC_CACHE MC10 ...@@ -386,7 +386,7 @@ MC_CACHE MC10
; void ff_h264_qpel_mc02(uint8_t *dst, uint8_t *src, int stride) ; void ff_h264_qpel_mc02(uint8_t *dst, uint8_t *src, int stride)
;----------------------------------------------------------------------------- ;-----------------------------------------------------------------------------
%macro V_FILT 10 %macro V_FILT 10
v_filt%9_%10_10 v_filt%9_%10_10:
add r4, r2 add r4, r2
.no_addr4: .no_addr4:
FILT_V m0, m1, m2, m3, m4, m5, m6, m7 FILT_V m0, m1, m2, m3, m4, m5, m6, m7
......
...@@ -139,12 +139,12 @@ WEIGHT_FUNC_HALF_MM 8, 8 ...@@ -139,12 +139,12 @@ WEIGHT_FUNC_HALF_MM 8, 8
je .nonnormal je .nonnormal
cmp r5d, 128 cmp r5d, 128
jne .normal jne .normal
.nonnormal .nonnormal:
sar r5d, 1 sar r5d, 1
sar r6d, 1 sar r6d, 1
sar off_regd, 1 sar off_regd, 1
sub r4d, 1 sub r4d, 1
.normal .normal:
%if cpuflag(ssse3) %if cpuflag(ssse3)
movd m4, r5d movd m4, r5d
movd m0, r6d movd m0, r6d
......
...@@ -184,7 +184,7 @@ cglobal hevc_qpel_ %+ postfix %+ _ %+ %1 %+ _8, 7, 7, 7, dst, dststride, src, sr ...@@ -184,7 +184,7 @@ cglobal hevc_qpel_ %+ postfix %+ _ %+ %1 %+ _8, 7, 7, 7, dst, dststride, src, sr
sub src_m3, pixstride3 sub src_m3, pixstride3
%endif %endif
.loop .loop:
%assign i 0 %assign i 0
%rep nb_blocks %rep nb_blocks
...@@ -285,7 +285,7 @@ QPEL_8 64, 1 ...@@ -285,7 +285,7 @@ QPEL_8 64, 1
sub srcm3q, sstride3q sub srcm3q, sstride3q
%endif %endif
.loop .loop:
%assign i 0 %assign i 0
%rep nb_blocks %rep nb_blocks
...@@ -444,7 +444,7 @@ cglobal hevc_epel_ %+ postfix %+ _ %+ %1 %+ _8, 7, 7, 6, dst, dststride, src, sr ...@@ -444,7 +444,7 @@ cglobal hevc_epel_ %+ postfix %+ _ %+ %1 %+ _8, 7, 7, 6, dst, dststride, src, sr
%endif %endif
sub srcq, pixstride sub srcq, pixstride
.loop .loop:
%assign i 0 %assign i 0
%rep nb_blocks %rep nb_blocks
...@@ -519,7 +519,7 @@ EPEL_8 32, 1 ...@@ -519,7 +519,7 @@ EPEL_8 32, 1
%endif %endif
sub srcq, pixstride sub srcq, pixstride
.loop .loop:
%assign i 0 %assign i 0
%rep nb_blocks %rep nb_blocks
...@@ -651,7 +651,7 @@ cglobal hevc_put_unweighted_pred_ %+ %2 %+ _ %+ %3, 5, 5, 4, dst, dststride, src ...@@ -651,7 +651,7 @@ cglobal hevc_put_unweighted_pred_ %+ %2 %+ _ %+ %3, 5, 5, 4, dst, dststride, src
%define STORE_HALF movd %define STORE_HALF movd
%endif %endif
.loop .loop:
%assign i 0 %assign i 0
%rep (%2 + 7) / 8 %rep (%2 + 7) / 8
...@@ -772,7 +772,7 @@ cglobal hevc_put_weighted_pred_ %+ %2 %+ _ %+ %3, 8, 8, 8, denom, weight0, offse ...@@ -772,7 +772,7 @@ cglobal hevc_put_weighted_pred_ %+ %2 %+ _ %+ %3, 8, 8, 8, denom, weight0, offse
SPLATD m3 SPLATD m3
%endif %endif
.loop .loop:
%assign i 0 %assign i 0
%rep (%2 + 3) / 4 %rep (%2 + 3) / 4
......
...@@ -56,7 +56,7 @@ cglobal v210_planar_pack_10, 5, 5, 4+cpuflag(avx2), y, u, v, dst, width ...@@ -56,7 +56,7 @@ cglobal v210_planar_pack_10, 5, 5, 4+cpuflag(avx2), y, u, v, dst, width
mova m2, [v210_enc_min_10] mova m2, [v210_enc_min_10]
mova m3, [v210_enc_max_10] mova m3, [v210_enc_max_10]
.loop .loop:
movu xm0, [yq+2*widthq] movu xm0, [yq+2*widthq]
%if cpuflag(avx2) %if cpuflag(avx2)
vinserti128 m0, m0, [yq+2*widthq+12], 1 vinserti128 m0, m0, [yq+2*widthq+12], 1
...@@ -112,7 +112,7 @@ cglobal v210_planar_pack_8, 5, 5, 7, y, u, v, dst, width ...@@ -112,7 +112,7 @@ cglobal v210_planar_pack_8, 5, 5, 7, y, u, v, dst, width
mova m5, [v210_enc_max_8] mova m5, [v210_enc_max_8]
pxor m6, m6 pxor m6, m6
.loop .loop:
movu xm1, [yq+2*widthq] movu xm1, [yq+2*widthq]
%if cpuflag(avx2) %if cpuflag(avx2)
vinserti128 m1, m1, [yq+2*widthq+12], 1 vinserti128 m1, m1, [yq+2*widthq+12], 1
......
...@@ -37,7 +37,7 @@ cglobal lowpass_line, 5, 5, 7 ...@@ -37,7 +37,7 @@ cglobal lowpass_line, 5, 5, 7
pcmpeqb m6, m6 pcmpeqb m6, m6
.loop .loop:
mova m0, [r3+r1] mova m0, [r3+r1]
mova m1, [r3+r1+mmsize] mova m1, [r3+r1+mmsize]
pavgb m0, [r4+r1] pavgb m0, [r4+r1]
......
...@@ -28,10 +28,10 @@ cglobal image_copy_plane_uc_from, 6, 7, 4, dst, dst_linesize, src, src_linesize, ...@@ -28,10 +28,10 @@ cglobal image_copy_plane_uc_from, 6, 7, 4, dst, dst_linesize, src, src_linesize,
add srcq, bwq add srcq, bwq
neg bwq neg bwq
.row_start .row_start:
mov rowposq, bwq mov rowposq, bwq
.loop .loop:
movntdqa m0, [srcq + rowposq + 0 * mmsize] movntdqa m0, [srcq + rowposq + 0 * mmsize]
movntdqa m1, [srcq + rowposq + 1 * mmsize] movntdqa m1, [srcq + rowposq + 1 * mmsize]
movntdqa m2, [srcq + rowposq + 2 * mmsize] movntdqa m2, [srcq + rowposq + 2 * mmsize]
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment