/*
 *  Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/row.h"

#if !defined(LIBYUV_DISABLE_X86) && defined(_M_X64) && \
    defined(_MSC_VER) && !defined(__clang__)
#include <emmintrin.h>
#include <tmmintrin.h>  // For _mm_maddubs_epi16
#endif

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

// This module is for Visual C 32/64 bit and clangcl 32 bit
#if !defined(LIBYUV_DISABLE_X86) && \
    (defined(_M_IX86) || (defined(_M_X64) && !defined(__clang__)))

// 64 bit
#if defined(_M_X64)

// Read 4 UV from 422, upsample to 8 UV.
#define READYUV422                                                             \
    xmm0 = _mm_cvtsi32_si128(*(uint32*)u_buf);                                 \
    xmm1 = _mm_cvtsi32_si128(*(uint32*)(u_buf + offset));                      \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);                                      \
    xmm0 = _mm_unpacklo_epi16(xmm0, xmm0);                                     \
    u_buf += 4;                                                                \
    xmm4 = _mm_loadl_epi64((__m128i*)y_buf);                                   \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm4);                                      \
    y_buf += 8;

// Read 4 UV from 422, upsample to 8 UV.  With 8 Alpha.
#define READYUVA422                                                            \
    xmm0 = _mm_cvtsi32_si128(*(uint32*)u_buf);                                 \
    xmm1 = _mm_cvtsi32_si128(*(uint32*)(u_buf + offset));                      \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);                                      \
    xmm0 = _mm_unpacklo_epi16(xmm0, xmm0);                                     \
    u_buf += 4;                                                                \
    xmm4 = _mm_loadl_epi64((__m128i*)y_buf);                                   \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm4);                                      \
    y_buf += 8;                                                                \
    xmm5 = _mm_loadl_epi64((__m128i*)a_buf);                                   \
    a_buf += 8;

// Convert 8 pixels: 8 UV and 8 Y.
#define YUVTORGB(yuvconstants)                                                 \
    xmm1 = _mm_loadu_si128(&xmm0);                                             \
    xmm2 = _mm_loadu_si128(&xmm0);                                             \
    xmm0 = _mm_maddubs_epi16(xmm0, *(__m128i*)yuvconstants->kUVToB);           \
    xmm1 = _mm_maddubs_epi16(xmm1, *(__m128i*)yuvconstants->kUVToG);           \
    xmm2 = _mm_maddubs_epi16(xmm2, *(__m128i*)yuvconstants->kUVToR);           \
    xmm0 = _mm_sub_epi16(*(__m128i*)yuvconstants->kUVBiasB, xmm0);             \
    xmm1 = _mm_sub_epi16(*(__m128i*)yuvconstants->kUVBiasG, xmm1);             \
    xmm2 = _mm_sub_epi16(*(__m128i*)yuvconstants->kUVBiasR, xmm2);             \
    xmm4 = _mm_mulhi_epu16(xmm4, *(__m128i*)yuvconstants->kYToRgb);            \
    xmm0 = _mm_adds_epi16(xmm0, xmm4);                                         \
    xmm1 = _mm_adds_epi16(xmm1, xmm4);                                         \
    xmm2 = _mm_adds_epi16(xmm2, xmm4);                                         \
    xmm0 = _mm_srai_epi16(xmm0, 6);                                            \
    xmm1 = _mm_srai_epi16(xmm1, 6);                                            \
    xmm2 = _mm_srai_epi16(xmm2, 6);                                            \
    xmm0 = _mm_packus_epi16(xmm0, xmm0);                                       \
    xmm1 = _mm_packus_epi16(xmm1, xmm1);                                       \
    xmm2 = _mm_packus_epi16(xmm2, xmm2);
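
// A scalar sketch of the fixed point math in YUVTORGB above, shown for one
// pixel's B channel (illustrative only, not used by the build). ub, vb, bb
// and yg stand in for the packed kUVToB, kUVBiasB and kYToRgb entries.
static __inline uint8 YuvToB_Sketch(uint8 y, uint8 u, uint8 v,
                                    int ub, int vb, int bb, int yg) {
  // Unpacking y with itself gives y * 0x0101 in a 16 bit lane; mulhi keeps
  // the top 16 bits of the product, i.e. (y * 0x0101 * yg) >> 16.
  int y1 = (int)(((uint32)y * 0x0101 * (uint32)yg) >> 16);
  // bias - maddubs(uv), plus the scaled Y, then an arithmetic >> 6 and a
  // saturate to [0, 255], matching the sub/adds/srai/packus sequence above.
  int b = (bb - (u * ub + v * vb) + y1) >> 6;
  return (uint8)(b < 0 ? 0 : (b > 255 ? 255 : b));
}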

// Store 8 ARGB values.
#define STOREARGB                                                              \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);                                      \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm5);                                      \
    xmm1 = _mm_loadu_si128(&xmm0);                                             \
    xmm0 = _mm_unpacklo_epi16(xmm0, xmm2);                                     \
    xmm1 = _mm_unpackhi_epi16(xmm1, xmm2);                                     \
    _mm_storeu_si128((__m128i *)dst_argb, xmm0);                               \
    _mm_storeu_si128((__m128i *)(dst_argb + 16), xmm1);                        \
    dst_argb += 32;

// Store 8 ABGR values.
#define STOREABGR                                                              \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);                                      \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm5);                                      \
    xmm1 = _mm_loadu_si128(&xmm2);                                             \
    xmm2 = _mm_unpacklo_epi16(xmm2, xmm0);                                     \
    xmm1 = _mm_unpackhi_epi16(xmm1, xmm0);                                     \
    _mm_storeu_si128((__m128i *)dst_abgr, xmm2);                               \
    _mm_storeu_si128((__m128i *)(dst_abgr + 16), xmm1);                        \
    dst_abgr += 32;

#if defined(HAS_I422TOARGBROW_SSSE3)
void I422ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         struct YuvConstants* yuvconstants,
                         int width) {
  __m128i xmm0, xmm1, xmm2, xmm4;
  const __m128i xmm5 = _mm_set1_epi8(-1);
  const ptrdiff_t offset = (uint8*)v_buf - (uint8*)u_buf;
  while (width > 0) {
    READYUV422
    YUVTORGB(yuvconstants)
    STOREARGB
    width -= 8;
  }
}
#endif
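
// Usage sketch (illustrative): driving the row function above over an I422
// frame. The constants table is whatever YuvConstants the caller selected;
// width is assumed to be a multiple of 8 here, and the higher level
// converters handle any remainder with a C row.
//
//   for (int y = 0; y < height; ++y) {
//     I422ToARGBRow_SSSE3(src_y, src_u, src_v, dst_argb, &yuv_constants,
//                         width);
//     src_y += stride_y;
//     if (y & 1) {
//       src_u += stride_u;
//       src_v += stride_v;
//     }
//     dst_argb += stride_argb;
//   }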

#if defined(HAS_I422TOABGRROW_SSSE3)
void I422ToABGRRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_abgr,
                         struct YuvConstants* yuvconstants,
                         int width) {
  __m128i xmm0, xmm1, xmm2, xmm4;
  const __m128i xmm5 = _mm_set1_epi8(-1);
  const ptrdiff_t offset = (uint8*)v_buf - (uint8*)u_buf;
  while (width > 0) {
    READYUV422
    YUVTORGB(yuvconstants)
    STOREABGR
    width -= 8;
  }
}
#endif

#if defined(HAS_I422ALPHATOARGBROW_SSSE3)
void I422AlphaToARGBRow_SSSE3(const uint8* y_buf,
                              const uint8* u_buf,
                              const uint8* v_buf,
                              const uint8* a_buf,
                              uint8* dst_argb,
                              struct YuvConstants* yuvconstants,
                              int width) {
  __m128i xmm0, xmm1, xmm2, xmm4, xmm5;
  const ptrdiff_t offset = (uint8*)v_buf - (uint8*)u_buf;
  while (width > 0) {
    READYUVA422
    YUVTORGB(yuvconstants)
    STOREARGB
    width -= 8;
  }
}
#endif

#if defined(HAS_I422ALPHATOABGRROW_SSSE3)
void I422AlphaToABGRRow_SSSE3(const uint8* y_buf,
                              const uint8* u_buf,
                              const uint8* v_buf,
                              const uint8* a_buf,
                              uint8* dst_abgr,
                              struct YuvConstants* yuvconstants,
                              int width) {
  __m128i xmm0, xmm1, xmm2, xmm4, xmm5;
  const ptrdiff_t offset = (uint8*)v_buf - (uint8*)u_buf;
  while (width > 0) {
    READYUVA422
    YUVTORGB(yuvconstants)
    STOREABGR
    width -= 8;
  }
}
#endif

// 32 bit
#else  // defined(_M_X64)
#ifdef HAS_ARGBTOYROW_SSSE3

// Constants for ARGB.
static const vec8 kARGBToY = {
  13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0
};

// JPeg full range.
static const vec8 kARGBToYJ = {
  15, 75, 38, 0, 15, 75, 38, 0, 15, 75, 38, 0, 15, 75, 38, 0
};

static const vec8 kARGBToU = {
  112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0
};

static const vec8 kARGBToUJ = {
  127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0
};

static const vec8 kARGBToV = {
  -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0,
};

static const vec8 kARGBToVJ = {
  -20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0
};

// vpshufb for vphaddw + vpackuswb packed to shorts.
static const lvec8 kShufARGBToUV_AVX = {
  0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15,
  0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15
};

// Constants for BGRA.
static const vec8 kBGRAToY = {
  0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13
};

static const vec8 kBGRAToU = {
  0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112
};

static const vec8 kBGRAToV = {
  0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18
};

// Constants for ABGR.
static const vec8 kABGRToY = {
  33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0
};

static const vec8 kABGRToU = {
  -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0
};

static const vec8 kABGRToV = {
  112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0
};

// Constants for RGBA.
static const vec8 kRGBAToY = {
  0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33
};

static const vec8 kRGBAToU = {
  0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38
};

static const vec8 kRGBAToV = {
  0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112
};

static const uvec8 kAddY16 = {
  16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u
};

// 7 bit fixed point 0.5.
static const vec16 kAddYJ64 = {
  64, 64, 64, 64, 64, 64, 64, 64
};

static const uvec8 kAddUV128 = {
  128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u,
  128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u
};

static const uvec16 kAddUVJ128 = {
  0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u
};
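
// A scalar sketch of how the 7 bit coefficients above are applied for the
// studio swing (BT.601) Y path (illustrative only, not used by the build).
static __inline uint8 ARGBToY_Sketch(uint8 b, uint8 g, uint8 r) {
  // kARGBToY is {13, 65, 33, 0} in B,G,R,A memory order and kAddY16 adds the
  // +16 offset after the 7 bit shift. 13 + 65 + 33 = 111, so the result
  // lands in roughly [16, 237] and needs no extra clamp.
  return (uint8)(((13 * b + 65 * g + 33 * r) >> 7) + 16);
}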

// Shuffle table for converting RGB24 to ARGB.
static const uvec8 kShuffleMaskRGB24ToARGB = {
  0u, 1u, 2u, 12u, 3u, 4u, 5u, 13u, 6u, 7u, 8u, 14u, 9u, 10u, 11u, 15u
};

// Shuffle table for converting RAW to ARGB.
static const uvec8 kShuffleMaskRAWToARGB = {
  2u, 1u, 0u, 12u, 5u, 4u, 3u, 13u, 8u, 7u, 6u, 14u, 11u, 10u, 9u, 15u
};
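
// Scalar model of pshufb as used with the shuffle tables in this file
// (illustrative only): each index byte selects a source byte, and an index
// with the high bit set (128u) zeroes the destination byte.
static __inline void PShufB_Sketch(const uint8 shuf[16], const uint8 src[16],
                                   uint8 dst[16]) {
  for (int i = 0; i < 16; ++i) {
    dst[i] = (shuf[i] & 0x80) ? 0 : src[shuf[i] & 15];
  }
}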

// Shuffle table for converting ARGB to RGB24.
static const uvec8 kShuffleMaskARGBToRGB24 = {
  0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 10u, 12u, 13u, 14u, 128u, 128u, 128u, 128u
};

// Shuffle table for converting ARGB to RAW.
static const uvec8 kShuffleMaskARGBToRAW = {
  2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 8u, 14u, 13u, 12u, 128u, 128u, 128u, 128u
};

// Shuffle table for converting ARGBToRGB24 for I422ToRGB24.  First 8 + next 4
static const uvec8 kShuffleMaskARGBToRGB24_0 = {
  0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 128u, 128u, 128u, 128u, 10u, 12u, 13u, 14u
};

// Shuffle table for converting ARGBToRAW for I422ToRAW.  First 8 + next 4
static const uvec8 kShuffleMaskARGBToRAW_0 = {
  2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 128u, 128u, 128u, 128u, 8u, 14u, 13u, 12u
};

// YUY2 shuf 16 Y to 32 Y.
static const lvec8 kShuffleYUY2Y = {
  0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14,
  0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14
};

// YUY2 shuf 8 UV to 16 UV.
static const lvec8 kShuffleYUY2UV = {
  1, 3, 1, 3, 5, 7, 5, 7, 9, 11, 9, 11, 13, 15, 13, 15,
  1, 3, 1, 3, 5, 7, 5, 7, 9, 11, 9, 11, 13, 15, 13, 15
};

// UYVY shuf 16 Y to 32 Y.
static const lvec8 kShuffleUYVYY = {
  1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15,
  1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15
};

// UYVY shuf 8 UV to 16 UV.
static const lvec8 kShuffleUYVYUV = {
  0, 2, 0, 2, 4, 6, 4, 6, 8, 10, 8, 10, 12, 14, 12, 14,
  0, 2, 0, 2, 4, 6, 4, 6, 8, 10, 8, 10, 12, 14, 12, 14
};

// NV21 shuf 8 VU to 16 UV.
static const lvec8 kShuffleNV21 = {
  1, 0, 1, 0, 3, 2, 3, 2, 5, 4, 5, 4, 7, 6, 7, 6,
  1, 0, 1, 0, 3, 2, 3, 2, 5, 4, 5, 4, 7, 6, 7, 6,
};

// Duplicates gray value 3 times and fills in alpha opaque.
__declspec(naked)
void J400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int pix) {
  __asm {
    mov        eax, [esp + 4]        // src_y
    mov        edx, [esp + 8]        // dst_argb
    mov        ecx, [esp + 12]       // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0xff000000
    pslld      xmm5, 24


  convertloop:
    movq       xmm0, qword ptr [eax]
    lea        eax,  [eax + 8]
    punpcklbw  xmm0, xmm0
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm0
    punpckhwd  xmm1, xmm1
    por        xmm0, xmm5
    por        xmm1, xmm5
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
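
// Scalar equivalent of the expansion above (illustrative only): each gray
// byte becomes an opaque ARGB pixel with B = G = R = gray.
static __inline uint32 J400ToARGBPixel_Sketch(uint8 gray) {
  return 0xff000000u | ((uint32)gray << 16) | ((uint32)gray << 8) | gray;
}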

#ifdef HAS_J400TOARGBROW_AVX2
// Duplicates gray value 3 times and fills in alpha opaque.
__declspec(naked)
void J400ToARGBRow_AVX2(const uint8* src_y, uint8* dst_argb, int pix) {
  __asm {
    mov         eax, [esp + 4]        // src_y
    mov         edx, [esp + 8]        // dst_argb
    mov         ecx, [esp + 12]       // pix
    vpcmpeqb    ymm5, ymm5, ymm5      // generate mask 0xff000000
    vpslld      ymm5, ymm5, 24

  convertloop:
    vmovdqu     xmm0, [eax]
    lea         eax,  [eax + 16]
    vpermq      ymm0, ymm0, 0xd8
    vpunpcklbw  ymm0, ymm0, ymm0
    vpermq      ymm0, ymm0, 0xd8
    vpunpckhwd  ymm1, ymm0, ymm0
    vpunpcklwd  ymm0, ymm0, ymm0
    vpor        ymm0, ymm0, ymm5
    vpor        ymm1, ymm1, ymm5
    vmovdqu     [edx], ymm0
    vmovdqu     [edx + 32], ymm1
    lea         edx, [edx + 64]
    sub         ecx, 16
    jg          convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_J400TOARGBROW_AVX2

__declspec(naked)
void RGB24ToARGBRow_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_rgb24
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm5, xmm5       // generate mask 0xff000000
    pslld     xmm5, 24
    movdqa    xmm4, xmmword ptr kShuffleMaskRGB24ToARGB

 convertloop:
    movdqu    xmm0, [eax]
    movdqu    xmm1, [eax + 16]
    movdqu    xmm3, [eax + 32]
    lea       eax, [eax + 48]
    movdqa    xmm2, xmm3
    palignr   xmm2, xmm1, 8    // xmm2 = { xmm3[0:3] xmm1[8:15] }
    pshufb    xmm2, xmm4
    por       xmm2, xmm5
    palignr   xmm1, xmm0, 12   // xmm1 = { xmm1[0:7] xmm0[12:15] }
    pshufb    xmm0, xmm4
    movdqu    [edx + 32], xmm2
    por       xmm0, xmm5
    pshufb    xmm1, xmm4
    movdqu    [edx], xmm0
    por       xmm1, xmm5
    palignr   xmm3, xmm3, 4    // xmm3 = { xmm3[4:15] }
    pshufb    xmm3, xmm4
    movdqu    [edx + 16], xmm1
    por       xmm3, xmm5
    movdqu    [edx + 48], xmm3
    lea       edx, [edx + 64]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

__declspec(naked)
void RAWToARGBRow_SSSE3(const uint8* src_raw, uint8* dst_argb,
                        int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_raw
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm5, xmm5       // generate mask 0xff000000
    pslld     xmm5, 24
    movdqa    xmm4, xmmword ptr kShuffleMaskRAWToARGB

 convertloop:
    movdqu    xmm0, [eax]
    movdqu    xmm1, [eax + 16]
    movdqu    xmm3, [eax + 32]
    lea       eax, [eax + 48]
    movdqa    xmm2, xmm3
    palignr   xmm2, xmm1, 8    // xmm2 = { xmm3[0:3] xmm1[8:15] }
    pshufb    xmm2, xmm4
    por       xmm2, xmm5
    palignr   xmm1, xmm0, 12   // xmm1 = { xmm1[0:7] xmm0[12:15] }
    pshufb    xmm0, xmm4
    movdqu    [edx + 32], xmm2
    por       xmm0, xmm5
    pshufb    xmm1, xmm4
    movdqu    [edx], xmm0
    por       xmm1, xmm5
    palignr   xmm3, xmm3, 4    // xmm3 = { xmm3[4:15] }
    pshufb    xmm3, xmm4
    movdqu    [edx + 16], xmm1
    por       xmm3, xmm5
    movdqu    [edx + 48], xmm3
    lea       edx, [edx + 64]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

// pmul method to replicate bits.
// Math to replicate bits:
// (v << 8) | (v << 3)
// v * 256 + v * 8
// v * (256 + 8)
// G shift of 5 is incorporated, so shift is 5 + 8 and 5 + 3
// 20 instructions.
__declspec(naked)
void RGB565ToARGBRow_SSE2(const uint8* src_rgb565, uint8* dst_argb,
                          int pix) {
  __asm {
    mov       eax, 0x01080108  // generate multiplier to repeat 5 bits
    movd      xmm5, eax
    pshufd    xmm5, xmm5, 0
    mov       eax, 0x20802080  // multiplier shift by 5 and then repeat 6 bits
    movd      xmm6, eax
    pshufd    xmm6, xmm6, 0
    pcmpeqb   xmm3, xmm3       // generate mask 0xf800f800 for Red
    psllw     xmm3, 11
    pcmpeqb   xmm4, xmm4       // generate mask 0x07e007e0 for Green
    psllw     xmm4, 10
    psrlw     xmm4, 5
    pcmpeqb   xmm7, xmm7       // generate mask 0xff00ff00 for Alpha
    psllw     xmm7, 8

    mov       eax, [esp + 4]   // src_rgb565
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    sub       edx, eax
    sub       edx, eax

 convertloop:
    movdqu    xmm0, [eax]   // fetch 8 pixels of bgr565
    movdqa    xmm1, xmm0
    movdqa    xmm2, xmm0
    pand      xmm1, xmm3    // R in upper 5 bits
    psllw     xmm2, 11      // B in upper 5 bits
    pmulhuw   xmm1, xmm5    // * (256 + 8)
    pmulhuw   xmm2, xmm5    // * (256 + 8)
    psllw     xmm1, 8
    por       xmm1, xmm2    // RB
    pand      xmm0, xmm4    // G in middle 6 bits
    pmulhuw   xmm0, xmm6    // << 5 * (256 + 4)
    por       xmm0, xmm7    // AG
    movdqa    xmm2, xmm1
    punpcklbw xmm1, xmm0
    punpckhbw xmm2, xmm0
    movdqu    [eax * 2 + edx], xmm1  // store 4 pixels of ARGB
    movdqu    [eax * 2 + edx + 16], xmm2  // store next 4 pixels of ARGB
    lea       eax, [eax + 16]
    sub       ecx, 8
    jg        convertloop
    ret
  }
}
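
// A scalar sketch of the pmulhuw replication above (illustrative only, not
// used by the build). With a 5 bit field already shifted to bits 11..15 of a
// 16 bit lane, the high half of lane * 0x0108 is (v5 << 3) | (v5 >> 2): the
// 5 bit value replicated to 8 bits.
static __inline uint8 Replicate5To8_Sketch(uint16 lane) {
  return (uint8)(((uint32)lane * 0x0108u) >> 16);
}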

#ifdef HAS_RGB565TOARGBROW_AVX2
// pmul method to replicate bits.
// Math to replicate bits:
// (v << 8) | (v << 3)
// v * 256 + v * 8
// v * (256 + 8)
// G shift of 5 is incorporated, so shift is 5 + 8 and 5 + 3
__declspec(naked)
void RGB565ToARGBRow_AVX2(const uint8* src_rgb565, uint8* dst_argb,
                          int pix) {
  __asm {
    mov        eax, 0x01080108  // generate multiplier to repeat 5 bits
    vmovd      xmm5, eax
    vbroadcastss ymm5, xmm5
    mov        eax, 0x20802080  // multiplier shift by 5 and then repeat 6 bits
    movd       xmm6, eax
    vbroadcastss ymm6, xmm6
    vpcmpeqb   ymm3, ymm3, ymm3       // generate mask 0xf800f800 for Red
    vpsllw     ymm3, ymm3, 11
    vpcmpeqb   ymm4, ymm4, ymm4       // generate mask 0x07e007e0 for Green
    vpsllw     ymm4, ymm4, 10
    vpsrlw     ymm4, ymm4, 5
    vpcmpeqb   ymm7, ymm7, ymm7       // generate mask 0xff00ff00 for Alpha
    vpsllw     ymm7, ymm7, 8

    mov        eax, [esp + 4]   // src_rgb565
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // pix
    sub        edx, eax
    sub        edx, eax

 convertloop:
    vmovdqu    ymm0, [eax]   // fetch 16 pixels of bgr565
    vpand      ymm1, ymm0, ymm3    // R in upper 5 bits
    vpsllw     ymm2, ymm0, 11      // B in upper 5 bits
    vpmulhuw   ymm1, ymm1, ymm5    // * (256 + 8)
    vpmulhuw   ymm2, ymm2, ymm5    // * (256 + 8)
    vpsllw     ymm1, ymm1, 8
    vpor       ymm1, ymm1, ymm2    // RB
    vpand      ymm0, ymm0, ymm4    // G in middle 6 bits
    vpmulhuw   ymm0, ymm0, ymm6    // << 5 * (256 + 4)
    vpor       ymm0, ymm0, ymm7    // AG
    vpermq     ymm0, ymm0, 0xd8    // mutate for unpack
    vpermq     ymm1, ymm1, 0xd8
    vpunpckhbw ymm2, ymm1, ymm0
    vpunpcklbw ymm1, ymm1, ymm0
    vmovdqu    [eax * 2 + edx], ymm1  // store 8 pixels of ARGB
    vmovdqu    [eax * 2 + edx + 32], ymm2  // store next 8 pixels of ARGB
    lea       eax, [eax + 32]
    sub       ecx, 16
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_RGB565TOARGBROW_AVX2

#ifdef HAS_ARGB1555TOARGBROW_AVX2
__declspec(naked)
void ARGB1555ToARGBRow_AVX2(const uint8* src_argb1555, uint8* dst_argb,
                            int pix) {
  __asm {
    mov        eax, 0x01080108  // generate multiplier to repeat 5 bits
    vmovd      xmm5, eax
    vbroadcastss ymm5, xmm5
    mov        eax, 0x42004200  // multiplier shift by 6 and then repeat 5 bits
    movd       xmm6, eax
    vbroadcastss ymm6, xmm6
    vpcmpeqb   ymm3, ymm3, ymm3 // generate mask 0xf800f800 for Red
    vpsllw     ymm3, ymm3, 11
    vpsrlw     ymm4, ymm3, 6    // generate mask 0x03e003e0 for Green
    vpcmpeqb   ymm7, ymm7, ymm7 // generate mask 0xff00ff00 for Alpha
    vpsllw     ymm7, ymm7, 8

    mov        eax,  [esp + 4]   // src_argb1555
    mov        edx,  [esp + 8]   // dst_argb
    mov        ecx,  [esp + 12]  // pix
    sub        edx,  eax
    sub        edx,  eax

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 16 pixels of 1555
    vpsllw     ymm1, ymm0, 1       // R in upper 5 bits
    vpsllw     ymm2, ymm0, 11      // B in upper 5 bits
    vpand      ymm1, ymm1, ymm3
    vpmulhuw   ymm2, ymm2, ymm5    // * (256 + 8)
    vpmulhuw   ymm1, ymm1, ymm5    // * (256 + 8)
    vpsllw     ymm1, ymm1, 8
    vpor       ymm1, ymm1, ymm2    // RB
    vpsraw     ymm2, ymm0, 8       // A
    vpand      ymm0, ymm0, ymm4    // G in middle 5 bits
    vpmulhuw   ymm0, ymm0, ymm6    // << 6 * (256 + 8)
    vpand      ymm2, ymm2, ymm7
    vpor       ymm0, ymm0, ymm2    // AG
    vpermq     ymm0, ymm0, 0xd8    // mutate for unpack
    vpermq     ymm1, ymm1, 0xd8
    vpunpckhbw ymm2, ymm1, ymm0
    vpunpcklbw ymm1, ymm1, ymm0
    vmovdqu    [eax * 2 + edx], ymm1  // store 8 pixels of ARGB
    vmovdqu    [eax * 2 + edx + 32], ymm2  // store next 8 pixels of ARGB
    lea       eax, [eax + 32]
    sub       ecx, 16
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGB1555TOARGBROW_AVX2

#ifdef HAS_ARGB4444TOARGBROW_AVX2
__declspec(naked)
void ARGB4444ToARGBRow_AVX2(const uint8* src_argb4444, uint8* dst_argb,
                            int pix) {
  __asm {
    mov       eax,  0x0f0f0f0f  // generate mask 0x0f0f0f0f
    vmovd     xmm4, eax
    vbroadcastss ymm4, xmm4
    vpslld    ymm5, ymm4, 4     // 0xf0f0f0f0 for high nibbles
    mov       eax,  [esp + 4]   // src_argb4444
    mov       edx,  [esp + 8]   // dst_argb
    mov       ecx,  [esp + 12]  // pix
    sub       edx,  eax
    sub       edx,  eax

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 16 pixels of bgra4444
    vpand      ymm2, ymm0, ymm5    // mask high nibbles
    vpand      ymm0, ymm0, ymm4    // mask low nibbles
    vpsrlw     ymm3, ymm2, 4
    vpsllw     ymm1, ymm0, 4
    vpor       ymm2, ymm2, ymm3
    vpor       ymm0, ymm0, ymm1
    vpermq     ymm0, ymm0, 0xd8    // mutate for unpack
    vpermq     ymm2, ymm2, 0xd8
    vpunpckhbw ymm1, ymm0, ymm2
    vpunpcklbw ymm0, ymm0, ymm2
    vmovdqu    [eax * 2 + edx], ymm0  // store 8 pixels of ARGB
    vmovdqu    [eax * 2 + edx + 32], ymm1  // store next 8 pixels of ARGB
    lea       eax, [eax + 32]
    sub       ecx, 16
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGB4444TOARGBROW_AVX2

// 24 instructions
__declspec(naked)
void ARGB1555ToARGBRow_SSE2(const uint8* src_argb1555, uint8* dst_argb,
                            int pix) {
  __asm {
    mov       eax, 0x01080108  // generate multiplier to repeat 5 bits
    movd      xmm5, eax
    pshufd    xmm5, xmm5, 0
    mov       eax, 0x42004200  // multiplier shift by 6 and then repeat 5 bits
    movd      xmm6, eax
    pshufd    xmm6, xmm6, 0
    pcmpeqb   xmm3, xmm3       // generate mask 0xf800f800 for Red
    psllw     xmm3, 11
    movdqa    xmm4, xmm3       // generate mask 0x03e003e0 for Green
    psrlw     xmm4, 6
    pcmpeqb   xmm7, xmm7       // generate mask 0xff00ff00 for Alpha
    psllw     xmm7, 8

    mov       eax, [esp + 4]   // src_argb1555
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    sub       edx, eax
    sub       edx, eax

 convertloop:
    movdqu    xmm0, [eax]   // fetch 8 pixels of 1555
    movdqa    xmm1, xmm0
    movdqa    xmm2, xmm0
    psllw     xmm1, 1       // R in upper 5 bits
    psllw     xmm2, 11      // B in upper 5 bits
    pand      xmm1, xmm3
    pmulhuw   xmm2, xmm5    // * (256 + 8)
    pmulhuw   xmm1, xmm5    // * (256 + 8)
    psllw     xmm1, 8
    por       xmm1, xmm2    // RB
    movdqa    xmm2, xmm0
    pand      xmm0, xmm4    // G in middle 5 bits
    psraw     xmm2, 8       // A
    pmulhuw   xmm0, xmm6    // << 6 * (256 + 8)
    pand      xmm2, xmm7
    por       xmm0, xmm2    // AG
    movdqa    xmm2, xmm1
    punpcklbw xmm1, xmm0
    punpckhbw xmm2, xmm0
    movdqu    [eax * 2 + edx], xmm1  // store 4 pixels of ARGB
    movdqu    [eax * 2 + edx + 16], xmm2  // store next 4 pixels of ARGB
    lea       eax, [eax + 16]
    sub       ecx, 8
    jg        convertloop
    ret
  }
}

// 18 instructions.
__declspec(naked)
void ARGB4444ToARGBRow_SSE2(const uint8* src_argb4444, uint8* dst_argb,
                            int pix) {
  __asm {
    mov       eax, 0x0f0f0f0f  // generate mask 0x0f0f0f0f
    movd      xmm4, eax
    pshufd    xmm4, xmm4, 0
    movdqa    xmm5, xmm4       // 0xf0f0f0f0 for high nibbles
    pslld     xmm5, 4
    mov       eax, [esp + 4]   // src_argb4444
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    sub       edx, eax
    sub       edx, eax

 convertloop:
    movdqu    xmm0, [eax]   // fetch 8 pixels of bgra4444
    movdqa    xmm2, xmm0
    pand      xmm0, xmm4    // mask low nibbles
    pand      xmm2, xmm5    // mask high nibbles
    movdqa    xmm1, xmm0
    movdqa    xmm3, xmm2
    psllw     xmm1, 4
    psrlw     xmm3, 4
    por       xmm0, xmm1
    por       xmm2, xmm3
    movdqa    xmm1, xmm0
    punpcklbw xmm0, xmm2
    punpckhbw xmm1, xmm2
    movdqu    [eax * 2 + edx], xmm0  // store 4 pixels of ARGB
    movdqu    [eax * 2 + edx + 16], xmm1  // store next 4 pixels of ARGB
    lea       eax, [eax + 16]
    sub       ecx, 8
    jg        convertloop
    ret
  }
}

__declspec(naked)
void ARGBToRGB24Row_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    movdqa    xmm6, xmmword ptr kShuffleMaskARGBToRGB24

 convertloop:
    movdqu    xmm0, [eax]   // fetch 16 pixels of argb
    movdqu    xmm1, [eax + 16]
    movdqu    xmm2, [eax + 32]
    movdqu    xmm3, [eax + 48]
    lea       eax, [eax + 64]
    pshufb    xmm0, xmm6    // pack 16 bytes of ARGB to 12 bytes of RGB
    pshufb    xmm1, xmm6
    pshufb    xmm2, xmm6
    pshufb    xmm3, xmm6
    movdqa    xmm4, xmm1   // 4 bytes from 1 for 0
    psrldq    xmm1, 4      // 8 bytes from 1
    pslldq    xmm4, 12     // 4 bytes from 1 for 0
    movdqa    xmm5, xmm2   // 8 bytes from 2 for 1
    por       xmm0, xmm4   // 4 bytes from 1 for 0
    pslldq    xmm5, 8      // 8 bytes from 2 for 1
    movdqu    [edx], xmm0  // store 0
    por       xmm1, xmm5   // 8 bytes from 2 for 1
    psrldq    xmm2, 8      // 4 bytes from 2
    pslldq    xmm3, 4      // 12 bytes from 3 for 2
    por       xmm2, xmm3   // 12 bytes from 3 for 2
    movdqu    [edx + 16], xmm1   // store 1
    movdqu    [edx + 32], xmm2   // store 2
    lea       edx, [edx + 48]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

__declspec(naked)
void ARGBToRAWRow_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    movdqa    xmm6, xmmword ptr kShuffleMaskARGBToRAW

 convertloop:
    movdqu    xmm0, [eax]   // fetch 16 pixels of argb
    movdqu    xmm1, [eax + 16]
    movdqu    xmm2, [eax + 32]
    movdqu    xmm3, [eax + 48]
    lea       eax, [eax + 64]
    pshufb    xmm0, xmm6    // pack 16 bytes of ARGB to 12 bytes of RGB
    pshufb    xmm1, xmm6
    pshufb    xmm2, xmm6
    pshufb    xmm3, xmm6
    movdqa    xmm4, xmm1   // 4 bytes from 1 for 0
    psrldq    xmm1, 4      // 8 bytes from 1
    pslldq    xmm4, 12     // 4 bytes from 1 for 0
    movdqa    xmm5, xmm2   // 8 bytes from 2 for 1
    por       xmm0, xmm4   // 4 bytes from 1 for 0
    pslldq    xmm5, 8      // 8 bytes from 2 for 1
    movdqu    [edx], xmm0  // store 0
    por       xmm1, xmm5   // 8 bytes from 2 for 1
    psrldq    xmm2, 8      // 4 bytes from 2
    pslldq    xmm3, 4      // 12 bytes from 3 for 2
    por       xmm2, xmm3   // 12 bytes from 3 for 2
    movdqu    [edx + 16], xmm1   // store 1
    movdqu    [edx + 32], xmm2   // store 2
    lea       edx, [edx + 48]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

// 4 pixels
__declspec(naked)
void ARGBToRGB565Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm3, xmm3       // generate mask 0x0000001f
    psrld     xmm3, 27
    pcmpeqb   xmm4, xmm4       // generate mask 0x000007e0
    psrld     xmm4, 26
    pslld     xmm4, 5
    pcmpeqb   xmm5, xmm5       // generate mask 0xfffff800
    pslld     xmm5, 11

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    movdqa    xmm1, xmm0    // B
    movdqa    xmm2, xmm0    // G
    pslld     xmm0, 8       // R
    psrld     xmm1, 3       // B
    psrld     xmm2, 5       // G
    psrad     xmm0, 16      // R
    pand      xmm1, xmm3    // B
    pand      xmm2, xmm4    // G
    pand      xmm0, xmm5    // R
    por       xmm1, xmm2    // BG
    por       xmm0, xmm1    // BGR
    packssdw  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of RGB565
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}
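
// A scalar sketch of the pack above (illustrative only, not used by the
// build): keep the top 5/6/5 bits of B, G and R; alpha is dropped.
static __inline uint16 ARGBToRGB565Pixel_Sketch(uint32 argb) {
  uint32 b = (argb >> 3) & 0x1f;           // top 5 bits of B
  uint32 g = ((argb >> 10) & 0x3f) << 5;   // top 6 bits of G
  uint32 r = ((argb >> 19) & 0x1f) << 11;  // top 5 bits of R
  return (uint16)(b | g | r);
}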

// 4 pixels
__declspec(naked)
void ARGBToRGB565DitherRow_SSE2(const uint8* src_argb, uint8* dst_rgb,
                                const uint32 dither4, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    movd      xmm6, [esp + 12] // dither4
    mov       ecx, [esp + 16]  // pix
    punpcklbw xmm6, xmm6       // make dither 16 bytes
    movdqa    xmm7, xmm6
    punpcklwd xmm6, xmm6
    punpckhwd xmm7, xmm7
    pcmpeqb   xmm3, xmm3       // generate mask 0x0000001f
    psrld     xmm3, 27
    pcmpeqb   xmm4, xmm4       // generate mask 0x000007e0
    psrld     xmm4, 26
    pslld     xmm4, 5
    pcmpeqb   xmm5, xmm5       // generate mask 0xfffff800
    pslld     xmm5, 11

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    paddusb   xmm0, xmm6    // add dither
    movdqa    xmm1, xmm0    // B
    movdqa    xmm2, xmm0    // G
    pslld     xmm0, 8       // R
    psrld     xmm1, 3       // B
    psrld     xmm2, 5       // G
    psrad     xmm0, 16      // R
    pand      xmm1, xmm3    // B
    pand      xmm2, xmm4    // G
    pand      xmm0, xmm5    // R
    por       xmm1, xmm2    // BG
    por       xmm0, xmm1    // BGR
    packssdw  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of RGB565
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}

#ifdef HAS_ARGBTORGB565DITHERROW_AVX2
__declspec(naked)
void ARGBToRGB565DitherRow_AVX2(const uint8* src_argb, uint8* dst_rgb,
                                const uint32 dither4, int pix) {
  __asm {
    mov        eax, [esp + 4]      // src_argb
    mov        edx, [esp + 8]      // dst_rgb
    vbroadcastss xmm6, [esp + 12]  // dither4
    mov        ecx, [esp + 16]     // pix
    vpunpcklbw xmm6, xmm6, xmm6    // make dither 32 bytes
    vpermq     ymm6, ymm6, 0xd8
    vpunpcklwd ymm6, ymm6, ymm6
    vpcmpeqb   ymm3, ymm3, ymm3    // generate mask 0x0000001f
    vpsrld     ymm3, ymm3, 27
    vpcmpeqb   ymm4, ymm4, ymm4    // generate mask 0x000007e0
    vpsrld     ymm4, ymm4, 26
    vpslld     ymm4, ymm4, 5
    vpslld     ymm5, ymm3, 11      // generate mask 0x0000f800

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 8 pixels of argb
    vpaddusb   ymm0, ymm0, ymm6    // add dither
    vpsrld     ymm2, ymm0, 5       // G
    vpsrld     ymm1, ymm0, 3       // B
    vpsrld     ymm0, ymm0, 8       // R
    vpand      ymm2, ymm2, ymm4    // G
    vpand      ymm1, ymm1, ymm3    // B
    vpand      ymm0, ymm0, ymm5    // R
    vpor       ymm1, ymm1, ymm2    // BG
    vpor       ymm0, ymm0, ymm1    // BGR
    vpackusdw  ymm0, ymm0, ymm0
    vpermq     ymm0, ymm0, 0xd8
    lea        eax, [eax + 32]
    vmovdqu    [edx], xmm0         // store 8 pixels of RGB565
    lea        edx, [edx + 16]
    sub        ecx, 8
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTORGB565DITHERROW_AVX2

// TODO(fbarchard): Improve sign extension/packing.
__declspec(naked)
void ARGBToARGB1555Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm4, xmm4       // generate mask 0x0000001f
    psrld     xmm4, 27
    movdqa    xmm5, xmm4       // generate mask 0x000003e0
    pslld     xmm5, 5
    movdqa    xmm6, xmm4       // generate mask 0x00007c00
    pslld     xmm6, 10
    pcmpeqb   xmm7, xmm7       // generate mask 0xffff8000
    pslld     xmm7, 15

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    movdqa    xmm1, xmm0    // B
    movdqa    xmm2, xmm0    // G
    movdqa    xmm3, xmm0    // R
    psrad     xmm0, 16      // A
    psrld     xmm1, 3       // B
    psrld     xmm2, 6       // G
    psrld     xmm3, 9       // R
    pand      xmm0, xmm7    // A
    pand      xmm1, xmm4    // B
    pand      xmm2, xmm5    // G
    pand      xmm3, xmm6    // R
    por       xmm0, xmm1    // BA
    por       xmm2, xmm3    // GR
    por       xmm0, xmm2    // BGRA
    packssdw  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of ARGB1555
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}

__declspec(naked)
void ARGBToARGB4444Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm4, xmm4       // generate mask 0xf000f000
    psllw     xmm4, 12
    movdqa    xmm3, xmm4       // generate mask 0x00f000f0
    psrlw     xmm3, 8

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    movdqa    xmm1, xmm0
    pand      xmm0, xmm3    // low nibble
    pand      xmm1, xmm4    // high nibble
    psrld     xmm0, 4
    psrld     xmm1, 8
    por       xmm0, xmm1
    packuswb  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of ARGB4444
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}
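
// A scalar sketch of the pack above (illustrative only, not used by the
// build): keep the high nibble of each channel, including alpha.
static __inline uint16 ARGBToARGB4444Pixel_Sketch(uint32 argb) {
  return (uint16)(((argb >> 4) & 0x000f) |   // B
                  ((argb >> 8) & 0x00f0) |   // G
                  ((argb >> 12) & 0x0f00) |  // R
                  ((argb >> 16) & 0xf000));  // A
}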

#ifdef HAS_ARGBTORGB565ROW_AVX2
__declspec(naked)
void ARGBToRGB565Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov        eax, [esp + 4]      // src_argb
    mov        edx, [esp + 8]      // dst_rgb
    mov        ecx, [esp + 12]     // pix
    vpcmpeqb   ymm3, ymm3, ymm3    // generate mask 0x0000001f
    vpsrld     ymm3, ymm3, 27
    vpcmpeqb   ymm4, ymm4, ymm4    // generate mask 0x000007e0
    vpsrld     ymm4, ymm4, 26
    vpslld     ymm4, ymm4, 5
    vpslld     ymm5, ymm3, 11      // generate mask 0x0000f800

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 8 pixels of argb
    vpsrld     ymm2, ymm0, 5       // G
    vpsrld     ymm1, ymm0, 3       // B
    vpsrld     ymm0, ymm0, 8       // R
    vpand      ymm2, ymm2, ymm4    // G
    vpand      ymm1, ymm1, ymm3    // B
    vpand      ymm0, ymm0, ymm5    // R
    vpor       ymm1, ymm1, ymm2    // BG
    vpor       ymm0, ymm0, ymm1    // BGR
    vpackusdw  ymm0, ymm0, ymm0
    vpermq     ymm0, ymm0, 0xd8
    lea        eax, [eax + 32]
    vmovdqu    [edx], xmm0         // store 8 pixels of RGB565
    lea        edx, [edx + 16]
    sub        ecx, 8
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTORGB565ROW_AVX2

#ifdef HAS_ARGBTOARGB1555ROW_AVX2
__declspec(naked)
void ARGBToARGB1555Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov        eax, [esp + 4]      // src_argb
    mov        edx, [esp + 8]      // dst_rgb
    mov        ecx, [esp + 12]     // pix
    vpcmpeqb   ymm4, ymm4, ymm4
    vpsrld     ymm4, ymm4, 27      // generate mask 0x0000001f
    vpslld     ymm5, ymm4, 5       // generate mask 0x000003e0
    vpslld     ymm6, ymm4, 10      // generate mask 0x00007c00
    vpcmpeqb   ymm7, ymm7, ymm7    // generate mask 0xffff8000
    vpslld     ymm7, ymm7, 15

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 8 pixels of argb
    vpsrld     ymm3, ymm0, 9       // R
    vpsrld     ymm2, ymm0, 6       // G
    vpsrld     ymm1, ymm0, 3       // B
    vpsrad     ymm0, ymm0, 16      // A
    vpand      ymm3, ymm3, ymm6    // R
    vpand      ymm2, ymm2, ymm5    // G
    vpand      ymm1, ymm1, ymm4    // B
    vpand      ymm0, ymm0, ymm7    // A
    vpor       ymm0, ymm0, ymm1    // BA
    vpor       ymm2, ymm2, ymm3    // GR
    vpor       ymm0, ymm0, ymm2    // BGRA
    vpackssdw  ymm0, ymm0, ymm0
    vpermq     ymm0, ymm0, 0xd8
    lea        eax, [eax + 32]
    vmovdqu    [edx], xmm0         // store 8 pixels of ARGB1555
    lea        edx, [edx + 16]
    sub        ecx, 8
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTOARGB1555ROW_AVX2

#ifdef HAS_ARGBTOARGB4444ROW_AVX2
__declspec(naked)
void ARGBToARGB4444Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov        eax, [esp + 4]   // src_argb
    mov        edx, [esp + 8]   // dst_rgb
    mov        ecx, [esp + 12]  // pix
    vpcmpeqb   ymm4, ymm4, ymm4   // generate mask 0xf000f000
    vpsllw     ymm4, ymm4, 12
    vpsrlw     ymm3, ymm4, 8      // generate mask 0x00f000f0

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 8 pixels of argb
    vpand      ymm1, ymm0, ymm4    // high nibble
    vpand      ymm0, ymm0, ymm3    // low nibble
    vpsrld     ymm1, ymm1, 8
    vpsrld     ymm0, ymm0, 4
    vpor       ymm0, ymm0, ymm1
    vpackuswb  ymm0, ymm0, ymm0
    vpermq     ymm0, ymm0, 0xd8
    lea        eax, [eax + 32]
    vmovdqu    [edx], xmm0         // store 8 pixels of ARGB4444
    lea        edx, [edx + 16]
    sub        ecx, 8
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTOARGB4444ROW_AVX2

// Convert 16 ARGB pixels (64 bytes) to 16 Y values.
__declspec(naked)
void ARGBToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, xmmword ptr kARGBToY
    movdqa     xmm5, xmmword ptr kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

// Convert 16 ARGB pixels (64 bytes) to 16 YJ values.
// Same as ARGBToYRow but different coefficients, no add 16, but do rounding.
__declspec(naked)
void ARGBToYJRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, xmmword ptr kARGBToYJ
    movdqa     xmm5, xmmword ptr kAddYJ64

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    paddw      xmm0, xmm5  // Add .5 for rounding.
    paddw      xmm2, xmm5
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

#ifdef HAS_ARGBTOYROW_AVX2
// vpermd for vphaddw + vpackuswb vpermd.
static const lvec32 kPermdARGBToY_AVX = {
  0, 4, 1, 5, 2, 6, 3, 7
};

// Convert 32 ARGB pixels (128 bytes) to 32 Y values.
__declspec(naked)
void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    vbroadcastf128 ymm4, xmmword ptr kARGBToY
    vbroadcastf128 ymm5, xmmword ptr kAddY16
    vmovdqu    ymm6, ymmword ptr kPermdARGBToY_AVX

 convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vmovdqu    ymm2, [eax + 64]
    vmovdqu    ymm3, [eax + 96]
    vpmaddubsw ymm0, ymm0, ymm4
    vpmaddubsw ymm1, ymm1, ymm4
    vpmaddubsw ymm2, ymm2, ymm4
    vpmaddubsw ymm3, ymm3, ymm4
    lea        eax, [eax + 128]
    vphaddw    ymm0, ymm0, ymm1  // mutates.
    vphaddw    ymm2, ymm2, ymm3
    vpsrlw     ymm0, ymm0, 7
    vpsrlw     ymm2, ymm2, 7
    vpackuswb  ymm0, ymm0, ymm2  // mutates.
    vpermd     ymm0, ymm6, ymm0  // For vphaddw + vpackuswb mutation.
    vpaddb     ymm0, ymm0, ymm5  // add 16 for Y
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  //  HAS_ARGBTOYROW_AVX2

#ifdef HAS_ARGBTOYJROW_AVX2
// Convert 32 ARGB pixels (128 bytes) to 32 Y values.
__declspec(naked)
void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    vbroadcastf128 ymm4, xmmword ptr kARGBToYJ
    vbroadcastf128 ymm5, xmmword ptr kAddYJ64
    vmovdqu    ymm6, ymmword ptr kPermdARGBToY_AVX

 convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vmovdqu    ymm2, [eax + 64]
    vmovdqu    ymm3, [eax + 96]
    vpmaddubsw ymm0, ymm0, ymm4
    vpmaddubsw ymm1, ymm1, ymm4
    vpmaddubsw ymm2, ymm2, ymm4
    vpmaddubsw ymm3, ymm3, ymm4
    lea        eax, [eax + 128]
    vphaddw    ymm0, ymm0, ymm1  // mutates.
    vphaddw    ymm2, ymm2, ymm3
    vpaddw     ymm0, ymm0, ymm5  // Add .5 for rounding.
    vpaddw     ymm2, ymm2, ymm5
    vpsrlw     ymm0, ymm0, 7
    vpsrlw     ymm2, ymm2, 7
    vpackuswb  ymm0, ymm0, ymm2  // mutates.
    vpermd     ymm0, ymm6, ymm0  // For vphaddw + vpackuswb mutation.
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  //  HAS_ARGBTOYJROW_AVX2

__declspec(naked)
void BGRAToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, xmmword ptr kBGRAToY
    movdqa     xmm5, xmmword ptr kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void ABGRToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, xmmword ptr kABGRToY
    movdqa     xmm5, xmmword ptr kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void RGBAToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, xmmword ptr kRGBAToY
    movdqa     xmm5, xmmword ptr kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void ARGBToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // pix
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kARGBToV
    movdqa     xmm7, xmmword ptr kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
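
// A scalar sketch of the U math above for one 2x2 averaged pixel
// (illustrative only, not used by the build). b, g, r are the averaged
// channels; kARGBToU is {112, -74, -38} and kAddUV128 rebiases to unsigned.
// V is the same shape with the {-18, -94, 112} coefficients.
static __inline uint8 ARGBToU_Sketch(int b, int g, int r) {
  // |112*b - 74*g - 38*r| <= 28560, so the arithmetic >> 8 (matching psraw)
  // stays within [-112, 111] and the +128 bias needs no extra clamp.
  return (uint8)(((112 * b - 74 * g - 38 * r) >> 8) + 128);
}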

1466
__declspec(naked)
1467 1468 1469 1470 1471 1472 1473 1474 1475 1476
void ARGBToUVJRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                        uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // pix
Frank Barchard's avatar
Frank Barchard committed
1477 1478 1479
    movdqa     xmm5, xmmword ptr kAddUVJ128
    movdqa     xmm6, xmmword ptr kARGBToVJ
    movdqa     xmm7, xmmword ptr kARGBToUJ
1480 1481 1482 1483
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    paddw      xmm0, xmm5            // +.5 rounding -> unsigned
    paddw      xmm1, xmm5
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
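
// Illustration (not part of the original libyuv sources): a scalar sketch of
// the "J" (JPEG, full-range) variant above.  The difference from ARGBToUVRow
// is that kAddUVJ128 is added as words *before* the shift ("+.5 rounding"),
// and full-range coefficients are used.  The 127/84/43 and 127/107/20 values
// are assumptions based on the C reference in row_common.cc.
#if 0
static uint8 RGBToUJ_Sketch(int r, int g, int b) {
  return (uint8)((127 * b - 84 * g - 43 * r + 0x8080) >> 8);
}
static uint8 RGBToVJ_Sketch(int r, int g, int b) {
  return (uint8)((127 * r - 107 * g - 20 * b + 0x8080) >> 8);
}
#endif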

#ifdef HAS_ARGBTOUVROW_AVX2
__declspec(naked)
void ARGBToUVRow_AVX2(const uint8* src_argb0, int src_stride_argb,
                      uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // width
    vbroadcastf128 ymm5, xmmword ptr kAddUV128
    vbroadcastf128 ymm6, xmmword ptr kARGBToV
    vbroadcastf128 ymm7, xmmword ptr kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 32x2 argb pixels to 16x1 */
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vmovdqu    ymm2, [eax + 64]
    vmovdqu    ymm3, [eax + 96]
    vpavgb     ymm0, ymm0, [eax + esi]
    vpavgb     ymm1, ymm1, [eax + esi + 32]
    vpavgb     ymm2, ymm2, [eax + esi + 64]
    vpavgb     ymm3, ymm3, [eax + esi + 96]
    lea        eax,  [eax + 128]
    vshufps    ymm4, ymm0, ymm1, 0x88
    vshufps    ymm0, ymm0, ymm1, 0xdd
    vpavgb     ymm0, ymm0, ymm4  // mutated by vshufps
    vshufps    ymm4, ymm2, ymm3, 0x88
    vshufps    ymm2, ymm2, ymm3, 0xdd
    vpavgb     ymm2, ymm2, ymm4  // mutated by vshufps

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 32 different pixels, it's 16 pixels of U and 16 of V
    vpmaddubsw ymm1, ymm0, ymm7  // U
    vpmaddubsw ymm3, ymm2, ymm7
    vpmaddubsw ymm0, ymm0, ymm6  // V
    vpmaddubsw ymm2, ymm2, ymm6
    vphaddw    ymm1, ymm1, ymm3  // mutates
    vphaddw    ymm0, ymm0, ymm2
    vpsraw     ymm1, ymm1, 8
    vpsraw     ymm0, ymm0, 8
    vpacksswb  ymm0, ymm1, ymm0  // mutates
    vpermq     ymm0, ymm0, 0xd8  // For vpacksswb
    vpshufb    ymm0, ymm0, ymmword ptr kShufARGBToUV_AVX  // for vshufps/vphaddw
    vpaddb     ymm0, ymm0, ymm5  // -> unsigned

    // step 3 - store 16 U and 16 V values
    vextractf128 [edx], ymm0, 0 // U
    vextractf128 [edx + edi], ymm0, 1 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTOUVROW_AVX2
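
// Illustration (not part of the original libyuv sources): an intrinsics
// sketch of the lane fixup the AVX2 loop above needs.  256-bit vpacksswb
// packs each 128-bit lane independently, so vpermq with 0xd8 (quadword
// order 0,2,1,3) is required to put the logical halves back in linear order.
#if 0
#include <immintrin.h>
static __m256i PackSignedWordsLinear(__m256i a, __m256i b) {
  __m256i packed = _mm256_packs_epi16(a, b);       // packs per 128-bit lane
  return _mm256_permute4x64_epi64(packed, 0xd8);   // restore linear order
}
#endif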

__declspec(naked)
void ARGBToUV444Row_SSSE3(const uint8* src_argb0,
                          uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]   // src_argb
    mov        edx, [esp + 4 + 8]   // dst_u
    mov        edi, [esp + 4 + 12]  // dst_v
    mov        ecx, [esp + 4 + 16]  // width
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kARGBToV
    movdqa     xmm7, xmmword ptr kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* convert to U and V */
    movdqu     xmm0, [eax]          // U
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm7
    pmaddubsw  xmm1, xmm7
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm3, xmm7
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psraw      xmm0, 8
    psraw      xmm2, 8
    packsswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0

    movdqu     xmm0, [eax]          // V
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm6
    pmaddubsw  xmm1, xmm6
    pmaddubsw  xmm2, xmm6
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psraw      xmm0, 8
    psraw      xmm2, 8
    packsswb   xmm0, xmm2
    paddb      xmm0, xmm5
    lea        eax,  [eax + 64]
    movdqu     [edx + edi], xmm0
    lea        edx,  [edx + 16]
    sub        ecx,  16
    jg         convertloop

    pop        edi
    ret
  }
}

__declspec(naked)
void ARGBToUV422Row_SSSE3(const uint8* src_argb0,
                          uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]   // src_argb
    mov        edx, [esp + 4 + 8]   // dst_u
    mov        edi, [esp + 4 + 12]  // dst_v
    mov        ecx, [esp + 4 + 16]  // width
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kARGBToV
    movdqa     xmm7, xmmword ptr kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x1 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}
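
// Illustration (not part of the original libyuv sources): an intrinsics
// sketch of the shufps trick used in step 1 above.  Each 32-bit ARGB pixel
// is treated as one float lane: imm 0x88 keeps even-indexed pixels, 0xdd
// keeps odd-indexed ones, and pavgb then averages each horizontal pair.
#if 0
#include <emmintrin.h>
static __m128i AveragePixelPairs(__m128i p0123, __m128i p4567) {
  __m128 a = _mm_castsi128_ps(p0123);
  __m128 b = _mm_castsi128_ps(p4567);
  __m128i even = _mm_castps_si128(_mm_shuffle_ps(a, b, 0x88));  // px 0,2,4,6
  __m128i odd = _mm_castps_si128(_mm_shuffle_ps(a, b, 0xdd));   // px 1,3,5,7
  return _mm_avg_epu8(even, odd);  // rounding byte average per channel
}
#endif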

__declspec(naked)
void BGRAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // width
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kBGRAToV
    movdqa     xmm7, xmmword ptr kBGRAToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void ABGRToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // width
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kABGRToV
    movdqa     xmm7, xmmword ptr kABGRToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void RGBAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // width
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kRGBAToV
    movdqa     xmm7, xmmword ptr kRGBAToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBTOYROW_SSSE3

// Read 16 UV from 444
#define READYUV444_AVX2 __asm {                                                \
    __asm vmovdqu    xmm0, [esi]                  /* U */                      \
    __asm vmovdqu    xmm1, [esi + edi]            /* V */                      \
    __asm lea        esi,  [esi + 16]                                          \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpermq     ymm1, ymm1, 0xd8                                          \
    __asm vpunpcklbw ymm0, ymm0, ymm1             /* UV */                     \
    __asm vmovdqu    xmm4, [eax]                  /* Y */                      \
    __asm vpermq     ymm4, ymm4, 0xd8                                          \
    __asm vpunpcklbw ymm4, ymm4, ymm4                                          \
    __asm lea        eax, [eax + 16]                                           \
  }

// Read 8 UV from 422, upsample to 16 UV.
#define READYUV422_AVX2 __asm {                                                \
    __asm vmovq      xmm0, qword ptr [esi]        /* U */                      \
    __asm vmovq      xmm1, qword ptr [esi + edi]  /* V */                      \
    __asm lea        esi,  [esi + 8]                                           \
    __asm vpunpcklbw ymm0, ymm0, ymm1             /* UV */                     \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm0, ymm0             /* UVUV (upsample) */        \
    __asm vmovdqu    xmm4, [eax]                  /* Y */                      \
    __asm vpermq     ymm4, ymm4, 0xd8                                          \
    __asm vpunpcklbw ymm4, ymm4, ymm4                                          \
    __asm lea        eax, [eax + 16]                                           \
  }

1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973
// Read 8 UV from 422, upsample to 16 UV.  With 16 Alpha.
#define READYUVA422_AVX2 __asm {                                               \
    __asm vmovq      xmm0, qword ptr [esi]        /* U */                      \
    __asm vmovq      xmm1, qword ptr [esi + edi]  /* V */                      \
    __asm lea        esi,  [esi + 8]                                           \
    __asm vpunpcklbw ymm0, ymm0, ymm1             /* UV */                     \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm0, ymm0             /* UVUV (upsample) */        \
    __asm vmovdqu    xmm4, [eax]                  /* Y */                      \
    __asm vpermq     ymm4, ymm4, 0xd8                                          \
    __asm vpunpcklbw ymm4, ymm4, ymm4                                          \
    __asm lea        eax, [eax + 16]                                           \
    __asm vmovdqu    xmm5, [ebp]                  /* A */                      \
    __asm vpermq     ymm5, ymm5, 0xd8                                          \
    __asm lea        ebp, [ebp + 16]                                           \
  }

// Read 4 UV from 411, upsample to 16 UV.
#define READYUV411_AVX2 __asm {                                                \
    __asm vmovd      xmm0, dword ptr [esi]        /* U */                      \
    __asm vmovd      xmm1, dword ptr [esi + edi]  /* V */                      \
    __asm lea        esi,  [esi + 4]                                           \
    __asm vpunpcklbw ymm0, ymm0, ymm1             /* UV */                     \
    __asm vpunpcklwd ymm0, ymm0, ymm0             /* UVUV (upsample) */        \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpckldq ymm0, ymm0, ymm0             /* UVUVUVUV (upsample) */    \
    __asm vmovdqu    xmm4, [eax]                  /* Y */                      \
    __asm vpermq     ymm4, ymm4, 0xd8                                          \
    __asm vpunpcklbw ymm4, ymm4, ymm4                                          \
    __asm lea        eax, [eax + 16]                                           \
  }

// Read 8 UV from NV12, upsample to 16 UV.
#define READNV12_AVX2 __asm {                                                  \
    __asm vmovdqu    xmm0, [esi]                  /* UV */                     \
    __asm lea        esi,  [esi + 16]                                          \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm0, ymm0             /* UVUV (upsample) */        \
    __asm vmovdqu    xmm4, [eax]                  /* Y */                      \
    __asm vpermq     ymm4, ymm4, 0xd8                                          \
    __asm vpunpcklbw ymm4, ymm4, ymm4                                          \
    __asm lea        eax, [eax + 16]                                           \
  }

// Read 8 UV from NV21, upsample to 16 UV.
#define READNV21_AVX2 __asm {                                                  \
    __asm vmovdqu    xmm0, [esi]                  /* UV */                     \
    __asm lea        esi,  [esi + 16]                                          \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpshufb    ymm0, ymm0, ymmword ptr kShuffleNV21                      \
    __asm vmovdqu    xmm4, [eax]                  /* Y */                      \
    __asm vpermq     ymm4, ymm4, 0xd8                                          \
    __asm vpunpcklbw ymm4, ymm4, ymm4                                          \
    __asm lea        eax, [eax + 16]                                           \
  }

// Read 8 YUY2 with 16 Y and upsample 8 UV to 16 UV.
#define READYUY2_AVX2 __asm {                                                  \
    __asm vmovdqu    ymm4, [eax]          /* YUY2 */                           \
    __asm vpshufb    ymm4, ymm4, ymmword ptr kShuffleYUY2Y                     \
    __asm vmovdqu    ymm0, [eax]          /* UV */                             \
    __asm vpshufb    ymm0, ymm0, ymmword ptr kShuffleYUY2UV                    \
    __asm lea        eax, [eax + 32]                                           \
  }

// Read 8 UYVY with 16 Y and upsample 8 UV to 16 UV.
#define READUYVY_AVX2 __asm {                                                  \
    __asm vmovdqu    ymm4, [eax]          /* UYVY */                           \
    __asm vpshufb    ymm4, ymm4, ymmword ptr kShuffleUYVYY                     \
    __asm vmovdqu    ymm0, [eax]          /* UV */                             \
    __asm vpshufb    ymm0, ymm0, ymmword ptr kShuffleUYVYUV                    \
    __asm lea        eax, [eax + 32]                                           \
  }

// Convert 16 pixels: 16 UV and 16 Y.
#define YUVTORGB_AVX2(YuvConstants) __asm {                                    \
    __asm vpmaddubsw ymm2, ymm0, ymmword ptr [YuvConstants + KUVTOR] /* R UV */\
    __asm vpmaddubsw ymm1, ymm0, ymmword ptr [YuvConstants + KUVTOG] /* G UV */\
    __asm vpmaddubsw ymm0, ymm0, ymmword ptr [YuvConstants + KUVTOB] /* B UV */\
    __asm vmovdqu    ymm3, ymmword ptr [YuvConstants + KUVBIASR]               \
    __asm vpsubw     ymm2, ymm3, ymm2                                          \
    __asm vmovdqu    ymm3, ymmword ptr [YuvConstants + KUVBIASG]               \
    __asm vpsubw     ymm1, ymm3, ymm1                                          \
    __asm vmovdqu    ymm3, ymmword ptr [YuvConstants + KUVBIASB]               \
    __asm vpsubw     ymm0, ymm3, ymm0                                          \
    /* Step 2: Find Y contribution to 16 R,G,B values */                       \
    __asm vpmulhuw   ymm4, ymm4, ymmword ptr [YuvConstants + KYTORGB]          \
    __asm vpaddsw    ymm0, ymm0, ymm4           /* B += Y */                   \
    __asm vpaddsw    ymm1, ymm1, ymm4           /* G += Y */                   \
    __asm vpaddsw    ymm2, ymm2, ymm4           /* R += Y */                   \
    __asm vpsraw     ymm0, ymm0, 6                                             \
    __asm vpsraw     ymm1, ymm1, 6                                             \
    __asm vpsraw     ymm2, ymm2, 6                                             \
    __asm vpackuswb  ymm0, ymm0, ymm0           /* B */                        \
    __asm vpackuswb  ymm1, ymm1, ymm1           /* G */                        \
    __asm vpackuswb  ymm2, ymm2, ymm2           /* R */                        \
  }
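
// Illustration (not part of the original libyuv sources): a scalar sketch of
// the fixed-point math in YUVTORGB_AVX2 above, one channel at a time.  The
// (ku, kv, bias, yg) parameters stand in for the per-colorspace words loaded
// from YuvConstants; no specific constant values are asserted here.
#if 0
static uint8 Clamp255(int v) {
  return (uint8)(v < 0 ? 0 : (v > 255 ? 255 : v));
}
static uint8 YuvChannel_Sketch(int y, int u, int v,
                               int ku, int kv, int bias, int yg) {
  // punpcklbw y,y widens Y to (y << 8) | y; pmulhuw keeps the high word.
  int y1 = ((y * 0x0101) * yg) >> 16;
  // Each channel: bias word minus the signed U/V dot product (vpmaddubsw +
  // vpsubw), plus the Y term, then psraw 6 + packuswb saturate to a byte.
  return Clamp255((bias - (u * ku + v * kv) + y1) >> 6);
}
// B, G and R each use their own (ku, kv, bias) triple from YuvConstants.
#endif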

// Store 16 ARGB values.
#define STOREARGB_AVX2 __asm {                                                 \
    __asm vpunpcklbw ymm0, ymm0, ymm1           /* BG */                       \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpcklbw ymm2, ymm2, ymm5           /* RA */                       \
    __asm vpermq     ymm2, ymm2, 0xd8                                          \
    __asm vpunpcklwd ymm1, ymm0, ymm2           /* BGRA first 8 pixels */      \
    __asm vpunpckhwd ymm0, ymm0, ymm2           /* BGRA next 8 pixels */       \
    __asm vmovdqu    0[edx], ymm1                                              \
    __asm vmovdqu    32[edx], ymm0                                             \
    __asm lea        edx,  [edx + 64]                                          \
  }

// Store 16 BGRA values.
#define STOREBGRA_AVX2 __asm {                                                 \
    __asm vpunpcklbw ymm1, ymm1, ymm0           /* GB */                       \
    __asm vpermq     ymm1, ymm1, 0xd8                                          \
    __asm vpunpcklbw ymm2, ymm5, ymm2           /* AR */                       \
    __asm vpermq     ymm2, ymm2, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm2, ymm1           /* ARGB first 8 pixels */      \
    __asm vpunpckhwd ymm2, ymm2, ymm1           /* ARGB next 8 pixels */       \
    __asm vmovdqu    [edx], ymm0                                               \
    __asm vmovdqu    [edx + 32], ymm2                                          \
    __asm lea        edx,  [edx + 64]                                          \
  }

// Store 16 RGBA values.
#define STORERGBA_AVX2 __asm {                                                 \
    __asm vpunpcklbw ymm1, ymm1, ymm2           /* GR */                       \
    __asm vpermq     ymm1, ymm1, 0xd8                                          \
    __asm vpunpcklbw ymm2, ymm5, ymm0           /* AB */                       \
    __asm vpermq     ymm2, ymm2, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm2, ymm1           /* ABGR first 8 pixels */      \
    __asm vpunpckhwd ymm1, ymm2, ymm1           /* ABGR next 8 pixels */       \
    __asm vmovdqu    [edx], ymm0                                               \
    __asm vmovdqu    [edx + 32], ymm1                                          \
    __asm lea        edx,  [edx + 64]                                          \
  }

// Store 16 ABGR values.
#define STOREABGR_AVX2 __asm {                                                 \
    __asm vpunpcklbw ymm1, ymm2, ymm1           /* RG */                       \
    __asm vpermq     ymm1, ymm1, 0xd8                                          \
    __asm vpunpcklbw ymm2, ymm0, ymm5           /* BA */                       \
    __asm vpermq     ymm2, ymm2, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm1, ymm2           /* RGBA first 8 pixels */      \
    __asm vpunpckhwd ymm1, ymm1, ymm2           /* RGBA next 8 pixels */       \
    __asm vmovdqu    [edx], ymm0                                               \
    __asm vmovdqu    [edx + 32], ymm1                                          \
    __asm lea        edx,  [edx + 64]                                          \
  }

#ifdef HAS_I422TOARGBROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void I422ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2(ebx)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TOARGBROW_AVX2
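
// Illustration (not part of the original libyuv sources): a usage sketch.
// Row functions convert exactly one row, so a caller loops over rows itself;
// for 422 input the chroma pointers advance every row.  Width is assumed to
// be a multiple of 16 here (libyuv normally dispatches an *_Any wrapper for
// the remainder), and the stride parameters are illustrative.
#if 0
static void I422ToARGBPlane_Sketch(const uint8* y, int y_stride,
                                   const uint8* u, const uint8* v,
                                   int uv_stride,
                                   uint8* dst_argb, int dst_stride,
                                   struct YuvConstants* yuvconstants,
                                   int width, int height) {
  int i;
  for (i = 0; i < height; ++i) {
    I422ToARGBRow_AVX2(y, u, v, dst_argb, yuvconstants, width);
    y += y_stride;
    u += uv_stride;  // 422 keeps full vertical chroma resolution.
    v += uv_stride;
    dst_argb += dst_stride;
  }
}
#endif
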
#ifdef HAS_I422ALPHATOARGBROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y and 16 A producing 16 ARGB.
__declspec(naked)
void I422AlphaToARGBRow_AVX2(const uint8* y_buf,
                             const uint8* u_buf,
                             const uint8* v_buf,
                             const uint8* a_buf,
                             uint8* dst_argb,
                             struct YuvConstants* yuvconstants,
                             int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    push       ebp
    mov        eax, [esp + 16 + 4]   // Y
    mov        esi, [esp + 16 + 8]   // U
    mov        edi, [esp + 16 + 12]  // V
    mov        ebp, [esp + 16 + 16]  // A
    mov        edx, [esp + 16 + 20]  // argb
    mov        ebx, [esp + 16 + 24]  // yuvconstants
    mov        ecx, [esp + 16 + 28]  // width
    sub        edi, esi

 convertloop:
    READYUVA422_AVX2
    YUVTORGB_AVX2(ebx)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebp
    pop        ebx
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422ALPHATOARGBROW_AVX2

#ifdef HAS_I422ALPHATOABGRROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y and 16 A producing 16 ABGR.
__declspec(naked)
void I422AlphaToABGRRow_AVX2(const uint8* y_buf,
                             const uint8* u_buf,
                             const uint8* v_buf,
                             const uint8* a_buf,
                             uint8* dst_abgr,
                             struct YuvConstants* yuvconstants,
                             int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    push       ebp
    mov        eax, [esp + 16 + 4]   // Y
    mov        esi, [esp + 16 + 8]   // U
    mov        edi, [esp + 16 + 12]  // V
    mov        ebp, [esp + 16 + 16]  // A
    mov        edx, [esp + 16 + 20]  // abgr
    mov        ebx, [esp + 16 + 24]  // yuvconstants
    mov        ecx, [esp + 16 + 28]  // width
    sub        edi, esi

 convertloop:
    READYUVA422_AVX2
    YUVTORGB_AVX2(ebx)
    STOREABGR_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebp
    pop        ebx
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422ALPHATOABGRROW_AVX2

#ifdef HAS_I444TOARGBROW_AVX2
// 16 pixels
// 16 UV values with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void I444ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha
 convertloop:
    READYUV444_AVX2
    YUVTORGB_AVX2(ebx)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I444TOARGBROW_AVX2

#ifdef HAS_I444TOABGRROW_AVX2
// 16 pixels
// 16 UV values with 16 Y producing 16 ABGR (64 bytes).
__declspec(naked)
void I444ToABGRRow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_abgr,
                        struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // abgr
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha
 convertloop:
    READYUV444_AVX2
    YUVTORGB_AVX2(ebx)
    STOREABGR_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I444TOABGRROW_AVX2

#ifdef HAS_I411TOARGBROW_AVX2
// 16 pixels
// 4 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void I411ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV411_AVX2
    YUVTORGB_AVX2(ebx)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I411TOARGBROW_AVX2

#ifdef HAS_NV12TOARGBROW_AVX2
// 16 pixels.
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void NV12ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* uv_buf,
                        uint8* dst_argb,
                        struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       esi
    push       ebx
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // UV
    mov        edx, [esp + 8 + 12]  // argb
    mov        ebx, [esp + 8 + 16]  // yuvconstants
    mov        ecx, [esp + 8 + 20]  // width
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READNV12_AVX2
    YUVTORGB_AVX2(ebx)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_NV12TOARGBROW_AVX2

#ifdef HAS_NV21TOARGBROW_AVX2
// 16 pixels.
// 8 VU values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void NV21ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* vu_buf,
                        uint8* dst_argb,
                        struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       esi
    push       ebx
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // VU
    mov        edx, [esp + 8 + 12]  // argb
    mov        ebx, [esp + 8 + 16]  // yuvconstants
    mov        ecx, [esp + 8 + 20]  // width
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READNV21_AVX2
    YUVTORGB_AVX2(ebx)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_NV21TOARGBROW_AVX2

// 16 pixels.
// 8 YUY2 values with 16 Y and 8 UV producing 16 ARGB (64 bytes).
__declspec(naked)
void YUY2ToARGBRow_AVX2(const uint8* src_yuy2,
                        uint8* dst_argb,
                        struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       ebx
    mov        eax, [esp + 4 + 4]   // yuy2
    mov        edx, [esp + 4 + 8]   // argb
    mov        ebx, [esp + 4 + 12]  // yuvconstants
    mov        ecx, [esp + 4 + 16]  // width
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUY2_AVX2
    YUVTORGB_AVX2(ebx)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    vzeroupper
    ret
  }
}

// 16 pixels.
// 8 UYVY values with 16 Y and 8 UV producing 16 ARGB (64 bytes).
__declspec(naked)
void UYVYToARGBRow_AVX2(const uint8* src_uyvy,
                        uint8* dst_argb,
                        struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       ebx
    mov        eax, [esp + 4 + 4]   // uyvy
    mov        edx, [esp + 4 + 8]   // argb
    mov        ebx, [esp + 4 + 12]  // yuvconstants
    mov        ecx, [esp + 4 + 16]  // width
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READUYVY_AVX2
    YUVTORGB_AVX2(ebx)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    vzeroupper
    ret
  }
}


#ifdef HAS_I422TOBGRAROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 BGRA (64 bytes).
// TODO(fbarchard): Use macros to reduce duplicate code.  See SSSE3.
__declspec(naked)
void I422ToBGRARow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // bgra
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2(ebx)
    STOREBGRA_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TOBGRAROW_AVX2

#ifdef HAS_I422TORGBAROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 RGBA (64 bytes).
__declspec(naked)
void I422ToRGBARow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // rgba
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2(ebx)
    STORERGBA_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TORGBAROW_AVX2

#ifdef HAS_I422TOABGRROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ABGR (64 bytes).
__declspec(naked)
void I422ToABGRRow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // abgr
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2(ebx)
    STOREABGR_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TOABGRROW_AVX2

#if defined(HAS_I422TOARGBROW_SSSE3)
// TODO(fbarchard): Read that does half size on Y and treats 420 as 444.

// Read 8 UV from 444.
#define READYUV444 __asm {                                                     \
    __asm movq       xmm0, qword ptr [esi] /* U */                             \
    __asm movq       xmm1, qword ptr [esi + edi] /* V */                       \
    __asm lea        esi,  [esi + 8]                                           \
    __asm punpcklbw  xmm0, xmm1           /* UV */                             \
    __asm movq       xmm4, qword ptr [eax]                                     \
    __asm punpcklbw  xmm4, xmm4                                                \
    __asm lea        eax, [eax + 8]                                            \
  }

// Read 4 UV from 422, upsample to 8 UV.
#define READYUV422 __asm {                                                     \
    __asm movd       xmm0, [esi]          /* U */                              \
    __asm movd       xmm1, [esi + edi]    /* V */                              \
    __asm lea        esi,  [esi + 4]                                           \
    __asm punpcklbw  xmm0, xmm1           /* UV */                             \
    __asm punpcklwd  xmm0, xmm0           /* UVUV (upsample) */                \
    __asm movq       xmm4, qword ptr [eax]                                     \
    __asm punpcklbw  xmm4, xmm4                                                \
    __asm lea        eax, [eax + 8]                                            \
  }
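
// Illustration (not part of the original libyuv sources): an intrinsics
// sketch of the chroma upsample in READYUV422 above.  Interleave 4 U bytes
// with 4 V bytes, then duplicate each 16-bit UV pair so 4 chroma samples
// cover 8 pixels.
#if 0
#include <emmintrin.h>
static __m128i UpsampleUV422_Sketch(__m128i u4, __m128i v4) {
  __m128i uv = _mm_unpacklo_epi8(u4, v4);  // U0V0 U1V1 U2V2 U3V3 ...
  return _mm_unpacklo_epi16(uv, uv);       // U0V0 U0V0 U1V1 U1V1 ...
}
#endif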

// Read 4 UV from 422, upsample to 8 UV.  With 8 Alpha.
#define READYUVA422 __asm {                                                    \
    __asm movd       xmm0, [esi]          /* U */                              \
    __asm movd       xmm1, [esi + edi]    /* V */                              \
    __asm lea        esi,  [esi + 4]                                           \
    __asm punpcklbw  xmm0, xmm1           /* UV */                             \
    __asm punpcklwd  xmm0, xmm0           /* UVUV (upsample) */                \
    __asm movq       xmm4, qword ptr [eax]   /* Y */                           \
    __asm punpcklbw  xmm4, xmm4                                                \
    __asm lea        eax, [eax + 8]                                            \
    __asm movq       xmm5, qword ptr [ebp]   /* A */                           \
    __asm lea        ebp, [ebp + 8]                                            \
  }

// Read 2 UV from 411, upsample to 8 UV.
#define READYUV411 __asm {                                                     \
    __asm pinsrw     xmm0, [esi], 0        /* U */                             \
    __asm pinsrw     xmm1, [esi + edi], 0  /* V */                             \
    __asm lea        esi,  [esi + 2]                                           \
    __asm punpcklbw  xmm0, xmm1            /* UV */                            \
    __asm punpcklwd  xmm0, xmm0            /* UVUV (upsample) */               \
    __asm punpckldq  xmm0, xmm0            /* UVUVUVUV (upsample) */           \
    __asm movq       xmm4, qword ptr [eax]                                     \
    __asm punpcklbw  xmm4, xmm4                                                \
    __asm lea        eax, [eax + 8]                                            \
  }

// Read 4 UV from NV12, upsample to 8 UV.
#define READNV12 __asm {                                                       \
    __asm movq       xmm0, qword ptr [esi] /* UV */                            \
    __asm lea        esi,  [esi + 8]                                           \
    __asm punpcklwd  xmm0, xmm0           /* UVUV (upsample) */                \
    __asm movq       xmm4, qword ptr [eax]                                     \
    __asm punpcklbw  xmm4, xmm4                                                \
    __asm lea        eax, [eax + 8]                                            \
  }

// Read 4 VU from NV21, upsample to 8 UV.
#define READNV21 __asm {                                                       \
    __asm movq       xmm0, qword ptr [esi] /* UV */                            \
    __asm lea        esi,  [esi + 8]                                           \
    __asm pshufb     xmm0, xmmword ptr kShuffleNV21                            \
    __asm movq       xmm4, qword ptr [eax]                                     \
    __asm punpcklbw  xmm4, xmm4                                                \
    __asm lea        eax, [eax + 8]                                            \
  }

// Read 4 YUY2 with 8 Y and upsample 4 UV to 8 UV.
#define READYUY2 __asm {                                                       \
    __asm movdqu     xmm4, [eax]          /* YUY2 */                           \
    __asm pshufb     xmm4, xmmword ptr kShuffleYUY2Y                           \
    __asm movdqu     xmm0, [eax]          /* UV */                             \
    __asm pshufb     xmm0, xmmword ptr kShuffleYUY2UV                          \
    __asm lea        eax, [eax + 16]                                           \
  }

// Read 4 UYVY with 8 Y and upsample 4 UV to 8 UV.
#define READUYVY __asm {                                                       \
    __asm movdqu     xmm4, [eax]          /* UYVY */                           \
    __asm pshufb     xmm4, xmmword ptr kShuffleUYVYY                           \
    __asm movdqu     xmm0, [eax]          /* UV */                             \
    __asm pshufb     xmm0, xmmword ptr kShuffleUYVYUV                          \
    __asm lea        eax, [eax + 16]                                           \
  }

// Convert 8 pixels: 8 UV and 8 Y.
#define YUVTORGB(YuvConstants) __asm {                                         \
    __asm movdqa     xmm1, xmm0                                                \
    __asm movdqa     xmm2, xmm0                                                \
    __asm movdqa     xmm3, xmm0                                                \
    __asm movdqa     xmm0, xmmword ptr [YuvConstants + KUVBIASB]               \
    __asm pmaddubsw  xmm1, xmmword ptr [YuvConstants + KUVTOB]                 \
    __asm psubw      xmm0, xmm1                                                \
    __asm movdqa     xmm1, xmmword ptr [YuvConstants + KUVBIASG]               \
    __asm pmaddubsw  xmm2, xmmword ptr [YuvConstants + KUVTOG]                 \
    __asm psubw      xmm1, xmm2                                                \
    __asm movdqa     xmm2, xmmword ptr [YuvConstants + KUVBIASR]               \
    __asm pmaddubsw  xmm3, xmmword ptr [YuvConstants + KUVTOR]                 \
    __asm psubw      xmm2, xmm3                                                \
    __asm pmulhuw    xmm4, xmmword ptr [YuvConstants + KYTORGB]                \
    __asm paddsw     xmm0, xmm4           /* B += Y */                         \
    __asm paddsw     xmm1, xmm4           /* G += Y */                         \
    __asm paddsw     xmm2, xmm4           /* R += Y */                         \
    __asm psraw      xmm0, 6                                                   \
    __asm psraw      xmm1, 6                                                   \
    __asm psraw      xmm2, 6                                                   \
    __asm packuswb   xmm0, xmm0           /* B */                              \
    __asm packuswb   xmm1, xmm1           /* G */                              \
    __asm packuswb   xmm2, xmm2           /* R */                              \
  }

// Store 8 ARGB values.
#define STOREARGB __asm {                                                      \
    __asm punpcklbw  xmm0, xmm1           /* BG */                             \
    __asm punpcklbw  xmm2, xmm5           /* RA */                             \
    __asm movdqa     xmm1, xmm0                                                \
    __asm punpcklwd  xmm0, xmm2           /* BGRA first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm2           /* BGRA next 4 pixels */             \
    __asm movdqu     0[edx], xmm0                                              \
    __asm movdqu     16[edx], xmm1                                             \
    __asm lea        edx,  [edx + 32]                                          \
  }
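
// Illustration (not part of the original libyuv sources): an intrinsics
// sketch of the STOREARGB weave.  Little-endian ARGB means bytes B,G,R,A in
// memory, so interleaving B with G and R with A, then interleaving those
// 16-bit pairs, emits 4 complete pixels per 16-byte store.
#if 0
#include <emmintrin.h>
static void StoreARGB_Sketch(__m128i b8, __m128i g8, __m128i r8, __m128i a8,
                             uint8* dst) {
  __m128i bg = _mm_unpacklo_epi8(b8, g8);  // B0G0 B1G1 ...
  __m128i ra = _mm_unpacklo_epi8(r8, a8);  // R0A0 R1A1 ...
  _mm_storeu_si128((__m128i*)dst, _mm_unpacklo_epi16(bg, ra));         // px 0-3
  _mm_storeu_si128((__m128i*)(dst + 16), _mm_unpackhi_epi16(bg, ra));  // px 4-7
}
#endif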

// Store 8 BGRA values.
#define STOREBGRA __asm {                                                      \
    __asm pcmpeqb    xmm5, xmm5           /* generate 0xffffffff for alpha */  \
    __asm punpcklbw  xmm1, xmm0           /* GB */                             \
    __asm punpcklbw  xmm5, xmm2           /* AR */                             \
    __asm movdqa     xmm0, xmm5                                                \
    __asm punpcklwd  xmm5, xmm1           /* BGRA first 4 pixels */            \
    __asm punpckhwd  xmm0, xmm1           /* BGRA next 4 pixels */             \
    __asm movdqu     0[edx], xmm5                                              \
    __asm movdqu     16[edx], xmm0                                             \
    __asm lea        edx,  [edx + 32]                                          \
  }

// Store 8 ABGR values.
#define STOREABGR __asm {                                                      \
    __asm punpcklbw  xmm2, xmm1           /* RG */                             \
    __asm punpcklbw  xmm0, xmm5           /* BA */                             \
    __asm movdqa     xmm1, xmm2                                                \
    __asm punpcklwd  xmm2, xmm0           /* RGBA first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm0           /* RGBA next 4 pixels */             \
    __asm movdqu     0[edx], xmm2                                              \
    __asm movdqu     16[edx], xmm1                                             \
    __asm lea        edx,  [edx + 32]                                          \
  }

// Store 8 RGBA values.
#define STORERGBA __asm {                                                      \
    __asm pcmpeqb    xmm5, xmm5           /* generate 0xffffffff for alpha */  \
    __asm punpcklbw  xmm1, xmm2           /* GR */                             \
    __asm punpcklbw  xmm5, xmm0           /* AB */                             \
    __asm movdqa     xmm0, xmm5                                                \
    __asm punpcklwd  xmm5, xmm1           /* RGBA first 4 pixels */            \
    __asm punpckhwd  xmm0, xmm1           /* RGBA next 4 pixels */             \
    __asm movdqu     0[edx], xmm5                                              \
    __asm movdqu     16[edx], xmm0                                             \
    __asm lea        edx,  [edx + 32]                                          \
  }

// Store 8 RGB24 values.
#define STORERGB24 __asm {                                                     \
    /* Weave into RRGB */                                                      \
    __asm punpcklbw  xmm0, xmm1           /* BG */                             \
    __asm punpcklbw  xmm2, xmm2           /* RR */                             \
    __asm movdqa     xmm1, xmm0                                                \
    __asm punpcklwd  xmm0, xmm2           /* BGRR first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm2           /* BGRR next 4 pixels */             \
    /* RRGB -> RGB24 */                                                        \
    __asm pshufb     xmm0, xmm5           /* Pack first 8 and last 4 bytes. */ \
    __asm pshufb     xmm1, xmm6           /* Pack first 12 bytes. */           \
    __asm palignr    xmm1, xmm0, 12       /* last 4 bytes of xmm0 + 12 xmm1 */ \
    __asm movq       qword ptr 0[edx], xmm0  /* First 8 bytes */               \
    __asm movdqu     8[edx], xmm1         /* Last 16 bytes */                  \
    __asm lea        edx,  [edx + 24]                                          \
  }

// Store 8 RAW values.
#define STORERAW __asm {                                                       \
    /* Weave into RRGB */                                                      \
    __asm punpcklbw  xmm0, xmm1           /* BG */                             \
    __asm punpcklbw  xmm2, xmm2           /* RR */                             \
    __asm movdqa     xmm1, xmm0                                                \
    __asm punpcklwd  xmm0, xmm2           /* BGRR first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm2           /* BGRR next 4 pixels */             \
    /* RRGB -> RAW */                                                          \
    __asm pshufb     xmm0, xmm5           /* Pack first 8 and last 4 bytes. */ \
    __asm pshufb     xmm1, xmm6           /* Pack first 12 bytes. */           \
    __asm palignr    xmm1, xmm0, 12       /* last 4 bytes of xmm0 + 12 xmm1 */ \
    __asm movq       qword ptr 0[edx], xmm0  /* First 8 bytes */               \
    __asm movdqu     8[edx], xmm1         /* Last 16 bytes */                  \
    __asm lea        edx,  [edx + 24]                                          \
  }
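
// For reference, a scalar sketch of what the two macros above store
// (illustrative helper, not part of libyuv): alpha is dropped and the three
// color bytes per pixel are written contiguously; RAW is RGB24 with the B
// and R channels swapped.
static __inline void StoreRGB24_C(uint8* dst, uint8 b, uint8 g, uint8 r) {
  dst[0] = b;  // For RAW, store r first and b last instead.
  dst[1] = g;
  dst[2] = r;
}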

// Store 8 RGB565 values.
#define STORERGB565 __asm {                                                    \
    /* Weave into RRGB */                                                      \
    __asm punpcklbw  xmm0, xmm1           /* BG */                             \
    __asm punpcklbw  xmm2, xmm2           /* RR */                             \
    __asm movdqa     xmm1, xmm0                                                \
    __asm punpcklwd  xmm0, xmm2           /* BGRR first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm2           /* BGRR next 4 pixels */             \
    /* RRGB -> RGB565 */                                                       \
    __asm movdqa     xmm3, xmm0    /* B  first 4 pixels of argb */             \
    __asm movdqa     xmm2, xmm0    /* G */                                     \
    __asm pslld      xmm0, 8       /* R */                                     \
    __asm psrld      xmm3, 3       /* B */                                     \
    __asm psrld      xmm2, 5       /* G */                                     \
    __asm psrad      xmm0, 16      /* R */                                     \
    __asm pand       xmm3, xmm5    /* B */                                     \
    __asm pand       xmm2, xmm6    /* G */                                     \
    __asm pand       xmm0, xmm7    /* R */                                     \
    __asm por        xmm3, xmm2    /* BG */                                    \
    __asm por        xmm0, xmm3    /* BGR */                                   \
    __asm movdqa     xmm3, xmm1    /* B  next 4 pixels of argb */              \
    __asm movdqa     xmm2, xmm1    /* G */                                     \
    __asm pslld      xmm1, 8       /* R */                                     \
    __asm psrld      xmm3, 3       /* B */                                     \
    __asm psrld      xmm2, 5       /* G */                                     \
    __asm psrad      xmm1, 16      /* R */                                     \
    __asm pand       xmm3, xmm5    /* B */                                     \
    __asm pand       xmm2, xmm6    /* G */                                     \
    __asm pand       xmm1, xmm7    /* R */                                     \
    __asm por        xmm3, xmm2    /* BG */                                    \
    __asm por        xmm1, xmm3    /* BGR */                                   \
    __asm packssdw   xmm0, xmm1                                                \
    __asm movdqu     0[edx], xmm0  /* store 8 pixels of RGB565 */              \
    __asm lea        edx, [edx + 16]                                           \
  }
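
// For reference, a scalar sketch of the 565 packing above (illustrative
// helper, not part of libyuv): each 8 bit channel is truncated to 5/6/5
// bits and packed as bits [4:0] = B, [10:5] = G, [15:11] = R.
static __inline uint16 PackRGB565_C(uint8 b, uint8 g, uint8 r) {
  return (uint16)((b >> 3) | ((g >> 2) << 5) | ((r >> 3) << 11));
}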

// 8 pixels.
// 8 UV values, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked)
void I444ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5            // generate 0xffffffff for alpha

 convertloop:
    READYUV444
    YUVTORGB(ebx)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 8 UV values, mixed with 8 Y producing 8 ABGR (32 bytes).
__declspec(naked)
void I444ToABGRRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_abgr,
                         struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // abgr
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5            // generate 0xffffffff for alpha

 convertloop:
    READYUV444
    YUVTORGB(ebx)
    STOREABGR

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RGB24 (24 bytes).
__declspec(naked)
void I422ToRGB24Row_SSSE3(const uint8* y_buf,
                          const uint8* u_buf,
                          const uint8* v_buf,
                          uint8* dst_rgb24,
                          struct YuvConstants* yuvconstants,
                          int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // rgb24
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    movdqa     xmm5, xmmword ptr kShuffleMaskARGBToRGB24_0
    movdqa     xmm6, xmmword ptr kShuffleMaskARGBToRGB24

 convertloop:
    READYUV422
    YUVTORGB(ebx)
    STORERGB24

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RAW (24 bytes).
__declspec(naked)
void I422ToRAWRow_SSSE3(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_raw,
                        struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // raw
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    movdqa     xmm5, xmmword ptr kShuffleMaskARGBToRAW_0
    movdqa     xmm6, xmmword ptr kShuffleMaskARGBToRAW

 convertloop:
    READYUV422
    YUVTORGB(ebx)
    STORERAW

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RGB565 (16 bytes).
__declspec(naked)
void I422ToRGB565Row_SSSE3(const uint8* y_buf,
                           const uint8* u_buf,
                           const uint8* v_buf,
                           uint8* rgb565_buf,
                           struct YuvConstants* yuvconstants,
                           int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // rgb565
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5       // generate mask 0x0000001f
    psrld      xmm5, 27
    pcmpeqb    xmm6, xmm6       // generate mask 0x000007e0
    psrld      xmm6, 26
    pslld      xmm6, 5
    pcmpeqb    xmm7, xmm7       // generate mask 0xfffff800
    pslld      xmm7, 11

 convertloop:
    READYUV422
    YUVTORGB(ebx)
    STORERGB565

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked)
void I422ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READYUV422
    YUVTORGB(ebx)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y and 8 A producing 8 ARGB.
__declspec(naked)
__declspec(naked)
void I422AlphaToARGBRow_SSSE3(const uint8* y_buf,
                              const uint8* u_buf,
                              const uint8* v_buf,
                              const uint8* a_buf,
                              uint8* dst_argb,
                              struct YuvConstants* yuvconstants,
                              int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    push       ebp
    mov        eax, [esp + 16 + 4]   // Y
    mov        esi, [esp + 16 + 8]   // U
    mov        edi, [esp + 16 + 12]  // V
    mov        ebp, [esp + 16 + 16]  // A
    mov        edx, [esp + 16 + 20]  // argb
    mov        ebx, [esp + 16 + 24]  // yuvconstants
    mov        ecx, [esp + 16 + 28]  // width
    sub        edi, esi

 convertloop:
    READYUVA422
    YUVTORGB(ebx)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        ebp
    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y and 8 A producing 8 ABGR.
__declspec(naked)
void I422AlphaToABGRRow_SSSE3(const uint8* y_buf,
                              const uint8* u_buf,
                              const uint8* v_buf,
                              const uint8* a_buf,
                              uint8* dst_abgr,
                              struct YuvConstants* yuvconstants,
                              int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    push       ebp
    mov        eax, [esp + 16 + 4]   // Y
    mov        esi, [esp + 16 + 8]   // U
    mov        edi, [esp + 16 + 12]  // V
    mov        ebp, [esp + 16 + 16]  // A
    mov        edx, [esp + 16 + 20]  // abgr
    mov        ebx, [esp + 16 + 24]  // yuvconstants
    mov        ecx, [esp + 16 + 28]  // width
    sub        edi, esi

 convertloop:
    READYUVA422
    YUVTORGB(ebx)
    STOREABGR

    sub        ecx, 8
    jg         convertloop

    pop        ebp
    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 2 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
// Similar to I420 but duplicate UV once more.
__declspec(naked)
void I411ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5            // generate 0xffffffff for alpha

 convertloop:
    READYUV411
    YUVTORGB(ebx)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked)
void NV12ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* uv_buf,
                         uint8* dst_argb,
                         struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       esi
    push       ebx
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // UV
    mov        edx, [esp + 8 + 12]  // argb
    mov        ebx, [esp + 8 + 16]  // yuvconstants
    mov        ecx, [esp + 8 + 20]  // width
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READNV12
    YUVTORGB(ebx)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked)
void NV21ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* vu_buf,
                         uint8* dst_argb,
                         struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       esi
    push       ebx
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // VU
    mov        edx, [esp + 8 + 12]  // argb
    mov        ebx, [esp + 8 + 16]  // yuvconstants
    mov        ecx, [esp + 8 + 20]  // width
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READNV21
    YUVTORGB(ebx)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 YUY2 values with 8 Y and 4 UV producing 8 ARGB (32 bytes).
__declspec(naked)
void YUY2ToARGBRow_SSSE3(const uint8* src_yuy2,
                         uint8* dst_argb,
                         struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       ebx
    mov        eax, [esp + 4 + 4]   // yuy2
    mov        edx, [esp + 4 + 8]   // argb
    mov        ebx, [esp + 4 + 12]  // yuvconstants
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READYUY2
    YUVTORGB(ebx)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    ret
  }
}

// 8 pixels.
// 4 UYVY values with 8 Y and 4 UV producing 8 ARGB (32 bytes).
__declspec(naked)
void UYVYToARGBRow_SSSE3(const uint8* src_uyvy,
                         uint8* dst_argb,
                         struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       ebx
    mov        eax, [esp + 4 + 4]   // uyvy
    mov        edx, [esp + 4 + 8]   // argb
    mov        ebx, [esp + 4 + 12]  // yuvconstants
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READUYVY
    YUVTORGB(ebx)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    ret
  }
}

__declspec(naked)
void I422ToBGRARow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_bgra,
                         struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // bgra
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi

 convertloop:
    READYUV422
    YUVTORGB(ebx)
    STOREBGRA

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void I422ToABGRRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_abgr,
                         struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // abgr
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READYUV422
    YUVTORGB(ebx)
    STOREABGR

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void I422ToRGBARow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_rgba,
                         struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // rgba
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi

 convertloop:
    READYUV422
    YUVTORGB(ebx)
    STORERGBA

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_I422TOARGBROW_SSSE3

#ifdef HAS_I400TOARGBROW_SSE2
// 8 pixels of Y converted to 8 pixels of ARGB (32 bytes).
__declspec(naked)
void I400ToARGBRow_SSE2(const uint8* y_buf,
                        uint8* rgb_buf,
                        int width) {
  __asm {
    mov        eax, 0x4a354a35      // 4a35 = 18997 = round(1.164 * 64 * 256)
    movd       xmm2, eax
    pshufd     xmm2, xmm2, 0
    mov        eax, 0x04880488      // 0488 = 1160 = round(1.164 * 64 * 16)
    movd       xmm3, eax
    pshufd     xmm3, xmm3, 0
    pcmpeqb    xmm4, xmm4           // generate mask 0xff000000
    pslld      xmm4, 24

    mov        eax, [esp + 4]       // Y
    mov        edx, [esp + 8]       // rgb
    mov        ecx, [esp + 12]      // width

 convertloop:
    // Step 1: Scale Y contribution to 8 G values. G = (y - 16) * 1.164
    movq       xmm0, qword ptr [eax]
    lea        eax, [eax + 8]
    punpcklbw  xmm0, xmm0           // Y.Y
    pmulhuw    xmm0, xmm2
    psubusw    xmm0, xmm3
    psrlw      xmm0, 6
    packuswb   xmm0, xmm0           // G

    // Step 2: Weave into ARGB
    punpcklbw  xmm0, xmm0           // GG
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm0           // BGRA first 4 pixels
    punpckhwd  xmm1, xmm1           // BGRA next 4 pixels
    por        xmm0, xmm4
    por        xmm1, xmm4
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx,  [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
#endif  // HAS_I400TOARGBROW_SSE2
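
// For reference, a scalar sketch of the fixed point math above (illustrative
// helper, not part of libyuv): duplicating Y into both bytes of a word gives
// y * 0x0101, so the high 16 bits of the multiply by 18997 are ~y * 1.164 in
// 6 bit fixed point; subtracting 1160 removes the offset of 16 with rounding.
static __inline uint8 I400ToG_C(uint8 y) {
  int g = (((y * 0x0101 * 18997) >> 16) - 1160) >> 6;  // ~1.164 * (y - 16)
  return (uint8)(g < 0 ? 0 : (g > 255 ? 255 : g));
}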

#ifdef HAS_I400TOARGBROW_AVX2
// 16 pixels of Y converted to 16 pixels of ARGB (64 bytes).
// note: vpunpcklbw mutates and vpackuswb unmutates.
__declspec(naked)
void I400ToARGBRow_AVX2(const uint8* y_buf,
                        uint8* rgb_buf,
                        int width) {
  __asm {
    mov        eax, 0x4a354a35      // 4a35 = 18997 = round(1.164 * 64 * 256)
    vmovd      xmm2, eax
    vbroadcastss ymm2, xmm2
    mov        eax, 0x04880488      // 0488 = 1160 = round(1.164 * 64 * 16)
    vmovd      xmm3, eax
    vbroadcastss ymm3, xmm3
    vpcmpeqb   ymm4, ymm4, ymm4     // generate mask 0xff000000
    vpslld     ymm4, ymm4, 24

    mov        eax, [esp + 4]       // Y
    mov        edx, [esp + 8]       // rgb
    mov        ecx, [esp + 12]      // width

 convertloop:
    // Step 1: Scale Y contribution to 16 G values. G = (y - 16) * 1.164
    vmovdqu    xmm0, [eax]
    lea        eax, [eax + 16]
    vpermq     ymm0, ymm0, 0xd8           // vpunpcklbw mutates
    vpunpcklbw ymm0, ymm0, ymm0           // Y.Y
    vpmulhuw   ymm0, ymm0, ymm2
    vpsubusw   ymm0, ymm0, ymm3
    vpsrlw     ymm0, ymm0, 6
    vpackuswb  ymm0, ymm0, ymm0           // G.  still mutated: 3120

    // TODO(fbarchard): Weave alpha with unpack.
    // Step 2: Weave into ARGB
    vpunpcklbw ymm1, ymm0, ymm0           // GG - mutates
    vpermq     ymm1, ymm1, 0xd8
    vpunpcklwd ymm0, ymm1, ymm1           // GGGG first 8 pixels
    vpunpckhwd ymm1, ymm1, ymm1           // GGGG next 8 pixels
    vpor       ymm0, ymm0, ymm4
    vpor       ymm1, ymm1, ymm4
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx,  [edx + 64]
    sub        ecx, 16
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_I400TOARGBROW_AVX2

#ifdef HAS_MIRRORROW_SSSE3
// Shuffle table for reversing the bytes.
static const uvec8 kShuffleMirror = {
  15u, 14u, 13u, 12u, 11u, 10u, 9u, 8u, 7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u
};

// TODO(fbarchard): Replace lea with -16 offset.
__declspec(naked)
void MirrorRow_SSSE3(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    movdqa    xmm5, xmmword ptr kShuffleMirror

 convertloop:
    movdqu    xmm0, [eax - 16 + ecx]
    pshufb    xmm0, xmm5
    movdqu    [edx], xmm0
    lea       edx, [edx + 16]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}
#endif  // HAS_MIRRORROW_SSSE3

#ifdef HAS_MIRRORROW_AVX2
__declspec(naked)
void MirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    vbroadcastf128 ymm5, xmmword ptr kShuffleMirror

 convertloop:
    vmovdqu   ymm0, [eax - 32 + ecx]
    vpshufb   ymm0, ymm0, ymm5
    vpermq    ymm0, ymm0, 0x4e  // swap high and low halves
    vmovdqu   [edx], ymm0
    lea       edx, [edx + 32]
    sub       ecx, 32
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_MIRRORROW_AVX2
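
// For reference, a scalar sketch of the mirror (illustrative helper, not
// part of libyuv): the SIMD versions above walk the source backwards one
// register at a time and reverse each chunk with a byte shuffle.
static __inline void MirrorBytes_C(const uint8* src, uint8* dst, int width) {
  int x;
  for (x = 0; x < width; ++x) {
    dst[x] = src[width - 1 - x];
  }
}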

#ifdef HAS_MIRRORROW_UV_SSSE3
// Shuffle table for reversing the bytes of UV channels.
static const uvec8 kShuffleMirrorUV = {
  14u, 12u, 10u, 8u, 6u, 4u, 2u, 0u, 15u, 13u, 11u, 9u, 7u, 5u, 3u, 1u
};

__declspec(naked)
void MirrorUVRow_SSSE3(const uint8* src, uint8* dst_u, uint8* dst_v,
                       int width) {
  __asm {
    push      edi
    mov       eax, [esp + 4 + 4]   // src
    mov       edx, [esp + 4 + 8]   // dst_u
    mov       edi, [esp + 4 + 12]  // dst_v
    mov       ecx, [esp + 4 + 16]  // width
    movdqa    xmm1, xmmword ptr kShuffleMirrorUV
    lea       eax, [eax + ecx * 2 - 16]
    sub       edi, edx

 convertloop:
    movdqu    xmm0, [eax]
    lea       eax, [eax - 16]
    pshufb    xmm0, xmm1
    movlpd    qword ptr [edx], xmm0
    movhpd    qword ptr [edx + edi], xmm0
    lea       edx, [edx + 8]
    sub       ecx, 8
    jg        convertloop

    pop       edi
    ret
  }
}
#endif  // HAS_MIRRORROW_UV_SSSE3

#ifdef HAS_ARGBMIRRORROW_SSE2
__declspec(naked)
void ARGBMirrorRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    lea       eax, [eax - 16 + ecx * 4]  // last 4 pixels.

 convertloop:
    movdqu    xmm0, [eax]
    lea       eax, [eax - 16]
    pshufd    xmm0, xmm0, 0x1b
    movdqu    [edx], xmm0
    lea       edx, [edx + 16]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}
#endif  // HAS_ARGBMIRRORROW_SSE2

#ifdef HAS_ARGBMIRRORROW_AVX2
// Shuffle table for reversing the pixel order (32 bit lanes).
static const ulvec32 kARGBShuffleMirror_AVX2 = {
  7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u
};

__declspec(naked)
void ARGBMirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    vmovdqu   ymm5, ymmword ptr kARGBShuffleMirror_AVX2

 convertloop:
    vpermd    ymm0, ymm5, [eax - 32 + ecx * 4]  // permute dword order
    vmovdqu   [edx], ymm0
    lea       edx, [edx + 32]
    sub       ecx, 8
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBMIRRORROW_AVX2

#ifdef HAS_SPLITUVROW_SSE2
__declspec(naked)
void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uv
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    movdqa     xmm2, xmm0
    movdqa     xmm3, xmm1
    pand       xmm0, xmm5   // even bytes
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    psrlw      xmm2, 8      // odd bytes
    psrlw      xmm3, 8
    packuswb   xmm2, xmm3
    movdqu     [edx], xmm0
    movdqu     [edx + edi], xmm2
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}
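
// For reference, a scalar sketch of the split (illustrative helper, not part
// of libyuv): interleaved UV pairs are separated into U and V planes. The
// SSE2 version does 16 pairs per iteration by masking the even bytes and
// shifting down the odd bytes, then packing each back to bytes.
static __inline void SplitUVPair_C(const uint8* src_uv, uint8* dst_u,
                                   uint8* dst_v, int pairs) {
  int x;
  for (x = 0; x < pairs; ++x) {
    dst_u[x] = src_uv[x * 2];      // even bytes are U.
    dst_v[x] = src_uv[x * 2 + 1];  // odd bytes are V.
  }
}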

#endif  // HAS_SPLITUVROW_SSE2

#ifdef HAS_SPLITUVROW_AVX2
__declspec(naked)
void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uv
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm2, ymm0, 8      // odd bytes
    vpsrlw     ymm3, ymm1, 8
    vpand      ymm0, ymm0, ymm5   // even bytes
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1
    vpackuswb  ymm2, ymm2, ymm3
    vpermq     ymm0, ymm0, 0xd8
    vpermq     ymm2, ymm2, 0xd8
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + edi], ymm2
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}
#endif  // HAS_SPLITUVROW_AVX2

#ifdef HAS_MERGEUVROW_SSE2
__declspec(naked)
void MergeUVRow_SSE2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
                     int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_u
    mov        edx, [esp + 4 + 8]    // src_v
    mov        edi, [esp + 4 + 12]   // dst_uv
    mov        ecx, [esp + 4 + 16]   // width
    sub        edx, eax

  convertloop:
    movdqu     xmm0, [eax]      // read 16 U's
    movdqu     xmm1, [eax + edx]  // and 16 V's
    lea        eax,  [eax + 16]
    movdqa     xmm2, xmm0
    punpcklbw  xmm0, xmm1       // first 8 UV pairs
    punpckhbw  xmm2, xmm1       // next 8 UV pairs
    movdqu     [edi], xmm0
    movdqu     [edi + 16], xmm2
    lea        edi, [edi + 32]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}
#endif  //  HAS_MERGEUVROW_SSE2
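
// For reference, a scalar sketch of the merge (illustrative helper, not part
// of libyuv): separate U and V planes are interleaved back into UVUV order,
// which the SSE2 version does 16 at a time with punpcklbw/punpckhbw.
static __inline void MergeUVPair_C(uint8 u, uint8 v, uint8* dst_uv) {
  dst_uv[0] = u;
  dst_uv[1] = v;
}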
3747

3748
#ifdef HAS_MERGEUVROW_AVX2
3749
__declspec(naked)
3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765
void MergeUVRow_AVX2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
                     int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_u
    mov        edx, [esp + 4 + 8]    // src_v
    mov        edi, [esp + 4 + 12]   // dst_uv
    mov        ecx, [esp + 4 + 16]   // width
    sub        edx, eax

  convertloop:
    vmovdqu    ymm0, [eax]           // read 32 U's
    vmovdqu    ymm1, [eax + edx]     // and 32 V's
    lea        eax,  [eax + 32]
    vpunpcklbw ymm2, ymm0, ymm1      // low 16 UV pairs. mutated qqword 0,2
    vpunpckhbw ymm0, ymm0, ymm1      // high 16 UV pairs. mutated qqword 1,3
    vextractf128 [edi], ymm2, 0       // bytes 0..15
    vextractf128 [edi + 16], ymm0, 0  // bytes 16..31
    vextractf128 [edi + 32], ymm2, 1  // bytes 32..47
    vextractf128 [edi + 48], ymm0, 1  // bytes 48..63
    lea        edi, [edi + 64]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}
#endif  //  HAS_MERGEUVROW_AVX2

#ifdef HAS_COPYROW_SSE2
// CopyRow copies 'count' bytes using a 16 byte load/store, 32 bytes at a time.
__declspec(naked)
void CopyRow_SSE2(const uint8* src, uint8* dst, int count) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax, [eax + 32]
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    ret
  }
}
#endif  // HAS_COPYROW_SSE2

#ifdef HAS_COPYROW_AVX
// CopyRow copies 'count' bytes using a 32 byte load/store, 64 bytes at a time.
__declspec(naked)
void CopyRow_AVX(const uint8* src, uint8* dst, int count) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax, [eax + 64]
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx, [edx + 64]
    sub        ecx, 64
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_COPYROW_AVX

// Multiple of 1.
__declspec(naked)
void CopyRow_ERMS(const uint8* src, uint8* dst, int count) {
  __asm {
    mov        eax, esi
    mov        edx, edi
    mov        esi, [esp + 4]   // src
    mov        edi, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    rep movsb
    mov        edi, edx
    mov        esi, eax
    ret
  }
}

#ifdef HAS_ARGBCOPYALPHAROW_SSE2
// width in pixels
__declspec(naked)
void ARGBCopyAlphaRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    pcmpeqb    xmm0, xmm0       // generate mask 0xff000000
    pslld      xmm0, 24
    pcmpeqb    xmm1, xmm1       // generate mask 0x00ffffff
    psrld      xmm1, 8

  convertloop:
    movdqu     xmm2, [eax]
    movdqu     xmm3, [eax + 16]
    lea        eax, [eax + 32]
    movdqu     xmm4, [edx]
    movdqu     xmm5, [edx + 16]
    pand       xmm2, xmm0
    pand       xmm3, xmm0
    pand       xmm4, xmm1
    pand       xmm5, xmm1
    por        xmm2, xmm4
    por        xmm3, xmm5
    movdqu     [edx], xmm2
    movdqu     [edx + 16], xmm3
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBCOPYALPHAROW_SSE2
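
// For reference, a scalar sketch of the alpha copy (illustrative helper, not
// part of libyuv): the source alpha byte replaces the destination alpha byte
// while the BGR bytes are left intact, exactly the two masks used above.
static __inline uint32 CopyAlpha_C(uint32 src_argb, uint32 dst_argb) {
  return (src_argb & 0xff000000u) | (dst_argb & 0x00ffffffu);
}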

#ifdef HAS_ARGBCOPYALPHAROW_AVX2
// width in pixels
__declspec(naked)
void ARGBCopyAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    vpcmpeqb   ymm0, ymm0, ymm0
    vpsrld     ymm0, ymm0, 8    // generate mask 0x00ffffff

  convertloop:
    vmovdqu    ymm1, [eax]
    vmovdqu    ymm2, [eax + 32]
    lea        eax, [eax + 64]
    vpblendvb  ymm1, ymm1, [edx], ymm0
    vpblendvb  ymm2, ymm2, [edx + 32], ymm0
    vmovdqu    [edx], ymm1
    vmovdqu    [edx + 32], ymm2
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBCOPYALPHAROW_AVX2

#ifdef HAS_ARGBCOPYYTOALPHAROW_SSE2
// width in pixels
__declspec(naked)
void ARGBCopyYToAlphaRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    pcmpeqb    xmm0, xmm0       // generate mask 0xff000000
    pslld      xmm0, 24
    pcmpeqb    xmm1, xmm1       // generate mask 0x00ffffff
    psrld      xmm1, 8

  convertloop:
    movq       xmm2, qword ptr [eax]  // 8 Y's
    lea        eax, [eax + 8]
    punpcklbw  xmm2, xmm2
    punpckhwd  xmm3, xmm2
    punpcklwd  xmm2, xmm2
    movdqu     xmm4, [edx]
    movdqu     xmm5, [edx + 16]
    pand       xmm2, xmm0
    pand       xmm3, xmm0
    pand       xmm4, xmm1
    pand       xmm5, xmm1
    por        xmm2, xmm4
    por        xmm3, xmm5
3937 3938
    movdqu     [edx], xmm2
    movdqu     [edx + 16], xmm3
3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBCOPYYTOALPHAROW_SSE2

#ifdef HAS_ARGBCOPYYTOALPHAROW_AVX2
// width in pixels
__declspec(naked)
void ARGBCopyYToAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    vpcmpeqb   ymm0, ymm0, ymm0
    vpsrld     ymm0, ymm0, 8    // generate mask 0x00ffffff

  convertloop:
    vpmovzxbd  ymm1, qword ptr [eax]
    vpmovzxbd  ymm2, qword ptr [eax + 8]
    lea        eax, [eax + 16]
    vpslld     ymm1, ymm1, 24
    vpslld     ymm2, ymm2, 24
    vpblendvb  ymm1, ymm1, [edx], ymm0
    vpblendvb  ymm2, ymm2, [edx + 32], ymm0
    vmovdqu    [edx], ymm1
    vmovdqu    [edx + 32], ymm2
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBCOPYYTOALPHAROW_AVX2

#ifdef HAS_SETROW_X86
// Write 'count' bytes using an 8 bit value repeated.
// Count should be a multiple of 4.
__declspec(naked)
void SetRow_X86(uint8* dst, uint8 v8, int count) {
  __asm {
    movzx      eax, byte ptr [esp + 8]    // v8
    mov        edx, 0x01010101  // Duplicate byte to all bytes.
    mul        edx              // overwrites edx with upper part of result.
    mov        edx, edi
    mov        edi, [esp + 4]   // dst
    mov        ecx, [esp + 12]  // count
    shr        ecx, 2
    rep stosd
    mov        edi, edx
    ret
  }
}


// Write 'count' bytes using an 8 bit value repeated.
__declspec(naked)
void SetRow_ERMS(uint8* dst, uint8 v8, int count) {
  __asm {
    mov        edx, edi
    mov        edi, [esp + 4]   // dst
    mov        eax, [esp + 8]   // v8
    mov        ecx, [esp + 12]  // count
    rep stosb
    mov        edi, edx
    ret
  }
}

// Write 'count' 32 bit values.
__declspec(naked)
void ARGBSetRow_X86(uint8* dst_argb, uint32 v32, int count) {
  __asm {
    mov        edx, edi
    mov        edi, [esp + 4]   // dst
    mov        eax, [esp + 8]   // v32
    mov        ecx, [esp + 12]  // count
    rep stosd
    mov        edi, edx
    ret
  }
}
#endif  // HAS_SETROW_X86

#ifdef HAS_YUY2TOYROW_AVX2
__declspec(naked)
void YUY2ToYRow_AVX2(const uint8* src_yuy2, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_yuy2
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5  // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpand      ymm0, ymm0, ymm5   // even bytes are Y
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    vzeroupper
    ret
  }
}

__declspec(naked)
void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2,
                      uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_yuy2
    mov        esi, [esp + 8 + 8]    // stride_yuy2
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vpavgb     ymm0, ymm0, [eax + esi]
    vpavgb     ymm1, ymm1, [eax + esi + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm0, ymm0, 8      // YUYV -> UVUV
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}

__declspec(naked)
void YUY2ToUV422Row_AVX2(const uint8* src_yuy2,
                         uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_yuy2
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm0, ymm0, 8      // YUYV -> UVUV
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}

__declspec(naked)
void UYVYToYRow_AVX2(const uint8* src_uyvy,
                     uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_uyvy
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // pix

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm0, ymm0, 8      // odd bytes are Y
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    vzeroupper
    ret
  }
}

__declspec(naked)
void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy,
                      uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_uyvy
    mov        esi, [esp + 8 + 8]    // stride_uyvy
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vpavgb     ymm0, ymm0, [eax + esi]
    vpavgb     ymm1, ymm1, [eax + esi + 32]
    lea        eax,  [eax + 64]
    vpand      ymm0, ymm0, ymm5   // UYVY -> UVUV
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}

__declspec(naked)
void UYVYToUV422Row_AVX2(const uint8* src_uyvy,
                         uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uyvy
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpand      ymm0, ymm0, ymm5   // UYVY -> UVUV
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}
#endif  // HAS_YUY2TOYROW_AVX2

#ifdef HAS_YUY2TOYROW_SSE2
__declspec(naked)
void YUY2ToYRow_SSE2(const uint8* src_yuy2,
                     uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_yuy2
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // pix
    pcmpeqb    xmm5, xmm5        // generate mask 0x00ff00ff
    psrlw      xmm5, 8

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    pand       xmm0, xmm5   // even bytes are Y
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}
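
// For reference, YUY2 stores a pixel pair as Y0 U0 Y1 V0. A scalar sketch of
// the Y extraction above (illustrative helper, not part of libyuv):
static __inline void YUY2ToY_C(const uint8* src_yuy2, uint8* dst_y, int pix) {
  int x;
  for (x = 0; x < pix; ++x) {
    dst_y[x] = src_yuy2[x * 2];  // even bytes are Y; odd bytes are U or V.
  }
}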

__declspec(naked)
void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2,
                      uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_yuy2
    mov        esi, [esp + 8 + 8]    // stride_yuy2
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + esi]
    movdqu     xmm3, [eax + esi + 16]
    lea        eax,  [eax + 32]
    pavgb      xmm0, xmm2
    pavgb      xmm1, xmm3
    psrlw      xmm0, 8      // YUYV -> UVUV
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
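
// For reference, a scalar sketch of the UV extraction above (illustrative
// helper, not part of libyuv): chroma from two adjacent YUY2 rows is averaged
// with rounding (pavgb), then the interleaved U and V samples are split.
static __inline void YUY2PairToUV_C(const uint8* row0, const uint8* row1,
                                    uint8* u, uint8* v) {
  *u = (uint8)((row0[1] + row1[1] + 1) >> 1);  // rounded average of U.
  *v = (uint8)((row0[3] + row1[3] + 1) >> 1);  // rounded average of V.
}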

__declspec(naked)
void YUY2ToUV422Row_SSE2(const uint8* src_yuy2,
                         uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_yuy2
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    psrlw      xmm0, 8      // YUYV -> UVUV
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}

__declspec(naked)
void UYVYToYRow_SSE2(const uint8* src_uyvy,
                     uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_uyvy
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // pix

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    psrlw      xmm0, 8    // odd bytes are Y
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy,
                      uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_uyvy
    mov        esi, [esp + 8 + 8]    // stride_uyvy
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + esi]
    movdqu     xmm3, [eax + esi + 16]
    lea        eax,  [eax + 32]
    pavgb      xmm0, xmm2
    pavgb      xmm1, xmm3
    pand       xmm0, xmm5   // UYVY -> UVUV
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void UYVYToUV422Row_SSE2(const uint8* src_uyvy,
                         uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uyvy
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    pand       xmm0, xmm5   // UYVY -> UVUV
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}
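
// Illustrative scalar equivalent of the UYVY variants above, reference only
// (#if 0, not compiled).  UYVY stores 2 pixels in 4 bytes as U Y0 V Y1, so
// chroma sits in the even bytes; the two-row version averages vertically.
#if 0
static void UYVYToUVRow_Ref(const uint8* src_uyvy, int stride_uyvy,
                            uint8* dst_u, uint8* dst_v, int pix) {
  const uint8* next_row = src_uyvy + stride_uyvy;
  int x;
  for (x = 0; x < pix; x += 2) {
    dst_u[0] = (src_uyvy[0] + next_row[0] + 1) >> 1;  // rounded avg, as pavgb.
    dst_v[0] = (src_uyvy[2] + next_row[2] + 1) >> 1;
    src_uyvy += 4;
    next_row += 4;
    dst_u += 1;
    dst_v += 1;
  }
}
#endif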
#endif  // HAS_YUY2TOYROW_SSE2

#ifdef HAS_ARGBBLENDROW_SSSE3
// Shuffle table for isolating alpha.
static const uvec8 kShuffleAlpha = {
  3u, 0x80, 3u, 0x80, 7u, 0x80, 7u, 0x80,
  11u, 0x80, 11u, 0x80, 15u, 0x80, 15u, 0x80
};

// Blend 8 pixels at a time.
__declspec(naked)
void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
                        uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm7, xmm7       // generate constant 0x0001
    psrlw      xmm7, 15
    pcmpeqb    xmm6, xmm6       // generate mask 0x00ff00ff
    psrlw      xmm6, 8
    pcmpeqb    xmm5, xmm5       // generate mask 0xff00ff00
    psllw      xmm5, 8
    pcmpeqb    xmm4, xmm4       // generate mask 0xff000000
    pslld      xmm4, 24
    sub        ecx, 4
    jl         convertloop4b    // less than 4 pixels?

    // 4 pixel loop.
  convertloop4:
    movdqu     xmm3, [eax]      // src argb
    lea        eax, [eax + 16]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movdqu     xmm2, [esi]      // _r_b
    pshufb     xmm3, xmmword ptr kShuffleAlpha // alpha
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movdqu     xmm1, [esi]      // _a_g
    lea        esi, [esi + 16]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        convertloop4

  convertloop4b:
    add        ecx, 4 - 1
    jl         convertloop1b

    // 1 pixel loop.
  convertloop1:
    movd       xmm3, [eax]      // src argb
    lea        eax, [eax + 4]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movd       xmm2, [esi]      // _r_b
    pshufb     xmm3, xmmword ptr kShuffleAlpha // alpha
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movd       xmm1, [esi]      // _a_g
    lea        esi, [esi + 4]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        convertloop1

  convertloop1b:
    pop        esi
    ret
  }
}
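
// Illustrative scalar sketch of the blend above, reference only (#if 0, not
// compiled).  Assuming a premultiplied source, each destination channel is
// back * (256 - alpha) / 256 plus the source channel (the SIMD code
// saturates the sum with paddusb), and output alpha is forced to 255.
#if 0
static void ARGBBlendRow_Ref(const uint8* src_argb0, const uint8* src_argb1,
                             uint8* dst_argb, int width) {
  int x;
  for (x = 0; x < width; ++x) {
    uint32 a = src_argb0[3];
    dst_argb[0] = (uint8)((((256 - a) * src_argb1[0]) >> 8) + src_argb0[0]);
    dst_argb[1] = (uint8)((((256 - a) * src_argb1[1]) >> 8) + src_argb0[1]);
    dst_argb[2] = (uint8)((((256 - a) * src_argb1[2]) >> 8) + src_argb0[2]);
    dst_argb[3] = 255u;
    src_argb0 += 4;
    src_argb1 += 4;
    dst_argb += 4;
  }
}
#endif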
#endif  // HAS_ARGBBLENDROW_SSSE3

#ifdef HAS_ARGBATTENUATEROW_SSSE3
// Shuffle table duplicating alpha.
static const uvec8 kShuffleAlpha0 = {
  3u, 3u, 3u, 3u, 3u, 3u, 128u, 128u, 7u, 7u, 7u, 7u, 7u, 7u, 128u, 128u,
};
static const uvec8 kShuffleAlpha1 = {
  11u, 11u, 11u, 11u, 11u, 11u, 128u, 128u,
  15u, 15u, 15u, 15u, 15u, 15u, 128u, 128u,
};
__declspec(naked)
void ARGBAttenuateRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    pcmpeqb    xmm3, xmm3       // generate mask 0xff000000
    pslld      xmm3, 24
    movdqa     xmm4, xmmword ptr kShuffleAlpha0
    movdqa     xmm5, xmmword ptr kShuffleAlpha1

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    pshufb     xmm0, xmm4       // isolate first 2 alphas
    movdqu     xmm1, [eax]      // read 4 pixels
    punpcklbw  xmm1, xmm1       // first 2 pixel rgbs
    pmulhuw    xmm0, xmm1       // rgb * a
    movdqu     xmm1, [eax]      // read 4 pixels
    pshufb     xmm1, xmm5       // isolate next 2 alphas
    movdqu     xmm2, [eax]      // read 4 pixels
    punpckhbw  xmm2, xmm2       // next 2 pixel rgbs
    pmulhuw    xmm1, xmm2       // rgb * a
    movdqu     xmm2, [eax]      // mask original alpha
    lea        eax, [eax + 16]
    pand       xmm2, xmm3
    psrlw      xmm0, 8
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    por        xmm0, xmm2       // copy original alpha
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    ret
  }
}
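
// Illustrative scalar sketch of attenuation, reference only (#if 0, not
// compiled): each color channel is premultiplied by its alpha, roughly
// b = b * a / 255 (the SIMD code uses a close fixed point approximation
// via pmulhuw on byte-duplicated values).
#if 0
static void ARGBAttenuateRow_Ref(const uint8* src_argb, uint8* dst_argb,
                                 int width) {
  int x;
  for (x = 0; x < width; ++x) {
    uint32 a = src_argb[3];
    dst_argb[0] = (uint8)((src_argb[0] * a) / 255);  // B
    dst_argb[1] = (uint8)((src_argb[1] * a) / 255);  // G
    dst_argb[2] = (uint8)((src_argb[2] * a) / 255);  // R
    dst_argb[3] = (uint8)a;                          // A unchanged
    src_argb += 4;
    dst_argb += 4;
  }
}
#endif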
#endif  // HAS_ARGBATTENUATEROW_SSSE3

#ifdef HAS_ARGBATTENUATEROW_AVX2
// Shuffle table duplicating alpha.
static const uvec8 kShuffleAlpha_AVX2 = {
  6u, 7u, 6u, 7u, 6u, 7u, 128u, 128u, 14u, 15u, 14u, 15u, 14u, 15u, 128u, 128u
};
__declspec(naked)
void ARGBAttenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    sub        edx, eax
    vbroadcastf128 ymm4, xmmword ptr kShuffleAlpha_AVX2
    vpcmpeqb   ymm5, ymm5, ymm5 // generate mask 0xff000000
    vpslld     ymm5, ymm5, 24

 convertloop:
    vmovdqu    ymm6, [eax]       // read 8 pixels.
    vpunpcklbw ymm0, ymm6, ymm6  // low 4 pixels. mutated.
    vpunpckhbw ymm1, ymm6, ymm6  // high 4 pixels. mutated.
    vpshufb    ymm2, ymm0, ymm4  // low 4 alphas
    vpshufb    ymm3, ymm1, ymm4  // high 4 alphas
    vpmulhuw   ymm0, ymm0, ymm2  // rgb * a
    vpmulhuw   ymm1, ymm1, ymm3  // rgb * a
    vpand      ymm6, ymm6, ymm5  // isolate alpha
    vpsrlw     ymm0, ymm0, 8
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1  // unmutated.
    vpor       ymm0, ymm0, ymm6  // copy original alpha
    vmovdqu    [eax + edx], ymm0
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBATTENUATEROW_AVX2

#ifdef HAS_ARGBUNATTENUATEROW_SSE2
// Unattenuate 4 pixels at a time.
__declspec(naked)
void ARGBUnattenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb,
                             int width) {
  __asm {
    push       ebx
    push       esi
    push       edi
    mov        eax, [esp + 12 + 4]   // src_argb
    mov        edx, [esp + 12 + 8]   // dst_argb
    mov        ecx, [esp + 12 + 12]  // width
    lea        ebx, fixed_invtbl8

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    movzx      esi, byte ptr [eax + 3]  // first alpha
    movzx      edi, byte ptr [eax + 7]  // second alpha
    punpcklbw  xmm0, xmm0       // first 2
    movd       xmm2, dword ptr [ebx + esi * 4]
    movd       xmm3, dword ptr [ebx + edi * 4]
    pshuflw    xmm2, xmm2, 040h // first 4 inv_alpha words.  1, a, a, a
    pshuflw    xmm3, xmm3, 040h // next 4 inv_alpha words
    movlhps    xmm2, xmm3
    pmulhuw    xmm0, xmm2       // rgb * ia

    movdqu     xmm1, [eax]      // read 4 pixels
    movzx      esi, byte ptr [eax + 11]  // third alpha
    movzx      edi, byte ptr [eax + 15]  // fourth alpha
    punpckhbw  xmm1, xmm1       // next 2
    movd       xmm2, dword ptr [ebx + esi * 4]
    movd       xmm3, dword ptr [ebx + edi * 4]
    pshuflw    xmm2, xmm2, 040h // first 4 inv_alpha words
    pshuflw    xmm3, xmm3, 040h // next 4 inv_alpha words
    movlhps    xmm2, xmm3
    pmulhuw    xmm1, xmm2       // rgb * ia
    lea        eax, [eax + 16]
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    pop        edi
    pop        esi
    pop        ebx
    ret
  }
}
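
// Illustrative scalar sketch of unattenuation, reference only (#if 0, not
// compiled).  fixed_invtbl8 stores 8.8 fixed point reciprocals so the
// divide by alpha becomes a multiply; plain integer math shows the intent
// (here alpha 0 maps to a unity scale to avoid a divide by zero).
#if 0
static void ARGBUnattenuateRow_Ref(const uint8* src_argb, uint8* dst_argb,
                                   int width) {
  int x;
  for (x = 0; x < width; ++x) {
    uint32 a = src_argb[3];
    uint32 ia = a ? (0x10000u / a) : 0x100u;  // 8.8 reciprocal.
    uint32 b = (src_argb[0] * ia) >> 8;
    uint32 g = (src_argb[1] * ia) >> 8;
    uint32 r = (src_argb[2] * ia) >> 8;
    dst_argb[0] = (uint8)(b > 255 ? 255 : b);  // packuswb saturates likewise.
    dst_argb[1] = (uint8)(g > 255 ? 255 : g);
    dst_argb[2] = (uint8)(r > 255 ? 255 : r);
    dst_argb[3] = (uint8)a;
    src_argb += 4;
    dst_argb += 4;
  }
}
#endif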
#endif  // HAS_ARGBUNATTENUATEROW_SSE2

#ifdef HAS_ARGBUNATTENUATEROW_AVX2
// Shuffle table duplicating alpha.
static const uvec8 kUnattenShuffleAlpha_AVX2 = {
  0u, 1u, 0u, 1u, 0u, 1u, 6u, 7u, 8u, 9u, 8u, 9u, 8u, 9u, 14u, 15u
};
// TODO(fbarchard): Enable USE_GATHER for future hardware if faster.
// USE_GATHER is not on by default, due to being a slow instruction.
#ifdef USE_GATHER
__declspec(naked)
void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb,
                             int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    sub        edx, eax
    vbroadcastf128 ymm4, xmmword ptr kUnattenShuffleAlpha_AVX2

 convertloop:
    vmovdqu    ymm6, [eax]       // read 8 pixels.
    vpcmpeqb   ymm5, ymm5, ymm5  // generate mask 0xffffffff for gather.
    vpsrld     ymm2, ymm6, 24    // alpha in low 8 bits.
    vpunpcklbw ymm0, ymm6, ymm6  // low 4 pixels. mutated.
    vpunpckhbw ymm1, ymm6, ymm6  // high 4 pixels. mutated.
    vpgatherdd ymm3, [ymm2 * 4 + fixed_invtbl8], ymm5  // ymm5 cleared.  1, a
    vpunpcklwd ymm2, ymm3, ymm3  // low 4 inverted alphas. mutated. 1, 1, a, a
    vpunpckhwd ymm3, ymm3, ymm3  // high 4 inverted alphas. mutated.
    vpshufb    ymm2, ymm2, ymm4  // replicate low 4 alphas. 1, a, a, a
    vpshufb    ymm3, ymm3, ymm4  // replicate high 4 alphas
    vpmulhuw   ymm0, ymm0, ymm2  // rgb * ia
    vpmulhuw   ymm1, ymm1, ymm3  // rgb * ia
    vpackuswb  ymm0, ymm0, ymm1  // unmutated.
    vmovdqu    [eax + edx], ymm0
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop

    vzeroupper
    ret
  }
}
#else  // USE_GATHER
__declspec(naked)
void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb,
                             int width) {
  __asm {

    push       ebx
    push       esi
    push       edi
    mov        eax, [esp + 12 + 4]   // src_argb
    mov        edx, [esp + 12 + 8]   // dst_argb
    mov        ecx, [esp + 12 + 12]  // width
    sub        edx, eax
    lea        ebx, fixed_invtbl8
    vbroadcastf128 ymm5, xmmword ptr kUnattenShuffleAlpha_AVX2

 convertloop:
    // replace VPGATHER
    movzx      esi, byte ptr [eax + 3]                 // alpha0
    movzx      edi, byte ptr [eax + 7]                 // alpha1
    vmovd      xmm0, dword ptr [ebx + esi * 4]  // [1,a0]
    vmovd      xmm1, dword ptr [ebx + edi * 4]  // [1,a1]
    movzx      esi, byte ptr [eax + 11]                // alpha2
    movzx      edi, byte ptr [eax + 15]                // alpha3
    vpunpckldq xmm6, xmm0, xmm1                        // [1,a1,1,a0]
    vmovd      xmm2, dword ptr [ebx + esi * 4]  // [1,a2]
    vmovd      xmm3, dword ptr [ebx + edi * 4]  // [1,a3]
    movzx      esi, byte ptr [eax + 19]                // alpha4
    movzx      edi, byte ptr [eax + 23]                // alpha5
    vpunpckldq xmm7, xmm2, xmm3                        // [1,a3,1,a2]
    vmovd      xmm0, dword ptr [ebx + esi * 4]  // [1,a4]
    vmovd      xmm1, dword ptr [ebx + edi * 4]  // [1,a5]
    movzx      esi, byte ptr [eax + 27]                // alpha6
    movzx      edi, byte ptr [eax + 31]                // alpha7
    vpunpckldq xmm0, xmm0, xmm1                        // [1,a5,1,a4]
    vmovd      xmm2, dword ptr [ebx + esi * 4]  // [1,a6]
    vmovd      xmm3, dword ptr [ebx + edi * 4]  // [1,a7]
    vpunpckldq xmm2, xmm2, xmm3                        // [1,a7,1,a6]
    vpunpcklqdq xmm3, xmm6, xmm7                       // [1,a3,1,a2,1,a1,1,a0]
    vpunpcklqdq xmm0, xmm0, xmm2                       // [1,a7,1,a6,1,a5,1,a4]
    vinserti128 ymm3, ymm3, xmm0, 1 // [1,a7,1,a6,1,a5,1,a4,1,a3,1,a2,1,a1,1,a0]
    // end of VPGATHER

    vmovdqu    ymm6, [eax]       // read 8 pixels.
    vpunpcklbw ymm0, ymm6, ymm6  // low 4 pixels. mutated.
    vpunpckhbw ymm1, ymm6, ymm6  // high 4 pixels. mutated.
    vpunpcklwd ymm2, ymm3, ymm3  // low 4 inverted alphas. mutated. 1, 1, a, a
    vpunpckhwd ymm3, ymm3, ymm3  // high 4 inverted alphas. mutated.
    vpshufb    ymm2, ymm2, ymm5  // replicate low 4 alphas. 1, a, a, a
    vpshufb    ymm3, ymm3, ymm5  // replicate high 4 alphas
    vpmulhuw   ymm0, ymm0, ymm2  // rgb * ia
    vpmulhuw   ymm1, ymm1, ymm3  // rgb * ia
    vpackuswb  ymm0, ymm0, ymm1  // unmutated.
    vmovdqu    [eax + edx], ymm0
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    pop        ebx
    vzeroupper
    ret
  }
}
#endif  // USE_GATHER
#endif  // HAS_ARGBUNATTENUATEROW_AVX2

#ifdef HAS_ARGBGRAYROW_SSSE3
// Convert 8 ARGB pixels (32 bytes) to 8 Gray ARGB pixels.
__declspec(naked)
void ARGBGrayRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_argb */
    mov        ecx, [esp + 12]  /* width */
    movdqa     xmm4, xmmword ptr kARGBToYJ
    movdqa     xmm5, xmmword ptr kAddYJ64

 convertloop:
    movdqu     xmm0, [eax]  // G
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    phaddw     xmm0, xmm1
    paddw      xmm0, xmm5  // Add .5 for rounding.
    psrlw      xmm0, 7
    packuswb   xmm0, xmm0   // 8 G bytes
    movdqu     xmm2, [eax]  // A
    movdqu     xmm3, [eax + 16]
    lea        eax, [eax + 32]
    psrld      xmm2, 24
    psrld      xmm3, 24
    packuswb   xmm2, xmm3
    packuswb   xmm2, xmm2   // 8 A bytes
    movdqa     xmm3, xmm0   // Weave into GG, GA, then GGGA
    punpcklbw  xmm0, xmm0   // 8 GG words
    punpcklbw  xmm3, xmm2   // 8 GA words
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm3   // GGGA first 4
    punpckhwd  xmm1, xmm3   // GGGA next 4
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
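
// Illustrative scalar sketch of the gray conversion, reference only (#if 0,
// not compiled), assuming the kARGBToYJ weights defined earlier in this
// file (15, 75, 38 in B, G, R order, with 7 bit rounding via kAddYJ64):
#if 0
static void ARGBGrayRow_Ref(const uint8* src_argb, uint8* dst_argb,
                            int width) {
  int x;
  for (x = 0; x < width; ++x) {
    uint8 y = (uint8)((15 * src_argb[0] + 75 * src_argb[1] +
                       38 * src_argb[2] + 64) >> 7);
    dst_argb[0] = y;            // B
    dst_argb[1] = y;            // G
    dst_argb[2] = y;            // R
    dst_argb[3] = src_argb[3];  // keep original alpha
    src_argb += 4;
    dst_argb += 4;
  }
}
#endif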
#endif  // HAS_ARGBGRAYROW_SSSE3

#ifdef HAS_ARGBSEPIAROW_SSSE3
//    b = (r * 35 + g * 68 + b * 17) >> 7
//    g = (r * 45 + g * 88 + b * 22) >> 7
//    r = (r * 50 + g * 98 + b * 24) >> 7
// Constant for ARGB color to sepia tone.
static const vec8 kARGBToSepiaB = {
  17, 68, 35, 0, 17, 68, 35, 0, 17, 68, 35, 0, 17, 68, 35, 0
};

static const vec8 kARGBToSepiaG = {
  22, 88, 45, 0, 22, 88, 45, 0, 22, 88, 45, 0, 22, 88, 45, 0
};

static const vec8 kARGBToSepiaR = {
  24, 98, 50, 0, 24, 98, 50, 0, 24, 98, 50, 0, 24, 98, 50, 0
};

// Convert 8 ARGB pixels (32 bytes) to 8 Sepia ARGB pixels.
__declspec(naked)
void ARGBSepiaRow_SSSE3(uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   /* dst_argb */
    mov        ecx, [esp + 8]   /* width */
    movdqa     xmm2, xmmword ptr kARGBToSepiaB
    movdqa     xmm3, xmmword ptr kARGBToSepiaG
    movdqa     xmm4, xmmword ptr kARGBToSepiaR

 convertloop:
    movdqu     xmm0, [eax]  // B
    movdqu     xmm6, [eax + 16]
    pmaddubsw  xmm0, xmm2
    pmaddubsw  xmm6, xmm2
    phaddw     xmm0, xmm6
    psrlw      xmm0, 7
    packuswb   xmm0, xmm0   // 8 B values
    movdqu     xmm5, [eax]  // G
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm5, xmm3
    pmaddubsw  xmm1, xmm3
    phaddw     xmm5, xmm1
    psrlw      xmm5, 7
    packuswb   xmm5, xmm5   // 8 G values
    punpcklbw  xmm0, xmm5   // 8 BG values
    movdqu     xmm5, [eax]  // R
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm5, xmm4
    pmaddubsw  xmm1, xmm4
    phaddw     xmm5, xmm1
    psrlw      xmm5, 7
    packuswb   xmm5, xmm5   // 8 R values
    movdqu     xmm6, [eax]  // A
    movdqu     xmm1, [eax + 16]
    psrld      xmm6, 24
    psrld      xmm1, 24
    packuswb   xmm6, xmm1
    packuswb   xmm6, xmm6   // 8 A values
    punpcklbw  xmm5, xmm6   // 8 RA values
    movdqa     xmm1, xmm0   // Weave BG, RA together
    punpcklwd  xmm0, xmm5   // BGRA first 4
    punpckhwd  xmm1, xmm5   // BGRA next 4
    movdqu     [eax], xmm0
    movdqu     [eax + 16], xmm1
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
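
// Illustrative scalar sketch of the sepia transform, reference only (#if 0,
// not compiled), using the coefficients from the tables above; packuswb in
// the SIMD code provides the same saturation to 255.
#if 0
static void ARGBSepiaRow_Ref(uint8* dst_argb, int width) {
  int x;
  for (x = 0; x < width; ++x) {
    int b = dst_argb[0];
    int g = dst_argb[1];
    int r = dst_argb[2];
    int sb = (r * 35 + g * 68 + b * 17) >> 7;
    int sg = (r * 45 + g * 88 + b * 22) >> 7;
    int sr = (r * 50 + g * 98 + b * 24) >> 7;
    dst_argb[0] = (uint8)(sb > 255 ? 255 : sb);
    dst_argb[1] = (uint8)(sg > 255 ? 255 : sg);
    dst_argb[2] = (uint8)(sr > 255 ? 255 : sr);
    dst_argb += 4;  // alpha is left untouched.
  }
}
#endif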
#endif  // HAS_ARGBSEPIAROW_SSSE3

#ifdef HAS_ARGBCOLORMATRIXROW_SSSE3
// Transform 8 ARGB pixels (32 bytes) with color matrix.
// Same as Sepia except matrix is provided.
// TODO(fbarchard): packuswbs only use half of the reg. To make RGBA, combine R
// and B into a high and low, then G/A, unpackl/hbw and then unpckl/hwd.
__declspec(naked)
void ARGBColorMatrixRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
                              const int8* matrix_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_argb */
    mov        ecx, [esp + 12]  /* matrix_argb */
    movdqu     xmm5, [ecx]
    pshufd     xmm2, xmm5, 0x00
    pshufd     xmm3, xmm5, 0x55
    pshufd     xmm4, xmm5, 0xaa
    pshufd     xmm5, xmm5, 0xff
    mov        ecx, [esp + 16]  /* width */

 convertloop:
    movdqu     xmm0, [eax]  // B
    movdqu     xmm7, [eax + 16]
    pmaddubsw  xmm0, xmm2
    pmaddubsw  xmm7, xmm2
    movdqu     xmm6, [eax]  // G
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm6, xmm3
    pmaddubsw  xmm1, xmm3
    phaddsw    xmm0, xmm7   // B
    phaddsw    xmm6, xmm1   // G
    psraw      xmm0, 6      // B
    psraw      xmm6, 6      // G
    packuswb   xmm0, xmm0   // 8 B values
    packuswb   xmm6, xmm6   // 8 G values
    punpcklbw  xmm0, xmm6   // 8 BG values
    movdqu     xmm1, [eax]  // R
    movdqu     xmm7, [eax + 16]
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm7, xmm4
    phaddsw    xmm1, xmm7   // R
    movdqu     xmm6, [eax]  // A
    movdqu     xmm7, [eax + 16]
    pmaddubsw  xmm6, xmm5
    pmaddubsw  xmm7, xmm5
    phaddsw    xmm6, xmm7   // A
    psraw      xmm1, 6      // R
    psraw      xmm6, 6      // A
    packuswb   xmm1, xmm1   // 8 R values
    packuswb   xmm6, xmm6   // 8 A values
    punpcklbw  xmm1, xmm6   // 8 RA values
    movdqa     xmm6, xmm0   // Weave BG, RA together
    punpcklwd  xmm0, xmm1   // BGRA first 4
    punpckhwd  xmm6, xmm1   // BGRA next 4
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm6
    lea        eax, [eax + 32]
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
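
// Illustrative scalar sketch of the color matrix transform, reference only
// (#if 0, not compiled): each output channel is a signed dot product of the
// BGRA input with one row of 4 coefficients, shifted down by 6 bits.
#if 0
static void ARGBColorMatrixRow_Ref(const uint8* src_argb, uint8* dst_argb,
                                   const int8* matrix_argb, int width) {
  int x, i;
  for (x = 0; x < width; ++x) {
    for (i = 0; i < 4; ++i) {
      int v = (src_argb[0] * matrix_argb[i * 4 + 0] +
               src_argb[1] * matrix_argb[i * 4 + 1] +
               src_argb[2] * matrix_argb[i * 4 + 2] +
               src_argb[3] * matrix_argb[i * 4 + 3]) >> 6;
      dst_argb[i] = (uint8)(v < 0 ? 0 : (v > 255 ? 255 : v));  // saturate
    }
    src_argb += 4;
    dst_argb += 4;
  }
}
#endif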
#endif  // HAS_ARGBCOLORMATRIXROW_SSSE3

#ifdef HAS_ARGBQUANTIZEROW_SSE2
// Quantize 4 ARGB pixels (16 bytes).
__declspec(naked)
void ARGBQuantizeRow_SSE2(uint8* dst_argb, int scale, int interval_size,
                          int interval_offset, int width) {
  __asm {
    mov        eax, [esp + 4]    /* dst_argb */
    movd       xmm2, [esp + 8]   /* scale */
    movd       xmm3, [esp + 12]  /* interval_size */
    movd       xmm4, [esp + 16]  /* interval_offset */
    mov        ecx, [esp + 20]   /* width */
    pshuflw    xmm2, xmm2, 040h
    pshufd     xmm2, xmm2, 044h
    pshuflw    xmm3, xmm3, 040h
    pshufd     xmm3, xmm3, 044h
    pshuflw    xmm4, xmm4, 040h
    pshufd     xmm4, xmm4, 044h
    pxor       xmm5, xmm5  // constant 0
    pcmpeqb    xmm6, xmm6  // generate mask 0xff000000
    pslld      xmm6, 24

 convertloop:
    movdqu     xmm0, [eax]  // read 4 pixels
    punpcklbw  xmm0, xmm5   // first 2 pixels
    pmulhuw    xmm0, xmm2   // pixel * scale >> 16
    movdqu     xmm1, [eax]  // read 4 pixels
    punpckhbw  xmm1, xmm5   // next 2 pixels
    pmulhuw    xmm1, xmm2
    pmullw     xmm0, xmm3   // * interval_size
    movdqu     xmm7, [eax]  // read 4 pixels
    pmullw     xmm1, xmm3
    pand       xmm7, xmm6   // mask alpha
    paddw      xmm0, xmm4   // + interval_offset
    paddw      xmm1, xmm4
    packuswb   xmm0, xmm1
    por        xmm0, xmm7
    movdqu     [eax], xmm0
    lea        eax, [eax + 16]
    sub        ecx, 4
    jg         convertloop
    ret
  }
}
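
// Illustrative scalar sketch of quantization, reference only (#if 0, not
// compiled): each color channel is scaled down to an interval index and
// mapped back into the interval, leaving alpha untouched.
#if 0
static void ARGBQuantizeRow_Ref(uint8* dst_argb, int scale, int interval_size,
                                int interval_offset, int width) {
  int x;
  for (x = 0; x < width; ++x) {
    int b = dst_argb[0];
    int g = dst_argb[1];
    int r = dst_argb[2];
    dst_argb[0] = (uint8)((b * scale >> 16) * interval_size + interval_offset);
    dst_argb[1] = (uint8)((g * scale >> 16) * interval_size + interval_offset);
    dst_argb[2] = (uint8)((r * scale >> 16) * interval_size + interval_offset);
    dst_argb += 4;  // alpha is masked off and left unchanged.
  }
}
#endif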
#endif  // HAS_ARGBQUANTIZEROW_SSE2

#ifdef HAS_ARGBSHADEROW_SSE2
// Shade 4 pixels at a time by specified value.
__declspec(naked)
void ARGBShadeRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width,
                       uint32 value) {
  __asm {
    mov        eax, [esp + 4]   // src_argb
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    movd       xmm2, [esp + 16]  // value
    punpcklbw  xmm2, xmm2
    punpcklqdq xmm2, xmm2

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm0       // first 2
    punpckhbw  xmm1, xmm1       // next 2
    pmulhuw    xmm0, xmm2       // argb * value
    pmulhuw    xmm1, xmm2       // argb * value
    psrlw      xmm0, 8
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    ret
  }
}
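
// Illustrative scalar sketch of shading, reference only (#if 0, not
// compiled): duplicating a byte into both halves of a 16 bit word makes
// (v * f) >> 24 a close approximation of v * f / 255, mirroring the
// punpcklbw + pmulhuw + psrlw sequence above.
#if 0
#define REPEAT8(v) ((v) | ((v) << 8))
static void ARGBShadeRow_Ref(const uint8* src_argb, uint8* dst_argb,
                             int width, uint32 value) {
  const uint32 b_scale = REPEAT8(value & 0xff);
  const uint32 g_scale = REPEAT8((value >> 8) & 0xff);
  const uint32 r_scale = REPEAT8((value >> 16) & 0xff);
  const uint32 a_scale = REPEAT8(value >> 24);
  int x;
  for (x = 0; x < width; ++x) {
    dst_argb[0] = (uint8)((REPEAT8(src_argb[0]) * b_scale) >> 24);
    dst_argb[1] = (uint8)((REPEAT8(src_argb[1]) * g_scale) >> 24);
    dst_argb[2] = (uint8)((REPEAT8(src_argb[2]) * r_scale) >> 24);
    dst_argb[3] = (uint8)((REPEAT8(src_argb[3]) * a_scale) >> 24);
    src_argb += 4;
    dst_argb += 4;
  }
}
#undef REPEAT8
#endif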
#endif  // HAS_ARGBSHADEROW_SSE2

#ifdef HAS_ARGBMULTIPLYROW_SSE2
// Multiply 2 rows of ARGB pixels together, 4 pixels at a time.
__declspec(naked)
void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    pxor       xmm5, xmm5  // constant 0

 convertloop:
    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
    movdqu     xmm2, [esi]        // read 4 pixels from src_argb1
    movdqu     xmm1, xmm0
    movdqu     xmm3, xmm2
    punpcklbw  xmm0, xmm0         // first 2
    punpckhbw  xmm1, xmm1         // next 2
    punpcklbw  xmm2, xmm5         // first 2
    punpckhbw  xmm3, xmm5         // next 2
    pmulhuw    xmm0, xmm2         // src_argb0 * src_argb1 first 2
    pmulhuw    xmm1, xmm3         // src_argb0 * src_argb1 next 2
    lea        eax, [eax + 16]
    lea        esi, [esi + 16]
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    pop        esi
    ret
  }
}
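
// Illustrative scalar sketch of the multiply, reference only (#if 0, not
// compiled): with one operand byte-duplicated, pmulhuw computes roughly
// src0 * src1 / 255 for every byte, alpha included.
#if 0
static void ARGBMultiplyRow_Ref(const uint8* src_argb0,
                                const uint8* src_argb1,
                                uint8* dst_argb, int width) {
  int i;
  for (i = 0; i < width * 4; ++i) {
    uint32 f = src_argb0[i] | ((uint32)src_argb0[i] << 8);  // byte duplicated
    dst_argb[i] = (uint8)((f * src_argb1[i]) >> 16);
  }
}
#endif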
#endif  // HAS_ARGBMULTIPLYROW_SSE2

#ifdef HAS_ARGBADDROW_SSE2
// Add 2 rows of ARGB pixels together, 4 pixels at a time.
// TODO(fbarchard): Port this to posix, neon and other math functions.
__declspec(naked)
void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                     uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

    sub        ecx, 4
    jl         convertloop49

 convertloop4:
    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
    lea        eax, [eax + 16]
    movdqu     xmm1, [esi]        // read 4 pixels from src_argb1
    lea        esi, [esi + 16]
    paddusb    xmm0, xmm1         // src_argb0 + src_argb1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        convertloop4

 convertloop49:
    add        ecx, 4 - 1
    jl         convertloop19

 convertloop1:
    movd       xmm0, [eax]        // read 1 pixel from src_argb0
    lea        eax, [eax + 4]
    movd       xmm1, [esi]        // read 1 pixel from src_argb1
    lea        esi, [esi + 4]
    paddusb    xmm0, xmm1         // src_argb0 + src_argb1
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        convertloop1

 convertloop19:
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBADDROW_SSE2

#ifdef HAS_ARGBSUBTRACTROW_SSE2
// Subtract one row of ARGB pixels from another, 4 pixels at a time.
__declspec(naked)
void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

 convertloop:
    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
    lea        eax, [eax + 16]
    movdqu     xmm1, [esi]        // read 4 pixels from src_argb1
    lea        esi, [esi + 16]
    psubusb    xmm0, xmm1         // src_argb0 - src_argb1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_ARGBSUBTRACTROW_SSE2

#ifdef HAS_ARGBMULTIPLYROW_AVX2
// Multiply 2 rows of ARGB pixels together, 8 pixels at a time.
__declspec(naked)
void ARGBMultiplyRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    vpxor      ymm5, ymm5, ymm5     // constant 0

 convertloop:
    vmovdqu    ymm1, [eax]        // read 8 pixels from src_argb0
    lea        eax, [eax + 32]
    vmovdqu    ymm3, [esi]        // read 8 pixels from src_argb1
    lea        esi, [esi + 32]
    vpunpcklbw ymm0, ymm1, ymm1   // low 4
    vpunpckhbw ymm1, ymm1, ymm1   // high 4
    vpunpcklbw ymm2, ymm3, ymm5   // low 4
    vpunpckhbw ymm3, ymm3, ymm5   // high 4
    vpmulhuw   ymm0, ymm0, ymm2   // src_argb0 * src_argb1 low 4
    vpmulhuw   ymm1, ymm1, ymm3   // src_argb0 * src_argb1 high 4
    vpackuswb  ymm0, ymm0, ymm1
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBMULTIPLYROW_AVX2

#ifdef HAS_ARGBADDROW_AVX2
// Add 2 rows of ARGB pixels together, 8 pixels at a time.
__declspec(naked)
void ARGBAddRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
                     uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

 convertloop:
    vmovdqu    ymm0, [eax]              // read 8 pixels from src_argb0
    lea        eax, [eax + 32]
    vpaddusb   ymm0, ymm0, [esi]        // add 8 pixels from src_argb1
    lea        esi, [esi + 32]
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBADDROW_AVX2

#ifdef HAS_ARGBSUBTRACTROW_AVX2
// Subtract one row of ARGB pixels from another, 8 pixels at a time.
__declspec(naked)
void ARGBSubtractRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

 convertloop:
    vmovdqu    ymm0, [eax]              // read 8 pixels from src_argb0
    lea        eax, [eax + 32]
    vpsubusb   ymm0, ymm0, [esi]        // src_argb0 - src_argb1
    lea        esi, [esi + 32]
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBSUBTRACTROW_AVX2

#ifdef HAS_SOBELXROW_SSE2
// SobelX as a matrix is
// -1  0  1
// -2  0  2
// -1  0  1
__declspec(naked)
void SobelXRow_SSE2(const uint8* src_y0, const uint8* src_y1,
                    const uint8* src_y2, uint8* dst_sobelx, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_y0
    mov        esi, [esp + 8 + 8]   // src_y1
    mov        edi, [esp + 8 + 12]  // src_y2
    mov        edx, [esp + 8 + 16]  // dst_sobelx
    mov        ecx, [esp + 8 + 20]  // width
    sub        esi, eax
    sub        edi, eax
    sub        edx, eax
    pxor       xmm5, xmm5  // constant 0

 convertloop:
    movq       xmm0, qword ptr [eax]            // read 8 pixels from src_y0[0]
    movq       xmm1, qword ptr [eax + 2]        // read 8 pixels from src_y0[2]
    punpcklbw  xmm0, xmm5
    punpcklbw  xmm1, xmm5
    psubw      xmm0, xmm1
    movq       xmm1, qword ptr [eax + esi]      // read 8 pixels from src_y1[0]
    movq       xmm2, qword ptr [eax + esi + 2]  // read 8 pixels from src_y1[2]
    punpcklbw  xmm1, xmm5
    punpcklbw  xmm2, xmm5
    psubw      xmm1, xmm2
    movq       xmm2, qword ptr [eax + edi]      // read 8 pixels from src_y2[0]
    movq       xmm3, qword ptr [eax + edi + 2]  // read 8 pixels from src_y2[2]
    punpcklbw  xmm2, xmm5
    punpcklbw  xmm3, xmm5
    psubw      xmm2, xmm3
    paddw      xmm0, xmm2
    paddw      xmm0, xmm1
    paddw      xmm0, xmm1
    pxor       xmm1, xmm1   // abs = max(xmm0, -xmm0).  SSSE3 could use pabsw
    psubw      xmm1, xmm0
    pmaxsw     xmm0, xmm1
    packuswb   xmm0, xmm0
    movq       qword ptr [eax + edx], xmm0
    lea        eax, [eax + 8]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
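
// Illustrative scalar sketch of the SobelX filter, reference only (#if 0,
// not compiled): horizontal differences of three rows, middle row doubled,
// absolute value, then saturated to a byte as packuswb does above.
#if 0
static void SobelXRow_Ref(const uint8* src_y0, const uint8* src_y1,
                          const uint8* src_y2, uint8* dst_sobelx, int width) {
  int i;
  for (i = 0; i < width; ++i) {
    int a = src_y0[i] - src_y0[i + 2];
    int b = src_y1[i] - src_y1[i + 2];
    int c = src_y2[i] - src_y2[i + 2];
    int sobel = a + b * 2 + c;
    if (sobel < 0) sobel = -sobel;  // same as the psubw/pmaxsw abs trick.
    dst_sobelx[i] = (uint8)(sobel > 255 ? 255 : sobel);
  }
}
#endif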
#endif  // HAS_SOBELXROW_SSE2

#ifdef HAS_SOBELYROW_SSE2
// SobelY as a matrix is
// -1 -2 -1
//  0  0  0
//  1  2  1
__declspec(naked)
void SobelYRow_SSE2(const uint8* src_y0, const uint8* src_y1,
                    uint8* dst_sobely, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_y0
    mov        esi, [esp + 4 + 8]   // src_y1
    mov        edx, [esp + 4 + 12]  // dst_sobely
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax
    sub        edx, eax
    pxor       xmm5, xmm5  // constant 0

 convertloop:
    movq       xmm0, qword ptr [eax]            // read 8 pixels from src_y0[0]
    movq       xmm1, qword ptr [eax + esi]      // read 8 pixels from src_y1[0]
    punpcklbw  xmm0, xmm5
    punpcklbw  xmm1, xmm5
    psubw      xmm0, xmm1
    movq       xmm1, qword ptr [eax + 1]        // read 8 pixels from src_y0[1]
    movq       xmm2, qword ptr [eax + esi + 1]  // read 8 pixels from src_y1[1]
    punpcklbw  xmm1, xmm5
    punpcklbw  xmm2, xmm5
    psubw      xmm1, xmm2
    movq       xmm2, qword ptr [eax + 2]        // read 8 pixels from src_y0[2]
    movq       xmm3, qword ptr [eax + esi + 2]  // read 8 pixels from src_y1[2]
    punpcklbw  xmm2, xmm5
    punpcklbw  xmm3, xmm5
    psubw      xmm2, xmm3
    paddw      xmm0, xmm2
    paddw      xmm0, xmm1
    paddw      xmm0, xmm1
    pxor       xmm1, xmm1   // abs = max(xmm0, -xmm0).  SSSE3 could use pabsw
    psubw      xmm1, xmm0
    pmaxsw     xmm0, xmm1
    packuswb   xmm0, xmm0
    movq       qword ptr [eax + edx], xmm0
    lea        eax, [eax + 8]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELYROW_SSE2

#ifdef HAS_SOBELROW_SSE2
// Adds Sobel X and Sobel Y and stores Sobel into ARGB.
// A = 255
// R = Sobel
// G = Sobel
// B = Sobel
__declspec(naked)
void SobelRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
                   uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_sobelx
    mov        esi, [esp + 4 + 8]   // src_sobely
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax
    pcmpeqb    xmm5, xmm5           // alpha 255
    pslld      xmm5, 24             // 0xff000000

 convertloop:
    movdqu     xmm0, [eax]            // read 16 pixels src_sobelx
    movdqu     xmm1, [eax + esi]      // read 16 pixels src_sobely
    lea        eax, [eax + 16]
    paddusb    xmm0, xmm1             // sobel = sobelx + sobely
    movdqa     xmm2, xmm0             // GG
    punpcklbw  xmm2, xmm0             // First 8
    punpckhbw  xmm0, xmm0             // Next 8
    movdqa     xmm1, xmm2             // GGGG
    punpcklwd  xmm1, xmm2             // First 4
    punpckhwd  xmm2, xmm2             // Next 4
    por        xmm1, xmm5             // GGGA
    por        xmm2, xmm5
    movdqa     xmm3, xmm0             // GGGG
    punpcklwd  xmm3, xmm0             // Next 4
    punpckhwd  xmm0, xmm0             // Last 4
    por        xmm3, xmm5             // GGGA
    por        xmm0, xmm5
    movdqu     [edx], xmm1
    movdqu     [edx + 16], xmm2
    movdqu     [edx + 32], xmm3
    movdqu     [edx + 48], xmm0
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELROW_SSE2

#ifdef HAS_SOBELTOPLANEROW_SSE2
// Adds Sobel X and Sobel Y and stores Sobel into a plane.
__declspec(naked)
void SobelToPlaneRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
                          uint8* dst_y, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_sobelx
    mov        esi, [esp + 4 + 8]   // src_sobely
    mov        edx, [esp + 4 + 12]  // dst_y
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax

 convertloop:
    movdqu     xmm0, [eax]            // read 16 pixels src_sobelx
    movdqu     xmm1, [eax + esi]      // read 16 pixels src_sobely
    lea        eax, [eax + 16]
    paddusb    xmm0, xmm1             // sobel = sobelx + sobely
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELTOPLANEROW_SSE2

#ifdef HAS_SOBELXYROW_SSE2
// Mixes Sobel X, Sobel Y and Sobel into ARGB.
// A = 255
// R = Sobel X
// G = Sobel
// B = Sobel Y
__declspec(naked)
void SobelXYRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
                     uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_sobelx
    mov        esi, [esp + 4 + 8]   // src_sobely
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax
    pcmpeqb    xmm5, xmm5           // alpha 255

 convertloop:
    movdqu     xmm0, [eax]            // read 16 pixels src_sobelx
    movdqu     xmm1, [eax + esi]      // read 16 pixels src_sobely
    lea        eax, [eax + 16]
    movdqa     xmm2, xmm0
    paddusb    xmm2, xmm1             // sobel = sobelx + sobely
    movdqa     xmm3, xmm0             // XA
    punpcklbw  xmm3, xmm5
    punpckhbw  xmm0, xmm5
    movdqa     xmm4, xmm1             // YS
    punpcklbw  xmm4, xmm2
    punpckhbw  xmm1, xmm2
    movdqa     xmm6, xmm4             // YSXA
    punpcklwd  xmm6, xmm3             // First 4
    punpckhwd  xmm4, xmm3             // Next 4
    movdqa     xmm7, xmm1             // YSXA
    punpcklwd  xmm7, xmm0             // Next 4
    punpckhwd  xmm1, xmm0             // Last 4
    movdqu     [edx], xmm6
    movdqu     [edx + 16], xmm4
    movdqu     [edx + 32], xmm7
    movdqu     [edx + 48], xmm1
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELXYROW_SSE2

#ifdef HAS_CUMULATIVESUMTOAVERAGEROW_SSE2
// Consider float CumulativeSum.
// Consider calling CumulativeSum one row at time as needed.
// Consider circular CumulativeSum buffer of radius * 2 + 1 height.
// Convert cumulative sum for an area to an average for 1 pixel.
// topleft is pointer to top left of CumulativeSum buffer for area.
// botleft is pointer to bottom left of CumulativeSum buffer.
// width is offset from left to right of area in CumulativeSum buffer measured
//   in number of ints.
// area is the number of pixels in the area being averaged.
// dst points to pixel to store result to.
// count is number of averaged pixels to produce.
// Does 4 pixels at a time.
void CumulativeSumToAverageRow_SSE2(const int32* topleft, const int32* botleft,
                                    int width, int area, uint8* dst,
                                    int count) {
  __asm {
    mov        eax, topleft  // eax topleft
    mov        esi, botleft  // esi botleft
    mov        edx, width
    movd       xmm5, area
    mov        edi, dst
    mov        ecx, count
    cvtdq2ps   xmm5, xmm5
    rcpss      xmm4, xmm5  // 1.0f / area
    pshufd     xmm4, xmm4, 0
    sub        ecx, 4
    jl         l4b

    cmp        area, 128  // 128 pixels will not overflow 15 bits.
    ja         l4

    pshufd     xmm5, xmm5, 0        // area
    pcmpeqb    xmm6, xmm6           // constant of 65536.0 - 1 = 65535.0
    psrld      xmm6, 16
    cvtdq2ps   xmm6, xmm6
    addps      xmm5, xmm6           // (65536.0 + area - 1)
    mulps      xmm5, xmm4           // (65536.0 + area - 1) * 1 / area
    cvtps2dq   xmm5, xmm5           // 0.16 fixed point
    packssdw   xmm5, xmm5           // 16 bit shorts

    // 4 pixel loop small blocks.
  s4:
    // top left
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]

    // - top right
    psubd      xmm0, [eax + edx * 4]
    psubd      xmm1, [eax + edx * 4 + 16]
    psubd      xmm2, [eax + edx * 4 + 32]
    psubd      xmm3, [eax + edx * 4 + 48]
    lea        eax, [eax + 64]

    // - bottom left
    psubd      xmm0, [esi]
    psubd      xmm1, [esi + 16]
    psubd      xmm2, [esi + 32]
    psubd      xmm3, [esi + 48]

    // + bottom right
    paddd      xmm0, [esi + edx * 4]
    paddd      xmm1, [esi + edx * 4 + 16]
    paddd      xmm2, [esi + edx * 4 + 32]
    paddd      xmm3, [esi + edx * 4 + 48]
    lea        esi, [esi + 64]

    packssdw   xmm0, xmm1  // pack 4 pixels into 2 registers
    packssdw   xmm2, xmm3

    pmulhuw    xmm0, xmm5
    pmulhuw    xmm2, xmm5

    packuswb   xmm0, xmm2
    movdqu     [edi], xmm0
    lea        edi, [edi + 16]
    sub        ecx, 4
    jge        s4

    jmp        l4b

    // 4 pixel loop
  l4:
    // top left
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]

    // - top right
    psubd      xmm0, [eax + edx * 4]
    psubd      xmm1, [eax + edx * 4 + 16]
    psubd      xmm2, [eax + edx * 4 + 32]
    psubd      xmm3, [eax + edx * 4 + 48]
    lea        eax, [eax + 64]

    // - bottom left
    psubd      xmm0, [esi]
    psubd      xmm1, [esi + 16]
    psubd      xmm2, [esi + 32]
    psubd      xmm3, [esi + 48]

    // + bottom right
    paddd      xmm0, [esi + edx * 4]
    paddd      xmm1, [esi + edx * 4 + 16]
    paddd      xmm2, [esi + edx * 4 + 32]
    paddd      xmm3, [esi + edx * 4 + 48]
    lea        esi, [esi + 64]

    cvtdq2ps   xmm0, xmm0   // Average = Sum * 1 / Area
    cvtdq2ps   xmm1, xmm1
    mulps      xmm0, xmm4
    mulps      xmm1, xmm4
    cvtdq2ps   xmm2, xmm2
    cvtdq2ps   xmm3, xmm3
    mulps      xmm2, xmm4
    mulps      xmm3, xmm4
    cvtps2dq   xmm0, xmm0
    cvtps2dq   xmm1, xmm1
    cvtps2dq   xmm2, xmm2
    cvtps2dq   xmm3, xmm3
    packssdw   xmm0, xmm1
    packssdw   xmm2, xmm3
    packuswb   xmm0, xmm2
    movdqu     [edi], xmm0
    lea        edi, [edi + 16]
    sub        ecx, 4
    jge        l4

  l4b:
    add        ecx, 4 - 1
    jl         l1b

    // 1 pixel loop
  l1:
    movdqu     xmm0, [eax]
    psubd      xmm0, [eax + edx * 4]
    lea        eax, [eax + 16]
    psubd      xmm0, [esi]
    paddd      xmm0, [esi + edx * 4]
    lea        esi, [esi + 16]
    cvtdq2ps   xmm0, xmm0
    mulps      xmm0, xmm4
    cvtps2dq   xmm0, xmm0
    packssdw   xmm0, xmm0
    packuswb   xmm0, xmm0
    movd       dword ptr [edi], xmm0
    lea        edi, [edi + 4]
    sub        ecx, 1
    jge        l1
  l1b:
  }
}
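
// Illustrative scalar sketch of the box average, reference only (#if 0, not
// compiled): the four corner reads of the integral image give the sum over
// the rectangle, which is scaled by 1 / area per ARGB channel.
#if 0
static void CumulativeSumToAverageRow_Ref(const int32* topleft,
                                          const int32* botleft, int width,
                                          int area, uint8* dst, int count) {
  float ooa = 1.0f / area;
  int i, c;
  for (i = 0; i < count; ++i) {
    for (c = 0; c < 4; ++c) {
      int32 sum = topleft[c] - topleft[width + c] -
                  botleft[c] + botleft[width + c];
      dst[c] = (uint8)(sum * ooa);
    }
    dst += 4;
    topleft += 4;
    botleft += 4;
  }
}
#endif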
#endif  // HAS_CUMULATIVESUMTOAVERAGEROW_SSE2

#ifdef HAS_COMPUTECUMULATIVESUMROW_SSE2
// Creates a table of cumulative sums where each value is a sum of all values
// above and to the left of the value.
void ComputeCumulativeSumRow_SSE2(const uint8* row, int32* cumsum,
                                  const int32* previous_cumsum, int width) {
  __asm {
    mov        eax, row
    mov        edx, cumsum
    mov        esi, previous_cumsum
    mov        ecx, width
    pxor       xmm0, xmm0
    pxor       xmm1, xmm1

    sub        ecx, 4
    jl         l4b
    test       edx, 15
    jne        l4b

    // 4 pixel loop
  l4:
    movdqu     xmm2, [eax]  // 4 argb pixels 16 bytes.
    lea        eax, [eax + 16]
    movdqa     xmm4, xmm2

    punpcklbw  xmm2, xmm1
    movdqa     xmm3, xmm2
    punpcklwd  xmm2, xmm1
    punpckhwd  xmm3, xmm1

    punpckhbw  xmm4, xmm1
    movdqa     xmm5, xmm4
    punpcklwd  xmm4, xmm1
    punpckhwd  xmm5, xmm1

    paddd      xmm0, xmm2
    movdqu     xmm2, [esi]  // previous row above.
    paddd      xmm2, xmm0

    paddd      xmm0, xmm3
    movdqu     xmm3, [esi + 16]
    paddd      xmm3, xmm0

    paddd      xmm0, xmm4
    movdqu     xmm4, [esi + 32]
    paddd      xmm4, xmm0

    paddd      xmm0, xmm5
    movdqu     xmm5, [esi + 48]
    lea        esi, [esi + 64]
    paddd      xmm5, xmm0

    movdqu     [edx], xmm2
    movdqu     [edx + 16], xmm3
    movdqu     [edx + 32], xmm4
    movdqu     [edx + 48], xmm5

    lea        edx, [edx + 64]
    sub        ecx, 4
    jge        l4

  l4b:
    add        ecx, 4 - 1
    jl         l1b

    // 1 pixel loop
  l1:
    movd       xmm2, dword ptr [eax]  // 1 argb pixel 4 bytes.
    lea        eax, [eax + 4]
    punpcklbw  xmm2, xmm1
    punpcklwd  xmm2, xmm1
    paddd      xmm0, xmm2
    movdqu     xmm2, [esi]
    lea        esi, [esi + 16]
    paddd      xmm2, xmm0
    movdqu     [edx], xmm2
    lea        edx, [edx + 16]
    sub        ecx, 1
    jge        l1

 l1b:
  }
}
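
// Illustrative scalar sketch of the cumulative sum, reference only (#if 0,
// not compiled): a running per-channel sum across the row is added to the
// row above, so every entry holds the sum of all pixels above and left.
#if 0
static void ComputeCumulativeSumRow_Ref(const uint8* row, int32* cumsum,
                                        const int32* previous_cumsum,
                                        int width) {
  int32 sum[4] = {0, 0, 0, 0};
  int x, c;
  for (x = 0; x < width; ++x) {
    for (c = 0; c < 4; ++c) {
      sum[c] += row[x * 4 + c];
      cumsum[x * 4 + c] = sum[c] + previous_cumsum[x * 4 + c];
    }
  }
}
#endif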
#endif  // HAS_COMPUTECUMULATIVESUMROW_SSE2

#ifdef HAS_ARGBAFFINEROW_SSE2
// Copy ARGB pixels from source image with slope to a row of destination.
__declspec(naked)
LIBYUV_API
void ARGBAffineRow_SSE2(const uint8* src_argb, int src_argb_stride,
                        uint8* dst_argb, const float* uv_dudv, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 12]  // src_argb
    mov        esi, [esp + 16]  // stride
    mov        edx, [esp + 20]  // dst_argb
    mov        ecx, [esp + 24]  // pointer to uv_dudv
    movq       xmm2, qword ptr [ecx]  // uv
    movq       xmm7, qword ptr [ecx + 8]  // dudv
    mov        ecx, [esp + 28]  // width
    shl        esi, 16          // 4, stride
    add        esi, 4
    movd       xmm5, esi
    sub        ecx, 4
    jl         l4b
    // setup for 4 pixel loop
    pshufd     xmm7, xmm7, 0x44  // dup dudv
    pshufd     xmm5, xmm5, 0  // dup 4, stride
    movdqa     xmm0, xmm2    // x0, y0, x1, y1
    addps      xmm0, xmm7
    movlhps    xmm2, xmm0
    movdqa     xmm4, xmm7
    addps      xmm4, xmm4    // dudv *= 2
    movdqa     xmm3, xmm2    // x2, y2, x3, y3
    addps      xmm3, xmm4
    addps      xmm4, xmm4    // dudv *= 4
    // 4 pixel loop
  l4:
    cvttps2dq  xmm0, xmm2    // x, y float to int first 2
    cvttps2dq  xmm1, xmm3    // x, y float to int next 2
    packssdw   xmm0, xmm1    // x, y as 8 shorts
    pmaddwd    xmm0, xmm5    // offsets = x * 4 + y * stride.
    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // shift right
    movd       edi, xmm0
    pshufd     xmm0, xmm0, 0x39  // shift right
    movd       xmm1, [eax + esi]  // read pixel 0
    movd       xmm6, [eax + edi]  // read pixel 1
    punpckldq  xmm1, xmm6     // combine pixel 0 and 1
    addps      xmm2, xmm4    // x, y += dx, dy first 2
    movq       qword ptr [edx], xmm1
    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // shift right
    movd       edi, xmm0
    movd       xmm6, [eax + esi]  // read pixel 2
    movd       xmm0, [eax + edi]  // read pixel 3
    punpckldq  xmm6, xmm0     // combine pixel 2 and 3
    addps      xmm3, xmm4    // x, y += dx, dy next 2
    movq       qword ptr 8[edx], xmm6
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        l4
  l4b:
    add        ecx, 4 - 1
    jl         l1b

    // 1 pixel loop
  l1:
    cvttps2dq  xmm0, xmm2    // x, y float to int
    packssdw   xmm0, xmm0    // x, y as shorts
    pmaddwd    xmm0, xmm5    // offset = x * 4 + y * stride
    addps      xmm2, xmm7    // x, y += dx, dy
    movd       esi, xmm0
    movd       xmm0, [eax + esi]  // copy a pixel
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        l1
  l1b:
    pop        edi
    pop        esi
    ret
  }
}
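
// Illustrative scalar sketch of the affine row copy, reference only (#if 0,
// not compiled): (u, v) is stepped by (du, dv) per destination pixel and
// each source pixel is fetched at the truncated integer coordinate.
#if 0
static void ARGBAffineRow_Ref(const uint8* src_argb, int src_argb_stride,
                              uint8* dst_argb, const float* uv_dudv,
                              int width) {
  float u = uv_dudv[0];
  float v = uv_dudv[1];
  int i;
  for (i = 0; i < width; ++i) {
    int x = (int)u;
    int y = (int)v;
    *(uint32*)dst_argb =
        *(const uint32*)(src_argb + y * src_argb_stride + x * 4);
    dst_argb += 4;
    u += uv_dudv[2];  // du
    v += uv_dudv[3];  // dv
  }
}
#endif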
#endif  // HAS_ARGBAFFINEROW_SSE2

#ifdef HAS_INTERPOLATEROW_AVX2
// Bilinear filter 32x2 -> 32x1
__declspec(naked)
void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) {
  __asm {
    push       esi
    push       edi
    mov        edi, [esp + 8 + 4]   // dst_ptr
    mov        esi, [esp + 8 + 8]   // src_ptr
    mov        edx, [esp + 8 + 12]  // src_stride
    mov        ecx, [esp + 8 + 16]  // dst_width
    mov        eax, [esp + 8 + 20]  // source_y_fraction (0..255)
    shr        eax, 1
    // Dispatch to specialized filters if applicable.
    cmp        eax, 0
    je         xloop100  // 0 / 128.  Blend 100 / 0.
    sub        edi, esi
    cmp        eax, 32
    je         xloop75   // 32 / 128 is 0.25.  Blend 75 / 25.
    cmp        eax, 64
    je         xloop50   // 64 / 128 is 0.50.  Blend 50 / 50.
    cmp        eax, 96
    je         xloop25   // 96 / 128 is 0.75.  Blend 25 / 75.

    vmovd      xmm0, eax  // high fraction 0..127
    neg        eax
    add        eax, 128
    vmovd      xmm5, eax  // low fraction 128..1
    vpunpcklbw xmm5, xmm5, xmm0
    vpunpcklwd xmm5, xmm5, xmm5
    vpxor      ymm0, ymm0, ymm0
    vpermd     ymm5, ymm0, ymm5

  xloop:
    vmovdqu    ymm0, [esi]
    vmovdqu    ymm2, [esi + edx]
    vpunpckhbw ymm1, ymm0, ymm2  // mutates
    vpunpcklbw ymm0, ymm0, ymm2  // mutates
    vpmaddubsw ymm0, ymm0, ymm5
    vpmaddubsw ymm1, ymm1, ymm5
    vpsrlw     ymm0, ymm0, 7
    vpsrlw     ymm1, ymm1, 7
    vpackuswb  ymm0, ymm0, ymm1  // unmutates
    vmovdqu    [esi + edi], ymm0
    lea        esi, [esi + 32]
    sub        ecx, 32
    jg         xloop
    jmp        xloop99

    // Blend 25 / 75.
  xloop25:
    vmovdqu    ymm0, [esi]
    vmovdqu    ymm1, [esi + edx]
    vpavgb     ymm0, ymm0, ymm1
    vpavgb     ymm0, ymm0, ymm1
    vmovdqu    [esi + edi], ymm0
    lea        esi, [esi + 32]
    sub        ecx, 32
    jg         xloop25
    jmp        xloop99

    // Blend 50 / 50.
  xloop50:
    vmovdqu    ymm0, [esi]
    vpavgb     ymm0, ymm0, [esi + edx]
    vmovdqu    [esi + edi], ymm0
    lea        esi, [esi + 32]
    sub        ecx, 32
    jg         xloop50
    jmp        xloop99

    // Blend 75 / 25.
  xloop75:
    vmovdqu    ymm1, [esi]
    vmovdqu    ymm0, [esi + edx]
    vpavgb     ymm0, ymm0, ymm1
    vpavgb     ymm0, ymm0, ymm1
    vmovdqu    [esi + edi], ymm0
    lea        esi, [esi + 32]
    sub        ecx, 32
    jg         xloop75
    jmp        xloop99

    // Blend 100 / 0 - Copy row unchanged.
  xloop100:
    rep movsb

  xloop99:
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_INTERPOLATEROW_AVX2

// Bilinear filter 16x2 -> 16x1
__declspec(naked)
void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
                          ptrdiff_t src_stride, int dst_width,
                          int source_y_fraction) {
  __asm {
    push       esi
    push       edi
    mov        edi, [esp + 8 + 4]   // dst_ptr
    mov        esi, [esp + 8 + 8]   // src_ptr
    mov        edx, [esp + 8 + 12]  // src_stride
    mov        ecx, [esp + 8 + 16]  // dst_width
    mov        eax, [esp + 8 + 20]  // source_y_fraction (0..255)
    sub        edi, esi
    shr        eax, 1
    // Dispatch to specialized filters if applicable.
    cmp        eax, 0
    je         xloop100  // 0 / 128.  Blend 100 / 0.
    cmp        eax, 32
    je         xloop75   // 32 / 128 is 0.25.  Blend 75 / 25.
    cmp        eax, 64
    je         xloop50   // 64 / 128 is 0.50.  Blend 50 / 50.
    cmp        eax, 96
    je         xloop25   // 96 / 128 is 0.75.  Blend 25 / 75.

    movd       xmm0, eax  // high fraction 0..127
    neg        eax
    add        eax, 128
    movd       xmm5, eax  // low fraction 128..1
    punpcklbw  xmm5, xmm0
    punpcklwd  xmm5, xmm5
    pshufd     xmm5, xmm5, 0

  xloop:
    movdqu     xmm0, [esi]
    movdqu     xmm2, [esi + edx]
    movdqu     xmm1, xmm0
    punpcklbw  xmm0, xmm2
    punpckhbw  xmm1, xmm2
    pmaddubsw  xmm0, xmm5
    pmaddubsw  xmm1, xmm5
    psrlw      xmm0, 7
    psrlw      xmm1, 7
    packuswb   xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop
    jmp        xloop99

    // Blend 25 / 75.
  xloop25:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop25
    jmp        xloop99

    // Blend 50 / 50.
  xloop50:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop50
    jmp        xloop99

    // Blend 75 / 25.
  xloop75:
    movdqu     xmm1, [esi]
    movdqu     xmm0, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop75
    jmp        xloop99

    // Blend 100 / 0 - Copy row unchanged.
  xloop100:
    movdqu     xmm0, [esi]
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop100

  xloop99:
    pop        edi
    pop        esi
    ret
  }
}

#ifdef HAS_INTERPOLATEROW_SSE2
// Bilinear filter 16x2 -> 16x1
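// Roughly, per output byte (a sketch of the arithmetic, not shipped code):
//   diff = src[x + stride] - src[x];                     // 9-bit signed
//   dst[x] = src[x] + ((diff * source_y_fraction) >> 8);
// The asm scales by fraction * 257 / 65536 (~fraction / 255) via pmulhw,
// and the exact 0/25/50/75/100% fractions take pavgb/copy fast paths.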
__declspec(naked)
void InterpolateRow_SSE2(uint8* dst_ptr, const uint8* src_ptr,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) {
  __asm {
    push       esi
    push       edi
    mov        edi, [esp + 8 + 4]   // dst_ptr
    mov        esi, [esp + 8 + 8]   // src_ptr
    mov        edx, [esp + 8 + 12]  // src_stride
    mov        ecx, [esp + 8 + 16]  // dst_width
    mov        eax, [esp + 8 + 20]  // source_y_fraction (0..255)
    sub        edi, esi
    // Dispatch to specialized filters if applicable.
    cmp        eax, 0
    je         xloop100  // 0 / 256.  Blend 100 / 0.
    cmp        eax, 64
    je         xloop75   // 64 / 256 is 0.25.  Blend 75 / 25.
    cmp        eax, 128
    je         xloop50   // 128 / 256 is 0.50.  Blend 50 / 50.
    cmp        eax, 192
    je         xloop25   // 192 / 256 is 0.75.  Blend 25 / 75.

    movd       xmm5, eax            // xmm5 = y fraction
    punpcklbw  xmm5, xmm5
    psrlw      xmm5, 1
    punpcklwd  xmm5, xmm5
    punpckldq  xmm5, xmm5
    punpcklqdq xmm5, xmm5
    pxor       xmm4, xmm4

  xloop:
    movdqu     xmm0, [esi]  // row0
    movdqu     xmm2, [esi + edx]  // row1
    movdqu     xmm1, xmm0
    movdqu     xmm3, xmm2
    punpcklbw  xmm2, xmm4
    punpckhbw  xmm3, xmm4
    punpcklbw  xmm0, xmm4
    punpckhbw  xmm1, xmm4
    psubw      xmm2, xmm0  // row1 - row0
    psubw      xmm3, xmm1
    paddw      xmm2, xmm2  // 9 bits * 15 bits = 8.16
    paddw      xmm3, xmm3
    pmulhw     xmm2, xmm5  // scale diff
    pmulhw     xmm3, xmm5
    paddw      xmm0, xmm2  // sum rows
    paddw      xmm1, xmm3
    packuswb   xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop
    jmp        xloop99

    // Blend 25 / 75.
  xloop25:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop25
    jmp        xloop99

    // Blend 50 / 50.
  xloop50:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop50
    jmp        xloop99

    // Blend 75 / 25.
  xloop75:
    movdqu     xmm1, [esi]
    movdqu     xmm0, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop75
    jmp        xloop99

    // Blend 100 / 0 - Copy row unchanged.
  xloop100:
    movdqu     xmm0, [esi]
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop100

  xloop99:
    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_INTERPOLATEROW_SSE2

// For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA.
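// The shuffler argument is a 16-byte pshufb control mask: output byte i is
// taken from input byte shuffler[i].  For example, the mask
// {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12} reverses the byte
// order within each 4-byte pixel, which is the BGRAToARGB shuffle.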
__declspec(naked)
void ARGBShuffleRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
                          const uint8* shuffler, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_argb
    mov        edx, [esp + 8]    // dst_argb
    mov        ecx, [esp + 12]   // shuffler
    movdqu     xmm5, [ecx]
    mov        ecx, [esp + 16]   // pix

  wloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax, [eax + 32]
    pshufb     xmm0, xmm5
    pshufb     xmm1, xmm5
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         wloop
    ret
  }
}

#ifdef HAS_ARGBSHUFFLEROW_AVX2
__declspec(naked)
void ARGBShuffleRow_AVX2(const uint8* src_argb, uint8* dst_argb,
                         const uint8* shuffler, int pix) {
  __asm {
    mov        eax, [esp + 4]     // src_argb
    mov        edx, [esp + 8]     // dst_argb
    mov        ecx, [esp + 12]    // shuffler
    vbroadcastf128 ymm5, [ecx]    // same shuffle in high as low.
    mov        ecx, [esp + 16]    // pix

  wloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax, [eax + 64]
    vpshufb    ymm0, ymm0, ymm5
    vpshufb    ymm1, ymm1, ymm5
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         wloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBSHUFFLEROW_AVX2

__declspec(naked)
void ARGBShuffleRow_SSE2(const uint8* src_argb, uint8* dst_argb,
                         const uint8* shuffler, int pix) {
  __asm {
    push       ebx
    push       esi
    mov        eax, [esp + 8 + 4]    // src_argb
    mov        edx, [esp + 8 + 8]    // dst_argb
    mov        esi, [esp + 8 + 12]   // shuffler
    mov        ecx, [esp + 8 + 16]   // pix
    pxor       xmm5, xmm5

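    // The first 4 shuffler bytes, read as a little-endian dword, select a
    // specialized loop; e.g. a shuffler beginning {2, 1, 0, 3} reads as
    // 0x03000102 (ABGRToARGB).  Unrecognized masks fall through to the
    // byte-at-a-time loop below.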
    mov        ebx, [esi]   // shuffler
    cmp        ebx, 0x03000102
    je         shuf_3012
    cmp        ebx, 0x00010203
    je         shuf_0123
    cmp        ebx, 0x00030201
    je         shuf_0321
    cmp        ebx, 0x02010003
    je         shuf_2103

  // TODO(fbarchard): Use one source pointer and 3 offsets.
  shuf_any1:
    movzx      ebx, byte ptr [esi]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx], bl
    movzx      ebx, byte ptr [esi + 1]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx + 1], bl
    movzx      ebx, byte ptr [esi + 2]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx + 2], bl
    movzx      ebx, byte ptr [esi + 3]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx + 3], bl
    lea        eax, [eax + 4]
    lea        edx, [edx + 4]
    sub        ecx, 1
    jg         shuf_any1
    jmp        shuf99

  shuf_0123:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 01Bh   // 1B = 00011011 = 0x0123 = BGRAToARGB
    pshuflw    xmm0, xmm0, 01Bh
    pshufhw    xmm1, xmm1, 01Bh
    pshuflw    xmm1, xmm1, 01Bh
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_0123
    jmp        shuf99

  shuf_0321:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 039h   // 39 = 00111001 = 0x0321 = RGBAToARGB
    pshuflw    xmm0, xmm0, 039h
    pshufhw    xmm1, xmm1, 039h
    pshuflw    xmm1, xmm1, 039h
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_0321
    jmp        shuf99

  shuf_2103:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 093h   // 93 = 10010011 = 0x2103 = ARGBToRGBA
    pshuflw    xmm0, xmm0, 093h
    pshufhw    xmm1, xmm1, 093h
    pshuflw    xmm1, xmm1, 093h
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_2103
    jmp        shuf99

  shuf_3012:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 0C6h   // C6 = 11000110 = 0x3012 = ABGRToARGB
    pshuflw    xmm0, xmm0, 0C6h
    pshufhw    xmm1, xmm1, 0C6h
    pshuflw    xmm1, xmm1, 0C6h
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_3012

  shuf99:
    pop        esi
    pop        ebx
    ret
  }
}

// YUY2 - Macro-pixel = 2 image pixels
// Y0U0Y1V0....Y2U2Y3V2...Y4U4Y5V4....

// UYVY - Macro-pixel = 2 image pixels
// U0Y0V0Y1....U2Y2V2Y3...U4Y4V4Y5....
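//
// Per pair of pixels, I422ToYUY2Row packs (a sketch of the scalar form):
//   dst_frame[0] = src_y[0];  dst_frame[1] = src_u[0];
//   dst_frame[2] = src_y[1];  dst_frame[3] = src_v[0];
// and I422ToUYVYRow emits the same bytes with luma and chroma swapped:
//   dst_frame[0] = src_u[0];  dst_frame[1] = src_y[0];
//   dst_frame[2] = src_v[0];  dst_frame[3] = src_y[1];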

__declspec(naked)
void I422ToYUY2Row_SSE2(const uint8* src_y,
                        const uint8* src_u,
                        const uint8* src_v,
                        uint8* dst_frame, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_y
    mov        esi, [esp + 8 + 8]    // src_u
    mov        edx, [esp + 8 + 12]   // src_v
    mov        edi, [esp + 8 + 16]   // dst_frame
    mov        ecx, [esp + 8 + 20]   // width
    sub        edx, esi

  convertloop:
    movq       xmm2, qword ptr [esi] // U
    movq       xmm3, qword ptr [esi + edx] // V
    lea        esi, [esi + 8]
    punpcklbw  xmm2, xmm3 // UV
    movdqu     xmm0, [eax] // Y
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm2 // YUYV
    punpckhbw  xmm1, xmm2
    movdqu     [edi], xmm0
    movdqu     [edi + 16], xmm1
    lea        edi, [edi + 32]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void I422ToUYVYRow_SSE2(const uint8* src_y,
                        const uint8* src_u,
                        const uint8* src_v,
                        uint8* dst_frame, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_y
    mov        esi, [esp + 8 + 8]    // src_u
    mov        edx, [esp + 8 + 12]   // src_v
    mov        edi, [esp + 8 + 16]   // dst_frame
    mov        ecx, [esp + 8 + 20]   // width
    sub        edx, esi

  convertloop:
    movq       xmm2, qword ptr [esi] // U
    movq       xmm3, qword ptr [esi + edx] // V
    lea        esi, [esi + 8]
    punpcklbw  xmm2, xmm3 // UV
    movdqu     xmm0, [eax] // Y
    movdqa     xmm1, xmm2
    lea        eax, [eax + 16]
    punpcklbw  xmm1, xmm0 // UYVY
    punpckhbw  xmm2, xmm0
    movdqu     [edi], xmm1
    movdqu     [edi + 16], xmm2
    lea        edi, [edi + 32]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

#ifdef HAS_ARGBPOLYNOMIALROW_SSE2
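// Per channel byte v, this evaluates a cubic in float (a sketch of the
// scalar form; poly holds 4 floats per coefficient, one per channel c):
//   x = (float)v;
//   dst = saturate_to_u8(poly[c] + poly[c + 4] * x +
//                        poly[c + 8] * x * x + poly[c + 12] * x * x * x);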
__declspec(naked)
void ARGBPolynomialRow_SSE2(const uint8* src_argb,
                            uint8* dst_argb, const float* poly,
                            int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   /* src_argb */
    mov        edx, [esp + 4 + 8]   /* dst_argb */
    mov        esi, [esp + 4 + 12]  /* poly */
    mov        ecx, [esp + 4 + 16]  /* width */
    pxor       xmm3, xmm3  // 0 constant for zero extending bytes to ints.

    // 2 pixel loop.
 convertloop:
//    pmovzxbd  xmm0, dword ptr [eax]  // BGRA pixel
//    pmovzxbd  xmm4, dword ptr [eax + 4]  // BGRA pixel
    movq       xmm0, qword ptr [eax]  // BGRABGRA
    lea        eax, [eax + 8]
    punpcklbw  xmm0, xmm3
    movdqa     xmm4, xmm0
    punpcklwd  xmm0, xmm3  // pixel 0
    punpckhwd  xmm4, xmm3  // pixel 1
    cvtdq2ps   xmm0, xmm0  // 4 floats
    cvtdq2ps   xmm4, xmm4
    movdqa     xmm1, xmm0  // X
    movdqa     xmm5, xmm4
    mulps      xmm0, [esi + 16]  // C1 * X
    mulps      xmm4, [esi + 16]
    addps      xmm0, [esi]  // result = C0 + C1 * X
    addps      xmm4, [esi]
    movdqa     xmm2, xmm1
    movdqa     xmm6, xmm5
    mulps      xmm2, xmm1  // X * X
    mulps      xmm6, xmm5
    mulps      xmm1, xmm2  // X * X * X
    mulps      xmm5, xmm6
    mulps      xmm2, [esi + 32]  // C2 * X * X
    mulps      xmm6, [esi + 32]
    mulps      xmm1, [esi + 48]  // C3 * X * X * X
    mulps      xmm5, [esi + 48]
    addps      xmm0, xmm2  // result += C2 * X * X
    addps      xmm4, xmm6
    addps      xmm0, xmm1  // result += C3 * X * X * X
    addps      xmm4, xmm5
    cvttps2dq  xmm0, xmm0
    cvttps2dq  xmm4, xmm4
    packuswb   xmm0, xmm4
    packuswb   xmm0, xmm0
    movq       qword ptr [edx], xmm0
    lea        edx, [edx + 8]
    sub        ecx, 2
    jg         convertloop
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBPOLYNOMIALROW_SSE2

#ifdef HAS_ARGBPOLYNOMIALROW_AVX2
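// Same polynomial as the SSE2 version above, but evaluated 2 pixels at a
// time with fused multiply-adds (vfmadd132ps/vfmadd231ps), so this path
// also requires FMA3 support.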
__declspec(naked)
void ARGBPolynomialRow_AVX2(const uint8* src_argb,
                            uint8* dst_argb, const float* poly,
                            int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_argb */
    mov        ecx, [esp + 12]   /* poly */
    vbroadcastf128 ymm4, [ecx]       // C0
    vbroadcastf128 ymm5, [ecx + 16]  // C1
    vbroadcastf128 ymm6, [ecx + 32]  // C2
    vbroadcastf128 ymm7, [ecx + 48]  // C3
    mov        ecx, [esp + 16]  /* width */

    // 2 pixel loop.
 convertloop:
    vpmovzxbd   ymm0, qword ptr [eax]  // 2 BGRA pixels
    lea         eax, [eax + 8]
    vcvtdq2ps   ymm0, ymm0        // X 8 floats
    vmulps      ymm2, ymm0, ymm0  // X * X
    vmulps      ymm3, ymm0, ymm7  // C3 * X
    vfmadd132ps ymm0, ymm4, ymm5  // result = C0 + C1 * X
    vfmadd231ps ymm0, ymm2, ymm6  // result += C2 * X * X
    vfmadd231ps ymm0, ymm2, ymm3  // result += C3 * X * X * X
    vcvttps2dq  ymm0, ymm0
    vpackusdw   ymm0, ymm0, ymm0  // b0g0r0a0_00000000_b0g0r0a0_00000000
    vpermq      ymm0, ymm0, 0xd8  // b0g0r0a0_b0g0r0a0_00000000_00000000
    vpackuswb   xmm0, xmm0, xmm0  // bgrabgra_00000000_00000000_00000000
    vmovq       qword ptr [edx], xmm0
    lea         edx, [edx + 8]
    sub         ecx, 2
    jg          convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBPOLYNOMIALROW_AVX2

#ifdef HAS_ARGBCOLORTABLEROW_X86
// Transform ARGB pixels with color table.
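// Per pixel, each channel indexes its own slice of the interleaved table
// (a sketch of the scalar form):
//   dst[0] = table_argb[dst[0] * 4 + 0];  // B
//   dst[1] = table_argb[dst[1] * 4 + 1];  // G
//   dst[2] = table_argb[dst[2] * 4 + 2];  // R
//   dst[3] = table_argb[dst[3] * 4 + 3];  // A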
__declspec(naked)
void ARGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb,
                           int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   /* dst_argb */
    mov        esi, [esp + 4 + 8]   /* table_argb */
    mov        ecx, [esp + 4 + 12]  /* width */

    // 1 pixel loop.
  convertloop:
    movzx      edx, byte ptr [eax]
    lea        eax, [eax + 4]
    movzx      edx, byte ptr [esi + edx * 4]
    mov        byte ptr [eax - 4], dl
    movzx      edx, byte ptr [eax - 4 + 1]
    movzx      edx, byte ptr [esi + edx * 4 + 1]
    mov        byte ptr [eax - 4 + 1], dl
    movzx      edx, byte ptr [eax - 4 + 2]
    movzx      edx, byte ptr [esi + edx * 4 + 2]
    mov        byte ptr [eax - 4 + 2], dl
    movzx      edx, byte ptr [eax - 4 + 3]
    movzx      edx, byte ptr [esi + edx * 4 + 3]
    mov        byte ptr [eax - 4 + 3], dl
    dec        ecx
    jg         convertloop
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBCOLORTABLEROW_X86

#ifdef HAS_RGBCOLORTABLEROW_X86
// Transform RGB pixels with color table.
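// Same lookup as ARGBColorTableRow_X86 above, except only B, G and R are
// remapped; the alpha byte is left unchanged.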
__declspec(naked)
void RGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   /* dst_argb */
    mov        esi, [esp + 4 + 8]   /* table_argb */
    mov        ecx, [esp + 4 + 12]  /* width */

    // 1 pixel loop.
  convertloop:
    movzx      edx, byte ptr [eax]
    lea        eax, [eax + 4]
    movzx      edx, byte ptr [esi + edx * 4]
    mov        byte ptr [eax - 4], dl
    movzx      edx, byte ptr [eax - 4 + 1]
    movzx      edx, byte ptr [esi + edx * 4 + 1]
    mov        byte ptr [eax - 4 + 1], dl
    movzx      edx, byte ptr [eax - 4 + 2]
    movzx      edx, byte ptr [esi + edx * 4 + 2]
    mov        byte ptr [eax - 4 + 2], dl
    dec        ecx
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_RGBCOLORTABLEROW_X86

#ifdef HAS_ARGBLUMACOLORTABLEROW_SSSE3
// Transform RGB pixels with luma table.
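// Per pixel (a sketch of the scalar form): a table-row offset is computed
// from the four channel bytes and the packed lumacoeff byte coefficients,
//   weight = (b * lc[0] + g * lc[1] + r * lc[2] + a * lc[3]) & 0xff00;
// then B, G and R are each looked up in the 256-byte row at luma + weight,
// while alpha is copied through unchanged.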
__declspec(naked)
void ARGBLumaColorTableRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
                                 int width,
                                 const uint8* luma, uint32 lumacoeff) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   /* src_argb */
    mov        edi, [esp + 8 + 8]   /* dst_argb */
    mov        ecx, [esp + 8 + 12]  /* width */
    movd       xmm2, dword ptr [esp + 8 + 16]  // luma table
    movd       xmm3, dword ptr [esp + 8 + 20]  // lumacoeff
    pshufd     xmm2, xmm2, 0
    pshufd     xmm3, xmm3, 0
    pcmpeqb    xmm4, xmm4        // generate mask 0xff00ff00
    psllw      xmm4, 8
    pxor       xmm5, xmm5

    // 4 pixel loop.
  convertloop:
    movdqu     xmm0, xmmword ptr [eax]      // generate luma ptr
    pmaddubsw  xmm0, xmm3
    phaddw     xmm0, xmm0
    pand       xmm0, xmm4  // mask out low bits
    punpcklwd  xmm0, xmm5
    paddd      xmm0, xmm2  // add table base
    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // 00111001 to rotate right 32

    movzx      edx, byte ptr [eax]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi], dl
    movzx      edx, byte ptr [eax + 1]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 1], dl
    movzx      edx, byte ptr [eax + 2]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 2], dl
    movzx      edx, byte ptr [eax + 3]  // copy alpha.
    mov        byte ptr [edi + 3], dl

    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // 00111001 to rotate right 32

    movzx      edx, byte ptr [eax + 4]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 4], dl
    movzx      edx, byte ptr [eax + 5]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 5], dl
    movzx      edx, byte ptr [eax + 6]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 6], dl
    movzx      edx, byte ptr [eax + 7]  // copy alpha.
    mov        byte ptr [edi + 7], dl

    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // 00111001 to rotate right 32

    movzx      edx, byte ptr [eax + 8]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 8], dl
    movzx      edx, byte ptr [eax + 9]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 9], dl
    movzx      edx, byte ptr [eax + 10]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 10], dl
    movzx      edx, byte ptr [eax + 11]  // copy alpha.
    mov        byte ptr [edi + 11], dl

    movd       esi, xmm0

    movzx      edx, byte ptr [eax + 12]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 12], dl
    movzx      edx, byte ptr [eax + 13]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 13], dl
    movzx      edx, byte ptr [eax + 14]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 14], dl
    movzx      edx, byte ptr [eax + 15]  // copy alpha.
    mov        byte ptr [edi + 15], dl

    lea        eax, [eax + 16]
    lea        edi, [edi + 16]
    sub        ecx, 4
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBLUMACOLORTABLEROW_SSSE3

#endif  // defined(_M_X64)
#endif  // !defined(LIBYUV_DISABLE_X86) && (defined(_M_IX86) || defined(_M_X64))

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif