/*
 *  Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/row.h"

#if !defined(LIBYUV_DISABLE_X86) && defined(_M_X64) && \
    defined(_MSC_VER) && !defined(__clang__)
#include <emmintrin.h>
#include <tmmintrin.h>  // For _mm_maddubs_epi16
#endif

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

// This module is for Visual C 32/64 bit and clangcl 32 bit
#if !defined(LIBYUV_DISABLE_X86) && \
    (defined(_M_IX86) || (defined(_M_X64) && !defined(__clang__)))

struct YuvConstants {
  lvec8 kUVToB;     // 0
  lvec8 kUVToG;     // 32
  lvec8 kUVToR;     // 64
  lvec16 kUVBiasB;  // 96
  lvec16 kUVBiasG;  // 128
  lvec16 kUVBiasR;  // 160
  lvec16 kYToRgb;   // 192
};

// BT.601 YUV to RGB reference
//  R = (Y - 16) * 1.164              - V * -1.596
//  G = (Y - 16) * 1.164 - U *  0.391 - V *  0.813
//  B = (Y - 16) * 1.164 - U * -2.018

// Y contribution to R,G,B.  Scale and bias.
// TODO(fbarchard): Consider moving constants into a common header.
#define YG 18997 /* round(1.164 * 64 * 256 * 256 / 257) */
#define YGB -1160 /* 1.164 * 64 * -16 + 64 / 2 */

// U and V contributions to R,G,B.
#define UB -128 /* max(-128, round(-2.018 * 64)) */
#define UG 25 /* round(0.391 * 64) */
#define VG 52 /* round(0.813 * 64) */
#define VR -102 /* round(-1.596 * 64) */

// Bias values to subtract 16 from Y and 128 from U and V.
#define BB (UB * 128            + YGB)
#define BG (UG * 128 + VG * 128 + YGB)
#define BR            (VR * 128 + YGB)
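// Worked example (illustrative only, not part of the build): for black,
// Y=16 and U=V=128, the Y term computed below is (16 * 0x0101 * YG) >> 16
// = 1191, and blue is (1191 + BB - 128 * UB) >> 6 = (1191 - 1160) >> 6 = 0.
// For white, Y=235 gives a Y term of 17506 and (17506 - 1160) >> 6 = 255.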

// BT.601 constants for YUV to RGB.
static YuvConstants SIMD_ALIGNED(kYuvConstants) = {
  { UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0,
    UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0 },
  { UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG,
    UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG },
  { 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR,
    0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR },
  { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB },
  { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG },
  { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR },
  { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG }
};

// BT.601 constants for NV21 where chroma plane is VU instead of UV.
static YuvConstants SIMD_ALIGNED(kYvuConstants) = {
  { 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB,
    0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB },
  { VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG,
    VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG },
  { VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0,
    VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0 },
  { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB },
  { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG },
  { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR },
  { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG }
};

#undef YG
#undef YGB
#undef UB
#undef UG
#undef VG
#undef VR
#undef BB
#undef BG
#undef BR

// JPEG YUV to RGB reference
//  R = Y                - V * -1.40200
//  G = Y - U *  0.34414 - V *  0.71414
//  B = Y - U * -1.77200

// Y contribution to R,G,B.  Scale and bias.
// TODO(fbarchard): Consider moving constants into a common header.
#define YGJ 16320 /* round(1.000 * 64 * 256 * 256 / 257) */
#define YGBJ 32  /* 64 / 2 */

// U and V contributions to R,G,B.
#define UBJ -113 /* round(-1.77200 * 64) */
#define UGJ 22 /* round(0.34414 * 64) */
#define VGJ 46 /* round(0.71414  * 64) */
#define VRJ -90 /* round(-1.40200 * 64) */

// Bias values to subtract 16 from Y and 128 from U and V.
#define BBJ (UBJ * 128             + YGBJ)
#define BGJ (UGJ * 128 + VGJ * 128 + YGBJ)
#define BRJ             (VRJ * 128 + YGBJ)
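// Worked example (illustrative only): full range Y passes through, e.g.
// Y=255 and U=V=128 give (255 * 0x0101 * YGJ) >> 16 = 16319, and
// (16319 + BBJ - 128 * UBJ) >> 6 = (16319 + 32) >> 6 = 255.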

// JPEG constants for YUV to RGB.
static YuvConstants SIMD_ALIGNED(kYuvJConstants) = {
  { UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0,
    UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0 },
  { UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ,
    UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ,
    UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ,
    UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ },
  { 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ,
    0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ },
  { BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ,
    BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ },
  { BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ,
    BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ },
  { BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ,
    BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ },
  { YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ,
    YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ }
};

#undef YGJ
#undef YGBJ
#undef UBJ
#undef UGJ
#undef VGJ
#undef VRJ
#undef BBJ
#undef BGJ
#undef BRJ

// 64 bit
#if defined(_M_X64)
#if defined(HAS_I422TOARGBROW_SSSE3)
void I422ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         int width) {
  __m128i xmm0, xmm1, xmm2, xmm3;
  const __m128i xmm5 = _mm_set1_epi8(-1);
  const ptrdiff_t offset = (uint8*)v_buf - (uint8*)u_buf;

  while (width > 0) {
    xmm0 = _mm_cvtsi32_si128(*(uint32*)u_buf);
    xmm1 = _mm_cvtsi32_si128(*(uint32*)(u_buf + offset));
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);
    xmm0 = _mm_unpacklo_epi16(xmm0, xmm0);
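    // The two _mm_loadu_si128(&xmm0) calls below just duplicate xmm0 into
    // xmm1 and xmm2; with optimization enabled this is a register copy,
    // not a memory round trip.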
    xmm1 = _mm_loadu_si128(&xmm0);
    xmm2 = _mm_loadu_si128(&xmm0);
    xmm0 = _mm_maddubs_epi16(xmm0, *(__m128i*)kYuvConstants.kUVToB);
    xmm1 = _mm_maddubs_epi16(xmm1, *(__m128i*)kYuvConstants.kUVToG);
    xmm2 = _mm_maddubs_epi16(xmm2, *(__m128i*)kYuvConstants.kUVToR);
    xmm0 = _mm_sub_epi16(*(__m128i*)kYuvConstants.kUVBiasB, xmm0);
    xmm1 = _mm_sub_epi16(*(__m128i*)kYuvConstants.kUVBiasG, xmm1);
    xmm2 = _mm_sub_epi16(*(__m128i*)kYuvConstants.kUVBiasR, xmm2);
    xmm3 = _mm_loadl_epi64((__m128i*)y_buf);
    xmm3 = _mm_unpacklo_epi8(xmm3, xmm3);
    xmm3 = _mm_mulhi_epu16(xmm3, *(__m128i*)kYuvConstants.kYToRgb);
    xmm0 = _mm_adds_epi16(xmm0, xmm3);
    xmm1 = _mm_adds_epi16(xmm1, xmm3);
    xmm2 = _mm_adds_epi16(xmm2, xmm3);
    xmm0 = _mm_srai_epi16(xmm0, 6);
    xmm1 = _mm_srai_epi16(xmm1, 6);
    xmm2 = _mm_srai_epi16(xmm2, 6);
    xmm0 = _mm_packus_epi16(xmm0, xmm0);
    xmm1 = _mm_packus_epi16(xmm1, xmm1);
    xmm2 = _mm_packus_epi16(xmm2, xmm2);
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm5);
    xmm1 = _mm_loadu_si128(&xmm0);
    xmm0 = _mm_unpacklo_epi16(xmm0, xmm2);
    xmm1 = _mm_unpackhi_epi16(xmm1, xmm2);

    _mm_storeu_si128((__m128i *)dst_argb, xmm0);
    _mm_storeu_si128((__m128i *)(dst_argb + 16), xmm1);

    y_buf += 8;
    u_buf += 4;
    dst_argb += 32;
    width -= 8;
  }
}
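
// In effect the loop above computes, per pixel and in 6 bit fixed point:
//   B = Clamp((y_scaled + BB - u * UB) >> 6)
//   G = Clamp((y_scaled + BG - u * UG - v * VG) >> 6)
//   R = Clamp((y_scaled + BR - v * VR) >> 6)
// where y_scaled = (y * 0x0101 * YG) >> 16, roughly 1.164 * 64 * y.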
#endif
// 32 bit
#else  // defined(_M_X64)
#ifdef HAS_ARGBTOYROW_SSSE3

// Constants for ARGB.
static const vec8 kARGBToY = {
  13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0
};
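// i.e. Y = ((B * 13 + G * 65 + R * 33) >> 7) + 16, computed below with
// pmaddubsw/phaddw; the weights are roughly 0.10, 0.51 and 0.26 times 128.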

// JPeg full range.
static const vec8 kARGBToYJ = {
  15, 75, 38, 0, 15, 75, 38, 0, 15, 75, 38, 0, 15, 75, 38, 0
};

static const vec8 kARGBToU = {
  112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0
};

static const vec8 kARGBToUJ = {
  127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0
};

static const vec8 kARGBToV = {
  -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0,
};

static const vec8 kARGBToVJ = {
  -20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0
};

// vpshufb for vphaddw + vpackuswb packed to shorts.
static const lvec8 kShufARGBToUV_AVX = {
  0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15,
  0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15
};

// Constants for BGRA.
static const vec8 kBGRAToY = {
  0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13
};

static const vec8 kBGRAToU = {
  0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112
};

static const vec8 kBGRAToV = {
  0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18
};

// Constants for ABGR.
static const vec8 kABGRToY = {
  33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0
};

static const vec8 kABGRToU = {
  -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0
};

static const vec8 kABGRToV = {
  112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0
};

// Constants for RGBA.
static const vec8 kRGBAToY = {
  0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33
};

static const vec8 kRGBAToU = {
  0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38
};

static const vec8 kRGBAToV = {
  0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112
};

static const uvec8 kAddY16 = {
  16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u
};

// 7 bit fixed point 0.5.
static const vec16 kAddYJ64 = {
  64, 64, 64, 64, 64, 64, 64, 64
};

static const uvec8 kAddUV128 = {
  128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u,
  128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u
};

static const uvec16 kAddUVJ128 = {
  0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u
};
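// Each 0x8080 word adds the 128 bias in its high byte and a 0.5 rounding
// term in its low byte ahead of the psraw 8 in the UVJ row functions below.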

// Shuffle table for converting RGB24 to ARGB.
static const uvec8 kShuffleMaskRGB24ToARGB = {
  0u, 1u, 2u, 12u, 3u, 4u, 5u, 13u, 6u, 7u, 8u, 14u, 9u, 10u, 11u, 15u
};
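// The 12u..15u entries land in the alpha lanes; the row code then forces
// those bytes to 0xff by OR-ing with a 0xff000000 mask.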

// Shuffle table for converting RAW to ARGB.
static const uvec8 kShuffleMaskRAWToARGB = {
  2u, 1u, 0u, 12u, 5u, 4u, 3u, 13u, 8u, 7u, 6u, 14u, 11u, 10u, 9u, 15u
};

// Shuffle table for converting ARGB to RGB24.
static const uvec8 kShuffleMaskARGBToRGB24 = {
  0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 10u, 12u, 13u, 14u, 128u, 128u, 128u, 128u
};

// Shuffle table for converting ARGB to RAW.
static const uvec8 kShuffleMaskARGBToRAW = {
  2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 8u, 14u, 13u, 12u, 128u, 128u, 128u, 128u
};

// Shuffle table for converting ARGBToRGB24 for I422ToRGB24.  First 8 + next 4
static const uvec8 kShuffleMaskARGBToRGB24_0 = {
  0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 128u, 128u, 128u, 128u, 10u, 12u, 13u, 14u
};

// Shuffle table for converting ARGB to RAW for I422ToRAW.  First 8 + next 4
static const uvec8 kShuffleMaskARGBToRAW_0 = {
  2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 128u, 128u, 128u, 128u, 8u, 14u, 13u, 12u
};

// Duplicates gray value 3 times and fills in alpha opaque.
__declspec(naked)
void J400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int pix) {
  __asm {
    mov        eax, [esp + 4]        // src_y
    mov        edx, [esp + 8]        // dst_argb
    mov        ecx, [esp + 12]       // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0xff000000
    pslld      xmm5, 24

  convertloop:
    movq       xmm0, qword ptr [eax]
    lea        eax,  [eax + 8]
    punpcklbw  xmm0, xmm0
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm0
    punpckhwd  xmm1, xmm1
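    // each gray byte is now replicated into all four bytes of its pixel;
    // the por below forces the alpha byte to 0xff.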
    por        xmm0, xmm5
    por        xmm1, xmm5
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}

#ifdef HAS_J400TOARGBROW_AVX2
// Duplicates gray value 3 times and fills in alpha opaque.
__declspec(naked)
void J400ToARGBRow_AVX2(const uint8* src_y, uint8* dst_argb, int pix) {
  __asm {
    mov         eax, [esp + 4]        // src_y
    mov         edx, [esp + 8]        // dst_argb
    mov         ecx, [esp + 12]       // pix
    vpcmpeqb    ymm5, ymm5, ymm5      // generate mask 0xff000000
    vpslld      ymm5, ymm5, 24

  convertloop:
    vmovdqu     xmm0, [eax]
    lea         eax,  [eax + 16]
    vpermq      ymm0, ymm0, 0xd8
    vpunpcklbw  ymm0, ymm0, ymm0
    vpermq      ymm0, ymm0, 0xd8
    vpunpckhwd  ymm1, ymm0, ymm0
    vpunpcklwd  ymm0, ymm0, ymm0
    vpor        ymm0, ymm0, ymm5
    vpor        ymm1, ymm1, ymm5
    vmovdqu     [edx], ymm0
    vmovdqu     [edx + 32], ymm1
    lea         edx, [edx + 64]
    sub         ecx, 16
    jg          convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_J400TOARGBROW_AVX2

__declspec(naked)
void RGB24ToARGBRow_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_rgb24
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm5, xmm5       // generate mask 0xff000000
    pslld     xmm5, 24
    movdqa    xmm4, kShuffleMaskRGB24ToARGB

 convertloop:
    movdqu    xmm0, [eax]
    movdqu    xmm1, [eax + 16]
    movdqu    xmm3, [eax + 32]
    lea       eax, [eax + 48]
    movdqa    xmm2, xmm3
    palignr   xmm2, xmm1, 8    // xmm2 = { xmm3[0:7] xmm1[8:15] }
    pshufb    xmm2, xmm4
    por       xmm2, xmm5
    palignr   xmm1, xmm0, 12   // xmm1 = { xmm1[0:11] xmm0[12:15] }
    pshufb    xmm0, xmm4
    movdqu    [edx + 32], xmm2
    por       xmm0, xmm5
    pshufb    xmm1, xmm4
    movdqu    [edx], xmm0
    por       xmm1, xmm5
    palignr   xmm3, xmm3, 4    // xmm3 = { xmm3[4:15]}
    pshufb    xmm3, xmm4
    movdqu    [edx + 16], xmm1
    por       xmm3, xmm5
    movdqu    [edx + 48], xmm3
    lea       edx, [edx + 64]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

__declspec(naked)
void RAWToARGBRow_SSSE3(const uint8* src_raw, uint8* dst_argb,
                        int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_raw
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm5, xmm5       // generate mask 0xff000000
    pslld     xmm5, 24
    movdqa    xmm4, kShuffleMaskRAWToARGB

 convertloop:
    movdqu    xmm0, [eax]
    movdqu    xmm1, [eax + 16]
    movdqu    xmm3, [eax + 32]
    lea       eax, [eax + 48]
    movdqa    xmm2, xmm3
    palignr   xmm2, xmm1, 8    // xmm2 = { xmm3[0:7] xmm1[8:15] }
    pshufb    xmm2, xmm4
    por       xmm2, xmm5
    palignr   xmm1, xmm0, 12   // xmm1 = { xmm1[0:11] xmm0[12:15] }
    pshufb    xmm0, xmm4
    movdqu    [edx + 32], xmm2
    por       xmm0, xmm5
    pshufb    xmm1, xmm4
    movdqu    [edx], xmm0
    por       xmm1, xmm5
    palignr   xmm3, xmm3, 4    // xmm3 = { xmm3[4:15]}
    pshufb    xmm3, xmm4
    movdqu    [edx + 16], xmm1
    por       xmm3, xmm5
    movdqu    [edx + 48], xmm3
    lea       edx, [edx + 64]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

// pmul method to replicate bits.
// Math to replicate bits:
// (v << 8) | (v << 3)
// v * 256 + v * 8
// v * (256 + 8)
// G shift of 5 is incorporated, so shift is 5 + 8 and 5 + 3
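// For example, the 5 bit maximum 31 expands to (31 << 3) | (31 >> 2)
// = 248 | 7 = 255.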
// 20 instructions.
__declspec(naked)
void RGB565ToARGBRow_SSE2(const uint8* src_rgb565, uint8* dst_argb,
                          int pix) {
  __asm {
    mov       eax, 0x01080108  // generate multiplier to repeat 5 bits
    movd      xmm5, eax
    pshufd    xmm5, xmm5, 0
    mov       eax, 0x20802080  // multiplier shift by 5 and then repeat 6 bits
    movd      xmm6, eax
    pshufd    xmm6, xmm6, 0
    pcmpeqb   xmm3, xmm3       // generate mask 0xf800f800 for Red
    psllw     xmm3, 11
    pcmpeqb   xmm4, xmm4       // generate mask 0x07e007e0 for Green
    psllw     xmm4, 10
    psrlw     xmm4, 5
    pcmpeqb   xmm7, xmm7       // generate mask 0xff00ff00 for Alpha
    psllw     xmm7, 8

    mov       eax, [esp + 4]   // src_rgb565
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    sub       edx, eax
    sub       edx, eax

 convertloop:
    movdqu    xmm0, [eax]   // fetch 8 pixels of bgr565
    movdqa    xmm1, xmm0
    movdqa    xmm2, xmm0
    pand      xmm1, xmm3    // R in upper 5 bits
    psllw     xmm2, 11      // B in upper 5 bits
    pmulhuw   xmm1, xmm5    // * (256 + 8)
    pmulhuw   xmm2, xmm5    // * (256 + 8)
    psllw     xmm1, 8
    por       xmm1, xmm2    // RB
    pand      xmm0, xmm4    // G in middle 6 bits
    pmulhuw   xmm0, xmm6    // << 5 * (256 + 4)
    por       xmm0, xmm7    // AG
    movdqa    xmm2, xmm1
    punpcklbw xmm1, xmm0
    punpckhbw xmm2, xmm0
    movdqu    [eax * 2 + edx], xmm1  // store 4 pixels of ARGB
    movdqu    [eax * 2 + edx + 16], xmm2  // store next 4 pixels of ARGB
    lea       eax, [eax + 16]
    sub       ecx, 8
    jg        convertloop
    ret
  }
}

#ifdef HAS_RGB565TOARGBROW_AVX2
// pmul method to replicate bits.
// Math to replicate bits:
// (v << 8) | (v << 3)
// v * 256 + v * 8
// v * (256 + 8)
// G shift of 5 is incorporated, so shift is 5 + 8 and 5 + 3
__declspec(naked)
void RGB565ToARGBRow_AVX2(const uint8* src_rgb565, uint8* dst_argb,
                          int pix) {
  __asm {
    mov        eax, 0x01080108  // generate multiplier to repeat 5 bits
    vmovd      xmm5, eax
    vbroadcastss ymm5, xmm5
    mov        eax, 0x20802080  // multiplier shift by 5 and then repeat 6 bits
    movd       xmm6, eax
    vbroadcastss ymm6, xmm6
    vpcmpeqb   ymm3, ymm3, ymm3       // generate mask 0xf800f800 for Red
    vpsllw     ymm3, ymm3, 11
    vpcmpeqb   ymm4, ymm4, ymm4       // generate mask 0x07e007e0 for Green
    vpsllw     ymm4, ymm4, 10
    vpsrlw     ymm4, ymm4, 5
    vpcmpeqb   ymm7, ymm7, ymm7       // generate mask 0xff00ff00 for Alpha
    vpsllw     ymm7, ymm7, 8

    mov        eax, [esp + 4]   // src_rgb565
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // pix
    sub        edx, eax
    sub        edx, eax

 convertloop:
    vmovdqu    ymm0, [eax]   // fetch 16 pixels of bgr565
    vpand      ymm1, ymm0, ymm3    // R in upper 5 bits
    vpsllw     ymm2, ymm0, 11      // B in upper 5 bits
    vpmulhuw   ymm1, ymm1, ymm5    // * (256 + 8)
    vpmulhuw   ymm2, ymm2, ymm5    // * (256 + 8)
    vpsllw     ymm1, ymm1, 8
    vpor       ymm1, ymm1, ymm2    // RB
    vpand      ymm0, ymm0, ymm4    // G in middle 6 bits
    vpmulhuw   ymm0, ymm0, ymm6    // << 5 * (256 + 4)
    vpor       ymm0, ymm0, ymm7    // AG
    vpermq     ymm0, ymm0, 0xd8    // mutate for unpack
    vpermq     ymm1, ymm1, 0xd8
    vpunpckhbw ymm2, ymm1, ymm0
    vpunpcklbw ymm1, ymm1, ymm0
    vmovdqu    [eax * 2 + edx], ymm1  // store 8 pixels of ARGB
    vmovdqu    [eax * 2 + edx + 32], ymm2  // store next 8 pixels of ARGB
    lea       eax, [eax + 32]
    sub       ecx, 16
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_RGB565TOARGBROW_AVX2

#ifdef HAS_ARGB1555TOARGBROW_AVX2
__declspec(naked)
void ARGB1555ToARGBRow_AVX2(const uint8* src_argb1555, uint8* dst_argb,
                            int pix) {
  __asm {
    mov        eax, 0x01080108  // generate multiplier to repeat 5 bits
    vmovd      xmm5, eax
    vbroadcastss ymm5, xmm5
    mov        eax, 0x42004200  // multiplier shift by 6 and then repeat 5 bits
    movd       xmm6, eax
    vbroadcastss ymm6, xmm6
    vpcmpeqb   ymm3, ymm3, ymm3 // generate mask 0xf800f800 for Red
    vpsllw     ymm3, ymm3, 11
    vpsrlw     ymm4, ymm3, 6    // generate mask 0x03e003e0 for Green
    vpcmpeqb   ymm7, ymm7, ymm7 // generate mask 0xff00ff00 for Alpha
    vpsllw     ymm7, ymm7, 8

    mov        eax,  [esp + 4]   // src_argb1555
    mov        edx,  [esp + 8]   // dst_argb
    mov        ecx,  [esp + 12]  // pix
    sub        edx,  eax
    sub        edx,  eax

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 16 pixels of 1555
    vpsllw     ymm1, ymm0, 1       // R in upper 5 bits
    vpsllw     ymm2, ymm0, 11      // B in upper 5 bits
    vpand      ymm1, ymm1, ymm3
    vpmulhuw   ymm2, ymm2, ymm5    // * (256 + 8)
    vpmulhuw   ymm1, ymm1, ymm5    // * (256 + 8)
    vpsllw     ymm1, ymm1, 8
    vpor       ymm1, ymm1, ymm2    // RB
    vpsraw     ymm2, ymm0, 8       // A
    vpand      ymm0, ymm0, ymm4    // G in middle 5 bits
    vpmulhuw   ymm0, ymm0, ymm6    // << 6 * (256 + 8)
    vpand      ymm2, ymm2, ymm7
    vpor       ymm0, ymm0, ymm2    // AG
    vpermq     ymm0, ymm0, 0xd8    // mutate for unpack
    vpermq     ymm1, ymm1, 0xd8
    vpunpckhbw ymm2, ymm1, ymm0
    vpunpcklbw ymm1, ymm1, ymm0
    vmovdqu    [eax * 2 + edx], ymm1  // store 8 pixels of ARGB
    vmovdqu    [eax * 2 + edx + 32], ymm2  // store next 8 pixels of ARGB
    lea       eax, [eax + 32]
    sub       ecx, 16
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGB1555TOARGBROW_AVX2

#ifdef HAS_ARGB4444TOARGBROW_AVX2
__declspec(naked)
void ARGB4444ToARGBRow_AVX2(const uint8* src_argb4444, uint8* dst_argb,
                            int pix) {
  __asm {
    mov       eax,  0x0f0f0f0f  // generate mask 0x0f0f0f0f
    vmovd     xmm4, eax
    vbroadcastss ymm4, xmm4
    vpslld    ymm5, ymm4, 4     // 0xf0f0f0f0 for high nibbles
    mov       eax,  [esp + 4]   // src_argb4444
    mov       edx,  [esp + 8]   // dst_argb
    mov       ecx,  [esp + 12]  // pix
    sub       edx,  eax
    sub       edx,  eax

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 16 pixels of bgra4444
    vpand      ymm2, ymm0, ymm5    // mask high nibbles
    vpand      ymm0, ymm0, ymm4    // mask low nibbles
    vpsrlw     ymm3, ymm2, 4
    vpsllw     ymm1, ymm0, 4
    vpor       ymm2, ymm2, ymm3
    vpor       ymm0, ymm0, ymm1
    vpermq     ymm0, ymm0, 0xd8    // mutate for unpack
    vpermq     ymm2, ymm2, 0xd8
    vpunpckhbw ymm1, ymm0, ymm2
    vpunpcklbw ymm0, ymm0, ymm2
    vmovdqu    [eax * 2 + edx], ymm0  // store 8 pixels of ARGB
    vmovdqu    [eax * 2 + edx + 32], ymm1  // store next 8 pixels of ARGB
    lea       eax, [eax + 32]
    sub       ecx, 16
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGB4444TOARGBROW_AVX2

// 24 instructions
__declspec(naked)
void ARGB1555ToARGBRow_SSE2(const uint8* src_argb1555, uint8* dst_argb,
                            int pix) {
  __asm {
    mov       eax, 0x01080108  // generate multiplier to repeat 5 bits
    movd      xmm5, eax
    pshufd    xmm5, xmm5, 0
    mov       eax, 0x42004200  // multiplier shift by 6 and then repeat 5 bits
    movd      xmm6, eax
    pshufd    xmm6, xmm6, 0
    pcmpeqb   xmm3, xmm3       // generate mask 0xf800f800 for Red
    psllw     xmm3, 11
    movdqa    xmm4, xmm3       // generate mask 0x03e003e0 for Green
    psrlw     xmm4, 6
    pcmpeqb   xmm7, xmm7       // generate mask 0xff00ff00 for Alpha
    psllw     xmm7, 8

    mov       eax, [esp + 4]   // src_argb1555
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    sub       edx, eax
    sub       edx, eax

 convertloop:
    movdqu    xmm0, [eax]   // fetch 8 pixels of 1555
    movdqa    xmm1, xmm0
    movdqa    xmm2, xmm0
    psllw     xmm1, 1       // R in upper 5 bits
    psllw     xmm2, 11      // B in upper 5 bits
    pand      xmm1, xmm3
    pmulhuw   xmm2, xmm5    // * (256 + 8)
    pmulhuw   xmm1, xmm5    // * (256 + 8)
    psllw     xmm1, 8
    por       xmm1, xmm2    // RB
    movdqa    xmm2, xmm0
    pand      xmm0, xmm4    // G in middle 5 bits
    psraw     xmm2, 8       // A
    pmulhuw   xmm0, xmm6    // << 6 * (256 + 8)
    pand      xmm2, xmm7
    por       xmm0, xmm2    // AG
    movdqa    xmm2, xmm1
    punpcklbw xmm1, xmm0
    punpckhbw xmm2, xmm0
    movdqu    [eax * 2 + edx], xmm1  // store 4 pixels of ARGB
    movdqu    [eax * 2 + edx + 16], xmm2  // store next 4 pixels of ARGB
    lea       eax, [eax + 16]
    sub       ecx, 8
    jg        convertloop
    ret
  }
}

// 18 instructions.
__declspec(naked)
void ARGB4444ToARGBRow_SSE2(const uint8* src_argb4444, uint8* dst_argb,
                            int pix) {
  __asm {
    mov       eax, 0x0f0f0f0f  // generate mask 0x0f0f0f0f
    movd      xmm4, eax
    pshufd    xmm4, xmm4, 0
    movdqa    xmm5, xmm4       // 0xf0f0f0f0 for high nibbles
    pslld     xmm5, 4
    mov       eax, [esp + 4]   // src_argb4444
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    sub       edx, eax
    sub       edx, eax

 convertloop:
    movdqu    xmm0, [eax]   // fetch 8 pixels of bgra4444
    movdqa    xmm2, xmm0
    pand      xmm0, xmm4    // mask low nibbles
    pand      xmm2, xmm5    // mask high nibbles
    movdqa    xmm1, xmm0
    movdqa    xmm3, xmm2
    psllw     xmm1, 4
    psrlw     xmm3, 4
    por       xmm0, xmm1
    por       xmm2, xmm3
    movdqa    xmm1, xmm0
    punpcklbw xmm0, xmm2
    punpckhbw xmm1, xmm2
    movdqu    [eax * 2 + edx], xmm0  // store 4 pixels of ARGB
    movdqu    [eax * 2 + edx + 16], xmm1  // store next 4 pixels of ARGB
    lea       eax, [eax + 16]
    sub       ecx, 8
    jg        convertloop
    ret
  }
}

__declspec(naked)
void ARGBToRGB24Row_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    movdqa    xmm6, kShuffleMaskARGBToRGB24

 convertloop:
    movdqu    xmm0, [eax]   // fetch 16 pixels of argb
    movdqu    xmm1, [eax + 16]
    movdqu    xmm2, [eax + 32]
    movdqu    xmm3, [eax + 48]
    lea       eax, [eax + 64]
    pshufb    xmm0, xmm6    // pack 16 bytes of ARGB to 12 bytes of RGB
    pshufb    xmm1, xmm6
    pshufb    xmm2, xmm6
    pshufb    xmm3, xmm6
    movdqa    xmm4, xmm1   // 4 bytes from 1 for 0
    psrldq    xmm1, 4      // 8 bytes from 1
    pslldq    xmm4, 12     // 4 bytes from 1 for 0
    movdqa    xmm5, xmm2   // 8 bytes from 2 for 1
    por       xmm0, xmm4   // 4 bytes from 1 for 0
    pslldq    xmm5, 8      // 8 bytes from 2 for 1
    movdqu    [edx], xmm0  // store 0
    por       xmm1, xmm5   // 8 bytes from 2 for 1
    psrldq    xmm2, 8      // 4 bytes from 2
    pslldq    xmm3, 4      // 12 bytes from 3 for 2
    por       xmm2, xmm3   // 12 bytes from 3 for 2
    movdqu    [edx + 16], xmm1   // store 1
    movdqu    [edx + 32], xmm2   // store 2
    lea       edx, [edx + 48]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

__declspec(naked)
void ARGBToRAWRow_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    movdqa    xmm6, kShuffleMaskARGBToRAW

 convertloop:
    movdqu    xmm0, [eax]   // fetch 16 pixels of argb
    movdqu    xmm1, [eax + 16]
    movdqu    xmm2, [eax + 32]
    movdqu    xmm3, [eax + 48]
    lea       eax, [eax + 64]
    pshufb    xmm0, xmm6    // pack 16 bytes of ARGB to 12 bytes of RGB
    pshufb    xmm1, xmm6
    pshufb    xmm2, xmm6
    pshufb    xmm3, xmm6
    movdqa    xmm4, xmm1   // 4 bytes from 1 for 0
    psrldq    xmm1, 4      // 8 bytes from 1
    pslldq    xmm4, 12     // 4 bytes from 1 for 0
    movdqa    xmm5, xmm2   // 8 bytes from 2 for 1
    por       xmm0, xmm4   // 4 bytes from 1 for 0
    pslldq    xmm5, 8      // 8 bytes from 2 for 1
    movdqu    [edx], xmm0  // store 0
    por       xmm1, xmm5   // 8 bytes from 2 for 1
    psrldq    xmm2, 8      // 4 bytes from 2
    pslldq    xmm3, 4      // 12 bytes from 3 for 2
    por       xmm2, xmm3   // 12 bytes from 3 for 2
    movdqu    [edx + 16], xmm1   // store 1
    movdqu    [edx + 32], xmm2   // store 2
    lea       edx, [edx + 48]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

// 4 pixels
__declspec(naked)
void ARGBToRGB565Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm3, xmm3       // generate mask 0x0000001f
    psrld     xmm3, 27
    pcmpeqb   xmm4, xmm4       // generate mask 0x000007e0
    psrld     xmm4, 26
    pslld     xmm4, 5
    pcmpeqb   xmm5, xmm5       // generate mask 0xfffff800
    pslld     xmm5, 11

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    movdqa    xmm1, xmm0    // B
    movdqa    xmm2, xmm0    // G
    pslld     xmm0, 8       // R
    psrld     xmm1, 3       // B
    psrld     xmm2, 5       // G
    psrad     xmm0, 16      // R
    pand      xmm1, xmm3    // B
    pand      xmm2, xmm4    // G
    pand      xmm0, xmm5    // R
    por       xmm1, xmm2    // BG
    por       xmm0, xmm1    // BGR
    packssdw  xmm0, xmm0
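    // psrad 16 sign extended the red dword above, so packssdw's signed
    // saturation leaves the masked 16 bit RGB565 pattern intact.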
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of RGB565
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}

// 4 pixels
__declspec(naked)
void ARGBToRGB565DitherRow_SSE2(const uint8* src_argb, uint8* dst_rgb,
                                const uint32 dither4, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    movd      xmm6, [esp + 12] // dither4
    mov       ecx, [esp + 16]  // pix
    punpcklbw xmm6, xmm6       // make dither 16 bytes
    movdqa    xmm7, xmm6
    punpcklwd xmm6, xmm6
    punpckhwd xmm7, xmm7
    pcmpeqb   xmm3, xmm3       // generate mask 0x0000001f
    psrld     xmm3, 27
    pcmpeqb   xmm4, xmm4       // generate mask 0x000007e0
    psrld     xmm4, 26
    pslld     xmm4, 5
    pcmpeqb   xmm5, xmm5       // generate mask 0xfffff800
    pslld     xmm5, 11

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    paddusb   xmm0, xmm6    // add dither
    movdqa    xmm1, xmm0    // B
    movdqa    xmm2, xmm0    // G
    pslld     xmm0, 8       // R
    psrld     xmm1, 3       // B
    psrld     xmm2, 5       // G
    psrad     xmm0, 16      // R
    pand      xmm1, xmm3    // B
    pand      xmm2, xmm4    // G
    pand      xmm0, xmm5    // R
    por       xmm1, xmm2    // BG
    por       xmm0, xmm1    // BGR
    packssdw  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of RGB565
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}

#ifdef HAS_ARGBTORGB565DITHERROW_AVX2
__declspec(naked)
void ARGBToRGB565DitherRow_AVX2(const uint8* src_argb, uint8* dst_rgb,
                                const uint32 dither4, int pix) {
  __asm {
    mov        eax, [esp + 4]      // src_argb
    mov        edx, [esp + 8]      // dst_rgb
    vbroadcastss xmm6, [esp + 12]  // dither4
    mov        ecx, [esp + 16]     // pix
    vpunpcklbw xmm6, xmm6, xmm6    // make dither 32 bytes
    vpermq     ymm6, ymm6, 0xd8
    vpunpcklwd ymm6, ymm6, ymm6
    vpcmpeqb   ymm3, ymm3, ymm3    // generate mask 0x0000001f
    vpsrld     ymm3, ymm3, 27
    vpcmpeqb   ymm4, ymm4, ymm4    // generate mask 0x000007e0
    vpsrld     ymm4, ymm4, 26
    vpslld     ymm4, ymm4, 5
    vpslld     ymm5, ymm3, 11      // generate mask 0x0000f800

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 8 pixels of argb
    vpaddusb   ymm0, ymm0, ymm6    // add dither
    vpsrld     ymm2, ymm0, 5       // G
    vpsrld     ymm1, ymm0, 3       // B
    vpsrld     ymm0, ymm0, 8       // R
    vpand      ymm2, ymm2, ymm4    // G
    vpand      ymm1, ymm1, ymm3    // B
    vpand      ymm0, ymm0, ymm5    // R
    vpor       ymm1, ymm1, ymm2    // BG
    vpor       ymm0, ymm0, ymm1    // BGR
    vpackusdw  ymm0, ymm0, ymm0
    vpermq     ymm0, ymm0, 0xd8
    lea        eax, [eax + 32]
    vmovdqu    [edx], xmm0         // store 8 pixels of RGB565
    lea        edx, [edx + 16]
    sub        ecx, 8
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTORGB565DITHERROW_AVX2

// TODO(fbarchard): Improve sign extension/packing.
__declspec(naked)
void ARGBToARGB1555Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm4, xmm4       // generate mask 0x0000001f
    psrld     xmm4, 27
    movdqa    xmm5, xmm4       // generate mask 0x000003e0
    pslld     xmm5, 5
    movdqa    xmm6, xmm4       // generate mask 0x00007c00
    pslld     xmm6, 10
    pcmpeqb   xmm7, xmm7       // generate mask 0xffff8000
    pslld     xmm7, 15

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    movdqa    xmm1, xmm0    // B
    movdqa    xmm2, xmm0    // G
    movdqa    xmm3, xmm0    // R
    psrad     xmm0, 16      // A
    psrld     xmm1, 3       // B
    psrld     xmm2, 6       // G
    psrld     xmm3, 9       // R
    pand      xmm0, xmm7    // A
    pand      xmm1, xmm4    // B
    pand      xmm2, xmm5    // G
    pand      xmm3, xmm6    // R
    por       xmm0, xmm1    // BA
    por       xmm2, xmm3    // GR
    por       xmm0, xmm2    // BGRA
    packssdw  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of ARGB1555
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}

__declspec(naked)
void ARGBToARGB4444Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm4, xmm4       // generate mask 0xf000f000
    psllw     xmm4, 12
    movdqa    xmm3, xmm4       // generate mask 0x00f000f0
    psrlw     xmm3, 8

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    movdqa    xmm1, xmm0
    pand      xmm0, xmm3    // low nibble
    pand      xmm1, xmm4    // high nibble
    psrld     xmm0, 4
    psrld     xmm1, 8
    por       xmm0, xmm1
    packuswb  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of ARGB4444
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}

#ifdef HAS_ARGBTORGB565ROW_AVX2
__declspec(naked)
void ARGBToRGB565Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov        eax, [esp + 4]      // src_argb
    mov        edx, [esp + 8]      // dst_rgb
    mov        ecx, [esp + 12]     // pix
    vpcmpeqb   ymm3, ymm3, ymm3    // generate mask 0x0000001f
    vpsrld     ymm3, ymm3, 27
    vpcmpeqb   ymm4, ymm4, ymm4    // generate mask 0x000007e0
    vpsrld     ymm4, ymm4, 26
    vpslld     ymm4, ymm4, 5
    vpslld     ymm5, ymm3, 11      // generate mask 0x0000f800

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 8 pixels of argb
    vpsrld     ymm2, ymm0, 5       // G
    vpsrld     ymm1, ymm0, 3       // B
    vpsrld     ymm0, ymm0, 8       // R
    vpand      ymm2, ymm2, ymm4    // G
    vpand      ymm1, ymm1, ymm3    // B
    vpand      ymm0, ymm0, ymm5    // R
    vpor       ymm1, ymm1, ymm2    // BG
    vpor       ymm0, ymm0, ymm1    // BGR
    vpackusdw  ymm0, ymm0, ymm0
    vpermq     ymm0, ymm0, 0xd8
    lea        eax, [eax + 32]
    vmovdqu    [edx], xmm0         // store 8 pixels of RGB565
    lea        edx, [edx + 16]
    sub        ecx, 8
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTORGB565ROW_AVX2

#ifdef HAS_ARGBTOARGB1555ROW_AVX2
__declspec(naked)
void ARGBToARGB1555Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov        eax, [esp + 4]      // src_argb
    mov        edx, [esp + 8]      // dst_rgb
    mov        ecx, [esp + 12]     // pix
    vpcmpeqb   ymm4, ymm4, ymm4
    vpsrld     ymm4, ymm4, 27      // generate mask 0x0000001f
    vpslld     ymm5, ymm4, 5       // generate mask 0x000003e0
    vpslld     ymm6, ymm4, 10      // generate mask 0x00007c00
    vpcmpeqb   ymm7, ymm7, ymm7    // generate mask 0xffff8000
    vpslld     ymm7, ymm7, 15

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 8 pixels of argb
    vpsrld     ymm3, ymm0, 9       // R
    vpsrld     ymm2, ymm0, 6       // G
    vpsrld     ymm1, ymm0, 3       // B
    vpsrad     ymm0, ymm0, 16      // A
    vpand      ymm3, ymm3, ymm6    // R
    vpand      ymm2, ymm2, ymm5    // G
    vpand      ymm1, ymm1, ymm4    // B
    vpand      ymm0, ymm0, ymm7    // A
    vpor       ymm0, ymm0, ymm1    // BA
    vpor       ymm2, ymm2, ymm3    // GR
    vpor       ymm0, ymm0, ymm2    // BGRA
    vpackssdw  ymm0, ymm0, ymm0
    vpermq     ymm0, ymm0, 0xd8
    lea        eax, [eax + 32]
    vmovdqu    [edx], xmm0         // store 8 pixels of ARGB1555
    lea        edx, [edx + 16]
    sub        ecx, 8
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTOARGB1555ROW_AVX2

#ifdef HAS_ARGBTOARGB4444ROW_AVX2
__declspec(naked)
void ARGBToARGB4444Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov        eax, [esp + 4]   // src_argb
    mov        edx, [esp + 8]   // dst_rgb
    mov        ecx, [esp + 12]  // pix
    vpcmpeqb   ymm4, ymm4, ymm4   // generate mask 0xf000f000
    vpsllw     ymm4, ymm4, 12
    vpsrlw     ymm3, ymm4, 8      // generate mask 0x00f000f0

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 8 pixels of argb
    vpand      ymm1, ymm0, ymm4    // high nibble
    vpand      ymm0, ymm0, ymm3    // low nibble
    vpsrld     ymm1, ymm1, 8
    vpsrld     ymm0, ymm0, 4
    vpor       ymm0, ymm0, ymm1
    vpackuswb  ymm0, ymm0, ymm0
    vpermq     ymm0, ymm0, 0xd8
    lea        eax, [eax + 32]
    vmovdqu    [edx], xmm0         // store 8 pixels of ARGB4444
    lea        edx, [edx + 16]
    sub        ecx, 8
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTOARGB4444ROW_AVX2

// Convert 16 ARGB pixels (64 bytes) to 16 Y values.
__declspec(naked)
void ARGBToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, kARGBToY
    movdqa     xmm5, kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

// Convert 16 ARGB pixels (64 bytes) to 16 YJ values.
// Same as ARGBToYRow but with different coefficients, no add of 16, and
// rounding applied.
__declspec(naked)
void ARGBToYJRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, kARGBToYJ
    movdqa     xmm5, kAddYJ64

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    paddw      xmm0, xmm5  // Add .5 for rounding.
    paddw      xmm2, xmm5
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

#ifdef HAS_ARGBTOYROW_AVX2
// vpermd for vphaddw + vpackuswb.
static const lvec32 kPermdARGBToY_AVX = {
  0, 4, 1, 5, 2, 6, 3, 7
};
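// vphaddw and vpackuswb mutate within 128 bit lanes; gathering dwords in
// the order 0,4,1,5,2,6,3,7 puts the 32 Y bytes back in linear order.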

// Convert 32 ARGB pixels (128 bytes) to 32 Y values.
__declspec(naked)
void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    vbroadcastf128 ymm4, kARGBToY
    vbroadcastf128 ymm5, kAddY16
    vmovdqu    ymm6, kPermdARGBToY_AVX

 convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vmovdqu    ymm2, [eax + 64]
    vmovdqu    ymm3, [eax + 96]
    vpmaddubsw ymm0, ymm0, ymm4
    vpmaddubsw ymm1, ymm1, ymm4
    vpmaddubsw ymm2, ymm2, ymm4
    vpmaddubsw ymm3, ymm3, ymm4
    lea        eax, [eax + 128]
    vphaddw    ymm0, ymm0, ymm1  // mutates.
    vphaddw    ymm2, ymm2, ymm3
    vpsrlw     ymm0, ymm0, 7
    vpsrlw     ymm2, ymm2, 7
    vpackuswb  ymm0, ymm0, ymm2  // mutates.
    vpermd     ymm0, ymm6, ymm0  // For vphaddw + vpackuswb mutation.
    vpaddb     ymm0, ymm0, ymm5  // add 16 for Y
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  //  HAS_ARGBTOYROW_AVX2

#ifdef HAS_ARGBTOYJROW_AVX2
// Convert 32 ARGB pixels (128 bytes) to 32 YJ values.
__declspec(naked)
void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    vbroadcastf128 ymm4, kARGBToYJ
    vbroadcastf128 ymm5, kAddYJ64
    vmovdqu    ymm6, kPermdARGBToY_AVX

 convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vmovdqu    ymm2, [eax + 64]
    vmovdqu    ymm3, [eax + 96]
    vpmaddubsw ymm0, ymm0, ymm4
    vpmaddubsw ymm1, ymm1, ymm4
    vpmaddubsw ymm2, ymm2, ymm4
    vpmaddubsw ymm3, ymm3, ymm4
    lea        eax, [eax + 128]
    vphaddw    ymm0, ymm0, ymm1  // mutates.
    vphaddw    ymm2, ymm2, ymm3
    vpaddw     ymm0, ymm0, ymm5  // Add .5 for rounding.
    vpaddw     ymm2, ymm2, ymm5
    vpsrlw     ymm0, ymm0, 7
    vpsrlw     ymm2, ymm2, 7
    vpackuswb  ymm0, ymm0, ymm2  // mutates.
    vpermd     ymm0, ymm6, ymm0  // For vphaddw + vpackuswb mutation.
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  //  HAS_ARGBTOYJROW_AVX2

__declspec(naked)
void BGRAToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, kBGRAToY
    movdqa     xmm5, kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void ABGRToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, kABGRToY
    movdqa     xmm5, kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void RGBAToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, kRGBAToY
    movdqa     xmm5, kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void ARGBToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // pix
    movdqa     xmm5, kAddUV128
    movdqa     xmm6, kARGBToV
    movdqa     xmm7, kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4
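    // 0x88 gathered the even numbered pixels and 0xdd the odd ones, so this
    // final pavgb completes the 2x2 box filter begun by the row average above.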

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void ARGBToUVJRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                        uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // pix
    movdqa     xmm5, kAddUVJ128
    movdqa     xmm6, kARGBToVJ
    movdqa     xmm7, kARGBToUJ
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    paddw      xmm0, xmm5            // +.5 rounding -> unsigned
    paddw      xmm1, xmm5
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

#ifdef HAS_ARGBTOUVROW_AVX2
__declspec(naked)
void ARGBToUVRow_AVX2(const uint8* src_argb0, int src_stride_argb,
                      uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // pix
    vbroadcastf128 ymm5, kAddUV128
    vbroadcastf128 ymm6, kARGBToV
    vbroadcastf128 ymm7, kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 32x2 argb pixels to 16x1 */
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vmovdqu    ymm2, [eax + 64]
    vmovdqu    ymm3, [eax + 96]
    vpavgb     ymm0, ymm0, [eax + esi]
    vpavgb     ymm1, ymm1, [eax + esi + 32]
    vpavgb     ymm2, ymm2, [eax + esi + 64]
    vpavgb     ymm3, ymm3, [eax + esi + 96]
    lea        eax,  [eax + 128]
    vshufps    ymm4, ymm0, ymm1, 0x88
    vshufps    ymm0, ymm0, ymm1, 0xdd
    vpavgb     ymm0, ymm0, ymm4  // mutated by vshufps
    vshufps    ymm4, ymm2, ymm3, 0x88
    vshufps    ymm2, ymm2, ymm3, 0xdd
    vpavgb     ymm2, ymm2, ymm4  // mutated by vshufps

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 32 different pixels, it's 16 pixels of U and 16 of V
1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582
    vpmaddubsw ymm1, ymm0, ymm7  // U
    vpmaddubsw ymm3, ymm2, ymm7
    vpmaddubsw ymm0, ymm0, ymm6  // V
    vpmaddubsw ymm2, ymm2, ymm6
    vphaddw    ymm1, ymm1, ymm3  // mutates
    vphaddw    ymm0, ymm0, ymm2
    vpsraw     ymm1, ymm1, 8
    vpsraw     ymm0, ymm0, 8
    vpacksswb  ymm0, ymm1, ymm0  // mutates
    vpermq     ymm0, ymm0, 0xd8  // For vpacksswb
    vpshufb    ymm0, ymm0, kShufARGBToUV_AVX  // For vshufps + vphaddw
    vpaddb     ymm0, ymm0, ymm5  // -> unsigned

    // step 3 - store 16 U and 16 V values
    vextractf128 [edx], ymm0, 0 // U
    vextractf128 [edx + edi], ymm0, 1 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTOUVROW_AVX2
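
// Note on the "mutates" comments in the AVX2 code above: the 256-bit forms of
// vshufps, vphaddw and vpacksswb operate on two independent 128-bit lanes, so
// intermediate results come out lane-interleaved.  The trailing vpermq and
// vpshufb (kShufARGBToUV_AVX) restore linear byte order before the store.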

__declspec(naked)
void ARGBToUV444Row_SSSE3(const uint8* src_argb0,
                          uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]   // src_argb
    mov        edx, [esp + 4 + 8]   // dst_u
    mov        edi, [esp + 4 + 12]  // dst_v
    mov        ecx, [esp + 4 + 16]  // width
    movdqa     xmm5, kAddUV128
    movdqa     xmm6, kARGBToV
    movdqa     xmm7, kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* convert to U and V */
    movdqu     xmm0, [eax]          // U
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm7
    pmaddubsw  xmm1, xmm7
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm3, xmm7
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psraw      xmm0, 8
    psraw      xmm2, 8
    packsswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0

    movdqu     xmm0, [eax]          // V
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm6
    pmaddubsw  xmm1, xmm6
    pmaddubsw  xmm2, xmm6
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psraw      xmm0, 8
    psraw      xmm2, 8
    packsswb   xmm0, xmm2
    paddb      xmm0, xmm5
    lea        eax,  [eax + 64]
    movdqu     [edx + edi], xmm0
    lea        edx,  [edx + 16]
    sub        ecx,  16
    jg         convertloop

    pop        edi
    ret
  }
}

__declspec(naked)
void ARGBToUV422Row_SSSE3(const uint8* src_argb0,
                          uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]   // src_argb
    mov        edx, [esp + 4 + 8]   // dst_u
    mov        edi, [esp + 4 + 12]  // dst_v
    mov        ecx, [esp + 4 + 16]  // width
    movdqa     xmm5, kAddUV128
    movdqa     xmm6, kARGBToV
    movdqa     xmm7, kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}

__declspec(naked)
void BGRAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // width
    movdqa     xmm5, kAddUV128
    movdqa     xmm6, kBGRAToV
    movdqa     xmm7, kBGRAToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void ABGRToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // width
    movdqa     xmm5, kAddUV128
    movdqa     xmm6, kABGRToV
    movdqa     xmm7, kABGRToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void RGBAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // width
    movdqa     xmm5, kAddUV128
    movdqa     xmm6, kRGBAToV
    movdqa     xmm7, kRGBAToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBTOYROW_SSSE3

// Read 16 UV from 444
#define READYUV444_AVX2 __asm {                                                \
    __asm vmovdqu    xmm0, [esi]                  /* U */         /* NOLINT */ \
    __asm vmovdqu    xmm1, [esi + edi]            /* V */         /* NOLINT */ \
    __asm lea        esi,  [esi + 16]                                          \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpermq     ymm1, ymm1, 0xd8                                          \
    __asm vpunpcklbw ymm0, ymm0, ymm1             /* UV */                     \
  }

// Read 8 UV from 422, upsample to 16 UV.
#define READYUV422_AVX2 __asm {                                                \
    __asm vmovq      xmm0, qword ptr [esi]        /* U */         /* NOLINT */ \
    __asm vmovq      xmm1, qword ptr [esi + edi]  /* V */         /* NOLINT */ \
    __asm lea        esi,  [esi + 8]                                           \
    __asm vpunpcklbw ymm0, ymm0, ymm1             /* UV */                     \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm0, ymm0             /* UVUV (upsample) */        \
  }

// Read 4 UV from 411, upsample to 16 UV.
#define READYUV411_AVX2 __asm {                                                \
    __asm vmovd      xmm0, dword ptr [esi]        /* U */         /* NOLINT */ \
    __asm vmovd      xmm1, dword ptr [esi + edi]  /* V */         /* NOLINT */ \
    __asm lea        esi,  [esi + 4]                                           \
    __asm vpunpcklbw ymm0, ymm0, ymm1             /* UV */                     \
    __asm vpunpcklwd ymm0, ymm0, ymm0             /* UVUV (upsample) */        \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpckldq ymm0, ymm0, ymm0             /* UVUVUVUV (upsample) */    \
  }

// Read 8 UV from NV12, upsample to 16 UV.
#define READNV12_AVX2 __asm {                                                  \
    __asm vmovdqu    xmm0, [esi]                  /* UV */                     \
    __asm lea        esi,  [esi + 16]                                          \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm0, ymm0             /* UVUV (upsample) */        \
  }
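
// A scalar sketch (not built) of what the READ* macros above leave in ymm0:
// interleaved UV bytes, with each chroma sample replicated across the luma
// samples it covers (x2 for 422, x4 for 411, no replication for 444).  The
// 422 case, with an illustrative name:
#if 0
static void ReadYUV422_C_Sketch(const uint8* src_u, const uint8* src_v,
                                uint8* dst_uv, int pairs) {
  int x;
  for (x = 0; x < pairs; ++x) {
    dst_uv[0] = dst_uv[2] = src_u[x];  // punpcklbw + punpcklwd produce
    dst_uv[1] = dst_uv[3] = src_v[x];  // U V U V covering two pixels.
    dst_uv += 4;
  }
}
#endif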

// Convert 16 pixels: 16 UV and 16 Y.
#define YUVTORGB_AVX2(YuvConstants) __asm {                                    \
    /* Step 1: Find 8 UV contributions to 16 R,G,B values */                   \
    __asm vpmaddubsw ymm2, ymm0, YuvConstants.kUVToR        /* scale R UV */   \
    __asm vpmaddubsw ymm1, ymm0, YuvConstants.kUVToG        /* scale G UV */   \
    __asm vpmaddubsw ymm0, ymm0, YuvConstants.kUVToB        /* scale B UV */   \
    __asm vmovdqu    ymm3, YuvConstants.kUVBiasR                               \
    __asm vpsubw     ymm2, ymm3, ymm2                                          \
    __asm vmovdqu    ymm3, YuvConstants.kUVBiasG                               \
    __asm vpsubw     ymm1, ymm3, ymm1                                          \
    __asm vmovdqu    ymm3, YuvConstants.kUVBiasB                               \
    __asm vpsubw     ymm0, ymm3, ymm0                                          \
    /* Step 2: Find Y contribution to 16 R,G,B values */                       \
    __asm vmovdqu    xmm3, [eax]                  /* NOLINT */                 \
    __asm lea        eax, [eax + 16]                                           \
    __asm vpermq     ymm3, ymm3, 0xd8                                          \
    __asm vpunpcklbw ymm3, ymm3, ymm3                                          \
    __asm vpmulhuw   ymm3, ymm3, YuvConstants.kYToRgb                          \
    __asm vpaddsw    ymm0, ymm0, ymm3           /* B += Y */                   \
    __asm vpaddsw    ymm1, ymm1, ymm3           /* G += Y */                   \
    __asm vpaddsw    ymm2, ymm2, ymm3           /* R += Y */                   \
    __asm vpsraw     ymm0, ymm0, 6                                             \
    __asm vpsraw     ymm1, ymm1, 6                                             \
    __asm vpsraw     ymm2, ymm2, 6                                             \
    __asm vpackuswb  ymm0, ymm0, ymm0           /* B */                        \
    __asm vpackuswb  ymm1, ymm1, ymm1           /* G */                        \
    __asm vpackuswb  ymm2, ymm2, ymm2           /* R */                        \
  }
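
// A minimal scalar sketch (not built) of the fixed-point math in YUVTORGB_AVX2
// and YUVTORGB below, written in terms of the BT.601 constants (YG, UB, UG,
// VG, VR, BB, BG, BR) defined at the top of this file.  SIMD saturation
// (vpaddsw/vpackuswb) is approximated with a plain clamp, and the helper
// names are illustrative only.
#if 0
static __inline int Clamp255_Sketch(int v) {
  return v < 0 ? 0 : (v > 255 ? 255 : v);
}
static __inline void YuvPixel_C_Sketch(uint8 y, uint8 u, uint8 v,
                                       uint8* b, uint8* g, uint8* r) {
  // pmulhuw of the doubled Y byte against kYToRgb: (y * 0x0101 * YG) >> 16.
  int y1 = (int)(((uint32)y * 0x0101 * YG) >> 16);
  // Each channel is bias - UV contribution + Y contribution, scaled by 64.
  *b = (uint8)Clamp255_Sketch((BB - (u * UB) + y1) >> 6);
  *g = (uint8)Clamp255_Sketch((BG - (u * UG + v * VG) + y1) >> 6);
  *r = (uint8)Clamp255_Sketch((BR - (v * VR) + y1) >> 6);
}
#endif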

// Store 16 ARGB values.
#define STOREARGB_AVX2 __asm {                                                 \
    /* Step 3: Weave into ARGB */                                              \
    __asm vpunpcklbw ymm0, ymm0, ymm1           /* BG */                       \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpcklbw ymm2, ymm2, ymm5           /* RA */                       \
    __asm vpermq     ymm2, ymm2, 0xd8                                          \
    __asm vpunpcklwd ymm1, ymm0, ymm2           /* BGRA first 8 pixels */      \
    __asm vpunpckhwd ymm0, ymm0, ymm2           /* BGRA next 8 pixels */       \
    __asm vmovdqu    0[edx], ymm1                                              \
    __asm vmovdqu    32[edx], ymm0                                             \
    __asm lea        edx,  [edx + 64]                                          \
  }

#ifdef HAS_I422TOARGBROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void I422ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2(kYuvConstants)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TOARGBROW_AVX2

#ifdef HAS_J422TOARGBROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void J422ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2(kYuvJConstants)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_J422TOARGBROW_AVX2

#ifdef HAS_I444TOARGBROW_AVX2
// 16 pixels
// 16 UV values with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void I444ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV444_AVX2
    YUVTORGB_AVX2(kYuvConstants)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I444TOARGBROW_AVX2

#ifdef HAS_I411TOARGBROW_AVX2
// 16 pixels
// 4 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void I411ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV411_AVX2
    YUVTORGB_AVX2(kYuvConstants)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I411TOARGBROW_AVX2

#ifdef HAS_NV12TOARGBROW_AVX2
// 16 pixels.
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void NV12ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* uv_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // Y
    mov        esi, [esp + 4 + 8]   // UV
    mov        edx, [esp + 4 + 12]  // argb
    mov        ecx, [esp + 4 + 16]  // width
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READNV12_AVX2
    YUVTORGB_AVX2(kYuvConstants)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_NV12TOARGBROW_AVX2

#ifdef HAS_NV21TOARGBROW_AVX2
2183 2184
// 16 pixels.
// 8 VU values upsampled to 16 VU, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void NV21ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* uv_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // Y
    mov        esi, [esp + 4 + 8]   // UV
    mov        edx, [esp + 4 + 12]  // argb
    mov        ecx, [esp + 4 + 16]  // width
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READNV12_AVX2
    YUVTORGB_AVX2(kYvuConstants)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_NV21TOARGBROW_AVX2

#ifdef HAS_I422TOBGRAROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 BGRA (64 bytes).
// TODO(fbarchard): Use macros to reduce duplicate code.  See SSSE3.
__declspec(naked)
void I422ToBGRARow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2(kYuvConstants)

    // Step 3: Weave into BGRA
    vpunpcklbw ymm1, ymm1, ymm0           // GB
    vpermq     ymm1, ymm1, 0xd8
    vpunpcklbw ymm2, ymm5, ymm2           // AR
    vpermq     ymm2, ymm2, 0xd8
    vpunpcklwd ymm0, ymm2, ymm1           // ARGB first 8 pixels
    vpunpckhwd ymm2, ymm2, ymm1           // ARGB next 8 pixels
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm2
    lea        edx,  [edx + 64]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TOBGRAROW_AVX2

#ifdef HAS_I422TORGBAROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 RGBA (64 bytes).
// TODO(fbarchard): Use macros to reduce duplicate code.  See SSSE3.
__declspec(naked)
void I422ToRGBARow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2(kYuvConstants)

    // Step 3: Weave into RGBA
    vpunpcklbw ymm1, ymm1, ymm2           // GR
    vpermq     ymm1, ymm1, 0xd8
    vpunpcklbw ymm2, ymm5, ymm0           // AB
    vpermq     ymm2, ymm2, 0xd8
    vpunpcklwd ymm0, ymm2, ymm1           // ABGR first 8 pixels
    vpunpckhwd ymm1, ymm2, ymm1           // ABGR next 8 pixels
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx,  [edx + 64]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TORGBAROW_AVX2

#ifdef HAS_I422TOABGRROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ABGR (64 bytes).
// TODO(fbarchard): Use macros to reduce duplicate code.  See SSSE3.
__declspec(naked)
void I422ToABGRRow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2(kYuvConstants)

    // Step 3: Weave into ABGR
    vpunpcklbw ymm1, ymm2, ymm1           // RG
    vpermq     ymm1, ymm1, 0xd8
    vpunpcklbw ymm2, ymm0, ymm5           // BA
    vpermq     ymm2, ymm2, 0xd8
    vpunpcklwd ymm0, ymm1, ymm2           // RGBA first 8 pixels
    vpunpckhwd ymm1, ymm1, ymm2           // RGBA next 8 pixels
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx,  [edx + 64]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TOABGRROW_AVX2

#if defined(HAS_I422TOARGBROW_SSSE3)
// TODO(fbarchard): Read that does half size on Y and treats 420 as 444.

// Read 8 UV from 444.
#define READYUV444 __asm {                                                     \
    __asm movq       xmm0, qword ptr [esi] /* U */                /* NOLINT */ \
    __asm movq       xmm1, qword ptr [esi + edi] /* V */          /* NOLINT */ \
    __asm lea        esi,  [esi + 8]                                           \
    __asm punpcklbw  xmm0, xmm1           /* UV */                             \
  }

// Read 4 UV from 422, upsample to 8 UV.
#define READYUV422 __asm {                                                     \
    __asm movd       xmm0, [esi]          /* U */                              \
    __asm movd       xmm1, [esi + edi]    /* V */                              \
    __asm lea        esi,  [esi + 4]                                           \
    __asm punpcklbw  xmm0, xmm1           /* UV */                             \
    __asm punpcklwd  xmm0, xmm0           /* UVUV (upsample) */                \
  }

// Read 2 UV from 411, upsample to 8 UV.
#define READYUV411 __asm {                                                     \
    __asm movzx      ebx, word ptr [esi]        /* U */           /* NOLINT */ \
    __asm movd       xmm0, ebx                                                 \
    __asm movzx      ebx, word ptr [esi + edi]  /* V */           /* NOLINT */ \
    __asm movd       xmm1, ebx                                                 \
    __asm lea        esi,  [esi + 2]                                           \
    __asm punpcklbw  xmm0, xmm1           /* UV */                             \
    __asm punpcklwd  xmm0, xmm0           /* UVUV (upsample) */                \
    __asm punpckldq  xmm0, xmm0           /* UVUVUVUV (upsample) */            \
  }

// Read 4 UV from NV12, upsample to 8 UV.
#define READNV12 __asm {                                                       \
    __asm movq       xmm0, qword ptr [esi] /* UV */               /* NOLINT */ \
    __asm lea        esi,  [esi + 8]                                           \
    __asm punpcklwd  xmm0, xmm0           /* UVUV (upsample) */                \
  }

// Convert 8 pixels: 8 UV and 8 Y.
#define YUVTORGB(YuvConstants) __asm {                                         \
    /* Step 1: Find 4 UV contributions to 8 R,G,B values */                    \
    __asm movdqa     xmm1, xmm0                                                \
    __asm movdqa     xmm2, xmm0                                                \
    __asm movdqa     xmm3, xmm0                                                \
    __asm movdqa     xmm0, YuvConstants.kUVBiasB /* unbias back to signed */   \
    __asm pmaddubsw  xmm1, YuvConstants.kUVToB   /* scale B UV */              \
    __asm psubw      xmm0, xmm1                                                \
    __asm movdqa     xmm1, YuvConstants.kUVBiasG                               \
    __asm pmaddubsw  xmm2, YuvConstants.kUVToG   /* scale G UV */              \
    __asm psubw      xmm1, xmm2                                                \
    __asm movdqa     xmm2, YuvConstants.kUVBiasR                               \
    __asm pmaddubsw  xmm3, YuvConstants.kUVToR   /* scale R UV */              \
    __asm psubw      xmm2, xmm3                                                \
    /* Step 2: Find Y contribution to 8 R,G,B values */                        \
    __asm movq       xmm3, qword ptr [eax]                        /* NOLINT */ \
    __asm lea        eax, [eax + 8]                                            \
    __asm punpcklbw  xmm3, xmm3                                                \
    __asm pmulhuw    xmm3, YuvConstants.kYToRgb                                \
    __asm paddsw     xmm0, xmm3           /* B += Y */                         \
    __asm paddsw     xmm1, xmm3           /* G += Y */                         \
    __asm paddsw     xmm2, xmm3           /* R += Y */                         \
    __asm psraw      xmm0, 6                                                   \
    __asm psraw      xmm1, 6                                                   \
    __asm psraw      xmm2, 6                                                   \
    __asm packuswb   xmm0, xmm0           /* B */                              \
    __asm packuswb   xmm1, xmm1           /* G */                              \
    __asm packuswb   xmm2, xmm2           /* R */                              \
  }

// Store 8 ARGB values.
#define STOREARGB __asm {                                                      \
    /* Step 3: Weave into ARGB */                                              \
    __asm punpcklbw  xmm0, xmm1           /* BG */                             \
    __asm punpcklbw  xmm2, xmm5           /* RA */                             \
    __asm movdqa     xmm1, xmm0                                                \
    __asm punpcklwd  xmm0, xmm2           /* BGRA first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm2           /* BGRA next 4 pixels */             \
    __asm movdqu     0[edx], xmm0                                              \
    __asm movdqu     16[edx], xmm1                                             \
    __asm lea        edx,  [edx + 32]                                          \
  }

// Store 8 BGRA values.
#define STOREBGRA __asm {                                                      \
    /* Step 3: Weave into BGRA */                                              \
    __asm pcmpeqb    xmm5, xmm5           /* generate 0xffffffff for alpha */  \
    __asm punpcklbw  xmm1, xmm0           /* GB */                             \
    __asm punpcklbw  xmm5, xmm2           /* AR */                             \
    __asm movdqa     xmm0, xmm5                                                \
    __asm punpcklwd  xmm5, xmm1           /* BGRA first 4 pixels */            \
    __asm punpckhwd  xmm0, xmm1           /* BGRA next 4 pixels */             \
    __asm movdqu     0[edx], xmm5                                              \
    __asm movdqu     16[edx], xmm0                                             \
    __asm lea        edx,  [edx + 32]                                          \
  }

// Store 8 ABGR values.
#define STOREABGR __asm {                                                      \
    /* Step 3: Weave into ABGR */                                              \
    __asm punpcklbw  xmm2, xmm1           /* RG */                             \
    __asm punpcklbw  xmm0, xmm5           /* BA */                             \
    __asm movdqa     xmm1, xmm2                                                \
    __asm punpcklwd  xmm2, xmm0           /* RGBA first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm0           /* RGBA next 4 pixels */             \
    __asm movdqu     0[edx], xmm2                                              \
    __asm movdqu     16[edx], xmm1                                             \
    __asm lea        edx,  [edx + 32]                                          \
  }

// Store 8 RGBA values.
#define STORERGBA __asm {                                                      \
    /* Step 3: Weave into RGBA */                                              \
    __asm pcmpeqb    xmm5, xmm5           /* generate 0xffffffff for alpha */  \
    __asm punpcklbw  xmm1, xmm2           /* GR */                             \
    __asm punpcklbw  xmm5, xmm0           /* AB */                             \
    __asm movdqa     xmm0, xmm5                                                \
    __asm punpcklwd  xmm5, xmm1           /* RGBA first 4 pixels */            \
    __asm punpckhwd  xmm0, xmm1           /* RGBA next 4 pixels */             \
    __asm movdqu     0[edx], xmm5                                              \
    __asm movdqu     16[edx], xmm0                                             \
    __asm lea        edx,  [edx + 32]                                          \
  }
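
// Memory byte order produced by the STORE* weaves (low address first):
//   STOREARGB: B G R A      STOREBGRA: A R G B
//   STOREABGR: R G B A      STORERGBA: A B G R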

// Store 8 RGB24 values.
#define STORERGB24 __asm {                                                     \
    /* Step 3: Weave into RRGB */                                              \
    __asm punpcklbw  xmm0, xmm1           /* BG */                             \
    __asm punpcklbw  xmm2, xmm2           /* RR */                             \
    __asm movdqa     xmm1, xmm0                                                \
    __asm punpcklwd  xmm0, xmm2           /* BGRR first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm2           /* BGRR next 4 pixels */             \
    /* Step 4: RRGB -> RGB24 */                                                \
    __asm pshufb     xmm0, xmm5           /* Pack first 8 and last 4 bytes. */ \
    __asm pshufb     xmm1, xmm6           /* Pack first 12 bytes. */           \
    __asm palignr    xmm1, xmm0, 12       /* last 4 bytes of xmm0 + 12 xmm1 */ \
    __asm movq       qword ptr 0[edx], xmm0  /* First 8 bytes */               \
    __asm movdqu     8[edx], xmm1         /* Last 16 bytes */                  \
    __asm lea        edx,  [edx + 24]                                          \
  }

// Store 8 RAW values.
#define STORERAW __asm {                                                       \
    /* Step 3: Weave into RRGB */                                              \
    __asm punpcklbw  xmm0, xmm1           /* BG */                             \
    __asm punpcklbw  xmm2, xmm2           /* RR */                             \
    __asm movdqa     xmm1, xmm0                                                \
    __asm punpcklwd  xmm0, xmm2           /* BGRR first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm2           /* BGRR next 4 pixels */             \
    /* Step 4: RRGB -> RAW */                                                  \
    __asm pshufb     xmm0, xmm5           /* Pack first 8 and last 4 bytes. */ \
    __asm pshufb     xmm1, xmm6           /* Pack first 12 bytes. */           \
    __asm palignr    xmm1, xmm0, 12       /* last 4 bytes of xmm0 + 12 xmm1 */ \
    __asm movq       qword ptr 0[edx], xmm0  /* First 8 bytes */               \
    __asm movdqu     8[edx], xmm1         /* Last 16 bytes */                  \
    __asm lea        edx,  [edx + 24]                                          \
  }

// Store 8 RGB565 values.
#define STORERGB565 __asm {                                                    \
    /* Step 3: Weave into RRGB */                                              \
    __asm punpcklbw  xmm0, xmm1           /* BG */                             \
    __asm punpcklbw  xmm2, xmm2           /* RR */                             \
    __asm movdqa     xmm1, xmm0                                                \
    __asm punpcklwd  xmm0, xmm2           /* BGRR first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm2           /* BGRR next 4 pixels */             \
    /* Step 4: RRGB -> RGB565 */                                               \
    __asm movdqa     xmm3, xmm0    /* B  first 4 pixels of argb */             \
    __asm movdqa     xmm2, xmm0    /* G */                                     \
    __asm pslld      xmm0, 8       /* R */                                     \
    __asm psrld      xmm3, 3       /* B */                                     \
    __asm psrld      xmm2, 5       /* G */                                     \
    __asm psrad      xmm0, 16      /* R */                                     \
    __asm pand       xmm3, xmm5    /* B */                                     \
    __asm pand       xmm2, xmm6    /* G */                                     \
    __asm pand       xmm0, xmm7    /* R */                                     \
    __asm por        xmm3, xmm2    /* BG */                                    \
    __asm por        xmm0, xmm3    /* BGR */                                   \
    __asm movdqa     xmm3, xmm1    /* B  next 4 pixels of argb */              \
    __asm movdqa     xmm2, xmm1    /* G */                                     \
    __asm pslld      xmm1, 8       /* R */                                     \
    __asm psrld      xmm3, 3       /* B */                                     \
    __asm psrld      xmm2, 5       /* G */                                     \
    __asm psrad      xmm1, 16      /* R */                                     \
    __asm pand       xmm3, xmm5    /* B */                                     \
    __asm pand       xmm2, xmm6    /* G */                                     \
    __asm pand       xmm1, xmm7    /* R */                                     \
    __asm por        xmm3, xmm2    /* BG */                                    \
    __asm por        xmm1, xmm3    /* BGR */                                   \
    __asm packssdw   xmm0, xmm1                                                \
    __asm movdqu     0[edx], xmm0  /* store 8 pixels of RGB565 */              \
    __asm lea        edx, [edx + 16]                                           \
  }
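
// A scalar sketch (not built) of the STORERGB565 packing above: 8:8:8 ARGB
// down to 5:6:5, matching the 0x001f/0x07e0/0xf800 masks built in the
// prologue.  The helper name is illustrative only.
#if 0
static __inline uint16 PackRGB565_Sketch(uint8 b, uint8 g, uint8 r) {
  return (uint16)((b >> 3) | ((g >> 2) << 5) | ((r >> 3) << 11));
}
#endif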

// 8 pixels.
// 8 UV values, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked)
void I444ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READYUV444
    YUVTORGB(kYuvConstants)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RGB24 (24 bytes).
__declspec(naked)
void I422ToRGB24Row_SSSE3(const uint8* y_buf,
                          const uint8* u_buf,
                          const uint8* v_buf,
                          uint8* dst_rgb24,
                          int width) {
                          int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // rgb24
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    movdqa     xmm5, kShuffleMaskARGBToRGB24_0
    movdqa     xmm6, kShuffleMaskARGBToRGB24

 convertloop:
    READYUV422
    YUVTORGB(kYuvConstants)
    STORERGB24

    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RAW (24 bytes).
__declspec(naked)
void I422ToRAWRow_SSSE3(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_raw,
                        int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // raw
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    movdqa     xmm5, kShuffleMaskARGBToRAW_0
    movdqa     xmm6, kShuffleMaskARGBToRAW

 convertloop:
    READYUV422
    YUVTORGB(kYuvConstants)
    STORERAW

    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RGB565 (16 bytes).
__declspec(naked)
void I422ToRGB565Row_SSSE3(const uint8* y_buf,
                           const uint8* u_buf,
                           const uint8* v_buf,
                           uint8* rgb565_buf,
                           int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // rgb565
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5       // generate mask 0x0000001f
    psrld      xmm5, 27
    pcmpeqb    xmm6, xmm6       // generate mask 0x000007e0
    psrld      xmm6, 26
    pslld      xmm6, 5
    pcmpeqb    xmm7, xmm7       // generate mask 0xfffff800
    pslld      xmm7, 11

 convertloop:
    READYUV422
    YUVTORGB(kYuvConstants)
    STORERGB565

    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked)
void I422ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READYUV422
    YUVTORGB(kYuvConstants)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// JPeg color space version of I422ToARGB
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked)
void J422ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READYUV422
    YUVTORGB(kYuvJConstants)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 2 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
// Similar to I420 but duplicate UV once more.
__declspec(naked)
void I411ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         int width) {
  __asm {
    push       ebx
    push       esi
    push       edi
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ecx, [esp + 12 + 20]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5            // generate 0xffffffff for alpha

 convertloop:
    READYUV411  // modifies EBX
    YUVTORGB(kYuvConstants)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    pop        ebx
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked)
void NV12ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* uv_buf,
                         uint8* dst_argb,
                         int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // Y
    mov        esi, [esp + 4 + 8]   // UV
    mov        edx, [esp + 4 + 12]  // argb
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READNV12
    YUVTORGB(kYuvConstants)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        esi
    ret
  }
}

// 8 pixels.
// 4 VU values upsampled to 8 VU, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked)
void NV21ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* uv_buf,
                         uint8* dst_argb,
                         int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // Y
    mov        esi, [esp + 4 + 8]   // UV
    mov        edx, [esp + 4 + 12]  // argb
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READNV12
    YUVTORGB(kYvuConstants)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        esi
    ret
  }
}

__declspec(naked)
void I422ToBGRARow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_bgra,
                         int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // bgra
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi

 convertloop:
    READYUV422
    YUVTORGB(kYuvConstants)
    STOREBGRA

    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void I422ToABGRRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_abgr,
                         int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // abgr
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READYUV422
    YUVTORGB(kYuvConstants)
    STOREABGR

    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void I422ToRGBARow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_rgba,
                         int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // rgba
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi

 convertloop:
    READYUV422
    YUVTORGB(kYuvConstants)
    STORERGBA

    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

#endif  // HAS_I422TOARGBROW_SSSE3

#ifdef HAS_I400TOARGBROW_SSE2
// 8 pixels of Y converted to 8 pixels of ARGB (32 bytes).
__declspec(naked)
void I400ToARGBRow_SSE2(const uint8* y_buf,
                        uint8* rgb_buf,
                        int width) {
  __asm {
    mov        eax, 0x4a354a35      // 4a35 = 18997 = round(1.164 * 64 * 256)
    movd       xmm2, eax
    pshufd     xmm2, xmm2,0
    mov        eax, 0x04880488      // 0488 = 1160 = round(1.164 * 64 * 16)
    movd       xmm3, eax
    pshufd     xmm3, xmm3, 0
    pcmpeqb    xmm4, xmm4           // generate mask 0xff000000
    pslld      xmm4, 24

    mov        eax, [esp + 4]       // Y
    mov        edx, [esp + 8]       // rgb
    mov        ecx, [esp + 12]      // width

 convertloop:
    // Step 1: Scale Y contribution to 8 G values. G = (y - 16) * 1.164
    movq       xmm0, qword ptr [eax]
    lea        eax, [eax + 8]
    punpcklbw  xmm0, xmm0           // Y.Y
    pmulhuw    xmm0, xmm2
    psubusw    xmm0, xmm3
    psrlw      xmm0, 6
    packuswb   xmm0, xmm0           // G

    // Step 2: Weave into ARGB
    punpcklbw  xmm0, xmm0           // GG
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm0           // BGRA first 4 pixels
    punpckhwd  xmm1, xmm1           // BGRA next 4 pixels
    por        xmm0, xmm4
    por        xmm1, xmm4
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx,  [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
#endif  // HAS_I400TOARGBROW_SSE2
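
// A scalar sketch (not built) of I400ToARGBRow above: expand luma to gray,
// opaque ARGB.  0x4a35 and 0x0488 are the constants loaded into xmm2/xmm3;
// the function name is illustrative only.
#if 0
static void I400ToARGBRow_C_Sketch(const uint8* y_buf, uint8* rgb_buf,
                                   int width) {
  int x;
  for (x = 0; x < width; ++x) {
    // pmulhuw step: ((y * 0x0101) * 0x4a35) >> 16, i.e. 1.164 * 64 * y.
    uint32 g = ((uint32)y_buf[x] * 0x0101 * 0x4a35) >> 16;
    g = (g > 0x0488) ? (g - 0x0488) : 0;  // psubusw: subtract 1.164 * 64 * 16
    g >>= 6;                              // psrlw 6
    if (g > 255) g = 255;                 // packuswb saturation
    rgb_buf[0] = rgb_buf[1] = rgb_buf[2] = (uint8)g;  // B = G = R = gray
    rgb_buf[3] = 255;                     // alpha from the 0xff000000 mask
    rgb_buf += 4;
  }
}
#endif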

#ifdef HAS_I400TOARGBROW_AVX2
// 16 pixels of Y converted to 16 pixels of ARGB (64 bytes).
// note: vpunpcklbw mutates and vpackuswb unmutates.
__declspec(naked)
void I400ToARGBRow_AVX2(const uint8* y_buf,
                        uint8* rgb_buf,
                        int width) {
  __asm {
    mov        eax, 0x4a354a35      // 4a35 = 18997 = round(1.164 * 64 * 256 * 256 / 257)
    vmovd      xmm2, eax
    vbroadcastss ymm2, xmm2
    mov        eax, 0x04880488      // 0488 = 1160 = 1.164 * 64 * 16 - 64 / 2 (bias with rounding)
    vmovd      xmm3, eax
    vbroadcastss ymm3, xmm3
    vpcmpeqb   ymm4, ymm4, ymm4     // generate mask 0xff000000
    vpslld     ymm4, ymm4, 24

    mov        eax, [esp + 4]       // Y
    mov        edx, [esp + 8]       // rgb
    mov        ecx, [esp + 12]      // width

 convertloop:
    // Step 1: Scale Y contribution to 16 G values. G = (y - 16) * 1.164
    vmovdqu    xmm0, [eax]
    lea        eax, [eax + 16]
    vpermq     ymm0, ymm0, 0xd8           // vpunpcklbw mutates
    vpunpcklbw ymm0, ymm0, ymm0           // Y.Y
    vpmulhuw   ymm0, ymm0, ymm2
    vpsubusw   ymm0, ymm0, ymm3
    vpsrlw     ymm0, ymm0, 6
    vpackuswb  ymm0, ymm0, ymm0           // G.  still mutated: 3120

    // TODO(fbarchard): Weave alpha with unpack.
    // Step 2: Weave into ARGB
    vpunpcklbw ymm1, ymm0, ymm0           // GG - mutates
    vpermq     ymm1, ymm1, 0xd8
    vpunpcklwd ymm0, ymm1, ymm1           // GGGG first 8 pixels
    vpunpckhwd ymm1, ymm1, ymm1           // GGGG next 8 pixels
    vpor       ymm0, ymm0, ymm4
    vpor       ymm1, ymm1, ymm4
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx,  [edx + 64]
    sub        ecx, 16
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_I400TOARGBROW_AVX2

#ifdef HAS_MIRRORROW_SSSE3
// Shuffle table for reversing the bytes.
static const uvec8 kShuffleMirror = {
  15u, 14u, 13u, 12u, 11u, 10u, 9u, 8u, 7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u
};

// TODO(fbarchard): Replace lea with -16 offset.
__declspec(naked)
void MirrorRow_SSSE3(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    movdqa    xmm5, kShuffleMirror

 convertloop:
    movdqu    xmm0, [eax - 16 + ecx]
    pshufb    xmm0, xmm5
    movdqu    [edx], xmm0
    lea       edx, [edx + 16]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}
#endif  // HAS_MIRRORROW_SSSE3
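
// Scalar sketch of the mirror rows, for reference only (same hypothetical
// LIBYUV_REFERENCE_SKETCHES guard as above): kShuffleMirror reverses 16
// bytes per load, which is equivalent to this per-byte copy.
#ifdef LIBYUV_REFERENCE_SKETCHES
static void MirrorRow_Reference(const uint8* src, uint8* dst, int width) {
  for (int x = 0; x < width; ++x) {
    dst[x] = src[width - 1 - x];
  }
}
#endif  // LIBYUV_REFERENCE_SKETCHES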

#ifdef HAS_MIRRORROW_AVX2
__declspec(naked)
void MirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    vbroadcastf128 ymm5, kShuffleMirror

 convertloop:
    vmovdqu   ymm0, [eax - 32 + ecx]
    vpshufb   ymm0, ymm0, ymm5
    vpermq    ymm0, ymm0, 0x4e  // swap high and low halves
    vmovdqu   [edx], ymm0
    lea       edx, [edx + 32]
    sub       ecx, 32
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_MIRRORROW_AVX2

#ifdef HAS_MIRRORROW_SSE2
__declspec(naked)
void MirrorRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width

 convertloop:
    movdqu    xmm0, [eax - 16 + ecx]
    movdqa    xmm1, xmm0        // swap bytes
    psllw     xmm0, 8
    psrlw     xmm1, 8
    por       xmm0, xmm1
    pshuflw   xmm0, xmm0, 0x1b  // swap words
    pshufhw   xmm0, xmm0, 0x1b
    pshufd    xmm0, xmm0, 0x4e  // swap qwords
    movdqu    [edx], xmm0
    lea       edx, [edx + 16]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}
#endif  // HAS_MIRRORROW_SSE2

#ifdef HAS_MIRRORROW_UV_SSSE3
// Shuffle table for reversing the bytes of UV channels.
static const uvec8 kShuffleMirrorUV = {
  14u, 12u, 10u, 8u, 6u, 4u, 2u, 0u, 15u, 13u, 11u, 9u, 7u, 5u, 3u, 1u
};

__declspec(naked)
void MirrorUVRow_SSSE3(const uint8* src, uint8* dst_u, uint8* dst_v,
                       int width) {
  __asm {
    push      edi
    mov       eax, [esp + 4 + 4]   // src
    mov       edx, [esp + 4 + 8]   // dst_u
    mov       edi, [esp + 4 + 12]  // dst_v
    mov       ecx, [esp + 4 + 16]  // width
    movdqa    xmm1, kShuffleMirrorUV
    lea       eax, [eax + ecx * 2 - 16]
    sub       edi, edx

 convertloop:
    movdqu    xmm0, [eax]
    lea       eax, [eax - 16]
    pshufb    xmm0, xmm1
    movlpd    qword ptr [edx], xmm0
    movhpd    qword ptr [edx + edi], xmm0
    lea       edx, [edx + 8]
    sub       ecx, 8
    jg        convertloop

    pop       edi
    ret
  }
}
#endif  // HAS_MIRRORROW_UV_SSSE3
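
// Scalar sketch of MirrorUVRow: reverse an interleaved UV row while
// splitting it into U and V planes (reference only; hypothetical guard).
#ifdef LIBYUV_REFERENCE_SKETCHES
static void MirrorUVRow_Reference(const uint8* src, uint8* dst_u,
                                  uint8* dst_v, int width) {
  for (int x = 0; x < width; ++x) {
    dst_u[x] = src[(width - 1 - x) * 2 + 0];
    dst_v[x] = src[(width - 1 - x) * 2 + 1];
  }
}
#endif  // LIBYUV_REFERENCE_SKETCHES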

#ifdef HAS_ARGBMIRRORROW_SSE2
__declspec(naked)
void ARGBMirrorRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    lea       eax, [eax - 16 + ecx * 4]  // last 4 pixels.

 convertloop:
    movdqu    xmm0, [eax]
    lea       eax, [eax - 16]
    pshufd    xmm0, xmm0, 0x1b
    movdqu    [edx], xmm0
    lea       edx, [edx + 16]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}
#endif  // HAS_ARGBMIRRORROW_SSE2

#ifdef HAS_ARGBMIRRORROW_AVX2
// Shuffle table for reversing the bytes.
static const ulvec32 kARGBShuffleMirror_AVX2 = {
  7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u
};

__declspec(naked)
void ARGBMirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    vmovdqu   ymm5, kARGBShuffleMirror_AVX2

 convertloop:
    vpermd    ymm0, ymm5, [eax - 32 + ecx * 4]  // permute dword order
    vmovdqu   [edx], ymm0
    lea       edx, [edx + 32]
    sub       ecx, 8
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBMIRRORROW_AVX2

#ifdef HAS_SPLITUVROW_SSE2
__declspec(naked)
void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uv
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    movdqa     xmm2, xmm0
    movdqa     xmm3, xmm1
    pand       xmm0, xmm5   // even bytes
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    psrlw      xmm2, 8      // odd bytes
    psrlw      xmm3, 8
    packuswb   xmm2, xmm3
    movdqu     [edx], xmm0
    movdqu     [edx + edi], xmm2
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}

#endif  // HAS_SPLITUVROW_SSE2
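
// Scalar sketch of SplitUVRow: even bytes are U, odd bytes are V
// (reference only; hypothetical guard).
#ifdef LIBYUV_REFERENCE_SKETCHES
static void SplitUVRow_Reference(const uint8* src_uv, uint8* dst_u,
                                 uint8* dst_v, int pix) {
  for (int x = 0; x < pix; ++x) {
    dst_u[x] = src_uv[x * 2 + 0];
    dst_v[x] = src_uv[x * 2 + 1];
  }
}
#endif  // LIBYUV_REFERENCE_SKETCHES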

#ifdef HAS_SPLITUVROW_AVX2
__declspec(naked)
void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uv
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm2, ymm0, 8      // odd bytes
    vpsrlw     ymm3, ymm1, 8
    vpand      ymm0, ymm0, ymm5   // even bytes
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1
    vpackuswb  ymm2, ymm2, ymm3
    vpermq     ymm0, ymm0, 0xd8
    vpermq     ymm2, ymm2, 0xd8
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + edi], ymm2
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}
#endif  // HAS_SPLITUVROW_AVX2

#ifdef HAS_MERGEUVROW_SSE2
__declspec(naked)
void MergeUVRow_SSE2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
                     int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_u
    mov        edx, [esp + 4 + 8]    // src_v
    mov        edi, [esp + 4 + 12]   // dst_uv
    mov        ecx, [esp + 4 + 16]   // width
    sub        edx, eax

  convertloop:
    movdqu     xmm0, [eax]      // read 16 U's
    movdqu     xmm1, [eax + edx]  // and 16 V's
    lea        eax,  [eax + 16]
    movdqa     xmm2, xmm0
    punpcklbw  xmm0, xmm1       // first 8 UV pairs
    punpckhbw  xmm2, xmm1       // next 8 UV pairs
    movdqu     [edi], xmm0
    movdqu     [edi + 16], xmm2
    lea        edi, [edi + 32]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}
#endif  //  HAS_MERGEUVROW_SSE2
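
// Scalar sketch of MergeUVRow: interleave the U and V planes, the inverse
// of SplitUVRow above (reference only; hypothetical guard).
#ifdef LIBYUV_REFERENCE_SKETCHES
static void MergeUVRow_Reference(const uint8* src_u, const uint8* src_v,
                                 uint8* dst_uv, int width) {
  for (int x = 0; x < width; ++x) {
    dst_uv[x * 2 + 0] = src_u[x];
    dst_uv[x * 2 + 1] = src_v[x];
  }
}
#endif  // LIBYUV_REFERENCE_SKETCHES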

#ifdef HAS_MERGEUVROW_AVX2
__declspec(naked)
void MergeUVRow_AVX2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
                     int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_u
    mov        edx, [esp + 4 + 8]    // src_v
    mov        edi, [esp + 4 + 12]   // dst_uv
    mov        ecx, [esp + 4 + 16]   // width
    sub        edx, eax

  convertloop:
    vmovdqu    ymm0, [eax]           // read 32 U's
    vmovdqu    ymm1, [eax + edx]     // and 32 V's
    lea        eax,  [eax + 32]
    vpunpcklbw ymm2, ymm0, ymm1      // low 16 UV pairs. mutated qqword 0,2
    vpunpckhbw ymm0, ymm0, ymm1      // high 16 UV pairs. mutated qqword 1,3
    vextractf128 [edi], ymm2, 0       // bytes 0..15
    vextractf128 [edi + 16], ymm0, 0  // bytes 16..31
    vextractf128 [edi + 32], ymm2, 1  // bytes 32..47
    vextractf128 [edi + 48], ymm0, 1  // bytes 48..63
    lea        edi, [edi + 64]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}
#endif  //  HAS_MERGEUVROW_AVX2

#ifdef HAS_COPYROW_SSE2
// CopyRow copies 'count' bytes using a 16 byte load/store, 32 bytes at a time.
__declspec(naked)
void CopyRow_SSE2(const uint8* src, uint8* dst, int count) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax, [eax + 32]
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    ret
  }
}
#endif  // HAS_COPYROW_SSE2

#ifdef HAS_COPYROW_AVX
// CopyRow copies 'count' bytes using a 32 byte load/store, 64 bytes at a time.
__declspec(naked)
void CopyRow_AVX(const uint8* src, uint8* dst, int count) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax, [eax + 64]
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx, [edx + 64]
    sub        ecx, 64
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_COPYROW_AVX

// Multiple of 1.
__declspec(naked)
void CopyRow_ERMS(const uint8* src, uint8* dst, int count) {
  __asm {
    mov        eax, esi
    mov        edx, edi
    mov        esi, [esp + 4]   // src
    mov        edi, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    rep movsb
    mov        edi, edx
    mov        esi, eax
    ret
  }
}

#ifdef HAS_ARGBCOPYALPHAROW_SSE2
// width in pixels
__declspec(naked)
void ARGBCopyAlphaRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    pcmpeqb    xmm0, xmm0       // generate mask 0xff000000
    pslld      xmm0, 24
    pcmpeqb    xmm1, xmm1       // generate mask 0x00ffffff
    psrld      xmm1, 8

  convertloop:
    movdqu     xmm2, [eax]
    movdqu     xmm3, [eax + 16]
    lea        eax, [eax + 32]
    movdqu     xmm4, [edx]
    movdqu     xmm5, [edx + 16]
    pand       xmm2, xmm0
    pand       xmm3, xmm0
    pand       xmm4, xmm1
    pand       xmm5, xmm1
    por        xmm2, xmm4
    por        xmm3, xmm5
    movdqu     [edx], xmm2
    movdqu     [edx + 16], xmm3
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBCOPYALPHAROW_SSE2
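
// Scalar sketch of ARGBCopyAlphaRow: the 0xff000000 / 0x00ffffff masks above
// amount to copying only the alpha byte of each pixel (reference only;
// hypothetical guard).
#ifdef LIBYUV_REFERENCE_SKETCHES
static void ARGBCopyAlphaRow_Reference(const uint8* src, uint8* dst,
                                       int width) {
  for (int x = 0; x < width; ++x) {
    dst[x * 4 + 3] = src[x * 4 + 3];
  }
}
#endif  // LIBYUV_REFERENCE_SKETCHES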

#ifdef HAS_ARGBCOPYALPHAROW_AVX2
// width in pixels
__declspec(naked)
void ARGBCopyAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    vpcmpeqb   ymm0, ymm0, ymm0
    vpsrld     ymm0, ymm0, 8    // generate mask 0x00ffffff

  convertloop:
    vmovdqu    ymm1, [eax]
    vmovdqu    ymm2, [eax + 32]
    lea        eax, [eax + 64]
    vpblendvb  ymm1, ymm1, [edx], ymm0
    vpblendvb  ymm2, ymm2, [edx + 32], ymm0
    vmovdqu    [edx], ymm1
    vmovdqu    [edx + 32], ymm2
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBCOPYALPHAROW_AVX2

#ifdef HAS_ARGBCOPYYTOALPHAROW_SSE2
// width in pixels
__declspec(naked)
void ARGBCopyYToAlphaRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    pcmpeqb    xmm0, xmm0       // generate mask 0xff000000
    pslld      xmm0, 24
    pcmpeqb    xmm1, xmm1       // generate mask 0x00ffffff
    psrld      xmm1, 8

  convertloop:
    movq       xmm2, qword ptr [eax]  // 8 Y's
    lea        eax, [eax + 8]
    punpcklbw  xmm2, xmm2      // 8 Y's duplicated into 16 bytes
    punpckhwd  xmm3, xmm2      // high 4 Y's in top byte of each dword
    punpcklwd  xmm2, xmm2      // low 4 Y's in top byte of each dword
    movdqu     xmm4, [edx]
    movdqu     xmm5, [edx + 16]
    pand       xmm2, xmm0
    pand       xmm3, xmm0
    pand       xmm4, xmm1
    pand       xmm5, xmm1
    por        xmm2, xmm4
    por        xmm3, xmm5
    movdqu     [edx], xmm2
    movdqu     [edx + 16], xmm3
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBCOPYYTOALPHAROW_SSE2

#ifdef HAS_ARGBCOPYYTOALPHAROW_AVX2
// width in pixels
__declspec(naked)
void ARGBCopyYToAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    vpcmpeqb   ymm0, ymm0, ymm0
    vpsrld     ymm0, ymm0, 8    // generate mask 0x00ffffff

  convertloop:
    vpmovzxbd  ymm1, qword ptr [eax]
    vpmovzxbd  ymm2, qword ptr [eax + 8]
    lea        eax, [eax + 16]
    vpslld     ymm1, ymm1, 24
    vpslld     ymm2, ymm2, 24
    vpblendvb  ymm1, ymm1, [edx], ymm0
    vpblendvb  ymm2, ymm2, [edx + 32], ymm0
    vmovdqu    [edx], ymm1
    vmovdqu    [edx + 32], ymm2
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBCOPYYTOALPHAROW_AVX2

#ifdef HAS_SETROW_X86
// Write 'count' bytes using an 8 bit value repeated.
// Count should be multiple of 4.
__declspec(naked)
void SetRow_X86(uint8* dst, uint8 v8, int count) {
  __asm {
    movzx      eax, byte ptr [esp + 8]    // v8
    mov        edx, 0x01010101  // Duplicate byte to all bytes.
    mul        edx              // overwrites edx with upper part of result.
    mov        edx, edi
    mov        edi, [esp + 4]   // dst
    mov        ecx, [esp + 12]  // count
    shr        ecx, 2
    rep stosd
    mov        edi, edx
    ret
  }
}
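
// Sketch of the byte-splat trick above: multiplying the 8 bit value by
// 0x01010101 replicates it into all four bytes of a dword, which rep stosd
// then stores 4 bytes at a time (reference only; hypothetical guard).
#ifdef LIBYUV_REFERENCE_SKETCHES
static uint32 SplatByte_Reference(uint8 v8) {
  return (uint32)v8 * 0x01010101u;
}
#endif  // LIBYUV_REFERENCE_SKETCHES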

// Write 'count' bytes using an 8 bit value repeated.
__declspec(naked)
void SetRow_ERMS(uint8* dst, uint8 v8, int count) {
  __asm {
    mov        edx, edi
    mov        edi, [esp + 4]   // dst
    mov        eax, [esp + 8]   // v8
    mov        ecx, [esp + 12]  // count
    rep stosb
    mov        edi, edx
    ret
  }
}

// Write 'count' 32 bit values.
__declspec(naked)
void ARGBSetRow_X86(uint8* dst_argb, uint32 v32, int count) {
  __asm {
    mov        edx, edi
    mov        edi, [esp + 4]   // dst
    mov        eax, [esp + 8]   // v32
    mov        ecx, [esp + 12]  // count
    rep stosd
    mov        edi, edx
    ret
  }
}
#endif  // HAS_SETROW_X86

#ifdef HAS_YUY2TOYROW_AVX2
__declspec(naked)
void YUY2ToYRow_AVX2(const uint8* src_yuy2,
                     uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_yuy2
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5  // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpand      ymm0, ymm0, ymm5   // even bytes are Y
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    vzeroupper
    ret
  }
}

__declspec(naked)
void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2,
                      uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_yuy2
    mov        esi, [esp + 8 + 8]    // stride_yuy2
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vpavgb     ymm0, ymm0, [eax + esi]
    vpavgb     ymm1, ymm1, [eax + esi + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm0, ymm0, 8      // YUYV -> UVUV
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}

__declspec(naked)
void YUY2ToUV422Row_AVX2(const uint8* src_yuy2,
                         uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_yuy2
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm0, ymm0, 8      // YUYV -> UVUV
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}

__declspec(naked)
void UYVYToYRow_AVX2(const uint8* src_uyvy,
                     uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_uyvy
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // pix

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm0, ymm0, 8      // odd bytes are Y
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    vzeroupper
    ret
  }
}

__declspec(naked)
void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy,
                      uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_yuy2
    mov        esi, [esp + 8 + 8]    // stride_yuy2
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vpavgb     ymm0, ymm0, [eax + esi]
    vpavgb     ymm1, ymm1, [eax + esi + 32]
    lea        eax,  [eax + 64]
    vpand      ymm0, ymm0, ymm5   // UYVY -> UVUV
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}

__declspec(naked)
void UYVYToUV422Row_AVX2(const uint8* src_uyvy,
                         uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_yuy2
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpand      ymm0, ymm0, ymm5   // UYVY -> UVUV
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}
#endif  // HAS_YUY2TOYROW_AVX2

#ifdef HAS_YUY2TOYROW_SSE2
__declspec(naked)
void YUY2ToYRow_SSE2(const uint8* src_yuy2,
                     uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_yuy2
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // pix
    pcmpeqb    xmm5, xmm5        // generate mask 0x00ff00ff
    psrlw      xmm5, 8

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    pand       xmm0, xmm5   // even bytes are Y
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2,
                      uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_yuy2
    mov        esi, [esp + 8 + 8]    // stride_yuy2
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + esi]
    movdqu     xmm3, [eax + esi + 16]
    lea        eax,  [eax + 32]
    pavgb      xmm0, xmm2
    pavgb      xmm1, xmm3
    psrlw      xmm0, 8      // YUYV -> UVUV
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void YUY2ToUV422Row_SSE2(const uint8* src_yuy2,
                         uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_yuy2
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    psrlw      xmm0, 8      // YUYV -> UVUV
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}

__declspec(naked)
void UYVYToYRow_SSE2(const uint8* src_uyvy,
                     uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_uyvy
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // pix

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    psrlw      xmm0, 8    // odd bytes are Y
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy,
                      uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_yuy2
    mov        esi, [esp + 8 + 8]    // stride_yuy2
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + esi]
    movdqu     xmm3, [eax + esi + 16]
    lea        eax,  [eax + 32]
    pavgb      xmm0, xmm2
    pavgb      xmm1, xmm3
    pand       xmm0, xmm5   // UYVY -> UVUV
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void UYVYToUV422Row_SSE2(const uint8* src_uyvy,
                         uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_yuy2
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    pand       xmm0, xmm5   // UYVY -> UVUV
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}
#endif  // HAS_YUY2TOYROW_SSE2
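
// Scalar sketch of the YUY2 layout the rows above rely on: bytes are packed
// Y0 U Y1 V, so even bytes are luma and odd bytes carry the shared U/V of a
// 2 pixel pair; UYVY swaps the two (reference only; hypothetical guard).
#ifdef LIBYUV_REFERENCE_SKETCHES
static void YUY2ToYRow_Reference(const uint8* src_yuy2, uint8* dst_y,
                                 int pix) {
  for (int x = 0; x < pix; ++x) {
    dst_y[x] = src_yuy2[x * 2];  // pand with 0x00ff00ff keeps even bytes.
  }
}
#endif  // LIBYUV_REFERENCE_SKETCHES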

#ifdef HAS_ARGBBLENDROW_SSE2
// Blend 4 pixels at a time.
__declspec(naked)
void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                       uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm7, xmm7       // generate constant 1
    psrlw      xmm7, 15
    pcmpeqb    xmm6, xmm6       // generate mask 0x00ff00ff
    psrlw      xmm6, 8
    pcmpeqb    xmm5, xmm5       // generate mask 0xff00ff00
    psllw      xmm5, 8
    pcmpeqb    xmm4, xmm4       // generate mask 0xff000000
    pslld      xmm4, 24
    sub        ecx, 4
    jl         convertloop4b    // less than 4 pixels?

    // 4 pixel loop.
  convertloop4:
    movdqu     xmm3, [eax]      // src argb
    lea        eax, [eax + 16]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movdqu     xmm2, [esi]      // _r_b
    psrlw      xmm3, 8          // alpha
    pshufhw    xmm3, xmm3, 0F5h // 8 alpha words
    pshuflw    xmm3, xmm3, 0F5h
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movdqu     xmm1, [esi]      // _a_g
    lea        esi, [esi + 16]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        convertloop4

  convertloop4b:
    add        ecx, 4 - 1
    jl         convertloop1b

    // 1 pixel loop.
  convertloop1:
    movd       xmm3, [eax]      // src argb
    lea        eax, [eax + 4]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movd       xmm2, [esi]      // _r_b
    psrlw      xmm3, 8          // alpha
    pshufhw    xmm3, xmm3, 0F5h // 8 alpha words
    pshuflw    xmm3, xmm3, 0F5h
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movd       xmm1, [esi]      // _a_g
    lea        esi, [esi + 4]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        convertloop1

  convertloop1b:
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBBLENDROW_SSE2
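
// Scalar sketch of the blend math above: per channel,
// dst = src + (dst * (256 - src_alpha)) >> 8, with the result alpha forced
// to 255 (reference only; hypothetical guard).
#ifdef LIBYUV_REFERENCE_SKETCHES
static void ARGBBlendRow_Reference(const uint8* src_argb0,
                                   const uint8* src_argb1,
                                   uint8* dst_argb, int width) {
  for (int x = 0; x < width; ++x) {
    int a = src_argb0[x * 4 + 3];
    for (int c = 0; c < 3; ++c) {
      int v = src_argb0[x * 4 + c] +
              ((src_argb1[x * 4 + c] * (256 - a)) >> 8);
      dst_argb[x * 4 + c] = (uint8)(v > 255 ? 255 : v);  // paddusb saturates.
    }
    dst_argb[x * 4 + 3] = 255;  // por with 0xff000000 sets alpha to 255.
  }
}
#endif  // LIBYUV_REFERENCE_SKETCHES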

#ifdef HAS_ARGBBLENDROW_SSSE3
// Shuffle table for isolating alpha.
static const uvec8 kShuffleAlpha = {
  3u, 0x80, 3u, 0x80, 7u, 0x80, 7u, 0x80,
  11u, 0x80, 11u, 0x80, 15u, 0x80, 15u, 0x80
};
// Same as SSE2, but replaces:
//    psrlw      xmm3, 8          // alpha
//    pshufhw    xmm3, xmm3, 0F5h // 8 alpha words
//    pshuflw    xmm3, xmm3, 0F5h
// with..
//    pshufb     xmm3, kShuffleAlpha // alpha
// Blend 4 pixels at a time.

__declspec(naked)
void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
                        uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm7, xmm7       // generate constant 0x0001
    psrlw      xmm7, 15
    pcmpeqb    xmm6, xmm6       // generate mask 0x00ff00ff
    psrlw      xmm6, 8
    pcmpeqb    xmm5, xmm5       // generate mask 0xff00ff00
    psllw      xmm5, 8
    pcmpeqb    xmm4, xmm4       // generate mask 0xff000000
    pslld      xmm4, 24
    sub        ecx, 4
    jl         convertloop4b    // less than 4 pixels?

    // 4 pixel loop.
  convertloop4:
    movdqu     xmm3, [eax]      // src argb
    lea        eax, [eax + 16]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movdqu     xmm2, [esi]      // _r_b
    pshufb     xmm3, kShuffleAlpha // alpha
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movdqu     xmm1, [esi]      // _a_g
    lea        esi, [esi + 16]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        convertloop4

  convertloop4b:
    add        ecx, 4 - 1
    jl         convertloop1b

    // 1 pixel loop.
  convertloop1:
    movd       xmm3, [eax]      // src argb
    lea        eax, [eax + 4]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movd       xmm2, [esi]      // _r_b
    pshufb     xmm3, kShuffleAlpha // alpha
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movd       xmm1, [esi]      // _a_g
    lea        esi, [esi + 4]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        convertloop1

  convertloop1b:
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBBLENDROW_SSSE3

#ifdef HAS_ARGBATTENUATEROW_SSE2
// Attenuate 4 pixels at a time.
__declspec(naked)
void ARGBAttenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb0
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    pcmpeqb    xmm4, xmm4       // generate mask 0xff000000
    pslld      xmm4, 24
    pcmpeqb    xmm5, xmm5       // generate mask 0x00ffffff
    psrld      xmm5, 8

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    punpcklbw  xmm0, xmm0       // first 2
    pshufhw    xmm2, xmm0, 0FFh // 8 alpha words
    pshuflw    xmm2, xmm2, 0FFh
    pmulhuw    xmm0, xmm2       // rgb * a
    movdqu     xmm1, [eax]      // read 4 pixels
    punpckhbw  xmm1, xmm1       // next 2 pixels
    pshufhw    xmm2, xmm1, 0FFh // 8 alpha words
    pshuflw    xmm2, xmm2, 0FFh
    pmulhuw    xmm1, xmm2       // rgb * a
    movdqu     xmm2, [eax]      // alphas
    lea        eax, [eax + 16]
    psrlw      xmm0, 8
    pand       xmm2, xmm4
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    pand       xmm0, xmm5       // keep original alphas
    por        xmm0, xmm2
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBATTENUATEROW_SSE2
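
// Approximate scalar sketch of attenuation: multiply B, G and R by alpha
// and keep alpha.  Rounding differs slightly from the pmulhuw path above
// (reference only; hypothetical guard).
#ifdef LIBYUV_REFERENCE_SKETCHES
static void ARGBAttenuateRow_Reference(const uint8* src_argb, uint8* dst_argb,
                                       int width) {
  for (int x = 0; x < width; ++x) {
    int a = src_argb[x * 4 + 3];
    for (int c = 0; c < 3; ++c) {
      dst_argb[x * 4 + c] = (uint8)((src_argb[x * 4 + c] * (a + 1)) >> 8);
    }
    dst_argb[x * 4 + 3] = (uint8)a;
  }
}
#endif  // LIBYUV_REFERENCE_SKETCHES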

#ifdef HAS_ARGBATTENUATEROW_SSSE3
// Shuffle table duplicating alpha.
static const uvec8 kShuffleAlpha0 = {
  3u, 3u, 3u, 3u, 3u, 3u, 128u, 128u, 7u, 7u, 7u, 7u, 7u, 7u, 128u, 128u,
};
static const uvec8 kShuffleAlpha1 = {
  11u, 11u, 11u, 11u, 11u, 11u, 128u, 128u,
  15u, 15u, 15u, 15u, 15u, 15u, 128u, 128u,
};
__declspec(naked)
void ARGBAttenuateRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb0
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    pcmpeqb    xmm3, xmm3       // generate mask 0xff000000
    pslld      xmm3, 24
    movdqa     xmm4, kShuffleAlpha0
    movdqa     xmm5, kShuffleAlpha1

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    pshufb     xmm0, xmm4       // isolate first 2 alphas
    movdqu     xmm1, [eax]      // read 4 pixels
    punpcklbw  xmm1, xmm1       // first 2 pixel rgbs
    pmulhuw    xmm0, xmm1       // rgb * a
    movdqu     xmm1, [eax]      // read 4 pixels
    pshufb     xmm1, xmm5       // isolate next 2 alphas
    movdqu     xmm2, [eax]      // read 4 pixels
    punpckhbw  xmm2, xmm2       // next 2 pixel rgbs
    pmulhuw    xmm1, xmm2       // rgb * a
    movdqu     xmm2, [eax]      // mask original alpha
    lea        eax, [eax + 16]
    pand       xmm2, xmm3
    psrlw      xmm0, 8
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    por        xmm0, xmm2       // copy original alpha
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBATTENUATEROW_SSSE3

#ifdef HAS_ARGBATTENUATEROW_AVX2
// Shuffle table duplicating alpha.
static const uvec8 kShuffleAlpha_AVX2 = {
  6u, 7u, 6u, 7u, 6u, 7u, 128u, 128u, 14u, 15u, 14u, 15u, 14u, 15u, 128u, 128u
};
__declspec(naked)
void ARGBAttenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb0
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    sub        edx, eax
    vbroadcastf128 ymm4, kShuffleAlpha_AVX2
    vpcmpeqb   ymm5, ymm5, ymm5 // generate mask 0xff000000
    vpslld     ymm5, ymm5, 24

 convertloop:
    vmovdqu    ymm6, [eax]       // read 8 pixels.
    vpunpcklbw ymm0, ymm6, ymm6  // low 4 pixels. mutated.
    vpunpckhbw ymm1, ymm6, ymm6  // high 4 pixels. mutated.
    vpshufb    ymm2, ymm0, ymm4  // low 4 alphas
    vpshufb    ymm3, ymm1, ymm4  // high 4 alphas
    vpmulhuw   ymm0, ymm0, ymm2  // rgb * a
    vpmulhuw   ymm1, ymm1, ymm3  // rgb * a
    vpand      ymm6, ymm6, ymm5  // isolate alpha
    vpsrlw     ymm0, ymm0, 8
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1  // unmutated.
    vpor       ymm0, ymm0, ymm6  // copy original alpha
    vmovdqu    [eax + edx], ymm0
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBATTENUATEROW_AVX2

#ifdef HAS_ARGBUNATTENUATEROW_SSE2
// Unattenuate 4 pixels at a time.
__declspec(naked)
void ARGBUnattenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb,
                             int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb0
    mov        edx, [esp + 8 + 8]   // dst_argb
    mov        ecx, [esp + 8 + 12]  // width

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    movzx      esi, byte ptr [eax + 3]  // first alpha
    movzx      edi, byte ptr [eax + 7]  // second alpha
    punpcklbw  xmm0, xmm0       // first 2
    movd       xmm2, dword ptr fixed_invtbl8[esi * 4]
    movd       xmm3, dword ptr fixed_invtbl8[edi * 4]
    pshuflw    xmm2, xmm2, 040h // first 4 inv_alpha words.  1, a, a, a
    pshuflw    xmm3, xmm3, 040h // next 4 inv_alpha words
    movlhps    xmm2, xmm3
    pmulhuw    xmm0, xmm2       // rgb * a

    movdqu     xmm1, [eax]      // read 4 pixels
    movzx      esi, byte ptr [eax + 11]  // third alpha
    movzx      edi, byte ptr [eax + 15]  // fourth alpha
    punpckhbw  xmm1, xmm1       // next 2
    movd       xmm2, dword ptr fixed_invtbl8[esi * 4]
    movd       xmm3, dword ptr fixed_invtbl8[edi * 4]
    pshuflw    xmm2, xmm2, 040h // first 4 inv_alpha words
    pshuflw    xmm3, xmm3, 040h // next 4 inv_alpha words
    movlhps    xmm2, xmm3
    pmulhuw    xmm1, xmm2       // rgb * a
    lea        eax, [eax + 16]

    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop
    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBUNATTENUATEROW_SSE2
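
// Approximate scalar sketch of unattenuation: divide B, G and R by alpha.
// The SIMD code uses the fixed_invtbl8 reciprocal table instead of a divide,
// so rounding differs slightly (reference only; hypothetical guard).
#ifdef LIBYUV_REFERENCE_SKETCHES
static void ARGBUnattenuateRow_Reference(const uint8* src_argb,
                                         uint8* dst_argb, int width) {
  for (int x = 0; x < width; ++x) {
    int a = src_argb[x * 4 + 3];
    for (int c = 0; c < 3; ++c) {
      int v = a ? (src_argb[x * 4 + c] * 255 + a / 2) / a : 0;
      dst_argb[x * 4 + c] = (uint8)(v > 255 ? 255 : v);
    }
    dst_argb[x * 4 + 3] = (uint8)a;
  }
}
#endif  // LIBYUV_REFERENCE_SKETCHES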
#ifdef HAS_ARGBUNATTENUATEROW_AVX2
// Shuffle table duplicating alpha.
static const uvec8 kUnattenShuffleAlpha_AVX2 = {
  0u, 1u, 0u, 1u, 0u, 1u, 6u, 7u, 8u, 9u, 8u, 9u, 8u, 9u, 14u, 15u
};
// TODO(fbarchard): Enable USE_GATHER for future hardware if faster.
// USE_GATHER is not on by default, due to being a slow instruction.
#ifdef USE_GATHER
__declspec(naked)
void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb,
                             int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb0
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    sub        edx, eax
    vbroadcastf128 ymm4, kUnattenShuffleAlpha_AVX2

 convertloop:
    vmovdqu    ymm6, [eax]       // read 8 pixels.
    vpcmpeqb   ymm5, ymm5, ymm5  // generate mask 0xffffffff for gather.
    vpsrld     ymm2, ymm6, 24    // alpha in low 8 bits.
    vpunpcklbw ymm0, ymm6, ymm6  // low 4 pixels. mutated.
    vpunpckhbw ymm1, ymm6, ymm6  // high 4 pixels. mutated.
    vpgatherdd ymm3, [ymm2 * 4 + fixed_invtbl8], ymm5  // ymm5 cleared.  1, a
    vpunpcklwd ymm2, ymm3, ymm3  // low 4 inverted alphas. mutated. 1, 1, a, a
    vpunpckhwd ymm3, ymm3, ymm3  // high 4 inverted alphas. mutated.
    vpshufb    ymm2, ymm2, ymm4  // replicate low 4 alphas. 1, a, a, a
    vpshufb    ymm3, ymm3, ymm4  // replicate high 4 alphas
    vpmulhuw   ymm0, ymm0, ymm2  // rgb * ia
    vpmulhuw   ymm1, ymm1, ymm3  // rgb * ia
    vpackuswb  ymm0, ymm0, ymm1  // unmutated.
    vmovdqu    [eax + edx], ymm0
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop

    vzeroupper
    ret
  }
}
#else  // USE_GATHER
__declspec(naked)
void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb,
                             int width) {
  __asm {

    mov        eax, [esp + 4]   // src_argb0
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    sub        edx, eax
    vbroadcastf128 ymm5, kUnattenShuffleAlpha_AVX2

    push       esi
    push       edi

 convertloop:
    // replace VPGATHER
    movzx      esi, byte ptr [eax + 3]                 // alpha0
    movzx      edi, byte ptr [eax + 7]                 // alpha1
    vmovd      xmm0, dword ptr fixed_invtbl8[esi * 4]  // [1,a0]
    vmovd      xmm1, dword ptr fixed_invtbl8[edi * 4]  // [1,a1]
    movzx      esi, byte ptr [eax + 11]                // alpha2
    movzx      edi, byte ptr [eax + 15]                // alpha3
    vpunpckldq xmm6, xmm0, xmm1                        // [1,a1,1,a0]
    vmovd      xmm2, dword ptr fixed_invtbl8[esi * 4]  // [1,a2]
    vmovd      xmm3, dword ptr fixed_invtbl8[edi * 4]  // [1,a3]
    movzx      esi, byte ptr [eax + 19]                // alpha4
    movzx      edi, byte ptr [eax + 23]                // alpha5
    vpunpckldq xmm7, xmm2, xmm3                        // [1,a3,1,a2]
    vmovd      xmm0, dword ptr fixed_invtbl8[esi * 4]  // [1,a4]
    vmovd      xmm1, dword ptr fixed_invtbl8[edi * 4]  // [1,a5]
    movzx      esi, byte ptr [eax + 27]                // alpha6
    movzx      edi, byte ptr [eax + 31]                // alpha7
    vpunpckldq xmm0, xmm0, xmm1                        // [1,a5,1,a4]
    vmovd      xmm2, dword ptr fixed_invtbl8[esi * 4]  // [1,a6]
    vmovd      xmm3, dword ptr fixed_invtbl8[edi * 4]  // [1,a7]
    vpunpckldq xmm2, xmm2, xmm3                        // [1,a7,1,a6]
    vpunpcklqdq xmm3, xmm6, xmm7                       // [1,a3,1,a2,1,a1,1,a0]
    vpunpcklqdq xmm0, xmm0, xmm2                       // [1,a7,1,a6,1,a5,1,a4]
    vinserti128 ymm3, ymm3, xmm0, 1 // [1,a7,1,a6,1,a5,1,a4,1,a3,1,a2,1,a1,1,a0]
    // end of VPGATHER

    vmovdqu    ymm6, [eax]       // read 8 pixels.
    vpunpcklbw ymm0, ymm6, ymm6  // low 4 pixels. mutated.
    vpunpckhbw ymm1, ymm6, ymm6  // high 4 pixels. mutated.
    vpunpcklwd ymm2, ymm3, ymm3  // low 4 inverted alphas. mutated. 1, 1, a, a
    vpunpckhwd ymm3, ymm3, ymm3  // high 4 inverted alphas. mutated.
    vpshufb    ymm2, ymm2, ymm5  // replicate low 4 alphas. 1, a, a, a
    vpshufb    ymm3, ymm3, ymm5  // replicate high 4 alphas
    vpmulhuw   ymm0, ymm0, ymm2  // rgb * ia
    vpmulhuw   ymm1, ymm1, ymm3  // rgb * ia
    vpackuswb  ymm0, ymm0, ymm1  // unmutated.
    vmovdqu    [eax + edx], ymm0
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // USE_GATHER
#endif  // HAS_ARGBUNATTENUATEROW_AVX2

#ifdef HAS_ARGBGRAYROW_SSSE3
// Convert 8 ARGB pixels (32 bytes) to 8 Gray ARGB pixels.
__declspec(naked)
void ARGBGrayRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_argb */
    mov        ecx, [esp + 12]  /* width */
    movdqa     xmm4, kARGBToYJ
    movdqa     xmm5, kAddYJ64

 convertloop:
    movdqu     xmm0, [eax]  // G
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    phaddw     xmm0, xmm1
    paddw      xmm0, xmm5  // Add .5 for rounding.
    psrlw      xmm0, 7
    packuswb   xmm0, xmm0   // 8 G bytes
    movdqu     xmm2, [eax]  // A
    movdqu     xmm3, [eax + 16]
    lea        eax, [eax + 32]
    psrld      xmm2, 24
    psrld      xmm3, 24
    packuswb   xmm2, xmm3
    packuswb   xmm2, xmm2   // 8 A bytes
    movdqa     xmm3, xmm0   // Weave into GG, GA, then GGGA
    punpcklbw  xmm0, xmm0   // 8 GG words
    punpcklbw  xmm3, xmm2   // 8 GA words
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm3   // GGGA first 4
    punpckhwd  xmm1, xmm3   // GGGA next 4
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
#endif  // HAS_ARGBGRAYROW_SSSE3
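
// Scalar sketch of the gray conversion, assuming kARGBToYJ holds the
// B*15 + G*75 + R*38 (sum 128) JPEG-style luma weights and kAddYJ64 the
// rounding constant (reference only; hypothetical guard).
#ifdef LIBYUV_REFERENCE_SKETCHES
static void ARGBGrayRow_Reference(const uint8* src_argb, uint8* dst_argb,
                                  int width) {
  for (int x = 0; x < width; ++x) {
    int b = src_argb[x * 4 + 0];
    int g = src_argb[x * 4 + 1];
    int r = src_argb[x * 4 + 2];
    int y = (b * 15 + g * 75 + r * 38 + 64) >> 7;  // +64 is the .5 rounding.
    dst_argb[x * 4 + 0] = (uint8)y;
    dst_argb[x * 4 + 1] = (uint8)y;
    dst_argb[x * 4 + 2] = (uint8)y;
    dst_argb[x * 4 + 3] = src_argb[x * 4 + 3];  // Alpha is preserved.
  }
}
#endif  // LIBYUV_REFERENCE_SKETCHES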

#ifdef HAS_ARGBSEPIAROW_SSSE3
//    b = (r * 35 + g * 68 + b * 17) >> 7
//    g = (r * 45 + g * 88 + b * 22) >> 7
//    r = (r * 50 + g * 98 + b * 24) >> 7
// Constant for ARGB color to sepia tone.
static const vec8 kARGBToSepiaB = {
  17, 68, 35, 0, 17, 68, 35, 0, 17, 68, 35, 0, 17, 68, 35, 0
};

static const vec8 kARGBToSepiaG = {
  22, 88, 45, 0, 22, 88, 45, 0, 22, 88, 45, 0, 22, 88, 45, 0
};

static const vec8 kARGBToSepiaR = {
  24, 98, 50, 0, 24, 98, 50, 0, 24, 98, 50, 0, 24, 98, 50, 0
};

// Convert 8 ARGB pixels (32 bytes) to 8 Sepia ARGB pixels.
__declspec(naked)
void ARGBSepiaRow_SSSE3(uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   /* dst_argb */
    mov        ecx, [esp + 8]   /* width */
    movdqa     xmm2, kARGBToSepiaB
    movdqa     xmm3, kARGBToSepiaG
    movdqa     xmm4, kARGBToSepiaR

 convertloop:
    movdqu     xmm0, [eax]  // B
    movdqu     xmm6, [eax + 16]
    pmaddubsw  xmm0, xmm2
    pmaddubsw  xmm6, xmm2
    phaddw     xmm0, xmm6
    psrlw      xmm0, 7
    packuswb   xmm0, xmm0   // 8 B values
    movdqu     xmm5, [eax]  // G
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm5, xmm3
    pmaddubsw  xmm1, xmm3
    phaddw     xmm5, xmm1
    psrlw      xmm5, 7
    packuswb   xmm5, xmm5   // 8 G values
    punpcklbw  xmm0, xmm5   // 8 BG values
    movdqu     xmm5, [eax]  // R
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm5, xmm4
    pmaddubsw  xmm1, xmm4
    phaddw     xmm5, xmm1
    psrlw      xmm5, 7
    packuswb   xmm5, xmm5   // 8 R values
    movdqu     xmm6, [eax]  // A
    movdqu     xmm1, [eax + 16]
    psrld      xmm6, 24
    psrld      xmm1, 24
    packuswb   xmm6, xmm1
    packuswb   xmm6, xmm6   // 8 A values
    punpcklbw  xmm5, xmm6   // 8 RA values
    movdqa     xmm1, xmm0   // Weave BG, RA together
    punpcklwd  xmm0, xmm5   // BGRA first 4
    punpckhwd  xmm1, xmm5   // BGRA next 4
    movdqu     [eax], xmm0
    movdqu     [eax + 16], xmm1
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
#endif  // HAS_ARGBSEPIAROW_SSSE3
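
// Scalar sketch of the sepia kernel, taken straight from the coefficient
// comments above (Q7 fixed point, no rounding term, alpha preserved).
// Illustrative only; the saturation below mirrors packuswb.
static void ARGBSepiaRow_Sketch(uint8* dst_argb, int width) {
  for (int i = 0; i < width; ++i) {
    int b = dst_argb[0], g = dst_argb[1], r = dst_argb[2];
    int sb = (r * 35 + g * 68 + b * 17) >> 7;
    int sg = (r * 45 + g * 88 + b * 22) >> 7;
    int sr = (r * 50 + g * 98 + b * 24) >> 7;
    dst_argb[0] = (uint8)(sb > 255 ? 255 : sb);
    dst_argb[1] = (uint8)(sg > 255 ? 255 : sg);
    dst_argb[2] = (uint8)(sr > 255 ? 255 : sr);
    dst_argb += 4;  // alpha (dst_argb[3]) is left untouched
  }
}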
#ifdef HAS_ARGBCOLORMATRIXROW_SSSE3
// Transform 8 ARGB pixels (32 bytes) with color matrix.
// Same as Sepia except matrix is provided.
// TODO(fbarchard): packuswbs only use half of the reg. To make RGBA, combine R
// and B into a high and low, then G/A, unpackl/hbw and then unpckl/hwd.
__declspec(naked)
void ARGBColorMatrixRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
                              const int8* matrix_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_argb */
    mov        ecx, [esp + 12]  /* matrix_argb */
    movdqu     xmm5, [ecx]
    pshufd     xmm2, xmm5, 0x00
    pshufd     xmm3, xmm5, 0x55
    pshufd     xmm4, xmm5, 0xaa
    pshufd     xmm5, xmm5, 0xff
    mov        ecx, [esp + 16]  /* width */

 convertloop:
    movdqu     xmm0, [eax]  // B
    movdqu     xmm7, [eax + 16]
    pmaddubsw  xmm0, xmm2
    pmaddubsw  xmm7, xmm2
    movdqu     xmm6, [eax]  // G
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm6, xmm3
    pmaddubsw  xmm1, xmm3
    phaddsw    xmm0, xmm7   // B
    phaddsw    xmm6, xmm1   // G
    psraw      xmm0, 6      // B
    psraw      xmm6, 6      // G
    packuswb   xmm0, xmm0   // 8 B values
    packuswb   xmm6, xmm6   // 8 G values
    punpcklbw  xmm0, xmm6   // 8 BG values
    movdqu     xmm1, [eax]  // R
    movdqu     xmm7, [eax + 16]
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm7, xmm4
    phaddsw    xmm1, xmm7   // R
    movdqu     xmm6, [eax]  // A
    movdqu     xmm7, [eax + 16]
    pmaddubsw  xmm6, xmm5
    pmaddubsw  xmm7, xmm5
    phaddsw    xmm6, xmm7   // A
    psraw      xmm1, 6      // R
    psraw      xmm6, 6      // A
    packuswb   xmm1, xmm1   // 8 R values
    packuswb   xmm6, xmm6   // 8 A values
    punpcklbw  xmm1, xmm6   // 8 RA values
    movdqa     xmm6, xmm0   // Weave BG, RA together
    punpcklwd  xmm0, xmm1   // BGRA first 4
    punpckhwd  xmm6, xmm1   // BGRA next 4
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm6
    lea        eax, [eax + 32]
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
#endif  // HAS_ARGBCOLORMATRIXROW_SSSE3
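
// Scalar sketch of the color-matrix transform: each output channel is a
// signed dot product of the input BGRA bytes with one matrix row, shifted
// right by 6 (matching the pmaddubsw/phaddsw/psraw sequence above) and
// saturated. Illustrative only.
static void ARGBColorMatrixRow_Sketch(const uint8* src_argb, uint8* dst_argb,
                                      const int8* matrix_argb, int width) {
  for (int i = 0; i < width; ++i) {
    for (int c = 0; c < 4; ++c) {
      const int8* m = matrix_argb + c * 4;
      int v = (src_argb[0] * m[0] + src_argb[1] * m[1] +
               src_argb[2] * m[2] + src_argb[3] * m[3]) >> 6;
      dst_argb[c] = (uint8)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }
    src_argb += 4;
    dst_argb += 4;
  }
}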

#ifdef HAS_ARGBQUANTIZEROW_SSE2
// Quantize 4 ARGB pixels (16 bytes).
__declspec(naked)
void ARGBQuantizeRow_SSE2(uint8* dst_argb, int scale, int interval_size,
                          int interval_offset, int width) {
  __asm {
    mov        eax, [esp + 4]    /* dst_argb */
    movd       xmm2, [esp + 8]   /* scale */
    movd       xmm3, [esp + 12]  /* interval_size */
    movd       xmm4, [esp + 16]  /* interval_offset */
    mov        ecx, [esp + 20]   /* width */
    pshuflw    xmm2, xmm2, 040h
    pshufd     xmm2, xmm2, 044h
    pshuflw    xmm3, xmm3, 040h
    pshufd     xmm3, xmm3, 044h
    pshuflw    xmm4, xmm4, 040h
    pshufd     xmm4, xmm4, 044h
    pxor       xmm5, xmm5  // constant 0
    pcmpeqb    xmm6, xmm6  // generate mask 0xff000000
    pslld      xmm6, 24

 convertloop:
    movdqu     xmm0, [eax]  // read 4 pixels
    punpcklbw  xmm0, xmm5   // first 2 pixels
    pmulhuw    xmm0, xmm2   // pixel * scale >> 16
    movdqu     xmm1, [eax]  // read 4 pixels
    punpckhbw  xmm1, xmm5   // next 2 pixels
    pmulhuw    xmm1, xmm2
    pmullw     xmm0, xmm3   // * interval_size
    movdqu     xmm7, [eax]  // read 4 pixels
    pmullw     xmm1, xmm3
    pand       xmm7, xmm6   // mask alpha
    paddw      xmm0, xmm4   // + interval_offset
    paddw      xmm1, xmm4
    packuswb   xmm0, xmm1
    por        xmm0, xmm7
    movdqu     [eax], xmm0
    lea        eax, [eax + 16]
    sub        ecx, 4
    jg         convertloop
    ret
  }
}
#endif  // HAS_ARGBQUANTIZEROW_SSE2
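
// Scalar sketch of the quantize step above: each channel is scaled into a
// bucket index with a 16.16 multiply, then mapped back to
// index * interval_size + interval_offset; alpha is masked off and preserved.
// Illustrative only.
static void ARGBQuantizeRow_Sketch(uint8* dst_argb, int scale,
                                   int interval_size, int interval_offset,
                                   int width) {
  for (int i = 0; i < width; ++i) {
    for (int c = 0; c < 3; ++c) {  // B, G, R; alpha untouched
      int v = (dst_argb[c] * scale) >> 16;
      dst_argb[c] = (uint8)(v * interval_size + interval_offset);
    }
    dst_argb += 4;
  }
}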

#ifdef HAS_ARGBSHADEROW_SSE2
// Shade 4 pixels at a time by specified value.
__declspec(naked)
void ARGBShadeRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width,
                       uint32 value) {
  __asm {
    mov        eax, [esp + 4]   // src_argb
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    movd       xmm2, [esp + 16]  // value
    punpcklbw  xmm2, xmm2
    punpcklqdq xmm2, xmm2

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm0       // first 2
    punpckhbw  xmm1, xmm1       // next 2
    pmulhuw    xmm0, xmm2       // argb * value
    pmulhuw    xmm1, xmm2       // argb * value
    psrlw      xmm0, 8
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBSHADEROW_SSE2
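
// Scalar sketch of the shade multiply: both pixel and value bytes are
// widened to p * 257 words by the self-unpacks above, multiplied high
// (>> 16) and then shifted by 8, which approximates p * v / 255 per channel.
// Illustrative only.
static void ARGBShadeRow_Sketch(const uint8* src_argb, uint8* dst_argb,
                                int width, uint32 value) {
  for (int i = 0; i < width; ++i) {
    for (int c = 0; c < 4; ++c) {
      uint32 v = (value >> (c * 8)) & 0xff;
      uint32 p = src_argb[c];
      dst_argb[c] = (uint8)((((p * 257u) * (v * 257u)) >> 16) >> 8);
    }
    src_argb += 4;
    dst_argb += 4;
  }
}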

#ifdef HAS_ARGBMULTIPLYROW_SSE2
// Multiply 2 rows of ARGB pixels together, 4 pixels at a time.
__declspec(naked)
void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    pxor       xmm5, xmm5  // constant 0

 convertloop:
    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
    movdqu     xmm2, [esi]        // read 4 pixels from src_argb1
    movdqu     xmm1, xmm0
    movdqu     xmm3, xmm2
    punpcklbw  xmm0, xmm0         // first 2
    punpckhbw  xmm1, xmm1         // next 2
    punpcklbw  xmm2, xmm5         // first 2
    punpckhbw  xmm3, xmm5         // next 2
    pmulhuw    xmm0, xmm2         // src_argb0 * src_argb1 first 2
    pmulhuw    xmm1, xmm3         // src_argb0 * src_argb1 next 2
    lea        eax, [eax + 16]
    lea        esi, [esi + 16]
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_ARGBMULTIPLYROW_SSE2
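
// Scalar sketch of the multiply above: one operand is widened to p * 257,
// the other zero-extended, and pmulhuw keeps the top 16 bits, so each
// channel is roughly (p0 * p1) / 255 (truncating; 255 * 255 yields 254).
// Illustrative only.
static void ARGBMultiplyRow_Sketch(const uint8* src_argb0,
                                   const uint8* src_argb1, uint8* dst_argb,
                                   int width) {
  for (int i = 0; i < width * 4; ++i) {
    dst_argb[i] = (uint8)((src_argb0[i] * 257u * src_argb1[i]) >> 16);
  }
}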

#ifdef HAS_ARGBADDROW_SSE2
// Add 2 rows of ARGB pixels together, 4 pixels at a time.
// TODO(fbarchard): Port this to posix, neon and other math functions.
__declspec(naked)
void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                     uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

    sub        ecx, 4
    jl         convertloop49

 convertloop4:
    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
    lea        eax, [eax + 16]
    movdqu     xmm1, [esi]        // read 4 pixels from src_argb1
    lea        esi, [esi + 16]
    paddusb    xmm0, xmm1         // src_argb0 + src_argb1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        convertloop4

 convertloop49:
    add        ecx, 4 - 1
    jl         convertloop19

 convertloop1:
    movd       xmm0, [eax]        // read 1 pixel from src_argb0
    lea        eax, [eax + 4]
    movd       xmm1, [esi]        // read 1 pixel from src_argb1
    lea        esi, [esi + 4]
    paddusb    xmm0, xmm1         // src_argb0 + src_argb1
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        convertloop1

 convertloop19:
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBADDROW_SSE2

#ifdef HAS_ARGBSUBTRACTROW_SSE2
// Subtract 2 rows of ARGB pixels, 4 pixels at a time.
__declspec(naked)
void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

 convertloop:
    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
    lea        eax, [eax + 16]
    movdqu     xmm1, [esi]        // read 4 pixels from src_argb1
    lea        esi, [esi + 16]
    psubusb    xmm0, xmm1         // src_argb0 - src_argb1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_ARGBSUBTRACTROW_SSE2
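
// Scalar sketch of the saturating add/subtract pair (paddusb/psubusb):
// per-byte unsigned arithmetic clamped to [0, 255]. Illustrative only.
static void ARGBAddSubRow_Sketch(const uint8* src_argb0,
                                 const uint8* src_argb1, uint8* dst_argb,
                                 int width, int subtract) {
  for (int i = 0; i < width * 4; ++i) {
    int v = subtract ? src_argb0[i] - src_argb1[i]
                     : src_argb0[i] + src_argb1[i];
    dst_argb[i] = (uint8)(v < 0 ? 0 : (v > 255 ? 255 : v));
  }
}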

#ifdef HAS_ARGBMULTIPLYROW_AVX2
// Multiply 2 rows of ARGB pixels together, 8 pixels at a time.
__declspec(naked)
void ARGBMultiplyRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    vpxor      ymm5, ymm5, ymm5     // constant 0

 convertloop:
    vmovdqu    ymm1, [eax]        // read 8 pixels from src_argb0
    lea        eax, [eax + 32]
    vmovdqu    ymm3, [esi]        // read 8 pixels from src_argb1
    lea        esi, [esi + 32]
    vpunpcklbw ymm0, ymm1, ymm1   // low 4
    vpunpckhbw ymm1, ymm1, ymm1   // high 4
    vpunpcklbw ymm2, ymm3, ymm5   // low 4
    vpunpckhbw ymm3, ymm3, ymm5   // high 4
    vpmulhuw   ymm0, ymm0, ymm2   // src_argb0 * src_argb1 low 4
    vpmulhuw   ymm1, ymm1, ymm3   // src_argb0 * src_argb1 high 4
    vpackuswb  ymm0, ymm0, ymm1
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBMULTIPLYROW_AVX2

#ifdef HAS_ARGBADDROW_AVX2
// Add 2 rows of ARGB pixels together, 8 pixels at a time.
__declspec(naked)
void ARGBAddRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
                     uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

 convertloop:
    vmovdqu    ymm0, [eax]              // read 8 pixels from src_argb0
    lea        eax, [eax + 32]
    vpaddusb   ymm0, ymm0, [esi]        // add 8 pixels from src_argb1
    lea        esi, [esi + 32]
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBADDROW_AVX2

#ifdef HAS_ARGBSUBTRACTROW_AVX2
// Subtract 2 rows of ARGB pixels, 8 pixels at a time.
__declspec(naked)
void ARGBSubtractRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

 convertloop:
    vmovdqu    ymm0, [eax]              // read 8 pixels from src_argb0
    lea        eax, [eax + 32]
    vpsubusb   ymm0, ymm0, [esi]        // src_argb0 - src_argb1
    lea        esi, [esi + 32]
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBSUBTRACTROW_AVX2

#ifdef HAS_SOBELXROW_SSE2
// SobelX as a matrix is
// -1  0  1
// -2  0  2
// -1  0  1
__declspec(naked)
void SobelXRow_SSE2(const uint8* src_y0, const uint8* src_y1,
                    const uint8* src_y2, uint8* dst_sobelx, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_y0
    mov        esi, [esp + 8 + 8]   // src_y1
    mov        edi, [esp + 8 + 12]  // src_y2
    mov        edx, [esp + 8 + 16]  // dst_sobelx
    mov        ecx, [esp + 8 + 20]  // width
    sub        esi, eax
    sub        edi, eax
    sub        edx, eax
    pxor       xmm5, xmm5  // constant 0

 convertloop:
    movq       xmm0, qword ptr [eax]            // read 8 pixels from src_y0[0]
    movq       xmm1, qword ptr [eax + 2]        // read 8 pixels from src_y0[2]
    punpcklbw  xmm0, xmm5
    punpcklbw  xmm1, xmm5
    psubw      xmm0, xmm1
    movq       xmm1, qword ptr [eax + esi]      // read 8 pixels from src_y1[0]
    movq       xmm2, qword ptr [eax + esi + 2]  // read 8 pixels from src_y1[2]
    punpcklbw  xmm1, xmm5
    punpcklbw  xmm2, xmm5
    psubw      xmm1, xmm2
    movq       xmm2, qword ptr [eax + edi]      // read 8 pixels from src_y2[0]
    movq       xmm3, qword ptr [eax + edi + 2]  // read 8 pixels from src_y2[2]
    punpcklbw  xmm2, xmm5
    punpcklbw  xmm3, xmm5
    psubw      xmm2, xmm3
    paddw      xmm0, xmm2
    paddw      xmm0, xmm1
    paddw      xmm0, xmm1
    pxor       xmm1, xmm1   // abs = max(xmm0, -xmm0).  SSSE3 could use pabsw
    psubw      xmm1, xmm0
    pmaxsw     xmm0, xmm1
    packuswb   xmm0, xmm0
    movq       qword ptr [eax + edx], xmm0
    lea        eax, [eax + 8]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_SOBELXROW_SSE2
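
// Scalar sketch of the SobelX kernel above: the three row differences are
// summed with the middle row weighted twice, then the absolute value is
// clamped to a byte. SobelY below is the transposed version of the same
// idea. Illustrative only.
static void SobelXRow_Sketch(const uint8* src_y0, const uint8* src_y1,
                             const uint8* src_y2, uint8* dst_sobelx,
                             int width) {
  for (int i = 0; i < width; ++i) {
    int a = src_y0[i] - src_y0[i + 2];
    int b = src_y1[i] - src_y1[i + 2];
    int c = src_y2[i] - src_y2[i + 2];
    int sobel = a + 2 * b + c;
    if (sobel < 0) sobel = -sobel;  // abs, like pmaxsw(x, -x)
    dst_sobelx[i] = (uint8)(sobel > 255 ? 255 : sobel);
  }
}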

#ifdef HAS_SOBELYROW_SSE2
// SobelY as a matrix is
// -1 -2 -1
//  0  0  0
//  1  2  1
__declspec(naked)
void SobelYRow_SSE2(const uint8* src_y0, const uint8* src_y1,
                    uint8* dst_sobely, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_y0
    mov        esi, [esp + 4 + 8]   // src_y1
    mov        edx, [esp + 4 + 12]  // dst_sobely
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax
    sub        edx, eax
    pxor       xmm5, xmm5  // constant 0

 convertloop:
    movq       xmm0, qword ptr [eax]            // read 8 pixels from src_y0[0]
    movq       xmm1, qword ptr [eax + esi]      // read 8 pixels from src_y1[0]
    punpcklbw  xmm0, xmm5
    punpcklbw  xmm1, xmm5
    psubw      xmm0, xmm1
    movq       xmm1, qword ptr [eax + 1]        // read 8 pixels from src_y0[1]
    movq       xmm2, qword ptr [eax + esi + 1]  // read 8 pixels from src_y1[1]
    punpcklbw  xmm1, xmm5
    punpcklbw  xmm2, xmm5
    psubw      xmm1, xmm2
    movq       xmm2, qword ptr [eax + 2]        // read 8 pixels from src_y0[2]
    movq       xmm3, qword ptr [eax + esi + 2]  // read 8 pixels from src_y1[2]
    punpcklbw  xmm2, xmm5
    punpcklbw  xmm3, xmm5
    psubw      xmm2, xmm3
    paddw      xmm0, xmm2
    paddw      xmm0, xmm1
    paddw      xmm0, xmm1
    pxor       xmm1, xmm1   // abs = max(xmm0, -xmm0).  SSSE3 could use pabsw
    psubw      xmm1, xmm0
    pmaxsw     xmm0, xmm1
    packuswb   xmm0, xmm0
    movq       qword ptr [eax + edx], xmm0
    lea        eax, [eax + 8]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELYROW_SSE2

#ifdef HAS_SOBELROW_SSE2
// Adds Sobel X and Sobel Y and stores Sobel into ARGB.
// A = 255
// R = Sobel
// G = Sobel
// B = Sobel
__declspec(naked)
void SobelRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
                   uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_sobelx
    mov        esi, [esp + 4 + 8]   // src_sobely
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax
    pcmpeqb    xmm5, xmm5           // alpha 255
    pslld      xmm5, 24             // 0xff000000

 convertloop:
    movdqu     xmm0, [eax]            // read 16 pixels src_sobelx
    movdqu     xmm1, [eax + esi]      // read 16 pixels src_sobely
    lea        eax, [eax + 16]
    paddusb    xmm0, xmm1             // sobel = sobelx + sobely
    movdqa     xmm2, xmm0             // GG
    punpcklbw  xmm2, xmm0             // First 8
    punpckhbw  xmm0, xmm0             // Next 8
    movdqa     xmm1, xmm2             // GGGG
    punpcklwd  xmm1, xmm2             // First 4
    punpckhwd  xmm2, xmm2             // Next 4
    por        xmm1, xmm5             // GGGA
    por        xmm2, xmm5
    movdqa     xmm3, xmm0             // GGGG
    punpcklwd  xmm3, xmm0             // Next 4
    punpckhwd  xmm0, xmm0             // Last 4
    por        xmm3, xmm5             // GGGA
    por        xmm0, xmm5
    movdqu     [edx], xmm1
    movdqu     [edx + 16], xmm2
    movdqu     [edx + 32], xmm3
    movdqu     [edx + 48], xmm0
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELROW_SSE2
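
// Scalar sketch of the packing above: the combined Sobel magnitude is
// replicated into B, G and R with alpha forced to 255. Illustrative only.
static void SobelRow_Sketch(const uint8* src_sobelx, const uint8* src_sobely,
                            uint8* dst_argb, int width) {
  for (int i = 0; i < width; ++i) {
    int s = src_sobelx[i] + src_sobely[i];
    uint8 g = (uint8)(s > 255 ? 255 : s);  // paddusb saturation
    dst_argb[0] = g;
    dst_argb[1] = g;
    dst_argb[2] = g;
    dst_argb[3] = 255u;
    dst_argb += 4;
  }
}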

#ifdef HAS_SOBELTOPLANEROW_SSE2
// Adds Sobel X and Sobel Y and stores Sobel into a plane.
__declspec(naked)
void SobelToPlaneRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
                          uint8* dst_y, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_sobelx
    mov        esi, [esp + 4 + 8]   // src_sobely
    mov        edx, [esp + 4 + 12]  // dst_y
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax

 convertloop:
    movdqu     xmm0, [eax]            // read 16 pixels src_sobelx
    movdqu     xmm1, [eax + esi]      // read 16 pixels src_sobely
    lea        eax, [eax + 16]
    paddusb    xmm0, xmm1             // sobel = sobelx + sobely
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELTOPLANEROW_SSE2

#ifdef HAS_SOBELXYROW_SSE2
// Mixes Sobel X, Sobel Y and Sobel into ARGB.
// A = 255
// R = Sobel X
// G = Sobel
// B = Sobel Y
__declspec(naked)
void SobelXYRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
                     uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_sobelx
    mov        esi, [esp + 4 + 8]   // src_sobely
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax
    pcmpeqb    xmm5, xmm5           // alpha 255

 convertloop:
    movdqu     xmm0, [eax]            // read 16 pixels src_sobelx
    movdqu     xmm1, [eax + esi]      // read 16 pixels src_sobely
    lea        eax, [eax + 16]
    movdqa     xmm2, xmm0
    paddusb    xmm2, xmm1             // sobel = sobelx + sobely
    movdqa     xmm3, xmm0             // XA
    punpcklbw  xmm3, xmm5
    punpckhbw  xmm0, xmm5
    movdqa     xmm4, xmm1             // YS
    punpcklbw  xmm4, xmm2
    punpckhbw  xmm1, xmm2
    movdqa     xmm6, xmm4             // YSXA
    punpcklwd  xmm6, xmm3             // First 4
    punpckhwd  xmm4, xmm3             // Next 4
    movdqa     xmm7, xmm1             // YSXA
    punpcklwd  xmm7, xmm0             // Next 4
    punpckhwd  xmm1, xmm0             // Last 4
    movdqu     [edx], xmm6
    movdqu     [edx + 16], xmm4
    movdqu     [edx + 32], xmm7
    movdqu     [edx + 48], xmm1
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELXYROW_SSE2
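
// Scalar sketch of the channel mix above: B carries Sobel Y, G the combined
// magnitude, R Sobel X, with opaque alpha. Illustrative only.
static void SobelXYRow_Sketch(const uint8* src_sobelx, const uint8* src_sobely,
                              uint8* dst_argb, int width) {
  for (int i = 0; i < width; ++i) {
    int s = src_sobelx[i] + src_sobely[i];
    dst_argb[0] = src_sobely[i];               // B = Sobel Y
    dst_argb[1] = (uint8)(s > 255 ? 255 : s);  // G = Sobel
    dst_argb[2] = src_sobelx[i];               // R = Sobel X
    dst_argb[3] = 255u;                        // A = 255
    dst_argb += 4;
  }
}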
#ifdef HAS_CUMULATIVESUMTOAVERAGEROW_SSE2
// Consider float CumulativeSum.
// Consider calling CumulativeSum one row at time as needed.
// Consider circular CumulativeSum buffer of radius * 2 + 1 height.
// Convert cumulative sum for an area to an average for 1 pixel.
// topleft is pointer to top left of CumulativeSum buffer for area.
// botleft is pointer to bottom left of CumulativeSum buffer.
// width is offset from left to right of area in CumulativeSum buffer measured
//   in number of ints.
// area is the number of pixels in the area being averaged.
// dst points to pixel to store result to.
// count is number of averaged pixels to produce.
// Does 4 pixels at a time.
void CumulativeSumToAverageRow_SSE2(const int32* topleft, const int32* botleft,
                                    int width, int area, uint8* dst,
                                    int count) {
  __asm {
    mov        eax, topleft  // eax topleft
    mov        esi, botleft  // esi botleft
    mov        edx, width
    movd       xmm5, area
    mov        edi, dst
    mov        ecx, count
    cvtdq2ps   xmm5, xmm5
    rcpss      xmm4, xmm5  // 1.0f / area
    pshufd     xmm4, xmm4, 0
    sub        ecx, 4
    jl         l4b

    cmp        area, 128  // 128 pixels will not overflow 15 bits.
    ja         l4

    pshufd     xmm5, xmm5, 0        // area
    pcmpeqb    xmm6, xmm6           // constant of 65536.0 - 1 = 65535.0
    psrld      xmm6, 16
    cvtdq2ps   xmm6, xmm6
    addps      xmm5, xmm6           // (65536.0 + area - 1)
    mulps      xmm5, xmm4           // (65536.0 + area - 1) * 1 / area
    cvtps2dq   xmm5, xmm5           // 0.16 fixed point
    packssdw   xmm5, xmm5           // 16 bit shorts

    // 4 pixel loop small blocks.
  s4:
    // top left
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]

    // - top right
    psubd      xmm0, [eax + edx * 4]
    psubd      xmm1, [eax + edx * 4 + 16]
    psubd      xmm2, [eax + edx * 4 + 32]
    psubd      xmm3, [eax + edx * 4 + 48]
    lea        eax, [eax + 64]

    // - bottom left
    psubd      xmm0, [esi]
    psubd      xmm1, [esi + 16]
    psubd      xmm2, [esi + 32]
    psubd      xmm3, [esi + 48]

    // + bottom right
    paddd      xmm0, [esi + edx * 4]
    paddd      xmm1, [esi + edx * 4 + 16]
    paddd      xmm2, [esi + edx * 4 + 32]
    paddd      xmm3, [esi + edx * 4 + 48]
    lea        esi, [esi + 64]

    packssdw   xmm0, xmm1  // pack 4 pixels into 2 registers
    packssdw   xmm2, xmm3

    pmulhuw    xmm0, xmm5
    pmulhuw    xmm2, xmm5

    packuswb   xmm0, xmm2
    movdqu     [edi], xmm0
    lea        edi, [edi + 16]
    sub        ecx, 4
    jge        s4

    jmp        l4b

    // 4 pixel loop
  l4:
    // top left
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]

    // - top right
    psubd      xmm0, [eax + edx * 4]
    psubd      xmm1, [eax + edx * 4 + 16]
    psubd      xmm2, [eax + edx * 4 + 32]
    psubd      xmm3, [eax + edx * 4 + 48]
    lea        eax, [eax + 64]

    // - bottom left
    psubd      xmm0, [esi]
    psubd      xmm1, [esi + 16]
    psubd      xmm2, [esi + 32]
    psubd      xmm3, [esi + 48]

    // + bottom right
    paddd      xmm0, [esi + edx * 4]
    paddd      xmm1, [esi + edx * 4 + 16]
    paddd      xmm2, [esi + edx * 4 + 32]
    paddd      xmm3, [esi + edx * 4 + 48]
    lea        esi, [esi + 64]

    cvtdq2ps   xmm0, xmm0   // Average = Sum * 1 / Area
    cvtdq2ps   xmm1, xmm1
    mulps      xmm0, xmm4
    mulps      xmm1, xmm4
    cvtdq2ps   xmm2, xmm2
    cvtdq2ps   xmm3, xmm3
    mulps      xmm2, xmm4
    mulps      xmm3, xmm4
    cvtps2dq   xmm0, xmm0
    cvtps2dq   xmm1, xmm1
    cvtps2dq   xmm2, xmm2
    cvtps2dq   xmm3, xmm3
    packssdw   xmm0, xmm1
    packssdw   xmm2, xmm3
    packuswb   xmm0, xmm2
    movdqu     [edi], xmm0
    lea        edi, [edi + 16]
    sub        ecx, 4
    jge        l4

  l4b:
    add        ecx, 4 - 1
    jl         l1b

    // 1 pixel loop
  l1:
    movdqu     xmm0, [eax]
    psubd      xmm0, [eax + edx * 4]
    lea        eax, [eax + 16]
    psubd      xmm0, [esi]
    paddd      xmm0, [esi + edx * 4]
    lea        esi, [esi + 16]
    cvtdq2ps   xmm0, xmm0
    mulps      xmm0, xmm4
    cvtps2dq   xmm0, xmm0
    packssdw   xmm0, xmm0
    packuswb   xmm0, xmm0
    movd       dword ptr [edi], xmm0
    lea        edi, [edi + 4]
    sub        ecx, 1
    jge        l1
  l1b:
  }
}
#endif  // HAS_CUMULATIVESUMTOAVERAGEROW_SSE2
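
// Scalar sketch of the integral-image average above: the sum over a
// rectangle is topleft - topright - botleft + botright, one int per channel,
// divided by the pixel count. Illustrative only; the SIMD code replaces the
// division with a reciprocal multiply.
static void CumulativeSumToAverageRow_Sketch(const int32* topleft,
                                             const int32* botleft, int width,
                                             int area, uint8* dst, int count) {
  for (int i = 0; i < count; ++i) {
    for (int c = 0; c < 4; ++c) {
      int32 sum = topleft[c] - topleft[width + c] -
                  botleft[c] + botleft[width + c];
      dst[c] = (uint8)(sum / area);
    }
    topleft += 4;  // 4 ints (one ARGB pixel) per step
    botleft += 4;
    dst += 4;
  }
}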

#ifdef HAS_COMPUTECUMULATIVESUMROW_SSE2
// Creates a table of cumulative sums where each value is a sum of all values
// above and to the left of the value.
void ComputeCumulativeSumRow_SSE2(const uint8* row, int32* cumsum,
                                  const int32* previous_cumsum, int width) {
  __asm {
    mov        eax, row
    mov        edx, cumsum
    mov        esi, previous_cumsum
    mov        ecx, width
    pxor       xmm0, xmm0
    pxor       xmm1, xmm1

    sub        ecx, 4
    jl         l4b
    test       edx, 15
    jne        l4b

    // 4 pixel loop
  l4:
    movdqu     xmm2, [eax]  // 4 argb pixels 16 bytes.
    lea        eax, [eax + 16]
    movdqa     xmm4, xmm2

    punpcklbw  xmm2, xmm1
    movdqa     xmm3, xmm2
    punpcklwd  xmm2, xmm1
    punpckhwd  xmm3, xmm1

    punpckhbw  xmm4, xmm1
    movdqa     xmm5, xmm4
    punpcklwd  xmm4, xmm1
    punpckhwd  xmm5, xmm1

    paddd      xmm0, xmm2
    movdqu     xmm2, [esi]  // previous row above.
    paddd      xmm2, xmm0

    paddd      xmm0, xmm3
    movdqu     xmm3, [esi + 16]
    paddd      xmm3, xmm0

    paddd      xmm0, xmm4
    movdqu     xmm4, [esi + 32]
    paddd      xmm4, xmm0

    paddd      xmm0, xmm5
    movdqu     xmm5, [esi + 48]
    lea        esi, [esi + 64]
    paddd      xmm5, xmm0

    movdqu     [edx], xmm2
    movdqu     [edx + 16], xmm3
    movdqu     [edx + 32], xmm4
    movdqu     [edx + 48], xmm5

    lea        edx, [edx + 64]
    sub        ecx, 4
    jge        l4

  l4b:
    add        ecx, 4 - 1
    jl         l1b

    // 1 pixel loop
  l1:
    movd       xmm2, dword ptr [eax]  // 1 argb pixel 4 bytes.
    lea        eax, [eax + 4]
    punpcklbw  xmm2, xmm1
    punpcklwd  xmm2, xmm1
    paddd      xmm0, xmm2
    movdqu     xmm2, [esi]
    lea        esi, [esi + 16]
    paddd      xmm2, xmm0
    movdqu     [edx], xmm2
    lea        edx, [edx + 16]
    sub        ecx, 1
    jge        l1

 l1b:
  }
}
#endif  // HAS_COMPUTECUMULATIVESUMROW_SSE2
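
// Scalar sketch of the running sum above: each output int is the byte from
// this row plus the running total to its left plus the cumulative sum from
// the row above. Illustrative only.
static void ComputeCumulativeSumRow_Sketch(const uint8* row, int32* cumsum,
                                           const int32* previous_cumsum,
                                           int width) {
  int32 running[4] = {0, 0, 0, 0};  // per-channel sum of this row so far
  for (int i = 0; i < width; ++i) {
    for (int c = 0; c < 4; ++c) {
      running[c] += row[i * 4 + c];
      cumsum[i * 4 + c] = running[c] + previous_cumsum[i * 4 + c];
    }
  }
}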

#ifdef HAS_ARGBAFFINEROW_SSE2
// Copy ARGB pixels from source image with slope to a row of destination.
__declspec(naked)
LIBYUV_API
void ARGBAffineRow_SSE2(const uint8* src_argb, int src_argb_stride,
                        uint8* dst_argb, const float* uv_dudv, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 12]  // src_argb
    mov        esi, [esp + 16]  // stride
    mov        edx, [esp + 20]  // dst_argb
    mov        ecx, [esp + 24]  // pointer to uv_dudv
    movq       xmm2, qword ptr [ecx]  // uv
    movq       xmm7, qword ptr [ecx + 8]  // dudv
    mov        ecx, [esp + 28]  // width
    shl        esi, 16          // 4, stride
    add        esi, 4
    movd       xmm5, esi
    sub        ecx, 4
    jl         l4b

    // setup for 4 pixel loop
    pshufd     xmm7, xmm7, 0x44  // dup dudv
    pshufd     xmm5, xmm5, 0  // dup 4, stride
    movdqa     xmm0, xmm2    // x0, y0, x1, y1
    addps      xmm0, xmm7
    movlhps    xmm2, xmm0
    movdqa     xmm4, xmm7
    addps      xmm4, xmm4    // dudv *= 2
    movdqa     xmm3, xmm2    // x2, y2, x3, y3
    addps      xmm3, xmm4
    addps      xmm4, xmm4    // dudv *= 4

    // 4 pixel loop
  l4:
    cvttps2dq  xmm0, xmm2    // x, y float to int first 2
    cvttps2dq  xmm1, xmm3    // x, y float to int next 2
    packssdw   xmm0, xmm1    // x, y as 8 shorts
    pmaddwd    xmm0, xmm5    // offsets = x * 4 + y * stride.
    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // shift right
    movd       edi, xmm0
    pshufd     xmm0, xmm0, 0x39  // shift right
    movd       xmm1, [eax + esi]  // read pixel 0
    movd       xmm6, [eax + edi]  // read pixel 1
    punpckldq  xmm1, xmm6     // combine pixel 0 and 1
    addps      xmm2, xmm4    // x, y += dx, dy first 2
    movq       qword ptr [edx], xmm1
    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // shift right
    movd       edi, xmm0
    movd       xmm6, [eax + esi]  // read pixel 2
    movd       xmm0, [eax + edi]  // read pixel 3
    punpckldq  xmm6, xmm0     // combine pixel 2 and 3
    addps      xmm3, xmm4    // x, y += dx, dy next 2
    movq       qword ptr 8[edx], xmm6
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        l4

  l4b:
    add        ecx, 4 - 1
    jl         l1b

    // 1 pixel loop
  l1:
    cvttps2dq  xmm0, xmm2    // x, y float to int
    packssdw   xmm0, xmm0    // x, y as shorts
    pmaddwd    xmm0, xmm5    // offset = x * 4 + y * stride
    addps      xmm2, xmm7    // x, y += dx, dy
    movd       esi, xmm0
    movd       xmm0, [eax + esi]  // copy a pixel
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        l1
  l1b:
    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBAFFINEROW_SSE2
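
// Scalar sketch of the affine walk above: (u, v) starts at uv_dudv[0..1] and
// steps by uv_dudv[2..3] per destination pixel; each step fetches one ARGB
// pixel at (int)u, (int)v. Illustrative only; like the assembly, no clipping
// is done here.
static void ARGBAffineRow_Sketch(const uint8* src_argb, int src_argb_stride,
                                 uint8* dst_argb, const float* uv_dudv,
                                 int width) {
  float u = uv_dudv[0];
  float v = uv_dudv[1];
  for (int i = 0; i < width; ++i) {
    int x = (int)u;
    int y = (int)v;
    const uint8* src = src_argb + y * src_argb_stride + x * 4;
    dst_argb[0] = src[0];
    dst_argb[1] = src[1];
    dst_argb[2] = src[2];
    dst_argb[3] = src[3];
    u += uv_dudv[2];
    v += uv_dudv[3];
    dst_argb += 4;
  }
}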

#ifdef HAS_INTERPOLATEROW_AVX2
// Bilinear filter 32x2 -> 32x1
__declspec(naked)
void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) {
  __asm {
    push       esi
    push       edi
    mov        edi, [esp + 8 + 4]   // dst_ptr
    mov        esi, [esp + 8 + 8]   // src_ptr
    mov        edx, [esp + 8 + 12]  // src_stride
    mov        ecx, [esp + 8 + 16]  // dst_width
    mov        eax, [esp + 8 + 20]  // source_y_fraction (0..255)
    shr        eax, 1
    // Dispatch to specialized filters if applicable.
    cmp        eax, 0
    je         xloop100  // 0 / 128.  Blend 100 / 0.
    sub        edi, esi
    cmp        eax, 32
    je         xloop75   // 32 / 128 is 0.25.  Blend 75 / 25.
    cmp        eax, 64
    je         xloop50   // 64 / 128 is 0.50.  Blend 50 / 50.
    cmp        eax, 96
    je         xloop25   // 96 / 128 is 0.75.  Blend 25 / 75.

    vmovd      xmm0, eax  // high fraction 0..127
    neg        eax
    add        eax, 128
    vmovd      xmm5, eax  // low fraction 128..1
    vpunpcklbw xmm5, xmm5, xmm0
    vpunpcklwd xmm5, xmm5, xmm5
    vpxor      ymm0, ymm0, ymm0
    vpermd     ymm5, ymm0, ymm5

  xloop:
    vmovdqu    ymm0, [esi]
    vmovdqu    ymm2, [esi + edx]
    vpunpckhbw ymm1, ymm0, ymm2  // mutates
    vpunpcklbw ymm0, ymm0, ymm2  // mutates
    vpmaddubsw ymm0, ymm0, ymm5
    vpmaddubsw ymm1, ymm1, ymm5
    vpsrlw     ymm0, ymm0, 7
    vpsrlw     ymm1, ymm1, 7
    vpackuswb  ymm0, ymm0, ymm1  // unmutates
    vmovdqu    [esi + edi], ymm0
    lea        esi, [esi + 32]
    sub        ecx, 32
    jg         xloop
    jmp        xloop99

   // Blend 25 / 75.
 xloop25:
   vmovdqu    ymm0, [esi]
   vmovdqu    ymm1, [esi + edx]
   vpavgb     ymm0, ymm0, ymm1
   vpavgb     ymm0, ymm0, ymm1
   vmovdqu    [esi + edi], ymm0
   lea        esi, [esi + 32]
   sub        ecx, 32
   jg         xloop25
   jmp        xloop99

   // Blend 50 / 50.
 xloop50:
   vmovdqu    ymm0, [esi]
   vpavgb     ymm0, ymm0, [esi + edx]
   vmovdqu    [esi + edi], ymm0
   lea        esi, [esi + 32]
   sub        ecx, 32
   jg         xloop50
   jmp        xloop99

   // Blend 75 / 25.
 xloop75:
   vmovdqu    ymm1, [esi]
   vmovdqu    ymm0, [esi + edx]
   vpavgb     ymm0, ymm0, ymm1
   vpavgb     ymm0, ymm0, ymm1
   vmovdqu    [esi + edi], ymm0
   lea        esi, [esi + 32]
   sub        ecx, 32
   jg         xloop75
   jmp        xloop99

   // Blend 100 / 0 - Copy row unchanged.
 xloop100:
   rep movsb

  xloop99:
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_INTERPOLATEROW_AVX2
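
// Scalar sketch of the row blend used by all the InterpolateRow variants:
// dst = (src * (256 - f) + src_next_row * f) >> 8 with f = source_y_fraction.
// Illustrative only; it ignores the per-variant rounding and the halved
// 7-bit fraction used above.
static void InterpolateRow_Sketch(uint8* dst_ptr, const uint8* src_ptr,
                                  ptrdiff_t src_stride, int dst_width,
                                  int source_y_fraction) {
  const uint8* src1 = src_ptr + src_stride;  // row below
  for (int i = 0; i < dst_width; ++i) {
    dst_ptr[i] = (uint8)((src_ptr[i] * (256 - source_y_fraction) +
                          src1[i] * source_y_fraction) >> 8);
  }
}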

// Bilinear filter 16x2 -> 16x1
__declspec(naked)
void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
                          ptrdiff_t src_stride, int dst_width,
                          int source_y_fraction) {
  __asm {
    push       esi
    push       edi
    mov        edi, [esp + 8 + 4]   // dst_ptr
    mov        esi, [esp + 8 + 8]   // src_ptr
    mov        edx, [esp + 8 + 12]  // src_stride
    mov        ecx, [esp + 8 + 16]  // dst_width
    mov        eax, [esp + 8 + 20]  // source_y_fraction (0..255)
    sub        edi, esi
    shr        eax, 1
    // Dispatch to specialized filters if applicable.
    cmp        eax, 0
    je         xloop100  // 0 / 128.  Blend 100 / 0.
    cmp        eax, 32
    je         xloop75   // 32 / 128 is 0.25.  Blend 75 / 25.
    cmp        eax, 64
    je         xloop50   // 64 / 128 is 0.50.  Blend 50 / 50.
    cmp        eax, 96
    je         xloop25   // 96 / 128 is 0.75.  Blend 25 / 75.

    movd       xmm0, eax  // high fraction 0..127
    neg        eax
    add        eax, 128
    movd       xmm5, eax  // low fraction 128..1
    punpcklbw  xmm5, xmm0
    punpcklwd  xmm5, xmm5
    pshufd     xmm5, xmm5, 0

  xloop:
    movdqu     xmm0, [esi]
    movdqu     xmm2, [esi + edx]
    movdqu     xmm1, xmm0
    punpcklbw  xmm0, xmm2
    punpckhbw  xmm1, xmm2
    pmaddubsw  xmm0, xmm5
    pmaddubsw  xmm1, xmm5
    psrlw      xmm0, 7
    psrlw      xmm1, 7
    packuswb   xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop
    jmp        xloop99

    // Blend 25 / 75.
  xloop25:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop25
    jmp        xloop99

    // Blend 50 / 50.
  xloop50:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop50
    jmp        xloop99

    // Blend 75 / 25.
  xloop75:
    movdqu     xmm1, [esi]
    movdqu     xmm0, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop75
    jmp        xloop99

    // Blend 100 / 0 - Copy row unchanged.
  xloop100:
    movdqu     xmm0, [esi]
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop100

  xloop99:
    pop        edi
    pop        esi
    ret
  }
}

#ifdef HAS_INTERPOLATEROW_SSE2
// Bilinear filter 16x2 -> 16x1
__declspec(naked)
void InterpolateRow_SSE2(uint8* dst_ptr, const uint8* src_ptr,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) {
  __asm {
    push       esi
    push       edi
    mov        edi, [esp + 8 + 4]   // dst_ptr
    mov        esi, [esp + 8 + 8]   // src_ptr
    mov        edx, [esp + 8 + 12]  // src_stride
    mov        ecx, [esp + 8 + 16]  // dst_width
    mov        eax, [esp + 8 + 20]  // source_y_fraction (0..255)
    sub        edi, esi
    // Dispatch to specialized filters if applicable.
    cmp        eax, 0
    je         xloop100  // 0 / 256.  Blend 100 / 0.
    cmp        eax, 64
    je         xloop75   // 64 / 256 is 0.25.  Blend 75 / 25.
    cmp        eax, 128
    je         xloop50   // 128 / 256 is 0.50.  Blend 50 / 50.
    cmp        eax, 192
    je         xloop25   // 192 / 256 is 0.75.  Blend 25 / 75.

    movd       xmm5, eax            // xmm5 = y fraction
    punpcklbw  xmm5, xmm5
    psrlw      xmm5, 1
    punpcklwd  xmm5, xmm5
    punpckldq  xmm5, xmm5
    punpcklqdq xmm5, xmm5
    pxor       xmm4, xmm4

  xloop:
    movdqu     xmm0, [esi]  // row0
    movdqu     xmm2, [esi + edx]  // row1
    movdqu     xmm1, xmm0
    movdqu     xmm3, xmm2
    punpcklbw  xmm2, xmm4
    punpckhbw  xmm3, xmm4
    punpcklbw  xmm0, xmm4
    punpckhbw  xmm1, xmm4
    psubw      xmm2, xmm0  // row1 - row0
    psubw      xmm3, xmm1
    paddw      xmm2, xmm2  // 9 bits * 15 bits = 8.16
    paddw      xmm3, xmm3
    pmulhw     xmm2, xmm5  // scale diff
    pmulhw     xmm3, xmm5
    paddw      xmm0, xmm2  // sum rows
    paddw      xmm1, xmm3
    packuswb   xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop
    jmp        xloop99

    // Blend 25 / 75.
  xloop25:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop25
    jmp        xloop99

    // Blend 50 / 50.
  xloop50:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop50
    jmp        xloop99

    // Blend 75 / 25.
  xloop75:
    movdqu     xmm1, [esi]
    movdqu     xmm0, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop75
    jmp        xloop99

    // Blend 100 / 0 - Copy row unchanged.
  xloop100:
    movdqu     xmm0, [esi]
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop100

  xloop99:
    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_INTERPOLATEROW_SSE2

// For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA.
__declspec(naked)
void ARGBShuffleRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
                          const uint8* shuffler, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_argb
    mov        edx, [esp + 8]    // dst_argb
    mov        ecx, [esp + 12]   // shuffler
    movdqu     xmm5, [ecx]
    mov        ecx, [esp + 16]   // pix

  wloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax, [eax + 32]
    pshufb     xmm0, xmm5
    pshufb     xmm1, xmm5
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         wloop
    ret
  }
}

#ifdef HAS_ARGBSHUFFLEROW_AVX2
__declspec(naked)
void ARGBShuffleRow_AVX2(const uint8* src_argb, uint8* dst_argb,
                         const uint8* shuffler, int pix) {
  __asm {
    mov        eax, [esp + 4]     // src_argb
    mov        edx, [esp + 8]     // dst_argb
    mov        ecx, [esp + 12]    // shuffler
    vbroadcastf128 ymm5, [ecx]    // same shuffle in high as low.
    mov        ecx, [esp + 16]    // pix

  wloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax, [eax + 64]
    vpshufb    ymm0, ymm0, ymm5
    vpshufb    ymm1, ymm1, ymm5
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         wloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBSHUFFLEROW_AVX2

__declspec(naked)
void ARGBShuffleRow_SSE2(const uint8* src_argb, uint8* dst_argb,
                         const uint8* shuffler, int pix) {
  __asm {
    push       ebx
    push       esi
    mov        eax, [esp + 8 + 4]    // src_argb
    mov        edx, [esp + 8 + 8]    // dst_argb
    mov        esi, [esp + 8 + 12]   // shuffler
    mov        ecx, [esp + 8 + 16]   // pix
    pxor       xmm5, xmm5

    mov        ebx, [esi]   // shuffler
    cmp        ebx, 0x03000102
    je         shuf_3012
    cmp        ebx, 0x00010203
    je         shuf_0123
    cmp        ebx, 0x00030201
    je         shuf_0321
    cmp        ebx, 0x02010003
    je         shuf_2103

  // TODO(fbarchard): Use one source pointer and 3 offsets.
  shuf_any1:
    movzx      ebx, byte ptr [esi]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx], bl
    movzx      ebx, byte ptr [esi + 1]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx + 1], bl
    movzx      ebx, byte ptr [esi + 2]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx + 2], bl
    movzx      ebx, byte ptr [esi + 3]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx + 3], bl
    lea        eax, [eax + 4]
    lea        edx, [edx + 4]
    sub        ecx, 1
    jg         shuf_any1
    jmp        shuf99

  shuf_0123:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 01Bh   // 1B = 00011011 = 0x0123 = BGRAToARGB
    pshuflw    xmm0, xmm0, 01Bh
    pshufhw    xmm1, xmm1, 01Bh
    pshuflw    xmm1, xmm1, 01Bh
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_0123
    jmp        shuf99

  shuf_0321:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 039h   // 39 = 00111001 = 0x0321 = RGBAToARGB
    pshuflw    xmm0, xmm0, 039h
    pshufhw    xmm1, xmm1, 039h
    pshuflw    xmm1, xmm1, 039h
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_0321
    jmp        shuf99

  shuf_2103:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 093h   // 93 = 10010011 = 0x2103 = ARGBToRGBA
    pshuflw    xmm0, xmm0, 093h
    pshufhw    xmm1, xmm1, 093h
    pshuflw    xmm1, xmm1, 093h
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_2103
    jmp        shuf99

  shuf_3012:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 0C6h   // C6 = 11000110 = 0x3012 = ABGRToARGB
    pshuflw    xmm0, xmm0, 0C6h
    pshufhw    xmm1, xmm1, 0C6h
    pshuflw    xmm1, xmm1, 0C6h
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_3012

  shuf99:
    pop        esi
    pop        ebx
    ret
  }
}

// YUY2 - Macro-pixel = 2 image pixels
// Y0U0Y1V0....Y2U2Y3V2...Y4U4Y5V4....

// UYVY - Macro-pixel = 2 image pixels
// U0Y0V0Y1

__declspec(naked)
void I422ToYUY2Row_SSE2(const uint8* src_y,
                        const uint8* src_u,
                        const uint8* src_v,
                        uint8* dst_frame, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_y
    mov        esi, [esp + 8 + 8]    // src_u
    mov        edx, [esp + 8 + 12]   // src_v
    mov        edi, [esp + 8 + 16]   // dst_frame
    mov        ecx, [esp + 8 + 20]   // width
    sub        edx, esi

  convertloop:
    movq       xmm2, qword ptr [esi] // U
    movq       xmm3, qword ptr [esi + edx] // V
    lea        esi, [esi + 8]
    punpcklbw  xmm2, xmm3 // UV
    movdqu     xmm0, [eax] // Y
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm2 // YUYV
    punpckhbw  xmm1, xmm2
    movdqu     [edi], xmm0
    movdqu     [edi + 16], xmm1
    lea        edi, [edi + 32]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void I422ToUYVYRow_SSE2(const uint8* src_y,
                        const uint8* src_u,
                        const uint8* src_v,
                        uint8* dst_frame, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_y
    mov        esi, [esp + 8 + 8]    // src_u
    mov        edx, [esp + 8 + 12]   // src_v
    mov        edi, [esp + 8 + 16]   // dst_frame
    mov        ecx, [esp + 8 + 20]   // width
    sub        edx, esi

  convertloop:
    movq       xmm2, qword ptr [esi] // U
    movq       xmm3, qword ptr [esi + edx] // V
    lea        esi, [esi + 8]
    punpcklbw  xmm2, xmm3 // UV
    movdqu     xmm0, [eax] // Y
    movdqa     xmm1, xmm2
    lea        eax, [eax + 16]
    punpcklbw  xmm1, xmm0 // UYVY
    punpckhbw  xmm2, xmm0
    movdqu     [edi], xmm1
    movdqu     [edi + 16], xmm2
    lea        edi, [edi + 32]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
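
// Scalar sketch of the 4:2:2 packing above: each macro-pixel takes two Y
// samples and one U/V pair, ordered Y0 U Y1 V for YUY2 (UYVY swaps the byte
// order to U Y0 V Y1). Illustrative only; width is assumed even.
static void I422ToYUY2Row_Sketch(const uint8* src_y, const uint8* src_u,
                                 const uint8* src_v, uint8* dst_frame,
                                 int width) {
  for (int i = 0; i < width; i += 2) {
    dst_frame[0] = src_y[0];
    dst_frame[1] = src_u[0];
    dst_frame[2] = src_y[1];
    dst_frame[3] = src_v[0];
    src_y += 2;
    src_u += 1;
    src_v += 1;
    dst_frame += 4;
  }
}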

#ifdef HAS_ARGBPOLYNOMIALROW_SSE2
__declspec(naked)
void ARGBPolynomialRow_SSE2(const uint8* src_argb,
                            uint8* dst_argb, const float* poly,
                            int width) {
  __asm {
6069 6070 6071 6072 6073
    push       esi
    mov        eax, [esp + 4 + 4]   /* src_argb */
    mov        edx, [esp + 4 + 8]   /* dst_argb */
    mov        esi, [esp + 4 + 12]  /* poly */
    mov        ecx, [esp + 4 + 16]  /* width */
    pxor       xmm3, xmm3  // 0 constant for zero extending bytes to ints.

    // 2 pixel loop.
 convertloop:
    // SSE4.1 alternative to the movq/punpck sequence below (unused so the
    // loop stays SSE2-only):
    // pmovzxbd  xmm0, dword ptr [eax]      // BGRA pixel
    // pmovzxbd  xmm4, dword ptr [eax + 4]  // BGRA pixel
    movq       xmm0, qword ptr [eax]  // BGRABGRA
    lea        eax, [eax + 8]
    punpcklbw  xmm0, xmm3
    movdqa     xmm4, xmm0
    punpcklwd  xmm0, xmm3  // pixel 0
    punpckhwd  xmm4, xmm3  // pixel 1
    cvtdq2ps   xmm0, xmm0  // 4 floats
    cvtdq2ps   xmm4, xmm4
    movdqa     xmm1, xmm0  // X
    movdqa     xmm5, xmm4
    mulps      xmm0, [esi + 16]  // C1 * X
    mulps      xmm4, [esi + 16]
    addps      xmm0, [esi]  // result = C0 + C1 * X
    addps      xmm4, [esi]
    movdqa     xmm2, xmm1
    movdqa     xmm6, xmm5
    mulps      xmm2, xmm1  // X * X
    mulps      xmm6, xmm5
    mulps      xmm1, xmm2  // X * X * X
    mulps      xmm5, xmm6
    mulps      xmm2, [esi + 32]  // C2 * X * X
    mulps      xmm6, [esi + 32]
    mulps      xmm1, [esi + 48]  // C3 * X * X * X
    mulps      xmm5, [esi + 48]
    addps      xmm0, xmm2  // result += C2 * X * X
    addps      xmm4, xmm6
    addps      xmm0, xmm1  // result += C3 * X * X * X
    addps      xmm4, xmm5
    cvttps2dq  xmm0, xmm0
    cvttps2dq  xmm4, xmm4
    packuswb   xmm0, xmm4
    packuswb   xmm0, xmm0
    movq       qword ptr [edx], xmm0
    lea        edx, [edx + 8]
    sub        ecx, 2
    jg         convertloop
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBPOLYNOMIALROW_SSE2
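// The polynomial rows above and below evaluate, for each byte channel X,
// result = C0 + C1*X + C2*X*X + C3*X*X*X, where poly points at 4 vectors
// of 4 floats (one coefficient per B,G,R,A channel). A scalar C sketch
// (hypothetical helper, illustrative only, excluded from the build):
#if 0
static void ARGBPolynomialRow_C_Sketch(const uint8* src_argb,
                                       uint8* dst_argb, const float* poly,
                                       int width) {
  int i, c;
  for (i = 0; i < width; ++i) {
    for (c = 0; c < 4; ++c) {
      float x = (float)src_argb[i * 4 + c];
      float v = poly[c] + poly[c + 4] * x + poly[c + 8] * x * x +
                poly[c + 12] * x * x * x;
      if (v < 0.f) v = 0.f;      // the SIMD versions saturate when packing
      if (v > 255.f) v = 255.f;
      dst_argb[i * 4 + c] = (uint8)v;
    }
  }
}
#endif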

#ifdef HAS_ARGBPOLYNOMIALROW_AVX2
__declspec(naked)
void ARGBPolynomialRow_AVX2(const uint8* src_argb,
                            uint8* dst_argb, const float* poly,
                            int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_argb */
    mov        ecx, [esp + 12]   /* poly */
    vbroadcastf128 ymm4, [ecx]       // C0
    vbroadcastf128 ymm5, [ecx + 16]  // C1
    vbroadcastf128 ymm6, [ecx + 32]  // C2
    vbroadcastf128 ymm7, [ecx + 48]  // C3
    mov        ecx, [esp + 16]  /* width */

    // 2 pixel loop.
 convertloop:
    vpmovzxbd   ymm0, qword ptr [eax]  // 2 BGRA pixels
    lea         eax, [eax + 8]
    vcvtdq2ps   ymm0, ymm0        // X 8 floats
    vmulps      ymm2, ymm0, ymm0  // X * X
    vmulps      ymm3, ymm0, ymm7  // C3 * X
    vfmadd132ps ymm0, ymm4, ymm5  // result = C0 + C1 * X
    vfmadd231ps ymm0, ymm2, ymm6  // result += C2 * X * X
    vfmadd231ps ymm0, ymm2, ymm3  // result += C3 * X * X * X
    vcvttps2dq  ymm0, ymm0
    vpackusdw   ymm0, ymm0, ymm0  // b0g0r0a0_00000000_b0g0r0a0_00000000
    vpermq      ymm0, ymm0, 0xd8  // b0g0r0a0_b0g0r0a0_00000000_00000000
    vpackuswb   xmm0, xmm0, xmm0  // bgrabgra_00000000_00000000_00000000
    vmovq       qword ptr [edx], xmm0
    lea         edx, [edx + 8]
    sub         ecx, 2
    jg          convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBPOLYNOMIALROW_AVX2

#ifdef HAS_ARGBCOLORTABLEROW_X86
// Transform ARGB pixels with color table.
__declspec(naked)
void ARGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb,
                           int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   /* dst_argb */
    mov        esi, [esp + 4 + 8]   /* table_argb */
    mov        ecx, [esp + 4 + 12]  /* width */

    // 1 pixel loop.
  convertloop:
    movzx      edx, byte ptr [eax]
    lea        eax, [eax + 4]
    movzx      edx, byte ptr [esi + edx * 4]
    mov        byte ptr [eax - 4], dl
    movzx      edx, byte ptr [eax - 4 + 1]
    movzx      edx, byte ptr [esi + edx * 4 + 1]
    mov        byte ptr [eax - 4 + 1], dl
    movzx      edx, byte ptr [eax - 4 + 2]
    movzx      edx, byte ptr [esi + edx * 4 + 2]
    mov        byte ptr [eax - 4 + 2], dl
    movzx      edx, byte ptr [eax - 4 + 3]
    movzx      edx, byte ptr [esi + edx * 4 + 3]
    mov        byte ptr [eax - 4 + 3], dl
    dec        ecx
    jg         convertloop
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBCOLORTABLEROW_X86
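// A scalar C sketch of the color table transform above: table_argb holds
// 256 entries of 4 bytes and each channel indexes its own byte within an
// entry. The RGB variant below is identical except alpha is left untouched.
// (Hypothetical helper, illustrative only, excluded from the build.)
#if 0
static void ARGBColorTableRow_C_Sketch(uint8* dst_argb,
                                       const uint8* table_argb, int width) {
  int i;
  for (i = 0; i < width; ++i) {
    dst_argb[0] = table_argb[dst_argb[0] * 4 + 0];  // B
    dst_argb[1] = table_argb[dst_argb[1] * 4 + 1];  // G
    dst_argb[2] = table_argb[dst_argb[2] * 4 + 2];  // R
    dst_argb[3] = table_argb[dst_argb[3] * 4 + 3];  // A
    dst_argb += 4;
  }
}
#endif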

#ifdef HAS_RGBCOLORTABLEROW_X86
// Transform RGB pixels with color table.
__declspec(naked)
void RGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb,
                          int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   /* dst_argb */
    mov        esi, [esp + 4 + 8]   /* table_argb */
    mov        ecx, [esp + 4 + 12]  /* width */

    // 1 pixel loop.
  convertloop:
    movzx      edx, byte ptr [eax]
    lea        eax, [eax + 4]
    movzx      edx, byte ptr [esi + edx * 4]
    mov        byte ptr [eax - 4], dl
    movzx      edx, byte ptr [eax - 4 + 1]
    movzx      edx, byte ptr [esi + edx * 4 + 1]
    mov        byte ptr [eax - 4 + 1], dl
    movzx      edx, byte ptr [eax - 4 + 2]
    movzx      edx, byte ptr [esi + edx * 4 + 2]
    mov        byte ptr [eax - 4 + 2], dl
    dec        ecx
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_RGBCOLORTABLEROW_X86

#ifdef HAS_ARGBLUMACOLORTABLEROW_SSSE3
// Transform ARGB pixels with luma table. Alpha is copied through unchanged.
__declspec(naked)
void ARGBLumaColorTableRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
                                 int width,
                                 const uint8* luma, uint32 lumacoeff) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   /* src_argb */
    mov        edi, [esp + 8 + 8]   /* dst_argb */
    mov        ecx, [esp + 8 + 12]  /* width */
    movd       xmm2, dword ptr [esp + 8 + 16]  // luma table
    movd       xmm3, dword ptr [esp + 8 + 20]  // lumacoeff
    pshufd     xmm2, xmm2, 0
    pshufd     xmm3, xmm3, 0
    pcmpeqb    xmm4, xmm4        // generate mask 0xff00ff00
    psllw      xmm4, 8
    pxor       xmm5, xmm5

    // 4 pixel loop.
  convertloop:
    movdqu     xmm0, xmmword ptr [eax]   // 4 ARGB pixels, to generate luma ptrs
    pmaddubsw  xmm0, xmm3
    phaddw     xmm0, xmm0
    pand       xmm0, xmm4  // mask out low bits
    punpcklwd  xmm0, xmm5
    paddd      xmm0, xmm2  // add table base
    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // 00111001 to rotate right 32

    movzx      edx, byte ptr [eax]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi], dl
    movzx      edx, byte ptr [eax + 1]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 1], dl
    movzx      edx, byte ptr [eax + 2]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 2], dl
    movzx      edx, byte ptr [eax + 3]  // copy alpha.
    mov        byte ptr [edi + 3], dl

    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // 00111001 to rotate right 32

    movzx      edx, byte ptr [eax + 4]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 4], dl
    movzx      edx, byte ptr [eax + 5]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 5], dl
    movzx      edx, byte ptr [eax + 6]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 6], dl
    movzx      edx, byte ptr [eax + 7]  // copy alpha.
    mov        byte ptr [edi + 7], dl

    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // 00111001 to rotate right 32

    movzx      edx, byte ptr [eax + 8]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 8], dl
    movzx      edx, byte ptr [eax + 9]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 9], dl
    movzx      edx, byte ptr [eax + 10]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 10], dl
    movzx      edx, byte ptr [eax + 11]  // copy alpha.
    mov        byte ptr [edi + 11], dl

    movd       esi, xmm0

    movzx      edx, byte ptr [eax + 12]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 12], dl
    movzx      edx, byte ptr [eax + 13]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 13], dl
    movzx      edx, byte ptr [eax + 14]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 14], dl
    movzx      edx, byte ptr [eax + 15]  // copy alpha.
    mov        byte ptr [edi + 15], dl

    lea        eax, [eax + 16]
    lea        edi, [edi + 16]
    sub        ecx, 4
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBLUMACOLORTABLEROW_SSSE3
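// A scalar C sketch of the luma table transform above: a weighted luma is
// computed per pixel from lumacoeff (packed per-channel byte weights, B in
// the low byte), its high byte selects a 256-byte row of the luma table,
// and B/G/R are remapped through that row while alpha is copied.
// (Hypothetical helper, illustrative only, excluded from the build.)
#if 0
static void ARGBLumaColorTableRow_C_Sketch(const uint8* src_argb,
                                           uint8* dst_argb, int width,
                                           const uint8* luma,
                                           uint32 lumacoeff) {
  const uint32 bc = lumacoeff & 0xff;
  const uint32 gc = (lumacoeff >> 8) & 0xff;
  const uint32 rc = (lumacoeff >> 16) & 0xff;
  int i;
  for (i = 0; i < width; ++i) {
    const uint8* row =
        luma + ((src_argb[0] * bc + src_argb[1] * gc + src_argb[2] * rc) &
                0xff00);
    dst_argb[0] = row[src_argb[0]];  // B
    dst_argb[1] = row[src_argb[1]];  // G
    dst_argb[2] = row[src_argb[2]];  // R
    dst_argb[3] = src_argb[3];       // copy alpha
    src_argb += 4;
    dst_argb += 4;
  }
}
#endif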

#endif  // defined(_M_IX86)
#endif  // !defined(LIBYUV_DISABLE_X86) && (defined(_M_IX86) || defined(_M_X64))

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif