/*
 *  Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/row.h"

#if defined(_M_X64) && !defined(LIBYUV_DISABLE_X86) && defined(_MSC_VER)
#include <emmintrin.h>
#include <tmmintrin.h>  // For _mm_maddubs_epi16
#endif

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

// This module is for Visual C.
#if !defined(LIBYUV_DISABLE_X86) && defined(_MSC_VER) && \
    (defined(_M_IX86) || defined(_M_X64))

// C reference code that mimics the YUV assembly.
#define YG 19071 /* round(1.164 * 64 * 256) */
#define YGB 1192 /* round(1.164 * 64 * 16) */

#define UB -128 /* -min(128, round(2.018 * 64)) */
#define UG 25 /* -round(-0.391 * 64) */
#define UR 0

#define VB 0
#define VG 52 /* -round(-0.813 * 64) */
#define VR -102 /* -round(1.596 * 64) */

// Bias
#define BB (UB * 128 + VB * 128 - YGB)
#define BG (UG * 128 + VG * 128 - YGB)
#define BR (UR * 128 + VR * 128 - YGB)

static const vec8 kUVToB = {
  UB, VB, UB, VB, UB, VB, UB, VB, UB, VB, UB, VB, UB, VB, UB, VB
};

static const vec8 kUVToR = {
  UR, VR, UR, VR, UR, VR, UR, VR, UR, VR, UR, VR, UR, VR, UR, VR
};

static const vec8 kUVToG = {
  UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG
};

static const vec8 kVUToB = {
  VB, UB, VB, UB, VB, UB, VB, UB, VB, UB, VB, UB, VB, UB, VB, UB,
};

static const vec8 kVUToR = {
  VR, UR, VR, UR, VR, UR, VR, UR, VR, UR, VR, UR, VR, UR, VR, UR,
};

static const vec8 kVUToG = {
  VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG,
};

static const vec16 kYToRgb = { YG, YG, YG, YG, YG, YG, YG, YG };
static const vec16 kUVBiasB = { BB, BB, BB, BB, BB, BB, BB, BB };
static const vec16 kUVBiasG = { BG, BG, BG, BG, BG, BG, BG, BG };
static const vec16 kUVBiasR = { BR, BR, BR, BR, BR, BR, BR, BR };
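
// A minimal scalar sketch of the 6 bit fixed point math the constants above
// encode (illustration only: no row function below calls these helpers and
// the names are hypothetical). Roughly B = 1.164*(Y-16) + 2.0*(U-128),
// G = 1.164*(Y-16) - 0.391*(U-128) - 0.813*(V-128),
// R = 1.164*(Y-16) + 1.596*(V-128), with the 2.018 blue coefficient clamped
// to 2.0 because UB is limited to -128. Shifts are arithmetic, as in the
// assembly.
static __inline uint8 ScalarClamp255(int v) {
  return (uint8)(v < 0 ? 0 : (v > 255 ? 255 : v));
}
static __inline void ScalarYuvPixel(uint8 y, uint8 u, uint8 v,
                                    uint8* b, uint8* g, uint8* r) {
  int y1 = ((int)y * 0x0101 * YG) >> 16;  // same scaling as punpcklbw+pmulhuw
  *b = ScalarClamp255((y1 - YGB + 128 * (u - 128)) >> 6);
  *g = ScalarClamp255((y1 - YGB - UG * (u - 128) - VG * (v - 128)) >> 6);
  *r = ScalarClamp255((y1 - YGB - VR * (v - 128)) >> 6);
}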

// 64 bit
#if defined(_M_X64)

__declspec(align(16))
void I422ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         int width) {
  __m128i xmm0, xmm1, xmm2, xmm3;
  const __m128i xmm5 = _mm_set1_epi8(-1);
  const ptrdiff_t offset = (uint8*)v_buf - (uint8*)u_buf;

  while (width > 0) {
    xmm0 = _mm_cvtsi32_si128(*(uint32*)u_buf);
    xmm1 = _mm_cvtsi32_si128(*(uint32*)(u_buf + offset));
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);
    xmm0 = _mm_unpacklo_epi16(xmm0, xmm0);
    xmm1 = _mm_loadu_si128(&xmm0);
    xmm2 = _mm_loadu_si128(&xmm0);
    xmm0 = _mm_maddubs_epi16(xmm0, *(__m128i*)kUVToB);
    xmm1 = _mm_maddubs_epi16(xmm1, *(__m128i*)kUVToG);
    xmm2 = _mm_maddubs_epi16(xmm2, *(__m128i*)kUVToR);
    xmm0 = _mm_sub_epi16(xmm0, *(__m128i*)kUVBiasB);
    xmm1 = _mm_sub_epi16(xmm1, *(__m128i*)kUVBiasG);
    xmm2 = _mm_sub_epi16(xmm2, *(__m128i*)kUVBiasR);
    xmm3 = _mm_loadl_epi64((__m128i*)y_buf);
    xmm3 = _mm_unpacklo_epi8(xmm3, xmm3);
    xmm3 = _mm_mulhi_epu16(xmm3, *(__m128i*)kYToRgb);
    xmm0 = _mm_adds_epi16(xmm0, xmm3);
    xmm1 = _mm_adds_epi16(xmm1, xmm3);
    xmm2 = _mm_adds_epi16(xmm2, xmm3);
    xmm0 = _mm_srai_epi16(xmm0, 6);
    xmm1 = _mm_srai_epi16(xmm1, 6);
    xmm2 = _mm_srai_epi16(xmm2, 6);
    xmm0 = _mm_packus_epi16(xmm0, xmm0);
    xmm1 = _mm_packus_epi16(xmm1, xmm1);
    xmm2 = _mm_packus_epi16(xmm2, xmm2);
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm5);
    xmm1 = _mm_loadu_si128(&xmm0);
    xmm0 = _mm_unpacklo_epi16(xmm0, xmm2);
    xmm1 = _mm_unpackhi_epi16(xmm1, xmm2);

    _mm_storeu_si128((__m128i *)dst_argb, xmm0);
    _mm_storeu_si128((__m128i *)(dst_argb + 16), xmm1);

    y_buf += 8;
    u_buf += 4;
    dst_argb += 32;
    width -= 8;
  }
}

// 32 bit
#else  // defined(_M_X64)

#ifdef HAS_ARGBTOYROW_SSSE3

// Constants for ARGB.
static const vec8 kARGBToY = {
  13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0
};

// JPeg full range.
static const vec8 kARGBToYJ = {
  15, 75, 38, 0, 15, 75, 38, 0, 15, 75, 38, 0, 15, 75, 38, 0
};

static const vec8 kARGBToU = {
  112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0
};

static const vec8 kARGBToUJ = {
  127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0
};

static const vec8 kARGBToV = {
  -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0,
};

static const vec8 kARGBToVJ = {
  -20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0
};

// vpshufb for vphaddw + vpackuswb packed to shorts.
static const lvec8 kShufARGBToUV_AVX = {
  0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15,
  0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15
};

// Constants for BGRA.
static const vec8 kBGRAToY = {
  0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13
};

static const vec8 kBGRAToU = {
  0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112
};

static const vec8 kBGRAToV = {
  0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18
};

// Constants for ABGR.
static const vec8 kABGRToY = {
  33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0
};

static const vec8 kABGRToU = {
  -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0
};

static const vec8 kABGRToV = {
  112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0
};

// Constants for RGBA.
static const vec8 kRGBAToY = {
  0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33
};

static const vec8 kRGBAToU = {
  0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38
};

static const vec8 kRGBAToV = {
  0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112
};

static const uvec8 kAddY16 = {
  16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u
};

// 7 bit fixed point 0.5.
static const vec16 kAddYJ64 = {
  64, 64, 64, 64, 64, 64, 64, 64
};

static const uvec8 kAddUV128 = {
  128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u,
  128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u
};

static const uvec16 kAddUVJ128 = {
  0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u
};

// Shuffle table for converting RGB24 to ARGB.
static const uvec8 kShuffleMaskRGB24ToARGB = {
  0u, 1u, 2u, 12u, 3u, 4u, 5u, 13u, 6u, 7u, 8u, 14u, 9u, 10u, 11u, 15u
};

// Shuffle table for converting RAW to ARGB.
static const uvec8 kShuffleMaskRAWToARGB = {
  2u, 1u, 0u, 12u, 5u, 4u, 3u, 13u, 8u, 7u, 6u, 14u, 11u, 10u, 9u, 15u
};

// Shuffle table for converting ARGB to RGB24.
static const uvec8 kShuffleMaskARGBToRGB24 = {
  0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 10u, 12u, 13u, 14u, 128u, 128u, 128u, 128u
};

// Shuffle table for converting ARGB to RAW.
static const uvec8 kShuffleMaskARGBToRAW = {
  2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 8u, 14u, 13u, 12u, 128u, 128u, 128u, 128u
};

// Shuffle table for converting ARGBToRGB24 for I422ToRGB24.  First 8 + next 4.
static const uvec8 kShuffleMaskARGBToRGB24_0 = {
  0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 128u, 128u, 128u, 128u, 10u, 12u, 13u, 14u
};

// Shuffle table for converting ARGB to RAW.
static const uvec8 kShuffleMaskARGBToRAW_0 = {
  2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 128u, 128u, 128u, 128u, 8u, 14u, 13u, 12u
};

// Duplicates gray value 3 times and fills in alpha opaque.
__declspec(naked) __declspec(align(16))
void I400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int pix) {
  __asm {
    mov        eax, [esp + 4]        // src_y
    mov        edx, [esp + 8]        // dst_argb
    mov        ecx, [esp + 12]       // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0xff000000
    pslld      xmm5, 24

  convertloop:
    movq       xmm0, qword ptr [eax]
    lea        eax,  [eax + 8]
    punpcklbw  xmm0, xmm0
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm0
    punpckhwd  xmm1, xmm1
    por        xmm0, xmm5
    por        xmm1, xmm5
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}

__declspec(naked) __declspec(align(16))
void RGB24ToARGBRow_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_rgb24
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm5, xmm5       // generate mask 0xff000000
    pslld     xmm5, 24
    movdqa    xmm4, kShuffleMaskRGB24ToARGB

 convertloop:
    movdqu    xmm0, [eax]
    movdqu    xmm1, [eax + 16]
    movdqu    xmm3, [eax + 32]
    lea       eax, [eax + 48]
    movdqa    xmm2, xmm3
    palignr   xmm2, xmm1, 8    // xmm2 = { xmm3[0:3] xmm1[8:15]}
    pshufb    xmm2, xmm4
    por       xmm2, xmm5
    palignr   xmm1, xmm0, 12   // xmm1 = { xmm3[0:7] xmm0[12:15]}
    pshufb    xmm0, xmm4
    movdqu    [edx + 32], xmm2
    por       xmm0, xmm5
    pshufb    xmm1, xmm4
    movdqu    [edx], xmm0
    por       xmm1, xmm5
    palignr   xmm3, xmm3, 4    // xmm3 = { xmm3[4:15]}
    pshufb    xmm3, xmm4
    movdqu    [edx + 16], xmm1
    por       xmm3, xmm5
    movdqu    [edx + 48], xmm3
    lea       edx, [edx + 64]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

__declspec(naked) __declspec(align(16))
void RAWToARGBRow_SSSE3(const uint8* src_raw, uint8* dst_argb,
                        int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_raw
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm5, xmm5       // generate mask 0xff000000
    pslld     xmm5, 24
    movdqa    xmm4, kShuffleMaskRAWToARGB

 convertloop:
    movdqu    xmm0, [eax]
    movdqu    xmm1, [eax + 16]
    movdqu    xmm3, [eax + 32]
    lea       eax, [eax + 48]
    movdqa    xmm2, xmm3
    palignr   xmm2, xmm1, 8    // xmm2 = { xmm3[0:3] xmm1[8:15]}
    pshufb    xmm2, xmm4
    por       xmm2, xmm5
    palignr   xmm1, xmm0, 12   // xmm1 = { xmm3[0:7] xmm0[12:15]}
    pshufb    xmm0, xmm4
    movdqu    [edx + 32], xmm2
    por       xmm0, xmm5
    pshufb    xmm1, xmm4
    movdqu    [edx], xmm0
    por       xmm1, xmm5
    palignr   xmm3, xmm3, 4    // xmm3 = { xmm3[4:15]}
    pshufb    xmm3, xmm4
    movdqu    [edx + 16], xmm1
    por       xmm3, xmm5
    movdqu    [edx + 48], xmm3
    lea       edx, [edx + 64]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

// pmul method to replicate bits.
// Math to replicate bits:
// (v << 8) | (v << 3)
// v * 256 + v * 8
// v * (256 + 8)
// G shift of 5 is incorporated, so shift is 5 + 8 and 5 + 3
// 20 instructions.
__declspec(naked) __declspec(align(16))
void RGB565ToARGBRow_SSE2(const uint8* src_rgb565, uint8* dst_argb,
                          int pix) {
  __asm {
    mov       eax, 0x01080108  // generate multiplier to repeat 5 bits
    movd      xmm5, eax
    pshufd    xmm5, xmm5, 0
    mov       eax, 0x20802080  // multiplier shift by 5 and then repeat 6 bits
    movd      xmm6, eax
    pshufd    xmm6, xmm6, 0
    pcmpeqb   xmm3, xmm3       // generate mask 0xf800f800 for Red
    psllw     xmm3, 11
    pcmpeqb   xmm4, xmm4       // generate mask 0x07e007e0 for Green
    psllw     xmm4, 10
    psrlw     xmm4, 5
    pcmpeqb   xmm7, xmm7       // generate mask 0xff00ff00 for Alpha
    psllw     xmm7, 8

    mov       eax, [esp + 4]   // src_rgb565
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    sub       edx, eax
    sub       edx, eax

 convertloop:
    movdqu    xmm0, [eax]   // fetch 8 pixels of bgr565
    movdqa    xmm1, xmm0
    movdqa    xmm2, xmm0
    pand      xmm1, xmm3    // R in upper 5 bits
    psllw     xmm2, 11      // B in upper 5 bits
    pmulhuw   xmm1, xmm5    // * (256 + 8)
    pmulhuw   xmm2, xmm5    // * (256 + 8)
    psllw     xmm1, 8
    por       xmm1, xmm2    // RB
    pand      xmm0, xmm4    // G in middle 6 bits
    pmulhuw   xmm0, xmm6    // << 5 * (256 + 4)
    por       xmm0, xmm7    // AG
    movdqa    xmm2, xmm1
    punpcklbw xmm1, xmm0
    punpckhbw xmm2, xmm0
    movdqu    [eax * 2 + edx], xmm1  // store 4 pixels of ARGB
    movdqu    [eax * 2 + edx + 16], xmm2  // store next 4 pixels of ARGB
    lea       eax, [eax + 16]
    sub       ecx, 8
    jg        convertloop
    ret
  }
}
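
// Scalar sketch of the multiply based bit replication used above
// (illustration only; helper names are hypothetical and unused). A 5 bit
// field pre-shifted to bits 15..11 of a lane, multiplied by 0x0108 = 256 + 8
// and keeping the high 16 bits as pmulhuw does, comes out replicated to
// 8 bits, i.e. (v << 3) | (v >> 2). The 6 bit green field at bits 10..5 uses
// 0x2080 = (256 + 4) << 5 the same way, giving (v << 2) | (v >> 4).
static __inline uint8 ScalarReplicate5To8(uint32 v_in_top5) {  // bits 15..11
  return (uint8)((v_in_top5 * 0x0108u) >> 16);
}
static __inline uint8 ScalarReplicate6To8(uint32 v_in_mid6) {  // bits 10..5
  return (uint8)((v_in_mid6 * 0x2080u) >> 16);
}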

// 24 instructions
__declspec(naked) __declspec(align(16))
void ARGB1555ToARGBRow_SSE2(const uint8* src_argb1555, uint8* dst_argb,
                            int pix) {
  __asm {
    mov       eax, 0x01080108  // generate multiplier to repeat 5 bits
    movd      xmm5, eax
    pshufd    xmm5, xmm5, 0
    mov       eax, 0x42004200  // multiplier shift by 6 and then repeat 5 bits
    movd      xmm6, eax
    pshufd    xmm6, xmm6, 0
    pcmpeqb   xmm3, xmm3       // generate mask 0xf800f800 for Red
    psllw     xmm3, 11
    movdqa    xmm4, xmm3       // generate mask 0x03e003e0 for Green
    psrlw     xmm4, 6
    pcmpeqb   xmm7, xmm7       // generate mask 0xff00ff00 for Alpha
    psllw     xmm7, 8

    mov       eax, [esp + 4]   // src_argb1555
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    sub       edx, eax
    sub       edx, eax

 convertloop:
    movdqu    xmm0, [eax]   // fetch 8 pixels of 1555
    movdqa    xmm1, xmm0
    movdqa    xmm2, xmm0
    psllw     xmm1, 1       // R in upper 5 bits
    psllw     xmm2, 11      // B in upper 5 bits
    pand      xmm1, xmm3
    pmulhuw   xmm2, xmm5    // * (256 + 8)
    pmulhuw   xmm1, xmm5    // * (256 + 8)
    psllw     xmm1, 8
    por       xmm1, xmm2    // RB
    movdqa    xmm2, xmm0
    pand      xmm0, xmm4    // G in middle 5 bits
    psraw     xmm2, 8       // A
    pmulhuw   xmm0, xmm6    // << 6 * (256 + 8)
    pand      xmm2, xmm7
    por       xmm0, xmm2    // AG
    movdqa    xmm2, xmm1
    punpcklbw xmm1, xmm0
    punpckhbw xmm2, xmm0
    movdqu    [eax * 2 + edx], xmm1  // store 4 pixels of ARGB
    movdqu    [eax * 2 + edx + 16], xmm2  // store next 4 pixels of ARGB
    lea       eax, [eax + 16]
    sub       ecx, 8
    jg        convertloop
    ret
  }
}

// 18 instructions.
__declspec(naked) __declspec(align(16))
void ARGB4444ToARGBRow_SSE2(const uint8* src_argb4444, uint8* dst_argb,
                            int pix) {
  __asm {
    mov       eax, 0x0f0f0f0f  // generate mask 0x0f0f0f0f
    movd      xmm4, eax
    pshufd    xmm4, xmm4, 0
    movdqa    xmm5, xmm4       // 0xf0f0f0f0 for high nibbles
    pslld     xmm5, 4
    mov       eax, [esp + 4]   // src_argb4444
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    sub       edx, eax
    sub       edx, eax

 convertloop:
    movdqu    xmm0, [eax]   // fetch 8 pixels of bgra4444
    movdqa    xmm2, xmm0
    pand      xmm0, xmm4    // mask low nibbles
    pand      xmm2, xmm5    // mask high nibbles
    movdqa    xmm1, xmm0
    movdqa    xmm3, xmm2
    psllw     xmm1, 4
    psrlw     xmm3, 4
    por       xmm0, xmm1
    por       xmm2, xmm3
    movdqa    xmm1, xmm0
    punpcklbw xmm0, xmm2
    punpckhbw xmm1, xmm2
    movdqu    [eax * 2 + edx], xmm0  // store 4 pixels of ARGB
    movdqu    [eax * 2 + edx + 16], xmm1  // store next 4 pixels of ARGB
    lea       eax, [eax + 16]
    sub       ecx, 8
    jg        convertloop
    ret
  }
}
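
// Scalar sketch of the nibble replication above (illustration only; the
// helper name is hypothetical): a 4 bit channel expands to 8 bits as
// (v << 4) | v.
static __inline uint8 ScalarReplicate4To8(uint8 v4) {
  return (uint8)((v4 << 4) | v4);
}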

__declspec(naked) __declspec(align(16))
void ARGBToRGB24Row_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    movdqa    xmm6, kShuffleMaskARGBToRGB24

 convertloop:
    movdqu    xmm0, [eax]   // fetch 16 pixels of argb
    movdqu    xmm1, [eax + 16]
    movdqu    xmm2, [eax + 32]
    movdqu    xmm3, [eax + 48]
    lea       eax, [eax + 64]
    pshufb    xmm0, xmm6    // pack 16 bytes of ARGB to 12 bytes of RGB
    pshufb    xmm1, xmm6
    pshufb    xmm2, xmm6
    pshufb    xmm3, xmm6
    movdqa    xmm4, xmm1   // 4 bytes from 1 for 0
    psrldq    xmm1, 4      // 8 bytes from 1
    pslldq    xmm4, 12     // 4 bytes from 1 for 0
    movdqa    xmm5, xmm2   // 8 bytes from 2 for 1
    por       xmm0, xmm4   // 4 bytes from 1 for 0
    pslldq    xmm5, 8      // 8 bytes from 2 for 1
    movdqu    [edx], xmm0  // store 0
    por       xmm1, xmm5   // 8 bytes from 2 for 1
    psrldq    xmm2, 8      // 4 bytes from 2
    pslldq    xmm3, 4      // 12 bytes from 3 for 2
    por       xmm2, xmm3   // 12 bytes from 3 for 2
    movdqu    [edx + 16], xmm1   // store 1
    movdqu    [edx + 32], xmm2   // store 2
    lea       edx, [edx + 48]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

__declspec(naked) __declspec(align(16))
void ARGBToRAWRow_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    movdqa    xmm6, kShuffleMaskARGBToRAW

 convertloop:
    movdqu    xmm0, [eax]   // fetch 16 pixels of argb
    movdqu    xmm1, [eax + 16]
    movdqu    xmm2, [eax + 32]
    movdqu    xmm3, [eax + 48]
    lea       eax, [eax + 64]
    pshufb    xmm0, xmm6    // pack 16 bytes of ARGB to 12 bytes of RGB
    pshufb    xmm1, xmm6
    pshufb    xmm2, xmm6
    pshufb    xmm3, xmm6
    movdqa    xmm4, xmm1   // 4 bytes from 1 for 0
    psrldq    xmm1, 4      // 8 bytes from 1
    pslldq    xmm4, 12     // 4 bytes from 1 for 0
    movdqa    xmm5, xmm2   // 8 bytes from 2 for 1
    por       xmm0, xmm4   // 4 bytes from 1 for 0
    pslldq    xmm5, 8      // 8 bytes from 2 for 1
    movdqu    [edx], xmm0  // store 0
    por       xmm1, xmm5   // 8 bytes from 2 for 1
    psrldq    xmm2, 8      // 4 bytes from 2
    pslldq    xmm3, 4      // 12 bytes from 3 for 2
    por       xmm2, xmm3   // 12 bytes from 3 for 2
    movdqu    [edx + 16], xmm1   // store 1
    movdqu    [edx + 32], xmm2   // store 2
    lea       edx, [edx + 48]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

__declspec(naked) __declspec(align(16))
void ARGBToRGB565Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm3, xmm3       // generate mask 0x0000001f
    psrld     xmm3, 27
    pcmpeqb   xmm4, xmm4       // generate mask 0x000007e0
    psrld     xmm4, 26
    pslld     xmm4, 5
    pcmpeqb   xmm5, xmm5       // generate mask 0xfffff800
    pslld     xmm5, 11

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    movdqa    xmm1, xmm0    // B
    movdqa    xmm2, xmm0    // G
    pslld     xmm0, 8       // R
    psrld     xmm1, 3       // B
    psrld     xmm2, 5       // G
    psrad     xmm0, 16      // R
    pand      xmm1, xmm3    // B
    pand      xmm2, xmm4    // G
    pand      xmm0, xmm5    // R
    por       xmm1, xmm2    // BG
    por       xmm0, xmm1    // BGR
    packssdw  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of RGB565
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}
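
// Scalar sketch of the packing above (illustration only; the helper name is
// hypothetical): each ARGB pixel, stored as B, G, R, A bytes, packs to
// RGB565 as (R >> 3) << 11 | (G >> 2) << 5 | (B >> 3).
static __inline uint16 ScalarARGBToRGB565(uint8 b, uint8 g, uint8 r) {
  return (uint16)(((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
}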

// TODO(fbarchard): Improve sign extension/packing.
__declspec(naked) __declspec(align(16))
void ARGBToARGB1555Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm4, xmm4       // generate mask 0x0000001f
    psrld     xmm4, 27
    movdqa    xmm5, xmm4       // generate mask 0x000003e0
    pslld     xmm5, 5
    movdqa    xmm6, xmm4       // generate mask 0x00007c00
    pslld     xmm6, 10
    pcmpeqb   xmm7, xmm7       // generate mask 0xffff8000
    pslld     xmm7, 15

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    movdqa    xmm1, xmm0    // B
    movdqa    xmm2, xmm0    // G
    movdqa    xmm3, xmm0    // R
    psrad     xmm0, 16      // A
    psrld     xmm1, 3       // B
    psrld     xmm2, 6       // G
    psrld     xmm3, 9       // R
    pand      xmm0, xmm7    // A
    pand      xmm1, xmm4    // B
    pand      xmm2, xmm5    // G
    pand      xmm3, xmm6    // R
    por       xmm0, xmm1    // BA
    por       xmm2, xmm3    // GR
    por       xmm0, xmm2    // BGRA
    packssdw  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of ARGB1555
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}

__declspec(naked) __declspec(align(16))
void ARGBToARGB4444Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm4, xmm4       // generate mask 0xf000f000
    psllw     xmm4, 12
    movdqa    xmm3, xmm4       // generate mask 0x00f000f0
    psrlw     xmm3, 8

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    movdqa    xmm1, xmm0
    pand      xmm0, xmm3    // low nibble
    pand      xmm1, xmm4    // high nibble
    psrld     xmm0, 4
    psrld     xmm1, 8
    por       xmm0, xmm1
    packuswb  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of ARGB4444
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}

// Convert 16 ARGB pixels (64 bytes) to 16 Y values.
__declspec(naked) __declspec(align(16))
void ARGBToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, kARGBToY
    movdqa     xmm5, kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}
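
// Scalar sketch of the luma math above (illustration only; the helper name
// is hypothetical): pmaddubsw/phaddw sum 13*B + 65*G + 33*R per pixel,
// psrlw divides by 128, and paddb adds the 16 offset.
static __inline uint8 ScalarARGBToY(uint8 b, uint8 g, uint8 r) {
  return (uint8)(((13 * b + 65 * g + 33 * r) >> 7) + 16);
}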

// Convert 16 ARGB pixels (64 bytes) to 16 YJ values.
// Same as ARGBToYRow but different coefficients, no add 16, but do rounding.
__declspec(naked) __declspec(align(16))
void ARGBToYJRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, kARGBToYJ
    movdqa     xmm5, kAddYJ64

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    paddw      xmm0, xmm5  // Add .5 for rounding.
    paddw      xmm2, xmm5
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

#ifdef HAS_ARGBTOYROW_AVX2
// vpermd for vphaddw + vpackuswb vpermd.
static const lvec32 kPermdARGBToY_AVX = {
  0, 4, 1, 5, 2, 6, 3, 7
};

// Convert 32 ARGB pixels (128 bytes) to 32 Y values.
__declspec(naked) __declspec(align(32))
void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    vbroadcastf128 ymm4, kARGBToY
    vbroadcastf128 ymm5, kAddY16
    vmovdqu    ymm6, kPermdARGBToY_AVX

 convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vmovdqu    ymm2, [eax + 64]
    vmovdqu    ymm3, [eax + 96]
    vpmaddubsw ymm0, ymm0, ymm4
    vpmaddubsw ymm1, ymm1, ymm4
    vpmaddubsw ymm2, ymm2, ymm4
    vpmaddubsw ymm3, ymm3, ymm4
    lea        eax, [eax + 128]
    vphaddw    ymm0, ymm0, ymm1  // mutates.
    vphaddw    ymm2, ymm2, ymm3
    vpsrlw     ymm0, ymm0, 7
    vpsrlw     ymm2, ymm2, 7
    vpackuswb  ymm0, ymm0, ymm2  // mutates.
    vpermd     ymm0, ymm6, ymm0  // For vphaddw + vpackuswb mutation.
    vpaddb     ymm0, ymm0, ymm5  // add 16 for Y
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  //  HAS_ARGBTOYROW_AVX2

#ifdef HAS_ARGBTOYROW_AVX2
// Convert 32 ARGB pixels (128 bytes) to 32 YJ values.
__declspec(naked) __declspec(align(32))
void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    vbroadcastf128 ymm4, kARGBToYJ
    vbroadcastf128 ymm5, kAddYJ64
    vmovdqu    ymm6, kPermdARGBToY_AVX

 convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vmovdqu    ymm2, [eax + 64]
    vmovdqu    ymm3, [eax + 96]
    vpmaddubsw ymm0, ymm0, ymm4
    vpmaddubsw ymm1, ymm1, ymm4
    vpmaddubsw ymm2, ymm2, ymm4
    vpmaddubsw ymm3, ymm3, ymm4
    lea        eax, [eax + 128]
    vphaddw    ymm0, ymm0, ymm1  // mutates.
    vphaddw    ymm2, ymm2, ymm3
    vpaddw     ymm0, ymm0, ymm5  // Add .5 for rounding.
    vpaddw     ymm2, ymm2, ymm5
    vpsrlw     ymm0, ymm0, 7
    vpsrlw     ymm2, ymm2, 7
    vpackuswb  ymm0, ymm0, ymm2  // mutates.
    vpermd     ymm0, ymm6, ymm0  // For vphaddw + vpackuswb mutation.
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  //  HAS_ARGBTOYJROW_AVX2

__declspec(naked) __declspec(align(16))
void BGRAToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, kBGRAToY
    movdqa     xmm5, kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked) __declspec(align(16))
void ABGRToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, kABGRToY
    movdqa     xmm5, kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked) __declspec(align(16))
void RGBAToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, kRGBAToY
    movdqa     xmm5, kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked) __declspec(align(16))
void ARGBToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // pix
    movdqa     xmm5, kAddUV128
    movdqa     xmm6, kARGBToV
    movdqa     xmm7, kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, its 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
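
// Scalar sketch of the chroma math above (illustration only; helper names
// are hypothetical): on the 2x2 averaged B, G, R, kARGBToU and kARGBToV give
// U = ((112*B - 74*G - 38*R) >> 8) + 128 and
// V = ((-18*B - 94*G + 112*R) >> 8) + 128, with arithmetic shifts as psraw.
static __inline uint8 ScalarARGBToU(uint8 b, uint8 g, uint8 r) {
  return (uint8)(((112 * b - 74 * g - 38 * r) >> 8) + 128);
}
static __inline uint8 ScalarARGBToV(uint8 b, uint8 g, uint8 r) {
  return (uint8)(((-18 * b - 94 * g + 112 * r) >> 8) + 128);
}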

__declspec(naked) __declspec(align(16))
void ARGBToUVJRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                        uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // pix
    movdqa     xmm5, kAddUVJ128
    movdqa     xmm6, kARGBToVJ
    movdqa     xmm7, kARGBToUJ
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, its 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    paddw      xmm0, xmm5            // +.5 rounding -> unsigned
    paddw      xmm1, xmm5
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

#ifdef HAS_ARGBTOUVROW_AVX2
__declspec(naked) __declspec(align(32))
void ARGBToUVRow_AVX2(const uint8* src_argb0, int src_stride_argb,
                      uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // pix
    vbroadcastf128 ymm5, kAddUV128
    vbroadcastf128 ymm6, kARGBToV
    vbroadcastf128 ymm7, kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 32x2 argb pixels to 16x1 */
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vmovdqu    ymm2, [eax + 64]
    vmovdqu    ymm3, [eax + 96]
    vpavgb     ymm0, ymm0, [eax + esi]
    vpavgb     ymm1, ymm1, [eax + esi + 32]
    vpavgb     ymm2, ymm2, [eax + esi + 64]
    vpavgb     ymm3, ymm3, [eax + esi + 96]
    lea        eax,  [eax + 128]
    vshufps    ymm4, ymm0, ymm1, 0x88
    vshufps    ymm0, ymm0, ymm1, 0xdd
    vpavgb     ymm0, ymm0, ymm4  // mutated by vshufps
    vshufps    ymm4, ymm2, ymm3, 0x88
    vshufps    ymm2, ymm2, ymm3, 0xdd
    vpavgb     ymm2, ymm2, ymm4  // mutated by vshufps

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 32 different pixels, its 16 pixels of U and 16 of V
    vpmaddubsw ymm1, ymm0, ymm7  // U
    vpmaddubsw ymm3, ymm2, ymm7
    vpmaddubsw ymm0, ymm0, ymm6  // V
    vpmaddubsw ymm2, ymm2, ymm6
    vphaddw    ymm1, ymm1, ymm3  // mutates
    vphaddw    ymm0, ymm0, ymm2
    vpsraw     ymm1, ymm1, 8
    vpsraw     ymm0, ymm0, 8
    vpacksswb  ymm0, ymm1, ymm0  // mutates
    vpermq     ymm0, ymm0, 0xd8  // For vpacksswb
    vpshufb    ymm0, ymm0, kShufARGBToUV_AVX  // For vshufps + vphaddw
    vpaddb     ymm0, ymm0, ymm5  // -> unsigned

    // step 3 - store 16 U and 16 V values
    vextractf128 [edx], ymm0, 0 // U
    vextractf128 [edx + edi], ymm0, 1 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTOUVROW_AVX2

__declspec(naked) __declspec(align(16))
void ARGBToUV444Row_SSSE3(const uint8* src_argb0,
                          uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]   // src_argb
    mov        edx, [esp + 4 + 8]   // dst_u
    mov        edi, [esp + 4 + 12]  // dst_v
    mov        ecx, [esp + 4 + 16]  // pix
    movdqa     xmm5, kAddUV128
    movdqa     xmm6, kARGBToV
    movdqa     xmm7, kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* convert to U and V */
    movdqu     xmm0, [eax]          // U
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm7
    pmaddubsw  xmm1, xmm7
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm3, xmm7
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psraw      xmm0, 8
    psraw      xmm2, 8
    packsswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0

    movdqu     xmm0, [eax]          // V
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm6
    pmaddubsw  xmm1, xmm6
    pmaddubsw  xmm2, xmm6
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psraw      xmm0, 8
    psraw      xmm2, 8
    packsswb   xmm0, xmm2
    paddb      xmm0, xmm5
    lea        eax,  [eax + 64]
    movdqu     [edx + edi], xmm0
    lea        edx,  [edx + 16]
    sub        ecx,  16
    jg         convertloop

    pop        edi
    ret
  }
}

__declspec(naked) __declspec(align(16))
void ARGBToUV422Row_SSSE3(const uint8* src_argb0,
                          uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]   // src_argb
    mov        edx, [esp + 4 + 8]   // dst_u
    mov        edi, [esp + 4 + 12]  // dst_v
    mov        ecx, [esp + 4 + 16]  // pix
    movdqa     xmm5, kAddUV128
    movdqa     xmm6, kARGBToV
    movdqa     xmm7, kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x1 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, its 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}

__declspec(naked) __declspec(align(16))
void BGRAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // pix
    movdqa     xmm5, kAddUV128
    movdqa     xmm6, kBGRAToV
    movdqa     xmm7, kBGRAToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, its 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked) __declspec(align(16))
void ABGRToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // pix
    movdqa     xmm5, kAddUV128
    movdqa     xmm6, kABGRToV
    movdqa     xmm7, kABGRToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, its 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked) __declspec(align(16))
void RGBAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // pix
    movdqa     xmm5, kAddUV128
    movdqa     xmm6, kRGBAToV
    movdqa     xmm7, kRGBAToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, its 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBTOYROW_SSSE3

#if defined(HAS_I422TOARGBROW_AVX2) || defined(HAS_I422TOBGRAROW_AVX2)
static const lvec8 kUVToB_AVX = {
  UB, VB, UB, VB, UB, VB, UB, VB, UB, VB, UB, VB, UB, VB, UB, VB,
  UB, VB, UB, VB, UB, VB, UB, VB, UB, VB, UB, VB, UB, VB, UB, VB
};
static const lvec8 kUVToR_AVX = {
  UR, VR, UR, VR, UR, VR, UR, VR, UR, VR, UR, VR, UR, VR, UR, VR,
  UR, VR, UR, VR, UR, VR, UR, VR, UR, VR, UR, VR, UR, VR, UR, VR
};
static const lvec8 kUVToG_AVX = {
  UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG,
  UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG
};
static const lvec16 kYToRgb_AVX = {
  YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG
};
static const lvec16 kUVBiasB_AVX = {
  BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB
};
static const lvec16 kUVBiasG_AVX = {
  BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG
};
static const lvec16 kUVBiasR_AVX = {
  BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR
};
#endif  // defined(HAS_I422TOARGBROW_AVX2) || defined(HAS_I422TOBGRAROW_AVX2)

// Read 8 UV from 422, upsample to 16 UV.
#define READYUV422_AVX2 __asm {                                                \
    __asm vmovq      xmm0, qword ptr [esi]        /* U */         /* NOLINT */ \
    __asm vmovq      xmm1, qword ptr [esi + edi]  /* V */         /* NOLINT */ \
    __asm lea        esi,  [esi + 8]                                           \
    __asm vpunpcklbw ymm0, ymm0, ymm1             /* UV */                     \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm0, ymm0             /* UVUV (upsample) */        \
  }

// Convert 16 pixels: 16 UV and 16 Y.
#define YUVTORGB_AVX2 __asm {                                                  \
    /* Step 1: Find 8 UV contributions to 16 R,G,B values */                   \
    __asm vpmaddubsw ymm2, ymm0, kUVToR_AVX        /* scale R UV */            \
    __asm vpmaddubsw ymm1, ymm0, kUVToG_AVX        /* scale G UV */            \
    __asm vpmaddubsw ymm0, ymm0, kUVToB_AVX        /* scale B UV */            \
    __asm vmovdqu    ymm3, kUVBiasR_AVX                                        \
    __asm vpsubw     ymm2, ymm3, ymm2                                          \
    __asm vmovdqu    ymm3, kUVBiasG_AVX                                        \
    __asm vpsubw     ymm1, ymm3, ymm1                                          \
    __asm vmovdqu    ymm3, kUVBiasB_AVX                                        \
    __asm vpsubw     ymm0, ymm3, ymm0                                          \
    /* Step 2: Find Y contribution to 16 R,G,B values */                       \
    __asm vmovdqu    xmm3, [eax]                  /* NOLINT */                 \
    __asm lea        eax, [eax + 16]                                           \
    __asm vpermq     ymm3, ymm3, 0xd8                                          \
    __asm vpunpcklbw ymm3, ymm3, ymm3                                          \
    __asm vpmulhuw   ymm3, ymm3, kYToRgb_AVX                                   \
    __asm vpaddsw    ymm0, ymm0, ymm3           /* B += Y */                   \
    __asm vpaddsw    ymm1, ymm1, ymm3           /* G += Y */                   \
    __asm vpaddsw    ymm2, ymm2, ymm3           /* R += Y */                   \
    __asm vpsraw     ymm0, ymm0, 6                                             \
    __asm vpsraw     ymm1, ymm1, 6                                             \
    __asm vpsraw     ymm2, ymm2, 6                                             \
    __asm vpackuswb  ymm0, ymm0, ymm0           /* B */                        \
    __asm vpackuswb  ymm1, ymm1, ymm1           /* G */                        \
    __asm vpackuswb  ymm2, ymm2, ymm2           /* R */                        \
  }

#ifdef HAS_I422TOARGBROW_AVX2
1542 1543 1544 1545
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked) __declspec(align(16))
void I422ToARGBRow_AVX2(const uint8* y_buf,
1546 1547 1548 1549
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        int width) {
1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
1562 1563
    READYUV422_AVX2
    YUVTORGB_AVX2
1564 1565

    // Step 3: Weave into ARGB
1566
    vpunpcklbw ymm0, ymm0, ymm1           // BG
1567
    vpermq     ymm0, ymm0, 0xd8
1568 1569 1570 1571
    vpunpcklbw ymm2, ymm2, ymm5           // RA
    vpermq     ymm2, ymm2, 0xd8
    vpunpcklwd ymm1, ymm0, ymm2           // BGRA first 8 pixels
    vpunpckhwd ymm0, ymm0, ymm2           // BGRA next 8 pixels
1572
    vmovdqu    [edx], ymm1
1573
    vmovdqu    [edx + 32], ymm0
    lea        edx,  [edx + 64]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TOARGBROW_AVX2

#ifdef HAS_I422TOBGRAROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 BGRA (64 bytes).
// TODO(fbarchard): Use macros to reduce duplicate code.  See SSSE3.
__declspec(naked) __declspec(align(16))
void I422ToBGRARow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2

    // Step 3: Weave into BGRA
    vpunpcklbw ymm1, ymm1, ymm0           // GB
    vpermq     ymm1, ymm1, 0xd8
    vpunpcklbw ymm2, ymm5, ymm2           // AR
    vpermq     ymm2, ymm2, 0xd8
    vpunpcklwd ymm0, ymm2, ymm1           // ARGB first 8 pixels
    vpunpckhwd ymm2, ymm2, ymm1           // ARGB next 8 pixels
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm2
    lea        edx,  [edx + 64]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TOBGRAROW_AVX2

#ifdef HAS_I422TORGBAROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 RGBA (64 bytes).
// TODO(fbarchard): Use macros to reduce duplicate code.  See SSSE3.
__declspec(naked) __declspec(align(16))
void I422ToRGBARow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2

    // Step 3: Weave into RGBA
    vpunpcklbw ymm1, ymm1, ymm2           // GR
    vpermq     ymm1, ymm1, 0xd8
    vpunpcklbw ymm2, ymm5, ymm0           // AB
    vpermq     ymm2, ymm2, 0xd8
    vpunpcklwd ymm0, ymm2, ymm1           // ABGR first 8 pixels
    vpunpckhwd ymm1, ymm2, ymm1           // ABGR next 8 pixels
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx,  [edx + 64]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TORGBAROW_AVX2

#ifdef HAS_I422TOABGRROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ABGR (64 bytes).
// TODO(fbarchard): Use macros to reduce duplicate code.  See SSSE3.
__declspec(naked) __declspec(align(16))
void I422ToABGRRow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2

    // Step 3: Weave into ABGR
    vpunpcklbw ymm1, ymm2, ymm1           // RG
    vpermq     ymm1, ymm1, 0xd8
    vpunpcklbw ymm2, ymm0, ymm5           // BA
    vpermq     ymm2, ymm2, 0xd8
    vpunpcklwd ymm0, ymm1, ymm2           // RGBA first 8 pixels
    vpunpckhwd ymm1, ymm1, ymm2           // RGBA next 8 pixels
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx,  [edx + 64]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TOABGRROW_AVX2

#if defined(HAS_I422TOARGBROW_SSSE3)
// TODO(fbarchard): Read that does half size on Y and treats 420 as 444.

// Read 8 UV from 444.
#define READYUV444 __asm {                                                     \
    __asm movq       xmm0, qword ptr [esi] /* U */                /* NOLINT */ \
    __asm movq       xmm1, qword ptr [esi + edi] /* V */          /* NOLINT */ \
    __asm lea        esi,  [esi + 8]                                           \
    __asm punpcklbw  xmm0, xmm1           /* UV */                             \
  }

// Read 4 UV from 422, upsample to 8 UV.
#define READYUV422 __asm {                                                     \
    __asm movd       xmm0, [esi]          /* U */                              \
    __asm movd       xmm1, [esi + edi]    /* V */                              \
    __asm lea        esi,  [esi + 4]                                           \
    __asm punpcklbw  xmm0, xmm1           /* UV */                             \
    __asm punpcklwd  xmm0, xmm0           /* UVUV (upsample) */                \
  }

// Read 2 UV from 411, upsample to 8 UV.
#define READYUV411 __asm {                                                     \
    __asm movzx      ebx, word ptr [esi]        /* U */           /* NOLINT */ \
    __asm movd       xmm0, ebx                                                 \
    __asm movzx      ebx, word ptr [esi + edi]  /* V */           /* NOLINT */ \
    __asm movd       xmm1, ebx                                                 \
    __asm lea        esi,  [esi + 2]                                           \
    __asm punpcklbw  xmm0, xmm1           /* UV */                             \
    __asm punpcklwd  xmm0, xmm0           /* UVUV (upsample) */                \
    __asm punpckldq  xmm0, xmm0           /* UVUV (upsample) */                \
  }
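
// READYUV411 above reads 2 U and 2 V bytes and widens them with three
// unpacks, so each chroma pair covers four output pixels:
//   U0 V0, U1 V1  ->  U0V0 U0V0 U0V0 U0V0 U1V1 U1V1 U1V1 U1V1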

// Read 4 UV from NV12, upsample to 8 UV.
#define READNV12 __asm {                                                       \
    __asm movq       xmm0, qword ptr [esi] /* UV */               /* NOLINT */ \
    __asm lea        esi,  [esi + 8]                                           \
    __asm punpcklwd  xmm0, xmm0           /* UVUV (upsample) */                \
  }

// Convert 8 pixels: 8 UV and 8 Y.
#define YUVTORGB __asm {                                                       \
    /* Step 1: Find 4 UV contributions to 8 R,G,B values */                    \
    __asm movdqa     xmm1, xmm0                                                \
    __asm movdqa     xmm2, xmm0                                                \
    __asm movdqa     xmm3, xmm0                                                \
    __asm movdqa     xmm0, kUVBiasB      /* unbias back to signed */           \
    __asm pmaddubsw  xmm1, kUVToB        /* scale B UV */                      \
    __asm psubw      xmm0, xmm1                                                \
    __asm movdqa     xmm1, kUVBiasG                                            \
    __asm pmaddubsw  xmm2, kUVToG        /* scale G UV */                      \
    __asm psubw      xmm1, xmm2                                                \
    __asm movdqa     xmm2, kUVBiasR                                            \
    __asm pmaddubsw  xmm3, kUVToR        /* scale R UV */                      \
    __asm psubw      xmm2, xmm3                                                \
    /* Step 2: Find Y contribution to 8 R,G,B values */                        \
    __asm movq       xmm3, qword ptr [eax]                        /* NOLINT */ \
    __asm lea        eax, [eax + 8]                                            \
    __asm punpcklbw  xmm3, xmm3                                                \
    __asm pmulhuw    xmm3, kYToRgb                                             \
    __asm paddsw     xmm0, xmm3           /* B += Y */                         \
    __asm paddsw     xmm1, xmm3           /* G += Y */                         \
    __asm paddsw     xmm2, xmm3           /* R += Y */                         \
    __asm psraw      xmm0, 6                                                   \
    __asm psraw      xmm1, 6                                                   \
    __asm psraw      xmm2, 6                                                   \
    __asm packuswb   xmm0, xmm0           /* B */                              \
    __asm packuswb   xmm1, xmm1           /* G */                              \
    __asm packuswb   xmm2, xmm2           /* R */                              \
  }

// Convert 8 pixels: 8 VU and 8 Y.
#define YVUTORGB __asm {                                                       \
    /* Step 1: Find 4 UV contributions to 8 R,G,B values */                    \
    __asm movdqa     xmm1, xmm0                                                \
    __asm movdqa     xmm2, xmm0                                                \
    __asm movdqa     xmm3, xmm0                                                \
    __asm movdqa     xmm0, kUVBiasB      /* unbias back to signed */           \
    __asm pmaddubsw  xmm1, kVUToB        /* scale B UV */                      \
    __asm psubw      xmm0, xmm1                                                \
    __asm movdqa     xmm1, kUVBiasG                                            \
    __asm pmaddubsw  xmm2, kVUToG        /* scale G UV */                      \
    __asm psubw      xmm1, xmm2                                                \
    __asm movdqa     xmm2, kUVBiasR                                            \
    __asm pmaddubsw  xmm3, kVUToR        /* scale R UV */                      \
    __asm psubw      xmm2, xmm3                                                \
    /* Step 2: Find Y contribution to 8 R,G,B values */                        \
    __asm movq       xmm3, qword ptr [eax]                        /* NOLINT */ \
    __asm lea        eax, [eax + 8]                                            \
    __asm punpcklbw  xmm3, xmm3                                                \
    __asm pmulhuw    xmm3, kYToRgb                                             \
    __asm paddsw     xmm0, xmm3           /* B += Y */                         \
    __asm paddsw     xmm1, xmm3           /* G += Y */                         \
    __asm paddsw     xmm2, xmm3           /* R += Y */                         \
    __asm psraw      xmm0, 6                                                   \
    __asm psraw      xmm1, 6                                                   \
    __asm psraw      xmm2, 6                                                   \
    __asm packuswb   xmm0, xmm0           /* B */                              \
    __asm packuswb   xmm1, xmm1           /* G */                              \
    __asm packuswb   xmm2, xmm2           /* R */                              \
  }

// 8 pixels.
// 8 UV values, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked) __declspec(align(16))
void I444ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READYUV444
    YUVTORGB

    // Step 3: Weave into ARGB
    punpcklbw  xmm0, xmm1           // BG
    punpcklbw  xmm2, xmm5           // RA
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm2           // BGRA first 4 pixels
    punpckhwd  xmm1, xmm2           // BGRA next 4 pixels
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx,  [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RGB24 (24 bytes).
__declspec(naked) __declspec(align(16))
void I422ToRGB24Row_SSSE3(const uint8* y_buf,
                          const uint8* u_buf,
                          const uint8* v_buf,
                          uint8* dst_rgb24,
                          int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // rgb24
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    movdqa     xmm5, kShuffleMaskARGBToRGB24_0
    movdqa     xmm6, kShuffleMaskARGBToRGB24

 convertloop:
    READYUV422
    YUVTORGB

    // Step 3: Weave into RRGB
    punpcklbw  xmm0, xmm1           // BG
    punpcklbw  xmm2, xmm2           // RR
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm2           // BGRR first 4 pixels
    punpckhwd  xmm1, xmm2           // BGRR next 4 pixels
    pshufb     xmm0, xmm5           // Pack into first 8 and last 4 bytes.
    pshufb     xmm1, xmm6           // Pack into first 12 bytes.
    palignr    xmm1, xmm0, 12       // last 4 bytes of xmm0 + 12 from xmm1
    movq       qword ptr [edx], xmm0  // First 8 bytes
    movdqu     [edx + 8], xmm1      // Last 16 bytes; 24 bytes total = 8 RGB pixels.
    lea        edx,  [edx + 24]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
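
// The two pshufb masks above drop the duplicated R byte and compact the
// B,G,R,R dwords into packed 24 bit pixels.  Scalar equivalent for one pixel
// (sketch only; b, g, r are the clamped values produced by YUVTORGB):
//   dst_rgb24[3 * i + 0] = b;
//   dst_rgb24[3 * i + 1] = g;
//   dst_rgb24[3 * i + 2] = r;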

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RAW (24 bytes).
__declspec(naked) __declspec(align(16))
void I422ToRAWRow_SSSE3(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_raw,
                        int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // raw
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    movdqa     xmm5, kShuffleMaskARGBToRAW_0
    movdqa     xmm6, kShuffleMaskARGBToRAW

 convertloop:
    READYUV422
    YUVTORGB

    // Step 3: Weave into RRGB
    punpcklbw  xmm0, xmm1           // BG
    punpcklbw  xmm2, xmm2           // RR
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm2           // BGRR first 4 pixels
    punpckhwd  xmm1, xmm2           // BGRR next 4 pixels
    pshufb     xmm0, xmm5           // Pack into first 8 and last 4 bytes.
    pshufb     xmm1, xmm6           // Pack into first 12 bytes.
    palignr    xmm1, xmm0, 12       // last 4 bytes of xmm0 + 12 from xmm1
    movq       qword ptr [edx], xmm0  // First 8 bytes
    movdqu     [edx + 8], xmm1      // Last 16 bytes; 24 bytes total = 8 RGB pixels.
    lea        edx,  [edx + 24]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RGB565 (16 bytes).
__declspec(naked) __declspec(align(16))
void I422ToRGB565Row_SSSE3(const uint8* y_buf,
                           const uint8* u_buf,
                           const uint8* v_buf,
                           uint8* rgb565_buf,
                           int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // rgb565
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5       // generate mask 0x0000001f
    psrld      xmm5, 27
    pcmpeqb    xmm6, xmm6       // generate mask 0x000007e0
    psrld      xmm6, 26
    pslld      xmm6, 5
    pcmpeqb    xmm7, xmm7       // generate mask 0xfffff800
    pslld      xmm7, 11

 convertloop:
    READYUV422
    YUVTORGB

    // Step 3: Weave into RRGB
    punpcklbw  xmm0, xmm1           // BG
    punpcklbw  xmm2, xmm2           // RR
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm2           // BGRR first 4 pixels
    punpckhwd  xmm1, xmm2           // BGRR next 4 pixels

    // Step 3b: RRGB -> RGB565
    movdqa     xmm3, xmm0    // B  first 4 pixels of argb
    movdqa     xmm2, xmm0    // G
    pslld      xmm0, 8       // R
    psrld      xmm3, 3       // B
    psrld      xmm2, 5       // G
    psrad      xmm0, 16      // R
    pand       xmm3, xmm5    // B
    pand       xmm2, xmm6    // G
    pand       xmm0, xmm7    // R
    por        xmm3, xmm2    // BG
    por        xmm0, xmm3    // BGR
    movdqa     xmm3, xmm1    // B  next 4 pixels of argb
    movdqa     xmm2, xmm1    // G
    pslld      xmm1, 8       // R
    psrld      xmm3, 3       // B
    psrld      xmm2, 5       // G
    psrad      xmm1, 16      // R
    pand       xmm3, xmm5    // B
    pand       xmm2, xmm6    // G
    pand       xmm1, xmm7    // R
    por        xmm3, xmm2    // BG
    por        xmm1, xmm3    // BGR
    packssdw   xmm0, xmm1
    movdqu     [edx], xmm0   // store 8 pixels of RGB565
    lea        edx, [edx + 16]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
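
// Step 3b above packs each 8:8:8 pixel down to 5:6:5.  Scalar equivalent for
// one pixel (sketch only):
//   uint16 rgb565 = (uint16)((b >> 3) | ((g >> 2) << 5) | ((r >> 3) << 11));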

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked) __declspec(align(16))
void I422ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READYUV422
    YUVTORGB

    // Step 3: Weave into ARGB
    punpcklbw  xmm0, xmm1           // BG
    punpcklbw  xmm2, xmm5           // RA
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm2           // BGRA first 4 pixels
    punpckhwd  xmm1, xmm2           // BGRA next 4 pixels
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx,  [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 2 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
// Similar to I420 but duplicate UV once more.
__declspec(naked) __declspec(align(16))
void I411ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         int width) {
  __asm {
    push       ebx
    push       esi
    push       edi
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ecx, [esp + 12 + 20]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READYUV411  // modifies EBX
    YUVTORGB

    // Step 3: Weave into ARGB
    punpcklbw  xmm0, xmm1           // BG
    punpcklbw  xmm2, xmm5           // RA
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm2           // BGRA first 4 pixels
    punpckhwd  xmm1, xmm2           // BGRA next 4 pixels
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx,  [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    pop        ebx
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked) __declspec(align(16))
void NV12ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* uv_buf,
                         uint8* dst_argb,
                         int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // Y
    mov        esi, [esp + 4 + 8]   // UV
    mov        edx, [esp + 4 + 12]  // argb
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READNV12
    YUVTORGB

    // Step 3: Weave into ARGB
    punpcklbw  xmm0, xmm1           // BG
    punpcklbw  xmm2, xmm5           // RA
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm2           // BGRA first 4 pixels
    punpckhwd  xmm1, xmm2           // BGRA next 4 pixels
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx,  [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked) __declspec(align(16))
void NV21ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* uv_buf,
                         uint8* dst_argb,
                         int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // Y
    mov        esi, [esp + 4 + 8]   // VU
    mov        edx, [esp + 4 + 12]  // argb
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READNV12
    YVUTORGB

    // Step 3: Weave into ARGB
    punpcklbw  xmm0, xmm1           // BG
    punpcklbw  xmm2, xmm5           // RA
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm2           // BGRA first 4 pixels
    punpckhwd  xmm1, xmm2           // BGRA next 4 pixels
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx,  [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    ret
  }
}

__declspec(naked) __declspec(align(16))
void I422ToBGRARow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_bgra,
                         int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // bgra
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi

 convertloop:
    READYUV422
    YUVTORGB

    // Step 3: Weave into BGRA
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha
    punpcklbw  xmm1, xmm0           // GB
    punpcklbw  xmm5, xmm2           // AR
    movdqa     xmm0, xmm5
    punpcklwd  xmm5, xmm1           // BGRA first 4 pixels
    punpckhwd  xmm0, xmm1           // BGRA next 4 pixels
    movdqu     [edx], xmm5
    movdqu     [edx + 16], xmm0
    lea        edx,  [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked) __declspec(align(16))
void I422ToABGRRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_abgr,
                         int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // abgr
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READYUV422
    YUVTORGB

    // Step 3: Weave into ABGR
    punpcklbw  xmm2, xmm1           // RG
    punpcklbw  xmm0, xmm5           // BA
    movdqa     xmm1, xmm2
    punpcklwd  xmm2, xmm0           // RGBA first 4 pixels
    punpckhwd  xmm1, xmm0           // RGBA next 4 pixels
    movdqu     [edx], xmm2
    movdqu     [edx + 16], xmm1
    lea        edx,  [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked) __declspec(align(16))
void I422ToRGBARow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_rgba,
                         int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // rgba
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi

 convertloop:
    READYUV422
    YUVTORGB

    // Step 3: Weave into RGBA
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha
    punpcklbw  xmm1, xmm2           // GR
    punpcklbw  xmm5, xmm0           // AB
    movdqa     xmm0, xmm5
    punpcklwd  xmm5, xmm1           // RGBA first 4 pixels
    punpckhwd  xmm0, xmm1           // RGBA next 4 pixels
    movdqu     [edx], xmm5
    movdqu     [edx + 16], xmm0
    lea        edx,  [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

#endif  // HAS_I422TOARGBROW_SSSE3

// TODO(fbarchard): Remove shift by 6.
#ifdef HAS_YTOARGBROW_SSE2
__declspec(naked) __declspec(align(16))
void YToARGBRow_SSE2(const uint8* y_buf,
                     uint8* rgb_buf,
                     int width) {
  __asm {
    pcmpeqb    xmm4, xmm4           // generate mask 0xff000000
    pslld      xmm4, 24
    mov        eax, 0x04a804a8      // 04a8 = 1192 = round(1.164 * 64 * 16)
    movd       xmm3, eax
    pshufd     xmm3, xmm3, 0
    mov        eax, 0x4a7f4a7f      // 4a7f = 19071 = round(1.164 * 64 * 256)
    movd       xmm2, eax
    pshufd     xmm2, xmm2, 0

    mov        eax, [esp + 4]       // Y
    mov        edx, [esp + 8]       // rgb
    mov        ecx, [esp + 12]      // width

 convertloop:
    // Step 1: Scale Y contribution to 8 G values. G = (y - 16) * 1.164
    movq       xmm0, qword ptr [eax]
    lea        eax, [eax + 8]
    punpcklbw  xmm0, xmm0           // Y.Y
    pmulhuw    xmm0, xmm2
    psubusw    xmm0, xmm3           // TODO(fbarchard): round 0.5
    psrlw      xmm0, 6
    packuswb   xmm0, xmm0           // G

    // Step 2: Weave into ARGB
    punpcklbw  xmm0, xmm0           // GG
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm0           // BGRA first 4 pixels
    punpckhwd  xmm1, xmm1           // BGRA next 4 pixels
    por        xmm0, xmm4
    por        xmm1, xmm4
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx,  [edx + 32]
    sub        ecx, 8
    jg         convertloop

    ret
  }
}
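
// Scalar sketch of the grey expansion above (illustrative only; 0x4a7f and
// 0x04a8 are the constants loaded into xmm2 and xmm3):
//   uint32 y1 = ((uint32)(y * 0x0101) * 0x4a7f) >> 16;     // pmulhuw
//   uint32 g6 = (y1 > 0x04a8) ? (y1 - 0x04a8) >> 6 : 0;    // psubusw, psrlw
//   uint8 gray = (uint8)(g6 > 255 ? 255 : g6);             // packuswb
//   *(uint32*)rgb_buf = 0xff000000u | (gray * 0x010101u);  // B = G = R, A = ff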
#endif  // HAS_YTOARGBROW_SSE2

#ifdef HAS_MIRRORROW_SSSE3
// Shuffle table for reversing the bytes.
static const uvec8 kShuffleMirror = {
  15u, 14u, 13u, 12u, 11u, 10u, 9u, 8u, 7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u
};

// TODO(fbarchard): Replace lea with -16 offset.
__declspec(naked) __declspec(align(16))
void MirrorRow_SSSE3(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    movdqa    xmm5, kShuffleMirror

 convertloop:
    movdqu    xmm0, [eax - 16 + ecx]
    pshufb    xmm0, xmm5
    movdqu    [edx], xmm0
    lea       edx, [edx + 16]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}
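
// Scalar equivalent of the byte mirror above (sketch only):
//   for (int i = 0; i < width; ++i) {
//     dst[i] = src[width - 1 - i];
//   }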
#endif  // HAS_MIRRORROW_SSSE3

#ifdef HAS_MIRRORROW_AVX2
__declspec(naked) __declspec(align(16))
void MirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    vbroadcastf128 ymm5, kShuffleMirror

 convertloop:
2388
    vmovdqu   ymm0, [eax - 32 + ecx]
    vpshufb   ymm0, ymm0, ymm5
    vpermq    ymm0, ymm0, 0x4e  // swap high and low halves
    vmovdqu   [edx], ymm0
    lea       edx, [edx + 32]
    sub       ecx, 32
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_MIRRORROW_AVX2

#ifdef HAS_MIRRORROW_SSE2
__declspec(naked) __declspec(align(16))
void MirrorRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width

 convertloop:
    movdqu    xmm0, [eax - 16 + ecx]
    movdqa    xmm1, xmm0        // swap bytes
    psllw     xmm0, 8
    psrlw     xmm1, 8
    por       xmm0, xmm1
    pshuflw   xmm0, xmm0, 0x1b  // swap words
    pshufhw   xmm0, xmm0, 0x1b
    pshufd    xmm0, xmm0, 0x4e  // swap qwords
    movdqu    [edx], xmm0
    lea       edx, [edx + 16]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}
#endif  // HAS_MIRRORROW_SSE2

#ifdef HAS_MIRRORROW_UV_SSSE3
// Shuffle table for reversing the bytes of UV channels.
static const uvec8 kShuffleMirrorUV = {
  14u, 12u, 10u, 8u, 6u, 4u, 2u, 0u, 15u, 13u, 11u, 9u, 7u, 5u, 3u, 1u
};

__declspec(naked) __declspec(align(16))
void MirrorUVRow_SSSE3(const uint8* src, uint8* dst_u, uint8* dst_v,
                       int width) {
  __asm {
    push      edi
    mov       eax, [esp + 4 + 4]   // src
    mov       edx, [esp + 4 + 8]   // dst_u
    mov       edi, [esp + 4 + 12]  // dst_v
    mov       ecx, [esp + 4 + 16]  // width
    movdqa    xmm1, kShuffleMirrorUV
    lea       eax, [eax + ecx * 2 - 16]
    sub       edi, edx

 convertloop:
    movdqu    xmm0, [eax]
    lea       eax, [eax - 16]
    pshufb    xmm0, xmm1
    movlpd    qword ptr [edx], xmm0
    movhpd    qword ptr [edx + edi], xmm0
    lea       edx, [edx + 8]
    sub       ecx, 8
    jg        convertloop

    pop       edi
    ret
  }
}
#endif  // HAS_MIRRORROW_UV_SSSE3

#ifdef HAS_ARGBMIRRORROW_SSE2
__declspec(naked) __declspec(align(16))
void ARGBMirrorRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    lea       eax, [eax - 16 + ecx * 4]  // last 4 pixels.

 convertloop:
    movdqu    xmm0, [eax]
    lea       eax, [eax - 16]
    pshufd    xmm0, xmm0, 0x1b
    movdqu    [edx], xmm0
    lea       edx, [edx + 16]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}
#endif  // HAS_ARGBMIRRORROW_SSE2

#ifdef HAS_ARGBMIRRORROW_AVX2
// Shuffle table for reversing the ARGB pixels (dword order).
static const ulvec32 kARGBShuffleMirror_AVX2 = {
  7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u
};

__declspec(naked) __declspec(align(16))
void ARGBMirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    vmovdqu   ymm5, kARGBShuffleMirror_AVX2

 convertloop:
    vpermd    ymm0, ymm5, [eax - 32 + ecx * 4]  // permute dword order
    vmovdqu   [edx], ymm0
    lea       edx, [edx + 32]
    sub       ecx, 8
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBMIRRORROW_AVX2

#ifdef HAS_SPLITUVROW_SSE2
__declspec(naked) __declspec(align(16))
void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uv
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    movdqa     xmm2, xmm0
    movdqa     xmm3, xmm1
    pand       xmm0, xmm5   // even bytes
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    psrlw      xmm2, 8      // odd bytes
    psrlw      xmm3, 8
    packuswb   xmm2, xmm3
    movdqu     [edx], xmm0
    movdqu     [edx + edi], xmm2
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}
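
// Scalar equivalent of the even/odd byte split above; MergeUVRow below is the
// inverse (sketch only):
//   for (int i = 0; i < pix; ++i) {
//     dst_u[i] = src_uv[2 * i + 0];
//     dst_v[i] = src_uv[2 * i + 1];
//   }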

#endif  // HAS_SPLITUVROW_SSE2

#ifdef HAS_SPLITUVROW_AVX2
__declspec(naked) __declspec(align(16))
void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uv
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm2, ymm0, 8      // odd bytes
    vpsrlw     ymm3, ymm1, 8
    vpand      ymm0, ymm0, ymm5   // even bytes
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1
    vpackuswb  ymm2, ymm2, ymm3
    vpermq     ymm0, ymm0, 0xd8
    vpermq     ymm2, ymm2, 0xd8
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + edi], ymm2
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}
#endif  // HAS_SPLITUVROW_AVX2

#ifdef HAS_MERGEUVROW_SSE2
__declspec(naked) __declspec(align(16))
void MergeUVRow_SSE2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
                     int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_u
    mov        edx, [esp + 4 + 8]    // src_v
    mov        edi, [esp + 4 + 12]   // dst_uv
    mov        ecx, [esp + 4 + 16]   // width
    sub        edx, eax

  convertloop:
    movdqu     xmm0, [eax]      // read 16 U's
    movdqu     xmm1, [eax + edx]  // and 16 V's
    lea        eax,  [eax + 16]
    movdqa     xmm2, xmm0
    punpcklbw  xmm0, xmm1       // first 8 UV pairs
    punpckhbw  xmm2, xmm1       // next 8 UV pairs
    movdqu     [edi], xmm0
    movdqu     [edi + 16], xmm2
    lea        edi, [edi + 32]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}
#endif  //  HAS_MERGEUVROW_SSE2

#ifdef HAS_MERGEUVROW_AVX2
__declspec(naked) __declspec(align(16))
void MergeUVRow_AVX2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
                     int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_u
    mov        edx, [esp + 4 + 8]    // src_v
    mov        edi, [esp + 4 + 12]   // dst_uv
    mov        ecx, [esp + 4 + 16]   // width
    sub        edx, eax

  convertloop:
    vmovdqu    ymm0, [eax]           // read 32 U's
    vmovdqu    ymm1, [eax + edx]     // and 32 V's
    lea        eax,  [eax + 32]
    vpunpcklbw ymm2, ymm0, ymm1      // low 16 UV pairs. mutated qqword 0,2
    vpunpckhbw ymm0, ymm0, ymm1      // high 16 UV pairs. mutated qqword 1,3
    vextractf128 [edi], ymm2, 0       // bytes 0..15
    vextractf128 [edi + 16], ymm0, 0  // bytes 16..31
    vextractf128 [edi + 32], ymm2, 1  // bytes 32..47
    vextractf128 [edi + 48], ymm0, 1  // bytes 48..63
    lea        edi, [edi + 64]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}
#endif  //  HAS_MERGEUVROW_AVX2

#ifdef HAS_COPYROW_SSE2
// CopyRow copies 'count' bytes using 16 byte loads/stores, 32 bytes at a time.
__declspec(naked) __declspec(align(16))
void CopyRow_SSE2(const uint8* src, uint8* dst, int count) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax, [eax + 32]
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    ret
  }
}
#endif  // HAS_COPYROW_SSE2

#ifdef HAS_COPYROW_AVX
// CopyRow copies 'count' bytes using 32 byte loads/stores, 64 bytes at a time.
__declspec(naked) __declspec(align(16))
void CopyRow_AVX(const uint8* src, uint8* dst, int count) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax, [eax + 64]
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx, [edx + 64]
    sub        ecx, 64
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_COPYROW_AVX

// Multiple of 1.
__declspec(naked) __declspec(align(16))
void CopyRow_ERMS(const uint8* src, uint8* dst, int count) {
  __asm {
    mov        eax, esi
    mov        edx, edi
    mov        esi, [esp + 4]   // src
    mov        edi, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    rep movsb
    mov        edi, edx
    mov        esi, eax
    ret
  }
}

#ifdef HAS_ARGBCOPYALPHAROW_SSE2
// width in pixels
__declspec(naked) __declspec(align(16))
void ARGBCopyAlphaRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    pcmpeqb    xmm0, xmm0       // generate mask 0xff000000
    pslld      xmm0, 24
    pcmpeqb    xmm1, xmm1       // generate mask 0x00ffffff
    psrld      xmm1, 8

  convertloop:
    movdqu     xmm2, [eax]
    movdqu     xmm3, [eax + 16]
    lea        eax, [eax + 32]
    movdqu     xmm4, [edx]
    movdqu     xmm5, [edx + 16]
    pand       xmm2, xmm0
    pand       xmm3, xmm0
    pand       xmm4, xmm1
    pand       xmm5, xmm1
    por        xmm2, xmm4
    por        xmm3, xmm5
    movdqu     [edx], xmm2
    movdqu     [edx + 16], xmm3
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    ret
  }
}
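
// Scalar equivalent of the alpha copy above: keep B, G, R from dst and take
// only A (byte 3 of each ARGB pixel) from src (sketch only):
//   for (int i = 0; i < width; ++i) {
//     dst[4 * i + 3] = src[4 * i + 3];
//   }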
#endif  // HAS_ARGBCOPYALPHAROW_SSE2

#ifdef HAS_ARGBCOPYALPHAROW_AVX2
// width in pixels
__declspec(naked) __declspec(align(16))
void ARGBCopyAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    vpcmpeqb   ymm0, ymm0, ymm0
    vpsrld     ymm0, ymm0, 8    // generate mask 0x00ffffff

  convertloop:
    vmovdqu    ymm1, [eax]
    vmovdqu    ymm2, [eax + 32]
    lea        eax, [eax + 64]
    vpblendvb  ymm1, ymm1, [edx], ymm0
    vpblendvb  ymm2, ymm2, [edx + 32], ymm0
    vmovdqu    [edx], ymm1
    vmovdqu    [edx + 32], ymm2
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBCOPYALPHAROW_AVX2

#ifdef HAS_ARGBCOPYYTOALPHAROW_SSE2
// width in pixels
__declspec(naked) __declspec(align(16))
void ARGBCopyYToAlphaRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    pcmpeqb    xmm0, xmm0       // generate mask 0xff000000
    pslld      xmm0, 24
    pcmpeqb    xmm1, xmm1       // generate mask 0x00ffffff
    psrld      xmm1, 8

  convertloop:
    movq       xmm2, qword ptr [eax]  // 8 Y's
    lea        eax, [eax + 8]
    punpcklbw  xmm2, xmm2
    punpckhwd  xmm3, xmm2
    punpcklwd  xmm2, xmm2
    movdqu     xmm4, [edx]
    movdqu     xmm5, [edx + 16]
    pand       xmm2, xmm0
    pand       xmm3, xmm0
    pand       xmm4, xmm1
    pand       xmm5, xmm1
    por        xmm2, xmm4
    por        xmm3, xmm5
    movdqu     [edx], xmm2
    movdqu     [edx + 16], xmm3
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBCOPYYTOALPHAROW_SSE2

#ifdef HAS_ARGBCOPYYTOALPHAROW_AVX2
// width in pixels
__declspec(naked) __declspec(align(16))
void ARGBCopyYToAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    vpcmpeqb   ymm0, ymm0, ymm0
    vpsrld     ymm0, ymm0, 8    // generate mask 0x00ffffff

  convertloop:
    vpmovzxbd  ymm1, qword ptr [eax]
    vpmovzxbd  ymm2, qword ptr [eax + 8]
    lea        eax, [eax + 16]
    vpslld     ymm1, ymm1, 24
    vpslld     ymm2, ymm2, 24
    vpblendvb  ymm1, ymm1, [edx], ymm0
    vpblendvb  ymm2, ymm2, [edx + 32], ymm0
    vmovdqu    [edx], ymm1
    vmovdqu    [edx + 32], ymm2
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBCOPYYTOALPHAROW_AVX2

#ifdef HAS_SETROW_X86
// Write 'count' bytes using an 8 bit value repeated.
// Count should be multiple of 4.
__declspec(naked) __declspec(align(16))
void SetRow_X86(uint8* dst, uint8 v8, int count) {
  __asm {
    movzx      eax, byte ptr [esp + 8]    // v8
    mov        edx, 0x01010101  // Duplicate byte to all bytes.
    mul        edx              // overwrites edx with upper part of result.
    mov        edx, edi
    mov        edi, [esp + 4]   // dst
    mov        ecx, [esp + 12]  // count
    shr        ecx, 2
    rep stosd
    mov        edi, edx
    ret
  }
}
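
// The movzx/mul pair above replicates v8 into all four bytes of eax
// (v8 * 0x01010101) so rep stosd can store a dword at a time.  Scalar sketch
// of the same idea (illustrative only):
//   uint32 v32 = v8 * 0x01010101u;  // e.g. 0x5a -> 0x5a5a5a5a
//   for (int i = 0; i < count / 4; ++i) {
//     ((uint32*)dst)[i] = v32;
//   }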

// Write 'count' bytes using an 8 bit value repeated.
__declspec(naked) __declspec(align(16))
void SetRow_ERMS(uint8* dst, uint8 v8, int count) {
  __asm {
    mov        edx, edi
    mov        edi, [esp + 4]   // dst
    mov        eax, [esp + 8]   // v8
    mov        ecx, [esp + 12]  // count
    rep stosb
    mov        edi, edx
    ret
  }
}

// Write 'count' 32 bit values.
__declspec(naked) __declspec(align(16))
void ARGBSetRow_X86(uint8* dst_argb, uint32 v32, int count) {
  __asm {
    mov        edx, edi
    mov        edi, [esp + 4]   // dst
    mov        eax, [esp + 8]   // v32
    mov        ecx, [esp + 12]  // count
    rep stosd
    mov        edi, edx
    ret
  }
}
#endif  // HAS_SETROW_X86

#ifdef HAS_YUY2TOYROW_AVX2
__declspec(naked) __declspec(align(16))
void YUY2ToYRow_AVX2(const uint8* src_yuy2,
                     uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_yuy2
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5  // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpand      ymm0, ymm0, ymm5   // even bytes are Y
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    vzeroupper
    ret
  }
}

__declspec(naked) __declspec(align(16))
void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2,
                      uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_yuy2
    mov        esi, [esp + 8 + 8]    // stride_yuy2
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vpavgb     ymm0, ymm0, [eax + esi]
    vpavgb     ymm1, ymm1, [eax + esi + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm0, ymm0, 8      // YUYV -> UVUV
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}

__declspec(naked) __declspec(align(16))
void YUY2ToUV422Row_AVX2(const uint8* src_yuy2,
                         uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_yuy2
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm0, ymm0, 8      // YUYV -> UVUV
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}

__declspec(naked) __declspec(align(16))
void UYVYToYRow_AVX2(const uint8* src_uyvy,
                     uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_uyvy
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // pix

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm0, ymm0, 8      // odd bytes are Y
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    vzeroupper
    ret
  }
}

__declspec(naked) __declspec(align(16))
void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy,
                      uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_uyvy
    mov        esi, [esp + 8 + 8]    // stride_uyvy
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vpavgb     ymm0, ymm0, [eax + esi]
    vpavgb     ymm1, ymm1, [eax + esi + 32]
    lea        eax,  [eax + 64]
    vpand      ymm0, ymm0, ymm5   // UYVY -> UVUV
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}

__declspec(naked) __declspec(align(16))
void UYVYToUV422Row_AVX2(const uint8* src_uyvy,
                         uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uyvy
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpand      ymm0, ymm0, ymm5   // UYVY -> UVUV
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}
#endif  // HAS_YUY2TOYROW_AVX2

#ifdef HAS_YUY2TOYROW_SSE2
__declspec(naked) __declspec(align(16))
void YUY2ToYRow_SSE2(const uint8* src_yuy2,
                     uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_yuy2
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // pix
    pcmpeqb    xmm5, xmm5        // generate mask 0x00ff00ff
    psrlw      xmm5, 8

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    pand       xmm0, xmm5   // even bytes are Y
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}
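// A scalar sketch of the extraction above (illustrative only, written from
// the packed YUY2 layout rather than taken from the library's reference
// path): in YUY2 the even bytes are luma, so Y is every second source byte.
static void YUY2ToYRowSketch(const uint8* src_yuy2, uint8* dst_y, int pix) {
  for (int x = 0; x < pix; ++x) {
    dst_y[x] = src_yuy2[x * 2];  // Same selection the pand/packuswb pair makes.
  }
}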

__declspec(naked) __declspec(align(16))
void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2,
                      uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_yuy2
    mov        esi, [esp + 8 + 8]    // stride_yuy2
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + esi]
    movdqu     xmm3, [eax + esi + 16]
    lea        eax,  [eax + 32]
    pavgb      xmm0, xmm2
    pavgb      xmm1, xmm3
    psrlw      xmm0, 8      // YUYV -> UVUV
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
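// A scalar sketch of the UV extraction above (an illustrative assumption,
// not the library's reference path): U and V sit in the odd bytes of YUY2,
// and the two source rows are averaged with rounding, as pavgb does.
static void YUY2ToUVRowSketch(const uint8* src_yuy2, int stride_yuy2,
                              uint8* dst_u, uint8* dst_v, int pix) {
  for (int x = 0; x < pix; x += 2) {
    const uint8* p = src_yuy2 + x * 2;  // Y0 U Y1 V covers two pixels.
    dst_u[x / 2] = (uint8)((p[1] + p[stride_yuy2 + 1] + 1) >> 1);
    dst_v[x / 2] = (uint8)((p[3] + p[stride_yuy2 + 3] + 1) >> 1);
  }
}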

__declspec(naked) __declspec(align(16))
void YUY2ToUV422Row_SSE2(const uint8* src_yuy2,
                         uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_yuy2
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    psrlw      xmm0, 8      // YUYV -> UVUV
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}

__declspec(naked) __declspec(align(16))
void UYVYToYRow_SSE2(const uint8* src_uyvy,
                     uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_uyvy
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // pix

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    psrlw      xmm0, 8    // odd bytes are Y
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked) __declspec(align(16))
void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy,
                      uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_uyvy
    mov        esi, [esp + 8 + 8]    // stride_uyvy
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + esi]
    movdqu     xmm3, [eax + esi + 16]
    lea        eax,  [eax + 32]
    pavgb      xmm0, xmm2
    pavgb      xmm1, xmm3
    pand       xmm0, xmm5   // UYVY -> UVUV
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked) __declspec(align(16))
void UYVYToUV422Row_SSE2(const uint8* src_uyvy,
                         uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uyvy
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    pand       xmm0, xmm5   // UYVY -> UVUV
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}
#endif  // HAS_YUY2TOYROW_SSE2

#ifdef HAS_ARGBBLENDROW_SSE2
// Blend 4 pixels at a time.
__declspec(naked) __declspec(align(16))
void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                       uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm7, xmm7       // generate constant 1
    psrlw      xmm7, 15
    pcmpeqb    xmm6, xmm6       // generate mask 0x00ff00ff
    psrlw      xmm6, 8
    pcmpeqb    xmm5, xmm5       // generate mask 0xff00ff00
    psllw      xmm5, 8
    pcmpeqb    xmm4, xmm4       // generate mask 0xff000000
    pslld      xmm4, 24

    sub        ecx, 1
    je         convertloop1     // only 1 pixel?
    jl         convertloop1b

    // 1 pixel loop until destination pointer is aligned.
  alignloop1:
    test       edx, 15          // aligned?
    je         alignloop1b
    movd       xmm3, [eax]
    lea        eax, [eax + 4]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movd       xmm2, [esi]      // _r_b
    psrlw      xmm3, 8          // alpha
    pshufhw    xmm3, xmm3, 0F5h // 8 alpha words
    pshuflw    xmm3, xmm3, 0F5h
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movd       xmm1, [esi]      // _a_g
    lea        esi, [esi + 4]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        alignloop1

  alignloop1b:
    add        ecx, 1 - 4
    jl         convertloop4b

    // 4 pixel loop.
  convertloop4:
    movdqu     xmm3, [eax]      // src argb
    lea        eax, [eax + 16]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movdqu     xmm2, [esi]      // _r_b
    psrlw      xmm3, 8          // alpha
    pshufhw    xmm3, xmm3, 0F5h // 8 alpha words
    pshuflw    xmm3, xmm3, 0F5h
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movdqu     xmm1, [esi]      // _a_g
    lea        esi, [esi + 16]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        convertloop4

  convertloop4b:
    add        ecx, 4 - 1
    jl         convertloop1b

    // 1 pixel loop.
  convertloop1:
    movd       xmm3, [eax]      // src argb
    lea        eax, [eax + 4]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movd       xmm2, [esi]      // _r_b
    psrlw      xmm3, 8          // alpha
    pshufhw    xmm3, xmm3, 0F5h // 8 alpha words
    pshuflw    xmm3, xmm3, 0F5h
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movd       xmm1, [esi]      // _a_g
    lea        esi, [esi + 4]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        convertloop1

  convertloop1b:
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBBLENDROW_SSE2

#ifdef HAS_ARGBBLENDROW_SSSE3
// Shuffle table for isolating alpha.
static const uvec8 kShuffleAlpha = {
  3u, 0x80, 3u, 0x80, 7u, 0x80, 7u, 0x80,
  11u, 0x80, 11u, 0x80, 15u, 0x80, 15u, 0x80
};
// Same as SSE2, but replaces:
//    psrlw      xmm3, 8          // alpha
//    pshufhw    xmm3, xmm3, 0F5h // 8 alpha words
//    pshuflw    xmm3, xmm3, 0F5h
// with:
//    pshufb     xmm3, kShuffleAlpha // alpha
// Blend 4 pixels at a time.

__declspec(naked) __declspec(align(16))
void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
                        uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm7, xmm7       // generate constant 0x0001
    psrlw      xmm7, 15
    pcmpeqb    xmm6, xmm6       // generate mask 0x00ff00ff
    psrlw      xmm6, 8
    pcmpeqb    xmm5, xmm5       // generate mask 0xff00ff00
    psllw      xmm5, 8
    pcmpeqb    xmm4, xmm4       // generate mask 0xff000000
    pslld      xmm4, 24

    sub        ecx, 1
    je         convertloop1     // only 1 pixel?
    jl         convertloop1b

    // 1 pixel loop until destination pointer is aligned.
  alignloop1:
    test       edx, 15          // aligned?
    je         alignloop1b
    movd       xmm3, [eax]
    lea        eax, [eax + 4]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movd       xmm2, [esi]      // _r_b
    pshufb     xmm3, kShuffleAlpha // alpha
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movd       xmm1, [esi]      // _a_g
    lea        esi, [esi + 4]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        alignloop1

  alignloop1b:
    add        ecx, 1 - 4
    jl         convertloop4b

    // 4 pixel loop.
  convertloop4:
    movdqu     xmm3, [eax]      // src argb
    lea        eax, [eax + 16]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movdqu     xmm2, [esi]      // _r_b
    pshufb     xmm3, kShuffleAlpha // alpha
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movdqu     xmm1, [esi]      // _a_g
    lea        esi, [esi + 16]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        convertloop4

  convertloop4b:
    add        ecx, 4 - 1
    jl         convertloop1b

    // 1 pixel loop.
  convertloop1:
    movd       xmm3, [eax]      // src argb
    lea        eax, [eax + 4]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movd       xmm2, [esi]      // _r_b
    pshufb     xmm3, kShuffleAlpha // alpha
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movd       xmm1, [esi]      // _a_g
    lea        esi, [esi + 4]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        convertloop1

  convertloop1b:
    pop        esi
    ret
  }
}
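// A scalar sketch of the blend the SSE2/SSSE3 rows above implement
// (illustrative only, written from the instruction comments): each colour
// channel becomes src0 + src1 * (256 - src0_alpha) / 256 with a saturating
// add, and the output alpha is forced to 255.
static void ARGBBlendPixelSketch(const uint8* src0, const uint8* src1,
                                 uint8* dst) {
  const int ia = 256 - src0[3];   // "256 - alpha", as built with paddw xmm7.
  for (int c = 0; c < 3; ++c) {   // B, G, R.
    const int v = src0[c] + ((src1[c] * ia) >> 8);
    dst[c] = (uint8)(v > 255 ? 255 : v);  // paddusb saturates the same way.
  }
  dst[3] = 255;  // por with 0xff000000 makes the result opaque.
}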
#endif  // HAS_ARGBBLENDROW_SSSE3

#ifdef HAS_ARGBATTENUATEROW_SSE2
// Attenuate 4 pixels at a time.
__declspec(naked) __declspec(align(16))
void ARGBAttenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb0
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    pcmpeqb    xmm4, xmm4       // generate mask 0xff000000
    pslld      xmm4, 24
    pcmpeqb    xmm5, xmm5       // generate mask 0x00ffffff
    psrld      xmm5, 8

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    punpcklbw  xmm0, xmm0       // first 2
    pshufhw    xmm2, xmm0, 0FFh // 8 alpha words
    pshuflw    xmm2, xmm2, 0FFh
    pmulhuw    xmm0, xmm2       // rgb * a
    movdqu     xmm1, [eax]      // read 4 pixels
    punpckhbw  xmm1, xmm1       // next 2 pixels
    pshufhw    xmm2, xmm1, 0FFh // 8 alpha words
    pshuflw    xmm2, xmm2, 0FFh
    pmulhuw    xmm1, xmm2       // rgb * a
    movdqu     xmm2, [eax]      // alphas
    lea        eax, [eax + 16]
    psrlw      xmm0, 8
    pand       xmm2, xmm4
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    pand       xmm0, xmm5       // keep original alphas
    por        xmm0, xmm2
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    ret
  }
}
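// A scalar sketch of the attenuation above (illustrative assumption): each
// colour channel is premultiplied by the pixel's alpha, roughly
// value * alpha / 255, while the alpha channel itself is preserved.
static void ARGBAttenuatePixelSketch(const uint8* src, uint8* dst) {
  const int a = src[3];
  dst[0] = (uint8)((src[0] * a + 127) / 255);  // B
  dst[1] = (uint8)((src[1] * a + 127) / 255);  // G
  dst[2] = (uint8)((src[2] * a + 127) / 255);  // R
  dst[3] = (uint8)a;                           // Alpha is copied unchanged.
}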
#endif  // HAS_ARGBATTENUATEROW_SSE2

#ifdef HAS_ARGBATTENUATEROW_SSSE3
// Shuffle table duplicating alpha.
static const uvec8 kShuffleAlpha0 = {
  3u, 3u, 3u, 3u, 3u, 3u, 128u, 128u, 7u, 7u, 7u, 7u, 7u, 7u, 128u, 128u,
};
static const uvec8 kShuffleAlpha1 = {
  11u, 11u, 11u, 11u, 11u, 11u, 128u, 128u,
  15u, 15u, 15u, 15u, 15u, 15u, 128u, 128u,
};
__declspec(naked) __declspec(align(16))
void ARGBAttenuateRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb0
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    pcmpeqb    xmm3, xmm3       // generate mask 0xff000000
    pslld      xmm3, 24
    movdqa     xmm4, kShuffleAlpha0
    movdqa     xmm5, kShuffleAlpha1

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    pshufb     xmm0, xmm4       // isolate first 2 alphas
    movdqu     xmm1, [eax]      // read 4 pixels
    punpcklbw  xmm1, xmm1       // first 2 pixel rgbs
    pmulhuw    xmm0, xmm1       // rgb * a
    movdqu     xmm1, [eax]      // read 4 pixels
    pshufb     xmm1, xmm5       // isolate next 2 alphas
    movdqu     xmm2, [eax]      // read 4 pixels
    punpckhbw  xmm2, xmm2       // next 2 pixel rgbs
    pmulhuw    xmm1, xmm2       // rgb * a
    movdqu     xmm2, [eax]      // mask original alpha
    lea        eax, [eax + 16]
    pand       xmm2, xmm3
    psrlw      xmm0, 8
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    por        xmm0, xmm2       // copy original alpha
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBATTENUATEROW_SSSE3

#ifdef HAS_ARGBATTENUATEROW_AVX2
// Shuffle table duplicating alpha.
static const uvec8 kShuffleAlpha_AVX2 = {
  6u, 7u, 6u, 7u, 6u, 7u, 128u, 128u, 14u, 15u, 14u, 15u, 14u, 15u, 128u, 128u
};
__declspec(naked) __declspec(align(16))
void ARGBAttenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb0
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    sub        edx, eax
    vbroadcastf128 ymm4,kShuffleAlpha_AVX2
    vpcmpeqb   ymm5, ymm5, ymm5 // generate mask 0xff000000
    vpslld     ymm5, ymm5, 24

 convertloop:
    vmovdqu    ymm6, [eax]       // read 8 pixels.
    vpunpcklbw ymm0, ymm6, ymm6  // low 4 pixels. mutated.
    vpunpckhbw ymm1, ymm6, ymm6  // high 4 pixels. mutated.
    vpshufb    ymm2, ymm0, ymm4  // low 4 alphas
    vpshufb    ymm3, ymm1, ymm4  // high 4 alphas
    vpmulhuw   ymm0, ymm0, ymm2  // rgb * a
    vpmulhuw   ymm1, ymm1, ymm3  // rgb * a
    vpand      ymm6, ymm6, ymm5  // isolate alpha
    vpsrlw     ymm0, ymm0, 8
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1  // unmutated.
    vpor       ymm0, ymm0, ymm6  // copy original alpha
    vmovdqu    [eax + edx], ymm0
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBATTENUATEROW_AVX2

#ifdef HAS_ARGBUNATTENUATEROW_SSE2
// Unattenuate 4 pixels at a time.
__declspec(naked) __declspec(align(16))
void ARGBUnattenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb,
                             int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb0
    mov        edx, [esp + 8 + 8]   // dst_argb
    mov        ecx, [esp + 8 + 12]  // width

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    movzx      esi, byte ptr [eax + 3]  // first alpha
    movzx      edi, byte ptr [eax + 7]  // second alpha
    punpcklbw  xmm0, xmm0       // first 2
    movd       xmm2, dword ptr fixed_invtbl8[esi * 4]
    movd       xmm3, dword ptr fixed_invtbl8[edi * 4]
    pshuflw    xmm2, xmm2, 040h // first 4 inv_alpha words.  1, a, a, a
    pshuflw    xmm3, xmm3, 040h // next 4 inv_alpha words
    movlhps    xmm2, xmm3
    pmulhuw    xmm0, xmm2       // rgb * a

    movdqu     xmm1, [eax]      // read 4 pixels
    movzx      esi, byte ptr [eax + 11]  // third alpha
    movzx      edi, byte ptr [eax + 15]  // fourth alpha
    punpckhbw  xmm1, xmm1       // next 2
    movd       xmm2, dword ptr fixed_invtbl8[esi * 4]
    movd       xmm3, dword ptr fixed_invtbl8[edi * 4]
    pshuflw    xmm2, xmm2, 040h // first 4 inv_alpha words
    pshuflw    xmm3, xmm3, 040h // next 4 inv_alpha words
    movlhps    xmm2, xmm3
    pmulhuw    xmm1, xmm2       // rgb * a
    lea        eax, [eax + 16]

    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop
    pop        edi
    pop        esi
    ret
  }
}
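// A scalar sketch of the unattenuation above (illustrative assumption): it
// undoes premultiplied alpha by scaling each colour channel back up by
// 255 / alpha; fixed_invtbl8 supplies that reciprocal in fixed point so the
// real loop avoids a per-channel divide.
static uint8 UnattenuateChannelSketch(uint8 value, uint8 alpha) {
  if (alpha == 0) {
    return value;                      // Nothing can be recovered.
  }
  const int v = (value * 255) / alpha;
  return (uint8)(v > 255 ? 255 : v);   // Saturate like packuswb.
}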
#endif  // HAS_ARGBUNATTENUATEROW_SSE2

#ifdef HAS_ARGBUNATTENUATEROW_AVX2
// Shuffle table duplicating alpha.
static const uvec8 kUnattenShuffleAlpha_AVX2 = {
  0u, 1u, 0u, 1u, 0u, 1u, 6u, 7u, 8u, 9u, 8u, 9u, 8u, 9u, 14u, 15u
};
// TODO(fbarchard): Enable USE_GATHER for future hardware if faster.
// USE_GATHER is not on by default, due to being a slow instruction.
#ifdef USE_GATHER
__declspec(naked) __declspec(align(16))
void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb,
                             int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb0
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    sub        edx, eax
    vbroadcastf128 ymm4, kUnattenShuffleAlpha_AVX2

 convertloop:
    vmovdqu    ymm6, [eax]       // read 8 pixels.
    vpcmpeqb   ymm5, ymm5, ymm5  // generate mask 0xffffffff for gather.
    vpsrld     ymm2, ymm6, 24    // alpha in low 8 bits.
    vpunpcklbw ymm0, ymm6, ymm6  // low 4 pixels. mutated.
    vpunpckhbw ymm1, ymm6, ymm6  // high 4 pixels. mutated.
    vpgatherdd ymm3, [ymm2 * 4 + fixed_invtbl8], ymm5  // ymm5 cleared.  1, a
    vpunpcklwd ymm2, ymm3, ymm3  // low 4 inverted alphas. mutated. 1, 1, a, a
    vpunpckhwd ymm3, ymm3, ymm3  // high 4 inverted alphas. mutated.
    vpshufb    ymm2, ymm2, ymm4  // replicate low 4 alphas. 1, a, a, a
    vpshufb    ymm3, ymm3, ymm4  // replicate high 4 alphas
    vpmulhuw   ymm0, ymm0, ymm2  // rgb * ia
    vpmulhuw   ymm1, ymm1, ymm3  // rgb * ia
    vpackuswb  ymm0, ymm0, ymm1  // unmutated.
    vmovdqu    [eax + edx], ymm0
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop

    vzeroupper
    ret
  }
}
#else  // USE_GATHER
__declspec(naked) __declspec(align(16))
void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb,
                             int width) {
  __asm {

    mov        eax, [esp + 4]   // src_argb0
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    sub        edx, eax
    vbroadcastf128 ymm5, kUnattenShuffleAlpha_AVX2

    push       esi
    push       edi

 convertloop:
    // replace VPGATHER
    movzx      esi, byte ptr [eax + 3]                 // alpha0
    movzx      edi, byte ptr [eax + 7]                 // alpha1
    vmovd      xmm0, dword ptr fixed_invtbl8[esi * 4]  // [1,a0]
    vmovd      xmm1, dword ptr fixed_invtbl8[edi * 4]  // [1,a1]
    movzx      esi, byte ptr [eax + 11]                // alpha2
    movzx      edi, byte ptr [eax + 15]                // alpha3
    vpunpckldq xmm6, xmm0, xmm1                        // [1,a1,1,a0]
    vmovd      xmm2, dword ptr fixed_invtbl8[esi * 4]  // [1,a2]
    vmovd      xmm3, dword ptr fixed_invtbl8[edi * 4]  // [1,a3]
    movzx      esi, byte ptr [eax + 19]                // alpha4
    movzx      edi, byte ptr [eax + 23]                // alpha5
    vpunpckldq xmm7, xmm2, xmm3                        // [1,a3,1,a2]
    vmovd      xmm0, dword ptr fixed_invtbl8[esi * 4]  // [1,a4]
    vmovd      xmm1, dword ptr fixed_invtbl8[edi * 4]  // [1,a5]
    movzx      esi, byte ptr [eax + 27]                // alpha6
    movzx      edi, byte ptr [eax + 31]                // alpha7
    vpunpckldq xmm0, xmm0, xmm1                        // [1,a5,1,a4]
    vmovd      xmm2, dword ptr fixed_invtbl8[esi * 4]  // [1,a6]
    vmovd      xmm3, dword ptr fixed_invtbl8[edi * 4]  // [1,a7]
    vpunpckldq xmm2, xmm2, xmm3                        // [1,a7,1,a6]
    vpunpcklqdq xmm3, xmm6, xmm7                       // [1,a3,1,a2,1,a1,1,a0]
    vpunpcklqdq xmm0, xmm0, xmm2                       // [1,a7,1,a6,1,a5,1,a4]
    vinserti128 ymm3, ymm3, xmm0, 1 // [1,a7,1,a6,1,a5,1,a4,1,a3,1,a2,1,a1,1,a0]
    // end of VPGATHER

    vmovdqu    ymm6, [eax]       // read 8 pixels.
    vpunpcklbw ymm0, ymm6, ymm6  // low 4 pixels. mutated.
    vpunpckhbw ymm1, ymm6, ymm6  // high 4 pixels. mutated.
    vpunpcklwd ymm2, ymm3, ymm3  // low 4 inverted alphas. mutated. 1, 1, a, a
    vpunpckhwd ymm3, ymm3, ymm3  // high 4 inverted alphas. mutated.
    vpshufb    ymm2, ymm2, ymm5  // replicate low 4 alphas. 1, a, a, a
    vpshufb    ymm3, ymm3, ymm5  // replicate high 4 alphas
    vpmulhuw   ymm0, ymm0, ymm2  // rgb * ia
    vpmulhuw   ymm1, ymm1, ymm3  // rgb * ia
    vpackuswb  ymm0, ymm0, ymm1  // unmutated.
    vmovdqu    [eax + edx], ymm0
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // USE_GATHER
#endif  // HAS_ARGBUNATTENUATEROW_AVX2

#ifdef HAS_ARGBGRAYROW_SSSE3
// Convert 8 ARGB pixels (32 bytes) to 8 Gray ARGB pixels.
__declspec(naked) __declspec(align(16))
void ARGBGrayRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_argb */
    mov        ecx, [esp + 12]  /* width */
    movdqa     xmm4, kARGBToYJ
    movdqa     xmm5, kAddYJ64

 convertloop:
    movdqu     xmm0, [eax]  // G
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    phaddw     xmm0, xmm1
    paddw      xmm0, xmm5  // Add .5 for rounding.
    psrlw      xmm0, 7
    packuswb   xmm0, xmm0   // 8 G bytes
    movdqu     xmm2, [eax]  // A
    movdqu     xmm3, [eax + 16]
    lea        eax, [eax + 32]
    psrld      xmm2, 24
    psrld      xmm3, 24
    packuswb   xmm2, xmm3
    packuswb   xmm2, xmm2   // 8 A bytes
    movdqa     xmm3, xmm0   // Weave into GG, GA, then GGGA
    punpcklbw  xmm0, xmm0   // 8 GG words
    punpcklbw  xmm3, xmm2   // 8 GA words
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm3   // GGGA first 4
    punpckhwd  xmm1, xmm3   // GGGA next 4
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
#endif  // HAS_ARGBGRAYROW_SSSE3

#ifdef HAS_ARGBSEPIAROW_SSSE3
//    b = (r * 35 + g * 68 + b * 17) >> 7
//    g = (r * 45 + g * 88 + b * 22) >> 7
//    r = (r * 50 + g * 98 + b * 24) >> 7
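// A scalar sketch of the formulas above, applied in place to one BGRA pixel
// (illustrative only; the optimized path is the SSSE3 loop below).
static void ARGBSepiaPixelSketch(uint8* pixel) {
  const int b = pixel[0];
  const int g = pixel[1];
  const int r = pixel[2];
  const int sb = (r * 35 + g * 68 + b * 17) >> 7;
  const int sg = (r * 45 + g * 88 + b * 22) >> 7;
  const int sr = (r * 50 + g * 98 + b * 24) >> 7;
  pixel[0] = (uint8)(sb > 255 ? 255 : sb);  // packuswb saturates the same way.
  pixel[1] = (uint8)(sg > 255 ? 255 : sg);
  pixel[2] = (uint8)(sr > 255 ? 255 : sr);  // Alpha (pixel[3]) is left as is.
}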
// Constant for ARGB color to sepia tone.
static const vec8 kARGBToSepiaB = {
  17, 68, 35, 0, 17, 68, 35, 0, 17, 68, 35, 0, 17, 68, 35, 0
};

static const vec8 kARGBToSepiaG = {
  22, 88, 45, 0, 22, 88, 45, 0, 22, 88, 45, 0, 22, 88, 45, 0
};

static const vec8 kARGBToSepiaR = {
  24, 98, 50, 0, 24, 98, 50, 0, 24, 98, 50, 0, 24, 98, 50, 0
};

// Convert 8 ARGB pixels (32 bytes) to 8 Sepia ARGB pixels.
__declspec(naked) __declspec(align(16))
void ARGBSepiaRow_SSSE3(uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   /* dst_argb */
    mov        ecx, [esp + 8]   /* width */
    movdqa     xmm2, kARGBToSepiaB
    movdqa     xmm3, kARGBToSepiaG
    movdqa     xmm4, kARGBToSepiaR

 convertloop:
    movdqu     xmm0, [eax]  // B
    movdqu     xmm6, [eax + 16]
    pmaddubsw  xmm0, xmm2
    pmaddubsw  xmm6, xmm2
    phaddw     xmm0, xmm6
    psrlw      xmm0, 7
    packuswb   xmm0, xmm0   // 8 B values
    movdqu     xmm5, [eax]  // G
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm5, xmm3
    pmaddubsw  xmm1, xmm3
    phaddw     xmm5, xmm1
    psrlw      xmm5, 7
    packuswb   xmm5, xmm5   // 8 G values
    punpcklbw  xmm0, xmm5   // 8 BG values
    movdqu     xmm5, [eax]  // R
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm5, xmm4
    pmaddubsw  xmm1, xmm4
    phaddw     xmm5, xmm1
    psrlw      xmm5, 7
    packuswb   xmm5, xmm5   // 8 R values
    movdqu     xmm6, [eax]  // A
    movdqu     xmm1, [eax + 16]
    psrld      xmm6, 24
    psrld      xmm1, 24
    packuswb   xmm6, xmm1
    packuswb   xmm6, xmm6   // 8 A values
    punpcklbw  xmm5, xmm6   // 8 RA values
    movdqa     xmm1, xmm0   // Weave BG, RA together
    punpcklwd  xmm0, xmm5   // BGRA first 4
    punpckhwd  xmm1, xmm5   // BGRA next 4
    movdqu     [eax], xmm0
    movdqu     [eax + 16], xmm1
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
#endif  // HAS_ARGBSEPIAROW_SSSE3

#ifdef HAS_ARGBCOLORMATRIXROW_SSSE3
// Transform 8 ARGB pixels (32 bytes) with a color matrix.
// Same as Sepia except the matrix is provided.
// TODO(fbarchard): packuswbs only use half of the reg. To make RGBA, combine R
// and B into a high and low, then G/A, unpackl/hbw and then unpckl/hwd.
__declspec(naked) __declspec(align(16))
void ARGBColorMatrixRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
                              const int8* matrix_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_argb */
    mov        ecx, [esp + 12]  /* matrix_argb */
    movdqu     xmm5, [ecx]
    pshufd     xmm2, xmm5, 0x00
    pshufd     xmm3, xmm5, 0x55
    pshufd     xmm4, xmm5, 0xaa
    pshufd     xmm5, xmm5, 0xff
    mov        ecx, [esp + 16]  /* width */

 convertloop:
    movdqu     xmm0, [eax]  // B
    movdqu     xmm7, [eax + 16]
    pmaddubsw  xmm0, xmm2
    pmaddubsw  xmm7, xmm2
    movdqu     xmm6, [eax]  // G
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm6, xmm3
    pmaddubsw  xmm1, xmm3
    phaddsw    xmm0, xmm7   // B
    phaddsw    xmm6, xmm1   // G
    psraw      xmm0, 6      // B
    psraw      xmm6, 6      // G
    packuswb   xmm0, xmm0   // 8 B values
    packuswb   xmm6, xmm6   // 8 G values
    punpcklbw  xmm0, xmm6   // 8 BG values
    movdqu     xmm1, [eax]  // R
    movdqu     xmm7, [eax + 16]
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm7, xmm4
    phaddsw    xmm1, xmm7   // R
    movdqu     xmm6, [eax]  // A
    movdqu     xmm7, [eax + 16]
    pmaddubsw  xmm6, xmm5
    pmaddubsw  xmm7, xmm5
    phaddsw    xmm6, xmm7   // A
    psraw      xmm1, 6      // R
    psraw      xmm6, 6      // A
    packuswb   xmm1, xmm1   // 8 R values
    packuswb   xmm6, xmm6   // 8 A values
    punpcklbw  xmm1, xmm6   // 8 RA values
    movdqa     xmm6, xmm0   // Weave BG, RA together
    punpcklwd  xmm0, xmm1   // BGRA first 4
    punpckhwd  xmm6, xmm1   // BGRA next 4
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm6
    lea        eax, [eax + 32]
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
#endif  // HAS_ARGBCOLORMATRIXROW_SSSE3

#ifdef HAS_ARGBQUANTIZEROW_SSE2
// Quantize 4 ARGB pixels (16 bytes).
__declspec(naked) __declspec(align(16))
void ARGBQuantizeRow_SSE2(uint8* dst_argb, int scale, int interval_size,
                          int interval_offset, int width) {
  __asm {
    mov        eax, [esp + 4]    /* dst_argb */
    movd       xmm2, [esp + 8]   /* scale */
    movd       xmm3, [esp + 12]  /* interval_size */
    movd       xmm4, [esp + 16]  /* interval_offset */
    mov        ecx, [esp + 20]   /* width */
    pshuflw    xmm2, xmm2, 040h
    pshufd     xmm2, xmm2, 044h
    pshuflw    xmm3, xmm3, 040h
    pshufd     xmm3, xmm3, 044h
    pshuflw    xmm4, xmm4, 040h
    pshufd     xmm4, xmm4, 044h
    pxor       xmm5, xmm5  // constant 0
    pcmpeqb    xmm6, xmm6  // generate mask 0xff000000
    pslld      xmm6, 24

 convertloop:
    movdqu     xmm0, [eax]  // read 4 pixels
    punpcklbw  xmm0, xmm5   // first 2 pixels
    pmulhuw    xmm0, xmm2   // pixel * scale >> 16
    movdqu     xmm1, [eax]  // read 4 pixels
    punpckhbw  xmm1, xmm5   // next 2 pixels
    pmulhuw    xmm1, xmm2
    pmullw     xmm0, xmm3   // * interval_size
    movdqu     xmm7, [eax]  // read 4 pixels
    pmullw     xmm1, xmm3
    pand       xmm7, xmm6   // mask alpha
    paddw      xmm0, xmm4   // + interval_size / 2
    paddw      xmm1, xmm4
    packuswb   xmm0, xmm1
    por        xmm0, xmm7
    movdqu     [eax], xmm0
    lea        eax, [eax + 16]
    sub        ecx, 4
    jg         convertloop
    ret
  }
}
#endif  // HAS_ARGBQUANTIZEROW_SSE2

#ifdef HAS_ARGBSHADEROW_SSE2
// Shade 4 pixels at a time by specified value.
__declspec(naked) __declspec(align(16))
void ARGBShadeRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width,
                       uint32 value) {
  __asm {
    mov        eax, [esp + 4]   // src_argb
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    movd       xmm2, [esp + 16]  // value
    punpcklbw  xmm2, xmm2
    punpcklqdq xmm2, xmm2

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm0       // first 2
    punpckhbw  xmm1, xmm1       // next 2
    pmulhuw    xmm0, xmm2       // argb * value
    pmulhuw    xmm1, xmm2       // argb * value
    psrlw      xmm0, 8
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBSHADEROW_SSE2

#ifdef HAS_ARGBMULTIPLYROW_SSE2
// Multiply 2 rows of ARGB pixels together, 4 pixels at a time.
__declspec(naked) __declspec(align(16))
void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    pxor       xmm5, xmm5  // constant 0

 convertloop:
    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
    movdqu     xmm2, [esi]        // read 4 pixels from src_argb1
    movdqu     xmm1, xmm0
    movdqu     xmm3, xmm2
    punpcklbw  xmm0, xmm0         // first 2
    punpckhbw  xmm1, xmm1         // next 2
    punpcklbw  xmm2, xmm5         // first 2
    punpckhbw  xmm3, xmm5         // next 2
    pmulhuw    xmm0, xmm2         // src_argb0 * src_argb1 first 2
    pmulhuw    xmm1, xmm3         // src_argb0 * src_argb1 next 2
    lea        eax, [eax + 16]
    lea        esi, [esi + 16]
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_ARGBMULTIPLYROW_SSE2

#ifdef HAS_ARGBADDROW_SSE2
// Add 2 rows of ARGB pixels together, 4 pixels at a time.
// TODO(fbarchard): Port this to posix, neon and other math functions.
__declspec(naked) __declspec(align(16))
void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                     uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

    sub        ecx, 4
    jl         convertloop49

 convertloop4:
    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
    lea        eax, [eax + 16]
    movdqu     xmm1, [esi]        // read 4 pixels from src_argb1
    lea        esi, [esi + 16]
    paddusb    xmm0, xmm1         // src_argb0 + src_argb1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        convertloop4

 convertloop49:
    add        ecx, 4 - 1
    jl         convertloop19

 convertloop1:
    movd       xmm0, [eax]        // read 1 pixel from src_argb0
    lea        eax, [eax + 4]
    movd       xmm1, [esi]        // read 1 pixel from src_argb1
    lea        esi, [esi + 4]
    paddusb    xmm0, xmm1         // src_argb0 + src_argb1
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        convertloop1

 convertloop19:
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBADDROW_SSE2

#ifdef HAS_ARGBSUBTRACTROW_SSE2
// Subtract 2 rows of ARGB pixels together, 4 pixels at a time.
__declspec(naked) __declspec(align(16))
void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

 convertloop:
    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
    lea        eax, [eax + 16]
    movdqu     xmm1, [esi]        // read 4 pixels from src_argb1
    lea        esi, [esi + 16]
    psubusb    xmm0, xmm1         // src_argb0 - src_argb1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_ARGBSUBTRACTROW_SSE2

#ifdef HAS_ARGBMULTIPLYROW_AVX2
// Multiply 2 rows of ARGB pixels together, 8 pixels at a time.
__declspec(naked) __declspec(align(16))
void ARGBMultiplyRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    vpxor      ymm5, ymm5, ymm5     // constant 0

 convertloop:
    vmovdqu    ymm1, [eax]        // read 8 pixels from src_argb0
    lea        eax, [eax + 32]
    vmovdqu    ymm3, [esi]        // read 8 pixels from src_argb1
    lea        esi, [esi + 32]
    vpunpcklbw ymm0, ymm1, ymm1   // low 4
    vpunpckhbw ymm1, ymm1, ymm1   // high 4
    vpunpcklbw ymm2, ymm3, ymm5   // low 4
    vpunpckhbw ymm3, ymm3, ymm5   // high 4
    vpmulhuw   ymm0, ymm0, ymm2   // src_argb0 * src_argb1 low 4
    vpmulhuw   ymm1, ymm1, ymm3   // src_argb0 * src_argb1 high 4
    vpackuswb  ymm0, ymm0, ymm1
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBMULTIPLYROW_AVX2

#ifdef HAS_ARGBADDROW_AVX2
// Add 2 rows of ARGB pixels together, 8 pixels at a time.
__declspec(naked) __declspec(align(16))
void ARGBAddRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
                     uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

 convertloop:
    vmovdqu    ymm0, [eax]              // read 8 pixels from src_argb0
    lea        eax, [eax + 32]
    vpaddusb   ymm0, ymm0, [esi]        // add 8 pixels from src_argb1
    lea        esi, [esi + 32]
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBADDROW_AVX2

#ifdef HAS_ARGBSUBTRACTROW_AVX2
// Subtract 2 rows of ARGB pixels together, 8 pixels at a time.
__declspec(naked) __declspec(align(16))
void ARGBSubtractRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

 convertloop:
    vmovdqu    ymm0, [eax]              // read 8 pixels from src_argb0
    lea        eax, [eax + 32]
    vpsubusb   ymm0, ymm0, [esi]        // src_argb0 - src_argb1
    lea        esi, [esi + 32]
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBSUBTRACTROW_AVX2

#ifdef HAS_SOBELXROW_SSE2
// SobelX as a matrix is
// -1  0  1
// -2  0  2
// -1  0  1
__declspec(naked) __declspec(align(16))
void SobelXRow_SSE2(const uint8* src_y0, const uint8* src_y1,
                    const uint8* src_y2, uint8* dst_sobelx, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_y0
    mov        esi, [esp + 8 + 8]   // src_y1
    mov        edi, [esp + 8 + 12]  // src_y2
    mov        edx, [esp + 8 + 16]  // dst_sobelx
    mov        ecx, [esp + 8 + 20]  // width
    sub        esi, eax
    sub        edi, eax
    sub        edx, eax
    pxor       xmm5, xmm5  // constant 0

 convertloop:
    movq       xmm0, qword ptr [eax]            // read 8 pixels from src_y0[0]
    movq       xmm1, qword ptr [eax + 2]        // read 8 pixels from src_y0[2]
    punpcklbw  xmm0, xmm5
    punpcklbw  xmm1, xmm5
    psubw      xmm0, xmm1
    movq       xmm1, qword ptr [eax + esi]      // read 8 pixels from src_y1[0]
    movq       xmm2, qword ptr [eax + esi + 2]  // read 8 pixels from src_y1[2]
    punpcklbw  xmm1, xmm5
    punpcklbw  xmm2, xmm5
    psubw      xmm1, xmm2
    movq       xmm2, qword ptr [eax + edi]      // read 8 pixels from src_y2[0]
    movq       xmm3, qword ptr [eax + edi + 2]  // read 8 pixels from src_y2[2]
    punpcklbw  xmm2, xmm5
    punpcklbw  xmm3, xmm5
    psubw      xmm2, xmm3
    paddw      xmm0, xmm2
    paddw      xmm0, xmm1
    paddw      xmm0, xmm1
    pxor       xmm1, xmm1   // abs = max(xmm0, -xmm0).  SSSE3 could use pabsw
    psubw      xmm1, xmm0
    pmaxsw     xmm0, xmm1
    packuswb   xmm0, xmm0
    movq       qword ptr [eax + edx], xmm0
    lea        eax, [eax + 8]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
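// A scalar sketch of one SobelX sample as computed above (illustrative
// only): weighted horizontal differences of the three source rows, then
// absolute value and saturation to 8 bits.
static uint8 SobelXPixelSketch(const uint8* src_y0, const uint8* src_y1,
                               const uint8* src_y2, int i) {
  int sobel = (src_y0[i] - src_y0[i + 2]) +
              2 * (src_y1[i] - src_y1[i + 2]) +
              (src_y2[i] - src_y2[i + 2]);
  if (sobel < 0) {
    sobel = -sobel;                           // pmaxsw(x, -x) is abs().
  }
  return (uint8)(sobel > 255 ? 255 : sobel);  // packuswb saturates.
}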
#endif  // HAS_SOBELXROW_SSE2

#ifdef HAS_SOBELYROW_SSE2
// SobelY as a matrix is
// -1 -2 -1
//  0  0  0
//  1  2  1
__declspec(naked) __declspec(align(16))
void SobelYRow_SSE2(const uint8* src_y0, const uint8* src_y1,
                    uint8* dst_sobely, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_y0
    mov        esi, [esp + 4 + 8]   // src_y1
    mov        edx, [esp + 4 + 12]  // dst_sobely
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax
    sub        edx, eax
    pxor       xmm5, xmm5  // constant 0

 convertloop:
    movq       xmm0, qword ptr [eax]            // read 8 pixels from src_y0[0]
    movq       xmm1, qword ptr [eax + esi]      // read 8 pixels from src_y1[0]
    punpcklbw  xmm0, xmm5
    punpcklbw  xmm1, xmm5
    psubw      xmm0, xmm1
    movq       xmm1, qword ptr [eax + 1]        // read 8 pixels from src_y0[1]
    movq       xmm2, qword ptr [eax + esi + 1]  // read 8 pixels from src_y1[1]
    punpcklbw  xmm1, xmm5
    punpcklbw  xmm2, xmm5
    psubw      xmm1, xmm2
    movq       xmm2, qword ptr [eax + 2]        // read 8 pixels from src_y0[2]
    movq       xmm3, qword ptr [eax + esi + 2]  // read 8 pixels from src_y1[2]
    punpcklbw  xmm2, xmm5
    punpcklbw  xmm3, xmm5
    psubw      xmm2, xmm3
    paddw      xmm0, xmm2
    paddw      xmm0, xmm1
    paddw      xmm0, xmm1
    pxor       xmm1, xmm1   // abs = max(xmm0, -xmm0).  SSSE3 could use pabsw
    psubw      xmm1, xmm0
    pmaxsw     xmm0, xmm1
    packuswb   xmm0, xmm0
    movq       qword ptr [eax + edx], xmm0
    lea        eax, [eax + 8]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELYROW_SSE2

#ifdef HAS_SOBELROW_SSE2
// Adds Sobel X and Sobel Y and stores Sobel into ARGB.
// A = 255
// R = Sobel
// G = Sobel
// B = Sobel
__declspec(naked) __declspec(align(16))
void SobelRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
                   uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_sobelx
    mov        esi, [esp + 4 + 8]   // src_sobely
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax
    pcmpeqb    xmm5, xmm5           // alpha 255
    pslld      xmm5, 24             // 0xff000000

 convertloop:
    movdqu     xmm0, [eax]            // read 16 pixels src_sobelx
    movdqu     xmm1, [eax + esi]      // read 16 pixels src_sobely
    lea        eax, [eax + 16]
    paddusb    xmm0, xmm1             // sobel = sobelx + sobely
    movdqa     xmm2, xmm0             // GG
    punpcklbw  xmm2, xmm0             // First 8
    punpckhbw  xmm0, xmm0             // Next 8
    movdqa     xmm1, xmm2             // GGGG
    punpcklwd  xmm1, xmm2             // First 4
    punpckhwd  xmm2, xmm2             // Next 4
    por        xmm1, xmm5             // GGGA
    por        xmm2, xmm5
    movdqa     xmm3, xmm0             // GGGG
    punpcklwd  xmm3, xmm0             // Next 4
    punpckhwd  xmm0, xmm0             // Last 4
    por        xmm3, xmm5             // GGGA
    por        xmm0, xmm5
    movdqu     [edx], xmm1
    movdqu     [edx + 16], xmm2
    movdqu     [edx + 32], xmm3
    movdqu     [edx + 48], xmm0
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    pop        esi
    ret
  }
}
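// A scalar sketch of the packing above (illustrative only): the saturated
// sum of the two gradients is broadcast to B, G and R, with alpha = 255.
static void SobelPixelSketch(uint8 sobelx, uint8 sobely, uint8* dst_argb) {
  const int s = sobelx + sobely;
  const uint8 g = (uint8)(s > 255 ? 255 : s);  // paddusb saturation.
  dst_argb[0] = g;    // B
  dst_argb[1] = g;    // G
  dst_argb[2] = g;    // R
  dst_argb[3] = 255;  // A
}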
#endif  // HAS_SOBELROW_SSE2

#ifdef HAS_SOBELTOPLANEROW_SSE2
// Adds Sobel X and Sobel Y and stores Sobel into a plane.
__declspec(naked) __declspec(align(16))
void SobelToPlaneRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
                          uint8* dst_y, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_sobelx
    mov        esi, [esp + 4 + 8]   // src_sobely
    mov        edx, [esp + 4 + 12]  // dst_y
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax

 convertloop:
    movdqu     xmm0, [eax]            // read 16 pixels src_sobelx
    movdqu     xmm1, [eax + esi]      // read 16 pixels src_sobely
    lea        eax, [eax + 16]
    paddusb    xmm0, xmm1             // sobel = sobelx + sobely
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELTOPLANEROW_SSE2

#ifdef HAS_SOBELXYROW_SSE2
// Mixes Sobel X, Sobel Y and Sobel into ARGB.
// A = 255
// R = Sobel X
// G = Sobel
// B = Sobel Y
__declspec(naked) __declspec(align(16))
void SobelXYRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
                     uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_sobelx
    mov        esi, [esp + 4 + 8]   // src_sobely
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax
    pcmpeqb    xmm5, xmm5           // alpha 255

 convertloop:
4533 4534
    movdqu     xmm0, [eax]            // read 16 pixels src_sobelx
    movdqu     xmm1, [eax + esi]      // read 16 pixels src_sobely
    lea        eax, [eax + 16]
    movdqa     xmm2, xmm0
    paddusb    xmm2, xmm1             // sobel = sobelx + sobely
    movdqa     xmm3, xmm0             // XA
    punpcklbw  xmm3, xmm5
    punpckhbw  xmm0, xmm5
    movdqa     xmm4, xmm1             // YS
    punpcklbw  xmm4, xmm2
    punpckhbw  xmm1, xmm2
    movdqa     xmm6, xmm4             // YSXA
    punpcklwd  xmm6, xmm3             // First 4
    punpckhwd  xmm4, xmm3             // Next 4
    movdqa     xmm7, xmm1             // YSXA
    punpcklwd  xmm7, xmm0             // Next 4
    punpckhwd  xmm1, xmm0             // Last 4
    movdqu     [edx], xmm6
    movdqu     [edx + 16], xmm4
    movdqu     [edx + 32], xmm7
    movdqu     [edx + 48], xmm1
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELXYROW_SSE2
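
// Scalar sketch of the SobelXYRow mixing documented above (illustrative only,
// not part of the libyuv API): A = 255, R = Sobel X, G = saturated Sobel sum,
// B = Sobel Y.
static void SobelXYRow_C_Sketch(const uint8* src_sobelx,
                                const uint8* src_sobely,
                                uint8* dst_argb, int width) {
  int i;
  for (i = 0; i < width; ++i) {
    int s = src_sobelx[i] + src_sobely[i];
    dst_argb[0] = src_sobely[i];               // B = Sobel Y
    dst_argb[1] = (uint8)(s > 255 ? 255 : s);  // G = Sobel
    dst_argb[2] = src_sobelx[i];               // R = Sobel X
    dst_argb[3] = 255;                         // A
    dst_argb += 4;
  }
}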
#ifdef HAS_CUMULATIVESUMTOAVERAGEROW_SSE2
// Consider float CumulativeSum.
// Consider calling CumulativeSum one row at a time as needed.
// Consider circular CumulativeSum buffer of radius * 2 + 1 height.
// Convert cumulative sum for an area to an average for 1 pixel.
// topleft is pointer to top left of CumulativeSum buffer for area.
// botleft is pointer to bottom left of CumulativeSum buffer.
// width is offset from left to right of area in CumulativeSum buffer measured
//   in number of ints.
// area is the number of pixels in the area being averaged.
// dst points to pixel to store result to.
// count is number of averaged pixels to produce.
// Does 4 pixels at a time.
void CumulativeSumToAverageRow_SSE2(const int32* topleft, const int32* botleft,
                                    int width, int area, uint8* dst,
                                    int count) {
  __asm {
    mov        eax, topleft  // eax topleft
    mov        esi, botleft  // esi botleft
    mov        edx, width
    movd       xmm5, area
    mov        edi, dst
    mov        ecx, count
    cvtdq2ps   xmm5, xmm5
    rcpss      xmm4, xmm5  // 1.0f / area
    pshufd     xmm4, xmm4, 0
    sub        ecx, 4
    jl         l4b

    cmp        area, 128  // 128 pixels will not overflow 15 bits.
    ja         l4

    pshufd     xmm5, xmm5, 0        // area
    pcmpeqb    xmm6, xmm6           // constant of 65536.0 - 1 = 65535.0
    psrld      xmm6, 16
    cvtdq2ps   xmm6, xmm6
    addps      xmm5, xmm6           // (65536.0 + area - 1)
    mulps      xmm5, xmm4           // (65536.0 + area - 1) * 1 / area
    cvtps2dq   xmm5, xmm5           // 0.16 fixed point
    packssdw   xmm5, xmm5           // 16 bit shorts

    // 4 pixel loop small blocks.
  s4:
    // top left
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]

    // - top right
    psubd      xmm0, [eax + edx * 4]
    psubd      xmm1, [eax + edx * 4 + 16]
    psubd      xmm2, [eax + edx * 4 + 32]
    psubd      xmm3, [eax + edx * 4 + 48]
    lea        eax, [eax + 64]

    // - bottom left
    psubd      xmm0, [esi]
    psubd      xmm1, [esi + 16]
    psubd      xmm2, [esi + 32]
    psubd      xmm3, [esi + 48]

    // + bottom right
    paddd      xmm0, [esi + edx * 4]
    paddd      xmm1, [esi + edx * 4 + 16]
    paddd      xmm2, [esi + edx * 4 + 32]
    paddd      xmm3, [esi + edx * 4 + 48]
    lea        esi, [esi + 64]

    packssdw   xmm0, xmm1  // pack 4 pixels into 2 registers
    packssdw   xmm2, xmm3

    pmulhuw    xmm0, xmm5
    pmulhuw    xmm2, xmm5

    packuswb   xmm0, xmm2
    movdqu     [edi], xmm0
    lea        edi, [edi + 16]
    sub        ecx, 4
    jge        s4

    jmp        l4b

    // 4 pixel loop
  l4:
    // top left
4650 4651 4652 4653
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]

    // - top right
    psubd      xmm0, [eax + edx * 4]
    psubd      xmm1, [eax + edx * 4 + 16]
    psubd      xmm2, [eax + edx * 4 + 32]
    psubd      xmm3, [eax + edx * 4 + 48]
    lea        eax, [eax + 64]

    // - bottom left
    psubd      xmm0, [esi]
    psubd      xmm1, [esi + 16]
    psubd      xmm2, [esi + 32]
    psubd      xmm3, [esi + 48]

    // + bottom right
    paddd      xmm0, [esi + edx * 4]
    paddd      xmm1, [esi + edx * 4 + 16]
    paddd      xmm2, [esi + edx * 4 + 32]
    paddd      xmm3, [esi + edx * 4 + 48]
    lea        esi, [esi + 64]

    cvtdq2ps   xmm0, xmm0   // Average = Sum * 1 / Area
    cvtdq2ps   xmm1, xmm1
    mulps      xmm0, xmm4
    mulps      xmm1, xmm4
    cvtdq2ps   xmm2, xmm2
    cvtdq2ps   xmm3, xmm3
    mulps      xmm2, xmm4
    mulps      xmm3, xmm4
    cvtps2dq   xmm0, xmm0
    cvtps2dq   xmm1, xmm1
    cvtps2dq   xmm2, xmm2
    cvtps2dq   xmm3, xmm3
    packssdw   xmm0, xmm1
    packssdw   xmm2, xmm3
    packuswb   xmm0, xmm2
    movdqu     [edi], xmm0
    lea        edi, [edi + 16]
    sub        ecx, 4
    jge        l4

  l4b:
    add        ecx, 4 - 1
    jl         l1b

    // 1 pixel loop
  l1:
    movdqu     xmm0, [eax]
    psubd      xmm0, [eax + edx * 4]
    lea        eax, [eax + 16]
    psubd      xmm0, [esi]
    paddd      xmm0, [esi + edx * 4]
    lea        esi, [esi + 16]
    cvtdq2ps   xmm0, xmm0
    mulps      xmm0, xmm4
    cvtps2dq   xmm0, xmm0
    packssdw   xmm0, xmm0
    packuswb   xmm0, xmm0
    movd       dword ptr [edi], xmm0
    lea        edi, [edi + 4]
    sub        ecx, 1
    jge        l1
  l1b:
  }
}
#endif  // HAS_CUMULATIVESUMTOAVERAGEROW_SSE2
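
// Scalar sketch of the area average computed above from the cumulative-sum
// (integral image) buffer: per channel, sum = topleft - topright - botleft +
// botright, scaled by 1 / area. Illustrative only; the helper name and the
// float rounding are assumptions, not the libyuv API.
static void CumulativeSumToAverageRow_C_Sketch(const int32* topleft,
                                               const int32* botleft,
                                               int width, int area, uint8* dst,
                                               int count) {
  const float ooa = 1.0f / area;
  int i;
  for (i = 0; i < count; ++i) {
    int c;
    for (c = 0; c < 4; ++c) {  // B, G, R, A
      int32 sum = topleft[c] - topleft[width + c] -
                  botleft[c] + botleft[width + c];
      float v = sum * ooa;
      dst[c] = (uint8)(v < 0.f ? 0.f : (v > 255.f ? 255.f : v));
    }
    topleft += 4;  // Advance one ARGB pixel (4 ints) per iteration.
    botleft += 4;
    dst += 4;
  }
}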

#ifdef HAS_COMPUTECUMULATIVESUMROW_SSE2
// Creates a table of cumulative sums where each value is a sum of all values
// above and to the left of the value.
void ComputeCumulativeSumRow_SSE2(const uint8* row, int32* cumsum,
                                  const int32* previous_cumsum, int width) {
  __asm {
    mov        eax, row
    mov        edx, cumsum
    mov        esi, previous_cumsum
    mov        ecx, width
    pxor       xmm0, xmm0
    pxor       xmm1, xmm1

    sub        ecx, 4
    jl         l4b
    test       edx, 15
    jne        l4b

    // 4 pixel loop
  l4:
    movdqu     xmm2, [eax]  // 4 argb pixels 16 bytes.
    lea        eax, [eax + 16]
    movdqa     xmm4, xmm2

    punpcklbw  xmm2, xmm1
    movdqa     xmm3, xmm2
    punpcklwd  xmm2, xmm1
    punpckhwd  xmm3, xmm1

    punpckhbw  xmm4, xmm1
    movdqa     xmm5, xmm4
    punpcklwd  xmm4, xmm1
    punpckhwd  xmm5, xmm1

    paddd      xmm0, xmm2
    movdqu     xmm2, [esi]  // previous row above.
    paddd      xmm2, xmm0

    paddd      xmm0, xmm3
    movdqu     xmm3, [esi + 16]
    paddd      xmm3, xmm0

    paddd      xmm0, xmm4
    movdqu     xmm4, [esi + 32]
    paddd      xmm4, xmm0

    paddd      xmm0, xmm5
    movdqu     xmm5, [esi + 48]
    lea        esi, [esi + 64]
    paddd      xmm5, xmm0

    movdqu     [edx], xmm2
    movdqu     [edx + 16], xmm3
    movdqu     [edx + 32], xmm4
    movdqu     [edx + 48], xmm5

    lea        edx, [edx + 64]
    sub        ecx, 4
    jge        l4

  l4b:
    add        ecx, 4 - 1
    jl         l1b

    // 1 pixel loop
  l1:
    movd       xmm2, dword ptr [eax]  // 1 argb pixel 4 bytes.
    lea        eax, [eax + 4]
    punpcklbw  xmm2, xmm1
    punpcklwd  xmm2, xmm1
    paddd      xmm0, xmm2
    movdqu     xmm2, [esi]
    lea        esi, [esi + 16]
    paddd      xmm2, xmm0
    movdqu     [edx], xmm2
    lea        edx, [edx + 16]
    sub        ecx, 1
    jge        l1

 l1b:
  }
}
#endif  // HAS_COMPUTECUMULATIVESUMROW_SSE2
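
// Scalar sketch of the cumulative-sum row above (illustrative, hypothetical
// helper): each output int is the running sum of the current row plus the
// cumulative sum directly above it in previous_cumsum.
static void ComputeCumulativeSumRow_C_Sketch(const uint8* row, int32* cumsum,
                                             const int32* previous_cumsum,
                                             int width) {
  int32 row_sum[4] = { 0, 0, 0, 0 };
  int x;
  for (x = 0; x < width; ++x) {
    int c;
    for (c = 0; c < 4; ++c) {
      row_sum[c] += row[x * 4 + c];
      cumsum[x * 4 + c] = row_sum[c] + previous_cumsum[x * 4 + c];
    }
  }
}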

#ifdef HAS_ARGBAFFINEROW_SSE2
// Copy ARGB pixels from source image with slope to a row of destination.
__declspec(naked) __declspec(align(16))
LIBYUV_API
void ARGBAffineRow_SSE2(const uint8* src_argb, int src_argb_stride,
                        uint8* dst_argb, const float* uv_dudv, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 12]  // src_argb
    mov        esi, [esp + 16]  // stride
    mov        edx, [esp + 20]  // dst_argb
    mov        ecx, [esp + 24]  // pointer to uv_dudv
    movq       xmm2, qword ptr [ecx]  // uv
    movq       xmm7, qword ptr [ecx + 8]  // dudv
    mov        ecx, [esp + 28]  // width
    shl        esi, 16          // stride in upper 16 bits
    add        esi, 4           // 4 bytes per ARGB pixel in lower 16 bits
    movd       xmm5, esi
    sub        ecx, 4
    jl         l4b
    // setup for 4 pixel loop
    pshufd     xmm7, xmm7, 0x44  // dup dudv
    pshufd     xmm5, xmm5, 0  // dup 4, stride
    movdqa     xmm0, xmm2    // x0, y0, x1, y1
    addps      xmm0, xmm7
    movlhps    xmm2, xmm0
    movdqa     xmm4, xmm7
    addps      xmm4, xmm4    // dudv *= 2
    movdqa     xmm3, xmm2    // x2, y2, x3, y3
    addps      xmm3, xmm4
    addps      xmm4, xmm4    // dudv *= 4
    // 4 pixel loop
  l4:
    cvttps2dq  xmm0, xmm2    // x, y float to int first 2
    cvttps2dq  xmm1, xmm3    // x, y float to int next 2
    packssdw   xmm0, xmm1    // x, y as 8 shorts
    pmaddwd    xmm0, xmm5    // offsets = x * 4 + y * stride.
    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // shift right
    movd       edi, xmm0
    pshufd     xmm0, xmm0, 0x39  // shift right
    movd       xmm1, [eax + esi]  // read pixel 0
    movd       xmm6, [eax + edi]  // read pixel 1
    punpckldq  xmm1, xmm6     // combine pixel 0 and 1
    addps      xmm2, xmm4    // x, y += dx, dy first 2
    movq       qword ptr [edx], xmm1
    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // shift right
    movd       edi, xmm0
    movd       xmm6, [eax + esi]  // read pixel 2
    movd       xmm0, [eax + edi]  // read pixel 3
    punpckldq  xmm6, xmm0     // combine pixel 2 and 3
    addps      xmm3, xmm4    // x, y += dx, dy next 2
    movq       qword ptr 8[edx], xmm6
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        l4
  l4b:
    add        ecx, 4 - 1
    jl         l1b

    // 1 pixel loop
  l1:
    cvttps2dq  xmm0, xmm2    // x, y float to int
    packssdw   xmm0, xmm0    // x, y as shorts
    pmaddwd    xmm0, xmm5    // offset = x * 4 + y * stride
    addps      xmm2, xmm7    // x, y += dx, dy
    movd       esi, xmm0
    movd       xmm0, [eax + esi]  // copy a pixel
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        l1
  l1b:
    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBAFFINEROW_SSE2
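
// Scalar sketch of the affine row copy above (illustrative only): uv_dudv
// holds the starting (u, v) source position followed by the per-pixel
// (du, dv) step; each destination pixel copies the nearest source pixel.
static void ARGBAffineRow_C_Sketch(const uint8* src_argb, int src_argb_stride,
                                   uint8* dst_argb, const float* uv_dudv,
                                   int width) {
  float u = uv_dudv[0];
  float v = uv_dudv[1];
  int i;
  for (i = 0; i < width; ++i) {
    int x = (int)u;  // Truncate toward zero, like cvttps2dq.
    int y = (int)v;
    *(uint32*)dst_argb =
        *(const uint32*)(src_argb + y * src_argb_stride + x * 4);
    dst_argb += 4;
    u += uv_dudv[2];
    v += uv_dudv[3];
  }
}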

#ifdef HAS_INTERPOLATEROW_AVX2
// Bilinear filter 32x2 -> 32x1
__declspec(naked) __declspec(align(16))
void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) {
  __asm {
    push       esi
    push       edi
    mov        edi, [esp + 8 + 4]   // dst_ptr
    mov        esi, [esp + 8 + 8]   // src_ptr
    mov        edx, [esp + 8 + 12]  // src_stride
    mov        ecx, [esp + 8 + 16]  // dst_width
    mov        eax, [esp + 8 + 20]  // source_y_fraction (0..255)
    shr        eax, 1
    // Dispatch to specialized filters if applicable.
    cmp        eax, 0
    je         xloop100  // 0 / 128.  Blend 100 / 0.
    sub        edi, esi
    cmp        eax, 32
    je         xloop75   // 32 / 128 is 0.25.  Blend 75 / 25.
    cmp        eax, 64
    je         xloop50   // 64 / 128 is 0.50.  Blend 50 / 50.
    cmp        eax, 96
    je         xloop25   // 96 / 128 is 0.75.  Blend 25 / 75.

    vmovd      xmm0, eax  // high fraction 0..127
    neg        eax
    add        eax, 128
    vmovd      xmm5, eax  // low fraction 128..1
    vpunpcklbw xmm5, xmm5, xmm0
    vpunpcklwd xmm5, xmm5, xmm5
    vpxor      ymm0, ymm0, ymm0
    vpermd     ymm5, ymm0, ymm5

  xloop:
    vmovdqu    ymm0, [esi]
    vmovdqu    ymm2, [esi + edx]
    vpunpckhbw ymm1, ymm0, ymm2  // mutates
    vpunpcklbw ymm0, ymm0, ymm2  // mutates
    vpmaddubsw ymm0, ymm0, ymm5
    vpmaddubsw ymm1, ymm1, ymm5
    vpsrlw     ymm0, ymm0, 7
    vpsrlw     ymm1, ymm1, 7
    vpackuswb  ymm0, ymm0, ymm1  // unmutates
    vmovdqu    [esi + edi], ymm0
    lea        esi, [esi + 32]
    sub        ecx, 32
    jg         xloop
    jmp        xloop99

    // Blend 25 / 75.
  xloop25:
    vmovdqu    ymm0, [esi]
    vmovdqu    ymm1, [esi + edx]
    vpavgb     ymm0, ymm0, ymm1
    vpavgb     ymm0, ymm0, ymm1
    vmovdqu    [esi + edi], ymm0
    lea        esi, [esi + 32]
    sub        ecx, 32
    jg         xloop25
    jmp        xloop99

    // Blend 50 / 50.
  xloop50:
    vmovdqu    ymm0, [esi]
    vpavgb     ymm0, ymm0, [esi + edx]
    vmovdqu    [esi + edi], ymm0
    lea        esi, [esi + 32]
    sub        ecx, 32
    jg         xloop50
    jmp        xloop99

    // Blend 75 / 25.
  xloop75:
    vmovdqu    ymm1, [esi]
    vmovdqu    ymm0, [esi + edx]
    vpavgb     ymm0, ymm0, ymm1
    vpavgb     ymm0, ymm0, ymm1
    vmovdqu    [esi + edi], ymm0
    lea        esi, [esi + 32]
    sub        ecx, 32
    jg         xloop75
    jmp        xloop99

    // Blend 100 / 0 - Copy row unchanged.
  xloop100:
    rep movsb

  xloop99:
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_INTERPOLATEROW_AVX2

// Bilinear filter 16x2 -> 16x1
__declspec(naked) __declspec(align(16))
void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
                          ptrdiff_t src_stride, int dst_width,
                          int source_y_fraction) {
  __asm {
    push       esi
    push       edi
    mov        edi, [esp + 8 + 4]   // dst_ptr
    mov        esi, [esp + 8 + 8]   // src_ptr
    mov        edx, [esp + 8 + 12]  // src_stride
    mov        ecx, [esp + 8 + 16]  // dst_width
    mov        eax, [esp + 8 + 20]  // source_y_fraction (0..255)
    sub        edi, esi
    shr        eax, 1
    // Dispatch to specialized filters if applicable.
    cmp        eax, 0
    je         xloop100  // 0 / 128.  Blend 100 / 0.
    cmp        eax, 32
    je         xloop75   // 32 / 128 is 0.25.  Blend 75 / 25.
    cmp        eax, 64
    je         xloop50   // 64 / 128 is 0.50.  Blend 50 / 50.
    cmp        eax, 96
    je         xloop25   // 96 / 128 is 0.75.  Blend 25 / 75.

    movd       xmm0, eax  // high fraction 0..127
    neg        eax
    add        eax, 128
    movd       xmm5, eax  // low fraction 128..1
    punpcklbw  xmm5, xmm0
    punpcklwd  xmm5, xmm5
    pshufd     xmm5, xmm5, 0

  xloop:
    movdqu     xmm0, [esi]
    movdqu     xmm2, [esi + edx]
    movdqu     xmm1, xmm0
    punpcklbw  xmm0, xmm2
    punpckhbw  xmm1, xmm2
    pmaddubsw  xmm0, xmm5
    pmaddubsw  xmm1, xmm5
    psrlw      xmm0, 7
    psrlw      xmm1, 7
    packuswb   xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop
    jmp        xloop99

    // Blend 25 / 75.
  xloop25:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop25
    jmp        xloop99

    // Blend 50 / 50.
  xloop50:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop50
    jmp        xloop99

    // Blend 75 / 25.
  xloop75:
    movdqu     xmm1, [esi]
    movdqu     xmm0, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop75
    jmp        xloop99

    // Blend 100 / 0 - Copy row unchanged.
  xloop100:
    movdqu     xmm0, [esi]
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop100

  xloop99:
    pop        edi
    pop        esi
    ret
  }
}

#ifdef HAS_INTERPOLATEROW_SSE2
// Bilinear filter 16x2 -> 16x1
__declspec(naked) __declspec(align(16))
void InterpolateRow_SSE2(uint8* dst_ptr, const uint8* src_ptr,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) {
  __asm {
    push       esi
    push       edi
    mov        edi, [esp + 8 + 4]   // dst_ptr
    mov        esi, [esp + 8 + 8]   // src_ptr
    mov        edx, [esp + 8 + 12]  // src_stride
    mov        ecx, [esp + 8 + 16]  // dst_width
    mov        eax, [esp + 8 + 20]  // source_y_fraction (0..255)
    sub        edi, esi
    // Dispatch to specialized filters if applicable.
    cmp        eax, 0
    je         xloop100  // 0 / 256.  Blend 100 / 0.
    cmp        eax, 64
    je         xloop75   // 64 / 256 is 0.25.  Blend 75 / 25.
    cmp        eax, 128
    je         xloop50   // 128 / 256 is 0.50.  Blend 50 / 50.
    cmp        eax, 192
    je         xloop25   // 192 / 256 is 0.75.  Blend 25 / 75.

    movd       xmm5, eax            // xmm5 = y fraction
    punpcklbw  xmm5, xmm5
    psrlw      xmm5, 1
    punpcklwd  xmm5, xmm5
    punpckldq  xmm5, xmm5
    punpcklqdq xmm5, xmm5
    pxor       xmm4, xmm4

  xloop:
    movdqu     xmm0, [esi]  // row0
    movdqu     xmm2, [esi + edx]  // row1
    movdqu     xmm1, xmm0
    movdqu     xmm3, xmm2
    punpcklbw  xmm2, xmm4
    punpckhbw  xmm3, xmm4
    punpcklbw  xmm0, xmm4
    punpckhbw  xmm1, xmm4
    psubw      xmm2, xmm0  // row1 - row0
    psubw      xmm3, xmm1
    paddw      xmm2, xmm2  // 9 bits * 15 bits = 8.16
    paddw      xmm3, xmm3
    pmulhw     xmm2, xmm5  // scale diff
    pmulhw     xmm3, xmm5
    paddw      xmm0, xmm2  // sum rows
    paddw      xmm1, xmm3
    packuswb   xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop
    jmp        xloop99

    // Blend 25 / 75.
  xloop25:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop25
    jmp        xloop99

    // Blend 50 / 50.
  xloop50:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop50
    jmp        xloop99

    // Blend 75 / 25.
  xloop75:
    movdqu     xmm1, [esi]
    movdqu     xmm0, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop75
    jmp        xloop99

    // Blend 100 / 0 - Copy row unchanged.
  xloop100:
    movdqu     xmm0, [esi]
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop100

  xloop99:
    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_INTERPOLATEROW_SSE2
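
// Scalar sketch of the bilinear row blend used by the InterpolateRow versions
// above (illustrative only, hypothetical helper name):
// dst = (row0 * (256 - f) + row1 * f) / 256, where f is source_y_fraction.
// The SSSE3/AVX2 paths pre-halve f to 7 bits so it fits pmaddubsw.
static void InterpolateRow_C_Sketch(uint8* dst_ptr, const uint8* src_ptr,
                                    ptrdiff_t src_stride, int dst_width,
                                    int source_y_fraction) {
  int y1_fraction = source_y_fraction;  // Weight of the second row, 0..255.
  int y0_fraction = 256 - y1_fraction;  // Weight of the first row.
  const uint8* src_ptr1 = src_ptr + src_stride;
  int x;
  for (x = 0; x < dst_width; ++x) {
    dst_ptr[x] = (uint8)((src_ptr[x] * y0_fraction +
                          src_ptr1[x] * y1_fraction) >> 8);
  }
}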
__declspec(naked) __declspec(align(16))
void ARGBToBayerRow_SSSE3(const uint8* src_argb, uint8* dst_bayer,
                          uint32 selector, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_argb
    mov        edx, [esp + 8]    // dst_bayer
    movd       xmm5, [esp + 12]  // selector
    mov        ecx, [esp + 16]   // pix
    pshufd     xmm5, xmm5, 0

  wloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax, [eax + 32]
    pshufb     xmm0, xmm5
    pshufb     xmm1, xmm5
    punpckldq  xmm0, xmm1
    movq       qword ptr [edx], xmm0
    lea        edx, [edx + 8]
    sub        ecx, 8
    jg         wloop
    ret
  }
}

// Specialized ARGB to Bayer that just isolates G channel.
__declspec(naked) __declspec(align(16))
void ARGBToBayerGGRow_SSE2(const uint8* src_argb, uint8* dst_bayer,
                           uint32 selector, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_argb
    mov        edx, [esp + 8]    // dst_bayer
                                 // selector
    mov        ecx, [esp + 16]   // pix
    pcmpeqb    xmm5, xmm5        // generate mask 0x000000ff
    psrld      xmm5, 24

  wloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax, [eax + 32]
    psrld      xmm0, 8  // Move green to bottom.
    psrld      xmm1, 8
    pand       xmm0, xmm5
    pand       xmm1, xmm5
    packssdw   xmm0, xmm1
    packuswb   xmm0, xmm1
    movq       qword ptr [edx], xmm0
    lea        edx, [edx + 8]
    sub        ecx, 8
    jg         wloop
    ret
  }
}
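
// Scalar sketch of the G-channel extraction above (illustrative only): each
// output Bayer byte is simply the green byte of the corresponding BGRA pixel;
// the selector argument is ignored by this specialized path.
static void ARGBToBayerGGRow_C_Sketch(const uint8* src_argb, uint8* dst_bayer,
                                      uint32 selector, int pix) {
  int i;
  (void)selector;
  for (i = 0; i < pix; ++i) {
    dst_bayer[i] = src_argb[i * 4 + 1];  // G is byte 1 of B, G, R, A.
  }
}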

// For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA.
__declspec(naked) __declspec(align(16))
void ARGBShuffleRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
                          const uint8* shuffler, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_argb
    mov        edx, [esp + 8]    // dst_argb
    mov        ecx, [esp + 12]   // shuffler
    movdqu     xmm5, [ecx]
    mov        ecx, [esp + 16]   // pix

  wloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax, [eax + 32]
    pshufb     xmm0, xmm5
    pshufb     xmm1, xmm5
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         wloop
    ret
  }
}

#ifdef HAS_ARGBSHUFFLEROW_AVX2
__declspec(naked) __declspec(align(16))
void ARGBShuffleRow_AVX2(const uint8* src_argb, uint8* dst_argb,
                         const uint8* shuffler, int pix) {
  __asm {
    mov        eax, [esp + 4]     // src_argb
    mov        edx, [esp + 8]     // dst_argb
    mov        ecx, [esp + 12]    // shuffler
    vbroadcastf128 ymm5, [ecx]    // same shuffle in high as low.
    mov        ecx, [esp + 16]    // pix

  wloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax, [eax + 64]
    vpshufb    ymm0, ymm0, ymm5
    vpshufb    ymm1, ymm1, ymm5
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         wloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBSHUFFLEROW_AVX2

__declspec(naked) __declspec(align(16))
void ARGBShuffleRow_SSE2(const uint8* src_argb, uint8* dst_argb,
                         const uint8* shuffler, int pix) {
  __asm {
    push       ebx
    push       esi
    mov        eax, [esp + 8 + 4]    // src_argb
    mov        edx, [esp + 8 + 8]    // dst_argb
    mov        esi, [esp + 8 + 12]   // shuffler
    mov        ecx, [esp + 8 + 16]   // pix
    pxor       xmm5, xmm5

    mov        ebx, [esi]   // shuffler
    cmp        ebx, 0x03000102
    je         shuf_3012
    cmp        ebx, 0x00010203
    je         shuf_0123
    cmp        ebx, 0x00030201
    je         shuf_0321
    cmp        ebx, 0x02010003
    je         shuf_2103

  // TODO(fbarchard): Use one source pointer and 3 offsets.
  shuf_any1:
    movzx      ebx, byte ptr [esi]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx], bl
    movzx      ebx, byte ptr [esi + 1]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx + 1], bl
    movzx      ebx, byte ptr [esi + 2]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx + 2], bl
    movzx      ebx, byte ptr [esi + 3]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx + 3], bl
    lea        eax, [eax + 4]
    lea        edx, [edx + 4]
    sub        ecx, 1
    jg         shuf_any1
    jmp        shuf99

  shuf_0123:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 01Bh   // 1B = 00011011 = 0x0123 = BGRAToARGB
    pshuflw    xmm0, xmm0, 01Bh
    pshufhw    xmm1, xmm1, 01Bh
    pshuflw    xmm1, xmm1, 01Bh
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_0123
    jmp        shuf99

  shuf_0321:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 039h   // 39 = 00111001 = 0x0321 = RGBAToARGB
    pshuflw    xmm0, xmm0, 039h
    pshufhw    xmm1, xmm1, 039h
    pshuflw    xmm1, xmm1, 039h
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_0321
    jmp        shuf99

  shuf_2103:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 093h   // 93 = 10010011 = 0x2103 = ARGBToRGBA
    pshuflw    xmm0, xmm0, 093h
    pshufhw    xmm1, xmm1, 093h
    pshuflw    xmm1, xmm1, 093h
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_2103
    jmp        shuf99

  shuf_3012:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 0C6h   // C6 = 11000110 = 0x3012 = ABGRToARGB
    pshuflw    xmm0, xmm0, 0C6h
    pshufhw    xmm1, xmm1, 0C6h
    pshuflw    xmm1, xmm1, 0C6h
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_3012

  shuf99:
    pop        esi
    pop        ebx
    ret
  }
}
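
// Scalar sketch of the channel shuffle implemented by the ARGBShuffleRow
// versions above (illustrative only): shuffler[0..3] give the source byte
// index, within each 4-byte pixel, for output bytes 0..3.
static void ARGBShuffleRow_C_Sketch(const uint8* src_argb, uint8* dst_argb,
                                    const uint8* shuffler, int pix) {
  int i;
  for (i = 0; i < pix; ++i) {
    dst_argb[0] = src_argb[shuffler[0]];
    dst_argb[1] = src_argb[shuffler[1]];
    dst_argb[2] = src_argb[shuffler[2]];
    dst_argb[3] = src_argb[shuffler[3]];
    src_argb += 4;
    dst_argb += 4;
  }
}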

// YUY2 - Macro-pixel = 2 image pixels
// Y0U0Y1V0....Y2U2Y3V2...Y4U4Y5V4....

// UYVY - Macro-pixel = 2 image pixels
// U0Y0V0Y1

__declspec(naked) __declspec(align(16))
void I422ToYUY2Row_SSE2(const uint8* src_y,
                        const uint8* src_u,
                        const uint8* src_v,
                        uint8* dst_frame, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_y
    mov        esi, [esp + 8 + 8]    // src_u
    mov        edx, [esp + 8 + 12]   // src_v
    mov        edi, [esp + 8 + 16]   // dst_frame
    mov        ecx, [esp + 8 + 20]   // width
    sub        edx, esi

  convertloop:
    movq       xmm2, qword ptr [esi] // U
    movq       xmm3, qword ptr [esi + edx] // V
    lea        esi, [esi + 8]
    punpcklbw  xmm2, xmm3 // UV
    movdqu     xmm0, [eax] // Y
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm2 // YUYV
    punpckhbw  xmm1, xmm2
    movdqu     [edi], xmm0
    movdqu     [edi + 16], xmm1
    lea        edi, [edi + 32]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
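
// Scalar sketch of the YUY2 packing above (illustrative only): each 2-pixel
// macro-pixel stores Y0 U Y1 V, with one U and one V shared by both pixels.
static void I422ToYUY2Row_C_Sketch(const uint8* src_y,
                                   const uint8* src_u,
                                   const uint8* src_v,
                                   uint8* dst_frame, int width) {
  int x;
  for (x = 0; x + 1 < width; x += 2) {
    dst_frame[0] = src_y[0];
    dst_frame[1] = src_u[0];
    dst_frame[2] = src_y[1];
    dst_frame[3] = src_v[0];
    src_y += 2;
    src_u += 1;
    src_v += 1;
    dst_frame += 4;
  }
}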

__declspec(naked) __declspec(align(16))
void I422ToUYVYRow_SSE2(const uint8* src_y,
                        const uint8* src_u,
                        const uint8* src_v,
                        uint8* dst_frame, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_y
    mov        esi, [esp + 8 + 8]    // src_u
    mov        edx, [esp + 8 + 12]   // src_v
    mov        edi, [esp + 8 + 16]   // dst_frame
    mov        ecx, [esp + 8 + 20]   // width
    sub        edx, esi

  convertloop:
    movq       xmm2, qword ptr [esi] // U
    movq       xmm3, qword ptr [esi + edx] // V
    lea        esi, [esi + 8]
    punpcklbw  xmm2, xmm3 // UV
    movdqu     xmm0, [eax] // Y
    movdqa     xmm1, xmm2
    lea        eax, [eax + 16]
    punpcklbw  xmm1, xmm0 // UYVY
    punpckhbw  xmm2, xmm0
    movdqu     [edi], xmm1
    movdqu     [edi + 16], xmm2
    lea        edi, [edi + 32]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

#ifdef HAS_ARGBPOLYNOMIALROW_SSE2
__declspec(naked) __declspec(align(16))
void ARGBPolynomialRow_SSE2(const uint8* src_argb,
                            uint8* dst_argb, const float* poly,
                            int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   /* src_argb */
    mov        edx, [esp + 4 + 8]   /* dst_argb */
    mov        esi, [esp + 4 + 12]  /* poly */
    mov        ecx, [esp + 4 + 16]  /* width */
    pxor       xmm3, xmm3  // 0 constant for zero extending bytes to ints.
    // 2 pixel loop.
 convertloop:
//    pmovzxbd  xmm0, dword ptr [eax]  // BGRA pixel
//    pmovzxbd  xmm4, dword ptr [eax + 4]  // BGRA pixel
    movq       xmm0, qword ptr [eax]  // BGRABGRA
    lea        eax, [eax + 8]
    punpcklbw  xmm0, xmm3
    movdqa     xmm4, xmm0
    punpcklwd  xmm0, xmm3  // pixel 0
    punpckhwd  xmm4, xmm3  // pixel 1
    cvtdq2ps   xmm0, xmm0  // 4 floats
    cvtdq2ps   xmm4, xmm4
    movdqa     xmm1, xmm0  // X
    movdqa     xmm5, xmm4
    mulps      xmm0, [esi + 16]  // C1 * X
    mulps      xmm4, [esi + 16]
    addps      xmm0, [esi]  // result = C0 + C1 * X
    addps      xmm4, [esi]
    movdqa     xmm2, xmm1
    movdqa     xmm6, xmm5
    mulps      xmm2, xmm1  // X * X
    mulps      xmm6, xmm5
    mulps      xmm1, xmm2  // X * X * X
    mulps      xmm5, xmm6
    mulps      xmm2, [esi + 32]  // C2 * X * X
    mulps      xmm6, [esi + 32]
    mulps      xmm1, [esi + 48]  // C3 * X * X * X
    mulps      xmm5, [esi + 48]
    addps      xmm0, xmm2  // result += C2 * X * X
    addps      xmm4, xmm6
    addps      xmm0, xmm1  // result += C3 * X * X * X
    addps      xmm4, xmm5
    cvttps2dq  xmm0, xmm0
    cvttps2dq  xmm4, xmm4
    packuswb   xmm0, xmm4
    packuswb   xmm0, xmm0
    movq       qword ptr [edx], xmm0
    lea        edx, [edx + 8]
    sub        ecx, 2
    jg         convertloop
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBPOLYNOMIALROW_SSE2
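
// Scalar sketch of the per-channel cubic evaluated above (illustrative only):
// poly holds 4 floats per coefficient, one per B/G/R/A channel, laid out as
// C0[4], C1[4], C2[4], C3[4]; result = C0 + C1*X + C2*X^2 + C3*X^3, clamped.
static void ARGBPolynomialRow_C_Sketch(const uint8* src_argb, uint8* dst_argb,
                                       const float* poly, int width) {
  int i;
  for (i = 0; i < width; ++i) {
    int c;
    for (c = 0; c < 4; ++c) {
      float x = (float)src_argb[i * 4 + c];
      float v = poly[c] + poly[c + 4] * x + poly[c + 8] * x * x +
                poly[c + 12] * x * x * x;
      if (v < 0.f) v = 0.f;
      if (v > 255.f) v = 255.f;
      dst_argb[i * 4 + c] = (uint8)v;
    }
  }
}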

#ifdef HAS_ARGBPOLYNOMIALROW_AVX2
__declspec(naked) __declspec(align(16))
void ARGBPolynomialRow_AVX2(const uint8* src_argb,
                            uint8* dst_argb, const float* poly,
                            int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_argb */
    mov        ecx, [esp + 12]   /* poly */
    vbroadcastf128 ymm4, [ecx]       // C0
    vbroadcastf128 ymm5, [ecx + 16]  // C1
    vbroadcastf128 ymm6, [ecx + 32]  // C2
    vbroadcastf128 ymm7, [ecx + 48]  // C3
    mov        ecx, [esp + 16]  /* width */

    // 2 pixel loop.
 convertloop:
    vpmovzxbd   ymm0, qword ptr [eax]  // 2 BGRA pixels
    lea         eax, [eax + 8]
    vcvtdq2ps   ymm0, ymm0        // X 8 floats
    vmulps      ymm2, ymm0, ymm0  // X * X
    vmulps      ymm3, ymm0, ymm7  // C3 * X
    vfmadd132ps ymm0, ymm4, ymm5  // result = C0 + C1 * X
    vfmadd231ps ymm0, ymm2, ymm6  // result += C2 * X * X
    vfmadd231ps ymm0, ymm2, ymm3  // result += C3 * X * X * X
    vcvttps2dq  ymm0, ymm0
    vpackusdw   ymm0, ymm0, ymm0  // b0g0r0a0_00000000_b0g0r0a0_00000000
    vpermq      ymm0, ymm0, 0xd8  // b0g0r0a0_b0g0r0a0_00000000_00000000
    vpackuswb   xmm0, xmm0, xmm0  // bgrabgra_00000000_00000000_00000000
    vmovq       qword ptr [edx], xmm0
    lea         edx, [edx + 8]
    sub         ecx, 2
    jg          convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBPOLYNOMIALROW_AVX2

#ifdef HAS_ARGBCOLORTABLEROW_X86
// Transform ARGB pixels with color table.
__declspec(naked) __declspec(align(16))
void ARGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb,
                           int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   /* dst_argb */
    mov        esi, [esp + 4 + 8]   /* table_argb */
    mov        ecx, [esp + 4 + 12]  /* width */
    // 1 pixel loop.
  convertloop:
    movzx      edx, byte ptr [eax]
    lea        eax, [eax + 4]
    movzx      edx, byte ptr [esi + edx * 4]
    mov        byte ptr [eax - 4], dl
    movzx      edx, byte ptr [eax - 4 + 1]
    movzx      edx, byte ptr [esi + edx * 4 + 1]
    mov        byte ptr [eax - 4 + 1], dl
    movzx      edx, byte ptr [eax - 4 + 2]
    movzx      edx, byte ptr [esi + edx * 4 + 2]
    mov        byte ptr [eax - 4 + 2], dl
    movzx      edx, byte ptr [eax - 4 + 3]
    movzx      edx, byte ptr [esi + edx * 4 + 3]
    mov        byte ptr [eax - 4 + 3], dl
    dec        ecx
    jg         convertloop
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBCOLORTABLEROW_X86
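
// Scalar sketch of the in-place table remap above (illustrative only): the
// table is interleaved per channel, so channel c of a pixel indexes entry
// value * 4 + c. The RGB variant below is identical but leaves alpha alone.
static void ARGBColorTableRow_C_Sketch(uint8* dst_argb,
                                       const uint8* table_argb, int width) {
  int i;
  for (i = 0; i < width; ++i) {
    dst_argb[0] = table_argb[dst_argb[0] * 4 + 0];  // B
    dst_argb[1] = table_argb[dst_argb[1] * 4 + 1];  // G
    dst_argb[2] = table_argb[dst_argb[2] * 4 + 2];  // R
    dst_argb[3] = table_argb[dst_argb[3] * 4 + 3];  // A
    dst_argb += 4;
  }
}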
#ifdef HAS_RGBCOLORTABLEROW_X86
// Transform RGB pixels with color table.
__declspec(naked) __declspec(align(16))
void RGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   /* dst_argb */
    mov        esi, [esp + 4 + 8]   /* table_argb */
    mov        ecx, [esp + 4 + 12]  /* width */

    // 1 pixel loop.
  convertloop:
    movzx      edx, byte ptr [eax]
    lea        eax, [eax + 4]
    movzx      edx, byte ptr [esi + edx * 4]
    mov        byte ptr [eax - 4], dl
    movzx      edx, byte ptr [eax - 4 + 1]
    movzx      edx, byte ptr [esi + edx * 4 + 1]
    mov        byte ptr [eax - 4 + 1], dl
    movzx      edx, byte ptr [eax - 4 + 2]
    movzx      edx, byte ptr [esi + edx * 4 + 2]
    mov        byte ptr [eax - 4 + 2], dl
    dec        ecx
    jg         convertloop

    pop        esi
5660 5661 5662
    ret
  }
}
#endif  // HAS_RGBCOLORTABLEROW_X86
#ifdef HAS_ARGBLUMACOLORTABLEROW_SSSE3
// Transform RGB pixels with luma table.
__declspec(naked) __declspec(align(16))
void ARGBLumaColorTableRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
                                 int width,
                                 const uint8* luma, uint32 lumacoeff) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   /* src_argb */
    mov        edi, [esp + 8 + 8]   /* dst_argb */
    mov        ecx, [esp + 8 + 12]  /* width */
    movd       xmm2, dword ptr [esp + 8 + 16]  // luma table
    movd       xmm3, dword ptr [esp + 8 + 20]  // lumacoeff
    pshufd     xmm2, xmm2, 0
    pshufd     xmm3, xmm3, 0
    pcmpeqb    xmm4, xmm4        // generate mask 0xff00ff00
    psllw      xmm4, 8
    pxor       xmm5, xmm5

    // 4 pixel loop.
  convertloop:
    movdqu     xmm0, qword ptr [eax]      // generate luma ptr
    pmaddubsw  xmm0, xmm3
    phaddw     xmm0, xmm0
    pand       xmm0, xmm4  // mask out low bits
    punpcklwd  xmm0, xmm5
    paddd      xmm0, xmm2  // add table base
    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // 00111001 to rotate right 32

    movzx      edx, byte ptr [eax]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi], dl
    movzx      edx, byte ptr [eax + 1]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 1], dl
    movzx      edx, byte ptr [eax + 2]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 2], dl
    movzx      edx, byte ptr [eax + 3]  // copy alpha.
    mov        byte ptr [edi + 3], dl

    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // 00111001 to rotate right 32

    movzx      edx, byte ptr [eax + 4]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 4], dl
    movzx      edx, byte ptr [eax + 5]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 5], dl
    movzx      edx, byte ptr [eax + 6]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 6], dl
    movzx      edx, byte ptr [eax + 7]  // copy alpha.
    mov        byte ptr [edi + 7], dl

    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // 00111001 to rotate right 32

    movzx      edx, byte ptr [eax + 8]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 8], dl
    movzx      edx, byte ptr [eax + 9]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 9], dl
    movzx      edx, byte ptr [eax + 10]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 10], dl
    movzx      edx, byte ptr [eax + 11]  // copy alpha.
    mov        byte ptr [edi + 11], dl

    movd       esi, xmm0

    movzx      edx, byte ptr [eax + 12]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 12], dl
    movzx      edx, byte ptr [eax + 13]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 13], dl
    movzx      edx, byte ptr [eax + 14]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 14], dl
    movzx      edx, byte ptr [eax + 15]  // copy alpha.
    mov        byte ptr [edi + 15], dl

    lea        eax, [eax + 16]
    lea        edi, [edi + 16]
    sub        ecx, 4
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBLUMACOLORTABLEROW_SSSE3
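
// Scalar sketch of the luma-dependent remap above (illustrative only; the
// byte packing of lumacoeff is an assumption): a weighted luma, masked to a
// multiple of 256, selects one 256-byte table inside the luma buffer; B, G
// and R are remapped through that table and alpha is copied unchanged.
static void ARGBLumaColorTableRow_C_Sketch(const uint8* src_argb,
                                           uint8* dst_argb, int width,
                                           const uint8* luma,
                                           uint32 lumacoeff) {
  const uint32 bc = lumacoeff & 0xff;          // Weight applied to B.
  const uint32 gc = (lumacoeff >> 8) & 0xff;   // Weight applied to G.
  const uint32 rc = (lumacoeff >> 16) & 0xff;  // Weight applied to R.
  int i;
  for (i = 0; i < width; ++i) {
    const uint8* luma_table =
        luma + ((src_argb[0] * bc + src_argb[1] * gc + src_argb[2] * rc) &
                0xff00);
    dst_argb[0] = luma_table[src_argb[0]];
    dst_argb[1] = luma_table[src_argb[1]];
    dst_argb[2] = luma_table[src_argb[2]];
    dst_argb[3] = src_argb[3];  // Alpha copied.
    src_argb += 4;
    dst_argb += 4;
  }
}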
#endif  // defined(_M_X64)
#endif  // !defined(LIBYUV_DISABLE_X86) && defined(_MSC_VER)
#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif