/*
 *  Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/row.h"

#if !defined(LIBYUV_DISABLE_X86) && defined(_M_X64) && \
    defined(_MSC_VER) && !defined(__clang__)
#include <emmintrin.h>
#include <tmmintrin.h>  // For _mm_maddubs_epi16
#endif

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

// This module is for Visual C 32/64 bit and clangcl 32 bit
#if !defined(LIBYUV_DISABLE_X86) && \
    (defined(_M_IX86) || (defined(_M_X64) && !defined(__clang__)))

// 64 bit
#if defined(_M_X64)

// Read 4 UV from 422, upsample to 8 UV.
#define READYUV422                                                             \
    xmm0 = _mm_cvtsi32_si128(*(uint32*)u_buf);                                 \
    xmm1 = _mm_cvtsi32_si128(*(uint32*)(u_buf + offset));                      \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);                                      \
    xmm0 = _mm_unpacklo_epi16(xmm0, xmm0);                                     \
    u_buf += 4;                                                                \
    xmm4 = _mm_loadl_epi64((__m128i*)y_buf);                                   \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm4);                                      \
    y_buf += 8;

// Read 4 UV from 422, upsample to 8 UV.  With 8 Alpha.
#define READYUVA422                                                            \
    xmm0 = _mm_cvtsi32_si128(*(uint32*)u_buf);                                 \
    xmm1 = _mm_cvtsi32_si128(*(uint32*)(u_buf + offset));                      \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);                                      \
    xmm0 = _mm_unpacklo_epi16(xmm0, xmm0);                                     \
    u_buf += 4;                                                                \
    xmm4 = _mm_loadl_epi64((__m128i*)y_buf);                                   \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm4);                                      \
    y_buf += 8;                                                                \
    xmm5 = _mm_loadl_epi64((__m128i*)a_buf);                                   \
    a_buf += 8;

// Convert 8 pixels: 8 UV and 8 Y.
#define YUVTORGB(yuvconstants)                                                 \
    xmm1 = _mm_loadu_si128(&xmm0);                                             \
    xmm2 = _mm_loadu_si128(&xmm0);                                             \
    xmm0 = _mm_maddubs_epi16(xmm0, *(__m128i*)yuvconstants->kUVToB);           \
    xmm1 = _mm_maddubs_epi16(xmm1, *(__m128i*)yuvconstants->kUVToG);           \
    xmm2 = _mm_maddubs_epi16(xmm2, *(__m128i*)yuvconstants->kUVToR);           \
    xmm0 = _mm_sub_epi16(*(__m128i*)yuvconstants->kUVBiasB, xmm0);             \
    xmm1 = _mm_sub_epi16(*(__m128i*)yuvconstants->kUVBiasG, xmm1);             \
    xmm2 = _mm_sub_epi16(*(__m128i*)yuvconstants->kUVBiasR, xmm2);             \
    xmm4 = _mm_mulhi_epu16(xmm4, *(__m128i*)yuvconstants->kYToRgb);            \
    xmm0 = _mm_adds_epi16(xmm0, xmm4);                                         \
    xmm1 = _mm_adds_epi16(xmm1, xmm4);                                         \
    xmm2 = _mm_adds_epi16(xmm2, xmm4);                                         \
    xmm0 = _mm_srai_epi16(xmm0, 6);                                            \
    xmm1 = _mm_srai_epi16(xmm1, 6);                                            \
    xmm2 = _mm_srai_epi16(xmm2, 6);                                            \
    xmm0 = _mm_packus_epi16(xmm0, xmm0);                                       \
    xmm1 = _mm_packus_epi16(xmm1, xmm1);                                       \
    xmm2 = _mm_packus_epi16(xmm2, xmm2);

// Store 8 ARGB values.
#define STOREARGB                                                              \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);                                      \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm5);                                      \
    xmm1 = _mm_loadu_si128(&xmm0);                                             \
    xmm0 = _mm_unpacklo_epi16(xmm0, xmm2);                                     \
    xmm1 = _mm_unpackhi_epi16(xmm1, xmm2);                                     \
    _mm_storeu_si128((__m128i *)dst_argb, xmm0);                               \
    _mm_storeu_si128((__m128i *)(dst_argb + 16), xmm1);                        \
    dst_argb += 32;
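
// Editor's note (not part of the original file): an approximate scalar
// equivalent of READYUV422 + YUVTORGB + STOREARGB for one pixel, using the
// common 8.8 fixed-point BT.601 coefficients. The real kernels keep their
// exact coefficients and biases in struct YuvConstants and work in 6-bit
// fixed point; this sketch only shows the shape of the math.
static inline uint8 ScalarClampSketch(int v) {
  return (uint8)(v < 0 ? 0 : (v > 255 ? 255 : v));
}
static inline void ScalarYuvToArgbSketch(uint8 y, uint8 u, uint8 v,
                                         uint8* dst_argb /* B,G,R,A */) {
  int y1 = 298 * ((int)y - 16);  // ~1.164 in 8.8 fixed point
  int cb = (int)u - 128;
  int cr = (int)v - 128;
  dst_argb[0] = ScalarClampSketch((y1 + 516 * cb + 128) >> 8);             // B
  dst_argb[1] = ScalarClampSketch((y1 - 100 * cb - 208 * cr + 128) >> 8);  // G
  dst_argb[2] = ScalarClampSketch((y1 + 409 * cr + 128) >> 8);             // R
  dst_argb[3] = 255;                                                       // A
}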


#if defined(HAS_I422TOARGBROW_SSSE3)
void I422ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         const struct YuvConstants* yuvconstants,
                         int width) {
  __m128i xmm0, xmm1, xmm2, xmm4;
  const __m128i xmm5 = _mm_set1_epi8(-1);
  const ptrdiff_t offset = (uint8*)v_buf - (uint8*)u_buf;
  while (width > 0) {
    READYUV422
    YUVTORGB(yuvconstants)
    STOREARGB
    width -= 8;
  }
}
#endif
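
#if defined(HAS_I422TOARGBROW_SSSE3)
// Editor's illustration (not part of the original file): row kernels convert a
// single row; a caller (the planar converters elsewhere in libyuv) drives them
// plane by plane, roughly as below. Names and strides here are assumptions for
// the sketch only.
static void I422ToARGBImageSketch(const uint8* src_y, int src_stride_y,
                                  const uint8* src_u, int src_stride_u,
                                  const uint8* src_v, int src_stride_v,
                                  uint8* dst_argb, int dst_stride_argb,
                                  const struct YuvConstants* yuvconstants,
                                  int width, int height) {
  int y;
  for (y = 0; y < height; ++y) {
    I422ToARGBRow_SSSE3(src_y, src_u, src_v, dst_argb, yuvconstants, width);
    src_y += src_stride_y;
    src_u += src_stride_u;  // I422 chroma planes are half width, full height.
    src_v += src_stride_v;
    dst_argb += dst_stride_argb;
  }
}
#endif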

#if defined(HAS_I422ALPHATOARGBROW_SSSE3)
void I422AlphaToARGBRow_SSSE3(const uint8* y_buf,
                              const uint8* u_buf,
                              const uint8* v_buf,
                              const uint8* a_buf,
                              uint8* dst_argb,
                              const struct YuvConstants* yuvconstants,
                              int width) {
  __m128i xmm0, xmm1, xmm2, xmm4, xmm5;
  const ptrdiff_t offset = (uint8*)v_buf - (uint8*)u_buf;
  while (width > 0) {
    READYUVA422
    YUVTORGB(yuvconstants)
    STOREARGB
    width -= 8;
  }
}
#endif

// 32 bit
#else  // defined(_M_X64)
#ifdef HAS_ARGBTOYROW_SSSE3

// Constants for ARGB.
static const vec8 kARGBToY = {
  13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0
};

// JPEG full range.
static const vec8 kARGBToYJ = {
  15, 75, 38, 0, 15, 75, 38, 0, 15, 75, 38, 0, 15, 75, 38, 0
};

static const vec8 kARGBToU = {
  112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0
};

static const vec8 kARGBToUJ = {
  127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0
};

static const vec8 kARGBToV = {
  -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0,
};

static const vec8 kARGBToVJ = {
  -20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0
};

// vpshufb for vphaddw + vpackuswb packed to shorts.
static const lvec8 kShufARGBToUV_AVX = {
  0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15,
  0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15
};

// Constants for BGRA.
static const vec8 kBGRAToY = {
  0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13
};

static const vec8 kBGRAToU = {
  0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112
};

static const vec8 kBGRAToV = {
  0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18
};

// Constants for ABGR.
static const vec8 kABGRToY = {
  33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0
};

static const vec8 kABGRToU = {
  -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0
};

static const vec8 kABGRToV = {
  112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0
};

// Constants for RGBA.
static const vec8 kRGBAToY = {
  0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33
};

static const vec8 kRGBAToU = {
  0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38
};

static const vec8 kRGBAToV = {
  0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112
};

static const uvec8 kAddY16 = {
  16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u
};

// 7 bit fixed point 0.5.
static const vec16 kAddYJ64 = {
  64, 64, 64, 64, 64, 64, 64, 64
};

static const uvec8 kAddUV128 = {
  128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u,
  128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u
};

static const uvec16 kAddUVJ128 = {
  0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u
};
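
// Editor's note (not part of the original file): in scalar form, the ARGB
// coefficient and bias constants above amount to the per-pixel formulas below
// (ARGB is stored B,G,R,A in memory, so the first coefficient of each table
// applies to blue; the shift on the UV formulas is arithmetic, matching
// psraw). The UV kernels additionally average a 2x2 block of pixels before
// applying these weights.
static inline int ScalarARGBToYSketch(uint8 b, uint8 g, uint8 r) {
  return ((13 * b + 65 * g + 33 * r) >> 7) + 16;     // kARGBToY + kAddY16
}
static inline int ScalarARGBToUSketch(uint8 b, uint8 g, uint8 r) {
  return ((112 * b - 74 * g - 38 * r) >> 8) + 128;   // kARGBToU + kAddUV128
}
static inline int ScalarARGBToVSketch(uint8 b, uint8 g, uint8 r) {
  return ((-18 * b - 94 * g + 112 * r) >> 8) + 128;  // kARGBToV + kAddUV128
}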

// Shuffle table for converting RGB24 to ARGB.
static const uvec8 kShuffleMaskRGB24ToARGB = {
  0u, 1u, 2u, 12u, 3u, 4u, 5u, 13u, 6u, 7u, 8u, 14u, 9u, 10u, 11u, 15u
};

// Shuffle table for converting RAW to ARGB.
static const uvec8 kShuffleMaskRAWToARGB = {
  2u, 1u, 0u, 12u, 5u, 4u, 3u, 13u, 8u, 7u, 6u, 14u, 11u, 10u, 9u, 15u
};

// Shuffle table for converting RAW to RGB24.  First 8.
static const uvec8 kShuffleMaskRAWToRGB24_0 = {
  2u, 1u, 0u, 5u, 4u, 3u, 8u, 7u,
  128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u
};

// Shuffle table for converting RAW to RGB24.  Middle 8.
static const uvec8 kShuffleMaskRAWToRGB24_1 = {
  2u, 7u, 6u, 5u, 10u, 9u, 8u, 13u,
  128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u
};

// Shuffle table for converting RAW to RGB24.  Last 8.
static const uvec8 kShuffleMaskRAWToRGB24_2 = {
  8u, 7u, 12u, 11u, 10u, 15u, 14u, 13u,
  128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u
};

// Shuffle table for converting ARGB to RGB24.
static const uvec8 kShuffleMaskARGBToRGB24 = {
  0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 10u, 12u, 13u, 14u, 128u, 128u, 128u, 128u
};

// Shuffle table for converting ARGB to RAW.
static const uvec8 kShuffleMaskARGBToRAW = {
  2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 8u, 14u, 13u, 12u, 128u, 128u, 128u, 128u
};

// Shuffle table for converting ARGBToRGB24 for I422ToRGB24.  First 8 + next 4.
static const uvec8 kShuffleMaskARGBToRGB24_0 = {
  0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 128u, 128u, 128u, 128u, 10u, 12u, 13u, 14u
};
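
// Editor's note (not part of the original file): all the kShuffleMask* tables
// above are pshufb control vectors. For a 16-byte register, output byte i is
// src[table[i] & 15], and any entry with the high bit set (128) writes zero,
// which is how the unused alpha/padding lanes are produced:
static inline void ScalarPshufbSketch(const uint8* src, const uint8* shuf,
                                      uint8* dst) {
  int i;
  for (i = 0; i < 16; ++i) {
    dst[i] = (uint8)((shuf[i] & 0x80) ? 0 : src[shuf[i] & 15]);
  }
}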

// YUY2 shuf 16 Y to 32 Y.
static const lvec8 kShuffleYUY2Y = {
  0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14,
  0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14
};

// YUY2 shuf 8 UV to 16 UV.
static const lvec8 kShuffleYUY2UV = {
  1, 3, 1, 3, 5, 7, 5, 7, 9, 11, 9, 11, 13, 15, 13, 15,
  1, 3, 1, 3, 5, 7, 5, 7, 9, 11, 9, 11, 13, 15, 13, 15
};

// UYVY shuf 16 Y to 32 Y.
static const lvec8 kShuffleUYVYY = {
  1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15,
  1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15
};

// UYVY shuf 8 UV to 16 UV.
static const lvec8 kShuffleUYVYUV = {
  0, 2, 0, 2, 4, 6, 4, 6, 8, 10, 8, 10, 12, 14, 12, 14,
  0, 2, 0, 2, 4, 6, 4, 6, 8, 10, 8, 10, 12, 14, 12, 14
};

// NV21 shuf 8 VU to 16 UV.
static const lvec8 kShuffleNV21 = {
  1, 0, 1, 0, 3, 2, 3, 2, 5, 4, 5, 4, 7, 6, 7, 6,
  1, 0, 1, 0, 3, 2, 3, 2, 5, 4, 5, 4, 7, 6, 7, 6,
};

// Duplicates gray value 3 times and fills in alpha opaque.
__declspec(naked)
void J400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]        // src_y
    mov        edx, [esp + 8]        // dst_argb
    mov        ecx, [esp + 12]       // width
    pcmpeqb    xmm5, xmm5            // generate mask 0xff000000
    pslld      xmm5, 24

  convertloop:
    movq       xmm0, qword ptr [eax]
    lea        eax,  [eax + 8]
    punpcklbw  xmm0, xmm0
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm0
    punpckhwd  xmm1, xmm1
    por        xmm0, xmm5
    por        xmm1, xmm5
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
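
// Editor's note (not part of the original file): per pixel, J400ToARGBRow is
// simply gray replication into B, G and R with opaque alpha, i.e.
static inline uint32 ScalarJ400ToARGBSketch(uint8 y) {
  return 0xff000000u | ((uint32)y << 16) | ((uint32)y << 8) | (uint32)y;
}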

#ifdef HAS_J400TOARGBROW_AVX2
// Duplicates gray value 3 times and fills in alpha opaque.
__declspec(naked)
void J400ToARGBRow_AVX2(const uint8* src_y, uint8* dst_argb, int width) {
  __asm {
    mov         eax, [esp + 4]        // src_y
    mov         edx, [esp + 8]        // dst_argb
    mov         ecx, [esp + 12]       // width
    vpcmpeqb    ymm5, ymm5, ymm5      // generate mask 0xff000000
    vpslld      ymm5, ymm5, 24

  convertloop:
    vmovdqu     xmm0, [eax]
    lea         eax,  [eax + 16]
    vpermq      ymm0, ymm0, 0xd8
    vpunpcklbw  ymm0, ymm0, ymm0
    vpermq      ymm0, ymm0, 0xd8
    vpunpckhwd  ymm1, ymm0, ymm0
    vpunpcklwd  ymm0, ymm0, ymm0
    vpor        ymm0, ymm0, ymm5
    vpor        ymm1, ymm1, ymm5
    vmovdqu     [edx], ymm0
    vmovdqu     [edx + 32], ymm1
    lea         edx, [edx + 64]
    sub         ecx, 16
    jg          convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_J400TOARGBROW_AVX2

__declspec(naked)
void RGB24ToARGBRow_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int width) {
  __asm {
    mov       eax, [esp + 4]   // src_rgb24
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // width
    pcmpeqb   xmm5, xmm5       // generate mask 0xff000000
    pslld     xmm5, 24
    movdqa    xmm4, xmmword ptr kShuffleMaskRGB24ToARGB

 convertloop:
    movdqu    xmm0, [eax]
    movdqu    xmm1, [eax + 16]
    movdqu    xmm3, [eax + 32]
    lea       eax, [eax + 48]
    movdqa    xmm2, xmm3
    palignr   xmm2, xmm1, 8    // xmm2 = { xmm3[0:3] xmm1[8:15]}
    pshufb    xmm2, xmm4
    por       xmm2, xmm5
    palignr   xmm1, xmm0, 12   // xmm1 = { xmm3[0:7] xmm0[12:15]}
    pshufb    xmm0, xmm4
    movdqu    [edx + 32], xmm2
    por       xmm0, xmm5
    pshufb    xmm1, xmm4
    movdqu    [edx], xmm0
    por       xmm1, xmm5
    palignr   xmm3, xmm3, 4    // xmm3 = { xmm3[4:15]}
    pshufb    xmm3, xmm4
    movdqu    [edx + 16], xmm1
    por       xmm3, xmm5
    movdqu    [edx + 48], xmm3
    lea       edx, [edx + 64]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

__declspec(naked)
void RAWToARGBRow_SSSE3(const uint8* src_raw, uint8* dst_argb,
                        int width) {
  __asm {
    mov       eax, [esp + 4]   // src_raw
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // width
    pcmpeqb   xmm5, xmm5       // generate mask 0xff000000
    pslld     xmm5, 24
    movdqa    xmm4, xmmword ptr kShuffleMaskRAWToARGB

 convertloop:
    movdqu    xmm0, [eax]
    movdqu    xmm1, [eax + 16]
    movdqu    xmm3, [eax + 32]
    lea       eax, [eax + 48]
    movdqa    xmm2, xmm3
    palignr   xmm2, xmm1, 8    // xmm2 = { xmm3[0:3] xmm1[8:15]}
    pshufb    xmm2, xmm4
    por       xmm2, xmm5
    palignr   xmm1, xmm0, 12   // xmm1 = { xmm3[0:7] xmm0[12:15]}
    pshufb    xmm0, xmm4
    movdqu    [edx + 32], xmm2
    por       xmm0, xmm5
    pshufb    xmm1, xmm4
    movdqu    [edx], xmm0
    por       xmm1, xmm5
    palignr   xmm3, xmm3, 4    // xmm3 = { xmm3[4:15]}
    pshufb    xmm3, xmm4
    movdqu    [edx + 16], xmm1
    por       xmm3, xmm5
    movdqu    [edx + 48], xmm3
    lea       edx, [edx + 64]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

__declspec(naked)
void RAWToRGB24Row_SSSE3(const uint8* src_raw, uint8* dst_rgb24, int width) {
  __asm {
    mov       eax, [esp + 4]   // src_raw
    mov       edx, [esp + 8]   // dst_rgb24
    mov       ecx, [esp + 12]  // width
    movdqa    xmm3, xmmword ptr kShuffleMaskRAWToRGB24_0
    movdqa    xmm4, xmmword ptr kShuffleMaskRAWToRGB24_1
    movdqa    xmm5, xmmword ptr kShuffleMaskRAWToRGB24_2

 convertloop:
    movdqu    xmm0, [eax]
    movdqu    xmm1, [eax + 4]
    movdqu    xmm2, [eax + 8]
    lea       eax, [eax + 24]
    pshufb    xmm0, xmm3
    pshufb    xmm1, xmm4
    pshufb    xmm2, xmm5
    movq      qword ptr [edx], xmm0
    movq      qword ptr [edx + 8], xmm1
    movq      qword ptr [edx + 16], xmm2
    lea       edx, [edx + 24]
    sub       ecx, 8
    jg        convertloop
    ret
  }
}

// pmul method to replicate bits.
// Math to replicate bits:
// (v << 8) | (v << 3)
// v * 256 + v * 8
// v * (256 + 8)
// G shift of 5 is incorporated, so shift is 5 + 8 and 5 + 3
// 20 instructions.
__declspec(naked)
void RGB565ToARGBRow_SSE2(const uint8* src_rgb565, uint8* dst_argb,
                          int width) {
  __asm {
    mov       eax, 0x01080108  // generate multiplier to repeat 5 bits
    movd      xmm5, eax
    pshufd    xmm5, xmm5, 0
    mov       eax, 0x20802080  // multiplier shift by 5 and then repeat 6 bits
    movd      xmm6, eax
    pshufd    xmm6, xmm6, 0
    pcmpeqb   xmm3, xmm3       // generate mask 0xf800f800 for Red
    psllw     xmm3, 11
    pcmpeqb   xmm4, xmm4       // generate mask 0x07e007e0 for Green
    psllw     xmm4, 10
    psrlw     xmm4, 5
    pcmpeqb   xmm7, xmm7       // generate mask 0xff00ff00 for Alpha
    psllw     xmm7, 8

    mov       eax, [esp + 4]   // src_rgb565
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // width
    sub       edx, eax
    sub       edx, eax

 convertloop:
    movdqu    xmm0, [eax]   // fetch 8 pixels of bgr565
    movdqa    xmm1, xmm0
    movdqa    xmm2, xmm0
    pand      xmm1, xmm3    // R in upper 5 bits
    psllw     xmm2, 11      // B in upper 5 bits
    pmulhuw   xmm1, xmm5    // * (256 + 8)
    pmulhuw   xmm2, xmm5    // * (256 + 8)
    psllw     xmm1, 8
    por       xmm1, xmm2    // RB
    pand      xmm0, xmm4    // G in middle 6 bits
    pmulhuw   xmm0, xmm6    // << 5 * (256 + 4)
    por       xmm0, xmm7    // AG
    movdqa    xmm2, xmm1
    punpcklbw xmm1, xmm0
    punpckhbw xmm2, xmm0
    movdqu    [eax * 2 + edx], xmm1  // store 4 pixels of ARGB
    movdqu    [eax * 2 + edx + 16], xmm2  // store next 4 pixels of ARGB
    lea       eax, [eax + 16]
    sub       ecx, 8
    jg        convertloop
    ret
  }
}
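
// Editor's note (not part of the original file): the scalar equivalent of the
// bit replication performed above for one RGB565 pixel. (v << 3) | (v >> 2)
// yields the same 8-bit result as the pmulhuw-by-(256 + 8) trick.
static inline uint32 ScalarRGB565ToARGBSketch(uint16 pix) {
  uint32 b = pix & 0x1f;
  uint32 g = (pix >> 5) & 0x3f;
  uint32 r = (pix >> 11) & 0x1f;
  b = (b << 3) | (b >> 2);  // replicate 5 bits to 8
  g = (g << 2) | (g >> 4);  // replicate 6 bits to 8
  r = (r << 3) | (r >> 2);
  return 0xff000000u | (r << 16) | (g << 8) | b;
}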

#ifdef HAS_RGB565TOARGBROW_AVX2
// pmul method to replicate bits.
// Math to replicate bits:
// (v << 8) | (v << 3)
// v * 256 + v * 8
// v * (256 + 8)
// G shift of 5 is incorporated, so shift is 5 + 8 and 5 + 3
__declspec(naked)
void RGB565ToARGBRow_AVX2(const uint8* src_rgb565, uint8* dst_argb,
                          int width) {
  __asm {
    mov        eax, 0x01080108  // generate multiplier to repeat 5 bits
    vmovd      xmm5, eax
    vbroadcastss ymm5, xmm5
    mov        eax, 0x20802080  // multiplier shift by 5 and then repeat 6 bits
    vmovd      xmm6, eax
    vbroadcastss ymm6, xmm6
    vpcmpeqb   ymm3, ymm3, ymm3       // generate mask 0xf800f800 for Red
    vpsllw     ymm3, ymm3, 11
    vpcmpeqb   ymm4, ymm4, ymm4       // generate mask 0x07e007e0 for Green
    vpsllw     ymm4, ymm4, 10
    vpsrlw     ymm4, ymm4, 5
    vpcmpeqb   ymm7, ymm7, ymm7       // generate mask 0xff00ff00 for Alpha
    vpsllw     ymm7, ymm7, 8

    mov        eax, [esp + 4]   // src_rgb565
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    sub        edx, eax
    sub        edx, eax

 convertloop:
    vmovdqu    ymm0, [eax]   // fetch 16 pixels of bgr565
    vpand      ymm1, ymm0, ymm3    // R in upper 5 bits
    vpsllw     ymm2, ymm0, 11      // B in upper 5 bits
    vpmulhuw   ymm1, ymm1, ymm5    // * (256 + 8)
    vpmulhuw   ymm2, ymm2, ymm5    // * (256 + 8)
    vpsllw     ymm1, ymm1, 8
    vpor       ymm1, ymm1, ymm2    // RB
    vpand      ymm0, ymm0, ymm4    // G in middle 6 bits
    vpmulhuw   ymm0, ymm0, ymm6    // << 5 * (256 + 4)
    vpor       ymm0, ymm0, ymm7    // AG
    vpermq     ymm0, ymm0, 0xd8    // mutate for unpack
    vpermq     ymm1, ymm1, 0xd8
    vpunpckhbw ymm2, ymm1, ymm0
    vpunpcklbw ymm1, ymm1, ymm0
    vmovdqu    [eax * 2 + edx], ymm1  // store 8 pixels of ARGB
    vmovdqu    [eax * 2 + edx + 32], ymm2  // store next 8 pixels of ARGB
    lea       eax, [eax + 32]
    sub       ecx, 16
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_RGB565TOARGBROW_AVX2

#ifdef HAS_ARGB1555TOARGBROW_AVX2
__declspec(naked)
void ARGB1555ToARGBRow_AVX2(const uint8* src_argb1555, uint8* dst_argb,
                            int width) {
  __asm {
    mov        eax, 0x01080108  // generate multiplier to repeat 5 bits
    vmovd      xmm5, eax
    vbroadcastss ymm5, xmm5
    mov        eax, 0x42004200  // multiplier shift by 6 and then repeat 5 bits
    vmovd      xmm6, eax
    vbroadcastss ymm6, xmm6
    vpcmpeqb   ymm3, ymm3, ymm3 // generate mask 0xf800f800 for Red
    vpsllw     ymm3, ymm3, 11
    vpsrlw     ymm4, ymm3, 6    // generate mask 0x03e003e0 for Green
    vpcmpeqb   ymm7, ymm7, ymm7 // generate mask 0xff00ff00 for Alpha
    vpsllw     ymm7, ymm7, 8

    mov        eax,  [esp + 4]   // src_argb1555
    mov        edx,  [esp + 8]   // dst_argb
    mov        ecx,  [esp + 12]  // width
    sub        edx,  eax
    sub        edx,  eax

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 16 pixels of 1555
    vpsllw     ymm1, ymm0, 1       // R in upper 5 bits
    vpsllw     ymm2, ymm0, 11      // B in upper 5 bits
    vpand      ymm1, ymm1, ymm3
    vpmulhuw   ymm2, ymm2, ymm5    // * (256 + 8)
    vpmulhuw   ymm1, ymm1, ymm5    // * (256 + 8)
    vpsllw     ymm1, ymm1, 8
    vpor       ymm1, ymm1, ymm2    // RB
    vpsraw     ymm2, ymm0, 8       // A
    vpand      ymm0, ymm0, ymm4    // G in middle 5 bits
    vpmulhuw   ymm0, ymm0, ymm6    // << 6 * (256 + 8)
    vpand      ymm2, ymm2, ymm7
    vpor       ymm0, ymm0, ymm2    // AG
    vpermq     ymm0, ymm0, 0xd8    // mutate for unpack
    vpermq     ymm1, ymm1, 0xd8
    vpunpckhbw ymm2, ymm1, ymm0
    vpunpcklbw ymm1, ymm1, ymm0
    vmovdqu    [eax * 2 + edx], ymm1  // store 8 pixels of ARGB
    vmovdqu    [eax * 2 + edx + 32], ymm2  // store next 8 pixels of ARGB
    lea       eax, [eax + 32]
    sub       ecx, 16
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGB1555TOARGBROW_AVX2

#ifdef HAS_ARGB4444TOARGBROW_AVX2
__declspec(naked)
void ARGB4444ToARGBRow_AVX2(const uint8* src_argb4444, uint8* dst_argb,
                            int width) {
  __asm {
    mov       eax,  0x0f0f0f0f  // generate mask 0x0f0f0f0f
    vmovd     xmm4, eax
    vbroadcastss ymm4, xmm4
    vpslld    ymm5, ymm4, 4     // 0xf0f0f0f0 for high nibbles
    mov       eax,  [esp + 4]   // src_argb4444
    mov       edx,  [esp + 8]   // dst_argb
    mov       ecx,  [esp + 12]  // width
    sub       edx,  eax
    sub       edx,  eax

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 16 pixels of bgra4444
    vpand      ymm2, ymm0, ymm5    // mask high nibbles
    vpand      ymm0, ymm0, ymm4    // mask low nibbles
    vpsrlw     ymm3, ymm2, 4
    vpsllw     ymm1, ymm0, 4
    vpor       ymm2, ymm2, ymm3
    vpor       ymm0, ymm0, ymm1
    vpermq     ymm0, ymm0, 0xd8    // mutate for unpack
    vpermq     ymm2, ymm2, 0xd8
    vpunpckhbw ymm1, ymm0, ymm2
    vpunpcklbw ymm0, ymm0, ymm2
    vmovdqu    [eax * 2 + edx], ymm0  // store 8 pixels of ARGB
    vmovdqu    [eax * 2 + edx + 32], ymm1  // store next 8 pixels of ARGB
    lea       eax, [eax + 32]
    sub       ecx, 16
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGB4444TOARGBROW_AVX2

// 24 instructions
__declspec(naked)
void ARGB1555ToARGBRow_SSE2(const uint8* src_argb1555, uint8* dst_argb,
                            int width) {
  __asm {
    mov       eax, 0x01080108  // generate multiplier to repeat 5 bits
    movd      xmm5, eax
    pshufd    xmm5, xmm5, 0
    mov       eax, 0x42004200  // multiplier shift by 6 and then repeat 5 bits
    movd      xmm6, eax
    pshufd    xmm6, xmm6, 0
    pcmpeqb   xmm3, xmm3       // generate mask 0xf800f800 for Red
    psllw     xmm3, 11
    movdqa    xmm4, xmm3       // generate mask 0x03e003e0 for Green
    psrlw     xmm4, 6
    pcmpeqb   xmm7, xmm7       // generate mask 0xff00ff00 for Alpha
    psllw     xmm7, 8

    mov       eax, [esp + 4]   // src_argb1555
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // width
    sub       edx, eax
    sub       edx, eax

 convertloop:
    movdqu    xmm0, [eax]   // fetch 8 pixels of 1555
    movdqa    xmm1, xmm0
    movdqa    xmm2, xmm0
    psllw     xmm1, 1       // R in upper 5 bits
    psllw     xmm2, 11      // B in upper 5 bits
    pand      xmm1, xmm3
    pmulhuw   xmm2, xmm5    // * (256 + 8)
    pmulhuw   xmm1, xmm5    // * (256 + 8)
    psllw     xmm1, 8
    por       xmm1, xmm2    // RB
    movdqa    xmm2, xmm0
    pand      xmm0, xmm4    // G in middle 5 bits
    psraw     xmm2, 8       // A
    pmulhuw   xmm0, xmm6    // << 6 * (256 + 8)
    pand      xmm2, xmm7
    por       xmm0, xmm2    // AG
    movdqa    xmm2, xmm1
    punpcklbw xmm1, xmm0
    punpckhbw xmm2, xmm0
    movdqu    [eax * 2 + edx], xmm1  // store 4 pixels of ARGB
    movdqu    [eax * 2 + edx + 16], xmm2  // store next 4 pixels of ARGB
    lea       eax, [eax + 16]
    sub       ecx, 8
    jg        convertloop
    ret
  }
}

// 18 instructions.
__declspec(naked)
void ARGB4444ToARGBRow_SSE2(const uint8* src_argb4444, uint8* dst_argb,
                            int width) {
  __asm {
    mov       eax, 0x0f0f0f0f  // generate mask 0x0f0f0f0f
    movd      xmm4, eax
    pshufd    xmm4, xmm4, 0
    movdqa    xmm5, xmm4       // 0xf0f0f0f0 for high nibbles
    pslld     xmm5, 4
    mov       eax, [esp + 4]   // src_argb4444
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // width
    sub       edx, eax
    sub       edx, eax

 convertloop:
    movdqu    xmm0, [eax]   // fetch 8 pixels of bgra4444
    movdqa    xmm2, xmm0
    pand      xmm0, xmm4    // mask low nibbles
    pand      xmm2, xmm5    // mask high nibbles
    movdqa    xmm1, xmm0
    movdqa    xmm3, xmm2
    psllw     xmm1, 4
    psrlw     xmm3, 4
    por       xmm0, xmm1
    por       xmm2, xmm3
    movdqa    xmm1, xmm0
    punpcklbw xmm0, xmm2
    punpckhbw xmm1, xmm2
    movdqu    [eax * 2 + edx], xmm0  // store 4 pixels of ARGB
    movdqu    [eax * 2 + edx + 16], xmm1  // store next 4 pixels of ARGB
    lea       eax, [eax + 16]
    sub       ecx, 8
    jg        convertloop
    ret
  }
}

__declspec(naked)
void ARGBToRGB24Row_SSSE3(const uint8* src_argb, uint8* dst_rgb, int width) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // width
    movdqa    xmm6, xmmword ptr kShuffleMaskARGBToRGB24

 convertloop:
    movdqu    xmm0, [eax]   // fetch 16 pixels of argb
    movdqu    xmm1, [eax + 16]
    movdqu    xmm2, [eax + 32]
    movdqu    xmm3, [eax + 48]
    lea       eax, [eax + 64]
    pshufb    xmm0, xmm6    // pack 16 bytes of ARGB to 12 bytes of RGB
    pshufb    xmm1, xmm6
    pshufb    xmm2, xmm6
    pshufb    xmm3, xmm6
    movdqa    xmm4, xmm1   // 4 bytes from 1 for 0
    psrldq    xmm1, 4      // 8 bytes from 1
    pslldq    xmm4, 12     // 4 bytes from 1 for 0
    movdqa    xmm5, xmm2   // 8 bytes from 2 for 1
    por       xmm0, xmm4   // 4 bytes from 1 for 0
    pslldq    xmm5, 8      // 8 bytes from 2 for 1
    movdqu    [edx], xmm0  // store 0
    por       xmm1, xmm5   // 8 bytes from 2 for 1
    psrldq    xmm2, 8      // 4 bytes from 2
    pslldq    xmm3, 4      // 12 bytes from 3 for 2
    por       xmm2, xmm3   // 12 bytes from 3 for 2
    movdqu    [edx + 16], xmm1   // store 1
    movdqu    [edx + 32], xmm2   // store 2
    lea       edx, [edx + 48]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

__declspec(naked)
void ARGBToRAWRow_SSSE3(const uint8* src_argb, uint8* dst_rgb, int width) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // width
    movdqa    xmm6, xmmword ptr kShuffleMaskARGBToRAW

 convertloop:
    movdqu    xmm0, [eax]   // fetch 16 pixels of argb
    movdqu    xmm1, [eax + 16]
    movdqu    xmm2, [eax + 32]
    movdqu    xmm3, [eax + 48]
    lea       eax, [eax + 64]
    pshufb    xmm0, xmm6    // pack 16 bytes of ARGB to 12 bytes of RGB
    pshufb    xmm1, xmm6
    pshufb    xmm2, xmm6
    pshufb    xmm3, xmm6
    movdqa    xmm4, xmm1   // 4 bytes from 1 for 0
    psrldq    xmm1, 4      // 8 bytes from 1
    pslldq    xmm4, 12     // 4 bytes from 1 for 0
    movdqa    xmm5, xmm2   // 8 bytes from 2 for 1
    por       xmm0, xmm4   // 4 bytes from 1 for 0
    pslldq    xmm5, 8      // 8 bytes from 2 for 1
    movdqu    [edx], xmm0  // store 0
    por       xmm1, xmm5   // 8 bytes from 2 for 1
    psrldq    xmm2, 8      // 4 bytes from 2
    pslldq    xmm3, 4      // 12 bytes from 3 for 2
    por       xmm2, xmm3   // 12 bytes from 3 for 2
    movdqu    [edx + 16], xmm1   // store 1
    movdqu    [edx + 32], xmm2   // store 2
    lea       edx, [edx + 48]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

__declspec(naked)
void ARGBToRGB565Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int width) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // width
    pcmpeqb   xmm3, xmm3       // generate mask 0x0000001f
    psrld     xmm3, 27
    pcmpeqb   xmm4, xmm4       // generate mask 0x000007e0
    psrld     xmm4, 26
    pslld     xmm4, 5
    pcmpeqb   xmm5, xmm5       // generate mask 0xfffff800
    pslld     xmm5, 11

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    movdqa    xmm1, xmm0    // B
    movdqa    xmm2, xmm0    // G
    pslld     xmm0, 8       // R
    psrld     xmm1, 3       // B
    psrld     xmm2, 5       // G
    psrad     xmm0, 16      // R
    pand      xmm1, xmm3    // B
    pand      xmm2, xmm4    // G
    pand      xmm0, xmm5    // R
    por       xmm1, xmm2    // BG
    por       xmm0, xmm1    // BGR
    packssdw  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of RGB565
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}
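
// Editor's note (not part of the original file): per pixel, the RGB565 packing
// above is plain truncation of the low bits of each channel:
static inline uint16 ScalarARGBToRGB565Sketch(uint32 argb) {
  uint32 b = (argb >> 3) & 0x1f;
  uint32 g = (argb >> 10) & 0x3f;
  uint32 r = (argb >> 19) & 0x1f;
  return (uint16)((r << 11) | (g << 5) | b);
}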

__declspec(naked)
void ARGBToRGB565DitherRow_SSE2(const uint8* src_argb, uint8* dst_rgb,
                                const uint32 dither4, int width) {
  __asm {

    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    movd      xmm6, [esp + 12] // dither4
    mov       ecx, [esp + 16]  // width
    punpcklbw xmm6, xmm6       // make dither 16 bytes
    movdqa    xmm7, xmm6
    punpcklwd xmm6, xmm6
    punpckhwd xmm7, xmm7
    pcmpeqb   xmm3, xmm3       // generate mask 0x0000001f
    psrld     xmm3, 27
    pcmpeqb   xmm4, xmm4       // generate mask 0x000007e0
    psrld     xmm4, 26
    pslld     xmm4, 5
    pcmpeqb   xmm5, xmm5       // generate mask 0xfffff800
    pslld     xmm5, 11

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    paddusb   xmm0, xmm6    // add dither
    movdqa    xmm1, xmm0    // B
    movdqa    xmm2, xmm0    // G
    pslld     xmm0, 8       // R
    psrld     xmm1, 3       // B
    psrld     xmm2, 5       // G
    psrad     xmm0, 16      // R
    pand      xmm1, xmm3    // B
    pand      xmm2, xmm4    // G
    pand      xmm0, xmm5    // R
    por       xmm1, xmm2    // BG
    por       xmm0, xmm1    // BGR
    packssdw  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of RGB565
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}
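
// Editor's note (not part of the original file): the dither variant above adds
// one of four per-column dither bytes (packed into the dither4 word) to every
// channel before the 565 truncation, with unsigned saturation (paddusb).
// Scalar form for a channel of the pixel at column x:
static inline uint8 ScalarAddDitherSketch(uint8 channel, uint32 dither4,
                                          int x) {
  int v = channel + (int)((dither4 >> ((x & 3) * 8)) & 0xff);
  return (uint8)(v > 255 ? 255 : v);
}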

#ifdef HAS_ARGBTORGB565DITHERROW_AVX2
__declspec(naked)
void ARGBToRGB565DitherRow_AVX2(const uint8* src_argb, uint8* dst_rgb,
                                const uint32 dither4, int width) {
  __asm {
    mov        eax, [esp + 4]      // src_argb
    mov        edx, [esp + 8]      // dst_rgb
    vbroadcastss xmm6, [esp + 12]  // dither4
    mov        ecx, [esp + 16]     // width
    vpunpcklbw xmm6, xmm6, xmm6    // make dither 32 bytes
    vpermq     ymm6, ymm6, 0xd8
    vpunpcklwd ymm6, ymm6, ymm6
    vpcmpeqb   ymm3, ymm3, ymm3    // generate mask 0x0000001f
    vpsrld     ymm3, ymm3, 27
    vpcmpeqb   ymm4, ymm4, ymm4    // generate mask 0x000007e0
    vpsrld     ymm4, ymm4, 26
    vpslld     ymm4, ymm4, 5
    vpslld     ymm5, ymm3, 11      // generate mask 0x0000f800

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 8 pixels of argb
    vpaddusb   ymm0, ymm0, ymm6    // add dither
    vpsrld     ymm2, ymm0, 5       // G
    vpsrld     ymm1, ymm0, 3       // B
    vpsrld     ymm0, ymm0, 8       // R
    vpand      ymm2, ymm2, ymm4    // G
    vpand      ymm1, ymm1, ymm3    // B
    vpand      ymm0, ymm0, ymm5    // R
    vpor       ymm1, ymm1, ymm2    // BG
    vpor       ymm0, ymm0, ymm1    // BGR
    vpackusdw  ymm0, ymm0, ymm0
    vpermq     ymm0, ymm0, 0xd8
    lea        eax, [eax + 32]
    vmovdqu    [edx], xmm0         // store 8 pixels of RGB565
    lea        edx, [edx + 16]
    sub        ecx, 8
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTORGB565DITHERROW_AVX2

// TODO(fbarchard): Improve sign extension/packing.
__declspec(naked)
void ARGBToARGB1555Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int width) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // width
    pcmpeqb   xmm4, xmm4       // generate mask 0x0000001f
    psrld     xmm4, 27
    movdqa    xmm5, xmm4       // generate mask 0x000003e0
    pslld     xmm5, 5
    movdqa    xmm6, xmm4       // generate mask 0x00007c00
    pslld     xmm6, 10
    pcmpeqb   xmm7, xmm7       // generate mask 0xffff8000
    pslld     xmm7, 15

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    movdqa    xmm1, xmm0    // B
    movdqa    xmm2, xmm0    // G
    movdqa    xmm3, xmm0    // R
    psrad     xmm0, 16      // A
    psrld     xmm1, 3       // B
    psrld     xmm2, 6       // G
    psrld     xmm3, 9       // R
    pand      xmm0, xmm7    // A
    pand      xmm1, xmm4    // B
    pand      xmm2, xmm5    // G
    pand      xmm3, xmm6    // R
    por       xmm0, xmm1    // BA
    por       xmm2, xmm3    // GR
    por       xmm0, xmm2    // BGRA
    packssdw  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of ARGB1555
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}

__declspec(naked)
void ARGBToARGB4444Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int width) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // width
    pcmpeqb   xmm4, xmm4       // generate mask 0xf000f000
    psllw     xmm4, 12
    movdqa    xmm3, xmm4       // generate mask 0x00f000f0
    psrlw     xmm3, 8

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    movdqa    xmm1, xmm0
    pand      xmm0, xmm3    // low nibble
    pand      xmm1, xmm4    // high nibble
    psrld     xmm0, 4
    psrld     xmm1, 8
    por       xmm0, xmm1
    packuswb  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of ARGB4444
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}

#ifdef HAS_ARGBTORGB565ROW_AVX2
__declspec(naked)
void ARGBToRGB565Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int width) {
  __asm {
    mov        eax, [esp + 4]      // src_argb
    mov        edx, [esp + 8]      // dst_rgb
    mov        ecx, [esp + 12]     // width
    vpcmpeqb   ymm3, ymm3, ymm3    // generate mask 0x0000001f
    vpsrld     ymm3, ymm3, 27
    vpcmpeqb   ymm4, ymm4, ymm4    // generate mask 0x000007e0
    vpsrld     ymm4, ymm4, 26
    vpslld     ymm4, ymm4, 5
    vpslld     ymm5, ymm3, 11      // generate mask 0x0000f800

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 8 pixels of argb
    vpsrld     ymm2, ymm0, 5       // G
    vpsrld     ymm1, ymm0, 3       // B
    vpsrld     ymm0, ymm0, 8       // R
    vpand      ymm2, ymm2, ymm4    // G
    vpand      ymm1, ymm1, ymm3    // B
    vpand      ymm0, ymm0, ymm5    // R
    vpor       ymm1, ymm1, ymm2    // BG
    vpor       ymm0, ymm0, ymm1    // BGR
    vpackusdw  ymm0, ymm0, ymm0
    vpermq     ymm0, ymm0, 0xd8
    lea        eax, [eax + 32]
    vmovdqu    [edx], xmm0         // store 8 pixels of RGB565
    lea        edx, [edx + 16]
    sub        ecx, 8
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTORGB565ROW_AVX2

#ifdef HAS_ARGBTOARGB1555ROW_AVX2
__declspec(naked)
void ARGBToARGB1555Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int width) {
  __asm {
    mov        eax, [esp + 4]      // src_argb
    mov        edx, [esp + 8]      // dst_rgb
    mov        ecx, [esp + 12]     // width
    vpcmpeqb   ymm4, ymm4, ymm4
    vpsrld     ymm4, ymm4, 27      // generate mask 0x0000001f
    vpslld     ymm5, ymm4, 5       // generate mask 0x000003e0
    vpslld     ymm6, ymm4, 10      // generate mask 0x00007c00
    vpcmpeqb   ymm7, ymm7, ymm7    // generate mask 0xffff8000
    vpslld     ymm7, ymm7, 15

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 8 pixels of argb
    vpsrld     ymm3, ymm0, 9       // R
    vpsrld     ymm2, ymm0, 6       // G
    vpsrld     ymm1, ymm0, 3       // B
    vpsrad     ymm0, ymm0, 16      // A
    vpand      ymm3, ymm3, ymm6    // R
    vpand      ymm2, ymm2, ymm5    // G
    vpand      ymm1, ymm1, ymm4    // B
    vpand      ymm0, ymm0, ymm7    // A
    vpor       ymm0, ymm0, ymm1    // BA
    vpor       ymm2, ymm2, ymm3    // GR
    vpor       ymm0, ymm0, ymm2    // BGRA
    vpackssdw  ymm0, ymm0, ymm0
    vpermq     ymm0, ymm0, 0xd8
    lea        eax, [eax + 32]
    vmovdqu    [edx], xmm0         // store 8 pixels of ARGB1555
    lea        edx, [edx + 16]
    sub        ecx, 8
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTOARGB1555ROW_AVX2

#ifdef HAS_ARGBTOARGB4444ROW_AVX2
__declspec(naked)
void ARGBToARGB4444Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb
    mov        edx, [esp + 8]   // dst_rgb
    mov        ecx, [esp + 12]  // width
    vpcmpeqb   ymm4, ymm4, ymm4   // generate mask 0xf000f000
    vpsllw     ymm4, ymm4, 12
    vpsrlw     ymm3, ymm4, 8      // generate mask 0x00f000f0

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 8 pixels of argb
    vpand      ymm1, ymm0, ymm4    // high nibble
    vpand      ymm0, ymm0, ymm3    // low nibble
    vpsrld     ymm1, ymm1, 8
    vpsrld     ymm0, ymm0, 4
    vpor       ymm0, ymm0, ymm1
    vpackuswb  ymm0, ymm0, ymm0
    vpermq     ymm0, ymm0, 0xd8
    lea        eax, [eax + 32]
    vmovdqu    [edx], xmm0         // store 8 pixels of ARGB4444
    lea        edx, [edx + 16]
    sub        ecx, 8
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTOARGB4444ROW_AVX2

// Convert 16 ARGB pixels (64 bytes) to 16 Y values.
__declspec(naked)
void ARGBToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* width */
    movdqa     xmm4, xmmword ptr kARGBToY
    movdqa     xmm5, xmmword ptr kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

// Convert 16 ARGB pixels (64 bytes) to 16 YJ values.
// Same as ARGBToYRow but different coefficients, no add 16, but do rounding.
__declspec(naked)
void ARGBToYJRow_SSSE3(const uint8* src_argb, uint8* dst_y, int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* width */
    movdqa     xmm4, xmmword ptr kARGBToYJ
    movdqa     xmm5, xmmword ptr kAddYJ64

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    paddw      xmm0, xmm5  // Add .5 for rounding.
    paddw      xmm2, xmm5
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

#ifdef HAS_ARGBTOYROW_AVX2
// vpermd for vphaddw + vpackuswb vpermd.
static const lvec32 kPermdARGBToY_AVX = {
  0, 4, 1, 5, 2, 6, 3, 7
};

// Convert 32 ARGB pixels (128 bytes) to 32 Y values.
__declspec(naked)
void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* width */
    vbroadcastf128 ymm4, xmmword ptr kARGBToY
    vbroadcastf128 ymm5, xmmword ptr kAddY16
    vmovdqu    ymm6, ymmword ptr kPermdARGBToY_AVX

 convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vmovdqu    ymm2, [eax + 64]
    vmovdqu    ymm3, [eax + 96]
    vpmaddubsw ymm0, ymm0, ymm4
    vpmaddubsw ymm1, ymm1, ymm4
    vpmaddubsw ymm2, ymm2, ymm4
    vpmaddubsw ymm3, ymm3, ymm4
    lea        eax, [eax + 128]
    vphaddw    ymm0, ymm0, ymm1  // mutates.
    vphaddw    ymm2, ymm2, ymm3
    vpsrlw     ymm0, ymm0, 7
    vpsrlw     ymm2, ymm2, 7
    vpackuswb  ymm0, ymm0, ymm2  // mutates.
    vpermd     ymm0, ymm6, ymm0  // For vphaddw + vpackuswb mutation.
    vpaddb     ymm0, ymm0, ymm5  // add 16 for Y
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  //  HAS_ARGBTOYROW_AVX2

#ifdef HAS_ARGBTOYJROW_AVX2
// Convert 32 ARGB pixels (128 bytes) to 32 YJ values.
__declspec(naked)
void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* width */
    vbroadcastf128 ymm4, xmmword ptr kARGBToYJ
    vbroadcastf128 ymm5, xmmword ptr kAddYJ64
    vmovdqu    ymm6, ymmword ptr kPermdARGBToY_AVX

 convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vmovdqu    ymm2, [eax + 64]
    vmovdqu    ymm3, [eax + 96]
    vpmaddubsw ymm0, ymm0, ymm4
    vpmaddubsw ymm1, ymm1, ymm4
    vpmaddubsw ymm2, ymm2, ymm4
    vpmaddubsw ymm3, ymm3, ymm4
    lea        eax, [eax + 128]
    vphaddw    ymm0, ymm0, ymm1  // mutates.
    vphaddw    ymm2, ymm2, ymm3
    vpaddw     ymm0, ymm0, ymm5  // Add .5 for rounding.
    vpaddw     ymm2, ymm2, ymm5
    vpsrlw     ymm0, ymm0, 7
    vpsrlw     ymm2, ymm2, 7
    vpackuswb  ymm0, ymm0, ymm2  // mutates.
    vpermd     ymm0, ymm6, ymm0  // For vphaddw + vpackuswb mutation.
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  //  HAS_ARGBTOYJROW_AVX2

__declspec(naked)
void BGRAToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* width */
    movdqa     xmm4, xmmword ptr kBGRAToY
    movdqa     xmm5, xmmword ptr kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void ABGRToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* width */
    movdqa     xmm4, xmmword ptr kABGRToY
    movdqa     xmm5, xmmword ptr kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void RGBAToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* width */
    movdqa     xmm4, xmmword ptr kRGBAToY
    movdqa     xmm5, xmmword ptr kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void ARGBToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // width
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kARGBToV
    movdqa     xmm7, xmmword ptr kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
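
// Editor's note (not part of the original file): in scalar form, one 2x2
// block of the UV pass above is roughly the following (the SIMD code averages
// with pavgb in two steps, so rounding differs very slightly). row0 and row1
// point at two adjacent ARGB pixels on two adjacent rows:
static inline void ScalarARGBBlockToUVSketch(const uint8* row0,
                                             const uint8* row1,
                                             uint8* dst_u, uint8* dst_v) {
  int b = (row0[0] + row0[4] + row1[0] + row1[4] + 2) >> 2;
  int g = (row0[1] + row0[5] + row1[1] + row1[5] + 2) >> 2;
  int r = (row0[2] + row0[6] + row1[2] + row1[6] + 2) >> 2;
  *dst_u = (uint8)(((112 * b - 74 * g - 38 * r) >> 8) + 128);
  *dst_v = (uint8)(((-18 * b - 94 * g + 112 * r) >> 8) + 128);
}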

__declspec(naked)
void ARGBToUVJRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                        uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // width
    movdqa     xmm5, xmmword ptr kAddUVJ128
    movdqa     xmm6, xmmword ptr kARGBToVJ
    movdqa     xmm7, xmmword ptr kARGBToUJ
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    paddw      xmm0, xmm5            // +.5 rounding -> unsigned
    paddw      xmm1, xmm5
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

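// The ARGBToUV*Row functions above are SIMD versions of roughly the following
// per 2x2 block reference code (a sketch for illustration only; the actual
// coefficients live in the kARGBToU/kARGBToV and kARGBToUJ/kARGBToVJ tables
// defined elsewhere in this file, and the J variant adds 0.5 of rounding
// before the shift):
//
//   for (int x = 0; x < width; x += 2) {
//     int b = AVG4(b00, b01, b10, b11);  // 2x2 average of the blue channel
//     int g = AVG4(g00, g01, g10, g11);
//     int r = AVG4(r00, r01, r10, r11);
//     dst_u[x / 2] = ((b * UB + g * UG + r * UR) >> 8) + 128;
//     dst_v[x / 2] = ((b * VB + g * VG + r * VR) >> 8) + 128;
//   }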
#ifdef HAS_ARGBTOUVROW_AVX2
__declspec(naked)
void ARGBToUVRow_AVX2(const uint8* src_argb0, int src_stride_argb,
                      uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // width
    vbroadcastf128 ymm5, xmmword ptr kAddUV128
    vbroadcastf128 ymm6, xmmword ptr kARGBToV
    vbroadcastf128 ymm7, xmmword ptr kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 32x2 argb pixels to 16x1 */
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vmovdqu    ymm2, [eax + 64]
    vmovdqu    ymm3, [eax + 96]
    vpavgb     ymm0, ymm0, [eax + esi]
    vpavgb     ymm1, ymm1, [eax + esi + 32]
    vpavgb     ymm2, ymm2, [eax + esi + 64]
    vpavgb     ymm3, ymm3, [eax + esi + 96]
    lea        eax,  [eax + 128]
    vshufps    ymm4, ymm0, ymm1, 0x88
    vshufps    ymm0, ymm0, ymm1, 0xdd
    vpavgb     ymm0, ymm0, ymm4  // mutated by vshufps
    vshufps    ymm4, ymm2, ymm3, 0x88
    vshufps    ymm2, ymm2, ymm3, 0xdd
    vpavgb     ymm2, ymm2, ymm4  // mutated by vshufps

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 32 different pixels, it's 16 pixels of U and 16 of V
    vpmaddubsw ymm1, ymm0, ymm7  // U
    vpmaddubsw ymm3, ymm2, ymm7
    vpmaddubsw ymm0, ymm0, ymm6  // V
    vpmaddubsw ymm2, ymm2, ymm6
    vphaddw    ymm1, ymm1, ymm3  // mutates
    vphaddw    ymm0, ymm0, ymm2
    vpsraw     ymm1, ymm1, 8
    vpsraw     ymm0, ymm0, 8
    vpacksswb  ymm0, ymm1, ymm0  // mutates
    vpermq     ymm0, ymm0, 0xd8  // For vpacksswb
    vpshufb    ymm0, ymm0, ymmword ptr kShufARGBToUV_AVX  // for vshufps/vphaddw
    vpaddb     ymm0, ymm0, ymm5  // -> unsigned

    // step 3 - store 16 U and 16 V values
    vextractf128 [edx], ymm0, 0 // U
    vextractf128 [edx + edi], ymm0, 1 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTOUVROW_AVX2
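// Note: the AVX2 version needs the extra vpermq and kShufARGBToUV_AVX shuffle
// because vshufps, vphaddw and vpacksswb each operate within the two 128 bit
// lanes of a ymm register, so their results come out lane interleaved rather
// than in pixel order (the "mutates" comments above).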

__declspec(naked)
void ARGBToUV444Row_SSSE3(const uint8* src_argb0,
                          uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]   // src_argb
    mov        edx, [esp + 4 + 8]   // dst_u
    mov        edi, [esp + 4 + 12]  // dst_v
    mov        ecx, [esp + 4 + 16]  // width
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kARGBToV
    movdqa     xmm7, xmmword ptr kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* convert to U and V */
    movdqu     xmm0, [eax]          // U
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm7
    pmaddubsw  xmm1, xmm7
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm3, xmm7
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psraw      xmm0, 8
    psraw      xmm2, 8
    packsswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0

    movdqu     xmm0, [eax]          // V
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm6
    pmaddubsw  xmm1, xmm6
    pmaddubsw  xmm2, xmm6
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psraw      xmm0, 8
    psraw      xmm2, 8
    packsswb   xmm0, xmm2
    paddb      xmm0, xmm5
    lea        eax,  [eax + 64]
    movdqu     [edx + edi], xmm0
    lea        edx,  [edx + 16]
    sub        ecx,  16
    jg         convertloop

    pop        edi
    ret
  }
}

__declspec(naked)
void ARGBToUV422Row_SSSE3(const uint8* src_argb0,
                          uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]   // src_argb
    mov        edx, [esp + 4 + 8]   // dst_u
    mov        edi, [esp + 4 + 12]  // dst_v
    mov        ecx, [esp + 4 + 16]  // width
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kARGBToV
    movdqa     xmm7, xmmword ptr kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}

__declspec(naked)
void BGRAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // width
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kBGRAToV
    movdqa     xmm7, xmmword ptr kBGRAToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void ABGRToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // width
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kABGRToV
    movdqa     xmm7, xmmword ptr kABGRToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void RGBAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // width
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kRGBAToV
    movdqa     xmm7, xmmword ptr kRGBAToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBTOYROW_SSSE3

// Read 16 UV from 444
#define READYUV444_AVX2 __asm {                                                \
    __asm vmovdqu    xmm0, [esi]                  /* U */                      \
    __asm vmovdqu    xmm1, [esi + edi]            /* V */                      \
    __asm lea        esi,  [esi + 16]                                          \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpermq     ymm1, ymm1, 0xd8                                          \
    __asm vpunpcklbw ymm0, ymm0, ymm1             /* UV */                     \
    __asm vmovdqu    xmm4, [eax]                  /* Y */                      \
    __asm vpermq     ymm4, ymm4, 0xd8                                          \
    __asm vpunpcklbw ymm4, ymm4, ymm4                                          \
    __asm lea        eax, [eax + 16]                                           \
  }
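// Note: the READ*_AVX2 macros load 16 bytes into the low half of a ymm
// register and vpermq it with 0xd8 before unpacking, because vpunpcklbw and
// vpunpcklwd interleave within each 128 bit lane; the permute positions the
// bytes so the lane local unpack still yields the pairs in pixel order.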

// Read 8 UV from 422, upsample to 16 UV.
#define READYUV422_AVX2 __asm {                                                \
    __asm vmovq      xmm0, qword ptr [esi]        /* U */                      \
    __asm vmovq      xmm1, qword ptr [esi + edi]  /* V */                      \
    __asm lea        esi,  [esi + 8]                                           \
    __asm vpunpcklbw ymm0, ymm0, ymm1             /* UV */                     \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm0, ymm0             /* UVUV (upsample) */        \
    __asm vmovdqu    xmm4, [eax]                  /* Y */                      \
    __asm vpermq     ymm4, ymm4, 0xd8                                          \
    __asm vpunpcklbw ymm4, ymm4, ymm4                                          \
    __asm lea        eax, [eax + 16]                                           \
  }

// Read 8 UV from 422, upsample to 16 UV.  With 16 Alpha.
#define READYUVA422_AVX2 __asm {                                               \
    __asm vmovq      xmm0, qword ptr [esi]        /* U */                      \
    __asm vmovq      xmm1, qword ptr [esi + edi]  /* V */                      \
    __asm lea        esi,  [esi + 8]                                           \
    __asm vpunpcklbw ymm0, ymm0, ymm1             /* UV */                     \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm0, ymm0             /* UVUV (upsample) */        \
    __asm vmovdqu    xmm4, [eax]                  /* Y */                      \
    __asm vpermq     ymm4, ymm4, 0xd8                                          \
    __asm vpunpcklbw ymm4, ymm4, ymm4                                          \
    __asm lea        eax, [eax + 16]                                           \
    __asm vmovdqu    xmm5, [ebp]                  /* A */                      \
    __asm vpermq     ymm5, ymm5, 0xd8                                          \
    __asm lea        ebp, [ebp + 16]                                           \
  }

// Read 4 UV from 411, upsample to 16 UV.
#define READYUV411_AVX2 __asm {                                                \
    __asm vmovd      xmm0, dword ptr [esi]        /* U */                      \
    __asm vmovd      xmm1, dword ptr [esi + edi]  /* V */                      \
    __asm lea        esi,  [esi + 4]                                           \
    __asm vpunpcklbw ymm0, ymm0, ymm1             /* UV */                     \
    __asm vpunpcklwd ymm0, ymm0, ymm0             /* UVUV (upsample) */        \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpckldq ymm0, ymm0, ymm0             /* UVUVUVUV (upsample) */    \
    __asm vmovdqu    xmm4, [eax]                  /* Y */                      \
    __asm vpermq     ymm4, ymm4, 0xd8                                          \
    __asm vpunpcklbw ymm4, ymm4, ymm4                                          \
    __asm lea        eax, [eax + 16]                                           \
  }

// Read 8 UV from NV12, upsample to 16 UV.
#define READNV12_AVX2 __asm {                                                  \
    __asm vmovdqu    xmm0, [esi]                  /* UV */                     \
    __asm lea        esi,  [esi + 16]                                          \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm0, ymm0             /* UVUV (upsample) */        \
    __asm vmovdqu    xmm4, [eax]                  /* Y */                      \
    __asm vpermq     ymm4, ymm4, 0xd8                                          \
    __asm vpunpcklbw ymm4, ymm4, ymm4                                          \
    __asm lea        eax, [eax + 16]                                           \
  }

// Read 8 UV from NV21, upsample to 16 UV.
#define READNV21_AVX2 __asm {                                                  \
    __asm vmovdqu    xmm0, [esi]                  /* UV */                     \
    __asm lea        esi,  [esi + 16]                                          \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpshufb    ymm0, ymm0, ymmword ptr kShuffleNV21                      \
    __asm vmovdqu    xmm4, [eax]                  /* Y */                      \
    __asm vpermq     ymm4, ymm4, 0xd8                                          \
    __asm vpunpcklbw ymm4, ymm4, ymm4                                          \
    __asm lea        eax, [eax + 16]                                           \
  }

// Read 8 YUY2 with 16 Y and upsample 8 UV to 16 UV.
#define READYUY2_AVX2 __asm {                                                  \
    __asm vmovdqu    ymm4, [eax]          /* YUY2 */                           \
    __asm vpshufb    ymm4, ymm4, ymmword ptr kShuffleYUY2Y                     \
    __asm vmovdqu    ymm0, [eax]          /* UV */                             \
    __asm vpshufb    ymm0, ymm0, ymmword ptr kShuffleYUY2UV                    \
    __asm lea        eax, [eax + 32]                                           \
  }

// Read 8 UYVY with 16 Y and upsample 8 UV to 16 UV.
#define READUYVY_AVX2 __asm {                                                  \
    __asm vmovdqu    ymm4, [eax]          /* UYVY */                           \
    __asm vpshufb    ymm4, ymm4, ymmword ptr kShuffleUYVYY                     \
    __asm vmovdqu    ymm0, [eax]          /* UV */                             \
    __asm vpshufb    ymm0, ymm0, ymmword ptr kShuffleUYVYUV                    \
    __asm lea        eax, [eax + 32]                                           \
  }
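// Note: YUY2 stores pixels as Y0 U Y1 V and UYVY as U Y0 V Y1.  The
// kShuffleYUY2*/kShuffleUYVY* masks used above split each 32 byte load into
// 16 Y values and 8 UV pairs, with every UV pair duplicated so it can be
// applied to both of its pixels.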

// Convert 16 pixels: 16 UV and 16 Y.
#define YUVTORGB_AVX2(YuvConstants) __asm {                                    \
    __asm vpmaddubsw ymm2, ymm0, ymmword ptr [YuvConstants + KUVTOR] /* R UV */\
    __asm vpmaddubsw ymm1, ymm0, ymmword ptr [YuvConstants + KUVTOG] /* G UV */\
    __asm vpmaddubsw ymm0, ymm0, ymmword ptr [YuvConstants + KUVTOB] /* B UV */\
    __asm vmovdqu    ymm3, ymmword ptr [YuvConstants + KUVBIASR]               \
    __asm vpsubw     ymm2, ymm3, ymm2                                          \
    __asm vmovdqu    ymm3, ymmword ptr [YuvConstants + KUVBIASG]               \
    __asm vpsubw     ymm1, ymm3, ymm1                                          \
    __asm vmovdqu    ymm3, ymmword ptr [YuvConstants + KUVBIASB]               \
    __asm vpsubw     ymm0, ymm3, ymm0                                          \
    /* Step 2: Find Y contribution to 16 R,G,B values */                       \
    __asm vpmulhuw   ymm4, ymm4, ymmword ptr [YuvConstants + KYTORGB]          \
    __asm vpaddsw    ymm0, ymm0, ymm4           /* B += Y */                   \
    __asm vpaddsw    ymm1, ymm1, ymm4           /* G += Y */                   \
    __asm vpaddsw    ymm2, ymm2, ymm4           /* R += Y */                   \
    __asm vpsraw     ymm0, ymm0, 6                                             \
    __asm vpsraw     ymm1, ymm1, 6                                             \
    __asm vpsraw     ymm2, ymm2, 6                                             \
    __asm vpackuswb  ymm0, ymm0, ymm0           /* B */                        \
    __asm vpackuswb  ymm1, ymm1, ymm1           /* G */                        \
    __asm vpackuswb  ymm2, ymm2, ymm2           /* R */                        \
  }
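// Per pixel, YUVTORGB_AVX2 computes approximately the following fixed point
// conversion (a sketch; the exact coefficients and biases come from the
// YuvConstants table passed in, and the biases fold in the -16 Y offset, the
// -128 U/V offset and rounding):
//
//   int y1 = (y * YG) >> 16;                      // vpmulhuw with kYToRgb
//   int b  = Clamp((BiasB - (u * UB          ) + y1) >> 6);
//   int g  = Clamp((BiasG - (u * UG + v * VG) + y1) >> 6);
//   int r  = Clamp((BiasR - (          v * VR) + y1) >> 6);
//
// where Clamp saturates to 0..255 (vpackuswb).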

// Store 16 ARGB values.
#define STOREARGB_AVX2 __asm {                                                 \
    __asm vpunpcklbw ymm0, ymm0, ymm1           /* BG */                       \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpcklbw ymm2, ymm2, ymm5           /* RA */                       \
    __asm vpermq     ymm2, ymm2, 0xd8                                          \
    __asm vpunpcklwd ymm1, ymm0, ymm2           /* BGRA first 8 pixels */      \
    __asm vpunpckhwd ymm0, ymm0, ymm2           /* BGRA next 8 pixels */       \
    __asm vmovdqu    0[edx], ymm1                                              \
    __asm vmovdqu    32[edx], ymm0                                             \
    __asm lea        edx,  [edx + 64]                                          \
  }

// Store 16 RGBA values.
#define STORERGBA_AVX2 __asm {                                                 \
    __asm vpunpcklbw ymm1, ymm1, ymm2           /* GR */                       \
    __asm vpermq     ymm1, ymm1, 0xd8                                          \
    __asm vpunpcklbw ymm2, ymm5, ymm0           /* AB */                       \
    __asm vpermq     ymm2, ymm2, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm2, ymm1           /* ABGR first 8 pixels */      \
    __asm vpunpckhwd ymm1, ymm2, ymm1           /* ABGR next 8 pixels */       \
    __asm vmovdqu    [edx], ymm0                                               \
    __asm vmovdqu    [edx + 32], ymm1                                          \
    __asm lea        edx,  [edx + 64]                                          \
  }

#ifdef HAS_I422TOARGBROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void I422ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        const struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2(ebx)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TOARGBROW_AVX2
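// A typical caller walks the image one row at a time (a sketch; most users go
// through the planar convert functions rather than calling rows directly, and
// kYuvI601Constants is the BT.601 table declared in row.h):
//
//   for (int y = 0; y < height; ++y) {
//     I422ToARGBRow_AVX2(src_y, src_u, src_v, dst_argb,
//                        &kYuvI601Constants, width);
//     src_y += src_stride_y;
//     src_u += src_stride_u;   // I422 is subsampled horizontally only,
//     src_v += src_stride_v;   // so U/V advance every row.
//     dst_argb += dst_stride_argb;
//   }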

#ifdef HAS_I422ALPHATOARGBROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y and 16 A producing 16 ARGB.
__declspec(naked)
void I422AlphaToARGBRow_AVX2(const uint8* y_buf,
                             const uint8* u_buf,
                             const uint8* v_buf,
                             const uint8* a_buf,
                             uint8* dst_argb,
                             const struct YuvConstants* yuvconstants,
                             int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    push       ebp
    mov        eax, [esp + 16 + 4]   // Y
    mov        esi, [esp + 16 + 8]   // U
    mov        edi, [esp + 16 + 12]  // V
    mov        ebp, [esp + 16 + 16]  // A
    mov        edx, [esp + 16 + 20]  // argb
    mov        ebx, [esp + 16 + 24]  // yuvconstants
    mov        ecx, [esp + 16 + 28]  // width
    sub        edi, esi

 convertloop:
    READYUVA422_AVX2
    YUVTORGB_AVX2(ebx)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebp
    pop        ebx
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422ALPHATOARGBROW_AVX2

#ifdef HAS_I444TOARGBROW_AVX2
// 16 pixels
// 16 UV values with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void I444ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        const struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha
 convertloop:
    READYUV444_AVX2
    YUVTORGB_AVX2(ebx)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I444TOARGBROW_AVX2

#ifdef HAS_I411TOARGBROW_AVX2
// 16 pixels
// 4 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void I411ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        const struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV411_AVX2
    YUVTORGB_AVX2(ebx)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I411TOARGBROW_AVX2

#ifdef HAS_NV12TOARGBROW_AVX2
// 16 pixels.
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void NV12ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* uv_buf,
                        uint8* dst_argb,
                        const struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       esi
    push       ebx
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // UV
    mov        edx, [esp + 8 + 12]  // argb
    mov        ebx, [esp + 8 + 16]  // yuvconstants
    mov        ecx, [esp + 8 + 20]  // width
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READNV12_AVX2
    YUVTORGB_AVX2(ebx)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_NV12TOARGBROW_AVX2

#ifdef HAS_NV21TOARGBROW_AVX2
// 16 pixels.
// 8 VU values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void NV21ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* vu_buf,
                        uint8* dst_argb,
                        const struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       esi
    push       ebx
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // VU
    mov        edx, [esp + 8 + 12]  // argb
    mov        ebx, [esp + 8 + 16]  // yuvconstants
    mov        ecx, [esp + 8 + 20]  // width
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READNV21_AVX2
    YUVTORGB_AVX2(ebx)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_NV21TOARGBROW_AVX2

#ifdef HAS_YUY2TOARGBROW_AVX2
// 16 pixels.
// 8 YUY2 values with 16 Y and 8 UV producing 16 ARGB (64 bytes).
__declspec(naked)
void YUY2ToARGBRow_AVX2(const uint8* src_yuy2,
                        uint8* dst_argb,
                        const struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       ebx
    mov        eax, [esp + 4 + 4]   // yuy2
    mov        edx, [esp + 4 + 8]   // argb
    mov        ebx, [esp + 4 + 12]  // yuvconstants
    mov        ecx, [esp + 4 + 16]  // width
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUY2_AVX2
    YUVTORGB_AVX2(ebx)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    vzeroupper
    ret
  }
}
#endif  // HAS_YUY2TOARGBROW_AVX2

#ifdef HAS_UYVYTOARGBROW_AVX2
// 16 pixels.
// 8 UYVY values with 16 Y and 8 UV producing 16 ARGB (64 bytes).
__declspec(naked)
void UYVYToARGBRow_AVX2(const uint8* src_uyvy,
                        uint8* dst_argb,
                        const struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       ebx
    mov        eax, [esp + 4 + 4]   // uyvy
    mov        edx, [esp + 4 + 8]   // argb
    mov        ebx, [esp + 4 + 12]  // yuvconstants
    mov        ecx, [esp + 4 + 16]  // width
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READUYVY_AVX2
    YUVTORGB_AVX2(ebx)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    vzeroupper
    ret
  }
}
#endif  // HAS_UYVYTOARGBROW_AVX2

#ifdef HAS_I422TORGBAROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 RGBA (64 bytes).
__declspec(naked)
void I422ToRGBARow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        const struct YuvConstants* yuvconstants,
                        int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // rgba
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2(ebx)
    STORERGBA_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TORGBAROW_AVX2

#if defined(HAS_I422TOARGBROW_SSSE3)
// TODO(fbarchard): Read that does half size on Y and treats 420 as 444.
// Allows a conversion with half size scaling.

// Read 8 UV from 444.
#define READYUV444 __asm {                                                     \
    __asm movq       xmm0, qword ptr [esi] /* U */                             \
    __asm movq       xmm1, qword ptr [esi + edi] /* V */                       \
    __asm lea        esi,  [esi + 8]                                           \
    __asm punpcklbw  xmm0, xmm1           /* UV */                             \
    __asm movq       xmm4, qword ptr [eax]                                     \
    __asm punpcklbw  xmm4, xmm4                                                \
    __asm lea        eax, [eax + 8]                                            \
  }

// Read 4 UV from 422, upsample to 8 UV.
#define READYUV422 __asm {                                                     \
    __asm movd       xmm0, [esi]          /* U */                              \
    __asm movd       xmm1, [esi + edi]    /* V */                              \
    __asm lea        esi,  [esi + 4]                                           \
    __asm punpcklbw  xmm0, xmm1           /* UV */                             \
    __asm punpcklwd  xmm0, xmm0           /* UVUV (upsample) */                \
    __asm movq       xmm4, qword ptr [eax]                                     \
    __asm punpcklbw  xmm4, xmm4                                                \
    __asm lea        eax, [eax + 8]                                            \
  }

// Read 4 UV from 422, upsample to 8 UV.  With 8 Alpha.
#define READYUVA422 __asm {                                                    \
    __asm movd       xmm0, [esi]          /* U */                              \
    __asm movd       xmm1, [esi + edi]    /* V */                              \
    __asm lea        esi,  [esi + 4]                                           \
    __asm punpcklbw  xmm0, xmm1           /* UV */                             \
    __asm punpcklwd  xmm0, xmm0           /* UVUV (upsample) */                \
    __asm movq       xmm4, qword ptr [eax]   /* Y */                           \
    __asm punpcklbw  xmm4, xmm4                                                \
    __asm lea        eax, [eax + 8]                                            \
    __asm movq       xmm5, qword ptr [ebp]   /* A */                           \
    __asm lea        ebp, [ebp + 8]                                            \
  }

// Read 2 UV from 411, upsample to 8 UV.
// drmemory fails with memory fault if pinsrw used. libyuv bug: 525
//  __asm pinsrw     xmm0, [esi], 0        /* U */
//  __asm pinsrw     xmm1, [esi + edi], 0  /* V */
#define READYUV411_EBX __asm {                                                 \
    __asm movzx      ebx, word ptr [esi]        /* U */                        \
    __asm movd       xmm0, ebx                                                 \
    __asm movzx      ebx, word ptr [esi + edi]  /* V */                        \
    __asm movd       xmm1, ebx                                                 \
    __asm lea        esi,  [esi + 2]                                           \
    __asm punpcklbw  xmm0, xmm1            /* UV */                            \
    __asm punpcklwd  xmm0, xmm0            /* UVUV (upsample) */               \
    __asm punpckldq  xmm0, xmm0            /* UVUVUVUV (upsample) */           \
    __asm movq       xmm4, qword ptr [eax]                                     \
    __asm punpcklbw  xmm4, xmm4                                                \
    __asm lea        eax, [eax + 8]                                            \
  }

// Read 4 UV from NV12, upsample to 8 UV.
#define READNV12 __asm {                                                       \
    __asm movq       xmm0, qword ptr [esi] /* UV */                            \
    __asm lea        esi,  [esi + 8]                                           \
    __asm punpcklwd  xmm0, xmm0           /* UVUV (upsample) */                \
    __asm movq       xmm4, qword ptr [eax]                                     \
    __asm punpcklbw  xmm4, xmm4                                                \
    __asm lea        eax, [eax + 8]                                            \
  }

// Read 4 VU from NV21, upsample to 8 UV.
#define READNV21 __asm {                                                       \
    __asm movq       xmm0, qword ptr [esi] /* UV */                            \
    __asm lea        esi,  [esi + 8]                                           \
    __asm pshufb     xmm0, xmmword ptr kShuffleNV21                            \
    __asm movq       xmm4, qword ptr [eax]                                     \
    __asm punpcklbw  xmm4, xmm4                                                \
    __asm lea        eax, [eax + 8]                                            \
  }

// Read 4 YUY2 with 8 Y and upsample 4 UV to 8 UV.
#define READYUY2 __asm {                                                       \
    __asm movdqu     xmm4, [eax]          /* YUY2 */                           \
    __asm pshufb     xmm4, xmmword ptr kShuffleYUY2Y                           \
    __asm movdqu     xmm0, [eax]          /* UV */                             \
    __asm pshufb     xmm0, xmmword ptr kShuffleYUY2UV                          \
    __asm lea        eax, [eax + 16]                                           \
  }

// Read 4 UYVY with 8 Y and upsample 4 UV to 8 UV.
#define READUYVY __asm {                                                       \
    __asm movdqu     xmm4, [eax]          /* UYVY */                           \
    __asm pshufb     xmm4, xmmword ptr kShuffleUYVYY                           \
    __asm movdqu     xmm0, [eax]          /* UV */                             \
    __asm pshufb     xmm0, xmmword ptr kShuffleUYVYUV                          \
    __asm lea        eax, [eax + 16]                                           \
  }

// Convert 8 pixels: 8 UV and 8 Y.
#define YUVTORGB(YuvConstants) __asm {                                         \
    __asm movdqa     xmm1, xmm0                                                \
    __asm movdqa     xmm2, xmm0                                                \
    __asm movdqa     xmm3, xmm0                                                \
    __asm movdqa     xmm0, xmmword ptr [YuvConstants + KUVBIASB]               \
    __asm pmaddubsw  xmm1, xmmword ptr [YuvConstants + KUVTOB]                 \
    __asm psubw      xmm0, xmm1                                                \
    __asm movdqa     xmm1, xmmword ptr [YuvConstants + KUVBIASG]               \
    __asm pmaddubsw  xmm2, xmmword ptr [YuvConstants + KUVTOG]                 \
    __asm psubw      xmm1, xmm2                                                \
    __asm movdqa     xmm2, xmmword ptr [YuvConstants + KUVBIASR]               \
    __asm pmaddubsw  xmm3, xmmword ptr [YuvConstants + KUVTOR]                 \
    __asm psubw      xmm2, xmm3                                                \
    __asm pmulhuw    xmm4, xmmword ptr [YuvConstants + KYTORGB]                \
    __asm paddsw     xmm0, xmm4           /* B += Y */                         \
    __asm paddsw     xmm1, xmm4           /* G += Y */                         \
    __asm paddsw     xmm2, xmm4           /* R += Y */                         \
    __asm psraw      xmm0, 6                                                   \
    __asm psraw      xmm1, 6                                                   \
    __asm psraw      xmm2, 6                                                   \
    __asm packuswb   xmm0, xmm0           /* B */                              \
    __asm packuswb   xmm1, xmm1           /* G */                              \
    __asm packuswb   xmm2, xmm2           /* R */                              \
  }

// Store 8 ARGB values.
#define STOREARGB __asm {                                                      \
    __asm punpcklbw  xmm0, xmm1           /* BG */                             \
    __asm punpcklbw  xmm2, xmm5           /* RA */                             \
    __asm movdqa     xmm1, xmm0                                                \
    __asm punpcklwd  xmm0, xmm2           /* BGRA first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm2           /* BGRA next 4 pixels */             \
    __asm movdqu     0[edx], xmm0                                              \
    __asm movdqu     16[edx], xmm1                                             \
    __asm lea        edx,  [edx + 32]                                          \
  }

// Store 8 BGRA values.
#define STOREBGRA __asm {                                                      \
    __asm pcmpeqb    xmm5, xmm5           /* generate 0xffffffff for alpha */  \
    __asm punpcklbw  xmm1, xmm0           /* GB */                             \
    __asm punpcklbw  xmm5, xmm2           /* AR */                             \
    __asm movdqa     xmm0, xmm5                                                \
    __asm punpcklwd  xmm5, xmm1           /* BGRA first 4 pixels */            \
    __asm punpckhwd  xmm0, xmm1           /* BGRA next 4 pixels */             \
    __asm movdqu     0[edx], xmm5                                              \
    __asm movdqu     16[edx], xmm0                                             \
    __asm lea        edx,  [edx + 32]                                          \
  }

// Store 8 RGBA values.
#define STORERGBA __asm {                                                      \
    __asm pcmpeqb    xmm5, xmm5           /* generate 0xffffffff for alpha */  \
    __asm punpcklbw  xmm1, xmm2           /* GR */                             \
    __asm punpcklbw  xmm5, xmm0           /* AB */                             \
    __asm movdqa     xmm0, xmm5                                                \
    __asm punpcklwd  xmm5, xmm1           /* RGBA first 4 pixels */            \
    __asm punpckhwd  xmm0, xmm1           /* RGBA next 4 pixels */             \
    __asm movdqu     0[edx], xmm5                                              \
    __asm movdqu     16[edx], xmm0                                             \
    __asm lea        edx,  [edx + 32]                                          \
  }

// Store 8 RGB24 values.
#define STORERGB24 __asm {                                                     \
    /* Weave into RRGB */                                                      \
    __asm punpcklbw  xmm0, xmm1           /* BG */                             \
    __asm punpcklbw  xmm2, xmm2           /* RR */                             \
    __asm movdqa     xmm1, xmm0                                                \
    __asm punpcklwd  xmm0, xmm2           /* BGRR first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm2           /* BGRR next 4 pixels */             \
    /* RRGB -> RGB24 */                                                        \
    __asm pshufb     xmm0, xmm5           /* Pack first 8 and last 4 bytes. */ \
    __asm pshufb     xmm1, xmm6           /* Pack first 12 bytes. */           \
    __asm palignr    xmm1, xmm0, 12       /* last 4 bytes of xmm0 + 12 xmm1 */ \
    __asm movq       qword ptr 0[edx], xmm0  /* First 8 bytes */               \
    __asm movdqu     8[edx], xmm1         /* Last 16 bytes */                  \
    __asm lea        edx,  [edx + 24]                                          \
  }

// Store 8 RGB565 values.
#define STORERGB565 __asm {                                                    \
    /* Weave into RRGB */                                                      \
    __asm punpcklbw  xmm0, xmm1           /* BG */                             \
    __asm punpcklbw  xmm2, xmm2           /* RR */                             \
    __asm movdqa     xmm1, xmm0                                                \
    __asm punpcklwd  xmm0, xmm2           /* BGRR first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm2           /* BGRR next 4 pixels */             \
    /* RRGB -> RGB565 */                                                       \
    __asm movdqa     xmm3, xmm0    /* B  first 4 pixels of argb */             \
    __asm movdqa     xmm2, xmm0    /* G */                                     \
    __asm pslld      xmm0, 8       /* R */                                     \
    __asm psrld      xmm3, 3       /* B */                                     \
    __asm psrld      xmm2, 5       /* G */                                     \
    __asm psrad      xmm0, 16      /* R */                                     \
    __asm pand       xmm3, xmm5    /* B */                                     \
    __asm pand       xmm2, xmm6    /* G */                                     \
    __asm pand       xmm0, xmm7    /* R */                                     \
    __asm por        xmm3, xmm2    /* BG */                                    \
    __asm por        xmm0, xmm3    /* BGR */                                   \
    __asm movdqa     xmm3, xmm1    /* B  next 4 pixels of argb */              \
    __asm movdqa     xmm2, xmm1    /* G */                                     \
    __asm pslld      xmm1, 8       /* R */                                     \
    __asm psrld      xmm3, 3       /* B */                                     \
    __asm psrld      xmm2, 5       /* G */                                     \
    __asm psrad      xmm1, 16      /* R */                                     \
    __asm pand       xmm3, xmm5    /* B */                                     \
    __asm pand       xmm2, xmm6    /* G */                                     \
    __asm pand       xmm1, xmm7    /* R */                                     \
    __asm por        xmm3, xmm2    /* BG */                                    \
    __asm por        xmm1, xmm3    /* BGR */                                   \
    __asm packssdw   xmm0, xmm1                                                \
    __asm movdqu     0[edx], xmm0  /* store 8 pixels of RGB565 */              \
    __asm lea        edx, [edx + 16]                                           \
  }
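// Note: STORERGB565 packs each pixel into the usual 16 bit layout
// (bits 15..11 = R >> 3, bits 10..5 = G >> 2, bits 4..0 = B >> 3); the
// 0x...f800 / 0x...07e0 / 0x...001f masks built by the callers select those
// fields after the shifts above, and packssdw merges the dwords into words.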

// 8 pixels.
// 8 UV values, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked)
void I444ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         const struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5            // generate 0xffffffff for alpha

 convertloop:
    READYUV444
    YUVTORGB(ebx)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RGB24 (24 bytes).
__declspec(naked)
void I422ToRGB24Row_SSSE3(const uint8* y_buf,
                          const uint8* u_buf,
                          const uint8* v_buf,
                          uint8* dst_rgb24,
                          const struct YuvConstants* yuvconstants,
                          int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    movdqa     xmm5, xmmword ptr kShuffleMaskARGBToRGB24_0
    movdqa     xmm6, xmmword ptr kShuffleMaskARGBToRGB24

 convertloop:
    READYUV422
    YUVTORGB(ebx)
    STORERGB24

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RGB565 (16 bytes).
__declspec(naked)
void I422ToRGB565Row_SSSE3(const uint8* y_buf,
                           const uint8* u_buf,
                           const uint8* v_buf,
                           uint8* rgb565_buf,
                           const struct YuvConstants* yuvconstants,
                           int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5       // generate mask 0x0000001f
    psrld      xmm5, 27
    pcmpeqb    xmm6, xmm6       // generate mask 0x000007e0
    psrld      xmm6, 26
    pslld      xmm6, 5
    pcmpeqb    xmm7, xmm7       // generate mask 0xfffff800
    pslld      xmm7, 11

 convertloop:
    READYUV422
    YUVTORGB(ebx)
    STORERGB565

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked)
void I422ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         const struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READYUV422
    YUVTORGB(ebx)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y and 8 A producing 8 ARGB.
__declspec(naked)
void I422AlphaToARGBRow_SSSE3(const uint8* y_buf,
                              const uint8* u_buf,
                              const uint8* v_buf,
                              const uint8* a_buf,
                              uint8* dst_argb,
                              const struct YuvConstants* yuvconstants,
                              int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    push       ebp
    mov        eax, [esp + 16 + 4]   // Y
    mov        esi, [esp + 16 + 8]   // U
    mov        edi, [esp + 16 + 12]  // V
    mov        ebp, [esp + 16 + 16]  // A
    mov        edx, [esp + 16 + 20]  // argb
    mov        ebx, [esp + 16 + 24]  // yuvconstants
    mov        ecx, [esp + 16 + 28]  // width
    sub        edi, esi

 convertloop:
    READYUVA422
    YUVTORGB(ebx)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        ebp
    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 2 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
// Similar to I420 but duplicate UV once more.
__declspec(naked)
void I411ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         const struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    push       ebp
    mov        eax, [esp + 16 + 4]   // Y
    mov        esi, [esp + 16 + 8]   // U
    mov        edi, [esp + 16 + 12]  // V
    mov        edx, [esp + 16 + 16]  // argb
    mov        ebp, [esp + 16 + 20]  // yuvconstants
    mov        ecx, [esp + 16 + 24]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5            // generate 0xffffffff for alpha

 convertloop:
    READYUV411_EBX
    YUVTORGB(ebp)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        ebp
    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked)
void NV12ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* uv_buf,
                         uint8* dst_argb,
                         const struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       esi
    push       ebx
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // UV
    mov        edx, [esp + 8 + 12]  // argb
    mov        ebx, [esp + 8 + 16]  // yuvconstants
    mov        ecx, [esp + 8 + 20]  // width
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READNV12
    YUVTORGB(ebx)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
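// NV12 chroma layout sketch (illustrative): uv_buf is U0 V0 U1 V1 ..., one
// U,V pair shared by two horizontal Y samples, i.e. for pixel x:
//   uint8 u = uv_buf[2 * (x / 2) + 0];
//   uint8 v = uv_buf[2 * (x / 2) + 1];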
__declspec(naked)
void NV21ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* vu_buf,
                         uint8* dst_argb,
                         const struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       esi
    push       ebx
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // VU
    mov        edx, [esp + 8 + 12]  // argb
    mov        ebx, [esp + 8 + 16]  // yuvconstants
    mov        ecx, [esp + 8 + 20]  // width
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READNV21
    YUVTORGB(ebx)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 YUY2 values with 8 Y and 4 UV producing 8 ARGB (32 bytes).
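// YUY2 byte layout sketch (illustrative): Y0 U0 Y1 V0 Y2 U1 Y3 V1 ..., so for
// pixel x:
//   uint8 y = src_yuy2[2 * x];
//   uint8 u = src_yuy2[4 * (x / 2) + 1];
//   uint8 v = src_yuy2[4 * (x / 2) + 3];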
__declspec(naked)
void YUY2ToARGBRow_SSSE3(const uint8* src_yuy2,
                         uint8* dst_argb,
                         const struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       ebx
    mov        eax, [esp + 4 + 4]   // yuy2
    mov        edx, [esp + 4 + 8]   // argb
    mov        ebx, [esp + 4 + 12]  // yuvconstants
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READYUY2
    YUVTORGB(ebx)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    ret
  }
}

// 8 pixels.
// 4 UYVY values with 8 Y and 4 UV producing 8 ARGB (32 bytes).
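// UYVY byte layout sketch (illustrative): U0 Y0 V0 Y1 U1 Y2 V1 Y3 ..., so for
// pixel x:
//   uint8 y = src_uyvy[2 * x + 1];
//   uint8 u = src_uyvy[4 * (x / 2) + 0];
//   uint8 v = src_uyvy[4 * (x / 2) + 2];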
__declspec(naked)
void UYVYToARGBRow_SSSE3(const uint8* src_uyvy,
                         uint8* dst_argb,
                         const struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       ebx
    mov        eax, [esp + 4 + 4]   // uyvy
    mov        edx, [esp + 4 + 8]   // argb
    mov        ebx, [esp + 4 + 12]  // yuvconstants
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READUYVY
    YUVTORGB(ebx)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    ret
  }
}

__declspec(naked)
void I422ToRGBARow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_rgba,
                         const struct YuvConstants* yuvconstants,
                         int width) {
  __asm {
    push       esi
    push       edi
    push       ebx
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ebx, [esp + 12 + 20]  // yuvconstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi

 convertloop:
    READYUV422
    YUVTORGB(ebx)
    STORERGBA

    sub        ecx, 8
    jg         convertloop

    pop        ebx
    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_I422TOARGBROW_SSSE3

#ifdef HAS_I400TOARGBROW_SSE2
// 8 pixels of Y converted to 8 pixels of ARGB (32 bytes).
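// Scalar sketch of the math below (illustrative only; the SSE2 code keeps the
// value in fixed point and rounds slightly differently):
//   int g = ((y - 16) * 1164 + 500) / 1000;        // (y - 16) * 1.164
//   g = g < 0 ? 0 : (g > 255 ? 255 : g);
//   argb = 0xff000000 | (g << 16) | (g << 8) | g;  // opaque gray pixel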
__declspec(naked)
void I400ToARGBRow_SSE2(const uint8* y_buf,
                        uint8* rgb_buf,
                        int width) {
  __asm {
    mov        eax, 0x4a354a35      // 4a35 = 18997 = round(1.164 * 64 * 256)
    movd       xmm2, eax
    pshufd     xmm2, xmm2, 0
    mov        eax, 0x04880488      // 0488 = 1160 = round(1.164 * 64 * 16)
    movd       xmm3, eax
    pshufd     xmm3, xmm3, 0
    pcmpeqb    xmm4, xmm4           // generate mask 0xff000000
    pslld      xmm4, 24

    mov        eax, [esp + 4]       // Y
    mov        edx, [esp + 8]       // rgb
    mov        ecx, [esp + 12]      // width

 convertloop:
    // Step 1: Scale Y contribution to 8 G values. G = (y - 16) * 1.164
    movq       xmm0, qword ptr [eax]
    lea        eax, [eax + 8]
    punpcklbw  xmm0, xmm0           // Y.Y
    pmulhuw    xmm0, xmm2
    psubusw    xmm0, xmm3
    psrlw      xmm0, 6
    packuswb   xmm0, xmm0           // G

    // Step 2: Weave into ARGB
    punpcklbw  xmm0, xmm0           // GG
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm0           // BGRA first 4 pixels
    punpckhwd  xmm1, xmm1           // BGRA next 4 pixels
    por        xmm0, xmm4
    por        xmm1, xmm4
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx,  [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
#endif  // HAS_I400TOARGBROW_SSE2

#ifdef HAS_I400TOARGBROW_AVX2
// 16 pixels of Y converted to 16 pixels of ARGB (64 bytes).
// note: vpunpcklbw mutates and vpackuswb unmutates.
__declspec(naked)
void I400ToARGBRow_AVX2(const uint8* y_buf,
                        uint8* rgb_buf,
                        int width) {
  __asm {
    mov        eax, 0x4a354a35      // 4a35 = 18997 = round(1.164 * 64 * 256)
    vmovd      xmm2, eax
    vbroadcastss ymm2, xmm2
    mov        eax, 0x04880488      // 0488 = 1160 = round(1.164 * 64 * 16)
    vmovd      xmm3, eax
    vbroadcastss ymm3, xmm3
    vpcmpeqb   ymm4, ymm4, ymm4     // generate mask 0xff000000
    vpslld     ymm4, ymm4, 24

    mov        eax, [esp + 4]       // Y
    mov        edx, [esp + 8]       // rgb
    mov        ecx, [esp + 12]      // width

 convertloop:
    // Step 1: Scale Y contribution to 16 G values. G = (y - 16) * 1.164
    vmovdqu    xmm0, [eax]
    lea        eax, [eax + 16]
    vpermq     ymm0, ymm0, 0xd8           // vpunpcklbw mutates
    vpunpcklbw ymm0, ymm0, ymm0           // Y.Y
    vpmulhuw   ymm0, ymm0, ymm2
    vpsubusw   ymm0, ymm0, ymm3
    vpsrlw     ymm0, ymm0, 6
    vpackuswb  ymm0, ymm0, ymm0           // G.  still mutated: 3120

    // TODO(fbarchard): Weave alpha with unpack.
    // Step 2: Weave into ARGB
    vpunpcklbw ymm1, ymm0, ymm0           // GG - mutates
    vpermq     ymm1, ymm1, 0xd8
    vpunpcklwd ymm0, ymm1, ymm1           // GGGG first 8 pixels
    vpunpckhwd ymm1, ymm1, ymm1           // GGGG next 8 pixels
    vpor       ymm0, ymm0, ymm4
    vpor       ymm1, ymm1, ymm4
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx,  [edx + 64]
    sub        ecx, 16
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_I400TOARGBROW_AVX2

#ifdef HAS_MIRRORROW_SSSE3
// Shuffle table for reversing the bytes.
static const uvec8 kShuffleMirror = {
  15u, 14u, 13u, 12u, 11u, 10u, 9u, 8u, 7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u
};

// TODO(fbarchard): Replace lea with -16 offset.
__declspec(naked)
void MirrorRow_SSSE3(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    movdqa    xmm5, xmmword ptr kShuffleMirror

 convertloop:
    movdqu    xmm0, [eax - 16 + ecx]
    pshufb    xmm0, xmm5
    movdqu    [edx], xmm0
    lea       edx, [edx + 16]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}
#endif  // HAS_MIRRORROW_SSSE3

#ifdef HAS_MIRRORROW_AVX2
__declspec(naked)
void MirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    vbroadcastf128 ymm5, xmmword ptr kShuffleMirror

 convertloop:
    vmovdqu   ymm0, [eax - 32 + ecx]
    vpshufb   ymm0, ymm0, ymm5
    vpermq    ymm0, ymm0, 0x4e  // swap high and low halves
    vmovdqu   [edx], ymm0
    lea       edx, [edx + 32]
    sub       ecx, 32
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_MIRRORROW_AVX2

#ifdef HAS_MIRRORROW_UV_SSSE3
// Shuffle table for reversing the bytes of UV channels.
static const uvec8 kShuffleMirrorUV = {
  14u, 12u, 10u, 8u, 6u, 4u, 2u, 0u, 15u, 13u, 11u, 9u, 7u, 5u, 3u, 1u
};

__declspec(naked)
void MirrorUVRow_SSSE3(const uint8* src, uint8* dst_u, uint8* dst_v,
                       int width) {
  __asm {
    push      edi
    mov       eax, [esp + 4 + 4]   // src
    mov       edx, [esp + 4 + 8]   // dst_u
    mov       edi, [esp + 4 + 12]  // dst_v
    mov       ecx, [esp + 4 + 16]  // width
    movdqa    xmm1, xmmword ptr kShuffleMirrorUV
    lea       eax, [eax + ecx * 2 - 16]
    sub       edi, edx

 convertloop:
    movdqu    xmm0, [eax]
    lea       eax, [eax - 16]
    pshufb    xmm0, xmm1
    movlpd    qword ptr [edx], xmm0
    movhpd    qword ptr [edx + edi], xmm0
    lea       edx, [edx + 8]
    sub       ecx, 8
    jg        convertloop

    pop       edi
    ret
  }
}
#endif  // HAS_MIRRORROW_UV_SSSE3

#ifdef HAS_ARGBMIRRORROW_SSE2
__declspec(naked)
void ARGBMirrorRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    lea       eax, [eax - 16 + ecx * 4]  // last 4 pixels.

 convertloop:
    movdqu    xmm0, [eax]
    lea       eax, [eax - 16]
    pshufd    xmm0, xmm0, 0x1b
    movdqu    [edx], xmm0
    lea       edx, [edx + 16]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}
#endif  // HAS_ARGBMIRRORROW_SSE2

#ifdef HAS_ARGBMIRRORROW_AVX2
// Shuffle table for reversing the bytes.
static const ulvec32 kARGBShuffleMirror_AVX2 = {
  7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u
};

__declspec(naked)
void ARGBMirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    vmovdqu   ymm5, ymmword ptr kARGBShuffleMirror_AVX2

 convertloop:
    vpermd    ymm0, ymm5, [eax - 32 + ecx * 4]  // permute dword order
    vmovdqu   [edx], ymm0
    lea       edx, [edx + 32]
    sub       ecx, 8
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBMIRRORROW_AVX2

#ifdef HAS_SPLITUVROW_SSE2
__declspec(naked)
void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
                     int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uv
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // width
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    movdqa     xmm2, xmm0
    movdqa     xmm3, xmm1
    pand       xmm0, xmm5   // even bytes
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    psrlw      xmm2, 8      // odd bytes
    psrlw      xmm3, 8
    packuswb   xmm2, xmm3
    movdqu     [edx], xmm0
    movdqu     [edx + edi], xmm2
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}

#endif  // HAS_SPLITUVROW_SSE2

#ifdef HAS_SPLITUVROW_AVX2
__declspec(naked)
void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
                     int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uv
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // width
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm2, ymm0, 8      // odd bytes
    vpsrlw     ymm3, ymm1, 8
    vpand      ymm0, ymm0, ymm5   // even bytes
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1
    vpackuswb  ymm2, ymm2, ymm3
    vpermq     ymm0, ymm0, 0xd8
    vpermq     ymm2, ymm2, 0xd8
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + edi], ymm2
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}
#endif  // HAS_SPLITUVROW_AVX2

#ifdef HAS_MERGEUVROW_SSE2
__declspec(naked)
void MergeUVRow_SSE2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
                     int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_u
    mov        edx, [esp + 4 + 8]    // src_v
    mov        edi, [esp + 4 + 12]   // dst_uv
    mov        ecx, [esp + 4 + 16]   // width
    sub        edx, eax

  convertloop:
    movdqu     xmm0, [eax]      // read 16 U's
    movdqu     xmm1, [eax + edx]  // and 16 V's
    lea        eax,  [eax + 16]
    movdqa     xmm2, xmm0
    punpcklbw  xmm0, xmm1       // first 8 UV pairs
    punpckhbw  xmm2, xmm1       // next 8 UV pairs
    movdqu     [edi], xmm0
    movdqu     [edi + 16], xmm2
    lea        edi, [edi + 32]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}
#endif  //  HAS_MERGEUVROW_SSE2

#ifdef HAS_MERGEUVROW_AVX2
__declspec(naked)
void MergeUVRow_AVX2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
                     int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_u
    mov        edx, [esp + 4 + 8]    // src_v
    mov        edi, [esp + 4 + 12]   // dst_uv
    mov        ecx, [esp + 4 + 16]   // width
    sub        edx, eax

  convertloop:
    vmovdqu    ymm0, [eax]           // read 32 U's
    vmovdqu    ymm1, [eax + edx]     // and 32 V's
    lea        eax,  [eax + 32]
    vpunpcklbw ymm2, ymm0, ymm1      // low 16 UV pairs. mutated qqword 0,2
    vpunpckhbw ymm0, ymm0, ymm1      // high 16 UV pairs. mutated qqword 1,3
    vextractf128 [edi], ymm2, 0       // bytes 0..15
    vextractf128 [edi + 16], ymm0, 0  // bytes 16..31
    vextractf128 [edi + 32], ymm2, 1  // bytes 32..47
    vextractf128 [edi + 48], ymm0, 1  // bytes 48..63
    lea        edi, [edi + 64]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}
#endif  //  HAS_MERGEUVROW_AVX2

#ifdef HAS_COPYROW_SSE2
// CopyRow copies 'count' bytes using a 16 byte load/store, 32 bytes at a time.
__declspec(naked)
void CopyRow_SSE2(const uint8* src, uint8* dst, int count) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    test       eax, 15
    jne        convertloopu
    test       edx, 15
    jne        convertloopu

  convertloopa:
    movdqa     xmm0, [eax]
    movdqa     xmm1, [eax + 16]
    lea        eax, [eax + 32]
    movdqa     [edx], xmm0
    movdqa     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloopa
    ret

  convertloopu:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax, [eax + 32]
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloopu
    ret
  }
}
#endif  // HAS_COPYROW_SSE2

#ifdef HAS_COPYROW_AVX
// CopyRow copies 'count' bytes using a 32 byte load/store, 64 bytes at a time.
__declspec(naked)
void CopyRow_AVX(const uint8* src, uint8* dst, int count) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax, [eax + 64]
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx, [edx + 64]
    sub        ecx, 64
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_COPYROW_AVX

// Multiple of 1.
__declspec(naked)
void CopyRow_ERMS(const uint8* src, uint8* dst, int count) {
  __asm {
    mov        eax, esi
    mov        edx, edi
    mov        esi, [esp + 4]   // src
    mov        edi, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    rep movsb
    mov        edi, edx
    mov        esi, eax
    ret
  }
}

#ifdef HAS_ARGBCOPYALPHAROW_SSE2
// width in pixels
__declspec(naked)
void ARGBCopyAlphaRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    pcmpeqb    xmm0, xmm0       // generate mask 0xff000000
    pslld      xmm0, 24
    pcmpeqb    xmm1, xmm1       // generate mask 0x00ffffff
    psrld      xmm1, 8

  convertloop:
    movdqu     xmm2, [eax]
    movdqu     xmm3, [eax + 16]
    lea        eax, [eax + 32]
    movdqu     xmm4, [edx]
    movdqu     xmm5, [edx + 16]
    pand       xmm2, xmm0
    pand       xmm3, xmm0
    pand       xmm4, xmm1
    pand       xmm5, xmm1
    por        xmm2, xmm4
    por        xmm3, xmm5
    movdqu     [edx], xmm2
    movdqu     [edx + 16], xmm3
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBCOPYALPHAROW_SSE2

#ifdef HAS_ARGBCOPYALPHAROW_AVX2
// width in pixels
__declspec(naked)
void ARGBCopyAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    vpcmpeqb   ymm0, ymm0, ymm0
    vpsrld     ymm0, ymm0, 8    // generate mask 0x00ffffff

  convertloop:
    vmovdqu    ymm1, [eax]
    vmovdqu    ymm2, [eax + 32]
    lea        eax, [eax + 64]
    vpblendvb  ymm1, ymm1, [edx], ymm0
    vpblendvb  ymm2, ymm2, [edx + 32], ymm0
    vmovdqu    [edx], ymm1
    vmovdqu    [edx + 32], ymm2
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBCOPYALPHAROW_AVX2

#ifdef HAS_ARGBCOPYYTOALPHAROW_SSE2
// width in pixels
__declspec(naked)
void ARGBCopyYToAlphaRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    pcmpeqb    xmm0, xmm0       // generate mask 0xff000000
    pslld      xmm0, 24
    pcmpeqb    xmm1, xmm1       // generate mask 0x00ffffff
    psrld      xmm1, 8

  convertloop:
    movq       xmm2, qword ptr [eax]  // 8 Y's
    lea        eax, [eax + 8]
    punpcklbw  xmm2, xmm2
    punpckhwd  xmm3, xmm2
    punpcklwd  xmm2, xmm2
    movdqu     xmm4, [edx]
    movdqu     xmm5, [edx + 16]
    pand       xmm2, xmm0
    pand       xmm3, xmm0
    pand       xmm4, xmm1
    pand       xmm5, xmm1
    por        xmm2, xmm4
    por        xmm3, xmm5
    movdqu     [edx], xmm2
    movdqu     [edx + 16], xmm3
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBCOPYYTOALPHAROW_SSE2

#ifdef HAS_ARGBCOPYYTOALPHAROW_AVX2
// width in pixels
__declspec(naked)
void ARGBCopyYToAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    vpcmpeqb   ymm0, ymm0, ymm0
    vpsrld     ymm0, ymm0, 8    // generate mask 0x00ffffff

  convertloop:
    vpmovzxbd  ymm1, qword ptr [eax]
    vpmovzxbd  ymm2, qword ptr [eax + 8]
    lea        eax, [eax + 16]
    vpslld     ymm1, ymm1, 24
    vpslld     ymm2, ymm2, 24
    vpblendvb  ymm1, ymm1, [edx], ymm0
    vpblendvb  ymm2, ymm2, [edx + 32], ymm0
    vmovdqu    [edx], ymm1
    vmovdqu    [edx + 32], ymm2
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBCOPYYTOALPHAROW_AVX2

#ifdef HAS_SETROW_X86
// Write 'count' bytes using an 8 bit value repeated.
// Count should be multiple of 4.
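// Equivalent scalar idea (illustrative): splat the byte into a dword, then
// store count / 4 dwords:
//   uint32 v32 = v8 * 0x01010101u;
//   for (int i = 0; i < count / 4; ++i) {
//     ((uint32*)dst)[i] = v32;
//   }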
__declspec(naked)
void SetRow_X86(uint8* dst, uint8 v8, int count) {
  __asm {
    movzx      eax, byte ptr [esp + 8]    // v8
    mov        edx, 0x01010101  // Duplicate byte to all bytes.
    mul        edx              // overwrites edx with upper part of result.
    mov        edx, edi
    mov        edi, [esp + 4]   // dst
    mov        ecx, [esp + 12]  // count
    shr        ecx, 2
    rep stosd
    mov        edi, edx
    ret
  }
}

// Write 'count' bytes using an 8 bit value repeated.
__declspec(naked)
void SetRow_ERMS(uint8* dst, uint8 v8, int count) {
  __asm {
    mov        edx, edi
    mov        edi, [esp + 4]   // dst
    mov        eax, [esp + 8]   // v8
    mov        ecx, [esp + 12]  // count
    rep stosb
    mov        edi, edx
    ret
  }
}

// Write 'count' 32 bit values.
__declspec(naked)
void ARGBSetRow_X86(uint8* dst_argb, uint32 v32, int count) {
  __asm {
    mov        edx, edi
    mov        edi, [esp + 4]   // dst
    mov        eax, [esp + 8]   // v32
    mov        ecx, [esp + 12]  // count
    rep stosd
    mov        edi, edx
    ret
  }
}
#endif  // HAS_SETROW_X86

#ifdef HAS_YUY2TOYROW_AVX2
__declspec(naked)
void YUY2ToYRow_AVX2(const uint8* src_yuy2, uint8* dst_y, int width) {
  __asm {
    mov        eax, [esp + 4]    // src_yuy2
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // width
    vpcmpeqb   ymm5, ymm5, ymm5  // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpand      ymm0, ymm0, ymm5   // even bytes are Y
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    vzeroupper
    ret
  }
}

__declspec(naked)
void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2,
                      uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_yuy2
    mov        esi, [esp + 8 + 8]    // stride_yuy2
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // width
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vpavgb     ymm0, ymm0, [eax + esi]
    vpavgb     ymm1, ymm1, [eax + esi + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm0, ymm0, 8      // YUYV -> UVUV
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}

__declspec(naked)
void YUY2ToUV422Row_AVX2(const uint8* src_yuy2,
                         uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_yuy2
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // width
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm0, ymm0, 8      // YUYV -> UVUV
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}

__declspec(naked)
void UYVYToYRow_AVX2(const uint8* src_uyvy,
                     uint8* dst_y, int width) {
  __asm {
    mov        eax, [esp + 4]    // src_uyvy
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // width

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm0, ymm0, 8      // odd bytes are Y
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    vzeroupper
    ret
  }
}

__declspec(naked)
void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy,
                      uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_uyvy
    mov        esi, [esp + 8 + 8]    // stride_uyvy
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // width
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vpavgb     ymm0, ymm0, [eax + esi]
    vpavgb     ymm1, ymm1, [eax + esi + 32]
    lea        eax,  [eax + 64]
    vpand      ymm0, ymm0, ymm5   // UYVY -> UVUV
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}

__declspec(naked)
void UYVYToUV422Row_AVX2(const uint8* src_uyvy,
                         uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uyvy
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // width
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpand      ymm0, ymm0, ymm5   // UYVY -> UVUV
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}
#endif  // HAS_YUY2TOYROW_AVX2

#ifdef HAS_YUY2TOYROW_SSE2
__declspec(naked)
void YUY2ToYRow_SSE2(const uint8* src_yuy2,
                     uint8* dst_y, int width) {
  __asm {
    mov        eax, [esp + 4]    // src_yuy2
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // width
    pcmpeqb    xmm5, xmm5        // generate mask 0x00ff00ff
    psrlw      xmm5, 8

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    pand       xmm0, xmm5   // even bytes are Y
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2,
                      uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_yuy2
    mov        esi, [esp + 8 + 8]    // stride_yuy2
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // width
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + esi]
    movdqu     xmm3, [eax + esi + 16]
    lea        eax,  [eax + 32]
    pavgb      xmm0, xmm2
    pavgb      xmm1, xmm3
    psrlw      xmm0, 8      // YUYV -> UVUV
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void YUY2ToUV422Row_SSE2(const uint8* src_yuy2,
                         uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_yuy2
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // width
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    psrlw      xmm0, 8      // YUYV -> UVUV
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}

__declspec(naked)
void UYVYToYRow_SSE2(const uint8* src_uyvy,
                     uint8* dst_y, int width) {
  __asm {
    mov        eax, [esp + 4]    // src_uyvy
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // width

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    psrlw      xmm0, 8    // odd bytes are Y
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy,
                      uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_uyvy
    mov        esi, [esp + 8 + 8]    // stride_uyvy
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // width
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + esi]
    movdqu     xmm3, [eax + esi + 16]
    lea        eax,  [eax + 32]
    pavgb      xmm0, xmm2
    pavgb      xmm1, xmm3
    pand       xmm0, xmm5   // UYVY -> UVUV
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void UYVYToUV422Row_SSE2(const uint8* src_uyvy,
                         uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uyvy
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // width
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    pand       xmm0, xmm5   // UYVY -> UVUV
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}
#endif  // HAS_YUY2TOYROW_SSE2

#ifdef HAS_BLENDPLANEROW_SSSE3
// Blend 8 pixels at a time.
// unsigned version of math
// =((A2*C2)+(B2*(255-C2))+255)/256
// signed version of math
// =(((A2-128)*C2)+((B2-128)*(255-C2))+32768+127)/256
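// Scalar sketch of the unsigned form above (illustrative only; the SSSE3 code
// below uses the signed form so pmaddubsw can do both multiplies at once):
//   for (int i = 0; i < width; ++i) {
//     dst[i] = (uint8)((src0[i] * alpha[i] +
//                       src1[i] * (255 - alpha[i]) + 255) >> 8);
//   }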
__declspec(naked)
void BlendPlaneRow_SSSE3(const uint8* src0, const uint8* src1,
                         const uint8* alpha, uint8* dst, int width) {
  __asm {
    push       esi
    push       edi
    pcmpeqb    xmm5, xmm5       // generate mask 0xff00ff00
    psllw      xmm5, 8
    mov        eax, 0x80808080  // 128 for biasing image to signed.
    movd       xmm6, eax
    pshufd     xmm6, xmm6, 0x00

    mov        eax, 0x807f807f  // 32768 + 127 for unbias and round.
    movd       xmm7, eax
    pshufd     xmm7, xmm7, 0x00
    mov        eax, [esp + 8 + 4]   // src0
    mov        edx, [esp + 8 + 8]   // src1
    mov        esi, [esp + 8 + 12]  // alpha
    mov        edi, [esp + 8 + 16]  // dst
    mov        ecx, [esp + 8 + 20]  // width
    sub        eax, esi
    sub        edx, esi
    sub        edi, esi

    // 8 pixel loop.
  convertloop8:
    movq       xmm0, qword ptr [esi]        // alpha
    punpcklbw  xmm0, xmm0
    pxor       xmm0, xmm5         // a, 255-a
    movq       xmm1, qword ptr [eax + esi]  // src0
    movq       xmm2, qword ptr [edx + esi]  // src1
    punpcklbw  xmm1, xmm2
    psubb      xmm1, xmm6         // bias src0/1 - 128
    pmaddubsw  xmm0, xmm1
    paddw      xmm0, xmm7         // unbias result - 32768 and round.
    psrlw      xmm0, 8
    packuswb   xmm0, xmm0
    movq       qword ptr [edi + esi], xmm0
    lea        esi, [esi + 8]
    sub        ecx, 8
    jg         convertloop8

    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_BLENDPLANEROW_SSSE3

#ifdef HAS_BLENDPLANEROW_AVX2
// Blend 32 pixels at a time.
// unsigned version of math
// =((A2*C2)+(B2*(255-C2))+255)/256
// signed version of math
// =(((A2-128)*C2)+((B2-128)*(255-C2))+32768+127)/256
__declspec(naked)
void BlendPlaneRow_AVX2(const uint8* src0, const uint8* src1,
                         const uint8* alpha, uint8* dst, int width) {
  __asm {
    push        esi
    push        edi
    vpcmpeqb    ymm5, ymm5, ymm5       // generate mask 0xff00ff00
    vpsllw      ymm5, ymm5, 8
    mov         eax, 0x80808080  // 128 for biasing image to signed.
    vmovd       xmm6, eax
    vbroadcastss ymm6, xmm6
    mov         eax, 0x807f807f  // 32768 + 127 for unbias and round.
    vmovd       xmm7, eax
    vbroadcastss ymm7, xmm7
    mov         eax, [esp + 8 + 4]   // src0
    mov         edx, [esp + 8 + 8]   // src1
    mov         esi, [esp + 8 + 12]  // alpha
    mov         edi, [esp + 8 + 16]  // dst
    mov         ecx, [esp + 8 + 20]  // width
    sub         eax, esi
    sub         edx, esi
    sub         edi, esi

    // 32 pixel loop.
  convertloop32:
    vmovdqu     ymm0, [esi]        // alpha
    vpunpckhbw  ymm3, ymm0, ymm0   // 8..15, 24..31
    vpunpcklbw  ymm0, ymm0, ymm0   // 0..7, 16..23
    vpxor       ymm3, ymm3, ymm5   // a, 255-a
    vpxor       ymm0, ymm0, ymm5   // a, 255-a
    vmovdqu     ymm1, [eax + esi]  // src0
    vmovdqu     ymm2, [edx + esi]  // src1
    vpunpckhbw  ymm4, ymm1, ymm2
    vpunpcklbw  ymm1, ymm1, ymm2
    vpsubb      ymm4, ymm4, ymm6   // bias src0/1 - 128
    vpsubb      ymm1, ymm1, ymm6   // bias src0/1 - 128
    vpmaddubsw  ymm3, ymm3, ymm4
    vpmaddubsw  ymm0, ymm0, ymm1
    vpaddw      ymm3, ymm3, ymm7   // unbias result - 32768 and round.
    vpaddw      ymm0, ymm0, ymm7   // unbias result - 32768 and round.
    vpsrlw      ymm3, ymm3, 8
    vpsrlw      ymm0, ymm0, 8
    vpackuswb   ymm0, ymm0, ymm3
    vmovdqu     [edi + esi], ymm0
    lea         esi, [esi + 32]
    sub         ecx, 32
    jg          convertloop32

    pop         edi
    pop         esi
    vzeroupper
    ret
  }
}
#endif  // HAS_BLENDPLANEROW_AVX2

#ifdef HAS_ARGBBLENDROW_SSSE3
// Shuffle table for isolating alpha.
static const uvec8 kShuffleAlpha = {
  3u, 0x80, 3u, 0x80, 7u, 0x80, 7u, 0x80,
  11u, 0x80, 11u, 0x80, 15u, 0x80, 15u, 0x80
};

// Blend 8 pixels at a time.
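// Scalar sketch of the blend computed below (illustrative; src_argb0 is the
// foreground, src_argb1 the background, and the color channels are processed
// in 16 bit lanes):
//   uint32 ia = 256 - fg_a;                          // inverse alpha
//   out_b = min(255, fg_b + ((bg_b * ia) >> 8));     // likewise for g and r
//   out_a = 255;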
__declspec(naked)
void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
                        uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm7, xmm7       // generate constant 0x0001
    psrlw      xmm7, 15
    pcmpeqb    xmm6, xmm6       // generate mask 0x00ff00ff
    psrlw      xmm6, 8
    pcmpeqb    xmm5, xmm5       // generate mask 0xff00ff00
    psllw      xmm5, 8
    pcmpeqb    xmm4, xmm4       // generate mask 0xff000000
    pslld      xmm4, 24
    sub        ecx, 4
    jl         convertloop4b    // less than 4 pixels?

    // 4 pixel loop.
  convertloop4:
    movdqu     xmm3, [eax]      // src argb
    lea        eax, [eax + 16]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movdqu     xmm2, [esi]      // _r_b
    pshufb     xmm3, xmmword ptr kShuffleAlpha // alpha
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movdqu     xmm1, [esi]      // _a_g
    lea        esi, [esi + 16]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        convertloop4

  convertloop4b:
    add        ecx, 4 - 1
    jl         convertloop1b

    // 1 pixel loop.
  convertloop1:
    movd       xmm3, [eax]      // src argb
    lea        eax, [eax + 4]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movd       xmm2, [esi]      // _r_b
    pshufb     xmm3, xmmword ptr kShuffleAlpha // alpha
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movd       xmm1, [esi]      // _a_g
    lea        esi, [esi + 4]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        convertloop1

  convertloop1b:
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBBLENDROW_SSSE3

#ifdef HAS_ARGBATTENUATEROW_SSSE3
// Shuffle table duplicating alpha.
static const uvec8 kShuffleAlpha0 = {
  3u, 3u, 3u, 3u, 3u, 3u, 128u, 128u, 7u, 7u, 7u, 7u, 7u, 7u, 128u, 128u,
};
static const uvec8 kShuffleAlpha1 = {
  11u, 11u, 11u, 11u, 11u, 11u, 128u, 128u,
  15u, 15u, 15u, 15u, 15u, 15u, 128u, 128u,
};
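// Scalar sketch of the attenuate math (illustrative; the SSSE3 code below
// approximates the divide by 255 with fixed point multiplies):
//   uint32 a = src_argb[3];
//   dst_argb[0] = (uint8)(src_argb[0] * a / 255);    // b
//   dst_argb[1] = (uint8)(src_argb[1] * a / 255);    // g
//   dst_argb[2] = (uint8)(src_argb[2] * a / 255);    // r
//   dst_argb[3] = (uint8)a;                          // alpha copied through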
__declspec(naked)
void ARGBAttenuateRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb0
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    pcmpeqb    xmm3, xmm3       // generate mask 0xff000000
    pslld      xmm3, 24
    movdqa     xmm4, xmmword ptr kShuffleAlpha0
    movdqa     xmm5, xmmword ptr kShuffleAlpha1

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    pshufb     xmm0, xmm4       // isolate first 2 alphas
    movdqu     xmm1, [eax]      // read 4 pixels
    punpcklbw  xmm1, xmm1       // first 2 pixel rgbs
    pmulhuw    xmm0, xmm1       // rgb * a
    movdqu     xmm1, [eax]      // read 4 pixels
    pshufb     xmm1, xmm5       // isolate next 2 alphas
    movdqu     xmm2, [eax]      // read 4 pixels
    punpckhbw  xmm2, xmm2       // next 2 pixel rgbs
    pmulhuw    xmm1, xmm2       // rgb * a
    movdqu     xmm2, [eax]      // mask original alpha
    lea        eax, [eax + 16]
    pand       xmm2, xmm3
    psrlw      xmm0, 8
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    por        xmm0, xmm2       // copy original alpha
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBATTENUATEROW_SSSE3

#ifdef HAS_ARGBATTENUATEROW_AVX2
// Shuffle table duplicating alpha.
static const uvec8 kShuffleAlpha_AVX2 = {
  6u, 7u, 6u, 7u, 6u, 7u, 128u, 128u, 14u, 15u, 14u, 15u, 14u, 15u, 128u, 128u
};
__declspec(naked)
void ARGBAttenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb0
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    sub        edx, eax
    vbroadcastf128 ymm4, xmmword ptr kShuffleAlpha_AVX2
    vpcmpeqb   ymm5, ymm5, ymm5 // generate mask 0xff000000
    vpslld     ymm5, ymm5, 24

 convertloop:
    vmovdqu    ymm6, [eax]       // read 8 pixels.
    vpunpcklbw ymm0, ymm6, ymm6  // low 4 pixels. mutated.
    vpunpckhbw ymm1, ymm6, ymm6  // high 4 pixels. mutated.
    vpshufb    ymm2, ymm0, ymm4  // low 4 alphas
    vpshufb    ymm3, ymm1, ymm4  // high 4 alphas
    vpmulhuw   ymm0, ymm0, ymm2  // rgb * a
    vpmulhuw   ymm1, ymm1, ymm3  // rgb * a
    vpand      ymm6, ymm6, ymm5  // isolate alpha
    vpsrlw     ymm0, ymm0, 8
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1  // unmutated.
    vpor       ymm0, ymm0, ymm6  // copy original alpha
    vmovdqu    [eax + edx], ymm0
    lea        eax, [eax + 32]
4355
    sub        ecx, 8
fbarchard@google.com's avatar

4358
    vzeroupper
fbarchard@google.com's avatar
  }
}
#endif  // HAS_ARGBATTENUATEROW_AVX2

#ifdef HAS_ARGBUNATTENUATEROW_SSE2
// Unattenuate 4 pixels at a time.
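// Scalar sketch of the unattenuate math for one channel (illustrative; the
// SSE2 code below replaces the divide with the fixed_invtbl8 reciprocal table
// and clamps via packuswb):
//   uint32 a = src_argb[3];
//   uint32 b = src_argb[0] * 255 / (a ? a : 255);
//   dst_argb[0] = (uint8)(b > 255 ? 255 : b);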
__declspec(naked)
void ARGBUnattenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb,
                             int width) {
  __asm {
    push       ebx
    push       esi
    push       edi
    mov        eax, [esp + 12 + 4]   // src_argb
    mov        edx, [esp + 12 + 8]   // dst_argb
    mov        ecx, [esp + 12 + 12]  // width
    lea        ebx, fixed_invtbl8

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    movzx      esi, byte ptr [eax + 3]  // first alpha
    movzx      edi, byte ptr [eax + 7]  // second alpha
    punpcklbw  xmm0, xmm0       // first 2
    movd       xmm2, dword ptr [ebx + esi * 4]
    movd       xmm3, dword ptr [ebx + edi * 4]
    pshuflw    xmm2, xmm2, 040h // first 4 inv_alpha words.  1, a, a, a
    pshuflw    xmm3, xmm3, 040h // next 4 inv_alpha words
    movlhps    xmm2, xmm3
    pmulhuw    xmm0, xmm2       // rgb * a

    movdqu     xmm1, [eax]      // read 4 pixels
    movzx      esi, byte ptr [eax + 11]  // third alpha
    movzx      edi, byte ptr [eax + 15]  // fourth alpha
    punpckhbw  xmm1, xmm1       // next 2
    movd       xmm2, dword ptr [ebx + esi * 4]
    movd       xmm3, dword ptr [ebx + edi * 4]
    pshuflw    xmm2, xmm2, 040h // first 4 inv_alpha words
    pshuflw    xmm3, xmm3, 040h // next 4 inv_alpha words
    movlhps    xmm2, xmm3
    pmulhuw    xmm1, xmm2       // rgb * a
    lea        eax, [eax + 16]
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    pop        edi
    pop        esi
    pop        ebx
    ret
  }
}
#endif  // HAS_ARGBUNATTENUATEROW_SSE2
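#if 0
// Scalar sketch of the unattenuate math used by the SSE2 row above and the
// AVX2 rows below (illustrative only, not compiled).  It assumes
// fixed_invtbl8[a] holds roughly 65536 / a in its low 16 bits, which is how
// the SIMD code uses the table.
static void ARGBUnattenuateRow_Ref(const uint8* src_argb, uint8* dst_argb,
                                   int width) {
  int i;
  for (i = 0; i < width; ++i) {
    uint32 b = src_argb[0];
    uint32 g = src_argb[1];
    uint32 r = src_argb[2];
    uint32 a = src_argb[3];
    uint32 ia = fixed_invtbl8[a] & 0xffff;  // ~65536 / a, 8.8 fixed point
    uint32 ub = (b * ia) >> 8;              // undo the premultiplication
    uint32 ug = (g * ia) >> 8;
    uint32 ur = (r * ia) >> 8;
    dst_argb[0] = (uint8)(ub > 255 ? 255 : ub);  // saturate like packuswb
    dst_argb[1] = (uint8)(ug > 255 ? 255 : ug);
    dst_argb[2] = (uint8)(ur > 255 ? 255 : ur);
    dst_argb[3] = (uint8)a;
    src_argb += 4;
    dst_argb += 4;
  }
}
#endif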

#ifdef HAS_ARGBUNATTENUATEROW_AVX2
// Shuffle table duplicating alpha.
static const uvec8 kUnattenShuffleAlpha_AVX2 = {
  0u, 1u, 0u, 1u, 0u, 1u, 6u, 7u, 8u, 9u, 8u, 9u, 8u, 9u, 14u, 15u
};
// TODO(fbarchard): Enable USE_GATHER for future hardware if faster.
// USE_GATHER is not on by default, due to being a slow instruction.
#ifdef USE_GATHER
__declspec(naked)
void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb,
                             int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb0
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    sub        edx, eax
    vbroadcastf128 ymm4, xmmword ptr kUnattenShuffleAlpha_AVX2

 convertloop:
    vmovdqu    ymm6, [eax]       // read 8 pixels.
    vpcmpeqb   ymm5, ymm5, ymm5  // generate mask 0xffffffff for gather.
    vpsrld     ymm2, ymm6, 24    // alpha in low 8 bits.
    vpunpcklbw ymm0, ymm6, ymm6  // low 4 pixels. mutated.
    vpunpckhbw ymm1, ymm6, ymm6  // high 4 pixels. mutated.
    vpgatherdd ymm3, [ymm2 * 4 + fixed_invtbl8], ymm5  // ymm5 cleared.  1, a
    vpunpcklwd ymm2, ymm3, ymm3  // low 4 inverted alphas. mutated. 1, 1, a, a
    vpunpckhwd ymm3, ymm3, ymm3  // high 4 inverted alphas. mutated.
    vpshufb    ymm2, ymm2, ymm4  // replicate low 4 alphas. 1, a, a, a
    vpshufb    ymm3, ymm3, ymm4  // replicate high 4 alphas
    vpmulhuw   ymm0, ymm0, ymm2  // rgb * ia
    vpmulhuw   ymm1, ymm1, ymm3  // rgb * ia
    vpackuswb  ymm0, ymm0, ymm1  // unmutated.
    vmovdqu    [eax + edx], ymm0
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop

    vzeroupper
    ret
  }
}
#else  // USE_GATHER
__declspec(naked)
void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb,
                             int width) {
  __asm {

    push       ebx
    push       esi
    push       edi
    mov        eax, [esp + 12 + 4]   // src_argb
    mov        edx, [esp + 12 + 8]   // dst_argb
    mov        ecx, [esp + 12 + 12]  // width
    sub        edx, eax
    lea        ebx, fixed_invtbl8
    vbroadcastf128 ymm5, xmmword ptr kUnattenShuffleAlpha_AVX2

 convertloop:
    // replace VPGATHER
    movzx      esi, byte ptr [eax + 3]                 // alpha0
    movzx      edi, byte ptr [eax + 7]                 // alpha1
    vmovd      xmm0, dword ptr [ebx + esi * 4]  // [1,a0]
    vmovd      xmm1, dword ptr [ebx + edi * 4]  // [1,a1]
    movzx      esi, byte ptr [eax + 11]                // alpha2
    movzx      edi, byte ptr [eax + 15]                // alpha3
    vpunpckldq xmm6, xmm0, xmm1                        // [1,a1,1,a0]
    vmovd      xmm2, dword ptr [ebx + esi * 4]  // [1,a2]
    vmovd      xmm3, dword ptr [ebx + edi * 4]  // [1,a3]
    movzx      esi, byte ptr [eax + 19]                // alpha4
    movzx      edi, byte ptr [eax + 23]                // alpha5
    vpunpckldq xmm7, xmm2, xmm3                        // [1,a3,1,a2]
    vmovd      xmm0, dword ptr [ebx + esi * 4]  // [1,a4]
    vmovd      xmm1, dword ptr [ebx + edi * 4]  // [1,a5]
    movzx      esi, byte ptr [eax + 27]                // alpha6
    movzx      edi, byte ptr [eax + 31]                // alpha7
    vpunpckldq xmm0, xmm0, xmm1                        // [1,a5,1,a4]
    vmovd      xmm2, dword ptr [ebx + esi * 4]  // [1,a6]
    vmovd      xmm3, dword ptr [ebx + edi * 4]  // [1,a7]
    vpunpckldq xmm2, xmm2, xmm3                        // [1,a7,1,a6]
    vpunpcklqdq xmm3, xmm6, xmm7                       // [1,a3,1,a2,1,a1,1,a0]
    vpunpcklqdq xmm0, xmm0, xmm2                       // [1,a7,1,a6,1,a5,1,a4]
    vinserti128 ymm3, ymm3, xmm0, 1 // [1,a7,1,a6,1,a5,1,a4,1,a3,1,a2,1,a1,1,a0]
    // end of VPGATHER

    vmovdqu    ymm6, [eax]       // read 8 pixels.
    vpunpcklbw ymm0, ymm6, ymm6  // low 4 pixels. mutated.
    vpunpckhbw ymm1, ymm6, ymm6  // high 4 pixels. mutated.
    vpunpcklwd ymm2, ymm3, ymm3  // low 4 inverted alphas. mutated. 1, 1, a, a
    vpunpckhwd ymm3, ymm3, ymm3  // high 4 inverted alphas. mutated.
    vpshufb    ymm2, ymm2, ymm5  // replicate low 4 alphas. 1, a, a, a
    vpshufb    ymm3, ymm3, ymm5  // replicate high 4 alphas
    vpmulhuw   ymm0, ymm0, ymm2  // rgb * ia
    vpmulhuw   ymm1, ymm1, ymm3  // rgb * ia
    vpackuswb  ymm0, ymm0, ymm1  // unmutated.
    vmovdqu    [eax + edx], ymm0
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    pop        ebx
    vzeroupper
    ret
  }
}
#endif  // USE_GATHER
#endif  // HAS_ARGBUNATTENUATEROW_AVX2

#ifdef HAS_ARGBGRAYROW_SSSE3
// Convert 8 ARGB pixels (32 bytes) to 8 Gray ARGB pixels.
__declspec(naked)
void ARGBGrayRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_argb */
    mov        ecx, [esp + 12]  /* width */
    movdqa     xmm4, xmmword ptr kARGBToYJ
    movdqa     xmm5, xmmword ptr kAddYJ64

 convertloop:
    movdqu     xmm0, [eax]  // G
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    phaddw     xmm0, xmm1
    paddw      xmm0, xmm5  // Add .5 for rounding.
    psrlw      xmm0, 7
    packuswb   xmm0, xmm0   // 8 G bytes
    movdqu     xmm2, [eax]  // A
    movdqu     xmm3, [eax + 16]
    lea        eax, [eax + 32]
    psrld      xmm2, 24
    psrld      xmm3, 24
    packuswb   xmm2, xmm3
    packuswb   xmm2, xmm2   // 8 A bytes
    movdqa     xmm3, xmm0   // Weave into GG, GA, then GGGA
    punpcklbw  xmm0, xmm0   // 8 GG words
    punpcklbw  xmm3, xmm2   // 8 GA words
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm3   // GGGA first 4
    punpckhwd  xmm1, xmm3   // GGGA next 4
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
#endif  // HAS_ARGBGRAYROW_SSSE3

#ifdef HAS_ARGBSEPIAROW_SSSE3
//    b = (r * 35 + g * 68 + b * 17) >> 7
//    g = (r * 45 + g * 88 + b * 22) >> 7
//    r = (r * 50 + g * 98 + b * 24) >> 7
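#if 0
// Scalar sketch of the sepia formulas above (illustrative only, not
// compiled).  The constants below pack the same coefficients in B,G,R order
// for pmaddubsw, and packuswb provides the saturation to 255.
static void ARGBSepiaRow_Ref(uint8* dst_argb, int width) {
  int i;
  for (i = 0; i < width; ++i) {
    int b = dst_argb[0];
    int g = dst_argb[1];
    int r = dst_argb[2];
    int sb = (r * 35 + g * 68 + b * 17) >> 7;
    int sg = (r * 45 + g * 88 + b * 22) >> 7;
    int sr = (r * 50 + g * 98 + b * 24) >> 7;
    dst_argb[0] = (uint8)(sb > 255 ? 255 : sb);
    dst_argb[1] = (uint8)(sg > 255 ? 255 : sg);
    dst_argb[2] = (uint8)(sr > 255 ? 255 : sr);  // alpha is left unchanged
    dst_argb += 4;
  }
}
#endif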
// Constant for ARGB color to sepia tone.
static const vec8 kARGBToSepiaB = {
  17, 68, 35, 0, 17, 68, 35, 0, 17, 68, 35, 0, 17, 68, 35, 0
};

static const vec8 kARGBToSepiaG = {
  22, 88, 45, 0, 22, 88, 45, 0, 22, 88, 45, 0, 22, 88, 45, 0
};

static const vec8 kARGBToSepiaR = {
  24, 98, 50, 0, 24, 98, 50, 0, 24, 98, 50, 0, 24, 98, 50, 0
};

// Convert 8 ARGB pixels (32 bytes) to 8 Sepia ARGB pixels.
__declspec(naked)
void ARGBSepiaRow_SSSE3(uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   /* dst_argb */
    mov        ecx, [esp + 8]   /* width */
    movdqa     xmm2, xmmword ptr kARGBToSepiaB
    movdqa     xmm3, xmmword ptr kARGBToSepiaG
    movdqa     xmm4, xmmword ptr kARGBToSepiaR

 convertloop:
    movdqu     xmm0, [eax]  // B
    movdqu     xmm6, [eax + 16]
    pmaddubsw  xmm0, xmm2
    pmaddubsw  xmm6, xmm2
    phaddw     xmm0, xmm6
    psrlw      xmm0, 7
    packuswb   xmm0, xmm0   // 8 B values
    movdqu     xmm5, [eax]  // G
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm5, xmm3
    pmaddubsw  xmm1, xmm3
    phaddw     xmm5, xmm1
    psrlw      xmm5, 7
    packuswb   xmm5, xmm5   // 8 G values
    punpcklbw  xmm0, xmm5   // 8 BG values
    movdqu     xmm5, [eax]  // R
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm5, xmm4
    pmaddubsw  xmm1, xmm4
    phaddw     xmm5, xmm1
    psrlw      xmm5, 7
    packuswb   xmm5, xmm5   // 8 R values
    movdqu     xmm6, [eax]  // A
    movdqu     xmm1, [eax + 16]
    psrld      xmm6, 24
    psrld      xmm1, 24
    packuswb   xmm6, xmm1
    packuswb   xmm6, xmm6   // 8 A values
    punpcklbw  xmm5, xmm6   // 8 RA values
    movdqa     xmm1, xmm0   // Weave BG, RA together
    punpcklwd  xmm0, xmm5   // BGRA first 4
    punpckhwd  xmm1, xmm5   // BGRA next 4
    movdqu     [eax], xmm0
    movdqu     [eax + 16], xmm1
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
#endif  // HAS_ARGBSEPIAROW_SSSE3

#ifdef HAS_ARGBCOLORMATRIXROW_SSSE3
// Transform 8 ARGB pixels (32 bytes) with color matrix.
// Same as Sepia except matrix is provided.
// TODO(fbarchard): packuswb only uses half of the reg. To make RGBA, combine R
// and B into a high and low, then G/A, punpckl/hbw and then punpckl/hwd.
__declspec(naked)
void ARGBColorMatrixRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
                              const int8* matrix_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_argb */
    mov        ecx, [esp + 12]  /* matrix_argb */
    movdqu     xmm5, [ecx]
    pshufd     xmm2, xmm5, 0x00
    pshufd     xmm3, xmm5, 0x55
    pshufd     xmm4, xmm5, 0xaa
    pshufd     xmm5, xmm5, 0xff
    mov        ecx, [esp + 16]  /* width */

 convertloop:
    movdqu     xmm0, [eax]  // B
    movdqu     xmm7, [eax + 16]
    pmaddubsw  xmm0, xmm2
    pmaddubsw  xmm7, xmm2
    movdqu     xmm6, [eax]  // G
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm6, xmm3
    pmaddubsw  xmm1, xmm3
    phaddsw    xmm0, xmm7   // B
    phaddsw    xmm6, xmm1   // G
    psraw      xmm0, 6      // B
    psraw      xmm6, 6      // G
    packuswb   xmm0, xmm0   // 8 B values
    packuswb   xmm6, xmm6   // 8 G values
    punpcklbw  xmm0, xmm6   // 8 BG values
    movdqu     xmm1, [eax]  // R
    movdqu     xmm7, [eax + 16]
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm7, xmm4
    phaddsw    xmm1, xmm7   // R
    movdqu     xmm6, [eax]  // A
    movdqu     xmm7, [eax + 16]
    pmaddubsw  xmm6, xmm5
    pmaddubsw  xmm7, xmm5
    phaddsw    xmm6, xmm7   // A
    psraw      xmm1, 6      // R
    psraw      xmm6, 6      // A
    packuswb   xmm1, xmm1   // 8 R values
    packuswb   xmm6, xmm6   // 8 A values
    punpcklbw  xmm1, xmm6   // 8 RA values
    movdqa     xmm6, xmm0   // Weave BG, RA together
    punpcklwd  xmm0, xmm1   // BGRA first 4
    punpckhwd  xmm6, xmm1   // BGRA next 4
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm6
    lea        eax, [eax + 32]
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
#endif  // HAS_ARGBCOLORMATRIXROW_SSSE3

#ifdef HAS_ARGBQUANTIZEROW_SSE2
// Quantize 4 ARGB pixels (16 bytes).
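#if 0
// Scalar sketch of the quantize step below (illustrative only, not
// compiled).  scale is assumed to be a 16.16 reciprocal of interval_size
// supplied by the caller; alpha is preserved by the 0xff000000 mask in the
// SIMD loop.
static void ARGBQuantizeRow_Ref(uint8* dst_argb, int scale, int interval_size,
                                int interval_offset, int width) {
  int i, c;
  for (i = 0; i < width; ++i) {
    for (c = 0; c < 3; ++c) {  // B, G, R only; dst_argb[3] stays as-is
      int v = dst_argb[c];
      dst_argb[c] = (uint8)((v * scale >> 16) * interval_size +
                            interval_offset);
    }
    dst_argb += 4;
  }
}
#endif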
__declspec(naked)
void ARGBQuantizeRow_SSE2(uint8* dst_argb, int scale, int interval_size,
                          int interval_offset, int width) {
  __asm {
    mov        eax, [esp + 4]    /* dst_argb */
    movd       xmm2, [esp + 8]   /* scale */
    movd       xmm3, [esp + 12]  /* interval_size */
    movd       xmm4, [esp + 16]  /* interval_offset */
    mov        ecx, [esp + 20]   /* width */
    pshuflw    xmm2, xmm2, 040h
    pshufd     xmm2, xmm2, 044h
    pshuflw    xmm3, xmm3, 040h
    pshufd     xmm3, xmm3, 044h
    pshuflw    xmm4, xmm4, 040h
    pshufd     xmm4, xmm4, 044h
    pxor       xmm5, xmm5  // constant 0
    pcmpeqb    xmm6, xmm6  // generate mask 0xff000000
    pslld      xmm6, 24

 convertloop:
    movdqu     xmm0, [eax]  // read 4 pixels
    punpcklbw  xmm0, xmm5   // first 2 pixels
    pmulhuw    xmm0, xmm2   // pixel * scale >> 16
    movdqu     xmm1, [eax]  // read 4 pixels
    punpckhbw  xmm1, xmm5   // next 2 pixels
    pmulhuw    xmm1, xmm2
    pmullw     xmm0, xmm3   // * interval_size
    movdqu     xmm7, [eax]  // read 4 pixels
    pmullw     xmm1, xmm3
    pand       xmm7, xmm6   // mask alpha
    paddw      xmm0, xmm4   // + interval_offset
    paddw      xmm1, xmm4
    packuswb   xmm0, xmm1
    por        xmm0, xmm7
    movdqu     [eax], xmm0
    lea        eax, [eax + 16]
    sub        ecx, 4
    jg         convertloop
    ret
  }
}
#endif  // HAS_ARGBQUANTIZEROW_SSE2

#ifdef HAS_ARGBSHADEROW_SSE2
// Shade 4 pixels at a time by specified value.
__declspec(naked)
void ARGBShadeRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width,
                       uint32 value) {
  __asm {
    mov        eax, [esp + 4]   // src_argb
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    movd       xmm2, [esp + 16]  // value
    punpcklbw  xmm2, xmm2
    punpcklqdq xmm2, xmm2

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm0       // first 2
    punpckhbw  xmm1, xmm1       // next 2
    pmulhuw    xmm0, xmm2       // argb * value
    pmulhuw    xmm1, xmm2       // argb * value
    psrlw      xmm0, 8
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBSHADEROW_SSE2

#ifdef HAS_ARGBMULTIPLYROW_SSE2
// Multiply 2 rows of ARGB pixels together, 4 pixels at a time.
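#if 0
// Scalar sketch of the per-channel multiply below (illustrative only, not
// compiled).  Replicating one operand into a 16-bit word (x 0x101) and
// taking the high 16 bits of the product mirrors the punpcklbw + pmulhuw
// scheme, which approximates (c0 * c1) / 255.
static void ARGBMultiplyRow_Ref(const uint8* src_argb0,
                                const uint8* src_argb1,
                                uint8* dst_argb, int width) {
  int i, c;
  for (i = 0; i < width; ++i) {
    for (c = 0; c < 4; ++c) {
      uint32 v = src_argb0[c] * 0x101u;  // replicate byte into both halves
      dst_argb[c] = (uint8)((v * src_argb1[c]) >> 16);
    }
    src_argb0 += 4;
    src_argb1 += 4;
    dst_argb += 4;
  }
}
#endif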
__declspec(naked)
void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    pxor       xmm5, xmm5  // constant 0

 convertloop:
    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
    movdqu     xmm2, [esi]        // read 4 pixels from src_argb1
    movdqu     xmm1, xmm0
    movdqu     xmm3, xmm2
    punpcklbw  xmm0, xmm0         // first 2
    punpckhbw  xmm1, xmm1         // next 2
    punpcklbw  xmm2, xmm5         // first 2
    punpckhbw  xmm3, xmm5         // next 2
    pmulhuw    xmm0, xmm2         // src_argb0 * src_argb1 first 2
    pmulhuw    xmm1, xmm3         // src_argb0 * src_argb1 next 2
    lea        eax, [eax + 16]
    lea        esi, [esi + 16]
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_ARGBMULTIPLYROW_SSE2

#ifdef HAS_ARGBADDROW_SSE2
// Add 2 rows of ARGB pixels together, 4 pixels at a time.
// TODO(fbarchard): Port this to posix, neon and other math functions.
__declspec(naked)
void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                     uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

    sub        ecx, 4
    jl         convertloop49

 convertloop4:
    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
    lea        eax, [eax + 16]
    movdqu     xmm1, [esi]        // read 4 pixels from src_argb1
    lea        esi, [esi + 16]
    paddusb    xmm0, xmm1         // src_argb0 + src_argb1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        convertloop4

 convertloop49:
    add        ecx, 4 - 1
    jl         convertloop19

 convertloop1:
    movd       xmm0, [eax]        // read 1 pixel from src_argb0
    lea        eax, [eax + 4]
    movd       xmm1, [esi]        // read 1 pixel from src_argb1
    lea        esi, [esi + 4]
    paddusb    xmm0, xmm1         // src_argb0 + src_argb1
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        convertloop1

 convertloop19:
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBADDROW_SSE2

#ifdef HAS_ARGBSUBTRACTROW_SSE2
// Subtract 2 rows of ARGB pixels together, 4 pixels at a time.
__declspec(naked)
void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

 convertloop:
    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
    lea        eax, [eax + 16]
    movdqu     xmm1, [esi]        // read 4 pixels from src_argb1
    lea        esi, [esi + 16]
    psubusb    xmm0, xmm1         // src_argb0 - src_argb1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_ARGBSUBTRACTROW_SSE2

#ifdef HAS_ARGBMULTIPLYROW_AVX2
// Multiply 2 rows of ARGB pixels together, 8 pixels at a time.
__declspec(naked)
void ARGBMultiplyRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    vpxor      ymm5, ymm5, ymm5     // constant 0

 convertloop:
    vmovdqu    ymm1, [eax]        // read 8 pixels from src_argb0
    lea        eax, [eax + 32]
    vmovdqu    ymm3, [esi]        // read 8 pixels from src_argb1
    lea        esi, [esi + 32]
    vpunpcklbw ymm0, ymm1, ymm1   // low 4
    vpunpckhbw ymm1, ymm1, ymm1   // high 4
    vpunpcklbw ymm2, ymm3, ymm5   // low 4
    vpunpckhbw ymm3, ymm3, ymm5   // high 4
    vpmulhuw   ymm0, ymm0, ymm2   // src_argb0 * src_argb1 low 4
    vpmulhuw   ymm1, ymm1, ymm3   // src_argb0 * src_argb1 high 4
    vpackuswb  ymm0, ymm0, ymm1
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBMULTIPLYROW_AVX2

#ifdef HAS_ARGBADDROW_AVX2
// Add 2 rows of ARGB pixels together, 8 pixels at a time.
__declspec(naked)
void ARGBAddRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
                     uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

 convertloop:
    vmovdqu    ymm0, [eax]              // read 8 pixels from src_argb0
    lea        eax, [eax + 32]
    vpaddusb   ymm0, ymm0, [esi]        // add 8 pixels from src_argb1
    lea        esi, [esi + 32]
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBADDROW_AVX2

#ifdef HAS_ARGBSUBTRACTROW_AVX2
// Subtract 2 rows of ARGB pixels together, 8 pixels at a time.
__declspec(naked)
void ARGBSubtractRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

 convertloop:
    vmovdqu    ymm0, [eax]              // read 8 pixels from src_argb0
    lea        eax, [eax + 32]
    vpsubusb   ymm0, ymm0, [esi]        // src_argb0 - src_argb1
    lea        esi, [esi + 32]
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBSUBTRACTROW_AVX2

#ifdef HAS_SOBELXROW_SSE2
// SobelX as a matrix is
// -1  0  1
// -2  0  2
// -1  0  1
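#if 0
// Scalar sketch of the Sobel X tap below (illustrative only, not compiled).
// src_y0/src_y1/src_y2 are three consecutive source rows.
static void SobelXRow_Ref(const uint8* src_y0, const uint8* src_y1,
                          const uint8* src_y2, uint8* dst_sobelx, int width) {
  int i;
  for (i = 0; i < width; ++i) {
    int a = src_y0[i] - src_y0[i + 2];  // top row:    -1 0 1
    int b = src_y1[i] - src_y1[i + 2];  // middle row: -2 0 2
    int c = src_y2[i] - src_y2[i + 2];  // bottom row: -1 0 1
    int sobel = a + b * 2 + c;
    if (sobel < 0) sobel = -sobel;      // absolute value
    dst_sobelx[i] = (uint8)(sobel > 255 ? 255 : sobel);
  }
}
#endif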
__declspec(naked)
void SobelXRow_SSE2(const uint8* src_y0, const uint8* src_y1,
                    const uint8* src_y2, uint8* dst_sobelx, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_y0
    mov        esi, [esp + 8 + 8]   // src_y1
    mov        edi, [esp + 8 + 12]  // src_y2
    mov        edx, [esp + 8 + 16]  // dst_sobelx
    mov        ecx, [esp + 8 + 20]  // width
    sub        esi, eax
    sub        edi, eax
    sub        edx, eax
    pxor       xmm5, xmm5  // constant 0

 convertloop:
    movq       xmm0, qword ptr [eax]            // read 8 pixels from src_y0[0]
    movq       xmm1, qword ptr [eax + 2]        // read 8 pixels from src_y0[2]
    punpcklbw  xmm0, xmm5
    punpcklbw  xmm1, xmm5
    psubw      xmm0, xmm1
    movq       xmm1, qword ptr [eax + esi]      // read 8 pixels from src_y1[0]
    movq       xmm2, qword ptr [eax + esi + 2]  // read 8 pixels from src_y1[2]
    punpcklbw  xmm1, xmm5
    punpcklbw  xmm2, xmm5
    psubw      xmm1, xmm2
    movq       xmm2, qword ptr [eax + edi]      // read 8 pixels from src_y2[0]
    movq       xmm3, qword ptr [eax + edi + 2]  // read 8 pixels from src_y2[2]
    punpcklbw  xmm2, xmm5
    punpcklbw  xmm3, xmm5
    psubw      xmm2, xmm3
    paddw      xmm0, xmm2
    paddw      xmm0, xmm1
    paddw      xmm0, xmm1
    pxor       xmm1, xmm1   // abs = max(xmm0, -xmm0).  SSSE3 could use pabsw
    psubw      xmm1, xmm0
    pmaxsw     xmm0, xmm1
    packuswb   xmm0, xmm0
    movq       qword ptr [eax + edx], xmm0
    lea        eax, [eax + 8]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_SOBELXROW_SSE2

#ifdef HAS_SOBELYROW_SSE2
// SobelY as a matrix is
// -1 -2 -1
//  0  0  0
//  1  2  1
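#if 0
// Scalar sketch of the Sobel Y tap below (illustrative only, not compiled).
// Only two source rows are needed because the middle row has zero weight.
static void SobelYRow_Ref(const uint8* src_y0, const uint8* src_y1,
                          uint8* dst_sobely, int width) {
  int i;
  for (i = 0; i < width; ++i) {
    int a = src_y0[i + 0] - src_y1[i + 0];  // columns weighted 1, 2, 1
    int b = src_y0[i + 1] - src_y1[i + 1];
    int c = src_y0[i + 2] - src_y1[i + 2];
    int sobel = a + b * 2 + c;
    if (sobel < 0) sobel = -sobel;          // absolute value
    dst_sobely[i] = (uint8)(sobel > 255 ? 255 : sobel);
  }
}
#endif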
__declspec(naked)
void SobelYRow_SSE2(const uint8* src_y0, const uint8* src_y1,
                    uint8* dst_sobely, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_y0
    mov        esi, [esp + 4 + 8]   // src_y1
    mov        edx, [esp + 4 + 12]  // dst_sobely
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax
    sub        edx, eax
    pxor       xmm5, xmm5  // constant 0

 convertloop:
    movq       xmm0, qword ptr [eax]            // read 8 pixels from src_y0[0]
    movq       xmm1, qword ptr [eax + esi]      // read 8 pixels from src_y1[0]
    punpcklbw  xmm0, xmm5
    punpcklbw  xmm1, xmm5
    psubw      xmm0, xmm1
    movq       xmm1, qword ptr [eax + 1]        // read 8 pixels from src_y0[1]
    movq       xmm2, qword ptr [eax + esi + 1]  // read 8 pixels from src_y1[1]
    punpcklbw  xmm1, xmm5
    punpcklbw  xmm2, xmm5
    psubw      xmm1, xmm2
    movq       xmm2, qword ptr [eax + 2]        // read 8 pixels from src_y0[2]
    movq       xmm3, qword ptr [eax + esi + 2]  // read 8 pixels from src_y1[2]
    punpcklbw  xmm2, xmm5
    punpcklbw  xmm3, xmm5
    psubw      xmm2, xmm3
    paddw      xmm0, xmm2
    paddw      xmm0, xmm1
    paddw      xmm0, xmm1
    pxor       xmm1, xmm1   // abs = max(xmm0, -xmm0).  SSSE3 could use pabsw
    psubw      xmm1, xmm0
    pmaxsw     xmm0, xmm1
    packuswb   xmm0, xmm0
    movq       qword ptr [eax + edx], xmm0
    lea        eax, [eax + 8]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELYROW_SSE2

#ifdef HAS_SOBELROW_SSE2
// Adds Sobel X and Sobel Y and stores Sobel into ARGB.
// A = 255
// R = Sobel
// G = Sobel
// B = Sobel
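#if 0
// Scalar sketch of the combine step below (illustrative only, not compiled):
// the saturated sum of the two gradients is replicated into B, G and R with
// alpha forced to 255, which is what the word/byte interleaving produces.
static void SobelRow_Ref(const uint8* src_sobelx, const uint8* src_sobely,
                         uint8* dst_argb, int width) {
  int i;
  for (i = 0; i < width; ++i) {
    int s = src_sobelx[i] + src_sobely[i];
    uint8 g = (uint8)(s > 255 ? 255 : s);  // saturated add, like paddusb
    dst_argb[0] = g;
    dst_argb[1] = g;
    dst_argb[2] = g;
    dst_argb[3] = 255u;
    dst_argb += 4;
  }
}
#endif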
__declspec(naked)
void SobelRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
                   uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_sobelx
    mov        esi, [esp + 4 + 8]   // src_sobely
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax
    pcmpeqb    xmm5, xmm5           // alpha 255
    pslld      xmm5, 24             // 0xff000000

 convertloop:
    movdqu     xmm0, [eax]            // read 16 pixels src_sobelx
    movdqu     xmm1, [eax + esi]      // read 16 pixels src_sobely
    lea        eax, [eax + 16]
    paddusb    xmm0, xmm1             // sobel = sobelx + sobely
    movdqa     xmm2, xmm0             // GG
    punpcklbw  xmm2, xmm0             // First 8
    punpckhbw  xmm0, xmm0             // Next 8
    movdqa     xmm1, xmm2             // GGGG
    punpcklwd  xmm1, xmm2             // First 4
    punpckhwd  xmm2, xmm2             // Next 4
    por        xmm1, xmm5             // GGGA
    por        xmm2, xmm5
    movdqa     xmm3, xmm0             // GGGG
    punpcklwd  xmm3, xmm0             // Next 4
    punpckhwd  xmm0, xmm0             // Last 4
    por        xmm3, xmm5             // GGGA
    por        xmm0, xmm5
    movdqu     [edx], xmm1
    movdqu     [edx + 16], xmm2
    movdqu     [edx + 32], xmm3
    movdqu     [edx + 48], xmm0
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELROW_SSE2

#ifdef HAS_SOBELTOPLANEROW_SSE2
// Adds Sobel X and Sobel Y and stores Sobel into a plane.
__declspec(naked)
void SobelToPlaneRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
                          uint8* dst_y, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_sobelx
    mov        esi, [esp + 4 + 8]   // src_sobely
    mov        edx, [esp + 4 + 12]  // dst_y
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax

 convertloop:
    movdqu     xmm0, [eax]            // read 16 pixels src_sobelx
    movdqu     xmm1, [eax + esi]      // read 16 pixels src_sobely
    lea        eax, [eax + 16]
    paddusb    xmm0, xmm1             // sobel = sobelx + sobely
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELTOPLANEROW_SSE2

#ifdef HAS_SOBELXYROW_SSE2
// Mixes Sobel X, Sobel Y and Sobel into ARGB.
// A = 255
// R = Sobel X
// G = Sobel
// B = Sobel Y
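#if 0
// Scalar sketch of the channel packing below (illustrative only, not
// compiled).
static void SobelXYRow_Ref(const uint8* src_sobelx, const uint8* src_sobely,
                           uint8* dst_argb, int width) {
  int i;
  for (i = 0; i < width; ++i) {
    int s = src_sobelx[i] + src_sobely[i];
    dst_argb[0] = src_sobely[i];               // B = Sobel Y
    dst_argb[1] = (uint8)(s > 255 ? 255 : s);  // G = saturated sum
    dst_argb[2] = src_sobelx[i];               // R = Sobel X
    dst_argb[3] = 255u;                        // A = 255
    dst_argb += 4;
  }
}
#endif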
__declspec(naked)
void SobelXYRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
                     uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_sobelx
    mov        esi, [esp + 4 + 8]   // src_sobely
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax
    pcmpeqb    xmm5, xmm5           // alpha 255

 convertloop:
    movdqu     xmm0, [eax]            // read 16 pixels src_sobelx
    movdqu     xmm1, [eax + esi]      // read 16 pixels src_sobely
    lea        eax, [eax + 16]
    movdqa     xmm2, xmm0
    paddusb    xmm2, xmm1             // sobel = sobelx + sobely
    movdqa     xmm3, xmm0             // XA
    punpcklbw  xmm3, xmm5
    punpckhbw  xmm0, xmm5
    movdqa     xmm4, xmm1             // YS
    punpcklbw  xmm4, xmm2
    punpckhbw  xmm1, xmm2
    movdqa     xmm6, xmm4             // YSXA
    punpcklwd  xmm6, xmm3             // First 4
    punpckhwd  xmm4, xmm3             // Next 4
    movdqa     xmm7, xmm1             // YSXA
    punpcklwd  xmm7, xmm0             // Next 4
    punpckhwd  xmm1, xmm0             // Last 4
    movdqu     [edx], xmm6
    movdqu     [edx + 16], xmm4
    movdqu     [edx + 32], xmm7
    movdqu     [edx + 48], xmm1
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELXYROW_SSE2

#ifdef HAS_CUMULATIVESUMTOAVERAGEROW_SSE2
// Consider float CumulativeSum.
// Consider calling CumulativeSum one row at a time as needed.
// Consider circular CumulativeSum buffer of radius * 2 + 1 height.
// Convert cumulative sum for an area to an average for 1 pixel.
// topleft is pointer to top left of CumulativeSum buffer for area.
// botleft is pointer to bottom left of CumulativeSum buffer.
// width is offset from left to right of area in CumulativeSum buffer measured
//   in number of ints.
// area is the number of pixels in the area being averaged.
// dst points to pixel to store result to.
// count is number of averaged pixels to produce.
// Does 4 pixels at a time.
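#if 0
// Scalar sketch of the box-filter average below (illustrative only, not
// compiled; it ignores the 16-bit fixed-point fast path and rounds by
// truncation).  Each output byte is the integral-image rectangle sum
// topleft - topright - botleft + botright, divided by the pixel count.
static void CumulativeSumToAverageRow_Ref(const int32* topleft,
                                          const int32* botleft, int width,
                                          int area, uint8* dst, int count) {
  int i, c;
  for (i = 0; i < count; ++i) {
    for (c = 0; c < 4; ++c) {  // 4 ints (ARGB channels) per pixel
      int32 sum = topleft[c] - topleft[width + c] -
                  botleft[c] + botleft[width + c];
      int32 avg = sum / area;
      dst[c] = (uint8)(avg < 0 ? 0 : (avg > 255 ? 255 : avg));
    }
    topleft += 4;
    botleft += 4;
    dst += 4;
  }
}
#endif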
void CumulativeSumToAverageRow_SSE2(const int32* topleft, const int32* botleft,
                                    int width, int area, uint8* dst,
                                    int count) {
  __asm {
    mov        eax, topleft  // eax topleft
    mov        esi, botleft  // esi botleft
    mov        edx, width
    movd       xmm5, area
    mov        edi, dst
    mov        ecx, count
    cvtdq2ps   xmm5, xmm5
    rcpss      xmm4, xmm5  // 1.0f / area
    pshufd     xmm4, xmm4, 0
    sub        ecx, 4
    jl         l4b

    cmp        area, 128  // 128 pixels will not overflow 15 bits.
    ja         l4

    pshufd     xmm5, xmm5, 0        // area
    pcmpeqb    xmm6, xmm6           // constant of 65536.0 - 1 = 65535.0
    psrld      xmm6, 16
    cvtdq2ps   xmm6, xmm6
    addps      xmm5, xmm6           // (65536.0 + area - 1)
    mulps      xmm5, xmm4           // (65536.0 + area - 1) * 1 / area
    cvtps2dq   xmm5, xmm5           // 0.16 fixed point
    packssdw   xmm5, xmm5           // 16 bit shorts

    // 4 pixel loop small blocks.
  s4:
    // top left
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]

    // - top right
    psubd      xmm0, [eax + edx * 4]
    psubd      xmm1, [eax + edx * 4 + 16]
    psubd      xmm2, [eax + edx * 4 + 32]
    psubd      xmm3, [eax + edx * 4 + 48]
    lea        eax, [eax + 64]

    // - bottom left
    psubd      xmm0, [esi]
    psubd      xmm1, [esi + 16]
    psubd      xmm2, [esi + 32]
    psubd      xmm3, [esi + 48]

    // + bottom right
    paddd      xmm0, [esi + edx * 4]
    paddd      xmm1, [esi + edx * 4 + 16]
    paddd      xmm2, [esi + edx * 4 + 32]
    paddd      xmm3, [esi + edx * 4 + 48]
    lea        esi, [esi + 64]

    packssdw   xmm0, xmm1  // pack 4 pixels into 2 registers
    packssdw   xmm2, xmm3

    pmulhuw    xmm0, xmm5
    pmulhuw    xmm2, xmm5

    packuswb   xmm0, xmm2
    movdqu     [edi], xmm0
    lea        edi, [edi + 16]
    sub        ecx, 4
    jge        s4

    jmp        l4b

    // 4 pixel loop
  l4:
    // top left
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]

    // - top right
    psubd      xmm0, [eax + edx * 4]
    psubd      xmm1, [eax + edx * 4 + 16]
    psubd      xmm2, [eax + edx * 4 + 32]
    psubd      xmm3, [eax + edx * 4 + 48]
    lea        eax, [eax + 64]

    // - bottom left
    psubd      xmm0, [esi]
    psubd      xmm1, [esi + 16]
    psubd      xmm2, [esi + 32]
    psubd      xmm3, [esi + 48]

    // + bottom right
    paddd      xmm0, [esi + edx * 4]
    paddd      xmm1, [esi + edx * 4 + 16]
    paddd      xmm2, [esi + edx * 4 + 32]
    paddd      xmm3, [esi + edx * 4 + 48]
    lea        esi, [esi + 64]

    cvtdq2ps   xmm0, xmm0   // Average = Sum * 1 / Area
    cvtdq2ps   xmm1, xmm1
    mulps      xmm0, xmm4
    mulps      xmm1, xmm4
    cvtdq2ps   xmm2, xmm2
    cvtdq2ps   xmm3, xmm3
    mulps      xmm2, xmm4
    mulps      xmm3, xmm4
    cvtps2dq   xmm0, xmm0
    cvtps2dq   xmm1, xmm1
    cvtps2dq   xmm2, xmm2
    cvtps2dq   xmm3, xmm3
    packssdw   xmm0, xmm1
    packssdw   xmm2, xmm3
    packuswb   xmm0, xmm2
    movdqu     [edi], xmm0
    lea        edi, [edi + 16]
    sub        ecx, 4
    jge        l4

  l4b:
    add        ecx, 4 - 1
    jl         l1b

    // 1 pixel loop
  l1:
    movdqu     xmm0, [eax]
    psubd      xmm0, [eax + edx * 4]
    lea        eax, [eax + 16]
    psubd      xmm0, [esi]
    paddd      xmm0, [esi + edx * 4]
    lea        esi, [esi + 16]
    cvtdq2ps   xmm0, xmm0
    mulps      xmm0, xmm4
    cvtps2dq   xmm0, xmm0
    packssdw   xmm0, xmm0
    packuswb   xmm0, xmm0
    movd       dword ptr [edi], xmm0
    lea        edi, [edi + 4]
    sub        ecx, 1
    jge        l1
  l1b:
  }
}
#endif  // HAS_CUMULATIVESUMTOAVERAGEROW_SSE2

#ifdef HAS_COMPUTECUMULATIVESUMROW_SSE2
// Creates a table of cumulative sums where each value is a sum of all values
// above and to the left of the value.
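#if 0
// Scalar sketch of the integral-image row below (illustrative only, not
// compiled).  Each output int is the running sum of this row up to and
// including x, plus the already-cumulative value from the row above.
static void ComputeCumulativeSumRow_Ref(const uint8* row, int32* cumsum,
                                        const int32* previous_cumsum,
                                        int width) {
  int32 sum[4] = {0, 0, 0, 0};
  int x, c;
  for (x = 0; x < width; ++x) {
    for (c = 0; c < 4; ++c) {
      sum[c] += row[x * 4 + c];
      cumsum[x * 4 + c] = sum[c] + previous_cumsum[x * 4 + c];
    }
  }
}
#endif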
void ComputeCumulativeSumRow_SSE2(const uint8* row, int32* cumsum,
                                  const int32* previous_cumsum, int width) {
  __asm {
    mov        eax, row
    mov        edx, cumsum
    mov        esi, previous_cumsum
    mov        ecx, width
    pxor       xmm0, xmm0
    pxor       xmm1, xmm1

    sub        ecx, 4
    jl         l4b
    test       edx, 15
    jne        l4b

    // 4 pixel loop
  l4:
    movdqu     xmm2, [eax]  // 4 argb pixels 16 bytes.
    lea        eax, [eax + 16]
    movdqa     xmm4, xmm2

    punpcklbw  xmm2, xmm1
    movdqa     xmm3, xmm2
    punpcklwd  xmm2, xmm1
    punpckhwd  xmm3, xmm1

    punpckhbw  xmm4, xmm1
    movdqa     xmm5, xmm4
    punpcklwd  xmm4, xmm1
    punpckhwd  xmm5, xmm1

    paddd      xmm0, xmm2
    movdqu     xmm2, [esi]  // previous row above.
    paddd      xmm2, xmm0

    paddd      xmm0, xmm3
    movdqu     xmm3, [esi + 16]
    paddd      xmm3, xmm0

    paddd      xmm0, xmm4
    movdqu     xmm4, [esi + 32]
    paddd      xmm4, xmm0

    paddd      xmm0, xmm5
    movdqu     xmm5, [esi + 48]
    lea        esi, [esi + 64]
    paddd      xmm5, xmm0

    movdqu     [edx], xmm2
    movdqu     [edx + 16], xmm3
    movdqu     [edx + 32], xmm4
    movdqu     [edx + 48], xmm5

    lea        edx, [edx + 64]
    sub        ecx, 4
    jge        l4

  l4b:
    add        ecx, 4 - 1
    jl         l1b

    // 1 pixel loop
  l1:
    movd       xmm2, dword ptr [eax]  // 1 argb pixel 4 bytes.
    lea        eax, [eax + 4]
    punpcklbw  xmm2, xmm1
    punpcklwd  xmm2, xmm1
    paddd      xmm0, xmm2
    movdqu     xmm2, [esi]
    lea        esi, [esi + 16]
    paddd      xmm2, xmm0
    movdqu     [edx], xmm2
    lea        edx, [edx + 16]
    sub        ecx, 1
    jge        l1

 l1b:
  }
}
#endif  // HAS_COMPUTECUMULATIVESUMROW_SSE2

#ifdef HAS_ARGBAFFINEROW_SSE2
// Copy ARGB pixels from source image with slope to a row of destination.
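#if 0
// Scalar sketch of the affine sampling below (illustrative only, not
// compiled).  uv_dudv holds the starting source coordinate (u, v) followed
// by its per-destination-pixel increment (du, dv); each output pixel is a
// nearest (truncated) fetch from the source image.
static void ARGBAffineRow_Ref(const uint8* src_argb, int src_argb_stride,
                              uint8* dst_argb, const float* uv_dudv,
                              int width) {
  int i;
  float u = uv_dudv[0];
  float v = uv_dudv[1];
  for (i = 0; i < width; ++i) {
    int x = (int)u;  // truncate toward zero, like cvttps2dq
    int y = (int)v;
    const uint8* src = src_argb + y * src_argb_stride + x * 4;
    dst_argb[0] = src[0];
    dst_argb[1] = src[1];
    dst_argb[2] = src[2];
    dst_argb[3] = src[3];
    u += uv_dudv[2];
    v += uv_dudv[3];
    dst_argb += 4;
  }
}
#endif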
__declspec(naked)
LIBYUV_API
void ARGBAffineRow_SSE2(const uint8* src_argb, int src_argb_stride,
                        uint8* dst_argb, const float* uv_dudv, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 12]  // src_argb
    mov        esi, [esp + 16]  // stride
    mov        edx, [esp + 20]  // dst_argb
    mov        ecx, [esp + 24]  // pointer to uv_dudv
    movq       xmm2, qword ptr [ecx]  // uv
    movq       xmm7, qword ptr [ecx + 8]  // dudv
    mov        ecx, [esp + 28]  // width
    shl        esi, 16          // 4, stride
    add        esi, 4
    movd       xmm5, esi
    sub        ecx, 4
    jl         l4b

    // setup for 4 pixel loop
    pshufd     xmm7, xmm7, 0x44  // dup dudv
    pshufd     xmm5, xmm5, 0  // dup 4, stride
    movdqa     xmm0, xmm2    // x0, y0, x1, y1
    addps      xmm0, xmm7
    movlhps    xmm2, xmm0
    movdqa     xmm4, xmm7
    addps      xmm4, xmm4    // dudv *= 2
    movdqa     xmm3, xmm2    // x2, y2, x3, y3
    addps      xmm3, xmm4
    addps      xmm4, xmm4    // dudv *= 4

    // 4 pixel loop
  l4:
    cvttps2dq  xmm0, xmm2    // x, y float to int first 2
    cvttps2dq  xmm1, xmm3    // x, y float to int next 2
    packssdw   xmm0, xmm1    // x, y as 8 shorts
    pmaddwd    xmm0, xmm5    // offsets = x * 4 + y * stride.
    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // shift right
    movd       edi, xmm0
    pshufd     xmm0, xmm0, 0x39  // shift right
    movd       xmm1, [eax + esi]  // read pixel 0
    movd       xmm6, [eax + edi]  // read pixel 1
    punpckldq  xmm1, xmm6     // combine pixel 0 and 1
    addps      xmm2, xmm4    // x, y += dx, dy first 2
    movq       qword ptr [edx], xmm1
    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // shift right
    movd       edi, xmm0
    movd       xmm6, [eax + esi]  // read pixel 2
    movd       xmm0, [eax + edi]  // read pixel 3
    punpckldq  xmm6, xmm0     // combine pixel 2 and 3
    addps      xmm3, xmm4    // x, y += dx, dy next 2
    movq       qword ptr 8[edx], xmm6
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        l4

  l4b:
    add        ecx, 4 - 1
    jl         l1b

    // 1 pixel loop
  l1:
    cvttps2dq  xmm0, xmm2    // x, y float to int
    packssdw   xmm0, xmm0    // x, y as shorts
    pmaddwd    xmm0, xmm5    // offset = x * 4 + y * stride
    addps      xmm2, xmm7    // x, y += dx, dy
    movd       esi, xmm0
    movd       xmm0, [eax + esi]  // copy a pixel
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        l1
  l1b:
    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBAFFINEROW_SSE2

#ifdef HAS_INTERPOLATEROW_AVX2
// Bilinear filter 32x2 -> 32x1
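#if 0
// Scalar sketch of the vertical blend implemented by the rows below
// (illustrative only, not compiled; the SIMD versions reduce the fraction to
// 7 bits and differ slightly in rounding, and special-case 0, 25, 50, 75 and
// 100 percent blends).
static void InterpolateRow_Ref(uint8* dst_ptr, const uint8* src_ptr,
                               ptrdiff_t src_stride, int dst_width,
                               int source_y_fraction) {
  const uint8* src_ptr1 = src_ptr + src_stride;
  int x;
  for (x = 0; x < dst_width; ++x) {
    dst_ptr[x] = (uint8)((src_ptr[x] * (256 - source_y_fraction) +
                          src_ptr1[x] * source_y_fraction) >> 8);
  }
}
#endif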
__declspec(naked)
void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) {
  __asm {
    push       esi
    push       edi
    mov        edi, [esp + 8 + 4]   // dst_ptr
    mov        esi, [esp + 8 + 8]   // src_ptr
    mov        edx, [esp + 8 + 12]  // src_stride
    mov        ecx, [esp + 8 + 16]  // dst_width
    mov        eax, [esp + 8 + 20]  // source_y_fraction (0..255)
    shr        eax, 1
    // Dispatch to specialized filters if applicable.
    cmp        eax, 0
    je         xloop100  // 0 / 128.  Blend 100 / 0.
    sub        edi, esi
    cmp        eax, 32
    je         xloop75   // 32 / 128 is 0.25.  Blend 75 / 25.
    cmp        eax, 64
    je         xloop50   // 64 / 128 is 0.50.  Blend 50 / 50.
    cmp        eax, 96
    je         xloop25   // 96 / 128 is 0.75.  Blend 25 / 75.

    vmovd      xmm0, eax  // high fraction 0..127
    neg        eax
    add        eax, 128
    vmovd      xmm5, eax  // low fraction 128..1
    vpunpcklbw xmm5, xmm5, xmm0
    vpunpcklwd xmm5, xmm5, xmm5
    vpxor      ymm0, ymm0, ymm0
    vpermd     ymm5, ymm0, ymm5

  xloop:
    vmovdqu    ymm0, [esi]
    vmovdqu    ymm2, [esi + edx]
    vpunpckhbw ymm1, ymm0, ymm2  // mutates
    vpunpcklbw ymm0, ymm0, ymm2  // mutates
    vpmaddubsw ymm0, ymm0, ymm5
    vpmaddubsw ymm1, ymm1, ymm5
    vpsrlw     ymm0, ymm0, 7
    vpsrlw     ymm1, ymm1, 7
    vpackuswb  ymm0, ymm0, ymm1  // unmutates
    vmovdqu    [esi + edi], ymm0
    lea        esi, [esi + 32]
    sub        ecx, 32
    jg         xloop
    jmp        xloop99

   // Blend 25 / 75.
 xloop25:
   vmovdqu    ymm0, [esi]
   vmovdqu    ymm1, [esi + edx]
   vpavgb     ymm0, ymm0, ymm1
   vpavgb     ymm0, ymm0, ymm1
   vmovdqu    [esi + edi], ymm0
   lea        esi, [esi + 32]
   sub        ecx, 32
   jg         xloop25
   jmp        xloop99

   // Blend 50 / 50.
 xloop50:
   vmovdqu    ymm0, [esi]
   vpavgb     ymm0, ymm0, [esi + edx]
   vmovdqu    [esi + edi], ymm0
   lea        esi, [esi + 32]
   sub        ecx, 32
   jg         xloop50
   jmp        xloop99

   // Blend 75 / 25.
 xloop75:
   vmovdqu    ymm1, [esi]
   vmovdqu    ymm0, [esi + edx]
   vpavgb     ymm0, ymm0, ymm1
   vpavgb     ymm0, ymm0, ymm1
   vmovdqu    [esi + edi], ymm0
   lea        esi, [esi + 32]
   sub        ecx, 32
   jg         xloop75
   jmp        xloop99

   // Blend 100 / 0 - Copy row unchanged.
 xloop100:
   rep movsb

  xloop99:
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_INTERPOLATEROW_AVX2

// Bilinear filter 16x2 -> 16x1
__declspec(naked)
void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
                          ptrdiff_t src_stride, int dst_width,
                          int source_y_fraction) {
  __asm {
    push       esi
    push       edi
    mov        edi, [esp + 8 + 4]   // dst_ptr
    mov        esi, [esp + 8 + 8]   // src_ptr
    mov        edx, [esp + 8 + 12]  // src_stride
    mov        ecx, [esp + 8 + 16]  // dst_width
    mov        eax, [esp + 8 + 20]  // source_y_fraction (0..255)
    sub        edi, esi
    shr        eax, 1
    // Dispatch to specialized filters if applicable.
    cmp        eax, 0
    je         xloop100  // 0 / 128.  Blend 100 / 0.
    cmp        eax, 32
    je         xloop75   // 32 / 128 is 0.25.  Blend 75 / 25.
    cmp        eax, 64
    je         xloop50   // 64 / 128 is 0.50.  Blend 50 / 50.
    cmp        eax, 96
    je         xloop25   // 96 / 128 is 0.75.  Blend 25 / 75.

    movd       xmm0, eax  // high fraction 0..127
    neg        eax
    add        eax, 128
    movd       xmm5, eax  // low fraction 128..1
    punpcklbw  xmm5, xmm0
    punpcklwd  xmm5, xmm5
    pshufd     xmm5, xmm5, 0

  xloop:
    movdqu     xmm0, [esi]
    movdqu     xmm2, [esi + edx]
    movdqu     xmm1, xmm0
    punpcklbw  xmm0, xmm2
    punpckhbw  xmm1, xmm2
    pmaddubsw  xmm0, xmm5
    pmaddubsw  xmm1, xmm5
    psrlw      xmm0, 7
    psrlw      xmm1, 7
    packuswb   xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop
    jmp        xloop99

    // Blend 25 / 75.
  xloop25:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop25
    jmp        xloop99

    // Blend 50 / 50.
  xloop50:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop50
    jmp        xloop99

    // Blend 75 / 25.
  xloop75:
    movdqu     xmm1, [esi]
    movdqu     xmm0, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop75
    jmp        xloop99

    // Blend 100 / 0 - Copy row unchanged.
  xloop100:
    movdqu     xmm0, [esi]
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop100

  xloop99:
    pop        edi
    pop        esi
    ret
  }
}

#ifdef HAS_INTERPOLATEROW_SSE2
// Bilinear filter 16x2 -> 16x1
__declspec(naked)
void InterpolateRow_SSE2(uint8* dst_ptr, const uint8* src_ptr,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) {
  __asm {
    push       esi
    push       edi
    mov        edi, [esp + 8 + 4]   // dst_ptr
    mov        esi, [esp + 8 + 8]   // src_ptr
    mov        edx, [esp + 8 + 12]  // src_stride
    mov        ecx, [esp + 8 + 16]  // dst_width
    mov        eax, [esp + 8 + 20]  // source_y_fraction (0..255)
    sub        edi, esi
    // Dispatch to specialized filters if applicable.
    cmp        eax, 0
    je         xloop100  // 0 / 256.  Blend 100 / 0.
    cmp        eax, 64
    je         xloop75   // 64 / 256 is 0.25.  Blend 75 / 25.
    cmp        eax, 128
    je         xloop50   // 128 / 256 is 0.50.  Blend 50 / 50.
    cmp        eax, 192
    je         xloop25   // 192 / 256 is 0.75.  Blend 25 / 75.

    movd       xmm5, eax            // xmm5 = y fraction
    punpcklbw  xmm5, xmm5
    psrlw      xmm5, 1
    punpcklwd  xmm5, xmm5
    punpckldq  xmm5, xmm5
    punpcklqdq xmm5, xmm5
    pxor       xmm4, xmm4

  xloop:
    movdqu     xmm0, [esi]  // row0
    movdqu     xmm2, [esi + edx]  // row1
    movdqu     xmm1, xmm0
    movdqu     xmm3, xmm2
    punpcklbw  xmm2, xmm4
    punpckhbw  xmm3, xmm4
    punpcklbw  xmm0, xmm4
    punpckhbw  xmm1, xmm4
    psubw      xmm2, xmm0  // row1 - row0
    psubw      xmm3, xmm1
    paddw      xmm2, xmm2  // 9 bits * 15 bits = 8.16
    paddw      xmm3, xmm3
    pmulhw     xmm2, xmm5  // scale diff
    pmulhw     xmm3, xmm5
    paddw      xmm0, xmm2  // sum rows
    paddw      xmm1, xmm3
    packuswb   xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop
    jmp        xloop99

    // Blend 25 / 75.
  xloop25:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop25
    jmp        xloop99

    // Blend 50 / 50.
  xloop50:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop50
    jmp        xloop99

    // Blend 75 / 25.
  xloop75:
    movdqu     xmm1, [esi]
    movdqu     xmm0, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop75
    jmp        xloop99

    // Blend 100 / 0 - Copy row unchanged.
  xloop100:
    movdqu     xmm0, [esi]
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop100

  xloop99:
    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_INTERPOLATEROW_SSE2

// For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA.
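#if 0
// Scalar sketch of the byte shuffle used by the rows below (illustrative
// only, not compiled).  shuffler[0..3] gives, for each output channel, the
// index of the source channel within the same 4-byte pixel.
static void ARGBShuffleRow_Ref(const uint8* src_argb, uint8* dst_argb,
                               const uint8* shuffler, int width) {
  int i;
  for (i = 0; i < width; ++i) {
    dst_argb[0] = src_argb[shuffler[0]];
    dst_argb[1] = src_argb[shuffler[1]];
    dst_argb[2] = src_argb[shuffler[2]];
    dst_argb[3] = src_argb[shuffler[3]];
    src_argb += 4;
    dst_argb += 4;
  }
}
#endif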
__declspec(naked)
void ARGBShuffleRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
                          const uint8* shuffler, int width) {
  __asm {
    mov        eax, [esp + 4]    // src_argb
    mov        edx, [esp + 8]    // dst_argb
    mov        ecx, [esp + 12]   // shuffler
    movdqu     xmm5, [ecx]
    mov        ecx, [esp + 16]   // width

  wloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax, [eax + 32]
    pshufb     xmm0, xmm5
    pshufb     xmm1, xmm5
5878 5879
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
fbarchard@google.com's avatar
fbarchard@google.com committed
5880
    lea        edx, [edx + 32]
5881
    sub        ecx, 8
fbarchard@google.com's avatar
fbarchard@google.com committed
5882 5883 5884 5885 5886 5887
    jg         wloop
    ret
  }
}

#ifdef HAS_ARGBSHUFFLEROW_AVX2
__declspec(naked)
void ARGBShuffleRow_AVX2(const uint8* src_argb, uint8* dst_argb,
                         const uint8* shuffler, int width) {
  __asm {
    mov        eax, [esp + 4]     // src_argb
    mov        edx, [esp + 8]     // dst_argb
    mov        ecx, [esp + 12]    // shuffler
    vbroadcastf128 ymm5, [ecx]    // same shuffle in high as low.
    mov        ecx, [esp + 16]    // width

  wloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax, [eax + 64]
    vpshufb    ymm0, ymm0, ymm5
    vpshufb    ymm1, ymm1, ymm5
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         wloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBSHUFFLEROW_AVX2

__declspec(naked)
void ARGBShuffleRow_SSE2(const uint8* src_argb, uint8* dst_argb,
                         const uint8* shuffler, int width) {
  __asm {
    push       ebx
    push       esi
    mov        eax, [esp + 8 + 4]    // src_argb
    mov        edx, [esp + 8 + 8]    // dst_argb
    mov        esi, [esp + 8 + 12]   // shuffler
    mov        ecx, [esp + 8 + 16]   // width
    pxor       xmm5, xmm5

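    // The first four shuffler bytes are loaded as a little-endian dword and
    // compared against the channel orders used by the common BGRA/ABGR/RGBA/
    // ARGB conversions; any other shuffle falls through to the scalar
    // shuf_any1 loop, which looks up each output byte individually.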
    mov        ebx, [esi]   // shuffler
    cmp        ebx, 0x03000102
    je         shuf_3012
    cmp        ebx, 0x00010203
    je         shuf_0123
    cmp        ebx, 0x00030201
    je         shuf_0321
    cmp        ebx, 0x02010003
    je         shuf_2103

  // TODO(fbarchard): Use one source pointer and 3 offsets.
  shuf_any1:
    movzx      ebx, byte ptr [esi]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx], bl
    movzx      ebx, byte ptr [esi + 1]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx + 1], bl
    movzx      ebx, byte ptr [esi + 2]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx + 2], bl
    movzx      ebx, byte ptr [esi + 3]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx + 3], bl
    lea        eax, [eax + 4]
    lea        edx, [edx + 4]
    sub        ecx, 1
    jg         shuf_any1
    jmp        shuf99

  shuf_0123:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 01Bh   // 1B = 00011011 = 0x0123 = BGRAToARGB
    pshuflw    xmm0, xmm0, 01Bh
    pshufhw    xmm1, xmm1, 01Bh
    pshuflw    xmm1, xmm1, 01Bh
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_0123
    jmp        shuf99

  shuf_0321:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 039h   // 39 = 00111001 = 0x0321 = RGBAToARGB
    pshuflw    xmm0, xmm0, 039h
    pshufhw    xmm1, xmm1, 039h
    pshuflw    xmm1, xmm1, 039h
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_0321
    jmp        shuf99

  shuf_2103:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 093h   // 93 = 10010011 = 0x2103 = ARGBToRGBA
    pshuflw    xmm0, xmm0, 093h
    pshufhw    xmm1, xmm1, 093h
    pshuflw    xmm1, xmm1, 093h
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_2103
    jmp        shuf99

  shuf_3012:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 0C6h   // C6 = 11000110 = 0x3012 = ABGRToARGB
    pshuflw    xmm0, xmm0, 0C6h
    pshufhw    xmm1, xmm1, 0C6h
    pshuflw    xmm1, xmm1, 0C6h
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_3012

  shuf99:
    pop        esi
    pop        ebx
    ret
  }
}

// YUY2 - Macro-pixel = 2 image pixels
// Y0U0Y1V0....Y2U2Y3V2...Y4U4Y5V4....

// UYVY - Macro-pixel = 2 image pixels
// U0Y0V0Y1

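// Both converters below read 16 Y bytes plus 8 U and 8 V bytes per
// iteration, interleave them with punpcklbw/punpckhbw, and store 32 bytes
// (16 pixels) of packed 4:2:2 output; the two differ only in whether Y or
// UV supplies the even byte positions.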
__declspec(naked)
void I422ToYUY2Row_SSE2(const uint8* src_y,
                        const uint8* src_u,
                        const uint8* src_v,
                        uint8* dst_frame, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_y
    mov        esi, [esp + 8 + 8]    // src_u
    mov        edx, [esp + 8 + 12]   // src_v
    mov        edi, [esp + 8 + 16]   // dst_frame
    mov        ecx, [esp + 8 + 20]   // width
    sub        edx, esi

  convertloop:
    movq       xmm2, qword ptr [esi] // U
    movq       xmm3, qword ptr [esi + edx] // V
    lea        esi, [esi + 8]
    punpcklbw  xmm2, xmm3 // UV
    movdqu     xmm0, [eax] // Y
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm2 // YUYV
    punpckhbw  xmm1, xmm2
    movdqu     [edi], xmm0
    movdqu     [edi + 16], xmm1
    lea        edi, [edi + 32]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void I422ToUYVYRow_SSE2(const uint8* src_y,
                        const uint8* src_u,
                        const uint8* src_v,
                        uint8* dst_frame, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_y
    mov        esi, [esp + 8 + 8]    // src_u
    mov        edx, [esp + 8 + 12]   // src_v
    mov        edi, [esp + 8 + 16]   // dst_frame
    mov        ecx, [esp + 8 + 20]   // width
    sub        edx, esi

  convertloop:
    movq       xmm2, qword ptr [esi] // U
    movq       xmm3, qword ptr [esi + edx] // V
    lea        esi, [esi + 8]
    punpcklbw  xmm2, xmm3 // UV
    movdqu     xmm0, [eax] // Y
    movdqa     xmm1, xmm2
    lea        eax, [eax + 16]
    punpcklbw  xmm1, xmm0 // UYVY
    punpckhbw  xmm2, xmm0
    movdqu     [edi], xmm1
    movdqu     [edi + 16], xmm2
    lea        edi, [edi + 32]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

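// The polynomial rows below evaluate, for each channel value x,
//   result = C0 + C1 * x + C2 * x * x + C3 * x * x * x
// where poly points at 16 floats (4 per coefficient, one for each of the
// B, G, R and A lanes); the result is converted back and saturated to
// 8 bits on store.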
#ifdef HAS_ARGBPOLYNOMIALROW_SSE2
__declspec(naked)
void ARGBPolynomialRow_SSE2(const uint8* src_argb,
                            uint8* dst_argb, const float* poly,
                            int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   /* src_argb */
    mov        edx, [esp + 4 + 8]   /* dst_argb */
    mov        esi, [esp + 4 + 12]  /* poly */
    mov        ecx, [esp + 4 + 16]  /* width */
    pxor       xmm3, xmm3  // 0 constant for zero extending bytes to ints.

    // 2 pixel loop.
 convertloop:
//    pmovzxbd  xmm0, dword ptr [eax]  // BGRA pixel
//    pmovzxbd  xmm4, dword ptr [eax + 4]  // BGRA pixel
    movq       xmm0, qword ptr [eax]  // BGRABGRA
    lea        eax, [eax + 8]
    punpcklbw  xmm0, xmm3
    movdqa     xmm4, xmm0
    punpcklwd  xmm0, xmm3  // pixel 0
    punpckhwd  xmm4, xmm3  // pixel 1
    cvtdq2ps   xmm0, xmm0  // 4 floats
    cvtdq2ps   xmm4, xmm4
    movdqa     xmm1, xmm0  // X
    movdqa     xmm5, xmm4
    mulps      xmm0, [esi + 16]  // C1 * X
    mulps      xmm4, [esi + 16]
    addps      xmm0, [esi]  // result = C0 + C1 * X
    addps      xmm4, [esi]
    movdqa     xmm2, xmm1
    movdqa     xmm6, xmm5
    mulps      xmm2, xmm1  // X * X
    mulps      xmm6, xmm5
    mulps      xmm1, xmm2  // X * X * X
    mulps      xmm5, xmm6
    mulps      xmm2, [esi + 32]  // C2 * X * X
    mulps      xmm6, [esi + 32]
    mulps      xmm1, [esi + 48]  // C3 * X * X * X
    mulps      xmm5, [esi + 48]
    addps      xmm0, xmm2  // result += C2 * X * X
    addps      xmm4, xmm6
    addps      xmm0, xmm1  // result += C3 * X * X * X
    addps      xmm4, xmm5
    cvttps2dq  xmm0, xmm0
    cvttps2dq  xmm4, xmm4
    packuswb   xmm0, xmm4
    packuswb   xmm0, xmm0
    movq       qword ptr [edx], xmm0
    lea        edx, [edx + 8]
    sub        ecx, 2
    jg         convertloop
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBPOLYNOMIALROW_SSE2

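// The AVX2 variant folds the multiply-adds into FMA3: vfmadd132ps computes
// dst = dst * src3 + src2 and vfmadd231ps computes dst = dst + src2 * src3,
// so three FMAs accumulate C0 + C1*X + C2*X*X + C3*X*X*X for two pixels
// (8 float lanes) per iteration.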
#ifdef HAS_ARGBPOLYNOMIALROW_AVX2
__declspec(naked)
void ARGBPolynomialRow_AVX2(const uint8* src_argb,
                            uint8* dst_argb, const float* poly,
                            int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_argb */
    mov        ecx, [esp + 12]   /* poly */
    vbroadcastf128 ymm4, [ecx]       // C0
    vbroadcastf128 ymm5, [ecx + 16]  // C1
    vbroadcastf128 ymm6, [ecx + 32]  // C2
    vbroadcastf128 ymm7, [ecx + 48]  // C3
    mov        ecx, [esp + 16]  /* width */

    // 2 pixel loop.
 convertloop:
    vpmovzxbd   ymm0, qword ptr [eax]  // 2 BGRA pixels
    lea         eax, [eax + 8]
    vcvtdq2ps   ymm0, ymm0        // X 8 floats
    vmulps      ymm2, ymm0, ymm0  // X * X
    vmulps      ymm3, ymm0, ymm7  // C3 * X
    vfmadd132ps ymm0, ymm4, ymm5  // result = C0 + C1 * X
    vfmadd231ps ymm0, ymm2, ymm6  // result += C2 * X * X
    vfmadd231ps ymm0, ymm2, ymm3  // result += C3 * X * X * X
    vcvttps2dq  ymm0, ymm0
    vpackusdw   ymm0, ymm0, ymm0  // b0g0r0a0_00000000_b0g0r0a0_00000000
    vpermq      ymm0, ymm0, 0xd8  // b0g0r0a0_b0g0r0a0_00000000_00000000
    vpackuswb   xmm0, xmm0, xmm0  // bgrabgra_00000000_00000000_00000000
    vmovq       qword ptr [edx], xmm0
    lea         edx, [edx + 8]
    sub         ecx, 2
    jg          convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBPOLYNOMIALROW_AVX2

#ifdef HAS_ARGBCOLORTABLEROW_X86
// Transform ARGB pixels with color table.
__declspec(naked)
void ARGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb,
                           int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   /* dst_argb */
    mov        esi, [esp + 4 + 8]   /* table_argb */
    mov        ecx, [esp + 4 + 12]  /* width */

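    // table_argb is indexed as 256 four-byte entries, one byte per channel,
    // so each channel value c is replaced by table_argb[c * 4 + channel];
    // the pixel is rewritten in place in dst_argb.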
    // 1 pixel loop.
  convertloop:
    movzx      edx, byte ptr [eax]
    lea        eax, [eax + 4]
    movzx      edx, byte ptr [esi + edx * 4]
    mov        byte ptr [eax - 4], dl
    movzx      edx, byte ptr [eax - 4 + 1]
    movzx      edx, byte ptr [esi + edx * 4 + 1]
    mov        byte ptr [eax - 4 + 1], dl
    movzx      edx, byte ptr [eax - 4 + 2]
    movzx      edx, byte ptr [esi + edx * 4 + 2]
    mov        byte ptr [eax - 4 + 2], dl
    movzx      edx, byte ptr [eax - 4 + 3]
    movzx      edx, byte ptr [esi + edx * 4 + 3]
    mov        byte ptr [eax - 4 + 3], dl
    dec        ecx
    jg         convertloop
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBCOLORTABLEROW_X86

#ifdef HAS_RGBCOLORTABLEROW_X86
// Transform RGB pixels with color table.
__declspec(naked)
void RGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   /* dst_argb */
    mov        esi, [esp + 4 + 8]   /* table_argb */
    mov        ecx, [esp + 4 + 12]  /* width */

    // 1 pixel loop.
  convertloop:
    movzx      edx, byte ptr [eax]
    lea        eax, [eax + 4]
    movzx      edx, byte ptr [esi + edx * 4]
    mov        byte ptr [eax - 4], dl
    movzx      edx, byte ptr [eax - 4 + 1]
    movzx      edx, byte ptr [esi + edx * 4 + 1]
    mov        byte ptr [eax - 4 + 1], dl
    movzx      edx, byte ptr [eax - 4 + 2]
    movzx      edx, byte ptr [esi + edx * 4 + 2]
    mov        byte ptr [eax - 4 + 2], dl
    dec        ecx
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_RGBCOLORTABLEROW_X86

#ifdef HAS_ARGBLUMACOLORTABLEROW_SSSE3
// Transform RGB pixels with luma table.
__declspec(naked)
void ARGBLumaColorTableRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
                                 int width,
                                 const uint8* luma, uint32 lumacoeff) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   /* src_argb */
    mov        edi, [esp + 8 + 8]   /* dst_argb */
    mov        ecx, [esp + 8 + 12]  /* width */
    movd       xmm2, dword ptr [esp + 8 + 16]  // luma table
    movd       xmm3, dword ptr [esp + 8 + 20]  // lumacoeff
    pshufd     xmm2, xmm2, 0
    pshufd     xmm3, xmm3, 0
    pcmpeqb    xmm4, xmm4        // generate mask 0xff00ff00
    psllw      xmm4, 8
    pxor       xmm5, xmm5

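    // Each pixel's channel bytes are weighted by the packed lumacoeff bytes
    // (pmaddubsw + phaddw) to form a rough luma.  The low 8 bits are masked
    // off so the value is a multiple of 256, and adding it to the luma table
    // base selects a 256-byte sub-table; B, G and R are then looked up in
    // that sub-table while alpha is copied through unchanged.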
    // 4 pixel loop.
  convertloop:
    movdqu     xmm0, xmmword ptr [eax]      // generate luma ptr
    pmaddubsw  xmm0, xmm3
    phaddw     xmm0, xmm0
    pand       xmm0, xmm4  // mask out low bits
    punpcklwd  xmm0, xmm5
    paddd      xmm0, xmm2  // add table base
    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // 00111001 to rotate right 32

    movzx      edx, byte ptr [eax]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi], dl
    movzx      edx, byte ptr [eax + 1]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 1], dl
    movzx      edx, byte ptr [eax + 2]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 2], dl
    movzx      edx, byte ptr [eax + 3]  // copy alpha.
    mov        byte ptr [edi + 3], dl

    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // 00111001 to rotate right 32

    movzx      edx, byte ptr [eax + 4]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 4], dl
    movzx      edx, byte ptr [eax + 5]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 5], dl
    movzx      edx, byte ptr [eax + 6]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 6], dl
    movzx      edx, byte ptr [eax + 7]  // copy alpha.
    mov        byte ptr [edi + 7], dl

    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // 00111001 to rotate right 32

    movzx      edx, byte ptr [eax + 8]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 8], dl
    movzx      edx, byte ptr [eax + 9]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 9], dl
    movzx      edx, byte ptr [eax + 10]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 10], dl
    movzx      edx, byte ptr [eax + 11]  // copy alpha.
    mov        byte ptr [edi + 11], dl

    movd       esi, xmm0

    movzx      edx, byte ptr [eax + 12]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 12], dl
    movzx      edx, byte ptr [eax + 13]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 13], dl
    movzx      edx, byte ptr [eax + 14]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 14], dl
    movzx      edx, byte ptr [eax + 15]  // copy alpha.
    mov        byte ptr [edi + 15], dl

    lea        eax, [eax + 16]
    lea        edi, [edi + 16]
    sub        ecx, 4
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBLUMACOLORTABLEROW_SSSE3

#endif  // defined(_M_X64)
#endif  // !defined(LIBYUV_DISABLE_X86) && (defined(_M_IX86) || defined(_M_X64))

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif