/*
 *  Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/row.h"

#if !defined(LIBYUV_DISABLE_X86) && defined(_M_X64) && \
    defined(_MSC_VER) && !defined(__clang__)
#include <emmintrin.h>
#include <tmmintrin.h>  // For _mm_maddubs_epi16
#endif

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

// This module is for Visual C 32/64 bit and clangcl 32 bit
#if !defined(LIBYUV_DISABLE_X86) && \
    (defined(_M_IX86) || (defined(_M_X64) && !defined(__clang__)))

#define KUVTOB   0
#define KUVTOG   32
#define KUVTOR   64
#define KUVBIASB 96
#define KUVBIASG 128
#define KUVBIASR 160
#define KYTORGB  192

// BT.601 YUV to RGB reference
//  R = (Y - 16) * 1.164              - V * -1.596
//  G = (Y - 16) * 1.164 - U *  0.391 - V *  0.813
//  B = (Y - 16) * 1.164 - U * -2.018

// Y contribution to R,G,B.  Scale and bias.
// TODO(fbarchard): Consider moving constants into a common header.
#define YG 18997 /* round(1.164 * 64 * 256 * 256 / 257) */
#define YGB -1160 /* 1.164 * 64 * -16 + 64 / 2 */

// U and V contributions to R,G,B.
#define UB -128 /* max(-128, round(-2.018 * 64)) */
#define UG 25 /* round(0.391 * 64) */
#define VG 52 /* round(0.813 * 64) */
#define VR -102 /* round(-1.596 * 64) */

// Bias values to subtract 16 from Y and 128 from U and V.
#define BB (UB * 128            + YGB)
#define BG (UG * 128 + VG * 128 + YGB)
#define BR            (VR * 128 + YGB)
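
// Illustrative scalar sketch (an assumption added for clarity, not part of
// the original file) of how the constants above recover one BGR pixel; the
// SIMD YUVTORGB paths below evaluate the same expression 8 or 16 pixels at
// a time in 6 bit fixed point.
static __inline int32 Clamp6(int32 v) {
  v = v >> 6;  // drop the 6 fractional bits
  return v < 0 ? 0 : (v > 255 ? 255 : v);
}
static __inline void YuvPixelSketch(uint8 y, uint8 u, uint8 v,
                                    uint8* b, uint8* g, uint8* r) {
  // (y * 0x0101 * YG) >> 16 is Y * 1.164 in 6 bit fixed point; the -16 Y
  // bias is folded into BB/BG/BR via YGB.
  int32 y1 = (int32)(((uint32)(y * 0x0101) * YG) >> 16);
  *b = (uint8)Clamp6(y1 + BB - u * UB);
  *g = (uint8)Clamp6(y1 + BG - (u * UG + v * VG));
  *r = (uint8)Clamp6(y1 + BR - v * VR);
}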

// BT601 constants for YUV to RGB.
YuvConstants SIMD_ALIGNED(kYuvConstants) = {
  { UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0,
    UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0 },
  { UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG,
    UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG },
  { 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR,
    0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR },
  { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB },
  { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG },
  { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR },
  { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG }
};

// BT601 constants for NV21 where chroma plane is VU instead of UV.
YuvConstants SIMD_ALIGNED(kYvuConstants) = {
  { 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB,
    0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB },
  { VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG,
    VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG },
  { VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0,
    VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0 },
  { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB },
  { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG },
  { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR },
  { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG }
};

#undef YG
#undef YGB
#undef UB
#undef UG
#undef VG
#undef VR
#undef BB
#undef BG
#undef BR

// JPEG YUV to RGB reference
// *  R = Y                - V * -1.40200
// *  G = Y - U *  0.34414 - V *  0.71414
// *  B = Y - U * -1.77200

// Y contribution to R,G,B.  Scale and bias.
// TODO(fbarchard): Consider moving constants into a common header.
#define YGJ 16320 /* round(1.000 * 64 * 256 * 256 / 257) */
#define YGBJ 32  /* 64 / 2 */

// U and V contributions to R,G,B.
#define UBJ -113 /* round(-1.77200 * 64) */
#define UGJ 22 /* round(0.34414 * 64) */
#define VGJ 46 /* round(0.71414  * 64) */
#define VRJ -90 /* round(-1.40200 * 64) */

// Bias values to subtract 16 from Y and 128 from U and V.
#define BBJ (UBJ * 128             + YGBJ)
#define BGJ (UGJ * 128 + VGJ * 128 + YGBJ)
#define BRJ             (VRJ * 128 + YGBJ)

// JPEG constants for YUV to RGB.
YuvConstants SIMD_ALIGNED(kYuvJConstants) = {
  { UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0,
    UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0 },
  { UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ,
    UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ,
    UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ,
    UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ },
  { 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ,
    0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ },
  { BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ,
    BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ },
  { BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ,
    BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ },
  { BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ,
    BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ },
  { YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ,
    YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ }
};

#undef YGJ
#undef YGBJ
#undef UBJ
#undef UGJ
#undef VGJ
#undef VRJ
#undef BBJ
#undef BGJ
#undef BRJ

// BT.709 YUV to RGB reference
// *  R = Y                - V * -1.28033
// *  G = Y - U *  0.21482 - V *  0.38059
// *  B = Y - U * -2.12798

// Y contribution to R,G,B.  Scale and bias.
// TODO(fbarchard): Consider moving constants into a common header.
#define YGH 16320 /* round(1.000 * 64 * 256 * 256 / 257) */
#define YGBH 32  /* 64 / 2 */

// U and V contributions to R,G,B.
#define UBH -128 /* max(-128, round(-2.12798 * 64)) */
#define UGH 14 /* round(0.21482 * 64) */
#define VGH 24 /* round(0.38059  * 64) */
#define VRH -82 /* round(-1.28033 * 64) */

// Bias values to round, and subtract 128 from U and V.
#define BBH (UBH * 128 + YGBH)
#define BGH (UGH * 128 + VGH * 128 + YGBH)
#define BRH (VRH * 128 + YGBH)

// BT.709 constants for YUV to RGB.
YuvConstants SIMD_ALIGNED(kYuvHConstants) = {
  { UBH, 0, UBH, 0, UBH, 0, UBH, 0, UBH, 0, UBH, 0, UBH, 0, UBH, 0,
    UBH, 0, UBH, 0, UBH, 0, UBH, 0, UBH, 0, UBH, 0, UBH, 0, UBH, 0 },
  { UGH, VGH, UGH, VGH, UGH, VGH, UGH, VGH,
    UGH, VGH, UGH, VGH, UGH, VGH, UGH, VGH,
    UGH, VGH, UGH, VGH, UGH, VGH, UGH, VGH,
    UGH, VGH, UGH, VGH, UGH, VGH, UGH, VGH },
  { 0, VRH, 0, VRH, 0, VRH, 0, VRH, 0, VRH, 0, VRH, 0, VRH, 0, VRH,
    0, VRH, 0, VRH, 0, VRH, 0, VRH, 0, VRH, 0, VRH, 0, VRH, 0, VRH },
  { BBH, BBH, BBH, BBH, BBH, BBH, BBH, BBH,
    BBH, BBH, BBH, BBH, BBH, BBH, BBH, BBH },
  { BGH, BGH, BGH, BGH, BGH, BGH, BGH, BGH,
    BGH, BGH, BGH, BGH, BGH, BGH, BGH, BGH },
  { BRH, BRH, BRH, BRH, BRH, BRH, BRH, BRH,
    BRH, BRH, BRH, BRH, BRH, BRH, BRH, BRH },
  { YGH, YGH, YGH, YGH, YGH, YGH, YGH, YGH,
    YGH, YGH, YGH, YGH, YGH, YGH, YGH, YGH }
};

#undef YGH
#undef YGBH
#undef UBH
#undef UGH
#undef VGH
#undef VRH
#undef BBH
#undef BGH
#undef BRH

// 64 bit
#if defined(_M_X64)

// Read 4 UV from 422, upsample to 8 UV.
#define READYUV422                                                             \
    xmm0 = _mm_cvtsi32_si128(*(uint32*)u_buf);                                 \
    xmm1 = _mm_cvtsi32_si128(*(uint32*)(u_buf + offset));                      \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);                                      \
    xmm0 = _mm_unpacklo_epi16(xmm0, xmm0);                                     \
    u_buf += 4;

// Convert 8 pixels: 8 UV and 8 Y.
#define YUVTORGB(YuvConstants)                                                 \
    xmm1 = _mm_loadu_si128(&xmm0);                                             \
    xmm2 = _mm_loadu_si128(&xmm0);                                             \
    xmm0 = _mm_maddubs_epi16(xmm0, *(__m128i*)YuvConstants->kUVToB);           \
    xmm1 = _mm_maddubs_epi16(xmm1, *(__m128i*)YuvConstants->kUVToG);           \
    xmm2 = _mm_maddubs_epi16(xmm2, *(__m128i*)YuvConstants->kUVToR);           \
    xmm0 = _mm_sub_epi16(*(__m128i*)YuvConstants->kUVBiasB, xmm0);             \
    xmm1 = _mm_sub_epi16(*(__m128i*)YuvConstants->kUVBiasG, xmm1);             \
    xmm2 = _mm_sub_epi16(*(__m128i*)YuvConstants->kUVBiasR, xmm2);             \
    xmm3 = _mm_loadl_epi64((__m128i*)y_buf);                                   \
    y_buf += 8;                                                                \
    xmm3 = _mm_unpacklo_epi8(xmm3, xmm3);                                      \
    xmm3 = _mm_mulhi_epu16(xmm3, *(__m128i*)YuvConstants->kYToRgb);            \
    xmm0 = _mm_adds_epi16(xmm0, xmm3);                                         \
    xmm1 = _mm_adds_epi16(xmm1, xmm3);                                         \
    xmm2 = _mm_adds_epi16(xmm2, xmm3);                                         \
    xmm0 = _mm_srai_epi16(xmm0, 6);                                            \
    xmm1 = _mm_srai_epi16(xmm1, 6);                                            \
    xmm2 = _mm_srai_epi16(xmm2, 6);                                            \
    xmm0 = _mm_packus_epi16(xmm0, xmm0);                                       \
    xmm1 = _mm_packus_epi16(xmm1, xmm1);                                       \
    xmm2 = _mm_packus_epi16(xmm2, xmm2);

// Store 8 ARGB values.
#define STOREARGB                                                              \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);                                      \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm5);                                      \
    xmm1 = _mm_loadu_si128(&xmm0);                                             \
    xmm0 = _mm_unpacklo_epi16(xmm0, xmm2);                                     \
    xmm1 = _mm_unpackhi_epi16(xmm1, xmm2);                                     \
    _mm_storeu_si128((__m128i *)dst_argb, xmm0);                               \
    _mm_storeu_si128((__m128i *)(dst_argb + 16), xmm1);                        \
    dst_argb += 32;

// Store 8 ABGR values.
#define STOREABGR                                                              \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);                                      \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm5);                                      \
    xmm1 = _mm_loadu_si128(&xmm2);                                             \
    xmm2 = _mm_unpacklo_epi16(xmm2, xmm0);                                     \
    xmm1 = _mm_unpackhi_epi16(xmm1, xmm0);                                     \
    _mm_storeu_si128((__m128i *)dst_argb, xmm2);                               \
    _mm_storeu_si128((__m128i *)(dst_argb + 16), xmm1);                        \
    dst_argb += 32;


#if defined(HAS_I422TOARGBMATRIXROW_SSSE3)
void I422ToARGBMatrixRow_SSSE3(const uint8* y_buf,
                               const uint8* u_buf,
                               const uint8* v_buf,
                               uint8* dst_argb,
                               struct YuvConstants* YuvConstants,
                               int width) {
  __m128i xmm0, xmm1, xmm2, xmm3;
  const __m128i xmm5 = _mm_set1_epi8(-1);
  const ptrdiff_t offset = (uint8*)v_buf - (uint8*)u_buf;
  while (width > 0) {
    READYUV422
    YUVTORGB(YuvConstants)
    STOREARGB
    width -= 8;
  }
}
#endif

#if defined(HAS_I422TOABGRMATRIXROW_SSSE3)
void I422ToABGRMatrixRow_SSSE3(const uint8* y_buf,
                               const uint8* u_buf,
                               const uint8* v_buf,
                               uint8* dst_argb,
                               struct YuvConstants* YuvConstants,
                               int width) {
  __m128i xmm0, xmm1, xmm2, xmm3;
  const __m128i xmm5 = _mm_set1_epi8(-1);
  const ptrdiff_t offset = (uint8*)v_buf - (uint8*)u_buf;
  while (width > 0) {
    READYUV422
    YUVTORGB(YuvConstants)
    STOREABGR
    width -= 8;
  }
}
#endif
// 32 bit
#else  // defined(_M_X64)
#ifdef HAS_ARGBTOYROW_SSSE3

// Constants for ARGB.
static const vec8 kARGBToY = {
  13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0
};

// JPeg full range.
static const vec8 kARGBToYJ = {
  15, 75, 38, 0, 15, 75, 38, 0, 15, 75, 38, 0, 15, 75, 38, 0
};

static const vec8 kARGBToU = {
  112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0
};

static const vec8 kARGBToUJ = {
  127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0
};

static const vec8 kARGBToV = {
  -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0,
};

static const vec8 kARGBToVJ = {
  -20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0
};

// vpshufb for vphaddw + vpackuswb packed to shorts.
static const lvec8 kShufARGBToUV_AVX = {
  0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15,
  0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15
};

// Constants for BGRA.
static const vec8 kBGRAToY = {
  0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13
};

static const vec8 kBGRAToU = {
  0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112
};

static const vec8 kBGRAToV = {
  0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18
};

// Constants for ABGR.
static const vec8 kABGRToY = {
  33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0
};

static const vec8 kABGRToU = {
  -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0
};

static const vec8 kABGRToV = {
  112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0
};

// Constants for RGBA.
static const vec8 kRGBAToY = {
  0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33
};

static const vec8 kRGBAToU = {
  0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38
};

static const vec8 kRGBAToV = {
  0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112
};

static const uvec8 kAddY16 = {
  16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u
};

// 7 bit fixed point 0.5.
static const vec16 kAddYJ64 = {
  64, 64, 64, 64, 64, 64, 64, 64
};

static const uvec8 kAddUV128 = {
  128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u,
  128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u
};

static const uvec16 kAddUVJ128 = {
  0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u
};
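
// Illustrative scalar form (an assumption added for clarity, not part of the
// original file) of the BT.601 luma path driven by kARGBToY and kAddY16:
// pmaddubsw/phaddw compute 13*B + 65*G + 33*R per pixel, psrlw drops the
// 7 fractional bits, and paddb adds the +16 studio swing offset.
static __inline uint8 ARGBToYPixelSketch(uint8 b, uint8 g, uint8 r) {
  return (uint8)(((13 * b + 65 * g + 33 * r) >> 7) + 16);
}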

// Shuffle table for converting RGB24 to ARGB.
static const uvec8 kShuffleMaskRGB24ToARGB = {
  0u, 1u, 2u, 12u, 3u, 4u, 5u, 13u, 6u, 7u, 8u, 14u, 9u, 10u, 11u, 15u
};

// Shuffle table for converting RAW to ARGB.
static const uvec8 kShuffleMaskRAWToARGB = {
  2u, 1u, 0u, 12u, 5u, 4u, 3u, 13u, 8u, 7u, 6u, 14u, 11u, 10u, 9u, 15u
};
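
// Scalar sketch (assumption, not in the original source) of what these two
// shuffle tables accomplish per pixel: gather 3 source bytes and set alpha.
static __inline uint32 RGB24ToARGBPixelSketch(const uint8* src) {
  // RGB24 is stored b, g, r; ARGB adds an opaque alpha in the high byte.
  return 0xff000000u | ((uint32)src[2] << 16) | ((uint32)src[1] << 8) | src[0];
}
static __inline uint32 RAWToARGBPixelSketch(const uint8* src) {
  // RAW is stored r, g, b, hence the swapped 2, 1, 0 indices in the table.
  return 0xff000000u | ((uint32)src[0] << 16) | ((uint32)src[1] << 8) | src[2];
}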

// Shuffle table for converting ARGB to RGB24.
static const uvec8 kShuffleMaskARGBToRGB24 = {
  0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 10u, 12u, 13u, 14u, 128u, 128u, 128u, 128u
};

// Shuffle table for converting ARGB to RAW.
static const uvec8 kShuffleMaskARGBToRAW = {
  2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 8u, 14u, 13u, 12u, 128u, 128u, 128u, 128u
};

// Shuffle table for converting ARGBToRGB24 for I422ToRGB24.  First 8 + next 4
static const uvec8 kShuffleMaskARGBToRGB24_0 = {
  0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 128u, 128u, 128u, 128u, 10u, 12u, 13u, 14u
};

// Shuffle table for converting ARGB to RAW for I422ToRAW.  First 8 + next 4
static const uvec8 kShuffleMaskARGBToRAW_0 = {
  2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 128u, 128u, 128u, 128u, 8u, 14u, 13u, 12u
};

// Duplicates gray value 3 times and fills in alpha opaque.
__declspec(naked)
void J400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int pix) {
  __asm {
    mov        eax, [esp + 4]        // src_y
    mov        edx, [esp + 8]        // dst_argb
    mov        ecx, [esp + 12]       // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0xff000000
    pslld      xmm5, 24

  convertloop:
    movq       xmm0, qword ptr [eax]
    lea        eax,  [eax + 8]
    punpcklbw  xmm0, xmm0
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm0
    punpckhwd  xmm1, xmm1
    por        xmm0, xmm5
    por        xmm1, xmm5
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
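
// Scalar model (an illustrative assumption) of the rows above: each gray
// byte becomes an opaque ARGB pixel with B = G = R = gray.
static __inline uint32 J400ToARGBPixelSketch(uint8 y) {
  return 0xff000000u | ((uint32)y << 16) | ((uint32)y << 8) | y;
}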

#ifdef HAS_J400TOARGBROW_AVX2
// Duplicates gray value 3 times and fills in alpha opaque.
__declspec(naked)
void J400ToARGBRow_AVX2(const uint8* src_y, uint8* dst_argb, int pix) {
  __asm {
    mov         eax, [esp + 4]        // src_y
    mov         edx, [esp + 8]        // dst_argb
    mov         ecx, [esp + 12]       // pix
    vpcmpeqb    ymm5, ymm5, ymm5      // generate mask 0xff000000
    vpslld      ymm5, ymm5, 24

  convertloop:
    vmovdqu     xmm0, [eax]
    lea         eax,  [eax + 16]
    vpermq      ymm0, ymm0, 0xd8
    vpunpcklbw  ymm0, ymm0, ymm0
    vpermq      ymm0, ymm0, 0xd8
    vpunpckhwd  ymm1, ymm0, ymm0
    vpunpcklwd  ymm0, ymm0, ymm0
    vpor        ymm0, ymm0, ymm5
    vpor        ymm1, ymm1, ymm5
    vmovdqu     [edx], ymm0
    vmovdqu     [edx + 32], ymm1
    lea         edx, [edx + 64]
    sub         ecx, 16
    jg          convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_J400TOARGBROW_AVX2

__declspec(naked)
void RGB24ToARGBRow_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_rgb24
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm5, xmm5       // generate mask 0xff000000
    pslld     xmm5, 24
    movdqa    xmm4, xmmword ptr kShuffleMaskRGB24ToARGB

 convertloop:
    movdqu    xmm0, [eax]
    movdqu    xmm1, [eax + 16]
    movdqu    xmm3, [eax + 32]
    lea       eax, [eax + 48]
    movdqa    xmm2, xmm3
    palignr   xmm2, xmm1, 8    // xmm2 = { xmm3[0:3] xmm1[8:15]}
    pshufb    xmm2, xmm4
    por       xmm2, xmm5
    palignr   xmm1, xmm0, 12   // xmm1 = { xmm3[0:7] xmm0[12:15]}
    pshufb    xmm0, xmm4
    movdqu    [edx + 32], xmm2
    por       xmm0, xmm5
    pshufb    xmm1, xmm4
    movdqu    [edx], xmm0
    por       xmm1, xmm5
    palignr   xmm3, xmm3, 4    // xmm3 = { xmm3[4:15]}
    pshufb    xmm3, xmm4
    movdqu    [edx + 16], xmm1
    por       xmm3, xmm5
    movdqu    [edx + 48], xmm3
    lea       edx, [edx + 64]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

__declspec(naked)
void RAWToARGBRow_SSSE3(const uint8* src_raw, uint8* dst_argb,
                        int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_raw
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm5, xmm5       // generate mask 0xff000000
    pslld     xmm5, 24
    movdqa    xmm4, xmmword ptr kShuffleMaskRAWToARGB

 convertloop:
    movdqu    xmm0, [eax]
    movdqu    xmm1, [eax + 16]
    movdqu    xmm3, [eax + 32]
    lea       eax, [eax + 48]
    movdqa    xmm2, xmm3
    palignr   xmm2, xmm1, 8    // xmm2 = { xmm3[0:3] xmm1[8:15]}
    pshufb    xmm2, xmm4
    por       xmm2, xmm5
    palignr   xmm1, xmm0, 12   // xmm1 = { xmm3[0:7] xmm0[12:15]}
    pshufb    xmm0, xmm4
    movdqu    [edx + 32], xmm2
    por       xmm0, xmm5
    pshufb    xmm1, xmm4
    movdqu    [edx], xmm0
    por       xmm1, xmm5
    palignr   xmm3, xmm3, 4    // xmm3 = { xmm3[4:15]}
    pshufb    xmm3, xmm4
    movdqu    [edx + 16], xmm1
    por       xmm3, xmm5
    movdqu    [edx + 48], xmm3
    lea       edx, [edx + 64]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

// pmul method to replicate bits.
// Math to replicate bits:
// (v << 8) | (v << 3)
// v * 256 + v * 8
// v * (256 + 8)
// G shift of 5 is incorporated, so shift is 5 + 8 and 5 + 3
// 20 instructions.
__declspec(naked)
void RGB565ToARGBRow_SSE2(const uint8* src_rgb565, uint8* dst_argb,
                          int pix) {
  __asm {
    mov       eax, 0x01080108  // generate multiplier to repeat 5 bits
    movd      xmm5, eax
    pshufd    xmm5, xmm5, 0
    mov       eax, 0x20802080  // multiplier shift by 5 and then repeat 6 bits
    movd      xmm6, eax
    pshufd    xmm6, xmm6, 0
    pcmpeqb   xmm3, xmm3       // generate mask 0xf800f800 for Red
    psllw     xmm3, 11
    pcmpeqb   xmm4, xmm4       // generate mask 0x07e007e0 for Green
    psllw     xmm4, 10
    psrlw     xmm4, 5
    pcmpeqb   xmm7, xmm7       // generate mask 0xff00ff00 for Alpha
    psllw     xmm7, 8

    mov       eax, [esp + 4]   // src_rgb565
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    sub       edx, eax
    sub       edx, eax

 convertloop:
    movdqu    xmm0, [eax]   // fetch 8 pixels of bgr565
    movdqa    xmm1, xmm0
    movdqa    xmm2, xmm0
    pand      xmm1, xmm3    // R in upper 5 bits
    psllw     xmm2, 11      // B in upper 5 bits
    pmulhuw   xmm1, xmm5    // * (256 + 8)
    pmulhuw   xmm2, xmm5    // * (256 + 8)
    psllw     xmm1, 8
    por       xmm1, xmm2    // RB
    pand      xmm0, xmm4    // G in middle 6 bits
    pmulhuw   xmm0, xmm6    // << 5 * (256 + 4)
    por       xmm0, xmm7    // AG
    movdqa    xmm2, xmm1
    punpcklbw xmm1, xmm0
    punpckhbw xmm2, xmm0
    movdqu    [eax * 2 + edx], xmm1  // store 4 pixels of ARGB
    movdqu    [eax * 2 + edx + 16], xmm2  // store next 4 pixels of ARGB
    lea       eax, [eax + 16]
    sub       ecx, 8
    jg        convertloop
    ret
  }
}
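
// Scalar equivalents (added as an illustrative assumption) of the pmulhuw
// bit replication used above: widen 5 or 6 bit channels to 8 bits by
// repeating the top bits, which is what * (256 + 8) and * (256 + 4) do.
static __inline uint8 Replicate5To8(uint32 v5) {
  return (uint8)((v5 << 3) | (v5 >> 2));  // (v5 * (256 + 8)) >> 5
}
static __inline uint8 Replicate6To8(uint32 v6) {
  return (uint8)((v6 << 2) | (v6 >> 4));  // (v6 * (256 + 4)) >> 6
}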

#ifdef HAS_RGB565TOARGBROW_AVX2
// pmul method to replicate bits.
// Math to replicate bits:
// (v << 8) | (v << 3)
// v * 256 + v * 8
// v * (256 + 8)
// G shift of 5 is incorporated, so shift is 5 + 8 and 5 + 3
__declspec(naked)
void RGB565ToARGBRow_AVX2(const uint8* src_rgb565, uint8* dst_argb,
                          int pix) {
  __asm {
    mov        eax, 0x01080108  // generate multiplier to repeat 5 bits
    vmovd      xmm5, eax
    vbroadcastss ymm5, xmm5
    mov        eax, 0x20802080  // multiplier shift by 5 and then repeat 6 bits
    vmovd      xmm6, eax
    vbroadcastss ymm6, xmm6
    vpcmpeqb   ymm3, ymm3, ymm3       // generate mask 0xf800f800 for Red
    vpsllw     ymm3, ymm3, 11
    vpcmpeqb   ymm4, ymm4, ymm4       // generate mask 0x07e007e0 for Green
    vpsllw     ymm4, ymm4, 10
    vpsrlw     ymm4, ymm4, 5
    vpcmpeqb   ymm7, ymm7, ymm7       // generate mask 0xff00ff00 for Alpha
    vpsllw     ymm7, ymm7, 8

    mov        eax, [esp + 4]   // src_rgb565
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // pix
    sub        edx, eax
    sub        edx, eax

 convertloop:
    vmovdqu    ymm0, [eax]   // fetch 16 pixels of bgr565
    vpand      ymm1, ymm0, ymm3    // R in upper 5 bits
    vpsllw     ymm2, ymm0, 11      // B in upper 5 bits
    vpmulhuw   ymm1, ymm1, ymm5    // * (256 + 8)
    vpmulhuw   ymm2, ymm2, ymm5    // * (256 + 8)
    vpsllw     ymm1, ymm1, 8
    vpor       ymm1, ymm1, ymm2    // RB
    vpand      ymm0, ymm0, ymm4    // G in middle 6 bits
    vpmulhuw   ymm0, ymm0, ymm6    // << 5 * (256 + 4)
    vpor       ymm0, ymm0, ymm7    // AG
    vpermq     ymm0, ymm0, 0xd8    // mutate for unpack
    vpermq     ymm1, ymm1, 0xd8
    vpunpckhbw ymm2, ymm1, ymm0
    vpunpcklbw ymm1, ymm1, ymm0
    vmovdqu    [eax * 2 + edx], ymm1  // store 8 pixels of ARGB
    vmovdqu    [eax * 2 + edx + 32], ymm2  // store next 8 pixels of ARGB
    lea       eax, [eax + 32]
    sub       ecx, 16
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_RGB565TOARGBROW_AVX2

#ifdef HAS_ARGB1555TOARGBROW_AVX2
__declspec(naked)
void ARGB1555ToARGBRow_AVX2(const uint8* src_argb1555, uint8* dst_argb,
                            int pix) {
  __asm {
    mov        eax, 0x01080108  // generate multiplier to repeat 5 bits
    vmovd      xmm5, eax
    vbroadcastss ymm5, xmm5
    mov        eax, 0x42004200  // multiplier shift by 6 and then repeat 5 bits
    vmovd      xmm6, eax
    vbroadcastss ymm6, xmm6
    vpcmpeqb   ymm3, ymm3, ymm3 // generate mask 0xf800f800 for Red
    vpsllw     ymm3, ymm3, 11
    vpsrlw     ymm4, ymm3, 6    // generate mask 0x03e003e0 for Green
    vpcmpeqb   ymm7, ymm7, ymm7 // generate mask 0xff00ff00 for Alpha
    vpsllw     ymm7, ymm7, 8

    mov        eax,  [esp + 4]   // src_argb1555
    mov        edx,  [esp + 8]   // dst_argb
    mov        ecx,  [esp + 12]  // pix
    sub        edx,  eax
    sub        edx,  eax

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 16 pixels of 1555
    vpsllw     ymm1, ymm0, 1       // R in upper 5 bits
    vpsllw     ymm2, ymm0, 11      // B in upper 5 bits
    vpand      ymm1, ymm1, ymm3
    vpmulhuw   ymm2, ymm2, ymm5    // * (256 + 8)
    vpmulhuw   ymm1, ymm1, ymm5    // * (256 + 8)
    vpsllw     ymm1, ymm1, 8
    vpor       ymm1, ymm1, ymm2    // RB
    vpsraw     ymm2, ymm0, 8       // A
    vpand      ymm0, ymm0, ymm4    // G in middle 5 bits
    vpmulhuw   ymm0, ymm0, ymm6    // << 6 * (256 + 8)
    vpand      ymm2, ymm2, ymm7
    vpor       ymm0, ymm0, ymm2    // AG
    vpermq     ymm0, ymm0, 0xd8    // mutate for unpack
    vpermq     ymm1, ymm1, 0xd8
    vpunpckhbw ymm2, ymm1, ymm0
    vpunpcklbw ymm1, ymm1, ymm0
    vmovdqu    [eax * 2 + edx], ymm1  // store 8 pixels of ARGB
    vmovdqu    [eax * 2 + edx + 32], ymm2  // store next 8 pixels of ARGB
    lea       eax, [eax + 32]
    sub       ecx, 16
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGB1555TOARGBROW_AVX2

#ifdef HAS_ARGB4444TOARGBROW_AVX2
__declspec(naked)
void ARGB4444ToARGBRow_AVX2(const uint8* src_argb4444, uint8* dst_argb,
                            int pix) {
  __asm {
    mov       eax,  0x0f0f0f0f  // generate mask 0x0f0f0f0f
    vmovd     xmm4, eax
    vbroadcastss ymm4, xmm4
    vpslld    ymm5, ymm4, 4     // 0xf0f0f0f0 for high nibbles
    mov       eax,  [esp + 4]   // src_argb4444
    mov       edx,  [esp + 8]   // dst_argb
    mov       ecx,  [esp + 12]  // pix
    sub       edx,  eax
    sub       edx,  eax

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 16 pixels of bgra4444
    vpand      ymm2, ymm0, ymm5    // mask high nibbles
    vpand      ymm0, ymm0, ymm4    // mask low nibbles
    vpsrlw     ymm3, ymm2, 4
    vpsllw     ymm1, ymm0, 4
    vpor       ymm2, ymm2, ymm3
    vpor       ymm0, ymm0, ymm1
    vpermq     ymm0, ymm0, 0xd8    // mutate for unpack
    vpermq     ymm2, ymm2, 0xd8
    vpunpckhbw ymm1, ymm0, ymm2
    vpunpcklbw ymm0, ymm0, ymm2
    vmovdqu    [eax * 2 + edx], ymm0  // store 8 pixels of ARGB
    vmovdqu    [eax * 2 + edx + 32], ymm1  // store next 8 pixels of ARGB
    lea       eax, [eax + 32]
    sub       ecx, 16
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGB4444TOARGBROW_AVX2

// 24 instructions
__declspec(naked)
void ARGB1555ToARGBRow_SSE2(const uint8* src_argb1555, uint8* dst_argb,
                            int pix) {
  __asm {
    mov       eax, 0x01080108  // generate multiplier to repeat 5 bits
    movd      xmm5, eax
    pshufd    xmm5, xmm5, 0
    mov       eax, 0x42004200  // multiplier shift by 6 and then repeat 5 bits
    movd      xmm6, eax
    pshufd    xmm6, xmm6, 0
    pcmpeqb   xmm3, xmm3       // generate mask 0xf800f800 for Red
    psllw     xmm3, 11
    movdqa    xmm4, xmm3       // generate mask 0x03e003e0 for Green
    psrlw     xmm4, 6
    pcmpeqb   xmm7, xmm7       // generate mask 0xff00ff00 for Alpha
    psllw     xmm7, 8

    mov       eax, [esp + 4]   // src_argb1555
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    sub       edx, eax
    sub       edx, eax

 convertloop:
    movdqu    xmm0, [eax]   // fetch 8 pixels of 1555
    movdqa    xmm1, xmm0
    movdqa    xmm2, xmm0
    psllw     xmm1, 1       // R in upper 5 bits
    psllw     xmm2, 11      // B in upper 5 bits
    pand      xmm1, xmm3
    pmulhuw   xmm2, xmm5    // * (256 + 8)
    pmulhuw   xmm1, xmm5    // * (256 + 8)
    psllw     xmm1, 8
    por       xmm1, xmm2    // RB
    movdqa    xmm2, xmm0
    pand      xmm0, xmm4    // G in middle 5 bits
    psraw     xmm2, 8       // A
    pmulhuw   xmm0, xmm6    // << 6 * (256 + 8)
    pand      xmm2, xmm7
    por       xmm0, xmm2    // AG
    movdqa    xmm2, xmm1
    punpcklbw xmm1, xmm0
    punpckhbw xmm2, xmm0
    movdqu    [eax * 2 + edx], xmm1  // store 4 pixels of ARGB
    movdqu    [eax * 2 + edx + 16], xmm2  // store next 4 pixels of ARGB
    lea       eax, [eax + 16]
    sub       ecx, 8
    jg        convertloop
    ret
  }
}
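
// Scalar sketch (assumption) of one ARGB1555 pixel expansion, reusing the
// Replicate5To8 helper defined above.
static __inline uint32 ARGB1555ToARGBPixelSketch(uint16 p) {
  uint32 b = Replicate5To8(p & 0x1f);
  uint32 g = Replicate5To8((p >> 5) & 0x1f);
  uint32 r = Replicate5To8((p >> 10) & 0x1f);
  uint32 a = (p & 0x8000) ? 0xffu : 0u;
  return (a << 24) | (r << 16) | (g << 8) | b;
}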

// 18 instructions.
__declspec(naked)
void ARGB4444ToARGBRow_SSE2(const uint8* src_argb4444, uint8* dst_argb,
                            int pix) {
  __asm {
    mov       eax, 0x0f0f0f0f  // generate mask 0x0f0f0f0f
    movd      xmm4, eax
    pshufd    xmm4, xmm4, 0
    movdqa    xmm5, xmm4       // 0xf0f0f0f0 for high nibbles
    pslld     xmm5, 4
    mov       eax, [esp + 4]   // src_argb4444
    mov       edx, [esp + 8]   // dst_argb
    mov       ecx, [esp + 12]  // pix
    sub       edx, eax
    sub       edx, eax

 convertloop:
    movdqu    xmm0, [eax]   // fetch 8 pixels of bgra4444
    movdqa    xmm2, xmm0
    pand      xmm0, xmm4    // mask low nibbles
    pand      xmm2, xmm5    // mask high nibbles
    movdqa    xmm1, xmm0
    movdqa    xmm3, xmm2
    psllw     xmm1, 4
    psrlw     xmm3, 4
    por       xmm0, xmm1
    por       xmm2, xmm3
    movdqa    xmm1, xmm0
    punpcklbw xmm0, xmm2
    punpckhbw xmm1, xmm2
    movdqu    [eax * 2 + edx], xmm0  // store 4 pixels of ARGB
    movdqu    [eax * 2 + edx + 16], xmm1  // store next 4 pixels of ARGB
    lea       eax, [eax + 16]
    sub       ecx, 8
    jg        convertloop
    ret
  }
}
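
// Scalar sketch (assumption) of one ARGB4444 pixel expansion: each nibble
// is duplicated into both halves of its byte, matching the shift/or above.
static __inline uint32 ARGB4444ToARGBPixelSketch(uint16 p) {
  uint32 b = (uint32)(p & 0x0f) * 0x11;          // low nibble duplicated
  uint32 g = (uint32)((p >> 4) & 0x0f) * 0x11;
  uint32 r = (uint32)((p >> 8) & 0x0f) * 0x11;
  uint32 a = (uint32)((p >> 12) & 0x0f) * 0x11;
  return (a << 24) | (r << 16) | (g << 8) | b;
}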

__declspec(naked)
void ARGBToRGB24Row_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    movdqa    xmm6, xmmword ptr kShuffleMaskARGBToRGB24

 convertloop:
    movdqu    xmm0, [eax]   // fetch 16 pixels of argb
    movdqu    xmm1, [eax + 16]
    movdqu    xmm2, [eax + 32]
    movdqu    xmm3, [eax + 48]
    lea       eax, [eax + 64]
    pshufb    xmm0, xmm6    // pack 16 bytes of ARGB to 12 bytes of RGB
    pshufb    xmm1, xmm6
    pshufb    xmm2, xmm6
    pshufb    xmm3, xmm6
    movdqa    xmm4, xmm1   // 4 bytes from 1 for 0
    psrldq    xmm1, 4      // 8 bytes from 1
    pslldq    xmm4, 12     // 4 bytes from 1 for 0
    movdqa    xmm5, xmm2   // 8 bytes from 2 for 1
    por       xmm0, xmm4   // 4 bytes from 1 for 0
    pslldq    xmm5, 8      // 8 bytes from 2 for 1
    movdqu    [edx], xmm0  // store 0
    por       xmm1, xmm5   // 8 bytes from 2 for 1
    psrldq    xmm2, 8      // 4 bytes from 2
    pslldq    xmm3, 4      // 12 bytes from 3 for 2
    por       xmm2, xmm3   // 12 bytes from 3 for 2
    movdqu    [edx + 16], xmm1   // store 1
    movdqu    [edx + 32], xmm2   // store 2
    lea       edx, [edx + 48]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}

__declspec(naked)
void ARGBToRAWRow_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    movdqa    xmm6, xmmword ptr kShuffleMaskARGBToRAW

 convertloop:
    movdqu    xmm0, [eax]   // fetch 16 pixels of argb
    movdqu    xmm1, [eax + 16]
    movdqu    xmm2, [eax + 32]
    movdqu    xmm3, [eax + 48]
    lea       eax, [eax + 64]
    pshufb    xmm0, xmm6    // pack 16 bytes of ARGB to 12 bytes of RGB
    pshufb    xmm1, xmm6
    pshufb    xmm2, xmm6
    pshufb    xmm3, xmm6
    movdqa    xmm4, xmm1   // 4 bytes from 1 for 0
    psrldq    xmm1, 4      // 8 bytes from 1
    pslldq    xmm4, 12     // 4 bytes from 1 for 0
    movdqa    xmm5, xmm2   // 8 bytes from 2 for 1
    por       xmm0, xmm4   // 4 bytes from 1 for 0
    pslldq    xmm5, 8      // 8 bytes from 2 for 1
    movdqu    [edx], xmm0  // store 0
    por       xmm1, xmm5   // 8 bytes from 2 for 1
    psrldq    xmm2, 8      // 4 bytes from 2
    pslldq    xmm3, 4      // 12 bytes from 3 for 2
    por       xmm2, xmm3   // 12 bytes from 3 for 2
    movdqu    [edx + 16], xmm1   // store 1
    movdqu    [edx + 32], xmm2   // store 2
    lea       edx, [edx + 48]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}
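
// Scalar model (assumption) of the 4:3 repacking done by both rows above:
// copy B, G, R (or R, G, B for RAW) and drop the alpha byte.
static __inline void ARGBToRGB24PixelSketch(uint32 argb, uint8* dst) {
  dst[0] = (uint8)argb;          // B
  dst[1] = (uint8)(argb >> 8);   // G
  dst[2] = (uint8)(argb >> 16);  // R, alpha byte is discarded
}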

// Convert 4 ARGB pixels (16 bytes) to 4 RGB565 pixels (8 bytes) per loop.
__declspec(naked)
void ARGBToRGB565Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm3, xmm3       // generate mask 0x0000001f
    psrld     xmm3, 27
    pcmpeqb   xmm4, xmm4       // generate mask 0x000007e0
    psrld     xmm4, 26
    pslld     xmm4, 5
    pcmpeqb   xmm5, xmm5       // generate mask 0xfffff800
    pslld     xmm5, 11

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    movdqa    xmm1, xmm0    // B
    movdqa    xmm2, xmm0    // G
    pslld     xmm0, 8       // R
    psrld     xmm1, 3       // B
    psrld     xmm2, 5       // G
    psrad     xmm0, 16      // R
    pand      xmm1, xmm3    // B
    pand      xmm2, xmm4    // G
    pand      xmm0, xmm5    // R
    por       xmm1, xmm2    // BG
    por       xmm0, xmm1    // BGR
    packssdw  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of RGB565
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}
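
// Scalar sketch (assumption) of the RGB565 pack above, expressed with the
// same three masks the SSE2 code builds: 0x001f, 0x07e0 and 0xf800.
static __inline uint16 ARGBToRGB565PixelSketch(uint32 argb) {
  return (uint16)(((argb >> 3) & 0x001f) |   // top 5 bits of B
                  ((argb >> 5) & 0x07e0) |   // top 6 bits of G
                  ((argb >> 8) & 0xf800));   // top 5 bits of R
}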

// Convert 4 ARGB pixels (16 bytes) to 4 RGB565 pixels (8 bytes) per loop,
// adding a per-column dither value first.
__declspec(naked)
void ARGBToRGB565DitherRow_SSE2(const uint8* src_argb, uint8* dst_rgb,
                                const uint32 dither4, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    movd      xmm6, [esp + 12] // dither4
    mov       ecx, [esp + 16]  // pix
    punpcklbw xmm6, xmm6       // make dither 16 bytes
    movdqa    xmm7, xmm6
    punpcklwd xmm6, xmm6
    punpckhwd xmm7, xmm7
    pcmpeqb   xmm3, xmm3       // generate mask 0x0000001f
    psrld     xmm3, 27
    pcmpeqb   xmm4, xmm4       // generate mask 0x000007e0
    psrld     xmm4, 26
    pslld     xmm4, 5
    pcmpeqb   xmm5, xmm5       // generate mask 0xfffff800
    pslld     xmm5, 11

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    paddusb   xmm0, xmm6    // add dither
    movdqa    xmm1, xmm0    // B
    movdqa    xmm2, xmm0    // G
    pslld     xmm0, 8       // R
    psrld     xmm1, 3       // B
    psrld     xmm2, 5       // G
    psrad     xmm0, 16      // R
    pand      xmm1, xmm3    // B
    pand      xmm2, xmm4    // G
    pand      xmm0, xmm5    // R
    por       xmm1, xmm2    // BG
    por       xmm0, xmm1    // BGR
    packssdw  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of RGB565
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}
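
// Scalar sketch (assumption) of the dithered pack: saturating-add the
// per-column dither byte to each channel (paddusb) before truncating.
static __inline uint16 ARGBToRGB565DitherPixelSketch(uint32 argb,
                                                     uint8 dither) {
  uint32 b = ((argb >> 0) & 0xff) + dither;
  uint32 g = ((argb >> 8) & 0xff) + dither;
  uint32 r = ((argb >> 16) & 0xff) + dither;
  b = b > 255 ? 255 : b;
  g = g > 255 ? 255 : g;
  r = r > 255 ? 255 : r;
  return (uint16)((b >> 3) | ((g >> 2) << 5) | ((r >> 3) << 11));
}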

#ifdef HAS_ARGBTORGB565DITHERROW_AVX2
__declspec(naked)
void ARGBToRGB565DitherRow_AVX2(const uint8* src_argb, uint8* dst_rgb,
                                const uint32 dither4, int pix) {
  __asm {
    mov        eax, [esp + 4]      // src_argb
    mov        edx, [esp + 8]      // dst_rgb
    vbroadcastss xmm6, [esp + 12]  // dither4
    mov        ecx, [esp + 16]     // pix
    vpunpcklbw xmm6, xmm6, xmm6    // make dither 32 bytes
    vpermq     ymm6, ymm6, 0xd8
    vpunpcklwd ymm6, ymm6, ymm6
    vpcmpeqb   ymm3, ymm3, ymm3    // generate mask 0x0000001f
    vpsrld     ymm3, ymm3, 27
    vpcmpeqb   ymm4, ymm4, ymm4    // generate mask 0x000007e0
    vpsrld     ymm4, ymm4, 26
    vpslld     ymm4, ymm4, 5
    vpslld     ymm5, ymm3, 11      // generate mask 0x0000f800

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 8 pixels of argb
    vpaddusb   ymm0, ymm0, ymm6    // add dither
    vpsrld     ymm2, ymm0, 5       // G
    vpsrld     ymm1, ymm0, 3       // B
    vpsrld     ymm0, ymm0, 8       // R
    vpand      ymm2, ymm2, ymm4    // G
    vpand      ymm1, ymm1, ymm3    // B
    vpand      ymm0, ymm0, ymm5    // R
    vpor       ymm1, ymm1, ymm2    // BG
    vpor       ymm0, ymm0, ymm1    // BGR
    vpackusdw  ymm0, ymm0, ymm0
    vpermq     ymm0, ymm0, 0xd8
    lea        eax, [eax + 32]
    vmovdqu    [edx], xmm0         // store 8 pixels of RGB565
    lea        edx, [edx + 16]
    sub        ecx, 8
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTORGB565DITHERROW_AVX2

// TODO(fbarchard): Improve sign extension/packing.
__declspec(naked)
void ARGBToARGB1555Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm4, xmm4       // generate mask 0x0000001f
    psrld     xmm4, 27
    movdqa    xmm5, xmm4       // generate mask 0x000003e0
    pslld     xmm5, 5
    movdqa    xmm6, xmm4       // generate mask 0x00007c00
    pslld     xmm6, 10
    pcmpeqb   xmm7, xmm7       // generate mask 0xffff8000
    pslld     xmm7, 15

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    movdqa    xmm1, xmm0    // B
    movdqa    xmm2, xmm0    // G
    movdqa    xmm3, xmm0    // R
    psrad     xmm0, 16      // A
    psrld     xmm1, 3       // B
    psrld     xmm2, 6       // G
    psrld     xmm3, 9       // R
    pand      xmm0, xmm7    // A
    pand      xmm1, xmm4    // B
    pand      xmm2, xmm5    // G
    pand      xmm3, xmm6    // R
    por       xmm0, xmm1    // BA
    por       xmm2, xmm3    // GR
    por       xmm0, xmm2    // BGRA
    packssdw  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of ARGB1555
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}

__declspec(naked)
void ARGBToARGB4444Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov       eax, [esp + 4]   // src_argb
    mov       edx, [esp + 8]   // dst_rgb
    mov       ecx, [esp + 12]  // pix
    pcmpeqb   xmm4, xmm4       // generate mask 0xf000f000
    psllw     xmm4, 12
    movdqa    xmm3, xmm4       // generate mask 0x00f000f0
    psrlw     xmm3, 8

 convertloop:
    movdqu    xmm0, [eax]   // fetch 4 pixels of argb
    movdqa    xmm1, xmm0
    pand      xmm0, xmm3    // low nibble
    pand      xmm1, xmm4    // high nibble
    psrld     xmm0, 4
    psrld     xmm1, 8
    por       xmm0, xmm1
    packuswb  xmm0, xmm0
    lea       eax, [eax + 16]
    movq      qword ptr [edx], xmm0  // store 4 pixels of ARGB4444
    lea       edx, [edx + 8]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}
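
// Scalar sketches (assumptions) of the two packs above, keeping the top
// bits of each channel exactly as the mask/shift sequences do.
static __inline uint16 ARGBToARGB1555PixelSketch(uint32 argb) {
  return (uint16)(((argb >> 3) & 0x001f) |    // top 5 bits of B
                  ((argb >> 6) & 0x03e0) |    // top 5 bits of G
                  ((argb >> 9) & 0x7c00) |    // top 5 bits of R
                  ((argb >> 16) & 0x8000));   // top bit of A
}
static __inline uint16 ARGBToARGB4444PixelSketch(uint32 argb) {
  return (uint16)(((argb >> 4) & 0x000f) |    // top 4 bits of B
                  ((argb >> 8) & 0x00f0) |    // top 4 bits of G
                  ((argb >> 12) & 0x0f00) |   // top 4 bits of R
                  ((argb >> 16) & 0xf000));   // top 4 bits of A
}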

#ifdef HAS_ARGBTORGB565ROW_AVX2
__declspec(naked)
void ARGBToRGB565Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov        eax, [esp + 4]      // src_argb
    mov        edx, [esp + 8]      // dst_rgb
    mov        ecx, [esp + 12]     // pix
    vpcmpeqb   ymm3, ymm3, ymm3    // generate mask 0x0000001f
    vpsrld     ymm3, ymm3, 27
    vpcmpeqb   ymm4, ymm4, ymm4    // generate mask 0x000007e0
    vpsrld     ymm4, ymm4, 26
    vpslld     ymm4, ymm4, 5
    vpslld     ymm5, ymm3, 11      // generate mask 0x0000f800

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 8 pixels of argb
    vpsrld     ymm2, ymm0, 5       // G
    vpsrld     ymm1, ymm0, 3       // B
    vpsrld     ymm0, ymm0, 8       // R
    vpand      ymm2, ymm2, ymm4    // G
    vpand      ymm1, ymm1, ymm3    // B
    vpand      ymm0, ymm0, ymm5    // R
    vpor       ymm1, ymm1, ymm2    // BG
    vpor       ymm0, ymm0, ymm1    // BGR
    vpackusdw  ymm0, ymm0, ymm0
    vpermq     ymm0, ymm0, 0xd8
    lea        eax, [eax + 32]
    vmovdqu    [edx], xmm0         // store 8 pixels of RGB565
    lea        edx, [edx + 16]
    sub        ecx, 8
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTORGB565ROW_AVX2

#ifdef HAS_ARGBTOARGB1555ROW_AVX2
__declspec(naked)
void ARGBToARGB1555Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov        eax, [esp + 4]      // src_argb
    mov        edx, [esp + 8]      // dst_rgb
    mov        ecx, [esp + 12]     // pix
    vpcmpeqb   ymm4, ymm4, ymm4
    vpsrld     ymm4, ymm4, 27      // generate mask 0x0000001f
    vpslld     ymm5, ymm4, 5       // generate mask 0x000003e0
    vpslld     ymm6, ymm4, 10      // generate mask 0x00007c00
    vpcmpeqb   ymm7, ymm7, ymm7    // generate mask 0xffff8000
    vpslld     ymm7, ymm7, 15

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 8 pixels of argb
    vpsrld     ymm3, ymm0, 9       // R
    vpsrld     ymm2, ymm0, 6       // G
    vpsrld     ymm1, ymm0, 3       // B
    vpsrad     ymm0, ymm0, 16      // A
    vpand      ymm3, ymm3, ymm6    // R
    vpand      ymm2, ymm2, ymm5    // G
    vpand      ymm1, ymm1, ymm4    // B
    vpand      ymm0, ymm0, ymm7    // A
    vpor       ymm0, ymm0, ymm1    // BA
    vpor       ymm2, ymm2, ymm3    // GR
    vpor       ymm0, ymm0, ymm2    // BGRA
    vpackssdw  ymm0, ymm0, ymm0
    vpermq     ymm0, ymm0, 0xd8
    lea        eax, [eax + 32]
    vmovdqu    [edx], xmm0         // store 8 pixels of ARGB1555
    lea        edx, [edx + 16]
    sub        ecx, 8
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTOARGB1555ROW_AVX2

#ifdef HAS_ARGBTOARGB4444ROW_AVX2
__declspec(naked)
void ARGBToARGB4444Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) {
  __asm {
    mov        eax, [esp + 4]   // src_argb
    mov        edx, [esp + 8]   // dst_rgb
    mov        ecx, [esp + 12]  // pix
    vpcmpeqb   ymm4, ymm4, ymm4   // generate mask 0xf000f000
    vpsllw     ymm4, ymm4, 12
    vpsrlw     ymm3, ymm4, 8      // generate mask 0x00f000f0

 convertloop:
    vmovdqu    ymm0, [eax]         // fetch 8 pixels of argb
    vpand      ymm1, ymm0, ymm4    // high nibble
    vpand      ymm0, ymm0, ymm3    // low nibble
    vpsrld     ymm1, ymm1, 8
    vpsrld     ymm0, ymm0, 4
    vpor       ymm0, ymm0, ymm1
    vpackuswb  ymm0, ymm0, ymm0
    vpermq     ymm0, ymm0, 0xd8
    lea        eax, [eax + 32]
    vmovdqu    [edx], xmm0         // store 8 pixels of ARGB4444
    lea        edx, [edx + 16]
    sub        ecx, 8
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTOARGB4444ROW_AVX2

// Convert 16 ARGB pixels (64 bytes) to 16 Y values.
__declspec(naked)
void ARGBToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, xmmword ptr kARGBToY
    movdqa     xmm5, xmmword ptr kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

// Convert 16 ARGB pixels (64 bytes) to 16 YJ values.
// Same as ARGBToYRow but with different coefficients, no add of 16, and
// rounding instead of truncation.
__declspec(naked)
void ARGBToYJRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, xmmword ptr kARGBToYJ
    movdqa     xmm5, xmmword ptr kAddYJ64

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    paddw      xmm0, xmm5  // Add .5 for rounding.
    paddw      xmm2, xmm5
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}
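
// Scalar sketch (assumption) of the JPeg-range luma above: full range
// coefficients, no +16 offset, and +64 for round-to-nearest before >> 7.
static __inline uint8 ARGBToYJPixelSketch(uint8 b, uint8 g, uint8 r) {
  return (uint8)((15 * b + 75 * g + 38 * r + 64) >> 7);
}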

#ifdef HAS_ARGBTOYROW_AVX2
// vpermd for vphaddw + vpackuswb vpermd.
static const lvec32 kPermdARGBToY_AVX = {
  0, 4, 1, 5, 2, 6, 3, 7
};

// Convert 32 ARGB pixels (128 bytes) to 32 Y values.
__declspec(naked)
void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    vbroadcastf128 ymm4, xmmword ptr kARGBToY
    vbroadcastf128 ymm5, xmmword ptr kAddY16
    vmovdqu    ymm6, ymmword ptr kPermdARGBToY_AVX

 convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vmovdqu    ymm2, [eax + 64]
    vmovdqu    ymm3, [eax + 96]
    vpmaddubsw ymm0, ymm0, ymm4
    vpmaddubsw ymm1, ymm1, ymm4
    vpmaddubsw ymm2, ymm2, ymm4
    vpmaddubsw ymm3, ymm3, ymm4
    lea        eax, [eax + 128]
    vphaddw    ymm0, ymm0, ymm1  // mutates.
    vphaddw    ymm2, ymm2, ymm3
    vpsrlw     ymm0, ymm0, 7
    vpsrlw     ymm2, ymm2, 7
    vpackuswb  ymm0, ymm0, ymm2  // mutates.
    vpermd     ymm0, ymm6, ymm0  // For vphaddw + vpackuswb mutation.
    vpaddb     ymm0, ymm0, ymm5  // add 16 for Y
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  //  HAS_ARGBTOYROW_AVX2

#ifdef HAS_ARGBTOYJROW_AVX2
// Convert 32 ARGB pixels (128 bytes) to 32 YJ values.
__declspec(naked)
void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    vbroadcastf128 ymm4, xmmword ptr kARGBToYJ
    vbroadcastf128 ymm5, xmmword ptr kAddYJ64
    vmovdqu    ymm6, ymmword ptr kPermdARGBToY_AVX

 convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vmovdqu    ymm2, [eax + 64]
    vmovdqu    ymm3, [eax + 96]
    vpmaddubsw ymm0, ymm0, ymm4
    vpmaddubsw ymm1, ymm1, ymm4
    vpmaddubsw ymm2, ymm2, ymm4
    vpmaddubsw ymm3, ymm3, ymm4
    lea        eax, [eax + 128]
    vphaddw    ymm0, ymm0, ymm1  // mutates.
    vphaddw    ymm2, ymm2, ymm3
    vpaddw     ymm0, ymm0, ymm5  // Add .5 for rounding.
    vpaddw     ymm2, ymm2, ymm5
    vpsrlw     ymm0, ymm0, 7
    vpsrlw     ymm2, ymm2, 7
    vpackuswb  ymm0, ymm0, ymm2  // mutates.
    vpermd     ymm0, ymm6, ymm0  // For vphaddw + vpackuswb mutation.
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  //  HAS_ARGBTOYJROW_AVX2

__declspec(naked)
void BGRAToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, xmmword ptr kBGRAToY
    movdqa     xmm5, xmmword ptr kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void ABGRToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, xmmword ptr kABGRToY
    movdqa     xmm5, xmmword ptr kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void RGBAToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_y */
    mov        ecx, [esp + 12]  /* pix */
    movdqa     xmm4, xmmword ptr kRGBAToY
    movdqa     xmm5, xmmword ptr kAddY16

 convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm2, xmm4
    pmaddubsw  xmm3, xmm4
    lea        eax, [eax + 64]
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psrlw      xmm0, 7
    psrlw      xmm2, 7
    packuswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void ARGBToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // pix
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kARGBToV
    movdqa     xmm7, xmmword ptr kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop
    pop        edi
    pop        esi
    ret
  }
}
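
// For reference, a scalar analogue of step 2 above (a sketch, not libyuv's
// code path: the 112/-74/-38 and -18/-94/112 weights are the BT.601 values
// that kARGBToU/kARGBToV encode, and 0x8080 folds in the +128 bias plus a
// half for rounding, where the psraw/paddb sequence above truncates instead).
static __inline int RGBToURef(int r, int g, int b) {
  return (112 * b - 74 * g - 38 * r + 0x8080) >> 8;
}
static __inline int RGBToVRef(int r, int g, int b) {
  return (112 * r - 94 * g - 18 * b + 0x8080) >> 8;
}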

__declspec(naked)
void ARGBToUVJRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                        uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // pix
    movdqa     xmm5, xmmword ptr kAddUVJ128
    movdqa     xmm6, xmmword ptr kARGBToVJ
    movdqa     xmm7, xmmword ptr kARGBToUJ
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    paddw      xmm0, xmm5            // +.5 rounding -> unsigned
    paddw      xmm1, xmm5
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

#ifdef HAS_ARGBTOUVROW_AVX2
__declspec(naked)
void ARGBToUVRow_AVX2(const uint8* src_argb0, int src_stride_argb,
                      uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // pix
    vbroadcastf128 ymm5, xmmword ptr kAddUV128
    vbroadcastf128 ymm6, xmmword ptr kARGBToV
    vbroadcastf128 ymm7, xmmword ptr kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 32x2 argb pixels to 16x1 */
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vmovdqu    ymm2, [eax + 64]
    vmovdqu    ymm3, [eax + 96]
    vpavgb     ymm0, ymm0, [eax + esi]
    vpavgb     ymm1, ymm1, [eax + esi + 32]
    vpavgb     ymm2, ymm2, [eax + esi + 64]
    vpavgb     ymm3, ymm3, [eax + esi + 96]
    lea        eax,  [eax + 128]
    vshufps    ymm4, ymm0, ymm1, 0x88
    vshufps    ymm0, ymm0, ymm1, 0xdd
    vpavgb     ymm0, ymm0, ymm4  // mutated by vshufps
    vshufps    ymm4, ymm2, ymm3, 0x88
    vshufps    ymm2, ymm2, ymm3, 0xdd
    vpavgb     ymm2, ymm2, ymm4  // mutated by vshufps

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 32 different pixels, it's 16 pixels of U and 16 of V
    vpmaddubsw ymm1, ymm0, ymm7  // U
    vpmaddubsw ymm3, ymm2, ymm7
    vpmaddubsw ymm0, ymm0, ymm6  // V
    vpmaddubsw ymm2, ymm2, ymm6
    vphaddw    ymm1, ymm1, ymm3  // mutates
    vphaddw    ymm0, ymm0, ymm2
    vpsraw     ymm1, ymm1, 8
    vpsraw     ymm0, ymm0, 8
    vpacksswb  ymm0, ymm1, ymm0  // mutates
    vpermq     ymm0, ymm0, 0xd8  // For vpacksswb
    vpshufb    ymm0, ymm0, ymmword ptr kShufARGBToUV_AVX  // for vshufps/vphaddw
    vpaddb     ymm0, ymm0, ymm5  // -> unsigned

    // step 3 - store 16 U and 16 V values
    vextractf128 [edx], ymm0, 0 // U
    vextractf128 [edx + edi], ymm0, 1 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBTOUVROW_AVX2

__declspec(naked)
void ARGBToUV444Row_SSSE3(const uint8* src_argb0,
                          uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]   // src_argb
    mov        edx, [esp + 4 + 8]   // dst_u
    mov        edi, [esp + 4 + 12]  // dst_v
    mov        ecx, [esp + 4 + 16]  // pix
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kARGBToV
    movdqa     xmm7, xmmword ptr kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* convert to U and V */
    movdqu     xmm0, [eax]          // U
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm7
    pmaddubsw  xmm1, xmm7
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm3, xmm7
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psraw      xmm0, 8
    psraw      xmm2, 8
    packsswb   xmm0, xmm2
    paddb      xmm0, xmm5
    movdqu     [edx], xmm0

    movdqu     xmm0, [eax]          // V
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    pmaddubsw  xmm0, xmm6
    pmaddubsw  xmm1, xmm6
    pmaddubsw  xmm2, xmm6
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm1
    phaddw     xmm2, xmm3
    psraw      xmm0, 8
    psraw      xmm2, 8
    packsswb   xmm0, xmm2
    paddb      xmm0, xmm5
    lea        eax,  [eax + 64]
    movdqu     [edx + edi], xmm0
    lea        edx,  [edx + 16]
    sub        ecx,  16
    jg         convertloop

    pop        edi
    ret
  }
}

__declspec(naked)
void ARGBToUV422Row_SSSE3(const uint8* src_argb0,
                          uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]   // src_argb
    mov        edx, [esp + 4 + 8]   // dst_u
    mov        edi, [esp + 4 + 12]  // dst_v
    mov        ecx, [esp + 4 + 16]  // pix
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kARGBToV
    movdqa     xmm7, xmmword ptr kARGBToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x1 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]
    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}

__declspec(naked)
void BGRAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // pix
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kBGRAToV
    movdqa     xmm7, xmmword ptr kBGRAToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void ABGRToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // pix
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kABGRToV
    movdqa     xmm7, xmmword ptr kABGRToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void RGBAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_argb
    mov        esi, [esp + 8 + 8]   // src_stride_argb
    mov        edx, [esp + 8 + 12]  // dst_u
    mov        edi, [esp + 8 + 16]  // dst_v
    mov        ecx, [esp + 8 + 20]  // pix
    movdqa     xmm5, xmmword ptr kAddUV128
    movdqa     xmm6, xmmword ptr kRGBAToV
    movdqa     xmm7, xmmword ptr kRGBAToU
    sub        edi, edx             // stride from u to v

 convertloop:
    /* step 1 - subsample 16x2 argb pixels to 8x1 */
    movdqu     xmm0, [eax]
    movdqu     xmm4, [eax + esi]
    pavgb      xmm0, xmm4
    movdqu     xmm1, [eax + 16]
    movdqu     xmm4, [eax + esi + 16]
    pavgb      xmm1, xmm4
    movdqu     xmm2, [eax + 32]
    movdqu     xmm4, [eax + esi + 32]
    pavgb      xmm2, xmm4
    movdqu     xmm3, [eax + 48]
    movdqu     xmm4, [eax + esi + 48]
    pavgb      xmm3, xmm4

    lea        eax,  [eax + 64]
    movdqa     xmm4, xmm0
    shufps     xmm0, xmm1, 0x88
    shufps     xmm4, xmm1, 0xdd
    pavgb      xmm0, xmm4
    movdqa     xmm4, xmm2
    shufps     xmm2, xmm3, 0x88
    shufps     xmm4, xmm3, 0xdd
    pavgb      xmm2, xmm4

    // step 2 - convert to U and V
    // from here down is very similar to Y code except
    // instead of 16 different pixels, it's 8 pixels of U and 8 of V
    movdqa     xmm1, xmm0
    movdqa     xmm3, xmm2
    pmaddubsw  xmm0, xmm7  // U
    pmaddubsw  xmm2, xmm7
    pmaddubsw  xmm1, xmm6  // V
    pmaddubsw  xmm3, xmm6
    phaddw     xmm0, xmm2
    phaddw     xmm1, xmm3
    psraw      xmm0, 8
    psraw      xmm1, 8
    packsswb   xmm0, xmm1
    paddb      xmm0, xmm5            // -> unsigned

    // step 3 - store 8 U and 8 V values
    movlps     qword ptr [edx], xmm0 // U
    movhps     qword ptr [edx + edi], xmm0 // V
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBTOYROW_SSSE3

// Read 16 UV from 444
#define READYUV444_AVX2 __asm {                                                \
    __asm vmovdqu    xmm0, [esi]                  /* U */                      \
    __asm vmovdqu    xmm1, [esi + edi]            /* V */                      \
    __asm lea        esi,  [esi + 16]                                          \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpermq     ymm1, ymm1, 0xd8                                          \
    __asm vpunpcklbw ymm0, ymm0, ymm1             /* UV */                     \
  }

// Read 8 UV from 422, upsample to 16 UV.
#define READYUV422_AVX2 __asm {                                                \
    __asm vmovq      xmm0, qword ptr [esi]        /* U */                      \
    __asm vmovq      xmm1, qword ptr [esi + edi]  /* V */                      \
    __asm lea        esi,  [esi + 8]                                           \
    __asm vpunpcklbw ymm0, ymm0, ymm1             /* UV */                     \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm0, ymm0             /* UVUV (upsample) */        \
  }

// Read 4 UV from 411, upsample to 16 UV.
#define READYUV411_AVX2 __asm {                                                \
    __asm vmovd      xmm0, dword ptr [esi]        /* U */                      \
    __asm vmovd      xmm1, dword ptr [esi + edi]  /* V */                      \
    __asm lea        esi,  [esi + 4]                                           \
    __asm vpunpcklbw ymm0, ymm0, ymm1             /* UV */                     \
    __asm vpunpcklwd ymm0, ymm0, ymm0             /* UVUV (upsample) */        \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpckldq ymm0, ymm0, ymm0             /* UVUVUVUV (upsample) */    \
  }

// Read 8 UV from NV12, upsample to 16 UV.
#define READNV12_AVX2 __asm {                                                  \
    __asm vmovdqu    xmm0, [esi]                  /* UV */                     \
    __asm lea        esi,  [esi + 16]                                          \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm0, ymm0             /* UVUV (upsample) */        \
  }
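
// The READ* macros above replicate chroma rather than interpolate: 422 and
// NV12 duplicate each UV pair twice, and 411 four times. A scalar sketch of
// the same nearest-neighbor upsampling (illustrative only):
//
//   for (int i = 0; i < width; ++i) {
//     uint8 u = u_buf[i / 2];  // one U,V per 2 pixels; i / 4 for 411
//     uint8 v = v_buf[i / 2];
//     // feed (y_buf[i], u, v) into the YUVTORGB math below.
//   }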

// Convert 16 pixels: 16 UV and 16 Y.
#define YUVTORGB_AVX2(YuvConstants) __asm {                                    \
    __asm vpmaddubsw ymm2, ymm0, ymmword ptr [YuvConstants + KUVTOR] /* R UV */\
    __asm vpmaddubsw ymm1, ymm0, ymmword ptr [YuvConstants + KUVTOG] /* G UV */\
    __asm vpmaddubsw ymm0, ymm0, ymmword ptr [YuvConstants + KUVTOB] /* B UV */\
    __asm vmovdqu    ymm3, ymmword ptr [YuvConstants + KUVBIASR]               \
    __asm vpsubw     ymm2, ymm3, ymm2                                          \
    __asm vmovdqu    ymm3, ymmword ptr [YuvConstants + KUVBIASG]               \
    __asm vpsubw     ymm1, ymm3, ymm1                                          \
    __asm vmovdqu    ymm3, ymmword ptr [YuvConstants + KUVBIASB]               \
    __asm vpsubw     ymm0, ymm3, ymm0                                          \
    /* Step 2: Find Y contribution to 16 R,G,B values */                       \
    __asm vmovdqu    xmm3, [eax]                                               \
    __asm lea        eax, [eax + 16]                                           \
    __asm vpermq     ymm3, ymm3, 0xd8                                          \
    __asm vpunpcklbw ymm3, ymm3, ymm3                                          \
    __asm vpmulhuw   ymm3, ymm3, ymmword ptr [YuvConstants + KYTORGB]          \
    __asm vpaddsw    ymm0, ymm0, ymm3           /* B += Y */                   \
    __asm vpaddsw    ymm1, ymm1, ymm3           /* G += Y */                   \
    __asm vpaddsw    ymm2, ymm2, ymm3           /* R += Y */                   \
    __asm vpsraw     ymm0, ymm0, 6                                             \
    __asm vpsraw     ymm1, ymm1, 6                                             \
    __asm vpsraw     ymm2, ymm2, 6                                             \
    __asm vpackuswb  ymm0, ymm0, ymm0           /* B */                        \
    __asm vpackuswb  ymm1, ymm1, ymm1           /* G */                        \
    __asm vpackuswb  ymm2, ymm2, ymm2           /* R */                        \
  }
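
// A scalar model of the YUVTORGB math above (an illustrative sketch, not a
// code path libyuv uses). It mirrors the fixed-point constants YG, UB, UG,
// VG, VR and the biases BB, BG, BR defined at the top of this file; UB and
// VR are negative, so B and R gain a positive U/V term while G loses one.
static __inline uint8 ClampToByte(int v) {
  return (uint8)(v < 0 ? 0 : (v > 255 ? 255 : v));
}
static __inline void YuvPixelRef(uint8 y, uint8 u, uint8 v,
                                 uint8* b, uint8* g, uint8* r) {
  int y1 = (int)((uint32)(y * 0x0101 * YG) >> 16);  // what (v)pmulhuw computes
  *b = ClampToByte((BB - u * UB + y1) >> 6);
  *g = ClampToByte((BG - (u * UG + v * VG) + y1) >> 6);
  *r = ClampToByte((BR - v * VR + y1) >> 6);
}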

// Store 16 ARGB values.
#define STOREARGB_AVX2 __asm {                                                 \
    __asm vpunpcklbw ymm0, ymm0, ymm1           /* BG */                       \
    __asm vpermq     ymm0, ymm0, 0xd8                                          \
    __asm vpunpcklbw ymm2, ymm2, ymm5           /* RA */                       \
    __asm vpermq     ymm2, ymm2, 0xd8                                          \
    __asm vpunpcklwd ymm1, ymm0, ymm2           /* BGRA first 8 pixels */      \
    __asm vpunpckhwd ymm0, ymm0, ymm2           /* BGRA next 8 pixels */       \
    __asm vmovdqu    0[edx], ymm1                                              \
    __asm vmovdqu    32[edx], ymm0                                             \
    __asm lea        edx,  [edx + 64]                                          \
  }

// Store 16 BGRA values.
#define STOREBGRA_AVX2 __asm {                                                 \
    __asm vpunpcklbw ymm1, ymm1, ymm0           /* GB */                       \
    __asm vpermq     ymm1, ymm1, 0xd8                                          \
    __asm vpunpcklbw ymm2, ymm5, ymm2           /* AR */                       \
    __asm vpermq     ymm2, ymm2, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm2, ymm1           /* ARGB first 8 pixels */      \
    __asm vpunpckhwd ymm2, ymm2, ymm1           /* ARGB next 8 pixels */       \
    __asm vmovdqu    [edx], ymm0                                               \
    __asm vmovdqu    [edx + 32], ymm2                                          \
    __asm lea        edx,  [edx + 64]                                          \
  }

// Store 16 RGBA values.
#define STORERGBA_AVX2 __asm {                                                 \
    __asm vpunpcklbw ymm1, ymm1, ymm2           /* GR */                       \
    __asm vpermq     ymm1, ymm1, 0xd8                                          \
    __asm vpunpcklbw ymm2, ymm5, ymm0           /* AB */                       \
    __asm vpermq     ymm2, ymm2, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm2, ymm1           /* ABGR first 8 pixels */      \
    __asm vpunpckhwd ymm1, ymm2, ymm1           /* ABGR next 8 pixels */       \
    __asm vmovdqu    [edx], ymm0                                               \
    __asm vmovdqu    [edx + 32], ymm1                                          \
    __asm lea        edx,  [edx + 64]                                          \
  }

// Store 16 ABGR values.
#define STOREABGR_AVX2 __asm {                                                 \
    __asm vpunpcklbw ymm1, ymm2, ymm1           /* RG */                       \
    __asm vpermq     ymm1, ymm1, 0xd8                                          \
    __asm vpunpcklbw ymm2, ymm0, ymm5           /* BA */                       \
    __asm vpermq     ymm2, ymm2, 0xd8                                          \
    __asm vpunpcklwd ymm0, ymm1, ymm2           /* RGBA first 8 pixels */      \
    __asm vpunpckhwd ymm1, ymm1, ymm2           /* RGBA next 8 pixels */       \
    __asm vmovdqu    [edx], ymm0                                               \
    __asm vmovdqu    [edx + 32], ymm1                                          \
    __asm lea        edx,  [edx + 64]                                          \
  }
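
// Each STORE*_AVX2 macro weaves the packed B, G and R bytes (plus the alpha
// in ymm5) into the destination byte order; the vpermq shuffles compensate
// for vpunpcklbw operating within each 128-bit lane of a 256-bit register.
// For one ARGB pixel the resulting memory byte order is simply (a sketch):
//   dst[0] = B; dst[1] = G; dst[2] = R; dst[3] = A;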

#ifdef HAS_I422TOARGBMATRIXROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void I422ToARGBMatrixRow_AVX2(const uint8* y_buf,
                              const uint8* u_buf,
                              const uint8* v_buf,
                              uint8* dst_argb,
                              struct YuvConstants* YuvConstants,
                              int width) {
  __asm {
    push       esi
    push       edi
    push       ebp
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ebp, [esp + 12 + 20]  // YuvConstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2(ebp)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebp
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TOARGBMATRIXROW_AVX2
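
// Usage sketch (illustrative, not part of this file): converting one row of
// I422 to ARGB with the BT.601 kYuvConstants defined at the top of this file,
// for a width that is a multiple of 16 (libyuv's *_Any wrappers normally
// handle ragged right edges):
//
//   I422ToARGBMatrixRow_AVX2(y, u, v, dst_argb, &kYuvConstants, width);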

#ifdef HAS_I444TOARGBROW_AVX2
// 16 pixels
// 16 UV values with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void I444ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV444_AVX2
    YUVTORGB_AVX2(kYuvConstants)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I444TOARGBROW_AVX2

#ifdef HAS_I411TOARGBROW_AVX2
// 16 pixels
// 4 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void I411ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV411_AVX2
    YUVTORGB_AVX2(kYuvConstants)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I411TOARGBROW_AVX2

#ifdef HAS_NV12TOARGBROW_AVX2
// 16 pixels.
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void NV12ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* uv_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // Y
    mov        esi, [esp + 4 + 8]   // UV
    mov        edx, [esp + 4 + 12]  // argb
    mov        ecx, [esp + 4 + 16]  // width
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READNV12_AVX2
    YUVTORGB_AVX2(kYuvConstants)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_NV12TOARGBROW_AVX2

#ifdef HAS_NV21TOARGBROW_AVX2
// 16 pixels.
// 8 VU values upsampled to 16 VU, mixed with 16 Y producing 16 ARGB (64 bytes).
__declspec(naked)
void NV21ToARGBRow_AVX2(const uint8* y_buf,
                        const uint8* uv_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // Y
    mov        esi, [esp + 4 + 8]   // UV
    mov        edx, [esp + 4 + 12]  // argb
    mov        ecx, [esp + 4 + 16]  // width
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READNV12_AVX2
    YUVTORGB_AVX2(kYvuConstants)
    STOREARGB_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_NV21TOARGBROW_AVX2

#ifdef HAS_I422TOBGRAROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 BGRA (64 bytes).
// TODO(fbarchard): Use macros to reduce duplicate code.  See SSSE3.
__declspec(naked)
void I422ToBGRARow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2(kYuvConstants)
    STOREBGRA_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TOBGRAROW_AVX2

#ifdef HAS_I422TORGBAROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 RGBA (64 bytes).
__declspec(naked)
void I422ToRGBARow_AVX2(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_argb,
                        int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2(kYuvConstants)
    STORERGBA_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TORGBAROW_AVX2

#ifdef HAS_I422TOABGRROW_AVX2
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ABGR (64 bytes).
__declspec(naked)
void I422ToABGRMatrixRow_AVX2(const uint8* y_buf,
                              const uint8* u_buf,
                              const uint8* v_buf,
                              uint8* dst_argb,
                              struct YuvConstants* YuvConstants,
                              int width) {
  __asm {
    push       esi
    push       edi
    push       ebp
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ebp, [esp + 12 + 20]  // YuvConstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    vpcmpeqb   ymm5, ymm5, ymm5     // generate 0xffffffffffffffff for alpha

 convertloop:
    READYUV422_AVX2
    YUVTORGB_AVX2(ebp)
    STOREABGR_AVX2

    sub        ecx, 16
    jg         convertloop

    pop        ebp
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_I422TOABGRROW_AVX2

#if defined(HAS_I422TOARGBROW_SSSE3)
// TODO(fbarchard): Read that does half size on Y and treats 420 as 444.

// Read 8 UV from 444.
#define READYUV444 __asm {                                                     \
    __asm movq       xmm0, qword ptr [esi] /* U */                             \
    __asm movq       xmm1, qword ptr [esi + edi] /* V */                       \
    __asm lea        esi,  [esi + 8]                                           \
    __asm punpcklbw  xmm0, xmm1           /* UV */                             \
  }

// Read 4 UV from 422, upsample to 8 UV.
#define READYUV422 __asm {                                                     \
    __asm movd       xmm0, [esi]          /* U */                              \
    __asm movd       xmm1, [esi + edi]    /* V */                              \
    __asm lea        esi,  [esi + 4]                                           \
    __asm punpcklbw  xmm0, xmm1           /* UV */                             \
    __asm punpcklwd  xmm0, xmm0           /* UVUV (upsample) */                \
  }

// Read 2 UV from 411, upsample to 8 UV.
#define READYUV411 __asm {                                                     \
    __asm pinsrw     xmm0, [esi], 0        /* U */                             \
    __asm pinsrw     xmm1, [esi + edi], 0  /* V */                             \
    __asm lea        esi,  [esi + 2]                                           \
    __asm punpcklbw  xmm0, xmm1            /* UV */                            \
    __asm punpcklwd  xmm0, xmm0            /* UVUV (upsample) */               \
    __asm punpckldq  xmm0, xmm0            /* UVUVUVUV (upsample) */           \
  }

// Read 4 UV from NV12, upsample to 8 UV.
#define READNV12 __asm {                                                       \
    __asm movq       xmm0, qword ptr [esi] /* UV */                            \
    __asm lea        esi,  [esi + 8]                                           \
    __asm punpcklwd  xmm0, xmm0           /* UVUV (upsample) */                \
  }

// Convert 8 pixels: 8 UV and 8 Y.
#define YUVTORGB(YuvConstants) __asm {                                         \
    __asm movdqa     xmm1, xmm0                                                \
    __asm movdqa     xmm2, xmm0                                                \
    __asm movdqa     xmm3, xmm0                                                \
    __asm movdqa     xmm0, xmmword ptr [YuvConstants + KUVBIASB]               \
    __asm pmaddubsw  xmm1, xmmword ptr [YuvConstants + KUVTOB]                 \
    __asm psubw      xmm0, xmm1                                                \
    __asm movdqa     xmm1, xmmword ptr [YuvConstants + KUVBIASG]               \
    __asm pmaddubsw  xmm2, xmmword ptr [YuvConstants + KUVTOG]                 \
    __asm psubw      xmm1, xmm2                                                \
    __asm movdqa     xmm2, xmmword ptr [YuvConstants + KUVBIASR]               \
    __asm pmaddubsw  xmm3, xmmword ptr [YuvConstants + KUVTOR]                 \
    __asm psubw      xmm2, xmm3                                                \
    __asm movq       xmm3, qword ptr [eax]                                     \
    __asm lea        eax, [eax + 8]                                            \
    __asm punpcklbw  xmm3, xmm3                                                \
    __asm pmulhuw    xmm3, xmmword ptr [YuvConstants + KYTORGB]                \
    __asm paddsw     xmm0, xmm3           /* B += Y */                         \
    __asm paddsw     xmm1, xmm3           /* G += Y */                         \
    __asm paddsw     xmm2, xmm3           /* R += Y */                         \
    __asm psraw      xmm0, 6                                                   \
    __asm psraw      xmm1, 6                                                   \
    __asm psraw      xmm2, 6                                                   \
    __asm packuswb   xmm0, xmm0           /* B */                              \
    __asm packuswb   xmm1, xmm1           /* G */                              \
    __asm packuswb   xmm2, xmm2           /* R */                              \
  }

// Store 8 ARGB values.
#define STOREARGB __asm {                                                      \
    __asm punpcklbw  xmm0, xmm1           /* BG */                             \
    __asm punpcklbw  xmm2, xmm5           /* RA */                             \
    __asm movdqa     xmm1, xmm0                                                \
    __asm punpcklwd  xmm0, xmm2           /* BGRA first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm2           /* BGRA next 4 pixels */             \
    __asm movdqu     0[edx], xmm0                                              \
    __asm movdqu     16[edx], xmm1                                             \
    __asm lea        edx,  [edx + 32]                                          \
  }

// Store 8 BGRA values.
#define STOREBGRA __asm {                                                      \
    __asm pcmpeqb    xmm5, xmm5           /* generate 0xffffffff for alpha */  \
    __asm punpcklbw  xmm1, xmm0           /* GB */                             \
    __asm punpcklbw  xmm5, xmm2           /* AR */                             \
    __asm movdqa     xmm0, xmm5                                                \
    __asm punpcklwd  xmm5, xmm1           /* BGRA first 4 pixels */            \
    __asm punpckhwd  xmm0, xmm1           /* BGRA next 4 pixels */             \
    __asm movdqu     0[edx], xmm5                                              \
    __asm movdqu     16[edx], xmm0                                             \
    __asm lea        edx,  [edx + 32]                                          \
  }

// Store 8 ABGR values.
#define STOREABGR __asm {                                                      \
    __asm punpcklbw  xmm2, xmm1           /* RG */                             \
    __asm punpcklbw  xmm0, xmm5           /* BA */                             \
    __asm movdqa     xmm1, xmm2                                                \
    __asm punpcklwd  xmm2, xmm0           /* RGBA first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm0           /* RGBA next 4 pixels */             \
    __asm movdqu     0[edx], xmm2                                              \
    __asm movdqu     16[edx], xmm1                                             \
    __asm lea        edx,  [edx + 32]                                          \
  }

// Store 8 RGBA values.
#define STORERGBA __asm {                                                      \
    __asm pcmpeqb    xmm5, xmm5           /* generate 0xffffffff for alpha */  \
    __asm punpcklbw  xmm1, xmm2           /* GR */                             \
    __asm punpcklbw  xmm5, xmm0           /* AB */                             \
    __asm movdqa     xmm0, xmm5                                                \
    __asm punpcklwd  xmm5, xmm1           /* RGBA first 4 pixels */            \
    __asm punpckhwd  xmm0, xmm1           /* RGBA next 4 pixels */             \
    __asm movdqu     0[edx], xmm5                                              \
    __asm movdqu     16[edx], xmm0                                             \
    __asm lea        edx,  [edx + 32]                                          \
  }

// Store 8 RGB24 values.
#define STORERGB24 __asm {                                                     \
    /* Weave into RRGB */                                                      \
    __asm punpcklbw  xmm0, xmm1           /* BG */                             \
    __asm punpcklbw  xmm2, xmm2           /* RR */                             \
    __asm movdqa     xmm1, xmm0                                                \
    __asm punpcklwd  xmm0, xmm2           /* BGRR first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm2           /* BGRR next 4 pixels */             \
    /* RRGB -> RGB24 */                                                        \
    __asm pshufb     xmm0, xmm5           /* Pack first 8 and last 4 bytes. */ \
    __asm pshufb     xmm1, xmm6           /* Pack first 12 bytes. */           \
    __asm palignr    xmm1, xmm0, 12       /* last 4 bytes of xmm0 + 12 xmm1 */ \
    __asm movq       qword ptr 0[edx], xmm0  /* First 8 bytes */               \
    __asm movdqu     8[edx], xmm1         /* Last 16 bytes */                  \
    __asm lea        edx,  [edx + 24]                                          \
  }

// Store 8 RAW values.
#define STORERAW __asm {                                                       \
    /* Weave into RRGB */                                                      \
    __asm punpcklbw  xmm0, xmm1           /* BG */                             \
    __asm punpcklbw  xmm2, xmm2           /* RR */                             \
    __asm movdqa     xmm1, xmm0                                                \
    __asm punpcklwd  xmm0, xmm2           /* BGRR first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm2           /* BGRR next 4 pixels */             \
    /* Step 4: RRGB -> RAW */                                                  \
    __asm pshufb     xmm0, xmm5           /* Pack first 8 and last 4 bytes. */ \
    __asm pshufb     xmm1, xmm6           /* Pack first 12 bytes. */           \
    __asm palignr    xmm1, xmm0, 12       /* last 4 bytes of xmm0 + 12 xmm1 */ \
    __asm movq       qword ptr 0[edx], xmm0  /* First 8 bytes */               \
    __asm movdqu     8[edx], xmm1         /* Last 16 bytes */                  \
    __asm lea        edx,  [edx + 24]                                          \
  }
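
// STORERGB24 and STORERAW differ only in the shuffle tables their callers
// load: RGB24 stores bytes B,G,R per pixel while RAW stores R,G,B. A scalar
// sketch of the same pack for one ARGB pixel (illustrative only):
static __inline void ARGBToRGB24PixelRef(const uint8* argb, uint8* rgb24) {
  rgb24[0] = argb[0];  // B
  rgb24[1] = argb[1];  // G
  rgb24[2] = argb[2];  // R (RAW would store R,G,B instead)
}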

// Store 8 RGB565 values.
#define STORERGB565 __asm {                                                    \
    /* Weave into RRGB */                                                      \
    __asm punpcklbw  xmm0, xmm1           /* BG */                             \
    __asm punpcklbw  xmm2, xmm2           /* RR */                             \
    __asm movdqa     xmm1, xmm0                                                \
    __asm punpcklwd  xmm0, xmm2           /* BGRR first 4 pixels */            \
    __asm punpckhwd  xmm1, xmm2           /* BGRR next 4 pixels */             \
    /* RRGB -> RGB565 */                                                       \
    __asm movdqa     xmm3, xmm0    /* B  first 4 pixels of argb */             \
    __asm movdqa     xmm2, xmm0    /* G */                                     \
    __asm pslld      xmm0, 8       /* R */                                     \
    __asm psrld      xmm3, 3       /* B */                                     \
    __asm psrld      xmm2, 5       /* G */                                     \
    __asm psrad      xmm0, 16      /* R */                                     \
    __asm pand       xmm3, xmm5    /* B */                                     \
    __asm pand       xmm2, xmm6    /* G */                                     \
    __asm pand       xmm0, xmm7    /* R */                                     \
    __asm por        xmm3, xmm2    /* BG */                                    \
    __asm por        xmm0, xmm3    /* BGR */                                   \
    __asm movdqa     xmm3, xmm1    /* B  next 4 pixels of argb */              \
    __asm movdqa     xmm2, xmm1    /* G */                                     \
    __asm pslld      xmm1, 8       /* R */                                     \
    __asm psrld      xmm3, 3       /* B */                                     \
    __asm psrld      xmm2, 5       /* G */                                     \
    __asm psrad      xmm1, 16      /* R */                                     \
    __asm pand       xmm3, xmm5    /* B */                                     \
    __asm pand       xmm2, xmm6    /* G */                                     \
    __asm pand       xmm1, xmm7    /* R */                                     \
    __asm por        xmm3, xmm2    /* BG */                                    \
    __asm por        xmm1, xmm3    /* BGR */                                   \
    __asm packssdw   xmm0, xmm1                                                \
    __asm movdqu     0[edx], xmm0  /* store 8 pixels of RGB565 */              \
    __asm lea        edx, [edx + 16]                                           \
  }
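
// A scalar sketch of the RGB565 pack above (illustration only): the masks
// built in I422ToRGB565Row_SSSE3 below keep 5 bits of B, 6 of G and 5 of R.
static __inline uint16 ARGBToRGB565PixelRef(uint8 b, uint8 g, uint8 r) {
  return (uint16)((b >> 3) | ((g >> 2) << 5) | ((r >> 3) << 11));
}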

// 8 pixels.
// 8 UV values, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked)
void I444ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // argb
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READYUV444
    YUVTORGB(kYuvConstants)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RGB24 (24 bytes).
__declspec(naked)
void I422ToRGB24Row_SSSE3(const uint8* y_buf,
                          const uint8* u_buf,
                          const uint8* v_buf,
                          uint8* dst_rgb24,
                          int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // rgb24
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    movdqa     xmm5, xmmword ptr kShuffleMaskARGBToRGB24_0
    movdqa     xmm6, xmmword ptr kShuffleMaskARGBToRGB24

 convertloop:
    READYUV422
    YUVTORGB(kYuvConstants)
    STORERGB24

    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RAW (24 bytes).
__declspec(naked)
void I422ToRAWRow_SSSE3(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* dst_raw,
                        int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // raw
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    movdqa     xmm5, xmmword ptr kShuffleMaskARGBToRAW_0
    movdqa     xmm6, xmmword ptr kShuffleMaskARGBToRAW

 convertloop:
    READYUV422
    YUVTORGB(kYuvConstants)
    STORERAW

    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RGB565 (16 bytes).
__declspec(naked)
void I422ToRGB565Row_SSSE3(const uint8* y_buf,
                           const uint8* u_buf,
                           const uint8* v_buf,
                           uint8* rgb565_buf,
                           int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // rgb565
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5       // generate mask 0x0000001f
    psrld      xmm5, 27
    pcmpeqb    xmm6, xmm6       // generate mask 0x000007e0
    psrld      xmm6, 26
    pslld      xmm6, 5
    pcmpeqb    xmm7, xmm7       // generate mask 0xfffff800
    pslld      xmm7, 11

 convertloop:
    READYUV422
    YUVTORGB(kYuvConstants)
    STORERGB565

    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked)
void I422ToARGBMatrixRow_SSSE3(const uint8* y_buf,
                               const uint8* u_buf,
                               const uint8* v_buf,
                               uint8* dst_argb,
                               struct YuvConstants* YuvConstants,
                               int width) {
  __asm {
    push       esi
    push       edi
    push       ebp
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ebp, [esp + 12 + 20]  // YuvConstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READYUV422
    YUVTORGB(ebp)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        ebp
    pop        edi
    pop        esi
    ret
  }
}

// 8 pixels.
// 2 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
// Similar to I420 but duplicate UV once more.
__declspec(naked)
void I411ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_argb,
                         int width) {
  __asm {
    push       ebx
    push       esi
    push       edi
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // argb
    mov        ecx, [esp + 12 + 20]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5            // generate 0xffffffff for alpha

 convertloop:
    READYUV411
    YUVTORGB(kYuvConstants)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    pop        ebx
    ret
  }
}

// 8 pixels.
// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked)
void NV12ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* uv_buf,
                         uint8* dst_argb,
                         int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // Y
    mov        esi, [esp + 4 + 8]   // UV
    mov        edx, [esp + 4 + 12]  // argb
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READNV12
    YUVTORGB(kYuvConstants)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        esi
    ret
  }
}

// 8 pixels.
// 4 VU values upsampled to 8 VU, mixed with 8 Y producing 8 ARGB (32 bytes).
__declspec(naked)
void NV21ToARGBRow_SSSE3(const uint8* y_buf,
                         const uint8* uv_buf,
                         uint8* dst_argb,
                         int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // Y
    mov        esi, [esp + 4 + 8]   // UV
    mov        edx, [esp + 4 + 12]  // argb
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READNV12
    YUVTORGB(kYvuConstants)
    STOREARGB

    sub        ecx, 8
    jg         convertloop

    pop        esi
    ret
  }
}

__declspec(naked)
void I422ToBGRARow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_bgra,
                         int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // bgra
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi

 convertloop:
    READYUV422
    YUVTORGB(kYuvConstants)
    STOREBGRA

    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void I422ToABGRMatrixRow_SSSE3(const uint8* y_buf,
                               const uint8* u_buf,
                               const uint8* v_buf,
                               uint8* dst_abgr,
                               struct YuvConstants* YuvConstants,
                               int width) {
  __asm {
    push       esi
    push       edi
    push       ebp
    mov        eax, [esp + 12 + 4]   // Y
    mov        esi, [esp + 12 + 8]   // U
    mov        edi, [esp + 12 + 12]  // V
    mov        edx, [esp + 12 + 16]  // abgr
    mov        ebp, [esp + 12 + 20]  // YuvConstants
    mov        ecx, [esp + 12 + 24]  // width
    sub        edi, esi
    pcmpeqb    xmm5, xmm5           // generate 0xffffffff for alpha

 convertloop:
    READYUV422
    YUVTORGB(ebp)
    STOREABGR

    sub        ecx, 8
    jg         convertloop

    pop        ebp
    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void I422ToRGBARow_SSSE3(const uint8* y_buf,
                         const uint8* u_buf,
                         const uint8* v_buf,
                         uint8* dst_rgba,
                         int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // Y
    mov        esi, [esp + 8 + 8]   // U
    mov        edi, [esp + 8 + 12]  // V
    mov        edx, [esp + 8 + 16]  // rgba
    mov        ecx, [esp + 8 + 20]  // width
    sub        edi, esi

 convertloop:
    READYUV422
    YUVTORGB(kYuvConstants)
    STORERGBA

    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

#endif  // HAS_I422TOARGBROW_SSSE3
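
// For reference, a scalar sketch of the fixed-point math the YUVTORGB macro
// applies per pixel with the UB/UG/VG/VR/YG constants and BB/BG/BR biases
// defined above. Illustrative only, not libyuv's C path; both helper names
// are hypothetical.
static __inline uint8 Clamp0To255Sketch(int v) {
  return (uint8)(v < 0 ? 0 : (v > 255 ? 255 : v));
}
static __inline void YuvPixelSketch(uint8 y, uint8 u, uint8 v,
                                    uint8* b, uint8* g, uint8* r) {
  uint32 y1 = ((uint32)y * 0x0101 * YG) >> 16;  // luma with 6 bit fraction
  *b = Clamp0To255Sketch((int)(BB - (u * UB) + y1) >> 6);
  *g = Clamp0To255Sketch((int)(BG - (u * UG + v * VG) + y1) >> 6);
  *r = Clamp0To255Sketch((int)(BR - (v * VR) + y1) >> 6);
}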

#ifdef HAS_I400TOARGBROW_SSE2
// 8 pixels of Y converted to 8 pixels of ARGB (32 bytes).
__declspec(naked)
void I400ToARGBRow_SSE2(const uint8* y_buf,
                        uint8* rgb_buf,
                        int width) {
  __asm {
    mov        eax, 0x4a354a35      // 4a35 = 18997 = round(1.164 * 64 * 256)
    movd       xmm2, eax
    pshufd     xmm2, xmm2, 0
    mov        eax, 0x04880488      // 0488 = 1160 = round(1.164 * 64 * 16)
    movd       xmm3, eax
    pshufd     xmm3, xmm3, 0
    pcmpeqb    xmm4, xmm4           // generate mask 0xff000000
    pslld      xmm4, 24

    mov        eax, [esp + 4]       // Y
    mov        edx, [esp + 8]       // rgb
    mov        ecx, [esp + 12]      // width

 convertloop:
    // Step 1: Scale Y contribution to 8 G values. G = (y - 16) * 1.164
    movq       xmm0, qword ptr [eax]
    lea        eax, [eax + 8]
    punpcklbw  xmm0, xmm0           // Y.Y
    pmulhuw    xmm0, xmm2
    psubusw    xmm0, xmm3
    psrlw      xmm0, 6
    packuswb   xmm0, xmm0           // G

    // Step 2: Weave into ARGB
    punpcklbw  xmm0, xmm0           // GG
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm0           // BGRA first 4 pixels
    punpckhwd  xmm1, xmm1           // BGRA next 4 pixels
    por        xmm0, xmm4
    por        xmm1, xmm4
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx,  [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
#endif  // HAS_I400TOARGBROW_SSE2
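
// Scalar sketch of the fixed-point luma scaling above (illustrative only,
// not libyuv's C path): duplicate Y into a 16 bit word (y * 0x0101),
// multiply-high by 18997, subtract the 1160 bias with unsigned saturation,
// then shift down 6 bits - i.e. G = (y - 16) * 1.164.
static __inline uint8 I400ToGSketch(uint8 y) {
  uint32 g = (((uint32)y * 0x0101) * 18997) >> 16;  // pmulhuw by 0x4a35
  g = (g > 1160) ? (g - 1160) : 0;                  // psubusw by 0x0488
  g >>= 6;                                          // psrlw 6
  return (uint8)(g > 255 ? 255 : g);                // packuswb saturation
}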

#ifdef HAS_I400TOARGBROW_AVX2
// 16 pixels of Y converted to 16 pixels of ARGB (64 bytes).
// note: vpunpcklbw mutates and vpackuswb unmutates.
__declspec(naked)
void I400ToARGBRow_AVX2(const uint8* y_buf,
                        uint8* rgb_buf,
                        int width) {
  __asm {
    mov        eax, 0x4a354a35      // 4a35 = 18997 = round(1.164 * 64 * 256)
    vmovd      xmm2, eax
    vbroadcastss ymm2, xmm2
    mov        eax, 0x04880488      // 0488 = 1160 = round(1.164 * 64 * 16)
    vmovd      xmm3, eax
    vbroadcastss ymm3, xmm3
    vpcmpeqb   ymm4, ymm4, ymm4     // generate mask 0xff000000
    vpslld     ymm4, ymm4, 24

    mov        eax, [esp + 4]       // Y
    mov        edx, [esp + 8]       // rgb
    mov        ecx, [esp + 12]      // width

 convertloop:
    // Step 1: Scale Y contribution to 16 G values. G = (y - 16) * 1.164
    vmovdqu    xmm0, [eax]
    lea        eax, [eax + 16]
    vpermq     ymm0, ymm0, 0xd8           // vpunpcklbw mutates
    vpunpcklbw ymm0, ymm0, ymm0           // Y.Y
    vpmulhuw   ymm0, ymm0, ymm2
    vpsubusw   ymm0, ymm0, ymm3
    vpsrlw     ymm0, ymm0, 6
    vpackuswb  ymm0, ymm0, ymm0           // G.  still mutated: 3120

    // TODO(fbarchard): Weave alpha with unpack.
    // Step 2: Weave into ARGB
    vpunpcklbw ymm1, ymm0, ymm0           // GG - mutates
    vpermq     ymm1, ymm1, 0xd8
    vpunpcklwd ymm0, ymm1, ymm1           // GGGG first 8 pixels
    vpunpckhwd ymm1, ymm1, ymm1           // GGGG next 8 pixels
    vpor       ymm0, ymm0, ymm4
    vpor       ymm1, ymm1, ymm4
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx,  [edx + 64]
    sub        ecx, 16
    jg         convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_I400TOARGBROW_AVX2

#ifdef HAS_MIRRORROW_SSSE3
// Shuffle table for reversing the bytes.
static const uvec8 kShuffleMirror = {
  15u, 14u, 13u, 12u, 11u, 10u, 9u, 8u, 7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u
};

// TODO(fbarchard): Replace lea with -16 offset.
__declspec(naked)
void MirrorRow_SSSE3(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    movdqa    xmm5, xmmword ptr kShuffleMirror

 convertloop:
    movdqu    xmm0, [eax - 16 + ecx]
    pshufb    xmm0, xmm5
    movdqu    [edx], xmm0
    lea       edx, [edx + 16]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}
#endif  // HAS_MIRRORROW_SSSE3
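
// The kShuffleMirror table above reverses all 16 bytes of a vector while the
// loop walks backward from the end of the row. Scalar sketch (illustrative
// only, not libyuv's C path):
static void MirrorRowSketch(const uint8* src, uint8* dst, int width) {
  int x;
  for (x = 0; x < width; ++x) {
    dst[x] = src[width - 1 - x];  // last byte first
  }
}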

#ifdef HAS_MIRRORROW_AVX2
__declspec(naked)
void MirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    vbroadcastf128 ymm5, xmmword ptr kShuffleMirror

 convertloop:
    vmovdqu   ymm0, [eax - 32 + ecx]
    vpshufb   ymm0, ymm0, ymm5
    vpermq    ymm0, ymm0, 0x4e  // swap high and low halves
    vmovdqu   [edx], ymm0
    lea       edx, [edx + 32]
    sub       ecx, 32
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_MIRRORROW_AVX2

#ifdef HAS_MIRRORROW_SSE2
__declspec(naked)
void MirrorRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width

 convertloop:
    movdqu    xmm0, [eax - 16 + ecx]
    movdqa    xmm1, xmm0        // swap bytes
    psllw     xmm0, 8
    psrlw     xmm1, 8
    por       xmm0, xmm1
    pshuflw   xmm0, xmm0, 0x1b  // swap words
    pshufhw   xmm0, xmm0, 0x1b
    pshufd    xmm0, xmm0, 0x4e  // swap qwords
    movdqu    [edx], xmm0
    lea       edx, [edx + 16]
    sub       ecx, 16
    jg        convertloop
    ret
  }
}
#endif  // HAS_MIRRORROW_SSE2

#ifdef HAS_MIRRORROW_UV_SSSE3
// Shuffle table for reversing the bytes of UV channels.
static const uvec8 kShuffleMirrorUV = {
  14u, 12u, 10u, 8u, 6u, 4u, 2u, 0u, 15u, 13u, 11u, 9u, 7u, 5u, 3u, 1u
};

__declspec(naked)
void MirrorUVRow_SSSE3(const uint8* src, uint8* dst_u, uint8* dst_v,
                       int width) {
  __asm {
    push      edi
    mov       eax, [esp + 4 + 4]   // src
    mov       edx, [esp + 4 + 8]   // dst_u
    mov       edi, [esp + 4 + 12]  // dst_v
    mov       ecx, [esp + 4 + 16]  // width
    movdqa    xmm1, xmmword ptr kShuffleMirrorUV
    lea       eax, [eax + ecx * 2 - 16]
    sub       edi, edx

 convertloop:
    movdqu    xmm0, [eax]
    lea       eax, [eax - 16]
    pshufb    xmm0, xmm1
    movlpd    qword ptr [edx], xmm0
    movhpd    qword ptr [edx + edi], xmm0
    lea       edx, [edx + 8]
    sub       ecx, 8
    jg        convertloop

    pop       edi
    ret
  }
}
#endif  // HAS_MIRRORROW_UV_SSSE3
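
// MirrorUVRow reverses a row of interleaved UV pairs while also splitting it
// into separate U and V planes. Scalar sketch (illustrative only):
static void MirrorUVRowSketch(const uint8* src, uint8* dst_u, uint8* dst_v,
                              int width) {
  int x;
  src += (width - 1) * 2;  // start at the last UV pair
  for (x = 0; x < width; ++x) {
    dst_u[x] = src[0];
    dst_v[x] = src[1];
    src -= 2;
  }
}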

#ifdef HAS_ARGBMIRRORROW_SSE2
__declspec(naked)
void ARGBMirrorRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    lea       eax, [eax - 16 + ecx * 4]  // last 4 pixels.

 convertloop:
    movdqu    xmm0, [eax]
    lea       eax, [eax - 16]
    pshufd    xmm0, xmm0, 0x1b
    movdqu    [edx], xmm0
    lea       edx, [edx + 16]
    sub       ecx, 4
    jg        convertloop
    ret
  }
}
#endif  // HAS_ARGBMIRRORROW_SSE2

#ifdef HAS_ARGBMIRRORROW_AVX2
// Shuffle table for reversing the dwords (one ARGB pixel per dword).
static const ulvec32 kARGBShuffleMirror_AVX2 = {
  7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u
};

__declspec(naked)
void ARGBMirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov       eax, [esp + 4]   // src
    mov       edx, [esp + 8]   // dst
    mov       ecx, [esp + 12]  // width
    vmovdqu   ymm5, ymmword ptr kARGBShuffleMirror_AVX2

 convertloop:
    vpermd    ymm0, ymm5, [eax - 32 + ecx * 4]  // permute dword order
    vmovdqu   [edx], ymm0
    lea       edx, [edx + 32]
    sub       ecx, 8
    jg        convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBMIRRORROW_AVX2

#ifdef HAS_SPLITUVROW_SSE2
__declspec(naked)
void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uv
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    movdqa     xmm2, xmm0
    movdqa     xmm3, xmm1
    pand       xmm0, xmm5   // even bytes
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    psrlw      xmm2, 8      // odd bytes
    psrlw      xmm3, 8
    packuswb   xmm2, xmm3
    movdqu     [edx], xmm0
    movdqu     [edx + edi], xmm2
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}

#endif  // HAS_SPLITUVROW_SSE2
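
// SplitUVRow deinterleaves UVUV... into separate U and V planes: the
// 0x00ff00ff mask keeps the even (U) bytes and the 8 bit shift extracts the
// odd (V) bytes. Scalar sketch (illustrative only):
static void SplitUVRowSketch(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
                             int pix) {
  int x;
  for (x = 0; x < pix; ++x) {
    dst_u[x] = src_uv[x * 2 + 0];  // even bytes
    dst_v[x] = src_uv[x * 2 + 1];  // odd bytes
  }
}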

#ifdef HAS_SPLITUVROW_AVX2
__declspec(naked)
void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uv
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm2, ymm0, 8      // odd bytes
    vpsrlw     ymm3, ymm1, 8
    vpand      ymm0, ymm0, ymm5   // even bytes
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1
    vpackuswb  ymm2, ymm2, ymm3
    vpermq     ymm0, ymm0, 0xd8
    vpermq     ymm2, ymm2, 0xd8
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + edi], ymm2
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}
#endif  // HAS_SPLITUVROW_AVX2

#ifdef HAS_MERGEUVROW_SSE2
__declspec(naked)
void MergeUVRow_SSE2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
                     int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_u
    mov        edx, [esp + 4 + 8]    // src_v
    mov        edi, [esp + 4 + 12]   // dst_uv
    mov        ecx, [esp + 4 + 16]   // width
    sub        edx, eax

  convertloop:
    movdqu     xmm0, [eax]      // read 16 U's
    movdqu     xmm1, [eax + edx]  // and 16 V's
    lea        eax,  [eax + 16]
    movdqa     xmm2, xmm0
    punpcklbw  xmm0, xmm1       // first 8 UV pairs
    punpckhbw  xmm2, xmm1       // next 8 UV pairs
    movdqu     [edi], xmm0
    movdqu     [edi + 16], xmm2
    lea        edi, [edi + 32]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}
#endif  //  HAS_MERGEUVROW_SSE2
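
// MergeUVRow is the inverse of SplitUVRow: punpcklbw/punpckhbw interleave the
// U and V bytes back into UVUV... order. Scalar sketch (illustrative only):
static void MergeUVRowSketch(const uint8* src_u, const uint8* src_v,
                             uint8* dst_uv, int width) {
  int x;
  for (x = 0; x < width; ++x) {
    dst_uv[x * 2 + 0] = src_u[x];
    dst_uv[x * 2 + 1] = src_v[x];
  }
}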

#ifdef HAS_MERGEUVROW_AVX2
__declspec(naked)
void MergeUVRow_AVX2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
                     int width) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_u
    mov        edx, [esp + 4 + 8]    // src_v
    mov        edi, [esp + 4 + 12]   // dst_uv
    mov        ecx, [esp + 4 + 16]   // width
    sub        edx, eax

  convertloop:
    vmovdqu    ymm0, [eax]           // read 32 U's
    vmovdqu    ymm1, [eax + edx]     // and 32 V's
    lea        eax,  [eax + 32]
    vpunpcklbw ymm2, ymm0, ymm1      // low 16 UV pairs. mutated qqword 0,2
    vpunpckhbw ymm0, ymm0, ymm1      // high 16 UV pairs. mutated qqword 1,3
    vextractf128 [edi], ymm2, 0       // bytes 0..15
    vextractf128 [edi + 16], ymm0, 0  // bytes 16..31
    vextractf128 [edi + 32], ymm2, 1  // bytes 32..47
    vextractf128 [edi + 48], ymm0, 1  // bytes 48..63
    lea        edi, [edi + 64]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}
#endif  //  HAS_MERGEUVROW_AVX2

#ifdef HAS_COPYROW_SSE2
// CopyRow copies 'count' bytes using a 16 byte load/store, 32 bytes at a time.
__declspec(naked)
void CopyRow_SSE2(const uint8* src, uint8* dst, int count) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax, [eax + 32]
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    ret
  }
}
#endif  // HAS_COPYROW_SSE2

#ifdef HAS_COPYROW_AVX
// CopyRow copies 'count' bytes using a 32 byte load/store, 64 bytes at a time.
__declspec(naked)
void CopyRow_AVX(const uint8* src, uint8* dst, int count) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax, [eax + 64]
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx, [edx + 64]
    sub        ecx, 64
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_COPYROW_AVX

// Multiple of 1, i.e. any byte count.
__declspec(naked)
void CopyRow_ERMS(const uint8* src, uint8* dst, int count) {
  __asm {
    mov        eax, esi
    mov        edx, edi
    mov        esi, [esp + 4]   // src
    mov        edi, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    rep movsb
    mov        edi, edx
    mov        esi, eax
    ret
  }
}

#ifdef HAS_ARGBCOPYALPHAROW_SSE2
// width in pixels
__declspec(naked)
void ARGBCopyAlphaRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    pcmpeqb    xmm0, xmm0       // generate mask 0xff000000
    pslld      xmm0, 24
    pcmpeqb    xmm1, xmm1       // generate mask 0x00ffffff
    psrld      xmm1, 8

  convertloop:
    movdqu     xmm2, [eax]
    movdqu     xmm3, [eax + 16]
    lea        eax, [eax + 32]
    movdqu     xmm4, [edx]
    movdqu     xmm5, [edx + 16]
    pand       xmm2, xmm0
    pand       xmm3, xmm0
    pand       xmm4, xmm1
    pand       xmm5, xmm1
    por        xmm2, xmm4
    por        xmm3, xmm5
    movdqu     [edx], xmm2
    movdqu     [edx + 16], xmm3
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBCOPYALPHAROW_SSE2
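
// ARGBCopyAlphaRow overwrites only the alpha byte of each dst pixel with the
// alpha from src, which is what the 0xff000000 / 0x00ffffff mask pair
// implements. Scalar sketch (illustrative only):
static void ARGBCopyAlphaRowSketch(const uint8* src, uint8* dst, int width) {
  int x;
  for (x = 0; x < width; ++x) {
    dst[x * 4 + 3] = src[x * 4 + 3];  // ARGB is stored B,G,R,A in memory
  }
}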

#ifdef HAS_ARGBCOPYALPHAROW_AVX2
// width in pixels
__declspec(naked)
void ARGBCopyAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    vpcmpeqb   ymm0, ymm0, ymm0
    vpsrld     ymm0, ymm0, 8    // generate mask 0x00ffffff

  convertloop:
    vmovdqu    ymm1, [eax]
    vmovdqu    ymm2, [eax + 32]
    lea        eax, [eax + 64]
    vpblendvb  ymm1, ymm1, [edx], ymm0
    vpblendvb  ymm2, ymm2, [edx + 32], ymm0
    vmovdqu    [edx], ymm1
    vmovdqu    [edx + 32], ymm2
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBCOPYALPHAROW_AVX2

#ifdef HAS_ARGBCOPYYTOALPHAROW_SSE2
// width in pixels
__declspec(naked)
void ARGBCopyYToAlphaRow_SSE2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    pcmpeqb    xmm0, xmm0       // generate mask 0xff000000
    pslld      xmm0, 24
    pcmpeqb    xmm1, xmm1       // generate mask 0x00ffffff
    psrld      xmm1, 8

  convertloop:
    movq       xmm2, qword ptr [eax]  // 8 Y's
    lea        eax, [eax + 8]
    punpcklbw  xmm2, xmm2
    punpckhwd  xmm3, xmm2
    punpcklwd  xmm2, xmm2
    movdqu     xmm4, [edx]
    movdqu     xmm5, [edx + 16]
    pand       xmm2, xmm0
    pand       xmm3, xmm0
    pand       xmm4, xmm1
    pand       xmm5, xmm1
    por        xmm2, xmm4
    por        xmm3, xmm5
    movdqu     [edx], xmm2
    movdqu     [edx + 16], xmm3
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBCOPYYTOALPHAROW_SSE2

#ifdef HAS_ARGBCOPYYTOALPHAROW_AVX2
// width in pixels
__declspec(naked)
void ARGBCopyYToAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
  __asm {
    mov        eax, [esp + 4]   // src
    mov        edx, [esp + 8]   // dst
    mov        ecx, [esp + 12]  // count
    vpcmpeqb   ymm0, ymm0, ymm0
    vpsrld     ymm0, ymm0, 8    // generate mask 0x00ffffff

  convertloop:
    vpmovzxbd  ymm1, qword ptr [eax]
    vpmovzxbd  ymm2, qword ptr [eax + 8]
    lea        eax, [eax + 16]
    vpslld     ymm1, ymm1, 24
    vpslld     ymm2, ymm2, 24
    vpblendvb  ymm1, ymm1, [edx], ymm0
    vpblendvb  ymm2, ymm2, [edx + 32], ymm0
    vmovdqu    [edx], ymm1
    vmovdqu    [edx + 32], ymm2
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBCOPYYTOALPHAROW_AVX2

#ifdef HAS_SETROW_X86
// Write 'count' bytes using an 8 bit value repeated.
// Count should be multiple of 4.
__declspec(naked)
void SetRow_X86(uint8* dst, uint8 v8, int count) {
  __asm {
    movzx      eax, byte ptr [esp + 8]    // v8
    mov        edx, 0x01010101  // Duplicate byte to all bytes.
    mul        edx              // overwrites edx with upper part of result.
    mov        edx, edi
    mov        edi, [esp + 4]   // dst
    mov        ecx, [esp + 12]  // count
    shr        ecx, 2
    rep stosd
    mov        edi, edx
    ret
  }
}
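
// The mul by 0x01010101 above replicates the byte into every lane of a dword
// so rep stosd can store four bytes per iteration. Scalar sketch
// (illustrative only; assumes count is a multiple of 4 as documented, and
// relies on x86 tolerating unaligned dword stores):
static void SetRowSketch(uint8* dst, uint8 v8, int count) {
  uint32 v32 = (uint32)v8 * 0x01010101u;  // duplicate byte to all bytes
  int x;
  for (x = 0; x < count; x += 4) {
    *(uint32*)(dst + x) = v32;  // rep stosd equivalent
  }
}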

// Write 'count' bytes using an 8 bit value repeated.
__declspec(naked)
void SetRow_ERMS(uint8* dst, uint8 v8, int count) {
  __asm {
    mov        edx, edi
    mov        edi, [esp + 4]   // dst
    mov        eax, [esp + 8]   // v8
    mov        ecx, [esp + 12]  // count
    rep stosb
    mov        edi, edx
    ret
  }
}

// Write 'count' 32 bit values.
__declspec(naked)
void ARGBSetRow_X86(uint8* dst_argb, uint32 v32, int count) {
  __asm {
    mov        edx, edi
    mov        edi, [esp + 4]   // dst
    mov        eax, [esp + 8]   // v32
    mov        ecx, [esp + 12]  // count
    rep stosd
    mov        edi, edx
    ret
  }
}
#endif  // HAS_SETROW_X86

#ifdef HAS_YUY2TOYROW_AVX2
__declspec(naked)
void YUY2ToYRow_AVX2(const uint8* src_yuy2,
                     uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_yuy2
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5  // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpand      ymm0, ymm0, ymm5   // even bytes are Y
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    vzeroupper
    ret
  }
}
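
// YUY2 stores pixels as Y0 U0 Y1 V0, so luma is the even bytes; the
// 0x00ff00ff mask plus packuswb extract it. Scalar sketch (illustrative
// only):
static void YUY2ToYRowSketch(const uint8* src_yuy2, uint8* dst_y, int pix) {
  int x;
  for (x = 0; x < pix; ++x) {
    dst_y[x] = src_yuy2[x * 2];  // even bytes are Y
  }
}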

__declspec(naked)
void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2,
                      uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_yuy2
    mov        esi, [esp + 8 + 8]    // stride_yuy2
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vpavgb     ymm0, ymm0, [eax + esi]
    vpavgb     ymm1, ymm1, [eax + esi + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm0, ymm0, 8      // YUYV -> UVUV
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}

__declspec(naked)
void YUY2ToUV422Row_AVX2(const uint8* src_yuy2,
                         uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_yuy2
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm0, ymm0, 8      // YUYV -> UVUV
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}

__declspec(naked)
void UYVYToYRow_AVX2(const uint8* src_uyvy,
                     uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_uyvy
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // pix

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpsrlw     ymm0, ymm0, 8      // odd bytes are Y
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 32
    jg         convertloop
    vzeroupper
    ret
  }
}

__declspec(naked)
void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy,
                      uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_uyvy
    mov        esi, [esp + 8 + 8]    // stride_uyvy
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    vpavgb     ymm0, ymm0, [eax + esi]
    vpavgb     ymm1, ymm1, [eax + esi + 32]
    lea        eax,  [eax + 64]
    vpand      ymm0, ymm0, ymm5   // UYVY -> UVUV
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}

__declspec(naked)
void UYVYToUV422Row_AVX2(const uint8* src_uyvy,
                         uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uyvy
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    vpcmpeqb   ymm5, ymm5, ymm5      // generate mask 0x00ff00ff
    vpsrlw     ymm5, ymm5, 8
    sub        edi, edx

  convertloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax,  [eax + 64]
    vpand      ymm0, ymm0, ymm5   // UYVY -> UVUV
    vpand      ymm1, ymm1, ymm5
    vpackuswb  ymm0, ymm0, ymm1   // mutates.
    vpermq     ymm0, ymm0, 0xd8
    vpand      ymm1, ymm0, ymm5  // U
    vpsrlw     ymm0, ymm0, 8     // V
    vpackuswb  ymm1, ymm1, ymm1  // mutates.
    vpackuswb  ymm0, ymm0, ymm0  // mutates.
    vpermq     ymm1, ymm1, 0xd8
    vpermq     ymm0, ymm0, 0xd8
    vextractf128 [edx], ymm1, 0  // U
    vextractf128 [edx + edi], ymm0, 0 // V
    lea        edx, [edx + 16]
    sub        ecx, 32
    jg         convertloop

    pop        edi
    vzeroupper
    ret
  }
}
#endif  // HAS_YUY2TOYROW_AVX2

#ifdef HAS_YUY2TOYROW_SSE2
__declspec(naked)
void YUY2ToYRow_SSE2(const uint8* src_yuy2,
                     uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_yuy2
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // pix
    pcmpeqb    xmm5, xmm5        // generate mask 0x00ff00ff
    psrlw      xmm5, 8

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    pand       xmm0, xmm5   // even bytes are Y
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2,
                      uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_yuy2
    mov        esi, [esp + 8 + 8]    // stride_yuy2
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + esi]
    movdqu     xmm3, [eax + esi + 16]
    lea        eax,  [eax + 32]
    pavgb      xmm0, xmm2
    pavgb      xmm1, xmm3
    psrlw      xmm0, 8      // YUYV -> UVUV
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
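
// YUY2ToUVRow averages the chroma of two adjacent rows (pavgb rounds up) and
// then splits U from V. Scalar sketch (illustrative only):
static void YUY2ToUVRowSketch(const uint8* src_yuy2, int stride_yuy2,
                              uint8* dst_u, uint8* dst_v, int pix) {
  const uint8* next = src_yuy2 + stride_yuy2;  // row below
  int x;
  for (x = 0; x < pix; x += 2) {
    dst_u[x / 2] = (uint8)((src_yuy2[x * 2 + 1] + next[x * 2 + 1] + 1) >> 1);
    dst_v[x / 2] = (uint8)((src_yuy2[x * 2 + 3] + next[x * 2 + 3] + 1) >> 1);
  }
}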

__declspec(naked)
void YUY2ToUV422Row_SSE2(const uint8* src_yuy2,
                         uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_yuy2
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    psrlw      xmm0, 8      // YUYV -> UVUV
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}

__declspec(naked)
void UYVYToYRow_SSE2(const uint8* src_uyvy,
                     uint8* dst_y, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_uyvy
    mov        edx, [esp + 8]    // dst_y
    mov        ecx, [esp + 12]   // pix

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    psrlw      xmm0, 8    // odd bytes are Y
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop
    ret
  }
}

__declspec(naked)
void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy,
                      uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_uyvy
    mov        esi, [esp + 8 + 8]    // stride_uyvy
    mov        edx, [esp + 8 + 12]   // dst_u
    mov        edi, [esp + 8 + 16]   // dst_v
    mov        ecx, [esp + 8 + 20]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
3981 3982 3983 3984
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + esi]
    movdqu     xmm3, [eax + esi + 16]
3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999
    lea        eax,  [eax + 32]
    pavgb      xmm0, xmm2
    pavgb      xmm1, xmm3
    pand       xmm0, xmm5   // UYVY -> UVUV
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void UYVYToUV422Row_SSE2(const uint8* src_uyvy,
                         uint8* dst_u, uint8* dst_v, int pix) {
  __asm {
    push       edi
    mov        eax, [esp + 4 + 4]    // src_uyvy
    mov        edx, [esp + 4 + 8]    // dst_u
    mov        edi, [esp + 4 + 12]   // dst_v
    mov        ecx, [esp + 4 + 16]   // pix
    pcmpeqb    xmm5, xmm5            // generate mask 0x00ff00ff
    psrlw      xmm5, 8
    sub        edi, edx

  convertloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax,  [eax + 32]
    pand       xmm0, xmm5   // UYVY -> UVUV
    pand       xmm1, xmm5
    packuswb   xmm0, xmm1
    movdqa     xmm1, xmm0
    pand       xmm0, xmm5  // U
    packuswb   xmm0, xmm0
    psrlw      xmm1, 8     // V
    packuswb   xmm1, xmm1
    movq       qword ptr [edx], xmm0
    movq       qword ptr [edx + edi], xmm1
    lea        edx, [edx + 8]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    ret
  }
}
#endif  // HAS_YUY2TOYROW_SSE2

#ifdef HAS_ARGBBLENDROW_SSE2
// Blend 8 pixels at a time.
__declspec(naked)
void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                       uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm7, xmm7       // generate constant 1
    psrlw      xmm7, 15
    pcmpeqb    xmm6, xmm6       // generate mask 0x00ff00ff
    psrlw      xmm6, 8
    pcmpeqb    xmm5, xmm5       // generate mask 0xff00ff00
    psllw      xmm5, 8
    pcmpeqb    xmm4, xmm4       // generate mask 0xff000000
    pslld      xmm4, 24
    sub        ecx, 4
    jl         convertloop4b    // less than 4 pixels?

    // 4 pixel loop.
  convertloop4:
    movdqu     xmm3, [eax]      // src argb
    lea        eax, [eax + 16]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movdqu     xmm2, [esi]      // _r_b
    psrlw      xmm3, 8          // alpha
    pshufhw    xmm3, xmm3, 0F5h // 8 alpha words
    pshuflw    xmm3, xmm3, 0F5h
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movdqu     xmm1, [esi]      // _a_g
    lea        esi, [esi + 16]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        convertloop4

  convertloop4b:
    add        ecx, 4 - 1
    jl         convertloop1b

    // 1 pixel loop.
  convertloop1:
    movd       xmm3, [eax]      // src argb
    lea        eax, [eax + 4]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movd       xmm2, [esi]      // _r_b
    psrlw      xmm3, 8          // alpha
    pshufhw    xmm3, xmm3, 0F5h // 8 alpha words
    pshuflw    xmm3, xmm3, 0F5h
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movd       xmm1, [esi]      // _a_g
    lea        esi, [esi + 4]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        convertloop1

  convertloop1b:
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBBLENDROW_SSE2
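
// Per channel, the blend above computes
//   dst = min(255, src + ((dst * (256 - src_alpha)) >> 8))
// and forces the result alpha to 255. Scalar sketch (illustrative only):
static __inline uint8 BlendChannelSketch(uint8 fg, uint8 bg, uint8 fg_a) {
  uint32 v = fg + (((uint32)bg * (256 - fg_a)) >> 8);
  return (uint8)(v > 255 ? 255 : v);  // paddusb saturation
}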

#ifdef HAS_ARGBBLENDROW_SSSE3
// Shuffle table for isolating alpha.
static const uvec8 kShuffleAlpha = {
  3u, 0x80, 3u, 0x80, 7u, 0x80, 7u, 0x80,
  11u, 0x80, 11u, 0x80, 15u, 0x80, 15u, 0x80
};
// Same as SSE2, but replaces:
//    psrlw      xmm3, 8          // alpha
//    pshufhw    xmm3, xmm3, 0F5h // 8 alpha words
//    pshuflw    xmm3, xmm3, 0F5h
// with..
//    pshufb     xmm3, kShuffleAlpha // alpha
// Blend 8 pixels at a time.

__declspec(naked)
void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
                        uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    pcmpeqb    xmm7, xmm7       // generate constant 0x0001
    psrlw      xmm7, 15
    pcmpeqb    xmm6, xmm6       // generate mask 0x00ff00ff
    psrlw      xmm6, 8
    pcmpeqb    xmm5, xmm5       // generate mask 0xff00ff00
    psllw      xmm5, 8
    pcmpeqb    xmm4, xmm4       // generate mask 0xff000000
    pslld      xmm4, 24
    sub        ecx, 4
    jl         convertloop4b    // less than 4 pixels?

    // 4 pixel loop.
  convertloop4:
    movdqu     xmm3, [eax]      // src argb
    lea        eax, [eax + 16]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movdqu     xmm2, [esi]      // _r_b
    pshufb     xmm3, xmmword ptr kShuffleAlpha // alpha
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movdqu     xmm1, [esi]      // _a_g
    lea        esi, [esi + 16]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        convertloop4

  convertloop4b:
    add        ecx, 4 - 1
    jl         convertloop1b

    // 1 pixel loop.
  convertloop1:
    movd       xmm3, [eax]      // src argb
    lea        eax, [eax + 4]
    movdqa     xmm0, xmm3       // src argb
    pxor       xmm3, xmm4       // ~alpha
    movd       xmm2, [esi]      // _r_b
    pshufb     xmm3, xmmword ptr kShuffleAlpha // alpha
    pand       xmm2, xmm6       // _r_b
    paddw      xmm3, xmm7       // 256 - alpha
    pmullw     xmm2, xmm3       // _r_b * alpha
    movd       xmm1, [esi]      // _a_g
    lea        esi, [esi + 4]
    psrlw      xmm1, 8          // _a_g
    por        xmm0, xmm4       // set alpha to 255
    pmullw     xmm1, xmm3       // _a_g * alpha
    psrlw      xmm2, 8          // _r_b convert to 8 bits again
    paddusb    xmm0, xmm2       // + src argb
    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        convertloop1

  convertloop1b:
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBBLENDROW_SSSE3

#ifdef HAS_ARGBATTENUATEROW_SSE2
// Attenuate 4 pixels at a time.
__declspec(naked)
void ARGBAttenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb0
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    pcmpeqb    xmm4, xmm4       // generate mask 0xff000000
    pslld      xmm4, 24
    pcmpeqb    xmm5, xmm5       // generate mask 0x00ffffff
    psrld      xmm5, 8

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    punpcklbw  xmm0, xmm0       // first 2
    pshufhw    xmm2, xmm0, 0FFh // 8 alpha words
    pshuflw    xmm2, xmm2, 0FFh
    pmulhuw    xmm0, xmm2       // rgb * a
    movdqu     xmm1, [eax]      // read 4 pixels
    punpckhbw  xmm1, xmm1       // next 2 pixels
    pshufhw    xmm2, xmm1, 0FFh // 8 alpha words
    pshuflw    xmm2, xmm2, 0FFh
    pmulhuw    xmm1, xmm2       // rgb * a
    movdqu     xmm2, [eax]      // alphas
    lea        eax, [eax + 16]
    psrlw      xmm0, 8
    pand       xmm2, xmm4
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    pand       xmm0, xmm5       // keep original alphas
    por        xmm0, xmm2
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBATTENUATEROW_SSE2
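
// Attenuation multiplies each color channel by its alpha, roughly
// c' = c * a / 255. The asm does this as pmulhuw on byte-doubled words:
// ((c * 0x0101) * (a * 0x0101)) >> 16, followed by a shift down of 8.
// Scalar sketch (illustrative only):
static __inline uint8 AttenuateChannelSketch(uint8 c, uint8 a) {
  return (uint8)((((uint32)c * 0x0101) * ((uint32)a * 0x0101)) >> 24);
}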

#ifdef HAS_ARGBATTENUATEROW_SSSE3
// Shuffle table duplicating alpha.
static const uvec8 kShuffleAlpha0 = {
  3u, 3u, 3u, 3u, 3u, 3u, 128u, 128u, 7u, 7u, 7u, 7u, 7u, 7u, 128u, 128u,
};
static const uvec8 kShuffleAlpha1 = {
  11u, 11u, 11u, 11u, 11u, 11u, 128u, 128u,
  15u, 15u, 15u, 15u, 15u, 15u, 128u, 128u,
};
__declspec(naked)
void ARGBAttenuateRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb0
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    pcmpeqb    xmm3, xmm3       // generate mask 0xff000000
    pslld      xmm3, 24
    movdqa     xmm4, xmmword ptr kShuffleAlpha0
    movdqa     xmm5, xmmword ptr kShuffleAlpha1

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    pshufb     xmm0, xmm4       // isolate first 2 alphas
    movdqu     xmm1, [eax]      // read 4 pixels
    punpcklbw  xmm1, xmm1       // first 2 pixel rgbs
    pmulhuw    xmm0, xmm1       // rgb * a
    movdqu     xmm1, [eax]      // read 4 pixels
    pshufb     xmm1, xmm5       // isolate next 2 alphas
    movdqu     xmm2, [eax]      // read 4 pixels
    punpckhbw  xmm2, xmm2       // next 2 pixel rgbs
    pmulhuw    xmm1, xmm2       // rgb * a
    movdqu     xmm2, [eax]      // mask original alpha
    lea        eax, [eax + 16]
    pand       xmm2, xmm3
    psrlw      xmm0, 8
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    por        xmm0, xmm2       // copy original alpha
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBATTENUATEROW_SSSE3

#ifdef HAS_ARGBATTENUATEROW_AVX2
// Shuffle table duplicating alpha.
static const uvec8 kShuffleAlpha_AVX2 = {
  6u, 7u, 6u, 7u, 6u, 7u, 128u, 128u, 14u, 15u, 14u, 15u, 14u, 15u, 128u, 128u
};
__declspec(naked)
void ARGBAttenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb0
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    sub        edx, eax
    vbroadcastf128 ymm4, xmmword ptr kShuffleAlpha_AVX2
    vpcmpeqb   ymm5, ymm5, ymm5 // generate mask 0xff000000
    vpslld     ymm5, ymm5, 24

 convertloop:
    vmovdqu    ymm6, [eax]       // read 8 pixels.
    vpunpcklbw ymm0, ymm6, ymm6  // low 4 pixels. mutated.
    vpunpckhbw ymm1, ymm6, ymm6  // high 4 pixels. mutated.
    vpshufb    ymm2, ymm0, ymm4  // low 4 alphas
    vpshufb    ymm3, ymm1, ymm4  // high 4 alphas
    vpmulhuw   ymm0, ymm0, ymm2  // rgb * a
    vpmulhuw   ymm1, ymm1, ymm3  // rgb * a
    vpand      ymm6, ymm6, ymm5  // isolate alpha
    vpsrlw     ymm0, ymm0, 8
    vpsrlw     ymm1, ymm1, 8
    vpackuswb  ymm0, ymm0, ymm1  // unmutated.
    vpor       ymm0, ymm0, ymm6  // copy original alpha
    vmovdqu    [eax + edx], ymm0
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBATTENUATEROW_AVX2

#ifdef HAS_ARGBUNATTENUATEROW_SSE2
// Unattenuate 4 pixels at a time.
__declspec(naked)
void ARGBUnattenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb,
                             int width) {
  __asm {
    push       ebx
    push       esi
    push       edi
    mov        eax, [esp + 12 + 4]   // src_argb
    mov        edx, [esp + 12 + 8]   // dst_argb
    mov        ecx, [esp + 12 + 12]  // width
    lea        ebx, fixed_invtbl8

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    movzx      esi, byte ptr [eax + 3]  // first alpha
    movzx      edi, byte ptr [eax + 7]  // second alpha
    punpcklbw  xmm0, xmm0       // first 2
    movd       xmm2, dword ptr [ebx + esi * 4]
    movd       xmm3, dword ptr [ebx + edi * 4]
    pshuflw    xmm2, xmm2, 040h // first 4 inv_alpha words.  1, a, a, a
    pshuflw    xmm3, xmm3, 040h // next 4 inv_alpha words
    movlhps    xmm2, xmm3
    pmulhuw    xmm0, xmm2       // rgb * ia

    movdqu     xmm1, [eax]      // read 4 pixels
    movzx      esi, byte ptr [eax + 11]  // third alpha
    movzx      edi, byte ptr [eax + 15]  // fourth alpha
    punpckhbw  xmm1, xmm1       // next 2
    movd       xmm2, dword ptr [ebx + esi * 4]
    movd       xmm3, dword ptr [ebx + edi * 4]
    pshuflw    xmm2, xmm2, 040h // first 4 inv_alpha words
    pshuflw    xmm3, xmm3, 040h // next 4 inv_alpha words
    movlhps    xmm2, xmm3
    pmulhuw    xmm1, xmm2       // rgb * ia
    lea        eax, [eax + 16]
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    pop        edi
    pop        esi
    pop        ebx
    ret
  }
}
#endif  // HAS_ARGBUNATTENUATEROW_SSE2
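
// Unattenuation divides each channel by its alpha via the fixed_invtbl8
// reciprocal table, whose low halfword is roughly 65536 / a. Scalar sketch
// (illustrative only; the real table special-cases a == 0 and its rounding
// details differ slightly):
static __inline uint8 UnattenuateChannelSketch(uint8 c, uint8 a) {
  uint32 ia = a ? (65536u / a) : 0;              // table low halfword
  uint32 v = (((uint32)c * 0x0101) * ia) >> 16;  // pmulhuw on doubled bytes
  return (uint8)(v > 255 ? 255 : v);             // packuswb saturation
}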

#ifdef HAS_ARGBUNATTENUATEROW_AVX2
// Shuffle table duplicating alpha.
static const uvec8 kUnattenShuffleAlpha_AVX2 = {
  0u, 1u, 0u, 1u, 0u, 1u, 6u, 7u, 8u, 9u, 8u, 9u, 8u, 9u, 14u, 15u
};
// TODO(fbarchard): Enable USE_GATHER for future hardware if faster.
// USE_GATHER is not on by default, due to being a slow instruction.
#ifdef USE_GATHER
__declspec(naked)
void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb,
                             int width) {
  __asm {
    mov        eax, [esp + 4]   // src_argb0
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    sub        edx, eax
Frank Barchard's avatar
Frank Barchard committed
4424
    vbroadcastf128 ymm4, xmmword ptr kUnattenShuffleAlpha_AVX2
fbarchard@google.com's avatar
fbarchard@google.com committed
4425 4426 4427

 convertloop:
    vmovdqu    ymm6, [eax]       // read 8 pixels.
4428
    vpcmpeqb   ymm5, ymm5, ymm5  // generate mask 0xffffffff for gather.
fbarchard@google.com's avatar
fbarchard@google.com committed
4429 4430 4431
    vpsrld     ymm2, ymm6, 24    // alpha in low 8 bits.
    vpunpcklbw ymm0, ymm6, ymm6  // low 4 pixels. mutated.
    vpunpckhbw ymm1, ymm6, ymm6  // high 4 pixels. mutated.
4432 4433 4434 4435
    vpgatherdd ymm3, [ymm2 * 4 + fixed_invtbl8], ymm5  // ymm5 cleared.  1, a
    vpunpcklwd ymm2, ymm3, ymm3  // low 4 inverted alphas. mutated. 1, 1, a, a
    vpunpckhwd ymm3, ymm3, ymm3  // high 4 inverted alphas. mutated.
    vpshufb    ymm2, ymm2, ymm4  // replicate low 4 alphas. 1, a, a, a
fbarchard@google.com's avatar
fbarchard@google.com committed
4436 4437 4438 4439 4440 4441
    vpshufb    ymm3, ymm3, ymm4  // replicate high 4 alphas
    vpmulhuw   ymm0, ymm0, ymm2  // rgb * ia
    vpmulhuw   ymm1, ymm1, ymm3  // rgb * ia
    vpackuswb  ymm0, ymm0, ymm1  // unmutated.
    vmovdqu    [eax + edx], ymm0
    lea        eax, [eax + 32]
4442
    sub        ecx, 8
fbarchard@google.com's avatar
fbarchard@google.com committed
4443 4444
    jg         convertloop

4445
    vzeroupper
fbarchard@google.com's avatar
fbarchard@google.com committed
4446 4447 4448
    ret
  }
}
4449
#else  // USE_GATHER
__declspec(naked)
void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb,
                             int width) {
  __asm {

    push       ebx
    push       esi
    push       edi
    mov        eax, [esp + 12 + 4]   // src_argb
    mov        edx, [esp + 12 + 8]   // dst_argb
    mov        ecx, [esp + 12 + 12]  // width
    sub        edx, eax
    lea        ebx, fixed_invtbl8
    vbroadcastf128 ymm5, xmmword ptr kUnattenShuffleAlpha_AVX2

 convertloop:
    // replace VPGATHER
    movzx      esi, byte ptr [eax + 3]                 // alpha0
    movzx      edi, byte ptr [eax + 7]                 // alpha1
    vmovd      xmm0, dword ptr [ebx + esi * 4]  // [1,a0]
    vmovd      xmm1, dword ptr [ebx + edi * 4]  // [1,a1]
    movzx      esi, byte ptr [eax + 11]                // alpha2
    movzx      edi, byte ptr [eax + 15]                // alpha3
    vpunpckldq xmm6, xmm0, xmm1                        // [1,a1,1,a0]
    vmovd      xmm2, dword ptr [ebx + esi * 4]  // [1,a2]
    vmovd      xmm3, dword ptr [ebx + edi * 4]  // [1,a3]
    movzx      esi, byte ptr [eax + 19]                // alpha4
    movzx      edi, byte ptr [eax + 23]                // alpha5
    vpunpckldq xmm7, xmm2, xmm3                        // [1,a3,1,a2]
    vmovd      xmm0, dword ptr [ebx + esi * 4]  // [1,a4]
    vmovd      xmm1, dword ptr [ebx + edi * 4]  // [1,a5]
    movzx      esi, byte ptr [eax + 27]                // alpha6
    movzx      edi, byte ptr [eax + 31]                // alpha7
    vpunpckldq xmm0, xmm0, xmm1                        // [1,a5,1,a4]
    vmovd      xmm2, dword ptr [ebx + esi * 4]  // [1,a6]
    vmovd      xmm3, dword ptr [ebx + edi * 4]  // [1,a7]
    vpunpckldq xmm2, xmm2, xmm3                        // [1,a7,1,a6]
    vpunpcklqdq xmm3, xmm6, xmm7                       // [1,a3,1,a2,1,a1,1,a0]
    vpunpcklqdq xmm0, xmm0, xmm2                       // [1,a7,1,a6,1,a5,1,a4]
    vinserti128 ymm3, ymm3, xmm0, 1 // [1,a7,1,a6,1,a5,1,a4,1,a3,1,a2,1,a1,1,a0]
    // end of VPGATHER

    vmovdqu    ymm6, [eax]       // read 8 pixels.
    vpunpcklbw ymm0, ymm6, ymm6  // low 4 pixels. mutated.
    vpunpckhbw ymm1, ymm6, ymm6  // high 4 pixels. mutated.
    vpunpcklwd ymm2, ymm3, ymm3  // low 4 inverted alphas. mutated. 1, 1, a, a
    vpunpckhwd ymm3, ymm3, ymm3  // high 4 inverted alphas. mutated.
    vpshufb    ymm2, ymm2, ymm5  // replicate low 4 alphas. 1, a, a, a
    vpshufb    ymm3, ymm3, ymm5  // replicate high 4 alphas
    vpmulhuw   ymm0, ymm0, ymm2  // rgb * ia
    vpmulhuw   ymm1, ymm1, ymm3  // rgb * ia
    vpackuswb  ymm0, ymm0, ymm1  // unmutated.
    vmovdqu    [eax + edx], ymm0
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    pop        ebx
    vzeroupper
    ret
  }
}
#endif  // USE_GATHER
#endif  // HAS_ARGBUNATTENUATEROW_AVX2

#ifdef HAS_ARGBGRAYROW_SSSE3
// Convert 8 ARGB pixels (32 bytes) to 8 Gray ARGB pixels.
__declspec(naked)
void ARGBGrayRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_argb */
    mov        ecx, [esp + 12]  /* width */
    movdqa     xmm4, xmmword ptr kARGBToYJ
    movdqa     xmm5, xmmword ptr kAddYJ64

 convertloop:
    movdqu     xmm0, [eax]  // G
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm0, xmm4
    pmaddubsw  xmm1, xmm4
    phaddw     xmm0, xmm1
    paddw      xmm0, xmm5  // Add .5 for rounding.
    psrlw      xmm0, 7
    packuswb   xmm0, xmm0   // 8 G bytes
    movdqu     xmm2, [eax]  // A
    movdqu     xmm3, [eax + 16]
    lea        eax, [eax + 32]
    psrld      xmm2, 24
    psrld      xmm3, 24
    packuswb   xmm2, xmm3
    packuswb   xmm2, xmm2   // 8 A bytes
    movdqa     xmm3, xmm0   // Weave into GG, GA, then GGGA
    punpcklbw  xmm0, xmm0   // 8 GG words
    punpcklbw  xmm3, xmm2   // 8 GA words
    movdqa     xmm1, xmm0
    punpcklwd  xmm0, xmm3   // GGGA first 4
    punpckhwd  xmm1, xmm3   // GGGA next 4
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
#endif  // HAS_ARGBGRAYROW_SSSE3
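
// A scalar sketch of the gray conversion, assuming the JPEG-style luma
// weights in kARGBToYJ (roughly 0.299 R + 0.587 G + 0.114 B, scaled by 128)
// and the +64 rounding term in kAddYJ64.  Illustrative only.
static __inline uint8 ARGBToGray_Sketch(uint8 b, uint8 g, uint8 r) {
  return (uint8)((b * 15 + g * 75 + r * 38 + 64) >> 7);
}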

#ifdef HAS_ARGBSEPIAROW_SSSE3
//    b = (r * 35 + g * 68 + b * 17) >> 7
//    g = (r * 45 + g * 88 + b * 22) >> 7
//    r = (r * 50 + g * 98 + b * 24) >> 7
// Constant for ARGB color to sepia tone.
static const vec8 kARGBToSepiaB = {
  17, 68, 35, 0, 17, 68, 35, 0, 17, 68, 35, 0, 17, 68, 35, 0
};

static const vec8 kARGBToSepiaG = {
  22, 88, 45, 0, 22, 88, 45, 0, 22, 88, 45, 0, 22, 88, 45, 0
};

static const vec8 kARGBToSepiaR = {
  24, 98, 50, 0, 24, 98, 50, 0, 24, 98, 50, 0, 24, 98, 50, 0
};
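
// A scalar sketch of one sepia pixel using the formulas above, saturating at
// 255 with alpha unchanged (the SIMD loop below works in place).  Illustrative
// only.
static __inline void ARGBSepiaPixel_Sketch(uint8* pixel) {
  int b = pixel[0], g = pixel[1], r = pixel[2];
  int sb = (r * 35 + g * 68 + b * 17) >> 7;
  int sg = (r * 45 + g * 88 + b * 22) >> 7;
  int sr = (r * 50 + g * 98 + b * 24) >> 7;
  pixel[0] = (uint8)(sb > 255 ? 255 : sb);
  pixel[1] = (uint8)(sg > 255 ? 255 : sg);
  pixel[2] = (uint8)(sr > 255 ? 255 : sr);
}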

// Convert 8 ARGB pixels (32 bytes) to 8 Sepia ARGB pixels.
__declspec(naked)
void ARGBSepiaRow_SSSE3(uint8* dst_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   /* dst_argb */
    mov        ecx, [esp + 8]   /* width */
    movdqa     xmm2, xmmword ptr kARGBToSepiaB
    movdqa     xmm3, xmmword ptr kARGBToSepiaG
    movdqa     xmm4, xmmword ptr kARGBToSepiaR

 convertloop:
    movdqu     xmm0, [eax]  // B
    movdqu     xmm6, [eax + 16]
    pmaddubsw  xmm0, xmm2
    pmaddubsw  xmm6, xmm2
    phaddw     xmm0, xmm6
    psrlw      xmm0, 7
    packuswb   xmm0, xmm0   // 8 B values
    movdqu     xmm5, [eax]  // G
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm5, xmm3
    pmaddubsw  xmm1, xmm3
    phaddw     xmm5, xmm1
    psrlw      xmm5, 7
    packuswb   xmm5, xmm5   // 8 G values
    punpcklbw  xmm0, xmm5   // 8 BG values
    movdqu     xmm5, [eax]  // R
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm5, xmm4
    pmaddubsw  xmm1, xmm4
    phaddw     xmm5, xmm1
    psrlw      xmm5, 7
    packuswb   xmm5, xmm5   // 8 R values
    movdqu     xmm6, [eax]  // A
    movdqu     xmm1, [eax + 16]
    psrld      xmm6, 24
    psrld      xmm1, 24
    packuswb   xmm6, xmm1
    packuswb   xmm6, xmm6   // 8 A values
    punpcklbw  xmm5, xmm6   // 8 RA values
    movdqa     xmm1, xmm0   // Weave BG, RA together
    punpcklwd  xmm0, xmm5   // BGRA first 4
    punpckhwd  xmm1, xmm5   // BGRA next 4
    movdqu     [eax], xmm0
    movdqu     [eax + 16], xmm1
    lea        eax, [eax + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
#endif  // HAS_ARGBSEPIAROW_SSSE3

#ifdef HAS_ARGBCOLORMATRIXROW_SSSE3
// Transform 8 ARGB pixels (32 bytes) with color matrix.
// Same as Sepia except matrix is provided.
// TODO(fbarchard): packuswbs only use half of the reg. To make RGBA, combine R
// and B into a high and low, then G/A, unpackl/hbw and then unpckl/hwd.
__declspec(naked)
void ARGBColorMatrixRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
                              const int8* matrix_argb, int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_argb */
    mov        ecx, [esp + 12]  /* matrix_argb */
    movdqu     xmm5, [ecx]
    pshufd     xmm2, xmm5, 0x00
    pshufd     xmm3, xmm5, 0x55
    pshufd     xmm4, xmm5, 0xaa
    pshufd     xmm5, xmm5, 0xff
    mov        ecx, [esp + 16]  /* width */

 convertloop:
    movdqu     xmm0, [eax]  // B
    movdqu     xmm7, [eax + 16]
    pmaddubsw  xmm0, xmm2
    pmaddubsw  xmm7, xmm2
    movdqu     xmm6, [eax]  // G
    movdqu     xmm1, [eax + 16]
    pmaddubsw  xmm6, xmm3
    pmaddubsw  xmm1, xmm3
    phaddsw    xmm0, xmm7   // B
    phaddsw    xmm6, xmm1   // G
    psraw      xmm0, 6      // B
    psraw      xmm6, 6      // G
    packuswb   xmm0, xmm0   // 8 B values
    packuswb   xmm6, xmm6   // 8 G values
    punpcklbw  xmm0, xmm6   // 8 BG values
    movdqu     xmm1, [eax]  // R
    movdqu     xmm7, [eax + 16]
    pmaddubsw  xmm1, xmm4
    pmaddubsw  xmm7, xmm4
    phaddsw    xmm1, xmm7   // R
    movdqu     xmm6, [eax]  // A
    movdqu     xmm7, [eax + 16]
    pmaddubsw  xmm6, xmm5
    pmaddubsw  xmm7, xmm5
    phaddsw    xmm6, xmm7   // A
    psraw      xmm1, 6      // R
    psraw      xmm6, 6      // A
    packuswb   xmm1, xmm1   // 8 R values
    packuswb   xmm6, xmm6   // 8 A values
    punpcklbw  xmm1, xmm6   // 8 RA values
    movdqa     xmm6, xmm0   // Weave BG, RA together
    punpcklwd  xmm0, xmm1   // BGRA first 4
    punpckhwd  xmm6, xmm1   // BGRA next 4
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm6
    lea        eax, [eax + 32]
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop
    ret
  }
}
#endif  // HAS_ARGBCOLORMATRIXROW_SSSE3
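
// A scalar sketch of the color-matrix transform: each output channel is a
// signed dot product of the input (B, G, R, A) bytes with one 4-byte matrix
// column, shifted down by 6 and saturated to [0, 255].  Illustrative only.
static __inline void ARGBColorMatrixPixel_Sketch(const uint8* src, uint8* dst,
                                                 const int8* m) {
  int i;
  for (i = 0; i < 4; ++i) {
    int v = (src[0] * m[i * 4 + 0] + src[1] * m[i * 4 + 1] +
             src[2] * m[i * 4 + 2] + src[3] * m[i * 4 + 3]) >> 6;
    dst[i] = (uint8)(v < 0 ? 0 : (v > 255 ? 255 : v));
  }
}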

#ifdef HAS_ARGBQUANTIZEROW_SSE2
// Quantize 4 ARGB pixels (16 bytes).
__declspec(naked)
void ARGBQuantizeRow_SSE2(uint8* dst_argb, int scale, int interval_size,
                          int interval_offset, int width) {
  __asm {
    mov        eax, [esp + 4]    /* dst_argb */
    movd       xmm2, [esp + 8]   /* scale */
    movd       xmm3, [esp + 12]  /* interval_size */
    movd       xmm4, [esp + 16]  /* interval_offset */
    mov        ecx, [esp + 20]   /* width */
    pshuflw    xmm2, xmm2, 040h
    pshufd     xmm2, xmm2, 044h
    pshuflw    xmm3, xmm3, 040h
    pshufd     xmm3, xmm3, 044h
    pshuflw    xmm4, xmm4, 040h
    pshufd     xmm4, xmm4, 044h
    pxor       xmm5, xmm5  // constant 0
    pcmpeqb    xmm6, xmm6  // generate mask 0xff000000
    pslld      xmm6, 24

 convertloop:
    movdqu     xmm0, [eax]  // read 4 pixels
    punpcklbw  xmm0, xmm5   // first 2 pixels
    pmulhuw    xmm0, xmm2   // pixel * scale >> 16
    movdqu     xmm1, [eax]  // read 4 pixels
    punpckhbw  xmm1, xmm5   // next 2 pixels
    pmulhuw    xmm1, xmm2
    pmullw     xmm0, xmm3   // * interval_size
    movdqu     xmm7, [eax]  // read 4 pixels
    pmullw     xmm1, xmm3
    pand       xmm7, xmm6   // mask alpha
    paddw      xmm0, xmm4   // + interval_offset
    paddw      xmm1, xmm4
    packuswb   xmm0, xmm1
    por        xmm0, xmm7
    movdqu     [eax], xmm0
    lea        eax, [eax + 16]
    sub        ecx, 4
    jg         convertloop
    ret
  }
}
#endif  // HAS_ARGBQUANTIZEROW_SSE2
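
// A scalar sketch of the quantize (posterize) math applied to each color
// channel, with alpha left unchanged.  Illustrative only.
static __inline uint8 QuantizeChannel_Sketch(uint8 v, int scale,
                                             int interval_size,
                                             int interval_offset) {
  return (uint8)(((v * scale) >> 16) * interval_size + interval_offset);
}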


#ifdef HAS_ARGBSHADEROW_SSE2
// Shade 4 pixels at a time by specified value.
__declspec(naked)
void ARGBShadeRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width,
                       uint32 value) {
  __asm {
    mov        eax, [esp + 4]   // src_argb
    mov        edx, [esp + 8]   // dst_argb
    mov        ecx, [esp + 12]  // width
    movd       xmm2, [esp + 16]  // value
    punpcklbw  xmm2, xmm2
    punpcklqdq xmm2, xmm2

 convertloop:
    movdqu     xmm0, [eax]      // read 4 pixels
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm0       // first 2
    punpckhbw  xmm1, xmm1       // next 2
    pmulhuw    xmm0, xmm2       // argb * value
    pmulhuw    xmm1, xmm2       // argb * value
    psrlw      xmm0, 8
    psrlw      xmm1, 8
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    ret
  }
}
#endif  // HAS_ARGBSHADEROW_SSE2

#ifdef HAS_ARGBMULTIPLYROW_SSE2
// Multiply 2 rows of ARGB pixels together, 4 pixels at a time.
__declspec(naked)
void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    pxor       xmm5, xmm5  // constant 0

 convertloop:
    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
    movdqu     xmm2, [esi]        // read 4 pixels from src_argb1
    movdqu     xmm1, xmm0
    movdqu     xmm3, xmm2
    punpcklbw  xmm0, xmm0         // first 2
    punpckhbw  xmm1, xmm1         // next 2
    punpcklbw  xmm2, xmm5         // first 2
    punpckhbw  xmm3, xmm5         // next 2
    pmulhuw    xmm0, xmm2         // src_argb0 * src_argb1 first 2
    pmulhuw    xmm1, xmm3         // src_argb0 * src_argb1 next 2
    lea        eax, [eax + 16]
    lea        esi, [esi + 16]
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_ARGBMULTIPLYROW_SSE2
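
// A scalar sketch of the multiply blend: dst = src0 * src1 / 255 per channel.
// The SIMD code computes (src0 * 257) * src1 >> 16, a close approximation
// that needs no divide.  Illustrative only.
static __inline uint8 MultiplyChannel_Sketch(uint8 s0, uint8 s1) {
  return (uint8)(((uint32)(s0 * 257) * s1) >> 16);
}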

#ifdef HAS_ARGBADDROW_SSE2
// Add 2 rows of ARGB pixels together, 4 pixels at a time.
// TODO(fbarchard): Port this to posix, neon and other math functions.
__declspec(naked)
void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                     uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

    sub        ecx, 4
    jl         convertloop49

 convertloop4:
    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
    lea        eax, [eax + 16]
    movdqu     xmm1, [esi]        // read 4 pixels from src_argb1
    lea        esi, [esi + 16]
    paddusb    xmm0, xmm1         // src_argb0 + src_argb1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        convertloop4

 convertloop49:
    add        ecx, 4 - 1
    jl         convertloop19

 convertloop1:
    movd       xmm0, [eax]        // read 1 pixel from src_argb0
    lea        eax, [eax + 4]
    movd       xmm1, [esi]        // read 1 pixel from src_argb1
    lea        esi, [esi + 4]
    paddusb    xmm0, xmm1         // src_argb0 + src_argb1
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        convertloop1

 convertloop19:
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBADDROW_SSE2

#ifdef HAS_ARGBSUBTRACTROW_SSE2
// Subtract 2 rows of ARGB pixels, 4 pixels at a time.
__declspec(naked)
void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

 convertloop:
    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
    lea        eax, [eax + 16]
    movdqu     xmm1, [esi]        // read 4 pixels from src_argb1
    lea        esi, [esi + 16]
    psubusb    xmm0, xmm1         // src_argb0 - src_argb1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_ARGBSUBTRACTROW_SSE2

#ifdef HAS_ARGBMULTIPLYROW_AVX2
// Multiply 2 rows of ARGB pixels together, 8 pixels at a time.
__declspec(naked)
void ARGBMultiplyRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    vpxor      ymm5, ymm5, ymm5     // constant 0

 convertloop:
    vmovdqu    ymm1, [eax]        // read 8 pixels from src_argb0
    lea        eax, [eax + 32]
    vmovdqu    ymm3, [esi]        // read 8 pixels from src_argb1
    lea        esi, [esi + 32]
    vpunpcklbw ymm0, ymm1, ymm1   // low 4
    vpunpckhbw ymm1, ymm1, ymm1   // high 4
    vpunpcklbw ymm2, ymm3, ymm5   // low 4
    vpunpckhbw ymm3, ymm3, ymm5   // high 4
    vpmulhuw   ymm0, ymm0, ymm2   // src_argb0 * src_argb1 low 4
    vpmulhuw   ymm1, ymm1, ymm3   // src_argb0 * src_argb1 high 4
    vpackuswb  ymm0, ymm0, ymm1
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBMULTIPLYROW_AVX2

#ifdef HAS_ARGBADDROW_AVX2
// Add 2 rows of ARGB pixels together, 8 pixels at a time.
__declspec(naked)
void ARGBAddRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
                     uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

 convertloop:
    vmovdqu    ymm0, [eax]              // read 8 pixels from src_argb0
    lea        eax, [eax + 32]
    vpaddusb   ymm0, ymm0, [esi]        // add 8 pixels from src_argb1
    lea        esi, [esi + 32]
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBADDROW_AVX2

#ifdef HAS_ARGBSUBTRACTROW_AVX2
// Subtract 2 rows of ARGB pixels, 8 pixels at a time.
__declspec(naked)
void ARGBSubtractRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
                          uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_argb0
    mov        esi, [esp + 4 + 8]   // src_argb1
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width

 convertloop:
    vmovdqu    ymm0, [eax]              // read 8 pixels from src_argb0
    lea        eax, [eax + 32]
    vpsubusb   ymm0, ymm0, [esi]        // src_argb0 - src_argb1
    lea        esi, [esi + 32]
    vmovdqu    [edx], ymm0
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBSUBTRACTROW_AVX2

#ifdef HAS_SOBELXROW_SSE2
// SobelX as a matrix is
// -1  0  1
// -2  0  2
// -1  0  1
__declspec(naked)
void SobelXRow_SSE2(const uint8* src_y0, const uint8* src_y1,
                    const uint8* src_y2, uint8* dst_sobelx, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   // src_y0
    mov        esi, [esp + 8 + 8]   // src_y1
    mov        edi, [esp + 8 + 12]  // src_y2
    mov        edx, [esp + 8 + 16]  // dst_sobelx
    mov        ecx, [esp + 8 + 20]  // width
    sub        esi, eax
    sub        edi, eax
    sub        edx, eax
    pxor       xmm5, xmm5  // constant 0

 convertloop:
    movq       xmm0, qword ptr [eax]            // read 8 pixels from src_y0[0]
    movq       xmm1, qword ptr [eax + 2]        // read 8 pixels from src_y0[2]
    punpcklbw  xmm0, xmm5
    punpcklbw  xmm1, xmm5
    psubw      xmm0, xmm1
    movq       xmm1, qword ptr [eax + esi]      // read 8 pixels from src_y1[0]
    movq       xmm2, qword ptr [eax + esi + 2]  // read 8 pixels from src_y1[2]
    punpcklbw  xmm1, xmm5
    punpcklbw  xmm2, xmm5
    psubw      xmm1, xmm2
    movq       xmm2, qword ptr [eax + edi]      // read 8 pixels from src_y2[0]
    movq       xmm3, qword ptr [eax + edi + 2]  // read 8 pixels from src_y2[2]
    punpcklbw  xmm2, xmm5
    punpcklbw  xmm3, xmm5
    psubw      xmm2, xmm3
    paddw      xmm0, xmm2
    paddw      xmm0, xmm1
    paddw      xmm0, xmm1
    pxor       xmm1, xmm1   // abs = max(xmm0, -xmm0).  SSSE3 could use pabsw
    psubw      xmm1, xmm0
    pmaxsw     xmm0, xmm1
    packuswb   xmm0, xmm0
    movq       qword ptr [eax + edx], xmm0
    lea        eax, [eax + 8]
    sub        ecx, 8
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_SOBELXROW_SSE2
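
// A scalar sketch of one SobelX output: the absolute value of the 3x3
// convolution above, saturated to 255.  y0/y1/y2 point at the three source
// rows for this column.  Illustrative only.
static __inline uint8 SobelXPixel_Sketch(const uint8* y0, const uint8* y1,
                                         const uint8* y2) {
  int sobel = (y0[0] - y0[2]) + 2 * (y1[0] - y1[2]) + (y2[0] - y2[2]);
  if (sobel < 0) sobel = -sobel;
  return (uint8)(sobel > 255 ? 255 : sobel);
}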

#ifdef HAS_SOBELYROW_SSE2
// SobelY as a matrix is
// -1 -2 -1
//  0  0  0
//  1  2  1
__declspec(naked)
void SobelYRow_SSE2(const uint8* src_y0, const uint8* src_y1,
                    uint8* dst_sobely, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_y0
    mov        esi, [esp + 4 + 8]   // src_y1
    mov        edx, [esp + 4 + 12]  // dst_sobely
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax
    sub        edx, eax
    pxor       xmm5, xmm5  // constant 0

 convertloop:
    movq       xmm0, qword ptr [eax]            // read 8 pixels from src_y0[0]
    movq       xmm1, qword ptr [eax + esi]      // read 8 pixels from src_y1[0]
    punpcklbw  xmm0, xmm5
    punpcklbw  xmm1, xmm5
    psubw      xmm0, xmm1
    movq       xmm1, qword ptr [eax + 1]        // read 8 pixels from src_y0[1]
    movq       xmm2, qword ptr [eax + esi + 1]  // read 8 pixels from src_y1[1]
    punpcklbw  xmm1, xmm5
    punpcklbw  xmm2, xmm5
    psubw      xmm1, xmm2
    movq       xmm2, qword ptr [eax + 2]        // read 8 pixels from src_y0[2]
    movq       xmm3, qword ptr [eax + esi + 2]  // read 8 pixels from src_y1[2]
    punpcklbw  xmm2, xmm5
    punpcklbw  xmm3, xmm5
    psubw      xmm2, xmm3
    paddw      xmm0, xmm2
    paddw      xmm0, xmm1
    paddw      xmm0, xmm1
    pxor       xmm1, xmm1   // abs = max(xmm0, -xmm0).  SSSE3 could use pabsw
    psubw      xmm1, xmm0
    pmaxsw     xmm0, xmm1
    packuswb   xmm0, xmm0
    movq       qword ptr [eax + edx], xmm0
    lea        eax, [eax + 8]
    sub        ecx, 8
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELYROW_SSE2

#ifdef HAS_SOBELROW_SSE2
// Adds Sobel X and Sobel Y and stores Sobel into ARGB.
// A = 255
// R = Sobel
// G = Sobel
// B = Sobel
__declspec(naked)
void SobelRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
                   uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_sobelx
    mov        esi, [esp + 4 + 8]   // src_sobely
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax
    pcmpeqb    xmm5, xmm5           // alpha 255
    pslld      xmm5, 24             // 0xff000000

 convertloop:
    movdqu     xmm0, [eax]            // read 16 pixels src_sobelx
    movdqu     xmm1, [eax + esi]      // read 16 pixels src_sobely
    lea        eax, [eax + 16]
    paddusb    xmm0, xmm1             // sobel = sobelx + sobely
    movdqa     xmm2, xmm0             // GG
    punpcklbw  xmm2, xmm0             // First 8
    punpckhbw  xmm0, xmm0             // Next 8
    movdqa     xmm1, xmm2             // GGGG
    punpcklwd  xmm1, xmm2             // First 4
    punpckhwd  xmm2, xmm2             // Next 4
    por        xmm1, xmm5             // GGGA
    por        xmm2, xmm5
    movdqa     xmm3, xmm0             // GGGG
    punpcklwd  xmm3, xmm0             // Next 4
    punpckhwd  xmm0, xmm0             // Last 4
    por        xmm3, xmm5             // GGGA
    por        xmm0, xmm5
    movdqu     [edx], xmm1
    movdqu     [edx + 16], xmm2
    movdqu     [edx + 32], xmm3
    movdqu     [edx + 48], xmm0
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELROW_SSE2

#ifdef HAS_SOBELTOPLANEROW_SSE2
// Adds Sobel X and Sobel Y and stores Sobel into a plane.
__declspec(naked)
void SobelToPlaneRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
                          uint8* dst_y, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_sobelx
    mov        esi, [esp + 4 + 8]   // src_sobely
    mov        edx, [esp + 4 + 12]  // dst_y
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax

 convertloop:
    movdqu     xmm0, [eax]            // read 16 pixels src_sobelx
    movdqu     xmm1, [eax + esi]      // read 16 pixels src_sobely
    lea        eax, [eax + 16]
    paddusb    xmm0, xmm1             // sobel = sobelx + sobely
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 16
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELTOPLANEROW_SSE2

#ifdef HAS_SOBELXYROW_SSE2
// Mixes Sobel X, Sobel Y and Sobel into ARGB.
// A = 255
// R = Sobel X
// G = Sobel
// B = Sobel Y
__declspec(naked)
void SobelXYRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
                     uint8* dst_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   // src_sobelx
    mov        esi, [esp + 4 + 8]   // src_sobely
    mov        edx, [esp + 4 + 12]  // dst_argb
    mov        ecx, [esp + 4 + 16]  // width
    sub        esi, eax
    pcmpeqb    xmm5, xmm5           // alpha 255

 convertloop:
    movdqu     xmm0, [eax]            // read 16 pixels src_sobelx
    movdqu     xmm1, [eax + esi]      // read 16 pixels src_sobely
    lea        eax, [eax + 16]
    movdqa     xmm2, xmm0
    paddusb    xmm2, xmm1             // sobel = sobelx + sobely
    movdqa     xmm3, xmm0             // XA
    punpcklbw  xmm3, xmm5
    punpckhbw  xmm0, xmm5
    movdqa     xmm4, xmm1             // YS
    punpcklbw  xmm4, xmm2
    punpckhbw  xmm1, xmm2
    movdqa     xmm6, xmm4             // YSXA
    punpcklwd  xmm6, xmm3             // First 4
    punpckhwd  xmm4, xmm3             // Next 4
    movdqa     xmm7, xmm1             // YSXA
    punpcklwd  xmm7, xmm0             // Next 4
    punpckhwd  xmm1, xmm0             // Last 4
    movdqu     [edx], xmm6
    movdqu     [edx + 16], xmm4
    movdqu     [edx + 32], xmm7
    movdqu     [edx + 48], xmm1
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_SOBELXYROW_SSE2
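
// A scalar sketch of one SobelXY output pixel in BGRA memory order, matching
// the channel assignment in the comments above.  Illustrative only.
static __inline void SobelXYPixel_Sketch(uint8 sobelx, uint8 sobely,
                                         uint8* dst) {
  int s = sobelx + sobely;
  dst[0] = sobely;                      // B = Sobel Y
  dst[1] = (uint8)(s > 255 ? 255 : s);  // G = Sobel (saturated sum)
  dst[2] = sobelx;                      // R = Sobel X
  dst[3] = 255;                         // A = opaque
}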

#ifdef HAS_CUMULATIVESUMTOAVERAGEROW_SSE2
// Consider float CumulativeSum.
// Consider calling CumulativeSum one row at a time as needed.
// Consider circular CumulativeSum buffer of radius * 2 + 1 height.
// Convert cumulative sum for an area to an average for 1 pixel.
// topleft is pointer to top left of CumulativeSum buffer for area.
// botleft is pointer to bottom left of CumulativeSum buffer.
// width is offset from left to right of area in CumulativeSum buffer measured
//   in number of ints.
// area is the number of pixels in the area being averaged.
// dst points to pixel to store result to.
// count is number of averaged pixels to produce.
// Does 4 pixels at a time.
void CumulativeSumToAverageRow_SSE2(const int32* topleft, const int32* botleft,
                                    int width, int area, uint8* dst,
                                    int count) {
  __asm {
    mov        eax, topleft  // eax topleft
    mov        esi, botleft  // esi botleft
    mov        edx, width
    movd       xmm5, area
    mov        edi, dst
    mov        ecx, count
    cvtdq2ps   xmm5, xmm5
    rcpss      xmm4, xmm5  // 1.0f / area
    pshufd     xmm4, xmm4, 0
    sub        ecx, 4
    jl         l4b

    cmp        area, 128  // 128 pixels will not overflow 15 bits.
    ja         l4

    pshufd     xmm5, xmm5, 0        // area
    pcmpeqb    xmm6, xmm6           // constant of 65536.0 - 1 = 65535.0
    psrld      xmm6, 16
    cvtdq2ps   xmm6, xmm6
    addps      xmm5, xmm6           // (65536.0 + area - 1)
    mulps      xmm5, xmm4           // (65536.0 + area - 1) * 1 / area
    cvtps2dq   xmm5, xmm5           // 0.16 fixed point
    packssdw   xmm5, xmm5           // 16 bit shorts

    // 4 pixel loop small blocks.
  s4:
    // top left
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]

    // - top right
    psubd      xmm0, [eax + edx * 4]
    psubd      xmm1, [eax + edx * 4 + 16]
    psubd      xmm2, [eax + edx * 4 + 32]
    psubd      xmm3, [eax + edx * 4 + 48]
    lea        eax, [eax + 64]

    // - bottom left
    psubd      xmm0, [esi]
    psubd      xmm1, [esi + 16]
    psubd      xmm2, [esi + 32]
    psubd      xmm3, [esi + 48]

    // + bottom right
    paddd      xmm0, [esi + edx * 4]
    paddd      xmm1, [esi + edx * 4 + 16]
    paddd      xmm2, [esi + edx * 4 + 32]
    paddd      xmm3, [esi + edx * 4 + 48]
    lea        esi, [esi + 64]

    packssdw   xmm0, xmm1  // pack 4 pixels into 2 registers
    packssdw   xmm2, xmm3

    pmulhuw    xmm0, xmm5
    pmulhuw    xmm2, xmm5

    packuswb   xmm0, xmm2
    movdqu     [edi], xmm0
    lea        edi, [edi + 16]
    sub        ecx, 4
    jge        s4

    jmp        l4b

    // 4 pixel loop
  l4:
    // top left
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    movdqu     xmm2, [eax + 32]
    movdqu     xmm3, [eax + 48]

    // - top right
    psubd      xmm0, [eax + edx * 4]
    psubd      xmm1, [eax + edx * 4 + 16]
    psubd      xmm2, [eax + edx * 4 + 32]
    psubd      xmm3, [eax + edx * 4 + 48]
    lea        eax, [eax + 64]

    // - bottom left
    psubd      xmm0, [esi]
    psubd      xmm1, [esi + 16]
    psubd      xmm2, [esi + 32]
    psubd      xmm3, [esi + 48]

    // + bottom right
    paddd      xmm0, [esi + edx * 4]
    paddd      xmm1, [esi + edx * 4 + 16]
    paddd      xmm2, [esi + edx * 4 + 32]
    paddd      xmm3, [esi + edx * 4 + 48]
    lea        esi, [esi + 64]

    cvtdq2ps   xmm0, xmm0   // Average = Sum * 1 / Area
    cvtdq2ps   xmm1, xmm1
    mulps      xmm0, xmm4
    mulps      xmm1, xmm4
    cvtdq2ps   xmm2, xmm2
    cvtdq2ps   xmm3, xmm3
    mulps      xmm2, xmm4
    mulps      xmm3, xmm4
    cvtps2dq   xmm0, xmm0
    cvtps2dq   xmm1, xmm1
    cvtps2dq   xmm2, xmm2
    cvtps2dq   xmm3, xmm3
    packssdw   xmm0, xmm1
    packssdw   xmm2, xmm3
    packuswb   xmm0, xmm2
    movdqu     [edi], xmm0
    lea        edi, [edi + 16]
    sub        ecx, 4
    jge        l4

  l4b:
    add        ecx, 4 - 1
    jl         l1b

    // 1 pixel loop
  l1:
    movdqu     xmm0, [eax]
    psubd      xmm0, [eax + edx * 4]
    lea        eax, [eax + 16]
    psubd      xmm0, [esi]
    paddd      xmm0, [esi + edx * 4]
    lea        esi, [esi + 16]
    cvtdq2ps   xmm0, xmm0
    mulps      xmm0, xmm4
    cvtps2dq   xmm0, xmm0
    packssdw   xmm0, xmm0
    packuswb   xmm0, xmm0
    movd       dword ptr [edi], xmm0
    lea        edi, [edi + 4]
    sub        ecx, 1
    jge        l1
  l1b:
  }
}
#endif  // HAS_CUMULATIVESUMTOAVERAGEROW_SSE2
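
// A scalar sketch of the integral-image average computed above for one output
// pixel, per ARGB byte lane c (0..3).  width is in int32 units, as documented.
// Illustrative only; the SSE2 code divides by multiplying with 1 / area.
static __inline uint8 AreaAverage_Sketch(const int32* topleft,
                                         const int32* botleft,
                                         int width, int area, int c) {
  int32 sum = topleft[c] - topleft[width + c] - botleft[c] +
              botleft[width + c];
  return (uint8)(sum / area);
}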

#ifdef HAS_COMPUTECUMULATIVESUMROW_SSE2
// Creates a table of cumulative sums where each value is a sum of all values
// above and to the left of the value.
void ComputeCumulativeSumRow_SSE2(const uint8* row, int32* cumsum,
                                  const int32* previous_cumsum, int width) {
  __asm {
    mov        eax, row
    mov        edx, cumsum
    mov        esi, previous_cumsum
    mov        ecx, width
    pxor       xmm0, xmm0
    pxor       xmm1, xmm1

    sub        ecx, 4
    jl         l4b
    test       edx, 15
    jne        l4b

    // 4 pixel loop
  l4:
    movdqu     xmm2, [eax]  // 4 argb pixels 16 bytes.
    lea        eax, [eax + 16]
    movdqa     xmm4, xmm2

    punpcklbw  xmm2, xmm1
    movdqa     xmm3, xmm2
    punpcklwd  xmm2, xmm1
    punpckhwd  xmm3, xmm1

    punpckhbw  xmm4, xmm1
    movdqa     xmm5, xmm4
    punpcklwd  xmm4, xmm1
    punpckhwd  xmm5, xmm1

    paddd      xmm0, xmm2
    movdqu     xmm2, [esi]  // previous row above.
    paddd      xmm2, xmm0

    paddd      xmm0, xmm3
    movdqu     xmm3, [esi + 16]
    paddd      xmm3, xmm0

    paddd      xmm0, xmm4
    movdqu     xmm4, [esi + 32]
    paddd      xmm4, xmm0

    paddd      xmm0, xmm5
    movdqu     xmm5, [esi + 48]
    lea        esi, [esi + 64]
    paddd      xmm5, xmm0

    movdqu     [edx], xmm2
    movdqu     [edx + 16], xmm3
    movdqu     [edx + 32], xmm4
    movdqu     [edx + 48], xmm5

    lea        edx, [edx + 64]
    sub        ecx, 4
    jge        l4

  l4b:
    add        ecx, 4 - 1
    jl         l1b

    // 1 pixel loop
  l1:
    movd       xmm2, dword ptr [eax]  // 1 argb pixel 4 bytes.
    lea        eax, [eax + 4]
    punpcklbw  xmm2, xmm1
    punpcklwd  xmm2, xmm1
    paddd      xmm0, xmm2
    movdqu     xmm2, [esi]
    lea        esi, [esi + 16]
    paddd      xmm2, xmm0
    movdqu     [edx], xmm2
    lea        edx, [edx + 16]
    sub        ecx, 1
    jge        l1

 l1b:
  }
}
#endif  // HAS_COMPUTECUMULATIVESUMROW_SSE2
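
// A scalar sketch of the cumulative-sum recurrence the SSE2 loops implement,
// per byte lane of each ARGB pixel: a running sum across the current row is
// added to the cumulative sum of the row above.  Illustrative only.
static __inline void ComputeCumulativeSum_Sketch(const uint8* row,
                                                 int32* cumsum,
                                                 const int32* previous_cumsum,
                                                 int width) {
  int32 sum[4] = {0, 0, 0, 0};
  int x, i;
  for (x = 0; x < width; ++x) {
    for (i = 0; i < 4; ++i) {
      sum[i] += row[x * 4 + i];
      cumsum[x * 4 + i] = sum[i] + previous_cumsum[x * 4 + i];
    }
  }
}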

#ifdef HAS_ARGBAFFINEROW_SSE2
// Copy ARGB pixels from source image with slope to a row of destination.
__declspec(naked)
LIBYUV_API
void ARGBAffineRow_SSE2(const uint8* src_argb, int src_argb_stride,
                        uint8* dst_argb, const float* uv_dudv, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 12]  // src_argb
    mov        esi, [esp + 16]  // stride
    mov        edx, [esp + 20]  // dst_argb
    mov        ecx, [esp + 24]  // pointer to uv_dudv
    movq       xmm2, qword ptr [ecx]  // uv
    movq       xmm7, qword ptr [ecx + 8]  // dudv
    mov        ecx, [esp + 28]  // width
    shl        esi, 16          // 4, stride
    add        esi, 4
    movd       xmm5, esi
    sub        ecx, 4
    jl         l4b

    // setup for 4 pixel loop
    pshufd     xmm7, xmm7, 0x44  // dup dudv
    pshufd     xmm5, xmm5, 0  // dup 4, stride
    movdqa     xmm0, xmm2    // x0, y0, x1, y1
    addps      xmm0, xmm7
    movlhps    xmm2, xmm0
    movdqa     xmm4, xmm7
    addps      xmm4, xmm4    // dudv *= 2
    movdqa     xmm3, xmm2    // x2, y2, x3, y3
    addps      xmm3, xmm4
    addps      xmm4, xmm4    // dudv *= 4

    // 4 pixel loop
  l4:
    cvttps2dq  xmm0, xmm2    // x, y float to int first 2
    cvttps2dq  xmm1, xmm3    // x, y float to int next 2
    packssdw   xmm0, xmm1    // x, y as 8 shorts
    pmaddwd    xmm0, xmm5    // offsets = x * 4 + y * stride.
    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // shift right
    movd       edi, xmm0
    pshufd     xmm0, xmm0, 0x39  // shift right
    movd       xmm1, [eax + esi]  // read pixel 0
    movd       xmm6, [eax + edi]  // read pixel 1
    punpckldq  xmm1, xmm6     // combine pixel 0 and 1
    addps      xmm2, xmm4    // x, y += dx, dy first 2
    movq       qword ptr [edx], xmm1
    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // shift right
    movd       edi, xmm0
    movd       xmm6, [eax + esi]  // read pixel 2
    movd       xmm0, [eax + edi]  // read pixel 3
    punpckldq  xmm6, xmm0     // combine pixel 2 and 3
    addps      xmm3, xmm4    // x, y += dx, dy next 2
    movq       qword ptr 8[edx], xmm6
    lea        edx, [edx + 16]
    sub        ecx, 4
    jge        l4

  l4b:
    add        ecx, 4 - 1
    jl         l1b

    // 1 pixel loop
  l1:
    cvttps2dq  xmm0, xmm2    // x, y float to int
    packssdw   xmm0, xmm0    // x, y as shorts
    pmaddwd    xmm0, xmm5    // offset = x * 4 + y * stride
    addps      xmm2, xmm7    // x, y += dx, dy
    movd       esi, xmm0
    movd       xmm0, [eax + esi]  // copy a pixel
    movd       [edx], xmm0
    lea        edx, [edx + 4]
    sub        ecx, 1
    jge        l1
  l1b:
    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBAFFINEROW_SSE2
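
// A scalar sketch of the affine row copy: step (u, v) across the destination
// row by (du, dv) and fetch the nearest source pixel each step, truncating
// coordinates toward zero as cvttps2dq does.  Illustrative only.
static __inline void ARGBAffineRow_Sketch(const uint8* src_argb,
                                          int src_argb_stride, uint8* dst_argb,
                                          const float* uv_dudv, int width) {
  float u = uv_dudv[0];
  float v = uv_dudv[1];
  int x;
  for (x = 0; x < width; ++x) {
    const uint8* src = src_argb + (int)v * src_argb_stride + (int)u * 4;
    *(uint32*)(dst_argb + x * 4) = *(const uint32*)src;  // copy one pixel.
    u += uv_dudv[2];
    v += uv_dudv[3];
  }
}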

#ifdef HAS_INTERPOLATEROW_AVX2
// Bilinear filter 32x2 -> 32x1
__declspec(naked)
void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) {
  __asm {
    push       esi
    push       edi
    mov        edi, [esp + 8 + 4]   // dst_ptr
    mov        esi, [esp + 8 + 8]   // src_ptr
    mov        edx, [esp + 8 + 12]  // src_stride
    mov        ecx, [esp + 8 + 16]  // dst_width
    mov        eax, [esp + 8 + 20]  // source_y_fraction (0..255)
    shr        eax, 1
    // Dispatch to specialized filters if applicable.
    cmp        eax, 0
    je         xloop100  // 0 / 128.  Blend 100 / 0.
    sub        edi, esi
    cmp        eax, 32
    je         xloop75   // 32 / 128 is 0.25.  Blend 75 / 25.
    cmp        eax, 64
    je         xloop50   // 64 / 128 is 0.50.  Blend 50 / 50.
    cmp        eax, 96
    je         xloop25   // 96 / 128 is 0.75.  Blend 25 / 75.

    vmovd      xmm0, eax  // high fraction 0..127
    neg        eax
    add        eax, 128
    vmovd      xmm5, eax  // low fraction 128..1
    vpunpcklbw xmm5, xmm5, xmm0
    vpunpcklwd xmm5, xmm5, xmm5
    vpxor      ymm0, ymm0, ymm0
    vpermd     ymm5, ymm0, ymm5

  xloop:
    vmovdqu    ymm0, [esi]
    vmovdqu    ymm2, [esi + edx]
    vpunpckhbw ymm1, ymm0, ymm2  // mutates
    vpunpcklbw ymm0, ymm0, ymm2  // mutates
    vpmaddubsw ymm0, ymm0, ymm5
    vpmaddubsw ymm1, ymm1, ymm5
    vpsrlw     ymm0, ymm0, 7
    vpsrlw     ymm1, ymm1, 7
    vpackuswb  ymm0, ymm0, ymm1  // unmutates
    vmovdqu    [esi + edi], ymm0
    lea        esi, [esi + 32]
    sub        ecx, 32
    jg         xloop
    jmp        xloop99

   // Blend 25 / 75.
 xloop25:
   vmovdqu    ymm0, [esi]
   vmovdqu    ymm1, [esi + edx]
   vpavgb     ymm0, ymm0, ymm1
   vpavgb     ymm0, ymm0, ymm1
   vmovdqu    [esi + edi], ymm0
   lea        esi, [esi + 32]
   sub        ecx, 32
   jg         xloop25
   jmp        xloop99

   // Blend 50 / 50.
 xloop50:
   vmovdqu    ymm0, [esi]
   vpavgb     ymm0, ymm0, [esi + edx]
   vmovdqu    [esi + edi], ymm0
   lea        esi, [esi + 32]
   sub        ecx, 32
   jg         xloop50
   jmp        xloop99

   // Blend 75 / 25.
 xloop75:
   vmovdqu    ymm1, [esi]
   vmovdqu    ymm0, [esi + edx]
   vpavgb     ymm0, ymm0, ymm1
   vpavgb     ymm0, ymm0, ymm1
   vmovdqu    [esi + edi], ymm0
   lea        esi, [esi + 32]
   sub        ecx, 32
   jg         xloop75
   jmp        xloop99

   // Blend 100 / 0 - Copy row unchanged.
 xloop100:
   rep movsb

  xloop99:
    pop        edi
    pop        esi
    vzeroupper
    ret
  }
}
#endif  // HAS_INTERPOLATEROW_AVX2
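
// A scalar sketch of the bilinear blend all InterpolateRow variants compute:
// dst = row0 + fraction * (row1 - row0).  The SSSE3/AVX2 paths use a 7-bit
// fraction (source_y_fraction / 2, range 0..127) and shift by 7.
// Illustrative only.
static __inline uint8 InterpolatePixel_Sketch(uint8 row0, uint8 row1,
                                              int source_y_fraction) {
  int f = source_y_fraction >> 1;
  return (uint8)((row0 * (128 - f) + row1 * f) >> 7);
}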

// Bilinear filter 16x2 -> 16x1
__declspec(naked)
void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
                          ptrdiff_t src_stride, int dst_width,
                          int source_y_fraction) {
  __asm {
    push       esi
    push       edi
    mov        edi, [esp + 8 + 4]   // dst_ptr
    mov        esi, [esp + 8 + 8]   // src_ptr
    mov        edx, [esp + 8 + 12]  // src_stride
    mov        ecx, [esp + 8 + 16]  // dst_width
    mov        eax, [esp + 8 + 20]  // source_y_fraction (0..255)
    sub        edi, esi
    shr        eax, 1
    // Dispatch to specialized filters if applicable.
    cmp        eax, 0
    je         xloop100  // 0 / 128.  Blend 100 / 0.
    cmp        eax, 32
    je         xloop75   // 32 / 128 is 0.25.  Blend 75 / 25.
    cmp        eax, 64
    je         xloop50   // 64 / 128 is 0.50.  Blend 50 / 50.
    cmp        eax, 96
    je         xloop25   // 96 / 128 is 0.75.  Blend 25 / 75.

    movd       xmm0, eax  // high fraction 0..127
    neg        eax
    add        eax, 128
    movd       xmm5, eax  // low fraction 128..1
    punpcklbw  xmm5, xmm0
    punpcklwd  xmm5, xmm5
    pshufd     xmm5, xmm5, 0

  xloop:
    movdqu     xmm0, [esi]
    movdqu     xmm2, [esi + edx]
    movdqu     xmm1, xmm0
    punpcklbw  xmm0, xmm2
    punpckhbw  xmm1, xmm2
    pmaddubsw  xmm0, xmm5
    pmaddubsw  xmm1, xmm5
    psrlw      xmm0, 7
    psrlw      xmm1, 7
    packuswb   xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop
    jmp        xloop99

    // Blend 25 / 75.
  xloop25:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop25
    jmp        xloop99

    // Blend 50 / 50.
  xloop50:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop50
    jmp        xloop99

    // Blend 75 / 25.
  xloop75:
    movdqu     xmm1, [esi]
    movdqu     xmm0, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop75
    jmp        xloop99

    // Blend 100 / 0 - Copy row unchanged.
  xloop100:
    movdqu     xmm0, [esi]
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop100

  xloop99:
    pop        edi
    pop        esi
    ret
  }
}

#ifdef HAS_INTERPOLATEROW_SSE2
// Bilinear filter 16x2 -> 16x1
__declspec(naked)
void InterpolateRow_SSE2(uint8* dst_ptr, const uint8* src_ptr,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) {
  __asm {
    push       esi
    push       edi
    mov        edi, [esp + 8 + 4]   // dst_ptr
    mov        esi, [esp + 8 + 8]   // src_ptr
    mov        edx, [esp + 8 + 12]  // src_stride
    mov        ecx, [esp + 8 + 16]  // dst_width
    mov        eax, [esp + 8 + 20]  // source_y_fraction (0..255)
    sub        edi, esi
    // Dispatch to specialized filters if applicable.
    cmp        eax, 0
    je         xloop100  // 0 / 256.  Blend 100 / 0.
    cmp        eax, 64
    je         xloop75   // 64 / 256 is 0.25.  Blend 75 / 25.
    cmp        eax, 128
    je         xloop50   // 128 / 256 is 0.50.  Blend 50 / 50.
    cmp        eax, 192
    je         xloop25   // 192 / 256 is 0.75.  Blend 25 / 75.

    movd       xmm5, eax            // xmm5 = y fraction
    punpcklbw  xmm5, xmm5
    psrlw      xmm5, 1
    punpcklwd  xmm5, xmm5
    punpckldq  xmm5, xmm5
    punpcklqdq xmm5, xmm5
    pxor       xmm4, xmm4

  xloop:
    movdqu     xmm0, [esi]  // row0
    movdqu     xmm2, [esi + edx]  // row1
    movdqu     xmm1, xmm0
    movdqu     xmm3, xmm2
    punpcklbw  xmm2, xmm4
    punpckhbw  xmm3, xmm4
    punpcklbw  xmm0, xmm4
    punpckhbw  xmm1, xmm4
    psubw      xmm2, xmm0  // row1 - row0
    psubw      xmm3, xmm1
    paddw      xmm2, xmm2  // 9 bits * 15 bits = 8.16
    paddw      xmm3, xmm3
    pmulhw     xmm2, xmm5  // scale diff
    pmulhw     xmm3, xmm5
    paddw      xmm0, xmm2  // sum rows
    paddw      xmm1, xmm3
    packuswb   xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop
    jmp        xloop99

    // Blend 25 / 75.
  xloop25:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop25
    jmp        xloop99

    // Blend 50 / 50.
  xloop50:
    movdqu     xmm0, [esi]
    movdqu     xmm1, [esi + edx]
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop50
    jmp        xloop99

    // Blend 75 / 25.
  xloop75:
    movdqu     xmm1, [esi]
    movdqu     xmm0, [esi + edx]
    pavgb      xmm0, xmm1
    pavgb      xmm0, xmm1
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop75
    jmp        xloop99

    // Blend 100 / 0 - Copy row unchanged.
  xloop100:
    movdqu     xmm0, [esi]
    movdqu     [esi + edi], xmm0
    lea        esi, [esi + 16]
    sub        ecx, 16
    jg         xloop100

  xloop99:
    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_INTERPOLATEROW_SSE2

// For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA.
__declspec(naked)
void ARGBShuffleRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
                          const uint8* shuffler, int pix) {
  __asm {
    mov        eax, [esp + 4]    // src_argb
    mov        edx, [esp + 8]    // dst_argb
    mov        ecx, [esp + 12]   // shuffler
    movdqu     xmm5, [ecx]
    mov        ecx, [esp + 16]   // pix

  wloop:
    movdqu     xmm0, [eax]
    movdqu     xmm1, [eax + 16]
    lea        eax, [eax + 32]
    pshufb     xmm0, xmm5
    pshufb     xmm1, xmm5
    movdqu     [edx], xmm0
    movdqu     [edx + 16], xmm1
    lea        edx, [edx + 32]
    sub        ecx, 8
    jg         wloop
    ret
  }
}

#ifdef HAS_ARGBSHUFFLEROW_AVX2
__declspec(naked)
void ARGBShuffleRow_AVX2(const uint8* src_argb, uint8* dst_argb,
                         const uint8* shuffler, int pix) {
  __asm {
    mov        eax, [esp + 4]     // src_argb
    mov        edx, [esp + 8]     // dst_argb
    mov        ecx, [esp + 12]    // shuffler
    vbroadcastf128 ymm5, [ecx]    // same shuffle in high as low.
    mov        ecx, [esp + 16]    // pix

  wloop:
    vmovdqu    ymm0, [eax]
    vmovdqu    ymm1, [eax + 32]
    lea        eax, [eax + 64]
    vpshufb    ymm0, ymm0, ymm5
    vpshufb    ymm1, ymm1, ymm5
    vmovdqu    [edx], ymm0
    vmovdqu    [edx + 32], ymm1
    lea        edx, [edx + 64]
    sub        ecx, 16
    jg         wloop

    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBSHUFFLEROW_AVX2
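
// A scalar sketch of the channel shuffle the functions above perform: output
// byte i of each pixel is the input byte selected by shuffler[i] (indices
// 0..3 within the pixel).  Illustrative only.
static __inline void ARGBShufflePixel_Sketch(const uint8* src, uint8* dst,
                                             const uint8* shuffler) {
  dst[0] = src[shuffler[0] & 3];
  dst[1] = src[shuffler[1] & 3];
  dst[2] = src[shuffler[2] & 3];
  dst[3] = src[shuffler[3] & 3];
}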
fbarchard@google.com's avatar
fbarchard@google.com committed
5908

5909
__declspec(naked)
5910 5911 5912 5913 5914 5915 5916 5917 5918
void ARGBShuffleRow_SSE2(const uint8* src_argb, uint8* dst_argb,
                         const uint8* shuffler, int pix) {
  __asm {
    push       ebx
    push       esi
    mov        eax, [esp + 8 + 4]    // src_argb
    mov        edx, [esp + 8 + 8]    // dst_argb
    mov        esi, [esp + 8 + 12]   // shuffler
    mov        ecx, [esp + 8 + 16]   // pix
    pxor       xmm5, xmm5

    mov        ebx, [esi]   // shuffler
    cmp        ebx, 0x03000102
    je         shuf_3012
    cmp        ebx, 0x00010203
    je         shuf_0123
    cmp        ebx, 0x00030201
    je         shuf_0321
    cmp        ebx, 0x02010003
    je         shuf_2103

  // TODO(fbarchard): Use one source pointer and 3 offsets.
  shuf_any1:
    movzx      ebx, byte ptr [esi]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx], bl
    movzx      ebx, byte ptr [esi + 1]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx + 1], bl
    movzx      ebx, byte ptr [esi + 2]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx + 2], bl
    movzx      ebx, byte ptr [esi + 3]
    movzx      ebx, byte ptr [eax + ebx]
    mov        [edx + 3], bl
    lea        eax, [eax + 4]
    lea        edx, [edx + 4]
    sub        ecx, 1
    jg         shuf_any1
    jmp        shuf99

  shuf_0123:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 01Bh   // 1B = 00011011 = 0x0123 = BGRAToARGB
    pshuflw    xmm0, xmm0, 01Bh
    pshufhw    xmm1, xmm1, 01Bh
    pshuflw    xmm1, xmm1, 01Bh
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_0123
    jmp        shuf99

  shuf_0321:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 039h   // 39 = 00111001 = 0x0321 = RGBAToARGB
    pshuflw    xmm0, xmm0, 039h
    pshufhw    xmm1, xmm1, 039h
    pshuflw    xmm1, xmm1, 039h
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_0321
    jmp        shuf99

  shuf_2103:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 093h   // 93 = 10010011 = 0x2103 = ARGBToRGBA
    pshuflw    xmm0, xmm0, 093h
    pshufhw    xmm1, xmm1, 093h
    pshuflw    xmm1, xmm1, 093h
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_2103
    jmp        shuf99

  shuf_3012:
    movdqu     xmm0, [eax]
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm5
    punpckhbw  xmm1, xmm5
    pshufhw    xmm0, xmm0, 0C6h   // C6 = 11000110 = 0x3012 = ABGRToARGB
    pshuflw    xmm0, xmm0, 0C6h
    pshufhw    xmm1, xmm1, 0C6h
    pshuflw    xmm1, xmm1, 0C6h
    packuswb   xmm0, xmm1
    movdqu     [edx], xmm0
    lea        edx, [edx + 16]
    sub        ecx, 4
    jg         shuf_3012

  shuf99:
    pop        esi
    pop        ebx
    ret
  }
}

// YUY2 - Macro-pixel = 2 image pixels
// Y0U0Y1V0....Y2U2Y3V2...Y4U4Y5V4....

// UYVY - Macro-pixel = 2 image pixels
// U0Y0V0Y1
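// Illustrative scalar sketch (hypothetical name, not the optimized path):
// packing one YUY2 macro-pixel from I422 planes, matching the byte order
// shown above; for UYVY the chroma bytes lead each pair instead. Assumes an
// even width.
static void I422ToYUY2Row_Sketch_C(const uint8* src_y, const uint8* src_u,
                                   const uint8* src_v, uint8* dst_frame,
                                   int width) {
  int x;
  for (x = 0; x < width - 1; x += 2) {
    dst_frame[0] = src_y[0];  // Y0
    dst_frame[1] = src_u[0];  // U0
    dst_frame[2] = src_y[1];  // Y1
    dst_frame[3] = src_v[0];  // V0
    src_y += 2;
    src_u += 1;
    src_v += 1;
    dst_frame += 4;
  }
}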

__declspec(naked)
void I422ToYUY2Row_SSE2(const uint8* src_y,
                        const uint8* src_u,
                        const uint8* src_v,
                        uint8* dst_frame, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_y
    mov        esi, [esp + 8 + 8]    // src_u
    mov        edx, [esp + 8 + 12]   // src_v
    mov        edi, [esp + 8 + 16]   // dst_frame
    mov        ecx, [esp + 8 + 20]   // width
    sub        edx, esi

  convertloop:
    movq       xmm2, qword ptr [esi] // U
    movq       xmm3, qword ptr [esi + edx] // V
    lea        esi, [esi + 8]
    punpcklbw  xmm2, xmm3 // UV
    movdqu     xmm0, [eax] // Y
    lea        eax, [eax + 16]
    movdqa     xmm1, xmm0
    punpcklbw  xmm0, xmm2 // YUYV
    punpckhbw  xmm1, xmm2
    movdqu     [edi], xmm0
    movdqu     [edi + 16], xmm1
    lea        edi, [edi + 32]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

__declspec(naked)
void I422ToUYVYRow_SSE2(const uint8* src_y,
                        const uint8* src_u,
                        const uint8* src_v,
                        uint8* dst_frame, int width) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]    // src_y
    mov        esi, [esp + 8 + 8]    // src_u
    mov        edx, [esp + 8 + 12]   // src_v
    mov        edi, [esp + 8 + 16]   // dst_frame
    mov        ecx, [esp + 8 + 20]   // width
    sub        edx, esi

  convertloop:
    movq       xmm2, qword ptr [esi] // U
    movq       xmm3, qword ptr [esi + edx] // V
    lea        esi, [esi + 8]
    punpcklbw  xmm2, xmm3 // UV
    movdqu     xmm0, [eax] // Y
    movdqa     xmm1, xmm2
    lea        eax, [eax + 16]
    punpcklbw  xmm1, xmm0 // UYVY
    punpckhbw  xmm2, xmm0
    movdqu     [edi], xmm1
    movdqu     [edi + 16], xmm2
    lea        edi, [edi + 32]
    sub        ecx, 16
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}

#ifdef HAS_ARGBPOLYNOMIALROW_SSE2
__declspec(naked)
void ARGBPolynomialRow_SSE2(const uint8* src_argb,
                            uint8* dst_argb, const float* poly,
                            int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   /* src_argb */
    mov        edx, [esp + 4 + 8]   /* dst_argb */
    mov        esi, [esp + 4 + 12]  /* poly */
    mov        ecx, [esp + 4 + 16]  /* width */
    pxor       xmm3, xmm3  // 0 constant for zero extending bytes to ints.

    // 2 pixel loop.
 convertloop:
//    pmovzxbd  xmm0, dword ptr [eax]  // BGRA pixel
//    pmovzxbd  xmm4, dword ptr [eax + 4]  // BGRA pixel
    movq       xmm0, qword ptr [eax]  // BGRABGRA
    lea        eax, [eax + 8]
    punpcklbw  xmm0, xmm3
    movdqa     xmm4, xmm0
    punpcklwd  xmm0, xmm3  // pixel 0
    punpckhwd  xmm4, xmm3  // pixel 1
    cvtdq2ps   xmm0, xmm0  // 4 floats
    cvtdq2ps   xmm4, xmm4
    movdqa     xmm1, xmm0  // X
    movdqa     xmm5, xmm4
    mulps      xmm0, [esi + 16]  // C1 * X
    mulps      xmm4, [esi + 16]
    addps      xmm0, [esi]  // result = C0 + C1 * X
    addps      xmm4, [esi]
    movdqa     xmm2, xmm1
    movdqa     xmm6, xmm5
    mulps      xmm2, xmm1  // X * X
    mulps      xmm6, xmm5
    mulps      xmm1, xmm2  // X * X * X
    mulps      xmm5, xmm6
    mulps      xmm2, [esi + 32]  // C2 * X * X
    mulps      xmm6, [esi + 32]
    mulps      xmm1, [esi + 48]  // C3 * X * X * X
    mulps      xmm5, [esi + 48]
    addps      xmm0, xmm2  // result += C2 * X * X
    addps      xmm4, xmm6
    addps      xmm0, xmm1  // result += C3 * X * X * X
    addps      xmm4, xmm5
    cvttps2dq  xmm0, xmm0
    cvttps2dq  xmm4, xmm4
    packuswb   xmm0, xmm4
    packuswb   xmm0, xmm0
    movq       qword ptr [edx], xmm0
    lea        edx, [edx + 8]
    sub        ecx, 2
    jg         convertloop
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBPOLYNOMIALROW_SSE2
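// Illustrative scalar sketch (hypothetical name): per channel c the row
// evaluates poly[c] + poly[c+4]*X + poly[c+8]*X^2 + poly[c+12]*X^3, with X
// the byte value as a float, then truncates and saturates back to a byte,
// mirroring the mulps/addps/cvttps2dq/packuswb sequence above.
static void ARGBPolynomialRow_Sketch_C(const uint8* src_argb, uint8* dst_argb,
                                       const float* poly, int width) {
  int i, c;
  for (i = 0; i < width; ++i) {
    for (c = 0; c < 4; ++c) {
      float x = (float)src_argb[i * 4 + c];
      float v = poly[c] + poly[c + 4] * x + poly[c + 8] * x * x +
                poly[c + 12] * x * x * x;
      if (v < 0.f) v = 0.f;      // saturate like packuswb
      if (v > 255.f) v = 255.f;
      dst_argb[i * 4 + c] = (uint8)v;  // truncate like cvttps2dq
    }
  }
}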

#ifdef HAS_ARGBPOLYNOMIALROW_AVX2
__declspec(naked)
void ARGBPolynomialRow_AVX2(const uint8* src_argb,
                            uint8* dst_argb, const float* poly,
                            int width) {
  __asm {
    mov        eax, [esp + 4]   /* src_argb */
    mov        edx, [esp + 8]   /* dst_argb */
    mov        ecx, [esp + 12]   /* poly */
    vbroadcastf128 ymm4, [ecx]       // C0
    vbroadcastf128 ymm5, [ecx + 16]  // C1
    vbroadcastf128 ymm6, [ecx + 32]  // C2
    vbroadcastf128 ymm7, [ecx + 48]  // C3
    mov        ecx, [esp + 16]  /* width */

    // 2 pixel loop.
 convertloop:
    vpmovzxbd   ymm0, qword ptr [eax]  // 2 BGRA pixels
    lea         eax, [eax + 8]
    vcvtdq2ps   ymm0, ymm0        // X 8 floats
    vmulps      ymm2, ymm0, ymm0  // X * X
    vmulps      ymm3, ymm0, ymm7  // C3 * X
    vfmadd132ps ymm0, ymm4, ymm5  // result = C0 + C1 * X
    vfmadd231ps ymm0, ymm2, ymm6  // result += C2 * X * X
    vfmadd231ps ymm0, ymm2, ymm3  // result += C3 * X * X * X
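    // C3 * X * X * X is formed as (X * X) * (C3 * X), so each term above is
    // a single multiply or fused multiply-add.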
    vcvttps2dq  ymm0, ymm0
    vpackusdw   ymm0, ymm0, ymm0  // b0g0r0a0_00000000_b0g0r0a0_00000000
    vpermq      ymm0, ymm0, 0xd8  // b0g0r0a0_b0g0r0a0_00000000_00000000
    vpackuswb   xmm0, xmm0, xmm0  // bgrabgra_00000000_00000000_00000000
    vmovq       qword ptr [edx], xmm0
    lea         edx, [edx + 8]
    sub         ecx, 2
    jg          convertloop
    vzeroupper
    ret
  }
}
#endif  // HAS_ARGBPOLYNOMIALROW_AVX2

#ifdef HAS_ARGBCOLORTABLEROW_X86
// Transform ARGB pixels with color table.
__declspec(naked)
void ARGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb,
                           int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   /* dst_argb */
    mov        esi, [esp + 4 + 8]   /* table_argb */
    mov        ecx, [esp + 4 + 12]  /* width */

    // 1 pixel loop.
  convertloop:
    movzx      edx, byte ptr [eax]
    lea        eax, [eax + 4]
    movzx      edx, byte ptr [esi + edx * 4]
    mov        byte ptr [eax - 4], dl
    movzx      edx, byte ptr [eax - 4 + 1]
    movzx      edx, byte ptr [esi + edx * 4 + 1]
    mov        byte ptr [eax - 4 + 1], dl
    movzx      edx, byte ptr [eax - 4 + 2]
    movzx      edx, byte ptr [esi + edx * 4 + 2]
    mov        byte ptr [eax - 4 + 2], dl
    movzx      edx, byte ptr [eax - 4 + 3]
    movzx      edx, byte ptr [esi + edx * 4 + 3]
    mov        byte ptr [eax - 4 + 3], dl
    dec        ecx
    jg         convertloop
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBCOLORTABLEROW_X86
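// Illustrative scalar sketch (hypothetical name): each channel is replaced
// by its own entry in an interleaved 256-entry-per-channel BGRA table, the
// same in-place lookups the movzx chain above performs.
static void ARGBColorTableRow_Sketch_C(uint8* dst_argb,
                                       const uint8* table_argb, int width) {
  int i;
  for (i = 0; i < width; ++i) {
    dst_argb[0] = table_argb[dst_argb[0] * 4 + 0];  // B
    dst_argb[1] = table_argb[dst_argb[1] * 4 + 1];  // G
    dst_argb[2] = table_argb[dst_argb[2] * 4 + 2];  // R
    dst_argb[3] = table_argb[dst_argb[3] * 4 + 3];  // A
    dst_argb += 4;
  }
}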

#ifdef HAS_RGBCOLORTABLEROW_X86
// Transform RGB pixels with color table.
__declspec(naked)
void RGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb, int width) {
  __asm {
    push       esi
    mov        eax, [esp + 4 + 4]   /* dst_argb */
    mov        esi, [esp + 4 + 8]   /* table_argb */
    mov        ecx, [esp + 4 + 12]  /* width */

    // 1 pixel loop.
  convertloop:
    movzx      edx, byte ptr [eax]
    lea        eax, [eax + 4]
    movzx      edx, byte ptr [esi + edx * 4]
    mov        byte ptr [eax - 4], dl
    movzx      edx, byte ptr [eax - 4 + 1]
    movzx      edx, byte ptr [esi + edx * 4 + 1]
    mov        byte ptr [eax - 4 + 1], dl
    movzx      edx, byte ptr [eax - 4 + 2]
    movzx      edx, byte ptr [esi + edx * 4 + 2]
    mov        byte ptr [eax - 4 + 2], dl
    dec        ecx
    jg         convertloop

    pop        esi
    ret
  }
}
#endif  // HAS_RGBCOLORTABLEROW_X86

#ifdef HAS_ARGBLUMACOLORTABLEROW_SSSE3
// Transform RGB pixels with luma table.
__declspec(naked)
void ARGBLumaColorTableRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
                                 int width,
                                 const uint8* luma, uint32 lumacoeff) {
  __asm {
    push       esi
    push       edi
    mov        eax, [esp + 8 + 4]   /* src_argb */
    mov        edi, [esp + 8 + 8]   /* dst_argb */
    mov        ecx, [esp + 8 + 12]  /* width */
    movd       xmm2, dword ptr [esp + 8 + 16]  // luma table
    movd       xmm3, dword ptr [esp + 8 + 20]  // lumacoeff
    pshufd     xmm2, xmm2, 0
    pshufd     xmm3, xmm3, 0
    pcmpeqb    xmm4, xmm4        // generate mask 0xff00ff00
    psllw      xmm4, 8
    pxor       xmm5, xmm5

    // 4 pixel loop.
  convertloop:
    movdqu     xmm0, xmmword ptr [eax]      // generate luma ptr
    pmaddubsw  xmm0, xmm3
    phaddw     xmm0, xmm0
    pand       xmm0, xmm4  // mask out low bits
    punpcklwd  xmm0, xmm5
    paddd      xmm0, xmm2  // add table base
    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // 00111001 to rotate right 32

    movzx      edx, byte ptr [eax]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi], dl
    movzx      edx, byte ptr [eax + 1]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 1], dl
    movzx      edx, byte ptr [eax + 2]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 2], dl
    movzx      edx, byte ptr [eax + 3]  // copy alpha.
    mov        byte ptr [edi + 3], dl

    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // 00111001 to rotate right 32

    movzx      edx, byte ptr [eax + 4]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 4], dl
    movzx      edx, byte ptr [eax + 5]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 5], dl
    movzx      edx, byte ptr [eax + 6]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 6], dl
    movzx      edx, byte ptr [eax + 7]  // copy alpha.
    mov        byte ptr [edi + 7], dl

    movd       esi, xmm0
    pshufd     xmm0, xmm0, 0x39  // 00111001 to rotate right 32

    movzx      edx, byte ptr [eax + 8]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 8], dl
    movzx      edx, byte ptr [eax + 9]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 9], dl
    movzx      edx, byte ptr [eax + 10]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 10], dl
    movzx      edx, byte ptr [eax + 11]  // copy alpha.
    mov        byte ptr [edi + 11], dl

    movd       esi, xmm0

    movzx      edx, byte ptr [eax + 12]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 12], dl
    movzx      edx, byte ptr [eax + 13]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 13], dl
    movzx      edx, byte ptr [eax + 14]
    movzx      edx, byte ptr [esi + edx]
    mov        byte ptr [edi + 14], dl
    movzx      edx, byte ptr [eax + 15]  // copy alpha.
    mov        byte ptr [edi + 15], dl

    lea        eax, [eax + 16]
    lea        edi, [edi + 16]
    sub        ecx, 4
    jg         convertloop

    pop        edi
    pop        esi
    ret
  }
}
#endif  // HAS_ARGBLUMACOLORTABLEROW_SSSE3
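// Illustrative scalar sketch (hypothetical name): a weighted luma is
// computed from the pixel bytes, masked down to a multiple of 256 and used
// to pick a 256-byte segment of the luma table, through which B, G and R
// are mapped; alpha is copied. This mirrors the pmaddubsw/phaddw/pand/paddd
// flow above, ignoring its signed 16-bit saturation.
static void ARGBLumaColorTableRow_Sketch_C(const uint8* src_argb,
                                           uint8* dst_argb, int width,
                                           const uint8* luma,
                                           uint32 lumacoeff) {
  const int bc = lumacoeff & 0xff;          // per-channel weights packed
  const int gc = (lumacoeff >> 8) & 0xff;   // into the low bytes of
  const int rc = (lumacoeff >> 16) & 0xff;  // lumacoeff.
  int i;
  for (i = 0; i < width; ++i) {
    int l = (src_argb[0] * bc + src_argb[1] * gc + src_argb[2] * rc) &
            0xff00;                      // keep the high byte, like pand
    const uint8* lut = luma + l;         // one 256-byte table segment
    dst_argb[0] = lut[src_argb[0]];
    dst_argb[1] = lut[src_argb[1]];
    dst_argb[2] = lut[src_argb[2]];
    dst_argb[3] = src_argb[3];           // copy alpha unchanged
    src_argb += 4;
    dst_argb += 4;
  }
}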

#endif  // defined(_M_X64)
#endif  // !defined(LIBYUV_DISABLE_X86) && (defined(_M_IX86) || defined(_M_X64))

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif