Commit c70c7c02 authored by fbarchard@google.com

scale to half size optimization for avx2 - use pmaddubsw instruction to horizontally add bytes, then pavgw to round and divide by 2.
BUG=314
TESTED=libyuvTest.ScaleDownBy2*
R=tpsiaki@google.com

Review URL: https://webrtc-codereview.appspot.com/45909004

git-svn-id: http://libyuv.googlecode.com/svn/trunk@1352 16f28f9a-4ce2-e073-06de-1de4eb20be90
parent f23d6222
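
The change replaces the mask-and-shift column averaging (vpsrlw/vpand followed by vpavgw of the even and odd bytes) with vpmaddubsw against a vector of byte 1s, which adds each pair of adjacent source bytes into a 16-bit lane, followed by vpavgw against zero, which computes (sum + 1) / 2, i.e. a correctly rounded average. The sketch below is a hedged C intrinsics rendering of that idea, not libyuv's code (the commit itself modifies MSVC inline assembly); the helper name HalveRow_AVX2_Sketch and the fixed 64-byte input width are assumptions for illustration.

#include <immintrin.h>
#include <stdint.h>

// Hypothetical helper: averages 64 source bytes down to 32 destination bytes
// (adjacent-pair average), mirroring one loop iteration of
// ScaleRowDown2Linear_AVX2 as changed in this commit.
static void HalveRow_AVX2_Sketch(const uint8_t* src, uint8_t* dst) {
  const __m256i ones = _mm256_set1_epi8(1);     // '1' constant for vpmaddubsw
  const __m256i zero = _mm256_setzero_si256();  // constant 0 for vpavgw
  __m256i a = _mm256_loadu_si256((const __m256i*)src);
  __m256i b = _mm256_loadu_si256((const __m256i*)(src + 32));
  a = _mm256_maddubs_epi16(a, ones);      // vpmaddubsw: add adjacent byte pairs -> 16-bit sums
  b = _mm256_maddubs_epi16(b, ones);
  a = _mm256_avg_epu16(a, zero);          // vpavgw with 0: (sum + 1) / 2, rounded
  b = _mm256_avg_epu16(b, zero);
  a = _mm256_packus_epi16(a, b);          // vpackuswb: back to bytes (128-bit lanes interleaved)
  a = _mm256_permute4x64_epi64(a, 0xd8);  // vpermq: undo the lane interleave from the pack
  _mm256_storeu_si256((__m256i*)dst, a);
}
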
 Name: libyuv
 URL: http://code.google.com/p/libyuv/
-Version: 1349
+Version: 1352
 License: BSD
 License File: LICENSE
@@ -11,6 +11,6 @@
 #ifndef INCLUDE_LIBYUV_VERSION_H_  // NOLINT
 #define INCLUDE_LIBYUV_VERSION_H_
-#define LIBYUV_VERSION 1349
+#define LIBYUV_VERSION 1352
 #endif  // INCLUDE_LIBYUV_VERSION_H_  NOLINT
@@ -236,22 +236,23 @@ void ScaleRowDown2Linear_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
                                            // src_stride
     mov        edx, [esp + 12]             // dst_ptr
     mov        ecx, [esp + 16]             // dst_width
-    vpcmpeqb   ymm5, ymm5, ymm5            // generate mask 0x00ff00ff
-    vpsrlw     ymm5, ymm5, 8
+    vpcmpeqb   ymm4, ymm4, ymm4            // '1' constant, 8b
+    vpsrlw     ymm4, ymm4, 15
+    vpackuswb  ymm4, ymm4, ymm4
+    vpxor      ymm5, ymm5, ymm5            // constant 0
   wloop:
     vmovdqu    ymm0, [eax]
     vmovdqu    ymm1, [eax + 32]
     lea        eax,  [eax + 64]
-    vpsrlw     ymm2, ymm0, 8               // average columns (32 to 16 pixels)
-    vpsrlw     ymm3, ymm1, 8
-    vpand      ymm0, ymm0, ymm5
-    vpand      ymm1, ymm1, ymm5
-    vpavgw     ymm0, ymm0, ymm2
-    vpavgw     ymm1, ymm1, ymm3
+    vpmaddubsw ymm0, ymm0, ymm4            // average horizontally
+    vpmaddubsw ymm1, ymm1, ymm4
+    vpavgw     ymm0, ymm0, ymm5            // (x + 1) / 2
+    vpavgw     ymm1, ymm1, ymm5
     vpackuswb  ymm0, ymm0, ymm1
-    vpermq     ymm0, ymm0, 0xd8            // unmutate
+    vpermq     ymm0, ymm0, 0xd8            // unmutate vpackuswb
     vmovdqu    [edx], ymm0
     lea        edx, [edx + 32]
@@ -273,24 +274,25 @@ void ScaleRowDown2Box_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
     mov        esi, [esp + 4 + 8]          // src_stride
     mov        edx, [esp + 4 + 12]         // dst_ptr
     mov        ecx, [esp + 4 + 16]         // dst_width
-    vpcmpeqb   ymm5, ymm5, ymm5            // generate mask 0x00ff00ff
-    vpsrlw     ymm5, ymm5, 8
+    vpcmpeqb   ymm4, ymm4, ymm4            // '1' constant, 8b
+    vpsrlw     ymm4, ymm4, 15
+    vpackuswb  ymm4, ymm4, ymm4
+    vpxor      ymm5, ymm5, ymm5            // constant 0
   wloop:
-    vmovdqu    ymm0, [eax]
+    vmovdqu    ymm0, [eax]                 // average rows
     vmovdqu    ymm1, [eax + 32]
-    vpavgb     ymm0, ymm0, [eax + esi]     // average rows
+    vpavgb     ymm0, ymm0, [eax + esi]
     vpavgb     ymm1, ymm1, [eax + esi + 32]
     lea        eax,  [eax + 64]
-    vpsrlw     ymm2, ymm0, 8               // average columns (32 to 16 pixels)
-    vpsrlw     ymm3, ymm1, 8
-    vpand      ymm0, ymm0, ymm5
-    vpand      ymm1, ymm1, ymm5
-    vpavgw     ymm0, ymm0, ymm2
-    vpavgw     ymm1, ymm1, ymm3
+    vpmaddubsw ymm0, ymm0, ymm4            // average horizontally
+    vpmaddubsw ymm1, ymm1, ymm4
+    vpavgw     ymm0, ymm0, ymm5            // (x + 1) / 2
+    vpavgw     ymm1, ymm1, ymm5
     vpackuswb  ymm0, ymm0, ymm1
-    vpermq     ymm0, ymm0, 0xd8            // unmutate
+    vpermq     ymm0, ymm0, 0xd8            // unmutate vpackuswb
     vmovdqu    [edx], ymm0
     lea        edx, [edx + 32]
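
The Box variant in the second hunk performs the same horizontal pass but first averages two rows with vpavgb. A minimal sketch of that path, under the same assumptions as the sketch above (hypothetical helper name, fixed 64-byte width, same headers plus <stddef.h> for ptrdiff_t):

// Hypothetical helper mirroring one loop iteration of ScaleRowDown2Box_AVX2:
// a 2x2 box filter, vertical average first, then the horizontal pass.
static void HalveRowBox_AVX2_Sketch(const uint8_t* src, ptrdiff_t src_stride,
                                    uint8_t* dst) {
  const __m256i ones = _mm256_set1_epi8(1);     // '1' constant for vpmaddubsw
  const __m256i zero = _mm256_setzero_si256();  // constant 0 for vpavgw
  __m256i a = _mm256_loadu_si256((const __m256i*)src);
  __m256i b = _mm256_loadu_si256((const __m256i*)(src + 32));
  // vpavgb: rounded average of each byte with the byte one row below it.
  a = _mm256_avg_epu8(a, _mm256_loadu_si256((const __m256i*)(src + src_stride)));
  b = _mm256_avg_epu8(b, _mm256_loadu_si256((const __m256i*)(src + src_stride + 32)));
  a = _mm256_maddubs_epi16(a, ones);      // add adjacent byte pairs -> 16-bit sums
  b = _mm256_maddubs_epi16(b, ones);
  a = _mm256_avg_epu16(a, zero);          // (sum + 1) / 2, rounded
  b = _mm256_avg_epu16(b, zero);
  a = _mm256_packus_epi16(a, b);          // back to bytes (lanes interleaved)
  a = _mm256_permute4x64_epi64(a, 0xd8);  // undo the lane interleave from the pack
  _mm256_storeu_si256((__m256i*)dst, a);
}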