Commit fdec4be3 authored by fbarchard@google.com

quick fix for sub in wrong place

BUG=none
TEST=none

git-svn-id: http://libyuv.googlecode.com/svn/trunk@487 16f28f9a-4ce2-e073-06de-1de4eb20be90
parent 66d16f41
@@ -1035,10 +1035,10 @@ static void ScaleFilterRows_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
     mov eax, [esp + 8 + 20] // source_y_fraction (0..255)
     mov edx, [esp + 8 + 12] // src_stride
     mov ecx, [esp + 8 + 16] // dst_width
     mov eax, [esp + 8 + 20] // source_y_fraction (0..255)
+    sub edi, esi
     shr eax, 1
     cmp eax, 0 // dispatch to specialized filters if applicable.
     je xloop100
-    sub edi, esi
     cmp eax, 32
     je xloop75
     cmp eax, 64
@@ -1145,10 +1145,10 @@ static void ScaleFilterRows_Unaligned_SSSE3(uint8* dst_ptr,
     mov edx, [esp + 8 + 12] // src_stride
     mov ecx, [esp + 8 + 16] // dst_width
     mov eax, [esp + 8 + 20] // source_y_fraction (0..255)
+    sub edi, esi
     shr eax, 1
     cmp eax, 0 // dispatch to specialized filters if applicable.
     je xloop100
-    sub edi, esi
     cmp eax, 32
     je xloop75
     cmp eax, 64
@@ -289,10 +289,10 @@ void ScaleARGBFilterRows_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
     mov edx, [esp + 8 + 12] // src_stride
     mov ecx, [esp + 8 + 16] // dst_width
     mov eax, [esp + 8 + 20] // source_y_fraction (0..255)
+    sub edi, esi
     shr eax, 1
     cmp eax, 0 // dispatch to specialized filters if applicable.
     je xloop100
-    sub edi, esi
     cmp eax, 32
     je xloop75
     cmp eax, 64
@@ -611,10 +611,10 @@ void ScaleARGBFilterRows_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
     ptrdiff_t src_stride, int dst_width,
     int source_y_fraction) {
   asm volatile (
+    "sub %1,%0 \n"
     "shr %3 \n"
     "cmp $0x0,%3 \n"
     "je 100f \n"
-    "sub %1,%0 \n"
     "cmp $0x20,%3 \n"
     "je 75f \n"
     "cmp $0x40,%3 \n"
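For context: all four routines evidently keep the destination as an offset from the source pointer (sub edi, esi and sub %1,%0 compute dst - src), so that offset must be set up before the early dispatch branches, including the xloop100 / 100f pure-copy case taken when source_y_fraction is 0; with the subtraction placed after the je, that path ran before the offset was ever computed. Below is a minimal scalar sketch of the row filter these SSSE3 routines implement. FilterRows_C is a hypothetical name, it uses plain indexing instead of the offset trick, and the 7-bit fixed-point blend is an assumption about the vector code, not something shown in this diff.

/* Hypothetical scalar model (not libyuv code): blend two adjacent source
 * rows into one destination row.  source_y_fraction is 0..255; 0 selects
 * a plain copy of row 0, matching the xloop100 / 100f dispatch target. */
#include <stddef.h>
#include <stdint.h>

static void FilterRows_C(uint8_t* dst_ptr, const uint8_t* src_ptr,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) {
  int f = source_y_fraction >> 1;        /* "shr eax, 1": 0..255 -> 0..127 */
  for (int x = 0; x < dst_width; ++x) {
    if (f == 0) {
      dst_ptr[x] = src_ptr[x];           /* 100% row 0: straight copy */
    } else {
      int y0 = src_ptr[x];               /* row 0 */
      int y1 = src_ptr[x + src_stride];  /* row 1 */
      /* Assumed 7-bit fixed-point blend of the two rows. */
      dst_ptr[x] = (uint8_t)((y0 * (128 - f) + y1 * f) >> 7);
    }
  }
}

Whatever the blend details, per-call setup such as the dst - src subtraction has to come before the first conditional branch so that every specialized loop, including the copy-only one, sees the same register state.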