Commit b00d4016 authored by Frank Barchard's avatar Frank Barchard

make unittest allocator align to 64 bytes.

Blur requires memory to be aligned. Change the unittest allocator to guarantee 64-byte alignment.
Re-enable the Blur "Any" test, which fails if memory is unaligned.

TBR=harryjin@google.com
BUG=libyuv:596,libyuv:594
TESTED=local build passes with row.h removed from tests.

Review URL: https://codereview.chromium.org/2019753002 .
parent ade85fb5
Name: libyuv Name: libyuv
URL: http://code.google.com/p/libyuv/ URL: http://code.google.com/p/libyuv/
Version: 1591 Version: 1592
License: BSD License: BSD
License File: LICENSE License File: LICENSE
......
...@@ -11,6 +11,6 @@ ...@@ -11,6 +11,6 @@
#ifndef INCLUDE_LIBYUV_VERSION_H_ // NOLINT #ifndef INCLUDE_LIBYUV_VERSION_H_ // NOLINT
#define INCLUDE_LIBYUV_VERSION_H_ #define INCLUDE_LIBYUV_VERSION_H_
#define LIBYUV_VERSION 1591 #define LIBYUV_VERSION 1592
#endif // INCLUDE_LIBYUV_VERSION_H_ NOLINT #endif // INCLUDE_LIBYUV_VERSION_H_ NOLINT
...@@ -1744,9 +1744,7 @@ int ARGBBlur(const uint8* src_argb, int src_stride_argb, ...@@ -1744,9 +1744,7 @@ int ARGBBlur(const uint8* src_argb, int src_stride_argb,
#if defined(HAS_CUMULATIVESUMTOAVERAGEROW_SSE2) #if defined(HAS_CUMULATIVESUMTOAVERAGEROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) { if (TestCpuFlag(kCpuHasSSE2)) {
ComputeCumulativeSumRow = ComputeCumulativeSumRow_SSE2; ComputeCumulativeSumRow = ComputeCumulativeSumRow_SSE2;
if (IS_ALIGNED(dst_cumsum, 16)) { CumulativeSumToAverageRow = CumulativeSumToAverageRow_SSE2;
CumulativeSumToAverageRow = CumulativeSumToAverageRow_SSE2;
}
} }
#endif #endif
// Compute enough CumulativeSum for first row to be blurred. After this // Compute enough CumulativeSum for first row to be blurred. After this
......
...@@ -5275,6 +5275,7 @@ void SobelXYRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely, ...@@ -5275,6 +5275,7 @@ void SobelXYRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
// dst points to pixel to store result to. // dst points to pixel to store result to.
// count is number of averaged pixels to produce. // count is number of averaged pixels to produce.
// Does 4 pixels at a time. // Does 4 pixels at a time.
// This function requires alignment on accumulation buffer pointers.
void CumulativeSumToAverageRow_SSE2(const int32* topleft, const int32* botleft, void CumulativeSumToAverageRow_SSE2(const int32* topleft, const int32* botleft,
int width, int area, uint8* dst, int width, int area, uint8* dst,
int count) { int count) {
......
...@@ -1929,7 +1929,7 @@ static int TestBlur(int width, int height, int benchmark_iterations, ...@@ -1929,7 +1929,7 @@ static int TestBlur(int width, int height, int benchmark_iterations,
} }
static const int kBlurSize = 55; static const int kBlurSize = 55;
TEST_F(LibYUVPlanarTest, DISABLED_ARGBBlur_Any) { TEST_F(LibYUVPlanarTest, ARGBBlur_Any) {
int max_diff = TestBlur(benchmark_width_ - 1, benchmark_height_, int max_diff = TestBlur(benchmark_width_ - 1, benchmark_height_,
benchmark_iterations_, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_, disable_cpu_flags_, benchmark_cpu_info_,
......
...@@ -34,7 +34,7 @@ DEFINE_int32(libyuv_cpu_info, -1, ...@@ -34,7 +34,7 @@ DEFINE_int32(libyuv_cpu_info, -1,
// Set flags to -1 for benchmarking to avoid slower C code. // Set flags to -1 for benchmarking to avoid slower C code.
LibYUVConvertTest::LibYUVConvertTest() : LibYUVConvertTest::LibYUVConvertTest() :
benchmark_iterations_(BENCHMARK_ITERATIONS), benchmark_width_(130), benchmark_iterations_(BENCHMARK_ITERATIONS), benchmark_width_(128),
benchmark_height_(72), disable_cpu_flags_(1), benchmark_cpu_info_(-1) { benchmark_height_(72), disable_cpu_flags_(1), benchmark_cpu_info_(-1) {
const char* repeat = getenv("LIBYUV_REPEAT"); const char* repeat = getenv("LIBYUV_REPEAT");
if (repeat) { if (repeat) {
......
...@@ -67,9 +67,9 @@ static inline bool SizeValid(int src_width, int src_height, ...@@ -67,9 +67,9 @@ static inline bool SizeValid(int src_width, int src_height,
#define align_buffer_page_end(var, size) \ #define align_buffer_page_end(var, size) \
uint8* var; \ uint8* var; \
uint8* var##_mem; \ uint8* var##_mem; \
var##_mem = reinterpret_cast<uint8*>(malloc((((size) + 4095) & ~4095) + \ var##_mem = reinterpret_cast<uint8*>(malloc(((size) + 4095 + 63) & ~4095)); \
OFFBY)); \ var = (uint8*)((intptr_t)(var##_mem + (((size) + 4095 + 63) & ~4095) - \
var = var##_mem + (-(size) & 4095) + OFFBY; (size)) & ~63);
#define free_aligned_buffer_page_end(var) \ #define free_aligned_buffer_page_end(var) \
free(var##_mem); \ free(var##_mem); \
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment