Commit 9508c3e7 authored by Henrik Kjellander's avatar Henrik Kjellander Committed by Commit Bot

Fix Valgrind by restoring scripts deleted in Chromium.

Copy Valgrind scripts that were deleted from Chromium's tools/ to fix the Memcheck bot:
    valgrind/chrome_tests.bat
    valgrind/chrome_tests.py
    valgrind/chrome_tests.sh
    valgrind/common.py
    valgrind/gdb_helper.py
    valgrind/locate_valgrind.sh
    valgrind/memcheck_analyze.py
    valgrind/valgrind.gni
    valgrind/valgrind.sh
    valgrind/valgrind_test.py

valgrind_test.py was stripped of its Mac and Dr Memory specific parts, which
we don't use. There's still more cleanup to do, tracked in bugs.webrtc.org/7849.

This is similar to changes in https://codereview.webrtc.org/2945753002.

BUG=libyuv:714
NOTRY=True

Change-Id: Ia6ba9bd3d3fca6f2ebe0e4f30e1eb39bb1a66813
Reviewed-on: https://chromium-review.googlesource.com/615162
Reviewed-by: Henrik Kjellander <kjellander@chromium.org>
Commit-Queue: Henrik Kjellander <kjellander@chromium.org>
parent 9079966f
@echo off
:: Copyright (c) 2011 The Chromium Authors. All rights reserved.
:: Use of this source code is governed by a BSD-style license that can be
:: found in the LICENSE file.

:: Windows wrapper for chrome_tests.py: parses --tool from the arguments,
:: unpacks the Dr. Memory binaries if needed, and then forwards ALL original
:: arguments to the Python driver (SHIFT below does not modify %*).
setlocal
set THISDIR=%~dp0
set TOOL_NAME="unknown"
:: Get the tool name and put it into TOOL_NAME {{{1
:: NB: SHIFT command doesn't modify %*
:PARSE_ARGS_LOOP
if %1 == () GOTO:TOOLNAME_NOT_FOUND
if %1 == --tool GOTO:TOOLNAME_FOUND
SHIFT
goto :PARSE_ARGS_LOOP
:TOOLNAME_NOT_FOUND
echo "Please specify a tool (e.g. drmemory) by using --tool flag"
exit /B 1
:TOOLNAME_FOUND
SHIFT
set TOOL_NAME=%1
:: }}}
:: All drmemory_* variants share the same setup path below.
if "%TOOL_NAME%" == "drmemory" GOTO :SETUP_DRMEMORY
if "%TOOL_NAME%" == "drmemory_light" GOTO :SETUP_DRMEMORY
if "%TOOL_NAME%" == "drmemory_full" GOTO :SETUP_DRMEMORY
if "%TOOL_NAME%" == "drmemory_pattern" GOTO :SETUP_DRMEMORY
echo "Unknown tool: `%TOOL_NAME%`! Only drmemory is supported right now"
exit /B 1
:SETUP_DRMEMORY
:: Set up DRMEMORY_COMMAND to invoke Dr. Memory {{{1
set DRMEMORY_PATH=%THISDIR%..\..\third_party\drmemory
set DRMEMORY_SFX=%DRMEMORY_PATH%\drmemory-windows-sfx.exe
if EXIST %DRMEMORY_SFX% GOTO DRMEMORY_BINARY_OK
echo "Can't find Dr. Memory executables."
echo "See http://www.chromium.org/developers/how-tos/using-valgrind/dr-memory"
echo "for the instructions on how to get them."
exit /B 1
:DRMEMORY_BINARY_OK
:: Self-extract the archive and point DRMEMORY_COMMAND at the unpacked
:: binary; DRMEMORY_COMMAND is read by the downstream test scripts.
%DRMEMORY_SFX% -o%DRMEMORY_PATH%\unpacked -y
set DRMEMORY_COMMAND=%DRMEMORY_PATH%\unpacked\bin\drmemory.exe
:: }}}
goto :RUN_TESTS
:RUN_TESTS
set PYTHONPATH=%THISDIR%../python/google
set RUNNING_ON_VALGRIND=yes
python %THISDIR%/chrome_tests.py %*
#!/usr/bin/env python
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
''' Runs various chrome tests through valgrind_test.py.'''
import glob
import logging
import multiprocessing
import optparse
import os
import stat
import subprocess
import sys
import logging_utils
import path_utils
import common
import valgrind_test
class TestNotFound(Exception):
  """The requested test name is not present in ChromeTests._test_list."""


class MultipleGTestFiltersSpecified(Exception):
  """Conflicting gtest filters were given via --gtest_filter and --test."""


class BuildDirNotFound(Exception):
  """No build directory could be auto-detected and none was specified."""


class BuildDirAmbiguous(Exception):
  """More than one candidate build directory was found."""


class ExecutableNotFound(Exception):
  """The test executable does not exist in the build directory."""


class BadBinary(Exception):
  """The test executable is instrumented with an incompatible sanitizer."""
class ChromeTests:
  """Runs one named test binary (or layout-test chunk) under a memory tool."""

  # Tools slow enough to need the extra ".gtest.txt" exclude files (see
  # _AppendGtestFilter).
  SLOW_TOOLS = ["memcheck", "drmemory"]
  # Default number of layout tests per chunk; also used by _main's -n default.
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 300
def __init__(self, options, args, test):
if ':' in test:
(self._test, self._gtest_filter) = test.split(':', 1)
else:
self._test = test
self._gtest_filter = options.gtest_filter
if self._test not in self._test_list:
raise TestNotFound("Unknown test: %s" % test)
if options.gtest_filter and options.gtest_filter != self._gtest_filter:
raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
"and --test %s" % test)
self._options = options
self._args = args
script_dir = path_utils.ScriptDir()
# Compute the top of the tree (the "source dir") from the script dir (where
# this script lives). We assume that the script dir is in tools/valgrind/
# relative to the top of the tree.
self._source_dir = os.path.dirname(os.path.dirname(script_dir))
# since this path is used for string matching, make sure it's always
# an absolute Unix-style path
self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
self._command_preamble = ["--source-dir=%s" % (self._source_dir)]
if not self._options.build_dir:
dirs = [
os.path.join(self._source_dir, "xcodebuild", "Debug"),
os.path.join(self._source_dir, "out", "Debug"),
os.path.join(self._source_dir, "build", "Debug"),
]
build_dir = [d for d in dirs if os.path.isdir(d)]
if len(build_dir) > 1:
raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
"%s\nPlease specify just one "
"using --build-dir" % ", ".join(build_dir))
elif build_dir:
self._options.build_dir = build_dir[0]
else:
self._options.build_dir = None
if self._options.build_dir:
build_dir = os.path.abspath(self._options.build_dir)
self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]
def _EnsureBuildDirFound(self):
if not self._options.build_dir:
raise BuildDirNotFound("Oops, couldn't find a build dir, please "
"specify it manually using --build-dir")
  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
    '''Generates the default command array that most tests will use.

    Builds the valgrind_test.py argument list: the source/build-dir preamble,
    every matching suppression file, tool flags, and (if exe is given) the
    resolved executable path plus common gtest launcher flags.
    '''
    if exe and common.IsWindows():
      exe += '.exe'

    cmd = list(self._command_preamble)

    # Find all suppressions matching the following pattern:
    # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
    # and list them with --suppressions= prefix.
    script_dir = path_utils.ScriptDir()
    tool_name = tool.ToolName();
    suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
    if os.path.exists(suppression_file):
      cmd.append("--suppressions=%s" % suppression_file)
    # Platform-specific suppression
    for platform in common.PlatformNames():
      platform_suppression_file = \
          os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
      if os.path.exists(platform_suppression_file):
        cmd.append("--suppressions=%s" % platform_suppression_file)

    if tool_name == "drmemory":
      if self._options.drmemory_ops:
        # prepending " " to avoid Dr. Memory's option confusing optparse
        cmd += ["--drmemory_ops", " " + self._options.drmemory_ops]

    if self._options.valgrind_tool_flags:
      cmd += self._options.valgrind_tool_flags.split(" ")
    if self._options.keep_logs:
      cmd += ["--keep_logs"]
    if valgrind_test_args != None:
      for arg in valgrind_test_args:
        cmd.append(arg)
    if exe:
      self._EnsureBuildDirFound()
      exe_path = os.path.join(self._options.build_dir, exe)
      if not os.path.exists(exe_path):
        raise ExecutableNotFound("Couldn't find '%s'" % exe_path)

      # Make sure we don't try to test ASan-built binaries
      # with other dynamic instrumentation-based tools.
      # TODO(timurrrr): also check TSan and MSan?
      # `nm` might not be available, so use try-except.
      try:
        # Do not perform this check on OS X, as 'nm' on 10.6 can't handle
        # binaries built with Clang 3.5+.
        if not common.IsMac():
          nm_output = subprocess.check_output(["nm", exe_path])
          if nm_output.find("__asan_init") != -1:
            raise BadBinary("You're trying to run an executable instrumented "
                            "with AddressSanitizer under %s. Please provide "
                            "an uninstrumented executable." % tool_name)
      except OSError:
        pass

      cmd.append(exe_path)
      # Valgrind runs tests slowly, so slow tests hurt more; show elapased time
      # so we can find the slowpokes.
      cmd.append("--gtest_print_time")
      # Built-in test launcher for gtest-based executables runs tests using
      # multiple process by default. Force the single-process mode back.
      cmd.append("--single-process-tests")
    if self._options.gtest_repeat:
      cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
    if self._options.gtest_shuffle:
      cmd.append("--gtest_shuffle")
    if self._options.gtest_break_on_failure:
      cmd.append("--gtest_break_on_failure")
    if self._options.test_launcher_bot_mode:
      cmd.append("--test-launcher-bot-mode")
    if self._options.test_launcher_total_shards is not None:
      cmd.append("--test-launcher-total-shards=%d"
                 % self._options.test_launcher_total_shards)
    if self._options.test_launcher_shard_index is not None:
      cmd.append("--test-launcher-shard-index=%d"
                 % self._options.test_launcher_shard_index)
    return cmd
def Run(self):
''' Runs the test specified by command-line argument --test '''
logging.info("running test %s" % (self._test))
return self._test_list[self._test](self)
  def _AppendGtestFilter(self, tool, name, cmd):
    '''Append an appropriate --gtest_filter flag to the googletest binary
    invocation.
    If the user passed their own filter mentioning only one test, just use
    it. Otherwise, filter out tests listed in the appropriate gtest_exclude
    files.
    '''
    # A filter with no ':', '?' or '*' names exactly one test; honor it as-is.
    if (self._gtest_filter and
        ":" not in self._gtest_filter and
        "?" not in self._gtest_filter and
        "*" not in self._gtest_filter):
      cmd.append("--gtest_filter=%s" % self._gtest_filter)
      return

    filters = []
    gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")

    # Candidate exclude files: <name>.gtest-<tool>.txt always, plus
    # per-platform and (for slow tools) the generic <name>.gtest.txt.
    gtest_filter_files = [
        os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
    # Use ".gtest.txt" files only for slow tools, as they now contain
    # Valgrind- and Dr.Memory-specific filters.
    # TODO(glider): rename the files to ".gtest_slow.txt"
    if tool.ToolName() in ChromeTests.SLOW_TOOLS:
      gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
    for platform_suffix in common.PlatformNames():
      gtest_filter_files += [
          os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
          os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
              (tool.ToolName(), platform_suffix))]
    logging.info("Reading gtest exclude filter files:")
    for filename in gtest_filter_files:
      # strip the leading absolute path (may be very long on the bot)
      # and the following / or \.
      readable_filename = filename.replace("\\", "/")  # '\' on Windows
      readable_filename = readable_filename.replace(self._source_dir, "")[1:]
      if not os.path.exists(filename):
        logging.info(" \"%s\" - not found" % readable_filename)
        continue
      logging.info(" \"%s\" - OK" % readable_filename)
      f = open(filename, 'r')
      for line in f.readlines():
        # Skip comments and blank lines in the exclude file.
        if line.startswith("#") or line.startswith("//") or line.isspace():
          continue
        line = line.rstrip()
        test_prefixes = ["FLAKY", "FAILS"]
        for p in test_prefixes:
          # Strip prefixes from the test names.
          line = line.replace(".%s_" % p, ".")
        # Exclude the original test name.
        filters.append(line)
        if line[-2:] != ".*":
          # List all possible prefixes if line doesn't end with ".*".
          for p in test_prefixes:
            filters.append(line.replace(".", ".%s_" % p))
    # Get rid of duplicates.
    filters = set(filters)
    gtest_filter = self._gtest_filter
    if len(filters):
      if gtest_filter:
        # Append the exclusions ("-a:b:c") to the user's positive filter.
        gtest_filter += ":"
        if gtest_filter.find("-") < 0:
          gtest_filter += "-"
      else:
        gtest_filter = "-"
      gtest_filter += ":".join(filters)
    if gtest_filter:
      cmd.append("--gtest_filter=%s" % gtest_filter)
@staticmethod
def ShowTests():
test_to_names = {}
for name, test_function in ChromeTests._test_list.iteritems():
test_to_names.setdefault(test_function, []).append(name)
name_to_aliases = {}
for names in test_to_names.itervalues():
names.sort(key=lambda name: len(name))
name_to_aliases[names[0]] = names[1:]
print
print "Available tests:"
print "----------------"
for name, aliases in sorted(name_to_aliases.iteritems()):
if aliases:
print " {} (aka {})".format(name, ', '.join(aliases))
else:
print " {}".format(name)
def SetupLdPath(self, requires_build_dir):
if requires_build_dir:
self._EnsureBuildDirFound()
elif not self._options.build_dir:
return
# Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
if (os.getenv("LD_LIBRARY_PATH")):
os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
self._options.build_dir))
else:
os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool, name, valgrind_test_args)
self._AppendGtestFilter(tool, name, cmd)
cmd.extend(['--test-tiny-timeout=1000'])
if cmd_args:
cmd.extend(cmd_args)
self.SetupLdPath(True)
return tool.Run(cmd, module)
def RunCmdLine(self):
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool, None, self._args)
self.SetupLdPath(False)
return tool.Run(cmd, None)
  # --- Thin wrappers, one per test binary. The first SimpleTest argument is
  # the suppression/exclude-file subdirectory, the second the executable name.
  def TestAccessibility(self):
    return self.SimpleTest("accessibility", "accessibility_unittests")

  def TestAddressInput(self):
    return self.SimpleTest("addressinput", "libaddressinput_unittests")

  def TestAngle(self):
    return self.SimpleTest("angle", "angle_unittests")

  def TestAppList(self):
    return self.SimpleTest("app_list", "app_list_unittests")

  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")

  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")

  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")

  def TestBlinkHeap(self):
    return self.SimpleTest("blink_heap", "blink_heap_unittests")

  def TestBlinkPlatform(self):
    return self.SimpleTest("blink_platform", "blink_platform_unittests")

  def TestCacheInvalidation(self):
    return self.SimpleTest("cacheinvalidation", "cacheinvalidation_unittests")

  def TestCast(self):
    return self.SimpleTest("chrome", "cast_unittests")

  def TestCC(self):
    return self.SimpleTest("cc", "cc_unittests",
                           cmd_args=[
                               "--cc-layer-tree-test-long-timeout"])

  def TestChromeApp(self):
    return self.SimpleTest("chrome_app", "chrome_app_unittests")

  def TestChromeElf(self):
    return self.SimpleTest("chrome_elf", "chrome_elf_unittests")

  def TestChromeDriver(self):
    return self.SimpleTest("chromedriver", "chromedriver_unittests")

  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")

  def TestComponents(self):
    return self.SimpleTest("components", "components_unittests")

  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")

  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")

  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")

  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")

  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")

  def TestDisplay(self):
    return self.SimpleTest("display", "display_unittests")

  def TestEvents(self):
    return self.SimpleTest("events", "events_unittests")

  def TestExtensions(self):
    return self.SimpleTest("extensions", "extensions_unittests")

  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")

  def TestGCM(self):
    return self.SimpleTest("gcm", "gcm_unit_tests")

  def TestGfx(self):
    return self.SimpleTest("gfx", "gfx_unittests")

  def TestGin(self):
    return self.SimpleTest("gin", "gin_unittests")

  def TestGoogleApis(self):
    return self.SimpleTest("google_apis", "google_apis_unittests")

  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")

  def TestIpc(self):
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])

  def TestInstallerUtil(self):
    return self.SimpleTest("installer_util", "installer_util_unittests")

  def TestInstallStatic(self):
    return self.SimpleTest("install_static", "install_static_unittests")

  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")

  def TestKeyboard(self):
    return self.SimpleTest("keyboard", "keyboard_unittests")

  def TestLatency(self):
    return self.SimpleTest("latency", "latency_unittests")

  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")

  def TestMessageCenter(self):
    return self.SimpleTest("message_center", "message_center_unittests")

  def TestMidi(self):
    return self.SimpleTest("chrome", "midi_unittests")

  def TestMojoCommon(self):
    return self.SimpleTest("mojo_common", "mojo_common_unittests")

  def TestMojoPublicBindings(self):
    return self.SimpleTest("mojo_public_bindings",
                           "mojo_public_bindings_unittests")

  def TestMojoPublicSystem(self):
    return self.SimpleTest("mojo_public_system",
                           "mojo_public_system_unittests")

  def TestMojoPublicSysPerf(self):
    return self.SimpleTest("mojo_public_sysperf",
                           "mojo_public_system_perftests")

  def TestMojoSystem(self):
    return self.SimpleTest("mojo_system", "mojo_system_unittests")

  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")

  def TestNetPerf(self):
    return self.SimpleTest("net", "net_perftests")

  def TestPhoneNumber(self):
    return self.SimpleTest("phonenumber", "libphonenumber_unittests")

  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")

  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")

  def TestRemoting(self):
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                               "--ui-test-action-timeout=60000",
                               "--ui-test-action-max-timeout=150000"])

  def TestSkia(self):
    return self.SimpleTest("skia", "skia_unittests")

  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")

  def TestStorage(self):
    return self.SimpleTest("storage", "storage_unittests")

  def TestLinuxSandbox(self):
    return self.SimpleTest("sandbox", "sandbox_linux_unittests")

  def TestUnit(self):
    # http://crbug.com/51716
    # Disabling all unit tests
    # Problems reappeared after r119922
    if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
      logging.warning("unit_tests are disabled for memcheck on MacOS.")
      return 0;
    return self.SimpleTest("chrome", "unit_tests")

  def TestUIBaseUnit(self):
    return self.SimpleTest("chrome", "ui_base_unittests")

  def TestUIChromeOS(self):
    return self.SimpleTest("chrome", "ui_chromeos_unittests")

  def TestURL(self):
    return self.SimpleTest("chrome", "url_unittests")

  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")
  # Shared argument sets for the long-running browser/UI suites below.
  # Valgrind timeouts are in seconds.
  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
  # UI test timeouts are in milliseconds.
  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                  "--ui-test-action-max-timeout=150000",
                  "--no-sandbox"]

  # TODO(thestig) fine-tune these values.
  # Valgrind timeouts are in seconds.
  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
  # Browser test timeouts are in milliseconds.
  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
                       "--ui-test-action-max-timeout=800000",
                       "--no-sandbox"]

  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestContentBrowser(self):
    return self.SimpleTest("content", "content_browsertests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestInteractiveUI(self):
    return self.SimpleTest("chrome", "interactive_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestSyncIntegration(self):
    return self.SimpleTest("chrome", "sync_integration_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))
  def TestLayoutChunk(self, chunk_num, chunk_size):
    """Run one chunk of the WebKit layout tests under the memory tool.

    Runs tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    list of tests, wrapping around to the beginning of the list at the end.
    If chunk_size is zero, run all tests in the list once. If a text file is
    given as argument, it is used as the list of tests.
    """
    # Exactly one of (chunk mode, explicit test-list args) must be in use.
    assert((chunk_size == 0) != (len(self._args) == 0))
    # Build the ginormous commandline in 'cmd'.
    # It's going to be roughly
    #  python valgrind_test.py ...
    # but we'll use the --indirect flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py commandline.
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_webkit_layout")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run-webkits-tests commandline.
    # Store each chunk in its own directory so that we can find the data later.
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    if os.path.exists(out_dir):
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "third_party", "WebKit", "Tools",
                          "Scripts", "run-webkit-tests")
    # http://crbug.com/260627: After the switch to content_shell from DRT, each
    # test now brings up 3 processes. Under Valgrind, they become memory bound
    # and can eventually OOM if we don't reduce the total count.
    # It'd be nice if content_shell automatically throttled the startup of new
    # tests if we're low on memory.
    jobs = max(1, int(multiprocessing.cpu_count() * 0.3))
    script_cmd = ["python", script, "-v",
                  # run a separate DumpRenderTree for each test
                  "--batch-size=1",
                  "--fully-parallel",
                  "--child-processes=%d" % jobs,
                  "--time-out-ms=800000",
                  "--no-retry-failures",  # retrying takes too much time
                  # http://crbug.com/176908: Don't launch a browser when done.
                  "--no-show-results",
                  "--nocheck-sys-deps",
                  "--additional-driver-flag=--no-sandbox"]
    # Pass build mode to run-webkit-tests. We aren't passed it directly,
    # so parse it out of build_dir. run-webkit-tests can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build-dir / --debug)
    if self._options.build_dir:
      build_root, mode = os.path.split(self._options.build_dir)
      script_cmd.extend(["--build-directory", build_root, "--target", mode])
    if (chunk_size > 0):
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
    if len(self._args):
      # if the arg is a txt file, then treat it as a list of tests
      if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd
    cmd.extend(["--"])
    cmd.extend(script_cmd)

    # Layout tests often times fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds=120
    else:
      min_runtime_in_seconds=0
    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
    return ret
def TestLayout(self):
# A "chunk file" is maintained in the local directory so that each test
# runs a slice of the layout tests of size chunk_size that increments with
# each run. Since tests can be added and removed from the layout tests at
# any time, this is not going to give exact coverage, but it will allow us
# to continuously run small slices of the layout tests under valgrind rather
# than having to run all of them in one shot.
chunk_size = self._options.num_tests
if chunk_size == 0 or len(self._args):
return self.TestLayoutChunk(0, 0)
chunk_num = 0
chunk_file = os.path.join("valgrind_layout_chunk.txt")
logging.info("Reading state from " + chunk_file)
try:
f = open(chunk_file)
if f:
chunk_str = f.read()
if len(chunk_str):
chunk_num = int(chunk_str)
# This should be enough so that we have a couple of complete runs
# of test data stored in the archive (although note that when we loop
# that we almost guaranteed won't be at the end of the test list)
if chunk_num > 10000:
chunk_num = 0
f.close()
except IOError, (errno, strerror):
logging.error("error reading from file %s (%d, %s)" % (chunk_file,
errno, strerror))
# Save the new chunk size before running the tests. Otherwise if a
# particular chunk hangs the bot, the chunk number will never get
# incremented and the bot will be wedged.
logging.info("Saving state to " + chunk_file)
try:
f = open(chunk_file, "w")
chunk_num += 1
f.write("%d" % chunk_num)
f.close()
except IOError, (errno, strerror):
logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
strerror))
# Since we're running small chunks of the layout tests, it's important to
# mark the ones that have errors in them. These won't be visible in the
# summary list for long, but will be useful for someone reviewing this bot.
return self.TestLayoutChunk(chunk_num, chunk_size)
# The known list of tests.
# Recognise the original abbreviations as well as full executable names.
_test_list = {
"cmdline" : RunCmdLine,
"addressinput": TestAddressInput,
"libaddressinput_unittests": TestAddressInput,
"accessibility": TestAccessibility,
"angle": TestAngle, "angle_unittests": TestAngle,
"app_list": TestAppList, "app_list_unittests": TestAppList,
"ash": TestAsh, "ash_unittests": TestAsh,
"aura": TestAura, "aura_unittests": TestAura,
"base": TestBase, "base_unittests": TestBase,
"blink_heap": TestBlinkHeap,
"blink_platform": TestBlinkPlatform,
"browser": TestBrowser, "browser_tests": TestBrowser,
"cacheinvalidation": TestCacheInvalidation,
"cacheinvalidation_unittests": TestCacheInvalidation,
"cast": TestCast, "cast_unittests": TestCast,
"cc": TestCC, "cc_unittests": TestCC,
"chrome_app": TestChromeApp,
"chrome_elf": TestChromeElf,
"chromedriver": TestChromeDriver,
"chromeos": TestChromeOS, "chromeos_unittests": TestChromeOS,
"components": TestComponents,"components_unittests": TestComponents,
"compositor": TestCompositor,"compositor_unittests": TestCompositor,
"content": TestContent, "content_unittests": TestContent,
"content_browsertests": TestContentBrowser,
"courgette": TestCourgette, "courgette_unittests": TestCourgette,
"crypto": TestCrypto, "crypto_unittests": TestCrypto,
"device": TestDevice, "device_unittests": TestDevice,
"display": TestDisplay, "display_unittests": TestDisplay,
"events": TestEvents, "events_unittests": TestEvents,
"extensions": TestExtensions, "extensions_unittests": TestExtensions,
"ffmpeg_regression_tests": TestFFmpegRegressions,
"gcm": TestGCM, "gcm_unit_tests": TestGCM,
"gin": TestGin, "gin_unittests": TestGin,
"gfx": TestGfx, "gfx_unittests": TestGfx,
"google_apis": TestGoogleApis,
"gpu": TestGPU, "gpu_unittests": TestGPU,
"ipc": TestIpc, "ipc_tests": TestIpc,
"installer_util": TestInstallerUtil,
"installer_util_unittests": TestInstallerUtil,
"install_static_unittests": TestInstallStatic,
"interactive_ui": TestInteractiveUI,
"jingle": TestJingle, "jingle_unittests": TestJingle,
"keyboard": TestKeyboard, "keyboard_unittests": TestKeyboard,
"latency": TestLatency, "latency_unittests": TestLatency,
"layout": TestLayout, "layout_tests": TestLayout,
"media": TestMedia, "media_unittests": TestMedia,
"message_center": TestMessageCenter,
"message_center_unittests" : TestMessageCenter,
"midi": TestMidi, "midi_unittests": TestMidi,
"mojo_common": TestMojoCommon,
"mojo_common_unittests": TestMojoCommon,
"mojo_system": TestMojoSystem,
"mojo_system_unittests": TestMojoSystem,
"mojo_public_system": TestMojoPublicSystem,
"mojo_public_system_unittests": TestMojoPublicSystem,
"mojo_public_bindings": TestMojoPublicBindings,
"mojo_public_bindings_unittests": TestMojoPublicBindings,
"mojo_public_sysperf": TestMojoPublicSysPerf,
"net": TestNet, "net_unittests": TestNet,
"net_perf": TestNetPerf, "net_perftests": TestNetPerf,
"phonenumber": TestPhoneNumber,
"libphonenumber_unittests": TestPhoneNumber,
"ppapi": TestPPAPI, "ppapi_unittests": TestPPAPI,
"printing": TestPrinting, "printing_unittests": TestPrinting,
"remoting": TestRemoting, "remoting_unittests": TestRemoting,
"sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
"skia": TestSkia, "skia_unittests": TestSkia,
"sql": TestSql, "sql_unittests": TestSql,
"storage": TestStorage, "storage_unittests": TestStorage,
"sync_integration_tests": TestSyncIntegration,
"sync_integration": TestSyncIntegration,
"ui_base_unit": TestUIBaseUnit, "ui_base_unittests": TestUIBaseUnit,
"ui_chromeos": TestUIChromeOS, "ui_chromeos_unittests": TestUIChromeOS,
"unit": TestUnit, "unit_tests": TestUnit,
"url": TestURL, "url_unittests": TestURL,
"views": TestViews, "views_unittests": TestViews,
"webkit": TestLayout,
}
def _main():
parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
"[-t <test> ...]")
parser.add_option("--help-tests", dest="help_tests", action="store_true",
default=False, help="List all available tests")
parser.add_option("-b", "--build-dir",
help="the location of the compiler output")
parser.add_option("--target", help="Debug or Release")
parser.add_option("-t", "--test", action="append", default=[],
help="which test to run, supports test:gtest_filter format "
"as well.")
parser.add_option("--baseline", action="store_true", default=False,
help="generate baseline data instead of validating")
parser.add_option("-f", "--force", action="store_true", default=False,
help="run a broken test anyway")
parser.add_option("--gtest_filter",
help="additional arguments to --gtest_filter")
parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
parser.add_option("--gtest_shuffle", action="store_true", default=False,
help="Randomize tests' orders on every iteration.")
parser.add_option("--gtest_break_on_failure", action="store_true",
default=False,
help="Drop in to debugger on assertion failure. Also "
"useful for forcing tests to exit with a stack dump "
"on the first assertion failure when running with "
"--gtest_repeat=-1")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="verbose output - enable debug log messages")
parser.add_option("--tool", dest="valgrind_tool", default="memcheck",
help="specify a valgrind tool to run the tests under")
parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
help="specify custom flags for the selected valgrind tool")
parser.add_option("--keep_logs", action="store_true", default=False,
help="store memory tool logs in the <tool>.logs directory "
"instead of /tmp.\nThis can be useful for tool "
"developers/maintainers.\nPlease note that the <tool>"
".logs directory will be clobbered on tool startup.")
parser.add_option("-n", "--num_tests", type="int",
default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
help="for layout tests: # of subtests per run. 0 for all.")
parser.add_option("--test-launcher-bot-mode", action="store_true",
help="run the tests with --test-launcher-bot-mode")
parser.add_option("--test-launcher-total-shards", type=int,
help="run the tests with --test-launcher-total-shards")
parser.add_option("--test-launcher-shard-index", type=int,
help="run the tests with --test-launcher-shard-index")
parser.add_option("--drmemory_ops",
help="extra options passed to Dr. Memory")
options, args = parser.parse_args()
# Bake target into build_dir.
if options.target and options.build_dir:
assert (options.target !=
os.path.basename(os.path.dirname(options.build_dir)))
options.build_dir = os.path.join(os.path.abspath(options.build_dir),
options.target)
if options.verbose:
logging_utils.config_root(logging.DEBUG)
else:
logging_utils.config_root()
if options.help_tests:
ChromeTests.ShowTests()
return 0
if not options.test:
parser.error("--test not specified")
if len(options.test) != 1 and options.gtest_filter:
parser.error("--gtest_filter and multiple tests don't make sense together")
BROKEN_TESTS = {
'drmemory_light': [
'addressinput',
'aura',
'base_unittests',
'cc',
'components', # x64 only?
'content',
'gfx',
'mojo_public_bindings',
],
'drmemory_full': [
'addressinput',
'aura',
'base_unittests',
'blink_heap',
'blink_platform',
'browser_tests',
'cast',
'cc',
'chromedriver',
'compositor',
'content',
'content_browsertests',
'device',
'events',
'extensions',
'gfx',
'google_apis',
'gpu',
'ipc_tests',
'jingle',
'keyboard',
'media',
'midi',
'mojo_common',
'mojo_public_bindings',
'mojo_public_sysperf',
'mojo_public_system',
'mojo_system',
'net',
'remoting',
'unit',
'url',
],
}
for t in options.test:
if t in BROKEN_TESTS[options.valgrind_tool] and not options.force:
logging.info("Skipping broken %s test %s -- see crbug.com/633693" %
(options.valgrind_tool, t))
return 0
tests = ChromeTests(options, args, t)
ret = tests.Run()
if ret: return ret
return 0
if __name__ == "__main__":
sys.exit(_main())
#!/bin/bash

# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.

# Set up some paths and re-direct the arguments to chrome_tests.py

export THISDIR=`dirname $0`
# Keep a copy of the original arguments as an ARRAY so that arguments
# containing whitespace survive the hand-off to chrome_tests.py intact.
# (The previous plain string copy ARGV_COPY="$@" was re-split on spaces
# when expanded unquoted at the bottom of this script.)
ARGV_COPY=("$@")

# We need to set CHROME_VALGRIND iff using Memcheck:
#   tools/valgrind/chrome_tests.sh --tool memcheck
# or
#   tools/valgrind/chrome_tests.sh --tool=memcheck

tool="memcheck"  # Default to memcheck.
while (( "$#" ))
do
  if [[ "$1" == "--tool" ]]
  then
    tool="$2"
    shift
  elif [[ "$1" =~ --tool=(.*) ]]
  then
    tool="${BASH_REMATCH[1]}"
  fi
  shift
done

NEEDS_VALGRIND=0
NEEDS_DRMEMORY=0

case "$tool" in
  "memcheck")
    NEEDS_VALGRIND=1
    ;;
  "drmemory" | "drmemory_light" | "drmemory_full" | "drmemory_pattern")
    NEEDS_DRMEMORY=1
    ;;
esac

if [ "$NEEDS_VALGRIND" == "1" ]
then
  export CHROME_VALGRIND=`sh $THISDIR/locate_valgrind.sh`
  if [ "$CHROME_VALGRIND" = "" ]
  then
    # locate_valgrind.sh failed
    exit 1
  fi
  echo "Using valgrind binaries from ${CHROME_VALGRIND}"

  PATH="${CHROME_VALGRIND}/bin:$PATH"
  # We need to set these variables to override default lib paths hard-coded
  # into the Valgrind binary.
  export VALGRIND_LIB="$CHROME_VALGRIND/lib/valgrind"
  export VALGRIND_LIB_INNER="$CHROME_VALGRIND/lib/valgrind"

  # Clean up some /tmp directories that might be stale due to interrupted
  # chrome_tests.py execution.
  # FYI:
  #   -mtime +1  <- only print files modified more than 24h ago,
  #   -print0/-0 are needed to handle possible newlines in the filenames.
  echo "Cleanup /tmp from Valgrind stuff"
  find /tmp -maxdepth 1 \(\
        -name "vgdb-pipe-*" -or -name "vg_logs_*" -or -name "valgrind.*" \
      \) -mtime +1 -print0 | xargs -0 rm -rf
fi

if [ "$NEEDS_DRMEMORY" == "1" ]
then
  if [ -z "$DRMEMORY_COMMAND" ]
  then
    DRMEMORY_PATH="$THISDIR/../../third_party/drmemory"
    DRMEMORY_SFX="$DRMEMORY_PATH/drmemory-windows-sfx.exe"
    if [ ! -f "$DRMEMORY_SFX" ]
    then
      echo "Can't find Dr. Memory executables."
      echo "See http://www.chromium.org/developers/how-tos/using-valgrind/dr-memory"
      echo "for the instructions on how to get them."
      exit 1
    fi

    chmod +x "$DRMEMORY_SFX"  # Cygwin won't run it without +x.
    "$DRMEMORY_SFX" -o"$DRMEMORY_PATH/unpacked" -y
    export DRMEMORY_COMMAND="$DRMEMORY_PATH/unpacked/bin/drmemory.exe"
  fi
fi

# Hand the ORIGINAL argument list over to the Python driver, preserving
# per-argument quoting via the array expansion.
PYTHONPATH=$THISDIR/../python/google python \
    "$THISDIR/chrome_tests.py" "${ARGV_COPY[@]}"
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import logging
import platform
import os
import signal
import subprocess
import sys
import time
class NotImplementedError(Exception):
  '''Raised for platforms this module does not support.

  NOTE(review): shadows the Python builtin of the same name within this
  module; kept as-is because external callers may catch it under this name.
  '''
  pass
class TimeoutError(Exception):
  '''Raised by RunSubprocess() when a subprocess exceeds its timeout.

  NOTE(review): on Python 3 this shadows the builtin TimeoutError; kept
  as-is because external callers may catch it under this name.
  '''
  pass
def RunSubprocessInBackground(proc):
  """Spawns |proc| asynchronously and returns its subprocess.Popen handle.

  Args:
    proc: list of process components (executable plus arguments).
  """
  command_line = " ".join(proc)
  logging.info("running %s in the background" % command_line)
  return subprocess.Popen(proc)
def RunSubprocess(proc, timeout=0):
  """ Runs a subprocess, until it finishes or |timeout| is exceeded and the
  process is killed with taskkill.  A |timeout| <= 0 means no timeout.

  Args:
    proc: list of process components (exe + args)
    timeout: how long to wait before killing, <= 0 means wait forever

  Returns:
    The subprocess's exit code (0 on success).

  Raises:
    TimeoutError: if |timeout| was exceeded and the process was killed.
  """
  logging.info("running %s, timeout %d sec" % (" ".join(proc), timeout))
  sys.stdout.flush()
  sys.stderr.flush()

  # Manually read and print out stdout and stderr.
  # By default, the subprocess is supposed to inherit these from its parent,
  # however when run under buildbot, it seems unable to read data from a
  # grandchild process, so we have to read the child and print the data as if
  # it came from us for buildbot to read it. We're not sure why this is
  # necessary.
  # TODO(erikkay): should we buffer stderr and stdout separately?
  p = subprocess.Popen(proc, universal_newlines=True,
                       bufsize=0,  # unbuffered
                       stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

  logging.info("started subprocess")

  did_timeout = False
  if timeout > 0:
    # wait_until is only ever read below when timeout > 0, so it is safe
    # for it to stay unbound otherwise.
    wait_until = time.time() + timeout
  # Pump the child's combined stdout/stderr until it exits or times out.
  while p.poll() is None and not did_timeout:
    # Have to use readline rather than readlines() or "for line in p.stdout:",
    # otherwise we get buffered even with bufsize=0.
    line = p.stdout.readline()
    while line and not did_timeout:
      sys.stdout.write(line)
      sys.stdout.flush()
      line = p.stdout.readline()
    if timeout > 0:
      did_timeout = time.time() > wait_until

  if did_timeout:
    logging.info("process timed out")
  else:
    logging.info("process ended, did not time out")

  if did_timeout:
    if IsWindows():
      # taskkill /T kills the whole process tree on Windows.
      subprocess.call(["taskkill", "/T", "/F", "/PID", str(p.pid)])
    else:
      # Does this kill all children, too?
      os.kill(p.pid, signal.SIGINT)
      logging.error("KILLED %d" % p.pid)
    # Give the process a chance to actually die before continuing
    # so that cleanup can happen safely.
    time.sleep(1.0)
    logging.error("TIMEOUT waiting for %s" % proc[0])
    raise TimeoutError(proc[0])
  else:
    # Drain any output that remained buffered after the child exited.
    for line in p.stdout:
      sys.stdout.write(line)
      if not IsMac():  # stdout flush fails on Mac
        logging.info("flushing stdout")
        sys.stdout.flush()

  logging.info("collecting result code")
  result = p.poll()
  if result:
    logging.error("%s exited with non-zero result code %d" % (proc[0], result))
  return result
def IsLinux():
  """Returns True when running on a Linux platform."""
  platform_name = sys.platform
  return platform_name.startswith('linux')
def IsMac():
  """Returns True when running on macOS (darwin)."""
  platform_name = sys.platform
  return platform_name.startswith('darwin')
def IsWindows():
  """Returns True on native Windows or under Cygwin."""
  platform_name = sys.platform
  if platform_name == 'cygwin':
    return True
  return platform_name.startswith('win')
def WindowsVersionName():
  """Returns the name of the Windows version if it is known, or None.

  Possible return values are: xp, vista, 7, 8, or None
  """
  if sys.platform == 'cygwin':
    # Windows version number is hiding in system name. Looks like:
    # CYGWIN_NT-6.1-WOW64
    try:
      version_str = platform.uname()[0].split('-')[1]
    except IndexError:
      # Unexpected uname format (no '-' separated version field).
      # Previously a bare `except:` which also swallowed KeyboardInterrupt
      # and SystemExit; narrowed to the only exception split/[] can raise.
      return None
  elif sys.platform.startswith('win'):
    # Normal Windows version string. Mine: 6.1.7601
    version_str = platform.version()
  else:
    return None

  parts = version_str.split('.')
  try:
    major = int(parts[0])
    minor = int(parts[1])
  except (ValueError, IndexError):
    # Can't parse, unknown version. (Also previously a bare `except:`.)
    return None

  if major == 5:
    return 'xp'
  elif major == 6 and minor == 0:
    return 'vista'
  elif major == 6 and minor == 1:
    return '7'
  elif major == 6 and minor == 2:
    return '8'  # Future proof. ;)
  return None
def PlatformNames():
  """Returns platform name strings used in paths (suppressions, gtest
  filters, ignore files etc.).

  The first element of the returned list is the 'main' platform name;
  Windows may additionally carry a version-specific entry.
  """
  if IsLinux():
    return ['linux']
  if IsMac():
    return ['mac']
  if IsWindows():
    result = ['win32']
    version = WindowsVersionName()
    if version is not None:
      result += ['win-%s' % version]
    return result
  raise NotImplementedError('Unknown platform "%s".' % sys.platform)
def PutEnvAndLog(env_name, env_value):
  """Sets |env_name| to |env_value| in the environment and logs it.

  Uses os.environ assignment (which also calls putenv under the hood)
  rather than the previous bare os.putenv: putenv's changes are NOT
  reflected back into os.environ, so in-process readers of
  os.environ/os.getenv would not see the value that was just set.
  """
  os.environ[env_name] = env_value
  logging.info('export %s=%s', env_name, env_value)
def BoringCallers(mangled, use_re_wildcards):
  """Returns the list of 'boring' function names, with */? wildcards.

  Boring = we drop off the bottom of stack traces below such functions.

  Args:
    mangled: pick the C++-mangled spelling instead of the demangled one.
    use_re_wildcards: convert the glob-style */? wildcards into regex .*/. .
  """
  mangling_pairs = [
    # Don't show our testing framework:
    ("testing::Test::Run", "_ZN7testing4Test3RunEv"),
    ("testing::TestInfo::Run", "_ZN7testing8TestInfo3RunEv"),
    ("testing::internal::Handle*ExceptionsInMethodIfSupported*",
     "_ZN7testing8internal3?Handle*ExceptionsInMethodIfSupported*"),

    # Depend on scheduling:
    ("MessageLoop::Run", "_ZN11MessageLoop3RunEv"),
    ("MessageLoop::RunTask", "_ZN11MessageLoop7RunTask*"),
    ("RunnableMethod*", "_ZN14RunnableMethod*"),
    ("DispatchToMethod*", "_Z*16DispatchToMethod*"),
    ("base::internal::Invoker*::DoInvoke*",
     "_ZN4base8internal8Invoker*DoInvoke*"),  # Invoker{1,2,3}
    ("base::internal::RunnableAdapter*::Run*",
     "_ZN4base8internal15RunnableAdapter*Run*"),
  ]
  column = 1 if mangled else 0
  names = [pair[column] for pair in mangling_pairs]
  names += [
    # Also don't show the internals of libc/pthread.
    "start_thread",
    "main",
    "BaseThreadInitThunk",
  ]
  if use_re_wildcards:
    names = [name.replace('*', '.*').replace('?', '.') for name in names]
  return names
def NormalizeWindowsPath(path):
  """If we're using Cygwin Python, turn the path into a Windows path.

  Don't turn forward slashes into backslashes for easier copy-pasting and
  escaping.

  TODO(rnk): If we ever want to cut out the subprocess invocation, we can use
  _winreg to get the root Cygwin directory from the registry key:
  HKEY_LOCAL_MACHINE\SOFTWARE\Cygwin\setup\rootdir.
  """
  # On anything but Cygwin the path is already fine: return it untouched.
  if not sys.platform.startswith("cygwin"):
    return path
  converter = subprocess.Popen(["cygpath", "-m", path],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
  out, err = converter.communicate()
  if err:
    logging.warning("WARNING: cygpath error: %s", err)
  return out.strip()
############################
# Common output format code

def PrintUsedSuppressionsList(suppcounts):
  """ Prints out the list of used suppressions in a format common to all the
  memory tools. If the list is empty, prints nothing and returns False,
  otherwise True.

  suppcounts: a dictionary of used suppression counts,
              Key -> name, Value -> count.
  """
  if not suppcounts:
    return False

  print "-----------------------------------------------------"
  print "Suppressions used:"
  print "  count name"
  # Sort by (count, name) ascending so the noisiest suppressions are printed
  # last, right above the closing separator.
  # NOTE: the tuple-unpacking lambda below is Python-2-only syntax.
  for (name, count) in sorted(suppcounts.items(), key=lambda (k,v): (v,k)):
    print "%7d %s" % (count, name)
  print "-----------------------------------------------------"
  sys.stdout.flush()
  return True
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
''' A bunch of helper functions for querying gdb.'''
import logging
import os
import re
import tempfile
GDB_LINE_RE = re.compile(r'Line ([0-9]*) of "([^"]*)".*')
def _GdbOutputToFileLine(output_line):
''' Parse the gdb output line, return a pair (file, line num) '''
match = GDB_LINE_RE.match(output_line)
if match:
return match.groups()[1], match.groups()[0]
else:
return None
def ResolveAddressesWithinABinary(binary_name, load_address, address_list):
  ''' For each address, return a pair (file, line num) '''
  # Build a one-shot gdb batch script: map the binary at its load address,
  # then request "info line" for every queried address.
  # NOTE(review): writes str to a NamedTemporaryFile — assumes Python 2
  # text-mode tempfiles; confirm before porting to Python 3.
  commands = tempfile.NamedTemporaryFile()
  commands.write('add-symbol-file "%s" %s\n' % (binary_name, load_address))
  for addr in address_list:
    commands.write('info line *%s\n' % addr)
  commands.write('quit\n')
  commands.flush()
  gdb_commandline = 'gdb -batch -x %s 2>/dev/null' % commands.name
  gdb_pipe = os.popen(gdb_commandline)
  result = gdb_pipe.readlines()

  address_count = 0
  ret = {}
  # gdb emits one "Line ..." or "No line ..." response per queried address,
  # in request order, so responses are paired back up positionally.
  for line in result:
    if line.startswith('Line'):
      ret[address_list[address_count]] = _GdbOutputToFileLine(line)
      address_count += 1
    if line.startswith('No line'):
      ret[address_list[address_count]] = (None, None)
      address_count += 1
  gdb_pipe.close()
  commands.close()
  return ret
class AddressTable(object):
  '''Performs batched (file, line number) lookups of code addresses.

  Usage: register binaries with AddBinaryAt(), queue addresses with Add(),
  run the gdb lookups with ResolveAll(), then query with GetFileLine().
  '''

  def __init__(self):
    self._load_addresses = {}   # binary path -> load address
    self._binaries = {}         # binary path -> list of queued addresses
    self._all_resolved = False  # True once ResolveAll() has run

  def AddBinaryAt(self, binary, load_address):
    '''Registers a shared library or executable and its load address.'''
    self._load_addresses[binary] = load_address

  def Add(self, binary, address):
    '''Queues a (binary, address) pair for the next ResolveAll().'''
    if binary == '':
      logging.warn('adding address %s in empty binary?' % address)
    self._binaries.setdefault(binary, []).append(address)
    self._all_resolved = False

  def ResolveAll(self):
    '''Runs every queued lookup through gdb.'''
    self._translation = {}
    for binary, addresses in self._binaries.items():
      if binary != '' and binary in self._load_addresses:
        self._translation[binary] = ResolveAddressesWithinABinary(
            binary, self._load_addresses[binary], addresses)
    self._all_resolved = True

  def GetFileLine(self, binary, addr):
    '''Returns the (file, line) of a previously-resolved lookup, or
    (None, None) when the address is unknown or nothing was resolved yet.'''
    if self._all_resolved:
      per_binary = self._translation.get(binary)
      if per_binary is not None and addr in per_binary:
        return per_binary[addr]
    return (None, None)
......@@ -54,7 +54,7 @@ CHROME_VALGRIND_SCRIPTS=$THISDIR/../../tools/valgrind
if [ "$NEEDS_VALGRIND" == "1" ]
then
CHROME_VALGRIND=`sh $CHROME_VALGRIND_SCRIPTS/locate_valgrind.sh`
CHROME_VALGRIND=`sh $THISDIR/locate_valgrind.sh`
if [ "$CHROME_VALGRIND" = "" ]
then
CHROME_VALGRIND=../../src/third_party/valgrind/linux_x64
......
#!/bin/bash

# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.

# Prints a path to Valgrind binaries to be used for Chromium.
# Select the valgrind from third_party/valgrind by default,
# but allow users to override this default without editing scripts and
# without specifying a commandline option

export THISDIR=`dirname $0`

# User may use their own valgrind by giving its path with CHROME_VALGRIND env.
if [ "$CHROME_VALGRIND" = "" ]
then
  # Guess which binaries we should use by uname
  case "$(uname -a)" in
  *Linux*x86_64*)
    PLATFORM="linux_x64"
    ;;
  *Linux*86*)
    PLATFORM="linux_x86"
    ;;
  *Darwin*9.[678].[01]*i386*)
    # Didn't test other kernels.
    PLATFORM="mac"
    ;;
  *Darwin*10.[0-9].[0-9]*i386*)
    PLATFORM="mac_10.6"
    ;;
  *Darwin*10.[0-9].[0-9]*x86_64*)
    PLATFORM="mac_10.6"
    ;;
  *Darwin*11.[0-9].[0-9]*x86_64*)
    PLATFORM="mac_10.7"
    ;;
  *)
    # Unsupported platform: report on stderr and exit with a distinctive code.
    (echo "Sorry, your platform is not supported:" &&
     uname -a
     echo
     echo "If you're on Mac OS X, please see http://crbug.com/441425") >&2
    exit 42
  esac

  # The binaries should be in third_party/valgrind
  # (checked out from deps/third_party/valgrind/binaries).
  CHROME_VALGRIND="$THISDIR/../../third_party/valgrind/$PLATFORM"

  # TODO(timurrrr): readlink -f is not present on Mac...
  if [ "$PLATFORM" != "mac" ] && \
    [ "$PLATFORM" != "mac_10.6" ] && \
    [ "$PLATFORM" != "mac_10.7" ]
  then
    # Get rid of all "../" dirs
    CHROME_VALGRIND=$(readlink -f $CHROME_VALGRIND)
  fi
fi

# Sanity check: the chosen directory must actually contain a valgrind binary.
if ! test -x $CHROME_VALGRIND/bin/valgrind
then
  echo "Oops, could not find Valgrind binaries in your checkout." >&2
  echo "Please see" >&2
  echo "  http://dev.chromium.org/developers/how-tos/using-valgrind/get-valgrind" >&2
  echo "for the instructions on how to download pre-built binaries." >&2
  exit 1
fi

# The script's result: the selected Valgrind installation dir on stdout.
echo $CHROME_VALGRIND
#!/usr/bin/env python
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# memcheck_analyze.py
''' Given a valgrind XML file, parses errors and uniques them.'''
import gdb_helper
from collections import defaultdict
import hashlib
import logging
import optparse
import os
import re
import subprocess
import sys
import time
from xml.dom.minidom import parse
from xml.parsers.expat import ExpatError
import common
# Global symbol table (yuck) — set to a gdb_helper.AddressTable by
# MemcheckAnalyzer.Report() when gdb-based symbolization is enabled,
# None otherwise.
TheAddressTable = None

# These are regexps that define functions (using C++ mangled names)
# we don't want to see in stack traces while pretty printing
# or generating suppressions.
# Just stop printing the stack/suppression frames when the current one
# matches any of these.
_BORING_CALLERS = common.BoringCallers(mangled=True, use_re_wildcards=True)
def getTextOf(top_node, name):
  ''' Returns all text in all DOM nodes with a certain |name| that are children
  of |top_node|.
  '''
  pieces = []
  for element in top_node.getElementsByTagName(name):
    for child in element.childNodes:
      if child.nodeType == child.TEXT_NODE:
        pieces.append(child.data)
  return "".join(pieces)
def getCDATAOf(top_node, name):
  ''' Returns all CDATA in all DOM nodes with a certain |name| that are children
  of |top_node|, or None when there is no CDATA text at all.
  '''
  chunks = []
  for element in top_node.getElementsByTagName(name):
    for child in element.childNodes:
      if child.nodeType == child.CDATA_SECTION_NODE:
        chunks.append(child.data)
  text = "".join(chunks)
  if text == "":
    return None
  return text
def shortenFilePath(source_dir, directory):
  '''Strips well-known build prefixes (and |source_dir|, when given) from
  |directory|, cutting at the LAST occurrence of each prefix in turn.'''
  prefixes = ["build/src/", "valgrind/coregrind/", "out/Release/../../"]
  if source_dir:
    prefixes = prefixes + [source_dir]
  result = directory
  for prefix in prefixes:
    pos = result.rfind(prefix)
    if pos >= 0:
      result = result[pos + len(prefix):]
  return result
# Constants that give real names to the abbreviations in valgrind XML output.
# These double as the keys of the per-frame dicts built by gatherFrames().
INSTRUCTION_POINTER = "ip"
OBJECT_FILE = "obj"
FUNCTION_NAME = "fn"
SRC_FILE_DIR = "dir"
SRC_FILE_NAME = "file"
SRC_LINE = "line"
def gatherFrames(node, source_dir):
  '''Extracts the <frame> children of |node| into a list of dicts.

  Each dict is keyed by the constants above (ip/obj/fn/dir/file/line), with
  the source directory shortened via shortenFilePath(|source_dir|, ...).
  Stops at the first "boring" frame (test/message-loop internals), dropping
  it and everything below it.  When gdb symbolization is active
  (TheAddressTable is set), frames lacking line info are queued for lookup.
  '''
  frames = []
  for frame in node.getElementsByTagName("frame"):
    frame_dict = {
      INSTRUCTION_POINTER : getTextOf(frame, INSTRUCTION_POINTER),
      OBJECT_FILE         : getTextOf(frame, OBJECT_FILE),
      FUNCTION_NAME       : getTextOf(frame, FUNCTION_NAME),
      SRC_FILE_DIR        : shortenFilePath(
          source_dir, getTextOf(frame, SRC_FILE_DIR)),
      SRC_FILE_NAME       : getTextOf(frame, SRC_FILE_NAME),
      SRC_LINE            : getTextOf(frame, SRC_LINE)
    }

    # Ignore this frame and all the following if it's a "boring" function.
    enough_frames = False
    for regexp in _BORING_CALLERS:
      if re.match("^%s$" % regexp, frame_dict[FUNCTION_NAME]):
        enough_frames = True
        break
    if enough_frames:
      break

    frames += [frame_dict]

    global TheAddressTable
    if TheAddressTable != None and frame_dict[SRC_LINE] == "":
      # Try using gdb
      TheAddressTable.Add(frame_dict[OBJECT_FILE],
                          frame_dict[INSTRUCTION_POINTER])
  return frames
class ValgrindError:
  ''' Takes a <DOM Element: error> node and reads all the data from it. A
  ValgrindError is immutable and is hashed on its pretty printed output.
  '''

  def __init__(self, source_dir, error_node, commandline, testcase):
    ''' Copies all the relevant information out of the DOM and into object
    properties.

    Args:
      error_node: The <error></error> DOM node we're extracting from.
      source_dir: Prefix that should be stripped from the <dir> node.
      commandline: The command that was run under valgrind
      testcase: The test case name, if known.
    '''

    # Valgrind errors contain one <what><stack> pair, plus an optional
    # <auxwhat><stack> pair, plus an optional <origin><what><stack></origin>,
    # plus (since 3.5.0) a <suppression></suppression> pair.
    # (Origin is nicely enclosed; too bad the other two aren't.)
    # The most common way to see all three in one report is
    # a syscall with a parameter that points to uninitialized memory, e.g.
    # Format:
    # <error>
    #   <unique>0x6d</unique>
    #   <tid>1</tid>
    #   <kind>SyscallParam</kind>
    #   <what>Syscall param write(buf) points to uninitialised byte(s)</what>
    #   <stack>
    #     <frame>
    #     ...
    #     </frame>
    #   </stack>
    #   <auxwhat>Address 0x5c9af4f is 7 bytes inside a block of ...</auxwhat>
    #   <stack>
    #     <frame>
    #     ...
    #     </frame>
    #   </stack>
    #   <origin>
    #     <what>Uninitialised value was created by a heap allocation</what>
    #     <stack>
    #       <frame>
    #       ...
    #       </frame>
    #     </stack>
    #   </origin>
    #   <suppression>
    #     <sname>insert_a_suppression_name_here</sname>
    #     <skind>Memcheck:Param</skind>
    #     <skaux>write(buf)</skaux>
    #     <sframe> <fun>__write_nocancel</fun> </sframe>
    #     ...
    #     <sframe> <fun>main</fun> </sframe>
    #     <rawtext>
    # <![CDATA[
    # {
    #    <insert_a_suppression_name_here>
    #    Memcheck:Param
    #    write(buf)
    #    fun:__write_nocancel
    #    ...
    #    fun:main
    # }
    # ]]>
    #     </rawtext>
    #   </suppression>
    # </error>
    #
    # Each frame looks like this:
    #  <frame>
    #    <ip>0x83751BC</ip>
    #    <obj>/data/dkegel/chrome-build/src/out/Release/base_unittests</obj>
    #    <fn>_ZN7testing8internal12TestInfoImpl7RunTestEPNS_8TestInfoE</fn>
    #    <dir>/data/dkegel/chrome-build/src/testing/gtest/src</dir>
    #    <file>gtest-internal-inl.h</file>
    #    <line>655</line>
    #  </frame>
    # although the dir, file, and line elements are missing if there is
    # no debug info.

    self._kind = getTextOf(error_node, "kind")
    self._backtraces = []       # list of [description, frames] pairs
    self._suppression = None    # raw suppression text (CDATA) from valgrind
    self._commandline = commandline
    self._testcase = testcase
    self._additional = []       # descriptions that arrived with no stack

    # Iterate through the nodes, parsing <what|auxwhat><stack> pairs.
    description = None
    for node in error_node.childNodes:
      if node.localName == "what" or node.localName == "auxwhat":
        description = "".join([n.data for n in node.childNodes
                               if n.nodeType == n.TEXT_NODE])
      elif node.localName == "xwhat":
        description = getTextOf(node, "text")
      elif node.localName == "stack":
        assert description
        self._backtraces.append([description, gatherFrames(node, source_dir)])
        description = None
      elif node.localName == "origin":
        description = getTextOf(node, "what")
        stack = node.getElementsByTagName("stack")[0]
        frames = gatherFrames(stack, source_dir)
        self._backtraces.append([description, frames])
        description = None
        stack = None
        frames = None
      elif description and node.localName != None:
        # The lastest description has no stack, e.g. "Address 0x28 is unknown"
        self._additional.append(description)
        description = None

      if node.localName == "suppression":
        self._suppression = getCDATAOf(node, "rawtext");

  def __str__(self):
    ''' Pretty print the type and backtrace(s) of this specific error,
    including suppression (which is just a mangled backtrace).'''
    output = ""
    output += "\n" # Make sure the ### is at the beginning of line.
    output += "### BEGIN MEMORY TOOL REPORT (error hash=#%016X#)\n" % \
        self.ErrorHash()
    if (self._commandline):
      output += self._commandline + "\n"

    output += self._kind + "\n"
    for backtrace in self._backtraces:
      output += backtrace[0] + "\n"
      # NOTE(review): `filter` shadows the Python builtin here; it holds the
      # c++filt subprocess used to demangle all frame names in one batch.
      filter = subprocess.Popen("c++filt -n", stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                shell=True,
                                close_fds=True)
      buf = ""
      for frame in backtrace[1]:
        # Fall back to the raw instruction pointer when no symbol is known.
        buf += (frame[FUNCTION_NAME] or frame[INSTRUCTION_POINTER]) + "\n"
      (stdoutbuf, stderrbuf) = filter.communicate(buf.encode('latin-1'))
      demangled_names = stdoutbuf.split("\n")

      i = 0
      for frame in backtrace[1]:
        output += (" " + demangled_names[i])
        i = i + 1

        global TheAddressTable
        if TheAddressTable != None and frame[SRC_FILE_DIR] == "":
          # Try using gdb
          foo = TheAddressTable.GetFileLine(frame[OBJECT_FILE],
                                            frame[INSTRUCTION_POINTER])
          if foo[0] != None:
            output += (" (" + foo[0] + ":" + foo[1] + ")")
        elif frame[SRC_FILE_DIR] != "":
          output += (" (" + frame[SRC_FILE_DIR] + "/" + frame[SRC_FILE_NAME] +
                     ":" + frame[SRC_LINE] + ")")
        else:
          output += " (" + frame[OBJECT_FILE] + ")"
        output += "\n"

    for additional in self._additional:
      output += additional + "\n"

    assert self._suppression != None, "Your Valgrind doesn't generate " \
        "suppressions - is it too old?"

    if self._testcase:
      output += "The report came from the `%s` test.\n" % self._testcase
    output += "Suppression (error hash=#%016X#):\n" % self.ErrorHash()
    output += (" For more info on using suppressions see "
        "http://dev.chromium.org/developers/tree-sheriffs/sheriff-details-chromium/memory-sheriff#TOC-Suppressing-memory-reports")

    # Widen suppression slightly to make portable between mac and linux
    # TODO(timurrrr): Oops, these transformations should happen
    # BEFORE calculating the hash!
    supp = self._suppression;
    supp = supp.replace("fun:_Znwj", "fun:_Znw*")
    supp = supp.replace("fun:_Znwm", "fun:_Znw*")
    supp = supp.replace("fun:_Znaj", "fun:_Zna*")
    supp = supp.replace("fun:_Znam", "fun:_Zna*")

    # Make suppressions even less platform-dependent.
    for sz in [1, 2, 4, 8]:
      supp = supp.replace("Memcheck:Addr%d" % sz, "Memcheck:Unaddressable")
      supp = supp.replace("Memcheck:Value%d" % sz, "Memcheck:Uninitialized")
    supp = supp.replace("Memcheck:Cond", "Memcheck:Uninitialized")

    # Split into lines so we can enforce length limits
    supplines = supp.split("\n")
    supp = None  # to avoid re-use

    # Truncate at line 26 (VG_MAX_SUPP_CALLERS plus 2 for name and type)
    # or at the first 'boring' caller.
    # (https://bugs.kde.org/show_bug.cgi?id=199468 proposes raising
    # VG_MAX_SUPP_CALLERS, but we're probably fine with it as is.)
    newlen = min(26, len(supplines));

    # Drop boring frames and all the following.
    enough_frames = False
    for frameno in range(newlen):
      for boring_caller in _BORING_CALLERS:
        if re.match("^ +fun:%s$" % boring_caller, supplines[frameno]):
          newlen = frameno
          enough_frames = True
          break
      if enough_frames:
        break
    if (len(supplines) > newlen):
      # Re-close the suppression block after truncation.
      supplines = supplines[0:newlen]
      supplines.append("}")

    for frame in range(len(supplines)):
      # Replace the always-changing anonymous namespace prefix with "*".
      m = re.match("( +fun:)_ZN.*_GLOBAL__N_.*\.cc_" +
                   "[0-9a-fA-F]{8}_[0-9a-fA-F]{8}(.*)",
                   supplines[frame])
      if m:
        supplines[frame] = "*".join(m.groups())

    output += "\n".join(supplines) + "\n"
    output += "### END MEMORY TOOL REPORT (error hash=#%016X#)\n" % \
        self.ErrorHash()

    return output

  def UniqueString(self):
    ''' String to use for object identity. Don't print this, use str(obj)
    instead.'''
    rep = self._kind + " "
    for backtrace in self._backtraces:
      for frame in backtrace[1]:
        rep += frame[FUNCTION_NAME]

        if frame[SRC_FILE_DIR] != "":
          rep += frame[SRC_FILE_DIR] + "/" + frame[SRC_FILE_NAME]
        else:
          rep += frame[OBJECT_FILE]

    return rep

  # This is a device-independent hash identifying the suppression.
  # By printing out this hash we can find duplicate reports between tests and
  # different shards running on multiple buildbots
  def ErrorHash(self):
    '''Returns a 64-bit integer hash of UniqueString() (md5 prefix).'''
    return int(hashlib.md5(self.UniqueString()).hexdigest()[:16], 16)

  def __hash__(self):
    return hash(self.UniqueString())

  def __eq__(self, rhs):
    # NOTE(review): compares the unique string against |rhs| directly.  This
    # presumably relies on Python retrying the comparison via rhs.__eq__ when
    # rhs is another ValgrindError, and on plain string equality when callers
    # store marker strings in the same set (see MemcheckAnalyzer.Report) —
    # confirm before changing.
    return self.UniqueString() == rhs
def log_is_finished(f, force_finish):
  '''Checks whether the valgrind XML log in file object |f| is complete.

  Scans from the start for the closing </valgrindoutput> tag, truncating any
  trailing garbage after it (valgrind often leaves junk there on a crash).
  When |force_finish| is set and the log ends right after a well-known
  closing tag, appends the missing root tag to make the file parseable.

  Returns True when the log is (now) complete, False otherwise.
  '''
  f.seek(0)
  previous = ""
  line = f.readline()
  while line != "":
    if '</valgrindoutput>' in line:
      # Valgrind often has garbage after </valgrindoutput> upon crash.
      f.truncate()
      return True
    previous = line
    line = f.readline()
  # Reached EOF without seeing the closing root tag.
  if not force_finish:
    return False
  # Okay, the log is not finished but we can make it up to be parseable:
  if previous.strip() in ["</error>", "</errorcounts>", "</status>"]:
    f.write("</valgrindoutput>\n")
    return True
  return False
class MemcheckAnalyzer:
''' Given a set of Valgrind XML files, parse all the errors out of them,
unique them and output the results.'''
SANITY_TEST_SUPPRESSIONS = {
"Memcheck sanity test 01 (memory leak).": 1,
"Memcheck sanity test 02 (malloc/read left).": 1,
"Memcheck sanity test 03 (malloc/read right).": 1,
"Memcheck sanity test 04 (malloc/write left).": 1,
"Memcheck sanity test 05 (malloc/write right).": 1,
"Memcheck sanity test 06 (new/read left).": 1,
"Memcheck sanity test 07 (new/read right).": 1,
"Memcheck sanity test 08 (new/write left).": 1,
"Memcheck sanity test 09 (new/write right).": 1,
"Memcheck sanity test 10 (write after free).": 1,
"Memcheck sanity test 11 (write after delete).": 1,
"Memcheck sanity test 12 (array deleted without []).": 1,
"Memcheck sanity test 13 (single element deleted with []).": 1,
"Memcheck sanity test 14 (malloc/read uninit).": 1,
"Memcheck sanity test 15 (new/read uninit).": 1,
}
# Max time to wait for memcheck logs to complete.
LOG_COMPLETION_TIMEOUT = 180.0
def __init__(self, source_dir, show_all_leaks=False, use_gdb=False):
'''Create a parser for Memcheck logs.
Args:
source_dir: Path to top of source tree for this build
show_all_leaks: Whether to show even less important leaks
use_gdb: Whether to use gdb to resolve source filenames and line numbers
in the report stacktraces
'''
self._source_dir = source_dir
self._show_all_leaks = show_all_leaks
self._use_gdb = use_gdb
# Contains the set of unique errors
self._errors = set()
# Contains the time when the we started analyzing the first log file.
# This variable is used to skip incomplete logs after some timeout.
self._analyze_start_time = None
def Report(self, files, testcase, check_sanity=False):
'''Reads in a set of files and prints Memcheck report.
Args:
files: A list of filenames.
check_sanity: if true, search for SANITY_TEST_SUPPRESSIONS
'''
# Beyond the detailed errors parsed by ValgrindError above,
# the xml file contain records describing suppressions that were used:
# <suppcounts>
# <pair>
# <count>28</count>
# <name>pango_font_leak_todo</name>
# </pair>
# <pair>
# <count>378</count>
# <name>bug_13243</name>
# </pair>
# </suppcounts
# Collect these and print them at the end.
#
# With our patch for https://bugs.kde.org/show_bug.cgi?id=205000 in,
# the file also includes records of the form
# <load_obj><obj>/usr/lib/libgcc_s.1.dylib</obj><ip>0x27000</ip></load_obj>
# giving the filename and load address of each binary that was mapped
# into the process.
global TheAddressTable
if self._use_gdb:
TheAddressTable = gdb_helper.AddressTable()
else:
TheAddressTable = None
cur_report_errors = set()
suppcounts = defaultdict(int)
badfiles = set()
if self._analyze_start_time == None:
self._analyze_start_time = time.time()
start_time = self._analyze_start_time
parse_failed = False
for file in files:
# Wait up to three minutes for valgrind to finish writing all files,
# but after that, just skip incomplete files and warn.
f = open(file, "r+")
pid = re.match(".*\.([0-9]+)$", file)
if pid:
pid = pid.groups()[0]
found = False
running = True
firstrun = True
skip = False
origsize = os.path.getsize(file)
while (running and not found and not skip and
(firstrun or
((time.time() - start_time) < self.LOG_COMPLETION_TIMEOUT))):
firstrun = False
f.seek(0)
if pid:
# Make sure the process is still running so we don't wait for
# 3 minutes if it was killed. See http://crbug.com/17453
ps_out = subprocess.Popen("ps p %s" % pid, shell=True,
stdout=subprocess.PIPE).stdout
if len(ps_out.readlines()) < 2:
running = False
else:
skip = True
running = False
found = log_is_finished(f, False)
if not running and not found:
logging.warn("Valgrind process PID = %s is not running but its "
"XML log has not been finished correctly.\n"
"Make it up by adding some closing tags manually." % pid)
found = log_is_finished(f, not running)
if running and not found:
time.sleep(1)
f.close()
if not found:
badfiles.add(file)
else:
newsize = os.path.getsize(file)
if origsize > newsize+1:
logging.warn(str(origsize - newsize) +
" bytes of junk were after </valgrindoutput> in %s!" %
file)
try:
parsed_file = parse(file);
except ExpatError, e:
parse_failed = True
logging.warn("could not parse %s: %s" % (file, e))
lineno = e.lineno - 1
context_lines = 5
context_start = max(0, lineno - context_lines)
context_end = lineno + context_lines + 1
context_file = open(file, "r")
for i in range(0, context_start):
context_file.readline()
for i in range(context_start, context_end):
context_data = context_file.readline().rstrip()
if i != lineno:
logging.warn(" %s" % context_data)
else:
logging.warn("> %s" % context_data)
context_file.close()
continue
if TheAddressTable != None:
load_objs = parsed_file.getElementsByTagName("load_obj")
for load_obj in load_objs:
obj = getTextOf(load_obj, "obj")
ip = getTextOf(load_obj, "ip")
TheAddressTable.AddBinaryAt(obj, ip)
commandline = None
preamble = parsed_file.getElementsByTagName("preamble")[0];
for node in preamble.getElementsByTagName("line"):
if node.localName == "line":
for x in node.childNodes:
if x.nodeType == node.TEXT_NODE and "Command" in x.data:
commandline = x.data
break
raw_errors = parsed_file.getElementsByTagName("error")
for raw_error in raw_errors:
# Ignore "possible" leaks for now by default.
if (self._show_all_leaks or
getTextOf(raw_error, "kind") != "Leak_PossiblyLost"):
error = ValgrindError(self._source_dir,
raw_error, commandline, testcase)
if error not in cur_report_errors:
# We haven't seen such errors doing this report yet...
if error in self._errors:
# ... but we saw it in earlier reports, e.g. previous UI test
cur_report_errors.add("This error was already printed in "
"some other test, see 'hash=#%016X#'" % \
error.ErrorHash())
else:
# ... and we haven't seen it in other tests as well
self._errors.add(error)
cur_report_errors.add(error)
suppcountlist = parsed_file.getElementsByTagName("suppcounts")
if len(suppcountlist) > 0:
suppcountlist = suppcountlist[0]
for node in suppcountlist.getElementsByTagName("pair"):
count = getTextOf(node, "count");
name = getTextOf(node, "name");
suppcounts[name] += int(count)
if len(badfiles) > 0:
logging.warn("valgrind didn't finish writing %d files?!" % len(badfiles))
for file in badfiles:
logging.warn("Last 20 lines of %s :" % file)
os.system("tail -n 20 '%s' 1>&2" % file)
if parse_failed:
logging.error("FAIL! Couldn't parse Valgrind output file")
return -2
common.PrintUsedSuppressionsList(suppcounts)
retcode = 0
if cur_report_errors:
logging.error("FAIL! There were %s errors: " % len(cur_report_errors))
if TheAddressTable != None:
TheAddressTable.ResolveAll()
for error in cur_report_errors:
logging.error(error)
retcode = -1
# Report tool's insanity even if there were errors.
if check_sanity:
remaining_sanity_supp = MemcheckAnalyzer.SANITY_TEST_SUPPRESSIONS
for (name, count) in suppcounts.iteritems():
# Workaround for http://crbug.com/334074
if (name in remaining_sanity_supp and
remaining_sanity_supp[name] <= count):
del remaining_sanity_supp[name]
if remaining_sanity_supp:
logging.error("FAIL! Sanity check failed!")
logging.info("The following test errors were not handled: ")
for (name, count) in remaining_sanity_supp.iteritems():
logging.info(" * %dx %s" % (count, name))
retcode = -3
if retcode != 0:
return retcode
logging.info("PASS! No errors found!")
return 0
def _main():
  """For testing only. The MemcheckAnalyzer class should be imported instead."""
  parser = optparse.OptionParser("usage: %prog [options] <files to analyze>")
  parser.add_option("", "--source-dir",
                    help="path to top of source tree for this build"
                         "(used to normalize source paths in baseline)")
  options, args = parser.parse_args()
  if not args:
    parser.error("no filename specified")
  # Every positional argument is a log file to analyze.
  analyzer = MemcheckAnalyzer(options.source_dir, use_gdb=True)
  return analyzer.Report(args, None)
if __name__ == "__main__":
  sys.exit(_main())
#!/bin/bash
# Copyright (c) 2017 The LibYuv Project Authors. All rights reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# This is a small script for manually launching valgrind, along with passing
# it the suppression file, and some helpful arguments (automatically attaching
# the debugger on failures, etc). Run it from your repo root, something like:
# $ sh ./tools/valgrind/valgrind.sh ./out/Debug/chrome
#
# This is mostly intended for running the chrome browser interactively.
# To run unit tests, you probably want to run chrome_tests.sh instead.
# That's the script used by the valgrind buildbot.
# Directory holding this script; suppressions and helpers live next to it.
export THISDIR=`dirname $0`
# Configure RUN_COMMAND and DEFAULT_TOOL_FLAGS for Valgrind's Memcheck tool.
setup_memcheck() {
  RUN_COMMAND="valgrind"
  # Prompt to attach gdb when there was an error detected.
  DEFAULT_TOOL_FLAGS=("--db-command=gdb -nw %f %p" "--db-attach=yes" \
                      # Keep the registers in gdb in sync with the code.
                      "--vex-iropt-register-updates=allregs-at-mem-access" \
                      # Overwrite newly allocated or freed objects
                      # with 0x41 to catch inproper use.
                      "--malloc-fill=41" "--free-fill=41" \
                      # Increase the size of stacks being tracked.
                      "--num-callers=30")
}
# Fallback when an unrecognized tool name was requested: run it anyway,
# but with no extra flags and no guarantees.
setup_unknown() {
  echo "Unknown tool \"$TOOL_NAME\" specified, the result is not guaranteed"
  DEFAULT_TOOL_FLAGS=()
}
set -e
# At least the command to run under valgrind is required.
if [ $# -eq 0 ]; then
  echo "usage: <command to run> <arguments ...>"
  exit 1
fi
TOOL_NAME="memcheck"
declare -a DEFAULT_TOOL_FLAGS[0]
# Select a tool different from memcheck with --tool=TOOL as a first argument
TMP_STR=`echo $1 | sed 's/^\-\-tool=//'`
if [ "$TMP_STR" != "$1" ]; then
  TOOL_NAME="$TMP_STR"
  shift
fi
# A --tool flag anywhere later in the argument list is an error.
if echo "$@" | grep "\-\-tool" ; then
  echo "--tool=TOOL must be the first argument" >&2
  exit 1
fi
# Dispatch to the per-tool setup function.
case $TOOL_NAME in
  memcheck*) setup_memcheck "$1";;
  *) setup_unknown;;
esac
# Per-tool suppression file, e.g. tools/valgrind/memcheck/suppressions.txt.
SUPPRESSIONS="$THISDIR/$TOOL_NAME/suppressions.txt"
CHROME_VALGRIND=`sh $THISDIR/locate_valgrind.sh`
if [ "$CHROME_VALGRIND" = "" ]
then
  # locate_valgrind.sh failed
  exit 1
fi
echo "Using valgrind binaries from ${CHROME_VALGRIND}"
set -x
PATH="${CHROME_VALGRIND}/bin:$PATH"
# We need to set these variables to override default lib paths hard-coded into
# Valgrind binary.
export VALGRIND_LIB="$CHROME_VALGRIND/lib/valgrind"
export VALGRIND_LIB_INNER="$CHROME_VALGRIND/lib/valgrind"
# G_SLICE=always-malloc: make glib use system malloc
# NSS_DISABLE_UNLOAD=1: make nss skip dlclosing dynamically loaded modules,
# which would result in "obj:*" in backtraces.
# NSS_DISABLE_ARENA_FREE_LIST=1: make nss use system malloc
# G_DEBUG=fatal_warnings: make GTK abort on any critical or warning assertions.
# If it crashes on you in the Options menu, you hit bug 19751,
# comment out the G_DEBUG=fatal_warnings line.
#
# GTEST_DEATH_TEST_USE_FORK=1: make gtest death tests valgrind-friendly
#
# When everyone has the latest valgrind, we might want to add
# --show-possibly-lost=no
# to ignore possible but not definite leaks.
G_SLICE=always-malloc \
NSS_DISABLE_UNLOAD=1 \
NSS_DISABLE_ARENA_FREE_LIST=1 \
G_DEBUG=fatal_warnings \
GTEST_DEATH_TEST_USE_FORK=1 \
$RUN_COMMAND \
  --trace-children=yes \
  --leak-check=yes \
  --suppressions="$SUPPRESSIONS" \
  "${DEFAULT_TOOL_FLAGS[@]}" \
  "$@"
#!/usr/bin/env python
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Runs an exe through Valgrind and puts the intermediate files in a
directory.
"""
import datetime
import glob
import logging
import optparse
import os
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import common
import memcheck_analyze
class BaseTool(object):
  """Abstract class for running dynamic error detection tools.

  Always subclass this and implement ToolCommand with framework- and
  tool-specific stuff.
  """
  def __init__(self):
    """Creates the per-run temp/log directories and the env overrides."""
    temp_parent_dir = None
    self.log_parent_dir = ""
    if common.IsWindows():
      # gpu process on Windows Vista+ runs at Low Integrity and can only
      # write to certain directories (http://crbug.com/119131)
      #
      # TODO(bruening): if scripts die in middle and don't clean up temp
      # dir, we'll accumulate files in profile dir. should remove
      # really old files automatically.
      profile = os.getenv("USERPROFILE")
      if profile:
        self.log_parent_dir = profile + "\\AppData\\LocalLow\\"
        if os.path.exists(self.log_parent_dir):
          self.log_parent_dir = common.NormalizeWindowsPath(self.log_parent_dir)
          temp_parent_dir = self.log_parent_dir
    # Generated every time (even when overridden)
    self.temp_dir = tempfile.mkdtemp(prefix="vg_logs_", dir=temp_parent_dir)
    self.log_dir = self.temp_dir # overridable by --keep_logs
    # Hooks registered via RegisterOptionParserHook; each receives
    # (tool, parser) in CreateOptionParser.
    self.option_parser_hooks = []
    # TODO(glider): we may not need some of the env vars on some of the
    # platforms.
    # Environment overrides exported before running the test binary (see
    # Execute): force plain malloc in glib/NSS and make gtest death tests
    # fork instead of clone so they stay valgrind-friendly.
    self._env = {
      "G_SLICE" : "always-malloc",
      "NSS_DISABLE_UNLOAD" : "1",
      "NSS_DISABLE_ARENA_FREE_LIST" : "1",
      "GTEST_DEATH_TEST_USE_FORK": "1",
    }
  def ToolName(self):
    """Returns the tool name; used in log file and log directory names."""
    raise NotImplementedError, "This method should be implemented " \
                               "in the tool-specific subclass"
  def Analyze(self, check_sanity=False):
    """Analyzes the tool's output logs; returns 0 on success."""
    raise NotImplementedError, "This method should be implemented " \
                               "in the tool-specific subclass"
  def RegisterOptionParserHook(self, hook):
    # Frameworks and tools can add their own flags to the parser.
    self.option_parser_hooks.append(hook)
  def CreateOptionParser(self):
    """Builds self._parser with the common flags plus all registered hooks."""
    # Defines Chromium-specific flags.
    self._parser = optparse.OptionParser("usage: %prog [options] <program to "
                                         "test>")
    self._parser.disable_interspersed_args()
    self._parser.add_option("-t", "--timeout",
                            dest="timeout", metavar="TIMEOUT", default=10000,
                            help="timeout in seconds for the run (default 10000)")
    self._parser.add_option("", "--build-dir",
                            help="the location of the compiler output")
    self._parser.add_option("", "--source-dir",
                            help="path to top of source tree for this build"
                                 "(used to normalize source paths in baseline)")
    self._parser.add_option("", "--gtest_filter", default="",
                            help="which test case to run")
    self._parser.add_option("", "--gtest_repeat",
                            help="how many times to run each test")
    self._parser.add_option("", "--gtest_print_time", action="store_true",
                            default=False,
                            help="show how long each test takes")
    self._parser.add_option("", "--ignore_exit_code", action="store_true",
                            default=False,
                            help="ignore exit code of the test "
                                 "(e.g. test failures)")
    self._parser.add_option("", "--keep_logs", action="store_true",
                            default=False,
                            help="store memory tool logs in the <tool>.logs "
                                 "directory instead of /tmp.\nThis can be "
                                 "useful for tool developers/maintainers.\n"
                                 "Please note that the <tool>.logs directory "
                                 "will be clobbered on tool startup.")
    # To add framework- or tool-specific flags, please add a hook using
    # RegisterOptionParserHook in the corresponding subclass.
    # See ValgrindTool for an example.
    for hook in self.option_parser_hooks:
      hook(self, self._parser)
  def ParseArgv(self, args):
    """Parses |args|; returns True on success.

    Flags our parser doesn't know are collected into self._tool_flags and
    passed through to the underlying tool; everything from the first
    non-flag argument onward is treated as the program and its arguments.
    """
    self.CreateOptionParser()
    # self._tool_flags will store those tool flags which we don't parse
    # manually in this script.
    self._tool_flags = []
    known_args = []
    """ We assume that the first argument not starting with "-" is a program
    name and all the following flags should be passed to the program.
    TODO(timurrrr): customize optparse instead
    """
    while len(args) > 0 and args[0][:1] == "-":
      arg = args[0]
      if (arg == "--"):
        break
      if self._parser.has_option(arg.split("=")[0]):
        known_args += [arg]
      else:
        self._tool_flags += [arg]
      args = args[1:]
    if len(args) > 0:
      known_args += args
    self._options, self._args = self._parser.parse_args(known_args)
    self._timeout = int(self._options.timeout)
    self._source_dir = self._options.source_dir
    if self._options.keep_logs:
      # log_parent_dir has trailing slash if non-empty
      self.log_dir = self.log_parent_dir + "%s.logs" % self.ToolName()
      if os.path.exists(self.log_dir):
        shutil.rmtree(self.log_dir)
      os.mkdir(self.log_dir)
      logging.info("Logs are in " + self.log_dir)
    self._ignore_exit_code = self._options.ignore_exit_code
    # Re-append the gtest flags we parsed so the test binary sees them too.
    if self._options.gtest_filter != "":
      self._args.append("--gtest_filter=%s" % self._options.gtest_filter)
    if self._options.gtest_repeat:
      self._args.append("--gtest_repeat=%s" % self._options.gtest_repeat)
    if self._options.gtest_print_time:
      self._args.append("--gtest_print_time")
    return True
  def Setup(self, args):
    """Parses the command line; override for tool-specific setup steps."""
    return self.ParseArgv(args)
  def ToolCommand(self):
    """Returns the full command line (a list) that runs the tool."""
    raise NotImplementedError, "This method should be implemented " \
                               "in the tool-specific subclass"
  def Cleanup(self):
    # You may override it in the tool-specific subclass
    pass
  def Execute(self):
    """ Execute the app to be tested after successful instrumentation.
    Full execution command-line provided by subclassers via proc."""
    logging.info("starting execution...")
    proc = self.ToolCommand()
    for var in self._env:
      common.PutEnvAndLog(var, self._env[var])
    return common.RunSubprocess(proc, self._timeout)
  def RunTestsAndAnalyze(self, check_sanity):
    """Runs the test binary, then the analyzer; returns a nonzero retcode
    if either step failed (analysis failures take precedence)."""
    exec_retcode = self.Execute()
    analyze_retcode = self.Analyze(check_sanity)
    if analyze_retcode:
      logging.error("Analyze failed.")
      logging.info("Search the log for '[ERROR]' to see the error reports.")
      return analyze_retcode
    if exec_retcode:
      if self._ignore_exit_code:
        logging.info("Test execution failed, but the exit code is ignored.")
      else:
        logging.error("Test execution failed.")
        return exec_retcode
    else:
      logging.info("Test execution completed successfully.")
    if not analyze_retcode:
      logging.info("Analysis completed successfully.")
    return 0
  def Main(self, args, check_sanity, min_runtime_in_seconds):
    """Call this to run through the whole process: Setup, Execute, Analyze"""
    start_time = datetime.datetime.now()
    retcode = -1
    if self.Setup(args):
      retcode = self.RunTestsAndAnalyze(check_sanity)
      shutil.rmtree(self.temp_dir, ignore_errors=True)
      self.Cleanup()
    else:
      logging.error("Setup failed")
    end_time = datetime.datetime.now()
    runtime_in_seconds = (end_time - start_time).seconds
    # H:M:S split; relies on Python 2 integer division.
    hours = runtime_in_seconds / 3600
    seconds = runtime_in_seconds % 3600
    minutes = seconds / 60
    seconds = seconds % 60
    logging.info("elapsed time: %02d:%02d:%02d" % (hours, minutes, seconds))
    if (min_runtime_in_seconds > 0 and
        runtime_in_seconds < min_runtime_in_seconds):
      logging.error("Layout tests finished too quickly. "
                    "It should have taken at least %d seconds. "
                    "Something went wrong?" % min_runtime_in_seconds)
      retcode = -1
    return retcode
  def Run(self, args, module, min_runtime_in_seconds=0):
    """Runs the tool on |args|; sanity checks are enabled only for |module|
    values listed in MODULES_TO_SANITY_CHECK."""
    MODULES_TO_SANITY_CHECK = ["base"]
    check_sanity = module in MODULES_TO_SANITY_CHECK
    return self.Main(args, check_sanity, min_runtime_in_seconds)
class ValgrindTool(BaseTool):
  """Abstract class for running Valgrind tools.

  Always subclass this and implement ToolSpecificFlags() and
  ExtendOptionParser() for tool-specific stuff.
  """
  def __init__(self):
    super(ValgrindTool, self).__init__()
    self.RegisterOptionParserHook(ValgrindTool.ExtendOptionParser)
  def UseXML(self):
    # Override if tool prefers nonxml output
    return True
  def ExtendOptionParser(self, parser):
    """Adds the Valgrind-generic command line flags to |parser|."""
    parser.add_option("", "--suppressions", default=[],
                      action="append",
                      help="path to a valgrind suppression file")
    parser.add_option("", "--indirect", action="store_true",
                      default=False,
                      help="set BROWSER_WRAPPER rather than "
                           "running valgrind directly")
    parser.add_option("", "--indirect_webkit_layout", action="store_true",
                      default=False,
                      help="set --wrapper rather than running Dr. Memory "
                           "directly.")
    parser.add_option("", "--trace_children", action="store_true",
                      default=False,
                      help="also trace child processes")
    parser.add_option("", "--num-callers",
                      dest="num_callers", default=30,
                      help="number of callers to show in stack traces")
    parser.add_option("", "--generate_dsym", action="store_true",
                      default=False,
                      help="Generate .dSYM file on Mac if needed. Slow!")
  def Setup(self, args):
    """Delegates to BaseTool.Setup; kept as an override point for subclasses."""
    if not BaseTool.Setup(self, args):
      return False
    return True
  def ToolCommand(self):
    """Get the valgrind command to run."""
    # Note that self._args begins with the exe to be run.
    tool_name = self.ToolName()
    # Construct the valgrind command.
    # CHROME_VALGRIND (if set) points at a checked-in valgrind build;
    # otherwise fall back to whatever "valgrind" is on PATH.
    if 'CHROME_VALGRIND' in os.environ:
      path = os.path.join(os.environ['CHROME_VALGRIND'], "bin", "valgrind")
    else:
      path = "valgrind"
    proc = [path, "--tool=%s" % tool_name]
    proc += ["--num-callers=%i" % int(self._options.num_callers)]
    if self._options.trace_children:
      proc += ["--trace-children=yes"]
      # Skip helper processes that can't stand being valgrinded.
      proc += ["--trace-children-skip='*dbus-daemon*'"]
      proc += ["--trace-children-skip='*dbus-launch*'"]
      proc += ["--trace-children-skip='*perl*'"]
      proc += ["--trace-children-skip='*python*'"]
      # This is really Python, but for some reason Valgrind follows it.
      proc += ["--trace-children-skip='*lsb_release*'"]
    proc += self.ToolSpecificFlags()
    # Pass through flags our option parser didn't recognize (see ParseArgv).
    proc += self._tool_flags
    suppression_count = 0
    for suppression_file in self._options.suppressions:
      if os.path.exists(suppression_file):
        suppression_count += 1
        proc += ["--suppressions=%s" % suppression_file]
    if not suppression_count:
      logging.warning("WARNING: NOT USING SUPPRESSIONS!")
    # "%p" is expanded by valgrind to the PID of the traced process.
    logfilename = self.log_dir + ("/%s." % tool_name) + "%p"
    if self.UseXML():
      proc += ["--xml=yes", "--xml-file=" + logfilename]
    else:
      proc += ["--log-file=" + logfilename]
    # The Valgrind command is constructed.
    # Handle --indirect_webkit_layout separately.
    if self._options.indirect_webkit_layout:
      # Need to create the wrapper before modifying |proc|.
      wrapper = self.CreateBrowserWrapper(proc, webkit=True)
      proc = self._args
      proc.append("--wrapper")
      proc.append(wrapper)
      return proc
    if self._options.indirect:
      wrapper = self.CreateBrowserWrapper(proc)
      os.environ["BROWSER_WRAPPER"] = wrapper
      logging.info('export BROWSER_WRAPPER=' + wrapper)
      # The test harness itself runs un-valgrinded; only the browser it
      # launches goes through the wrapper.
      proc = []
    proc += self._args
    return proc
  def ToolSpecificFlags(self):
    """Returns the tool-specific part of the valgrind command line."""
    raise NotImplementedError, "This method should be implemented " \
                               "in the tool-specific subclass"
  def CreateBrowserWrapper(self, proc, webkit=False):
    """The program being run invokes Python or something else that can't stand
    to be valgrinded, and also invokes the Chrome browser. In this case, use a
    magic wrapper to only valgrind the Chrome browser. Build the wrapper here.
    Returns the path to the wrapper. It's up to the caller to use the wrapper
    appropriately.
    """
    command = " ".join(proc)
    # Add the PID of the browser wrapper to the logfile names so we can
    # separate log files for different UI tests at the analyze stage.
    command = command.replace("%p", "$$.%p")
    (fd, indirect_fname) = tempfile.mkstemp(dir=self.log_dir,
                                            prefix="browser_wrapper.",
                                            text=True)
    f = os.fdopen(fd, "w")
    f.write('#!/bin/bash\n'
            'echo "Started Valgrind wrapper for this test, PID=$$" >&2\n')
    f.write('DIR=`dirname $0`\n'
            'TESTNAME_FILE=$DIR/testcase.$$.name\n\n')
    if webkit:
      # Webkit layout_tests pass the URL as the first line of stdin.
      f.write('tee $TESTNAME_FILE | %s "$@"\n' % command)
    else:
      # Try to get the test case name by looking at the program arguments.
      # i.e. Chromium ui_tests used --test-name arg.
      # TODO(timurrrr): This doesn't handle "--test-name Test.Name"
      # TODO(timurrrr): ui_tests are dead. Where do we use the non-webkit
      # wrapper now? browser_tests? What do they do?
      f.write('for arg in $@\ndo\n'
              '  if [[ "$arg" =~ --test-name=(.*) ]]\n  then\n'
              '    echo ${BASH_REMATCH[1]} >$TESTNAME_FILE\n'
              '  fi\n'
              'done\n\n'
              '%s "$@"\n' % command)
    f.close()
    # Wrapper must be executable by the harness that invokes it.
    os.chmod(indirect_fname, stat.S_IRUSR|stat.S_IXUSR)
    return indirect_fname
  def CreateAnalyzer(self):
    """Returns the analyzer object used by GetAnalyzeResults."""
    raise NotImplementedError, "This method should be implemented " \
                               "in the tool-specific subclass"
  def GetAnalyzeResults(self, check_sanity=False):
    """Feeds the collected log files to the analyzer.

    Returns 0 on success; a nonzero OR of per-wrapper report codes otherwise.
    """
    # Glob all the files in the log directory
    filenames = glob.glob(self.log_dir + "/" + self.ToolName() + ".*")
    # If we have browser wrapper, the logfiles are named as
    # "toolname.wrapper_PID.valgrind_PID".
    # Let's extract the list of wrapper_PIDs and name it ppids
    ppids = set([int(f.split(".")[-2]) \
                for f in filenames if re.search("\.[0-9]+\.[0-9]+$", f)])
    analyzer = self.CreateAnalyzer()
    if len(ppids) == 0:
      # Fast path - no browser wrapper was set.
      return analyzer.Report(filenames, None, check_sanity)
    ret = 0
    for ppid in ppids:
      testcase_name = None
      try:
        # The wrapper script wrote the test name next to its logs.
        f = open(self.log_dir + ("/testcase.%d.name" % ppid))
        testcase_name = f.read().strip()
        f.close()
        wk_layout_prefix="third_party/WebKit/LayoutTests/"
        wk_prefix_at = testcase_name.rfind(wk_layout_prefix)
        if wk_prefix_at != -1:
          testcase_name = testcase_name[wk_prefix_at + len(wk_layout_prefix):]
      except IOError:
        pass
      print "====================================================="
      print " Below is the report for valgrind wrapper PID=%d." % ppid
      if testcase_name:
        print " It was used while running the `%s` test." % testcase_name
      else:
        print " You can find the corresponding test"
        print " by searching the above log for 'PID=%d'" % ppid
      sys.stdout.flush()
      ppid_filenames = [f for f in filenames \
                        if re.search("\.%d\.[0-9]+$" % ppid, f)]
      # check_sanity won't work with browser wrappers
      assert check_sanity == False
      ret |= analyzer.Report(ppid_filenames, testcase_name)
      print "====================================================="
      sys.stdout.flush()
    if ret != 0:
      print ""
      print "The Valgrind reports are grouped by test names."
      print "Each test has its PID printed in the log when the test was run"
      print "and at the beginning of its Valgrind report."
      print "Hint: you can search for the reports by Ctrl+F -> `=#`"
      sys.stdout.flush()
    return ret
# TODO(timurrrr): Split into a separate file.
class Memcheck(ValgrindTool):
  """Memcheck
  Dynamic memory error detector for Linux & Mac
  http://valgrind.org/info/tools.html#memcheck
  """
  def __init__(self):
    super(Memcheck, self).__init__()
    self.RegisterOptionParserHook(Memcheck.ExtendOptionParser)
  def ToolName(self):
    """Name used for the --tool flag and log file prefixes."""
    return "memcheck"
  def ExtendOptionParser(self, parser):
    """Adds the Memcheck-specific command line flags to |parser|."""
    parser.add_option("--leak-check", "--leak_check", type="string",
                      default="yes", # --leak-check=yes is equivalent of =full
                      help="perform leak checking at the end of the run")
    parser.add_option("", "--show_all_leaks", action="store_true",
                      default=False,
                      help="also show less blatant leaks")
    parser.add_option("", "--track_origins", action="store_true",
                      default=False,
                      help="Show whence uninitialized bytes came. 30% slower.")
  def ToolSpecificFlags(self):
    """Builds the Memcheck-specific part of the valgrind command line."""
    opts = self._options
    flags = [
        "--gen-suppressions=all",
        "--demangle=no",
        "--leak-check=%s" % opts.leak_check,
    ]
    if opts.show_all_leaks:
      flags.append("--show-reachable=yes")
    else:
      flags.append("--show-possibly-lost=no")
    if opts.track_origins:
      flags.append("--track-origins=yes")
    # TODO(glider): this is a temporary workaround for http://crbug.com/51716
    # Let's see whether it helps.
    if common.IsMac():
      flags.append("--smc-check=all")
    return flags
  def CreateAnalyzer(self):
    """Builds the analyzer that post-processes Memcheck's XML logs."""
    return memcheck_analyze.MemcheckAnalyzer(self._source_dir,
                                             self._options.show_all_leaks,
                                             use_gdb=common.IsMac())
  def Analyze(self, check_sanity=False):
    """Analyzes collected logs; points at the Valgrind docs on failure."""
    result = self.GetAnalyzeResults(check_sanity)
    if result != 0:
      logging.info("Please see http://dev.chromium.org/developers/how-tos/"
                   "using-valgrind for the info on Memcheck/Valgrind")
    return result
class ToolFactory:
  """Maps a tool name to a concrete tool instance."""
  def Create(self, tool_name):
    """Returns the tool object for |tool_name|; raises RuntimeError if
    the name is unknown on this platform."""
    if tool_name == "memcheck":
      return Memcheck()
    try:
      platform_name = common.PlatformNames()[0]
    except common.NotImplementedError:
      # NOTE(review): assumes common.py exposes a NotImplementedError
      # attribute — the builtin is not reachable through a module; confirm
      # against common.py.
      platform_name = "%s(Unknown)" % sys.platform
    raise RuntimeError("Unknown tool (tool=%s, platform=%s)" % (tool_name,
                                                                platform_name))
def CreateTool(tool):
  """Convenience entry point: instantiates the tool named |tool|."""
  factory = ToolFactory()
  return factory.Create(tool)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment