Diffstat (limited to 'tools')
-rw-r--r--   tools/drmemory/DrMemory-Windows-sfx.exe    bin 20374113 -> 0 bytes
-rw-r--r--   tools/drmemory/README                        86
-rw-r--r--   tools/drmemory/scripts/common.py            252
-rw-r--r--   tools/drmemory/scripts/drmemory_analyze.py  202
-rw-r--r--   tools/drmemory/scripts/logging_utils.py      82
-rw-r--r--   tools/drmemory/scripts/path_utils.py         84
-rw-r--r--   tools/drmemory/scripts/pdfium_tests.bat      24
-rw-r--r--   tools/drmemory/scripts/pdfium_tests.py      399
-rw-r--r--   tools/drmemory/scripts/valgrind_test.py     487
-rw-r--r--   tools/drmemory/suppressions.txt              28
10 files changed, 0 insertions, 1644 deletions
diff --git a/tools/drmemory/DrMemory-Windows-sfx.exe b/tools/drmemory/DrMemory-Windows-sfx.exe
deleted file mode 100644
index cd3992b84e..0000000000
--- a/tools/drmemory/DrMemory-Windows-sfx.exe
+++ /dev/null
Binary files differ
diff --git a/tools/drmemory/README b/tools/drmemory/README
deleted file mode 100644
index 1db5410e43..0000000000
--- a/tools/drmemory/README
+++ /dev/null
@@ -1,86 +0,0 @@
-
-# Dr. Memory
-
-Dr. Memory (www.drmemory.org) is an open-source dynamic memory
-monitoring tool for Windows, Linux, and Mac.
-
-## About Dr. Memory
-
-Dr. Memory operates on unmodified application binaries running on
-Windows, Linux, or Mac on commodity x86 and ARM32 (forthcoming) hardware.
-It is capable of identifying memory-related programming errors including:
- * accesses of uninitialized memory
- * accesses to unaddressable memory (heap underflow and overflow)
- * accesses to freed memory
- * double frees
- * memory leaks
- * handle leaks (on Windows)
- * GDI API usage errors (on Windows)
- * accesses to un-reserved thread local storage slots (on Windows)
-
-## Using Dr. Memory (Windows only)
-
-Build your application with debug information and then run it under
-Dr. Memory. Errors found are printed to the screen, and a summary is
-shown at the end of the run.
-
-### Obtain Dr. Memory
-
-The Dr. Memory package is provided as a self-extracting archive
-(DrMemory-Windows-sfx.exe) in the tools/drmemory directory, which can be
-extracted by running the command 'DrMemory-Windows-sfx.exe -ounpacked -y'.
-
-The Dr. Memory release package can be downloaded from
-https://github.com/DynamoRIO/drmemory/wiki/Downloads.
-
-Nightly builds can be downloaded from
-https://build.chromium.org/p/client.drmemory/builds/.
-
-The Dr. Memory source code can be found at
-https://github.com/DynamoRIO/drmemory.
-
-### Run your application with Dr. Memory
-
-To run your application with Dr. Memory, simply put 'drmemory.exe --'
-before the command that invokes the application.
-
- * Running pdfium_unittests with Dr. Memory:
- tools\drmemory\unpacked\bin\drmemory.exe -- out\Debug\pdfium_unittests.exe
-
- * Running pdfium_tests with Dr. Memory:
- tools\drmemory\unpacked\bin\drmemory.exe -- out\Debug\pdfium_tests.exe --png YourInputPDF.pdf
-
-### Run test suite with Dr. Memory
-
-A set of scripts is provided to run the PDFium test suite with Dr. Memory
-on the buildbots; these scripts can also be used to run the test suite locally.
-
- * Running pdfium_unittests with Dr. Memory:
- tools\drmemory\scripts\pdfium_tests.bat -t pdfium_unittests
-
- * Running pixel test suite with Dr. Memory:
- tools\drmemory\scripts\pdfium_tests.bat -t pdfium_pixel
-
-## Documentation
-
-The command 'drmemory.exe -help' prints a list of Dr. Memory runtime
-options with short descriptions.
-
-To view the full documentation, point your web browser at
-http://drmemory.org/docs/.
-
-
-## Contact
-
-This project is provided as-is, with no official support.
-Use the Dr. Memory Users group at
-http://groups.google.com/group/drmemory-users/ to ask questions and
-seek help on using Dr. Memory.
-
-Dr. Memory's source code and issue tracker live at
-https://github.com/DynamoRIO/drmemory
-
-If you would like to submit a patch, you will need to first sign a
-Contributor License Agreement.
-See https://github.com/DynamoRIO/drmemory/wiki/Contributing for more
-information.
diff --git a/tools/drmemory/scripts/common.py b/tools/drmemory/scripts/common.py
deleted file mode 100644
index 7e163e3c60..0000000000
--- a/tools/drmemory/scripts/common.py
+++ /dev/null
@@ -1,252 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging
-import platform
-import os
-import signal
-import subprocess
-import sys
-import time
-
-
-class NotImplementedError(Exception):
- pass
-
-
-class TimeoutError(Exception):
- pass
-
-
-def RunSubprocessInBackground(proc):
- """Runs a subprocess in the background. Returns a handle to the process."""
- logging.info("running %s in the background" % " ".join(proc))
- return subprocess.Popen(proc)
-
-
-def RunSubprocess(proc, timeout=0):
- """ Runs a subprocess, until it finishes or |timeout| is exceeded and the
- process is killed with taskkill. A |timeout| <= 0 means no timeout.
-
- Args:
- proc: list of process components (exe + args)
- timeout: how long to wait before killing, <= 0 means wait forever
- """
-
- logging.info("running %s, timeout %d sec" % (" ".join(proc), timeout))
- sys.stdout.flush()
- sys.stderr.flush()
-
- # Manually read and print out stdout and stderr.
- # By default, the subprocess is supposed to inherit these from its parent,
- # however when run under buildbot, it seems unable to read data from a
- # grandchild process, so we have to read the child and print the data as if
- # it came from us for buildbot to read it. We're not sure why this is
- # necessary.
- # TODO(erikkay): should we buffer stderr and stdout separately?
- p = subprocess.Popen(proc, universal_newlines=True,
- bufsize=0, # unbuffered
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
- logging.info("started subprocess")
-
- did_timeout = False
- if timeout > 0:
- wait_until = time.time() + timeout
- while p.poll() is None and not did_timeout:
- # Have to use readline rather than readlines() or "for line in p.stdout:",
- # otherwise we get buffered even with bufsize=0.
- line = p.stdout.readline()
- while line and not did_timeout:
- sys.stdout.write(line)
- sys.stdout.flush()
- line = p.stdout.readline()
- if timeout > 0:
- did_timeout = time.time() > wait_until
-
- if did_timeout:
- logging.info("process timed out")
- else:
- logging.info("process ended, did not time out")
-
- if did_timeout:
- if IsWindows():
- subprocess.call(["taskkill", "/T", "/F", "/PID", str(p.pid)])
- else:
- # Does this kill all children, too?
- os.kill(p.pid, signal.SIGINT)
- logging.error("KILLED %d" % p.pid)
- # Give the process a chance to actually die before continuing
- # so that cleanup can happen safely.
- time.sleep(1.0)
- logging.error("TIMEOUT waiting for %s" % proc[0])
- raise TimeoutError(proc[0])
- else:
- for line in p.stdout:
- sys.stdout.write(line)
- if not IsMac(): # stdout flush fails on Mac
- logging.info("flushing stdout")
- sys.stdout.flush()
-
- logging.info("collecting result code")
- result = p.poll()
- if result:
- logging.error("%s exited with non-zero result code %d" % (proc[0], result))
- return result
-
-
-def IsLinux():
- return sys.platform.startswith('linux')
-
-
-def IsMac():
- return sys.platform.startswith('darwin')
-
-
-def IsWindows():
- return sys.platform == 'cygwin' or sys.platform.startswith('win')
-
-
-def WindowsVersionName():
- """Returns the name of the Windows version if it is known, or None.
-
- Possible return values are: xp, vista, 7, 8, or None
- """
- if sys.platform == 'cygwin':
- # Windows version number is hiding in system name. Looks like:
- # CYGWIN_NT-6.1-WOW64
- try:
- version_str = platform.uname()[0].split('-')[1]
- except:
- return None
- elif sys.platform.startswith('win'):
- # Normal Windows version string. Mine: 6.1.7601
- version_str = platform.version()
- else:
- return None
-
- parts = version_str.split('.')
- try:
- major = int(parts[0])
- minor = int(parts[1])
- except:
- return None # Can't parse, unknown version.
-
- if major == 5:
- return 'xp'
- elif major == 6 and minor == 0:
- return 'vista'
- elif major == 6 and minor == 1:
- return '7'
- elif major == 6 and minor == 2:
- return '8' # Future proof. ;)
- return None
-
-
-def PlatformNames():
-  """Return an array of strings to be used in paths for the platform
-  (e.g. suppressions, gtest filters, ignore files, etc.)
-  The first element of the array describes the 'main' platform.
- """
- if IsLinux():
- return ['linux']
- if IsMac():
- return ['mac']
- if IsWindows():
- names = ['win32']
- version_name = WindowsVersionName()
- if version_name is not None:
- names.append('win-%s' % version_name)
- return names
- raise NotImplementedError('Unknown platform "%s".' % sys.platform)
-
-
-def PutEnvAndLog(env_name, env_value):
- os.putenv(env_name, env_value)
- logging.info('export %s=%s', env_name, env_value)
-
-def BoringCallers(mangled, use_re_wildcards):
-  """Return a list of 'boring' function names (optionally mangled)
- with */? wildcards (optionally .*/.).
- Boring = we drop off the bottom of stack traces below such functions.
- """
-
- need_mangling = [
- # Don't show our testing framework:
- ("testing::Test::Run", "_ZN7testing4Test3RunEv"),
- ("testing::TestInfo::Run", "_ZN7testing8TestInfo3RunEv"),
- ("testing::internal::Handle*ExceptionsInMethodIfSupported*",
- "_ZN7testing8internal3?Handle*ExceptionsInMethodIfSupported*"),
-
- # Depend on scheduling:
- ("MessageLoop::Run", "_ZN11MessageLoop3RunEv"),
- ("MessageLoop::RunTask", "_ZN11MessageLoop7RunTask*"),
- ("RunnableMethod*", "_ZN14RunnableMethod*"),
- ("DispatchToMethod*", "_Z*16DispatchToMethod*"),
- ("base::internal::Invoker*::DoInvoke*",
- "_ZN4base8internal8Invoker*DoInvoke*"), # Invoker{1,2,3}
- ("base::internal::RunnableAdapter*::Run*",
- "_ZN4base8internal15RunnableAdapter*Run*"),
- ]
-
- ret = []
- for pair in need_mangling:
- ret.append(pair[1 if mangled else 0])
-
- ret += [
- # Also don't show the internals of libc/pthread.
- "start_thread",
- "main",
- "BaseThreadInitThunk",
- ]
-
- if use_re_wildcards:
- for i in range(0, len(ret)):
- ret[i] = ret[i].replace('*', '.*').replace('?', '.')
-
- return ret
-
-def NormalizeWindowsPath(path):
- """If we're using Cygwin Python, turn the path into a Windows path.
-
- Don't turn forward slashes into backslashes for easier copy-pasting and
- escaping.
-
- TODO(rnk): If we ever want to cut out the subprocess invocation, we can use
- _winreg to get the root Cygwin directory from the registry key:
- HKEY_LOCAL_MACHINE\SOFTWARE\Cygwin\setup\rootdir.
- """
- if sys.platform.startswith("cygwin"):
- p = subprocess.Popen(["cygpath", "-m", path],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- (out, err) = p.communicate()
- if err:
- logging.warning("WARNING: cygpath error: %s", err)
- return out.strip()
- else:
- return path
-
-############################
-# Common output format code
-
-def PrintUsedSuppressionsList(suppcounts):
- """ Prints out the list of used suppressions in a format common to all the
- memory tools. If the list is empty, prints nothing and returns False,
- otherwise True.
-
- suppcounts: a dictionary of used suppression counts,
- Key -> name, Value -> count.
- """
- if not suppcounts:
- return False
-
- print "-----------------------------------------------------"
- print "Suppressions used:"
- print " count name"
- for (name, count) in sorted(suppcounts.items(), key=lambda (k,v): (v,k)):
- print "%7d %s" % (count, name)
- print "-----------------------------------------------------"
- sys.stdout.flush()
- return True
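
For reference, a minimal usage sketch (not taken from the deleted file) of how the
common.py helpers above were typically driven by the other scripts in this directory;
the test binary path is an illustrative assumption:

  import logging
  import common

  logging.getLogger().setLevel(logging.INFO)

  # Platform-specific path suffixes, e.g. ['win32', 'win-7'] on Windows 7.
  for name in common.PlatformNames():
      logging.info("platform name: %s", name)

  # Run a test binary with a 60-second timeout; common.TimeoutError is
  # raised if the process has to be killed.
  result = common.RunSubprocess(["out/Debug/pdfium_unittests.exe"], timeout=60)
  logging.info("exit code: %s", result)
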
diff --git a/tools/drmemory/scripts/drmemory_analyze.py b/tools/drmemory/scripts/drmemory_analyze.py
deleted file mode 100644
index 29fc0ed4b0..0000000000
--- a/tools/drmemory/scripts/drmemory_analyze.py
+++ /dev/null
@@ -1,202 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2011 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# drmemory_analyze.py
-
-''' Given a Dr. Memory output file, parses the errors and de-duplicates them.'''
-
-from collections import defaultdict
-import common
-import hashlib
-import logging
-import optparse
-import os
-import re
-import subprocess
-import sys
-import time
-
-class DrMemoryError:
- def __init__(self, report, suppression, testcase):
- self._report = report
- self._testcase = testcase
-
- # Chromium-specific transformations of the suppressions:
- # Replace 'any_test.exe' and 'chrome.dll' with '*', then remove the
- # Dr.Memory-generated error ids from the name= lines as they don't
- # make sense in a multiprocess report.
- supp_lines = suppression.split("\n")
- for l in xrange(len(supp_lines)):
- if supp_lines[l].startswith("name="):
- supp_lines[l] = "name=<insert_a_suppression_name_here>"
- if supp_lines[l].startswith("chrome.dll!"):
- supp_lines[l] = supp_lines[l].replace("chrome.dll!", "*!")
- bang_index = supp_lines[l].find("!")
- d_exe_index = supp_lines[l].find(".exe!")
- if bang_index >= 4 and d_exe_index + 4 == bang_index:
- supp_lines[l] = "*" + supp_lines[l][bang_index:]
- self._suppression = "\n".join(supp_lines)
-
- def __str__(self):
- output = ""
- output += "### BEGIN MEMORY TOOL REPORT (error hash=#%016X#)\n" % \
- self.ErrorHash()
- output += self._report + "\n"
- if self._testcase:
- output += "The report came from the `%s` test.\n" % self._testcase
- output += "Suppression (error hash=#%016X#):\n" % self.ErrorHash()
- output += (" For more info on using suppressions see "
- "http://dev.chromium.org/developers/how-tos/using-drmemory#TOC-Suppressing-error-reports-from-the-\n")
- output += "{\n%s\n}\n" % self._suppression
- output += "### END MEMORY TOOL REPORT (error hash=#%016X#)\n" % \
- self.ErrorHash()
- return output
-
- # This is a device-independent hash identifying the suppression.
- # By printing out this hash we can find duplicate reports between tests and
- # different shards running on multiple buildbots
- def ErrorHash(self):
- return int(hashlib.md5(self._suppression).hexdigest()[:16], 16)
-
- def __hash__(self):
- return hash(self._suppression)
-
- def __eq__(self, rhs):
- return self._suppression == rhs
-
-
-class DrMemoryAnalyzer:
-  ''' Given a set of Dr.Memory output files, parse all the errors out of
-  them, de-duplicate them and output the results.'''
-
- def __init__(self):
- self.known_errors = set()
-    self.error_count = 0
-
- def ReadLine(self):
- self.line_ = self.cur_fd_.readline()
-
- def ReadSection(self):
- result = [self.line_]
- self.ReadLine()
- while len(self.line_.strip()) > 0:
- result.append(self.line_)
- self.ReadLine()
- return result
-
- def ParseReportFile(self, filename, testcase):
- ret = []
-
-    # First, read the generated suppressions file so we can easily look up a
- # suppression for a given error.
- supp_fd = open(filename.replace("results", "suppress"), 'r')
- generated_suppressions = {} # Key -> Error #, Value -> Suppression text.
- for line in supp_fd:
- # NOTE: this regexp looks fragile. Might break if the generated
- # suppression format slightly changes.
- m = re.search("# Suppression for Error #([0-9]+)", line.strip())
- if not m:
- continue
- error_id = int(m.groups()[0])
- assert error_id not in generated_suppressions
- # OK, now read the next suppression:
- cur_supp = ""
- for supp_line in supp_fd:
- if supp_line.startswith("#") or supp_line.strip() == "":
- break
- cur_supp += supp_line
- generated_suppressions[error_id] = cur_supp.strip()
- supp_fd.close()
-
- self.cur_fd_ = open(filename, 'r')
- while True:
- self.ReadLine()
- if (self.line_ == ''): break
-
- match = re.search("^Error #([0-9]+): (.*)", self.line_)
- if match:
- error_id = int(match.groups()[0])
- self.line_ = match.groups()[1].strip() + "\n"
- report = "".join(self.ReadSection()).strip()
- suppression = generated_suppressions[error_id]
- ret.append(DrMemoryError(report, suppression, testcase))
-
- if re.search("SUPPRESSIONS USED:", self.line_):
- self.ReadLine()
- while self.line_.strip() != "":
- line = self.line_.strip()
- (count, name) = re.match(" *([0-9\?]+)x(?: \(.*?\))?: (.*)",
- line).groups()
- if (count == "?"):
-          # Whole-module suppressions have no count available: assume 1
- count = 1
- else:
- count = int(count)
- self.used_suppressions[name] += count
- self.ReadLine()
-
- if self.line_.startswith("ASSERT FAILURE"):
- ret.append(self.line_.strip())
-
- self.cur_fd_.close()
- return ret
-
- def Report(self, filenames, testcase, check_sanity):
- sys.stdout.flush()
- # TODO(timurrrr): support positive tests / check_sanity==True
- self.used_suppressions = defaultdict(int)
-
- to_report = []
- reports_for_this_test = set()
- for f in filenames:
- cur_reports = self.ParseReportFile(f, testcase)
-
- # Filter out the reports that were there in previous tests.
- for r in cur_reports:
- if r in reports_for_this_test:
- # A similar report is about to be printed for this test.
- pass
- elif r in self.known_errors:
- # A similar report has already been printed in one of the prev tests.
- to_report.append("This error was already printed in some "
- "other test, see 'hash=#%016X#'" % r.ErrorHash())
- reports_for_this_test.add(r)
- else:
- self.known_errors.add(r)
- reports_for_this_test.add(r)
- to_report.append(r)
-
- common.PrintUsedSuppressionsList(self.used_suppressions)
-
- if not to_report:
- logging.info("PASS: No error reports found")
- return 0
-
- sys.stdout.flush()
- sys.stderr.flush()
- logging.info("Found %i error reports" % len(to_report))
- for report in to_report:
- self.error_count += 1
- logging.info("Report #%d\n%s" % (self.error_count, report))
- logging.info("Total: %i error reports" % len(to_report))
- sys.stdout.flush()
- return -1
-
-
-def main():
-  '''For testing only. The DrMemoryAnalyzer class should be imported instead.'''
- parser = optparse.OptionParser("usage: %prog <files to analyze>")
-
- (options, args) = parser.parse_args()
- if len(args) == 0:
- parser.error("no filename specified")
- filenames = args
-
- logging.getLogger().setLevel(logging.INFO)
- return DrMemoryAnalyzer().Report(filenames, None, False)
-
-
-if __name__ == '__main__':
- sys.exit(main())
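
For reference, a minimal sketch of driving the analyzer directly, mirroring how
valgrind_test.py below uses it (an assumption; the log directory path is illustrative):

  import glob
  import logging

  import drmemory_analyze

  logging.getLogger().setLevel(logging.INFO)

  # Dr. Memory writes one results.txt per run under its -logdir.
  filenames = glob.glob("drmemory.logs/*/results.txt")

  analyzer = drmemory_analyze.DrMemoryAnalyzer()
  # Report() returns 0 when no error reports are found, -1 otherwise.
  exit_code = analyzer.Report(filenames, None, False)
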
diff --git a/tools/drmemory/scripts/logging_utils.py b/tools/drmemory/scripts/logging_utils.py
deleted file mode 100644
index ef2d674950..0000000000
--- a/tools/drmemory/scripts/logging_utils.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright (c) 2011 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-''' Utility functions and objects for logging.
-'''
-
-import logging
-import sys
-
-class StdoutStderrHandler(logging.Handler):
- ''' Subclass of logging.Handler which outputs to either stdout or stderr
- based on a threshold level.
- '''
-
- def __init__(self, threshold=logging.WARNING, err=sys.stderr, out=sys.stdout):
- ''' Args:
- threshold: below this logging level messages are sent to stdout,
- otherwise they are sent to stderr
- err: a stream object that error messages are sent to, defaults to
- sys.stderr
- out: a stream object that non-error messages are sent to, defaults to
- sys.stdout
- '''
- logging.Handler.__init__(self)
- self._err = logging.StreamHandler(err)
- self._out = logging.StreamHandler(out)
- self._threshold = threshold
- self._last_was_err = False
-
- def setLevel(self, lvl):
- logging.Handler.setLevel(self, lvl)
- self._err.setLevel(lvl)
- self._out.setLevel(lvl)
-
- def setFormatter(self, formatter):
- logging.Handler.setFormatter(self, formatter)
- self._err.setFormatter(formatter)
- self._out.setFormatter(formatter)
-
- def emit(self, record):
- if record.levelno < self._threshold:
- self._out.emit(record)
- self._last_was_err = False
- else:
- self._err.emit(record)
-      self._last_was_err = True
-
- def flush(self):
-    # preserve order when flushing: the stalest stream gets flushed first
- if self._last_was_err:
- self._out.flush()
- self._err.flush()
- else:
- self._err.flush()
- self._out.flush()
-
-
-FORMAT = "%(asctime)s %(filename)s [%(levelname)s] %(message)s"
-DATEFMT = "%H:%M:%S"
-
-def config_root(level=logging.INFO, threshold=logging.WARNING, format=FORMAT,
- datefmt=DATEFMT):
- ''' Configure the root logger to use a StdoutStderrHandler and some default
- formatting.
- Args:
- level: messages below this level are ignored
- threshold: below this logging level messages are sent to stdout,
- otherwise they are sent to stderr
- format: format for log messages, see logger.Format
- datefmt: format for date in log messages
-
- '''
- # to set the handler of the root logging object, we need to do setup
- # manually rather than using basicConfig
- root = logging.getLogger()
- root.setLevel(level)
- formatter = logging.Formatter(format, datefmt)
- handler = StdoutStderrHandler(threshold=threshold)
- handler.setLevel(level)
- handler.setFormatter(formatter)
- root.addHandler(handler)
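
For reference, a minimal sketch (not taken from the deleted file) of the intended use
of logging_utils: messages below the threshold go to stdout, the rest to stderr:

  import logging

  import logging_utils

  # Messages below WARNING go to stdout, WARNING and above go to stderr.
  logging_utils.config_root(level=logging.INFO, threshold=logging.WARNING)
  logging.info("routed to stdout")
  logging.warning("routed to stderr")
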
diff --git a/tools/drmemory/scripts/path_utils.py b/tools/drmemory/scripts/path_utils.py
deleted file mode 100644
index 6ab4312043..0000000000
--- a/tools/drmemory/scripts/path_utils.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright (c) 2011 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Some utility methods for getting and manipulating paths."""
-
-# TODO(pamg): Have the buildbot use these, too.
-
-
-import errno
-import os
-import sys
-
-class PathNotFound(Exception): pass
-
-def ScriptDir():
- """Get the full path to the directory containing the current script."""
- script_filename = os.path.abspath(sys.argv[0])
- return os.path.dirname(script_filename)
-
-def FindAncestor(start_dir, ancestor):
- """Finds an ancestor dir in a path.
-
- For example, FindAncestor('c:\foo\bar\baz', 'bar') would return
- 'c:\foo\bar'. Unlike FindUpward*, this only looks at direct path ancestors.
- """
- start_dir = os.path.abspath(start_dir)
- path = start_dir
- while True:
- (parent, tail) = os.path.split(path)
- if tail == ancestor:
- return path
- if not tail:
- break
- path = parent
- raise PathNotFound("Unable to find ancestor %s in %s" % (ancestor, start_dir))
-
-def FindUpwardParent(start_dir, *desired_list):
- """Finds the desired object's parent, searching upward from the start_dir.
-
- Searches start_dir and all its parents looking for the desired directory
- or file, which may be given in one or more path components. Returns the
- first directory in which the top desired path component was found, or raises
- PathNotFound if it wasn't.
- """
- desired_path = os.path.join(*desired_list)
- last_dir = ''
- cur_dir = start_dir
- found_path = os.path.join(cur_dir, desired_path)
- while not os.path.exists(found_path):
- last_dir = cur_dir
- cur_dir = os.path.dirname(cur_dir)
- if last_dir == cur_dir:
- raise PathNotFound('Unable to find %s above %s' %
- (desired_path, start_dir))
- found_path = os.path.join(cur_dir, desired_path)
- # Strip the entire original desired path from the end of the one found
- # and remove a trailing path separator, if present.
- found_path = found_path[:len(found_path) - len(desired_path)]
- if found_path.endswith(os.sep):
- found_path = found_path[:len(found_path) - 1]
- return found_path
-
-
-def FindUpward(start_dir, *desired_list):
- """Returns a path to the desired directory or file, searching upward.
-
- Searches start_dir and all its parents looking for the desired directory
- or file, which may be given in one or more path components. Returns the full
- path to the desired object, or raises PathNotFound if it wasn't found.
- """
- parent = FindUpwardParent(start_dir, *desired_list)
- return os.path.join(parent, *desired_list)
-
-
-def MaybeMakeDirectory(*path):
- """Creates an entire path, if it doesn't already exist."""
- file_path = os.path.join(*path)
- try:
- os.makedirs(file_path)
- except OSError, e:
- # errno.EEXIST is "File exists". If we see another error, re-raise.
- if e.errno != errno.EEXIST:
- raise
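
For reference, a minimal sketch (not taken from the deleted file) of the path_utils
helpers; the testing/tools target and the output directory are illustrative assumptions:

  import path_utils

  script_dir = path_utils.ScriptDir()
  # Search script_dir and its parents for testing/tools; raises
  # path_utils.PathNotFound if it is never found.
  tools_dir = path_utils.FindUpward(script_dir, "testing", "tools")
  # Create an output directory unless it already exists.
  path_utils.MaybeMakeDirectory(script_dir, "output")
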
diff --git a/tools/drmemory/scripts/pdfium_tests.bat b/tools/drmemory/scripts/pdfium_tests.bat
deleted file mode 100644
index 4618a0e945..0000000000
--- a/tools/drmemory/scripts/pdfium_tests.bat
+++ /dev/null
@@ -1,24 +0,0 @@
-@echo off
-:: Copyright (c) 2011 The Chromium Authors. All rights reserved.
-:: Use of this source code is governed by a BSD-style license that can be
-:: found in the LICENSE file.
-
-set THISDIR=%~dp0
-set TOOL_NAME="drmemory_full"
-
-:: Set up DRMEMORY_COMMAND to invoke Dr. Memory {{{1
-set DRMEMORY_PATH=%THISDIR%..
-set DRMEMORY_SFX=%DRMEMORY_PATH%\drmemory-windows-sfx.exe
-if EXIST %DRMEMORY_SFX% GOTO DRMEMORY_BINARY_OK
-echo "Can't find Dr. Memory executables."
-echo "See http://www.chromium.org/developers/how-tos/using-valgrind/dr-memory"
-echo "for the instructions on how to get them."
-exit /B 1
-
-:DRMEMORY_BINARY_OK
-%DRMEMORY_SFX% -o%DRMEMORY_PATH%\unpacked -y
-set DRMEMORY_COMMAND=%DRMEMORY_PATH%\unpacked\bin\drmemory.exe
-:: }}}
-
-:RUN_TESTS
-python %THISDIR%/pdfium_tests.py %*
diff --git a/tools/drmemory/scripts/pdfium_tests.py b/tools/drmemory/scripts/pdfium_tests.py
deleted file mode 100644
index f1d5308f98..0000000000
--- a/tools/drmemory/scripts/pdfium_tests.py
+++ /dev/null
@@ -1,399 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-''' Runs various chrome tests through valgrind_test.py.'''
-
-import glob
-import logging
-import optparse
-import os
-import subprocess
-import sys
-
-import logging_utils
-import path_utils
-
-import common
-import valgrind_test
-
-class TestNotFound(Exception): pass
-
-class MultipleGTestFiltersSpecified(Exception): pass
-
-class BuildDirNotFound(Exception): pass
-
-class BuildDirAmbiguous(Exception): pass
-
-class ExecutableNotFound(Exception): pass
-
-class BadBinary(Exception): pass
-
-class ChromeTests:
- SLOW_TOOLS = ["drmemory"]
-
- def __init__(self, options, args, test):
- if ':' in test:
- (self._test, self._gtest_filter) = test.split(':', 1)
- else:
- self._test = test
- self._gtest_filter = options.gtest_filter
-
- if self._test not in self._test_list:
- raise TestNotFound("Unknown test: %s" % test)
-
- if options.gtest_filter and options.gtest_filter != self._gtest_filter:
- raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
- "and --test %s" % test)
-
- self._options = options
- self._args = args
-
- # Compute the top of the tree (the "source dir") from the script dir
- # (where this script lives). We assume that the script dir is in
- # tools/drmemory/scripts relative to the top of the tree.
- script_dir = os.path.dirname(path_utils.ScriptDir())
- self._source_dir = os.path.dirname(os.path.dirname(script_dir))
- # Setup Dr. Memory if it's not set up yet.
- drmem_cmd = os.getenv("DRMEMORY_COMMAND")
- if not drmem_cmd:
- drmem_sfx = os.path.join(script_dir, "drmemory-windows-sfx.exe")
- if not os.path.isfile(drmem_sfx):
- raise RuntimeError, "Cannot find drmemory-windows-sfx.exe"
- drmem_dir = os.path.join(script_dir, "unpacked")
- subprocess.call([drmem_sfx, "-o" + drmem_dir, "-y"], 0)
- drmem_cmd = os.path.join(drmem_dir, "bin", "drmemory.exe")
- os.environ["DRMEMORY_COMMAND"] = drmem_cmd
- # since this path is used for string matching, make sure it's always
- # an absolute Unix-style path
- self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
- self._command_preamble = ["--source-dir=%s" % (self._source_dir)]
-
- if not self._options.build_dir:
- dirs = [
- os.path.join(self._source_dir, "xcodebuild", "Debug"),
- os.path.join(self._source_dir, "out", "Debug"),
- os.path.join(self._source_dir, "build", "Debug"),
- ]
- build_dir = [d for d in dirs if os.path.isdir(d)]
- if len(build_dir) > 1:
- raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
- "%s\nPlease specify just one "
- "using --build-dir" % ", ".join(build_dir))
- elif build_dir:
- self._options.build_dir = build_dir[0]
- else:
- self._options.build_dir = None
-
- if self._options.build_dir:
- build_dir = os.path.abspath(self._options.build_dir)
- self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]
-
- def _EnsureBuildDirFound(self):
- if not self._options.build_dir:
- raise BuildDirNotFound("Oops, couldn't find a build dir, please "
- "specify it manually using --build-dir")
-
- def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
- '''Generates the default command array that most tests will use.'''
- if exe and common.IsWindows():
- exe += '.exe'
-
- cmd = list(self._command_preamble)
-
- # Find all suppressions matching the following pattern:
- # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
- # and list them with --suppressions= prefix.
- script_dir = path_utils.ScriptDir()
- suppression_file = os.path.join(script_dir, "..", "suppressions.txt")
- if os.path.exists(suppression_file):
- cmd.append("--suppressions=%s" % suppression_file)
- # Platform-specific suppression
- for platform in common.PlatformNames():
- platform_suppression_file = \
- os.path.join(script_dir, "..", 'suppressions_%s.txt' % platform)
- if os.path.exists(platform_suppression_file):
- cmd.append("--suppressions=%s" % platform_suppression_file)
-
- if self._options.valgrind_tool_flags:
- cmd += self._options.valgrind_tool_flags.split(" ")
- if self._options.keep_logs:
- cmd += ["--keep_logs"]
- if valgrind_test_args != None:
- for arg in valgrind_test_args:
- cmd.append(arg)
- if exe:
- self._EnsureBuildDirFound()
- exe_path = os.path.join(self._options.build_dir, exe)
- if not os.path.exists(exe_path):
- raise ExecutableNotFound("Couldn't find '%s'" % exe_path)
-
- cmd.append(exe_path)
-      # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
- # so we can find the slowpokes.
- cmd.append("--gtest_print_time")
- # Built-in test launcher for gtest-based executables runs tests using
-      # multiple processes by default. Force the single-process mode back.
- cmd.append("--single-process-tests")
- if self._options.gtest_repeat:
- cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
- if self._options.gtest_shuffle:
- cmd.append("--gtest_shuffle")
- if self._options.gtest_break_on_failure:
- cmd.append("--gtest_break_on_failure")
- if self._options.test_launcher_bot_mode:
- cmd.append("--test-launcher-bot-mode")
- if self._options.test_launcher_total_shards is not None:
- cmd.append("--test-launcher-total-shards=%d" % self._options.test_launcher_total_shards)
- if self._options.test_launcher_shard_index is not None:
- cmd.append("--test-launcher-shard-index=%d" % self._options.test_launcher_shard_index)
- return cmd
-
- def Run(self):
- ''' Runs the test specified by command-line argument --test '''
- logging.info("running test %s" % (self._test))
- return self._test_list[self._test](self)
-
- def _AppendGtestFilter(self, tool, name, cmd):
- '''Append an appropriate --gtest_filter flag to the googletest binary
- invocation.
-    If the user passed their own filter mentioning only one test, just use it.
-    Otherwise, filter out tests listed in the appropriate gtest_exclude files.
- '''
- if (self._gtest_filter and
- ":" not in self._gtest_filter and
- "?" not in self._gtest_filter and
- "*" not in self._gtest_filter):
- cmd.append("--gtest_filter=%s" % self._gtest_filter)
- return
-
- filters = []
- gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")
-
- gtest_filter_files = [
- os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
- # Use ".gtest.txt" files only for slow tools, as they now contain
- # Valgrind- and Dr.Memory-specific filters.
- # TODO(glider): rename the files to ".gtest_slow.txt"
- if tool.ToolName() in ChromeTests.SLOW_TOOLS:
- gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
- for platform_suffix in common.PlatformNames():
- gtest_filter_files += [
- os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
- os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
- (tool.ToolName(), platform_suffix))]
- logging.info("Reading gtest exclude filter files:")
- for filename in gtest_filter_files:
- # strip the leading absolute path (may be very long on the bot)
- # and the following / or \.
- readable_filename = filename.replace("\\", "/") # '\' on Windows
- readable_filename = readable_filename.replace(self._source_dir, "")[1:]
- if not os.path.exists(filename):
- logging.info(" \"%s\" - not found" % readable_filename)
- continue
- logging.info(" \"%s\" - OK" % readable_filename)
- f = open(filename, 'r')
- for line in f.readlines():
- if line.startswith("#") or line.startswith("//") or line.isspace():
- continue
- line = line.rstrip()
- test_prefixes = ["FLAKY", "FAILS"]
- for p in test_prefixes:
- # Strip prefixes from the test names.
- line = line.replace(".%s_" % p, ".")
- # Exclude the original test name.
- filters.append(line)
- if line[-2:] != ".*":
- # List all possible prefixes if line doesn't end with ".*".
- for p in test_prefixes:
- filters.append(line.replace(".", ".%s_" % p))
- # Get rid of duplicates.
- filters = set(filters)
- gtest_filter = self._gtest_filter
- if len(filters):
- if gtest_filter:
- gtest_filter += ":"
- if gtest_filter.find("-") < 0:
- gtest_filter += "-"
- else:
- gtest_filter = "-"
- gtest_filter += ":".join(filters)
- if gtest_filter:
- cmd.append("--gtest_filter=%s" % gtest_filter)
-
- @staticmethod
- def ShowTests():
- test_to_names = {}
- for name, test_function in ChromeTests._test_list.iteritems():
- test_to_names.setdefault(test_function, []).append(name)
-
- name_to_aliases = {}
- for names in test_to_names.itervalues():
- names.sort(key=lambda name: len(name))
- name_to_aliases[names[0]] = names[1:]
-
- print
- print "Available tests:"
- print "----------------"
- for name, aliases in sorted(name_to_aliases.iteritems()):
- if aliases:
- print " {} (aka {})".format(name, ', '.join(aliases))
- else:
- print " {}".format(name)
-
- def SetupLdPath(self, requires_build_dir):
- if requires_build_dir:
- self._EnsureBuildDirFound()
- elif not self._options.build_dir:
- return
-
- # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
- if (os.getenv("LD_LIBRARY_PATH")):
- os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
- self._options.build_dir))
- else:
- os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
-
- def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
- tool = valgrind_test.CreateTool(self._options.valgrind_tool)
- cmd = self._DefaultCommand(tool, name, valgrind_test_args)
- self._AppendGtestFilter(tool, name, cmd)
- cmd.extend(['--test-tiny-timeout=1000'])
- if cmd_args:
- cmd.extend(cmd_args)
-
- self.SetupLdPath(True)
- return tool.Run(cmd, module)
-
- def RunCmdLine(self):
- tool = valgrind_test.CreateTool(self._options.valgrind_tool)
- cmd = self._DefaultCommand(tool, None, self._args)
- self.SetupLdPath(False)
- return tool.Run(cmd, None)
-
- def TestPDFiumUnitTests(self):
- return self.SimpleTest("pdfium_unittests", "pdfium_unittests")
-
- def TestPDFiumEmbedderTests(self):
- return self.SimpleTest("pdfium_embeddertests", "pdfium_embeddertests")
-
- def TestPDFiumTest(self, script_name):
- # Build the command line in 'cmd'.
- # It's going to be roughly
- # python valgrind_test.py ...
- # but we'll use the --indirect_pdfium_test flag to valgrind_test.py
- # to avoid valgrinding python.
-
- # Start by building the valgrind_test.py commandline.
- tool = valgrind_test.CreateTool(self._options.valgrind_tool)
- cmd = self._DefaultCommand(tool)
- cmd.append("--trace_children")
- cmd.append("--indirect_pdfium_test")
- cmd.append("--ignore_exit_code")
- # Now build script_cmd, the run_corpus_tests commandline.
- script = os.path.join(self._source_dir, "testing", "tools", script_name)
- script_cmd = ["python", script]
- if self._options.build_dir:
- script_cmd.extend(["--build-dir", self._options.build_dir])
- # Now run script_cmd with the wrapper in cmd
- cmd.append("--")
- cmd.extend(script_cmd)
-
- ret = tool.Run(cmd, "layout", min_runtime_in_seconds=0)
- return ret
-
- def TestPDFiumJavascript(self):
- return self.TestPDFiumTest("run_javascript_tests.py")
-
- def TestPDFiumPixel(self):
- return self.TestPDFiumTest("run_pixel_tests.py")
-
- def TestPDFiumCorpus(self):
- return self.TestPDFiumTest("run_corpus_tests.py")
-
- # The known list of tests.
- _test_list = {
- "cmdline" : RunCmdLine,
- "pdfium_corpus": TestPDFiumCorpus,
- "pdfium_embeddertests": TestPDFiumEmbedderTests,
- "pdfium_javascript": TestPDFiumJavascript,
- "pdfium_pixel": TestPDFiumPixel,
- "pdfium_unittests": TestPDFiumUnitTests,
- }
-
-
-def _main():
- parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
- "[-t <test> ...]")
-
- parser.add_option("--help-tests", dest="help_tests", action="store_true",
- default=False, help="List all available tests")
- parser.add_option("-b", "--build-dir",
- help="the location of the compiler output")
- parser.add_option("--target", help="Debug or Release")
- parser.add_option("-t", "--test", action="append", default=[],
- help="which test to run, supports test:gtest_filter format "
- "as well.")
- parser.add_option("--gtest_filter",
- help="additional arguments to --gtest_filter")
- parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
- parser.add_option("--gtest_shuffle", action="store_true", default=False,
- help="Randomize tests' orders on every iteration.")
- parser.add_option("--gtest_break_on_failure", action="store_true",
- default=False,
- help="Drop in to debugger on assertion failure. Also "
- "useful for forcing tests to exit with a stack dump "
- "on the first assertion failure when running with "
- "--gtest_repeat=-1")
- parser.add_option("-v", "--verbose", action="store_true", default=False,
- help="verbose output - enable debug log messages")
- parser.add_option("--tool", dest="valgrind_tool", default="drmemory_full",
- help="specify a valgrind tool to run the tests under")
- parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
- help="specify custom flags for the selected valgrind tool")
- parser.add_option("--keep_logs", action="store_true", default=False,
- help="store memory tool logs in the <tool>.logs directory "
- "instead of /tmp.\nThis can be useful for tool "
- "developers/maintainers.\nPlease note that the <tool>"
- ".logs directory will be clobbered on tool startup.")
- parser.add_option("--test-launcher-bot-mode", action="store_true",
- help="run the tests with --test-launcher-bot-mode")
- parser.add_option("--test-launcher-total-shards", type=int,
- help="run the tests with --test-launcher-total-shards")
- parser.add_option("--test-launcher-shard-index", type=int,
- help="run the tests with --test-launcher-shard-index")
-
- options, args = parser.parse_args()
-
- # Bake target into build_dir.
- if options.target and options.build_dir:
- assert (options.target !=
- os.path.basename(os.path.dirname(options.build_dir)))
- options.build_dir = os.path.join(os.path.abspath(options.build_dir),
- options.target)
-
- if options.verbose:
- logging_utils.config_root(logging.DEBUG)
- else:
- logging_utils.config_root()
-
- if options.help_tests:
- ChromeTests.ShowTests()
- return 0
-
- if not options.test:
- parser.error("--test not specified")
-
- if len(options.test) != 1 and options.gtest_filter:
- parser.error("--gtest_filter and multiple tests don't make sense together")
-
- for t in options.test:
- tests = ChromeTests(options, args, t)
- ret = tests.Run()
- if ret: return ret
- return 0
-
-
-if __name__ == "__main__":
- sys.exit(_main())
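
For reference, a minimal sketch (an assumption, not taken from the deleted file) of
invoking pdfium_tests.py programmatically using the test:gtest_filter form described
in the --test help text; the build directory and filter are illustrative:

  import sys

  import pdfium_tests

  # Equivalent to: pdfium_tests.bat -t pdfium_unittests:FXCRT*
  sys.argv = ["pdfium_tests.py", "--build-dir", "out/Debug",
              "-t", "pdfium_unittests:FXCRT*"]
  sys.exit(pdfium_tests._main())
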
diff --git a/tools/drmemory/scripts/valgrind_test.py b/tools/drmemory/scripts/valgrind_test.py
deleted file mode 100644
index 92960c79a5..0000000000
--- a/tools/drmemory/scripts/valgrind_test.py
+++ /dev/null
@@ -1,487 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Runs an exe through Valgrind and puts the intermediate files in a
-directory.
-"""
-
-import datetime
-import glob
-import logging
-import optparse
-import os
-import re
-import shutil
-import stat
-import subprocess
-import sys
-import tempfile
-
-import common
-
-import drmemory_analyze
-
-class BaseTool(object):
- """Abstract class for running dynamic error detection tools.
-
- Always subclass this and implement ToolCommand with framework- and
- tool-specific stuff.
- """
-
- def __init__(self):
- temp_parent_dir = None
- self.log_parent_dir = ""
- if common.IsWindows():
- # gpu process on Windows Vista+ runs at Low Integrity and can only
- # write to certain directories (http://crbug.com/119131)
- #
- # TODO(bruening): if scripts die in middle and don't clean up temp
- # dir, we'll accumulate files in profile dir. should remove
- # really old files automatically.
- profile = os.getenv("USERPROFILE")
- if profile:
- self.log_parent_dir = profile + "\\AppData\\LocalLow\\"
- if os.path.exists(self.log_parent_dir):
- self.log_parent_dir = common.NormalizeWindowsPath(self.log_parent_dir)
- temp_parent_dir = self.log_parent_dir
- # Generated every time (even when overridden)
- self.temp_dir = tempfile.mkdtemp(prefix="vg_logs_", dir=temp_parent_dir)
- self.log_dir = self.temp_dir # overridable by --keep_logs
- self.option_parser_hooks = []
- # TODO(glider): we may not need some of the env vars on some of the
- # platforms.
- self._env = {
- "G_SLICE" : "always-malloc",
- "NSS_DISABLE_UNLOAD" : "1",
- "NSS_DISABLE_ARENA_FREE_LIST" : "1",
- "GTEST_DEATH_TEST_USE_FORK": "1",
- }
-
- def ToolName(self):
- raise NotImplementedError, "This method should be implemented " \
- "in the tool-specific subclass"
-
- def Analyze(self, check_sanity=False):
- raise NotImplementedError, "This method should be implemented " \
- "in the tool-specific subclass"
-
- def RegisterOptionParserHook(self, hook):
- # Frameworks and tools can add their own flags to the parser.
- self.option_parser_hooks.append(hook)
-
- def CreateOptionParser(self):
- # Defines Chromium-specific flags.
- self._parser = optparse.OptionParser("usage: %prog [options] <program to "
- "test>")
- self._parser.disable_interspersed_args()
- self._parser.add_option("-t", "--timeout",
- dest="timeout", metavar="TIMEOUT", default=100000,
- help="timeout in seconds for the run (default 100000)")
- self._parser.add_option("", "--build-dir",
- help="the location of the compiler output")
- self._parser.add_option("", "--source-dir",
-                            help="path to top of source tree for this build "
-                                 "(used to normalize source paths in baseline)")
- self._parser.add_option("", "--gtest_filter", default="",
- help="which test case to run")
- self._parser.add_option("", "--gtest_repeat",
- help="how many times to run each test")
- self._parser.add_option("", "--gtest_print_time", action="store_true",
- default=False,
- help="show how long each test takes")
- self._parser.add_option("", "--ignore_exit_code", action="store_true",
- default=False,
- help="ignore exit code of the test "
- "(e.g. test failures)")
- self._parser.add_option("", "--keep_logs", action="store_true",
- default=False,
- help="store memory tool logs in the <tool>.logs "
- "directory instead of /tmp.\nThis can be "
- "useful for tool developers/maintainers.\n"
- "Please note that the <tool>.logs directory "
- "will be clobbered on tool startup.")
-
- # To add framework- or tool-specific flags, please add a hook using
- # RegisterOptionParserHook in the corresponding subclass.
- # See ValgrindTool for an example.
- for hook in self.option_parser_hooks:
- hook(self, self._parser)
-
- def ParseArgv(self, args):
- self.CreateOptionParser()
-
- # self._tool_flags will store those tool flags which we don't parse
- # manually in this script.
- self._tool_flags = []
- known_args = []
-
- """ We assume that the first argument not starting with "-" is a program
- name and all the following flags should be passed to the program.
- TODO(timurrrr): customize optparse instead
- """
- while len(args) > 0 and args[0][:1] == "-":
- arg = args[0]
- if (arg == "--"):
- break
- if self._parser.has_option(arg.split("=")[0]):
- known_args += [arg]
- else:
- self._tool_flags += [arg]
- args = args[1:]
-
- if len(args) > 0:
- known_args += args
-
- self._options, self._args = self._parser.parse_args(known_args)
-
- self._timeout = int(self._options.timeout)
- self._source_dir = self._options.source_dir
- if self._options.keep_logs:
- # log_parent_dir has trailing slash if non-empty
- self.log_dir = self.log_parent_dir + "%s.logs" % self.ToolName()
- if os.path.exists(self.log_dir):
- shutil.rmtree(self.log_dir)
- os.mkdir(self.log_dir)
- logging.info("Logs are in " + self.log_dir)
-
- self._ignore_exit_code = self._options.ignore_exit_code
- if self._options.gtest_filter != "":
- self._args.append("--gtest_filter=%s" % self._options.gtest_filter)
- if self._options.gtest_repeat:
- self._args.append("--gtest_repeat=%s" % self._options.gtest_repeat)
- if self._options.gtest_print_time:
- self._args.append("--gtest_print_time")
-
- return True
-
- def Setup(self, args):
- return self.ParseArgv(args)
-
- def ToolCommand(self):
- raise NotImplementedError, "This method should be implemented " \
- "in the tool-specific subclass"
-
- def Cleanup(self):
- # You may override it in the tool-specific subclass
- pass
-
- def Execute(self):
- """ Execute the app to be tested after successful instrumentation.
- Full execution command-line provided by subclassers via proc."""
- logging.info("starting execution...")
- proc = self.ToolCommand()
- for var in self._env:
- common.PutEnvAndLog(var, self._env[var])
- return common.RunSubprocess(proc, self._timeout)
-
- def RunTestsAndAnalyze(self, check_sanity):
- exec_retcode = self.Execute()
- analyze_retcode = self.Analyze(check_sanity)
-
- if analyze_retcode:
- logging.error("Analyze failed.")
- logging.info("Search the log for '[ERROR]' to see the error reports.")
- return analyze_retcode
-
- if exec_retcode:
- if self._ignore_exit_code:
- logging.info("Test execution failed, but the exit code is ignored.")
- else:
- logging.error("Test execution failed.")
- return exec_retcode
- else:
- logging.info("Test execution completed successfully.")
-
- if not analyze_retcode:
- logging.info("Analysis completed successfully.")
-
- return 0
-
- def Main(self, args, check_sanity, min_runtime_in_seconds):
- """Call this to run through the whole process: Setup, Execute, Analyze"""
- start_time = datetime.datetime.now()
- retcode = -1
- if self.Setup(args):
- retcode = self.RunTestsAndAnalyze(check_sanity)
- shutil.rmtree(self.temp_dir, ignore_errors=True)
- self.Cleanup()
- else:
- logging.error("Setup failed")
- end_time = datetime.datetime.now()
- runtime_in_seconds = (end_time - start_time).seconds
- hours = runtime_in_seconds / 3600
- seconds = runtime_in_seconds % 3600
- minutes = seconds / 60
- seconds = seconds % 60
- logging.info("elapsed time: %02d:%02d:%02d" % (hours, minutes, seconds))
- if (min_runtime_in_seconds > 0 and
- runtime_in_seconds < min_runtime_in_seconds):
- logging.error("Layout tests finished too quickly. "
- "It should have taken at least %d seconds. "
- "Something went wrong?" % min_runtime_in_seconds)
- retcode = -1
- return retcode
-
- def Run(self, args, module, min_runtime_in_seconds=0):
- MODULES_TO_SANITY_CHECK = ["base"]
-
- check_sanity = module in MODULES_TO_SANITY_CHECK
- return self.Main(args, check_sanity, min_runtime_in_seconds)
-
-
-class DrMemory(BaseTool):
- """Dr.Memory
- Dynamic memory error detector for Windows.
-
- http://dev.chromium.org/developers/how-tos/using-drmemory
-  It is not very mature at the moment; some things might not work properly.
- """
-
- def __init__(self, full_mode, pattern_mode):
- super(DrMemory, self).__init__()
- self.full_mode = full_mode
- self.pattern_mode = pattern_mode
- self.RegisterOptionParserHook(DrMemory.ExtendOptionParser)
-
- def ToolName(self):
- return "drmemory"
-
- def ExtendOptionParser(self, parser):
- parser.add_option("", "--suppressions", default=[],
- action="append",
- help="path to a drmemory suppression file")
- parser.add_option("", "--follow_python", action="store_true",
- default=False, dest="follow_python",
- help="Monitor python child processes. If off, neither "
- "python children nor any children of python children "
- "will be monitored.")
- parser.add_option("", "--indirect_pdfium_test", action="store_true",
- default=False,
- help="set --wrapper rather than running Dr. Memory "
- "directly.")
- parser.add_option("", "--use_debug", action="store_true",
- default=False, dest="use_debug",
- help="Run Dr. Memory debug build")
- parser.add_option("", "--trace_children", action="store_true",
- default=True,
- help="TODO: default value differs from Valgrind")
-
- def ToolCommand(self):
- """Get the tool command to run."""
- # WINHEAP is what Dr. Memory supports as there are issues w/ both
- # jemalloc (https://github.com/DynamoRIO/drmemory/issues/320) and
- # tcmalloc (https://github.com/DynamoRIO/drmemory/issues/314)
- add_env = {
- "CHROME_ALLOCATOR" : "WINHEAP",
- "JSIMD_FORCEMMX" : "1", # https://github.com/DynamoRIO/drmemory/issues/540
- }
- for k,v in add_env.iteritems():
- logging.info("export %s=%s", k, v)
- os.putenv(k, v)
-
- drmem_cmd = os.getenv("DRMEMORY_COMMAND")
- if not drmem_cmd:
- raise RuntimeError, "Please set DRMEMORY_COMMAND environment variable " \
- "with the path to drmemory.exe"
- proc = drmem_cmd.split(" ")
-
- # By default, don't run python (this will exclude python's children as well)
- # to reduce runtime. We're not really interested in spending time finding
- # bugs in the python implementation.
- # With file-based config we must update the file every time, and
- # it will affect simultaneous drmem uses by this user. While file-based
- # config has many advantages, here we may want this-instance-only
- # (https://github.com/DynamoRIO/drmemory/issues/334).
- drconfig_cmd = [ proc[0].replace("drmemory.exe", "drconfig.exe") ]
- drconfig_cmd += ["-quiet"] # suppress errors about no 64-bit libs
- run_drconfig = True
- if self._options.follow_python:
- logging.info("Following python children")
- # -unreg fails if not already registered so query for that first
- query_cmd = drconfig_cmd + ["-isreg", "python.exe"]
- query_proc = subprocess.Popen(query_cmd, stdout=subprocess.PIPE,
- shell=True)
- (query_out, query_err) = query_proc.communicate()
- if re.search("exe not registered", query_out):
- run_drconfig = False # all set
- else:
- drconfig_cmd += ["-unreg", "python.exe"]
- else:
- logging.info("Excluding python children")
- drconfig_cmd += ["-reg", "python.exe", "-norun"]
- if run_drconfig:
- drconfig_retcode = common.RunSubprocess(drconfig_cmd, self._timeout)
- if drconfig_retcode:
- logging.error("Configuring whether to follow python children failed " \
- "with %d.", drconfig_retcode)
- raise RuntimeError, "Configuring python children failed "
-
- suppression_count = 0
- supp_files = self._options.suppressions
- if self.full_mode:
- supp_files += [s.replace(".txt", "_full.txt") for s in supp_files]
- for suppression_file in supp_files:
- if os.path.exists(suppression_file):
- suppression_count += 1
- proc += ["-suppress", common.NormalizeWindowsPath(suppression_file)]
-
- if not suppression_count:
- logging.warning("WARNING: NOT USING SUPPRESSIONS!")
-
- # Un-comment to dump Dr.Memory events on error
- #proc += ["-dr_ops", "-dumpcore_mask", "-dr_ops", "0x8bff"]
-
- # Un-comment and comment next line to debug Dr.Memory
- #proc += ["-dr_ops", "-no_hide"]
- #proc += ["-dr_ops", "-msgbox_mask", "-dr_ops", "15"]
- #Proc += ["-dr_ops", "-stderr_mask", "-dr_ops", "15"]
- # Ensure we see messages about Dr. Memory crashing!
- proc += ["-dr_ops", "-stderr_mask", "-dr_ops", "12"]
-
- if self._options.use_debug:
- proc += ["-debug"]
-
- proc += ["-logdir", common.NormalizeWindowsPath(self.log_dir)]
-
- if self.log_parent_dir:
- # gpu process on Windows Vista+ runs at Low Integrity and can only
- # write to certain directories (http://crbug.com/119131)
- symcache_dir = os.path.join(self.log_parent_dir, "drmemory.symcache")
- elif self._options.build_dir:
- # The other case is only possible with -t cmdline.
- # Anyways, if we omit -symcache_dir the -logdir's value is used which
- # should be fine.
- symcache_dir = os.path.join(self._options.build_dir, "drmemory.symcache")
- if symcache_dir:
- if not os.path.exists(symcache_dir):
- try:
- os.mkdir(symcache_dir)
- except OSError:
- logging.warning("Can't create symcache dir?")
- if os.path.exists(symcache_dir):
- proc += ["-symcache_dir", common.NormalizeWindowsPath(symcache_dir)]
-
- # Use -no_summary to suppress DrMemory's summary and init-time
- # notifications. We generate our own with drmemory_analyze.py.
- proc += ["-batch", "-no_summary"]
-
- # Un-comment to disable interleaved output. Will also suppress error
- # messages normally printed to stderr.
- #proc += ["-quiet", "-no_results_to_stderr"]
-
- proc += ["-callstack_max_frames", "40"]
-
- # disable leak scan for now
- proc += ["-no_count_leaks", "-no_leak_scan"]
-
- # disable warnings about unaddressable prefetches
- proc += ["-no_check_prefetch"]
-
- # crbug.com/413215, no heap mismatch check for Windows release build binary
- if common.IsWindows() and "Release" in self._options.build_dir:
- proc += ["-no_check_delete_mismatch"]
-
- # make callstacks easier to read
- proc += ["-callstack_srcfile_prefix",
- "build\\src,chromium\\src,crt_build\\self_x86"]
- proc += ["-callstack_modname_hide",
- "*drmemory*,chrome.dll"]
-
- boring_callers = common.BoringCallers(mangled=False, use_re_wildcards=False)
- # TODO(timurrrr): In fact, we want "starting from .." instead of "below .."
- proc += ["-callstack_truncate_below", ",".join(boring_callers)]
-
- if self.pattern_mode:
- proc += ["-pattern", "0xf1fd", "-no_count_leaks", "-redzone_size", "0x20"]
- elif not self.full_mode:
- proc += ["-light"]
-
- proc += self._tool_flags
-
- # Dr.Memory requires -- to separate tool flags from the executable name.
- proc += ["--"]
-
- if self._options.indirect_pdfium_test:
- wrapper = " ".join(proc)
- logging.info("pdfium wrapper = " + wrapper)
- proc = self._args
- proc += ["--wrapper", wrapper]
- return proc
-
- # Note that self._args begins with the name of the exe to be run.
- self._args[0] = common.NormalizeWindowsPath(self._args[0])
- proc += self._args
- return proc
-
- def CreateBrowserWrapper(self, command):
- os.putenv("BROWSER_WRAPPER", command)
-
- def Analyze(self, check_sanity=False):
- # Use one analyzer for all the log files to avoid printing duplicate reports
- #
- # TODO(timurrrr): unify this with Valgrind and other tools when we have
- # https://github.com/DynamoRIO/drmemory/issues/684
- analyzer = drmemory_analyze.DrMemoryAnalyzer()
-
- ret = 0
- if not self._options.indirect_pdfium_test:
- filenames = glob.glob(self.log_dir + "/*/results.txt")
-
- ret = analyzer.Report(filenames, None, check_sanity)
- else:
- testcases = glob.glob(self.log_dir + "/testcase.*.logs")
- # If we have browser wrapper, the per-test logdirs are named as
- # "testcase.wrapper_PID.name".
- # Let's extract the list of wrapper_PIDs and name it ppids.
- # NOTE: ppids may contain '_', i.e. they are not ints!
- ppids = set([f.split(".")[-2] for f in testcases])
-
- for ppid in ppids:
- testcase_name = None
- try:
- f = open("%s/testcase.%s.name" % (self.log_dir, ppid))
- testcase_name = f.read().strip()
- f.close()
- except IOError:
- pass
- print "====================================================="
- print " Below is the report for drmemory wrapper PID=%s." % ppid
- if testcase_name:
- print " It was used while running the `%s` test." % testcase_name
- else:
- # TODO(timurrrr): hm, the PID line is suppressed on Windows...
- print " You can find the corresponding test"
- print " by searching the above log for 'PID=%s'" % ppid
- sys.stdout.flush()
- ppid_filenames = glob.glob("%s/testcase.%s.logs/*/results.txt" %
- (self.log_dir, ppid))
- ret |= analyzer.Report(ppid_filenames, testcase_name, False)
- print "====================================================="
- sys.stdout.flush()
-
- logging.info("Please see http://dev.chromium.org/developers/how-tos/"
- "using-drmemory for the info on Dr. Memory")
- return ret
-
-
-class ToolFactory:
- def Create(self, tool_name):
- if tool_name == "drmemory" or tool_name == "drmemory_light":
- # TODO(timurrrr): remove support for "drmemory" when buildbots are
- # switched to drmemory_light OR make drmemory==drmemory_full the default
- # mode when the tool is mature enough.
- return DrMemory(False, False)
- if tool_name == "drmemory_full":
- return DrMemory(True, False)
- if tool_name == "drmemory_pattern":
- return DrMemory(False, True)
- try:
- platform_name = common.PlatformNames()[0]
- except common.NotImplementedError:
- platform_name = sys.platform + "(Unknown)"
- raise RuntimeError, "Unknown tool (tool=%s, platform=%s)" % (tool_name,
- platform_name)
-
-def CreateTool(tool):
- return ToolFactory().Create(tool)
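
For reference, a minimal sketch mirroring ChromeTests.SimpleTest in pdfium_tests.py
above (an assumption): create the Dr. Memory tool wrapper and run a test binary under
it. DRMEMORY_COMMAND must already point at drmemory.exe, and the binary path is
illustrative:

  import valgrind_test

  tool = valgrind_test.CreateTool("drmemory_full")
  cmd = ["--build-dir=out/Debug",
         "out/Debug/pdfium_unittests.exe",
         "--gtest_print_time"]
  ret = tool.Run(cmd, "pdfium_unittests")
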
diff --git a/tools/drmemory/suppressions.txt b/tools/drmemory/suppressions.txt
deleted file mode 100644
index 2ad8236cae..0000000000
--- a/tools/drmemory/suppressions.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-# This file contains suppressions for the Dr.Memory tool, see
-# http://dev.chromium.org/developers/how-tos/using-drmemory
-
-
-# Intended alloc failure
-WARNING
-name=unittests:fxcrt.FX_AllocOverflow
-drmemorylib.dll!replace_calloc
-...
-*!fxcrt_FX_*AllocOverflow*_Test::TestBody
-
-# DrMem-i#471: simple floating register copy without meaningful usage
-UNINITIALIZED READ
-name=embeddertests:DrM-i#471
-*!v8::internal::RegisterValues::GetDoubleRegister
-*!v8::internal::FrameDescription::GetDoubleRegister
-*!v8::internal::Deoptimizer::CopyDoubleRegisters
-*!v8::internal::Deoptimizer::DoComputeCompiledStubFrame
-*!v8::internal::Deoptimizer::DoComputeOutputFrames
-*!v8::internal::Deoptimizer::ComputeOutputFrames
-*!v8::internal::`anonymous namespace'::Invoke
-*!v8::internal::Execution::Call
-
-# PDFium-i#287: new/delete[] mismatch
-INVALID HEAP ARGUMENT
-name=i#287:new-delete-array-mismatch
-drmemorylib.dll!replace_operator_delete_array
-*!NumberlikeArray<>::~NumberlikeArray<>