
enigmail 1.4.3
automationutils.py
#
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is The Mozilla Foundation
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Serge Gautherie <sgautherie.bz@free.fr>
#   Ted Mielczarek <ted.mielczarek@gmail.com>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****

from __future__ import with_statement
import glob, logging, os, platform, shutil, subprocess, sys, tempfile, urllib2, zipfile
import re
from urlparse import urlparse
from operator import itemgetter

__all__ = [
  "ZipFileReader",
  "addCommonOptions",
  "checkForCrashes",
  "dumpLeakLog",
  "isURL",
  "processLeakLog",
  "getDebuggerInfo",
  "DEBUGGER_INFO",
  "replaceBackSlashes",
  "wrapCommand",
  "ShutdownLeakLogger"
  ]

# Map of debugging programs to information about them, like default arguments
# and whether or not they are interactive.
DEBUGGER_INFO = {
  # gdb requires that you supply the '--args' flag in order to pass arguments
  # after the executable name to the executable.
  "gdb": {
    "interactive": True,
    "args": "-q --args"
  },

  # valgrind doesn't explain much about leaks unless you set the
  # '--leak-check=full' flag.
  "valgrind": {
    "interactive": False,
    "args": "--leak-check=full"
  }
}

class ZipFileReader(object):
  """
  Class to read zip files in Python 2.5 and later. Limited to only what we
  actually use.
  """

  def __init__(self, filename):
    self._zipfile = zipfile.ZipFile(filename, "r")

  def __del__(self):
    self._zipfile.close()

  def _getnormalizedpath(self, path):
    """
    Gets a normalized path from 'path' (or the current working directory if
    'path' is None). Also asserts that the path exists.
    """
    if path is None:
      path = os.curdir
    path = os.path.normpath(os.path.expanduser(path))
    assert os.path.isdir(path)
    return path

  def _extractname(self, name, path):
    """
    Extracts a file with the given name from the zip file to the given path.
    Also creates any directories needed along the way.
    """
    filename = os.path.normpath(os.path.join(path, name))
    if name.endswith("/"):
      os.makedirs(filename)
    else:
      path = os.path.split(filename)[0]
      if not os.path.isdir(path):
        os.makedirs(path)
      with open(filename, "wb") as dest:
        dest.write(self._zipfile.read(name))

  def namelist(self):
    return self._zipfile.namelist()

  def read(self, name):
    return self._zipfile.read(name)

  def extract(self, name, path = None):
    if hasattr(self._zipfile, "extract"):
      return self._zipfile.extract(name, path)

    # This will throw if name is not part of the zip file.
    self._zipfile.getinfo(name)

    self._extractname(name, self._getnormalizedpath(path))

  def extractall(self, path = None):
    if hasattr(self._zipfile, "extractall"):
      return self._zipfile.extractall(path)

    path = self._getnormalizedpath(path)

    for name in self._zipfile.namelist():
      self._extractname(name, path)

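# --- Illustrative usage (not part of the original file) -----------------------
# A minimal sketch of how ZipFileReader might be used to unpack an archive;
# the archive name and destination directory are hypothetical placeholders,
# and the destination is assumed to already exist for the Python 2.5 fallback.
def _exampleExtractArchive(zipPath="symbols.zip", destDir="extracted-symbols"):
  reader = ZipFileReader(zipPath)
  # List the archive members, then extract everything; on Python 2.5 the
  # class falls back to its own _extractname() helper.
  for name in reader.namelist():
    print name
  reader.extractall(destDir)
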
log = logging.getLogger()

def isURL(thing):
  """Return True if |thing| looks like a URL."""
  # We want to download URLs like http://... but not Windows paths like c:\...
  return len(urlparse(thing).scheme) >= 2

def addCommonOptions(parser, defaults={}):
  parser.add_option("--xre-path",
                    action = "store", type = "string", dest = "xrePath",
                    # individual scripts will set a sane default
                    default = None,
                    help = "absolute path to directory containing XRE (probably xulrunner)")
  if 'SYMBOLS_PATH' not in defaults:
    defaults['SYMBOLS_PATH'] = None
  parser.add_option("--symbols-path",
                    action = "store", type = "string", dest = "symbolsPath",
                    default = defaults['SYMBOLS_PATH'],
                    help = "absolute path to directory containing breakpad symbols, or the URL of a zip file containing symbols")
  parser.add_option("--debugger",
                    action = "store", dest = "debugger",
                    help = "use the given debugger to launch the application")
  parser.add_option("--debugger-args",
                    action = "store", dest = "debuggerArgs",
                    help = "pass the given args to the debugger _before_ "
                           "the application on the command line")
  parser.add_option("--debugger-interactive",
                    action = "store_true", dest = "debuggerInteractive",
                    help = "prevents the test harness from redirecting "
                           "stdout and stderr for interactive debuggers")

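# --- Illustrative usage (not part of the original file) -----------------------
# A minimal sketch of wiring addCommonOptions into an optparse-based harness
# script; the defaults dict shown here is hypothetical.
def _exampleParseHarnessOptions(argv):
  from optparse import OptionParser
  parser = OptionParser()
  addCommonOptions(parser, defaults={"SYMBOLS_PATH": None})
  options, args = parser.parse_args(argv)
  # options.xrePath, options.symbolsPath, options.debugger, options.debuggerArgs
  # and options.debuggerInteractive are now available to the caller.
  return options, args
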
def checkForCrashes(dumpDir, symbolsPath, testName=None):
  stackwalkPath = os.environ.get('MINIDUMP_STACKWALK', None)
  # try to get the caller's filename if no test name is given
  if testName is None:
    try:
      testName = os.path.basename(sys._getframe(1).f_code.co_filename)
    except:
      testName = "unknown"

  # Check preconditions
  dumps = glob.glob(os.path.join(dumpDir, '*.dmp'))
  if len(dumps) == 0:
    return False

  foundCrash = False
  removeSymbolsPath = False

  # If our symbols are at a remote URL, download them now
  if isURL(symbolsPath):
    print "Downloading symbols from: " + symbolsPath
    removeSymbolsPath = True
    # Get the symbols and write them to a temporary zipfile
    data = urllib2.urlopen(symbolsPath)
    symbolsFile = tempfile.TemporaryFile()
    symbolsFile.write(data.read())
    # extract symbols to a temporary directory (which we'll delete after
    # processing all crashes)
    symbolsPath = tempfile.mkdtemp()
    zfile = ZipFileReader(symbolsFile)
    zfile.extractall(symbolsPath)

  try:
    for d in dumps:
      log.info("PROCESS-CRASH | %s | application crashed (minidump found)", testName)
      print "Crash dump filename: " + d
      if symbolsPath and stackwalkPath and os.path.exists(stackwalkPath):
        # run minidump stackwalk
        p = subprocess.Popen([stackwalkPath, d, symbolsPath],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        (out, err) = p.communicate()
        if len(out) > 3:
          # minidump_stackwalk is chatty, so ignore stderr when it succeeds.
          print out
        else:
          print "stderr from minidump_stackwalk:"
          print err
        if p.returncode != 0:
          print "minidump_stackwalk exited with return code %d" % p.returncode
      else:
        if not symbolsPath:
          print "No symbols path given, can't process dump."
        if not stackwalkPath:
          print "MINIDUMP_STACKWALK not set, can't process dump."
        elif stackwalkPath and not os.path.exists(stackwalkPath):
          print "MINIDUMP_STACKWALK binary not found: %s" % stackwalkPath
      dumpSavePath = os.environ.get('MINIDUMP_SAVE_PATH', None)
      if dumpSavePath:
        shutil.move(d, dumpSavePath)
        print "Saved dump as %s" % os.path.join(dumpSavePath,
                                                os.path.basename(d))
      else:
        os.remove(d)
      extra = os.path.splitext(d)[0] + ".extra"
      if os.path.exists(extra):
        os.remove(extra)
      foundCrash = True
  finally:
    if removeSymbolsPath:
      shutil.rmtree(symbolsPath)

  return foundCrash

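# --- Illustrative usage (not part of the original file) -----------------------
# A minimal sketch of calling checkForCrashes after a test run. The dump
# directory layout is hypothetical; MINIDUMP_STACKWALK must be set in the
# environment for the dumps to be symbolicated.
def _exampleCheckForCrashes(profileDir, symbolsPath):
  minidumpDir = os.path.join(profileDir, "minidumps")
  if checkForCrashes(minidumpDir, symbolsPath, testName="example-test"):
    log.info("at least one crash dump was found and processed")
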
def getFullPath(directory, path):
  "Get an absolute path relative to 'directory'."
  return os.path.normpath(os.path.join(directory, os.path.expanduser(path)))

def searchPath(directory, path):
  "Go one step beyond getFullPath and try the various folders in PATH"
  # Try looking in the current working directory first.
  newpath = getFullPath(directory, path)
  if os.path.isfile(newpath):
    return newpath

  # At this point we have to fail if a directory was given (to prevent cases
  # like './gdb' from matching '/usr/bin/./gdb').
  if not os.path.dirname(path):
    for dir in os.environ['PATH'].split(os.pathsep):
      newpath = os.path.join(dir, path)
      if os.path.isfile(newpath):
        return newpath
  return None

def getDebuggerInfo(directory, debugger, debuggerArgs, debuggerInteractive = False):

  debuggerInfo = None

  if debugger:
    debuggerPath = searchPath(directory, debugger)
    if not debuggerPath:
      print "Error: Path %s doesn't exist." % debugger
      sys.exit(1)

    debuggerName = os.path.basename(debuggerPath).lower()

    def getDebuggerInfo(type, default):
      if debuggerName in DEBUGGER_INFO and type in DEBUGGER_INFO[debuggerName]:
        return DEBUGGER_INFO[debuggerName][type]
      return default

    debuggerInfo = {
      "path": debuggerPath,
      "interactive" : getDebuggerInfo("interactive", False),
      "args": getDebuggerInfo("args", "").split()
    }

    if debuggerArgs:
      debuggerInfo["args"] = debuggerArgs.split()
    if debuggerInteractive:
      debuggerInfo["interactive"] = debuggerInteractive

  return debuggerInfo


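# --- Illustrative usage (not part of the original file) -----------------------
# A minimal sketch showing how the dict returned by getDebuggerInfo might be
# used to prefix an application command line; the "gdb" choice is an example,
# and getDebuggerInfo() exits if the requested debugger cannot be found.
def _exampleWrapWithDebugger(appCommand):
  debuggerInfo = getDebuggerInfo(os.getcwd(), "gdb", None, False)
  if debuggerInfo:
    # e.g. ["/usr/bin/gdb", "-q", "--args"] + appCommand
    return [debuggerInfo["path"]] + debuggerInfo["args"] + appCommand
  return appCommand
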
def dumpLeakLog(leakLogFile, filter = False):
  """Process the leak log, without parsing it.

  Use this function if you want the raw log only.
  Use it preferably with the |XPCOM_MEM_LEAK_LOG| environment variable.
  """

  # Don't warn (nor "info") if the log file is not there.
  if not os.path.exists(leakLogFile):
    return

  leaks = open(leakLogFile, "r")
  leakReport = leaks.read()
  leaks.close()

  # Only |XPCOM_MEM_LEAK_LOG| reports can be actually filtered out.
  # Only check whether an actual leak was reported.
  if filter and not "0 TOTAL " in leakReport:
    return

  # Simply copy the log.
  log.info(leakReport.rstrip("\n"))

def processSingleLeakFile(leakLogFileName, PID, processType, leakThreshold):
  """Process a single leak log, corresponding to the specified
  process PID and type.
  """

  #                  Per-Inst  Leaked      Total  Rem ...
  #   0 TOTAL              17     192  419115886    2 ...
  # 833 nsTimerImpl        60     120      24726    2 ...
  lineRe = re.compile(r"^\s*\d+\s+(?P<name>\S+)\s+"
                      r"(?P<size>-?\d+)\s+(?P<bytesLeaked>-?\d+)\s+"
                      r"-?\d+\s+(?P<numLeaked>-?\d+)")

  processString = ""
  if PID and processType:
    processString = "| %s process %s " % (processType, PID)
  leaks = open(leakLogFileName, "r")
  for line in leaks:
    matches = lineRe.match(line)
    if (matches and
        int(matches.group("numLeaked")) == 0 and
        matches.group("name") != "TOTAL"):
      continue
    log.info(line.rstrip())
  leaks.close()

  leaks = open(leakLogFileName, "r")
  seenTotal = False
  crashedOnPurpose = False
  prefix = "TEST-PASS"
  numObjects = 0
  for line in leaks:
    if line.find("purposefully crash") > -1:
      crashedOnPurpose = True
    matches = lineRe.match(line)
    if not matches:
      continue
    name = matches.group("name")
    size = int(matches.group("size"))
    bytesLeaked = int(matches.group("bytesLeaked"))
    numLeaked = int(matches.group("numLeaked"))
    if size < 0 or bytesLeaked < 0 or numLeaked < 0:
      log.info("TEST-UNEXPECTED-FAIL %s| automationutils.processLeakLog() | negative leaks caught!" %
               processString)
      if name == "TOTAL":
        seenTotal = True
    elif name == "TOTAL":
      seenTotal = True
      # Check for leaks.
      if bytesLeaked < 0 or bytesLeaked > leakThreshold:
        prefix = "TEST-UNEXPECTED-FAIL"
        leakLog = "TEST-UNEXPECTED-FAIL %s| automationutils.processLeakLog() | leaked" \
                  " %d bytes during test execution" % (processString, bytesLeaked)
      elif bytesLeaked > 0:
        leakLog = "TEST-PASS %s| automationutils.processLeakLog() | WARNING leaked" \
                  " %d bytes during test execution" % (processString, bytesLeaked)
      else:
        leakLog = "TEST-PASS %s| automationutils.processLeakLog() | no leaks detected!" \
                  % processString
      # Remind the threshold if it is not 0, which is the default/goal.
      if leakThreshold != 0:
        leakLog += " (threshold set at %d bytes)" % leakThreshold
      # Log the information.
      log.info(leakLog)
    else:
      if numLeaked != 0:
        if numLeaked > 1:
          instance = "instances"
          rest = " each (%s bytes total)" % matches.group("bytesLeaked")
        else:
          instance = "instance"
          rest = ""
        numObjects += 1
        if numObjects > 5:
          # don't spam brief tinderbox logs with tons of leak output
          prefix = "TEST-INFO"
        log.info("%(prefix)s %(process)s| automationutils.processLeakLog() | leaked %(numLeaked)d %(instance)s of %(name)s "
                 "with size %(size)s bytes%(rest)s" %
                 { "prefix": prefix,
                   "process": processString,
                   "numLeaked": numLeaked,
                   "instance": instance,
                   "name": name,
                   "size": matches.group("size"),
                   "rest": rest })
  if not seenTotal:
    if crashedOnPurpose:
      log.info("INFO | automationutils.processLeakLog() | process %s was " \
               "deliberately crashed and thus has no leak log" % PID)
    else:
      log.info("TEST-UNEXPECTED-FAIL %s| automationutils.processLeakLog() | missing output line for total leaks!" %
               processString)
  leaks.close()


def processLeakLog(leakLogFile, leakThreshold = 0):
  """Process the leak log, including separate leak logs created
  by child processes.

  Use this function if you want an additional PASS/FAIL summary.
  It must be used with the |XPCOM_MEM_BLOAT_LOG| environment variable.
  """

  if not os.path.exists(leakLogFile):
    log.info("WARNING | automationutils.processLeakLog() | refcount logging is off, so leaks can't be detected!")
    return

  (leakLogFileDir, leakFileBase) = os.path.split(leakLogFile)
  pidRegExp = re.compile(r".*?_([a-z]*)_pid(\d*)$")
  if leakFileBase[-4:] == ".log":
    leakFileBase = leakFileBase[:-4]
    pidRegExp = re.compile(r".*?_([a-z]*)_pid(\d*).log$")

  for fileName in os.listdir(leakLogFileDir):
    if fileName.find(leakFileBase) != -1:
      thisFile = os.path.join(leakLogFileDir, fileName)
      processPID = 0
      processType = None
      m = pidRegExp.search(fileName)
      if m:
        processType = m.group(1)
        processPID = m.group(2)
      processSingleLeakFile(thisFile, processPID, processType, leakThreshold)

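# --- Illustrative usage (not part of the original file) -----------------------
# A minimal sketch of the expected calling convention: point XPCOM_MEM_BLOAT_LOG
# at a log file before launching the application, then hand the same path to
# processLeakLog() afterwards. The file name and threshold are hypothetical.
def _exampleProcessLeaks(profileDir, leakThreshold=0):
  leakLogFile = os.path.join(profileDir, "runtests_leaks.log")
  os.environ["XPCOM_MEM_BLOAT_LOG"] = leakLogFile
  # ... launch and shut down the application under test here ...
  processLeakLog(leakLogFile, leakThreshold)
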
def replaceBackSlashes(input):
  return input.replace('\\', '/')

def wrapCommand(cmd):
  """
  If running on OS X 10.5 or older, wrap |cmd| so that it will
  be executed as an i386 binary, in case it's a 32-bit/64-bit universal
  binary.
  """
  if platform.system() == "Darwin" and \
     hasattr(platform, 'mac_ver') and \
     platform.mac_ver()[0][:4] < '10.6':
    return ["arch", "-arch", "i386"] + cmd
  # otherwise just execute the command normally
  return cmd

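# --- Illustrative usage (not part of the original file) -----------------------
# A minimal sketch of launching a command through wrapCommand; the binary path
# and arguments are hypothetical placeholders.
def _exampleLaunchWrapped(appBinary, extraArgs):
  cmd = wrapCommand([appBinary] + extraArgs)
  # On OS X 10.5 or older this becomes ["arch", "-arch", "i386", appBinary, ...].
  return subprocess.call(cmd)
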
class ShutdownLeakLogger(object):
  """
  Parses the mochitest run log when running a debug build, assigns all leaked
  DOM windows (that are still around after test suite shutdown, despite running
  the GC) to the tests that created them and prints leak statistics.
  """
  MAX_LEAK_COUNT = 121

  def __init__(self, logger):
    self.logger = logger
    self.tests = []
    self.leakedWindows = {}
    self.leakedDocShells = set()
    self.currentTest = None
    self.seenShutdown = False

  def log(self, line):
    if line[2:11] == "DOMWINDOW":
      self._logWindow(line)
    elif line[2:10] == "DOCSHELL":
      self._logDocShell(line)
    elif line.startswith("TEST-START"):
      fileName = line.split(" ")[-1].strip().replace("chrome://mochitests/content/browser/", "")
      self.currentTest = {"fileName": fileName, "windows": set(), "docShells": set()}
    elif line.startswith("INFO TEST-END"):
      # don't track a test if no windows or docShells leaked
      if self.currentTest["windows"] or self.currentTest["docShells"]:
        self.tests.append(self.currentTest)
      self.currentTest = None
    elif line.startswith("INFO TEST-START | Shutdown"):
      self.seenShutdown = True

  def parse(self):
    leakingTests = self._parseLeakingTests()

    if leakingTests:
      totalWindows = sum(len(test["leakedWindows"]) for test in leakingTests)
      totalDocShells = sum(len(test["leakedDocShells"]) for test in leakingTests)
      msgType = "INFO" if totalWindows + totalDocShells <= self.MAX_LEAK_COUNT else "UNEXPECTED-FAIL"
      self.logger.info("TEST-%s | ShutdownLeaks | leaked %d DOMWindow(s) and %d DocShell(s) until shutdown", msgType, totalWindows, totalDocShells)

    for test in leakingTests:
      self.logger.info("\n[%s]", test["fileName"])

      for url, count in self._zipLeakedWindows(test["leakedWindows"]):
        self.logger.info("  %d window(s) [url = %s]", count, url)

      if test["leakedDocShells"]:
        self.logger.info("  %d docShell(s)", len(test["leakedDocShells"]))

  def _logWindow(self, line):
    created = line[:2] == "++"
    id = self._parseValue(line, "serial")

    # log line has invalid format
    if not id:
      return

    if self.currentTest:
      windows = self.currentTest["windows"]
      if created:
        windows.add(id)
      else:
        windows.discard(id)
    elif self.seenShutdown and not created:
      self.leakedWindows[id] = self._parseValue(line, "url")

  def _logDocShell(self, line):
    created = line[:2] == "++"
    id = self._parseValue(line, "id")

    # log line has invalid format
    if not id:
      return

    if self.currentTest:
      docShells = self.currentTest["docShells"]
      if created:
        docShells.add(id)
      else:
        docShells.discard(id)
    elif self.seenShutdown and not created:
      self.leakedDocShells.add(id)

  def _parseValue(self, line, name):
    match = re.search(r"\[%s = (.+?)\]" % name, line)
    if match:
      return match.group(1)

  def _parseLeakingTests(self):
    leakingTests = []

    for test in self.tests:
      test["leakedWindows"] = [self.leakedWindows[id] for id in test["windows"] if id in self.leakedWindows]
      test["leakedDocShells"] = [id for id in test["docShells"] if id in self.leakedDocShells]
      test["leakCount"] = len(test["leakedWindows"]) + len(test["leakedDocShells"])

      if test["leakCount"]:
        leakingTests.append(test)

    return sorted(leakingTests, key=itemgetter("leakCount"), reverse=True)

  def _zipLeakedWindows(self, leakedWindows):
    counts = []
    counted = set()

    for url in leakedWindows:
      if not url in counted:
        counts.append((url, leakedWindows.count(url)))
        counted.add(url)

    return sorted(counts, key=itemgetter(1), reverse=True)
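
# --- Illustrative usage (not part of the original file) -----------------------
# A minimal sketch of feeding a mochitest run log through ShutdownLeakLogger
# line by line and printing the leak summary afterwards; the log file name is
# a hypothetical placeholder.
def _exampleParseShutdownLeaks(runLogPath="mochitest-run.log"):
  leakLogger = ShutdownLeakLogger(log)
  with open(runLogPath, "r") as runLog:
    for line in runLog:
      leakLogger.log(line)
  leakLogger.parse()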