Merge remote-tracking branch 'origin/master'
authorDmitry Trofimov <dmitry.trofimov@jetbrains.com>
Tue, 18 Oct 2016 19:13:22 +0000 (21:13 +0200)
committerDmitry Trofimov <dmitry.trofimov@jetbrains.com>
Tue, 18 Oct 2016 19:13:22 +0000 (21:13 +0200)
44 files changed:
plugins/coverage-common/src/com/intellij/coverage/SimpleCoverageAnnotator.java
python/helpers/coveragepy/coverage/__init__.py
python/helpers/coveragepy/coverage/__main__.py
python/helpers/coveragepy/coverage/annotate.py
python/helpers/coveragepy/coverage/backunittest.py [new file with mode: 0644]
python/helpers/coveragepy/coverage/backward.py
python/helpers/coveragepy/coverage/bytecode.py
python/helpers/coveragepy/coverage/cmdline.py
python/helpers/coveragepy/coverage/codeunit.py [deleted file]
python/helpers/coveragepy/coverage/collector.py
python/helpers/coveragepy/coverage/config.py
python/helpers/coveragepy/coverage/control.py
python/helpers/coveragepy/coverage/data.py
python/helpers/coveragepy/coverage/debug.py
python/helpers/coveragepy/coverage/env.py [new file with mode: 0644]
python/helpers/coveragepy/coverage/execfile.py
python/helpers/coveragepy/coverage/files.py
python/helpers/coveragepy/coverage/fullcoverage/encodings.py [deleted file]
python/helpers/coveragepy/coverage/html.py
python/helpers/coveragepy/coverage/htmlfiles/coverage_html.js
python/helpers/coveragepy/coverage/htmlfiles/index.html
python/helpers/coveragepy/coverage/htmlfiles/jquery.debounce.min.js [new file with mode: 0644]
python/helpers/coveragepy/coverage/htmlfiles/jquery.min.js
python/helpers/coveragepy/coverage/htmlfiles/keybd_closed.png
python/helpers/coveragepy/coverage/htmlfiles/keybd_open.png
python/helpers/coveragepy/coverage/htmlfiles/pyfile.html
python/helpers/coveragepy/coverage/htmlfiles/style.css
python/helpers/coveragepy/coverage/misc.py
python/helpers/coveragepy/coverage/multiproc.py [new file with mode: 0644]
python/helpers/coveragepy/coverage/parser.py
python/helpers/coveragepy/coverage/phystokens.py
python/helpers/coveragepy/coverage/pickle2json.py [new file with mode: 0644]
python/helpers/coveragepy/coverage/plugin.py [new file with mode: 0644]
python/helpers/coveragepy/coverage/plugin_support.py [new file with mode: 0644]
python/helpers/coveragepy/coverage/python.py [new file with mode: 0644]
python/helpers/coveragepy/coverage/pytracer.py [new file with mode: 0644]
python/helpers/coveragepy/coverage/report.py
python/helpers/coveragepy/coverage/results.py
python/helpers/coveragepy/coverage/summary.py
python/helpers/coveragepy/coverage/templite.py
python/helpers/coveragepy/coverage/tracer.c [deleted file]
python/helpers/coveragepy/coverage/version.py
python/helpers/coveragepy/coverage/xmlreport.py
python/src/com/jetbrains/python/run/PyVirtualEnvReader.kt

index eb41bcfe34a9a2464b77382b9608dd82453b87f4..98c4d88becba990c93a1c0ad1aa10a5bc62a90de 100644 (file)
@@ -140,8 +140,7 @@ public abstract class SimpleCoverageAnnotator extends BaseCoverageAnnotator {
   protected FileCoverageInfo collectBaseFileCoverage(@NotNull final VirtualFile file,
                                                      @NotNull final Annotator annotator,
                                                      @NotNull final ProjectData projectData,
-                                                     @NotNull final Map<String, String> normalizedFiles2Files)
-  {
+                                                     @NotNull final Map<String, String> normalizedFiles2Files) {
     final String filePath = normalizeFilePath(file.getPath());
 
     // process file
@@ -166,8 +165,7 @@ public abstract class SimpleCoverageAnnotator extends BaseCoverageAnnotator {
   private static @Nullable ClassData getClassData(
     final @NotNull String filePath,
     final @NotNull ProjectData data,
-    final @NotNull Map<String, String> normalizedFiles2Files)
-  {
+    final @NotNull Map<String, String> normalizedFiles2Files) {
     final String originalFileName = normalizedFiles2Files.get(filePath);
     if (originalFileName == null) {
       return null;
@@ -272,8 +270,7 @@ public abstract class SimpleCoverageAnnotator extends BaseCoverageAnnotator {
                        @NotNull final CoverageSuitesBundle suite,
                        final @NotNull CoverageDataManager dataManager, @NotNull final ProjectData data,
                        final Project project,
-                       final Annotator annotator)
-  {
+                       final Annotator annotator) {
     if (!contentRoot.isValid()) {
       return;
     }
@@ -395,7 +392,7 @@ public abstract class SimpleCoverageAnnotator extends BaseCoverageAnnotator {
   }
 
   @Nullable
-  private static FileCoverageInfo fileInfoForCoveredFile(@NotNull final ClassData classData) {
+  private FileCoverageInfo fileInfoForCoveredFile(@NotNull final ClassData classData) {
     final Object[] lines = classData.getLines();
 
     // class data lines = [0, 1, ... count] but first element with index = #0 is fake and isn't
@@ -408,27 +405,31 @@ public abstract class SimpleCoverageAnnotator extends BaseCoverageAnnotator {
 
     final FileCoverageInfo info = new FileCoverageInfo();
 
-    int srcLinesCount = 0;
-    int coveredLinesCount = 0;
+    info.coveredLineCount = 0;
+    info.totalLineCount = 0;
     // let's count covered lines
     for (int i = 1; i <= count; i++) {
       final LineData lineData = classData.getLineData(i);
-      if (lineData == null) {
-        // Ignore not src code
-        continue;
-      }
-      final int status = lineData.getStatus();
-      // covered - if src code & covered (or inferred covered)
-      if (status != LineCoverage.NONE) {
-        coveredLinesCount++;
-      }
-      srcLinesCount++;
+
+      processLineData(info, lineData);
     }
-    info.totalLineCount = srcLinesCount;
-    info.coveredLineCount = coveredLinesCount;
     return info;
   }
 
+  protected void processLineData(@NotNull  FileCoverageInfo info, @Nullable  LineData lineData) {
+    if (lineData == null) {
+      // Ignore not src code
+      return;
+    }
+    final int status = lineData.getStatus();
+    // covered - if src code & covered (or inferred covered)
+
+    if (status != LineCoverage.NONE) {
+      info.coveredLineCount++;
+    }
+    info.totalLineCount++;
+  }
+
   @Nullable
   protected FileCoverageInfo fillInfoForUncoveredFile(@NotNull File file) {
     return null;
index 193b7a107ebd3716e69333d11df05537c8208ed3..192239926b7f5843b708ee7ba035df6b1b31b5f9 100644 (file)
@@ -1,3 +1,6 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
 """Code coverage measurement for Python.
 
 Ned Batchelder
@@ -5,73 +8,16 @@ http://nedbatchelder.com/code/coverage
 
 """
 
-from coverage.version import __version__, __url__
+from coverage.version import __version__, __url__, version_info
 
-from coverage.control import coverage, process_startup
+from coverage.control import Coverage, process_startup
 from coverage.data import CoverageData
-from coverage.cmdline import main, CoverageScript
 from coverage.misc import CoverageException
+from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
+from coverage.pytracer import PyTracer
 
-# Module-level functions.  The original API to this module was based on
-# functions defined directly in the module, with a singleton of the coverage()
-# class.  That design hampered programmability, so the current api uses
-# explicitly-created coverage objects.  But for backward compatibility, here we
-# define the top-level functions to create the singleton when they are first
-# called.
-
-# Singleton object for use with module-level functions.  The singleton is
-# created as needed when one of the module-level functions is called.
-_the_coverage = None
-
-def _singleton_method(name):
-    """Return a function to the `name` method on a singleton `coverage` object.
-
-    The singleton object is created the first time one of these functions is
-    called.
-
-    """
-    # Disable pylint msg W0612, because a bunch of variables look unused, but
-    # they're accessed via locals().
-    # pylint: disable=W0612
-
-    def wrapper(*args, **kwargs):
-        """Singleton wrapper around a coverage method."""
-        global _the_coverage
-        if not _the_coverage:
-            _the_coverage = coverage(auto_data=True)
-        return getattr(_the_coverage, name)(*args, **kwargs)
-
-    import inspect
-    meth = getattr(coverage, name)
-    args, varargs, kw, defaults = inspect.getargspec(meth)
-    argspec = inspect.formatargspec(args[1:], varargs, kw, defaults)
-    docstring = meth.__doc__
-    wrapper.__doc__ = ("""\
-        A first-use-singleton wrapper around coverage.%(name)s.
-
-        This wrapper is provided for backward compatibility with legacy code.
-        New code should use coverage.%(name)s directly.
-
-        %(name)s%(argspec)s:
-
-        %(docstring)s
-        """ % locals()
-        )
-
-    return wrapper
-
-
-# Define the module-level functions.
-use_cache = _singleton_method('use_cache')
-start =     _singleton_method('start')
-stop =      _singleton_method('stop')
-erase =     _singleton_method('erase')
-exclude =   _singleton_method('exclude')
-analysis =  _singleton_method('analysis')
-analysis2 = _singleton_method('analysis2')
-report =    _singleton_method('report')
-annotate =  _singleton_method('annotate')
-
+# Backward compatibility.
+coverage = Coverage
 
 # On Windows, we encode and decode deep enough that something goes wrong and
 # the encodings.utf_8 module is loaded and then unloaded, I don't know why.
@@ -87,34 +33,3 @@ try:
     del sys.modules['coverage.coverage']
 except KeyError:
     pass
-
-
-# COPYRIGHT AND LICENSE
-#
-# Copyright 2001 Gareth Rees.  All rights reserved.
-# Copyright 2004-2013 Ned Batchelder.  All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# 1. Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-#
-# 2. Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the
-#    distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-# DAMAGE.
index 55e0d259e04acfb78ba6c4cd2386606aa65a6ad4..35ab87a56bf42edb948641bb97b43939ca86e69a 100644 (file)
@@ -1,4 +1,8 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
 """Coverage.py's main entry point."""
+
 import sys
 from coverage.cmdline import main
 sys.exit(main())
index 5c396784445cabaa28be0dac7a7316d67bfd7111..4060450fffb3bd5d8360421234e973495c13ab9e 100644 (file)
@@ -1,10 +1,19 @@
-"""Source file annotation for Coverage."""
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
 
-import os, re
+"""Source file annotation for coverage.py."""
 
-from coverage.backward import sorted                    # pylint: disable=W0622
+import io
+import os
+import re
+
+from coverage.files import flat_rootname
+from coverage.misc import isolate_module
 from coverage.report import Reporter
 
+os = isolate_module(os)
+
+
 class AnnotateReporter(Reporter):
     """Generate annotated source files showing line coverage.
 
@@ -42,61 +51,53 @@ class AnnotateReporter(Reporter):
         """
         self.report_files(self.annotate_file, morfs, directory)
 
-    def annotate_file(self, cu, analysis):
+    def annotate_file(self, fr, analysis):
         """Annotate a single file.
 
-        `cu` is the CodeUnit for the file to annotate.
+        `fr` is the FileReporter for the file to annotate.
 
         """
-        if not cu.relative:
-            return
-
-        filename = cu.filename
-        source = cu.source_file()
-        if self.directory:
-            dest_file = os.path.join(self.directory, cu.flat_rootname())
-            dest_file += ".py,cover"
-        else:
-            dest_file = filename + ",cover"
-        dest = open(dest_file, 'w')
-
         statements = sorted(analysis.statements)
         missing = sorted(analysis.missing)
         excluded = sorted(analysis.excluded)
 
-        lineno = 0
-        i = 0
-        j = 0
-        covered = True
-        while True:
-            line = source.readline()
-            if line == '':
-                break
-            lineno += 1
-            while i < len(statements) and statements[i] < lineno:
-                i += 1
-            while j < len(missing) and missing[j] < lineno:
-                j += 1
-            if i < len(statements) and statements[i] == lineno:
-                covered = j >= len(missing) or missing[j] > lineno
-            if self.blank_re.match(line):
-                dest.write('  ')
-            elif self.else_re.match(line):
-                # Special logic for lines containing only 'else:'.
-                if i >= len(statements) and j >= len(missing):
-                    dest.write('! ')
-                elif i >= len(statements) or j >= len(missing):
-                    dest.write('> ')
-                elif statements[i] == missing[j]:
-                    dest.write('! ')
+        if self.directory:
+            dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename()))
+            if dest_file.endswith("_py"):
+                dest_file = dest_file[:-3] + ".py"
+            dest_file += ",cover"
+        else:
+            dest_file = fr.filename + ",cover"
+
+        with io.open(dest_file, 'w', encoding='utf8') as dest:
+            i = 0
+            j = 0
+            covered = True
+            source = fr.source()
+            for lineno, line in enumerate(source.splitlines(True), start=1):
+                while i < len(statements) and statements[i] < lineno:
+                    i += 1
+                while j < len(missing) and missing[j] < lineno:
+                    j += 1
+                if i < len(statements) and statements[i] == lineno:
+                    covered = j >= len(missing) or missing[j] > lineno
+                if self.blank_re.match(line):
+                    dest.write(u'  ')
+                elif self.else_re.match(line):
+                    # Special logic for lines containing only 'else:'.
+                    if i >= len(statements) and j >= len(missing):
+                        dest.write(u'! ')
+                    elif i >= len(statements) or j >= len(missing):
+                        dest.write(u'> ')
+                    elif statements[i] == missing[j]:
+                        dest.write(u'! ')
+                    else:
+                        dest.write(u'> ')
+                elif lineno in excluded:
+                    dest.write(u'- ')
+                elif covered:
+                    dest.write(u'> ')
                 else:
-                    dest.write('> ')
-            elif lineno in excluded:
-                dest.write('- ')
-            elif covered:
-                dest.write('> ')
-            else:
-                dest.write('! ')
-            dest.write(line)
-        source.close()
-        dest.close()
+                    dest.write(u'! ')
+
+                dest.write(line)
diff --git a/python/helpers/coveragepy/coverage/backunittest.py b/python/helpers/coveragepy/coverage/backunittest.py
new file mode 100644 (file)
index 0000000..09574cc
--- /dev/null
@@ -0,0 +1,42 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Implementations of unittest features from the future."""
+
+# Use unittest2 if it's available, otherwise unittest.  This gives us
+# back-ported features for 2.6.
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+
+def unittest_has(method):
+    """Does `unittest.TestCase` have `method` defined?"""
+    return hasattr(unittest.TestCase, method)
+
+
+class TestCase(unittest.TestCase):
+    """Just like unittest.TestCase, but with assert methods added.
+
+    Designed to be compatible with 3.1 unittest.  Methods are only defined if
+    `unittest` doesn't have them.
+
+    """
+    # pylint: disable=missing-docstring
+
+    # Many Pythons have this method defined.  But PyPy3 has a bug with it
+    # somehow (https://bitbucket.org/pypy/pypy/issues/2092), so always use our
+    # own implementation that works everywhere, at least for the ways we're
+    # calling it.
+    def assertCountEqual(self, s1, s2):
+        """Assert these have the same elements, regardless of order."""
+        self.assertEqual(sorted(s1), sorted(s2))
+
+    if not unittest_has('assertRaisesRegex'):
+        def assertRaisesRegex(self, *args, **kwargs):
+            return self.assertRaisesRegexp(*args, **kwargs)
+
+    if not unittest_has('assertRegex'):
+        def assertRegex(self, *args, **kwargs):
+            return self.assertRegexpMatches(*args, **kwargs)
index 7d2685459782c85c59ff7b78cf8d02adf4b2ca89..700c3ebd1c0afac0bea547894d05f7b1af3c4c88 100644 (file)
@@ -1,60 +1,29 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
 """Add things to old Pythons so I can pretend they are newer."""
 
-# This file does lots of tricky stuff, so disable a bunch of lintisms.
-# pylint: disable=F0401,W0611,W0622
-# F0401: Unable to import blah
-# W0611: Unused import blah
-# W0622: Redefining built-in blah
+# This file does lots of tricky stuff, so disable a bunch of pylint warnings.
+# pylint: disable=redefined-builtin
+# pylint: disable=unused-import
+# pxlint: disable=no-name-in-module
 
-import os, re, sys
+import sys
 
-# Python 2.3 doesn't have `set`
-try:
-    set = set       # new in 2.4
-except NameError:
-    from sets import Set as set
+from coverage import env
 
-# Python 2.3 doesn't have `sorted`.
-try:
-    sorted = sorted
-except NameError:
-    def sorted(iterable):
-        """A 2.3-compatible implementation of `sorted`."""
-        lst = list(iterable)
-        lst.sort()
-        return lst
 
-# Python 2.3 doesn't have `reversed`.
+# Pythons 2 and 3 differ on where to get StringIO.
 try:
-    reversed = reversed
-except NameError:
-    def reversed(iterable):
-        """A 2.3-compatible implementation of `reversed`."""
-        lst = list(iterable)
-        return lst[::-1]
-
-# rpartition is new in 2.5
-try:
-    "".rpartition
-except AttributeError:
-    def rpartition(s, sep):
-        """Implement s.rpartition(sep) for old Pythons."""
-        i = s.rfind(sep)
-        if i == -1:
-            return ('', '', s)
-        else:
-            return (s[:i], sep, s[i+len(sep):])
-else:
-    def rpartition(s, sep):
-        """A common interface for new Pythons."""
-        return s.rpartition(sep)
+    from cStringIO import StringIO
+except ImportError:
+    from io import StringIO
 
-# Pythons 2 and 3 differ on where to get StringIO
+# In py3, ConfigParser was renamed to the more-standard configparser
 try:
-    from cStringIO import StringIO
-    BytesIO = StringIO
+    import configparser
 except ImportError:
-    from io import StringIO, BytesIO
+    import ConfigParser as configparser
 
 # What's a string called?
 try:
@@ -62,6 +31,12 @@ try:
 except NameError:
     string_class = str
 
+# What's a Unicode string called?
+try:
+    unicode_class = unicode
+except NameError:
+    unicode_class = str
+
 # Where do pickles come from?
 try:
     import cPickle as pickle
@@ -72,7 +47,16 @@ except ImportError:
 try:
     range = xrange
 except NameError:
-    range = range
+    range = range       # pylint: disable=redefined-variable-type
+
+# shlex.quote is new, but there's an undocumented implementation in "pipes",
+# who knew!?
+try:
+    from shlex import quote as shlex_quote
+except ImportError:
+    # Useful function, available under a different (undocumented) name
+    # in Python versions earlier than 3.3.
+    from pipes import quote as shlex_quote
 
 # A function to iterate listlessly over a dict's items.
 try:
@@ -86,71 +70,32 @@ else:
         """Produce the items from dict `d`."""
         return d.iteritems()
 
-# Exec is a statement in Py2, a function in Py3
-if sys.version_info >= (3, 0):
-    def exec_code_object(code, global_map):
-        """A wrapper around exec()."""
-        exec(code, global_map)
-else:
-    # OK, this is pretty gross.  In Py2, exec was a statement, but that will
-    # be a syntax error if we try to put it in a Py3 file, even if it is never
-    # executed.  So hide it inside an evaluated string literal instead.
-    eval(
-        compile(
-            "def exec_code_object(code, global_map):\n"
-            "    exec code in global_map\n",
-            "<exec_function>", "exec"
-            )
-        )
-
-# Reading Python source and interpreting the coding comment is a big deal.
-if sys.version_info >= (3, 0):
-    # Python 3.2 provides `tokenize.open`, the best way to open source files.
-    import tokenize
-    try:
-        open_source = tokenize.open     # pylint: disable=E1101
-    except AttributeError:
-        from io import TextIOWrapper
-        detect_encoding = tokenize.detect_encoding  # pylint: disable=E1101
-        # Copied from the 3.2 stdlib:
-        def open_source(fname):
-            """Open a file in read only mode using the encoding detected by
-            detect_encoding().
-            """
-            buffer = open(fname, 'rb')
-            encoding, _ = detect_encoding(buffer.readline)
-            buffer.seek(0)
-            text = TextIOWrapper(buffer, encoding, line_buffering=True)
-            text.mode = 'r'
-            return text
+# Getting the `next` function from an iterator is different in 2 and 3.
+try:
+    iter([]).next
+except AttributeError:
+    def iternext(seq):
+        """Get the `next` function for iterating over `seq`."""
+        return iter(seq).__next__
 else:
-    def open_source(fname):
-        """Open a source file the best way."""
-        return open(fname, "rU")
-
+    def iternext(seq):
+        """Get the `next` function for iterating over `seq`."""
+        return iter(seq).next
 
 # Python 3.x is picky about bytes and strings, so provide methods to
 # get them right, and make them no-ops in 2.x
-if sys.version_info >= (3, 0):
+if env.PY3:
     def to_bytes(s):
         """Convert string `s` to bytes."""
         return s.encode('utf8')
 
-    def to_string(b):
-        """Convert bytes `b` to a string."""
-        return b.decode('utf8')
-
     def binary_bytes(byte_values):
         """Produce a byte string with the ints from `byte_values`."""
         return bytes(byte_values)
 
-    def byte_to_int(byte_value):
-        """Turn an element of a bytes object into an int."""
-        return byte_value
-
     def bytes_to_ints(bytes_value):
         """Turn a bytes object into a sequence of ints."""
-        # In Py3, iterating bytes gives ints.
+        # In Python 3, iterating bytes gives ints.
         return bytes_value
 
 else:
@@ -158,27 +103,70 @@ else:
         """Convert string `s` to bytes (no-op in 2.x)."""
         return s
 
-    def to_string(b):
-        """Convert bytes `b` to a string (no-op in 2.x)."""
-        return b
-
     def binary_bytes(byte_values):
         """Produce a byte string with the ints from `byte_values`."""
-        return "".join([chr(b) for b in byte_values])
-
-    def byte_to_int(byte_value):
-        """Turn an element of a bytes object into an int."""
-        return ord(byte_value)
+        return "".join(chr(b) for b in byte_values)
 
     def bytes_to_ints(bytes_value):
         """Turn a bytes object into a sequence of ints."""
         for byte in bytes_value:
             yield ord(byte)
 
-# Md5 is available in different places.
+
+try:
+    # In Python 2.x, the builtins were in __builtin__
+    BUILTINS = sys.modules['__builtin__']
+except KeyError:
+    # In Python 3.x, they're in builtins
+    BUILTINS = sys.modules['builtins']
+
+
+# imp was deprecated in Python 3.3
 try:
-    import hashlib
-    md5 = hashlib.md5
+    import importlib
+    import importlib.util
+    imp = None
 except ImportError:
-    import md5
-    md5 = md5.new
+    importlib = None
+
+# We only want to use importlib if it has everything we need.
+try:
+    importlib_util_find_spec = importlib.util.find_spec
+except Exception:
+    import imp
+    importlib_util_find_spec = None
+
+# What is the .pyc magic number for this version of Python?
+try:
+    PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER
+except AttributeError:
+    PYC_MAGIC_NUMBER = imp.get_magic()
+
+
+def import_local_file(modname, modfile=None):
+    """Import a local file as a module.
+
+    Opens a file in the current directory named `modname`.py, imports it
+    as `modname`, and returns the module object.  `modfile` is the file to
+    import if it isn't in the current directory.
+
+    """
+    try:
+        from importlib.machinery import SourceFileLoader
+    except ImportError:
+        SourceFileLoader = None
+
+    if modfile is None:
+        modfile = modname + '.py'
+    if SourceFileLoader:
+        mod = SourceFileLoader(modname, modfile).load_module()
+    else:
+        for suff in imp.get_suffixes():                 # pragma: part covered
+            if suff[0] == '.py':
+                break
+
+        with open(modfile, 'r') as f:
+            # pylint: disable=undefined-loop-variable
+            mod = imp.load_module(modname, f, modfile, suff)
+
+    return mod
index 85360638528e6bece33b1f5a1e4dd08e5c2ac3d0..d823c67c9200d7578b6610d5bf2a00e2f0c8347d 100644 (file)
@@ -1,62 +1,9 @@
-"""Bytecode manipulation for coverage.py"""
-
-import opcode, types
-
-from coverage.backward import byte_to_int
-
-class ByteCode(object):
-    """A single bytecode."""
-    def __init__(self):
-        # The offset of this bytecode in the code object.
-        self.offset = -1
-
-        # The opcode, defined in the `opcode` module.
-        self.op = -1
-
-        # The argument, a small integer, whose meaning depends on the opcode.
-        self.arg = -1
-
-        # The offset in the code object of the next bytecode.
-        self.next_offset = -1
-
-        # The offset to jump to.
-        self.jump_to = -1
-
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
 
-class ByteCodes(object):
-    """Iterator over byte codes in `code`.
-
-    Returns `ByteCode` objects.
-
-    """
-    # pylint: disable=R0924
-    def __init__(self, code):
-        self.code = code
-
-    def __getitem__(self, i):
-        return byte_to_int(self.code[i])
-
-    def __iter__(self):
-        offset = 0
-        while offset < len(self.code):
-            bc = ByteCode()
-            bc.op = self[offset]
-            bc.offset = offset
-
-            next_offset = offset+1
-            if bc.op >= opcode.HAVE_ARGUMENT:
-                bc.arg = self[offset+1] + 256*self[offset+2]
-                next_offset += 2
-
-                label = -1
-                if bc.op in opcode.hasjrel:
-                    label = next_offset + bc.arg
-                elif bc.op in opcode.hasjabs:
-                    label = bc.arg
-                bc.jump_to = label
+"""Bytecode manipulation for coverage.py"""
 
-            bc.next_offset = offset = next_offset
-            yield bc
+import types
 
 
 class CodeObjects(object):
index ea112a8b8f2d425be141bdde38a9603d9c685b34..09e8232313cdf7e5a751018bf5f124d26e2a88c6 100644 (file)
-"""Command-line support for Coverage."""
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
 
-import optparse, os, sys, time, traceback
+"""Command-line support for coverage.py."""
 
-from coverage.backward import sorted                # pylint: disable=W0622
+import glob
+import optparse
+import os.path
+import sys
+import textwrap
+import traceback
+
+from coverage import env
+from coverage.collector import CTracer
 from coverage.execfile import run_python_file, run_python_module
 from coverage.misc import CoverageException, ExceptionDuringRun, NoSource
-from coverage.debug import info_formatter
+from coverage.debug import info_formatter, info_header
 
 
 class Opts(object):
     """A namespace class for individual options we'll build parsers from."""
 
     append = optparse.make_option(
-        '-a', '--append', action='store_false', dest="erase_first",
-        help="Append coverage data to .coverage, otherwise it is started "
-                "clean with each run."
-        )
+        '-a', '--append', action='store_true',
+        help="Append coverage data to .coverage, otherwise it starts clean each time.",
+    )
     branch = optparse.make_option(
         '', '--branch', action='store_true',
-        help="Measure branch coverage in addition to statement coverage."
-        )
+        help="Measure branch coverage in addition to statement coverage.",
+    )
+    CONCURRENCY_CHOICES = [
+        "thread", "gevent", "greenlet", "eventlet", "multiprocessing",
+    ]
+    concurrency = optparse.make_option(
+        '', '--concurrency', action='store', metavar="LIB",
+        choices=CONCURRENCY_CHOICES,
+        help=(
+            "Properly measure code using a concurrency library. "
+            "Valid values are: %s."
+        ) % ", ".join(CONCURRENCY_CHOICES),
+    )
     debug = optparse.make_option(
         '', '--debug', action='store', metavar="OPTS",
-        help="Debug options, separated by commas"
-        )
+        help="Debug options, separated by commas",
+    )
     directory = optparse.make_option(
         '-d', '--directory', action='store', metavar="DIR",
-        help="Write the output files to DIR."
-        )
+        help="Write the output files to DIR.",
+    )
     fail_under = optparse.make_option(
         '', '--fail-under', action='store', metavar="MIN", type="int",
-        help="Exit with a status of 2 if the total coverage is less than MIN."
-        )
+        help="Exit with a status of 2 if the total coverage is less than MIN.",
+    )
     help = optparse.make_option(
         '-h', '--help', action='store_true',
-        help="Get help on this command."
-        )
+        help="Get help on this command.",
+    )
     ignore_errors = optparse.make_option(
         '-i', '--ignore-errors', action='store_true',
-        help="Ignore errors while reading source files."
-        )
+        help="Ignore errors while reading source files.",
+    )
     include = optparse.make_option(
         '', '--include', action='store',
         metavar="PAT1,PAT2,...",
-        help="Include files only when their filename path matches one of "
-                "these patterns.  Usually needs quoting on the command line."
-        )
+        help=(
+            "Include only files whose paths match one of these patterns. "
+            "Accepts shell-style wildcards, which must be quoted."
+        ),
+    )
     pylib = optparse.make_option(
         '-L', '--pylib', action='store_true',
-        help="Measure coverage even inside the Python installed library, "
-                "which isn't done by default."
-        )
+        help=(
+            "Measure coverage even inside the Python installed library, "
+            "which isn't done by default."
+        ),
+    )
     show_missing = optparse.make_option(
         '-m', '--show-missing', action='store_true',
-        help="Show line numbers of statements in each module that weren't "
-                "executed."
-        )
-    old_omit = optparse.make_option(
-        '-o', '--omit', action='store',
-        metavar="PAT1,PAT2,...",
-        help="Omit files when their filename matches one of these patterns. "
-                "Usually needs quoting on the command line."
-        )
+        help="Show line numbers of statements in each module that weren't executed.",
+    )
+    skip_covered = optparse.make_option(
+        '--skip-covered', action='store_true',
+        help="Skip files with 100% coverage.",
+    )
     omit = optparse.make_option(
         '', '--omit', action='store',
         metavar="PAT1,PAT2,...",
-        help="Omit files when their filename matches one of these patterns. "
-                "Usually needs quoting on the command line."
-        )
+        help=(
+            "Omit files whose paths match one of these patterns. "
+            "Accepts shell-style wildcards, which must be quoted."
+        ),
+    )
     output_xml = optparse.make_option(
         '-o', '', action='store', dest="outfile",
         metavar="OUTFILE",
-        help="Write the XML report to this file. Defaults to 'coverage.xml'"
-        )
+        help="Write the XML report to this file. Defaults to 'coverage.xml'",
+    )
     parallel_mode = optparse.make_option(
         '-p', '--parallel-mode', action='store_true',
-        help="Append the machine name, process id and random number to the "
-                ".coverage data file name to simplify collecting data from "
-                "many processes."
-        )
+        help=(
+            "Append the machine name, process id and random number to the "
+            ".coverage data file name to simplify collecting data from "
+            "many processes."
+        ),
+    )
     module = optparse.make_option(
         '-m', '--module', action='store_true',
-        help="<pyfile> is an importable Python module, not a script path, "
-                "to be run as 'python -m' would run it."
-        )
+        help=(
+            "<pyfile> is an importable Python module, not a script path, "
+            "to be run as 'python -m' would run it."
+        ),
+    )
     rcfile = optparse.make_option(
         '', '--rcfile', action='store',
-        help="Specify configuration file.  Defaults to '.coveragerc'"
-        )
+        help="Specify configuration file.  Defaults to '.coveragerc'",
+    )
     source = optparse.make_option(
         '', '--source', action='store', metavar="SRC1,SRC2,...",
-        help="A list of packages or directories of code to be measured."
-        )
+        help="A list of packages or directories of code to be measured.",
+    )
     timid = optparse.make_option(
         '', '--timid', action='store_true',
-        help="Use a simpler but slower trace method.  Try this if you get "
-                "seemingly impossible results!"
-        )
+        help=(
+            "Use a simpler but slower trace method.  Try this if you get "
+            "seemingly impossible results!"
+        ),
+    )
     title = optparse.make_option(
         '', '--title', action='store', metavar="TITLE",
-        help="A text string to use as the title on the HTML."
-        )
+        help="A text string to use as the title on the HTML.",
+    )
     version = optparse.make_option(
         '', '--version', action='store_true',
-        help="Display version information and exit."
-        )
+        help="Display version information and exit.",
+    )
 
 
 class CoverageOptionParser(optparse.OptionParser, object):
-    """Base OptionParser for coverage.
+    """Base OptionParser for coverage.py.
 
     Problems don't exit the program.
     Defaults are initialized for all options.
@@ -120,24 +148,26 @@ class CoverageOptionParser(optparse.OptionParser, object):
             add_help_option=False, *args, **kwargs
             )
         self.set_defaults(
-            actions=[],
+            action=None,
+            append=None,
             branch=None,
+            concurrency=None,
             debug=None,
             directory=None,
             fail_under=None,
             help=None,
             ignore_errors=None,
             include=None,
+            module=None,
             omit=None,
             parallel_mode=None,
-            module=None,
             pylib=None,
             rcfile=True,
             show_missing=None,
+            skip_covered=None,
             source=None,
             timid=None,
             title=None,
-            erase_first=None,
             version=None,
             )
 
@@ -152,7 +182,7 @@ class CoverageOptionParser(optparse.OptionParser, object):
         """Used to stop the optparse error handler ending the process."""
         pass
 
-    def parse_args(self, args=None, options=None):
+    def parse_args_ok(self, args=None, options=None):
         """Call optparse.parse_args, but return a triple:
 
         (ok, options, args)
@@ -171,189 +201,187 @@ class CoverageOptionParser(optparse.OptionParser, object):
         raise self.OptionParserError
 
 
-class ClassicOptionParser(CoverageOptionParser):
-    """Command-line parser for coverage.py classic arguments."""
+class GlobalOptionParser(CoverageOptionParser):
+    """Command-line parser for coverage.py global option arguments."""
 
     def __init__(self):
-        super(ClassicOptionParser, self).__init__()
-
-        self.add_action('-a', '--annotate', 'annotate')
-        self.add_action('-b', '--html', 'html')
-        self.add_action('-c', '--combine', 'combine')
-        self.add_action('-e', '--erase', 'erase')
-        self.add_action('-r', '--report', 'report')
-        self.add_action('-x', '--execute', 'execute')
+        super(GlobalOptionParser, self).__init__()
 
         self.add_options([
-            Opts.directory,
             Opts.help,
-            Opts.ignore_errors,
-            Opts.pylib,
-            Opts.show_missing,
-            Opts.old_omit,
-            Opts.parallel_mode,
-            Opts.timid,
             Opts.version,
         ])
 
-    def add_action(self, dash, dashdash, action_code):
-        """Add a specialized option that is the action to execute."""
-        option = self.add_option(dash, dashdash, action='callback',
-            callback=self._append_action
-            )
-        option.action_code = action_code
-
-    def _append_action(self, option, opt_unused, value_unused, parser):
-        """Callback for an option that adds to the `actions` list."""
-        parser.values.actions.append(option.action_code)
-
 
 class CmdOptionParser(CoverageOptionParser):
     """Parse one of the new-style commands for coverage.py."""
 
-    def __init__(self, action, options=None, defaults=None, usage=None,
-                cmd=None, description=None
-                ):
-        """Create an OptionParser for a coverage command.
+    def __init__(self, action, options, defaults=None, usage=None, description=None):
+        """Create an OptionParser for a coverage.py command.
 
-        `action` is the slug to put into `options.actions`.
+        `action` is the slug to put into `options.action`.
         `options` is a list of Option's for the command.
         `defaults` is a dict of default value for options.
         `usage` is the usage string to display in help.
-        `cmd` is the command name, if different than `action`.
         `description` is the description of the command, for the help text.
 
         """
         if usage:
             usage = "%prog " + usage
         super(CmdOptionParser, self).__init__(
-            prog="coverage %s" % (cmd or action),
             usage=usage,
             description=description,
         )
-        self.set_defaults(actions=[action], **(defaults or {}))
-        if options:
-            self.add_options(options)
-        self.cmd = cmd or action
+        self.set_defaults(action=action, **(defaults or {}))
+        self.add_options(options)
+        self.cmd = action
 
     def __eq__(self, other):
         # A convenience equality, so that I can put strings in unit test
         # results, and they will compare equal to objects.
         return (other == "<CmdOptionParser:%s>" % self.cmd)
 
+    def get_prog_name(self):
+        """Override of an undocumented function in optparse.OptionParser."""
+        program_name = super(CmdOptionParser, self).get_prog_name()
+
+        # Include the sub-command for this parser as part of the command.
+        return "%(command)s %(subcommand)s" % {'command': program_name, 'subcommand': self.cmd}
+
+
 GLOBAL_ARGS = [
-    Opts.rcfile,
+    Opts.debug,
     Opts.help,
+    Opts.rcfile,
     ]
 
 CMDS = {
-    'annotate': CmdOptionParser("annotate",
+    'annotate': CmdOptionParser(
+        "annotate",
         [
             Opts.directory,
             Opts.ignore_errors,
-            Opts.omit,
             Opts.include,
+            Opts.omit,
             ] + GLOBAL_ARGS,
-        usage = "[options] [modules]",
-        description = "Make annotated copies of the given files, marking "
-            "statements that are executed with > and statements that are "
-            "missed with !."
+        usage="[options] [modules]",
+        description=(
+            "Make annotated copies of the given files, marking statements that are executed "
+            "with > and statements that are missed with !."
         ),
+    ),
 
-    'combine': CmdOptionParser("combine", GLOBAL_ARGS,
-        usage = " ",
-        description = "Combine data from multiple coverage files collected "
+    'combine': CmdOptionParser(
+        "combine",
+        [
+            Opts.append,
+            ] + GLOBAL_ARGS,
+        usage="[options] <path1> <path2> ... <pathN>",
+        description=(
+            "Combine data from multiple coverage files collected "
             "with 'run -p'.  The combined results are written to a single "
-            "file representing the union of the data."
+            "file representing the union of the data. The positional "
+            "arguments are data files or directories containing data files. "
+            "If no paths are provided, data files in the default data file's "
+            "directory are combined."
         ),
+    ),
 
-    'debug': CmdOptionParser("debug", GLOBAL_ARGS,
-        usage = "<topic>",
-        description = "Display information on the internals of coverage.py, "
+    'debug': CmdOptionParser(
+        "debug", GLOBAL_ARGS,
+        usage="<topic>",
+        description=(
+            "Display information on the internals of coverage.py, "
             "for diagnosing problems. "
             "Topics are 'data' to show a summary of the collected data, "
             "or 'sys' to show installation information."
         ),
+    ),
 
-    'erase': CmdOptionParser("erase", GLOBAL_ARGS,
-        usage = " ",
-        description = "Erase previously collected coverage data."
-        ),
+    'erase': CmdOptionParser(
+        "erase", GLOBAL_ARGS,
+        description="Erase previously collected coverage data.",
+    ),
 
-    'help': CmdOptionParser("help", GLOBAL_ARGS,
-        usage = "[command]",
-        description = "Describe how to use coverage.py"
-        ),
+    'help': CmdOptionParser(
+        "help", GLOBAL_ARGS,
+        usage="[command]",
+        description="Describe how to use coverage.py",
+    ),
 
-    'html': CmdOptionParser("html",
+    'html': CmdOptionParser(
+        "html",
         [
             Opts.directory,
             Opts.fail_under,
             Opts.ignore_errors,
-            Opts.omit,
             Opts.include,
+            Opts.omit,
             Opts.title,
             ] + GLOBAL_ARGS,
-        usage = "[options] [modules]",
-        description = "Create an HTML report of the coverage of the files.  "
+        usage="[options] [modules]",
+        description=(
+            "Create an HTML report of the coverage of the files.  "
             "Each file gets its own page, with the source decorated to show "
             "executed, excluded, and missed lines."
         ),
+    ),
 
-    'report': CmdOptionParser("report",
+    'report': CmdOptionParser(
+        "report",
         [
             Opts.fail_under,
             Opts.ignore_errors,
-            Opts.omit,
             Opts.include,
+            Opts.omit,
             Opts.show_missing,
+            Opts.skip_covered,
             ] + GLOBAL_ARGS,
-        usage = "[options] [modules]",
-        description = "Report coverage statistics on modules."
-        ),
+        usage="[options] [modules]",
+        description="Report coverage statistics on modules."
+    ),
 
-    'run': CmdOptionParser("execute",
+    'run': CmdOptionParser(
+        "run",
         [
             Opts.append,
             Opts.branch,
-            Opts.debug,
+            Opts.concurrency,
+            Opts.include,
+            Opts.module,
+            Opts.omit,
             Opts.pylib,
             Opts.parallel_mode,
-            Opts.module,
-            Opts.timid,
             Opts.source,
-            Opts.omit,
-            Opts.include,
+            Opts.timid,
             ] + GLOBAL_ARGS,
-        defaults = {'erase_first': True},
-        cmd = "run",
-        usage = "[options] <pyfile> [program options]",
-        description = "Run a Python program, measuring code execution."
-        ),
+        usage="[options] <pyfile> [program options]",
+        description="Run a Python program, measuring code execution."
+    ),
 
-    'xml': CmdOptionParser("xml",
+    'xml': CmdOptionParser(
+        "xml",
         [
             Opts.fail_under,
             Opts.ignore_errors,
-            Opts.omit,
             Opts.include,
+            Opts.omit,
             Opts.output_xml,
             ] + GLOBAL_ARGS,
-        cmd = "xml",
-        usage = "[options] [modules]",
-        description = "Generate an XML report of coverage results."
-        ),
-    }
+        usage="[options] [modules]",
+        description="Generate an XML report of coverage results."
+    ),
+}
 
 
 OK, ERR, FAIL_UNDER = 0, 1, 2
 
 
 class CoverageScript(object):
-    """The command-line interface to Coverage."""
+    """The command-line interface to coverage.py."""
 
     def __init__(self, _covpkg=None, _run_python_file=None,
-                 _run_python_module=None, _help_fn=None):
+                 _run_python_module=None, _help_fn=None, _path_exists=None):
         # _covpkg is for dependency injection, so we can test this code.
         if _covpkg:
             self.covpkg = _covpkg
@@ -365,12 +393,26 @@ class CoverageScript(object):
         self.run_python_file = _run_python_file or run_python_file
         self.run_python_module = _run_python_module or run_python_module
         self.help_fn = _help_fn or self.help
-        self.classic = False
+        self.path_exists = _path_exists or os.path.exists
+        self.global_option = False
 
         self.coverage = None
 
+        self.program_name = os.path.basename(sys.argv[0])
+        if self.program_name == '__main__.py':
+            self.program_name = 'coverage'
+        if env.WINDOWS:
+            # entry_points={'console_scripts':...} on Windows makes files
+            # called coverage.exe, coverage3.exe, and coverage-3.5.exe. These
+            # invoke coverage-script.py, coverage3-script.py, and
+            # coverage-3.5-script.py.  argv[0] is the .py file, but we want to
+            # get back to the original form.
+            auto_suffix = "-script.py"
+            if self.program_name.endswith(auto_suffix):
+                self.program_name = self.program_name[:-len(auto_suffix)]
+
     def command_line(self, argv):
-        """The bulk of the command line interface to Coverage.
+        """The bulk of the command line interface to coverage.py.
 
         `argv` is the argument list to process.
 
@@ -382,11 +424,11 @@ class CoverageScript(object):
             self.help_fn(topic='minimum_help')
             return OK
 
-        # The command syntax we parse depends on the first argument.  Classic
-        # syntax always starts with an option.
-        self.classic = argv[0].startswith('-')
-        if self.classic:
-            parser = ClassicOptionParser()
+        # The command syntax we parse depends on the first argument.  Global
+        # switch syntax always starts with an option.
+        self.global_option = argv[0].startswith('-')
+        if self.global_option:
+            parser = GlobalOptionParser()
         else:
             parser = CMDS.get(argv[0])
             if not parser:
@@ -395,7 +437,7 @@ class CoverageScript(object):
             argv = argv[1:]
 
         parser.help_fn = self.help_fn
-        ok, options, args = parser.parse_args(argv)
+        ok, options, args = parser.parse_args_ok(argv)
         if not ok:
             return ERR
 
@@ -403,9 +445,9 @@ class CoverageScript(object):
         if self.do_help(options, args, parser):
             return OK
 
-        # Check for conflicts and problems in the options.
-        if not self.args_ok(options, args):
-            return ERR
+        # We need to be able to import from the current directory, because
+        # plugins may try, for example, to read Django settings.
+        sys.path[0] = ''
 
         # Listify the list options.
         source = unshell_list(options.source)
@@ -415,74 +457,101 @@ class CoverageScript(object):
 
         # Do something.
         self.coverage = self.covpkg.coverage(
-            data_suffix = options.parallel_mode,
-            cover_pylib = options.pylib,
-            timid = options.timid,
-            branch = options.branch,
-            config_file = options.rcfile,
-            source = source,
-            omit = omit,
-            include = include,
-            debug = debug,
+            data_suffix=options.parallel_mode,
+            cover_pylib=options.pylib,
+            timid=options.timid,
+            branch=options.branch,
+            config_file=options.rcfile,
+            source=source,
+            omit=omit,
+            include=include,
+            debug=debug,
+            concurrency=options.concurrency,
             )
 
-        if 'debug' in options.actions:
+        if options.action == "debug":
             return self.do_debug(args)
 
-        if 'erase' in options.actions or options.erase_first:
+        elif options.action == "erase":
             self.coverage.erase()
-        else:
-            self.coverage.load()
+            return OK
 
-        if 'execute' in options.actions:
-            self.do_execute(options, args)
+        elif options.action == "run":
+            return self.do_run(options, args)
 
-        if 'combine' in options.actions:
-            self.coverage.combine()
+        elif options.action == "combine":
+            if options.append:
+                self.coverage.load()
+            data_dirs = args or None
+            self.coverage.combine(data_dirs)
             self.coverage.save()
+            return OK
 
         # Remaining actions are reporting, with some common options.
         report_args = dict(
-            morfs = args,
-            ignore_errors = options.ignore_errors,
-            omit = omit,
-            include = include,
+            morfs=unglob_args(args),
+            ignore_errors=options.ignore_errors,
+            omit=omit,
+            include=include,
             )
 
-        if 'report' in options.actions:
+        self.coverage.load()
+
+        total = None
+        if options.action == "report":
             total = self.coverage.report(
-                show_missing=options.show_missing, **report_args)
-        if 'annotate' in options.actions:
+                show_missing=options.show_missing,
+                skip_covered=options.skip_covered, **report_args)
+        elif options.action == "annotate":
             self.coverage.annotate(
                 directory=options.directory, **report_args)
-        if 'html' in options.actions:
+        elif options.action == "html":
             total = self.coverage.html_report(
                 directory=options.directory, title=options.title,
                 **report_args)
-        if 'xml' in options.actions:
+        elif options.action == "xml":
             outfile = options.outfile
             total = self.coverage.xml_report(outfile=outfile, **report_args)
 
-        if options.fail_under is not None:
-            if total >= options.fail_under:
-                return OK
-            else:
-                return FAIL_UNDER
-        else:
-            return OK
+        if total is not None:
+            # Apply the command-line fail-under option, then use the config
+            # value, so we can get fail_under from the config file.
+            if options.fail_under is not None:
+                self.coverage.set_option("report:fail_under", options.fail_under)
+
+            if self.coverage.get_option("report:fail_under"):
+                # Total needs to be rounded, but don't want to report 100
+                # unless it is really 100.
+                if 99 < total < 100:
+                    total = 99
+                else:
+                    total = round(total)
+
+                if total >= self.coverage.get_option("report:fail_under"):
+                    return OK
+                else:
+                    return FAIL_UNDER
+
+        return OK
 
     def help(self, error=None, topic=None, parser=None):
         """Display an error message, or the named topic."""
         assert error or topic or parser
         if error:
             print(error)
-            print("Use 'coverage help' for help.")
+            print("Use '%s help' for help." % (self.program_name,))
         elif parser:
             print(parser.format_help().strip())
         else:
-            help_msg = HELP_TOPICS.get(topic, '').strip()
+            help_params = dict(self.covpkg.__dict__)
+            help_params['program_name'] = self.program_name
+            if CTracer is not None:
+                help_params['extension_modifier'] = 'with C extension'
+            else:
+                help_params['extension_modifier'] = 'without C extension'
+            help_msg = textwrap.dedent(HELP_TOPICS.get(topic, '')).strip()
             if help_msg:
-                print(help_msg % self.covpkg.__dict__)
+                print(help_msg.format(**help_params))
             else:
                 print("Don't know topic %r" % topic)
 
@@ -494,13 +563,13 @@ class CoverageScript(object):
         """
         # Handle help.
         if options.help:
-            if self.classic:
+            if self.global_option:
                 self.help_fn(topic='help')
             else:
                 self.help_fn(parser=parser)
             return True
 
-        if "help" in options.actions:
+        if options.action == "help":
             if args:
                 for a in args:
                     parser = CMDS.get(a)
@@ -519,98 +588,97 @@ class CoverageScript(object):
 
         return False
 
-    def args_ok(self, options, args):
-        """Check for conflicts and problems in the options.
-
-        Returns True if everything is ok, or False if not.
-
-        """
-        for i in ['erase', 'execute']:
-            for j in ['annotate', 'html', 'report', 'combine']:
-                if (i in options.actions) and (j in options.actions):
-                    self.help_fn("You can't specify the '%s' and '%s' "
-                              "options at the same time." % (i, j))
-                    return False
-
-        if not options.actions:
-            self.help_fn(
-                "You must specify at least one of -e, -x, -c, -r, -a, or -b."
-                )
-            return False
-        args_allowed = (
-            'execute' in options.actions or
-            'annotate' in options.actions or
-            'html' in options.actions or
-            'debug' in options.actions or
-            'report' in options.actions or
-            'xml' in options.actions
-            )
-        if not args_allowed and args:
-            self.help_fn("Unexpected arguments: %s" % " ".join(args))
-            return False
+    def do_run(self, options, args):
+        """Implementation of 'coverage run'."""
 
-        if 'execute' in options.actions and not args:
+        if not args:
             self.help_fn("Nothing to do.")
-            return False
-
-        return True
+            return ERR
 
-    def do_execute(self, options, args):
-        """Implementation of 'coverage run'."""
+        if options.append and self.coverage.get_option("run:parallel"):
+            self.help_fn("Can't append to data files in parallel mode.")
+            return ERR
 
-        # Set the first path element properly.
-        old_path0 = sys.path[0]
+        if options.concurrency == "multiprocessing":
+            # Can't set other run-affecting command line options with
+            # multiprocessing.
+            for opt_name in ['branch', 'include', 'omit', 'pylib', 'source', 'timid']:
+                # As it happens, all of these options have no default, meaning
+                # they will be None if they have not been specified.
+                if getattr(options, opt_name) is not None:
+                    self.help_fn(
+                        "Options affecting multiprocessing must be specified "
+                        "in a configuration file."
+                    )
+                    return ERR
+
+        if not self.coverage.get_option("run:parallel"):
+            if not options.append:
+                self.coverage.erase()
 
         # Run the script.
         self.coverage.start()
         code_ran = True
         try:
-            try:
-                if options.module:
-                    sys.path[0] = ''
-                    self.run_python_module(args[0], args)
-                else:
-                    filename = args[0]
-                    sys.path[0] = os.path.abspath(os.path.dirname(filename))
-                    self.run_python_file(filename, args)
-            except NoSource:
-                code_ran = False
-                raise
+            if options.module:
+                self.run_python_module(args[0], args)
+            else:
+                filename = args[0]
+                self.run_python_file(filename, args)
+        except NoSource:
+            code_ran = False
+            raise
         finally:
             self.coverage.stop()
             if code_ran:
+                if options.append:
+                    data_file = self.coverage.get_option("run:data_file")
+                    if self.path_exists(data_file):
+                        self.coverage.combine(data_paths=[data_file])
                 self.coverage.save()
 
-            # Restore the old path
-            sys.path[0] = old_path0
+        return OK
 
     def do_debug(self, args):
         """Implementation of 'coverage debug'."""
 
         if not args:
-            self.help_fn("What information would you like: data, sys?")
+            self.help_fn("What information would you like: config, data, sys?")
             return ERR
+
         for info in args:
             if info == 'sys':
-                print("-- sys ----------------------------------------")
-                for line in info_formatter(self.coverage.sysinfo()):
+                sys_info = self.coverage.sys_info()
+                print(info_header("sys"))
+                for line in info_formatter(sys_info):
                     print(" %s" % line)
             elif info == 'data':
-                print("-- data ---------------------------------------")
                 self.coverage.load()
-                print("path: %s" % self.coverage.data.filename)
-                print("has_arcs: %r" % self.coverage.data.has_arcs())
-                summary = self.coverage.data.summary(fullpath=True)
-                if summary:
+                data = self.coverage.data
+                print(info_header("data"))
+                print("path: %s" % self.coverage.data_files.filename)
+                if data:
+                    print("has_arcs: %r" % data.has_arcs())
+                    summary = data.line_counts(fullpath=True)
                     filenames = sorted(summary.keys())
                     print("\n%d files:" % len(filenames))
                     for f in filenames:
-                        print("%s: %d lines" % (f, summary[f]))
+                        line = "%s: %d lines" % (f, summary[f])
+                        plugin = data.file_tracer(f)
+                        if plugin:
+                            line += " [%s]" % plugin
+                        print(line)
                 else:
                     print("No data collected")
+            elif info == 'config':
+                print(info_header("config"))
+                config_info = self.coverage.config.__dict__.items()
+                for line in info_formatter(config_info):
+                    print(" %s" % line)
             else:
                 self.help_fn("Don't know what you mean by %r" % info)
                 return ERR
+
         return OK
 
 
@@ -618,98 +686,63 @@ def unshell_list(s):
     """Turn a command-line argument into a list."""
     if not s:
         return None
-    if sys.platform == 'win32':
-        # When running coverage as coverage.exe, some of the behavior
+    if env.WINDOWS:
+        # When running coverage.py as coverage.exe, some of the behavior
         # of the shell is emulated: wildcards are expanded into a list of
-        # filenames.  So you have to single-quote patterns on the command
+        # file names.  So you have to single-quote patterns on the command
         # line, but (not) helpfully, the single quotes are included in the
         # argument, so we have to strip them off here.
         s = s.strip("'")
     return s.split(',')
 
 
+def unglob_args(args):
+    """Interpret shell wildcards for platforms that need it."""
+    if env.WINDOWS:
+        globbed = []
+        for arg in args:
+            if '?' in arg or '*' in arg:
+                globbed.extend(glob.glob(arg))
+            else:
+                globbed.append(arg)
+        args = globbed
+    return args
+
+
 HELP_TOPICS = {
-# -------------------------
-'classic':
-r"""Coverage.py version %(__version__)s
-Measure, collect, and report on code coverage in Python programs.
-
-Usage:
-
-coverage -x [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...]
-    Execute the module, passing the given command-line arguments, collecting
-    coverage data.  With the -p option, include the machine name and process
-    id in the .coverage file name.  With -L, measure coverage even inside the
-    Python installed library, which isn't done by default.  With --timid, use a
-    simpler but slower trace method.
-
-coverage -e
-    Erase collected coverage data.
-
-coverage -c
-    Combine data from multiple coverage files (as created by -p option above)
-    and store it into a single file representing the union of the coverage.
-
-coverage -r [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...]
-    Report on the statement coverage for the given files.  With the -m
-    option, show line numbers of the statements that weren't executed.
-
-coverage -b -d DIR [-i] [-o DIR,...] [FILE1 FILE2 ...]
-    Create an HTML report of the coverage of the given files.  Each file gets
-    its own page, with the file listing decorated to show executed, excluded,
-    and missed lines.
-
-coverage -a [-d DIR] [-i] [-o DIR,...] [FILE1 FILE2 ...]
-    Make annotated copies of the given files, marking statements that
-    are executed with > and statements that are missed with !.
-
--d DIR
-    Write output files for -b or -a to this directory.
-
--i  Ignore errors while reporting or annotating.
-
--o DIR,...
-    Omit reporting or annotating files when their filename path starts with
-    a directory listed in the omit list.
-    e.g. coverage -i -r -o c:\python25,lib\enthought\traits
-
-Coverage data is saved in the file .coverage by default.  Set the
-COVERAGE_FILE environment variable to save it somewhere else.
-""",
-# -------------------------
-'help': """\
-Coverage.py, version %(__version__)s
-Measure, collect, and report on code coverage in Python programs.
-
-usage: coverage <command> [options] [args]
-
-Commands:
-    annotate    Annotate source files with execution information.
-    combine     Combine a number of data files.
-    erase       Erase previously collected coverage data.
-    help        Get help on using coverage.py.
-    html        Create an HTML report.
-    report      Report coverage stats on modules.
-    run         Run a Python program and measure code execution.
-    xml         Create an XML report of coverage results.
-
-Use "coverage help <command>" for detailed help on any command.
-Use "coverage help classic" for help on older command syntax.
-For more information, see %(__url__)s
-""",
-# -------------------------
-'minimum_help': """\
-Code coverage for Python.  Use 'coverage help' for help.
-""",
-# -------------------------
-'version': """\
-Coverage.py, version %(__version__)s.  %(__url__)s
-""",
+    'help': """\
+        Coverage.py, version {__version__} {extension_modifier}
+        Measure, collect, and report on code coverage in Python programs.
+
+        usage: {program_name} <command> [options] [args]
+
+        Commands:
+            annotate    Annotate source files with execution information.
+            combine     Combine a number of data files.
+            erase       Erase previously collected coverage data.
+            help        Get help on using coverage.py.
+            html        Create an HTML report.
+            report      Report coverage stats on modules.
+            run         Run a Python program and measure code execution.
+            xml         Create an XML report of coverage results.
+
+        Use "{program_name} help <command>" for detailed help on any command.
+        For full documentation, see {__url__}
+    """,
+
+    'minimum_help': """\
+        Code coverage for Python.  Use '{program_name} help' for help.
+    """,
+
+    'version': """\
+        Coverage.py, version {__version__} {extension_modifier}
+        Documentation at {__url__}
+    """,
 }
 
 
 def main(argv=None):
-    """The main entry point to Coverage.
+    """The main entry point to coverage.py.
 
     This is installed as the script entry point.
 
@@ -717,26 +750,19 @@ def main(argv=None):
     if argv is None:
         argv = sys.argv[1:]
     try:
-        start = time.clock()
         status = CoverageScript().command_line(argv)
-        end = time.clock()
-        if 0:
-            print("time: %.3fs" % (end - start))
-    except ExceptionDuringRun:
+    except ExceptionDuringRun as err:
         # An exception was caught while running the product code.  The
         # sys.exc_info() return tuple is packed into an ExceptionDuringRun
         # exception.
-        _, err, _ = sys.exc_info()
         traceback.print_exception(*err.args)
         status = ERR
-    except CoverageException:
+    except CoverageException as err:
         # A controlled error inside coverage.py: print the message to the user.
-        _, err, _ = sys.exc_info()
         print(err)
         status = ERR
-    except SystemExit:
+    except SystemExit as err:
         # The user called `sys.exit()`.  Exit with their argument, if any.
-        _, err, _ = sys.exc_info()
         if err.args:
             status = err.args[0]
         else:
diff --git a/python/helpers/coveragepy/coverage/codeunit.py b/python/helpers/coveragepy/coverage/codeunit.py
deleted file mode 100644 (file)
index ca1ae5c..0000000
+++ /dev/null
@@ -1,145 +0,0 @@
-"""Code unit (module) handling for Coverage."""
-
-import glob, os
-
-from coverage.backward import open_source, string_class, StringIO
-from coverage.misc import CoverageException
-
-
-def code_unit_factory(morfs, file_locator):
-    """Construct a list of CodeUnits from polymorphic inputs.
-
-    `morfs` is a module or a filename, or a list of same.
-
-    `file_locator` is a FileLocator that can help resolve filenames.
-
-    Returns a list of CodeUnit objects.
-
-    """
-    # Be sure we have a list.
-    if not isinstance(morfs, (list, tuple)):
-        morfs = [morfs]
-
-    # On Windows, the shell doesn't expand wildcards.  Do it here.
-    globbed = []
-    for morf in morfs:
-        if isinstance(morf, string_class) and ('?' in morf or '*' in morf):
-            globbed.extend(glob.glob(morf))
-        else:
-            globbed.append(morf)
-    morfs = globbed
-
-    code_units = [CodeUnit(morf, file_locator) for morf in morfs]
-
-    return code_units
-
-
-class CodeUnit(object):
-    """Code unit: a filename or module.
-
-    Instance attributes:
-
-    `name` is a human-readable name for this code unit.
-    `filename` is the os path from which we can read the source.
-    `relative` is a boolean.
-
-    """
-    def __init__(self, morf, file_locator):
-        self.file_locator = file_locator
-
-        if hasattr(morf, '__file__'):
-            f = morf.__file__
-        else:
-            f = morf
-        # .pyc files should always refer to a .py instead.
-        if f.endswith('.pyc') or f.endswith('.pyo'):
-            f = f[:-1]
-        elif f.endswith('$py.class'): # Jython
-            f = f[:-9] + ".py"
-        self.filename = self.file_locator.canonical_filename(f)
-
-        if hasattr(morf, '__name__'):
-            n = modname = morf.__name__
-            self.relative = True
-        else:
-            n = os.path.splitext(morf)[0]
-            rel = self.file_locator.relative_filename(n)
-            if os.path.isabs(n):
-                self.relative = (rel != n)
-            else:
-                self.relative = True
-            n = rel
-            modname = None
-        self.name = n
-        self.modname = modname
-
-    def __repr__(self):
-        return "<CodeUnit name=%r filename=%r>" % (self.name, self.filename)
-
-    # Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all
-    # of them defined.
-
-    def __lt__(self, other):
-        return self.name < other.name
-    def __le__(self, other):
-        return self.name <= other.name
-    def __eq__(self, other):
-        return self.name == other.name
-    def __ne__(self, other):
-        return self.name != other.name
-    def __gt__(self, other):
-        return self.name > other.name
-    def __ge__(self, other):
-        return self.name >= other.name
-
-    def flat_rootname(self):
-        """A base for a flat filename to correspond to this code unit.
-
-        Useful for writing files about the code where you want all the files in
-        the same directory, but need to differentiate same-named files from
-        different directories.
-
-        For example, the file a/b/c.py might return 'a_b_c'
-
-        """
-        if self.modname:
-            return self.modname.replace('.', '_')
-        else:
-            root = os.path.splitdrive(self.name)[1]
-            return root.replace('\\', '_').replace('/', '_').replace('.', '_')
-
-    def source_file(self):
-        """Return an open file for reading the source of the code unit."""
-        if os.path.exists(self.filename):
-            # A regular text file: open it.
-            return open_source(self.filename)
-
-        # Maybe it's in a zip file?
-        source = self.file_locator.get_zip_data(self.filename)
-        if source is not None:
-            return StringIO(source)
-
-        # Couldn't find source.
-        raise CoverageException(
-            "No source for code '%s'." % self.filename
-            )
-
-    def should_be_python(self):
-        """Does it seem like this file should contain Python?
-
-        This is used to decide if a file reported as part of the exection of
-        a program was really likely to have contained Python in the first
-        place.
-
-        """
-        # Get the file extension.
-        _, ext = os.path.splitext(self.filename)
-
-        # Anything named *.py* should be Python.
-        if ext.startswith('.py'):
-            return True
-        # A file with no extension should be Python.
-        if not ext:
-            return True
-        # Everything else is probably not Python.
-        return False
index 8ba7d87cd4e055b2e11efccd487bc7493f58d19d..3e28b3b149f458e33e5c5e252cd7f92bab5576fd 100644 (file)
-"""Raw data collector for Coverage."""
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Raw data collector for coverage.py."""
+
+import os
+import sys
+
+from coverage import env
+from coverage.backward import iitems
+from coverage.files import abs_file
+from coverage.misc import CoverageException, isolate_module
+from coverage.pytracer import PyTracer
+
+os = isolate_module(os)
 
-import os, sys, threading
 
 try:
     # Use the C extension code when we can, for speed.
-    from coverage.tracer import CTracer         # pylint: disable=F0401,E0611
+    from coverage.tracer import CTracer, CFileDisposition
 except ImportError:
     # Couldn't import the C extension, maybe it isn't built.
     if os.getenv('COVERAGE_TEST_TRACER') == 'c':
-        # During testing, we use the COVERAGE_TEST_TRACER env var to indicate
-        # that we've fiddled with the environment to test this fallback code.
-        # If we thought we had a C tracer, but couldn't import it, then exit
-        # quickly and clearly instead of dribbling confusing errors. I'm using
-        # sys.exit here instead of an exception because an exception here
-        # causes all sorts of other noise in unittest.
-        sys.stderr.write(
-            "*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n"
-            )
+        # During testing, we use the COVERAGE_TEST_TRACER environment variable
+        # to indicate that we've fiddled with the environment to test this
+        # fallback code.  If we thought we had a C tracer, but couldn't import
+        # it, then exit quickly and clearly instead of dribbling confusing
+        # errors. I'm using sys.exit here instead of an exception because an
+        # exception here causes all sorts of other noise in unittest.
+        sys.stderr.write("*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n")
         sys.exit(1)
     CTracer = None
 
 
-class PyTracer(object):
-    """Python implementation of the raw data tracer."""
-
-    # Because of poor implementations of trace-function-manipulating tools,
-    # the Python trace function must be kept very simple.  In particular, there
-    # must be only one function ever set as the trace function, both through
-    # sys.settrace, and as the return value from the trace function.  Put
-    # another way, the trace function must always return itself.  It cannot
-    # swap in other functions, or return None to avoid tracing a particular
-    # frame.
-    #
-    # The trace manipulator that introduced this restriction is DecoratorTools,
-    # which sets a trace function, and then later restores the pre-existing one
-    # by calling sys.settrace with a function it found in the current frame.
-    #
-    # Systems that use DecoratorTools (or similar trace manipulations) must use
-    # PyTracer to get accurate results.  The command-line --timid argument is
-    # used to force the use of this tracer.
-
-    def __init__(self):
-        self.data = None
-        self.should_trace = None
-        self.should_trace_cache = None
-        self.warn = None
-        self.cur_file_data = None
-        self.last_line = 0
-        self.data_stack = []
-        self.last_exc_back = None
-        self.last_exc_firstlineno = 0
-        self.arcs = False
-        self.thread = None
-        self.stopped = False
-
-    def _trace(self, frame, event, arg_unused):
-        """The trace function passed to sys.settrace."""
-
-        if self.stopped:
-            return
-
-        if 0:
-            sys.stderr.write("trace event: %s %r @%d\n" % (
-                event, frame.f_code.co_filename, frame.f_lineno
-            ))
-
-        if self.last_exc_back:
-            if frame == self.last_exc_back:
-                # Someone forgot a return event.
-                if self.arcs and self.cur_file_data:
-                    pair = (self.last_line, -self.last_exc_firstlineno)
-                    self.cur_file_data[pair] = None
-                self.cur_file_data, self.last_line = self.data_stack.pop()
-            self.last_exc_back = None
-
-        if event == 'call':
-            # Entering a new function context.  Decide if we should trace
-            # in this file.
-            self.data_stack.append((self.cur_file_data, self.last_line))
-            filename = frame.f_code.co_filename
-            if filename not in self.should_trace_cache:
-                tracename = self.should_trace(filename, frame)
-                self.should_trace_cache[filename] = tracename
-            else:
-                tracename = self.should_trace_cache[filename]
-            #print("called, stack is %d deep, tracename is %r" % (
-            #               len(self.data_stack), tracename))
-            if tracename:
-                if tracename not in self.data:
-                    self.data[tracename] = {}
-                self.cur_file_data = self.data[tracename]
-            else:
-                self.cur_file_data = None
-            # Set the last_line to -1 because the next arc will be entering a
-            # code block, indicated by (-1, n).
-            self.last_line = -1
-        elif event == 'line':
-            # Record an executed line.
-            if self.cur_file_data is not None:
-                if self.arcs:
-                    #print("lin", self.last_line, frame.f_lineno)
-                    self.cur_file_data[(self.last_line, frame.f_lineno)] = None
-                else:
-                    #print("lin", frame.f_lineno)
-                    self.cur_file_data[frame.f_lineno] = None
-            self.last_line = frame.f_lineno
-        elif event == 'return':
-            if self.arcs and self.cur_file_data:
-                first = frame.f_code.co_firstlineno
-                self.cur_file_data[(self.last_line, -first)] = None
-            # Leaving this function, pop the filename stack.
-            self.cur_file_data, self.last_line = self.data_stack.pop()
-            #print("returned, stack is %d deep" % (len(self.data_stack)))
-        elif event == 'exception':
-            #print("exc", self.last_line, frame.f_lineno)
-            self.last_exc_back = frame.f_back
-            self.last_exc_firstlineno = frame.f_code.co_firstlineno
-        return self._trace
-
-    def start(self):
-        """Start this Tracer.
+class FileDisposition(object):
+    """A simple value type for recording what to do with a file."""
+    pass
 
-        Return a Python function suitable for use with sys.settrace().
 
-        """
-        self.thread = threading.currentThread()
-        sys.settrace(self._trace)
-        return self._trace
-
-    def stop(self):
-        """Stop this Tracer."""
-        self.stopped = True
-        if self.thread != threading.currentThread():
-            # Called on a different thread than started us: we can't unhook
-            # ourseves, but we've set the flag that we should stop, so we won't
-            # do any more tracing.
-            return
-
-        if hasattr(sys, "gettrace") and self.warn:
-            if sys.gettrace() != self._trace:
-                msg = "Trace function changed, measurement is likely wrong: %r"
-                self.warn(msg % (sys.gettrace(),))
-        #print("Stopping tracer on %s" % threading.current_thread().ident)
-        sys.settrace(None)
-
-    def get_stats(self):
-        """Return a dictionary of statistics, or None."""
-        return None
+def should_start_context(frame):
+    """Who-Tests-What hack: Determine whether this frame begins a new who-context."""
+    fn_name = frame.f_code.co_name
+    if fn_name.startswith("test"):
+        return fn_name
 
 
 class Collector(object):
@@ -170,12 +65,17 @@ class Collector(object):
     # the top, and resumed when they become the top again.
     _collectors = []
 
-    def __init__(self, should_trace, timid, branch, warn):
+    # The concurrency settings we support here.
+    SUPPORTED_CONCURRENCIES = set(["greenlet", "eventlet", "gevent", "thread"])
+
+    def __init__(self, should_trace, check_include, timid, branch, warn, concurrency):
         """Create a collector.
 
-        `should_trace` is a function, taking a filename, and returning a
-        canonicalized filename, or None depending on whether the file should
-        be traced or not.
+        `should_trace` is a function, taking a file name, and returning a
+        `coverage.FileDisposition object`.
+
+        `check_include` is a function taking a file name and a frame. It returns
+        a boolean: True if the file should be traced, False if not.
 
         If `timid` is true, then a slower simpler trace function will be
         used.  This is important for some environments where manipulation of
@@ -189,10 +89,55 @@ class Collector(object):
         `warn` is a warning function, taking a single string message argument,
         to be used if a warning needs to be issued.
 
+        `concurrency` is a list of strings indicating the concurrency libraries
+        in use.  Valid values are "greenlet", "eventlet", "gevent", or "thread"
+        (the default).  Of these four values, only one can be supplied.  Other
+        values are ignored.
+
         """
         self.should_trace = should_trace
+        self.check_include = check_include
         self.warn = warn
         self.branch = branch
+        self.threading = None
+
+        self.concur_id_func = None
+
+        # We can handle a few concurrency options here, but only one at a time.
+        these_concurrencies = self.SUPPORTED_CONCURRENCIES.intersection(concurrency)
+        if len(these_concurrencies) > 1:
+            raise CoverageException("Conflicting concurrency settings: %s" % concurrency)
+        self.concurrency = these_concurrencies.pop() if these_concurrencies else ''
+
+        try:
+            if self.concurrency == "greenlet":
+                import greenlet
+                self.concur_id_func = greenlet.getcurrent
+            elif self.concurrency == "eventlet":
+                import eventlet.greenthread     # pylint: disable=import-error,useless-suppression
+                self.concur_id_func = eventlet.greenthread.getcurrent
+            elif self.concurrency == "gevent":
+                import gevent                   # pylint: disable=import-error,useless-suppression
+                self.concur_id_func = gevent.getcurrent
+            elif self.concurrency == "thread" or not self.concurrency:
+                # It's important to import threading only if we need it.  If
+                # it's imported early, and the program being measured uses
+                # gevent, then gevent's monkey-patching won't work properly.
+                import threading
+                self.threading = threading
+            else:
+                raise CoverageException("Don't understand concurrency=%s" % concurrency)
+        except ImportError:
+            raise CoverageException(
+                "Couldn't trace with concurrency=%s, the module isn't installed." % (
+                    self.concurrency,
+                )
+            )
+
+        # Who-Tests-What is just a hack at the moment, so turn it on with an
+        # environment variable.
+        self.wtw = int(os.getenv('COVERAGE_WTW', 0))
+
         self.reset()
 
         if timid:
@@ -203,8 +148,15 @@ class Collector(object):
             # trace function.
             self._trace_class = CTracer or PyTracer
 
+        if self._trace_class is CTracer:
+            self.file_disposition_class = CFileDisposition
+            self.supports_plugins = True
+        else:
+            self.file_disposition_class = FileDisposition
+            self.supports_plugins = False
+
     def __repr__(self):
-        return "<Collector at 0x%x>" % id(self)
+        return "<Collector at 0x%x: %s>" % (id(self), self.tracer_name())
 
     def tracer_name(self):
         """Return the class name of the tracer we're using."""
@@ -212,14 +164,46 @@ class Collector(object):
 
     def reset(self):
         """Clear collected data, and prepare to collect more."""
-        # A dictionary mapping filenames to dicts with linenumber keys,
-        # or mapping filenames to dicts with linenumber pairs as keys.
+        # A dictionary mapping file names to dicts with line number keys (if not
+        # branch coverage), or mapping file names to dicts with line number
+        # pairs as keys (if branch coverage).
         self.data = {}
 
-        # A cache of the results from should_trace, the decision about whether
-        # to trace execution in a file. A dict of filename to (filename or
-        # None).
-        self.should_trace_cache = {}
+        # A dict mapping contexts to data dictionaries.
+        self.contexts = {}
+        self.contexts[None] = self.data
+
+        # A dictionary mapping file names to file tracer plugin names that will
+        # handle them.
+        self.file_tracers = {}
+
+        # The .should_trace_cache attribute is a cache from file names to
+        # coverage.FileDisposition objects, or None.  When a file is first
+        # considered for tracing, a FileDisposition is obtained from
+        # Coverage.should_trace.  Its .trace attribute indicates whether the
+        # file should be traced or not.  If it should be, a plugin with dynamic
+        # file names can decide not to trace it based on the dynamic file name
+        # being excluded by the inclusion rules, in which case the
+        # FileDisposition will be replaced by None in the cache.
+        if env.PYPY:
+            import __pypy__                     # pylint: disable=import-error
+            # Alex Gaynor said:
+            # should_trace_cache is a strictly growing key: once a key is in
+            # it, it never changes.  Further, the keys used to access it are
+            # generally constant, given sufficient context. That is to say, at
+            # any given point _trace() is called, pypy is able to know the key.
+            # This is because the key is determined by the physical source code
+            # line, and that's invariant with the call site.
+            #
+            # This property of a dict with immutable keys, combined with
+            # call-site-constant keys is a match for PyPy's module dict,
+            # which is optimized for such workloads.
+            #
+            # This gives a 20% benefit on the workload described at
+            # https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage
+            self.should_trace_cache = __pypy__.newdict("module")
+        else:
+            self.should_trace_cache = {}
 
         # Our active Tracers.
         self.tracers = []
@@ -228,12 +212,35 @@ class Collector(object):
         """Start a new Tracer object, and store it in self.tracers."""
         tracer = self._trace_class()
         tracer.data = self.data
-        tracer.arcs = self.branch
+        tracer.trace_arcs = self.branch
         tracer.should_trace = self.should_trace
         tracer.should_trace_cache = self.should_trace_cache
         tracer.warn = self.warn
+
+        if hasattr(tracer, 'concur_id_func'):
+            tracer.concur_id_func = self.concur_id_func
+        elif self.concur_id_func:
+            raise CoverageException(
+                "Can't support concurrency=%s with %s, only threads are supported" % (
+                    self.concurrency, self.tracer_name(),
+                )
+            )
+
+        if hasattr(tracer, 'file_tracers'):
+            tracer.file_tracers = self.file_tracers
+        if hasattr(tracer, 'threading'):
+            tracer.threading = self.threading
+        if hasattr(tracer, 'check_include'):
+            tracer.check_include = self.check_include
+        if self.wtw:
+            if hasattr(tracer, 'should_start_context'):
+                tracer.should_start_context = should_start_context
+            if hasattr(tracer, 'switch_context'):
+                tracer.switch_context = self.switch_context
+
         fn = tracer.start()
         self.tracers.append(tracer)
+
         return fn
 
     # The trace function has to be set individually on each thread before
@@ -242,16 +249,16 @@ class Collector(object):
     # install this as a trace function, and the first time it's called, it does
     # the real trace installation.
 
-    def _installation_trace(self, frame_unused, event_unused, arg_unused):
+    def _installation_trace(self, frame, event, arg):
         """Called on new threads, installs the real tracer."""
-        # Remove ourselves as the trace function
+        # Remove ourselves as the trace function.
         sys.settrace(None)
         # Install the real tracer.
         fn = self._start_tracer()
         # Invoke the real trace function with the current event, to be sure
         # not to lose an event.
         if fn:
-            fn = fn(frame_unused, event_unused, arg_unused)
+            fn = fn(frame, event, arg)
         # Return the new trace function to continue tracing in this scope.
         return fn
 
@@ -259,39 +266,47 @@ class Collector(object):
         """Start collecting trace information."""
         if self._collectors:
             self._collectors[-1].pause()
-        self._collectors.append(self)
-        #print("Started: %r" % self._collectors, file=sys.stderr)
 
-        # Check to see whether we had a fullcoverage tracer installed.
+        # Check to see whether we had a fullcoverage tracer installed. If so,
+        # get the stack frames it stashed away for us.
         traces0 = []
-        if hasattr(sys, "gettrace"):
-            fn0 = sys.gettrace()
-            if fn0:
-                tracer0 = getattr(fn0, '__self__', None)
-                if tracer0:
-                    traces0 = getattr(tracer0, 'traces', [])
-
-        # Install the tracer on this thread.
-        fn = self._start_tracer()
+        fn0 = sys.gettrace()
+        if fn0:
+            tracer0 = getattr(fn0, '__self__', None)
+            if tracer0:
+                traces0 = getattr(tracer0, 'traces', [])
+
+        try:
+            # Install the tracer on this thread.
+            fn = self._start_tracer()
+        except:
+            if self._collectors:
+                self._collectors[-1].resume()
+            raise
+
+        # If _start_tracer succeeded, then we add ourselves to the global
+        # stack of collectors.
+        self._collectors.append(self)
 
+        # Replay all the events from fullcoverage into the new trace function.
         for args in traces0:
             (frame, event, arg), lineno = args
             try:
                 fn(frame, event, arg, lineno=lineno)
             except TypeError:
-                raise Exception(
-                    "fullcoverage must be run with the C trace function."
-                )
+                raise Exception("fullcoverage must be run with the C trace function.")
 
         # Install our installation tracer in threading, to jump start other
         # threads.
-        threading.settrace(self._installation_trace)
+        if self.threading:
+            self.threading.settrace(self._installation_trace)
 
     def stop(self):
         """Stop collecting trace information."""
-        #print >>sys.stderr, "Stopping: %r" % self._collectors
         assert self._collectors
-        assert self._collectors[-1] is self
+        assert self._collectors[-1] is self, (
+            "Expected current collector to be %r, but it's %r" % (self, self._collectors[-1])
+        )
 
         self.pause()
         self.tracers = []
@@ -310,44 +325,48 @@ class Collector(object):
             if stats:
                 print("\nCoverage.py tracer stats:")
                 for k in sorted(stats.keys()):
-                    print("%16s: %s" % (k, stats[k]))
-        threading.settrace(None)
+                    print("%20s: %s" % (k, stats[k]))
+        if self.threading:
+            self.threading.settrace(None)
 
     def resume(self):
         """Resume tracing after a `pause`."""
         for tracer in self.tracers:
             tracer.start()
-        threading.settrace(self._installation_trace)
-
-    def get_line_data(self):
-        """Return the line data collected.
-
-        Data is { filename: { lineno: None, ...}, ...}
-
-        """
-        if self.branch:
-            # If we were measuring branches, then we have to re-build the dict
-            # to show line data.
-            line_data = {}
-            for f, arcs in self.data.items():
-                line_data[f] = ldf = {}
-                for l1, _ in list(arcs.keys()):
-                    if l1:
-                        ldf[l1] = None
-            return line_data
+        if self.threading:
+            self.threading.settrace(self._installation_trace)
         else:
-            return self.data
+            self._start_tracer()
 
-    def get_arc_data(self):
-        """Return the arc data collected.
+    def switch_context(self, new_context):
+        """Who-Tests-What hack: switch to a new who-context."""
+        # Make a new data dict, or find the existing one, and switch all the
+        # tracers to use it.
+        data = self.contexts.setdefault(new_context, {})
+        for tracer in self.tracers:
+            tracer.data = data
 
-        Data is { filename: { (l1, l2): None, ...}, ...}
+    def save_data(self, covdata):
+        """Save the collected data to a `CoverageData`.
 
-        Note that no data is collected or returned if the Collector wasn't
-        created with `branch` true.
+        Also resets the collector.
 
         """
+        def abs_file_dict(d):
+            """Return a dict like d, but with keys modified by `abs_file`."""
+            return dict((abs_file(k), v) for k, v in iitems(d))
+
         if self.branch:
-            return self.data
+            covdata.add_arcs(abs_file_dict(self.data))
         else:
-            return {}
+            covdata.add_lines(abs_file_dict(self.data))
+        covdata.add_file_tracers(abs_file_dict(self.file_tracers))
+
+        if self.wtw:
+            # Just a hack, so just hack it.
+            import pprint
+            out_file = "coverage_wtw_{:06}.py".format(os.getpid())
+            with open(out_file, "w") as wtw_out:
+                pprint.pprint(self.contexts, wtw_out)
+
+        self.reset()
index 87318ff12452b6f0f833920f2cd1fd04e56b71d7..d6f5af0a6f5ab6f86ddd3dccfb8bf3c09995536a 100644 (file)
@@ -1,31 +1,68 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
 """Config file for coverage.py"""
 
-import os, re, sys
-from coverage.backward import string_class, iitems
+import collections
+import os
+import re
+import sys
+
+from coverage.backward import configparser, iitems, string_class
+from coverage.misc import contract, CoverageException, isolate_module
 
-# In py3, # ConfigParser was renamed to the more-standard configparser
-try:
-    import configparser                             # pylint: disable=F0401
-except ImportError:
-    import ConfigParser as configparser
+os = isolate_module(os)
 
 
 class HandyConfigParser(configparser.RawConfigParser):
     """Our specialization of ConfigParser."""
 
+    def __init__(self, section_prefix):
+        configparser.RawConfigParser.__init__(self)
+        self.section_prefix = section_prefix
+
     def read(self, filename):
-        """Read a filename as UTF-8 configuration data."""
+        """Read a file name as UTF-8 configuration data."""
         kwargs = {}
         if sys.version_info >= (3, 2):
             kwargs['encoding'] = "utf-8"
         return configparser.RawConfigParser.read(self, filename, **kwargs)
 
-    def get(self, *args, **kwargs):
-        v = configparser.RawConfigParser.get(self, *args, **kwargs)
+    def has_option(self, section, option):
+        section = self.section_prefix + section
+        return configparser.RawConfigParser.has_option(self, section, option)
+
+    def has_section(self, section):
+        section = self.section_prefix + section
+        return configparser.RawConfigParser.has_section(self, section)
+
+    def options(self, section):
+        section = self.section_prefix + section
+        return configparser.RawConfigParser.options(self, section)
+
+    def get_section(self, section):
+        """Get the contents of a section, as a dictionary."""
+        d = {}
+        for opt in self.options(section):
+            d[opt] = self.get(section, opt)
+        return d
+
+    def get(self, section, *args, **kwargs):
+        """Get a value, replacing environment variables also.
+
+        The arguments are the same as `RawConfigParser.get`, but in the found
+        value, ``$WORD`` or ``${WORD}`` are replaced by the value of the
+        environment variable ``WORD``.
+
+        Returns the finished value.
+
+        """
+        section = self.section_prefix + section
+        v = configparser.RawConfigParser.get(self, section, *args, **kwargs)
         def dollar_replace(m):
             """Called for each $replacement."""
             # Only one of the groups will have matched, just get its text.
-            word = [w for w in m.groups() if w is not None][0]
+            word = next(w for w in m.groups() if w is not None)     # pragma: part covered
             if word == "$":
                 return "$"
             else:
@@ -59,28 +96,39 @@ class HandyConfigParser(configparser.RawConfigParser):
                     values.append(value)
         return values
 
-    def getlinelist(self, section, option):
-        """Read a list of full-line strings.
+    def getregexlist(self, section, option):
+        """Read a list of full-line regexes.
 
         The value of `section` and `option` is treated as a newline-separated
-        list of strings.  Each value is stripped of whitespace.
+        list of regexes.  Each value is stripped of whitespace.
 
         Returns the list of strings.
 
         """
-        value_list = self.get(section, option)
-        return list(filter(None, value_list.split('\n')))
-
-
-# The default line exclusion regexes
+        line_list = self.get(section, option)
+        value_list = []
+        for value in line_list.splitlines():
+            value = value.strip()
+            try:
+                re.compile(value)
+            except re.error as e:
+                raise CoverageException(
+                    "Invalid [%s].%s value %r: %s" % (section, option, value, e)
+                )
+            if value:
+                value_list.append(value)
+        return value_list
+
+
+# The default line exclusion regexes.
 DEFAULT_EXCLUDE = [
-    '(?i)# *pragma[: ]*no *cover',
-    ]
+    r'(?i)#\s*pragma[:\s]?\s*no\s*cover',
+]
 
 # The default partial branch regexes, to be modified by the user.
 DEFAULT_PARTIAL = [
-    '(?i)# *pragma[: ]*no *branch',
-    ]
+    r'(?i)#\s*pragma[:\s]?\s*no\s*branch',
+]
 
 # The default partial branch regexes, based on Python semantics.
 # These are any Python branching constructs that can't actually execute all
@@ -88,7 +136,7 @@ DEFAULT_PARTIAL = [
 DEFAULT_PARTIAL_ALWAYS = [
     'while (True|1|False|0):',
     'if (True|1|False|0):',
-    ]
+]
 
 
 class CoverageConfig(object):
@@ -106,44 +154,44 @@ class CoverageConfig(object):
 
         # Defaults for [run]
         self.branch = False
+        self.concurrency = None
         self.cover_pylib = False
         self.data_file = ".coverage"
+        self.debug = []
+        self.note = None
         self.parallel = False
-        self.timid = False
+        self.plugins = []
         self.source = None
-        self.debug = []
+        self.timid = False
 
         # Defaults for [report]
         self.exclude_list = DEFAULT_EXCLUDE[:]
+        self.fail_under = 0
         self.ignore_errors = False
         self.include = None
         self.omit = None
-        self.partial_list = DEFAULT_PARTIAL[:]
         self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
+        self.partial_list = DEFAULT_PARTIAL[:]
         self.precision = 0
         self.show_missing = False
+        self.skip_covered = False
 
         # Defaults for [html]
-        self.html_dir = "htmlcov"
         self.extra_css = None
+        self.html_dir = "htmlcov"
         self.html_title = "Coverage report"
 
         # Defaults for [xml]
         self.xml_output = "coverage.xml"
+        self.xml_package_depth = 99
 
         # Defaults for [paths]
         self.paths = {}
 
-    def from_environment(self, env_var):
-        """Read configuration from the `env_var` environment variable."""
-        # Timidity: for nose users, read an environment variable.  This is a
-        # cheap hack, since the rest of the command line arguments aren't
-        # recognized, but it solves some users' problems.
-        env = os.environ.get(env_var, '')
-        if env:
-            self.timid = ('--timid' in env)
+        # Options for plugins
+        self.plugin_options = {}
 
-    MUST_BE_LIST = ["omit", "include", "debug"]
+    MUST_BE_LIST = ["omit", "include", "debug", "plugins", "concurrency"]
 
     def from_args(self, **kwargs):
         """Read config values from `kwargs`."""
@@ -153,61 +201,167 @@ class CoverageConfig(object):
                     v = [v]
                 setattr(self, k, v)
 
-    def from_file(self, filename):
+    @contract(filename=str)
+    def from_file(self, filename, section_prefix=""):
         """Read configuration from a .rc file.
 
         `filename` is a file name to read.
 
+        Returns True or False, whether the file could be read.
+
         """
         self.attempted_config_files.append(filename)
 
-        cp = HandyConfigParser()
-        files_read = cp.read(filename)
-        if files_read is not None:  # return value changed in 2.4
-            self.config_files.extend(files_read)
+        cp = HandyConfigParser(section_prefix)
+        try:
+            files_read = cp.read(filename)
+        except configparser.Error as err:
+            raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
+        if not files_read:
+            return False
+
+        self.config_files.extend(files_read)
 
+        try:
+            for option_spec in self.CONFIG_FILE_OPTIONS:
+                self._set_attr_from_config_option(cp, *option_spec)
+        except ValueError as err:
+            raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
+
+        # Check that there are no unrecognized options.
+        all_options = collections.defaultdict(set)
         for option_spec in self.CONFIG_FILE_OPTIONS:
-            self.set_attr_from_config_option(cp, *option_spec)
+            section, option = option_spec[1].split(":")
+            all_options[section].add(option)
+
+        for section, options in iitems(all_options):
+            if cp.has_section(section):
+                for unknown in set(cp.options(section)) - options:
+                    if section_prefix:
+                        section = section_prefix + section
+                    raise CoverageException(
+                        "Unrecognized option '[%s] %s=' in config file %s" % (
+                            section, unknown, filename
+                        )
+                    )
 
         # [paths] is special
         if cp.has_section('paths'):
             for option in cp.options('paths'):
                 self.paths[option] = cp.getlist('paths', option)
 
+        # plugins can have options
+        for plugin in self.plugins:
+            if cp.has_section(plugin):
+                self.plugin_options[plugin] = cp.get_section(plugin)
+
+        return True
+
     CONFIG_FILE_OPTIONS = [
+        # These are *args for _set_attr_from_config_option:
+        #   (attr, where, type_="")
+        #
+        #   attr is the attribute to set on the CoverageConfig object.
+        #   where is the section:name to read from the configuration file.
+        #   type_ is the optional type to apply, by using .getTYPE to read the
+        #       configuration value from the file.
+
         # [run]
         ('branch', 'run:branch', 'boolean'),
+        ('concurrency', 'run:concurrency', 'list'),
         ('cover_pylib', 'run:cover_pylib', 'boolean'),
         ('data_file', 'run:data_file'),
         ('debug', 'run:debug', 'list'),
         ('include', 'run:include', 'list'),
+        ('note', 'run:note'),
         ('omit', 'run:omit', 'list'),
         ('parallel', 'run:parallel', 'boolean'),
+        ('plugins', 'run:plugins', 'list'),
         ('source', 'run:source', 'list'),
         ('timid', 'run:timid', 'boolean'),
 
         # [report]
-        ('exclude_list', 'report:exclude_lines', 'linelist'),
+        ('exclude_list', 'report:exclude_lines', 'regexlist'),
+        ('fail_under', 'report:fail_under', 'int'),
         ('ignore_errors', 'report:ignore_errors', 'boolean'),
         ('include', 'report:include', 'list'),
         ('omit', 'report:omit', 'list'),
-        ('partial_list', 'report:partial_branches', 'linelist'),
-        ('partial_always_list', 'report:partial_branches_always', 'linelist'),
+        ('partial_always_list', 'report:partial_branches_always', 'regexlist'),
+        ('partial_list', 'report:partial_branches', 'regexlist'),
         ('precision', 'report:precision', 'int'),
         ('show_missing', 'report:show_missing', 'boolean'),
+        ('skip_covered', 'report:skip_covered', 'boolean'),
+        ('sort', 'report:sort'),
 
         # [html]
-        ('html_dir', 'html:directory'),
         ('extra_css', 'html:extra_css'),
+        ('html_dir', 'html:directory'),
         ('html_title', 'html:title'),
 
         # [xml]
         ('xml_output', 'xml:output'),
-        ]
+        ('xml_package_depth', 'xml:package_depth', 'int'),
+    ]
 
-    def set_attr_from_config_option(self, cp, attr, where, type_=''):
+    def _set_attr_from_config_option(self, cp, attr, where, type_=''):
         """Set an attribute on self if it exists in the ConfigParser."""
         section, option = where.split(":")
         if cp.has_option(section, option):
-            method = getattr(cp, 'get'+type_)
+            method = getattr(cp, 'get' + type_)
             setattr(self, attr, method(section, option))
+
+    def get_plugin_options(self, plugin):
+        """Get a dictionary of options for the plugin named `plugin`."""
+        return self.plugin_options.get(plugin, {})
+
+    def set_option(self, option_name, value):
+        """Set an option in the configuration.
+
+        `option_name` is a colon-separated string indicating the section and
+        option name.  For example, the ``branch`` option in the ``[run]``
+        section of the config file would be indicated with `"run:branch"`.
+
+        `value` is the new value for the option.
+
+        """
+
+        # Check all the hard-coded options.
+        for option_spec in self.CONFIG_FILE_OPTIONS:
+            attr, where = option_spec[:2]
+            if where == option_name:
+                setattr(self, attr, value)
+                return
+
+        # See if it's a plugin option.
+        plugin_name, _, key = option_name.partition(":")
+        if key and plugin_name in self.plugins:
+            self.plugin_options.setdefault(plugin_name, {})[key] = value
+            return
+
+        # If we get here, we didn't find the option.
+        raise CoverageException("No such option: %r" % option_name)
+
+    def get_option(self, option_name):
+        """Get an option from the configuration.
+
+        `option_name` is a colon-separated string indicating the section and
+        option name.  For example, the ``branch`` option in the ``[run]``
+        section of the config file would be indicated with `"run:branch"`.
+
+        Returns the value of the option.
+
+        """
+
+        # Check all the hard-coded options.
+        for option_spec in self.CONFIG_FILE_OPTIONS:
+            attr, where = option_spec[:2]
+            if where == option_name:
+                return getattr(self, attr)
+
+        # See if it's a plugin option.
+        plugin_name, _, key = option_name.partition(":")
+        if key and plugin_name in self.plugins:
+            return self.plugin_options.get(plugin_name, {}).get(key)
+
+        # If we get here, we didn't find the option.
+        raise CoverageException("No such option: %r" % option_name)
index f75a3dda5b1b479a45e52c50d1390b7d076a8b1f..d3e6708563c0516124f657dae06d27fff02973f1 100644 (file)
@@ -1,49 +1,67 @@
-"""Core control stuff for Coverage."""
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
 
-import atexit, os, random, socket, sys
+"""Core control stuff for coverage.py."""
 
+import atexit
+import inspect
+import os
+import platform
+import re
+import sys
+import traceback
+
+from coverage import env, files
 from coverage.annotate import AnnotateReporter
-from coverage.backward import string_class, iitems, sorted  # pylint: disable=W0622
-from coverage.codeunit import code_unit_factory, CodeUnit
+from coverage.backward import string_class, iitems
 from coverage.collector import Collector
 from coverage.config import CoverageConfig
-from coverage.data import CoverageData
+from coverage.data import CoverageData, CoverageDataFiles
 from coverage.debug import DebugControl
-from coverage.files import FileLocator, TreeMatcher, FnmatchMatcher
+from coverage.files import TreeMatcher, FnmatchMatcher
 from coverage.files import PathAliases, find_python_files, prep_patterns
+from coverage.files import ModuleMatcher, abs_file
 from coverage.html import HtmlReporter
 from coverage.misc import CoverageException, bool_or_none, join_regex
-from coverage.misc import file_be_gone
+from coverage.misc import file_be_gone, isolate_module
+from coverage.multiproc import patch_multiprocessing
+from coverage.plugin import FileReporter
+from coverage.plugin_support import Plugins
+from coverage.python import PythonFileReporter
 from coverage.results import Analysis, Numbers
 from coverage.summary import SummaryReporter
 from coverage.xmlreport import XmlReporter
 
+os = isolate_module(os)
+
 # Pypy has some unusual stuff in the "stdlib".  Consider those locations
 # when deciding where the stdlib is.
 try:
-    import _structseq       # pylint: disable=F0401
+    import _structseq
 except ImportError:
     _structseq = None
 
 
-class coverage(object):
+class Coverage(object):
     """Programmatic access to coverage.py.
 
     To use::
 
-        from coverage import coverage
+        from coverage import Coverage
 
-        cov = coverage()
+        cov = Coverage()
         cov.start()
         #.. call your code ..
         cov.stop()
         cov.html_report(directory='covhtml')
 
     """
-    def __init__(self, data_file=None, data_suffix=None, cover_pylib=None,
-                auto_data=False, timid=None, branch=None, config_file=True,
-                source=None, omit=None, include=None, debug=None,
-                debug_file=None):
+    def __init__(
+        self, data_file=None, data_suffix=None, cover_pylib=None,
+        auto_data=False, timid=None, branch=None, config_file=True,
+        source=None, omit=None, include=None, debug=None,
+        concurrency=None,
+    ):
         """
         `data_file` is the base name of the data file to use, defaulting to
         ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
@@ -65,187 +83,389 @@ class coverage(object):
         If `branch` is true, then branch coverage will be measured in addition
         to the usual statement coverage.
 
-        `config_file` determines what config file to read.  If it is a string,
-        it is the name of the config file to read.  If it is True, then a
-        standard file is read (".coveragerc").  If it is False, then no file is
-        read.
+        `config_file` determines what configuration file to read:
+
+            * If it is ".coveragerc", it is interpreted as if it were True,
+              for backward compatibility.
+
+            * If it is a string, it is the name of the file to read.  If the
+              file can't be read, it is an error.
+
+            * If it is True, then a few standard files names are tried
+              (".coveragerc", "setup.cfg").  It is not an error for these files
+              to not be found.
+
+            * If it is False, then no configuration file is read.
 
         `source` is a list of file paths or package names.  Only code located
         in the trees indicated by the file paths or package names will be
         measured.
 
-        `include` and `omit` are lists of filename patterns. Files that match
+        `include` and `omit` are lists of file name patterns. Files that match
         `include` will be measured, files that match `omit` will not.  Each
         will also accept a single string argument.
 
         `debug` is a list of strings indicating what debugging information is
-        desired. `debug_file` is the file to write debug messages to,
-        defaulting to stderr.
+        desired.
 
-        """
-        from coverage import __version__
+        `concurrency` is a string indicating the concurrency library being used
+        in the measured code.  Without this, coverage.py will get incorrect
+        results if these libraries are in use.  Valid strings are "greenlet",
+        "eventlet", "gevent", "multiprocessing", or "thread" (the default).
+        This can also be a list of these strings.
 
-        # A record of all the warnings that have been issued.
-        self._warnings = []
+        .. versionadded:: 4.0
+            The `concurrency` parameter.
 
+        .. versionadded:: 4.2
+            The `concurrency` parameter can now be a list of strings.
+
+        """
         # Build our configuration from a number of sources:
         # 1: defaults:
         self.config = CoverageConfig()
 
-        # 2: from the coveragerc file:
+        # 2: from the rcfile, .coveragerc or setup.cfg file:
         if config_file:
-            if config_file is True:
+            # pylint: disable=redefined-variable-type
+            did_read_rc = False
+            # Some API users were specifying ".coveragerc" to mean the same as
+            # True, so make it so.
+            if config_file == ".coveragerc":
+                config_file = True
+            specified_file = (config_file is not True)
+            if not specified_file:
                 config_file = ".coveragerc"
-            try:
-                self.config.from_file(config_file)
-            except ValueError:
-                _, err, _ = sys.exc_info()
-                raise CoverageException(
-                    "Couldn't read config file %s: %s" % (config_file, err)
-                    )
+            self.config_file = config_file
+
+            did_read_rc = self.config.from_file(config_file)
+
+            if not did_read_rc:
+                if specified_file:
+                    raise CoverageException(
+                        "Couldn't read '%s' as a config file" % config_file
+                        )
+                self.config.from_file("setup.cfg", section_prefix="coverage:")
 
         # 3: from environment variables:
-        self.config.from_environment('COVERAGE_OPTIONS')
         env_data_file = os.environ.get('COVERAGE_FILE')
         if env_data_file:
             self.config.data_file = env_data_file
+        debugs = os.environ.get('COVERAGE_DEBUG')
+        if debugs:
+            self.config.debug.extend(debugs.split(","))
 
         # 4: from constructor arguments:
         self.config.from_args(
             data_file=data_file, cover_pylib=cover_pylib, timid=timid,
             branch=branch, parallel=bool_or_none(data_suffix),
             source=source, omit=omit, include=include, debug=debug,
+            concurrency=concurrency,
             )
 
-        # Create and configure the debugging controller.
-        self.debug = DebugControl(self.config.debug, debug_file or sys.stderr)
+        self._debug_file = None
+        self._auto_data = auto_data
+        self._data_suffix = data_suffix
+
+        # The matchers for _should_trace.
+        self.source_match = None
+        self.source_pkgs_match = None
+        self.pylib_match = self.cover_match = None
+        self.include_match = self.omit_match = None
+
+        # Is it ok for no data to be collected?
+        self._warn_no_data = True
+        self._warn_unimported_source = True
+
+        # A record of all the warnings that have been issued.
+        self._warnings = []
 
-        self.auto_data = auto_data
+        # Other instance attributes, set later.
+        self.omit = self.include = self.source = None
+        self.source_pkgs = None
+        self.data = self.data_files = self.collector = None
+        self.plugins = None
+        self.pylib_dirs = self.cover_dirs = None
+        self.data_suffix = self.run_suffix = None
+        self._exclude_re = None
+        self.debug = None
 
-        # _exclude_re is a dict mapping exclusion list names to compiled
+        # State machine variables:
+        # Have we initialized everything?
+        self._inited = False
+        # Have we started collecting and not stopped it?
+        self._started = False
+        # Have we measured some data and not harvested it?
+        self._measured = False
+
+        # If we have sub-process measurement happening automatically, then we
+        # want any explicit creation of a Coverage object to mean, this process
+        # is already coverage-aware, so don't auto-measure it.  By now, the
+        # auto-creation of a Coverage object has already happened.  But we can
+        # find it and tell it not to save its data.
+        if not env.METACOV:
+            _prevent_sub_process_measurement()
+
+    def _init(self):
+        """Set all the initial state.
+
+        This is called by the public methods to initialize state. This lets us
+        construct a :class:`Coverage` object, then tweak its state before this
+        function is called.
+
+        """
+        if self._inited:
+            return
+
+        # Create and configure the debugging controller. COVERAGE_DEBUG_FILE
+        # is an environment variable, the name of a file to append debug logs
+        # to.
+        if self._debug_file is None:
+            debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE")
+            if debug_file_name:
+                self._debug_file = open(debug_file_name, "a")
+            else:
+                self._debug_file = sys.stderr
+        self.debug = DebugControl(self.config.debug, self._debug_file)
+
+        # Load plugins
+        self.plugins = Plugins.load_plugins(self.config.plugins, self.config, self.debug)
+
+        # _exclude_re is a dict that maps exclusion list names to compiled
         # regexes.
         self._exclude_re = {}
         self._exclude_regex_stale()
 
-        self.file_locator = FileLocator()
+        files.set_relative_directory()
 
         # The source argument can be directories or package names.
         self.source = []
         self.source_pkgs = []
         for src in self.config.source or []:
             if os.path.exists(src):
-                self.source.append(self.file_locator.canonical_filename(src))
+                self.source.append(files.canonical_filename(src))
             else:
                 self.source_pkgs.append(src)
 
         self.omit = prep_patterns(self.config.omit)
         self.include = prep_patterns(self.config.include)
 
+        concurrency = self.config.concurrency or []
+        if "multiprocessing" in concurrency:
+            patch_multiprocessing(rcfile=self.config_file)
+            #concurrency = None
+            # Multi-processing uses parallel for the subprocesses, so also use
+            # it for the main process.
+            self.config.parallel = True
+
         self.collector = Collector(
-            self._should_trace, timid=self.config.timid,
-            branch=self.config.branch, warn=self._warn
+            should_trace=self._should_trace,
+            check_include=self._check_include_omit_etc,
+            timid=self.config.timid,
+            branch=self.config.branch,
+            warn=self._warn,
+            concurrency=concurrency,
             )
 
+        # Early warning if we aren't going to be able to support plugins.
+        if self.plugins.file_tracers and not self.collector.supports_plugins:
+            self._warn(
+                "Plugin file tracers (%s) aren't supported with %s" % (
+                    ", ".join(
+                        plugin._coverage_plugin_name
+                            for plugin in self.plugins.file_tracers
+                        ),
+                    self.collector.tracer_name(),
+                    )
+                )
+            for plugin in self.plugins.file_tracers:
+                plugin._coverage_enabled = False
+
         # Suffixes are a bit tricky.  We want to use the data suffix only when
         # collecting data, not when combining data.  So we save it as
         # `self.run_suffix` now, and promote it to `self.data_suffix` if we
         # find that we are collecting data later.
-        if data_suffix or self.config.parallel:
-            if not isinstance(data_suffix, string_class):
+        if self._data_suffix or self.config.parallel:
+            if not isinstance(self._data_suffix, string_class):
                 # if data_suffix=True, use .machinename.pid.random
-                data_suffix = True
+                self._data_suffix = True
         else:
-            data_suffix = None
+            self._data_suffix = None
         self.data_suffix = None
-        self.run_suffix = data_suffix
+        self.run_suffix = self._data_suffix
 
         # Create the data file.  We do this at construction time so that the
         # data file will be written into the directory where the process
         # started rather than wherever the process eventually chdir'd to.
-        self.data = CoverageData(
-            basename=self.config.data_file,
-            collector="coverage v%s" % __version__,
-            debug=self.debug,
-            )
+        self.data = CoverageData(debug=self.debug)
+        self.data_files = CoverageDataFiles(basename=self.config.data_file, warn=self._warn)
 
-        # The dirs for files considered "installed with the interpreter".
-        self.pylib_dirs = []
+        # The directories for files considered "installed with the interpreter".
+        self.pylib_dirs = set()
         if not self.config.cover_pylib:
             # Look at where some standard modules are located. That's the
             # indication for "installed with the interpreter". In some
             # environments (virtualenv, for example), these modules may be
             # spread across a few locations. Look at all the candidate modules
             # we've imported, and take all the different ones.
-            for m in (atexit, os, random, socket, _structseq):
+            for m in (atexit, inspect, os, platform, re, _structseq, traceback):
                 if m is not None and hasattr(m, "__file__"):
-                    m_dir = self._canonical_dir(m)
-                    if m_dir not in self.pylib_dirs:
-                        self.pylib_dirs.append(m_dir)
-
-        # To avoid tracing the coverage code itself, we skip anything located
-        # where we are.
-        self.cover_dir = self._canonical_dir(__file__)
-
-        # The matchers for _should_trace.
-        self.source_match = None
-        self.pylib_match = self.cover_match = None
-        self.include_match = self.omit_match = None
+                    self.pylib_dirs.add(self._canonical_dir(m))
+            if _structseq and not hasattr(_structseq, '__file__'):
+                # PyPy 2.4 has no __file__ in the builtin modules, but the code
+                # objects still have the file names.  So dig into one to find
+                # the path to exclude.
+                structseq_new = _structseq.structseq_new
+                try:
+                    structseq_file = structseq_new.func_code.co_filename
+                except AttributeError:
+                    structseq_file = structseq_new.__code__.co_filename
+                self.pylib_dirs.add(self._canonical_dir(structseq_file))
+
+        # To avoid tracing the coverage.py code itself, we skip anything
+        # located where we are.
+        self.cover_dirs = [self._canonical_dir(__file__)]
+        if env.TESTING:
+            # When testing, we use PyContracts, which should be considered
+            # part of coverage.py, and it uses six. Exclude those directories
+            # just as we exclude ourselves.
+            import contracts
+            import six
+            for mod in [contracts, six]:
+                self.cover_dirs.append(self._canonical_dir(mod))
 
         # Set the reporting precision.
         Numbers.set_precision(self.config.precision)
 
-        # Is it ok for no data to be collected?
-        self._warn_no_data = True
-        self._warn_unimported_source = True
+        atexit.register(self._atexit)
 
-        # State machine variables:
-        # Have we started collecting and not stopped it?
-        self._started = False
-        # Have we measured some data and not harvested it?
-        self._measured = False
+        self._inited = True
 
-        atexit.register(self._atexit)
+        # Create the matchers we need for _should_trace
+        if self.source or self.source_pkgs:
+            self.source_match = TreeMatcher(self.source)
+            self.source_pkgs_match = ModuleMatcher(self.source_pkgs)
+        else:
+            if self.cover_dirs:
+                self.cover_match = TreeMatcher(self.cover_dirs)
+            if self.pylib_dirs:
+                self.pylib_match = TreeMatcher(self.pylib_dirs)
+        if self.include:
+            self.include_match = FnmatchMatcher(self.include)
+        if self.omit:
+            self.omit_match = FnmatchMatcher(self.omit)
+
+        # The user may want to debug things, show info if desired.
+        wrote_any = False
+        if self.debug.should('config'):
+            config_info = sorted(self.config.__dict__.items())
+            self.debug.write_formatted_info("config", config_info)
+            wrote_any = True
+
+        if self.debug.should('sys'):
+            self.debug.write_formatted_info("sys", self.sys_info())
+            for plugin in self.plugins:
+                header = "sys: " + plugin._coverage_plugin_name
+                info = plugin.sys_info()
+                self.debug.write_formatted_info(header, info)
+            wrote_any = True
+
+        if wrote_any:
+            self.debug.write_formatted_info("end", ())
 
     def _canonical_dir(self, morf):
         """Return the canonical directory of the module or file `morf`."""
-        return os.path.split(CodeUnit(morf, self.file_locator).filename)[0]
+        morf_filename = PythonFileReporter(morf, self).filename
+        return os.path.split(morf_filename)[0]
 
     def _source_for_file(self, filename):
-        """Return the source file for `filename`."""
-        if not filename.endswith(".py"):
-            if filename[-4:-1] == ".py":
-                filename = filename[:-1]
-            elif filename.endswith("$py.class"): # jython
-                filename = filename[:-9] + ".py"
+        """Return the source file for `filename`.
+
+        Given a file name being traced, return the best guess as to the source
+        file to attribute it to.
+
+        """
+        if filename.endswith(".py"):
+            # .py files are themselves source files.
+            return filename
+
+        elif filename.endswith((".pyc", ".pyo")):
+            # Bytecode files probably have source files near them.
+            py_filename = filename[:-1]
+            if os.path.exists(py_filename):
+                # Found a .py file, use that.
+                return py_filename
+            if env.WINDOWS:
+                # On Windows, it could be a .pyw file.
+                pyw_filename = py_filename + "w"
+                if os.path.exists(pyw_filename):
+                    return pyw_filename
+            # Didn't find source, but it's probably the .py file we want.
+            return py_filename
+
+        elif filename.endswith("$py.class"):
+            # Jython is easy to guess.
+            return filename[:-9] + ".py"
+
+        # No idea, just use the file name as-is.
         return filename
 
-    def _should_trace_with_reason(self, filename, frame):
+    def _name_for_module(self, module_globals, filename):
+        """Get the name of the module for a set of globals and file name.
+
+        For configurability's sake, we allow __main__ modules to be matched by
+        their importable name.
+
+        If loaded via runpy (aka -m), we can usually recover the "original"
+        full dotted module name, otherwise, we resort to interpreting the
+        file name to get the module's name.  In the case that the module name
+        can't be determined, None is returned.
+
+        """
+        dunder_name = module_globals.get('__name__', None)
+
+        if isinstance(dunder_name, str) and dunder_name != '__main__':
+            # This is the usual case: an imported module.
+            return dunder_name
+
+        loader = module_globals.get('__loader__', None)
+        for attrname in ('fullname', 'name'):   # attribute renamed in py3.2
+            if hasattr(loader, attrname):
+                fullname = getattr(loader, attrname)
+            else:
+                continue
+
+            if isinstance(fullname, str) and fullname != '__main__':
+                # Module loaded via: runpy -m
+                return fullname
+
+        # Script as first argument to Python command line.
+        inspectedname = inspect.getmodulename(filename)
+        if inspectedname is not None:
+            return inspectedname
+        else:
+            return dunder_name
+
+    def _should_trace_internal(self, filename, frame):
         """Decide whether to trace execution in `filename`, with a reason.
 
         This function is called from the trace function.  As each new file name
         is encountered, this function determines whether it is traced or not.
 
-        Returns a pair of values:  the first indicates whether the file should
-        be traced: it's a canonicalized filename if it should be traced, None
-        if it should not.  The second value is a string, the resason for the
-        decision.
+        Returns a FileDisposition object.
 
         """
-        if not filename:
-            # Empty string is pretty useless
-            return None, "empty string isn't a filename"
-
-        if filename.startswith('<'):
-            # Lots of non-file execution is represented with artificial
-            # filenames like "<string>", "<doctest readme.txt[0]>", or
-            # "<exec_function>".  Don't ever trace these executions, since we
-            # can't do anything with the data later anyway.
-            return None, "not a real filename"
+        original_filename = filename
+        disp = _disposition_init(self.collector.file_disposition_class, filename)
 
-        self._check_for_packages()
+        def nope(disp, reason):
+            """Simple helper to make it easy to return NO."""
+            disp.trace = False
+            disp.reason = reason
+            return disp
 
-        # Compiled Python files have two filenames: frame.f_code.co_filename is
-        # the filename at the time the .pyc was compiled.  The second name is
+        # Compiled Python files have two file names: frame.f_code.co_filename is
+        # the file name at the time the .pyc was compiled.  The second name is
         # __file__, which is where the .pyc was actually loaded from.  Since
         # .pyc files can be moved after compilation (for example, by being
         # installed), we look for __file__ in the frame and prefer it to the
@@ -253,167 +473,248 @@ class coverage(object):
         dunder_file = frame.f_globals.get('__file__')
         if dunder_file:
             filename = self._source_for_file(dunder_file)
+            if original_filename and not original_filename.startswith('<'):
+                orig = os.path.basename(original_filename)
+                if orig != os.path.basename(filename):
+                    # Files shouldn't be renamed when moved. This happens when
+                    # exec'ing code.  If it seems like something is wrong with
+                    # the frame's file name, then just use the original.
+                    filename = original_filename
+
+        if not filename:
+            # Empty string is pretty useless.
+            return nope(disp, "empty string isn't a file name")
+
+        if filename.startswith('memory:'):
+            return nope(disp, "memory isn't traceable")
+
+        if filename.startswith('<'):
+            # Lots of non-file execution is represented with artificial
+            # file names like "<string>", "<doctest readme.txt[0]>", or
+            # "<exec_function>".  Don't ever trace these executions, since we
+            # can't do anything with the data later anyway.
+            return nope(disp, "not a real file name")
+
+        # pyexpat does a dumb thing, calling the trace function explicitly from
+        # C code with a C file name.
+        if re.search(r"[/\\]Modules[/\\]pyexpat.c", filename):
+            return nope(disp, "pyexpat lies about itself")
 
         # Jython reports the .class file to the tracer, use the source file.
         if filename.endswith("$py.class"):
             filename = filename[:-9] + ".py"
 
-        canonical = self.file_locator.canonical_filename(filename)
+        canonical = files.canonical_filename(filename)
+        disp.canonical_filename = canonical
+
+        # Try the plugins, see if they have an opinion about the file.
+        plugin = None
+        for plugin in self.plugins.file_tracers:
+            if not plugin._coverage_enabled:
+                continue
+
+            try:
+                file_tracer = plugin.file_tracer(canonical)
+                if file_tracer is not None:
+                    file_tracer._coverage_plugin = plugin
+                    disp.trace = True
+                    disp.file_tracer = file_tracer
+                    if file_tracer.has_dynamic_source_filename():
+                        disp.has_dynamic_filename = True
+                    else:
+                        disp.source_filename = files.canonical_filename(
+                            file_tracer.source_filename()
+                        )
+                    break
+            except Exception:
+                self._warn(
+                    "Disabling plugin %r due to an exception:" % (
+                        plugin._coverage_plugin_name
+                    )
+                )
+                traceback.print_exc()
+                plugin._coverage_enabled = False
+                continue
+        else:
+            # No plugin wanted it: it's Python.
+            disp.trace = True
+            disp.source_filename = canonical
+
+        if not disp.has_dynamic_filename:
+            if not disp.source_filename:
+                raise CoverageException(
+                    "Plugin %r didn't set source_filename for %r" %
+                    (plugin, disp.original_filename)
+                )
+            reason = self._check_include_omit_etc_internal(
+                disp.source_filename, frame,
+            )
+            if reason:
+                nope(disp, reason)
+
+        return disp
+
+    def _check_include_omit_etc_internal(self, filename, frame):
+        """Check a file name against the include, omit, etc, rules.
+
+        Returns a string or None.  String means, don't trace, and is the reason
+        why.  None means no reason found to not trace.
+
+        """
+        modulename = self._name_for_module(frame.f_globals, filename)
 
         # If the user specified source or include, then that's authoritative
         # about the outer bound of what to measure and we don't have to apply
         # any canned exclusions. If they didn't, then we have to exclude the
         # stdlib and coverage.py directories.
         if self.source_match:
-            if not self.source_match.match(canonical):
-                return None, "falls outside the --source trees"
+            if self.source_pkgs_match.match(modulename):
+                if modulename in self.source_pkgs:
+                    self.source_pkgs.remove(modulename)
+                return None  # There's no reason to skip this file.
+
+            if not self.source_match.match(filename):
+                return "falls outside the --source trees"
         elif self.include_match:
-            if not self.include_match.match(canonical):
-                return None, "falls outside the --include trees"
+            if not self.include_match.match(filename):
+                return "falls outside the --include trees"
         else:
             # If we aren't supposed to trace installed code, then check if this
             # is near the Python standard library and skip it if so.
-            if self.pylib_match and self.pylib_match.match(canonical):
-                return None, "is in the stdlib"
+            if self.pylib_match and self.pylib_match.match(filename):
+                return "is in the stdlib"
 
-            # We exclude the coverage code itself, since a little of it will be
-            # measured otherwise.
-            if self.cover_match and self.cover_match.match(canonical):
-                return None, "is part of coverage.py"
+            # We exclude the coverage.py code itself, since a little of it
+            # will be measured otherwise.
+            if self.cover_match and self.cover_match.match(filename):
+                return "is part of coverage.py"
 
         # Check the file against the omit pattern.
-        if self.omit_match and self.omit_match.match(canonical):
-            return None, "is inside an --omit pattern"
+        if self.omit_match and self.omit_match.match(filename):
+            return "is inside an --omit pattern"
 
-        return canonical, "because we love you"
+        # No reason found to skip this file.
+        return None
 
     def _should_trace(self, filename, frame):
         """Decide whether to trace execution in `filename`.
 
-        Calls `_should_trace_with_reason`, and returns just the decision.
+        Calls `_should_trace_internal`, and returns the FileDisposition.
 
         """
-        canonical, reason = self._should_trace_with_reason(filename, frame)
+        disp = self._should_trace_internal(filename, frame)
         if self.debug.should('trace'):
-            if not canonical:
-                msg = "Not tracing %r: %s" % (filename, reason)
+            self.debug.write(_disposition_debug_msg(disp))
+        return disp
+
+    def _check_include_omit_etc(self, filename, frame):
+        """Check a file name against the include/omit/etc, rules, verbosely.
+
+        Returns a boolean: True if the file should be traced, False if not.
+
+        """
+        reason = self._check_include_omit_etc_internal(filename, frame)
+        if self.debug.should('trace'):
+            if not reason:
+                msg = "Including %r" % (filename,)
             else:
-                msg = "Tracing %r" % (filename,)
+                msg = "Not including %r: %s" % (filename, reason)
             self.debug.write(msg)
-        return canonical
+
+        return not reason
 
     def _warn(self, msg):
         """Use `msg` as a warning."""
         self._warnings.append(msg)
+        if self.debug.should('pid'):
+            msg = "[%d] %s" % (os.getpid(), msg)
         sys.stderr.write("Coverage.py warning: %s\n" % msg)
 
-    def _check_for_packages(self):
-        """Update the source_match matcher with latest imported packages."""
-        # Our self.source_pkgs attribute is a list of package names we want to
-        # measure.  Each time through here, we see if we've imported any of
-        # them yet.  If so, we add its file to source_match, and we don't have
-        # to look for that package any more.
-        if self.source_pkgs:
-            found = []
-            for pkg in self.source_pkgs:
-                try:
-                    mod = sys.modules[pkg]
-                except KeyError:
-                    continue
+    def get_option(self, option_name):
+        """Get an option from the configuration.
 
-                found.append(pkg)
+        `option_name` is a colon-separated string indicating the section and
+        option name.  For example, the ``branch`` option in the ``[run]``
+        section of the config file would be indicated with `"run:branch"`.
 
-                try:
-                    pkg_file = mod.__file__
-                except AttributeError:
-                    pkg_file = None
-                else:
-                    d, f = os.path.split(pkg_file)
-                    if f.startswith('__init__'):
-                        # This is actually a package, return the directory.
-                        pkg_file = d
-                    else:
-                        pkg_file = self._source_for_file(pkg_file)
-                    pkg_file = self.file_locator.canonical_filename(pkg_file)
-                    if not os.path.exists(pkg_file):
-                        pkg_file = None
-
-                if pkg_file:
-                    self.source.append(pkg_file)
-                    self.source_match.add(pkg_file)
-                else:
-                    self._warn("Module %s has no Python source." % pkg)
+        Returns the value of the option.
 
-            for pkg in found:
-                self.source_pkgs.remove(pkg)
+        .. versionadded:: 4.0
 
-    def use_cache(self, usecache):
-        """Control the use of a data file (incorrectly called a cache).
+        """
+        return self.config.get_option(option_name)
+
+    def set_option(self, option_name, value):
+        """Set an option in the configuration.
+
+        `option_name` is a colon-separated string indicating the section and
+        option name.  For example, the ``branch`` option in the ``[run]``
+        section of the config file would be indicated with ``"run:branch"``.
+
+        `value` is the new value for the option.  This should be a Python
+        value where appropriate.  For example, use True for booleans, not the
+        string ``"True"``.
+
+        As an example, calling::
 
-        `usecache` is true or false, whether to read and write data on disk.
+            cov.set_option("run:branch", True)
+
+        has the same effect as this configuration file::
+
+            [run]
+            branch = True
+
+        .. versionadded:: 4.0
 
         """
-        self.data.usefile(usecache)
+        self.config.set_option(option_name, value)
+
+    def use_cache(self, usecache):
+        """Obsolete method."""
+        self._init()
+        if not usecache:
+            self._warn("use_cache(False) is no longer supported.")
 
     def load(self):
         """Load previously-collected coverage data from the data file."""
+        self._init()
         self.collector.reset()
-        self.data.read()
+        self.data_files.read(self.data)
 
     def start(self):
         """Start measuring code coverage.
 
-        Coverage measurement actually occurs in functions called after `start`
-        is invoked.  Statements in the same scope as `start` won't be measured.
+        Coverage measurement actually occurs in functions called after
+        :meth:`start` is invoked.  Statements in the same scope as
+        :meth:`start` won't be measured.
 
-        Once you invoke `start`, you must also call `stop` eventually, or your
-        process might not shut down cleanly.
+        Once you invoke :meth:`start`, you must also call :meth:`stop`
+        eventually, or your process might not shut down cleanly.
 
         """
+        self._init()
         if self.run_suffix:
             # Calling start() means we're running code, so use the run_suffix
             # as the data_suffix when we eventually save the data.
             self.data_suffix = self.run_suffix
-        if self.auto_data:
+        if self._auto_data:
             self.load()
 
-        # Create the matchers we need for _should_trace
-        if self.source or self.source_pkgs:
-            self.source_match = TreeMatcher(self.source)
-        else:
-            if self.cover_dir:
-                self.cover_match = TreeMatcher([self.cover_dir])
-            if self.pylib_dirs:
-                self.pylib_match = TreeMatcher(self.pylib_dirs)
-        if self.include:
-            self.include_match = FnmatchMatcher(self.include)
-        if self.omit:
-            self.omit_match = FnmatchMatcher(self.omit)
-
-        # The user may want to debug things, show info if desired.
-        if self.debug.should('config'):
-            self.debug.write("Configuration values:")
-            config_info = sorted(self.config.__dict__.items())
-            self.debug.write_formatted_info(config_info)
-
-        if self.debug.should('sys'):
-            self.debug.write("Debugging info:")
-            self.debug.write_formatted_info(self.sysinfo())
-
         self.collector.start()
         self._started = True
         self._measured = True
 
     def stop(self):
         """Stop measuring code coverage."""
+        if self._started:
+            self.collector.stop()
         self._started = False
-        self.collector.stop()
 
     def _atexit(self):
         """Clean up on process shutdown."""
         if self._started:
             self.stop()
-        if self.auto_data:
+        if self._auto_data:
             self.save()
 
     def erase(self):
@@ -423,11 +724,14 @@ class coverage(object):
         discarding the data file.
 
         """
+        self._init()
         self.collector.reset()
         self.data.erase()
+        self.data_files.erase(parallel=self.config.parallel)
 
     def clear_exclude(self, which='exclude'):
         """Clear the exclude list."""
+        self._init()
         setattr(self.config, which + "_list", [])
         self._exclude_regex_stale()
 
@@ -446,6 +750,7 @@ class coverage(object):
         is marked for special treatment during reporting.
 
         """
+        self._init()
         excl_list = getattr(self.config, which + "_list")
         excl_list.append(regex)
         self._exclude_regex_stale()
@@ -464,79 +769,86 @@ class coverage(object):
     def get_exclude_list(self, which='exclude'):
         """Return a list of excluded regex patterns.
 
-        `which` indicates which list is desired.  See `exclude` for the lists
-        that are available, and their meaning.
+        `which` indicates which list is desired.  See :meth:`exclude` for the
+        lists that are available, and their meaning.
 
         """
+        self._init()
         return getattr(self.config, which + "_list")
 
     def save(self):
         """Save the collected coverage data to the data file."""
-        data_suffix = self.data_suffix
-        if data_suffix is True:
-            # If data_suffix was a simple true value, then make a suffix with
-            # plenty of distinguishing information.  We do this here in
-            # `save()` at the last minute so that the pid will be correct even
-            # if the process forks.
-            extra = ""
-            if _TEST_NAME_FILE:
-                f = open(_TEST_NAME_FILE)
-                test_name = f.read()
-                f.close()
-                extra = "." + test_name
-            data_suffix = "%s%s.%s.%06d" % (
-                socket.gethostname(), extra, os.getpid(),
-                random.randint(0, 999999)
-                )
+        self._init()
+        self.get_data()
+        self.data_files.write(self.data, suffix=self.data_suffix)
 
-        self._harvest_data()
-        self.data.write(suffix=data_suffix)
-
-    def combine(self):
+    def combine(self, data_paths=None):
         """Combine together a number of similarly-named coverage data files.
 
         All coverage data files whose name starts with `data_file` (from the
         coverage() constructor) will be read, and combined together into the
         current measurements.
 
+        `data_paths` is a list of files or directories from which data should
+        be combined. If no list is passed, then the data files from the
+        directory indicated by the current data file (probably the current
+        directory) will be combined.
+
+        .. versionadded:: 4.0
+            The `data_paths` parameter.
+
         """
+        self._init()
+        self.get_data()
+
         aliases = None
         if self.config.paths:
-            aliases = PathAliases(self.file_locator)
+            aliases = PathAliases()
             for paths in self.config.paths.values():
                 result = paths[0]
                 for pattern in paths[1:]:
                     aliases.add(pattern, result)
-        self.data.combine_parallel_data(aliases=aliases)
 
-    def _harvest_data(self):
+        self.data_files.combine_parallel_data(self.data, aliases=aliases, data_paths=data_paths)
+
+    def get_data(self):
         """Get the collected data and reset the collector.
 
         Also warn about various problems collecting data.
 
+        Returns a :class:`coverage.CoverageData`, the collected coverage data.
+
+        .. versionadded:: 4.0
+
         """
+        self._init()
         if not self._measured:
-            return
+            return self.data
 
-        self.data.add_line_data(self.collector.get_line_data())
-        self.data.add_arc_data(self.collector.get_arc_data())
-        self.collector.reset()
+        self.collector.save_data(self.data)
 
         # If there are still entries in the source_pkgs list, then we never
         # encountered those packages.
         if self._warn_unimported_source:
             for pkg in self.source_pkgs:
-                self._warn("Module %s was never imported." % pkg)
+                if pkg not in sys.modules:
+                    self._warn("Module %s was never imported." % pkg)
+                elif not (
+                    hasattr(sys.modules[pkg], '__file__') and
+                    os.path.exists(sys.modules[pkg].__file__)
+                ):
+                    self._warn("Module %s has no Python source." % pkg)
+                else:
+                    self._warn("Module %s was previously imported, but not measured." % pkg)
 
         # Find out if we got any data.
-        summary = self.data.summary()
-        if not summary and self._warn_no_data:
+        if not self.data and self._warn_no_data:
             self._warn("No data was collected.")
 
         # Find files that were never executed at all.
         for src in self.source:
             for py_file in find_python_files(src):
-                py_file = self.file_locator.canonical_filename(py_file)
+                py_file = files.canonical_filename(py_file)
 
                 if self.omit_match and self.omit_match.match(py_file):
                     # Turns out this file was omitted, so don't pull it back
@@ -545,7 +857,11 @@ class coverage(object):
 
                 self.data.touch_file(py_file)
 
+        if self.config.note:
+            self.data.add_run_info(note=self.config.note)
+
         self._measured = False
+        return self.data
 
     # Backward compatibility with version 1.
     def analysis(self, morf):
@@ -556,10 +872,10 @@ class coverage(object):
     def analysis2(self, morf):
         """Analyze a module.
 
-        `morf` is a module or a filename.  It will be analyzed to determine
+        `morf` is a module or a file name.  It will be analyzed to determine
         its coverage statistics.  The return value is a 5-tuple:
 
-        * The filename for the module.
+        * The file name for the module.
         * A list of line numbers of executable statements.
         * A list of line numbers of excluded statements.
         * A list of line numbers of statements not run (missing from
@@ -570,6 +886,7 @@ class coverage(object):
         coverage data.
 
         """
+        self._init()
         analysis = self._analyze(morf)
         return (
             analysis.filename,
@@ -585,38 +902,92 @@ class coverage(object):
         Returns an `Analysis` object.
 
         """
-        self._harvest_data()
-        if not isinstance(it, CodeUnit):
-            it = code_unit_factory(it, self.file_locator)[0]
+        self.get_data()
+        if not isinstance(it, FileReporter):
+            it = self._get_file_reporter(it)
+
+        return Analysis(self.data, it)
+
+    def _get_file_reporter(self, morf):
+        """Get a FileReporter for a module or file name."""
+        plugin = None
+        file_reporter = "python"
+
+        if isinstance(morf, string_class):
+            abs_morf = abs_file(morf)
+            plugin_name = self.data.file_tracer(abs_morf)
+            if plugin_name:
+                plugin = self.plugins.get(plugin_name)
+
+        if plugin:
+            file_reporter = plugin.file_reporter(abs_morf)
+            if file_reporter is None:
+                raise CoverageException(
+                    "Plugin %r did not provide a file reporter for %r." % (
+                        plugin._coverage_plugin_name, morf
+                    )
+                )
+
+        if file_reporter == "python":
+            # pylint: disable=redefined-variable-type
+            file_reporter = PythonFileReporter(morf, self)
+
+        return file_reporter
+
+    def _get_file_reporters(self, morfs=None):
+        """Get a list of FileReporters for a list of modules or file names.
+
+        For each module or file name in `morfs`, find a FileReporter.  Return
+        the list of FileReporters.
+
+        If `morfs` is a single module or file name, this returns a list of one
+        FileReporter.  If `morfs` is empty or None, then the list of all files
+        measured is used to find the FileReporters.
+
+        """
+        if not morfs:
+            morfs = self.data.measured_files()
 
-        return Analysis(self, it)
+        # Be sure we have a list.
+        if not isinstance(morfs, (list, tuple)):
+            morfs = [morfs]
 
-    def report(self, morfs=None, show_missing=True, ignore_errors=None,
-                file=None,                          # pylint: disable=W0622
-                omit=None, include=None
-                ):
+        file_reporters = []
+        for morf in morfs:
+            file_reporter = self._get_file_reporter(morf)
+            file_reporters.append(file_reporter)
+
+        return file_reporters
+
+    def report(
+        self, morfs=None, show_missing=None, ignore_errors=None,
+        file=None,                  # pylint: disable=redefined-builtin
+        omit=None, include=None, skip_covered=None,
+    ):
         """Write a summary report to `file`.
 
         Each module in `morfs` is listed, with counts of statements, executed
         statements, missing statements, and a list of lines missed.
 
-        `include` is a list of filename patterns.  Modules whose filenames
-        match those patterns will be included in the report. Modules matching
-        `omit` will not be included in the report.
+        `include` is a list of file name patterns.  Files that match will be
+        included in the report. Files matching `omit` will not be included in
+        the report.
 
         Returns a float, the total percentage covered.
 
         """
-        self._harvest_data()
+        self.get_data()
         self.config.from_args(
             ignore_errors=ignore_errors, omit=omit, include=include,
-            show_missing=show_missing,
+            show_missing=show_missing, skip_covered=skip_covered,
             )
         reporter = SummaryReporter(self, self.config)
         return reporter.report(morfs, outfile=file)
 
-    def annotate(self, morfs=None, directory=None, ignore_errors=None,
-                    omit=None, include=None):
+    def annotate(
+        self, morfs=None, directory=None, ignore_errors=None,
+        omit=None, include=None,
+    ):
         """Annotate a list of modules.
 
         Each module in `morfs` is annotated.  The source is written to a new
@@ -624,10 +995,10 @@ class coverage(object):
         marker to indicate the coverage of the line.  Covered lines have ">",
         excluded lines have "-", and missing lines have "!".
 
-        See `coverage.report()` for other arguments.
+        See :meth:`report` for other arguments.
 
         """
-        self._harvest_data()
+        self.get_data()
         self.config.from_args(
             ignore_errors=ignore_errors, omit=omit, include=include
             )
@@ -648,12 +1019,12 @@ class coverage(object):
         `title` is a text string (not HTML) to use as the title of the HTML
         report.
 
-        See `coverage.report()` for other arguments.
+        See :meth:`report` for other arguments.
 
         Returns a float, the total percentage covered.
 
         """
-        self._harvest_data()
+        self.get_data()
         self.config.from_args(
             ignore_errors=ignore_errors, omit=omit, include=include,
             html_dir=directory, extra_css=extra_css, html_title=title,
@@ -661,8 +1032,10 @@ class coverage(object):
         reporter = HtmlReporter(self, self.config)
         return reporter.report(morfs)
 
-    def xml_report(self, morfs=None, outfile=None, ignore_errors=None,
-                    omit=None, include=None):
+    def xml_report(
+        self, morfs=None, outfile=None, ignore_errors=None,
+        omit=None, include=None,
+    ):
         """Generate an XML report of coverage results.
 
         The report is compatible with Cobertura reports.
@@ -670,12 +1043,12 @@ class coverage(object):
         Each module in `morfs` is included in the report.  `outfile` is the
         path to write the file to, "-" will write to stdout.
 
-        See `coverage.report()` for other arguments.
+        See :meth:`report` for other arguments.
 
         Returns a float, the total percentage covered.
 
         """
-        self._harvest_data()
+        self.get_data()
         self.config.from_args(
             ignore_errors=ignore_errors, omit=omit, include=include,
             xml_output=outfile,
@@ -686,69 +1059,115 @@ class coverage(object):
             if self.config.xml_output == '-':
                 outfile = sys.stdout
             else:
-                outfile = open(self.config.xml_output, "w")
+                # Ensure that the output directory is created; done here
+                # because this report pre-opens the output file.
+                # HTMLReport does this using the Report plumbing because
+                # its task is more complex, being multiple files.
+                output_dir = os.path.dirname(self.config.xml_output)
+                if output_dir and not os.path.isdir(output_dir):
+                    os.makedirs(output_dir)
+                open_kwargs = {}
+                if env.PY3:
+                    open_kwargs['encoding'] = 'utf8'
+                outfile = open(self.config.xml_output, "w", **open_kwargs)
                 file_to_close = outfile
         try:
-            try:
-                reporter = XmlReporter(self, self.config)
-                return reporter.report(morfs, outfile=outfile)
-            except CoverageException:
-                delete_file = True
-                raise
+            reporter = XmlReporter(self, self.config)
+            return reporter.report(morfs, outfile=outfile)
+        except CoverageException:
+            delete_file = True
+            raise
         finally:
             if file_to_close:
                 file_to_close.close()
                 if delete_file:
                     file_be_gone(self.config.xml_output)
 
-    def sysinfo(self):
+    def sys_info(self):
         """Return a list of (key, value) pairs showing internal information."""
 
         import coverage as covmod
-        import platform, re
 
-        try:
-            implementation = platform.python_implementation()
-        except AttributeError:
-            implementation = "unknown"
+        self._init()
+
+        ft_plugins = []
+        for ft in self.plugins.file_tracers:
+            ft_name = ft._coverage_plugin_name
+            if not ft._coverage_enabled:
+                ft_name += " (disabled)"
+            ft_plugins.append(ft_name)
 
         info = [
             ('version', covmod.__version__),
             ('coverage', covmod.__file__),
-            ('cover_dir', self.cover_dir),
+            ('cover_dirs', self.cover_dirs),
             ('pylib_dirs', self.pylib_dirs),
             ('tracer', self.collector.tracer_name()),
+            ('plugins.file_tracers', ft_plugins),
             ('config_files', self.config.attempted_config_files),
             ('configs_read', self.config.config_files),
-            ('data_path', self.data.filename),
+            ('data_path', self.data_files.filename),
             ('python', sys.version.replace('\n', '')),
             ('platform', platform.platform()),
-            ('implementation', implementation),
+            ('implementation', platform.python_implementation()),
             ('executable', sys.executable),
             ('cwd', os.getcwd()),
             ('path', sys.path),
-            ('environment', sorted([
-                ("%s = %s" % (k, v)) for k, v in iitems(os.environ)
-                    if re.search(r"^COV|^PY", k)
-                ])),
+            ('environment', sorted(
+                ("%s = %s" % (k, v))
+                for k, v in iitems(os.environ)
+                if k.startswith(("COV", "PY"))
+            )),
             ('command_line', " ".join(getattr(sys, 'argv', ['???']))),
             ]
-        if self.source_match:
-            info.append(('source_match', self.source_match.info()))
-        if self.include_match:
-            info.append(('include_match', self.include_match.info()))
-        if self.omit_match:
-            info.append(('omit_match', self.omit_match.info()))
-        if self.cover_match:
-            info.append(('cover_match', self.cover_match.info()))
-        if self.pylib_match:
-            info.append(('pylib_match', self.pylib_match.info()))
+
+        matcher_names = [
+            'source_match', 'source_pkgs_match',
+            'include_match', 'omit_match',
+            'cover_match', 'pylib_match',
+            ]
+
+        for matcher_name in matcher_names:
+            matcher = getattr(self, matcher_name)
+            if matcher:
+                matcher_info = matcher.info()
+            else:
+                matcher_info = '-none-'
+            info.append((matcher_name, matcher_info))
 
         return info
 
 
+# FileDisposition "methods": FileDisposition is a pure value object, so it can
+# be implemented in either C or Python.  Acting on them is done with these
+# functions.
+
+def _disposition_init(cls, original_filename):
+    """Construct and initialize a new FileDisposition object."""
+    disp = cls()
+    disp.original_filename = original_filename
+    disp.canonical_filename = original_filename
+    disp.source_filename = None
+    disp.trace = False
+    disp.reason = ""
+    disp.file_tracer = None
+    disp.has_dynamic_filename = False
+    return disp
+
+
+def _disposition_debug_msg(disp):
+    """Make a nice debug message of what the FileDisposition is doing."""
+    if disp.trace:
+        msg = "Tracing %r" % (disp.original_filename,)
+        if disp.file_tracer:
+            msg += ": will be traced by %r" % disp.file_tracer
+    else:
+        msg = "Not tracing %r: %s" % (disp.original_filename, disp.reason)
+    return msg
+
+
 def process_startup():
-    """Call this at Python startup to perhaps measure coverage.
+    """Call this at Python start-up to perhaps measure coverage.
 
     If the environment variable COVERAGE_PROCESS_START is defined, coverage
     measurement is started.  The value of the variable is the config file
@@ -766,14 +1185,41 @@ def process_startup():
 
         import coverage; coverage.process_startup()
 
+    Returns the :class:`Coverage` instance that was started, or None if it was
+    not started by this call.
+
     """
     cps = os.environ.get("COVERAGE_PROCESS_START")
-    if cps:
-        cov = coverage(config_file=cps, auto_data=True)
-        cov.start()
-        cov._warn_no_data = False
-        cov._warn_unimported_source = False
-
-
-# A hack for debugging testing in subprocesses.
-_TEST_NAME_FILE = "" #"/tmp/covtest.txt"
+    if not cps:
+        # No request for coverage, nothing to do.
+        return None
+
+    # This function can be called more than once in a process. This happens
+    # because some virtualenv configurations make the same directory visible
+    # twice in sys.path.  This means that the .pth file will be found twice,
+    # and executed twice, executing this function twice.  We set a global
+    # flag (an attribute on this function) to indicate that coverage.py has
+    # already been started, so we can avoid doing it twice.
+    #
+    # https://bitbucket.org/ned/coveragepy/issue/340/keyerror-subpy has more
+    # details.
+
+    if hasattr(process_startup, "coverage"):
+        # We've annotated this function before, so we must have already
+        # started coverage.py in this process.  Nothing to do.
+        return None
+
+    cov = Coverage(config_file=cps, auto_data=True)
+    process_startup.coverage = cov
+    cov.start()
+    cov._warn_no_data = False
+    cov._warn_unimported_source = False
+
+    return cov
+
+
+def _prevent_sub_process_measurement():
+    """Stop any subprocess auto-measurement from writing data."""
+    auto_created_coverage = getattr(process_startup, "coverage", None)
+    if auto_created_coverage is not None:
+        auto_created_coverage._auto_data = False
index fb88c5b1e638cdd5954c3818a08d000a4ec499eb..60e104d962ad471259a9e318a9ab4aa9d27567b0 100644 (file)
-"""Coverage data for Coverage."""
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
 
-import os
+"""Coverage data for coverage.py."""
 
-from coverage.backward import iitems, pickle, sorted    # pylint: disable=W0622
+import glob
+import itertools
+import json
+import optparse
+import os
+import os.path
+import random
+import re
+import socket
+
+from coverage import env
+from coverage.backward import iitems, string_class
+from coverage.debug import _TEST_NAME_FILE
 from coverage.files import PathAliases
-from coverage.misc import file_be_gone
+from coverage.misc import CoverageException, file_be_gone, isolate_module
+
+os = isolate_module(os)
 
 
 class CoverageData(object):
     """Manages collected coverage data, including file storage.
 
-    The data file format is a pickled dict, with these keys:
+    This class is the publicly supported API to the data coverage.py collects
+    during program execution.  It includes information about what code was
+    executed. It does not include information from the analysis phase, to
+    determine what lines could have been executed, or what lines were not
+    executed.
 
-        * collector: a string identifying the collecting software
+    .. note::
 
-        * lines: a dict mapping filenames to sorted lists of line numbers
-          executed:
-            { 'file1': [17,23,45],  'file2': [1,2,3], ... }
+        The file format is not documented or guaranteed.  It will change in
+        the future, in possibly complicated ways.  Do not read coverage.py
+        data files directly.  Use this API to avoid disruption.
 
-        * arcs: a dict mapping filenames to sorted lists of line number pairs:
-            { 'file1': [(17,23), (17,25), (25,26)], ... }
+    There are a number of kinds of data that can be collected:
 
-    """
+    * **lines**: the line numbers of source lines that were executed.
+      These are always available.
 
-    def __init__(self, basename=None, collector=None, debug=None):
-        """Create a CoverageData.
+    * **arcs**: pairs of source and destination line numbers for transitions
+      between source lines.  These are only available if branch coverage was
+      used.
 
-        `basename` is the name of the file to use for storing data.
+    * **file tracer names**: the module names of the file tracer plugins that
+      handled each file in the data.
 
-        `collector` is a string describing the coverage measurement software.
+    * **run information**: information about the program execution.  This is
+      written during "coverage run", and then accumulated during "coverage
+      combine".
 
-        `debug` is a `DebugControl` object for writing debug messages.
+    Lines, arcs, and file tracer names are stored for each source file. File
+    names in this API are case-sensitive, even on platforms with
+    case-insensitive file systems.
 
-        """
-        self.collector = collector or 'unknown'
-        self.debug = debug
+    To read a coverage.py data file, use :meth:`read_file`, or
+    :meth:`read_fileobj` if you have an already-opened file.  You can then
+    access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`,
+    or :meth:`file_tracer`.  Run information is available with
+    :meth:`run_infos`.
+
+    The :meth:`has_arcs` method indicates whether arc data is available.  You
+    can get a list of the files in the data with :meth:`measured_files`.
+    A summary of the line data is available from :meth:`line_counts`.  As with
+    most Python containers, you can determine if there is any data at all by
+    using this object as a boolean value.
+
+
+    Most data files will be created by coverage.py itself, but you can use
+    methods here to create data files if you like.  The :meth:`add_lines`,
+    :meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways
+    that are convenient for coverage.py.  The :meth:`add_run_info` method adds
+    key-value pairs to the run information.
 
-        self.use_file = True
+    To add a file without any measured data, use :meth:`touch_file`.
 
-        # Construct the filename that will be used for data file storage, if we
-        # ever do any file storage.
-        self.filename = basename or ".coverage"
-        self.filename = os.path.abspath(self.filename)
+    You write to a named file with :meth:`write_file`, or to an already opened
+    file with :meth:`write_fileobj`.
+
+    You can clear the data in memory with :meth:`erase`.  Two data collections
+    can be combined by using :meth:`update` on one :class:`CoverageData`,
+    passing it the other.
+
+    """
+
+    # The data file format is JSON, with these keys:
+    #
+    #     * lines: a dict mapping file names to lists of line numbers
+    #       executed::
+    #
+    #         { "file1": [17,23,45], "file2": [1,2,3], ... }
+    #
+    #     * arcs: a dict mapping file names to lists of line number pairs::
+    #
+    #         { "file1": [[17,23], [17,25], [25,26]], ... }
+    #
+    #     * file_tracers: a dict mapping file names to plugin names::
+    #
+    #         { "file1": "django.coverage", ... }
+    #
+    #     * runs: a list of dicts of information about the coverage.py runs
+    #       contributing to the data::
+    #
+    #         [ { "brief_sys": "CPython 2.7.10 Darwin" }, ... ]
+    #
+    # Only one of `lines` or `arcs` will be present: with branch coverage, data
+    # is stored as arcs. Without branch coverage, it is stored as lines.  The
+    # line data is easily recovered from the arcs: it is all the first elements
+    # of the pairs that are greater than zero.
+
+    def __init__(self, debug=None):
+        """Create a CoverageData.
+
+        `debug` is a `DebugControl` object for writing debug messages.
+
+        """
+        self._debug = debug
 
         # A map from canonical Python source file name to a dictionary in
         # which there's an entry for each line number that has been
         # executed:
         #
-        #   {
-        #       'filename1.py': { 12: None, 47: None, ... },
-        #       ...
-        #       }
+        #   { 'filename1.py': [12, 47, 1001], ... }
         #
-        self.lines = {}
+        self._lines = None
 
         # A map from canonical Python source file name to a dictionary with an
         # entry for each pair of line numbers forming an arc:
         #
-        #   {
-        #       'filename1.py': { (12,14): None, (47,48): None, ... },
-        #       ...
-        #       }
+        #   { 'filename1.py': [(12,14), (47,48), ... ], ... }
+        #
+        self._arcs = None
+
+        # A map from canonical source file name to a plugin module name:
+        #
+        #   { 'filename1.py': 'django.coverage', ... }
         #
-        self.arcs = {}
+        self._file_tracers = {}
 
-    def usefile(self, use_file=True):
-        """Set whether or not to use a disk file for data."""
-        self.use_file = use_file
+        # A list of dicts of information about the coverage.py runs.
+        self._runs = []
 
-    def read(self):
-        """Read coverage data from the coverage data file (if it exists)."""
-        if self.use_file:
-            self.lines, self.arcs = self._read_file(self.filename)
-        else:
-            self.lines, self.arcs = {}, {}
+    def __repr__(self):
+        return "<{klass} lines={lines} arcs={arcs} tracers={tracers} runs={runs}>".format(
+            klass=self.__class__.__name__,
+            lines="None" if self._lines is None else "{{{0}}}".format(len(self._lines)),
+            arcs="None" if self._arcs is None else "{{{0}}}".format(len(self._arcs)),
+            tracers="{{{0}}}".format(len(self._file_tracers)),
+            runs="[{0}]".format(len(self._runs)),
+        )
 
-    def write(self, suffix=None):
-        """Write the collected coverage data to a file.
+    ##
+    ## Reading data
+    ##
 
-        `suffix` is a suffix to append to the base file name. This can be used
-        for multiple or parallel execution, so that many coverage data files
-        can exist simultaneously.  A dot will be used to join the base name and
-        the suffix.
+    def has_arcs(self):
+        """Does this data have arcs?
+
+        Arc data is only available if branch coverage was used during
+        collection.
+
+        Returns a boolean.
 
         """
-        if self.use_file:
-            filename = self.filename
-            if suffix:
-                filename += "." + suffix
-            self.write_file(filename)
+        return self._has_arcs()
 
-    def erase(self):
-        """Erase the data, both in this object, and from its file storage."""
-        if self.use_file:
-            if self.filename:
-                file_be_gone(self.filename)
-        self.lines = {}
-        self.arcs = {}
-
-    def line_data(self):
-        """Return the map from filenames to lists of line numbers executed."""
-        return dict(
-            [(f, sorted(lmap.keys())) for f, lmap in iitems(self.lines)]
-            )
+    def lines(self, filename):
+        """Get the list of lines executed for a file.
 
-    def arc_data(self):
-        """Return the map from filenames to lists of line number pairs."""
-        return dict(
-            [(f, sorted(amap.keys())) for f, amap in iitems(self.arcs)]
-            )
+        If the file was not measured, returns None.  A file might be measured,
+        and have no lines executed, in which case an empty list is returned.
 
-    def write_file(self, filename):
-        """Write the coverage data to `filename`."""
+        If the file was executed, returns a list of integers, the line numbers
+        executed in the file. The list is in no particular order.
 
-        # Create the file data.
-        data = {}
+        """
+        if self._arcs is not None:
+            arcs = self._arcs.get(filename)
+            if arcs is not None:
+                all_lines = itertools.chain.from_iterable(arcs)
+                return list(set(l for l in all_lines if l > 0))
+        elif self._lines is not None:
+            return self._lines.get(filename)
+        return None
+
+    def arcs(self, filename):
+        """Get the list of arcs executed for a file.
+
+        If the file was not measured, returns None.  A file might be measured,
+        and have no arcs executed, in which case an empty list is returned.
+
+        If the file was executed, returns a list of 2-tuples of integers. Each
+        pair is a starting line number and an ending line number for a
+        transition from one line to another. The list is in no particular
+        order.
+
+        Negative numbers have special meaning.  If the starting line number is
+        -N, it represents an entry to the code object that starts at line N.
+        If the ending ling number is -N, it's an exit from the code object that
+        starts at line N.
 
-        data['lines'] = self.line_data()
-        arcs = self.arc_data()
-        if arcs:
-            data['arcs'] = arcs
+        """
+        if self._arcs is not None:
+            if filename in self._arcs:
+                return self._arcs[filename]
+        return None
 
-        if self.collector:
-            data['collector'] = self.collector
+    def file_tracer(self, filename):
+        """Get the plugin name of the file tracer for a file.
 
-        if self.debug and self.debug.should('dataio'):
-            self.debug.write("Writing data to %r" % (filename,))
+        Returns the name of the plugin that handles this file.  If the file was
+        measured, but didn't use a plugin, then "" is returned.  If the file
+        was not measured, then None is returned.
 
-        # Write the pickle to the file.
-        fdata = open(filename, 'wb')
-        try:
-            pickle.dump(data, fdata, 2)
-        finally:
-            fdata.close()
+        """
+        # Because the vast majority of files involve no plugin, we don't store
+        # them explicitly in self._file_tracers.  Check the measured data
+        # instead to see if it was a known file with no plugin.
+        if filename in (self._arcs or self._lines or {}):
+            return self._file_tracers.get(filename, "")
+        return None
 
-    def read_file(self, filename):
-        """Read the coverage data from `filename`."""
-        self.lines, self.arcs = self._read_file(filename)
-
-    def raw_data(self, filename):
-        """Return the raw pickled data from `filename`."""
-        if self.debug and self.debug.should('dataio'):
-            self.debug.write("Reading data from %r" % (filename,))
-        fdata = open(filename, 'rb')
-        try:
-            data = pickle.load(fdata)
-        finally:
-            fdata.close()
-        return data
+    def run_infos(self):
+        """Return the list of dicts of run information.
 
-    def _read_file(self, filename):
-        """Return the stored coverage data from the given file.
+        For data collected during a single run, this will be a one-element
+        list.  If data has been combined, there will be one element for each
+        original data file.
 
-        Returns two values, suitable for assigning to `self.lines` and
-        `self.arcs`.
+        """
+        return self._runs
+
+    def measured_files(self):
+        """A list of all files that have been measured."""
+        return list(self._arcs or self._lines or {})
+
+    def line_counts(self, fullpath=False):
+        """Return a dict summarizing the line coverage data.
+
+        Keys are based on the file names, and values are the number of executed
+        lines.  If `fullpath` is true, then the keys are the full pathnames of
+        the files, otherwise they are the basenames of the files.
+
+        Returns a dict mapping file names to counts of lines.
 
         """
-        lines = {}
-        arcs = {}
-        try:
-            data = self.raw_data(filename)
-            if isinstance(data, dict):
-                # Unpack the 'lines' item.
-                lines = dict([
-                    (f, dict.fromkeys(linenos, None))
-                        for f, linenos in iitems(data.get('lines', {}))
-                    ])
-                # Unpack the 'arcs' item.
-                arcs = dict([
-                    (f, dict.fromkeys(arcpairs, None))
-                        for f, arcpairs in iitems(data.get('arcs', {}))
-                    ])
-        except Exception:
-            pass
-        return lines, arcs
-
-    def combine_parallel_data(self, aliases=None):
-        """Combine a number of data files together.
+        summ = {}
+        if fullpath:
+            filename_fn = lambda f: f
+        else:
+            filename_fn = os.path.basename
+        for filename in self.measured_files():
+            summ[filename_fn(filename)] = len(self.lines(filename))
+        return summ
 
-        Treat `self.filename` as a file prefix, and combine the data from all
-        of the data files starting with that prefix plus a dot.
+    def __nonzero__(self):
+        return bool(self._lines or self._arcs)
 
-        If `aliases` is provided, it's a `PathAliases` object that is used to
-        re-map paths to match the local machine's.
+    __bool__ = __nonzero__
+
+    def read_fileobj(self, file_obj):
+        """Read the coverage data from the given file object.
+
+        Should only be used on an empty CoverageData object.
 
         """
-        aliases = aliases or PathAliases()
-        data_dir, local = os.path.split(self.filename)
-        localdot = local + '.'
-        for f in os.listdir(data_dir or '.'):
-            if f.startswith(localdot):
-                full_path = os.path.join(data_dir, f)
-                new_lines, new_arcs = self._read_file(full_path)
-                for filename, file_data in iitems(new_lines):
-                    filename = aliases.map(filename)
-                    self.lines.setdefault(filename, {}).update(file_data)
-                for filename, file_data in iitems(new_arcs):
-                    filename = aliases.map(filename)
-                    self.arcs.setdefault(filename, {}).update(file_data)
-                if f != local:
-                    os.remove(full_path)
-
-    def add_line_data(self, line_data):
-        """Add executed line data.
-
-        `line_data` is { filename: { lineno: None, ... }, ...}
+        data = self._read_raw_data(file_obj)
+
+        self._lines = self._arcs = None
+
+        if 'lines' in data:
+            self._lines = data['lines']
+        if 'arcs' in data:
+            self._arcs = dict(
+                (fname, [tuple(pair) for pair in arcs])
+                for fname, arcs in iitems(data['arcs'])
+            )
+        self._file_tracers = data.get('file_tracers', {})
+        self._runs = data.get('runs', [])
+
+        self._validate()
+
+    def read_file(self, filename):
+        """Read the coverage data from `filename` into this object."""
+        if self._debug and self._debug.should('dataio'):
+            self._debug.write("Reading data from %r" % (filename,))
+        try:
+            with self._open_for_reading(filename) as f:
+                self.read_fileobj(f)
+        except Exception as exc:
+            raise CoverageException(
+                "Couldn't read data from '%s': %s: %s" % (
+                    filename, exc.__class__.__name__, exc,
+                )
+            )
+
+    _GO_AWAY = "!coverage.py: This is a private format, don't read it directly!"
+
+    @classmethod
+    def _open_for_reading(cls, filename):
+        """Open a file appropriately for reading data."""
+        return open(filename, "r")
+
+    @classmethod
+    def _read_raw_data(cls, file_obj):
+        """Read the raw data from a file object."""
+        go_away = file_obj.read(len(cls._GO_AWAY))
+        if go_away != cls._GO_AWAY:
+            raise CoverageException("Doesn't seem to be a coverage.py data file")
+        return json.load(file_obj)
+
+    @classmethod
+    def _read_raw_data_file(cls, filename):
+        """Read the raw data from a file, for debugging."""
+        with cls._open_for_reading(filename) as f:
+            return cls._read_raw_data(f)
+
+    ##
+    ## Writing data
+    ##
+
+    def add_lines(self, line_data):
+        """Add measured line data.
+
+        `line_data` is a dictionary mapping file names to dictionaries::
+
+            { filename: { lineno: None, ... }, ...}
 
         """
+        if self._debug and self._debug.should('dataop'):
+            self._debug.write("Adding lines: %d files, %d lines total" % (
+                len(line_data), sum(len(lines) for lines in line_data.values())
+            ))
+        if self._has_arcs():
+            raise CoverageException("Can't add lines to existing arc data")
+
+        if self._lines is None:
+            self._lines = {}
         for filename, linenos in iitems(line_data):
-            self.lines.setdefault(filename, {}).update(linenos)
+            if filename in self._lines:
+                new_linenos = set(self._lines[filename])
+                new_linenos.update(linenos)
+                linenos = new_linenos
+            self._lines[filename] = list(linenos)
 
-    def add_arc_data(self, arc_data):
+        self._validate()
+
+    def add_arcs(self, arc_data):
         """Add measured arc data.
 
-        `arc_data` is { filename: { (l1,l2): None, ... }, ...}
+        `arc_data` is a dictionary mapping file names to dictionaries::
+
+            { filename: { (l1,l2): None, ... }, ...}
 
         """
+        if self._debug and self._debug.should('dataop'):
+            self._debug.write("Adding arcs: %d files, %d arcs total" % (
+                len(arc_data), sum(len(arcs) for arcs in arc_data.values())
+            ))
+        if self._has_lines():
+            raise CoverageException("Can't add arcs to existing line data")
+
+        if self._arcs is None:
+            self._arcs = {}
         for filename, arcs in iitems(arc_data):
-            self.arcs.setdefault(filename, {}).update(arcs)
+            if filename in self._arcs:
+                new_arcs = set(self._arcs[filename])
+                new_arcs.update(arcs)
+                arcs = new_arcs
+            self._arcs[filename] = list(arcs)
+
+        self._validate()
+
+    def add_file_tracers(self, file_tracers):
+        """Add per-file plugin information.
+
+        `file_tracers` is { filename: plugin_name, ... }
+
+        """
+        if self._debug and self._debug.should('dataop'):
+            self._debug.write("Adding file tracers: %d files" % (len(file_tracers),))
+
+        existing_files = self._arcs or self._lines or {}
+        for filename, plugin_name in iitems(file_tracers):
+            if filename not in existing_files:
+                raise CoverageException(
+                    "Can't add file tracer data for unmeasured file '%s'" % (filename,)
+                )
+            existing_plugin = self._file_tracers.get(filename)
+            if existing_plugin is not None and plugin_name != existing_plugin:
+                raise CoverageException(
+                    "Conflicting file tracer name for '%s': %r vs %r" % (
+                        filename, existing_plugin, plugin_name,
+                    )
+                )
+            self._file_tracers[filename] = plugin_name
+
+        self._validate()
+
+    def add_run_info(self, **kwargs):
+        """Add information about the run.
+
+        Keywords are arbitrary, and are stored in the run dictionary. Values
+        must be JSON serializable.  You may use this function more than once,
+        but repeated keywords overwrite each other.
+
+        """
+        if self._debug and self._debug.should('dataop'):
+            self._debug.write("Adding run info: %r" % (kwargs,))
+        if not self._runs:
+            self._runs = [{}]
+        self._runs[0].update(kwargs)
+        self._validate()
 
     def touch_file(self, filename):
         """Ensure that `filename` appears in the data, empty if needed."""
-        self.lines.setdefault(filename, {})
+        if self._debug and self._debug.should('dataop'):
+            self._debug.write("Touching %r" % (filename,))
+        if not self._has_arcs() and not self._has_lines():
+            raise CoverageException("Can't touch files in an empty CoverageData")
 
-    def measured_files(self):
-        """A list of all files that had been measured."""
-        return list(self.lines.keys())
+        if self._has_arcs():
+            where = self._arcs
+        else:
+            where = self._lines
+        where.setdefault(filename, [])
+
+        self._validate()
+
+    def write_fileobj(self, file_obj):
+        """Write the coverage data to `file_obj`."""
+
+        # Create the file data.
+        file_data = {}
+
+        if self._has_arcs():
+            file_data['arcs'] = self._arcs
+
+        if self._has_lines():
+            file_data['lines'] = self._lines
+
+        if self._file_tracers:
+            file_data['file_tracers'] = self._file_tracers
+
+        if self._runs:
+            file_data['runs'] = self._runs
+
+        # Write the data to the file.
+        file_obj.write(self._GO_AWAY)
+        json.dump(file_data, file_obj)
+
+    def write_file(self, filename):
+        """Write the coverage data to `filename`."""
+        if self._debug and self._debug.should('dataio'):
+            self._debug.write("Writing data to %r" % (filename,))
+        with open(filename, 'w') as fdata:
+            self.write_fileobj(fdata)
 
-    def executed_lines(self, filename):
-        """A map containing all the line numbers executed in `filename`.
+    def erase(self):
+        """Erase the data in this object."""
+        self._lines = None
+        self._arcs = None
+        self._file_tracers = {}
+        self._runs = []
+        self._validate()
+
+    def update(self, other_data, aliases=None):
+        """Update this data with data from another `CoverageData`.
 
-        If `filename` hasn't been collected at all (because it wasn't executed)
-        then return an empty map.
+        If `aliases` is provided, it's a `PathAliases` object that is used to
+        re-map paths to match the local machine's.
 
         """
-        return self.lines.get(filename) or {}
+        if self._has_lines() and other_data._has_arcs():
+            raise CoverageException("Can't combine arc data with line data")
+        if self._has_arcs() and other_data._has_lines():
+            raise CoverageException("Can't combine line data with arc data")
 
-    def executed_arcs(self, filename):
-        """A map containing all the arcs executed in `filename`."""
-        return self.arcs.get(filename) or {}
+        aliases = aliases or PathAliases()
 
-    def add_to_hash(self, filename, hasher):
-        """Contribute `filename`'s data to the Md5Hash `hasher`."""
-        hasher.update(self.executed_lines(filename))
-        hasher.update(self.executed_arcs(filename))
+        # _file_tracers: only have a string, so they have to agree.
+        # Have to do these first, so that our examination of self._arcs and
+        # self._lines won't be confused by data updated from other_data.
+        for filename in other_data.measured_files():
+            other_plugin = other_data.file_tracer(filename)
+            filename = aliases.map(filename)
+            this_plugin = self.file_tracer(filename)
+            if this_plugin is None:
+                if other_plugin:
+                    self._file_tracers[filename] = other_plugin
+            elif this_plugin != other_plugin:
+                raise CoverageException(
+                    "Conflicting file tracer name for '%s': %r vs %r" % (
+                        filename, this_plugin, other_plugin,
+                    )
+                )
+
+        # _runs: add the new runs to these runs.
+        self._runs.extend(other_data._runs)
+
+        # _lines: merge dicts.
+        if other_data._has_lines():
+            if self._lines is None:
+                self._lines = {}
+            for filename, file_lines in iitems(other_data._lines):
+                filename = aliases.map(filename)
+                if filename in self._lines:
+                    lines = set(self._lines[filename])
+                    lines.update(file_lines)
+                    file_lines = list(lines)
+                self._lines[filename] = file_lines
+
+        # _arcs: merge dicts.
+        if other_data._has_arcs():
+            if self._arcs is None:
+                self._arcs = {}
+            for filename, file_arcs in iitems(other_data._arcs):
+                filename = aliases.map(filename)
+                if filename in self._arcs:
+                    arcs = set(self._arcs[filename])
+                    arcs.update(file_arcs)
+                    file_arcs = list(arcs)
+                self._arcs[filename] = file_arcs
+
+        self._validate()
+
+    ##
+    ## Miscellaneous
+    ##
+
+    def _validate(self):
+        """If we are in paranoid mode, validate that everything is right."""
+        if env.TESTING:
+            self._validate_invariants()
+
+    def _validate_invariants(self):
+        """Validate internal invariants."""
+        # Only one of _lines or _arcs should exist.
+        assert not(self._has_lines() and self._has_arcs()), (
+            "Shouldn't have both _lines and _arcs"
+        )
+
+        # _lines should be a dict of lists of ints.
+        if self._has_lines():
+            for fname, lines in iitems(self._lines):
+                assert isinstance(fname, string_class), "Key in _lines shouldn't be %r" % (fname,)
+                assert all(isinstance(x, int) for x in lines), (
+                    "_lines[%r] shouldn't be %r" % (fname, lines)
+                )
+
+        # _arcs should be a dict of lists of pairs of ints.
+        if self._has_arcs():
+            for fname, arcs in iitems(self._arcs):
+                assert isinstance(fname, string_class), "Key in _arcs shouldn't be %r" % (fname,)
+                assert all(isinstance(x, int) and isinstance(y, int) for x, y in arcs), (
+                    "_arcs[%r] shouldn't be %r" % (fname, arcs)
+                )
+
+        # _file_tracers should have only non-empty strings as values.
+        for fname, plugin in iitems(self._file_tracers):
+            assert isinstance(fname, string_class), (
+                "Key in _file_tracers shouldn't be %r" % (fname,)
+            )
+            assert plugin and isinstance(plugin, string_class), (
+                "_file_tracers[%r] shouldn't be %r" % (fname, plugin)
+            )
 
-    def summary(self, fullpath=False):
-        """Return a dict summarizing the coverage data.
+        # _runs should be a list of dicts.
+        for val in self._runs:
+            assert isinstance(val, dict)
+            for key in val:
+                assert isinstance(key, string_class), "Key in _runs shouldn't be %r" % (key,)
 
-        Keys are based on the filenames, and values are the number of executed
-        lines.  If `fullpath` is true, then the keys are the full pathnames of
-        the files, otherwise they are the basenames of the files.
+    def add_to_hash(self, filename, hasher):
+        """Contribute `filename`'s data to the `hasher`.
+
+        `hasher` is a `coverage.misc.Hasher` instance to be updated with
+        the file's data.  It should only get the results data, not the run
+        data.
 
         """
-        summ = {}
-        if fullpath:
-            filename_fn = lambda f: f
+        if self._has_arcs():
+            hasher.update(sorted(self.arcs(filename) or []))
         else:
-            filename_fn = os.path.basename
-        for filename, lines in iitems(self.lines):
-            summ[filename_fn(filename)] = len(lines)
-        return summ
+            hasher.update(sorted(self.lines(filename) or []))
+        hasher.update(self.file_tracer(filename))
 
-    def has_arcs(self):
-        """Does this data have arcs?"""
-        return bool(self.arcs)
+    ##
+    ## Internal
+    ##
+
+    def _has_lines(self):
+        """Do we have data in self._lines?"""
+        return self._lines is not None
+
+    def _has_arcs(self):
+        """Do we have data in self._arcs?"""
+        return self._arcs is not None
+
+
+class CoverageDataFiles(object):
+    """Manage the use of coverage data files."""
+
+    def __init__(self, basename=None, warn=None):
+        """Create a CoverageDataFiles to manage data files.
+
+        `warn` is the warning function to use.
+
+        `basename` is the name of the file to use for storing data.
+
+        """
+        self.warn = warn
+        # Construct the file name that will be used for data storage.
+        self.filename = os.path.abspath(basename or ".coverage")
+
+    def erase(self, parallel=False):
+        """Erase the data from the file storage.
+
+        If `parallel` is true, then also deletes data files created from the
+        basename by parallel-mode.
+
+        """
+        file_be_gone(self.filename)
+        if parallel:
+            data_dir, local = os.path.split(self.filename)
+            localdot = local + '.*'
+            pattern = os.path.join(os.path.abspath(data_dir), localdot)
+            for filename in glob.glob(pattern):
+                file_be_gone(filename)
+
+    def read(self, data):
+        """Read the coverage data."""
+        if os.path.exists(self.filename):
+            data.read_file(self.filename)
+
+    def write(self, data, suffix=None):
+        """Write the collected coverage data to a file.
+
+        `suffix` is a suffix to append to the base file name. This can be used
+        for multiple or parallel execution, so that many coverage data files
+        can exist simultaneously.  A dot will be used to join the base name and
+        the suffix.
+
+        """
+        filename = self.filename
+        if suffix is True:
+            # If data_suffix was a simple true value, then make a suffix with
+            # plenty of distinguishing information.  We do this here in
+            # `save()` at the last minute so that the pid will be correct even
+            # if the process forks.
+            extra = ""
+            if _TEST_NAME_FILE:                             # pragma: debugging
+                with open(_TEST_NAME_FILE) as f:
+                    test_name = f.read()
+                extra = "." + test_name
+            suffix = "%s%s.%s.%06d" % (
+                socket.gethostname(), extra, os.getpid(),
+                random.randint(0, 999999)
+            )
+
+        if suffix:
+            filename += "." + suffix
+        data.write_file(filename)
+
+    def combine_parallel_data(self, data, aliases=None, data_paths=None):
+        """Combine a number of data files together.
+
+        Treat `self.filename` as a file prefix, and combine the data from all
+        of the data files starting with that prefix plus a dot.
+
+        If `aliases` is provided, it's a `PathAliases` object that is used to
+        re-map paths to match the local machine's.
+
+        If `data_paths` is provided, it is a list of directories or files to
+        combine.  Directories are searched for files that start with
+        `self.filename` plus dot as a prefix, and those files are combined.
+
+        If `data_paths` is not provided, then the directory portion of
+        `self.filename` is used as the directory to search for data files.
+
+        Every data file found and combined is then deleted from disk. If a file
+        cannot be read, a warning will be issued, and the file will not be
+        deleted.
+
+        """
+        # Because of the os.path.abspath in the constructor, data_dir will
+        # never be an empty string.
+        data_dir, local = os.path.split(self.filename)
+        localdot = local + '.*'
+
+        data_paths = data_paths or [data_dir]
+        files_to_combine = []
+        for p in data_paths:
+            if os.path.isfile(p):
+                files_to_combine.append(os.path.abspath(p))
+            elif os.path.isdir(p):
+                pattern = os.path.join(os.path.abspath(p), localdot)
+                files_to_combine.extend(glob.glob(pattern))
+            else:
+                raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,))
+
+        for f in files_to_combine:
+            new_data = CoverageData()
+            try:
+                new_data.read_file(f)
+            except CoverageException as exc:
+                if self.warn:
+                    # The CoverageException has the file name in it, so just
+                    # use the message as the warning.
+                    self.warn(str(exc))
+            else:
+                data.update(new_data, aliases=aliases)
+                file_be_gone(f)
+
+
+def canonicalize_json_data(data):
+    """Canonicalize our JSON data so it can be compared."""
+    for fname, lines in iitems(data.get('lines', {})):
+        data['lines'][fname] = sorted(lines)
+    for fname, arcs in iitems(data.get('arcs', {})):
+        data['arcs'][fname] = sorted(arcs)
+
+
+def pretty_data(data):
+    """Format data as JSON, but as nicely as possible.
+
+    Returns a string.
+
+    """
+    # Start with a basic JSON dump.
+    out = json.dumps(data, indent=4, sort_keys=True)
+    # But pairs of numbers shouldn't be split across lines...
+    out = re.sub(r"\[\s+(-?\d+),\s+(-?\d+)\s+]", r"[\1, \2]", out)
+    # Trailing spaces mess with tests, get rid of them.
+    out = re.sub(r"(?m)\s+$", "", out)
+    return out
+
+
+def debug_main(args):
+    """Dump the raw data from data files.
+
+    Run this as::
+
+        $ python -m coverage.data [FILE]
+
+    """
+    parser = optparse.OptionParser()
+    parser.add_option(
+        "-c", "--canonical", action="store_true",
+        help="Sort data into a canonical order",
+    )
+    options, args = parser.parse_args(args)
+
+    for filename in (args or [".coverage"]):
+        print("--- {0} ------------------------------".format(filename))
+        data = CoverageData._read_raw_data_file(filename)
+        if options.canonical:
+            canonicalize_json_data(data)
+        print(pretty_data(data))
 
 
 if __name__ == '__main__':
-    # Ad-hoc: show the raw data in a data file.
-    import pprint, sys
-    covdata = CoverageData()
-    if sys.argv[1:]:
-        fname = sys.argv[1]
-    else:
-        fname = covdata.filename
-    pprint.pprint(covdata.raw_data(fname))
+    import sys
+    debug_main(sys.argv[1:])
index 104f3b1d0a432efd951eea8a4b855611588e6960..8ed664ce2ce7703fc7d24abafd23b4083023d095 100644 (file)
@@ -1,6 +1,15 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
 """Control of and utilities for debugging."""
 
+import inspect
 import os
+import sys
+
+from coverage.misc import isolate_module
+
+os = isolate_module(os)
 
 
 # When debugging, it can be helpful to force some options, especially when
@@ -8,6 +17,9 @@ import os
 # This is a list of forced debugging options.
 FORCED_DEBUG = []
 
+# A hack for debugging testing in sub-processes.
+_TEST_NAME_FILE = ""    # "/tmp/covtest.txt"
+
 
 class DebugControl(object):
     """Control and output for debugging."""
@@ -17,6 +29,9 @@ class DebugControl(object):
         self.options = options
         self.output = output
 
+    def __repr__(self):
+        return "<DebugControl options=%r output=%r>" % (self.options, self.output)
+
     def should(self, option):
         """Decide whether to output debug information in category `option`."""
         return (option in self.options or option in FORCED_DEBUG)
@@ -26,14 +41,22 @@ class DebugControl(object):
         if self.should('pid'):
             msg = "pid %5d: %s" % (os.getpid(), msg)
         self.output.write(msg+"\n")
+        if self.should('callers'):
+            dump_stack_frames(out=self.output)
         self.output.flush()
 
-    def write_formatted_info(self, info):
+    def write_formatted_info(self, header, info):
         """Write a sequence of (label,data) pairs nicely."""
+        self.write(info_header(header))
         for line in info_formatter(info):
             self.write(" %s" % line)
 
 
+def info_header(label):
+    """Make a nice header string."""
+    return "--{0:-<60s}".format(" "+label+" ")
+
+
 def info_formatter(info):
     """Produce a sequence of formatted lines from info.
 
@@ -41,14 +64,51 @@ def info_formatter(info):
     nicely formatted, ready to print.
 
     """
-    label_len = max([len(l) for l, _d in info])
+    info = list(info)
+    if not info:
+        return
+    label_len = max(len(l) for l, _d in info)
     for label, data in info:
         if data == []:
             data = "-none-"
-        if isinstance(data, (list, tuple)):
+        if isinstance(data, (list, set, tuple)):
             prefix = "%*s:" % (label_len, label)
             for e in data:
                 yield "%*s %s" % (label_len+1, prefix, e)
                 prefix = ""
         else:
             yield "%*s: %s" % (label_len, label, data)
+
+
+def short_stack(limit=None):                                # pragma: debugging
+    """Return a string summarizing the call stack.
+
+    The string is multi-line, with one line per stack frame. Each line shows
+    the function name, the file name, and the line number:
+
+        ...
+        start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py @95
+        import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py @81
+        import_local_file : /Users/ned/coverage/trunk/coverage/backward.py @159
+        ...
+
+    `limit` is the number of frames to include, defaulting to all of them.
+
+    """
+    stack = inspect.stack()[limit:0:-1]
+    return "\n".join("%30s : %s @%d" % (t[3], t[1], t[2]) for t in stack)
+
+
+def dump_stack_frames(limit=None, out=None):                # pragma: debugging
+    """Print a summary of the stack to stdout, or some place else."""
+    out = out or sys.stdout
+    out.write(short_stack(limit=limit))
+    out.write("\n")
+
+
+def log(msg, stack=False):                                  # pragma: debugging
+    """Write a log message as forcefully as possible."""
+    with open("/tmp/covlog.txt", "a") as f:
+        f.write("{pid}: {msg}\n".format(pid=os.getpid(), msg=msg))
+        if stack:
+            dump_stack_frames(out=f)
diff --git a/python/helpers/coveragepy/coverage/env.py b/python/helpers/coveragepy/coverage/env.py
new file mode 100644 (file)
index 0000000..4cd02c0
--- /dev/null
@@ -0,0 +1,32 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Determine facts about the environment."""
+
+import os
+import sys
+
+# Operating systems.
+WINDOWS = sys.platform == "win32"
+LINUX = sys.platform == "linux2"
+
+# Python implementations.
+PYPY = '__pypy__' in sys.builtin_module_names
+
+# Python versions.
+PYVERSION = sys.version_info
+PY2 = PYVERSION < (3, 0)
+PY3 = PYVERSION >= (3, 0)
+
+# Coverage.py specifics.
+
+# Are we using the C-implemented trace function?
+C_TRACER = os.getenv('COVERAGE_TEST_TRACER', 'c') == 'c'
+
+# Are we coverage-measuring ourselves?
+METACOV = os.getenv('COVERAGE_COVERAGE', '') != ''
+
+# Are we running our test suite?
+# Even when running tests, you can use COVERAGE_TESTING=0 to disable the
+# test-specific behavior like contracts.
+TESTING = os.getenv('COVERAGE_TESTING', '') == 'True'
index f6ebdf79bb9e8a1fb9268af492cdbea4a09ab522..3e20a527d93172efe5e5fc1a89b99f6675e7fcb9 100644 (file)
@@ -1,41 +1,73 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
 """Execute files of Python code."""
 
-import imp, marshal, os, sys
+import marshal
+import os
+import sys
+import types
 
-from coverage.backward import exec_code_object, open_source
-from coverage.misc import ExceptionDuringRun, NoCode, NoSource
+from coverage.backward import BUILTINS
+from coverage.backward import PYC_MAGIC_NUMBER, imp, importlib_util_find_spec
+from coverage.misc import ExceptionDuringRun, NoCode, NoSource, isolate_module
+from coverage.phystokens import compile_unicode
+from coverage.python import get_python_source
 
+os = isolate_module(os)
 
-try:
-    # In Py 2.x, the builtins were in __builtin__
-    BUILTINS = sys.modules['__builtin__']
-except KeyError:
-    # In Py 3.x, they're in builtins
-    BUILTINS = sys.modules['builtins']
 
+class DummyLoader(object):
+    """A shim for the pep302 __loader__, emulating pkgutil.ImpLoader.
 
-def rsplit1(s, sep):
-    """The same as s.rsplit(sep, 1), but works in 2.3"""
-    parts = s.split(sep)
-    return sep.join(parts[:-1]), parts[-1]
+    Currently only implements the .fullname attribute
+    """
+    def __init__(self, fullname, *_args):
+        self.fullname = fullname
 
 
-def run_python_module(modulename, args):
-    """Run a python module, as though with ``python -m name args...``.
+if importlib_util_find_spec:
+    def find_module(modulename):
+        """Find the module named `modulename`.
 
-    `modulename` is the name of the module, possibly a dot-separated name.
-    `args` is the argument array to present as sys.argv, including the first
-    element naming the module being executed.
-
-    """
-    openfile = None
-    glo, loc = globals(), locals()
-    try:
+        Returns the file path of the module, and the name of the enclosing
+        package.
+        """
+        try:
+            spec = importlib_util_find_spec(modulename)
+        except ImportError as err:
+            raise NoSource(str(err))
+        if not spec:
+            raise NoSource("No module named %r" % (modulename,))
+        pathname = spec.origin
+        packagename = spec.name
+        if pathname.endswith("__init__.py") and not modulename.endswith("__init__"):
+            mod_main = modulename + ".__main__"
+            spec = importlib_util_find_spec(mod_main)
+            if not spec:
+                raise NoSource(
+                    "No module named %s; "
+                    "%r is a package and cannot be directly executed"
+                    % (mod_main, modulename)
+                )
+            pathname = spec.origin
+            packagename = spec.name
+        packagename = packagename.rpartition(".")[0]
+        return pathname, packagename
+else:
+    def find_module(modulename):
+        """Find the module named `modulename`.
+
+        Returns the file path of the module, and the name of the enclosing
+        package.
+        """
+        openfile = None
+        glo, loc = globals(), locals()
         try:
             # Search for the module - inside its parent package, if any - using
             # standard import mechanics.
             if '.' in modulename:
-                packagename, name = rsplit1(modulename, '.')
+                packagename, name = modulename.rsplit('.', 1)
                 package = __import__(packagename, glo, loc, ['__path__'])
                 searchpath = package.__path__
             else:
@@ -57,51 +89,92 @@ def run_python_module(modulename, args):
                 package = __import__(packagename, glo, loc, ['__path__'])
                 searchpath = package.__path__
                 openfile, pathname, _ = imp.find_module(name, searchpath)
-        except ImportError:
-            _, err, _ = sys.exc_info()
+        except ImportError as err:
             raise NoSource(str(err))
-    finally:
-        if openfile:
-            openfile.close()
+        finally:
+            if openfile:
+                openfile.close()
+
+        return pathname, packagename
+
+
+def run_python_module(modulename, args):
+    """Run a Python module, as though with ``python -m name args...``.
+
+    `modulename` is the name of the module, possibly a dot-separated name.
+    `args` is the argument array to present as sys.argv, including the first
+    element naming the module being executed.
+
+    """
+    pathname, packagename = find_module(modulename)
 
-    # Finally, hand the file off to run_python_file for execution.
     pathname = os.path.abspath(pathname)
     args[0] = pathname
-    run_python_file(pathname, args, package=packagename)
+    run_python_file(pathname, args, package=packagename, modulename=modulename, path0="")
 
 
-def run_python_file(filename, args, package=None):
-    """Run a python file as if it were the main program on the command line.
+def run_python_file(filename, args, package=None, modulename=None, path0=None):
+    """Run a Python file as if it were the main program on the command line.
 
     `filename` is the path to the file to execute, it need not be a .py file.
     `args` is the argument array to present as sys.argv, including the first
     element naming the file being executed.  `package` is the name of the
     enclosing package, if any.
 
+    `modulename` is the name of the module the file was run as.
+
+    `path0` is the value to put into sys.path[0].  If it's None, then this
+    function will decide on a value.
+
     """
+    if modulename is None and sys.version_info >= (3, 3):
+        modulename = '__main__'
+
     # Create a module to serve as __main__
     old_main_mod = sys.modules['__main__']
-    main_mod = imp.new_module('__main__')
+    main_mod = types.ModuleType('__main__')
     sys.modules['__main__'] = main_mod
     main_mod.__file__ = filename
     if package:
         main_mod.__package__ = package
+    if modulename:
+        main_mod.__loader__ = DummyLoader(modulename)
+
     main_mod.__builtins__ = BUILTINS
 
     # Set sys.argv properly.
     old_argv = sys.argv
     sys.argv = args
 
+    if os.path.isdir(filename):
+        # Running a directory means running the __main__.py file in that
+        # directory.
+        my_path0 = filename
+
+        for ext in [".py", ".pyc", ".pyo"]:
+            try_filename = os.path.join(filename, "__main__" + ext)
+            if os.path.exists(try_filename):
+                filename = try_filename
+                break
+        else:
+            raise NoSource("Can't find '__main__' module in '%s'" % filename)
+    else:
+        my_path0 = os.path.abspath(os.path.dirname(filename))
+
+    # Set sys.path correctly.
+    old_path0 = sys.path[0]
+    sys.path[0] = path0 if path0 is not None else my_path0
+
     try:
         # Make a code object somehow.
-        if filename.endswith(".pyc") or filename.endswith(".pyo"):
+        if filename.endswith((".pyc", ".pyo")):
             code = make_code_from_pyc(filename)
         else:
             code = make_code_from_py(filename)
 
         # Execute the code object.
         try:
-            exec_code_object(code, main_mod.__dict__)
+            exec(code, main_mod.__dict__)
         except SystemExit:
             # The user called sys.exit().  Just pass it along to the upper
             # layers, where it will be handled.
@@ -109,37 +182,34 @@ def run_python_file(filename, args, package=None):
         except:
             # Something went wrong while executing the user code.
             # Get the exc_info, and pack them into an exception that we can
-            # throw up to the outer loop.  We peel two layers off the traceback
+            # throw up to the outer loop.  We peel one layer off the traceback
             # so that the coverage.py code doesn't appear in the final printed
             # traceback.
             typ, err, tb = sys.exc_info()
-            raise ExceptionDuringRun(typ, err, tb.tb_next.tb_next)
+
+            # PyPy3 weirdness.  If I don't access __context__, then somehow it
+            # is non-None when the exception is reported at the upper layer,
+            # and a nested exception is shown to the user.  This getattr fixes
+            # it somehow? https://bitbucket.org/pypy/pypy/issue/1903
+            getattr(err, '__context__', None)
+
+            raise ExceptionDuringRun(typ, err, tb.tb_next)
     finally:
-        # Restore the old __main__
+        # Restore the old __main__, argv, and path.
         sys.modules['__main__'] = old_main_mod
-
-        # Restore the old argv and path
         sys.argv = old_argv
+        sys.path[0] = old_path0
+
 
 def make_code_from_py(filename):
     """Get source from `filename` and make a code object of it."""
     # Open the source file.
     try:
-        source_file = open_source(filename)
-    except IOError:
-        raise NoSource("No file to run: %r" % filename)
-
-    try:
-        source = source_file.read()
-    finally:
-        source_file.close()
-
-    # We have the source.  `compile` still needs the last line to be clean,
-    # so make sure it is, then compile a code object from it.
-    if not source or source[-1] != '\n':
-        source += '\n'
-    code = compile(source, filename, "exec")
+        source = get_python_source(filename)
+    except (IOError, NoSource):
+        raise NoSource("No file to run: '%s'" % filename)
 
+    code = compile_unicode(source, filename, "exec")
     return code
 
 
@@ -148,13 +218,13 @@ def make_code_from_pyc(filename):
     try:
         fpyc = open(filename, "rb")
     except IOError:
-        raise NoCode("No file to run: %r" % filename)
+        raise NoCode("No file to run: '%s'" % filename)
 
-    try:
+    with fpyc:
         # First four bytes are a version-specific magic number.  It has to
         # match or we won't run the file.
         magic = fpyc.read(4)
-        if magic != imp.get_magic():
+        if magic != PYC_MAGIC_NUMBER:
             raise NoCode("Bad magic number in .pyc file")
 
         # Skip the junk in the header that we don't need.
@@ -165,7 +235,5 @@ def make_code_from_pyc(filename):
 
         # The rest of the file is the code object we want.
         code = marshal.load(fpyc)
-    finally:
-        fpyc.close()
 
     return code
index 464535a81653ca833ba33b462467941e373c0927..44997d12c6bc5bf9b56461ec9148f482145d7739 100644 (file)
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
 """File wrangling."""
 
-from coverage.backward import to_string
-from coverage.misc import CoverageException
-import fnmatch, os, os.path, re, sys
-import ntpath, posixpath
+import fnmatch
+import ntpath
+import os
+import os.path
+import posixpath
+import re
+import sys
 
-class FileLocator(object):
-    """Understand how filenames work."""
+from coverage import env
+from coverage.backward import unicode_class
+from coverage.misc import contract, CoverageException, join_regex, isolate_module
 
-    def __init__(self):
-        # The absolute path to our current directory.
-        self.relative_dir = os.path.normcase(abs_file(os.curdir) + os.sep)
 
-        # Cache of results of calling the canonical_filename() method, to
-        # avoid duplicating work.
-        self.canonical_filename_cache = {}
+os = isolate_module(os)
 
-    def relative_filename(self, filename):
-        """Return the relative form of `filename`.
 
-        The filename will be relative to the current directory when the
-        `FileLocator` was constructed.
+def set_relative_directory():
+    """Set the directory that `relative_filename` will be relative to."""
+    global RELATIVE_DIR, CANONICAL_FILENAME_CACHE
 
-        """
-        fnorm = os.path.normcase(filename)
-        if fnorm.startswith(self.relative_dir):
-            filename = filename[len(self.relative_dir):]
-        return filename
+    # The absolute path to our current directory.
+    RELATIVE_DIR = os.path.normcase(abs_file(os.curdir) + os.sep)
 
-    def canonical_filename(self, filename):
-        """Return a canonical filename for `filename`.
+    # Cache of results of calling the canonical_filename() method, to
+    # avoid duplicating work.
+    CANONICAL_FILENAME_CACHE = {}
 
-        An absolute path with no redundant components and normalized case.
 
-        """
-        if filename not in self.canonical_filename_cache:
-            if not os.path.isabs(filename):
-                for path in [os.curdir] + sys.path:
-                    if path is None:
-                        continue
-                    f = os.path.join(path, filename)
-                    if os.path.exists(f):
-                        filename = f
-                        break
-            cf = abs_file(filename)
-            self.canonical_filename_cache[filename] = cf
-        return self.canonical_filename_cache[filename]
-
-    def get_zip_data(self, filename):
-        """Get data from `filename` if it is a zip file path.
-
-        Returns the string data read from the zip file, or None if no zip file
-        could be found or `filename` isn't in it.  The data returned will be
-        an empty string if the file is empty.
+def relative_directory():
+    """Return the directory that `relative_filename` is relative to."""
+    return RELATIVE_DIR
 
-        """
-        import zipimport
-        markers = ['.zip'+os.sep, '.egg'+os.sep]
-        for marker in markers:
-            if marker in filename:
-                parts = filename.split(marker)
-                try:
-                    zi = zipimport.zipimporter(parts[0]+marker[:-1])
-                except zipimport.ZipImportError:
-                    continue
-                try:
-                    data = zi.get_data(parts[1])
-                except IOError:
+
+@contract(returns='unicode')
+def relative_filename(filename):
+    """Return the relative form of `filename`.
+
+    The file name will be relative to the current directory when
+    `set_relative_directory` was called.
+
+    """
+    fnorm = os.path.normcase(filename)
+    if fnorm.startswith(RELATIVE_DIR):
+        filename = filename[len(RELATIVE_DIR):]
+    return unicode_filename(filename)
+
+
+@contract(returns='unicode')
+def canonical_filename(filename):
+    """Return a canonical file name for `filename`.
+
+    An absolute path with no redundant components and normalized case.
+
+    """
+    if filename not in CANONICAL_FILENAME_CACHE:
+        if not os.path.isabs(filename):
+            for path in [os.curdir] + sys.path:
+                if path is None:
                     continue
-                return to_string(data)
-        return None
+                f = os.path.join(path, filename)
+                if os.path.exists(f):
+                    filename = f
+                    break
+        cf = abs_file(filename)
+        CANONICAL_FILENAME_CACHE[filename] = cf
+    return CANONICAL_FILENAME_CACHE[filename]
+
+
+def flat_rootname(filename):
+    """A base for a flat file name to correspond to this file.
 
+    Useful for writing files about the code where you want all the files in
+    the same directory, but need to differentiate same-named files from
+    different directories.
 
-if sys.platform == 'win32':
+    For example, the file a/b/c.py will return 'a_b_c_py'
+
+    """
+    name = ntpath.splitdrive(filename)[1]
+    return re.sub(r"[\\/.:]", "_", name)
+
+
+if env.WINDOWS:
+
+    _ACTUAL_PATH_CACHE = {}
+    _ACTUAL_PATH_LIST_CACHE = {}
 
     def actual_path(path):
         """Get the actual path of `path`, including the correct case."""
-        if path in actual_path.cache:
-            return actual_path.cache[path]
+        if env.PY2 and isinstance(path, unicode_class):
+            path = path.encode(sys.getfilesystemencoding())
+        if path in _ACTUAL_PATH_CACHE:
+            return _ACTUAL_PATH_CACHE[path]
 
         head, tail = os.path.split(path)
         if not tail:
-            actpath = head
+            # This means head is the drive spec: normalize it.
+            actpath = head.upper()
         elif not head:
             actpath = tail
         else:
             head = actual_path(head)
-            if head in actual_path.list_cache:
-                files = actual_path.list_cache[head]
+            if head in _ACTUAL_PATH_LIST_CACHE:
+                files = _ACTUAL_PATH_LIST_CACHE[head]
             else:
                 try:
                     files = os.listdir(head)
                 except OSError:
                     files = []
-                actual_path.list_cache[head] = files
+                _ACTUAL_PATH_LIST_CACHE[head] = files
             normtail = os.path.normcase(tail)
             for f in files:
                 if os.path.normcase(f) == normtail:
                     tail = f
                     break
             actpath = os.path.join(head, tail)
-        actual_path.cache[path] = actpath
+        _ACTUAL_PATH_CACHE[path] = actpath
         return actpath
 
-    actual_path.cache = {}
-    actual_path.list_cache = {}
-
 else:
     def actual_path(filename):
         """The actual path for non-Windows platforms."""
         return filename
 
 
+if env.PY2:
+    @contract(returns='unicode')
+    def unicode_filename(filename):
+        """Return a Unicode version of `filename`."""
+        if isinstance(filename, str):
+            encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
+            filename = filename.decode(encoding, "replace")
+        return filename
+else:
+    @contract(filename='unicode', returns='unicode')
+    def unicode_filename(filename):
+        """Return a Unicode version of `filename`."""
+        return filename
+
+
+@contract(returns='unicode')
 def abs_file(filename):
     """Return the absolute normalized form of `filename`."""
     path = os.path.expandvars(os.path.expanduser(filename))
     path = os.path.abspath(os.path.realpath(path))
     path = actual_path(path)
+    path = unicode_filename(path)
     return path
 
 
+RELATIVE_DIR = None
+CANONICAL_FILENAME_CACHE = None
+set_relative_directory()
+
+
 def isabs_anywhere(filename):
     """Is `filename` an absolute path on any OS?"""
     return ntpath.isabs(filename) or posixpath.isabs(filename)
@@ -137,7 +175,7 @@ def prep_patterns(patterns):
     """
     prepped = []
     for p in patterns or []:
-        if p.startswith("*") or p.startswith("?"):
+        if p.startswith(("*", "?")):
             prepped.append(p)
         else:
             prepped.append(abs_file(p))
@@ -147,7 +185,7 @@ def prep_patterns(patterns):
 class TreeMatcher(object):
     """A matcher for files in a tree."""
     def __init__(self, directories):
-        self.dirs = directories[:]
+        self.dirs = list(directories)
 
     def __repr__(self):
         return "<TreeMatcher %r>" % self.dirs
@@ -156,10 +194,6 @@ class TreeMatcher(object):
         """A list of strings for displaying when dumping state."""
         return self.dirs
 
-    def add(self, directory):
-        """Add another directory to the list we match for."""
-        self.dirs.append(directory)
-
     def match(self, fpath):
         """Does `fpath` indicate a file in one of our trees?"""
         for d in self.dirs:
@@ -173,10 +207,49 @@ class TreeMatcher(object):
         return False
 
 
+class ModuleMatcher(object):
+    """A matcher for modules in a tree."""
+    def __init__(self, module_names):
+        self.modules = list(module_names)
+
+    def __repr__(self):
+        return "<ModuleMatcher %r>" % (self.modules)
+
+    def info(self):
+        """A list of strings for displaying when dumping state."""
+        return self.modules
+
+    def match(self, module_name):
+        """Does `module_name` indicate a module in one of our packages?"""
+        if not module_name:
+            return False
+
+        for m in self.modules:
+            if module_name.startswith(m):
+                if module_name == m:
+                    return True
+                if module_name[len(m)] == '.':
+                    # This is a module in the package
+                    return True
+
+        return False
+
+
 class FnmatchMatcher(object):
-    """A matcher for files by filename pattern."""
+    """A matcher for files by file name pattern."""
     def __init__(self, pats):
         self.pats = pats[:]
+        # fnmatch is platform-specific. On Windows, it does the Windows thing
+        # of treating / and \ as equivalent. But on other platforms, we need to
+        # take care of that ourselves.
+        fnpats = (fnmatch.translate(p) for p in pats)
+        fnpats = (p.replace(r"\/", r"[\\/]") for p in fnpats)
+        if env.WINDOWS:
+            # Windows is also case-insensitive.  BTW: the regex docs say that
+            # flags like (?i) have to be at the beginning, but fnmatch puts
+            # them at the end, and having two there seems to work fine.
+            fnpats = (p + "(?i)" for p in fnpats)
+        self.re = re.compile(join_regex(fnpats))
 
     def __repr__(self):
         return "<FnmatchMatcher %r>" % self.pats
@@ -186,11 +259,8 @@ class FnmatchMatcher(object):
         return self.pats
 
     def match(self, fpath):
-        """Does `fpath` match one of our filename patterns?"""
-        for pat in self.pats:
-            if fnmatch.fnmatch(fpath, pat):
-                return True
-        return False
+        """Does `fpath` match one of our file name patterns?"""
+        return self.re.match(fpath) is not None
 
 
 def sep(s):
@@ -213,12 +283,9 @@ class PathAliases(object):
     A `PathAliases` object tracks a list of pattern/result pairs, and can
     map a path through those aliases to produce a unified path.
 
-    `locator` is a FileLocator that is used to canonicalize the results.
-
     """
-    def __init__(self, locator=None):
+    def __init__(self):
         self.aliases = []
-        self.locator = locator
 
     def add(self, pattern, result):
         """Add the `pattern`/`result` pair to the list of aliases.
@@ -245,11 +312,10 @@ class PathAliases(object):
             pattern = abs_file(pattern)
         pattern += pattern_sep
 
-        # Make a regex from the pattern.  fnmatch always adds a \Z or $ to
+        # Make a regex from the pattern.  fnmatch always adds a \Z to
         # match the whole string, which we don't want.
         regex_pat = fnmatch.translate(pattern).replace(r'\Z(', '(')
-        if regex_pat.endswith("$"):
-            regex_pat = regex_pat[:-1]
+
         # We want */a/b.py to match on Windows too, so change slash to match
         # either separator.
         regex_pat = regex_pat.replace(r"\/", r"[\\/]")
@@ -272,6 +338,10 @@ class PathAliases(object):
         The separator style in the result is made to match that of the result
         in the alias.
 
+        Returns the mapped path.  If a mapping has happened, this is a
+        canonical path.  If no mapping has happened, it is the original value
+        of `path` unchanged.
+
         """
         for regex, result, pattern_sep, result_sep in self.aliases:
             m = regex.match(path)
@@ -279,8 +349,7 @@ class PathAliases(object):
                 new = path.replace(m.group(0), result)
                 if pattern_sep != result_sep:
                     new = new.replace(pattern_sep, result_sep)
-                if self.locator:
-                    new = self.locator.canonical_filename(new)
+                new = canonical_filename(new)
                 return new
         return path
 
@@ -291,7 +360,7 @@ def find_python_files(dirname):
     To be importable, the files have to be in a directory with a __init__.py,
     except for `dirname` itself, which isn't required to have one.  The
     assumption is that `dirname` was specified directly, so the user knows
-    best, but subdirectories are checked for a __init__.py to be sure we only
+    best, but sub-directories are checked for a __init__.py to be sure we only
     find the importable files.
 
     """
diff --git a/python/helpers/coveragepy/coverage/fullcoverage/encodings.py b/python/helpers/coveragepy/coverage/fullcoverage/encodings.py
deleted file mode 100644 (file)
index 6a258d6..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-"""Imposter encodings module that installs a coverage-style tracer.
-
-This is NOT the encodings module; it is an imposter that sets up tracing
-instrumentation and then replaces itself with the real encodings module.
-
-If the directory that holds this file is placed first in the PYTHONPATH when
-using "coverage" to run Python's tests, then this file will become the very
-first module imported by the internals of Python 3.  It installs a
-coverage-compatible trace function that can watch Standard Library modules
-execute from the very earliest stages of Python's own boot process.  This fixes
-a problem with coverage - that it starts too late to trace the coverage of many
-of the most fundamental modules in the Standard Library.
-
-"""
-
-import sys
-
-class FullCoverageTracer(object):
-    def __init__(self):
-        # `traces` is a list of trace events.  Frames are tricky: the same
-        # frame object is used for a whole scope, with new line numbers
-        # written into it.  So in one scope, all the frame objects are the
-        # same object, and will eventually all will point to the last line
-        # executed.  So we keep the line numbers alongside the frames.
-        # The list looks like:
-        #
-        #   traces = [
-        #       ((frame, event, arg), lineno), ...
-        #       ]
-        #
-        self.traces = []
-
-    def fullcoverage_trace(self, *args):
-        frame, event, arg = args
-        self.traces.append((args, frame.f_lineno))
-        return self.fullcoverage_trace
-
-sys.settrace(FullCoverageTracer().fullcoverage_trace)
-
-# In coverage/files.py is actual_filename(), which uses glob.glob.  I don't
-# understand why, but that use of glob borks everything if fullcoverage is in
-# effect.  So here we make an ugly hail-mary pass to switch off glob.glob over
-# there.  This means when using fullcoverage, Windows path names will not be
-# their actual case.
-
-#sys.fullcoverage = True
-
-# Finally, remove our own directory from sys.path; remove ourselves from
-# sys.modules; and re-import "encodings", which will be the real package
-# this time.  Note that the delete from sys.modules dictionary has to
-# happen last, since all of the symbols in this module will become None
-# at that exact moment, including "sys".
-
-parentdir = max(filter(__file__.startswith, sys.path), key=len)
-sys.path.remove(parentdir)
-del sys.modules['encodings']
-import encodings
index 5242236c1ed9826d19f7abe1a7c4499dc90414dd..e5b1db2a01dcfb41991595e178f14ddd67a0288b 100644 (file)
@@ -1,15 +1,24 @@
-"""HTML reporting for Coverage."""
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
 
-import os, re, shutil, sys
+"""HTML reporting for coverage.py."""
+
+import datetime
+import json
+import os
+import shutil
 
 import coverage
-from coverage.backward import pickle
-from coverage.misc import CoverageException, Hasher
-from coverage.phystokens import source_token_lines, source_encoding
+from coverage import env
+from coverage.backward import iitems
+from coverage.files import flat_rootname
+from coverage.misc import CoverageException, Hasher, isolate_module
 from coverage.report import Reporter
 from coverage.results import Numbers
 from coverage.templite import Templite
 
+os = isolate_module(os)
+
 
 # Static files are looked for in a list of places.
 STATIC_PATH = [
@@ -20,6 +29,7 @@ STATIC_PATH = [
     os.path.join(os.path.dirname(__file__), "htmlfiles"),
 ]
 
+
 def data_filename(fname, pkgdir=""):
     """Return the path to a data file of ours.
 
@@ -27,69 +37,84 @@ def data_filename(fname, pkgdir=""):
     is returned.
 
     Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir`
-    is provided, at that subdirectory.
+    is provided, at that sub-directory.
 
     """
+    tried = []
     for static_dir in STATIC_PATH:
         static_filename = os.path.join(static_dir, fname)
         if os.path.exists(static_filename):
             return static_filename
+        else:
+            tried.append(static_filename)
         if pkgdir:
             static_filename = os.path.join(static_dir, pkgdir, fname)
             if os.path.exists(static_filename):
                 return static_filename
-    raise CoverageException("Couldn't find static file %r" % fname)
+            else:
+                tried.append(static_filename)
+    raise CoverageException(
+        "Couldn't find static file %r from %r, tried: %r" % (fname, os.getcwd(), tried)
+    )
 
 
-def data(fname):
+def read_data(fname):
     """Return the contents of a data file of ours."""
-    data_file = open(data_filename(fname))
-    try:
+    with open(data_filename(fname)) as data_file:
         return data_file.read()
-    finally:
-        data_file.close()
+
+
+def write_html(fname, html):
+    """Write `html` to `fname`, properly encoded."""
+    with open(fname, "wb") as fout:
+        fout.write(html.encode('ascii', 'xmlcharrefreplace'))
 
 
 class HtmlReporter(Reporter):
     """HTML reporting."""
 
-    # These files will be copied from the htmlfiles dir to the output dir.
+    # These files will be copied from the htmlfiles directory to the output
+    # directory.
     STATIC_FILES = [
-            ("style.css", ""),
-            ("jquery.min.js", "jquery"),
-            ("jquery.hotkeys.js", "jquery-hotkeys"),
-            ("jquery.isonscreen.js", "jquery-isonscreen"),
-            ("jquery.tablesorter.min.js", "jquery-tablesorter"),
-            ("coverage_html.js", ""),
-            ("keybd_closed.png", ""),
-            ("keybd_open.png", ""),
-            ]
+        ("style.css", ""),
+        ("jquery.min.js", "jquery"),
+        ("jquery.debounce.min.js", "jquery-debounce"),
+        ("jquery.hotkeys.js", "jquery-hotkeys"),
+        ("jquery.isonscreen.js", "jquery-isonscreen"),
+        ("jquery.tablesorter.min.js", "jquery-tablesorter"),
+        ("coverage_html.js", ""),
+        ("keybd_closed.png", ""),
+        ("keybd_open.png", ""),
+    ]
 
     def __init__(self, cov, config):
         super(HtmlReporter, self).__init__(cov, config)
         self.directory = None
+        title = self.config.html_title
+        if env.PY2:
+            title = title.decode("utf8")
         self.template_globals = {
             'escape': escape,
-            'title': self.config.html_title,
+            'pair': pair,
+            'title': title,
             '__url__': coverage.__url__,
             '__version__': coverage.__version__,
-            }
-        self.source_tmpl = Templite(
-            data("pyfile.html"), self.template_globals
-            )
+        }
+        self.source_tmpl = Templite(read_data("pyfile.html"), self.template_globals)
 
         self.coverage = cov
 
         self.files = []
-        self.arcs = self.coverage.data.has_arcs()
+        self.has_arcs = self.coverage.data.has_arcs()
         self.status = HtmlStatus()
         self.extra_css = None
         self.totals = Numbers()
+        self.time_stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
 
     def report(self, morfs):
         """Generate an HTML report for `morfs`.
 
-        `morfs` is a list of modules or filenames.
+        `morfs` is a list of modules or file names.
 
         """
         assert self.config.html_dir, "must give a directory for html reporting"
@@ -100,7 +125,7 @@ class HtmlReporter(Reporter):
         # Check that this run used the same settings as the last run.
         m = Hasher()
         m.update(self.config)
-        these_settings = m.digest()
+        these_settings = m.hexdigest()
         if self.status.settings_hash() != these_settings:
             self.status.reset()
             self.status.set_settings_hash(these_settings)
@@ -119,8 +144,7 @@ class HtmlReporter(Reporter):
         self.index_file()
 
         self.make_local_static_report_files()
-
-        return self.totals.pc_covered
+        return self.totals.n_statements and self.totals.pc_covered
 
     def make_local_static_report_files(self):
         """Make local instances of static files for HTML report."""
@@ -129,63 +153,43 @@ class HtmlReporter(Reporter):
             shutil.copyfile(
                 data_filename(static, pkgdir),
                 os.path.join(self.directory, static)
-                )
+            )
 
         # The user may have extra CSS they want copied.
         if self.extra_css:
             shutil.copyfile(
                 self.config.extra_css,
                 os.path.join(self.directory, self.extra_css)
-                )
-
-    def write_html(self, fname, html):
-        """Write `html` to `fname`, properly encoded."""
-        fout = open(fname, "wb")
-        try:
-            fout.write(html.encode('ascii', 'xmlcharrefreplace'))
-        finally:
-            fout.close()
+            )
 
-    def file_hash(self, source, cu):
+    def file_hash(self, source, fr):
         """Compute a hash that changes if the file needs to be re-reported."""
         m = Hasher()
         m.update(source)
-        self.coverage.data.add_to_hash(cu.filename, m)
-        return m.digest()
+        self.coverage.data.add_to_hash(fr.filename, m)
+        return m.hexdigest()
 
-    def html_file(self, cu, analysis):
+    def html_file(self, fr, analysis):
         """Generate an HTML file for one source file."""
-        source_file = cu.source_file()
-        try:
-            source = source_file.read()
-        finally:
-            source_file.close()
+        source = fr.source()
 
         # Find out if the file on disk is already correct.
-        flat_rootname = cu.flat_rootname()
-        this_hash = self.file_hash(source, cu)
-        that_hash = self.status.file_hash(flat_rootname)
+        rootname = flat_rootname(fr.relative_filename())
+        this_hash = self.file_hash(source.encode('utf-8'), fr)
+        that_hash = self.status.file_hash(rootname)
         if this_hash == that_hash:
             # Nothing has changed to require the file to be reported again.
-            self.files.append(self.status.index_info(flat_rootname))
+            self.files.append(self.status.index_info(rootname))
             return
 
-        self.status.set_file_hash(flat_rootname, this_hash)
-
-        # If need be, determine the encoding of the source file. We use it
-        # later to properly write the HTML.
-        if sys.version_info < (3, 0):
-            encoding = source_encoding(source)
-            # Some UTF8 files have the dreaded UTF8 BOM. If so, junk it.
-            if encoding.startswith("utf-8") and source[:3] == "\xef\xbb\xbf":
-                source = source[3:]
-                encoding = "utf-8"
+        self.status.set_file_hash(rootname, this_hash)
 
         # Get the numbers for this file.
         nums = analysis.numbers
 
-        if self.arcs:
+        if self.has_arcs:
             missing_branch_arcs = analysis.missing_branch_arcs()
+            arcs_executed = analysis.arcs_executed()
 
         # These classes determine which lines are highlighted by default.
         c_run = "run hide_run"
@@ -195,35 +199,44 @@ class HtmlReporter(Reporter):
 
         lines = []
 
-        for lineno, line in enumerate(source_token_lines(source)):
-            lineno += 1     # 1-based line numbers.
+        for lineno, line in enumerate(fr.source_token_lines(), start=1):
             # Figure out how to mark this line.
             line_class = []
             annotate_html = ""
-            annotate_title = ""
+            annotate_long = ""
             if lineno in analysis.statements:
                 line_class.append("stm")
             if lineno in analysis.excluded:
                 line_class.append(c_exc)
             elif lineno in analysis.missing:
                 line_class.append(c_mis)
-            elif self.arcs and lineno in missing_branch_arcs:
+            elif self.has_arcs and lineno in missing_branch_arcs:
                 line_class.append(c_par)
-                annlines = []
+                shorts = []
+                longs = []
                 for b in missing_branch_arcs[lineno]:
                     if b < 0:
-                        annlines.append("exit")
+                        shorts.append("exit")
                     else:
-                        annlines.append(str(b))
-                annotate_html = "&nbsp;&nbsp; ".join(annlines)
-                if len(annlines) > 1:
-                    annotate_title = "no jumps to these line numbers"
-                elif len(annlines) == 1:
-                    annotate_title = "no jump to this line number"
+                        shorts.append(b)
+                    longs.append(fr.missing_arc_description(lineno, b, arcs_executed))
+                # 202F is NARROW NO-BREAK SPACE.
+                # 219B is RIGHTWARDS ARROW WITH STROKE.
+                short_fmt = "%s&#x202F;&#x219B;&#x202F;%s"
+                annotate_html = ",&nbsp;&nbsp; ".join(short_fmt % (lineno, d) for d in shorts)
+
+                if len(longs) == 1:
+                    annotate_long = longs[0]
+                else:
+                    annotate_long = "%d missed branches: %s" % (
+                        len(longs),
+                        ", ".join("%d) %s" % (num, ann_long)
+                            for num, ann_long in enumerate(longs, start=1)),
+                    )
             elif lineno in analysis.statements:
                 line_class.append(c_run)
 
-            # Build the HTML for the line
+            # Build the HTML for the line.
             html = []
             for tok_type, tok_text in line:
                 if tok_type == "ws":
@@ -231,61 +244,59 @@ class HtmlReporter(Reporter):
                 else:
                     tok_html = escape(tok_text) or '&nbsp;'
                     html.append(
-                        "<span class='%s'>%s</span>" % (tok_type, tok_html)
-                        )
+                        '<span class="%s">%s</span>' % (tok_type, tok_html)
+                    )
 
             lines.append({
                 'html': ''.join(html),
                 'number': lineno,
                 'class': ' '.join(line_class) or "pln",
                 'annotate': annotate_html,
-                'annotate_title': annotate_title,
+                'annotate_long': annotate_long,
             })
 
         # Write the HTML page for this file.
-        html = spaceless(self.source_tmpl.render({
-            'c_exc': c_exc, 'c_mis': c_mis, 'c_par': c_par, 'c_run': c_run,
-            'arcs': self.arcs, 'extra_css': self.extra_css,
-            'cu': cu, 'nums': nums, 'lines': lines,
-        }))
-
-        if sys.version_info < (3, 0):
-            html = html.decode(encoding)
+        html = self.source_tmpl.render({
+            'c_exc': c_exc,
+            'c_mis': c_mis,
+            'c_par': c_par,
+            'c_run': c_run,
+            'has_arcs': self.has_arcs,
+            'extra_css': self.extra_css,
+            'fr': fr,
+            'nums': nums,
+            'lines': lines,
+            'time_stamp': self.time_stamp,
+        })
 
-        html_filename = flat_rootname + ".html"
+        html_filename = rootname + ".html"
         html_path = os.path.join(self.directory, html_filename)
-        self.write_html(html_path, html)
+        write_html(html_path, html)
 
         # Save this file's information for the index file.
         index_info = {
             'nums': nums,
             'html_filename': html_filename,
-            'name': cu.name,
-            }
+            'relative_filename': fr.relative_filename(),
+        }
         self.files.append(index_info)
-        self.status.set_index_info(flat_rootname, index_info)
+        self.status.set_index_info(rootname, index_info)
 
     def index_file(self):
         """Write the index.html file for this report."""
-        index_tmpl = Templite(
-            data("index.html"), self.template_globals
-            )
+        index_tmpl = Templite(read_data("index.html"), self.template_globals)
 
-        self.totals = sum([f['nums'] for f in self.files])
+        self.totals = sum(f['nums'] for f in self.files)
 
         html = index_tmpl.render({
-            'arcs': self.arcs,
+            'has_arcs': self.has_arcs,
             'extra_css': self.extra_css,
             'files': self.files,
             'totals': self.totals,
+            'time_stamp': self.time_stamp,
         })
 
-        if sys.version_info < (3, 0):
-            html = html.decode("utf-8")
-        self.write_html(
-            os.path.join(self.directory, "index.html"),
-            html
-            )
+        write_html(os.path.join(self.directory, "index.html"), html)
 
         # Write the latest hashes for next time.
         self.status.write(self.directory)
@@ -294,9 +305,37 @@ class HtmlReporter(Reporter):
 class HtmlStatus(object):
     """The status information we keep to support incremental reporting."""
 
-    STATUS_FILE = "status.dat"
+    STATUS_FILE = "status.json"
     STATUS_FORMAT = 1
 
+    #           pylint: disable=wrong-spelling-in-comment,useless-suppression
+    #  The data looks like:
+    #
+    #  {
+    #      'format': 1,
+    #      'settings': '540ee119c15d52a68a53fe6f0897346d',
+    #      'version': '4.0a1',
+    #      'files': {
+    #          'cogapp___init__': {
+    #              'hash': 'e45581a5b48f879f301c0f30bf77a50c',
+    #              'index': {
+    #                  'html_filename': 'cogapp___init__.html',
+    #                  'name': 'cogapp/__init__',
+    #                  'nums': <coverage.results.Numbers object at 0x10ab7ed0>,
+    #              }
+    #          },
+    #          ...
+    #          'cogapp_whiteutils': {
+    #              'hash': '8504bb427fc488c4176809ded0277d51',
+    #              'index': {
+    #                  'html_filename': 'cogapp_whiteutils.html',
+    #                  'name': 'cogapp/whiteutils',
+    #                  'nums': <coverage.results.Numbers object at 0x10ab7d90>,
+    #              }
+    #          },
+    #      },
+    #  }
+
     def __init__(self):
         self.reset()
 
@@ -310,11 +349,8 @@ class HtmlStatus(object):
         usable = False
         try:
             status_file = os.path.join(directory, self.STATUS_FILE)
-            fstatus = open(status_file, "rb")
-            try:
-                status = pickle.load(fstatus)
-            finally:
-                fstatus.close()
+            with open(status_file, "r") as fstatus:
+                status = json.load(fstatus)
         except (IOError, ValueError):
             usable = False
         else:
@@ -325,7 +361,10 @@ class HtmlStatus(object):
                 usable = False
 
         if usable:
-            self.files = status['files']
+            self.files = {}
+            for filename, fileinfo in iitems(status['files']):
+                fileinfo['index']['nums'] = Numbers(*fileinfo['index']['nums'])
+                self.files[filename] = fileinfo
             self.settings = status['settings']
         else:
             self.reset()
@@ -333,17 +372,26 @@ class HtmlStatus(object):
     def write(self, directory):
         """Write the current status to `directory`."""
         status_file = os.path.join(directory, self.STATUS_FILE)
+        files = {}
+        for filename, fileinfo in iitems(self.files):
+            fileinfo['index']['nums'] = fileinfo['index']['nums'].init_args()
+            files[filename] = fileinfo
+
         status = {
             'format': self.STATUS_FORMAT,
             'version': coverage.__version__,
             'settings': self.settings,
-            'files': self.files,
-            }
-        fout = open(status_file, "wb")
-        try:
-            pickle.dump(status, fout)
-        finally:
-            fout.close()
+            'files': files,
+        }
+        with open(status_file, "w") as fout:
+            json.dump(status, fout)
+
+        # Older versions of ShiningPanda look for the old name, status.dat.
+        # Accommodate them if we are running under Jenkins.
+        # https://issues.jenkins-ci.org/browse/JENKINS-28428
+        if "JENKINS_URL" in os.environ:
+            with open(os.path.join(directory, "status.dat"), "w") as dat:
+                dat.write("https://issues.jenkins-ci.org/browse/JENKINS-28428\n")
 
     def settings_hash(self):
         """Get the hash of the coverage.py settings."""
@@ -373,24 +421,15 @@ class HtmlStatus(object):
 # Helpers for templates and generating HTML
 
 def escape(t):
-    """HTML-escape the text in `t`."""
-    return (t
-            # Convert HTML special chars into HTML entities.
-            .replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
-            .replace("'", "&#39;").replace('"', "&quot;")
-            # Convert runs of spaces: "......" -> "&nbsp;.&nbsp;.&nbsp;."
-            .replace("  ", "&nbsp; ")
-            # To deal with odd-length runs, convert the final pair of spaces
-            # so that "....." -> "&nbsp;.&nbsp;&nbsp;."
-            .replace("  ", "&nbsp; ")
-        )
-
-def spaceless(html):
-    """Squeeze out some annoying extra space from an HTML string.
-
-    Nicely-formatted templates mean lots of extra space in the result.
-    Get rid of some.
+    """HTML-escape the text in `t`.
+
+    This is only suitable for HTML text, not attributes.
 
     """
-    html = re.sub(r">\s+<p ", ">\n<p ", html)
-    return html
+    # Convert HTML special chars into HTML entities.
+    return t.replace("&", "&amp;").replace("<", "&lt;")
+
+
+def pair(ratio):
+    """Format a pair of numbers so JavaScript can read them in an attribute."""
+    return "%s %s" % ratio
index b24006d25e0005e37baf5500cdc7ee04e271609c..f6f5de20771ce0e4bdc1b6c826e25c7c47808599 100644 (file)
@@ -1,10 +1,13 @@
+// Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+// For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
 // Coverage.py HTML report browser code.
 /*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */
 /*global coverage: true, document, window, $ */
 
 coverage = {};
 
-// Find all the elements with shortkey_* class, and use them to assign a shotrtcut key.
+// Find all the elements with shortkey_* class, and use them to assign a shortcut key.
 coverage.assign_shortkeys = function () {
     $("*[class*='shortkey_']").each(function (i, e) {
         $.each($(e).attr("class").split(" "), function (i, c) {
@@ -35,6 +38,135 @@ coverage.wire_up_help_panel = function () {
     });
 };
 
+// Create the events for the filter box.
+coverage.wire_up_filter = function () {
+    // Cache elements.
+    var table = $("table.index");
+    var table_rows = table.find("tbody tr");
+    var table_row_names = table_rows.find("td.name a");
+    var no_rows = $("#no_rows");
+
+    // Create a duplicate table footer that we can modify with dynamic summed values.
+    var table_footer = $("table.index tfoot tr");
+    var table_dynamic_footer = table_footer.clone();
+    table_dynamic_footer.attr('class', 'total_dynamic hidden');
+    table_footer.after(table_dynamic_footer);
+
+    // Observe filter keyevents.
+    $("#filter").on("keyup change", $.debounce(150, function (event) {
+        var filter_value = $(this).val();
+
+        if (filter_value === "") {
+            // Filter box is empty, remove all filtering.
+            table_rows.removeClass("hidden");
+
+            // Show standard footer, hide dynamic footer.
+            table_footer.removeClass("hidden");
+            table_dynamic_footer.addClass("hidden");
+
+            // Hide placeholder, show table.
+            if (no_rows.length > 0) {
+                no_rows.hide();
+            }
+            table.show();
+
+        }
+        else {
+            // Filter table items by value.
+            var hidden = 0;
+            var shown = 0;
+
+            // Hide / show elements.
+            $.each(table_row_names, function () {
+                var element = $(this).parents("tr");
+
+                if ($(this).text().indexOf(filter_value) === -1) {
+                    // hide
+                    element.addClass("hidden");
+                    hidden++;
+                }
+                else {
+                    // show
+                    element.removeClass("hidden");
+                    shown++;
+                }
+            });
+
+            // Show placeholder if no rows will be displayed.
+            if (no_rows.length > 0) {
+                if (shown === 0) {
+                    // Show placeholder, hide table.
+                    no_rows.show();
+                    table.hide();
+                }
+                else {
+                    // Hide placeholder, show table.
+                    no_rows.hide();
+                    table.show();
+                }
+            }
+
+            // Manage dynamic header:
+            if (hidden > 0) {
+                // Calculate new dynamic sum values based on visible rows.
+                for (var column = 2; column < 20; column++) {
+                    // Calculate summed value.
+                    var cells = table_rows.find('td:nth-child(' + column + ')');
+                    if (!cells.length) {
+                        // No more columns...!
+                        break;
+                    }
+
+                    var sum = 0, numer = 0, denom = 0;
+                    $.each(cells.filter(':visible'), function () {
+                        var ratio = $(this).data("ratio");
+                        if (ratio) {
+                            var splitted = ratio.split(" ");
+                            numer += parseInt(splitted[0], 10);
+                            denom += parseInt(splitted[1], 10);
+                        }
+                        else {
+                            sum += parseInt(this.innerHTML, 10);
+                        }
+                    });
+
+                    // Get footer cell element.
+                    var footer_cell = table_dynamic_footer.find('td:nth-child(' + column + ')');
+
+                    // Set value into dynamic footer cell element.
+                    if (cells[0].innerHTML.indexOf('%') > -1) {
+                        // Percentage columns use the numerator and denominator,
+                        // and adapt to the number of decimal places.
+                        var match = /\.([0-9]+)/.exec(cells[0].innerHTML);
+                        var places = 0;
+                        if (match) {
+                            places = match[1].length;
+                        }
+                        var pct = numer * 100 / denom;
+                        footer_cell.text(pct.toFixed(places) + '%');
+                    }
+                    else {
+                        footer_cell.text(sum);
+                    }
+                }
+
+                // Hide standard footer, show dynamic footer.
+                table_footer.addClass("hidden");
+                table_dynamic_footer.removeClass("hidden");
+            }
+            else {
+                // Show standard footer, hide dynamic footer.
+                table_footer.removeClass("hidden");
+                table_dynamic_footer.addClass("hidden");
+            }
+        }
+    }));
+
+    // Trigger change event on setup, to force filter on page refresh
+    // (filter value may still be present).
+    $("#filter").trigger("change");
+};
+
 // Loaded on index.html
 coverage.index_ready = function ($) {
     // Look for a cookie containing previous sort settings:
@@ -95,6 +227,7 @@ coverage.index_ready = function ($) {
 
     coverage.assign_shortkeys();
     coverage.wire_up_help_panel();
+    coverage.wire_up_filter();
 
     // Watch for page unload events so we can save the final sort settings:
     $(window).unload(function () {
@@ -129,6 +262,11 @@ coverage.pyfile_ready = function ($) {
 
     coverage.assign_shortkeys();
     coverage.wire_up_help_panel();
+
+    coverage.init_scroll_markers();
+
+    // Rebuild the scroll markers when the window height changes.
+    $(window).resize(coverage.resize_scroll_markers);
 };
 
 coverage.toggle_lines = function (btn, cls) {
@@ -187,12 +325,13 @@ coverage.to_next_chunk = function () {
 
     // Find the start of the next colored chunk.
     var probe = c.sel_end;
+    var color, probe_line;
     while (true) {
-        var probe_line = c.line_elt(probe);
+        probe_line = c.line_elt(probe);
         if (probe_line.length === 0) {
             return;
         }
-        var color = probe_line.css("background-color");
+        color = probe_line.css("background-color");
         if (!c.is_transparent(color)) {
             break;
         }
@@ -374,3 +513,72 @@ coverage.scroll_window = function (to_pos) {
 coverage.finish_scrolling = function () {
     $("html,body").stop(true, true);
 };
+
+coverage.init_scroll_markers = function () {
+    var c = coverage;
+    // Init some variables
+    c.lines_len = $('td.text p').length;
+    c.body_h = $('body').height();
+    c.header_h = $('div#header').height();
+    c.missed_lines = $('td.text p.mis, td.text p.par');
+
+    // Build html
+    c.resize_scroll_markers();
+};
+
+coverage.resize_scroll_markers = function () {
+    var c = coverage,
+        min_line_height = 3,
+        max_line_height = 10,
+        visible_window_h = $(window).height();
+
+    $('#scroll_marker').remove();
+    // Don't build markers if the window has no scroll bar.
+    if (c.body_h <= visible_window_h) {
+        return;
+    }
+
+    $("body").append("<div id='scroll_marker'>&nbsp;</div>");
+    var scroll_marker = $('#scroll_marker'),
+        marker_scale = scroll_marker.height() / c.body_h,
+        line_height = scroll_marker.height() / c.lines_len;
+
+    // Line height must be between the extremes.
+    if (line_height > min_line_height) {
+        if (line_height > max_line_height) {
+            line_height = max_line_height;
+        }
+    }
+    else {
+        line_height = min_line_height;
+    }
+
+    var previous_line = -99,
+        last_mark,
+        last_top;
+
+    c.missed_lines.each(function () {
+        var line_top = Math.round($(this).offset().top * marker_scale),
+            id_name = $(this).attr('id'),
+            line_number = parseInt(id_name.substring(1, id_name.length));
+
+        if (line_number === previous_line + 1) {
+            // If this solid missed block just make previous mark higher.
+            last_mark.css({
+                'height': line_top + line_height - last_top
+            });
+        }
+        else {
+            // Add colored line in scroll_marker block.
+            scroll_marker.append('<div id="m' + line_number + '" class="marker"></div>');
+            last_mark = $('#m' + line_number);
+            last_mark.css({
+                'height': line_height,
+                'top': line_top
+            });
+            last_top = line_top;
+        }
+
+        previous_line = line_number;
+    });
+};
index c831823dd23944f7c88d651f20af75c574df7c72..ee2deab0b62722b411ad680e0c72316786fe1c48 100644 (file)
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #}
+{# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt #}
+
+<!DOCTYPE html>
 <html>
 <head>
-    <meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
     <title>{{ title|escape }}</title>
-    <link rel='stylesheet' href='style.css' type='text/css'>
+    <link rel="stylesheet" href="style.css" type="text/css">
     {% if extra_css %}
-        <link rel='stylesheet' href='{{ extra_css }}' type='text/css'>
+        <link rel="stylesheet" href="{{ extra_css }}" type="text/css">
     {% endif %}
-    <script type='text/javascript' src='jquery.min.js'></script>
-    <script type='text/javascript' src='jquery.tablesorter.min.js'></script>
-    <script type='text/javascript' src='jquery.hotkeys.js'></script>
-    <script type='text/javascript' src='coverage_html.js'></script>
-    <script type='text/javascript' charset='utf-8'>
+    <script type="text/javascript" src="jquery.min.js"></script>
+    <script type="text/javascript" src="jquery.debounce.min.js"></script>
+    <script type="text/javascript" src="jquery.tablesorter.min.js"></script>
+    <script type="text/javascript" src="jquery.hotkeys.js"></script>
+    <script type="text/javascript" src="coverage_html.js"></script>
+    <script type="text/javascript">
         jQuery(document).ready(coverage.index_ready);
     </script>
 </head>
-<body id='indexfile'>
+<body class="indexfile">
 
-<div id='header'>
-    <div class='content'>
+<div id="header">
+    <div class="content">
         <h1>{{ title|escape }}:
-            <span class='pc_cov'>{{totals.pc_covered_str}}%</span>
+            <span class="pc_cov">{{totals.pc_covered_str}}%</span>
         </h1>
-        <img id='keyboard_icon' src='keybd_closed.png'>
+
+        <img id="keyboard_icon" src="keybd_closed.png" alt="Show keyboard shortcuts" />
+
+        <form id="filter_container">
+            <input id="filter" type="text" value="" placeholder="filter..." />
+        </form>
     </div>
 </div>
 
-<div class='help_panel'>
-    <img id='panel_icon' src='keybd_open.png'>
-    <p class='legend'>Hot-keys on this page</p>
+<div class="help_panel">
+    <img id="panel_icon" src="keybd_open.png" alt="Hide keyboard shortcuts" />
+    <p class="legend">Hot-keys on this page</p>
     <div>
-    <p class='keyhelp'>
-        <span class='key'>n</span>
-        <span class='key'>s</span>
-        <span class='key'>m</span>
-        <span class='key'>x</span>
-        {% if arcs %}
-        <span class='key'>b</span>
-        <span class='key'>p</span>
+    <p class="keyhelp">
+        <span class="key">n</span>
+        <span class="key">s</span>
+        <span class="key">m</span>
+        <span class="key">x</span>
+        {% if has_arcs %}
+        <span class="key">b</span>
+        <span class="key">p</span>
         {% endif %}
-        <span class='key'>c</span> &nbsp; change column sorting
+        <span class="key">c</span> &nbsp; change column sorting
     </p>
     </div>
 </div>
 
-<div id='index'>
-    <table class='index'>
+<div id="index">
+    <table class="index">
         <thead>
-            {# The title='' attr doesn't work in Safari. #}
-            <tr class='tablehead' title='Click to sort'>
-                <th class='name left headerSortDown shortkey_n'>Module</th>
-                <th class='shortkey_s'>statements</th>
-                <th class='shortkey_m'>missing</th>
-                <th class='shortkey_x'>excluded</th>
-                {% if arcs %}
-                <th class='shortkey_b'>branches</th>
-                <th class='shortkey_p'>partial</th>
+            {# The title="" attr doesn"t work in Safari. #}
+            <tr class="tablehead" title="Click to sort">
+                <th class="name left headerSortDown shortkey_n">Module</th>
+                <th class="shortkey_s">statements</th>
+                <th class="shortkey_m">missing</th>
+                <th class="shortkey_x">excluded</th>
+                {% if has_arcs %}
+                <th class="shortkey_b">branches</th>
+                <th class="shortkey_p">partial</th>
                 {% endif %}
-                <th class='right shortkey_c'>coverage</th>
+                <th class="right shortkey_c">coverage</th>
             </tr>
         </thead>
         {# HTML syntax requires thead, tfoot, tbody #}
         <tfoot>
-            <tr class='total'>
-                <td class='name left'>Total</td>
+            <tr class="total">
+                <td class="name left">Total</td>
                 <td>{{totals.n_statements}}</td>
                 <td>{{totals.n_missing}}</td>
                 <td>{{totals.n_excluded}}</td>
-                {% if arcs %}
+                {% if has_arcs %}
                 <td>{{totals.n_branches}}</td>
                 <td>{{totals.n_partial_branches}}</td>
                 {% endif %}
-                <td class='right'>{{totals.pc_covered_str}}%</td>
+                <td class="right" data-ratio="{{totals.ratio_covered|pair}}">{{totals.pc_covered_str}}%</td>
             </tr>
         </tfoot>
         <tbody>
             {% for file in files %}
-            <tr class='file'>
-                <td class='name left'><a href='{{file.html_filename}}'>{{file.name}}</a></td>
+            <tr class="file">
+                <td class="name left"><a href="{{file.html_filename}}">{{file.relative_filename}}</a></td>
                 <td>{{file.nums.n_statements}}</td>
                 <td>{{file.nums.n_missing}}</td>
                 <td>{{file.nums.n_excluded}}</td>
-                {% if arcs %}
+                {% if has_arcs %}
                 <td>{{file.nums.n_branches}}</td>
                 <td>{{file.nums.n_partial_branches}}</td>
                 {% endif %}
-                <td class='right'>{{file.nums.pc_covered_str}}%</td>
+                <td class="right" data-ratio="{{file.nums.ratio_covered|pair}}">{{file.nums.pc_covered_str}}%</td>
             </tr>
             {% endfor %}
         </tbody>
     </table>
+
+    <p id="no_rows">
+        No items found using the specified filter.
+    </p>
 </div>
 
-<div id='footer'>
-    <div class='content'>
+<div id="footer">
+    <div class="content">
         <p>
-            <a class='nav' href='{{__url__}}'>coverage.py v{{__version__}}</a>
+            <a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
+            created at {{ time_stamp }}
         </p>
     </div>
 </div>
diff --git a/python/helpers/coveragepy/coverage/htmlfiles/jquery.debounce.min.js b/python/helpers/coveragepy/coverage/htmlfiles/jquery.debounce.min.js
new file mode 100644 (file)
index 0000000..648fe5d
--- /dev/null
@@ -0,0 +1,9 @@
+/*
+ * jQuery throttle / debounce - v1.1 - 3/7/2010
+ * http://benalman.com/projects/jquery-throttle-debounce-plugin/
+ *
+ * Copyright (c) 2010 "Cowboy" Ben Alman
+ * Dual licensed under the MIT and GPL licenses.
+ * http://benalman.com/about/license/
+ */
+(function(b,c){var $=b.jQuery||b.Cowboy||(b.Cowboy={}),a;$.throttle=a=function(e,f,j,i){var h,d=0;if(typeof f!=="boolean"){i=j;j=f;f=c}function g(){var o=this,m=+new Date()-d,n=arguments;function l(){d=+new Date();j.apply(o,n)}function k(){h=c}if(i&&!h){l()}h&&clearTimeout(h);if(i===c&&m>e){l()}else{if(f!==true){h=setTimeout(i?k:l,i===c?e-m:e)}}}if($.guid){g.guid=j.guid=j.guid||$.guid++}return g};$.debounce=function(d,e,f){return f===c?a(d,e,false):a(d,f,e!==false)}})(this);
index c941a5f7a9f3b32f49f049ff5c411c22176ccf6f..d1608e37ffa979b8689bfb868ad8b061b191f6f6 100644 (file)
@@ -1,166 +1,4 @@
-/*!
- * jQuery JavaScript Library v1.4.3
- * http://jquery.com/
- *
- * Copyright 2010, John Resig
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * Includes Sizzle.js
- * http://sizzlejs.com/
- * Copyright 2010, The Dojo Foundation
- * Released under the MIT, BSD, and GPL Licenses.
- *
- * Date: Thu Oct 14 23:10:06 2010 -0400
- */
-(function(E,A){function U(){return false}function ba(){return true}function ja(a,b,d){d[0].type=a;return c.event.handle.apply(b,d)}function Ga(a){var b,d,e=[],f=[],h,k,l,n,s,v,B,D;k=c.data(this,this.nodeType?"events":"__events__");if(typeof k==="function")k=k.events;if(!(a.liveFired===this||!k||!k.live||a.button&&a.type==="click")){if(a.namespace)D=RegExp("(^|\\.)"+a.namespace.split(".").join("\\.(?:.*\\.)?")+"(\\.|$)");a.liveFired=this;var H=k.live.slice(0);for(n=0;n<H.length;n++){k=H[n];k.origType.replace(X,
-"")===a.type?f.push(k.selector):H.splice(n--,1)}f=c(a.target).closest(f,a.currentTarget);s=0;for(v=f.length;s<v;s++){B=f[s];for(n=0;n<H.length;n++){k=H[n];if(B.selector===k.selector&&(!D||D.test(k.namespace))){l=B.elem;h=null;if(k.preType==="mouseenter"||k.preType==="mouseleave"){a.type=k.preType;h=c(a.relatedTarget).closest(k.selector)[0]}if(!h||h!==l)e.push({elem:l,handleObj:k,level:B.level})}}}s=0;for(v=e.length;s<v;s++){f=e[s];if(d&&f.level>d)break;a.currentTarget=f.elem;a.data=f.handleObj.data;
-a.handleObj=f.handleObj;D=f.handleObj.origHandler.apply(f.elem,arguments);if(D===false||a.isPropagationStopped()){d=f.level;if(D===false)b=false}}return b}}function Y(a,b){return(a&&a!=="*"?a+".":"")+b.replace(Ha,"`").replace(Ia,"&")}function ka(a,b,d){if(c.isFunction(b))return c.grep(a,function(f,h){return!!b.call(f,h,f)===d});else if(b.nodeType)return c.grep(a,function(f){return f===b===d});else if(typeof b==="string"){var e=c.grep(a,function(f){return f.nodeType===1});if(Ja.test(b))return c.filter(b,
-e,!d);else b=c.filter(b,e)}return c.grep(a,function(f){return c.inArray(f,b)>=0===d})}function la(a,b){var d=0;b.each(function(){if(this.nodeName===(a[d]&&a[d].nodeName)){var e=c.data(a[d++]),f=c.data(this,e);if(e=e&&e.events){delete f.handle;f.events={};for(var h in e)for(var k in e[h])c.event.add(this,h,e[h][k],e[h][k].data)}}})}function Ka(a,b){b.src?c.ajax({url:b.src,async:false,dataType:"script"}):c.globalEval(b.text||b.textContent||b.innerHTML||"");b.parentNode&&b.parentNode.removeChild(b)}
-function ma(a,b,d){var e=b==="width"?a.offsetWidth:a.offsetHeight;if(d==="border")return e;c.each(b==="width"?La:Ma,function(){d||(e-=parseFloat(c.css(a,"padding"+this))||0);if(d==="margin")e+=parseFloat(c.css(a,"margin"+this))||0;else e-=parseFloat(c.css(a,"border"+this+"Width"))||0});return e}function ca(a,b,d,e){if(c.isArray(b)&&b.length)c.each(b,function(f,h){d||Na.test(a)?e(a,h):ca(a+"["+(typeof h==="object"||c.isArray(h)?f:"")+"]",h,d,e)});else if(!d&&b!=null&&typeof b==="object")c.isEmptyObject(b)?
-e(a,""):c.each(b,function(f,h){ca(a+"["+f+"]",h,d,e)});else e(a,b)}function S(a,b){var d={};c.each(na.concat.apply([],na.slice(0,b)),function(){d[this]=a});return d}function oa(a){if(!da[a]){var b=c("<"+a+">").appendTo("body"),d=b.css("display");b.remove();if(d==="none"||d==="")d="block";da[a]=d}return da[a]}function ea(a){return c.isWindow(a)?a:a.nodeType===9?a.defaultView||a.parentWindow:false}var u=E.document,c=function(){function a(){if(!b.isReady){try{u.documentElement.doScroll("left")}catch(i){setTimeout(a,
-1);return}b.ready()}}var b=function(i,r){return new b.fn.init(i,r)},d=E.jQuery,e=E.$,f,h=/^(?:[^<]*(<[\w\W]+>)[^>]*$|#([\w\-]+)$)/,k=/\S/,l=/^\s+/,n=/\s+$/,s=/\W/,v=/\d/,B=/^<(\w+)\s*\/?>(?:<\/\1>)?$/,D=/^[\],:{}\s]*$/,H=/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,w=/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,G=/(?:^|:|,)(?:\s*\[)+/g,M=/(webkit)[ \/]([\w.]+)/,g=/(opera)(?:.*version)?[ \/]([\w.]+)/,j=/(msie) ([\w.]+)/,o=/(mozilla)(?:.*? rv:([\w.]+))?/,m=navigator.userAgent,p=false,
-q=[],t,x=Object.prototype.toString,C=Object.prototype.hasOwnProperty,P=Array.prototype.push,N=Array.prototype.slice,R=String.prototype.trim,Q=Array.prototype.indexOf,L={};b.fn=b.prototype={init:function(i,r){var y,z,F;if(!i)return this;if(i.nodeType){this.context=this[0]=i;this.length=1;return this}if(i==="body"&&!r&&u.body){this.context=u;this[0]=u.body;this.selector="body";this.length=1;return this}if(typeof i==="string")if((y=h.exec(i))&&(y[1]||!r))if(y[1]){F=r?r.ownerDocument||r:u;if(z=B.exec(i))if(b.isPlainObject(r)){i=
-[u.createElement(z[1])];b.fn.attr.call(i,r,true)}else i=[F.createElement(z[1])];else{z=b.buildFragment([y[1]],[F]);i=(z.cacheable?z.fragment.cloneNode(true):z.fragment).childNodes}return b.merge(this,i)}else{if((z=u.getElementById(y[2]))&&z.parentNode){if(z.id!==y[2])return f.find(i);this.length=1;this[0]=z}this.context=u;this.selector=i;return this}else if(!r&&!s.test(i)){this.selector=i;this.context=u;i=u.getElementsByTagName(i);return b.merge(this,i)}else return!r||r.jquery?(r||f).find(i):b(r).find(i);
-else if(b.isFunction(i))return f.ready(i);if(i.selector!==A){this.selector=i.selector;this.context=i.context}return b.makeArray(i,this)},selector:"",jquery:"1.4.3",length:0,size:function(){return this.length},toArray:function(){return N.call(this,0)},get:function(i){return i==null?this.toArray():i<0?this.slice(i)[0]:this[i]},pushStack:function(i,r,y){var z=b();b.isArray(i)?P.apply(z,i):b.merge(z,i);z.prevObject=this;z.context=this.context;if(r==="find")z.selector=this.selector+(this.selector?" ":
-"")+y;else if(r)z.selector=this.selector+"."+r+"("+y+")";return z},each:function(i,r){return b.each(this,i,r)},ready:function(i){b.bindReady();if(b.isReady)i.call(u,b);else q&&q.push(i);return this},eq:function(i){return i===-1?this.slice(i):this.slice(i,+i+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(N.apply(this,arguments),"slice",N.call(arguments).join(","))},map:function(i){return this.pushStack(b.map(this,function(r,y){return i.call(r,
-y,r)}))},end:function(){return this.prevObject||b(null)},push:P,sort:[].sort,splice:[].splice};b.fn.init.prototype=b.fn;b.extend=b.fn.extend=function(){var i=arguments[0]||{},r=1,y=arguments.length,z=false,F,I,K,J,fa;if(typeof i==="boolean"){z=i;i=arguments[1]||{};r=2}if(typeof i!=="object"&&!b.isFunction(i))i={};if(y===r){i=this;--r}for(;r<y;r++)if((F=arguments[r])!=null)for(I in F){K=i[I];J=F[I];if(i!==J)if(z&&J&&(b.isPlainObject(J)||(fa=b.isArray(J)))){if(fa){fa=false;clone=K&&b.isArray(K)?K:[]}else clone=
-K&&b.isPlainObject(K)?K:{};i[I]=b.extend(z,clone,J)}else if(J!==A)i[I]=J}return i};b.extend({noConflict:function(i){E.$=e;if(i)E.jQuery=d;return b},isReady:false,readyWait:1,ready:function(i){i===true&&b.readyWait--;if(!b.readyWait||i!==true&&!b.isReady){if(!u.body)return setTimeout(b.ready,1);b.isReady=true;if(!(i!==true&&--b.readyWait>0)){if(q){for(var r=0;i=q[r++];)i.call(u,b);q=null}b.fn.triggerHandler&&b(u).triggerHandler("ready")}}},bindReady:function(){if(!p){p=true;if(u.readyState==="complete")return setTimeout(b.ready,
-1);if(u.addEventListener){u.addEventListener("DOMContentLoaded",t,false);E.addEventListener("load",b.ready,false)}else if(u.attachEvent){u.attachEvent("onreadystatechange",t);E.attachEvent("onload",b.ready);var i=false;try{i=E.frameElement==null}catch(r){}u.documentElement.doScroll&&i&&a()}}},isFunction:function(i){return b.type(i)==="function"},isArray:Array.isArray||function(i){return b.type(i)==="array"},isWindow:function(i){return i&&typeof i==="object"&&"setInterval"in i},isNaN:function(i){return i==
-null||!v.test(i)||isNaN(i)},type:function(i){return i==null?String(i):L[x.call(i)]||"object"},isPlainObject:function(i){if(!i||b.type(i)!=="object"||i.nodeType||b.isWindow(i))return false;if(i.constructor&&!C.call(i,"constructor")&&!C.call(i.constructor.prototype,"isPrototypeOf"))return false;for(var r in i);return r===A||C.call(i,r)},isEmptyObject:function(i){for(var r in i)return false;return true},error:function(i){throw i;},parseJSON:function(i){if(typeof i!=="string"||!i)return null;i=b.trim(i);
-if(D.test(i.replace(H,"@").replace(w,"]").replace(G,"")))return E.JSON&&E.JSON.parse?E.JSON.parse(i):(new Function("return "+i))();else b.error("Invalid JSON: "+i)},noop:function(){},globalEval:function(i){if(i&&k.test(i)){var r=u.getElementsByTagName("head")[0]||u.documentElement,y=u.createElement("script");y.type="text/javascript";if(b.support.scriptEval)y.appendChild(u.createTextNode(i));else y.text=i;r.insertBefore(y,r.firstChild);r.removeChild(y)}},nodeName:function(i,r){return i.nodeName&&i.nodeName.toUpperCase()===
-r.toUpperCase()},each:function(i,r,y){var z,F=0,I=i.length,K=I===A||b.isFunction(i);if(y)if(K)for(z in i){if(r.apply(i[z],y)===false)break}else for(;F<I;){if(r.apply(i[F++],y)===false)break}else if(K)for(z in i){if(r.call(i[z],z,i[z])===false)break}else for(y=i[0];F<I&&r.call(y,F,y)!==false;y=i[++F]);return i},trim:R?function(i){return i==null?"":R.call(i)}:function(i){return i==null?"":i.toString().replace(l,"").replace(n,"")},makeArray:function(i,r){var y=r||[];if(i!=null){var z=b.type(i);i.length==
-null||z==="string"||z==="function"||z==="regexp"||b.isWindow(i)?P.call(y,i):b.merge(y,i)}return y},inArray:function(i,r){if(r.indexOf)return r.indexOf(i);for(var y=0,z=r.length;y<z;y++)if(r[y]===i)return y;return-1},merge:function(i,r){var y=i.length,z=0;if(typeof r.length==="number")for(var F=r.length;z<F;z++)i[y++]=r[z];else for(;r[z]!==A;)i[y++]=r[z++];i.length=y;return i},grep:function(i,r,y){var z=[],F;y=!!y;for(var I=0,K=i.length;I<K;I++){F=!!r(i[I],I);y!==F&&z.push(i[I])}return z},map:function(i,
-r,y){for(var z=[],F,I=0,K=i.length;I<K;I++){F=r(i[I],I,y);if(F!=null)z[z.length]=F}return z.concat.apply([],z)},guid:1,proxy:function(i,r,y){if(arguments.length===2)if(typeof r==="string"){y=i;i=y[r];r=A}else if(r&&!b.isFunction(r)){y=r;r=A}if(!r&&i)r=function(){return i.apply(y||this,arguments)};if(i)r.guid=i.guid=i.guid||r.guid||b.guid++;return r},access:function(i,r,y,z,F,I){var K=i.length;if(typeof r==="object"){for(var J in r)b.access(i,J,r[J],z,F,y);return i}if(y!==A){z=!I&&z&&b.isFunction(y);
-for(J=0;J<K;J++)F(i[J],r,z?y.call(i[J],J,F(i[J],r)):y,I);return i}return K?F(i[0],r):A},now:function(){return(new Date).getTime()},uaMatch:function(i){i=i.toLowerCase();i=M.exec(i)||g.exec(i)||j.exec(i)||i.indexOf("compatible")<0&&o.exec(i)||[];return{browser:i[1]||"",version:i[2]||"0"}},browser:{}});b.each("Boolean Number String Function Array Date RegExp Object".split(" "),function(i,r){L["[object "+r+"]"]=r.toLowerCase()});m=b.uaMatch(m);if(m.browser){b.browser[m.browser]=true;b.browser.version=
-m.version}if(b.browser.webkit)b.browser.safari=true;if(Q)b.inArray=function(i,r){return Q.call(r,i)};if(!/\s/.test("\u00a0")){l=/^[\s\xA0]+/;n=/[\s\xA0]+$/}f=b(u);if(u.addEventListener)t=function(){u.removeEventListener("DOMContentLoaded",t,false);b.ready()};else if(u.attachEvent)t=function(){if(u.readyState==="complete"){u.detachEvent("onreadystatechange",t);b.ready()}};return E.jQuery=E.$=b}();(function(){c.support={};var a=u.documentElement,b=u.createElement("script"),d=u.createElement("div"),
-e="script"+c.now();d.style.display="none";d.innerHTML="   <link/><table></table><a href='/a' style='color:red;float:left;opacity:.55;'>a</a><input type='checkbox'/>";var f=d.getElementsByTagName("*"),h=d.getElementsByTagName("a")[0],k=u.createElement("select"),l=k.appendChild(u.createElement("option"));if(!(!f||!f.length||!h)){c.support={leadingWhitespace:d.firstChild.nodeType===3,tbody:!d.getElementsByTagName("tbody").length,htmlSerialize:!!d.getElementsByTagName("link").length,style:/red/.test(h.getAttribute("style")),
-hrefNormalized:h.getAttribute("href")==="/a",opacity:/^0.55$/.test(h.style.opacity),cssFloat:!!h.style.cssFloat,checkOn:d.getElementsByTagName("input")[0].value==="on",optSelected:l.selected,optDisabled:false,checkClone:false,scriptEval:false,noCloneEvent:true,boxModel:null,inlineBlockNeedsLayout:false,shrinkWrapBlocks:false,reliableHiddenOffsets:true};k.disabled=true;c.support.optDisabled=!l.disabled;b.type="text/javascript";try{b.appendChild(u.createTextNode("window."+e+"=1;"))}catch(n){}a.insertBefore(b,
-a.firstChild);if(E[e]){c.support.scriptEval=true;delete E[e]}a.removeChild(b);if(d.attachEvent&&d.fireEvent){d.attachEvent("onclick",function s(){c.support.noCloneEvent=false;d.detachEvent("onclick",s)});d.cloneNode(true).fireEvent("onclick")}d=u.createElement("div");d.innerHTML="<input type='radio' name='radiotest' checked='checked'/>";a=u.createDocumentFragment();a.appendChild(d.firstChild);c.support.checkClone=a.cloneNode(true).cloneNode(true).lastChild.checked;c(function(){var s=u.createElement("div");
-s.style.width=s.style.paddingLeft="1px";u.body.appendChild(s);c.boxModel=c.support.boxModel=s.offsetWidth===2;if("zoom"in s.style){s.style.display="inline";s.style.zoom=1;c.support.inlineBlockNeedsLayout=s.offsetWidth===2;s.style.display="";s.innerHTML="<div style='width:4px;'></div>";c.support.shrinkWrapBlocks=s.offsetWidth!==2}s.innerHTML="<table><tr><td style='padding:0;display:none'></td><td>t</td></tr></table>";var v=s.getElementsByTagName("td");c.support.reliableHiddenOffsets=v[0].offsetHeight===
-0;v[0].style.display="";v[1].style.display="none";c.support.reliableHiddenOffsets=c.support.reliableHiddenOffsets&&v[0].offsetHeight===0;s.innerHTML="";u.body.removeChild(s).style.display="none"});a=function(s){var v=u.createElement("div");s="on"+s;var B=s in v;if(!B){v.setAttribute(s,"return;");B=typeof v[s]==="function"}return B};c.support.submitBubbles=a("submit");c.support.changeBubbles=a("change");a=b=d=f=h=null}})();c.props={"for":"htmlFor","class":"className",readonly:"readOnly",maxlength:"maxLength",
-cellspacing:"cellSpacing",rowspan:"rowSpan",colspan:"colSpan",tabindex:"tabIndex",usemap:"useMap",frameborder:"frameBorder"};var pa={},Oa=/^(?:\{.*\}|\[.*\])$/;c.extend({cache:{},uuid:0,expando:"jQuery"+c.now(),noData:{embed:true,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:true},data:function(a,b,d){if(c.acceptData(a)){a=a==E?pa:a;var e=a.nodeType,f=e?a[c.expando]:null,h=c.cache;if(!(e&&!f&&typeof b==="string"&&d===A)){if(e)f||(a[c.expando]=f=++c.uuid);else h=a;if(typeof b==="object")if(e)h[f]=
-c.extend(h[f],b);else c.extend(h,b);else if(e&&!h[f])h[f]={};a=e?h[f]:h;if(d!==A)a[b]=d;return typeof b==="string"?a[b]:a}}},removeData:function(a,b){if(c.acceptData(a)){a=a==E?pa:a;var d=a.nodeType,e=d?a[c.expando]:a,f=c.cache,h=d?f[e]:e;if(b){if(h){delete h[b];d&&c.isEmptyObject(h)&&c.removeData(a)}}else if(d&&c.support.deleteExpando)delete a[c.expando];else if(a.removeAttribute)a.removeAttribute(c.expando);else if(d)delete f[e];else for(var k in a)delete a[k]}},acceptData:function(a){if(a.nodeName){var b=
-c.noData[a.nodeName.toLowerCase()];if(b)return!(b===true||a.getAttribute("classid")!==b)}return true}});c.fn.extend({data:function(a,b){if(typeof a==="undefined")return this.length?c.data(this[0]):null;else if(typeof a==="object")return this.each(function(){c.data(this,a)});var d=a.split(".");d[1]=d[1]?"."+d[1]:"";if(b===A){var e=this.triggerHandler("getData"+d[1]+"!",[d[0]]);if(e===A&&this.length){e=c.data(this[0],a);if(e===A&&this[0].nodeType===1){e=this[0].getAttribute("data-"+a);if(typeof e===
-"string")try{e=e==="true"?true:e==="false"?false:e==="null"?null:!c.isNaN(e)?parseFloat(e):Oa.test(e)?c.parseJSON(e):e}catch(f){}else e=A}}return e===A&&d[1]?this.data(d[0]):e}else return this.each(function(){var h=c(this),k=[d[0],b];h.triggerHandler("setData"+d[1]+"!",k);c.data(this,a,b);h.triggerHandler("changeData"+d[1]+"!",k)})},removeData:function(a){return this.each(function(){c.removeData(this,a)})}});c.extend({queue:function(a,b,d){if(a){b=(b||"fx")+"queue";var e=c.data(a,b);if(!d)return e||
-[];if(!e||c.isArray(d))e=c.data(a,b,c.makeArray(d));else e.push(d);return e}},dequeue:function(a,b){b=b||"fx";var d=c.queue(a,b),e=d.shift();if(e==="inprogress")e=d.shift();if(e){b==="fx"&&d.unshift("inprogress");e.call(a,function(){c.dequeue(a,b)})}}});c.fn.extend({queue:function(a,b){if(typeof a!=="string"){b=a;a="fx"}if(b===A)return c.queue(this[0],a);return this.each(function(){var d=c.queue(this,a,b);a==="fx"&&d[0]!=="inprogress"&&c.dequeue(this,a)})},dequeue:function(a){return this.each(function(){c.dequeue(this,
-a)})},delay:function(a,b){a=c.fx?c.fx.speeds[a]||a:a;b=b||"fx";return this.queue(b,function(){var d=this;setTimeout(function(){c.dequeue(d,b)},a)})},clearQueue:function(a){return this.queue(a||"fx",[])}});var qa=/[\n\t]/g,ga=/\s+/,Pa=/\r/g,Qa=/^(?:href|src|style)$/,Ra=/^(?:button|input)$/i,Sa=/^(?:button|input|object|select|textarea)$/i,Ta=/^a(?:rea)?$/i,ra=/^(?:radio|checkbox)$/i;c.fn.extend({attr:function(a,b){return c.access(this,a,b,true,c.attr)},removeAttr:function(a){return this.each(function(){c.attr(this,
-a,"");this.nodeType===1&&this.removeAttribute(a)})},addClass:function(a){if(c.isFunction(a))return this.each(function(s){var v=c(this);v.addClass(a.call(this,s,v.attr("class")))});if(a&&typeof a==="string")for(var b=(a||"").split(ga),d=0,e=this.length;d<e;d++){var f=this[d];if(f.nodeType===1)if(f.className){for(var h=" "+f.className+" ",k=f.className,l=0,n=b.length;l<n;l++)if(h.indexOf(" "+b[l]+" ")<0)k+=" "+b[l];f.className=c.trim(k)}else f.className=a}return this},removeClass:function(a){if(c.isFunction(a))return this.each(function(n){var s=
-c(this);s.removeClass(a.call(this,n,s.attr("class")))});if(a&&typeof a==="string"||a===A)for(var b=(a||"").split(ga),d=0,e=this.length;d<e;d++){var f=this[d];if(f.nodeType===1&&f.className)if(a){for(var h=(" "+f.className+" ").replace(qa," "),k=0,l=b.length;k<l;k++)h=h.replace(" "+b[k]+" "," ");f.className=c.trim(h)}else f.className=""}return this},toggleClass:function(a,b){var d=typeof a,e=typeof b==="boolean";if(c.isFunction(a))return this.each(function(f){var h=c(this);h.toggleClass(a.call(this,
-f,h.attr("class"),b),b)});return this.each(function(){if(d==="string")for(var f,h=0,k=c(this),l=b,n=a.split(ga);f=n[h++];){l=e?l:!k.hasClass(f);k[l?"addClass":"removeClass"](f)}else if(d==="undefined"||d==="boolean"){this.className&&c.data(this,"__className__",this.className);this.className=this.className||a===false?"":c.data(this,"__className__")||""}})},hasClass:function(a){a=" "+a+" ";for(var b=0,d=this.length;b<d;b++)if((" "+this[b].className+" ").replace(qa," ").indexOf(a)>-1)return true;return false},
-val:function(a){if(!arguments.length){var b=this[0];if(b){if(c.nodeName(b,"option")){var d=b.attributes.value;return!d||d.specified?b.value:b.text}if(c.nodeName(b,"select")){var e=b.selectedIndex;d=[];var f=b.options;b=b.type==="select-one";if(e<0)return null;var h=b?e:0;for(e=b?e+1:f.length;h<e;h++){var k=f[h];if(k.selected&&(c.support.optDisabled?!k.disabled:k.getAttribute("disabled")===null)&&(!k.parentNode.disabled||!c.nodeName(k.parentNode,"optgroup"))){a=c(k).val();if(b)return a;d.push(a)}}return d}if(ra.test(b.type)&&
-!c.support.checkOn)return b.getAttribute("value")===null?"on":b.value;return(b.value||"").replace(Pa,"")}return A}var l=c.isFunction(a);return this.each(function(n){var s=c(this),v=a;if(this.nodeType===1){if(l)v=a.call(this,n,s.val());if(v==null)v="";else if(typeof v==="number")v+="";else if(c.isArray(v))v=c.map(v,function(D){return D==null?"":D+""});if(c.isArray(v)&&ra.test(this.type))this.checked=c.inArray(s.val(),v)>=0;else if(c.nodeName(this,"select")){var B=c.makeArray(v);c("option",this).each(function(){this.selected=
-c.inArray(c(this).val(),B)>=0});if(!B.length)this.selectedIndex=-1}else this.value=v}})}});c.extend({attrFn:{val:true,css:true,html:true,text:true,data:true,width:true,height:true,offset:true},attr:function(a,b,d,e){if(!a||a.nodeType===3||a.nodeType===8)return A;if(e&&b in c.attrFn)return c(a)[b](d);e=a.nodeType!==1||!c.isXMLDoc(a);var f=d!==A;b=e&&c.props[b]||b;if(a.nodeType===1){var h=Qa.test(b);if((b in a||a[b]!==A)&&e&&!h){if(f){b==="type"&&Ra.test(a.nodeName)&&a.parentNode&&c.error("type property can't be changed");
-if(d===null)a.nodeType===1&&a.removeAttribute(b);else a[b]=d}if(c.nodeName(a,"form")&&a.getAttributeNode(b))return a.getAttributeNode(b).nodeValue;if(b==="tabIndex")return(b=a.getAttributeNode("tabIndex"))&&b.specified?b.value:Sa.test(a.nodeName)||Ta.test(a.nodeName)&&a.href?0:A;return a[b]}if(!c.support.style&&e&&b==="style"){if(f)a.style.cssText=""+d;return a.style.cssText}f&&a.setAttribute(b,""+d);if(!a.attributes[b]&&a.hasAttribute&&!a.hasAttribute(b))return A;a=!c.support.hrefNormalized&&e&&
-h?a.getAttribute(b,2):a.getAttribute(b);return a===null?A:a}}});var X=/\.(.*)$/,ha=/^(?:textarea|input|select)$/i,Ha=/\./g,Ia=/ /g,Ua=/[^\w\s.|`]/g,Va=function(a){return a.replace(Ua,"\\$&")},sa={focusin:0,focusout:0};c.event={add:function(a,b,d,e){if(!(a.nodeType===3||a.nodeType===8)){if(c.isWindow(a)&&a!==E&&!a.frameElement)a=E;if(d===false)d=U;var f,h;if(d.handler){f=d;d=f.handler}if(!d.guid)d.guid=c.guid++;if(h=c.data(a)){var k=a.nodeType?"events":"__events__",l=h[k],n=h.handle;if(typeof l===
-"function"){n=l.handle;l=l.events}else if(!l){a.nodeType||(h[k]=h=function(){});h.events=l={}}if(!n)h.handle=n=function(){return typeof c!=="undefined"&&!c.event.triggered?c.event.handle.apply(n.elem,arguments):A};n.elem=a;b=b.split(" ");for(var s=0,v;k=b[s++];){h=f?c.extend({},f):{handler:d,data:e};if(k.indexOf(".")>-1){v=k.split(".");k=v.shift();h.namespace=v.slice(0).sort().join(".")}else{v=[];h.namespace=""}h.type=k;if(!h.guid)h.guid=d.guid;var B=l[k],D=c.event.special[k]||{};if(!B){B=l[k]=[];
-if(!D.setup||D.setup.call(a,e,v,n)===false)if(a.addEventListener)a.addEventListener(k,n,false);else a.attachEvent&&a.attachEvent("on"+k,n)}if(D.add){D.add.call(a,h);if(!h.handler.guid)h.handler.guid=d.guid}B.push(h);c.event.global[k]=true}a=null}}},global:{},remove:function(a,b,d,e){if(!(a.nodeType===3||a.nodeType===8)){if(d===false)d=U;var f,h,k=0,l,n,s,v,B,D,H=a.nodeType?"events":"__events__",w=c.data(a),G=w&&w[H];if(w&&G){if(typeof G==="function"){w=G;G=G.events}if(b&&b.type){d=b.handler;b=b.type}if(!b||
-typeof b==="string"&&b.charAt(0)==="."){b=b||"";for(f in G)c.event.remove(a,f+b)}else{for(b=b.split(" ");f=b[k++];){v=f;l=f.indexOf(".")<0;n=[];if(!l){n=f.split(".");f=n.shift();s=RegExp("(^|\\.)"+c.map(n.slice(0).sort(),Va).join("\\.(?:.*\\.)?")+"(\\.|$)")}if(B=G[f])if(d){v=c.event.special[f]||{};for(h=e||0;h<B.length;h++){D=B[h];if(d.guid===D.guid){if(l||s.test(D.namespace)){e==null&&B.splice(h--,1);v.remove&&v.remove.call(a,D)}if(e!=null)break}}if(B.length===0||e!=null&&B.length===1){if(!v.teardown||
-v.teardown.call(a,n)===false)c.removeEvent(a,f,w.handle);delete G[f]}}else for(h=0;h<B.length;h++){D=B[h];if(l||s.test(D.namespace)){c.event.remove(a,v,D.handler,h);B.splice(h--,1)}}}if(c.isEmptyObject(G)){if(b=w.handle)b.elem=null;delete w.events;delete w.handle;if(typeof w==="function")c.removeData(a,H);else c.isEmptyObject(w)&&c.removeData(a)}}}}},trigger:function(a,b,d,e){var f=a.type||a;if(!e){a=typeof a==="object"?a[c.expando]?a:c.extend(c.Event(f),a):c.Event(f);if(f.indexOf("!")>=0){a.type=
-f=f.slice(0,-1);a.exclusive=true}if(!d){a.stopPropagation();c.event.global[f]&&c.each(c.cache,function(){this.events&&this.events[f]&&c.event.trigger(a,b,this.handle.elem)})}if(!d||d.nodeType===3||d.nodeType===8)return A;a.result=A;a.target=d;b=c.makeArray(b);b.unshift(a)}a.currentTarget=d;(e=d.nodeType?c.data(d,"handle"):(c.data(d,"__events__")||{}).handle)&&e.apply(d,b);e=d.parentNode||d.ownerDocument;try{if(!(d&&d.nodeName&&c.noData[d.nodeName.toLowerCase()]))if(d["on"+f]&&d["on"+f].apply(d,b)===
-false){a.result=false;a.preventDefault()}}catch(h){}if(!a.isPropagationStopped()&&e)c.event.trigger(a,b,e,true);else if(!a.isDefaultPrevented()){e=a.target;var k,l=f.replace(X,""),n=c.nodeName(e,"a")&&l==="click",s=c.event.special[l]||{};if((!s._default||s._default.call(d,a)===false)&&!n&&!(e&&e.nodeName&&c.noData[e.nodeName.toLowerCase()])){try{if(e[l]){if(k=e["on"+l])e["on"+l]=null;c.event.triggered=true;e[l]()}}catch(v){}if(k)e["on"+l]=k;c.event.triggered=false}}},handle:function(a){var b,d,e;
-d=[];var f,h=c.makeArray(arguments);a=h[0]=c.event.fix(a||E.event);a.currentTarget=this;b=a.type.indexOf(".")<0&&!a.exclusive;if(!b){e=a.type.split(".");a.type=e.shift();d=e.slice(0).sort();e=RegExp("(^|\\.)"+d.join("\\.(?:.*\\.)?")+"(\\.|$)")}a.namespace=a.namespace||d.join(".");f=c.data(this,this.nodeType?"events":"__events__");if(typeof f==="function")f=f.events;d=(f||{})[a.type];if(f&&d){d=d.slice(0);f=0;for(var k=d.length;f<k;f++){var l=d[f];if(b||e.test(l.namespace)){a.handler=l.handler;a.data=
-l.data;a.handleObj=l;l=l.handler.apply(this,h);if(l!==A){a.result=l;if(l===false){a.preventDefault();a.stopPropagation()}}if(a.isImmediatePropagationStopped())break}}}return a.result},props:"altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode layerX layerY metaKey newValue offsetX offsetY pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target toElement view wheelDelta which".split(" "),
-fix:function(a){if(a[c.expando])return a;var b=a;a=c.Event(b);for(var d=this.props.length,e;d;){e=this.props[--d];a[e]=b[e]}if(!a.target)a.target=a.srcElement||u;if(a.target.nodeType===3)a.target=a.target.parentNode;if(!a.relatedTarget&&a.fromElement)a.relatedTarget=a.fromElement===a.target?a.toElement:a.fromElement;if(a.pageX==null&&a.clientX!=null){b=u.documentElement;d=u.body;a.pageX=a.clientX+(b&&b.scrollLeft||d&&d.scrollLeft||0)-(b&&b.clientLeft||d&&d.clientLeft||0);a.pageY=a.clientY+(b&&b.scrollTop||
-d&&d.scrollTop||0)-(b&&b.clientTop||d&&d.clientTop||0)}if(a.which==null&&(a.charCode!=null||a.keyCode!=null))a.which=a.charCode!=null?a.charCode:a.keyCode;if(!a.metaKey&&a.ctrlKey)a.metaKey=a.ctrlKey;if(!a.which&&a.button!==A)a.which=a.button&1?1:a.button&2?3:a.button&4?2:0;return a},guid:1E8,proxy:c.proxy,special:{ready:{setup:c.bindReady,teardown:c.noop},live:{add:function(a){c.event.add(this,Y(a.origType,a.selector),c.extend({},a,{handler:Ga,guid:a.handler.guid}))},remove:function(a){c.event.remove(this,
-Y(a.origType,a.selector),a)}},beforeunload:{setup:function(a,b,d){if(c.isWindow(this))this.onbeforeunload=d},teardown:function(a,b){if(this.onbeforeunload===b)this.onbeforeunload=null}}}};c.removeEvent=u.removeEventListener?function(a,b,d){a.removeEventListener&&a.removeEventListener(b,d,false)}:function(a,b,d){a.detachEvent&&a.detachEvent("on"+b,d)};c.Event=function(a){if(!this.preventDefault)return new c.Event(a);if(a&&a.type){this.originalEvent=a;this.type=a.type}else this.type=a;this.timeStamp=
-c.now();this[c.expando]=true};c.Event.prototype={preventDefault:function(){this.isDefaultPrevented=ba;var a=this.originalEvent;if(a)if(a.preventDefault)a.preventDefault();else a.returnValue=false},stopPropagation:function(){this.isPropagationStopped=ba;var a=this.originalEvent;if(a){a.stopPropagation&&a.stopPropagation();a.cancelBubble=true}},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=ba;this.stopPropagation()},isDefaultPrevented:U,isPropagationStopped:U,isImmediatePropagationStopped:U};
-var ta=function(a){var b=a.relatedTarget;try{for(;b&&b!==this;)b=b.parentNode;if(b!==this){a.type=a.data;c.event.handle.apply(this,arguments)}}catch(d){}},ua=function(a){a.type=a.data;c.event.handle.apply(this,arguments)};c.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(a,b){c.event.special[a]={setup:function(d){c.event.add(this,b,d&&d.selector?ua:ta,a)},teardown:function(d){c.event.remove(this,b,d&&d.selector?ua:ta)}}});if(!c.support.submitBubbles)c.event.special.submit={setup:function(){if(this.nodeName.toLowerCase()!==
-"form"){c.event.add(this,"click.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="submit"||d==="image")&&c(b).closest("form").length){a.liveFired=A;return ja("submit",this,arguments)}});c.event.add(this,"keypress.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="text"||d==="password")&&c(b).closest("form").length&&a.keyCode===13){a.liveFired=A;return ja("submit",this,arguments)}})}else return false},teardown:function(){c.event.remove(this,".specialSubmit")}};if(!c.support.changeBubbles){var V,
-va=function(a){var b=a.type,d=a.value;if(b==="radio"||b==="checkbox")d=a.checked;else if(b==="select-multiple")d=a.selectedIndex>-1?c.map(a.options,function(e){return e.selected}).join("-"):"";else if(a.nodeName.toLowerCase()==="select")d=a.selectedIndex;return d},Z=function(a,b){var d=a.target,e,f;if(!(!ha.test(d.nodeName)||d.readOnly)){e=c.data(d,"_change_data");f=va(d);if(a.type!=="focusout"||d.type!=="radio")c.data(d,"_change_data",f);if(!(e===A||f===e))if(e!=null||f){a.type="change";a.liveFired=
-A;return c.event.trigger(a,b,d)}}};c.event.special.change={filters:{focusout:Z,beforedeactivate:Z,click:function(a){var b=a.target,d=b.type;if(d==="radio"||d==="checkbox"||b.nodeName.toLowerCase()==="select")return Z.call(this,a)},keydown:function(a){var b=a.target,d=b.type;if(a.keyCode===13&&b.nodeName.toLowerCase()!=="textarea"||a.keyCode===32&&(d==="checkbox"||d==="radio")||d==="select-multiple")return Z.call(this,a)},beforeactivate:function(a){a=a.target;c.data(a,"_change_data",va(a))}},setup:function(){if(this.type===
-"file")return false;for(var a in V)c.event.add(this,a+".specialChange",V[a]);return ha.test(this.nodeName)},teardown:function(){c.event.remove(this,".specialChange");return ha.test(this.nodeName)}};V=c.event.special.change.filters;V.focus=V.beforeactivate}u.addEventListener&&c.each({focus:"focusin",blur:"focusout"},function(a,b){function d(e){e=c.event.fix(e);e.type=b;return c.event.trigger(e,null,e.target)}c.event.special[b]={setup:function(){sa[b]++===0&&u.addEventListener(a,d,true)},teardown:function(){--sa[b]===
-0&&u.removeEventListener(a,d,true)}}});c.each(["bind","one"],function(a,b){c.fn[b]=function(d,e,f){if(typeof d==="object"){for(var h in d)this[b](h,e,d[h],f);return this}if(c.isFunction(e)||e===false){f=e;e=A}var k=b==="one"?c.proxy(f,function(n){c(this).unbind(n,k);return f.apply(this,arguments)}):f;if(d==="unload"&&b!=="one")this.one(d,e,f);else{h=0;for(var l=this.length;h<l;h++)c.event.add(this[h],d,k,e)}return this}});c.fn.extend({unbind:function(a,b){if(typeof a==="object"&&!a.preventDefault)for(var d in a)this.unbind(d,
-a[d]);else{d=0;for(var e=this.length;d<e;d++)c.event.remove(this[d],a,b)}return this},delegate:function(a,b,d,e){return this.live(b,d,e,a)},undelegate:function(a,b,d){return arguments.length===0?this.unbind("live"):this.die(b,null,d,a)},trigger:function(a,b){return this.each(function(){c.event.trigger(a,b,this)})},triggerHandler:function(a,b){if(this[0]){var d=c.Event(a);d.preventDefault();d.stopPropagation();c.event.trigger(d,b,this[0]);return d.result}},toggle:function(a){for(var b=arguments,d=
-1;d<b.length;)c.proxy(a,b[d++]);return this.click(c.proxy(a,function(e){var f=(c.data(this,"lastToggle"+a.guid)||0)%d;c.data(this,"lastToggle"+a.guid,f+1);e.preventDefault();return b[f].apply(this,arguments)||false}))},hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}});var wa={focus:"focusin",blur:"focusout",mouseenter:"mouseover",mouseleave:"mouseout"};c.each(["live","die"],function(a,b){c.fn[b]=function(d,e,f,h){var k,l=0,n,s,v=h||this.selector;h=h?this:c(this.context);if(typeof d===
-"object"&&!d.preventDefault){for(k in d)h[b](k,e,d[k],v);return this}if(c.isFunction(e)){f=e;e=A}for(d=(d||"").split(" ");(k=d[l++])!=null;){n=X.exec(k);s="";if(n){s=n[0];k=k.replace(X,"")}if(k==="hover")d.push("mouseenter"+s,"mouseleave"+s);else{n=k;if(k==="focus"||k==="blur"){d.push(wa[k]+s);k+=s}else k=(wa[k]||k)+s;if(b==="live"){s=0;for(var B=h.length;s<B;s++)c.event.add(h[s],"live."+Y(k,v),{data:e,selector:v,handler:f,origType:k,origHandler:f,preType:n})}else h.unbind("live."+Y(k,v),f)}}return this}});
-c.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error".split(" "),function(a,b){c.fn[b]=function(d,e){if(e==null){e=d;d=null}return arguments.length>0?this.bind(b,d,e):this.trigger(b)};if(c.attrFn)c.attrFn[b]=true});E.attachEvent&&!E.addEventListener&&c(E).bind("unload",function(){for(var a in c.cache)if(c.cache[a].handle)try{c.event.remove(c.cache[a].handle.elem)}catch(b){}});
-(function(){function a(g,j,o,m,p,q){p=0;for(var t=m.length;p<t;p++){var x=m[p];if(x){x=x[g];for(var C=false;x;){if(x.sizcache===o){C=m[x.sizset];break}if(x.nodeType===1&&!q){x.sizcache=o;x.sizset=p}if(x.nodeName.toLowerCase()===j){C=x;break}x=x[g]}m[p]=C}}}function b(g,j,o,m,p,q){p=0;for(var t=m.length;p<t;p++){var x=m[p];if(x){x=x[g];for(var C=false;x;){if(x.sizcache===o){C=m[x.sizset];break}if(x.nodeType===1){if(!q){x.sizcache=o;x.sizset=p}if(typeof j!=="string"){if(x===j){C=true;break}}else if(l.filter(j,
-[x]).length>0){C=x;break}}x=x[g]}m[p]=C}}}var d=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,e=0,f=Object.prototype.toString,h=false,k=true;[0,0].sort(function(){k=false;return 0});var l=function(g,j,o,m){o=o||[];var p=j=j||u;if(j.nodeType!==1&&j.nodeType!==9)return[];if(!g||typeof g!=="string")return o;var q=[],t,x,C,P,N=true,R=l.isXML(j),Q=g,L;do{d.exec("");if(t=d.exec(Q)){Q=t[3];q.push(t[1]);if(t[2]){P=t[3];
-break}}}while(t);if(q.length>1&&s.exec(g))if(q.length===2&&n.relative[q[0]])x=M(q[0]+q[1],j);else for(x=n.relative[q[0]]?[j]:l(q.shift(),j);q.length;){g=q.shift();if(n.relative[g])g+=q.shift();x=M(g,x)}else{if(!m&&q.length>1&&j.nodeType===9&&!R&&n.match.ID.test(q[0])&&!n.match.ID.test(q[q.length-1])){t=l.find(q.shift(),j,R);j=t.expr?l.filter(t.expr,t.set)[0]:t.set[0]}if(j){t=m?{expr:q.pop(),set:D(m)}:l.find(q.pop(),q.length===1&&(q[0]==="~"||q[0]==="+")&&j.parentNode?j.parentNode:j,R);x=t.expr?l.filter(t.expr,
-t.set):t.set;if(q.length>0)C=D(x);else N=false;for(;q.length;){t=L=q.pop();if(n.relative[L])t=q.pop();else L="";if(t==null)t=j;n.relative[L](C,t,R)}}else C=[]}C||(C=x);C||l.error(L||g);if(f.call(C)==="[object Array]")if(N)if(j&&j.nodeType===1)for(g=0;C[g]!=null;g++){if(C[g]&&(C[g]===true||C[g].nodeType===1&&l.contains(j,C[g])))o.push(x[g])}else for(g=0;C[g]!=null;g++)C[g]&&C[g].nodeType===1&&o.push(x[g]);else o.push.apply(o,C);else D(C,o);if(P){l(P,p,o,m);l.uniqueSort(o)}return o};l.uniqueSort=function(g){if(w){h=
-k;g.sort(w);if(h)for(var j=1;j<g.length;j++)g[j]===g[j-1]&&g.splice(j--,1)}return g};l.matches=function(g,j){return l(g,null,null,j)};l.matchesSelector=function(g,j){return l(j,null,null,[g]).length>0};l.find=function(g,j,o){var m;if(!g)return[];for(var p=0,q=n.order.length;p<q;p++){var t=n.order[p],x;if(x=n.leftMatch[t].exec(g)){var C=x[1];x.splice(1,1);if(C.substr(C.length-1)!=="\\"){x[1]=(x[1]||"").replace(/\\/g,"");m=n.find[t](x,j,o);if(m!=null){g=g.replace(n.match[t],"");break}}}}m||(m=j.getElementsByTagName("*"));
-return{set:m,expr:g}};l.filter=function(g,j,o,m){for(var p=g,q=[],t=j,x,C,P=j&&j[0]&&l.isXML(j[0]);g&&j.length;){for(var N in n.filter)if((x=n.leftMatch[N].exec(g))!=null&&x[2]){var R=n.filter[N],Q,L;L=x[1];C=false;x.splice(1,1);if(L.substr(L.length-1)!=="\\"){if(t===q)q=[];if(n.preFilter[N])if(x=n.preFilter[N](x,t,o,q,m,P)){if(x===true)continue}else C=Q=true;if(x)for(var i=0;(L=t[i])!=null;i++)if(L){Q=R(L,x,i,t);var r=m^!!Q;if(o&&Q!=null)if(r)C=true;else t[i]=false;else if(r){q.push(L);C=true}}if(Q!==
-A){o||(t=q);g=g.replace(n.match[N],"");if(!C)return[];break}}}if(g===p)if(C==null)l.error(g);else break;p=g}return t};l.error=function(g){throw"Syntax error, unrecognized expression: "+g;};var n=l.selectors={order:["ID","NAME","TAG"],match:{ID:/#((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,CLASS:/\.((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,NAME:/\[name=['"]*((?:[\w\u00c0-\uFFFF\-]|\\.)+)['"]*\]/,ATTR:/\[\s*((?:[\w\u00c0-\uFFFF\-]|\\.)+)\s*(?:(\S?=)\s*(['"]*)(.*?)\3|)\s*\]/,TAG:/^((?:[\w\u00c0-\uFFFF\*\-]|\\.)+)/,CHILD:/:(only|nth|last|first)-child(?:\((even|odd|[\dn+\-]*)\))?/,
-POS:/:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^\-]|$)/,PSEUDO:/:((?:[\w\u00c0-\uFFFF\-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/},leftMatch:{},attrMap:{"class":"className","for":"htmlFor"},attrHandle:{href:function(g){return g.getAttribute("href")}},relative:{"+":function(g,j){var o=typeof j==="string",m=o&&!/\W/.test(j);o=o&&!m;if(m)j=j.toLowerCase();m=0;for(var p=g.length,q;m<p;m++)if(q=g[m]){for(;(q=q.previousSibling)&&q.nodeType!==1;);g[m]=o||q&&q.nodeName.toLowerCase()===
-j?q||false:q===j}o&&l.filter(j,g,true)},">":function(g,j){var o=typeof j==="string",m,p=0,q=g.length;if(o&&!/\W/.test(j))for(j=j.toLowerCase();p<q;p++){if(m=g[p]){o=m.parentNode;g[p]=o.nodeName.toLowerCase()===j?o:false}}else{for(;p<q;p++)if(m=g[p])g[p]=o?m.parentNode:m.parentNode===j;o&&l.filter(j,g,true)}},"":function(g,j,o){var m=e++,p=b,q;if(typeof j==="string"&&!/\W/.test(j)){q=j=j.toLowerCase();p=a}p("parentNode",j,m,g,q,o)},"~":function(g,j,o){var m=e++,p=b,q;if(typeof j==="string"&&!/\W/.test(j)){q=
-j=j.toLowerCase();p=a}p("previousSibling",j,m,g,q,o)}},find:{ID:function(g,j,o){if(typeof j.getElementById!=="undefined"&&!o)return(g=j.getElementById(g[1]))&&g.parentNode?[g]:[]},NAME:function(g,j){if(typeof j.getElementsByName!=="undefined"){for(var o=[],m=j.getElementsByName(g[1]),p=0,q=m.length;p<q;p++)m[p].getAttribute("name")===g[1]&&o.push(m[p]);return o.length===0?null:o}},TAG:function(g,j){return j.getElementsByTagName(g[1])}},preFilter:{CLASS:function(g,j,o,m,p,q){g=" "+g[1].replace(/\\/g,
-"")+" ";if(q)return g;q=0;for(var t;(t=j[q])!=null;q++)if(t)if(p^(t.className&&(" "+t.className+" ").replace(/[\t\n]/g," ").indexOf(g)>=0))o||m.push(t);else if(o)j[q]=false;return false},ID:function(g){return g[1].replace(/\\/g,"")},TAG:function(g){return g[1].toLowerCase()},CHILD:function(g){if(g[1]==="nth"){var j=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(g[2]==="even"&&"2n"||g[2]==="odd"&&"2n+1"||!/\D/.test(g[2])&&"0n+"+g[2]||g[2]);g[2]=j[1]+(j[2]||1)-0;g[3]=j[3]-0}g[0]=e++;return g},ATTR:function(g,j,o,
-m,p,q){j=g[1].replace(/\\/g,"");if(!q&&n.attrMap[j])g[1]=n.attrMap[j];if(g[2]==="~=")g[4]=" "+g[4]+" ";return g},PSEUDO:function(g,j,o,m,p){if(g[1]==="not")if((d.exec(g[3])||"").length>1||/^\w/.test(g[3]))g[3]=l(g[3],null,null,j);else{g=l.filter(g[3],j,o,true^p);o||m.push.apply(m,g);return false}else if(n.match.POS.test(g[0])||n.match.CHILD.test(g[0]))return true;return g},POS:function(g){g.unshift(true);return g}},filters:{enabled:function(g){return g.disabled===false&&g.type!=="hidden"},disabled:function(g){return g.disabled===
-true},checked:function(g){return g.checked===true},selected:function(g){return g.selected===true},parent:function(g){return!!g.firstChild},empty:function(g){return!g.firstChild},has:function(g,j,o){return!!l(o[3],g).length},header:function(g){return/h\d/i.test(g.nodeName)},text:function(g){return"text"===g.type},radio:function(g){return"radio"===g.type},checkbox:function(g){return"checkbox"===g.type},file:function(g){return"file"===g.type},password:function(g){return"password"===g.type},submit:function(g){return"submit"===
-g.type},image:function(g){return"image"===g.type},reset:function(g){return"reset"===g.type},button:function(g){return"button"===g.type||g.nodeName.toLowerCase()==="button"},input:function(g){return/input|select|textarea|button/i.test(g.nodeName)}},setFilters:{first:function(g,j){return j===0},last:function(g,j,o,m){return j===m.length-1},even:function(g,j){return j%2===0},odd:function(g,j){return j%2===1},lt:function(g,j,o){return j<o[3]-0},gt:function(g,j,o){return j>o[3]-0},nth:function(g,j,o){return o[3]-
-0===j},eq:function(g,j,o){return o[3]-0===j}},filter:{PSEUDO:function(g,j,o,m){var p=j[1],q=n.filters[p];if(q)return q(g,o,j,m);else if(p==="contains")return(g.textContent||g.innerText||l.getText([g])||"").indexOf(j[3])>=0;else if(p==="not"){j=j[3];o=0;for(m=j.length;o<m;o++)if(j[o]===g)return false;return true}else l.error("Syntax error, unrecognized expression: "+p)},CHILD:function(g,j){var o=j[1],m=g;switch(o){case "only":case "first":for(;m=m.previousSibling;)if(m.nodeType===1)return false;if(o===
-"first")return true;m=g;case "last":for(;m=m.nextSibling;)if(m.nodeType===1)return false;return true;case "nth":o=j[2];var p=j[3];if(o===1&&p===0)return true;var q=j[0],t=g.parentNode;if(t&&(t.sizcache!==q||!g.nodeIndex)){var x=0;for(m=t.firstChild;m;m=m.nextSibling)if(m.nodeType===1)m.nodeIndex=++x;t.sizcache=q}m=g.nodeIndex-p;return o===0?m===0:m%o===0&&m/o>=0}},ID:function(g,j){return g.nodeType===1&&g.getAttribute("id")===j},TAG:function(g,j){return j==="*"&&g.nodeType===1||g.nodeName.toLowerCase()===
-j},CLASS:function(g,j){return(" "+(g.className||g.getAttribute("class"))+" ").indexOf(j)>-1},ATTR:function(g,j){var o=j[1];o=n.attrHandle[o]?n.attrHandle[o](g):g[o]!=null?g[o]:g.getAttribute(o);var m=o+"",p=j[2],q=j[4];return o==null?p==="!=":p==="="?m===q:p==="*="?m.indexOf(q)>=0:p==="~="?(" "+m+" ").indexOf(q)>=0:!q?m&&o!==false:p===&qu