author     catechism <zerbie@gmail.com>  2009-04-30 18:01:17 -0500
committer  catechism <zerbie@gmail.com>  2009-04-30 18:01:17 -0500
commit     033da9e5ff584c6c6f95c56b0826f3aee45804b1 (patch)
tree       736985529f45a9ec45e1f7b64291c0c5ef7b66f7
parent     b4c2964b332b5c2b445b376f2ea2670395af056b (diff)
download   nose-033da9e5ff584c6c6f95c56b0826f3aee45804b1.tar.gz
i spilled some red ink on the plugin documentation.
-rw-r--r--  functional_tests/doc_tests/test_multiprocess/multiprocess.rst  65
-rw-r--r--  nose/plugins/allmodules.py      6
-rw-r--r--  nose/plugins/attrib.py          6
-rw-r--r--  nose/plugins/capture.py         6
-rw-r--r--  nose/plugins/collect.py         8
-rw-r--r--  nose/plugins/cover.py          17
-rw-r--r--  nose/plugins/debug.py           6
-rw-r--r--  nose/plugins/deprecated.py     15
-rw-r--r--  nose/plugins/doctests.py       35
-rw-r--r--  nose/plugins/failuredetail.py  12
-rw-r--r--  nose/plugins/isolate.py        23
-rw-r--r--  nose/plugins/logcapture.py     31
-rw-r--r--  nose/plugins/multiprocess.py   61
-rw-r--r--  nose/plugins/prof.py           13
-rw-r--r--  nose/plugins/skip.py            2
-rw-r--r--  nose/plugins/testid.py         41
-rw-r--r--  nose/plugins/xunit.py           6
17 files changed, 169 insertions, 184 deletions
diff --git a/functional_tests/doc_tests/test_multiprocess/multiprocess.rst b/functional_tests/doc_tests/test_multiprocess/multiprocess.rst
index 1cfa0ab..df11a4f 100644
--- a/functional_tests/doc_tests/test_multiprocess/multiprocess.rst
+++ b/functional_tests/doc_tests/test_multiprocess/multiprocess.rst
@@ -9,12 +9,10 @@ Parallel Testing with nose
..
Using the `nose.plugin.multiprocess` plugin, you can parallelize a
-test run across a configurable number of worker processes. This can
-speed up CPU-bound test runs (as long as the number of work
-processeses is around the number of processors or cores available),
-but is mainly useful for IO-bound tests which can benefit from greater
-parallelization, since most of the tests spend most of their time
-waiting for data to arrive from someplace else.
+test run across a configurable number of worker processes. While this can
+speed up CPU-bound test runs, it is mainly useful for IO-bound tests
+that spend most of their time waiting for data to arrive from someplace
+else and can benefit from parallelization.
.. _multiprocessing : http://code.google.com/p/python-multiprocessing/
@@ -27,15 +25,16 @@ long as the slowest test. This ideal is not attainable in all cases, however,
because many test suites depend on context (class, module or package)
fixtures.
-The multiprocess plugin can't know -- unless you tell it -- whether a given
-context fixture is re-entrant (that is, can be called many times
-concurrently), or may be shared among tests running in different processes, or
-must be run once and only once for a given set of tests in the same process as
-the tests. Therefore, if a context has fixtures, the default behavior is to
-dispatch the entire context suite to a worker as a unit, so that the fixtures
-are run once, in the same process as the tests. That of course how they are
-run when the multiprocess plugin is not active and all tests are run in a
-single process.
+Some context fixtures are re-entrant -- that is, they can be called many times
+concurrently. Other context fixtures can be shared among tests running in
+different processes. Still others must be run once and only once for a given
+set of tests, and must be in the same process as the tests themselves.
+
+The plugin can't know the difference between these types of context fixtures
+unless you tell it, so the default behavior is to dispatch the entire context
+suite to a worker as a unit. This way, the fixtures are run once, in the same
+process as the tests. (That, of course, is how they are run when the plugin
+is not active: All tests are run in a single process.)
Controlling distribution
^^^^^^^^^^^^^^^^^^^^^^^^
@@ -43,7 +42,7 @@ Controlling distribution
There are two context-level variables that you can use to control this default
behavior.
-If a context's fixtures are re-entrant, set `_multiprocess_can_split_ = True`
+If a context's fixtures are re-entrant, set ``_multiprocess_can_split_ = True``
in the context, and the plugin will dispatch tests in suites bound to that
context as if the context had no fixtures. This means that the fixtures will
execute multiple times, typically once per test, and concurrently.
@@ -67,7 +66,7 @@ A class might look like::
Alternatively, if a context's fixtures may only be run once, or may not run
concurrently, but *may* be shared by tests running in different processes
-- for instance a package-level fixture that starts an external http server or
-initializes a shared database -- then set `_multiprocess_shared_ = True` in
+initializes a shared database -- then set ``_multiprocess_shared_ = True`` in
the context. Fixtures for contexts so marked will execute in the primary nose
process, and tests in those contexts will be individually dispatched to run in
parallel.
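
For illustration, a module marked as shared might look like this (a
hypothetical sketch; the module contents are ours, using the same
module-level ``setup``/``teardown`` fixture names as the example below)::

    _multiprocess_shared_ = True

    def setup():
        # runs once, in the primary nose process: start an external
        # http server, initialize a shared database, etc.
        pass

    def teardown():
        pass

    def test_uses_shared_resource():
        # dispatched individually to a worker process
        assert True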
@@ -95,7 +94,7 @@ Example
~~~~~~~
Consider three versions of the same test suite. One
-is marked `_multiprocess_shared_`, another `_multiprocess_can_split_`,
+is marked ``_multiprocess_shared_``, another ``_multiprocess_can_split_``,
and the third is unmarked. They all define the same fixtures:
called = []
@@ -108,7 +107,7 @@ and the third is unmarked. They all define the same fixtures:
print "teardown called"
called.append('teardown')
-And each has two tests that just test that `setup()` has been called
+And each has two tests that just test that ``setup()`` has been called
once and only once.
When run without the multiprocess plugin, fixtures for the shared,
@@ -173,12 +172,12 @@ And the module that marks its fixtures as re-entrant.
<BLANKLINE>
OK
-However, when run with the `--processes=2` switch, each test module
+However, when run with the ``--processes=2`` switch, each test module
behaves differently.
>>> from nose.plugins.multiprocess import MultiProcess
-The module marked `_multiprocess_shared_` executes correctly, although as with
+The module marked ``_multiprocess_shared_`` executes correctly, although as with
any use of the multiprocess plugin, the order in which the tests execute is
indeterminate.
@@ -202,14 +201,14 @@ Then we can run the tests again with the multiprocess plugin active.
<BLANKLINE>
OK
-As does the one not marked -- however in this case, `--processes=2`
+As does the one not marked -- however in this case, ``--processes=2``
will do *nothing at all*: since the tests are in a module with
unmarked fixtures, the entire test module will be dispatched to a
single runner process.
-However, the module marked `_multiprocess_can_split_` will fail, since
+However, the module marked ``_multiprocess_can_split_`` will fail, since
the fixtures *are not reentrant*. A module such as this *must not* be
-marked `_multiprocess_can_split_`, or tests will fail in one or more
+marked ``_multiprocess_can_split_``, or tests will fail in one or more
runner processes as fixtures are re-executed.
We have to reset all of the test modules again.
@@ -230,25 +229,25 @@ Then we can run again and see the failures.
Other differences in test running
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The main difference between using the multiprocess plugin and not is obviously
-that tests run concurrently under multiprocess. There are a few other
-differences that may also impact your test suite:
+The main difference between using the multiprocess plugin and not doing so
+is obviously that tests run concurrently under multiprocess. However, there
+are a few other differences that may impact your test suite:
* More tests may be found
Because tests are dispatched to worker processes by name, a worker
process may find and run tests in a module that would not be found during a
- normal test run. For instance, if a non-test module contains a testlike
- function, that function would be discovered as a test in a worker process,
+ normal test run. For instance, if a non-test module contains a test-like
+ function, that function would be discovered as a test in a worker process
if the entire module is dispatched to the worker. This is because worker
processes load tests in *directed* mode -- the same way that nose loads
- tests when you explicitly name a module -- rather than *discovered* mode,
+ tests when you explicitly name a module -- rather than in *discovered* mode,
the mode nose uses when looking for tests in a directory.
* Out-of-order output
Test results are collected by workers and returned to the master process for
- output. Since difference processes may complete their tests at different
+ output. Since different processes may complete their tests at different
times, test result output order is not determinate.
* Plugin interaction warning
@@ -262,9 +261,9 @@ differences that may also impact your test suite:
* Python 2.6 warning
This is unlikely to impact you unless you are writing tests for nose itself,
- but be aware that under python 2.6, the multprocess plugin is not
+ but be aware that under python 2.6, the multiprocess plugin is not
re-entrant. For example, when running nose with the plugin active, you can't
use subprocess to launch another copy of nose that also uses the
multiprocess plugin. This is why this test is skipped under python 2.6 when
- run with the --processes switch.
+ run with the ``--processes`` switch.
diff --git a/nose/plugins/allmodules.py b/nose/plugins/allmodules.py
index 7a960fd..1ccd777 100644
--- a/nose/plugins/allmodules.py
+++ b/nose/plugins/allmodules.py
@@ -1,4 +1,4 @@
-"""Use the AllModules plugin by passing :option:`--all-modules` or setting the
+"""Use the AllModules plugin by passing ``--all-modules`` or setting the
NOSE_ALL_MODULES environment variable to enable collection and execution of
tests in all python modules. Normal nose behavior is to look for tests only in
modules that match testMatch.
@@ -11,8 +11,8 @@ More information: :doc:`../doc_tests/test_allmodules/test_allmodules`
from what nose normally considers non-test modules, such as
the :doc:`doctest plugin <doctests>`. This is because any given
object in a module can't be loaded both by a plugin and the normal nose
- :class:`test loader <nose.loader.TestLoader>`. Also, if you have test-like
- functions or classes in non-test modules that are not tests, you will
+ :class:`test loader <nose.loader.TestLoader>`. Also, if you have functions
+ or classes in non-test modules that look like tests but aren't, you will
likely see errors as nose attempts to run them as tests.
"""
diff --git a/nose/plugins/attrib.py b/nose/plugins/attrib.py
index 544e926..c1b09c1 100644
--- a/nose/plugins/attrib.py
+++ b/nose/plugins/attrib.py
@@ -1,8 +1,8 @@
"""Attribute selector plugin.
Oftentimes when testing you will want to select tests based on
-criteria rather then simply by filename. For example, you might want
-to run all tests except for the slow ones. You can do this with the
+criteria rather than simply by filename. For example, you might want
+to run all tests except for the slow ones. You can do this with the
Attribute selector plugin by setting attributes on your test methods.
Here is an example:
@@ -30,7 +30,7 @@ Here's how to set ``slow=1`` like above with the decorator:
import urllib
# commence slowness...
-And here's how to set an attribute with a specific value :
+And here's how to set an attribute with a specific value:
.. code-block:: python
diff --git a/nose/plugins/capture.py b/nose/plugins/capture.py
index e2230bd..7421276 100644
--- a/nose/plugins/capture.py
+++ b/nose/plugins/capture.py
@@ -1,7 +1,7 @@
"""
-This plugin captures stdout during test execution, appending any
-output captured to the error or failure output, should the test fail
-or raise an error. It is enabled by default but may be disabled with
+This plugin captures stdout during test execution. If the test fails
+or raises an error, the captured output will be appended to the error
+or failure output. It is enabled by default but can be disabled with
the options ``-s`` or ``--nocapture``.
:Options:
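
For example, a failing test's output is replayed in its report (a
hypothetical sketch; with ``-s``, the print would appear immediately
instead)::

    def test_fails_with_output():
        print "intermediate state: 42"   # stdout captured by the plugin
        assert False, "deliberate failure; the captured print is appended"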
diff --git a/nose/plugins/collect.py b/nose/plugins/collect.py
index c47002e..c029492 100644
--- a/nose/plugins/collect.py
+++ b/nose/plugins/collect.py
@@ -1,11 +1,11 @@
"""
-This plugin bypasses the actual execution of tests, instead just collecting
+This plugin bypasses the actual execution of tests, and instead just collects
test names. Fixtures are also bypassed, so running nosetests with the
collection plugin enabled should be very quick.
-This plugin is useful in combination with the testid plugin (--with-id). Run
-both together to get an indexed list of all tests that will enable you to run
-individual tests by index number.
+This plugin is useful in combination with the testid plugin (``--with-id``).
+Run both together to get an indexed list of all tests, which will enable you to
+run individual tests by index number.
This plugin is also useful for counting tests in a test suite, and making
people watching your demo think all of your tests pass.
diff --git a/nose/plugins/cover.py b/nose/plugins/cover.py
index 5cbf7bf..3fc60c7 100644
--- a/nose/plugins/cover.py
+++ b/nose/plugins/cover.py
@@ -1,11 +1,11 @@
"""If you have Ned Batchelder's coverage_ module installed, you may activate a
-coverage report with the --with-coverage switch or NOSE_WITH_COVERAGE
+coverage report with the ``--with-coverage`` switch or NOSE_WITH_COVERAGE
environment variable. The coverage report will cover any python source module
imported after the start of the test run, excluding modules that match
-testMatch. If you want to include those modules too, use the --cover-tests
+testMatch. If you want to include those modules too, use the ``--cover-tests``
switch, or set the NOSE_COVER_TESTS environment variable to a true value. To
restrict the coverage report to modules from a particular package or packages,
-use the --cover-packages switch or the NOSE_COVER_PACKAGES environment
+use the ``--cover-packages`` switch or the NOSE_COVER_PACKAGES environment
variable.
.. _coverage: http://www.nedbatchelder.com/code/modules/coverage.html
@@ -51,14 +51,7 @@ Percent: %(percent)s %%<br/>
class Coverage(Plugin):
"""
- If you have Ned Batchelder's coverage module installed, you may
- activate a coverage report. The coverage report will cover any
- python source module imported after the start of the test run, excluding
- modules that match testMatch. If you want to include those modules too,
- use the --cover-tests switch, or set the NOSE_COVER_TESTS environment
- variable to a true value. To restrict the coverage report to modules from
- a particular package or packages, use the --cover-packages switch or the
- NOSE_COVER_PACKAGES environment variable.
+ Activate a coverage report using Ned Batchelder's coverage module.
"""
coverTests = False
coverPackages = None
@@ -102,7 +95,7 @@ class Coverage(Plugin):
default=env.get('NOSE_COVER_HTML_DIR', 'cover'),
dest='cover_html_dir',
metavar='DIR',
- help='Produce HTML coverage informaion in dir')
+ help='Produce HTML coverage information in dir')
def configure(self, options, config):
"""
diff --git a/nose/plugins/debug.py b/nose/plugins/debug.py
index 373b0fd..c7fc462 100644
--- a/nose/plugins/debug.py
+++ b/nose/plugins/debug.py
@@ -1,7 +1,7 @@
"""
-This plugin provides --pdb and --pdb-failures options that cause the
-test runner to drop into pdb if it encounters an error or failure,
-respectively.
+This plugin provides ``--pdb`` and ``--pdb-failures`` options. The ``--pdb``
+option will drop the test runner into pdb when it encounters an error. To
+drop into pdb on failure, use ``--pdb-failures``.
"""
import pdb
diff --git a/nose/plugins/deprecated.py b/nose/plugins/deprecated.py
index e54a151..461a26b 100644
--- a/nose/plugins/deprecated.py
+++ b/nose/plugins/deprecated.py
@@ -1,9 +1,9 @@
"""
This plugin installs a DEPRECATED error class for the :class:`DeprecatedTest`
-exception. It is enabled by default. When :class:`DeprecatedTest` is raised,
-the exception will be logged in the deprecated attribute of the result, ``D``
-or ``DEPRECATED`` (verbose) will be output, and the exception will not be
-counted as an error or failure.
+exception. When :class:`DeprecatedTest` is raised, the exception will be logged
+in the deprecated attribute of the result, ``D`` or ``DEPRECATED`` (verbose)
+will be output, and the exception will not be counted as an error or failure.
+It is enabled by default, but can be turned off by using ``--no-deprecated``.
"""
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
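
Typical use might look like this (a hypothetical test; ``DeprecatedTest`` is
the exception defined in this module)::

    from nose.plugins.deprecated import DeprecatedTest

    def test_old_api():
        raise DeprecatedTest("old_api() was removed; use new_api() instead")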
@@ -17,11 +17,8 @@ class DeprecatedTest(Exception):
class Deprecated(ErrorClassPlugin):
"""
- Plugin that installs a DEPRECATED error class for the DeprecatedTest
- exception. Enabled by default. When DeprecatedTest is raised, the
- exception will be logged in the deprecated attribute of the result, ``D``
- or ``DEPRECATED`` (verbose) will be output, and the exception will not be
- counted as an error or failure.
+ Installs a DEPRECATED error class for the DeprecatedTest exception. Enabled
+ by default.
"""
enabled = True
deprecated = ErrorClass(DeprecatedTest,
diff --git a/nose/plugins/doctests.py b/nose/plugins/doctests.py
index 8d88cf8..6b8cdd4 100644
--- a/nose/plugins/doctests.py
+++ b/nose/plugins/doctests.py
@@ -1,21 +1,20 @@
-"""Use the Doctest plugin with --with-doctest or the NOSE_WITH_DOCTEST
-environment variable to enable collection and execution of doctests. doctest_
-tests are usually included in the tested package, not grouped into packages or
-modules of their own. For this reason, nose will try to detect and run doctest
-tests only in the non-test packages it discovers in the working
-directory.
+"""Use the Doctest plugin with ``--with-doctest`` or the NOSE_WITH_DOCTEST
+environment variable to enable collection and execution of doctests_.
+Because doctests are usually included in the tested package (instead of
+being grouped into packages or modules of their own), nose only looks for
+them in the non-test packages it discovers in the working directory.
Doctests may also be placed into files other than python modules, in which
-case they can be collected and executed by using the --doctest-extension
+case they can be collected and executed by using the ``--doctest-extension``
switch or NOSE_DOCTEST_EXTENSION environment variable to indicate which file
extension(s) to load.
-When loading doctests from non-module files, you may specify how to find
-modules that contains fixtures for the tests using the --doctest-fixtures
-switch. The value of that switch will be appended to the base name of each
-doctest file loaded to produce a module name. For instance, for a doctest file
-"widgets.rst" with the switch ``--doctest_fixtures=_fixt``, fixtures will be
-loaded from the module ``widgets_fixt.py`` if it exists.
+When loading doctests from non-module files, use the ``--doctest-fixtures``
+switch to specify how to find modules containing fixtures for the tests. A
+module name will be produced by appending the value of that switch to the base
+name of each doctest file loaded. For example, a doctest file "widgets.rst"
+with the switch ``--doctest-fixtures=_fixt`` will load fixtures from the module
+``widgets_fixt.py``.
A fixtures module may define any or all of the following functions:
@@ -36,18 +35,18 @@ A fixtures module may define any or all of the following functions:
* teardown_test(test)
Called after the test, if setup_test did not raise an exception. NOTE: the
- argument passed is a doctest.DocTest instance, *not* a unittest.TestCase
+ argument passed is a doctest.DocTest instance, *not* a unittest.TestCase.
-Doctest tests are run like any other test, with the exception that output
-capture does not work, because doctest does its own output capture in the
-course of running a test.
+Doctests are run like any other test, with the exception that output
+capture does not work; doctest does its own output capture while running a
+test.
.. note ::
See :doc:`../doc_tests/test_doctest_fixtures/doctest_fixtures` for
additional documentation and examples.
-.. _doctest: http://docs.python.org/lib/module-doctest.html
+.. _doctests: http://docs.python.org/lib/module-doctest.html
"""
from __future__ import generators
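
Pulling the docstring's example together, a minimal fixtures module might look
like this (hypothetical file; ``setup_module`` is assumed to be one of the
module-level fixture names from the list above, alongside the
``teardown_test`` shown there)::

    # widgets_fixt.py -- loaded for widgets.rst when running with
    # --doctest-fixtures=_fixt
    def setup_module(module):
        # called once before any doctest in widgets.rst runs
        pass

    def teardown_test(test):
        # `test` is a doctest.DocTest instance, *not* a unittest.TestCase
        pass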
diff --git a/nose/plugins/failuredetail.py b/nose/plugins/failuredetail.py
index 6251e01..7dbc6c7 100644
--- a/nose/plugins/failuredetail.py
+++ b/nose/plugins/failuredetail.py
@@ -1,9 +1,9 @@
"""
This plugin provides assert introspection. When the plugin is enabled
-and a test failure occurs, the traceback of the failure exception is
-examined and displayed with extra context around the line where the
-exception was raised. Simple variable substitution is also performed
-in the context output to provide more debugging information.
+and a test failure occurs, the traceback is displayed with extra context
+around the line in which the exception was raised. Simple variable
+substitution is also performed in the context output to provide more
+debugging information.
"""
from nose.plugins import Plugin
@@ -11,9 +11,7 @@ from nose.inspector import inspect_traceback
class FailureDetail(Plugin):
"""
- Plugin that provides assert introspection. When a test failure occurs, the
- traceback of the failure exception is examined and displayed with extra
- context around the line where the exception was raised.
+ Plugin that provides assert introspection.
"""
score = 600 # before capture
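
As a concrete illustration (a hypothetical test), consider::

    def test_total():
        items = [1, 2, 3]
        total = sum(items)
        assert total == 7

With the plugin active, the failure report shows the assert line with
``total`` replaced by its actual value (6), along with a few lines of
surrounding context.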
diff --git a/nose/plugins/isolate.py b/nose/plugins/isolate.py
index bbad530..13235df 100644
--- a/nose/plugins/isolate.py
+++ b/nose/plugins/isolate.py
@@ -1,9 +1,9 @@
-"""Use the isolation plugin with --with-isolation or the
-NOSE_WITH_ISOLATION environment variable to clean sys.modules after
-each test module is loaded and executed.
+"""The isolation plugin resets the contents of sys.modules after running
+each test module or package. Use it by setting ``--with-isolation`` or the
+NOSE_WITH_ISOLATION environment variable.
-The isolation module is in effect similar to wrapping the following
-functions around the import and execution of each test module::
+The effects are similar to wrapping the following functions around the
+import and execution of each test module::
def setup(module):
module._mods = sys.modules.copy()
@@ -26,10 +26,13 @@ setup and teardown to run for each name, defeating the grouping that
is normally used to ensure that context setup and teardown are run the
fewest possible times for a given set of names.
-PLEASE NOTE that this plugin should not be used in conjunction with
-other plugins that assume that modules once imported will stay
-imported; for instance, it may cause very odd results when used with
-the coverage plugin.
+.. warning ::
+
+ This plugin should not be used in conjunction with other plugins
+ that assume that modules, once imported, will stay imported; for
+ instance, it may cause very odd results when used with the coverage
+ plugin.
+
"""
import logging
@@ -46,7 +49,7 @@ class IsolationPlugin(Plugin):
modules to a single test module or package. The isolation plugin
resets the contents of sys.modules after each test module or
package runs to its state before the test. PLEASE NOTE that this
- plugin should not be used with the coverage plugin in any other case
+ plugin should not be used with the coverage plugin, or in any other case
where module reloading may produce undesirable side-effects.
"""
score = 10 # I want to be last
diff --git a/nose/plugins/logcapture.py b/nose/plugins/logcapture.py
index 67acf3f..7060aa4 100644
--- a/nose/plugins/logcapture.py
+++ b/nose/plugins/logcapture.py
@@ -1,21 +1,18 @@
"""
-This plugin captures logging statements issued during test
-execution, appending any output captured to the error or failure
-output, should the test fail or raise an error. It is enabled by
-default but may be disabled with the options --nologcapture.
+This plugin captures logging statements issued during test execution. When an
+error or failure occurs, the captured log messages are attached to the running
+test in the test.capturedLogging attribute, and displayed with the error or
+failure output. It is enabled by default but can be turned off with the option
+``--nologcapture``.
-To remove any other installed logging handlers, use the
---logging-clear-handlers option.
-
-When an error or failure occurs, captures log messages are attached to
-the running test in the test.capturedLogging attribute, and added to
-the error failure output.
-
-You can filter logging statements captured with the --logging-filter option.
+You can filter captured logging statements with the ``--logging-filter`` option.
If set, it specifies which logger(s) will be captured; loggers that do not match
-will be passed. Example: specifying --logging-filter=sqlalchemy,myapp
-will ensure that only statements logged via sqlalchemy.engine or myapp
+will be passed. Example: specifying ``--logging-filter=sqlalchemy,myapp``
+will ensure that only statements logged via sqlalchemy.engine, myapp,
or myapp.foo.bar logger will be logged.
+
+You can remove other installed logging handlers with the
+``--logging-clear-handlers`` option.
"""
import logging
@@ -60,7 +57,7 @@ class MyMemoryHandler(BufferingHandler):
class LogCapture(Plugin):
"""
Log capture plugin. Enabled by default. Disable with --nologcapture.
- This plugin captures logging statement issued during test execution,
+ This plugin captures logging statements issued during test execution,
appending any output captured to the error or failure output,
should the test fail or raise an error.
"""
@@ -101,8 +98,8 @@ class LogCapture(Plugin):
default=env.get('NOSE_LOGFILTER'),
metavar="FILTER",
help="Specify which statements to filter in/out. "
- "By default everything is captured. If the output is too"
- " verbose,\nuse this option to filter out needless output\n"
+ "By default, everything is captured. If the output is too"
+ " verbose,\nuse this option to filter out needless output.\n"
"Example: filter=foo will capture statements issued ONLY to\n"
" foo or foo.what.ever.sub but not foobar or other logger.\n"
"Specify multiple loggers with comma: filter=foo,bar,baz."
diff --git a/nose/plugins/multiprocess.py b/nose/plugins/multiprocess.py
index bf17d92..f825cd8 100644
--- a/nose/plugins/multiprocess.py
+++ b/nose/plugins/multiprocess.py
@@ -5,14 +5,17 @@ Overview
The multiprocess plugin enables you to distribute your test run among a set of
worker processes that run tests in parallel. This can speed up CPU-bound test
runs (as long as the number of worker processes is around the number of
-processors or cores available), but is mainly useful for IO-bound tests which
-can benefit from massive parallelization, since most of the tests spend most
-of their time waiting for data to arrive from someplace else.
+processors or cores available), but is mainly useful for IO-bound tests that
+spend most of their time waiting for data to arrive from someplace else and
+would benefit from massive parallelization.
.. note ::
See :doc:`../doc_tests/test_multiprocess/multiprocess` for additional
- documentation and examples.
+ documentation and examples. Use of this plugin requires the
+ multiprocessing_ module, also available from PyPI.
+
+.. _multiprocessing : http://code.google.com/p/python-multiprocessing/
How tests are distributed
=========================
@@ -21,11 +24,11 @@ The ideal case would be to dispatch each test to a worker process
separately. This ideal is not attainable in all cases, however, because many
test suites depend on context (class, module or package) fixtures.
-The plugin can't know (unless you tell it -- see below!) whether a given
-context fixture is re-entrant (that is, can be called many times
-concurrently), or may be shared among tests running in different
-processes. Therefore, if a context has fixtures, the default behavior is to
-dispatch the entire suite to a worker as a unit.
+The plugin can't know (unless you tell it -- see below!) if a context fixture
+can be called many times concurrently (is re-entrant), or if it can be shared
+among tests running in different processes. Therefore, if a context has
+fixtures, the default behavior is to dispatch the entire suite to a worker as
+a unit.
Controlling distribution
^^^^^^^^^^^^^^^^^^^^^^^^
@@ -33,49 +36,47 @@ Controlling distribution
There are two context-level variables that you can use to control this default
behavior.
-If a context's fixtures are re-entrant, set `_multiprocess_can_split_ = True`
+If a context's fixtures are re-entrant, set ``_multiprocess_can_split_ = True``
in the context, and the plugin will dispatch tests in suites bound to that
context as if the context had no fixtures. This means that the fixtures will
-execute multiple times, typically once per test, and concurrently.
+execute concurrently and multiple times, typically once per test.
-If a context's fixtures may be shared by tests running in different processes
--- for instance a package-level fixture that starts an external http server or
-initializes a shared database -- then set `_multiprocess_shared_ = True` in
-the context. Fixtures for contexts so marked will execute in the primary nose
-process, and tests in those contexts will be individually dispatched to run in
-parallel.
+If a context's fixtures can be shared by tests running in different processes
+-- such as a package-level fixture that starts an external http server or
+initializes a shared database -- then set ``_multiprocess_shared_ = True`` in
+the context. These fixtures will then execute in the primary nose process, and
+tests in those contexts will be individually dispatched to run in parallel.
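
For illustration, a class safe to mark ``_multiprocess_can_split_`` might
look like this (a hypothetical sketch, assuming nose's standard
``setup_class`` fixture name)::

    class TestReentrantFixtures(object):
        # fixtures build only private, in-memory state, so running them
        # once per test, concurrently, is harmless
        _multiprocess_can_split_ = True

        @classmethod
        def setup_class(cls):
            cls.data = range(10)

        def test_sum(self):
            assert sum(self.data) == 45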
How results are collected and reported
======================================
As each test or suite executes in a worker process, results (failures, errors,
and specially handled exceptions like SkipTest) are collected in that
-process. When the test or suite is complete, the results are returned to the
-main nose process. There, any progress output (dots) is printed, and the
-results from the test or suite combined into a consolidated result
-set. Finally when results have been received for all dispatched tests, or all
+process. When the worker process finishes, it returns results to the main
+nose process. There, any progress output is printed (dots!), and the
+results from the test run are combined into a consolidated result
+set. When results have been received for all dispatched tests, or all
workers have died, the result summary is output as normal.
Beware!
=======
Not all test suites will benefit from, or even operate correctly using, this
-plugin. If you don't have multiple processors, CPU-bound tests will run more
-slowly than otherwise, for instance. There are also some differences in plugin
+plugin. For example, CPU-bound tests will run more slowly if you don't have
+multiple processors. There are also some differences in plugin
interactions and behaviors due to the way in which tests are dispatched and
-loaded. In general, test loading under the plugin operates as if it were
-always in directed mode, not discovered mode. For instance, doctests in test
-modules will always be found when using this plugin and the doctest plugin
-together.
+loaded. In general, test loading under this plugin operates as if it were
+always in directed mode instead of discovered mode. For instance, doctests
+in test modules will always be found when using this plugin with the doctest
+plugin.
-But most likely the biggest issue you will face is concurrency. Unless you
+But the biggest issue you will face is probably concurrency. Unless you
have kept your tests as religiously pure unit tests, with no side-effects, no
ordering issues, and no external dependencies, chances are you will experience
odd, intermittent and unexplainable failures and errors when using this
-plugin. This doesn't necessarily mean the plugin is broken: it may mean that
+plugin. This doesn't necessarily mean the plugin is broken; it may mean that
your test suite is not safe for concurrency.
-
"""
import logging
import os
diff --git a/nose/plugins/prof.py b/nose/plugins/prof.py
index 15be20f..7ea9f03 100644
--- a/nose/plugins/prof.py
+++ b/nose/plugins/prof.py
@@ -1,10 +1,13 @@
-"""Use the profile plugin with ``--with-profile`` or NOSE_WITH_PROFILE to
-enable profiling using the hotshot profiler. Profiler output can be
-controlled with the ``--profile-sort`` and ``--profile-restrict``, and the
-profiler output file may be changed with ``--profile-stats-file``.
+"""This plugin will run tests using the hotshot profiler, which is part
+of the standard library. To turn it on, use the ``--with-profile`` option
+or set the NOSE_WITH_PROFILE environment variable. Profiler output can be
+controlled with the ``--profile-sort`` and ``--profile-restrict`` options,
+and the profiler output file may be changed with ``--profile-stats-file``.
-See the hotshot documentation in the standard library documentation for
+See the `hotshot documentation`_ in the standard library documentation for
more details on the various output options.
+
+.. _hotshot documentation: http://docs.python.org/library/hotshot.html
"""
try:
diff --git a/nose/plugins/skip.py b/nose/plugins/skip.py
index 18750e1..92c4ad4 100644
--- a/nose/plugins/skip.py
+++ b/nose/plugins/skip.py
@@ -3,7 +3,7 @@ This plugin installs a SKIP error class for the SkipTest exception.
When SkipTest is raised, the exception will be logged in the skipped
attribute of the result, 'S' or 'SKIP' (verbose) will be output, and
the exception will not be counted as an error or failure. This plugin
-is enabled by default but may be disabled with the --no-skip option.
+is enabled by default but may be disabled with the ``--no-skip`` option.
"""
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
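
Typical use (a hypothetical test; ``SkipTest`` is the exception handled by
this plugin)::

    import sys
    from nose.plugins.skip import SkipTest

    def test_windows_registry():
        if not sys.platform.startswith("win"):
            raise SkipTest("requires Windows")
        # platform-specific assertions would go here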
diff --git a/nose/plugins/testid.py b/nose/plugins/testid.py
index 0dc9297..91704af 100644
--- a/nose/plugins/testid.py
+++ b/nose/plugins/testid.py
@@ -11,19 +11,19 @@ For example, if your normal test run looks like::
tests.test_b ... ok
tests.test_c ... ok
-When adding --with-id you'll see::
+When adding ``--with-id`` you'll see::
% nosetests -v --with-id
#1 tests.test_a ... ok
#2 tests.test_b ... ok
#3 tests.test_c ... ok
-Then you can rerun individual tests by supplying just the id numbers::
+Then you can re-run individual tests by supplying just an id number::
% nosetests -v --with-id 2
#2 tests.test_b ... ok
-Then you can rerun individual tests by supplying just the id numbers::
+You can also pass multiple id numbers::
% nosetests -v --with-id 2 3
#2 tests.test_b ... ok
@@ -38,9 +38,9 @@ the ids file is still written.
Looping over failed tests
-------------------------
-This plugin also adds a mode where it will direct the test run to record
-failed tests, and on subsequent runs, include only the tests that failed the
-last time. Activate this mode with the --failed switch::
+This plugin also adds a mode that will direct the test runner to record
+failed tests. Subsequent test runs will then run only the tests that failed
+last time. Activate this mode with the ``--failed`` switch::
% nosetests -v --failed
#1 test.test_a ... ok
@@ -48,14 +48,13 @@ last time. Activate this mode with the --failed switch::
#3 test.test_c ... FAILED
#4 test.test_d ... ok
-And on the 2nd run, only tests #2 and #3 will run::
+On the second run, only tests #2 and #3 will run::
% nosetests -v --failed
#2 test.test_b ... ERROR
#3 test.test_c ... FAILED
-Then as you correct errors and tests pass, they'll drop out of subsequent
-runs.
+As you correct errors and tests pass, they'll drop out of subsequent runs.
First::
@@ -68,8 +67,7 @@ Second::
% nosetests -v --failed
#3 test.test_c ... FAILED
-Until finally when all tests pass, the full set will run again on the next
-invocation.
+When all tests pass, the full set will run on the next invocation.
First::
@@ -86,13 +84,13 @@ Second::
.. note ::
- If you expect to want to use --failed often, a good practice is to always
- run with the --with-id option active, so that an ids file is always recorded
- and you can then add --failed to the command line as soon as you have
- failing tests. If --with-id is not active, your first invocation with
- --failed will (perhaps surprisingly) run all tests, because there will be
- no ids file recording the failed tests from the previous run, during which
- this plugin was not active.
+  If you expect to use ``--failed`` regularly, it's a good idea to always
+ run using the ``--with-id`` option. This will ensure that an id file is
+ always created, allowing you to add ``--failed`` to the command line as soon
+ as you have failing tests. Otherwise, your first run using ``--failed`` will
+ (perhaps surprisingly) run *all* tests, because there won't be an id file
+ containing the record of failed tests from your previous run.
+
"""
__test__ = False
@@ -111,11 +109,8 @@ log = logging.getLogger(__name__)
class TestId(Plugin):
"""
- Activate to add a test id (like #1) to each test name output. After
- you've run once to generate test ids, you can re-run individual
- tests by activating the plugin and passing the ids (with or
- without the # prefix) instead of test names. Activate with --failed
- to rerun failing tests only.
+ Activate to add a test id (like #1) to each test name output. Activate
+ with --failed to rerun failing tests only.
"""
name = 'id'
idfile = None
diff --git a/nose/plugins/xunit.py b/nose/plugins/xunit.py
index bb3d279..c83407a 100644
--- a/nose/plugins/xunit.py
+++ b/nose/plugins/xunit.py
@@ -2,8 +2,8 @@
"""This plugin provides test results in the standard XUnit XML format.
It was designed for the `Hudson`_ continuous build system but will
-probably work for anything else that understands an XUnit
-formatted XML representation of test results.
+probably work for anything else that understands an XUnit-formatted XML
+representation of test results.
Add this shell command to your builder ::
@@ -12,7 +12,7 @@ Add this shell command to your builder ::
And by default a file named nosetests.xml will be written to the
working directory.
-In a Hudson builder, tick the box named Publish JUnit test result report
+In a Hudson builder, tick the box named "Publish JUnit test result report"
under the Post-build Actions and enter this value for Test report XMLs ::
**/nosetests.xml