File: //opt/alt/python37/lib/python3.7/site-packages/nose/plugins/__pycache__/plugintest.cpython-37.pyc
Testing Plugins
===============

The plugin interface is well tested enough that you can unit test your
use of its hooks with some confidence. However, there is also a mixin
for unittest.TestCase called PluginTester that's designed to test
plugins in their native runtime environment.

Here's a simple example with a do-nothing plugin and a composed suite.
    >>> import unittest
    >>> from nose.plugins import Plugin, PluginTester
    >>> class FooPlugin(Plugin):
    ...     pass
    >>> class TestPluginFoo(PluginTester, unittest.TestCase):
    ...     activate = '--with-foo'
    ...     plugins = [FooPlugin()]
    ...     def test_foo(self):
    ...         for line in self.output:
    ...             # i.e. check for patterns
    ...             pass
    ...         # or check for a line containing ...
    ...         assert "ValueError" in self.output
    ...     def makeSuite(self):
    ...         class TC(unittest.TestCase):
    ...             def runTest(self):
    ...                 raise ValueError("I hate foo")
    ...         return [TC('runTest')]
    ...
    >>> res = unittest.TestResult()
    >>> case = TestPluginFoo('test_foo')
    >>> _ = case(res)
    >>> res.errors
    []
    >>> res.failures
    []
    >>> res.wasSuccessful()
    True
    >>> res.testsRun
    1

And here is a more complex example of testing a plugin that has extra
arguments and reads environment variables.
    >>> import unittest, os
    >>> from nose.plugins import Plugin, PluginTester
    >>> class FancyOutputter(Plugin):
    ...     name = "fancy"
    ...     def configure(self, options, conf):
    ...         Plugin.configure(self, options, conf)
    ...         if not self.enabled:
    ...             return
    ...         self.fanciness = 1
    ...         if options.more_fancy:
    ...             self.fanciness = 2
    ...         if 'EVEN_FANCIER' in self.env:
    ...             self.fanciness = 3
    ...
    ...     def options(self, parser, env=os.environ):
    ...         self.env = env
    ...         parser.add_option('--more-fancy', action='store_true')
    ...         Plugin.options(self, parser, env=env)
    ...
    ...     def report(self, stream):
    ...         stream.write("FANCY " * self.fanciness)
    ...
    >>> class TestFancyOutputter(PluginTester, unittest.TestCase):
    ...     activate = '--with-fancy'  # enables the plugin
    ...     plugins = [FancyOutputter()]
    ...     args = ['--more-fancy']
    ...     env = {'EVEN_FANCIER': '1'}
    ...
    ...     def test_fancy_output(self):
    ...         assert "FANCY FANCY FANCY" in self.output, (
    ...             "got: %s" % self.output)
    ...     def makeSuite(self):
    ...         class TC(unittest.TestCase):
    ...             def runTest(self):
    ...                 raise ValueError("I hate fancy stuff")
    ...         return [TC('runTest')]
    ...
    >>> res = unittest.TestResult()
    >>> case = TestFancyOutputter('test_fancy_output')
    >>> _ = case(res)
    >>> res.errors
    []
    >>> res.failures
    []
    >>> res.wasSuccessful()
    True
    >>> res.testsRun
    1
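
Because the documentation above is written as doctests, it can be executed
directly with the standard library's doctest module. Below is a minimal
sketch, assuming the source module imports cleanly as
nose.plugins.plugintest (the path in the file header above)::

    # Run the doctest examples embedded in the module's docstrings.
    import doctest
    import nose.plugins.plugintest

    if __name__ == '__main__':
        failed, attempted = doctest.testmod(nose.plugins.plugintest)
        print("doctests attempted: %d, failed: %d" % (attempted, failed))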

MultiProcessFile
================
A helper for testing code that uses multiprocessing.

multiprocessing poses a problem for doctests, since the strategy of
replacing sys.stdout/stderr with file-like objects and then inspecting
the results won't work: the child processes will write to the objects,
but the data will not be reflected in the parent doctest-ing process.

The solution is to create file-like objects that interact with
multiprocessing in a more cooperative way. All processes can write to
such an object, but only the creator can read, which lets the testing
system see a unified picture of I/O.
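
The class body itself is not recoverable from the compiled file. As a
rough illustration of the approach described above, here is a minimal
sketch, assuming a multiprocessing.Manager queue as the shared buffer;
the class name SharedOutput and its method set are illustrative, not
the module's actual implementation::

    # Illustrative sketch only -- not the real MultiProcessFile.
    # All processes may write; only the creating process may read.
    import multiprocessing
    from io import StringIO
    from os import getpid

    class SharedOutput(object):
        def __init__(self):
            self.__master = getpid()                    # pid of the creator
            self.__queue = multiprocessing.Manager().Queue()
            self.__buffer = StringIO()

        def write(self, text):
            # Safe from any process: data travels through the manager queue.
            self.__queue.put((getpid(), text))

        def flush(self):
            pass

        def getvalue(self):
            # Only the creator reads, so it sees a unified picture of I/O.
            assert getpid() == self.__master, "only the creator may read"
            while not self.__queue.empty():
                pid, text = self.__queue.get()
                self.__buffer.write(text)
            return self.__buffer.getvalue()

A test would install an instance of such an object as sys.stdout or
sys.stderr before spawning workers, then call getvalue() in the parent
after joining them.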