binman: Run tests concurrently

At present the tests run one after the other using a single CPU. This is
not very efficient. Bring in the concurrencytest module and run the tests
concurrently, using one process for each CPU by default. A -P option
allows this to be overridden, which is necessary for code coverage to
function correctly.
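
For example (illustrative invocations):

    binman -t                # run the test suite, one process per CPU
    binman -t -P1            # run in a single process (needed for coverage)
    binman -t testSections   # run a single named test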

This requires fixing a few tests which are currently not fully
independent.

At some point we might consider doing this across all pytests in U-Boot.
There is a pytest version that supports specifying the number of processes
to use, but it did not work for me.

Signed-off-by: Simon Glass <sjg@chromium.org>
Author: Simon Glass <sjg@chromium.org>
Date:   2018-10-01 21:12:47 -06:00
Commit: 11ae93eef4 (parent: 2673afe221)
12 changed files with 274 additions and 22 deletions

@@ -47,6 +47,7 @@ install:
- virtualenv /tmp/venv
- . /tmp/venv/bin/activate
- pip install pytest
- pip install python-subunit
- grub-mkimage -o ~/grub_x86.efi -O i386-efi normal echo lsefimmap lsefi lsefisystab efinet tftp minicmd
- mkdir ~/grub2-arm
- ( cd ~/grub2-arm; wget -O - http://download.opensuse.org/ports/armv7hl/distribution/leap/42.2/repo/oss/suse/armv7hl/grub2-arm-efi-2.02~beta2-87.1.armv7hl.rpm | rpm2cpio | cpio -di )

@@ -29,6 +29,7 @@ tests. Similar package names should exist in other distributions.
| -------------- | ----------------------------- |
| python | 2.7.5-5ubuntu3 |
| python-pytest | 2.5.1-1 |
| python-subunit | - |
| gdisk | 0.8.8-1ubuntu0.1 |
| dfu-util | 0.5-1 |
| dtc | 1.4.0+dfsg-1 |

@@ -10,6 +10,7 @@
"""See README for more information"""
import glob
import multiprocessing
import os
import sys
import traceback
@@ -17,7 +18,7 @@ import unittest
# Bring in the patman and dtoc libraries
our_path = os.path.dirname(os.path.realpath(__file__))
for dirname in ['../patman', '../dtoc', '..']:
for dirname in ['../patman', '../dtoc', '..', '../concurrencytest']:
sys.path.insert(0, os.path.join(our_path, dirname))
# Bring in the libfdt module
@@ -27,16 +28,22 @@ sys.path.insert(0, os.path.join(our_path,
import cmdline
import command
use_concurrent = True
try:
from concurrencytest import ConcurrentTestSuite, fork_for_tests
except:
use_concurrent = False
import control
import test_util
def RunTests(debug, args):
def RunTests(debug, processes, args):
"""Run the functional tests and any embedded doctests
Args:
debug: True to enable debugging, which shows a full stack trace on error
processes: Number of processes to use to run tests (None=same as #CPUs)
args: List of positional args provided to binman. This can hold a test
name to execute (as in 'binman -t testSections', for example)
"""
import elf_test
import entry_test
@@ -54,19 +61,28 @@ def RunTests(debug, args):
sys.argv = [sys.argv[0]]
if debug:
sys.argv.append('-D')
if debug:
sys.argv.append('-D')
# Run the entry tests first, since these need to be the first to import the
# 'entry' module.
test_name = args and args[0] or None
suite = unittest.TestSuite()
loader = unittest.TestLoader()
for module in (entry_test.TestEntry, ftest.TestFunctional, fdt_test.TestFdt,
elf_test.TestElf, image_test.TestImage):
if test_name:
try:
suite = unittest.TestLoader().loadTestsFromName(test_name, module)
suite.addTests(loader.loadTestsFromName(test_name, module))
except AttributeError:
continue
else:
suite = unittest.TestLoader().loadTestsFromTestCase(module)
suite.addTests(loader.loadTestsFromTestCase(module))
if use_concurrent and processes != 1:
concurrent_suite = ConcurrentTestSuite(suite,
fork_for_tests(processes or multiprocessing.cpu_count()))
concurrent_suite.run(result)
else:
suite.run(result)
print result
@@ -115,7 +131,7 @@ def RunBinman(options, args):
sys.tracebacklimit = 0
if options.test:
ret_code = RunTests(options.debug, args[1:])
ret_code = RunTests(options.debug, options.processes, args[1:])
elif options.test_coverage:
RunTestCoverage()

@@ -46,6 +46,8 @@ def ParseArgs(argv):
parser.add_option('-p', '--preserve', action='store_true',\
help='Preserve temporary output directory even if option -O is not '
'given')
parser.add_option('-P', '--processes', type=int,
help='set number of processes to use for running tests')
parser.add_option('-t', '--test', action='store_true',
default=False, help='run tests')
parser.add_option('-T', '--test-coverage', action='store_true',

@@ -13,6 +13,8 @@ import fdt
import fdt_util
import tools
entry = None
class TestEntry(unittest.TestCase):
def setUp(self):
tools.PrepareOutputDir(None)
@@ -38,7 +40,10 @@ class TestEntry(unittest.TestCase):
def test2EntryImportLib(self):
del sys.modules['importlib']
global entry
reload(entry)
if entry:
reload(entry)
else:
import entry
entry.Entry.Create(None, self.GetNode(), 'u-boot-spl')
del entry

@@ -367,6 +367,16 @@ class TestFunctional(unittest.TestCase):
os.makedirs(pathname)
return pathname
@classmethod
def _SetupSplElf(self, src_fname='bss_data'):
"""Set up an ELF file with a '_dt_ucode_base_size' symbol
Args:
src_fname: Filename of the ELF file to use as SPL
"""
with open(self.TestFile(src_fname)) as fd:
TestFunctional._MakeInputFile('spl/u-boot-spl', fd.read())
@classmethod
def TestFile(self, fname):
return os.path.join(self._binman_dir, 'test', fname)
@@ -715,8 +725,7 @@ class TestFunctional(unittest.TestCase):
def testImagePadByte(self):
"""Test that the image pad byte can be specified"""
with open(self.TestFile('bss_data')) as fd:
TestFunctional._MakeInputFile('spl/u-boot-spl', fd.read())
self._SetupSplElf()
data = self._DoReadFile('21_image_pad.dts')
self.assertEqual(U_BOOT_SPL_DATA + (chr(0xff) * 1) + U_BOOT_DATA, data)
@@ -739,6 +748,7 @@ class TestFunctional(unittest.TestCase):
def testPackSorted(self):
"""Test that entries can be sorted"""
self._SetupSplElf()
data = self._DoReadFile('24_sorted.dts')
self.assertEqual(chr(0) * 1 + U_BOOT_SPL_DATA + chr(0) * 2 +
U_BOOT_DATA, data)
@@ -781,6 +791,7 @@ class TestFunctional(unittest.TestCase):
def testPackX86Rom(self):
"""Test that a basic x86 ROM can be created"""
self._SetupSplElf()
data = self._DoReadFile('29_x86-rom.dts')
self.assertEqual(U_BOOT_DATA + chr(0) * 7 + U_BOOT_SPL_DATA +
chr(0) * 2, data)
@@ -1017,15 +1028,13 @@ class TestFunctional(unittest.TestCase):
def testSplBssPad(self):
"""Test that we can pad SPL's BSS with zeros"""
# ELF file with a '__bss_size' symbol
with open(self.TestFile('bss_data')) as fd:
TestFunctional._MakeInputFile('spl/u-boot-spl', fd.read())
self._SetupSplElf()
data = self._DoReadFile('47_spl_bss_pad.dts')
self.assertEqual(U_BOOT_SPL_DATA + (chr(0) * 10) + U_BOOT_DATA, data)
def testSplBssPadMissing(self):
"""Test that a missing symbol is detected"""
with open(self.TestFile('u_boot_ucode_ptr')) as fd:
TestFunctional._MakeInputFile('spl/u-boot-spl', fd.read())
self._SetupSplElf('u_boot_ucode_ptr')
with self.assertRaises(ValueError) as e:
self._DoReadFile('47_spl_bss_pad.dts')
self.assertIn('Expected __bss_size symbol in spl/u-boot-spl',
@@ -1050,9 +1059,7 @@ class TestFunctional(unittest.TestCase):
ucode_second: True if the microcode entry is second instead of
third
"""
# ELF file with a '_dt_ucode_base_size' symbol
with open(self.TestFile('u_boot_ucode_ptr')) as fd:
TestFunctional._MakeInputFile('spl/u-boot-spl', fd.read())
self._SetupSplElf('u_boot_ucode_ptr')
first, pos_and_size = self._RunMicrocodeTest(dts, U_BOOT_SPL_NODTB_DATA,
ucode_second=ucode_second)
self.assertEqual('splnodtb with microc' + pos_and_size +
@@ -1094,8 +1101,7 @@ class TestFunctional(unittest.TestCase):
addr = elf.GetSymbolAddress(elf_fname, '__image_copy_start')
self.assertEqual(syms['_binman_u_boot_spl_prop_offset'].address, addr)
with open(self.TestFile('u_boot_binman_syms')) as fd:
TestFunctional._MakeInputFile('spl/u-boot-spl', fd.read())
self._SetupSplElf('u_boot_binman_syms')
data = self._DoReadFile('53_symbols.dts')
sym_values = struct.pack('<LQL', 0x24 + 0, 0x24 + 24, 0x24 + 20)
expected = (sym_values + U_BOOT_SPL_DATA[16:] + chr(0xff) +
@@ -1727,16 +1733,14 @@ class TestFunctional(unittest.TestCase):
def testElf(self):
"""Basic test of ELF entries"""
with open(self.TestFile('bss_data')) as fd:
TestFunctional._MakeInputFile('spl/u-boot-spl', fd.read())
self._SetupSplElf()
with open(self.TestFile('bss_data')) as fd:
TestFunctional._MakeInputFile('-boot', fd.read())
data = self._DoReadFile('96_elf.dts')
def testElfStrip(self):
"""Basic test of ELF entries"""
with open(self.TestFile('bss_data')) as fd:
TestFunctional._MakeInputFile('spl/u-boot-spl', fd.read())
self._SetupSplElf()
with open(self.TestFile('bss_data')) as fd:
TestFunctional._MakeInputFile('-boot', fd.read())
data = self._DoReadFile('97_elf_strip.dts')

tools/concurrencytest/.gitignore (vendored, new file)

@@ -0,0 +1 @@
*.pyc

@@ -0,0 +1,74 @@
concurrencytest
===============
![testing goats](https://raw.github.com/cgoldberg/concurrencytest/master/testing-goats.png "testing goats")
Python testtools extension for running unittest suites concurrently.
----
Install from PyPI:
```
pip install concurrencytest
```
----
Requires:
* [testtools](https://pypi.python.org/pypi/testtools) : `pip install testtools`
* [python-subunit](https://pypi.python.org/pypi/python-subunit) : `pip install python-subunit`
----
Example:
```python
import time
import unittest
from concurrencytest import ConcurrentTestSuite, fork_for_tests
class SampleTestCase(unittest.TestCase):
"""Dummy tests that sleep for demo."""
def test_me_1(self):
time.sleep(0.5)
def test_me_2(self):
time.sleep(0.5)
def test_me_3(self):
time.sleep(0.5)
def test_me_4(self):
time.sleep(0.5)
# Load tests from SampleTestCase defined above
suite = unittest.TestLoader().loadTestsFromTestCase(SampleTestCase)
runner = unittest.TextTestRunner()
# Run tests sequentially
runner.run(suite)
# Run same tests across 4 processes
suite = unittest.TestLoader().loadTestsFromTestCase(SampleTestCase)
concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(4))
runner.run(concurrent_suite)
```
Output:
```
....
----------------------------------------------------------------------
Ran 4 tests in 2.003s
OK
....
----------------------------------------------------------------------
Ran 4 tests in 0.504s
OK
```
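
A small variant of the example above sizes the worker pool from the host CPU
count rather than hard-coding 4, which is essentially what binman does by
default (a sketch; `QuickTestCase` is a stand-in for any `unittest.TestCase`):

```python
import unittest
from multiprocessing import cpu_count

from concurrencytest import ConcurrentTestSuite, fork_for_tests


class QuickTestCase(unittest.TestCase):
    """Stand-in test case used only to demonstrate pool sizing."""

    def test_pass(self):
        self.assertTrue(True)


# Create one worker process per CPU
suite = unittest.TestLoader().loadTestsFromTestCase(QuickTestCase)
concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(cpu_count()))
unittest.TextTestRunner().run(concurrent_suite)
```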

@@ -0,0 +1,144 @@
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0+
#
# Modified by: Corey Goldberg, 2013
#
# Original code from:
# Bazaar (bzrlib.tests.__init__.py, v2.6, copied Jun 01 2013)
# Copyright (C) 2005-2011 Canonical Ltd
"""Python testtools extension for running unittest suites concurrently.
The `testtools` project provides a ConcurrentTestSuite class, but does
not provide a `make_tests` implementation needed to use it.
This allows you to parallelize a test run across a configurable number
of worker processes. While this can speed up CPU-bound test runs, it is
mainly useful for IO-bound tests that spend most of their time waiting for
data to arrive from someplace else and can benefit from concurrency.
Unix only.
"""
import os
import sys
import traceback
import unittest
from itertools import cycle
from multiprocessing import cpu_count
from subunit import ProtocolTestCase, TestProtocolClient
from subunit.test_results import AutoTimingTestResultDecorator
from testtools import ConcurrentTestSuite, iterate_tests
__all__ = [
'ConcurrentTestSuite',
'fork_for_tests',
'partition_tests',
]
CPU_COUNT = cpu_count()
def fork_for_tests(concurrency_num=CPU_COUNT):
"""Implementation of `make_tests` used to construct `ConcurrentTestSuite`.
:param concurrency_num: number of processes to use.
"""
def do_fork(suite):
"""Take suite and start up multiple runners by forking (Unix only).
:param suite: TestSuite object.
:return: An iterable of TestCase-like objects which can each have
run(result) called on them to feed tests to result.
"""
result = []
test_blocks = partition_tests(suite, concurrency_num)
# Clear the tests from the original suite so it doesn't keep them alive
suite._tests[:] = []
for process_tests in test_blocks:
process_suite = unittest.TestSuite(process_tests)
# Also clear each split list so the new suite holds the only reference
process_tests[:] = []
c2pread, c2pwrite = os.pipe()
pid = os.fork()
if pid == 0:
try:
stream = os.fdopen(c2pwrite, 'wb', 1)
os.close(c2pread)
# Leave stderr and stdout open so we can see test noise
# Close stdin so that the child goes away if it decides to
# read from stdin (otherwise it's a roulette to see which
# child actually gets keystrokes for pdb etc).
sys.stdin.close()
subunit_result = AutoTimingTestResultDecorator(
TestProtocolClient(stream)
)
process_suite.run(subunit_result)
except:
# Try and report traceback on stream, but exit with error
# even if stream couldn't be created or something else
# goes wrong. The traceback is formatted to a string and
# written in one go to avoid interleaving lines from
# multiple failing children.
try:
stream.write(traceback.format_exc())
finally:
os._exit(1)
os._exit(0)
else:
os.close(c2pwrite)
stream = os.fdopen(c2pread, 'rb', 1)
test = ProtocolTestCase(stream)
result.append(test)
return result
return do_fork
def partition_tests(suite, count):
"""Partition suite into count lists of tests."""
# This just assigns tests in a round-robin fashion. On one hand this
# splits up blocks of related tests that might run faster if they shared
# resources, but on the other it avoids assigning blocks of slow tests to
# just one partition. So the slowest partition shouldn't be much slower
# than the fastest.
partitions = [list() for _ in range(count)]
tests = iterate_tests(suite)
for partition, test in zip(cycle(partitions), tests):
partition.append(test)
return partitions
if __name__ == '__main__':
import time
class SampleTestCase(unittest.TestCase):
"""Dummy tests that sleep for demo."""
def test_me_1(self):
time.sleep(0.5)
def test_me_2(self):
time.sleep(0.5)
def test_me_3(self):
time.sleep(0.5)
def test_me_4(self):
time.sleep(0.5)
# Load tests from SampleTestCase defined above
suite = unittest.TestLoader().loadTestsFromTestCase(SampleTestCase)
runner = unittest.TextTestRunner()
# Run tests sequentially
runner.run(suite)
# Run same tests across 4 processes
suite = unittest.TestLoader().loadTestsFromTestCase(SampleTestCase)
concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(4))
runner.run(concurrent_suite)

@@ -89,6 +89,8 @@ parser.add_option('--include-disabled', action='store_true',
help='Include disabled nodes')
parser.add_option('-o', '--output', action='store', default='-',
help='Select output filename')
parser.add_option('-P', '--processes', type=int,
help='set number of processes to use for running tests')
parser.add_option('-t', '--test', action='store_true', dest='test',
default=False, help='run tests')
parser.add_option('-T', '--test-coverage', action='store_true',

@@ -547,6 +547,8 @@ if __name__ != '__main__':
parser = OptionParser()
parser.add_option('-B', '--build-dir', type='string', default='b',
help='Directory containing the build output')
parser.add_option('-P', '--processes', type=int,
help='set number of processes to use for running tests')
parser.add_option('-t', '--test', action='store_true', dest='test',
default=False, help='run tests')
parser.add_option('-T', '--test-coverage', action='store_true',

@@ -43,7 +43,7 @@ def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None):
glob_list += exclude_list
glob_list += ['*libfdt.py', '*site-packages*']
cmd = ('PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools python-coverage run '
'--omit "%s" %s -t' % (build_dir, ','.join(glob_list), prog))
'--omit "%s" %s -P1 -t' % (build_dir, ','.join(glob_list), prog))
os.system(cmd)
stdout = command.Output('python-coverage', 'report')
lines = stdout.splitlines()