From dbec26921fd50ae4987fa013e20fddc78d8437a6 Mon Sep 17 00:00:00 2001 From: Gennadiy Rozental Date: Mon, 17 Oct 2011 11:13:55 +0000 Subject: [PATCH] latest state of sources Fixes #4982 [SVN r75007] --- doc/src/UTF.log.xsd | 146 --- doc/src/UTF.report.xsd | 82 -- doc/src/btl-toc.xml | 161 +-- doc/src/btl.xml | 5 +- doc/src/examples/example.sln | 6 +- doc/src/examples/example.vcproj | 196 --- doc/src/examples/example.vcxproj | 89 ++ doc/src/examples/example25.cpp | 2 +- doc/src/examples/example38.cpp | 4 +- doc/src/examples/example38.output | 2 +- doc/src/execution-monitor.xml | 6 +- doc/src/faq.xml | 4 +- doc/src/minimal-testing.xml | 4 +- doc/src/program-execution-monitor.xml | 2 +- doc/src/utf.examples.xml | 304 +++++ doc/src/utf.tutorials.xml | 4 +- ...fixture.xml => utf.user-guide.fixture.xml} | 0 doc/src/utf.user-guide.glossary.xml | 235 ++++ doc/src/utf.user-guide.initialization.xml | 150 +++ doc/src/utf.user-guide.runtime-config.xml | 126 +- ...l => utf.user-guide.test-organization.xml} | 378 +++--- ...put.xml => utf.user-guide.test-output.xml} | 8 +- doc/src/utf.user-guide.test-runners.xml | 155 +++ doc/src/utf.user-guide.testing-tools.xml | 1168 +++++++++++++++++ doc/src/utf.user-guide.usage-variants.xml | 158 +++ doc/src/utf.user-guide.xml | 68 + doc/src/utf.users-guide.xml | 698 ---------- doc/src/utf.xml | 24 +- doc/src/xsl/html.xsl | 2 +- 29 files changed, 2762 insertions(+), 1425 deletions(-) delete mode 100755 doc/src/UTF.log.xsd delete mode 100755 doc/src/UTF.report.xsd delete mode 100755 doc/src/examples/example.vcproj create mode 100755 doc/src/examples/example.vcxproj create mode 100644 doc/src/utf.examples.xml rename doc/src/{utf.users-guide.fixture.xml => utf.user-guide.fixture.xml} (100%) create mode 100644 doc/src/utf.user-guide.glossary.xml create mode 100644 doc/src/utf.user-guide.initialization.xml rename doc/src/{utf.users-guide.test-organization.xml => utf.user-guide.test-organization.xml} (90%) rename 
doc/src/{utf.users-guide.test-output.xml => utf.user-guide.test-output.xml} (98%) create mode 100644 doc/src/utf.user-guide.test-runners.xml create mode 100644 doc/src/utf.user-guide.testing-tools.xml create mode 100644 doc/src/utf.user-guide.usage-variants.xml create mode 100644 doc/src/utf.user-guide.xml delete mode 100644 doc/src/utf.users-guide.xml diff --git a/doc/src/UTF.log.xsd b/doc/src/UTF.log.xsd deleted file mode 100755 index dedcb486..00000000 --- a/doc/src/UTF.log.xsd +++ /dev/null @@ -1,146 +0,0 @@ - - - - - - name of the test unit - - - - - Specified with value "yes" only if test unit was skipped during execution - - - - - - - - - Line number corresponding to the log entry - - - - - file name corresponding to the log entry - - - - - - - - Exception description - - - - - Location of last checkpoint before exception occured - - - - - - obsolete? - - - - - - - - - different log entries - - - - Log entry corresponding to the successfully passed assertion - - - - - Log entry corresponding to the message generated during test execution - - - - - Log entry corresponding to the warning generated during test execution - - - - - Log entry corresponding to the non-fatal error occured during test execution - - - - - Log entry corresponding to the fatal error occured during test execution - - - - - Log entry corresponding to an exception occured during test execution - - - - - - approximate time spent on test unit execution - - - - - - - - - - - List of test units composing test suite - - - - - - - - - - - - Holds whole unit test log content - - - - - - Optional build information for the unit test. 
- - - - - unique identifier for the platform unit test was compiled on - - - - - unique identifier for the compiler unit test was compiled with - - - - - unique identifier for the STL implementation used during unit test compilation - - - - - version of the boost used - - - - - - - - - diff --git a/doc/src/UTF.report.xsd b/doc/src/UTF.report.xsd deleted file mode 100755 index 3a8d62e4..00000000 --- a/doc/src/UTF.report.xsd +++ /dev/null @@ -1,82 +0,0 @@ - - - - - - - - - - - - - - - name of the test unit - - - - - result status for the test unit: passed, failed, skipped or aborted - - - - - number of assertions that passed during execution of the test unit - - - - - number of assertion that failed during execution of the test unit - - - - - number of assertions that expected to fail in the test unit - - - - - - - - - - - - - - - - - - - - number of test cases that passed in the test suite - - - - - number of test cases that failed in the test suite - - - - - number of test cases that were skipped in the test suite - - - - - number of test cases in the test suite that were aborted during execution by an exception or a fatal error - - - - - - - - - - - - diff --git a/doc/src/btl-toc.xml b/doc/src/btl-toc.xml index 171c9799..f20e3ebc 100644 --- a/doc/src/btl-toc.xml +++ b/doc/src/btl-toc.xml @@ -20,38 +20,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -104,46 +72,37 @@ - - - - - - - - - - - - + + + - - - + + + - - - + + + @@ -152,21 +111,27 @@ - - - - + + + + + + - - + + + - - + + + - - + + + + @@ -211,25 +176,32 @@ - - - - - - - + + + + + + + + + + + + + + - - - + + - - - + + + + + - - - + + @@ -248,6 +220,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/src/btl.xml b/doc/src/btl.xml index c052a272..50987e5d 100644 --- a/doc/src/btl.xml +++ b/doc/src/btl.xml @@ -18,6 +18,9 @@ 2006 2007 2008 + 2009 + 2010 + 2011 Gennadiy Rozental @@ -156,10 +159,10 @@ + - diff --git a/doc/src/examples/example.sln 
b/doc/src/examples/example.sln index c76ff3ad..957e488b 100755 --- a/doc/src/examples/example.sln +++ b/doc/src/examples/example.sln @@ -1,7 +1,7 @@  -Microsoft Visual Studio Solution File, Format Version 9.00 -# Visual Studio 2005 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "example", "example.vcproj", "{9C8197C8-60ED-4D4F-9FFE-F3DFC4C57AE5}" +Microsoft Visual Studio Solution File, Format Version 11.00 +# Visual C++ Express 2010 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "example", "example.vcxproj", "{9C8197C8-60ED-4D4F-9FFE-F3DFC4C57AE5}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution diff --git a/doc/src/examples/example.vcproj b/doc/src/examples/example.vcproj deleted file mode 100755 index dec3eeca..00000000 --- a/doc/src/examples/example.vcproj +++ /dev/null @@ -1,196 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/doc/src/examples/example.vcxproj b/doc/src/examples/example.vcxproj new file mode 100755 index 00000000..3dcd1977 --- /dev/null +++ b/doc/src/examples/example.vcxproj @@ -0,0 +1,89 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + + + + + {9C8197C8-60ED-4D4F-9FFE-F3DFC4C57AE5} + Win32Proj + + + + Application + + + Application + + + + + + + + + + + + + <_ProjectFileVersion>10.0.30319.1 + Debug\ + Debug\ + true + Release\ + Release\ + true + + + + Disabled + ../../../../../;%(AdditionalIncludeDirectories) + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + EnableFastChecks + MultiThreadedDebugDLL + + + Level3 + EditAndContinue + + + true + Console + MachineX86 + + + "$(TargetDir)\$(TargetName).exe" --result_code=no + + + + + ../../../../../;%(AdditionalIncludeDirectories) + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + MultiThreadedDLL + + + Level3 + ProgramDatabase + + + true + Console + true + true + MachineX86 + + + + + + \ No newline at end of file diff --git 
a/doc/src/examples/example25.cpp b/doc/src/examples/example25.cpp index 60fbcd15..c9bc9b9d 100755 --- a/doc/src/examples/example25.cpp +++ b/doc/src/examples/example25.cpp @@ -3,7 +3,7 @@ //____________________________________________________________________________// -int foo() { throw std::runtime_exception( "big trouble" ); } +int foo() { throw std::runtime_error( "big trouble" ); } //____________________________________________________________________________// diff --git a/doc/src/examples/example38.cpp b/doc/src/examples/example38.cpp index f38a3633..d6c9bd41 100755 --- a/doc/src/examples/example38.cpp +++ b/doc/src/examples/example38.cpp @@ -7,9 +7,9 @@ BOOST_AUTO_TEST_CASE( test ) { - double res = std::sin( 45. ); + double res = std::sin( 45. ); // sin 45 radians is actually ~ 0.85, sin 45 degrees is ~0.707 - BOOST_WARN_MESSAGE( res > 1, "sin(45){" << res << "} is <= 1. Hmm.. Strange. " ); + BOOST_WARN_MESSAGE( res < 0.71, "sin(45){" << res << "} is > 0.71. Arg is not in radian?" ); } //____________________________________________________________________________// diff --git a/doc/src/examples/example38.output b/doc/src/examples/example38.output index fa759260..c9d8e0dc 100755 --- a/doc/src/examples/example38.output +++ b/doc/src/examples/example38.output @@ -1,5 +1,5 @@ > example --log_level=warning Running 1 test case... -test.cpp(12): warning in "test": sin(45){0.850904} is <= 1. Hmm.. Strange. +test.cpp(12): warning in "test": sin(45){0.850904} is > 0.71. Arg is not in radian? *** No errors detected \ No newline at end of file diff --git a/doc/src/execution-monitor.xml b/doc/src/execution-monitor.xml index 3de9aeca..522ee251 100644 --- a/doc/src/execution-monitor.xml +++ b/doc/src/execution-monitor.xml @@ -256,7 +256,7 @@ second and so on). Unfortunately this feature is, at the moment, implemented only for the Microsoft family of compilers (and Intel, if it employs Microsoft C Runtime Library). 
Also it can not be tuned per instance of the monitor and is only triggered globally and reported after the whole program execution is done. In a future this - ought to be improved. An interface is composed from two free functions residing in namespace boost: + ought to be improved. An interface is composed from two free functions residing in namespace boost::debug: @@ -266,8 +266,8 @@ void break_memory_alloc( long mem_alloc_order_num ); Use function detect_memory_leaks to switch memory leaks detection on/off. Use break_memory_alloc to break a program execution at allocation specified by mem_alloc_order_num argument. The Unit Test Framework - provides a runtime parameter (--detect_memory_leak=yes or no) allowing you to manage this feature during monitored - unit tests. + provides a runtime parameter (--detect_memory_leaks=0 or 1 or N>1, where N is memory allocation number) + allowing you to manage this feature during monitored unit tests. diff --git a/doc/src/faq.xml b/doc/src/faq.xml index 426d085f..66d8c722 100644 --- a/doc/src/faq.xml +++ b/doc/src/faq.xml @@ -30,7 +30,7 @@ You can send a bug report to the boost users' mailing list and/or directly to - Gennadiy Rozental. + Gennadiy Rozental. @@ -44,7 +44,7 @@ You can send a request to the boost developers' mailing list and/or directly to - Gennadiy Rozental. + Gennadiy Rozental. diff --git a/doc/src/minimal-testing.xml b/doc/src/minimal-testing.xml index 58417ad7..a89efead 100644 --- a/doc/src/minimal-testing.xml +++ b/doc/src/minimal-testing.xml @@ -14,7 +14,7 @@ original version of Boost.Test. As the name suggest, it provides only minimal basic facilities for test creation. It have no configuration parameters (either command line arguments or environment variables) and it supplies a limited set of testing tools which behaves similarly to ones defined amount - the Unit Test Framework Testing tools. The &mtf; supplies its own function + the Unit Test Framework Testing tools. 
The &mtf; supplies its own function main() (so can not be used for multi unit testing) and will execute the test program in a monitored environment. @@ -131,7 +131,7 @@ - Their behavior is modeled after the similarly named tools + Their behavior is modeled after the similarly named tools implemented by the Unit Test Framework. diff --git a/doc/src/program-execution-monitor.xml b/doc/src/program-execution-monitor.xml index 53f32384..bc3f19ed 100644 --- a/doc/src/program-execution-monitor.xml +++ b/doc/src/program-execution-monitor.xml @@ -40,7 +40,7 @@ Uniform error reporting can be also useful in test environments such as the Boost regression tests. Be aware though in such case it might be preferable to use the Unit Test Framework, cause it allows one - to use the Testing tools and generate more detailed error information. + to use the Testing tools and generate more detailed error information. diff --git a/doc/src/utf.examples.xml b/doc/src/utf.examples.xml new file mode 100644 index 00000000..5547aed7 --- /dev/null +++ b/doc/src/utf.examples.xml @@ -0,0 +1,304 @@ + +UTF"> +]> +
+ The &utf; usage examples collection + Examples collection + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
\ No newline at end of file diff --git a/doc/src/utf.tutorials.xml b/doc/src/utf.tutorials.xml index 008156a9..a51ff052 100644 --- a/doc/src/utf.tutorials.xml +++ b/doc/src/utf.tutorials.xml @@ -8,7 +8,9 @@ You think writing tests is difficult, annoying and fruitless work? I beg to differ. Read through these tutorials - and I am sure you will agree. + and I am sure you will agree. One other thing I do suggest you to take a look is + compilation instructions, especially if you plan to build and use standalone + library. diff --git a/doc/src/utf.users-guide.fixture.xml b/doc/src/utf.user-guide.fixture.xml similarity index 100% rename from doc/src/utf.users-guide.fixture.xml rename to doc/src/utf.user-guide.fixture.xml diff --git a/doc/src/utf.user-guide.glossary.xml b/doc/src/utf.user-guide.glossary.xml new file mode 100644 index 00000000..6cfcf8d3 --- /dev/null +++ b/doc/src/utf.user-guide.glossary.xml @@ -0,0 +1,235 @@ + +UTF"> +]> +
+ Introduction … or what's your name?Introduction + + + Without further ado, let's define terms regularly used by the &utf;. + + + + + + + + The test module + + + This is a single binary that performs the test. Physically a test module consists of one or more test source files, + which can be built into an executable or a dynamic library. A test module that consists of a single test source + file is called single-file test module. Otherwise + it's called multi-file test module. Logically a test + module consists of four parts: test setup (or test initialization), + test body, test cleanup and + test runner. The test runner part is optional. If a test module is built as + an executable the test runner is built-in. If a test module is built as a dynamic library, it is run by an + external test runner. + + + + + The test body + + + This is the part of a test module that actually performs the test. + Logically test body is a collection of test assertions wrapped in + test cases, which are organized in a test tree + . + + + + + The test tree + + + This is a hierarchical structure of test suites (non-leaf nodes) and + test cases (leaf nodes). + + + + + The test unit + + + This is a collective name when referred to either test suite or + test case + + + + + Test assertion + + + This is a single binary condition (binary in a sense that is has two outcomes: pass and fail) checked + by a test module. + + + There are different schools of thought on how many test assertions a test case should consist of. Two polar + positions are the one advocated by TDD followers - one assertion per test case; and opposite of this - all test + assertions within single test case - advocated by those only interested in the first error in a + test module. The &utf; supports both approaches. + + + + + The test case + + + This is an independently monitored function within a test module that + consists of one or more test assertions. 
The term "independently monitored" in the definition above is + used to emphasize the fact, that all test cases are monitored independently. An uncaught exception or other normal + test case execution termination doesn't cause the testing to cease. Instead the error is caught by the test + case execution monitor, reported by the &utf; and testing proceeds to the next test case. Later on you are going + to see that this is on of the primary reasons to prefer multiple small test cases to a single big test function. + + + + + The test suite + + + This is a container for one or more test cases. The test suite gives you an ability to group + test cases into a single referable entity. There are various reasons why you may opt to do so, including: + + + + To group test cases per subsystems of the unit being tested. + + + To share test case setup/cleanup code. + + + To run selected group of test cases only. + + + To see test report split by groups of test cases + + + To skip groups of test cases based on the result of another test unit in a test tree. + + + + A test suite can also contain other test suites, thus allowing a hierarchical test tree structure to be formed. + The &utf; requires the test tree to contain at least one test suite with at least one test case. The top level + test suite - root node of the test tree - is called the master test suite. + + + + + The test setup + + + This is the part of a test module that is responsible for the test + preparation. It includes the following operations that take place prior to a start of the test: + + + + + The &utf; initialization + + + + + Test tree construction + + + + + Global test module setup code + + + + + Per test case" setup code, invoked for every test case it's assigned to, is also attributed to the + test initialization, even though it's executed as a part of the test case. + + + + + The test cleanup + + + This is the part of test module that is responsible for cleanup operations. 
+ + + + + The test fixture + + + Matching setup and cleanup operations are frequently united into a single entity called test fixture. + + + + + The test runner + + + This is an "executive manager" that runs the show. The test runner's functionality should include + the following interfaces and operations: + + + + + Entry point to a test module. This is usually either the function main() itself or single function that can be + invoked from it to start testing. + + + + + Initialize the &utf; based on runtime parameters + + + + + Select an output media for the test log and the test results report + + + + + Select test cases to execute based on runtime parameters + + + + + Execute all or selected test cases + + + + + Produce the test results report + + + + + Generate a test module result code. + + + + + An advanced test runner may provide additional features, including interactive GUI interfaces, + test coverage and profiling support. + + + + + The test log + + + This is the record of all events that occur during the testing. + + + + + The test results report + + + This is the report produced by the &utf; after the testing is completed, that indicates which test cases/test + suites passed and which failed. + + + + +
diff --git a/doc/src/utf.user-guide.initialization.xml b/doc/src/utf.user-guide.initialization.xml new file mode 100644 index 00000000..1cc6d340 --- /dev/null +++ b/doc/src/utf.user-guide.initialization.xml @@ -0,0 +1,150 @@ + +UTF"> +]> +
+ Test module initialization … or ready, set … + Test module initialization + + + There are two tasks that you may need to perform before actual testing can start: + + + + + + The test tree needs to be built (unless you are using automated test units registration). + + + + + Custom test module initialization needs to be performed. This includes + initialization of the code under test and custom tune-up of the &utf; parameters (for example the test log or the + test results report output streams redirection). + + + + + + The function dedicated for this purpose is called the test module initialization function. Alternatively you can + employ global fixtures, covered in details, including differences in two approaches, in + . + + + + The &utf; requires you to implement the test module initialization function. The test runner supplied with the static + library or single-header variants of the &utf; requires the specific function specification. The test runner supplied + with the dynamic library variant of the &utf; requires the specific initialization function signature only. + + + + For many test modules you don't need to do any custom initialization + and test tree construction is automated. In this case you don't really need the initialization function and + the &utf; provides a way to automatically generate an empty one for you. + + + + Original design of the &utf; supported the manual test tree construction only. Later versions introduced the + automated registration of test units. In later versions of the &utf; the original initialization function + specification became inconvenient and unnecessary unsafe. So the alternative initialization function specification + was introduced. This change is not backward compatible. The test runners supplied with the static library and + single-header variants of the &utf; by default still require original initialization function specification, but + support compilation flags that switch to the alternative one. 
The test + runner supplied with dynamic library variant of the &utf; requires new specification and doesn't support + original one. The plan is to deprecate the original initialization function specification in one of the future + releases and ultimately to stop supporting it. + + + + The initialization function invocation is monitored by the &utf; the same way as all the test cases. An unexpected + exception or system error detected during initialization function invocation is treated as initialization error and + is reported as such. + + +
+ Original initialization function signature and name + Original initialization function + + + The original design of the &utf; initialization required you to implement the function with the following + specification: + + + boost::unit_test::test_suite* init_unit_test_suite( int argc, char* argv[] ); + + + This function was intended to initialize and return a master test suite. The null value was considered an initialization + error. The current design of the &utf; maintains master test suite instance internally and does not treat the null result + value as an initialization error. In fact it's recommended to return null value always and register test units in the + master test suite using the regular test suite add interface. The only way to indicate an initialization error is to throw the + boost::unit_test::framework::setup_error exception. + + + + The initialization function parameters argc, argv provide the command line arguments specified during test + module invocation. It's guarantied that any framework-specific command line arguments are excluded. To be + consisted with the alternative initialization function specification it's recommended though to access the + command line arguments using the master test suite interface. + +
+ +
+ Alternative initialization function signature and name + Alternative initialization function + + + The alternative design of the &utf; initialization requires you to implement a function with the following + specification: + + + bool init_unit_test(); + + + The result value of this function indicates whether or not initialization was successful. To register test + units in a master test suite use the test suite add interface. To access command line arguments use the master + test suite interface. It's guarantied that any framework-specific command line arguments are excluded. + +
+ +
+ Initialization function signature access + + + The test runner interface needs to refer to the initialization function signature. The &utf; provides the typedef + that resolves to proper signature in all configurations: + + + namespace boost { +namespace unit_test { +#ifdef BOOST_TEST_ALTERNATIVE_INIT_API +typedef bool (*init_unit_test_func)(); +#else +typedef test_suite* (*init_unit_test_func)( int, char* [] ); +#endif +} +} + +
+ +
+ Automated generation of the test module initialization function + Automated generation + + + To automatically generate an empty test module initialization function you need to define + before including the + boost/test/unit_test.hpp header. The value of this define is ignored. + Alternatively you can define the macro to be equal to + any string (not necessarily in quotes). This macro causes the same result as + , and in addition the macro value becomes the name of the + master test suite. + + + + + For a test module consisting of multiple source files you have to define these flags in a single test file only. + Otherwise you end up with multiple instances of the initialization function. + + +
+
diff --git a/doc/src/utf.user-guide.runtime-config.xml b/doc/src/utf.user-guide.runtime-config.xml index 86b079a4..d17592d5 100644 --- a/doc/src/utf.user-guide.runtime-config.xml +++ b/doc/src/utf.user-guide.runtime-config.xml @@ -265,6 +265,24 @@ Leaving test suite "example" + + Break execution path + BOOST_TEST_BREAK_EXEC_PATH + break_exec_path" + + + string consisting of space separate test_name:execution_path_number pairs + + + + + this runtime parameter is used by exception safety tester. By default exception safety tester only reports index of + execution path and test case name where failure occurred. Using this parameter you can make the tester to break the + execution right before entering this path. + + + + Print build info BOOST_TEST_BUILD_INFO @@ -301,25 +319,20 @@ Leaving test suite "example" - - Detect memory leaks - BOOST_TEST_DETECT_MEMORY_LEAK - detect_memory_leaks + + Produce color output + BOOST_TEST_COLOR_OUTPUT + color_output - 0 - 1 - integer value > 1 + no + yes - positive value tells the framework to detect the memory leaks (if any). Any value greater then 1 in addition - is treated as leak allocation number and setup runtime breakpoint. In other words setting this parameter to - the positive value N greater than 1 causes the framework to set a breakpoint at Nth memory allocation (don't - do that from the command line - only when you are under debugger). Note: if your test program produce memory - leaks notifications, they are combined with allocation number values you could use to set a breakpoint. - Currently only applies to MS family of compilers. + The &utf; is able to produce color output on systems which supports it. To enable this behavior set the parameter to + 'yes'. By default the output produces in not colored. 
@@ -340,7 +353,30 @@ Leaving test suite "example" - + + + Detect memory leaks + BOOST_TEST_DETECT_MEMORY_LEAK + detect_memory_leaks + + + 0 + 1 + integer value > 1 + + + + + positive value tells the framework to detect the memory leaks (if any). Any value greater then 1 in addition + is treated as leak allocation number and setups runtime breakpoint. In other words setting this parameter to + the positive value N greater than 1 causes the framework to set a breakpoint at Nth memory allocation (don't + do that from the command line - only when you are under debugger). Note: if your test program produce memory + leaks notifications, they are combined with allocation number values you could use to set a breakpoint. + Currently only applies to MS family of compilers. + + + + The log format BOOST_TEST_LOG_FORMAT @@ -419,6 +455,27 @@ Leaving test suite "example" + + The log sink name + BOOST_TEST_LOG_SINK + log_sink + + + stdout + stderr + arbitrary file name + + + + + This parameter allows easily redirect the test log. The parameter value is the string containing either a file + name, in which case the &utf; will redirect log into file with that name, or 'stdout', in which case log is + redirected into standard output stream, or 'stderr' , in which case log is redirected into standard error stream. + Default is 'stdout' + + + + The output format BOOST_TEST_OUTPUT_FORMAT @@ -495,6 +552,27 @@ Leaving test suite "example" + + The report sink name + BOOST_TEST_REPORT_SINK + report_sink + + + stderr + stdout + arbitrary file name + + + + + This parameter allows easily redirect the test results report. The parameter value is the string containing either + a file name, in which case the &utf; will redirect results report into file with that name, or 'stdout', in which case + report is redirected into standard output stream, or 'stderr', in which case report is redirected into standard error + stream. Default is 'stderr'. 
+ + + + [Do not] return result code BOOST_TEST_RESULT_CODE @@ -532,6 +610,26 @@ Leaving test suite "example" + + Save patterm + BOOST_TEST_SAVE_PATTERN + save_pattern + + + no + yes + + + + + this parameter serves no particular purpose within the framework itself. It can be used by test modules relying + on output_test_stream to implement testing logic. output_test_stream has two modes of operation: save the pattern + file and match against stored pattern. You can use this parameter to switch between these modes, by passing the + parameter value to the output_test_stream constructor. + + + + Show progress BOOST_TEST_SHOW_PROGRESS diff --git a/doc/src/utf.users-guide.test-organization.xml b/doc/src/utf.user-guide.test-organization.xml similarity index 90% rename from doc/src/utf.users-guide.test-organization.xml rename to doc/src/utf.user-guide.test-organization.xml index 54123376..3110bfaa 100644 --- a/doc/src/utf.users-guide.test-organization.xml +++ b/doc/src/utf.user-guide.test-organization.xml @@ -113,16 +113,47 @@ - Manually registered test case + Test case with automated registration - Test case with automated registration + Manually registered test case +
+ Nullary function based test case with automated registration + Automated registration + + + To create a nullary free function cased test case, which is registered in place of implementation, employ the + macro BOOST_AUTO_TEST_CASE. + + + + + + + + + + The macro is designed to closely mimic nullary free function syntax. Changes that are required to make an + existing test case, implemented as a free function, registered in place are illustrated in the following + example (compare with ): + + + + Nullary function based test case with automated registration + + + + With this macro you don't need to implement the initialization function at all. The macro creates and + registers the test case with the name free_test_function automatically. + +
+
Manually registered nullary function based test case Manual registration @@ -212,37 +243,6 @@ .
- -
- Nullary function based test case with automated registration - Automated registration - - - To create a nullary free function cased test case, which is registered in place of implementation, employ the - macro BOOST_AUTO_TEST_CASE. - - - - - - - - - - The macro is designed to closely mimic nullary free function syntax. Changes that are required to make an - existing test case, implemented as a free function, registered in place are illustrated in the following - example (compare with ): - - - - Nullary function based test case with automated registration - - - - With this macro you don't need to implement the initialization function at all. The macro creates and - registers the test case with the name free_test_function automatically. - -
Unary function based test case @@ -329,20 +329,76 @@ - - - Manually registered test case - template - - Test case template with automated registration + + + Manually registered test case + template + + +
+ Test case template with automated registration + Automated registration + + + To create a test case template registered in place of implementation, employ the macro + BOOST_AUTO_TEST_CASE_TEMPLATE. This facility is also called auto test case template. + + + + + + + + + + + + The macro BOOST_AUTO_TEST_CASE_TEMPLATE requires three arguments: + + + + + + + + The test case template name + + + unique test cases template identifier + + + + + The name of a formal template parameter + + + name of the type the test case template is instantiated with + + + + + The collection of types to instantiate test case template with + + + arbitrary MPL sequence + + + + + + + Test case template with automated registration + +
+
Manually registered test case template Manual registration @@ -438,62 +494,6 @@ Manually registered test case template
- -
- Test case template with automated registration - Automated registration - - - To create a test case template registered in place of implementation, employ the macro - BOOST_AUTO_TEST_CASE_TEMPLATE. This facility is also called auto test case template. - - - - - - - - - - - - The macro BOOST_AUTO_TEST_CASE_TEMPLATE requires three arguments: - - - - - - - - The test case template name - - - unique test cases template identifier - - - - - The name of a formal template parameter - - - name of the type the test case template is instantiated with - - - - - The collection of types to instantiate test case template with - - - arbitrary MPL sequence - - - - - - - Test case template with automated registration - -
@@ -510,99 +510,22 @@ - Manually registered test suite + Test suite with automated registration - Test suite with automated registration + Manually registered test suite -
- Test unit registration interface - - - The &utf; models the notion of test case container - test suite - using class boost::unit_test::test_suite. For - complete class interface reference check advanced section of this documentation. Here you should only be - interested in a single test unit registration interface: - - - void test_suite::add( test_unit* tc, counter_t expected_failures = 0, int timeout = 0 ); - - - The first parameter is a pointer to a newly created test unit. The second optional parameter - - expected_failures - defines the number of test assertions that are expected to fail within the test unit. By - default no errors are expected. - - - - - Be careful when supplying a number of expected failures for test suites. By default the &utf; calculates the - number of expected failures in test suite as the sum of appropriate values in all test units that constitute - it. And it rarely makes sense to change this. - - - - - The third optional parameter - timeout - defines the timeout value for the test unit. As of now the &utf; - isn't able to set a timeout for the test suite execution, so this parameter makes sense only for test case - registration. By default no timeout is set. See the method - boost::execution_monitor::execute for more details about the timeout value. - - - - To register group of test units in one function call the boost::unit_test::test_suite provides another add - interface covered in the advanced section of this documentation. - -
- -
- Manually registered test suites - Manual registration - - - To create a test suite manually, employ the macro BOOST_TEST_SUITE: - - - - - - - - - - BOOST_TEST_SUITE creates an instance of the class boost::unit_test::test_suite and returns a pointer to the - constructed instance. Alternatively you can create an instance of class boost::unit_test::test_suite yourself. - - - - - boost::unit_test::test_suite instances have to be allocated on the heap and the compiler won't allow you - to create one on the stack. - - - - - Newly created test suite has to be registered in a parent one using add interface. Both test suite creation and - registration is performed in the test module initialization function. - - - - Manually registered test suites - - - - This example creates a test tree, which can be represented by the following hierarchy: - - - - - - - -
+ + In addition the &utf; presents a notion of + Master Test Suite. The most important + reason to learn about this component is that it provides an ability to access command line arguments supplied + to a test module. +
Test suites with automated registration @@ -656,6 +579,101 @@
+ +
+ Manually registered test suites + Manual registration + + + To create a test suite manually you need to create an instance of boost::unit_test::test_suite class, register + it in test tree and populate it with test cases (or lower level test suites). + + +
+ Test unit registration interface + + + The &utf; models the notion of test case container - test suite - using class boost::unit_test::test_suite. For + complete class interface reference check advanced section of this documentation. Here you should only be + interested in a single test unit registration interface: + + + void test_suite::add( test_unit* tc, counter_t expected_failures = 0, int timeout = 0 ); + + + The first parameter is a pointer to a newly created test unit. The second optional parameter - + expected_failures - defines the number of test assertions that are expected to fail within the test unit. By + default no errors are expected. + + + + + Be careful when supplying a number of expected failures for test suites. By default the &utf; calculates the + number of expected failures in test suite as the sum of appropriate values in all test units that constitute + it. And it rarely makes sense to change this. + + + + + The third optional parameter - timeout - defines the timeout value for the test unit. As of now the &utf; + isn't able to set a timeout for the test suite execution, so this parameter makes sense only for test case + registration. By default no timeout is set. See the method + boost::execution_monitor::execute for more details about the timeout value. + + + + To register group of test units in one function call the boost::unit_test::test_suite provides another add + interface covered in the advanced section of this documentation. + +
+ +
+ Test suite instance construction + + + To create a test suite instance manually, employ the macro BOOST_TEST_SUITE. It hides all implementation + details and you only required to specify the test suite name: + + + + + + + + + + BOOST_TEST_SUITE creates an instance of the class boost::unit_test::test_suite and returns a pointer to the + constructed instance. Alternatively you can create an instance of class boost::unit_test::test_suite yourself. + + + + + boost::unit_test::test_suite instances have to be allocated on the heap and the compiler won't allow you + to create one on the stack. + + + + + Newly created test suite has to be registered in a parent one using add interface. Both test suite creation and + registration is performed in the test module initialization function. + +
+ + + Manually registered test suites + + + + This example creates a test tree, which can be represented by the following hierarchy: + + + + + + + +
+
Master Test Suite diff --git a/doc/src/utf.users-guide.test-output.xml b/doc/src/utf.user-guide.test-output.xml similarity index 98% rename from doc/src/utf.users-guide.test-output.xml rename to doc/src/utf.user-guide.test-output.xml index bf6f5cf7..8a3e42b4 100644 --- a/doc/src/utf.users-guide.test-output.xml +++ b/doc/src/utf.user-guide.test-output.xml @@ -16,7 +16,7 @@ All test errors are reported uniformly The test execution monitor along with standardized output from all included - testing tools provides uniform reporting for all errors including fatal + testing tools provides uniform reporting for all errors including fatal errors, like memory assess violation and uncaught exceptions. @@ -175,10 +175,10 @@ Logging tool arguments - Most of the testing tools print values of their arguments to the output + Most of the testing tools print values of their arguments to the output stream in some form of log statement. If arguments type does not support operator<<(std::ostream&, ArgumentType const&) interface you will get a compilation error. You can either implement above - interface or prohibit the testing tools from logging argument values for + interface or prohibit the testing tools from logging argument values for specified type. To do so use following statement on file level before first test case that includes statement failing to compile: @@ -473,7 +473,7 @@ Boost : $BOOST_VERSION - Advanced testing tools may produce more complicated error messages. + Advanced testing tools may produce more complicated error messages.
diff --git a/doc/src/utf.user-guide.test-runners.xml b/doc/src/utf.user-guide.test-runners.xml new file mode 100644 index 00000000..bada48be --- /dev/null +++ b/doc/src/utf.user-guide.test-runners.xml @@ -0,0 +1,155 @@ + +UTF"> +]> + +
+ The supplied test runners … or where is the entrance? + Supplied test runners + + + All usage variants of the &utf;, excluding the + external test runner, supply the test runner in a form of + free function named unit_test_main with the following signature: + + + int unit_test_main( init_unit_test_func init_func, int argc, char* argv[] ); + + + To invoke the test runner you are required to supply the pointer to the test module + initialization function as the first argument to the test runner function. In majority of the cases this function is + invoked directly from test executable entry point - function main(). In most usage variants the &utf; can + automatically generate default function main() implementation as either part of the library or test module itself. + Since the function main needs to refer to the initialization function by name, it is predefined by the default + implementation and you are required to match both specific signature and name, when implementing initialization + function. If you for any reason prefer more flexibility you can opt to implement the function main() yourself, in + which case it's going to be your responsibility to invoke the test runner, but the initialization function name is + not enforces the &utf;. See below for flags that needs to be defined/undefined in each usage variant to enable this. + + + + + In spite syntactic similarity the signatures of the test runner function in fact are different for different usage + variants. The cause is different signature of the test module initialization function referred by the + typedef init_unit_test_func. This makes static + and dynamic library usage variants incompatible and they can't be easily switched on a fly. + + + +
+ Static library variant of the &utf; + Static library + + + By default this variant supplies the function main() as part of static library. If this is for any reason undesirable + you need to define the flag during the library + compilation and the function main() implementation won't be generated. + + + + In addition to the initialization function signature requirement + default function main() implementation assumes the name of initialization function is init_unit_test_suite + + +
+ +
+ Dynamic library variant of the &utf; + Dynamic library + + + Unlike the static library variant function main() can't reside in the dynamic library body. Instead this variant + supplies default function main() implementation as part of the header + boost/test/unit_test.hpp to be generated as part of your test file body. + The function main() is generated only if either the or + the flags are defined during a test module compilation. + For single-file test module flags can be defined either in a + test module's makefile or before the header boost/test/unit_test.hpp + inclusion. For a flags can't + be defined in makefile and have to be defined in only one of the test files to avoid duplicate copies of the + function main(). + + + + + The same flags also govern generation of an empty + test module initialization function. This means that if you + need to implement either function main() or initialization function manually, you can't define the above flags + and are required to manually implement both of them. + + +
+ +
+ Single-header variant of the &utf; + Single header + + + By default this variant supplies function main() as part of the header + boost/test/included/unit_test.hpp to be generated as part of your test file + body. If this is for any reason undesirable you need to define the flag + during test module compilation and the function main() + implementation won't be generated. + +
+ +
+ External test runner variant of the &utf; + External test runner + + + The external test runner variant of the &utf; supplies the test runner in a form of standalone utility + boost_test_runner. You are free to implement different, more advanced, test runners that can be used with this + variant. + + + + + +
+ +
+ Generated exit status values + + + Once testing is finished, all supplied test runners report the results and returns an exit status value. Here are + the summary of all possible generated values: + + + + Generated exit status values + + + + + + Value + Meaning + + + + + boost::exit_success + + No errors occurred during the test or the success result code was explicitly requested with the no_result_code + parameter. + + + + boost::exit_test_failure + + Non-fatal errors detected and no uncaught exceptions were thrown during testing or the &utf; fails during + initialization. + + + + boost::exit_exception_failure + + Fatal errors were detected or uncaught exceptions thrown during testing. + + + + +
+
+
diff --git a/doc/src/utf.user-guide.testing-tools.xml b/doc/src/utf.user-guide.testing-tools.xml new file mode 100644 index 00000000..653463da --- /dev/null +++ b/doc/src/utf.user-guide.testing-tools.xml @@ -0,0 +1,1168 @@ +UTF"> +]> +
+ The &utf; testing tools … or tester's toolbox for all occasions + Testing tools + +
+ Introduction + + + The &utf;'s supplies a toolbox of testing tools to ease creation and maintenance of test programs and + provide a uniform error reporting mechanism. The toolbox supplied in most part in a form of macro and function + declarations. While the functions can be called directly, the usual way to use testing tools is via convenience + macros. All macros arguments are calculated once, so it's safe to pass complex expressions in their place. + All tools automatically supply an error location: a file name and a line number. The testing tools are intended + for unit test code rather than library or production code, where throwing exceptions, using assert(), + boost::concept_check or BOOST_STATIC_ASSERT() may be more suitable + ways to detect and report errors. For list of all supplied testing tools and usage examples see the reference. + +
+ +
+ Testing tools flavors + + + All the tools are supplied in three flavors(levels): WARN, CHECK and + REQUIRE. For example: BOOST_WARN_EQUAL, + BOOST_CHECK_EQUAL, BOOST_REQUIRE_EQUAL. If an assertion designated by + the tool passes, confirmation message can be printed in log outputto manage what messages appear + in the test log stream set the proper log + level. If an assertion designated by the tool failed, depending on the level following + will happenedin some cases log message can be slightly different to reflect failed tool + specifics: + + + + Testing tools levels differences + + + + + + + + + Level + Test log content + Errors counter + Test execution + + + + + + WARN + + warning in <test case name>: condition + <assertion description> is not satisfied + + not affected + continues + + + + CHECK + + error in <test case name>: test + <assertion description> failed + + increased + continues + + + + REQUIRE + + fatal error in <test case name>: critical test + <assertion description> failed + + increased + aborts + + + +
+ + + Regularly you should use CHECK level tools to implement your assertions. You can use WARN level tools to validate + aspects less important then correctness: performance, portability, usability etc. You should use REQUIRE level + tools only if continuation of the test case doesn't make sense if this assertions fails. + +
+ +
+ Output testing tool + + + How do you perform correctness test for operator<<( std::ostream&, ... ) + operations? You can print into the standard output stream and manually check that it is matching your expectations. + Unfortunately, this is not really acceptable for the regression testing and doesn't serve a long term purpose of a + unit test. You can use std::stringstream and compare resulting output buffer with the + expected pattern string, but you are required to perform several additional operations with every check you do. So it + becomes tedious very fast. The class output_test_stream is designed to + automate these tasks for you. This is a simple, but powerful tool for testing standard + std::ostream based output operation. The class output_test_stream + complies to std::ostream interface so it can be used in place of any + std::ostream parameter. It provides several test methods to validate output content, + including test for match to expected output content or test for expected output length. Flushing, synchronizing, + string comparison and error message generation is automated by the tool implementation. + + + + All output_test_stream validation methods by default flush the stream once check is performed. + If you want to perform several checks with the same output, specify parameter flush_stream + with value false. This parameter is supported on all comparison methods. + + + + In some cases manual generation of expected output is either too time consuming or is impossible at all bacause + of sheer volume. What we need in cases like this is to be able to check once manually that the output is as expected + and to be able in a future check that it stays the same. To help manage this logic the class + output_test_stream allows matching output content versus specified pattern file and produce + pattern file based on successful test run. + + + + Detailed specification of class output_test_stream is covered in reference section. + + +
+ Usage + + + There are two ways to employ the class output_test_stream: explicit output checks and + pattern file matching. + +
+ + + output_test_stream usage with explicit output checks + + + Use the instance of class output_test_stream as an output stream and check output content using tool's methods. + Note use of false to prevent output flushing in first two invocation of check functions. Unless + you want to perform several different checks for the same output you wouldn't need to use it though. Your + test will look like a serious of output operators followed by one check. And so on again. Try to perform checks as + frequently as possible. It not only simplifies patterns you compare with, but also allows you to more closely + identify possible source of failure. + + + + + output_test_stream usage for pattern file matching + + + Even simpler: no need to generate expected patterns. Though you need to keep the pattern file all the time somewhere + around. Your testing will look like a serious of output operators followed by match pattern checks repeated several + times. Try to perform checks as frequently as possible, because it allows you to more closely identify possible source + of failure. Content of the pattern file is: + + + i=2 +File: test.cpp Line: 14 + + +
+ +
+ Custom predicate support + + + Even though supplied testing tools cover wide range of possible checks and provide detailed report on cause of error + in some cases you may want to implement and use custom predicate that perform complex check and produce intelligent + report on failure. To satisfy this need testing tools implement custom predicate support. There two layers of custom + predicate support implemented by testing tools toolbox: with and without custom error message generation. + + + + The first layer is supported by BOOST_CHECK_PREDICATE family of testing tools. You can use it to check any custom + predicate that reports the result as boolean value. The values of the predicate arguments are reported by the tool + automatically in case of failure. + + + + Custom predicate support using BOOST_CHECK_PREDICATE + + + + To use second layer your predicate have to return + boost::test_tools::predicate_result. This class encapsulates boolean result value along + with any error or information message you opt to report. + + + + Usually you construct the instance of class boost::test_tools::predicate_result inside your + predicate function and return it by value. The constructor expects one argument - the boolean result value. The + constructor is implicit, so you can simply return boolean value from your predicate and + boost::test_tools::predicate_result is constructed automatically to hold your value and empty + message. You can also assign boolean value to the constructed instance. You can check the current predicate value by + using operator!() or directly accessing public read-only property p_predicate_value. The + error message is stored in public read-write property p_message. + + + + Custom predicate support using class predicate_result + +
+ +
+ Floating-point comparison algorithms + + + In most cases it is unreasonable to use an operator==(...) for a floating-point values equality check. + The simple, absolute value comparison based, solution for a floating-point values u, + v and a tolerance &egr;: + + + + |uv| ≤ &egr; + + + + does not produce expected results in many circumstances - specifically for very small or very big values (See + for examples). The &utf; implements floating-point comparison algorithm that is + based on the more confident solution first presented in : + + + + |uv| ≤ &egr; × |u| ∧ |uv| ≤ &egr; × |v| + + + + defines a very close with tolerance &egr; relationship between u and v + + + + |uv| ≤ &egr; × |u| ∨ |uv| ≤ &egr; × |v| + + + + defines a close enough with tolerance &egr; relationship between u and v + + + + Both relationships are commutative but are not transitive. The relationship defined by inequations + (2) is stronger + that the relationship defined by inequations (3) + (i.e. (2) ⇒ + (3)). Because of the multiplication in the right side + of inequations, that can cause an unwanted underflow condition, the implementation is using modified version of the + inequations (2) and + (3) where all underflow, overflow conditions can be + guarded safely: + + + + |uv| / |u| ≤ &egr; ∧ |uv| / |v| ≤ &egr; + + + + |uv| / |u| ≤ &egr; ∨ |uv| / |v| ≤ &egr; + + + + Checks based on equations (4) and + (5) are implemented by two predicates with + alternative interfaces: binary predicate close_at_tolerancecheck type + and tolerance value are fixed at predicate construction time and predicate with four arguments + check_is_closecheck type and tolerance value are the arguments of the + predicate. + + + + While equations (4) and + (5) in general are preferred for the general floating + point comparison check over equation (1), they are + unusable for the test on closeness to zero. 
The latter check might still be useful in some cases and the &utf;
+ Tolerance selection considerations + + + In case of absence of domain specific requirements the value of tolerance can be chosen as a sum of the predicted + upper limits for "relative rounding errors" of compared values. The "rounding" is the operation by which a real + value 'x' is represented in a floating-point format with 'p' binary digits (bits) as the floating-point value 'X'. + The "relative rounding error" is the difference between the real and the floating point values in relation to real + value: |x-X|/|x|. The discrepancy between real and floating point value may be caused by several reasons: + + + + Type promotion + Arithmetic operations + Conversion from a decimal presentation to a binary presentation + Non-arithmetic operation + + + + The first two operations proved to have a relative rounding error that does not exceed ½ × + "machine epsilon value" for the appropriate floating point type (represented by + std::numeric_limits<FPT>::epsilon()). Conversion to binary presentation, sadly, does + not have such requirement. So we can't assume that float 1.1 is close to real 1.1 with tolerance ½ + × "machine epsilon value" for float (though for 11./10 we can). Non arithmetic operations either do not have a + predicted upper limit relative rounding errors. Note that both arithmetic and non-arithmetic operations might also + produce others "non-rounding" errors, such as underflow/overflow, division-by-zero or 'operation errors'. + + + + All theorems about the upper limit of a rounding error, including that of ½ × epsilon, refer only to + the 'rounding' operation, nothing more. This means that the 'operation error', that is, the error incurred by the + operation itself, besides rounding, isn't considered. In order for numerical software to be able to actually + predict error bounds, the IEEE754 standard requires arithmetic operations to be 'correctly or exactly rounded'. 
+ That is, it is required that the internal computation of a given operation be such that the floating point result + is the exact result rounded to the number of working bits. In other words, it is required that the computation used + by the operation itself doesn't introduce any additional errors. The IEEE754 standard does not require same behavior + from most non-arithmetic operation. The underflow/overflow and division-by-zero errors may cause rounding errors + with unpredictable upper limits. + + + + At last be aware that ½ × epsilon rules are not transitive. In other words combination of two + arithmetic operations may produce rounding error that significantly exceeds 2 × ½ × epsilon. All + in all there are no generic rules on how to select the tolerance and users need to apply common sense and domain/ + problem specific knowledge to decide on tolerance value. + + + + To simplify things in most usage cases latest version of algorithm below opted to use percentage values for + tolerance specification (instead of fractions of related values). In other words now you use it to check that + difference between two values does not exceed x percent. + + + + For more reading about floating-point comparison see references below. + +
+ + + A floating-point comparison related references + + Books + + + KnuthII + + The art of computer programming (vol II) + Donald. E.Knuth + 1998Addison-Wesley Longman, Inc. + 0-201-89684-2 + Addison-Wesley Professional; 3 edition + + + + Kulisch + + + Rounding near zero + + + <ulink url="http://www.amazon.com/Advanced-Arithmetic-Digital-Computer-Kulisch/dp/3211838708">Advanced Arithmetic for the Digital Computer</ulink> + + Ulrich WKulisch + 2002Springer, Inc. + 0-201-89684-2 + Springer; 1 edition + + + + + + Periodicals + + + Squassabia + + <ulink url="http://www.adtmag.com/joop/carticle.aspx?ID=396">Comparing Floats: How To Determine if Floating Quantities Are Close Enough Once a Tolerance Has Been Reached</ulink> + AlbertoSquassabia + + + C++ Report + March 2000. + + + + + Becker + + + The Journeyman's Shop: Trap Handlers, Sticky Bits, and Floating-Point Comparisons + PeteBecker + + + C/C++ Users Journal + December 2000. + + + + + + Publications + + + Goldberg + + + <ulink url="http://citeseer.ist.psu.edu/goldberg91what.html">What Every Computer Scientist Should Know About Floating-Point Arithmetic</ulink> + DavidGoldberg + 1991Association for Computing Machinery, Inc. + 150-230 + + + Computing Surveys + March. + + + + + Langlois + + <ulink url="http://www.inria.fr/rrrt/rr-3967.html">From Rounding Error Estimation to Automatic Correction with Automatic Differentiation</ulink> + PhilippeLanglois + 2000 + 0249-6399 + + + + Kahan + + <ulink url="http://www.cs.berkeley.edu/~wkahan/">Lots of information on William Kahan home page</ulink> + WilliamKahan + + + + +
+ +
+ The &utf; testing tools reference + Reference + + + + + + + + + + + + + + + + + These tools are used to validate the predicate value. The only parameter for these tools is a boolean predicate + value that gets validated. It could be any expression that could be evaluated and converted to boolean value. The + expression gets evaluated only once, so it's safe to pass complex expression for validation. + + + + BOOST_<level> usage + + + + BOOST_<level>_MESSAGE + + + + + + + + + + + + + + + + + + + + + These tools are used to perform bitwise comparison of two values. The check shows all positions where left and + right value's bits mismatch. + + + + The first parameter is the left compared value. The second parameter is the right compared value. Parameters are + not required to be of the same type, but warning is issued if their type's size does not coincide. + + + + BOOST_<level>_BITWISE_EQUAL usage + + + + BOOST_<level>_EQUAL + + + + + + + + + + + + + + + + + + + + + + + + These tools are used to check on closeness using strong relationship defined by the predicate + check_is_close( left, right, tolerance ). To check for the weak relationship use + BOOST_<level>_PREDICATE family of tools with explicit check_is_close + invocation. + + + + The first parameter is the left compared value. The second parameter is the + right compared value. Last third parameter defines the tolerance for the comparison in + percentage units. + + + + + It is required for left and right parameters to be of the same floating point type. You will need to explicitly + resolve any type mismatch to select which type to use for comparison. + + + + + + Note that to use these tools you need to include additional header floating_point_comparison.hpp. 
+ + + + + BOOST_<level>_CLOSE usage with very small values + + + + BOOST_<level>_CLOSE usage with very big values + + + + BOOST_<level>_CLOSE_FRACTION, BOOST_<level>_SMALL, BOOST_<level>_EQUAL, + Floating point comparison algorithms + + + + + + + + + + + + + + + + + + + + + + + + These tools are used to check on closeness using strong relationship defined by the predicate + check_is_close( left, right, tolerance ). To check for the weak relationship use + BOOST_<level>_PREDICATE family of tools with explicit check_is_close + invocation. + + + + The first parameter is the left compared value. The second parameter is the + right compared value. Last third parameter defines the tolerance for the comparison as + fraction of absolute + values being compared. + + + + + It is required for left and right parameters to be of the same floating point type. You will need to explicitly + resolve any type mismatch to select which type to use for comparison. + + + + + + Note that to use these tools you need to include additional header floating_point_comparison.hpp. + + + + + BOOST_<level>_CLOSE_FRACTION usage + + + + BOOST_<level>_CLOSE, BOOST_<level>_SMALL, BOOST_<level>_EQUAL, + Floating point comparison algorithms + + + + + + + + + + + + + + + + + + + + + Check performed by these tools is the same as the one performed by BOOST_<level>( left == right ). + The difference is that the mismatched values are reported as well. + + + + + It is bad idea to use these tools to compare floating point values. Use BOOST_<level>_CLOSE or + BOOST_<level>_CLOSE_FRACTION tools instead. + + + + + BOOST_<level>_EQUAL usage + + + + BOOST_<level>, BOOST_<level>_CLOSE, BOOST_<level>_NE + + + + + + + + + + + + + + + + + + + + + + + + + + + These tools are used to perform an element by element comparison of two collections. They print all mismatched + positions, collection elements at these positions and check that the collections have the same size. 
The first two + parameters designate begin and end of the first collection. The two parameters designate begin and end of the + second collection. + + + + BOOST_<level>_EQUAL_COLLECTIONS usage + + + + BOOST_<level>_EQUAL + + + + + + + + + + + + + + + + + + + + + + + + These tools are used to perform an exception detection and validation check. Tools execute the supplied expression + and validate that it throws an exception of supplied class (or the one derived from it) that complies with the + supplied predicate. If the expression throws any other unrelated exception, doesn't throw at all or + predicate evaluates to false, check fails. In comparison with BOOST_<level>_THROW tools these + allow performing more fine-grained checks. For example: make sure that an expected exception has specific + error message. + + + + BOOST_<level>_EXCEPTION usage + + + + BOOST_<level>_THROW + + + + + + + + + + + + + + + + + + + + + Check performed by these tools is the same as the one performed by BOOST_<level>( left >= right ). + The difference is that the argument values are reported as well. + + + + BOOST_<level>_GE usage + + + + BOOST_<level>_LE, BOOST_<level>_LT, BOOST_<level>_GT + + + + + + + + + + + + + + + + + + + + + Check performed by these tools is the same as the one performed by BOOST_<level>( left > right ). + The difference is that the argument values are reported as well. + + + + BOOST_<level>_GT usage + + + + BOOST_<level>_LE, BOOST_<level>_LT, BOOST_<level>_GE + + + + + + + + + + + + + + + + + + + + + Check performed by these tools is the same as the one performed by BOOST_<level>( left <= right ). + The difference is that the argument values are reported as well. + + + + BOOST_<level>_LE usage + + + + BOOST_<level>_LT, BOOST_<level>_GE, BOOST_<level>_GT + + + + + + + + + + + + + + + + + + + + + Check performed by these tools is the same as the one performed by BOOST_<level>( left < right ). + The difference is that the argument values are reported as well. 
+ + + + BOOST_<level>_LT usage + + + + BOOST_<level>_LE, BOOST_<level>_GE, BOOST_<level>_GT + + + + + + + + + + + + + + + + + + + + + These tools perform exactly the same check as BOOST_<level> tools. The only difference is that + instead of generating an error/confirm message these use the supplied one. + + + + The first parameter is the boolean expression. The second parameter is the message reported in case of check + failure. The message argument can be constructed of components of any type supporting the + std::ostream& operator<<(std::ostream&). + + + + BOOST_<level>_MESSAGE usage + + + + BOOST_<level> + + + + + + + + + + + + + + + + + + + + + Check performed by these tools is the same as the one performed by BOOST_<level>( left != right ). + The difference is that the matched values are reported as well. + + + + BOOST_<level>_NE usage + + + + BOOST_<level>_EQUAL + + + + + + + + + + + + + + + + + + These tools are used to perform a "no throw" check. Tools execute the supplied expression and validate that it does + not throw any exceptions. Error would be reported by the framework even if the statement appear directly in test + case body and throw any exception. But these tools allow proceeding further with test case in case of failure. + + + + If check is successful, tools may produce a confirmation message, in other case they produce an error message in + a form "error in <test case name>exception was thrown by <expression>. + + + + The only parameter is an expression to execute. You can use do-while(0) block if you want to execute more than one + statement. + + + + BOOST_<level>_NO_THROW usage + + + + BOOST_<level>_THROW + + + + + + + + + + + + + + + + + + + + + These are generic tools used to validate an arbitrary supplied predicate functor (there is a compile time limit on + predicate arity defined by the configurable macro BOOST_TEST_MAX_PREDICATE_ARITY). To + validate zero arity predicate use BOOST_<level> tools. In other cases prefer theses tools. 
The + advantage of these tools is that they show arguments values in case of predicate failure. + + + + The first parameter is the predicate itself. The second parameter is the list of predicate arguments each wrapped + in round brackets (BOOST_PP sequence format). + + + + + Note difference in error log from + + BOOST_<level>_PREDICATE usage + + + + BOOST_<level> + + + + + + + + + + + + + + + + + + + + + These tools are used to check that supplied value is small enough. The "smallness" is defined by absolute value + of the tolerance supplied as a second argument. Use these tools with caution. To compare to values on closeness + it's preferable to use BOOST_<level>_CLOSE tools instead. + + + + The first parameter is the value to check. The second parameter is the tolerance. + + + + + Note that to use these tools you need to include additional header floating_point_comparison.hpp. + + + + + BOOST_<level>_SMALL usage + + + + BOOST_<level>_CLOSE, BOOST_<level>_CLOSE_FRACTION, + Floating point comparison algorithms + + + + + + + + + + + + + + + + + + + + + These tools are used to perform an exception detection check. Tools execute the supplied expression and validate + that it throws an exception of supplied class (or the one derived from it) or it's child. If the statement + throws any other unrelated exception or doesn't throw at all, check fails. + + + + If check is successful, the tool produces a confirmation message, in other case it produces an error message in a + form "error in test case name: exception exception expected. + + + + The first parameter is the expression to execute. Use do-while(0) block if you want to execute more than one + statement. The second parameter is an expected exception. + + + + BOOST_<level>_THROW usage + + + + BOOST_<level>NO_THROW + + + + + + + + + + + + BOOST_ERROR tool behave the same way as BOOST_CHECK_MESSAGE( false, message ). This tool is used for + an unconditional error counter increasing and message logging. 
+ + + + The only tool's parameter is an error message to log. + + + + BOOST_ERROR usage + + + + BOOST_<level> + + + + + + + + + + + + BOOST_FAIL behave the same way as BOOST_REQUIRE_MESSAGE( false, message ). This tool is used for an + unconditional error counter increasing, message logging and the current test case aborting. + + + + The only tool's parameter is an error message to log. + + + + BOOST_FAIL usage + + + + BOOST_<level> + + + + + + + + + + + + Unlike the rest of the tools in the toolbox this tool does not perform the logging itself. Its only purpose + is to check at runtime whether or not the supplied preprocessor symbol is defined. Use it in combination with + BOOST_<level> to perform and log validation. Macros of any arity could be checked. To check the + macro definition with non-zero arity specify dummy arguments for it. See below for example. + + + + The only tool's parameter is a preprocessor symbol that gets validated. + + + + BOOST_IS_DEFINED usage + + + + BOOST_<level> + + + + + +
+
diff --git a/doc/src/utf.user-guide.usage-variants.xml b/doc/src/utf.user-guide.usage-variants.xml new file mode 100644 index 00000000..3187dc4b --- /dev/null +++ b/doc/src/utf.user-guide.usage-variants.xml @@ -0,0 +1,158 @@ + +UTF"> +]> +
+ The &utf; usage variants … or the <ulink url="http://en.wikipedia.org/wiki/Buridan's_ass">Buridan's donkey</ulink> parable + Usage variants + + + The &utf; presents you with 4 different variants of how it can be used. + + + + + The static library variant + + + The dynamic library variant + + + The single-header variant + + + The external test runner variant + + + + + Unlike the Buridan's donkey though, you shouldn't have problems deciding which one to use, since there are + clear reasons why you would prefer each one. + + + + In most cases to compile a test module based on the &utf; all you need to include is just the single header + boost/test/unit_test.hpp. This header includes internally most of the other + headers that contain the &utf; definitions. Some advanced features, like the floating point comparison or the + logged expectations testing, are defined in independent headers and need to be included explicitly. + +
+ The static library variant of the &utf;Static library + + The &utf; can be built into a static library. If you opt to link a test module with the + standalone static library, this usage is called the static library + variant of the &utf;. + + + + The test runner supplied with this variant requires you to implement the test + module initialization function that matches one of the two specifications depending on the compilation flag + . If flag isn't defined you are required + to match the original specification. If you define the flag during a test module compilation you are required to use the alternative + initialization function specification. The &utf; provides an ability to + automatically generate an empty test module + initialization function with correct specification if no custom initialization is required by a test module. + + + + + If you opted to use an alternative initialization API, for a test module to be able to link with prebuilt library, + the flag has to be defined both during + library and a test module compilation. + + +
+ +
+ The dynamic library variant of the &utf; + Dynamic library + + + In the project with large number of test modules the static + library variant of the &utf; may cause you to waste a lot of disk space, since the &utf; is linked + statically with every test module. The solution is to link with the &utf; built into a dynamic library. If you opt + to link a test module with the prebuilt dynamic library, this usage is called the dynamic library variant of the + &utf;. This variant requires you to define the flag + either in a makefile or before the header boost/test/unit_test.hpp + inclusion. + + + + The test runner supplied with this variant requires you to implement the test + module initialization function that matches the alternative initialization function signature. The &utf; + provides an ability to automatically generate + an empty test module initialization function with correct signature if no custom initialization is required by a + test module. + + + + + The name of the test module initialization function is not enforced, since the function is passed as an argument + to the test runner. + + +
+ +
+ The single-header variant of the &utf; + Single header + + + If you prefer to avoid the standalone library compilation, you + should use the single-header variant of the &utf;. This variant is implemented, as it follows from its name, in + the single header boost/test/included/unit_test.hpp. An inclusion of + the header causes the complete implementation of the &utf; to be included as a part of a test module's + source file. The header boost/test/unit_test.hpp doesn't have to be + included anymore. You don't have to worry about disabling + auto-linking feature either. It's done in the implementation header already. This variant + can't be used with the . + Otherwise it's almost identical from the usage perspective to the static library variant of the &utf;. + In fact the only difference is the name of the include file: + boost/test/included/unit_test.hpp instead of + boost/test/unit_test.hpp. + + + + The test runner supplied with this variant requires you to implement the test + module initialization function that matches one of the two specifications depending on the compilation flag + . If flag isn't defined you are required to + match the original specification. If you define the flag + during a test module compilation you are + required to use the alternative initialization function specification. The &utf; provides an ability to + automatically generate an empty test module + initialization function with correct specification if no custom initialization is required by a test module. +
+ +
+ The external test runner variant of the &utf; + External test runner + + + All other usage variants employ the build-in test runners. If you plan to use an external test runner with your + test module you need to build it as a dynamic library. This usage of the &utf; is called the external test runner + variant of the &utf;. The variant requires you to define the flag + either in a makefile or before the header + boost/test/unit_test.hpp inclusion. An external test runner utility is + required to link with dynamic library. + + + + If an external test runner is based on the test runner built in to the dynamic library (like the standalone + boost_test_runner utility supplied by the &utf;), it requires you to implement the + test module initialization function that matches the alternative initialization function signature. The + &utf; provides an ability to automatically generate + an empty test module initialization function with correct signature if no custom initialization is required + by a test module. + + + + + An advanced test runner doesn't have to be based on the build-in one and may require a different + test module initialization function signature and/or name. + + +
+
diff --git a/doc/src/utf.user-guide.xml b/doc/src/utf.user-guide.xml new file mode 100644 index 00000000..a6f63e23 --- /dev/null +++ b/doc/src/utf.user-guide.xml @@ -0,0 +1,68 @@ + +UTF"> +]> +
+ Unit Test Framework: User's guideUser's guide + +
+ Introduction … or where to start?Introduction + + + Without further ado, I'd like to start … but where? It's not obvious what is the best order to describe the framework. + One can use a bottom-up approach, starting with basics and going up to cover real interfaces based on them. The downside is + that you'll have to dig through the multiple pages of information you may never need in real life. One can follow the order + of test program execution. From test initialization to test tree construction to the report and log generation. This also + unfortunately may not be the most clear way. The Boost.Test &utf; is very flexible and a lot of details of various test initialization + options may not necessarily be important for the average user, while understanding test output is. + + + Well … This is a User's Guide after all. Let's go by what you need to know to successfully use the &utf;. Thus I follow + the order of decisions you as a user have to make and order of complexity of the problems you have to solve. If you find yourself + faced with some unclear term feel free to jump directly to the Glossary section, + where I collect short definitions for all used terms. And again if you want to jump right into coding the + Tutorials section would be a better place to start. + + + The &utf; has several usage variants. And the first decision you have to make is which one to use. These variants are covered in + section dedicated to Usage variants. The next step, probably the most important + for you, is to start writing test cases, bind them in test suites and implement your checks. First two topics are covered in + Test organization section, while + Testing tools section arms you with a rich set of tools enough to implement + almost any arbitrary check you need. + + + Next you'll learn how to understand and manipulate the &utf; output in a Test output + section.
At that point you should be able to build and run most simple test modules and almost inevitably find a need to configure + how the test module is executed. Whether you want to change output format, select which test case to run or run test cases in random order + these and many other runtime configuration parameters are described in Runtime configuration + section. + + + One of the first non trivial things you might want to add to your test module is a test fixture. Fixture support is covered in + Test fixture section. Usually the default test module initialization will work just fine, + but if you want to implement some custom initialization or change how default initialization behaves you need to first look in + Test module initialization section. Here you'll learn about various options the &utf; + provides for you to customize this behavior. + + + Finally you might want to learn about how the &utf; implements entry points into the test modules. This is especially important if you + intend to implement the main function yourself (and not use the main function provided by the &utf;). The + Test runners section covers this subject. Different usage variants employ slightly + different approaches to implementing test module entry points and present slightly different interfaces. This section is intended for advanced + users; some of the details of the implementation are described there. + + +
+ + + + + + + + + + + +
diff --git a/doc/src/utf.users-guide.xml b/doc/src/utf.users-guide.xml deleted file mode 100644 index 5ac7b9e0..00000000 --- a/doc/src/utf.users-guide.xml +++ /dev/null @@ -1,698 +0,0 @@ - -UTF"> -]> -
- Unit Test Framework: User's guideUser's guide - -
- Introduction … or what's your name?Introduction - - - Without further ado, let's define terms regularly used by the &utf;. - - - - - - - - The test module - - - This is a single binary that performs the test. Physically a test module consists of one or more test source files, - which can be built into an executable or a dynamic library. A test module that consists of a single test source - file is called single-file test module. Otherwise - it's called multi-file test module. Logically a test - module consists of four parts: test setup (or test initialization), - test body, test cleanup and - test runner. The test runner part is optional. If a test module is built as - an executable the test runner is built-in. If a test module is built as a dynamic library, it is run by an - external test runner. - - - - - The test body - - - This is the part of a test module that actually performs the test. - Logically test body is a collection of test assertions wrapped in - test cases, which are organized in a test tree - . - - - - - The test tree - - - This is a hierarchical structure of test suites (non-leaf nodes) and - test cases (leaf nodes). - - - - - The test unit - - - This is a collective name when referred to either test suite or - test case - - - - - Test assertion - - - This is a single binary condition (binary in a sense that is has two outcomes: pass and fail) checked - by a test module. - - - There are different schools of thought on how many test assertions a test case should consist of. Two polar - positions are the one advocated by TDD followers - one assertion per test case; and opposite of this - all test - assertions within single test case - advocated by those only interested in the first error in a - test module. The &utf; supports both approaches. - - - - - The test case - - - This is an independently monitored function within a test module that - consists of one or more test assertions. 
The term "independently monitored" in the definition above is - used to emphasize the fact, that all test cases are monitored independently. An uncaught exception or other normal - test case execution termination doesn't cause the testing to cease. Instead the error is caught by the test - case execution monitor, reported by the &utf; and testing proceeds to the next test case. Later on you are going - to see that this is on of the primary reasons to prefer multiple small test cases to a single big test function. - - - - - The test suite - - - This is a container for one or more test cases. The test suite gives you an ability to group - test cases into a single referable entity. There are various reasons why you may opt to do so, including: - - - - To group test cases per subsystems of the unit being tested. - - - To share test case setup/cleanup code. - - - To run selected group of test cases only. - - - To see test report split by groups of test cases - - - To skip groups of test cases based on the result of another test unit in a test tree. - - - - A test suite can also contain other test suites, thus allowing a hierarchical test tree structure to be formed. - The &utf; requires the test tree to contain at least one test suite with at least one test case. The top level - test suite - root node of the test tree - is called the master test suite. - - - - - The test setup - - - This is the part of a test module that is responsible for the test - preparation. It includes the following operations that take place prior to a start of the test: - - - - - The &utf; initialization - - - - - Test tree construction - - - - - Global test module setup code - - - - - Per test case" setup code, invoked for every test case it's assigned to, is also attributed to the - test initialization, even though it's executed as a part of the test case. - - - - - The test cleanup - - - This is the part of test module that is responsible for cleanup operations. 
- - - - - The test fixture - - - Matching setup and cleanup operations are frequently united into a single entity called test fixture. - - - - - The test runner - - - This is an "executive manager" that runs the show. The test runner's functionality should include - the following interfaces and operations: - - - - - Entry point to a test module. This is usually either the function main() itself or single function that can be - invoked from it to start testing. - - - - - Initialize the &utf; based on runtime parameters - - - - - Select an output media for the test log and the test results report - - - - - Select test cases to execute based on runtime parameters - - - - - Execute all or selected test cases - - - - - Produce the test results report - - - - - Generate a test module result code. - - - - - An advanced test runner may provide additional features, including interactive GUI interfaces, - test coverage and profiling support. - - - - - The test log - - - This is the record of all events that occur during the testing. - - - - - The test results report - - - This is the report produced by the &utf; after the testing is completed, that indicates which test cases/test - suites passed and which failed. - - - - -
- -
- The &utf; usage variants … or the <ulink url="http://en.wikipedia.org/wiki/Buridan's_ass">Buridan's donkey</ulink> parable - Usage variants - - - The &utf; presents you with 4 different variants how it can be used. - - - - - The static library variant - - - The dynamic library variant - - - The single-header variant - - - The external test runner variant - - - - - Unlike the Buridan's donkey though, you shouldn't have problems deciding which one to use, since there are - clear reasons why would you prefer each one. - - - - In most cases to compile a test module based on the &utf; all you need to include is just the single header - boost/test/unit_test.hpp. This header includes internally most of the other - headers that contains the &utf; definitions. Some advanced features, like the floating point comparison or the - logged expectations testing, are defined in independent headers and need to be included explicitly. - - -
- The static library variant of the &utf;Static library - - The &utf; can be built into a static library. If you opt to link a test module with the - standalone static library, this usage is called the static library - variant of the &utf;. - - - - The test runner supplied with this variant requires you to implement the test - module initialization function that matches one of the two specifications depending on the compilation flag - . If flag isn't defined you are required - to match the original specification. If you define the flag during a test module compilation you are required to use the alternative - initialization function specification. The &utf; provides an ability to - automatically generate an empty test module - initialization function with correct specification if no custom initialization is required by a test module. - - - - - If you opted to use an alternative initialization API, for a test module to be able to link with prebuilt library, - the flag has to be defined both during - library and a test module compilation. - - -
- -
- The dynamic library variant of the &utf; - Dynamic library - - - In the project with large number of test modules the static - library variant of the &utf; may cause you to waste a lot of disk space, since the &utf; is linked - statically with every test module. The solution is to link with the &utf; built into a dynamic library. If you opt - to link a test module with the prebuilt dynamic library, this usage is called the dynamic library variant of the - &utf;. This variant requires you to define the flag - either in a makefile or before the header boost/test/unit_test.hpp - inclusion. - - - - The test runner supplied with this variant requires you to implement the test - module initialization function that matches the alternative initialization function signature. The &utf; - provides an ability to automatically generate - an empty test module initialization function with correct signature if no custom initialization is required by a - test module. - - - - - The name of the test module initialization function is not enforced, since the function is passed as an argument - to the test runner. - - -
- -
- The single-header variant of the &utf; - Single header - - - If you prefer to avoid the standalone library compilation, you - should use the single-header variant of the &utf;. This variant is implemented, as it follows from its name, in - the single header boost/test/included/unit_test.hpp. An inclusion of - the header causes the complete implementation of the &utf; to be included as a part of a test module's - source file. The header boost/test/unit_test.hpp doesn't have to be - included anymore. You don't have to worry about disabling - auto-linking feature either. It's done in the implementation header already. This variant - can't be used with the . - Otherwise it's almost identical from the usage prospective to the static library variant of the &utf;. - In fact the only difference is the name of the include file: - boost/test/included/unit_test.hpp instead of - boost/test/unit_test.hpp. - - - - The test runner supplied with this variant requires you to implement the test - module initialization function that matches one of the two specifications depending on the compilation flag - . If flag isn't defined you are required to - match the original specification. If you define the flag - during a test module compilation you are - required to use the alternative initialization function specification. The &utf; provides an ability to - automatically generate an empty test module - initialization function with correct specification if no custom initialization is required by a test module. - -
- -
- The external test runner variant of the &utf; - External test runner - - - All other usage variants employ the build-in test runners. If you plan to use an external test runner with your - test module you need to build it as a dynamic library. This usage of the &utf; is called the external test runner - variant of the &utf;. The variant requires you to define the flag - either in a makefile or before the header - boost/test/unit_test.hpp inclusion. An external test runner utility is - required to link with dynamic library. - - - - If an external test runner is based on the test runner built in to the dynamic library (like the standalone - boost_test_runner utility supplied by the &utf;), it requires you to implement the - test module initialization function that matches the alternative initialization function signature. The - &utf; provides an ability to automatically generate - an empty test module initialization function with correct signature if no custom initialization is required - by a test module. - - - - - An advanced test runner doesn't have to be based on the build-in one and may require a different - test module initialization function signature and/or name. - - -
-
- -
- The supplied test runners … or where is the entrance? - Supplied test runners - - - All usage variants of the &utf;, excluding the - external test runner, supply the test runner in a form of - free function named unit_test_main with the following signature: - - - int unit_test_main( init_unit_test_func init_func, int argc, char* argv[] ); - - - To invoke the test runner you are required to supply the pointer to the test module - initialization function as the first argument to the test runner function. In majority of the cases this function is - invoked directly from test executable entry point - function main(). In most usage variants the &utf; can - automatically generate default function main() implementation as either part of the library or test module itself. - Since the function main needs to refer to the initialization function by name, it is predefined by the default - implementation and you are required to match both specific signature and name, when implementing initialization - function. If you for any reason prefer more flexibility you can opt to implement the function main() yourself, in - which case it's going to be your responsibility to invoke the test runner, but the initialization function name is - not enforces the &utf;. See below for flags that needs to be defined/undefined in each usage variant to enable this. - - - - - In spite syntactic similarity the signatures of the test runner function in fact are different for different usage - variants. The cause is different signature of the test module initialization function referred by the - typedef init_unit_test_func. This makes static - and dynamic library usage variants incompatible and they can't be easily switched on a fly. - - - -
- Static library variant of the &utf; - Static library - - - By default this variant supplies the function main() as part of static library. If this is for any reason undesirable - you need to define the flag during the library - compilation and the function main() implementation won't be generated. - - - - In addition to the initialization function signature requirement - default function main() implementation assumes the name of initialization function is init_unit_test_suite - - -
- -
- Dynamic library variant of the &utf; - Dynamic library - - - Unlike the static library variant function main() can't reside in the dynamic library body. Instead this variant - supplies default function main() implementation as part of the header - boost/test/unit_test.hpp to be generated as part of your test file body. - The function main() is generated only if either the or - the flags are defined during a test module compilation. - For single-file test module flags can be defined either in a - test module's makefile or before the header boost/test/unit_test.hpp - inclusion. For a flags can't - be defined in makefile and have to be defined in only one of the test files to avoid duplicate copies of the - function main(). - - - - - The same flags also govern generation of an empty - test module initialization function. This means that if you - need to implement either function main() or initialization function manually, you can't define the above flags - and are required to manually implement both of them. - - -
- -
- Single-header variant of the &utf; - Single header - - - By default this variant supplies function main() as part of the header - boost/test/included/unit_test.hpp to be generated as part of your test file - body. If this is for any reason undesirable you need to define the flag - during test module compilation and the function main() - implementation won't be generated. - -
- -
- External test runner variant of the &utf; - External test runner - - - The external test runner variant of the &utf; supplies the test runner in a form of standalone utility - boost_test_runner. You are free to implement different, more advanced, test runners that can be used with this - variant. - - - - - -
- -
- Generated exit status values - - - Once testing is finished, all supplied test runners report the results and returns an exit status value. Here are - the summary of all possible generated values: - - - - Generated exit status values - - - - - - Value - Meaning - - - - - boost::exit_success - - No errors occurred during the test or the success result code was explicitly requested with the no_result_code - parameter. - - - - boost::exit_test_failure - - Non-fatal errors detected and no uncaught exceptions were thrown during testing or the &utf; fails during - initialization. - - - - boost::exit_exception_failure - - Fatal errors were detected or uncaught exceptions thrown during testing. - - - - -
-
-
- -
- Test module initialization … or ready, set … - Test module initialization - - - There are two tasks that you may need to perform before actual testing can start: - - - - - - The test tree needs to be built (unless you are using automated test units registration). - - - - - Custom test module initialization needs to be performed. This includes - initialization of the code under test and custom tune-up of the &utf; parameters (for example the test log or the - test results report output streams redirection). - - - - - - The function dedicated for this purpose is called the test module initialization function. Alternatively you can - employ global fixtures, covered in details, including differences in two approaches, in - . - - - - The &utf; requires you to implement the test module initialization function. The test runner supplied with the static - library or single-header variants of the &utf; requires the specific function specification. The test runner supplied - with the dynamic library variant of the &utf; requires the specific initialization function signature only. - - - - For many test modules you don't need to do any custom initialization - and test tree construction is automated. In this case you don't really need the initialization function and - the &utf; provides a way to automatically generate an empty one for you. - - - - Original design of the &utf; supported the manual test tree construction only. Later versions introduced the - automated registration of test units. In later versions of the &utf; the original initialization function - specification became inconvenient and unnecessary unsafe. So the alternative initialization function specification - was introduced. This change is not backward compatible. The test runners supplied with the static library and - single-header variants of the &utf; by default still require original initialization function specification, but - support compilation flags that switch to the alternative one. 
The test - runner supplied with the dynamic library variant of the &utf; requires the new specification and doesn't support - the original one. The plan is to deprecate the original initialization function specification in one of the future - releases and ultimately to stop supporting it. - - - - The initialization function invocation is monitored by the &utf; the same way as all the test cases. An unexpected - exception or system error detected during initialization function invocation is treated as an initialization error and - is reported as such. - - -
- Original initialization function signature and name - Original initialization function - - - The original design of the &utf; initialization requires you to implement the function with the following - specification: - - - boost::unit_test::test_suite* init_unit_test_suite( int argc, char* argv[] ); - - - In the original design of the &utf; this function was intended to initialize and return a master test suite. The null - value was considered an initialization error. The current design of the &utf; maintains master test suite instance - internally and does not treat the null result value as an initialization error. In fact it's recommended to - return null value always and register test units in the master test suite using the regular test suite add - interface. The only way to indicate an initialization error is to throw the - boost::unit_test::framework::setup_error exception. - - - - The initialization function parameters argc, argv provide the command line arguments specified during test - module invocation. It's guaranteed that any framework-specific command line arguments are excluded. To be - consistent with the alternative initialization function specification it's recommended though to access the - command line arguments using the master test suite interface. -
- -
- Alternative initialization function signature and name - Alternative initialization function - - - The alternative design of the &utf; initialization requires you to implement a function with the following - specification: - - - bool init_unit_test(); - - - The result value of this function indicates whether or not initialization was successful. To register test - units in a master test suite use the test suite add interface. To access command line arguments use the master - test suite interface. It's guaranteed that any framework-specific command line arguments are excluded. -
- -
- Initialization function signature access - - - The test runner interface needs to refer to the initialization function signature. The &utf; provides the typedef - that resolves to proper signature in all configurations: - - - namespace boost { -namespace unit_test { -#ifdef BOOST_TEST_ALTERNATIVE_INIT_API -typedef bool (*init_unit_test_func)(); -#else -typedef test_suite* (*init_unit_test_func)( int, char* [] ); -#endif -} -} - -
- -
- Automated generation of the test module initialization function - Automated generation - - - To automatically generate an empty test module initialization function you need to define - before including the - boost/test/unit_test.hpp header. The value of this define is ignored. - Alternatively you can define the macro to be equal to - any string (not necessarily in quotes). This macro causes the same result as - , and in addition the macro value becomes the name of the - master test suite. - - - - - For a test module consisting of multiple source files you have to define these flags in a single test file only. - Otherwise you end up with multiple instances of the initialization function. - - -
-
- - - - - -
diff --git a/doc/src/utf.xml b/doc/src/utf.xml index e57adede..4c9b270f 100644 --- a/doc/src/utf.xml +++ b/doc/src/utf.xml @@ -92,7 +92,7 @@ - Simplify writing test cases by using various testing tools. + Simplify writing test cases by using various testing tools. @@ -143,8 +143,8 @@ - For those interested in getting started quickly please visit collection of - examples presented in this documentation. + For those interested in getting started quickly please visit collection of + examples presented in this documentation.
@@ -161,7 +161,9 @@ The &utf; is comparatively complicated component and is implemented in close to hundred header and source files, so for long term usage the preferable solution is to build the &utf; as a reusable standalone library. Depending on your platform this may save you a significant time during test module compilation and doesn't - really require that much effort. + really require that much effort. If you are using Visual Studio compilers do not forget to + set a subsystem to console when you build test modules. You can do it either in project properties or by setting + command line /SUBSYSTEM:CONSOLE. A number of people have reported link errors caused specifically by this omission. Boost Getting started tells you how to get pre-built libraries for some platforms. If available, this is the easiest option and you can ignore standalone library compilation instructions below. @@ -229,7 +231,7 @@ -
+
Compilation procedures @@ -237,9 +239,9 @@ linking with the &utf; may require additional steps. The &utf; presents you with options to either built and link with a standalone library or include the implementation directly into a test module. - If you opt to use the library the &utf; headers implement the - auto-linking support. The compilation of the &utf; library and - a test module can be configured using the following compilation flags. + If you opt to use the library the &utf; headers implement the + auto-linking support. The compilation of the &utf; library and a test module can be configured using the + following compilation flags. @@ -314,7 +316,7 @@ (for example Microsoft Visual Studio). The Boost preferred solution is Boost.Build system that is based on top of bjam tool. Make systems require some kind of configuration file that lists all files that constitute the library and all build options. For example the makefile that is used by make, or the Microsoft Visual Studio project file, - Jamfile is used by Boost.Build. For the sake of simplicity let's call this file the makefile.. + Jamfile is used by Boost.Build. For the sake of simplicity let's call this file the makefile.. @@ -409,7 +411,7 @@ - - + + diff --git a/doc/src/xsl/html.xsl b/doc/src/xsl/html.xsl index a9a9b7cc..2609675a 100644 --- a/doc/src/xsl/html.xsl +++ b/doc/src/xsl/html.xsl @@ -370,7 +370,7 @@ - +