# (C) Copyright David Abrahams 2002.
# (C) Copyright Vladimir Prus 2002-2003.
# Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
# This module implements a regression testing framework. It declares a number
# of main target rules, which perform some action and, if the results are OK,
# create an output file.
#
# The exact list of rules is:
# 'compile'      -- creates a .test file if compilation of the sources was
#                   successful.
# 'compile-fail' -- creates a .test file if compilation of the sources failed.
# 'run'          -- creates a .test file if running the executable produced
#                   from the sources was successful. Also leaves behind a
#                   .output file with the output from the program run.
# 'run-fail'     -- same as above, but the .test file is created if running
#                   fails.
#
# In all cases, the presence of the .test file is an indication that the test
# passed. For more convenient reporting, you might want to use the C++ Boost
# regression testing utilities; see
# http://www.boost.org/more/regression.html
#
# For historical reasons, a 'unit-test' rule is available which
# has the same syntax as 'exe' and behaves just like 'run'.
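#
# Example usage in a test Jamfile (an illustrative sketch only; the source
# file names and feature values are placeholders, not part of this module):
#
#   compile has_feature_x.cpp ;
#   compile-fail must_not_compile.cpp ;
#   run basic_test.cpp : --verbose : : <define>RUN_BASIC_TESTS : basic ;
#   run-fail should_abort.cpp ;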
#
# Things to do:
#  - Teach compiler_status to handle Jamfile.v2.
# Notes:
#  - <no-warn> is not implemented, since it is Como-specific and it is not
#    clear how to implement it.
#  - std::locale support is not implemented (it is used in one test).
import targets ;
import "class" : new ;
# Qualified names from these modules are also used below (class.is-a,
# modules.peek, modules.call-in), so import them explicitly as well.
import "class" ;
import modules ;
import property ;
import feature ;
import toolset ;
import alias ;
import type ;
import generators ;
import project ;
import property-set ;
import virtual-target ;
import path ;
import os ;
import common ;
import sequence ;
import errors ;
rule init ( ) { }
# The feature which controls the name of the program used to
# launch test programs.
feature.feature testing.launcher : : optional free ;
feature.feature test-info : : free incidental ;
feature.feature testing.arg : : free incidental ;
feature.feature testing.input-file : : free dependency ;
# Register target types.
type.register TEST : test ;
type.register COMPILE : : TEST ;
type.register COMPILE_FAIL : : TEST ;
type.register RUN_OUTPUT : run ;
type.register RUN : : TEST ;
type.register RUN_FAIL : : TEST ;
type.register UNIT_TEST : passed : TEST ;
# Declare the rules which create main targets.
# While the 'type' module already creates rules with the same names for us,
# we need one extra convenience: a default name for the main target, so we
# write our own versions.
# Helper rule. Creates a test target, using the basename of the first source
# if no target name is explicitly passed. Remembers the created target in a
# global variable.
rule make-test ( target-type : sources + : requirements * : target-name ? )
{
target-name ?= $(sources[1]:D=:S=) ;
local project = [ project.current ] ;
# The <location-prefix> forces the build system to generate paths of the form
#   $build_dir/array1.test/gcc/debug
# This is necessary to allow post-processing tools to work.
local t =
[ targets.create-typed-target
[ type.type-from-rule-name $(target-type) ] : $(project)
: $(target-name) : $(sources)
: $(requirements) <location-prefix>$(target-name).test ] ;
# Remember the test (for --dump-tests).
# A better approach would be to collect all tests in a given project, but
# that has some technical problems: e.g. we cannot perform this dump from a
# Jamfile, since projects referred to by 'build-project' are not available
# until the whole Jamfile is loaded.
.all-tests += $(t) ;
return $(t) ;
}
rule compile ( sources + : requirements * : target-name ? )
{
return [ make-test compile : $(sources) : $(requirements) : $(target-name) ] ;
}
rule compile-fail ( sources + : requirements * : target-name ? )
{
return [ make-test compile-fail : $(sources) : $(requirements) : $(target-name) ] ;
}
rule handle-input-files ( input-files * )
{
if $(input-files[2])
{
# Check that the sorting done when creating the property-set instance
# will not change the ordering.
if [ sequence.insertion-sort $(input-files) ] != $(input-files)
{
errors.user-error "Names of input files must be sorted alphabetically"
: "due to internal limitations" ;
}
}
return <testing.input-file>$(input-files) ;
}
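# For instance (hypothetical file names), this is accepted:
#   run test.cpp : : a.txt b.txt ;
# while 'run test.cpp : : b.txt a.txt ;' is rejected with the error above.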
rule run ( sources + : args * : input-files * : requirements * : target-name ?
: default-build * )
{
requirements += <testing.arg>$(args:J=" ") ;
requirements += [ handle-input-files $(input-files) ] ;
return [ make-test run : $(sources) : $(requirements) : $(target-name) ] ;
}
rule run-fail ( sources + : args * : input-files * : requirements * : target-name ?
: default-build * )
{
requirements += <testing.arg>$(args:J=" ") ;
requirements += [ handle-input-files $(input-files) ] ;
return [ make-test run-fail : $(sources) : $(requirements) : $(target-name) ] ;
}
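# Example invocation of 'run' (a sketch; every name below is a placeholder).
# The 'args' become a single <testing.arg> requirement and the 'input-files'
# become <testing.input-file> requirements on the created target:
#
#   run regex_test.cpp : --match greedy : cases.txt : <link>static : regex ;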
# Rule for grouping tests in suites.
rule test-suite ( suite-name : tests + )
{
# In V2, if 'tests' are instances of 'abstract-target', they will be
# considered 'inline-targets' and will suffer some adjustments, which would
# not be compatible with V1 behaviour. So we take the names of the 'tests'
# and use those instead.
local names ;
for local t in $(tests)
{
names += [ $(t).name ] ;
}
modules.call-in [ CALLER_MODULE ] : alias $(suite-name) : $(names) ;
}
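# Example (a sketch with placeholder names; 'run' and 'compile-fail' return
# the targets they create, which 'test-suite' then aliases):
#
#   test-suite math_tests :
#       [ run addition.cpp ]
#       [ compile-fail bad_usage.cpp ] ;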
# For each recorded main target, which is a typed target with a type derived
# from 'TEST', produce some interesting information.
rule dump-tests # ( project-module )
{
for local t in $(.all-tests)
{
dump-test $(t) ;
}
}
# Given a project location, computes the name of the Boost library.
local rule get-library-name ( path )
{
# Path is in normalized form, so all slashes are forward.
local match1 = [ MATCH /libs/(.*)/(test|example) : $(path) ] ;
local match2 = [ MATCH /libs/(.*)$ : $(path) ] ;
local match3 = [ MATCH (/status$) : $(path) ] ;
if $(match1) { return $(match1[1]) ; }
else if $(match2) { return $(match2[1]) ; }
else if $(match3) { return "" ; }
else if --dump-tests in [ modules.peek : ARGV ]
{
EXIT Cannot extract library name from path $(path) ;
}
}
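# For example (illustrative paths only):
#   /home/user/boost/libs/array/test  ->  array
#   /home/user/boost/libs/python      ->  python
#   /home/user/boost/status           ->  "" (an empty library name)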
# Takes a target (an instance of 'basic-target') and prints
# - its type
# - its name
# - comments specified via the <test-info> property
# - the relative location of all sources from the project root.
rule dump-test ( target )
{
local type = [ $(target).type ] ;
local name = [ $(target).name ] ;
local project = [ $(target).project ] ;
local project-root = [ $(project).get project-root ] ;
local library = [ get-library-name
[ path.root [ $(project).get location ] [ path.pwd ] ] ] ;
if $(library)
{
name = $(library)/$(name) ;
}
local sources = [ $(target).sources ] ;
local source-files ;
for local s in $(sources)
{
if [ class.is-a $(s) : file-reference ]
{
source-files +=
[ path.relative
[ path.root [ $(s).location ] [ path.pwd ] ]
[ path.root $(project-root) [ path.pwd ] ] ] ;
}
}
local r = [ $(target).requirements ] ;
# Extract values of the <test-info> feature
local test-info = [ $(r).get <test-info> ] ;
# Format them into a single string of quoted strings
test-info = \"$(test-info:J=\"\ \")\" ;
ECHO boost-test($(type)) \"$(name)\"
[$(test-info)]
":" \"$(source-files)\"
;
}
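# A dumped line has the following form (an illustrative example; the actual
# values depend on the test declaration):
#
#   boost-test(RUN) "array/array1" ["some comment"] : "libs/array/test/array1.cpp"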
# Register generators. Depending on the target type, either the
# 'expect-success' or the 'expect-failure' rule will be used.
generators.register-standard testing.expect-success : OBJ : COMPILE ;
generators.register-standard testing.expect-failure : OBJ : COMPILE_FAIL ;
generators.register-standard testing.expect-success : RUN_OUTPUT : RUN ;
generators.register-standard testing.expect-failure : RUN_OUTPUT : RUN_FAIL ;
# Generator which runs an EXE and captures output.
generators.register-standard testing.capture-output : EXE : RUN_OUTPUT ;
# Generator which creates a target if the source runs successfully. Differs
# from RUN in that the run output is not captured. The reason it exists is
# that the 'run' rule is much better for automated testing, but is not
# user-friendly. See
# http://article.gmane.org/gmane.comp.lib.boost.build/6353/
generators.register-standard testing.unit-test : EXE : UNIT_TEST ;
# The action rules called by generators.
# Causes the 'target' to exist after the bjam invocation if and only if all
# the dependencies were successfully built.
rule expect-success ( target : dependency + : requirements * )
{
**passed** $(target) : $(dependency) ;
}
# Causes the 'target' to exist after the bjam invocation if and only if some
# of the dependencies were not successfully built.
rule expect-failure ( target : dependency + : properties * )
{
local grist = [ MATCH ^<(.*)> : $(dependency:G) ] ;
local marker = $(dependency:G=$(grist)*fail) ;
(failed-as-expected) $(marker) ;
FAIL_EXPECTED $(dependency) ;
LOCATE on $(marker) = [ on $(dependency) return $(LOCATE) ] ;
RMOLD $(marker) ;
DEPENDS $(marker) : $(dependency) ;
DEPENDS $(target) : $(marker) ;
**passed** $(target) : $(marker) ;
}
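# For illustration: given a dependency named <pbin/gcc/debug>foo.obj (a
# made-up grist), the marker above would be <pbin/gcc/debug*fail>foo.obj,
# located in the same directory as the dependency itself.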
# The rule/action combination used to report successful passing of a test.
rule **passed**
{
# Dump all the tests, if needed.
# We do it here, since the dump should happen after all Jamfiles are read,
# and there is currently no place defined for that (though there should be).
if ! $(.dumped-tests) && --dump-tests in [ modules.peek : ARGV ]
{
.dumped-tests = true ;
dump-tests ;
}
# Force deletion of the target, in case any dependencies failed
# to build.
RMOLD $(<) ;
}
actions **passed**
{
echo passed > $(<)
}
actions (failed-as-expected)
{
echo failed as expected > $(<)
}
rule run-path-setup ( target : source : properties * )
{
# For testing, we need to make sure that all dynamic libraries needed by the
# test are found. So, we collect all paths from dependency libraries (via the
# xdll-path property) and add whatever explicit dll-path the user has
# specified. The resulting paths are added to the environment on each test
# invocation.
local dll-paths = [ feature.get-values <dll-path> : $(properties) ] ;
dll-paths += [ feature.get-values <xdll-path> : $(properties) ] ;
dll-paths += [ on $(source) return $(RUN_PATH) ] ;
dll-paths = [ sequence.unique $(dll-paths) ] ;
if $(dll-paths)
{
dll-paths = [ sequence.transform path.native : $(dll-paths) ] ;
PATH_SETUP on $(target) =
[ common.prepend-path-variable-command
[ os.shared-library-path-variable ] : $(dll-paths) ] ;
}
}
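# On a typical Linux system, for example, the generated PATH_SETUP command
# might look something like this (illustrative only):
#   export LD_LIBRARY_PATH="/build/foo:/build/bar:$LD_LIBRARY_PATH"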
toolset.flags testing.capture-output ARGS <testing.arg> ;
toolset.flags testing.capture-output INPUT_FILES <testing.input-file> ;
toolset.flags testing.capture-output LAUNCHER <testing.launcher> ;
rule capture-output ( target : source : properties * )
{
output-file on $(target) = $(target:S=.output) ;
LOCATE on $(target:S=.output) = [ on $(target) return $(LOCATE) ] ;
# The INCLUDES kills a warning about an independent target...
INCLUDES $(target) : $(target:S=.output) ;
# ...but it also puts .output into the dependency graph, so we must tell jam
# it is OK if it cannot find the target or an updating rule.
NOCARE $(target:S=.output) ;
# This has a two-fold effect. First, it adds input files to the dependency
# graph, preventing a warning. Second, it causes input files to be bound
# before the target is created. Therefore, they are bound using the SEARCH
# setting on them and not the LOCATE setting of $(target), as would happen
# otherwise (due to a jam bug).
DEPENDS $(target) : [ on $(target) return $(INPUT_FILES) ] ;
run-path-setup $(target) : $(source) : $(properties) ;
}
if [ os.name ] = NT
{
STATUS = %status% ;
SET_STATUS = "set status=%ERRORLEVEL%" ;
RUN_OUTPUT_NL = "echo." ;
STATUS_0 = "%status% EQU 0 (" ;
STATUS_NOT_0 = "%status% NEQ 0 (" ;
VERBOSE = "%verbose% EQU 1 (" ;
ENDIF = ")" ;
SHELL_SET = "set " ;
CATENATE = type ;
CP = copy ;
}
else
{
STATUS = "$status" ;
SET_STATUS = "status=$?" ;
RUN_OUTPUT_NL = "echo" ;
STATUS_0 = "test $status -eq 0 ; then" ;
STATUS_NOT_0 = "test $status -ne 0 ; then" ;
VERBOSE = "test $verbose -eq 1 ; then" ;
ENDIF = "fi" ;
SHELL_SET = "" ;
CATENATE = cat ;
CP = cp ;
}
if --verbose-test in [ modules.peek : ARGV ]
{
VERBOSE_TEST = 1 ;
}
else
{
VERBOSE_TEST = 0 ;
}
actions capture-output bind INPUT_FILES output-file
{
$(PATH_SETUP)
$(LAUNCHER) $(>) $(ARGS) "$(INPUT_FILES)" > $(output-file) 2>&1
$(SET_STATUS)
$(RUN_OUTPUT_NL) >> $(output-file)
echo EXIT STATUS: $(STATUS) >> $(output-file)
if $(STATUS_0)
$(CP) $(output-file) $(<)
$(ENDIF)
$(SHELL_SET)verbose=$(VERBOSE_TEST)
if $(STATUS_NOT_0)
$(SHELL_SET)verbose=1
$(ENDIF)
if $(VERBOSE)
echo ====== BEGIN OUTPUT ======
$(CATENATE) $(output-file)
echo ====== END OUTPUT ======
$(ENDIF)
exit $(STATUS)
}
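# The resulting .output file thus contains the program's stdout and stderr,
# followed by a line of the form "EXIT STATUS: <n>"; on success it is also
# copied to the .run target $(<).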
MAKE_FILE = [ common.file-creation-command ] ;
toolset.flags testing.unit-test LAUNCHER <testing.launcher> ;
rule unit-test ( target : source : properties * )
{
run-path-setup $(target) : $(source) : $(properties) ;
}
actions unit-test
{
$(PATH_SETUP)
$(LAUNCHER) $(>) && $(MAKE_FILE) $(<)
}
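# Example usage of the 'unit-test' main target rule (a sketch; the target
# and source names are placeholders). Note the 'exe'-like syntax:
#
#   unit-test my_checks : my_checks.cpp : <link>static ;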
IMPORT $(__name__) : compile compile-fail test-suite run run-fail
: : compile compile-fail test-suite run run-fail ;
type.register TIME : time ;
generators.register-standard testing.time : : TIME ;
rule record-time ( target source : user : system )
{
local src-string = [$(source:G=:J=",")"] " ;
USER_TIME on $(target) += $(src-string)$(user) ;
SYSTEM_TIME on $(target) += $(src-string)$(system) ;
}
IMPORT testing : record-time : : testing.record-time ;
rule time ( target : source : properties * )
{
# Set up a rule for recording timing information.
__TIMING_RULE__ on $(source) = testing.record-time $(target) ;
# Make sure that the source is rebuilt any time we need to retrieve the
# timing information.
REBUILDS $(target) : $(source) ;
}
actions time
{
echo user: $(USER_TIME)
echo system: $(SYSTEM_TIME)
echo user: $(USER_TIME)" seconds" > $(<)
echo system: $(SYSTEM_TIME)" seconds" >> $(<)
}
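# Example usage of the 'time' main target rule (a sketch; the target and
# source names are placeholders):
#
#   obj heavy : heavy_template.cpp ;
#   time heavy_time : heavy ;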