# (C) Copyright David Abrahams 2002.
# (C) Copyright Vladimir Prus 2002-2003.
# Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.

# This module implements a regression testing framework. It declares a number of
# main target rules which perform some action and, if the results are ok,
# create an output file.
#
# The exact list of rules is:
# 'compile'      -- creates a .test file if compilation of the sources was successful.
# 'compile-fail' -- creates a .test file if compilation of the sources failed.
# 'run'          -- creates a .test file if running the executable produced from the
#                   sources was successful. Also leaves behind a .output file
#                   with the output from the program run.
# 'run-fail'     -- same as above, but the .test file is created if running fails.
# 'unit-test'    -- same as 'run', except that the output is not stored.
#
# In all cases except 'unit-test', presence of the .test file is an indication that
# the test passed. For more convenient reporting, you might want to use the C++ Boost
# regression testing utilities, see
# http://www.boost.org/more/regression.html

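# Example Jamfile usage (an illustrative sketch only; the file and target
# names below are hypothetical and not part of this module):
#
#   import testing ;
#
#   compile has_foo.cpp ;              # passes if has_foo.cpp compiles
#   compile-fail ill_formed.cpp ;      # passes if compilation fails
#   run hello.cpp : --verbose ;        # passes if the built program exits with 0
#   run-fail must_abort.cpp ;          # passes if the built program exits with non-zero
#   unit-test core : core_test.cpp ;   # like 'run', but no .output file is kept
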
# Things to do:
#  - Teach compiler_status to handle Jamfile.v2.
#  - Grab the RUN_PATH logic from V1 testing.jam.
#  - Implement all the parameters to 'run': args/input_files.
# Notes:
#  - <no-warn> is not implemented, since it is Como-specific and it's not clear how
#    to implement it.
#  - std::locale support is not implemented (it's used in one test).


import targets ;
import "class" : new ;
import modules ;
import property ;
import feature ;
import toolset ;
import alias ;
import type ;
import generators ;
import project ;
import property-set ;
import virtual-target ;
import path ;
import os ;
import common ;
import sequence ;

rule init ( ) { }


# The feature which controls the name of the program used to
# launch test programs.
feature.feature testing.launcher : : optional free ;
feature.feature test-info : : free incidental ;
feature.feature testing.arg : : free incidental ;
feature.feature testing.input-file : : free dependency ;

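# For example, a unit-test target can be run under a wrapper program by adding
# something like <testing.launcher>valgrind to its requirements; the 'unit-test'
# action below prefixes the test command with this value. (The 'valgrind' value
# here is only an illustration; any launcher command could be used.)
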
# Register target types.
type.register TEST : test ;
type.register COMPILE : : TEST : main ;
type.register COMPILE_FAIL : : TEST : main ;
type.register RUN_OUTPUT : run : : main ;
type.register RUN : : TEST : main ;
type.register RUN_FAIL : : TEST : main ;
type.register UNIT_TEST : passed : TEST : main ;

# Declare the rules which create main targets.
# While the 'type' module already creates rules with the same names for us,
# we need extra convenience: a default name for the main target, so we write
# our own versions.

# Helper rule. Creates a test target, using the basename of the first source if no
# target name is explicitly passed. Remembers the created target in
# a global variable.
rule make-test ( target-type : sources + : requirements * : target-name ? )
{
    target-name ?= $(sources[1]:D=:S=) ;

    local project = [ project.current ] ;
    # The <location-prefix> forces the build system to generate paths of the form
    #    $build_dir/array1.test/gcc/debug
    # This is necessary to allow post-processing tools to work.
    local t =
        [ targets.create-typed-target
            [ type.type-from-rule-name $(target-type) ] : $(project)
            : $(target-name) : $(sources)
            : $(requirements) <location-prefix>$(target-name).test ] ;

    # Remember the test (for --dump-tests).
    # A better way would be to collect all the tests of a given project.
    # That has some technical problems: e.g. we can't call this dump from a
    # Jamfile, since projects referred to by 'build-project' are not available until
    # the whole Jamfile is loaded.
    .all-tests += $(t) ;
    return $(t) ;
}

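# For example, 'compile foo.cpp ;' (with a hypothetical foo.cpp) creates a main
# target named 'foo' whose products end up under something like
#    <build-dir>/foo.test/gcc/debug/
# where the toolset and variant path elements depend on the actual build
# configuration; the '.test' directory comes from <location-prefix> above.
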
rule compile ( sources + : requirements * : target-name ? )
{
    return [ make-test compile : $(sources) : $(requirements) : $(target-name) ] ;
}

rule compile-fail ( sources + : requirements * : target-name ? )
{
    return [ make-test compile-fail : $(sources) : $(requirements) : $(target-name) ] ;
}

rule run ( sources + : args * : input-files * : requirements * : target-name ?
    : default-build * )
{
    requirements += <testing.arg>$(args:J=" ") ;
    requirements += <testing.input-file>$(input-files[1]) ;
    if $(input-files[2]) || $(default-build)
    {
        EXIT "Multiple input files and explicit default-build are not supported yet" ;
    }
    return [ make-test run : $(sources) : $(requirements) : $(target-name) ] ;
}

rule run-fail ( sources + : args * : input-files * : requirements * : target-name ?
    : default-build * )
{
    requirements += <testing.arg>$(args:J=" ") ;
    requirements += <testing.input-file>$(input-files[1]) ;
    if $(input-files[2]) || $(default-build)
    {
        EXIT "Multiple input files and explicit default-build are not supported yet" ;
    }
    return [ make-test run-fail : $(sources) : $(requirements) : $(target-name) ] ;
}


# Rule for grouping tests into suites.
rule test-suite ( suite-name : tests + )
{
    # In V2, if 'tests' are instances of 'abstract-target', they will be considered
    # 'inline-targets' and will suffer some adjustments. This would not be compatible
    # with V1 behaviour, so we take the names of 'tests' and use those instead.
    local names ;
    for local t in $(tests)
    {
        names += [ $(t).name ] ;
    }
    modules.call-in [ CALLER_MODULE ] : alias $(suite-name) : $(names) ;
}

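# Example usage in a Jamfile (an illustrative sketch; the suite and source
# names are hypothetical):
#
#   test-suite math_tests :
#       [ run add_test.cpp ]
#       [ compile-fail bad_add.cpp ]
#       ;
#
# The test rules above return the targets they create, so their results can be
# passed directly to 'test-suite', which simply aliases them under one name.
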
# For all main targets in 'project-module'
# which are typed targets with a type derived from 'TEST',
# produce some interesting information.
rule dump-tests # ( project-module )
{
    for local t in $(.all-tests)
    {
        dump-test $(t) ;
    }
}

rule dump-test ( target )
{
    local type = [ $(target).type ] ;
    local name = [ $(target).name ] ;
    local sources = [ $(target).sources ] ;
    local source-files ;
    for local s in $(sources)
    {
        if [ class.is-a $(s) : file-reference ]
        {
            source-files +=
                [ path.relative
                    [ path.root [ $(s).location ] [ path.pwd ] ]
                    /home/ghost/Work/boost ] ;
        }
    }

    local r = [ $(target).requirements ] ;
    # Extract the values of the <test-info> feature.
    local test-info = [ $(r).get <test-info> ] ;

    # Format them into a single string of quoted strings.
    test-info = \"$(test-info:J=\"\ \")\" ;

    ECHO boost-test($(type)) \"$(name)\"
        [$(test-info)]
        ":" \"$(source-files)\"
        ;
}

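# With --dump-tests, each test is reported on one line of roughly this form
# (the concrete values here are illustrative only):
#
#   boost-test(RUN) "array1" [""] : "libs/array/test/array1.cpp"
#
# i.e. the test type, the target name, any <test-info> values in brackets, and
# the source files relative to the boost root. The regression reporting tools
# mentioned in the header are expected to parse lines of this shape.
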
# Register generators. Depending on the target type, either the
# 'expect-success' or the 'expect-failure' rule will be used.
generators.register-standard testing.expect-success : OBJ : COMPILE ;
generators.register-standard testing.expect-failure : OBJ : COMPILE_FAIL ;
generators.register-standard testing.expect-success : RUN_OUTPUT : RUN ;
generators.register-standard testing.expect-failure : RUN_OUTPUT : RUN_FAIL ;

# Generator which runs an EXE and captures its output.
generators.register-standard testing.capture-output : EXE : RUN_OUTPUT ;

# Generator which creates a target if the sources run successfully.
# Differs from RUN in that the run output is not captured.
generators.register-standard testing.unit-test : EXE : UNIT_TEST ;

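# For a 'run' target the generators compose roughly as follows (an informal
# sketch; the compile and link steps come from the usual toolset generators):
#
#   sources (.cpp) -> OBJ -> EXE -> RUN_OUTPUT (capture-output) -> RUN (.test, expect-success)
#
# i.e. the program is built, executed with its output captured into the
# RUN_OUTPUT file, and the .test marker is produced only if the run succeeded.
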
# The action rules called by the generators.

# Causes the 'target' to exist after the bjam invocation if and only if all of the
# dependencies were successfully built.
rule expect-success ( target : dependency + : requirements * )
{
    **passed** $(target) : $(dependency) ;
}

# Causes the 'target' to exist after the bjam invocation if and only if some
# of the dependencies were not successfully built.
rule expect-failure ( target : dependency + : properties * )
{
    local grist = [ MATCH ^<(.*)> : $(dependency:G) ] ;
    local marker = $(dependency:G=$(grist)*fail) ;
    (failed-as-expected) $(marker) ;
    FAIL_EXPECTED $(dependency) ;
    LOCATE on $(marker) = [ on $(dependency) return $(LOCATE) ] ;
    RMOLD $(marker) ;
    DEPENDS $(marker) : $(dependency) ;

    DEPENDS $(target) : $(marker) ;
    **passed** $(target) : $(marker) ;
}

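# For illustration: if the failing dependency were, say, <pbin/gcc/debug>foo.obj
# (a hypothetical grist), the 'marker' computed above would be
# <pbin/gcc/debug*fail>foo.obj, located in the same directory as the dependency.
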
# The rule/action combination used to report successful passing
# of a test.
rule **passed**
{
    # Dump all the tests, if needed.
    # We do it here, since the dump should happen after all Jamfiles are read,
    # and there's no such place currently defined (though there should be).
    if ! $(.dumped-tests) && --dump-tests in [ modules.peek : ARGV ]
    {
        .dumped-tests = true ;
        dump-tests ;
    }

    # Force deletion of the target, in case any dependencies failed
    # to build.
    RMOLD $(<) ;
}

actions **passed**
{
    echo passed > $(<)
}

actions (failed-as-expected)
{
    echo failed as expected > $(<)
}

MAKE_FILE = [ common.file-creation-command ] ;

toolset.flags testing.unit-test LAUNCHER <testing.launcher> ;
actions unit-test
{
    $(LAUNCHER) $(>) && $(MAKE_FILE) $(<)
}

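# For illustration: with a hypothetical <testing.launcher>valgrind requirement,
# the unit-test action above would expand to roughly
#    valgrind path/to/test && touch path/to/test.passed
# on Unix-like systems (common.file-creation-command typically yields 'touch'
# there, and an echo-into-file command on NT).
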
toolset.flags testing.capture-output ARG <testing.arg> ;
toolset.flags testing.capture-output INPUT_FILES <testing.input-file> ;
rule capture-output ( target : source : properties * )
{
    output-file on $(target) = $(target:S=.output) ;
    LOCATE on $(target:S=.output) = [ on $(target) return $(LOCATE) ] ;

    # The INCLUDES kills a warning about an independent target...
    INCLUDES $(target) : $(target:S=.output) ;
    # ...but it also puts .output into the dependency graph, so we must tell jam
    # it's OK if it cannot find the target or an updating rule for it.
    NOCARE $(target:S=.output) ;

    # This has a two-fold effect. First, it adds the input files to the dependency
    # graph, preventing a warning. Second, it causes the input files to be bound
    # before the target is created. Therefore, they are bound using the SEARCH setting
    # on them, and not the LOCATE setting of $(target), as would otherwise happen
    # (due to a jam bug).
    DEPENDS $(target) : [ on $(target) return $(INPUT_FILES) ] ;

    local dll-paths = [ feature.get-values <dll-path> : $(properties) ] ;
    if $(dll-paths)
    {
        dll-paths = [ sequence.transform path.native : $(dll-paths) ] ;
        if [ modules.peek : NT ]
        {
            SET_DLL_PATHS on $(target) =
                [ common.path-variable-setting-command PATH : $(dll-paths) %PATH% ] ;
        }
        else
        {
            SET_DLL_PATHS on $(target) =
                [ common.path-variable-setting-command LD_LIBRARY_PATH :
                    $(dll-paths) %PATH% : exported ] ;
        }
    }
}

if [ os.name ] = NT
{
    ENV_PATH = %PATH% ;
    CATENATE = type ;
    CP = copy ;
}
else
{
    ENV_PATH = $PATH ;
    ENV_LD_LIBRARY_PATH = $LD_LIBRARY_PATH ;
    CATENATE = cat ;
    CP = cp ;
}


RUN_OUTPUT_HEADER = "echo ====== BEGIN OUTPUT ====== &&" ;
RUN_OUTPUT_FOOTER = " && echo ====== END OUTPUT ======" ;
if --verbose-test in [ modules.peek : ARGV ]
{
    VERBOSE_CAT = "&& "$(RUN_OUTPUT_HEADER)" "$(CATENATE)" " ;
}

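# The action below runs the built program with its arguments and input files,
# redirecting both stdout and stderr into the .output file. On success the
# .output file is copied over the RUN_OUTPUT target (and, with --verbose-test,
# echoed between the BEGIN/END OUTPUT markers); on failure the captured output
# is printed and the action exits with status 1, so the dependent .test file
# is not created.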
actions capture-output bind INPUT_FILES output-file
{
    $(SET_DLL_PATHS)
    $(>) $(ARG) "$(INPUT_FILES)" > $(output-file) 2>&1 && $(CP) $(output-file) $(<) $(VERBOSE_CAT)$(<)$(RUN_OUTPUT_FOOTER) || ( $(RUN_OUTPUT_HEADER) $(CATENATE) $(output-file) $(RUN_OUTPUT_FOOTER) && exit 1 )
}

IMPORT $(__name__) : compile compile-fail test-suite run run-fail
    : : compile compile-fail test-suite run run-fail ;