diff --git a/build/Jamfile.v2 b/build/Jamfile.v2 deleted file mode 100644 index d753ea5..0000000 --- a/build/Jamfile.v2 +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (C) 2005, 2006 The Trustees of Indiana University. -# Copyright (C) 2005 Douglas Gregor - -# Use, modification and distribution is subject to the Boost Software -# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -# Authors: Douglas Gregor -# Andrew Lumsdaine - -import mpi ; -import python ; - -libraries = ; - -if [ mpi.configured ] -{ - -project boost/mpi - : source-location ../src - ; - -lib boost_mpi - : - broadcast.cpp - communicator.cpp - computation_tree.cpp - content_oarchive.cpp - environment.cpp - exception.cpp - graph_communicator.cpp - group.cpp - intercommunicator.cpp - mpi_datatype_cache.cpp - mpi_datatype_oarchive.cpp - packed_iarchive.cpp - packed_oarchive.cpp - packed_skeleton_iarchive.cpp - packed_skeleton_oarchive.cpp - point_to_point.cpp - request.cpp - text_skeleton_oarchive.cpp - timer.cpp - : # Requirements - ../../serialization/build//boost_serialization - /mpi//mpi [ mpi.extra-requirements ] - BOOST_MPI_SOURCE=1 - shared:BOOST_MPI_DYN_LINK=1 - : # Default build - shared - : # Usage requirements - ../../serialization/build//boost_serialization - /mpi//mpi [ mpi.extra-requirements ] - ; - -libraries += boost_mpi ; - - if [ python.configured ] - { - lib boost_mpi_python - : # Sources - python/serialize.cpp - : # Requirements - boost_mpi - /mpi//mpi [ mpi.extra-requirements ] - /boost/python//boost_python - shared:BOOST_MPI_DYN_LINK=1 - shared:BOOST_MPI_PYTHON_DYN_LINK=1 - shared:BOOST_PYTHON_DYN_LINK=1 - BOOST_MPI_PYTHON_SOURCE=1 - : # Default build - shared - : # Usage requirements - /mpi//mpi [ mpi.extra-requirements ] - ; - libraries += boost_mpi_python ; - - python-extension mpi - : # Sources - python/collectives.cpp - python/py_communicator.cpp - python/datatypes.cpp - python/documentation.cpp - 
python/py_environment.cpp - python/py_exception.cpp - python/module.cpp - python/py_request.cpp - python/skeleton_and_content.cpp - python/status.cpp - python/py_timer.cpp - : # Requirements - /boost/python//boost_python - boost_mpi_python - boost_mpi - /mpi//mpi [ mpi.extra-requirements ] - shared:BOOST_MPI_DYN_LINK=1 - shared:BOOST_MPI_PYTHON_DYN_LINK=1 - shared:BOOST_PYTHON_DYN_LINK=1 - shared - release - ; - } -} -else -{ - ECHO "warning: skipping optional Message Passing Interface (MPI) library." ; - ECHO "note: to enable MPI support, add \"using mpi ;\" to user-config.jam." ; - ECHO "note: to suppress this message, pass \"--without-mpi\" to bjam." ; - ECHO "note: otherwise, you can safely ignore this message." ; -} - -boost-install $(libraries) ; \ No newline at end of file diff --git a/build/__init__.py b/build/__init__.py deleted file mode 100644 index 9032fdf..0000000 --- a/build/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -import sys -if sys.platform == 'linux2': - import dl - flags = sys.getdlopenflags() - sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL) - import mpi - sys.setdlopenflags(flags) -else: -import mpi - diff --git a/doc/Jamfile.v2 b/doc/Jamfile.v2 deleted file mode 100644 index 499c3d7..0000000 --- a/doc/Jamfile.v2 +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2005-2006 Douglas Gregor -# -# Distributed under the Boost Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt.) 
-project boost/mpi ; - -using quickbook ; -using doxygen ; - -doxygen mpi_autodoc - : [ glob - ../../../boost/mpi.hpp - ../../../boost/mpi/allocator.hpp - ../../../boost/mpi/collectives.hpp - ../../../boost/mpi/collectives_fwd.hpp - ../../../boost/mpi/communicator.hpp - ../../../boost/mpi/config.hpp - ../../../boost/mpi/datatype.hpp - ../../../boost/mpi/datatype_fwd.hpp - ../../../boost/mpi/environment.hpp - ../../../boost/mpi/exception.hpp - ../../../boost/mpi/graph_communicator.hpp - ../../../boost/mpi/group.hpp - ../../../boost/mpi/intercommunicator.hpp - ../../../boost/mpi/nonblocking.hpp - ../../../boost/mpi/operations.hpp - ../../../boost/mpi/packed_iarchive.hpp - ../../../boost/mpi/packed_oarchive.hpp - ../../../boost/mpi/skeleton_and_content.hpp - ../../../boost/mpi/skeleton_and_content_fwd.hpp - ../../../boost/mpi/status.hpp - ../../../boost/mpi/request.hpp - ../../../boost/mpi/timer.hpp - ../../../boost/mpi/python.hpp - ] - : MACRO_EXPANSION=YES - EXPAND_ONLY_PREDEF=YES - "PREDEFINED=BOOST_MPI_HAS_MEMORY_ALLOCATION= BOOST_MPI_HAS_NOARG_INITIALIZATION= MPI_VERSION=2 BOOST_MPI_DOXYGEN=" - ; - -xml mpi : mpi.qbk ; - -boostbook standalone : mpi mpi_autodoc - # Uncomment this line when generating PDF output - # max-columns=66 - ; - diff --git a/doc/mpi.qbk b/doc/mpi.qbk deleted file mode 100644 index 301f9d2..0000000 --- a/doc/mpi.qbk +++ /dev/null @@ -1,2045 +0,0 @@ -[library Boost.MPI - [authors [Gregor, Douglas], [Troyer, Matthias] ] - [copyright 2005 2006 2007 Douglas Gregor, Matthias Troyer, Trustees of Indiana University] - [purpose - An generic, user-friendly interface to MPI, the Message - Passing Interface. - ] - [id mpi] - [dirname mpi] - [license - Distributed under the Boost Software License, Version 1.0. 
- (See accompanying file LICENSE_1_0.txt or copy at - - http://www.boost.org/LICENSE_1_0.txt - ) - ] -] - -[/ Links ] -[def _MPI_ [@http://www-unix.mcs.anl.gov/mpi/ MPI]] -[def _MPI_implementations_ - [@http://www-unix.mcs.anl.gov/mpi/implementations.html - MPI implementations]] -[def _Serialization_ [@http://www.boost.org/libs/serialization/doc - Boost.Serialization]] -[def _BoostPython_ [@http://www.boost.org/libs/python/doc - Boost.Python]] -[def _Python_ [@http://www.python.org Python]] -[def _LAM_ [@http://www.lam-mpi.org/ LAM/MPI]] -[def _MPICH_ [@http://www-unix.mcs.anl.gov/mpi/mpich/ MPICH]] -[def _OpenMPI_ [@http://www.open-mpi.org OpenMPI]] -[def _boost_cvs_ [@http://sourceforge.net/cvs/?group_id=7586 - Boost CVS Repository]] -[def _sandbox_cvs_ [@http://sourceforge.net/cvs/?group_id=53513 - Boost Sandbox CVS Repository]] -[def _accumulate_ [@http://www.sgi.com/tech/stl/accumulate.html - `accumulate`]] - -[/ QuickBook Document version 1.0 ] - -[section:intro Introduction] - -Boost.MPI is a library for message passing in high-performance -parallel applications. A Boost.MPI program is one or more processes -that can communicate either via sending and receiving individual -messages (point-to-point communication) or by coordinating as a group -(collective communication). Unlike communication in threaded -environments or using a shared-memory library, Boost.MPI processes can -be spread across many different machines, possibly with different -operating systems and underlying architectures. - -Boost.MPI is not a completely new parallel programming -library. Rather, it is a C++-friendly interface to the standard -Message Passing Interface (_MPI_), the most popular library interface -for high-performance, distributed computing. MPI defines -a library interface, available from C, Fortran, and C++, for which -there are many _MPI_implementations_. Although there exist C++ -bindings for MPI, they offer little functionality over the C -bindings. 
The Boost.MPI library provides an alternative C++ interface -to MPI that better supports modern C++ development styles, including -complete support for user-defined data types and C++ Standard Library -types, arbitrary function objects for collective algorithms, and the -use of modern C++ library techniques to maintain maximal -efficiency. - -At present, Boost.MPI supports the majority of functionality in MPI -1.1. The thin abstractions in Boost.MPI allow one to easily combine it -with calls to the underlying C MPI library. Boost.MPI currently -supports: - -* Communicators: Boost.MPI supports the creation, - destruction, cloning, and splitting of MPI communicators, along with - manipulation of process groups. -* Point-to-point communication: Boost.MPI supports - point-to-point communication of primitive and user-defined data - types with send and receive operations, with blocking and - non-blocking interfaces. -* Collective communication: Boost.MPI supports collective - operations such as [funcref boost::mpi::reduce `reduce`] - and [funcref boost::mpi::gather `gather`] with both - built-in and user-defined data types and function objects. -* MPI Datatypes: Boost.MPI can build MPI data types for - user-defined types using the _Serialization_ library. -* Separating structure from content: Boost.MPI can transfer the shape - (or "skeleton") of complexc data structures (lists, maps, - etc.) and then separately transfer their content. This facility - optimizes for cases where the data within a large, static data - structure needs to be transmitted many times. - -Boost.MPI can be accessed either through its native C++ bindings, or -through its alternative, [link mpi.python Python interface]. - -[endsect] - -[section:getting_started Getting started] - -Getting started with Boost.MPI requires a working MPI implementation, -a recent version of Boost, and some configuration information. 
- -[section:mpi_impl MPI Implementation] -To get started with Boost.MPI, you will first need a working -MPI implementation. There are many conforming _MPI_implementations_ -available. Boost.MPI should work with any of the -implementations, although it has only been tested extensively with: - -* [@http://www.open-mpi.org Open MPI 1.0.x] -* [@http://www.lam-mpi.org LAM/MPI 7.x] -* [@http://www-unix.mcs.anl.gov/mpi/mpich/ MPICH 1.2.x] - -You can test your implementation using the following simple program, -which passes a message from one processor to another. Each processor -prints a message to standard output. - - #include - #include - - int main(int argc, char* argv[]) - { - MPI_Init(&argc, &argv); - - int rank; - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - if (rank == 0) { - int value = 17; - int result = MPI_Send(&value, 1, MPI_INT, 1, 0, MPI_COMM_WORLD); - if (result == MPI_SUCCESS) - std::cout << "Rank 0 OK!" << std::endl; - } else if (rank == 1) { - int value; - int result = MPI_Recv(&value, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, - MPI_STATUS_IGNORE); - if (result == MPI_SUCCESS && value == 17) - std::cout << "Rank 1 OK!" << std::endl; - } - MPI_Finalize(); - return 0; - } - -You should compile and run this program on two processors. To do this, -consult the documentation for your MPI implementation. With _LAM_, for -instance, you compile with the `mpiCC` or `mpic++` compiler, boot the -LAM/MPI daemon, and run your program via `mpirun`. For instance, if -your program is called `mpi-test.cpp`, use the following commands: - -[pre -mpiCC -o mpi-test mpi-test.cpp -lamboot -mpirun -np 2 ./mpi-test -lamhalt -] - -When you run this program, you will see both `Rank 0 OK!` and `Rank 1 -OK!` printed to the screen. However, they may be printed in any order -and may even overlap each other. The following output is perfectly -legitimate for this MPI program: - -[pre -Rank Rank 1 OK! -0 OK! 
-] - -If your output looks something like the above, your MPI implementation -appears to be working with a C++ compiler and we're ready to move on. -[endsect] - -[section:getting Getting Boost.MPI] - -Boost.MPI is not available in any version of Boost prior to the 1.35.x -series. You may need to retrieve the -latest version of Boost from CVS using the commands below. When CVS -asks for a password, just hit "enter". - -[pre -cvs -d:pserver:anonymous@boost.cvs.sourceforge.net:/cvsroot/boost login -cvs -z3 -d:pserver:anonymous@boost.cvs.sourceforge.net:/cvsroot/boost \ - co -P boost -] - -[endsect] - -[section:config Configure and Build] - -Boost.MPI uses version 2 of the -[@http://www.boost.org/doc/html/bbv2.html Boost.Build] system for -configuring and building the library binary. You will need a very new -version of [@http://www.boost.org/tools/build/jam_src/index.html -Boost.Jam] (3.1.12 or later). If you already have Boost.Jam, run `bjam --v` to determine what version you are using. - -Information about building Boost.Jam is -[@http://www.boost.org/tools/build/jam_src/index.html#building_bjam -available here]. However, most users need only run `build.sh` in the -`tools/build/jam_src` subdirectory of Boost. Then, -copy the resulting `bjam` executable some place convenient. - -For many users using _LAM_, _MPICH_, or _OpenMPI_, configuration is -almost automatic. If you don't already have a file `user-config.jam` -in your home directory, copy `tools/build/v2/user-config.jam` -there. For many users, MPI support can be enabled simply by adding the -following line to your user-config.jam file, which is used to configure -Boost.Build version 2. - - using mpi ; - -This should auto-detect MPI settings based on the MPI wrapper compiler in -your path, e.g., `mpic++`. If the wrapper compiler is not in your -path, see below. 
- -To actually build the MPI library, go into the top-level Boost -directory and execute the command: - -[pre -bjam --with-mpi -] - -If your MPI wrapper compiler has a different name from the default, -you can pass the name of the wrapper compiler as the first argument to -the mpi module: - - using mpi : /opt/mpich2-1.0.4/bin/mpiCC ; - -If your MPI implementation does not have a wrapper compiler, or the MPI -auto-detection code does not work with your MPI's wrapper compiler, -you can pass MPI-related options explicitly via the second parameter to the -`mpi` module: - - using mpi : : lammpio lammpi++ - mpi lam - dl ; - -To see the results of MPI auto-detection, pass `--debug-configuration` on -the bjam command line. - -The (optional) fourth argument configures Boost.MPI for running -regression tests. These parameters specify the executable used to -launch jobs (default: "mpirun") followed by any necessary arguments -to this to run tests and tell the program to expect the number of -processors to follow (default: "-np"). With the default parameters, -for instance, the test harness will execute, e.g., - -[pre -mpirun -np 4 all_gather_test -] - -[endsect] - -[section:installation Installing and Using Boost.MPI] - -Installation of Boost.MPI can be performed in the build step by -specifying `install` on the command line and (optionally) providing an -installation location, e.g., - -[pre -bjam --with-mpi install -] - -This command will install libraries into a default system location. To -change the path where libraries will be installed, add the option -`--prefix=PATH`. 
- -To build applications based on Boost.MPI, compile and link them as you -normally would for MPI programs, but remember to link against the -`boost_mpi` and `boost_serialization` libraries, e.g., - -[pre -mpic++ -I/path/to/boost/mpi my_application.cpp -Llibdir \ - -lboost_mpi-gcc-mt-1_35 -lboost_serialization-gcc-d-1_35.a -] -[endsect] - -If you plan to use the [link mpi.python Python bindings] for -Boost.MPI in conjunction with the C++ Boost.MPI, you will also need to -link against the boost_mpi_python library, e.g., by adding -`-lboost_mpi_python-gcc-mt-1_35` to your link command. This step will -only be necessary if you intend to [link mpi.python_user_data -register C++ types] or use the [link -mpi.python_skeleton_content skeleton/content mechanism] from -within Python. - -[section:testing Testing Boost.MPI] - -If you would like to verify that Boost.MPI is working properly with -your compiler, platform, and MPI implementation, a self-contained test -suite is available. To use this test suite, you will need to first -configure Boost.Build for your MPI environment and then run `bjam` in -`libs/mpi/test` (possibly with some extra options). For -_LAM_, you will need to run `lamboot` before running `bjam`. For -_MPICH_, you may need to create a machine file and pass -`-sMPIRUN_FLAGS="-machinefile "` to Boost.Jam; see the -section on [link mpi.config configuration] for more -information. If testing succeeds, `bjam` will exit without errors. - -[endsect] - -[endsect] - -[section:tutorial Tutorial] - -A Boost.MPI program consists of many cooperating processes (possibly -running on different computers) that communicate among themselves by -passing messages. Boost.MPI is a library (as is the lower-level MPI), -not a language, so the first step in a Boost.MPI is to create an -[classref boost::mpi::environment mpi::environment] object -that initializes the MPI environment and enables communication among -the processes. 
The [classref boost::mpi::environment -mpi::environment] object is initialized with the program arguments -(which it may modify) in your main program. The creation of this -object initializes MPI, and its destruction will finalize MPI. In the -vast majority of Boost.MPI programs, an instance of [classref -boost::mpi::environment mpi::environment] will be declared -in `main` at the very beginning of the program. - -Communication with MPI always occurs over a *communicator*, -which can be created be simply default-constructing an object of type -[classref boost::mpi::communicator mpi::communicator]. This -communicator can then be queried to determine how many processes are -running (the "size" of the communicator) and to give a unique number -to each process, from zero to the size of the communicator (i.e., the -"rank" of the process): - - #include - #include - #include - namespace mpi = boost::mpi; - - int main(int argc, char* argv[]) - { - mpi::environment env(argc, argv); - mpi::communicator world; - std::cout << "I am process " << world.rank() << " of " << world.size() - << "." << std::endl; - return 0; - } - -If you run this program with 7 processes, for instance, you will -receive output such as: - -[pre -I am process 5 of 7. -I am process 0 of 7. -I am process 1 of 7. -I am process 6 of 7. -I am process 2 of 7. -I am process 4 of 7. -I am process 3 of 7. -] - -Of course, the processes can execute in a different order each time, -so the ranks might not be strictly increasing. More interestingly, the -text could come out completely garbled, because one process can start -writing "I am a process" before another process has finished writing -"of 7.". - -[section:point_to_point Point-to-Point communication] - -As a message passing library, MPI's primary purpose is to routine -messages from one process to another, i.e., point-to-point. MPI -contains routines that can send messages, receive messages, and query -whether messages are available. 
Each message has a source process, a -target process, a tag, and a payload containing arbitrary data. The -source and target processes are the ranks of the sender and receiver -of the message, respectively. Tags are integers that allow the -receiver to distinguish between different messages coming from the -same sender. - -The following program uses two MPI processes to write "Hello, world!" -to the screen (`hello_world.cpp`): - - #include - #include - #include - namespace mpi = boost::mpi; - - int main(int argc, char* argv[]) - { - mpi::environment env(argc, argv); - mpi::communicator world; - - if (world.rank() == 0) { - world.send(1, 0, std::string("Hello")); - std::string msg; - world.recv(1, 1, msg); - std::cout << msg << "!" << std::endl; - } else { - std::string msg; - world.recv(0, 0, msg); - std::cout << msg << ", "; - std::cout.flush(); - world.send(0, 1, std::string("world")); - } - - return 0; - } - -The first processor (rank 0) passes the message "Hello" to the second -processor (rank 1) using tag 0. The second processor prints the string -it receives, along with a comma, then passes the message "world" back -to processor 0 with a different tag. The first processor then writes -this message with the "!" and exits. All sends are accomplished with -the [memberref boost::mpi::communicator::send -communicator::send] method and all receives use a corresponding -[memberref boost::mpi::communicator::recv -communicator::recv] call. - -[section:nonblocking Non-blocking communication] - -The default MPI communication operations--`send` and `recv`--may have -to wait until the entire transmission is completed before they can -return. Sometimes this *blocking* behavior has a negative impact on -performance, because the sender could be performing useful computation -while it is waiting for the transmission to occur. 
More important, -however, are the cases where several communication operations must -occur simultaneously, e.g., a process will both send and receive at -the same time. - -Let's revisit our "Hello, world!" program from the previous -section. The core of this program transmits two messages: - - if (world.rank() == 0) { - world.send(1, 0, std::string("Hello")); - std::string msg; - world.recv(1, 1, msg); - std::cout << msg << "!" << std::endl; - } else { - std::string msg; - world.recv(0, 0, msg); - std::cout << msg << ", "; - std::cout.flush(); - world.send(0, 1, std::string("world")); - } - -The first process passes a message to the second process, then -prepares to receive a message. The second process does the send and -receive in the opposite order. However, this sequence of events is -just that--a *sequence*--meaning that there is essentially no -parallelism. We can use non-blocking communication to ensure that the -two messages are transmitted simultaneously -(`hello_world_nonblocking.cpp`): - - #include - #include - #include - namespace mpi = boost::mpi; - - int main(int argc, char* argv[]) - { - mpi::environment env(argc, argv); - mpi::communicator world; - - if (world.rank() == 0) { - mpi::request reqs[2]; - std::string msg, out_msg = "Hello"; - reqs[0] = world.isend(1, 0, out_msg); - reqs[1] = world.irecv(1, 1, msg); - mpi::wait_all(reqs, reqs + 2); - std::cout << msg << "!" 
<< std::endl; - } else { - mpi::request reqs[2]; - std::string msg, out_msg = "world"; - reqs[0] = world.isend(0, 1, out_msg); - reqs[1] = world.irecv(0, 0, msg); - mpi::wait_all(reqs, reqs + 2); - std::cout << msg << ", "; - } - - return 0; - } - -We have replaced calls to the [memberref -boost::mpi::communicator::send communicator::send] and -[memberref boost::mpi::communicator::recv -communicator::recv] members with similar calls to their non-blocking -counterparts, [memberref boost::mpi::communicator::isend -communicator::isend] and [memberref -boost::mpi::communicator::irecv communicator::irecv]. The -prefix *i* indicates that the operations return immediately with a -[classref boost::mpi::request mpi::request] object, which -allows one to query the status of a communication request (see the -[memberref boost::mpi::request::test test] method) or wait -until it has completed (see the [memberref -boost::mpi::request::wait wait] method). Multiple requests -can be completed at the same time with the [funcref -boost::mpi::wait_all wait_all] operation. - -If you run this program multiple times, you may see some strange -results: namely, some runs will produce: - - Hello, world! - -while others will produce: - - world! - Hello, - -or even some garbled version of the letters in "Hello" and -"world". This indicates that there is some parallelism in the program, -because after both messages are (simultaneously) transmitted, both -processes will concurrent execute their print statements. For both -performance and correctness, non-blocking communication operations are -critical to many parallel applications using MPI. - -[endsect] - -[section:user_data_types User-defined data types] - -The inclusion of `boost/serialization/string.hpp` in the previous -examples is very important: it makes values of type `std::string` -serializable, so that they can be be transmitted using Boost.MPI. In -general, built-in C++ types (`int`s, `float`s, characters, etc.) 
can -be transmitted over MPI directly, while user-defined and -library-defined types will need to first be serialized (packed) into a -format that is amenable to transmission. Boost.MPI relies on the -_Serialization_ library to serialize and deserialize data types. - -For types defined by the standard library (such as `std::string` or -`std::vector`) and some types in Boost (such as `boost::variant`), the -_Serialization_ library already contains all of the required -serialization code. In these cases, you need only include the -appropriate header from the `boost/serialization` directory. - -For types that do not already have a serialization header, you will -first need to implement serialization code before the types can be -transmitted using Boost.MPI. Consider a simple class `gps_position` -that contains members `degrees`, `minutes`, and `seconds`. This class -is made serializable by making it a friend of -`boost::serialization::access` and introducing the templated -`serialize()` function, as follows: - - class gps_position - { - private: - friend class boost::serialization::access; - - template - void serialize(Archive & ar, const unsigned int version) - { - ar & degrees; - ar & minutes; - ar & seconds; - } - - int degrees; - int minutes; - float seconds; - public: - gps_position(){}; - gps_position(int d, int m, float s) : - degrees(d), minutes(m), seconds(s) - {} - }; - -Complete information about making types serializable is beyond the -scope of this tutorial. For more information, please see the -_Serialization_ library tutorial from which the above example was -extracted. One important side benefit of making types serializable for -Boost.MPI is that they become serializable for any other usage, such -as storing the objects to disk to manipulated them in XML. - -Some serializable types, like `gps_position` above, have a fixed -amount of data stored at fixed field positions. 
When this is the case, -Boost.MPI can optimize their serialization and transmission to avoid -extraneous copy operations. To enable this optimization, users should -specialize the type trait [classref -boost::mpi::is_mpi_datatype `is_mpi_datatype`], e.g.: - - namespace boost { namespace mpi { - template <> - struct is_mpi_datatype : mpl::true_ { }; - } } - -For non-template types we have defined a macro to simplify declaring a type -as an MPI datatype - - BOOST_IS_MPI_DATATYPE(gps_position) - -For composite traits, the specialization of [classref -boost::mpi::is_mpi_datatype `is_mpi_datatype`] may depend on -`is_mpi_datatype` itself. For instance, a `boost::array` object is -fixed only when the type of the parameter it stores is fixed: - - namespace boost { namespace mpi { - template - struct is_mpi_datatype > - : public is_mpi_datatype { }; - } } - -The redundant copy elimination optimization can only be applied when -the shape of the data type is completely fixed. Variable-length types -(e.g., strings, linked lists) and types that store pointers cannot use -the optimiation, but Boost.MPI will be unable to detect this error at -compile time. Attempting to perform this optimization when it is not -correct will likely result in segmentation faults and other strange -program behavior. - -Boost.MPI can transmit any user-defined data type from one process to -another. Built-in types can be transmitted without any extra effort; -library-defined types require the inclusion of a serialization header; -and user-defined types will require the addition of serialization -code. Fixed data types can be optimized for transmission using the -[classref boost::mpi::is_mpi_datatype `is_mpi_datatype`] -type trait. - -[endsect] -[endsect] - -[section:collectives Collective operations] - -[link mpi.point_to_point Point-to-point operations] are the -core message passing primitives in Boost.MPI. 
However, many -message-passing applications also require higher-level communication -algorithms that combine or summarize the data stored on many different -processes. These algorithms support many common tasks such as -"broadcast this value to all processes", "compute the sum of the -values on all processors" or "find the global minimum." - -[section:broadcast Broadcast] -The [funcref boost::mpi::broadcast `broadcast`] algorithm is -by far the simplest collective operation. It broadcasts a value from a -single process to all other processes within a [classref -boost::mpi::communicator communicator]. For instance, the -following program broadcasts "Hello, World!" from process 0 to every -other process. (`hello_world_broadcast.cpp`) - - #include - #include - #include - namespace mpi = boost::mpi; - - int main(int argc, char* argv[]) - { - mpi::environment env(argc, argv); - mpi::communicator world; - - std::string value; - if (world.rank() == 0) { - value = "Hello, World!"; - } - - broadcast(world, value, 0); - - std::cout << "Process #" << world.rank() << " says " << value - << std::endl; - return 0; - } - -Running this program with seven processes will produce a result such -as: - -[pre -Process #0 says Hello, World! -Process #2 says Hello, World! -Process #1 says Hello, World! -Process #4 says Hello, World! -Process #3 says Hello, World! -Process #5 says Hello, World! -Process #6 says Hello, World! -] -[endsect] - -[section:gather Gather] -The [funcref boost::mpi::gather `gather`] collective gathers -the values produced by every process in a communicator into a vector -of values on the "root" process (specified by an argument to -`gather`). The /i/th element in the vector will correspond to the -value gathered fro mthe /i/th process. For instance, in the following -program each process computes its own random number. All of these -random numbers are gathered at process 0 (the "root" in this case), -which prints out the values that correspond to each processor. 
-(`random_gather.cpp`) - - #include - #include - #include - namespace mpi = boost::mpi; - - int main(int argc, char* argv[]) - { - mpi::environment env(argc, argv); - mpi::communicator world; - - std::srand(time(0) + world.rank()); - int my_number = std::rand(); - if (world.rank() == 0) { - std::vector all_numbers; - gather(world, my_number, all_numbers, 0); - for (int proc = 0; proc < world.size(); ++proc) - std::cout << "Process #" << proc << " thought of " - << all_numbers[proc] << std::endl; - } else { - gather(world, my_number, 0); - } - - return 0; - } - -Executing this program with seven processes will result in output such -as the following. Although the random values will change from one run -to the next, the order of the processes in the output will remain the -same because only process 0 writes to `std::cout`. - -[pre -Process #0 thought of 332199874 -Process #1 thought of 20145617 -Process #2 thought of 1862420122 -Process #3 thought of 480422940 -Process #4 thought of 1253380219 -Process #5 thought of 949458815 -Process #6 thought of 650073868 -] - -The `gather` operation collects values from every process into a -vector at one process. If instead the values from every process need -to be collected into identical vectors on every process, use the -[funcref boost::mpi::all_gather `all_gather`] algorithm, -which is semantically equivalent to calling `gather` followed by a -`broadcast` of the resulting vector. - -[endsect] - -[section:reduce Reduce] - -The [funcref boost::mpi::reduce `reduce`] collective -summarizes the values from each process into a single value at the -user-specified "root" process. The Boost.MPI `reduce` operation is -similar in spirit to the STL _accumulate_ operation, because it takes -a sequence of values (one per process) and combines them via a -function object. 
For instance, we can randomly generate values in each -process and the compute the minimum value over all processes via a -call to [funcref boost::mpi::reduce `reduce`] -(`random_min.cpp`):: - - #include - #include - #include - namespace mpi = boost::mpi; - - int main(int argc, char* argv[]) - { - mpi::environment env(argc, argv); - mpi::communicator world; - - std::srand(time(0) + world.rank()); - int my_number = std::rand(); - - if (world.rank() == 0) { - int minimum; - reduce(world, my_number, minimum, mpi::minimum(), 0); - std::cout << "The minimum value is " << minimum << std::endl; - } else { - reduce(world, my_number, mpi::minimum(), 0); - } - - return 0; - } - -The use of `mpi::minimum` indicates that the minimum value -should be computed. `mpi::minimum` is a binary function object -that compares its two parameters via `<` and returns the smaller -value. Any associative binary function or function object will -work. For instance, to concatenate strings with `reduce` one could use -the function object `std::plus` (`string_cat.cpp`): - - #include - #include - #include - #include - namespace mpi = boost::mpi; - - int main(int argc, char* argv[]) - { - mpi::environment env(argc, argv); - mpi::communicator world; - - std::string names[10] = { "zero ", "one ", "two ", "three ", - "four ", "five ", "six ", "seven ", - "eight ", "nine " }; - - std::string result; - reduce(world, - world.rank() < 10? names[world.rank()] - : std::string("many "), - result, std::plus(), 0); - - if (world.rank() == 0) - std::cout << "The result is " << result << std::endl; - - return 0; - } - -In this example, we compute a string for each process and then perform -a reduction that concatenates all of the strings together into one, -long string. Executing this program with seven processors yields the -following output: - -[pre -The result is zero one two three four five six -] - -Any kind of binary function objects can be used with `reduce`. 
For -instance, there are many such function objects in the C++ standard -`` header and the Boost.MPI header -``. Or, you can create your own -function object. Function objects used with `reduce` must be -associative, i.e. `f(x, f(y, z))` must be equivalent to `f(f(x, y), -z)`. If they are also commutative (i.e., `f(x, y) == f(y, x)`),
- -Like [link mpi.gather `gather`], `reduce` has an "all" -variant called [funcref boost::mpi::all_reduce `all_reduce`] -that performs the reduction operation and broadcasts the result to all -processes. This variant is useful, for instance, in establishing -global minimum or maximum values. - -[endsect] - -[endsect] - -[section:communicators Managing communicators] - -Communication with Boost.MPI always occurs over a communicator. A -communicator contains a set of processes that can send messages among -themselves and perform collective operations. There can be many -communicators within a single program, each of which contains its own -isolated communication space that acts independently of the other -communicators. - -When the MPI environment is initialized, only the "world" communicator -(called `MPI_COMM_WORLD` in the MPI C and Fortran bindings) is -available. The "world" communicator, accessed by default-constructing -a [classref boost::mpi::communicator mpi::communicator] -object, contains all of the MPI processes present when the program -begins execution. Other communicators can then be constructed by -duplicating or building subsets of the "world" communicator. For -instance, in the following program we split the processes into two -groups: one for processes generating data and the other for processes -that will collect the data. (`generate_collect.cpp`) - - #include - #include - #include - #include - namespace mpi = boost::mpi; - - enum message_tags {msg_data_packet, msg_broadcast_data, msg_finished}; - - void generate_data(mpi::communicator local, mpi::communicator world); - void collect_data(mpi::communicator local, mpi::communicator world); - - int main(int argc, char* argv[]) - { - mpi::environment env(argc, argv); - mpi::communicator world; - - bool is_generator = world.rank() < 2 * world.size() / 3; - mpi::communicator local = world.split(is_generator? 
0 : 1); - if (is_generator) generate_data(local, world); - else collect_data(local, world); - - return 0; - } - -When communicators are split in this way, their processes retain -membership in both the original communicator (which is not altered by -the split) and the new communicator. However, the ranks of the -processes may be different from one communicator to the next, because -the rank values within a communicator are always contiguous values -starting at zero. In the example above, the first two thirds of the -processes become "generators" and the remaining processes become -"collectors". The ranks of the "collectors" in the `world` -communicator will be 2/3 `world.size()` and greater, whereas the ranks -of the same collector processes in the `local` communicator will start -at zero. The following excerpt from `collect_data()` (in -`generate_collect.cpp`) illustrates how to manage multiple -communicators: - - mpi::status msg = world.probe(); - if (msg.tag() == msg_data_packet) { - // Receive the packet of data - std::vector data; - world.recv(msg.source(), msg.tag(), data); - - // Tell each of the collectors that we'll be broadcasting some data - for (int dest = 1; dest < local.size(); ++dest) - local.send(dest, msg_broadcast_data, msg.source()); - - // Broadcast the actual data. - broadcast(local, data, 0); - } - -The code in this except is executed by the "master" collector, e.g., -the node with rank 2/3 `world.size()` in the `world` communicator and -rank 0 in the `local` (collector) communicator. It receives a message -from a generator via the `world` communicator, then broadcasts the -message to each of the collectors via the `local` communicator. - -For more control in the creation of communicators for subgroups of -processes, the Boost.MPI [classref boost::mpi::group `group`] provides -facilities to compute the union (`|`), intersection (`&`), and -difference (`-`) of two groups, generate arbitrary subgroups, etc. 
- -[endsect] - -[section:skeleton_and_content Separating structure from content] - -When communicating data types over MPI that are not fundamental to MPI -(such as strings, lists, and user-defined data types), Boost.MPI must -first serialize these data types into a buffer and then communicate -them; the receiver then copies the results into a buffer before -deserializing into an object on the other end. For some data types, -this overhead can be eliminated by using [classref -boost::mpi::is_mpi_datatype `is_mpi_datatype`]. However, -variable-length data types such as strings and lists cannot be MPI -data types. - -Boost.MPI supports a second technique for improving performance by -separating the structure of these variable-length data structures from -the content stored in the data structures. This feature is only -beneficial when the shape of the data structure remains the same but -the content of the data structure will need to be communicated several -times. For instance, in a finite element analysis the structure of the -mesh may be fixed at the beginning of computation but the various -variables on the cells of the mesh (temperature, stress, etc.) will be -communicated many times within the iterative analysis process. In this -case, Boost.MPI allows one to first send the "skeleton" of the mesh -once, then transmit the "content" multiple times. Since the content -need not contain any information about the structure of the data type, -it can be transmitted without creating separate communication buffers. - -To illustrate the use of skeletons and content, we will take a -somewhat more limited example wherein a master process generates -random number sequences into a list and transmits them to several -slave processes. The length of the list will be fixed at program -startup, so the content of the list (i.e., the current sequence of -numbers) can be transmitted efficiently. The complete example is -available in `example/random_content.cpp`. 
We begin with the master
When -separating a data structure into a skeleton and content, be careful -not to modify the data structure (either on the sender side or the -receiver side) without transmitting the skeleton again. Boost.MPI can -not detect these accidental modifications to the data structure, which -will likely result in incorrect data being transmitted or unstable -programs. - -[endsect] - - - - -[section:performance_optimizations Performance optimizations] -[section:serialization_optimizations Serialization optimizations] - -To obtain optimal performance for small fixed-length data types not containing -any pointers it is very important to mark them using the type traits of -Boost.MPI and Boost.Serialization. - -It was alredy discussed that fixed length types containing no pointers can be -using as [classref -boost::mpi::is_mpi_datatype `is_mpi_datatype`], e.g.: - - namespace boost { namespace mpi { - template <> - struct is_mpi_datatype : mpl::true_ { }; - } } - -or the equivalent macro - - BOOST_IS_MPI_DATATYPE(gps_position) - -In addition it can give a substantial performance gain to turn off tracking -and versioning for these types, if no pointers to these types are used, by -using the traits classes or helper macros of Boost.Serialization: - - BOOST_CLASS_TRACKING(gps_position,track_never) - BOOST_CLASS_IMPLEMENTATION(gps_position,object_serializable) - -[endsect] - -[section:homogeneous_machines Homogeneous machines] - -More optimizations are possible on homogeneous machines, by avoiding -MPI_Pack/MPI_Unpack calls but using direct bitwise copy. This feature can be -enabled by defining the macro BOOST_MPI_HOMOGENEOUS when building Boost.MPI and -when building the application. 
- -In addition all classes need to be marked both as is_mpi_datatype and -as is_bitwise_serializable, by using the helper macro of Boost.Serialization: - - BOOST_IS_BITWISE_SERIALIZABLE(gps_position) - -Usually it is safe to serialize a class for which is_mpi_datatype is true -by using binary copy of the bits. The exception are classes for which -some members should be skipped for serialization. - -[endsect] -[endsect] - - -[section:c_mapping Mapping from C MPI to Boost.MPI] - -This section provides tables that map from the functions and constants -of the standard C MPI to their Boost.MPI equivalents. It will be most -useful for users that are already familiar with the C or Fortran -interfaces to MPI, or for porting existing parallel programs to Boost.MPI. - -[table Point-to-point communication - [[C Function/Constant] [Boost.MPI Equivalent]] - - [[`MPI_ANY_SOURCE`] [`any_source`]] - - [[`MPI_ANY_TAG`] [`any_tag`]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node40.html#Node40 -`MPI_Bsend`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node51.html#Node51 -`MPI_Bsend_init`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node42.html#Node42 -`MPI_Buffer_attach`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node42.html#Node42 -`MPI_Buffer_detach`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node50.html#Node50 -`MPI_Cancel`]] - [[memberref boost::mpi::request::cancel -`request::cancel`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node35.html#Node35 -`MPI_Get_count`]] - [[memberref boost::mpi::status::count `status::count`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node46.html#Node46 -`MPI_Ibsend`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node50.html#Node50 -`MPI_Iprobe`]] - [[memberref boost::mpi::communicator::iprobe `communicator::iprobe`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node46.html#Node46 -`MPI_Irsend`]] [unsupported]] 
- - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node46.html#Node46 -`MPI_Isend`]] - [[memberref boost::mpi::communicator::isend -`communicator::isend`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node46.html#Node46 -`MPI_Issend`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node46.html#Node46 -`MPI_Irecv`]] - [[memberref boost::mpi::communicator::isend -`communicator::irecv`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node50.html#Node50 -`MPI_Probe`]] - [[memberref boost::mpi::communicator::probe `communicator::probe`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node53.html#Node53 -`MPI_PROC_NULL`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node34.html#Node34 `MPI_Recv`]] - [[memberref boost::mpi::communicator::recv -`communicator::recv`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node51.html#Node51 -`MPI_Recv_init`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node47.html#Node47 -`MPI_Request_free`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node40.html#Node40 -`MPI_Rsend`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node51.html#Node51 -`MPI_Rsend_init`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node31.html#Node31 -`MPI_Send`]] - [[memberref boost::mpi::communicator::send -`communicator::send`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node52.html#Node52 -`MPI_Sendrecv`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node52.html#Node52 -`MPI_Sendrecv_replace`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node51.html#Node51 -`MPI_Send_init`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node40.html#Node40 -`MPI_Ssend`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node51.html#Node51 -`MPI_Ssend_init`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node51.html#Node51 -`MPI_Start`]] 
[unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node51.html#Node51 -`MPI_Startall`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node47.html#Node47 -`MPI_Test`]] [[memberref boost::mpi::request::wait `request::test`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node47.html#Node47 -`MPI_Testall`]] [[funcref boost::mpi::test_all `test_all`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node47.html#Node47 -`MPI_Testany`]] [[funcref boost::mpi::test_any `test_any`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node47.html#Node47 -`MPI_Testsome`]] [[funcref boost::mpi::test_some `test_some`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node50.html#Node50 -`MPI_Test_cancelled`]] - [[memberref boost::mpi::status::cancelled -`status::cancelled`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node47.html#Node47 -`MPI_Wait`]] [[memberref boost::mpi::request::wait -`request::wait`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node47.html#Node47 -`MPI_Waitall`]] [[funcref boost::mpi::wait_all `wait_all`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node47.html#Node47 -`MPI_Waitany`]] [[funcref boost::mpi::wait_any `wait_any`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node47.html#Node47 -`MPI_Waitsome`]] [[funcref boost::mpi::wait_some `wait_some`]]] -] - -Boost.MPI automatically maps C and C++ data types to their MPI -equivalents. The following table illustrates the mappings between C++ -types and MPI datatype constants. 
- -[table Datatypes - [[C Constant] [Boost.MPI Equivalent]] - - [[`MPI_CHAR`] [`signed char`]] - [[`MPI_SHORT`] [`signed short int`]] - [[`MPI_INT`] [`signed int`]] - [[`MPI_LONG`] [`signed long int`]] - [[`MPI_UNSIGNED_CHAR`] [`unsigned char`]] - [[`MPI_UNSIGNED_SHORT`] [`unsigned short int`]] - [[`MPI_UNSIGNED_INT`] [`unsigned int`]] - [[`MPI_UNSIGNED_LONG`] [`unsigned long int`]] - [[`MPI_FLOAT`] [`float`]] - [[`MPI_DOUBLE`] [`double`]] - [[`MPI_LONG_DOUBLE`] [`long double`]] - [[`MPI_BYTE`] [unused]] - [[`MPI_PACKED`] [used internally for [link -mpi.user_data_types serialized data types]]] - [[`MPI_LONG_LONG_INT`] [`long long int`, if supported by compiler]] - [[`MPI_UNSIGNED_LONG_LONG_INT`] [`unsigned long long int`, if -supported by compiler]] - [[`MPI_FLOAT_INT`] [`std::pair`]] - [[`MPI_DOUBLE_INT`] [`std::pair`]] - [[`MPI_LONG_INT`] [`std::pair`]] - [[`MPI_2INT`] [`std::pair`]] - [[`MPI_SHORT_INT`] [`std::pair`]] - [[`MPI_LONG_DOUBLE_INT`] [`std::pair`]] -] - -Boost.MPI does not provide direct wrappers to the MPI derived -datatypes functionality. Instead, Boost.MPI relies on the -_Serialization_ library to construct MPI datatypes for user-defined -classe. The section on [link mpi.user_data_types user-defined -data types] describes this mechanism, which is used for types that -marked as "MPI datatypes" using [classref -boost::mpi::is_mpi_datatype `is_mpi_datatype`]. - -The derived datatypes table that follows describes which C++ types -correspond to the functionality of the C MPI's datatype -constructor. Boost.MPI may not actually use the C MPI function listed -when building datatypes of a certain form. Since the actual datatypes -built by Boost.MPI are typically hidden from the user, many of these -operations are called internally by Boost.MPI. 
- -[table Derived datatypes - [[C Function/Constant] [Boost.MPI Equivalent]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node56.html#Node56 -`MPI_Address`]] [used automatically in Boost.MPI]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node58.html#Node58 -`MPI_Type_commit`]] [used automatically in Boost.MPI]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node55.html#Node55 -`MPI_Type_contiguous`]] [arrays]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node56.html#Node56 -`MPI_Type_extent`]] [used automatically in Boost.MPI]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node58.html#Node58 -`MPI_Type_free`]] [used automatically in Boost.MPI]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node55.html#Node55 -`MPI_Type_hindexed`]] [any type used as a subobject]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node55.html#Node55 -`MPI_Type_hvector`]] [unused]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node55.html#Node55 -`MPI_Type_indexed`]] [any type used as a subobject]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node57.html#Node57 -`MPI_Type_lb`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node56.html#Node56 -`MPI_Type_size`]] [used automatically in Boost.MPI]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node55.html#Node55 -`MPI_Type_struct`]] [user-defined classes and structs]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node57.html#Node57 -`MPI_Type_ub`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node55.html#Node55 -`MPI_Type_vector`]] [used automatically in Boost.MPI]] -] - -MPI's packing facilities store values into a contiguous buffer, which -can later be transmitted via MPI and unpacked into separate values via -MPI's unpacking facilities. As with datatypes, Boost.MPI provides an -abstract interface to MPI's packing and unpacking facilities. 
In -particular, the two archive classes [classref -boost::mpi::packed_oarchive `packed_oarchive`] and [classref -boost::mpi::packed_iarchive `packed_iarchive`] can be used -to pack or unpack a contiguous buffer using MPI's facilities. - -[table Packing and unpacking - [[C Function] [Boost.MPI Equivalent]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node62.html#Node62 -`MPI_Pack`]] [[classref -boost::mpi::packed_oarchive `packed_oarchive`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node62.html#Node62 -`MPI_Pack_size`]] [used internally by Boost.MPI]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node62.html#Node62 -`MPI_Unpack`]] [[classref -boost::mpi::packed_iarchive `packed_iarchive`]]] -] - -Boost.MPI supports a one-to-one mapping for most of the MPI -collectives. For each collective provided by Boost.MPI, the underlying -C MPI collective will be invoked when it is possible (and efficient) -to do so. - -[table Collectives - [[C Function] [Boost.MPI Equivalent]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node73.html#Node73 -`MPI_Allgather`]] [[funcref boost::mpi::all_gather `all_gather`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node73.html#Node73 -`MPI_Allgatherv`]] [most uses supported by [funcref boost::mpi::all_gather `all_gather`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node82.html#Node82 -`MPI_Allreduce`]] [[funcref boost::mpi::all_reduce `all_reduce`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node75.html#Node75 -`MPI_Alltoall`]] [[funcref boost::mpi::all_to_all `all_to_all`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node75.html#Node75 -`MPI_Alltoallv`]] [most uses supported by [funcref boost::mpi::all_to_all `all_to_all`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node66.html#Node66 -`MPI_Barrier`]] [[memberref -boost::mpi::communicator::barrier `communicator::barrier`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node67.html#Node67 -`MPI_Bcast`]] [[funcref 
boost::mpi::broadcast `broadcast`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node69.html#Node69 -`MPI_Gather`]] [[funcref boost::mpi::gather `gather`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node69.html#Node69 -`MPI_Gatherv`]] [most uses supported by [funcref boost::mpi::gather `gather`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node77.html#Node77 -`MPI_Reduce`]] [[funcref boost::mpi::reduce `reduce`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node83.html#Node83 -`MPI_Reduce_scatter`]] [unsupported]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node84.html#Node84 -`MPI_Scan`]] [[funcref boost::mpi::scan `scan`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node71.html#Node71 -`MPI_Scatter`]] [[funcref boost::mpi::scatter `scatter`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node71.html#Node71 -`MPI_Scatterv`]] [most uses supported by [funcref boost::mpi::scatter `scatter`]]] -] - -Boost.MPI uses function objects to specify how reductions should occur -in its equivalents to `MPI_Allreduce`, `MPI_Reduce`, and -`MPI_Scan`. The following table illustrates how -[@http://www.mpi-forum.org/docs/mpi-11-html/node78.html#Node78 -predefined] and -[@http://www.mpi-forum.org/docs/mpi-11-html/node80.html#Node80 -user-defined] reduction operations can be mapped between the C MPI and -Boost.MPI. 
- -[table Reduction operations - [[C Constant] [Boost.MPI Equivalent]] - - [[`MPI_BAND`] [[classref boost::mpi::bitwise_and `bitwise_and`]]] - [[`MPI_BOR`] [[classref boost::mpi::bitwise_or `bitwise_or`]]] - [[`MPI_BXOR`] [[classref boost::mpi::bitwise_xor `bitwise_xor`]]] - [[`MPI_LAND`] [`std::logical_and`]] - [[`MPI_LOR`] [`std::logical_or`]] - [[`MPI_LXOR`] [[classref boost::mpi::logical_xor `logical_xor`]]] - [[`MPI_MAX`] [[classref boost::mpi::maximum `maximum`]]] - [[`MPI_MAXLOC`] [unsupported]] - [[`MPI_MIN`] [[classref boost::mpi::minimum `minimum`]]] - [[`MPI_MINLOC`] [unsupported]] - [[`MPI_Op_create`] [used internally by Boost.MPI]] - [[`MPI_Op_free`] [used internally by Boost.MPI]] - [[`MPI_PROD`] [`std::multiplies`]] - [[`MPI_SUM`] [`std::plus`]] -] - -MPI defines several special communicators, including `MPI_COMM_WORLD` -(including all processes that the local process can communicate with), -`MPI_COMM_SELF` (including only the local process), and -`MPI_COMM_EMPTY` (including no processes). These special communicators -are all instances of the [classref boost::mpi::communicator -`communicator`] class in Boost.MPI. - -[table Predefined communicators - [[C Constant] [Boost.MPI Equivalent]] - - [[`MPI_COMM_WORLD`] [a default-constructed [classref boost::mpi::communicator `communicator`]]] - [[`MPI_COMM_SELF`] [a [classref boost::mpi::communicator `communicator`] that contains only the current process]] - [[`MPI_COMM_EMPTY`] [a [classref boost::mpi::communicator `communicator`] that evaluates false]] -] - -Boost.MPI supports groups of processes through its [classref -boost::mpi::group `group`] class. 
- -[table Group operations and constants - [[C Function/Constant] [Boost.MPI Equivalent]] - - [[`MPI_GROUP_EMPTY`] [a default-constructed [classref - boost::mpi::group `group`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node97.html#Node97 - `MPI_Group_size`]] [[memberref boost::mpi::group::size `group::size`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node97.html#Node97 - `MPI_Group_rank`]] [memberref boost::mpi::group::rank `group::rank`]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node97.html#Node97 - `MPI_Group_translate_ranks`]] [memberref boost::mpi::group::translate_ranks `group::translate_ranks`]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node97.html#Node97 - `MPI_Group_compare`]] [operators `==` and `!=`]] - [[`MPI_IDENT`] [operators `==` and `!=`]] - [[`MPI_SIMILAR`] [operators `==` and `!=`]] - [[`MPI_UNEQUAL`] [operators `==` and `!=`]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node98.html#Node98 - `MPI_Comm_group`]] [[memberref - boost::mpi::communicator::group `communicator::group`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node98.html#Node98 - `MPI_Group_union`]] [operator `|` for groups]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node98.html#Node98 - `MPI_Group_intersection`]] [operator `&` for groups]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node98.html#Node98 - `MPI_Group_difference`]] [operator `-` for groups]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node98.html#Node98 - `MPI_Group_incl`]] [[memberref boost::mpi::group::include `group::include`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node98.html#Node98 - `MPI_Group_excl`]] [[memberref boost::mpi::group::include `group::exclude`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node98.html#Node98 - `MPI_Group_range_incl`]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node98.html#Node98 - `MPI_Group_range_excl`]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node99.html#Node99 - 
`MPI_Group_free`]] [used automatically in Boost.MPI]] -] - -Boost.MPI provides manipulation of communicators through the [classref -boost::mpi::communicator `communicator`] class. - -[table Communicator operations - [[C Function] [Boost.MPI Equivalent]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node101.html#Node101 - `MPI_Comm_size`]] [[memberref boost::mpi::communicator::size `communicator::size`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node101.html#Node101 - `MPI_Comm_rank`]] [[memberref boost::mpi::communicator::rank - `communicator::rank`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node101.html#Node101 - `MPI_Comm_compare`]] [operators `==` and `!=`]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node102.html#Node102 - `MPI_Comm_dup`]] [[classref boost::mpi::communicator `communicator`] - class constructor using `comm_duplicate`]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node102.html#Node102 - `MPI_Comm_create`]] [[classref boost::mpi::communicator - `communicator`] constructor]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node102.html#Node102 - `MPI_Comm_split`]] [[memberref boost::mpi::communicator::split - `communicator::split`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node103.html#Node103 - `MPI_Comm_free`]] [used automatically in Boost.MPI]] -] - -Boost.MPI currently provides support for inter-communicators via the -[classref boost::mpi::intercommunicator `intercommunicator`] class. 
- -[table Inter-communicator operations - [[C Function] [Boost.MPI Equivalent]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node112.html#Node112 - `MPI_Comm_test_inter`]] [use [memberref boost::mpi::communicator::as_intercommunicator `communicator::as_intercommunicator`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node112.html#Node112 - `MPI_Comm_remote_size`]] [[memberref boost::mpi::intercommunicator::remote_size] `intercommunicator::remote_size`]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node112.html#Node112 - `MPI_Comm_remote_group`]] [[memberref boost::mpi::intercommunicator::remote_group `intercommunicator::remote_group`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node113.html#Node113 - `MPI_Intercomm_create`]] [[classref boost::mpi::intercommunicator `intercommunicator`] constructor]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node113.html#Node113 - `MPI_Intercomm_merge`]] [[memberref boost::mpi::intercommunicator::merge `intercommunicator::merge`]]] -] - -Boost.MPI currently provides no support for attribute caching. 
- -[table Attributes and caching - [[C Function/Constant] [Boost.MPI Equivalent]] - - [[`MPI_NULL_COPY_FN`] [unsupported]] - [[`MPI_NULL_DELETE_FN`] [unsupported]] - [[`MPI_KEYVAL_INVALID`] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node119.html#Node119 - `MPI_Keyval_create`]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node119.html#Node119 - `MPI_Copy_function`]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node119.html#Node119 - `MPI_Delete_function`]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node119.html#Node119 - `MPI_Keyval_free`]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node119.html#Node119 - `MPI_Attr_put`]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node119.html#Node119 - `MPI_Attr_get`]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node119.html#Node119 - `MPI_Attr_delete`]] [unsupported]] -] - -Boost.MPI will provide complete support for creating communicators -with different topologies and later querying those topologies. Support -for graph topologies is provided via an interface to the -[@http://www.boost.org/libs/graph/doc/index.html Boost Graph Library -(BGL)], where a communicator can be created which matches the -structure of any BGL graph, and the graph topology of a communicator -can be viewed as a BGL graph for use in existing, generic graph -algorithms. 
- -[table Process topologies - [[C Function/Constant] [Boost.MPI Equivalent]] - - [[`MPI_GRAPH`] [unnecessary; use [memberref boost::mpi::communicator::has_graph_topology `communicator::has_graph_topology`]]] - [[`MPI_CART`] [unnecessary; use [memberref boost::mpi::communicator::has_cartesian_topology `communicator::has_cartesian_topology`]]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node133.html#Node133 - `MPI_Cart_create`]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node134.html#Node134 - `MPI_Dims_create`]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node135.html#Node135 - `MPI_Graph_create`]] [[memberref - boost::mpi::communicator::with_graph_topology - `communicator::with_graph_topology`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node136.html#Node136 - `MPI_Topo_test`]] [[memberref - boost::mpi::communicator::has_graph_topology - `communicator::has_graph_topology`], [memberref - boost::mpi::communicator::has_cartesian_topology - `communicator::has_cartesian_topology`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node136.html#Node136 - `MPI_Graphdims_get`]] [[funcref boost::mpi::num_vertices - `num_vertices`], [funcref boost::mpi::num_edges `num_edges`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node136.html#Node136 - `MPI_Graph_get`]] [[funcref boost::mpi::vertices - `vertices`], [funcref boost::mpi::edges `edges`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node136.html#Node136 - `MPI_Cartdim_get`]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node136.html#Node136 - `MPI_Cart_get`]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node136.html#Node136 - `MPI_Cart_rank`]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node136.html#Node136 - `MPI_Cart_coords`]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node136.html#Node136 - `MPI_Graph_neighbors_count`]] [[funcref boost::mpi::out_degree - `out_degree`]]] - 
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node136.html#Node136 - `MPI_Graph_neighbors`]] [[funcref boost::mpi::out_edges - `out_edges`], [funcref boost::mpi::adjacent_vertices `adjacent_vertices`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node137.html#Node137 - `MPI_Cart_shift`]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node138.html#Node138 - `MPI_Cart_sub`]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node139.html#Node139 - `MPI_Cart_map`]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node139.html#Node139 - `MPI_Graph_map`]] [unsupported]] -] - -Boost.MPI supports environmental inquires through the [classref -boost::mpi::environment `environment`] class. - -[table Environmental inquiries - [[C Function/Constant] [Boost.MPI Equivalent]] - - [[`MPI_TAG_UB`] [unnecessary; use [memberref - boost::mpi::environment::max_tag `environment::max_tag`]]] - [[`MPI_HOST`] [unnecessary; use [memberref - boost::mpi::environment::host_rank `environment::host_rank`]]] - [[`MPI_IO`] [unnecessary; use [memberref - boost::mpi::environment::io_rank `environment::io_rank`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node143.html#Node147 - `MPI_Get_processor_name`]] - [[memberref boost::mpi::environment::processor_name - `environment::processor_name`]]] -] - -Boost.MPI translates MPI errors into exceptions, reported via the -[classref boost::mpi::exception `exception`] class. 
- -[table Error handling - [[C Function/Constant] [Boost.MPI Equivalent]] - - [[`MPI_ERRORS_ARE_FATAL`] [unused; errors are translated into - Boost.MPI exceptions]] - [[`MPI_ERRORS_RETURN`] [unused; errors are translated into - Boost.MPI exceptions]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node148.html#Node148 - `MPI_errhandler_create`]] [unused; errors are translated into - Boost.MPI exceptions]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node148.html#Node148 - `MPI_errhandler_set`]] [unused; errors are translated into - Boost.MPI exceptions]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node148.html#Node148 - `MPI_errhandler_get`]] [unused; errors are translated into - Boost.MPI exceptions]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node148.html#Node148 - `MPI_errhandler_free`]] [unused; errors are translated into - Boost.MPI exceptions]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node148.html#Node148 - `MPI_Error_string`]] [used internally by Boost.MPI]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node149.html#Node149 - `MPI_Error_class`]] [[memberref boost::mpi::exception::error_class `exception::error_class`]]] -] - -The MPI timing facilities are exposed via the Boost.MPI [classref -boost::mpi::timer `timer`] class, which provides an interface -compatible with the [@http://www.boost.org/libs/timer/index.html Boost -Timer library]. 
- -[table Timing facilities - [[C Function/Constant] [Boost.MPI Equivalent]] - - [[`MPI_WTIME_IS_GLOBAL`] [unnecessary; use [memberref - boost::mpi::timer::time_is_global `timer::time_is_global`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node150.html#Node150 - `MPI_Wtime`]] [use [memberref boost::mpi::timer::elapsed - `timer::elapsed`] to determine the time elapsed from some specific - starting point]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node150.html#Node150 - `MPI_Wtick`]] [[memberref boost::mpi::timer::elapsed_min `timer::elapsed_min`]]] -] - -MPI startup and shutdown are managed by the construction and -destruction of the Boost.MPI [classref boost::mpi::environment -`environment`] class. - -[table Startup/shutdown facilities - [[C Function] [Boost.MPI Equivalent]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node151.html#Node151 - `MPI_Init`]] [[classref boost::mpi::environment `environment`] - constructor]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node151.html#Node151 - `MPI_Finalize`]] [[classref boost::mpi::environment `environment`] - destructor]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node151.html#Node151 - `MPI_Initialized`]] [[memberref boost::mpi::environment::initialized - `environment::initialized`]]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node151.html#Node151 - `MPI_Abort`]] [[memberref boost::mpi::environment::abort - `environment::abort`]]] -] - -Boost.MPI does not provide any support for the profiling facilities in -MPI 1.1. 
- -[table Profiling interface - [[C Function] [Boost.MPI Equivalent]] - - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node153.html#Node153 - `PMPI_*` routines]] [unsupported]] - [[[@http://www.mpi-forum.org/docs/mpi-11-html/node156.html#Node156 - `MPI_Pcontrol`]] [unsupported]] -] - -[endsect] - -[endsect] - -[xinclude mpi_autodoc.xml] - -[section:python Python Bindings] -[python] - -Boost.MPI provides an alternative MPI interface from the _Python_ -programming language via the `boost.mpi` module. The -Boost.MPI Python bindings, built on top of the C++ Boost.MPI using the -_BoostPython_ library, provide nearly all of the functionality of -Boost.MPI within a dynamic, object-oriented language. - -The Boost.MPI Python module can be built and installed from the -`libs/mpi/build` directory. Just follow the [link -mpi.config configuration] and [link mpi.installation -installation] instructions for the C++ Boost.MPI. Once you have -installed the Python module, be sure that the installation location is -in your `PYTHONPATH`. - -[section:python_quickstart Quickstart] - -[python] - -Getting started with the Boost.MPI Python module is as easy as -importing `boost.mpi`. Our first "Hello, World!" program is -just two lines long: - - import boost.mpi as mpi - print "I am process %d of %d." % (mpi.rank, mpi.size) - -Go ahead and run this program with several processes. Be sure to -invoke the `python` interpreter from `mpirun`, e.g., - -[pre -mpirun -np 5 python hello_world.py -] - -This will return output such as: - -[pre -I am process 1 of 5. -I am process 3 of 5. -I am process 2 of 5. -I am process 4 of 5. -I am process 0 of 5. -] - -Point-to-point operations in Boost.MPI have nearly the same syntax in -Python as in C++. We can write a simple two-process Python program -that prints "Hello, world!" by transmitting Python strings: - - import boost.mpi as mpi - - if mpi.world.rank == 0: - mpi.world.send(1, 0, 'Hello') - msg = mpi.world.recv(1, 1) - print msg,'!' 
- else: - msg = mpi.world.recv(0, 0) - print (msg + ', '), - mpi.world.send(0, 1, 'world') - -There are only a few notable differences between this Python code and -the example [link mpi.point_to_point in the C++ -tutorial]. First of all, we don't need to write any initialization -code in Python: just loading the `boost.mpi` module makes the -appropriate `MPI_Init` and `MPI_Finalize` calls. Second, we're passing -Python objects from one process to another through MPI. Any Python -object that can be pickled can be transmitted; the next section will -describe in more detail how the Boost.MPI Python layer transmits -objects. Finally, when we receive objects with `recv`, we don't need -to specify the type because transmission of Python objects is -polymorphic. - -When experimenting with Boost.MPI in Python, don't forget that help is -always available via `pydoc`: just pass the name of the module or -module entity on the command line (e.g., `pydoc -boost.mpi.communicator`) to receive complete reference -documentation. When in doubt, try it! -[endsect] - -[section:python_user_data Transmitting User-Defined Data] -Boost.MPI can transmit user-defined data in several different ways. -Most importantly, it can transmit arbitrary _Python_ objects by pickling -them at the sender and unpickling them at the receiver, allowing -arbitrarily complex Python data structures to interoperate with MPI. - -Boost.MPI also supports efficient serialization and transmission of -C++ objects (that have been exposed to Python) through its C++ -interface. Any C++ type that provides (de-)serialization routines that -meet the requirements of the Boost.Serialization library is eligible -for this optimization, but the type must be registered in advance. To -register a C++ type, invoke the C++ function [funcref -boost::mpi::python::register_serialized -register_serialized]. 
If your C++ types come from other Python modules -(they probably will!), those modules will need to link against the -`boost_mpi` and `boost_mpi_python` libraries as described in the [link -mpi.installation installation section]. Note that you do -*not* need to link against the Boost.MPI Python extension module. - -Finally, Boost.MPI supports separation of the structure of an object -from the data it stores, allowing the two pieces to be transmitted -separately. This "skeleton/content" mechanism, described in more -detail in a later section, is a communication optimization suitable -for problems with fixed data structures whose internal data changes -frequently. -[endsect] - -[section:python_collectives Collectives] - -Boost.MPI supports all of the MPI collectives (`scatter`, `reduce`, -`scan`, `broadcast`, etc.) for any type of data that can be -transmitted with the point-to-point communication operations. For the -MPI collectives that require a user-specified operation (e.g., `reduce` -and `scan`), the operation can be an arbitrary Python function. For -instance, one could concatenate strings with `all_reduce`: - - mpi.all_reduce(my_string, lambda x,y: x + y) - -The following module-level functions implement MPI collectives: - all_gather Gather the values from all processes. - all_reduce Combine the results from all processes. - all_to_all Every process sends data to every other process. - broadcast Broadcast data from one process to all other processes. - gather Gather the values from all processes to the root. - reduce Combine the results from all processes to the root. - scan Prefix reduction of the values from all processes. - scatter Scatter the values stored at the root to all processes. 
-[endsect] - -[section:python_skeleton_content Skeleton/Content Mechanism] -Boost.MPI provides a skeleton/content mechanism that allows the -transfer of large data structures to be split into two separate stages, -with the skeleton (or, "shape") of the data structure sent first and -the content (or, "data") of the data structure sent later, potentially -several times, so long as the structure has not changed since the -skeleton was transferred. The skeleton/content mechanism can improve -performance when the data structure is large and its shape is fixed, -because while the skeleton requires serialization (it has an unknown -size), the content transfer is fixed-size and can be done without -extra copies. - -To use the skeleton/content mechanism from Python, you must first -register the type of your data structure with the skeleton/content -mechanism *from C++*. The registration function is [funcref -boost::mpi::python::register_skeleton_and_content -register_skeleton_and_content] and resides in the [headerref -boost/mpi/python.hpp ] header. - -Once you have registered your C++ data structures, you can extract -the skeleton for an instance of that data structure with `skeleton()`. -The resulting `skeleton_proxy` can be transmitted via the normal send -routine, e.g., - - mpi.world.send(1, 0, skeleton(my_data_structure)) - -`skeleton_proxy` objects can be received on the other end via `recv()`, -which stores a newly-created instance of your data structure with the -same "shape" as the sender in its `"object` attribute: - - shape = mpi.world.recv(0, 0) - my_data_structure = shape.object - -Once the skeleton has been transmitted, the content (accessed via -`get_content`) can be transmitted in much the same way. 
Note, however, -that the receiver also specifies `get_content(my_data_structure)` in its -call to receive: - - if mpi.rank == 0: - mpi.world.send(1, 0, get_content(my_data_structure)) - else: - mpi.world.recv(0, 0, get_content(my_data_structure)) - -Of course, this transmission of content can occur repeatedly, if the -values in the data structure--but not its shape--change. - -The skeleton/content mechanism is a structured way to exploit the -interaction between custom-built MPI datatypes and `MPI_BOTTOM`, to -eliminate extra buffer copies. -[endsect] - -[section:python_compatbility C++/Python MPI Compatibility] -Boost.MPI is a C++ library whose facilities have been exposed to Python -via the Boost.Python library. Since the Boost.MPI Python bindings are -built directly on top of the C++ library, and nearly every feature of -the C++ library is available in Python, hybrid C++/Python programs using -Boost.MPI can interact, e.g., sending a value from Python but receiving -that value in C++ (or vice versa). However, doing so requires some -care. Because Python objects are dynamically typed, Boost.MPI transfers -type information along with the serialized form of the object, so that -the object can be received even when its type is not known. This -mechanism differs from its C++ counterpart, where the static types of -transmitted values are always known. - -The only way to communicate between the C++ and Python views on -Boost.MPI is to traffic entirely in Python objects. For Python, this -is the normal state of affairs, so nothing will change. For C++, this -means sending and receiving values of type `boost::python::object`, -from the _BoostPython_ library. 
For instance, say we want to transmit -an integer value from Python: - - comm.send(1, 0, 17) - -In C++, we would receive that value into a Python object and then -`extract` an integer value: - -[c++] - - boost::python::object value; - comm.recv(0, 0, value); - int int_value = boost::python::extract(value); - -In the future, Boost.MPI will be extended to allow improved -interoperability with the C++ Boost.MPI and the C MPI bindings. -[endsect] - -[section:pythonref Reference] -The Boost.MPI Python module, `boost.mpi`, has its own -[@boost.mpi.html reference documentation], which is also -available using `pydoc` (from the command line) or -`help(boost.mpi)` (from the Python interpreter). - -[endsect] - -[endsect] - -[section:design Design Philosophy] - -The design philosophy of the Parallel MPI library is very simple: be -both convenient and efficient. MPI is a library built for -high-performance applications, but its FORTRAN-centric, -performance-minded design makes it rather inflexible from the C++ -point of view: passing a string from one process to another is -inconvenient, requiring several messages and explicit buffering; -passing a container of strings from one process to another requires -an extra level of manual bookkeeping; and passing a map from strings -to containers of strings is positively infuriating. The Parallel MPI -library allows all of these data types to be passed using the same -simple `send()` and `recv()` primitives. Likewise, collective -operations such as [funcref boost::mpi::reduce `reduce()`] -allow arbitrary data types and function objects, much like the C++ -Standard Library would. - -The higher-level abstractions provided for convenience must not have -an impact on the performance of the application. 
For instance, sending -an integer via `send` must be as efficient as a call to -`MPI_Send`, -which means that it must be implemented by a simple call to -`MPI_Send`; likewise, an integer [funcref boost::mpi::reduce -`reduce()`] using `std::plus` must be implemented with a call to -`MPI_Reduce` on integers using the `MPI_SUM` operation: anything less -will impact performance. In essence, this is the "don't pay for what -you don't use" principle: if the user is not transmitting strings, -s/he should not pay the overhead associated with strings. - -Sometimes, achieving maximal performance means foregoing convenient -abstractions and implementing certain functionality using lower-level -primitives. For this reason, it is always possible to extract enough -information from the abstractions in Boost.MPI to minimize -the amount of effort required to interface between Boost.MPI -and the C MPI library. -[endsect] - -[section:performance Performance Evaluation] - -Message-passing performance is crucial in high-performance distributed -computing. To evaluate the performance of Boost.MPI, we modified the -standard [@http://www.scl.ameslab.gov/netpipe/ NetPIPE] benchmark -(version 3.6.2) to use Boost.MPI and compared its performance against -raw MPI. We ran five different variants of the NetPIPE benchmark: - -# MPI: The unmodified NetPIPE benchmark. - -# Boost.MPI: NetPIPE modified to use Boost.MPI calls for - communication. - -# MPI (Datatypes): NetPIPE modified to use a derived datatype (which - itself contains a single `MPI_BYTE`) rather than a fundamental - datatype. - -# Boost.MPI (Datatypes): NetPIPE modified to use a user-defined type - `Char` in place of the fundamental `char` type. The `Char` type - contains a single `char`, a `serialize()` method to make it - serializable, and specializes [classref - boost::mpi::is_mpi_datatype is_mpi_datatype] to force - Boost.MPI to build a derived MPI data type for it. 
- -# Boost.MPI (Serialized): NetPIPE modified to use a user-defined type - `Char` in place of the fundamental `char` type. This `Char` type - contains a single `char` and is serializable. Unlike the Datatypes - case, [classref boost::mpi::is_mpi_datatype - is_mpi_datatype] is *not* specialized, forcing Boost.MPI to perform - many, many serialization calls. - -The actual tests were performed on the Odin cluster in the -[@http://www.cs.indiana.edu/ Department of Computer Science] at -[@http://www.iub.edu Indiana University], which contains 128 nodes -connected via Infiniband. Each node contains 4GB memory and two AMD -Opteron processors. The NetPIPE benchmarks were compiled with Intel's -C++ Compiler, version 9.0, Boost 1.35.0 (prerelease), and -[@http://www.open-mpi.org/ Open MPI] version 1.1. The NetPIPE results -follow: - -[$../../../libs/mpi/doc/netpipe.png] - -There are some observations we can make about these NetPIPE -results. First of all, the top two plots show that Boost.MPI performs -on par with MPI for fundamental types. The next two plots show that -Boost.MPI performs on par with MPI for derived data types, even though -Boost.MPI provides a much more abstract, completely transparent -approach to building derived data types than raw MPI. Overall -performance for derived data types is significantly worse than for -fundamental data types, but the bottleneck is in the underlying MPI -implementation itself. Finally, when forcing Boost.MPI to serialize -characters individually, performance suffers greatly. This particular -instance is the worst possible case for Boost.MPI, because we are -serializing millions of individual characters. Overall, the -additional abstraction provided by Boost.MPI does not impair its -performance. 
- -[endsect] - -[section:history Revision History] - -* *Boost 1.35.0*: Initial release, containing the following post-review changes - * Support for arrays in all collective operations - * Support default-construction of [classref boost::mpi::environment environment] - -* *2006-09-21*: Boost.MPI accepted into Boost. - -[endsect] - -[section:acknowledge Acknowledgments] -Boost.MPI was developed with support from Zurcher Kantonalbank. Daniel -Egloff and Michael Gauckler contributed many ideas to Boost.MPI's -design, particularly in the design of its abstractions for -MPI data types. Prabhanjan (Anju) Kambadur developed the predecessor to -Boost.MPI that proved the usefulness of the Serialization library in -an MPI setting and the performance benefits of specialization in a C++ -abstraction layer for MPI. Jeremy Siek managed the formal review of Boost.MPI. - -[endsect] diff --git a/doc/netpipe.png b/doc/netpipe.png deleted file mode 100644 index 4017913..0000000 Binary files a/doc/netpipe.png and /dev/null differ diff --git a/example/generate_collect.cpp b/example/generate_collect.cpp deleted file mode 100644 index 5579d50..0000000 --- a/example/generate_collect.cpp +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (C) 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// An example using Boost.MPI's split() operation on communicators to -// create separate data-generating processes and data-collecting -// processes. 
-#include -#include -#include -#include -namespace mpi = boost::mpi; - -enum message_tags { msg_data_packet, msg_broadcast_data, msg_finished }; - -void generate_data(mpi::communicator local, mpi::communicator world) -{ - using std::srand; - using std::rand; - - // The rank of the collector within the world communicator - int master_collector = local.size(); - - srand(time(0) + world.rank()); - - // Send out several blocks of random data to the collectors. - int num_data_blocks = rand() % 3 + 1; - for (int block = 0; block < num_data_blocks; ++block) { - // Generate some random data - int num_samples = rand() % 1000; - std::vector data; - for (int i = 0; i < num_samples; ++i) { - data.push_back(rand()); - } - - // Send our data to the master collector process. - std::cout << "Generator #" << local.rank() << " sends some data..." - << std::endl; - world.send(master_collector, msg_data_packet, data); - } - - // Wait for all of the generators to complete - (local.barrier)(); - - // The first generator will send the message to the master collector - // indicating that we're done. - if (local.rank() == 0) - world.send(master_collector, msg_finished); -} - -void collect_data(mpi::communicator local, mpi::communicator world) -{ - // The rank of the collector within the world communicator - int master_collector = world.size() - local.size(); - - if (world.rank() == master_collector) { - while (true) { - // Wait for a message - mpi::status msg = world.probe(); - if (msg.tag() == msg_data_packet) { - // Receive the packet of data - std::vector data; - world.recv(msg.source(), msg.tag(), data); - - // Tell each of the collectors that we'll be broadcasting some data - for (int dest = 1; dest < local.size(); ++dest) - local.send(dest, msg_broadcast_data, msg.source()); - - // Broadcast the actual data. 
- broadcast(local, data, 0); - } else if (msg.tag() == msg_finished) { - // Receive the message - world.recv(msg.source(), msg.tag()); - - // Tell each of the collectors that we're finished - for (int dest = 1; dest < local.size(); ++dest) - local.send(dest, msg_finished); - - break; - } - } - } else { - while (true) { - // Wait for a message from the master collector - mpi::status msg = local.probe(); - if (msg.tag() == msg_broadcast_data) { - // Receive the broadcast message - int originator; - local.recv(msg.source(), msg.tag(), originator); - - // Receive the data broadcasted from the master collector - std::vector data; - broadcast(local, data, 0); - - std::cout << "Collector #" << local.rank() - << " is processing data from generator #" << originator - << "." << std::endl; - } else if (msg.tag() == msg_finished) { - // Receive the message - local.recv(msg.source(), msg.tag()); - - break; - } - } - } -} - -int main(int argc, char* argv[]) -{ - mpi::environment env(argc, argv); - mpi::communicator world; - - if (world.size() < 3) { - if (world.rank() == 0) { - std::cerr << "Error: this example requires at least 3 processes." - << std::endl; - } - env.abort(-1); - } - - bool is_generator = world.rank() < 2 * world.size() / 3; - mpi::communicator local = world.split(is_generator? 0 : 1); - if (is_generator) generate_data(local, world); - else collect_data(local, world); - - return 0; -} diff --git a/example/generate_collect_optional.cpp b/example/generate_collect_optional.cpp deleted file mode 100644 index 3aa3888..0000000 --- a/example/generate_collect_optional.cpp +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (C) 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// An example using Boost.MPI's split() operation on communicators to -// create separate data-generating processes and data-collecting -// processes using boost::optional for broadcasting. -#include -#include -#include -#include -#include -namespace mpi = boost::mpi; - -enum message_tags { msg_data_packet, msg_finished }; - -void generate_data(mpi::communicator local, mpi::communicator world) -{ - using std::srand; - using std::rand; - - // The rank of the collector within the world communicator - int master_collector = local.size(); - - srand(time(0) + world.rank()); - - // Send out several blocks of random data to the collectors. - int num_data_blocks = rand() % 3 + 1; - for (int block = 0; block < num_data_blocks; ++block) { - // Generate some random dataa - int num_samples = rand() % 1000; - std::vector data; - for (int i = 0; i < num_samples; ++i) { - data.push_back(rand()); - } - - // Send our data to the master collector process. - std::cout << "Generator #" << local.rank() << " sends some data..." - << std::endl; - world.send(master_collector, msg_data_packet, data); - } - - // Wait for all of the generators to complete - (local.barrier)(); - - // The first generator will send the message to the master collector - // indicating that we're done. - if (local.rank() == 0) - world.send(master_collector, msg_finished); -} - -void collect_data(mpi::communicator local, mpi::communicator world) -{ - // The rank of the collector within the world communicator - int master_collector = world.size() - local.size(); - - if (world.rank() == master_collector) { - while (true) { - // Wait for a message - mpi::status msg = world.probe(); - if (msg.tag() == msg_data_packet) { - // Receive the packet of data into a boost::optional - boost::optional > data; - data = std::vector(); - world.recv(msg.source(), msg.source(), *data); - - // Broadcast the actual data. 
- broadcast(local, data, 0); - } else if (msg.tag() == msg_finished) { - // Receive the message - world.recv(msg.source(), msg.tag()); - - // Broadcast to each collector to tell them we've finished. - boost::optional > data; - broadcast(local, data, 0); - break; - } - } - } else { - boost::optional > data; - do { - // Wait for a broadcast from the master collector - broadcast(local, data, 0); - if (data) { - std::cout << "Collector #" << local.rank() - << " is processing data." << std::endl; - } - } while (data); - } -} - -int main(int argc, char* argv[]) -{ - mpi::environment env(argc, argv); - mpi::communicator world; - - if (world.size() < 4) { - if (world.rank() == 0) { - std::cerr << "Error: this example requires at least 4 processes." - << std::endl; - } - env.abort(-1); - } - - bool is_generator = world.rank() < 2 * world.size() / 3; - mpi::communicator local = world.split(is_generator? 0 : 1); - if (is_generator) generate_data(local, world); - else collect_data(local, world); - - return 0; -} diff --git a/example/hello_world.cpp b/example/hello_world.cpp deleted file mode 100644 index 7095c17..0000000 --- a/example/hello_world.cpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// A simple Hello, world! example using Boost.MPI message passing. - -#include -#include -#include // Needed to send/receive strings! -namespace mpi = boost::mpi; - -int main(int argc, char* argv[]) -{ - mpi::environment env(argc, argv); - mpi::communicator world; - - if (world.rank() == 0) { - world.send(1, 0, std::string("Hello")); - std::string msg; - world.recv(1, 1, msg); - std::cout << msg << "!" 
<< std::endl; - } else { - std::string msg; - world.recv(0, 0, msg); - std::cout << msg << ", "; - std::cout.flush(); - world.send(0, 1, std::string("world")); - } - - return 0; -} diff --git a/example/hello_world_broadcast.cpp b/example/hello_world_broadcast.cpp deleted file mode 100644 index 4ffe239..0000000 --- a/example/hello_world_broadcast.cpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// A simple Hello, world! example using Boost.MPI broadcast() - -#include -#include -#include // Needed to send/receive strings! -namespace mpi = boost::mpi; - -int main(int argc, char* argv[]) -{ - mpi::environment env(argc, argv); - mpi::communicator world; - - std::string value; - if (world.rank() == 0) { - value = "Hello, World!"; - } - - broadcast(world, value, 0); - - std::cout << "Process #" << world.rank() << " says " << value << std::endl; - return 0; -} diff --git a/example/hello_world_nonblocking.cpp b/example/hello_world_nonblocking.cpp deleted file mode 100644 index c65247b..0000000 --- a/example/hello_world_nonblocking.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// A simple Hello, world! example using Boost.MPI message passing. - -#include -#include -#include // Needed to send/receive strings! 
-namespace mpi = boost::mpi; - -int main(int argc, char* argv[]) -{ - mpi::environment env(argc, argv); - mpi::communicator world; - - if (world.rank() == 0) { - mpi::request reqs[2]; - std::string msg, out_msg = "Hello"; - reqs[0] = world.isend(1, 0, out_msg); - reqs[1] = world.irecv(1, 1, msg); - mpi::wait_all(reqs, reqs + 2); - std::cout << msg << "!" << std::endl; - } else { - mpi::request reqs[2]; - std::string msg, out_msg = "world"; - reqs[0] = world.isend(0, 1, out_msg); - reqs[1] = world.irecv(0, 0, msg); - mpi::wait_all(reqs, reqs + 2); - std::cout << msg << ", "; - } - - return 0; -} diff --git a/example/parallel_example.cpp b/example/parallel_example.cpp deleted file mode 100644 index 00347d5..0000000 --- a/example/parallel_example.cpp +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright (C) 2005-2006 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// An example of a parallel Monte Carlo simulation using some nodes to produce -// data and others to aggregate the data -#include - -#include -#include -#include -#include -#include -#include - -namespace mpi = boost::mpi; - -enum {sample_tag, sample_skeleton_tag, sample_broadcast_tag, quit_tag}; - - -void calculate_samples(int sample_length) -{ - int num_samples = 100; - std::vector sample(sample_length); - - // setup communicator by splitting - - mpi::communicator world; - mpi::communicator calculate_communicator = world.split(0); - - unsigned int num_calculate_ranks = calculate_communicator.size(); - - // the master of the accumulaion ranks is the first of them, hence - // with a rank just one after the last calculation rank - int master_accumulate_rank = num_calculate_ranks; - - // the master of the calculation ranks sends the skeleton of the sample - // to the master of the accumulation ranks - - if (world.rank()==0) - 
world.send(master_accumulate_rank,sample_skeleton_tag,mpi::skeleton(sample)); - - // next we extract the content of the sample vector, to be used in sending - // the content later on - - mpi::content sample_content = mpi::get_content(sample); - - // now intialize the parallel random number generator - - boost::lcg64 engine( - boost::random::stream_number = calculate_communicator.rank(), - boost::random::total_streams = calculate_communicator.size() - ); - - boost::variate_generator > - rng(engine,boost::uniform_real<>()); - - for (unsigned int i=0; i gathered_results(calculate_communicator.size()); - mpi::all_gather(calculate_communicator,local_result,gathered_results); - } - - // we are done: the master tells the accumulation ranks to quit - if (world.rank()==0) - world.send(master_accumulate_rank,quit_tag); -} - - - -void accumulate_samples() -{ - std::vector sample; - - // setup the communicator for all accumulation ranks by splitting - - mpi::communicator world; - mpi::communicator accumulate_communicator = world.split(1); - - bool is_master_accumulate_rank = accumulate_communicator.rank()==0; - - // the master receives the sample skeleton - - if (is_master_accumulate_rank) - world.recv(0,sample_skeleton_tag,mpi::skeleton(sample)); - - // and broadcasts it to all accumulation ranks - mpi::broadcast(accumulate_communicator,mpi::skeleton(sample),0); - - // next we extract the content of the sample vector, to be used in receiving - // the content later on - - mpi::content sample_content = mpi::get_content(sample); - - // accumulate until quit is called - double sum=0.; - while (true) { - - - // the accumulation master checks whether we should quit - if (world.iprobe(0,quit_tag)) { - world.recv(0,quit_tag); - for (int i=1; i. - -# Use, modification and distribution is subject to the Boost Software -# License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -# http:#www.boost.org/LICENSE_1_0.txt) - -import boost.parallel.mpi as mpi - -if mpi.world.rank == 0: - mpi.world.send(1, 0, 'Hello') - msg = mpi.world.recv(1, 1) - print msg,'!' -else: - msg = mpi.world.recv(0, 0) - print msg,', ', - mpi.world.send(0, 1, 'world') diff --git a/example/random_content.cpp b/example/random_content.cpp deleted file mode 100644 index 22fd06d..0000000 --- a/example/random_content.cpp +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (C) 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// An example using Boost.MPI's skeletons and content to optimize -// communication. -#include -#include -#include -#include -#include -#include -#include -namespace mpi = boost::mpi; - -int main(int argc, char* argv[]) -{ - mpi::environment env(argc, argv); - mpi::communicator world; - - if (world.size() < 2 || world.size() > 4) { - if (world.rank() == 0) - std::cerr << "error: please execute this program with 2-4 processes.\n"; - world.abort(-1); - } - - if (world.rank() == 0) { - int list_len = 50; - int iterations = 10; - - if (argc > 1) list_len = atoi(argv[1]); - if (argc > 2) iterations = atoi(argv[2]); - - if (list_len <= 0) { - std::cerr << "error: please specific a list length greater than zero.\n"; - world.abort(-1); - } - - // Generate the list and broadcast its structure - std::list l(list_len); - broadcast(world, mpi::skeleton(l), 0); - - // Generate content several times and broadcast out that content - mpi::content c = mpi::get_content(l); - for (int i = 0; i < iterations; ++i) { - do { - std::generate(l.begin(), l.end(), &random); - } while (std::find_if(l.begin(), l.end(), - std::bind1st(std::not_equal_to(), 0)) - == l.end()); - - - std::cout << "Iteration #" << i << ": sending content" - << " (min = " << 
*std::min_element(l.begin(), l.end()) - << ", max = " << *std::max_element(l.begin(), l.end()) - << ", avg = " - << std::accumulate(l.begin(), l.end(), 0)/l.size() - << ").\n"; - - broadcast(world, c, 0); - } - - // Notify the slaves that we're done by sending all zeroes - std::fill(l.begin(), l.end(), 0); - broadcast(world, c, 0); - - } else { - // Receive the content and build up our own list - std::list l; - broadcast(world, mpi::skeleton(l), 0); - - mpi::content c = mpi::get_content(l); - int i = 0; - do { - broadcast(world, c, 0); - - if (std::find_if(l.begin(), l.end(), - std::bind1st(std::not_equal_to(), 0)) == l.end()) - break; - - if (world.rank() == 1) - std::cout << "Iteration #" << i << ": max value = " - << *std::max_element(l.begin(), l.end()) << ".\n"; - else if (world.rank() == 2) - std::cout << "Iteration #" << i << ": min value = " - << *std::min_element(l.begin(), l.end()) << ".\n"; - else if (world.rank() == 3) - std::cout << "Iteration #" << i << ": avg value = " - << std::accumulate(l.begin(), l.end(), 0)/l.size() - << ".\n"; - ++i; - } while (true); - } - - return 0; -} diff --git a/example/random_gather.cpp b/example/random_gather.cpp deleted file mode 100644 index 5483ba7..0000000 --- a/example/random_gather.cpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// An example using Boost.MPI's gather() - -#include -#include -#include -namespace mpi = boost::mpi; - -int main(int argc, char* argv[]) -{ - mpi::environment env(argc, argv); - mpi::communicator world; - - std::srand(time(0) + world.rank()); - int my_number = std::rand(); - if (world.rank() == 0) { - std::vector all_numbers; - gather(world, my_number, all_numbers, 0); - for (int proc = 0; proc < world.size(); ++proc) - std::cout << "Process #" << proc << " thought of " << all_numbers[proc] - << std::endl; - } else { - gather(world, my_number, 0); - } - - return 0; -} diff --git a/example/random_min.cpp b/example/random_min.cpp deleted file mode 100644 index d0a67ee..0000000 --- a/example/random_min.cpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// An example using Boost.MPI's reduce() to compute a minimum value. 
-#include -#include -#include -namespace mpi = boost::mpi; - -int main(int argc, char* argv[]) -{ - mpi::environment env(argc, argv); - mpi::communicator world; - - std::srand(time(0) + world.rank()); - int my_number = std::rand(); - - if (world.rank() == 0) { - int minimum; - reduce(world, my_number, minimum, mpi::minimum(), 0); - std::cout << "The minimum value is " << minimum << std::endl; - } else { - reduce(world, my_number, mpi::minimum(), 0); - } - - return 0; -} diff --git a/example/reduce_performance_test.cpp b/example/reduce_performance_test.cpp deleted file mode 100644 index 40411b1..0000000 --- a/example/reduce_performance_test.cpp +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (C) 2006 Trustees of Indiana University -// -// Authors: Douglas Gregor -// Andrew Lumsdaine - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Performance test of the reduce() collective -#include -#include - -namespace mpi = boost::mpi; - -struct add_int { - int operator()(int x, int y) const { return x + y; } -}; - -struct wrapped_int -{ - wrapped_int() : value(0) { } - wrapped_int(int value) : value(value) { } - - template - void serialize(Archiver& ar, const unsigned int /*version*/) { - ar & value; - } - - int value; -}; - -inline wrapped_int operator+(wrapped_int x, wrapped_int y) -{ - return wrapped_int(x.value + y.value); -} - -namespace boost { namespace mpi { - template<> struct is_mpi_datatype : mpl::true_ { }; -} } - -struct serialized_int -{ - serialized_int() : value(0) { } - serialized_int(int value) : value(value) { } - - template - void serialize(Archiver& ar, const unsigned int /*version*/) { - ar & value; - } - - int value; -}; - -inline serialized_int operator+(serialized_int x, serialized_int y) -{ - return serialized_int(x.value + y.value); -} - -int main(int argc, char* argv[]) -{ - mpi::environment env(argc, 
argv); - mpi::communicator world; - - int repeat_count = 100; - int outer_repeat_count = 2; - - if (argc > 1) repeat_count = boost::lexical_cast(argv[1]); - if (argc > 2) outer_repeat_count = boost::lexical_cast(argv[2]); - - if (world.rank() == 0) - std::cout << "# of processors: " << world.size() << std::endl - << "# of iterations: " << repeat_count << std::endl; - - int value = world.rank(); - int result; - wrapped_int wi_value = world.rank(); - wrapped_int wi_result; - serialized_int si_value = world.rank(); - serialized_int si_result; - - // Spin for a while... - for (int i = 0; i < repeat_count/10; ++i) { - reduce(world, value, result, std::plus(), 0); - reduce(world, value, result, add_int(), 0); - reduce(world, wi_value, wi_result, std::plus(), 0); - reduce(world, si_value, si_result, std::plus(), 0); - } - - for (int outer = 0; outer < outer_repeat_count; ++outer) { - // Raw MPI - mpi::timer time; - for (int i = 0; i < repeat_count; ++i) { - MPI_Reduce(&value, &result, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); - } - double reduce_raw_mpi_total_time = time.elapsed(); - - // MPI_INT/MPI_SUM case - time.restart(); - for (int i = 0; i < repeat_count; ++i) { - reduce(world, value, result, std::plus(), 0); - } - double reduce_int_sum_total_time = time.elapsed(); - - // MPI_INT/MPI_Op case - time.restart(); - for (int i = 0; i < repeat_count; ++i) { - reduce(world, value, result, add_int(), 0); - } - double reduce_int_op_total_time = time.elapsed(); - - // MPI_Datatype/MPI_Op case - time.restart(); - for (int i = 0; i < repeat_count; ++i) { - reduce(world, wi_value, wi_result, std::plus(), 0); - } - double reduce_type_op_total_time = time.elapsed(); - - // Serialized/MPI_Op case - time.restart(); - for (int i = 0; i < repeat_count; ++i) { - reduce(world, si_value, si_result, std::plus(), 0); - } - double reduce_ser_op_total_time = time.elapsed(); - - - if (world.rank() == 0) - std::cout << "\nInvocation\tElapsed Time (seconds)" - << "\nRaw MPI\t\t\t" << 
reduce_raw_mpi_total_time - << "\nMPI_INT/MPI_SUM\t\t" << reduce_int_sum_total_time - << "\nMPI_INT/MPI_Op\t\t" << reduce_int_op_total_time - << "\nMPI_Datatype/MPI_Op\t" << reduce_type_op_total_time - << "\nSerialized/MPI_Op\t" << reduce_ser_op_total_time - << std::endl; - } - - return 0; -} diff --git a/example/string_cat.cpp b/example/string_cat.cpp deleted file mode 100644 index 5602d59..0000000 --- a/example/string_cat.cpp +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (C) 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// An example using Boost.MPI's reduce() to concatenate strings. -#include -#include -#include -#include // Important for sending strings! -namespace mpi = boost::mpi; - -/* Defining STRING_CONCAT_COMMUTATIVE lies to Boost.MPI by forcing it - * to assume that string concatenation is commutative, which it is - * not. However, doing so illustrates how the results of a reduction - * can change when a non-commutative operator is assumed to be - * commutative. - */ -#ifdef STRING_CONCAT_COMMUTATIVE -namespace boost { namespace mpi { - -template<> -struct is_commutative, std::string> : mpl::true_ { }; - -} } // end namespace boost::mpi -#endif - -int main(int argc, char* argv[]) -{ - mpi::environment env(argc, argv); - mpi::communicator world; - - std::string names[10] = { "zero ", "one ", "two ", "three ", "four ", - "five ", "six ", "seven ", "eight ", "nine " }; - - std::string result; - reduce(world, - world.rank() < 10? 
names[world.rank()] : std::string("many "), - result, std::plus(), 0); - - if (world.rank() == 0) - std::cout << "The result is " << result << std::endl; - - return 0; -} diff --git a/performance/mpi/netpipe/NPboost_mpi.out b/performance/mpi/netpipe/NPboost_mpi.out deleted file mode 100644 index 0550e98..0000000 --- a/performance/mpi/netpipe/NPboost_mpi.out +++ /dev/null @@ -1,124 +0,0 @@ - 1 1.556150 0.00000490 - 2 3.111232 0.00000490 - 3 4.636256 0.00000494 - 4 6.203755 0.00000492 - 6 9.318715 0.00000491 - 8 12.379524 0.00000493 - 12 18.520635 0.00000494 - 13 20.066207 0.00000494 - 16 24.560890 0.00000497 - 19 29.009140 0.00000500 - 21 32.167126 0.00000498 - 24 36.632882 0.00000500 - 27 40.980980 0.00000503 - 29 44.199523 0.00000501 - 32 47.818632 0.00000511 - 35 52.225626 0.00000511 - 45 66.988035 0.00000513 - 48 61.339363 0.00000597 - 51 65.093453 0.00000598 - 61 77.079842 0.00000604 - 64 80.168787 0.00000609 - 67 83.968150 0.00000609 - 93 114.520926 0.00000620 - 96 116.630924 0.00000628 - 99 120.276119 0.00000628 - 125 150.723845 0.00000633 - 128 153.800491 0.00000635 - 131 156.851610 0.00000637 - 189 218.661755 0.00000659 - 192 221.482582 0.00000661 - 195 225.260431 0.00000660 - 253 286.065295 0.00000675 - 256 287.834775 0.00000679 - 259 290.080586 0.00000681 - 381 401.543300 0.00000724 - 384 404.062730 0.00000725 - 387 407.797158 0.00000724 - 509 503.410447 0.00000771 - 512 507.575975 0.00000770 - 515 506.454502 0.00000776 - 765 685.882287 0.00000851 - 768 684.205999 0.00000856 - 771 678.454881 0.00000867 - 1021 823.469916 0.00000946 - 1024 813.457269 0.00000960 - 1027 827.206161 0.00000947 - 1533 1132.675087 0.00001033 - 1536 1143.708347 0.00001025 - 1539 1147.613816 0.00001023 - 2045 1372.034153 0.00001137 - 2048 1384.937389 0.00001128 - 2051 1372.347641 0.00001140 - 3069 1757.853226 0.00001332 - 3072 1756.807541 0.00001334 - 3075 1745.981930 0.00001344 - 4093 2086.698885 0.00001496 - 4096 2025.556463 0.00001543 - 4099 2067.510804 0.00001513 - 6141 
2475.585036 0.00001893 - 6144 2493.108390 0.00001880 - 6147 2445.104039 0.00001918 - 8189 2801.627778 0.00002230 - 8192 2757.325323 0.00002267 - 8195 2688.060071 0.00002326 - 12285 2396.030089 0.00003912 - 12288 2341.773924 0.00004003 - 12291 2395.945003 0.00003914 - 16381 2497.932430 0.00005003 - 16384 2552.535592 0.00004897 - 16387 2497.573380 0.00005006 - 24573 2834.481658 0.00006614 - 24576 2798.008112 0.00006701 - 24579 2834.628016 0.00006615 - 32765 2964.782396 0.00008432 - 32768 2997.587914 0.00008340 - 32771 2962.832337 0.00008439 - 49149 3132.766800 0.00011970 - 49152 3122.238211 0.00012011 - 49155 3128.995237 0.00011985 - 65533 3218.936381 0.00015532 - 65536 3234.549640 0.00015458 - 65539 3221.609751 0.00015521 - 98301 3844.720371 0.00019507 - 98304 3824.927430 0.00019608 - 98307 3845.195953 0.00019505 - 131069 3975.962728 0.00025151 - 131072 3998.055989 0.00025012 - 131075 3976.151255 0.00025151 - 196605 4418.262261 0.00033949 - 196608 4410.209244 0.00034012 - 196611 4408.526416 0.00034025 - 262141 4596.834439 0.00043508 - 262144 4588.491658 0.00043587 - 262147 4594.695033 0.00043529 - 393213 4597.324257 0.00065255 - 393216 4591.901111 0.00065332 - 393219 4579.843585 0.00065505 - 524285 4706.685033 0.00084985 - 524288 4730.157439 0.00084564 - 524291 4696.628430 0.00085168 - 786429 4638.372147 0.00129355 - 786432 4641.428255 0.00129271 - 786435 4646.798176 0.00129122 - 1048573 4776.390739 0.00167490 - 1048576 4773.362216 0.00167597 - 1048579 4090.785622 0.00195562 - 1572861 4134.504179 0.00290240 - 1572864 4150.355819 0.00289132 - 1572867 4122.673087 0.00291074 - 2097149 4251.414302 0.00376345 - 2097152 4249.620576 0.00376504 - 2097155 4136.008192 0.00386847 - 3145725 4261.958593 0.00563121 - 3145728 4257.546302 0.00563705 - 3145731 4207.112602 0.00570463 - 4194301 4269.171597 0.00749559 - 4194304 4273.168439 0.00748859 - 4194307 4222.667048 0.00757815 - 6291453 4312.214104 0.01113117 - 6291456 4307.363313 0.01114371 - 6291459 4259.286743 0.01126950 - 
8388605 4278.263420 0.01495934 - 8388608 4272.408977 0.01497984 - 8388611 4267.859389 0.01499581 diff --git a/performance/mpi/netpipe/NPboost_mpi_dt.out b/performance/mpi/netpipe/NPboost_mpi_dt.out deleted file mode 100644 index 9a819f6..0000000 --- a/performance/mpi/netpipe/NPboost_mpi_dt.out +++ /dev/null @@ -1,124 +0,0 @@ - 1 1.484371 0.00000514 - 2 2.892819 0.00000527 - 3 4.248024 0.00000539 - 4 5.599959 0.00000545 - 6 8.135686 0.00000563 - 8 10.451068 0.00000584 - 12 14.650568 0.00000625 - 13 15.688523 0.00000632 - 16 18.449358 0.00000662 - 19 21.068626 0.00000688 - 21 22.748535 0.00000704 - 24 24.977734 0.00000733 - 27 26.991881 0.00000763 - 29 28.434248 0.00000778 - 32 29.933451 0.00000816 - 35 31.700042 0.00000842 - 45 36.802044 0.00000933 - 48 35.013712 0.00001046 - 51 36.368104 0.00001070 - 61 39.943385 0.00001165 - 64 40.679742 0.00001200 - 67 41.715180 0.00001225 - 93 48.442259 0.00001465 - 96 48.574781 0.00001508 - 99 49.457403 0.00001527 - 125 53.931923 0.00001768 - 128 54.602428 0.00001788 - 131 54.788430 0.00001824 - 189 61.135732 0.00002359 - 192 61.275969 0.00002391 - 195 61.643388 0.00002413 - 253 65.341229 0.00002954 - 256 65.614348 0.00002977 - 259 65.598851 0.00003012 - 381 70.215489 0.00004140 - 384 70.273012 0.00004169 - 387 70.381326 0.00004195 - 509 72.856709 0.00005330 - 512 72.946976 0.00005355 - 515 72.963768 0.00005385 - 765 75.846361 0.00007695 - 768 75.785003 0.00007732 - 771 75.875161 0.00007753 - 1021 77.227833 0.00010087 - 1024 77.325941 0.00010103 - 1027 77.313645 0.00010135 - 1533 79.444085 0.00014722 - 1536 79.444961 0.00014751 - 1539 79.495800 0.00014770 - 2045 80.396819 0.00019406 - 2048 80.455675 0.00019421 - 2051 80.438277 0.00019453 - 3069 81.459775 0.00028744 - 3072 81.477049 0.00028766 - 3075 81.483672 0.00028792 - 4093 82.012890 0.00038076 - 4096 82.047416 0.00038088 - 4099 82.028333 0.00038124 - 6141 82.632205 0.00056700 - 6144 82.618343 0.00056737 - 6147 82.635089 0.00056753 - 8189 82.890175 0.00075373 - 8192 
82.913827 0.00075379 - 8195 82.897512 0.00075422 - 12285 83.175862 0.00112685 - 12288 83.169749 0.00112721 - 12291 83.200792 0.00112707 - 16381 95.388672 0.00131019 - 16384 95.412286 0.00131010 - 16387 95.398004 0.00131054 - 24573 108.974951 0.00172037 - 24576 108.965802 0.00172072 - 24579 108.990032 0.00172055 - 32765 101.248355 0.00246895 - 32768 101.057095 0.00247385 - 32771 101.022535 0.00247492 - 49149 94.399167 0.00397225 - 49152 94.377562 0.00397340 - 49155 94.381729 0.00397347 - 65533 91.381459 0.00547132 - 65536 91.363912 0.00547262 - 65539 91.358807 0.00547318 - 98301 99.821546 0.00751318 - 98304 99.843580 0.00751175 - 98307 99.840884 0.00751218 - 131069 111.255522 0.00898811 - 131072 111.318823 0.00898321 - 131075 111.279137 0.00898662 - 196605 125.429838 0.01195869 - 196608 125.435003 0.01195838 - 196611 125.434416 0.01195862 - 262141 133.997151 0.01492552 - 262144 134.130811 0.01491082 - 262147 134.006283 0.01492484 - 393213 143.806625 0.02086119 - 393216 143.801969 0.02086202 - 393219 143.805532 0.02086167 - 524285 149.249134 0.02680067 - 524288 149.499130 0.02675601 - 524291 149.656680 0.02672799 - 786429 151.705411 0.03955019 - 786432 151.845583 0.03951383 - 786435 150.233984 0.03993785 - 1048573 153.232822 0.05220799 - 1048576 153.241542 0.05220517 - 1048579 153.336518 0.05217298 - 1572861 162.629583 0.07378717 - 1572864 162.624025 0.07378983 - 1572867 162.625824 0.07378916 - 2097149 161.961354 0.09878886 - 2097152 161.963019 0.09878798 - 2097155 161.961882 0.09878882 - 3145725 166.269112 0.14434417 - 3145728 166.267348 0.14434584 - 3145731 166.275563 0.14433885 - 4194301 167.216394 0.19136866 - 4194304 167.208215 0.19137816 - 4194307 166.595939 0.19208165 - 6291453 166.463125 0.28835201 - 6291456 167.089715 0.28727082 - 6291459 167.327938 0.28686198 - 8388605 168.641041 0.37950416 - 8388608 168.646204 0.37949268 - 8388611 168.644499 0.37949665 diff --git a/performance/mpi/netpipe/NPboost_mpi_ser.out b/performance/mpi/netpipe/NPboost_mpi_ser.out 
deleted file mode 100644 index b4300bd..0000000 --- a/performance/mpi/netpipe/NPboost_mpi_ser.out +++ /dev/null @@ -1,109 +0,0 @@ - 1 0.019803 0.00038526 - 2 0.039570 0.00038561 - 3 0.043128 0.00053071 - 4 0.057452 0.00053118 - 6 0.085733 0.00053394 - 8 0.113964 0.00053557 - 12 0.135196 0.00067718 - 13 0.146071 0.00067900 - 16 0.178154 0.00068519 - 19 0.210759 0.00068779 - 21 0.232349 0.00068956 - 24 0.264920 0.00069117 - 27 0.246864 0.00083444 - 29 0.264385 0.00083686 - 32 0.291396 0.00083783 - 35 0.317892 0.00084000 - 45 0.404985 0.00084774 - 48 0.431100 0.00084948 - 51 0.456924 0.00085156 - 61 0.465889 0.00099893 - 64 0.487515 0.00100157 - 67 0.509790 0.00100271 - 93 0.694773 0.00102125 - 96 0.715831 0.00102318 - 99 0.736034 0.00102619 - 125 0.806129 0.00118303 - 128 0.823349 0.00118609 - 131 0.841130 0.00118822 - 189 1.173720 0.00122853 - 192 1.190832 0.00123010 - 195 1.207086 0.00123250 - 253 1.362336 0.00141686 - 256 1.376744 0.00141866 - 259 1.392136 0.00141941 - 381 1.930314 0.00150587 - 384 1.942892 0.00150790 - 387 1.955966 0.00150952 - 509 2.234705 0.00173775 - 512 2.246378 0.00173891 - 515 2.256668 0.00174112 - 765 3.047240 0.00191534 - 768 3.054962 0.00191799 - 771 3.062763 0.00192057 - 1021 3.481300 0.00223756 - 1024 3.490202 0.00223841 - 1027 3.496027 0.00224123 - 1533 4.512528 0.00259186 - 1536 4.519516 0.00259292 - 1539 4.523460 0.00259572 - 2045 5.047938 0.00309079 - 2048 5.052234 0.00309269 - 2051 5.058416 0.00309344 - 3069 6.157688 0.00380250 - 3072 6.159614 0.00380503 - 3075 6.163244 0.00380650 - 4093 6.685775 0.00467068 - 4096 6.686272 0.00467376 - 4099 6.689310 0.00467505 - 6141 7.698081 0.00608621 - 6144 7.701148 0.00608675 - 6147 7.698335 0.00609195 - 8189 8.159600 0.00765688 - 8192 8.159892 0.00765942 - 8195 8.162795 0.00765949 - 12285 8.941794 0.01048191 - 12288 8.944961 0.01048076 - 12291 8.946941 0.01048100 - 16381 9.296135 0.01344399 - 16384 9.295420 0.01344748 - 16387 9.305068 0.01343600 - 24573 9.829364 0.01907317 - 24576 9.829274 
0.01907567 - 24579 9.831498 0.01907369 - 32765 10.050148 0.02487298 - 32768 10.027471 0.02493151 - 32771 10.055217 0.02486499 - 49149 10.382095 0.03611767 - 49152 10.383117 0.03611632 - 49155 10.381112 0.03612550 - 65533 10.513623 0.04755517 - 65536 10.514254 0.04755449 - 65539 10.512969 0.04756248 - 98301 10.700357 0.07008898 - 98304 10.700556 0.07008982 - 98307 10.700270 0.07009383 - 131069 10.767125 0.09287318 - 131072 10.767510 0.09287198 - 131075 10.766632 0.09288168 - 196605 10.882478 0.13783415 - 196608 10.603116 0.14146785 - 196611 10.881402 0.13785199 - 262141 10.897592 0.18352469 - 262144 10.903115 0.18343381 - 262147 10.904468 0.18341315 - 393213 10.967902 0.27352333 - 393216 10.969944 0.27347449 - 393219 10.969647 0.27348399 - 524285 10.970939 0.36459752 - 524288 10.970923 0.36460014 - 524291 10.970929 0.36460201 - 786429 11.012150 0.54485067 - 786432 11.011851 0.54486752 - 786435 11.015610 0.54468366 - 1048573 10.956998 0.73012487 - 1048576 10.976542 0.72882700 - 1048579 10.974227 0.72898285 - 1572861 11.016027 1.08931983 - 1572864 11.014845 1.08943884 - 1572867 11.015619 1.08936437 diff --git a/performance/mpi/netpipe/NPmpi.out b/performance/mpi/netpipe/NPmpi.out deleted file mode 100644 index 28a0900..0000000 --- a/performance/mpi/netpipe/NPmpi.out +++ /dev/null @@ -1,124 +0,0 @@ - 1 1.541185 0.00000495 - 2 3.079752 0.00000495 - 3 4.594458 0.00000498 - 4 6.151042 0.00000496 - 6 9.245996 0.00000495 - 8 12.276816 0.00000497 - 12 18.349837 0.00000499 - 13 19.889993 0.00000499 - 16 24.361712 0.00000501 - 19 28.768463 0.00000504 - 21 31.919176 0.00000502 - 24 36.340085 0.00000504 - 27 40.570101 0.00000508 - 29 43.785760 0.00000505 - 32 47.380840 0.00000515 - 35 51.734303 0.00000516 - 45 66.358480 0.00000517 - 48 61.041489 0.00000600 - 51 64.434740 0.00000604 - 61 76.817375 0.00000606 - 64 80.025495 0.00000610 - 67 83.926139 0.00000609 - 93 114.385978 0.00000620 - 96 116.982439 0.00000626 - 99 120.030944 0.00000629 - 125 150.448671 0.00000634 - 128 
152.615432 0.00000640 - 131 156.501281 0.00000639 - 189 217.995721 0.00000661 - 192 221.223863 0.00000662 - 195 225.249679 0.00000660 - 253 282.659242 0.00000683 - 256 288.017897 0.00000678 - 259 289.239628 0.00000683 - 381 399.777238 0.00000727 - 384 404.198489 0.00000725 - 387 403.545793 0.00000732 - 509 501.256920 0.00000775 - 512 505.858145 0.00000772 - 515 502.769173 0.00000781 - 765 676.669897 0.00000863 - 768 681.618189 0.00000860 - 771 676.336310 0.00000870 - 1021 810.180841 0.00000961 - 1024 812.531222 0.00000962 - 1027 824.944906 0.00000950 - 1533 1126.272677 0.00001038 - 1536 1142.740629 0.00001025 - 1539 1131.490347 0.00001038 - 2045 1388.457938 0.00001124 - 2048 1359.718164 0.00001149 - 2051 1386.333746 0.00001129 - 3069 1752.688641 0.00001336 - 3072 1754.104080 0.00001336 - 3075 1736.273115 0.00001351 - 4093 2081.468075 0.00001500 - 4096 2015.114865 0.00001551 - 4099 2062.938083 0.00001516 - 6141 2466.704392 0.00001899 - 6144 2484.845492 0.00001886 - 6147 2422.862417 0.00001936 - 8189 2701.307702 0.00002313 - 8192 2749.052786 0.00002274 - 8195 2681.592589 0.00002332 - 12285 2273.488046 0.00004123 - 12288 2337.551137 0.00004011 - 12291 2281.701086 0.00004110 - 16381 2425.755479 0.00005152 - 16384 2492.137867 0.00005016 - 16387 2422.596945 0.00005161 - 24573 2738.192568 0.00006847 - 24576 2789.457258 0.00006722 - 24579 2739.817280 0.00006844 - 32765 2930.782732 0.00008529 - 32768 2973.532077 0.00008408 - 32771 2936.390355 0.00008515 - 49149 3097.454223 0.00012106 - 49152 3117.062361 0.00012031 - 49155 3089.593476 0.00012138 - 65533 3206.105084 0.00015595 - 65536 3218.797211 0.00015534 - 65539 3199.455403 0.00015628 - 98301 3854.955986 0.00019455 - 98304 3870.592678 0.00019377 - 98307 3850.868827 0.00019477 - 131069 3996.821132 0.00025019 - 131072 4001.085739 0.00024993 - 131075 3992.100316 0.00025050 - 196605 4397.172841 0.00034112 - 196608 4391.493310 0.00034157 - 196611 4401.841461 0.00034077 - 262141 4562.936571 0.00043831 - 262144 4561.276717 
0.00043847 - 262147 4574.004396 0.00043726 - 393213 4576.803075 0.00065547 - 393216 4576.551805 0.00065552 - 393219 4590.356509 0.00065355 - 524285 4455.784302 0.00089770 - 524288 4459.222346 0.00089702 - 524291 4459.150891 0.00089704 - 786429 4496.076947 0.00133449 - 786432 4495.133960 0.00133478 - 786435 4493.611327 0.00133523 - 1048573 4626.774971 0.00172906 - 1048576 4627.764377 0.00172870 - 1048579 4041.001486 0.00197971 - 1572861 4109.832101 0.00291982 - 1572864 4097.470225 0.00292864 - 1572867 4073.390922 0.00294595 - 2097149 4269.721663 0.00374731 - 2097152 4281.204212 0.00373727 - 2097155 4150.253463 0.00385519 - 3145725 4276.700737 0.00561180 - 3145728 4269.497286 0.00562127 - 3145731 4205.834216 0.00570636 - 4194301 4282.876234 0.00747161 - 4194304 4280.891724 0.00747508 - 4194307 4225.636130 0.00757283 - 6291453 4314.185423 0.01112608 - 6291456 4310.111303 0.01113660 - 6291459 4268.534028 0.01124508 - 8388605 4284.568540 0.01493732 - 8388608 4293.433109 0.01490649 - 8388611 4268.470169 0.01499367 diff --git a/performance/mpi/netpipe/NPmpi_dt.out b/performance/mpi/netpipe/NPmpi_dt.out deleted file mode 100644 index bde868b..0000000 --- a/performance/mpi/netpipe/NPmpi_dt.out +++ /dev/null @@ -1,124 +0,0 @@ - 1 1.500386 0.00000508 - 2 2.946861 0.00000518 - 3 4.328129 0.00000529 - 4 5.680687 0.00000537 - 6 8.279837 0.00000553 - 8 10.620375 0.00000575 - 12 14.833338 0.00000617 - 13 15.869697 0.00000625 - 16 18.662619 0.00000654 - 19 21.290201 0.00000681 - 21 23.044685 0.00000695 - 24 25.244857 0.00000725 - 27 27.230274 0.00000756 - 29 28.694812 0.00000771 - 32 30.235393 0.00000807 - 35 32.012512 0.00000834 - 45 37.051923 0.00000927 - 48 35.137933 0.00001042 - 51 36.378442 0.00001070 - 61 40.020536 0.00001163 - 64 40.739109 0.00001199 - 67 41.771341 0.00001224 - 93 48.254142 0.00001470 - 96 48.358716 0.00001515 - 99 49.207675 0.00001535 - 125 53.998148 0.00001766 - 128 54.321093 0.00001798 - 131 54.732022 0.00001826 - 189 60.983186 0.00002365 - 192 61.113296 
0.00002397 - 195 61.363089 0.00002424 - 253 65.146921 0.00002963 - 256 65.106144 0.00003000 - 259 65.199301 0.00003031 - 381 69.770319 0.00004166 - 384 69.779897 0.00004198 - 387 69.837263 0.00004228 - 509 72.376345 0.00005366 - 512 72.306878 0.00005402 - 515 72.380826 0.00005428 - 765 75.184275 0.00007763 - 768 75.142572 0.00007798 - 771 75.161956 0.00007826 - 1021 76.622054 0.00010166 - 1024 76.616894 0.00010197 - 1027 76.639170 0.00010224 - 1533 78.688710 0.00014863 - 1536 78.709541 0.00014889 - 1539 78.693374 0.00014921 - 2045 79.676913 0.00019582 - 2048 79.665163 0.00019613 - 2051 79.663945 0.00019642 - 3069 80.657680 0.00029030 - 3072 80.674362 0.00029052 - 3075 80.693141 0.00029074 - 4093 81.208197 0.00038453 - 4096 81.193429 0.00038488 - 4099 81.203764 0.00038512 - 6141 81.788991 0.00057284 - 6144 81.974620 0.00057182 - 6147 81.976342 0.00057209 - 8189 82.253404 0.00075957 - 8192 82.235103 0.00076002 - 8195 82.241205 0.00076024 - 12285 82.613735 0.00113452 - 12288 82.599727 0.00113499 - 12291 82.635605 0.00113478 - 16381 94.672234 0.00132010 - 16384 94.705665 0.00131988 - 16387 94.679556 0.00132048 - 24573 108.550580 0.00172709 - 24576 108.528530 0.00172766 - 24579 108.536654 0.00172774 - 32765 100.844219 0.00247884 - 32768 100.839887 0.00247918 - 32771 100.836028 0.00247950 - 49149 94.147423 0.00398287 - 49152 94.133095 0.00398372 - 49155 94.149934 0.00398325 - 65533 91.092434 0.00548868 - 65536 91.118895 0.00548734 - 65539 91.126806 0.00548711 - 98301 99.506829 0.00753694 - 98304 99.503287 0.00753744 - 98307 99.508881 0.00753725 - 131069 110.845352 0.00902137 - 131072 110.852503 0.00902100 - 131075 110.829264 0.00902309 - 196605 125.039700 0.01199601 - 196608 125.049064 0.01199529 - 196611 125.058180 0.01199460 - 262141 133.655552 0.01496367 - 262144 133.643595 0.01496518 - 262147 133.648318 0.01496482 - 393213 143.412709 0.02091849 - 393216 143.416255 0.02091813 - 393219 143.399916 0.02092067 - 524285 148.981416 0.02684883 - 524288 148.972126 0.02685066 
- 524291 148.979372 0.02684951 - 786429 149.190063 0.04021700 - 786432 149.207881 0.04021235 - 786435 149.211694 0.04021148 - 1048573 150.463151 0.05316901 - 1048576 153.137918 0.05224049 - 1048579 151.191017 0.05291335 - 1572861 155.287901 0.07727567 - 1572864 155.114952 0.07736198 - 1572867 155.118834 0.07736019 - 2097149 156.146852 0.10246750 - 2097152 156.145319 0.10246865 - 2097155 156.418502 0.10228984 - 3145725 155.142855 0.15469599 - 3145728 154.684201 0.15515482 - 3145731 154.678010 0.15516118 - 4194301 154.857594 0.20664132 - 4194304 154.994211 0.20645932 - 4194307 154.993308 0.20646067 - 6291453 155.282773 0.30911334 - 6291456 155.287638 0.30910381 - 6291459 154.829623 0.31001834 - 8388605 154.602822 0.41396383 - 8388608 154.383715 0.41455150 - 8388611 154.828717 0.41336016 diff --git a/performance/mpi/netpipe/netpipe.gnuplot b/performance/mpi/netpipe/netpipe.gnuplot deleted file mode 100644 index 3779a90..0000000 --- a/performance/mpi/netpipe/netpipe.gnuplot +++ /dev/null @@ -1,17 +0,0 @@ -# For PNG output (in a very small PNG) -# set terminal png small -# set output "netpipe.png" -# set size 0.5,0.5 - -# For Encapsulated PostScript output -set terminal postscript enhanced 20 -set output "netpipe.eps" - -# Common parts -set title "NetPIPE Bandwidth" -set xlabel "Message Size in Bytes" -set ylabel "Bandwidth in MB/s" -set data style lines -set logscale xy -set key right bottom -plot "NPmpi.out" title 'MPI', "NPboost_mpi.out" title "Boost.MPI", "NPmpi_dt.out" title 'MPI (Datatypes)', "NPboost_mpi_dt.out" title 'Boost.MPI (Datatypes)', "NPboost_mpi_ser.out" title 'Boost.MPI (Serialized)' diff --git a/src/broadcast.cpp b/src/broadcast.cpp deleted file mode 100644 index 2b80777..0000000 --- a/src/broadcast.cpp +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2005 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Message Passing Interface 1.1 -- Section 4.4. Broadcast - -#include -#include -#include -#include - -namespace boost { namespace mpi { - -template<> -void -broadcast(const communicator& comm, - const packed_oarchive& oa, - int root) -{ - // Only the root can broadcast the packed_oarchive - assert(comm.rank() == root); - - int size = comm.size(); - if (size < 2) return; - - // Determine maximum tag value - int tag = environment::collectives_tag(); - - // Broadcast data to all nodes - std::vector requests(size * 2); - int num_requests = 0; - for (int dest = 0; dest < size; ++dest) { - if (dest != root) { - // Build up send requests for each child send. - num_requests += detail::packed_archive_isend(comm, dest, tag, oa, - &requests[num_requests], 2); - } - } - - // Complete all of the sends - BOOST_MPI_CHECK_RESULT(MPI_Waitall, - (num_requests, &requests[0], MPI_STATUSES_IGNORE)); -} - -template<> -void -broadcast(const communicator& comm, packed_oarchive& oa, - int root) -{ - broadcast(comm, const_cast(oa), root); -} - -template<> -void -broadcast(const communicator& comm, packed_iarchive& ia, - int root) -{ - int size = comm.size(); - if (size < 2) return; - - // Determine maximum tag value - int tag = environment::collectives_tag(); - - // Receive data from the root. - if (comm.rank() != root) { - MPI_Status status; - detail::packed_archive_recv(comm, root, tag, ia, status); - } else { - // Broadcast data to all nodes - std::vector requests(size * 2); - int num_requests = 0; - for (int dest = 0; dest < size; ++dest) { - if (dest != root) { - // Build up send requests for each child send. 
- num_requests += detail::packed_archive_isend(comm, dest, tag, ia, - &requests[num_requests], - 2); - } - } - - // Complete all of the sends - BOOST_MPI_CHECK_RESULT(MPI_Waitall, - (num_requests, &requests[0], MPI_STATUSES_IGNORE)); - } -} - -template<> -void -broadcast(const communicator& comm, - const packed_skeleton_oarchive& oa, - int root) -{ - broadcast(comm, oa.get_skeleton(), root); -} - -template<> -void -broadcast(const communicator& comm, - packed_skeleton_oarchive& oa, int root) -{ - broadcast(comm, oa.get_skeleton(), root); -} - -template<> -void -broadcast(const communicator& comm, - packed_skeleton_iarchive& ia, int root) -{ - broadcast(comm, ia.get_skeleton(), root); -} - -template<> -void broadcast(const communicator& comm, content& c, int root) -{ - broadcast(comm, const_cast(c), root); -} - -template<> -void broadcast(const communicator& comm, const content& c, - int root) -{ -#ifdef LAM_MPI - if (comm.size() < 2) - return; - - // Some versions of LAM/MPI behave badly when broadcasting using - // MPI_BOTTOM, so we'll instead use manual send/recv operations. - if (comm.rank() == root) { - for (int p = 0; p < comm.size(); ++p) { - if (p != root) { - BOOST_MPI_CHECK_RESULT(MPI_Send, - (MPI_BOTTOM, 1, c.get_mpi_datatype(), - p, environment::collectives_tag(), comm)); - } - } - } else { - BOOST_MPI_CHECK_RESULT(MPI_Recv, - (MPI_BOTTOM, 1, c.get_mpi_datatype(), - root, environment::collectives_tag(), - comm, MPI_STATUS_IGNORE)); - } -#else - BOOST_MPI_CHECK_RESULT(MPI_Bcast, - (MPI_BOTTOM, 1, c.get_mpi_datatype(), - root, comm)); -#endif -} - -} } // end namespace boost::mpi diff --git a/src/communicator.cpp b/src/communicator.cpp deleted file mode 100644 index c1e145c..0000000 --- a/src/communicator.cpp +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright (C) 2005, 2006 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -#include -#include -#include -#include -#include -#include - -namespace boost { namespace mpi { - -/*************************************************************************** - * status * - ***************************************************************************/ -bool status::cancelled() const -{ - int flag = 0; - BOOST_MPI_CHECK_RESULT(MPI_Test_cancelled, (&m_status, &flag)); - return flag != 0; -} - -/*************************************************************************** - * communicator * - ***************************************************************************/ - -communicator::communicator() -{ - comm_ptr.reset(new MPI_Comm(MPI_COMM_WORLD)); -} - -communicator::communicator(const MPI_Comm& comm, comm_create_kind kind) -{ - if (comm == MPI_COMM_NULL) - /* MPI_COMM_NULL indicates that the communicator is not usable. */ - return; - - switch (kind) { - case comm_duplicate: - { - MPI_Comm newcomm; - BOOST_MPI_CHECK_RESULT(MPI_Comm_dup, (comm, &newcomm)); - comm_ptr.reset(new MPI_Comm(newcomm), comm_free()); - MPI_Errhandler_set(newcomm, MPI_ERRORS_RETURN); - break; - } - - case comm_take_ownership: - comm_ptr.reset(new MPI_Comm(comm), comm_free()); - break; - - case comm_attach: - comm_ptr.reset(new MPI_Comm(comm)); - break; - } -} - -communicator::communicator(const communicator& comm, - const boost::mpi::group& subgroup) -{ - MPI_Comm newcomm; - BOOST_MPI_CHECK_RESULT(MPI_Comm_create, - ((MPI_Comm)comm, (MPI_Group)subgroup, &newcomm)); - comm_ptr.reset(new MPI_Comm(newcomm), comm_free()); -} - -int communicator::size() const -{ - int size_; - BOOST_MPI_CHECK_RESULT(MPI_Comm_size, (MPI_Comm(*this), &size_)); - return size_; -} - -int communicator::rank() const -{ - int rank_; - BOOST_MPI_CHECK_RESULT(MPI_Comm_rank, (MPI_Comm(*this), &rank_)); - return rank_; -} - -boost::mpi::group communicator::group() const -{ - MPI_Group gr; - 
BOOST_MPI_CHECK_RESULT(MPI_Comm_group, ((MPI_Comm)*this, &gr)); - return boost::mpi::group(gr, /*adopt=*/true); -} - -void communicator::send(int dest, int tag) const -{ - BOOST_MPI_CHECK_RESULT(MPI_Send, - (MPI_BOTTOM, 0, MPI_PACKED, - dest, tag, MPI_Comm(*this))); -} - -status communicator::recv(int source, int tag) const -{ - status stat; - BOOST_MPI_CHECK_RESULT(MPI_Recv, - (MPI_BOTTOM, 0, MPI_PACKED, - source, tag, MPI_Comm(*this), &stat.m_status)); - return stat; -} - -optional communicator::iprobe(int source, int tag) const -{ - typedef optional result_type; - - status stat; - int flag; - BOOST_MPI_CHECK_RESULT(MPI_Iprobe, - (source, tag, MPI_Comm(*this), &flag, - &stat.m_status)); - if (flag) return stat; - else return result_type(); -} - -status communicator::probe(int source, int tag) const -{ - typedef optional result_type; - - status stat; - BOOST_MPI_CHECK_RESULT(MPI_Probe, - (source, tag, MPI_Comm(*this), &stat.m_status)); - return stat; -} - -void (communicator::barrier)() const -{ - BOOST_MPI_CHECK_RESULT(MPI_Barrier, (MPI_Comm(*this))); -} - - -communicator::operator MPI_Comm() const -{ - if (comm_ptr) return *comm_ptr; - else return MPI_COMM_NULL; -} - -communicator communicator::split(int color) const -{ - return split(color, rank()); -} - -communicator communicator::split(int color, int key) const -{ - MPI_Comm newcomm; - BOOST_MPI_CHECK_RESULT(MPI_Comm_split, - (MPI_Comm(*this), color, key, &newcomm)); - return communicator(newcomm, comm_take_ownership); -} - -optional communicator::as_intercommunicator() const -{ - int flag; - BOOST_MPI_CHECK_RESULT(MPI_Comm_test_inter, ((MPI_Comm)*this, &flag)); - if (flag) - return intercommunicator(comm_ptr); - else - return optional(); -} - -optional communicator::as_graph_communicator() const -{ - int status; - BOOST_MPI_CHECK_RESULT(MPI_Topo_test, ((MPI_Comm)*this, &status)); - if (status == MPI_GRAPH) - return graph_communicator(comm_ptr); - else - return optional(); -} - -bool 
communicator::has_cartesian_topology() const -{ - int status; - BOOST_MPI_CHECK_RESULT(MPI_Topo_test, ((MPI_Comm)*this, &status)); - - return status == MPI_CART; -} - -void communicator::abort(int errcode) const -{ - BOOST_MPI_CHECK_RESULT(MPI_Abort, (MPI_Comm(*this), errcode)); -} - -/************************************************************* - * archived send/recv * - *************************************************************/ -template<> -void -communicator::send(int dest, int tag, - const packed_oarchive& ar) const -{ - detail::packed_archive_send(MPI_Comm(*this), dest, tag, ar); -} - -template<> -void -communicator::send - (int dest, int tag, const packed_skeleton_oarchive& ar) const -{ - this->send(dest, tag, ar.get_skeleton()); -} - -template<> -void communicator::send(int dest, int tag, const content& c) const -{ - BOOST_MPI_CHECK_RESULT(MPI_Send, - (MPI_BOTTOM, 1, c.get_mpi_datatype(), - dest, tag, MPI_Comm(*this))); -} - -template<> -status -communicator::recv(int source, int tag, - packed_iarchive& ar) const -{ - status stat; - detail::packed_archive_recv(MPI_Comm(*this), source, tag, ar, - stat.m_status); - return stat; -} - -template<> -status -communicator::recv - (int source, int tag, packed_skeleton_iarchive& ar) const -{ - return this->recv(source, tag, ar.get_skeleton()); -} - -template<> -status -communicator::recv(int source, int tag, const content& c) const -{ - status stat; - BOOST_MPI_CHECK_RESULT(MPI_Recv, - (MPI_BOTTOM, 1, c.get_mpi_datatype(), - source, tag, MPI_Comm(*this), &stat.m_status)); - return stat; -} - -/************************************************************* - * non-blocking send/recv * - *************************************************************/ -template<> -request -communicator::isend(int dest, int tag, - const packed_oarchive& ar) const -{ - request req; - detail::packed_archive_isend(MPI_Comm(*this), dest, tag, ar, - &req.m_requests[0] ,2); - return req; -} - -template<> -request -communicator::isend - (int 
dest, int tag, const packed_skeleton_oarchive& ar) const -{ - return this->isend(dest, tag, ar.get_skeleton()); -} - -template<> -request communicator::isend(int dest, int tag, const content& c) const -{ - request req; - BOOST_MPI_CHECK_RESULT(MPI_Isend, - (MPI_BOTTOM, 1, c.get_mpi_datatype(), - dest, tag, MPI_Comm(*this), &req.m_requests[0])); - return req; -} - -request communicator::isend(int dest, int tag) const -{ - request req; - BOOST_MPI_CHECK_RESULT(MPI_Isend, - (MPI_BOTTOM, 0, MPI_PACKED, - dest, tag, MPI_Comm(*this), &req.m_requests[0])); - return req; -} - -template<> -request -communicator::irecv - (int source, int tag, packed_skeleton_iarchive& ar) const -{ - return this->irecv(source, tag, ar.get_skeleton()); -} - -template<> -request -communicator::irecv(int source, int tag, - const content& c) const -{ - request req; - BOOST_MPI_CHECK_RESULT(MPI_Irecv, - (MPI_BOTTOM, 1, c.get_mpi_datatype(), - source, tag, MPI_Comm(*this), &req.m_requests[0])); - return req; -} - -request communicator::irecv(int source, int tag) const -{ - request req; - BOOST_MPI_CHECK_RESULT(MPI_Irecv, - (MPI_BOTTOM, 0, MPI_PACKED, - source, tag, MPI_Comm(*this), &req.m_requests[0])); - return req; -} - -bool operator==(const communicator& comm1, const communicator& comm2) -{ - int result; - BOOST_MPI_CHECK_RESULT(MPI_Comm_compare, - ((MPI_Comm)comm1, (MPI_Comm)comm2, &result)); - return result == MPI_IDENT; -} - -} } // end namespace boost::mpi diff --git a/src/computation_tree.cpp b/src/computation_tree.cpp deleted file mode 100644 index 60de534..0000000 --- a/src/computation_tree.cpp +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (C) 2005 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Compute parents, children, levels, etc. to effect a parallel -// computation tree. 
- -#include - -namespace boost { namespace mpi { namespace detail { - -int computation_tree::default_branching_factor = 3; - -computation_tree -::computation_tree(int rank, int size, int root, int branching_factor) - : rank(rank), size(size), root(root), - branching_factor_(branching_factor > 1? branching_factor - /* default */: default_branching_factor), - level_(0) -{ - // The position in the tree, once we've adjusted for non-zero - // roots. - int n = (rank + size - root) % size; - int sum = 0; - int term = 1; - - /* The level is the smallest value of k such that - - f^0 + f^1 + ... + f^k > n - - for branching factor f and index n in the tree. */ - while (sum <= n) { - ++level_; - term *= branching_factor_; - sum += term; - } -} - -int computation_tree::level_index(int n) const -{ - int sum = 0; - int term = 1; - while (n--) { - sum += term; - term *= branching_factor_; - } - return sum; -} - -int computation_tree::parent() const -{ - if (rank == root) return rank; - int n = rank + size - 1 - root; - return ((n % size / branching_factor_) + root) % size ; -} - -int computation_tree::child_begin() const -{ - // Zero-based index of this node - int n = (rank + size - root) % size; - - // Compute the index of the child (in a zero-based tree) - int child_index = level_index(level_ + 1) - + branching_factor_ * (n - level_index(level_)); - - if (child_index >= size) return root; - else return (child_index + root) % size; -} - -} } } // end namespace boost::mpi::detail diff --git a/src/content_oarchive.cpp b/src/content_oarchive.cpp deleted file mode 100644 index 357c02e..0000000 --- a/src/content_oarchive.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#define BOOST_ARCHIVE_SOURCE -#include -#include - -namespace boost { namespace archive { namespace detail { -// explicitly instantiate all required template functions - -template class archive_pointer_oserializer ; - -} } } diff --git a/src/environment.cpp b/src/environment.cpp deleted file mode 100644 index ae475e4..0000000 --- a/src/environment.cpp +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (C) 2005-2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Message Passing Interface 1.1 -- 7.1.1. Environmental Inquiries -#include -#include -#include -#include -#include - -namespace boost { namespace mpi { - -#ifdef BOOST_MPI_HAS_NOARG_INITIALIZATION -environment::environment(bool abort_on_exception) - : i_initialized(false), - abort_on_exception(abort_on_exception) -{ - if (!initialized()) { - BOOST_MPI_CHECK_RESULT(MPI_Init, (0, 0)); - i_initialized = true; - } - - MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN); -} -#endif - -environment::environment(int& argc, char** &argv, bool abort_on_exception) - : i_initialized(false), - abort_on_exception(abort_on_exception) -{ - if (!initialized()) { - BOOST_MPI_CHECK_RESULT(MPI_Init, (&argc, &argv)); - i_initialized = true; - } - - MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN); -} - -environment::~environment() -{ - if (i_initialized) { - if (std::uncaught_exception() && abort_on_exception) { - abort(-1); - } else if (!finalized()) { - BOOST_MPI_CHECK_RESULT(MPI_Finalize, ()); - } - } -} - -void environment::abort(int errcode) -{ - BOOST_MPI_CHECK_RESULT(MPI_Abort, (MPI_COMM_WORLD, errcode)); -} - -bool environment::initialized() -{ - int flag; - BOOST_MPI_CHECK_RESULT(MPI_Initialized, (&flag)); - return flag != 0; -} - 
-bool environment::finalized() -{ - int flag; - BOOST_MPI_CHECK_RESULT(MPI_Finalized, (&flag)); - return flag != 0; -} - -int environment::max_tag() -{ - int* max_tag_value; - int found = 0; - - BOOST_MPI_CHECK_RESULT(MPI_Attr_get, - (MPI_COMM_WORLD, MPI_TAG_UB, &max_tag_value, &found)); - assert(found != 0); - return *max_tag_value - num_reserved_tags; -} - -int environment::collectives_tag() -{ - return max_tag() + 1; -} - -optional environment::host_rank() -{ - int* host; - int found = 0; - - BOOST_MPI_CHECK_RESULT(MPI_Attr_get, - (MPI_COMM_WORLD, MPI_HOST, &host, &found)); - if (!found || *host == MPI_PROC_NULL) - return optional(); - else - return *host; -} - -optional environment::io_rank() -{ - int* io; - int found = 0; - - BOOST_MPI_CHECK_RESULT(MPI_Attr_get, - (MPI_COMM_WORLD, MPI_IO, &io, &found)); - if (!found || *io == MPI_PROC_NULL) - return optional(); - else - return *io; -} - -std::string environment::processor_name() -{ - char name[MPI_MAX_PROCESSOR_NAME]; - int len; - - BOOST_MPI_CHECK_RESULT(MPI_Get_processor_name, (name, &len)); - return std::string(name, len); -} - -} } // end namespace boost::mpi diff --git a/src/exception.cpp b/src/exception.cpp deleted file mode 100644 index 9cb4c25..0000000 --- a/src/exception.cpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2007 Trustees of Indiana University - -// Authors: Douglas Gregor -// Andrew Lumsdaine - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -#include - -namespace boost { namespace mpi { - -exception::exception(const char* routine, int result_code) - : routine_(routine), result_code_(result_code) -{ - // Query the MPI implementation for its reason for failure - char buffer[MPI_MAX_ERROR_STRING]; - int len; - MPI_Error_string(result_code, buffer, &len); - - // Construct the complete error message - message.append(routine_); - message.append(": "); - message.append(buffer, len); -} - -exception::~exception() throw() { } - -} } // end namespace boost::mpi diff --git a/src/graph_communicator.cpp b/src/graph_communicator.cpp deleted file mode 100644 index 586b57b..0000000 --- a/src/graph_communicator.cpp +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (C) 2007 Trustees of Indiana University - -// Authors: Douglas Gregor -// Andrew Lumsdaine - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -#include - -namespace boost { namespace mpi { - -// Incidence Graph requirements -std::pair -out_edges(int vertex, const graph_communicator& comm) -{ - int nneighbors = out_degree(vertex, comm); - shared_array neighbors(new int[nneighbors]); - BOOST_MPI_CHECK_RESULT(MPI_Graph_neighbors, - ((MPI_Comm)comm, vertex, nneighbors, neighbors.get())); - return std::make_pair(detail::comm_out_edge_iterator(vertex, neighbors, 0), - detail::comm_out_edge_iterator(vertex, neighbors, - nneighbors)); -} - -int out_degree(int vertex, const graph_communicator& comm) -{ - int nneighbors; - BOOST_MPI_CHECK_RESULT(MPI_Graph_neighbors_count, - ((MPI_Comm)comm, vertex, &nneighbors)); - return nneighbors; -} - -// Adjacency Graph requirements -std::pair -adjacent_vertices(int vertex, const graph_communicator& comm) -{ - int nneighbors = out_degree(vertex, comm); - shared_array neighbors(new int[nneighbors]); - 
BOOST_MPI_CHECK_RESULT(MPI_Graph_neighbors, - ((MPI_Comm)comm, vertex, nneighbors, neighbors.get())); - return std::make_pair(detail::comm_adj_iterator(neighbors, 0), - detail::comm_adj_iterator(neighbors, nneighbors)); -} - -// Edge List Graph requirements -std::pair -edges(const graph_communicator& comm); - -std::pair -edges(const graph_communicator& comm) -{ - int nnodes, nedges; - BOOST_MPI_CHECK_RESULT(MPI_Graphdims_get, ((MPI_Comm)comm, &nnodes, &nedges)); - - shared_array indices(new int[nnodes]); - shared_array edges(new int[nedges]); - BOOST_MPI_CHECK_RESULT(MPI_Graph_get, - ((MPI_Comm)comm, nnodes, nedges, - indices.get(), edges.get())); - return std::make_pair(detail::comm_edge_iterator(indices, edges), - detail::comm_edge_iterator(nedges)); -} - - -int num_edges(const graph_communicator& comm) -{ - int nnodes, nedges; - BOOST_MPI_CHECK_RESULT(MPI_Graphdims_get, ((MPI_Comm)comm, &nnodes, &nedges)); - return nedges; -} - -} } // end namespace boost::mpi diff --git a/src/group.cpp b/src/group.cpp deleted file mode 100644 index 034d08f..0000000 --- a/src/group.cpp +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright (C) 2007 Trustees of Indiana University - -// Authors: Douglas Gregor -// Andrew Lumsdaine - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -#include -#include - -namespace boost { namespace mpi { - -group::group(const MPI_Group& in_group, bool adopt) -{ - if (in_group != MPI_GROUP_EMPTY) { - if (adopt) group_ptr.reset(new MPI_Group(in_group), group_free()); - else group_ptr.reset(new MPI_Group(in_group)); - } -} - -optional group::rank() const -{ - if (!group_ptr) - return optional(); - - int rank; - BOOST_MPI_CHECK_RESULT(MPI_Group_rank, (*group_ptr, &rank)); - if (rank == MPI_UNDEFINED) - return optional(); - else - return rank; -} - -int group::size() const -{ - if (!group_ptr) - return 0; - - int size; - BOOST_MPI_CHECK_RESULT(MPI_Group_size, (*group_ptr, &size)); - return size; -} - -bool operator==(const group& g1, const group& g2) -{ - int result; - BOOST_MPI_CHECK_RESULT(MPI_Group_compare, - ((MPI_Group)g1, (MPI_Group)g2, &result)); - return result == MPI_IDENT; -} - -group operator|(const group& g1, const group& g2) -{ - MPI_Group result; - BOOST_MPI_CHECK_RESULT(MPI_Group_union, - ((MPI_Group)g1, (MPI_Group)g2, &result)); - return group(result, /*adopt=*/true); -} - -group operator&(const group& g1, const group& g2) -{ - MPI_Group result; - BOOST_MPI_CHECK_RESULT(MPI_Group_intersection, - ((MPI_Group)g1, (MPI_Group)g2, &result)); - return group(result, /*adopt=*/true); -} - -group operator-(const group& g1, const group& g2) -{ - MPI_Group result; - BOOST_MPI_CHECK_RESULT(MPI_Group_difference, - ((MPI_Group)g1, (MPI_Group)g2, &result)); - return group(result, /*adopt=*/true); -} - -template<> -int* -group::translate_ranks(int* first, int* last, const group& to_group, int* out) -{ - BOOST_MPI_CHECK_RESULT(MPI_Group_translate_ranks, - ((MPI_Group)*this, - last-first, - first, - (MPI_Group)to_group, - out)); - return out + (last - first); -} - -template<> group group::include(int* first, int* last) -{ - MPI_Group result; - BOOST_MPI_CHECK_RESULT(MPI_Group_incl, - ((MPI_Group)*this, last - first, first, 
&result)); - return group(result, /*adopt=*/true); -} - -template<> group group::exclude(int* first, int* last) -{ - MPI_Group result; - BOOST_MPI_CHECK_RESULT(MPI_Group_excl, - ((MPI_Group)*this, last - first, first, &result)); - return group(result, /*adopt=*/true); -} - -} } // end namespace boost::mpi diff --git a/src/intercommunicator.cpp b/src/intercommunicator.cpp deleted file mode 100644 index 6b07285..0000000 --- a/src/intercommunicator.cpp +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (C) 2007 Trustees of Indiana University - -// Authors: Douglas Gregor -// Andrew Lumsdaine - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -#include -#include -#include - -namespace boost { namespace mpi { - -intercommunicator::intercommunicator(const communicator& local, - int local_leader, - const communicator& peer, - int remote_leader) -{ - MPI_Comm comm; - BOOST_MPI_CHECK_RESULT(MPI_Intercomm_create, - ((MPI_Comm)local, local_leader, - (MPI_Comm)peer, remote_leader, - environment::collectives_tag(), &comm)); - comm_ptr.reset(new MPI_Comm(comm), comm_free()); -} - -boost::mpi::group intercommunicator::local_group() const -{ - return this->group(); -} - -int intercommunicator::remote_size() const -{ - int size; - BOOST_MPI_CHECK_RESULT(MPI_Comm_remote_size, ((MPI_Comm)*this, &size)); - return size; -} - -boost::mpi::group intercommunicator::remote_group() const -{ - MPI_Group gr; - BOOST_MPI_CHECK_RESULT(MPI_Comm_remote_group, ((MPI_Comm)*this, &gr)); - return boost::mpi::group(gr, /*adopt=*/true); -} - -communicator intercommunicator::merge(bool high) const -{ - MPI_Comm comm; - BOOST_MPI_CHECK_RESULT(MPI_Intercomm_merge, ((MPI_Comm)*this, high, &comm)); - return communicator(comm, comm_take_ownership); -} - -} } // end namespace boost::mpi diff --git a/src/mpi/broadcast.cpp b/src/mpi/broadcast.cpp deleted file mode 100644 index 
b3fc1e2..0000000 --- a/src/mpi/broadcast.cpp +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2005 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Message Passing Interface 1.1 -- Section 4.4. Broadcast - -#include -#include -#include -#include - -namespace boost { namespace parallel { namespace mpi { - -template<> -void -broadcast(const communicator& comm, - const packed_oarchive& oa, - int root) -{ - // Only the root can broadcast the packed_oarchive - assert(comm.rank() == root); - - int size = comm.size(); - if (size < 2) return; - - // Determine maximum tag value - int tag = environment::collectives_tag(); - - // Broadcast data to all nodes - std::vector requests(size * 2); - int num_requests = 0; - for (int dest = 0; dest < size; ++dest) { - if (dest != root) { - // Build up send requests for each child send. - num_requests += detail::packed_archive_isend(comm, dest, tag, oa, - &requests[num_requests], 2); - } - } - - // Complete all of the sends - BOOST_MPI_CHECK_RESULT(MPI_Waitall, - (num_requests, &requests[0], MPI_STATUSES_IGNORE)); -} - -template<> -void -broadcast(const communicator& comm, packed_oarchive& oa, - int root) -{ - broadcast(comm, const_cast(oa), root); -} - -template<> -void -broadcast(const communicator& comm, packed_iarchive& ia, - int root) -{ - int size = comm.size(); - if (size < 2) return; - - // Determine maximum tag value - int tag = environment::collectives_tag(); - - // Receive data from the root. - if (comm.rank() != root) { - MPI_Status status; - detail::packed_archive_recv(comm, root, tag, ia, status); - } else { - // Broadcast data to all nodes - std::vector requests(size * 2); - int num_requests = 0; - for (int dest = 0; dest < size; ++dest) { - if (dest != root) { - // Build up send requests for each child send. 
- num_requests += detail::packed_archive_isend(comm, dest, tag, ia, - &requests[num_requests], - 2); - } - } - - // Complete all of the sends - BOOST_MPI_CHECK_RESULT(MPI_Waitall, - (num_requests, &requests[0], MPI_STATUSES_IGNORE)); - } -} - -template<> -void -broadcast(const communicator& comm, - const packed_skeleton_oarchive& oa, - int root) -{ - broadcast(comm, oa.get_skeleton(), root); -} - -template<> -void -broadcast(const communicator& comm, - packed_skeleton_oarchive& oa, int root) -{ - broadcast(comm, oa.get_skeleton(), root); -} - -template<> -void -broadcast(const communicator& comm, - packed_skeleton_iarchive& ia, int root) -{ - broadcast(comm, ia.get_skeleton(), root); -} - -template<> -void broadcast(const communicator& comm, content& c, int root) -{ - broadcast(comm, const_cast(c), root); -} - -template<> -void broadcast(const communicator& comm, const content& c, - int root) -{ -#ifdef LAM_MPI - if (comm.size() < 2) - return; - - // Some versions of LAM/MPI behave badly when broadcasting using - // MPI_BOTTOM, so we'll instead use manual send/recv operations. - if (comm.rank() == root) { - for (int p = 0; p < comm.size(); ++p) { - if (p != root) { - BOOST_MPI_CHECK_RESULT(MPI_Send, - (MPI_BOTTOM, 1, c.get_mpi_datatype(), - p, environment::collectives_tag(), comm)); - } - } - } else { - BOOST_MPI_CHECK_RESULT(MPI_Recv, - (MPI_BOTTOM, 1, c.get_mpi_datatype(), - root, environment::collectives_tag(), - comm, MPI_STATUS_IGNORE)); - } -#else - BOOST_MPI_CHECK_RESULT(MPI_Bcast, - (MPI_BOTTOM, 1, c.get_mpi_datatype(), - root, comm)); -#endif -} - -} } } // end namespace boost::parallel::mpi diff --git a/src/mpi/communicator.cpp b/src/mpi/communicator.cpp deleted file mode 100644 index 15c937c..0000000 --- a/src/mpi/communicator.cpp +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright (C) 2005, 2006 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -#include -#include -#include - -namespace boost { namespace parallel { namespace mpi { - -/*************************************************************************** - * status * - ***************************************************************************/ -bool status::cancelled() -{ - int flag = 0; - BOOST_MPI_CHECK_RESULT(MPI_Test_cancelled, (&m_status, &flag)); - return flag != 0; -} - -/*************************************************************************** - * communicator * - ***************************************************************************/ - -communicator::communicator() -{ - comm_ptr.reset(new MPI_Comm(MPI_COMM_WORLD)); -} - -communicator::communicator(const MPI_Comm& comm, comm_create_kind kind) -{ - if (comm == MPI_COMM_NULL) - /* MPI_COMM_NULL indicates that the communicator is not usable. */ - return; - - switch (kind) { - case comm_duplicate: - { - MPI_Comm newcomm; - BOOST_MPI_CHECK_RESULT(MPI_Comm_dup, (comm, &newcomm)); - comm_ptr.reset(new MPI_Comm(newcomm), comm_free()); - MPI_Errhandler_set(newcomm, MPI_ERRORS_RETURN); - break; - } - - case comm_take_ownership: - comm_ptr.reset(new MPI_Comm(comm), comm_free()); - break; - - case comm_attach: - comm_ptr.reset(new MPI_Comm(comm)); - break; - } - } - -int communicator::size() const -{ - int size_; - BOOST_MPI_CHECK_RESULT(MPI_Comm_size, (MPI_Comm(*this), &size_)); - return size_; -} - -int communicator::rank() const -{ - int rank_; - BOOST_MPI_CHECK_RESULT(MPI_Comm_rank, (MPI_Comm(*this), &rank_)); - return rank_; -} - -void communicator::send(int dest, int tag) const -{ - BOOST_MPI_CHECK_RESULT(MPI_Send, - (MPI_BOTTOM, 0, MPI_PACKED, - dest, tag, MPI_Comm(*this))); -} - -status communicator::recv(int source, int tag) const -{ - status stat; - BOOST_MPI_CHECK_RESULT(MPI_Recv, - (MPI_BOTTOM, 0, MPI_PACKED, - source, tag, MPI_Comm(*this), &stat.m_status)); - return stat; -} - -optional 
communicator::iprobe(int source, int tag) const -{ - typedef optional result_type; - - status stat; - int flag; - BOOST_MPI_CHECK_RESULT(MPI_Iprobe, - (source, tag, MPI_Comm(*this), &flag, - &stat.m_status)); - if (flag) return stat; - else return result_type(); -} - -status communicator::probe(int source, int tag) const -{ - typedef optional result_type; - - status stat; - BOOST_MPI_CHECK_RESULT(MPI_Probe, - (source, tag, MPI_Comm(*this), &stat.m_status)); - return stat; -} - -void (communicator::barrier)() const -{ - BOOST_MPI_CHECK_RESULT(MPI_Barrier, (MPI_Comm(*this))); -} - - -communicator::operator MPI_Comm() const -{ - if (comm_ptr) return *comm_ptr; - else return MPI_COMM_NULL; -} - -communicator communicator::split(int color) const -{ - return split(color, rank()); -} - -communicator communicator::split(int color, int key) const -{ - MPI_Comm newcomm; - BOOST_MPI_CHECK_RESULT(MPI_Comm_split, - (MPI_Comm(*this), color, key, &newcomm)); - return communicator(newcomm, comm_take_ownership); -} - -void communicator::abort(int errcode) const -{ - BOOST_MPI_CHECK_RESULT(MPI_Abort, (MPI_Comm(*this), errcode)); -} - -/************************************************************* - * archived send/recv * - *************************************************************/ -template<> -void -communicator::send(int dest, int tag, - const packed_oarchive& ar) const -{ - detail::packed_archive_send(MPI_Comm(*this), dest, tag, ar); -} - -template<> -void -communicator::send - (int dest, int tag, const packed_skeleton_oarchive& ar) const -{ - this->send(dest, tag, ar.get_skeleton()); -} - -template<> -void communicator::send(int dest, int tag, const content& c) const -{ - BOOST_MPI_CHECK_RESULT(MPI_Send, - (MPI_BOTTOM, 1, c.get_mpi_datatype(), - dest, tag, MPI_Comm(*this))); -} - -template<> -status -communicator::recv(int source, int tag, - packed_iarchive& ar) const -{ - status stat; - detail::packed_archive_recv(MPI_Comm(*this), source, tag, ar, - stat.m_status); - return 
stat; -} - -template<> -status -communicator::recv - (int source, int tag, packed_skeleton_iarchive& ar) const -{ - return this->recv(source, tag, ar.get_skeleton()); -} - -template<> -status -communicator::recv(int source, int tag, const content& c) const -{ - status stat; - BOOST_MPI_CHECK_RESULT(MPI_Recv, - (MPI_BOTTOM, 1, c.get_mpi_datatype(), - source, tag, MPI_Comm(*this), &stat.m_status)); - return stat; -} - -/************************************************************* - * non-blocking send/recv * - *************************************************************/ -template<> -request -communicator::isend(int dest, int tag, - const packed_oarchive& ar) const -{ - request req; - detail::packed_archive_isend(MPI_Comm(*this), dest, tag, ar, - &req.m_requests[0] ,2); - return req; -} - -template<> -request -communicator::isend - (int dest, int tag, const packed_skeleton_oarchive& ar) const -{ - return this->isend(dest, tag, ar.get_skeleton()); -} - -template<> -request communicator::isend(int dest, int tag, const content& c) const -{ - request req; - BOOST_MPI_CHECK_RESULT(MPI_Isend, - (MPI_BOTTOM, 1, c.get_mpi_datatype(), - dest, tag, MPI_Comm(*this), &req.m_requests[0])); - return req; -} - -request communicator::isend(int dest, int tag) const -{ - request req; - BOOST_MPI_CHECK_RESULT(MPI_Isend, - (MPI_BOTTOM, 0, MPI_PACKED, - dest, tag, MPI_Comm(*this), &req.m_requests[0])); - return req; -} - -template<> -request -communicator::irecv - (int source, int tag, packed_skeleton_iarchive& ar) const -{ - return this->irecv(source, tag, ar.get_skeleton()); -} - -template<> -request -communicator::irecv(int source, int tag, - const content& c) const -{ - request req; - BOOST_MPI_CHECK_RESULT(MPI_Irecv, - (MPI_BOTTOM, 1, c.get_mpi_datatype(), - source, tag, MPI_Comm(*this), &req.m_requests[0])); - return req; -} - -request communicator::irecv(int source, int tag) const -{ - request req; - BOOST_MPI_CHECK_RESULT(MPI_Irecv, - (MPI_BOTTOM, 0, MPI_PACKED, - source, tag, 
MPI_Comm(*this), &req.m_requests[0])); - return req; -} - -} } } // end namespace boost::parallel::mpi diff --git a/src/mpi/computation_tree.cpp b/src/mpi/computation_tree.cpp deleted file mode 100644 index a1f76b6..0000000 --- a/src/mpi/computation_tree.cpp +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (C) 2005 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Compute parents, children, levels, etc. to effect a parallel -// computation tree. - -#include - -namespace boost { namespace parallel { namespace mpi { namespace detail { - -int computation_tree::default_branching_factor = 3; - -computation_tree -::computation_tree(int rank, int size, int root, int branching_factor) - : rank(rank), size(size), root(root), - branching_factor_(branching_factor > 1? branching_factor - /* default */: default_branching_factor), - level_(0) -{ - // The position in the tree, once we've adjusted for non-zero - // roots. - int n = (rank + size - root) % size; - int sum = 0; - int term = 1; - - /* The level is the smallest value of k such that - - f^0 + f^1 + ... + f^k > n - - for branching factor f and index n in the tree. 
*/ - while (sum <= n) { - ++level_; - term *= branching_factor_; - sum += term; - } -} - -int computation_tree::level_index(int n) const -{ - int sum = 0; - int term = 1; - while (n--) { - sum += term; - term *= branching_factor_; - } - return sum; -} - -int computation_tree::parent() const -{ - if (rank == root) return rank; - int n = rank + size - 1 - root; - return ((n % size / branching_factor_) + root) % size ; -} - -int computation_tree::child_begin() const -{ - // Zero-based index of this node - int n = (rank + size - root) % size; - - // Compute the index of the child (in a zero-based tree) - int child_index = level_index(level_ + 1) - + branching_factor_ * (n - level_index(level_)); - - if (child_index >= size) return root; - else return (child_index + root) % size; -} - -} } } } // end namespace boost::parallel::mpi::detail diff --git a/src/mpi/content_oarchive.cpp b/src/mpi/content_oarchive.cpp deleted file mode 100644 index c93bb9f..0000000 --- a/src/mpi/content_oarchive.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#define BOOST_ARCHIVE_SOURCE -#include -#include - -namespace boost { namespace archive { namespace detail { -// explicitly instantiate all required template functions - -template class archive_pointer_oserializer ; - -} } } diff --git a/src/mpi/environment.cpp b/src/mpi/environment.cpp deleted file mode 100644 index bd64609..0000000 --- a/src/mpi/environment.cpp +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (C) 2005-2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Message Passing Interface 1.1 -- 7.1.1. 
Environmental Inquiries -#include -#include -#include -#include -#include - -namespace boost { namespace parallel { namespace mpi { - -#ifdef BOOST_MPI_HAS_NOARG_INITIALIZATION -environment::environment(bool abort_on_exception) - : i_initialized(false), - abort_on_exception(abort_on_exception) -{ - if (!initialized()) { - BOOST_MPI_CHECK_RESULT(MPI_Init, (0, 0)); - i_initialized = true; - } - - MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN); -} -#endif - -environment::environment(int& argc, char** &argv, bool abort_on_exception) - : i_initialized(false), - abort_on_exception(abort_on_exception) -{ - if (!initialized()) { - BOOST_MPI_CHECK_RESULT(MPI_Init, (&argc, &argv)); - i_initialized = true; - } - - MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN); -} - -environment::~environment() -{ - if (i_initialized) { - if (std::uncaught_exception() && abort_on_exception) { - abort(-1); - } else if (!finalized()) { - BOOST_MPI_CHECK_RESULT(MPI_Finalize, ()); - } - } -} - -void environment::abort(int errcode) -{ - BOOST_MPI_CHECK_RESULT(MPI_Abort, (MPI_COMM_WORLD, errcode)); -} - -bool environment::initialized() -{ - int flag; - BOOST_MPI_CHECK_RESULT(MPI_Initialized, (&flag)); - return flag != 0; -} - -bool environment::finalized() -{ - int flag; - BOOST_MPI_CHECK_RESULT(MPI_Finalized, (&flag)); - return flag != 0; -} - -int environment::max_tag() -{ - int* max_tag_value; - int found = 0; - - BOOST_MPI_CHECK_RESULT(MPI_Attr_get, - (MPI_COMM_WORLD, MPI_TAG_UB, &max_tag_value, &found)); - assert(found != 0); - return *max_tag_value - num_reserved_tags; -} - -int environment::collectives_tag() -{ - return max_tag() + 1; -} - -optional environment::host_rank() -{ - int* host; - int found = 0; - - BOOST_MPI_CHECK_RESULT(MPI_Attr_get, - (MPI_COMM_WORLD, MPI_HOST, &host, &found)); - if (!found || *host == MPI_PROC_NULL) - return optional(); - else - return *host; -} - -optional environment::io_rank() -{ - int* io; - int found = 0; - - 
BOOST_MPI_CHECK_RESULT(MPI_Attr_get, - (MPI_COMM_WORLD, MPI_IO, &io, &found)); - if (!found || *io == MPI_PROC_NULL) - return optional(); - else - return *io; -} - -std::string environment::processor_name() -{ - char name[MPI_MAX_PROCESSOR_NAME]; - int len; - - BOOST_MPI_CHECK_RESULT(MPI_Get_processor_name, (name, &len)); - return std::string(name, len); -} - -} } } // end namespace boost::parallel::mpi diff --git a/src/mpi/mpi_datatype_cache.cpp b/src/mpi/mpi_datatype_cache.cpp deleted file mode 100644 index 5371e2c..0000000 --- a/src/mpi/mpi_datatype_cache.cpp +++ /dev/null @@ -1,16 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#include -#include - -namespace boost { namespace parallel { namespace mpi { namespace detail { - -mpi_datatype_map mpi_datatype_cache; - -}}}} diff --git a/src/mpi/mpi_datatype_oarchive.cpp b/src/mpi/mpi_datatype_oarchive.cpp deleted file mode 100644 index a791816..0000000 --- a/src/mpi/mpi_datatype_oarchive.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#define BOOST_ARCHIVE_SOURCE -#include -#include - -namespace boost { namespace archive { namespace detail { -// explicitly instantiate all required template functions - -template class archive_pointer_oserializer ; - -} } } diff --git a/src/mpi/packed_iarchive.cpp b/src/mpi/packed_iarchive.cpp deleted file mode 100644 index 683bfbd..0000000 --- a/src/mpi/packed_iarchive.cpp +++ /dev/null @@ -1,26 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#define BOOST_ARCHIVE_SOURCE -#include -#include -#include - -#include -#include -#include - -namespace boost { namespace archive { - -// explicitly instantiate all required templates - -template class basic_binary_iarchive ; -template class detail::archive_pointer_iserializer ; -//template class binary_iarchive_impl ; - -} } // end namespace boost::archive diff --git a/src/mpi/packed_oarchive.cpp b/src/mpi/packed_oarchive.cpp deleted file mode 100644 index a4be482..0000000 --- a/src/mpi/packed_oarchive.cpp +++ /dev/null @@ -1,24 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#define BOOST_ARCHIVE_SOURCE -#include - -#include -#include -#include -#include - -namespace boost { namespace archive { -// explicitly instantiate all required templates - -template class detail::archive_pointer_oserializer ; -template class basic_binary_oarchive ; -//template class binary_oarchive_impl ; - -} } // end namespace boost::archive diff --git a/src/mpi/packed_skeleton_iarchive.cpp b/src/mpi/packed_skeleton_iarchive.cpp deleted file mode 100644 index 275c0b2..0000000 --- a/src/mpi/packed_skeleton_iarchive.cpp +++ /dev/null @@ -1,26 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#define BOOST_ARCHIVE_SOURCE -#include -#include -#include - -#include -#include -#include - -namespace boost { namespace archive { - -// explicitly instantiate all required templates - -template class basic_binary_iarchive ; -template class detail::archive_pointer_iserializer ; -//template class binary_iarchive_impl ; - -} } // end namespace boost::archive diff --git a/src/mpi/packed_skeleton_oarchive.cpp b/src/mpi/packed_skeleton_oarchive.cpp deleted file mode 100644 index 191431c..0000000 --- a/src/mpi/packed_skeleton_oarchive.cpp +++ /dev/null @@ -1,24 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#define BOOST_ARCHIVE_SOURCE -#include - -#include -#include -#include -#include - -namespace boost { namespace archive { -// explicitly instantiate all required templates - -template class detail::archive_pointer_oserializer ; -template class basic_binary_oarchive ; -//template class binary_oarchive_impl ; - -} } // end namespace boost::archive diff --git a/src/mpi/point_to_point.cpp b/src/mpi/point_to_point.cpp deleted file mode 100644 index 7318427..0000000 --- a/src/mpi/point_to_point.cpp +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2005 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Message Passing Interface 1.1 -- Section 3. MPI Point-to-point - -/* There is the potential for optimization here. We could keep around - a "small message" buffer of size N that we just receive into by - default. If the message is N - sizeof(int) bytes or smaller, it can - just be sent with that buffer. If it's larger, we send the first N - - sizeof(int) bytes in the first packet followed by another - packet. The size of the second packet will be stored in an integer - at the end of the first packet. - - We will introduce this optimization later, when we have more - performance test cases and have met our functionality goals. 
*/ - -#include -#include -#include -#include - -namespace boost { namespace parallel { namespace mpi { namespace detail { - -void -packed_archive_send(MPI_Comm comm, int dest, int tag, - const packed_oarchive& ar) -{ - const void* size = &ar.size(); - BOOST_MPI_CHECK_RESULT(MPI_Send, - (const_cast(size), 1, - get_mpi_datatype(), dest, tag, comm)); - BOOST_MPI_CHECK_RESULT(MPI_Send, - (const_cast(ar.address()), ar.size(), - MPI_PACKED, - dest, tag, comm)); -} - -int -packed_archive_isend(MPI_Comm comm, int dest, int tag, - const packed_oarchive& ar, - MPI_Request* out_requests, int num_out_requests) -{ - assert(num_out_requests >= 2); - const void* size = &ar.size(); - BOOST_MPI_CHECK_RESULT(MPI_Isend, - (const_cast(size), 1, - get_mpi_datatype(), - dest, tag, comm, out_requests)); - BOOST_MPI_CHECK_RESULT(MPI_Isend, - (const_cast(ar.address()), ar.size(), - MPI_PACKED, - dest, tag, comm, out_requests + 1)); - - return 2; -} - -int -packed_archive_isend(MPI_Comm comm, int dest, int tag, - const packed_iarchive& ar, - MPI_Request* out_requests, int num_out_requests) -{ - assert(num_out_requests >= 2); - - const void* size = &ar.size(); - BOOST_MPI_CHECK_RESULT(MPI_Isend, - (const_cast(size), 1, - get_mpi_datatype(), - dest, tag, comm, out_requests)); - BOOST_MPI_CHECK_RESULT(MPI_Isend, - (const_cast(ar.address()), ar.size(), - MPI_PACKED, - dest, tag, comm, out_requests + 1)); - - return 2; -} - -void -packed_archive_recv(MPI_Comm comm, int source, int tag, packed_iarchive& ar, - MPI_Status& status) -{ - std::size_t count; - BOOST_MPI_CHECK_RESULT(MPI_Recv, - (&count, 1, get_mpi_datatype(), - source, tag, comm, &status)); - - // Prepare input buffer and receive the message - ar.resize(count); - BOOST_MPI_CHECK_RESULT(MPI_Recv, - (ar.address(), ar.size(), MPI_PACKED, - status.MPI_SOURCE, status.MPI_TAG, - comm, &status)); -} - -} } } } // end namespace boost::parallel::mpi::detail diff --git a/src/mpi/python/collectives.cpp b/src/mpi/python/collectives.cpp deleted 
file mode 100644 index 88a26c8..0000000 --- a/src/mpi/python/collectives.cpp +++ /dev/null @@ -1,144 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file status.cpp - * - * This file reflects the Boost.MPI @c status class into - * Python. - */ -#include -#include -#include - -using namespace boost::python; -using namespace boost::parallel::mpi; - -namespace boost { namespace parallel { namespace mpi { namespace python { - -extern const char* all_gather_docstring; -extern const char* all_reduce_docstring; -extern const char* all_to_all_docstring; -extern const char* broadcast_docstring; -extern const char* gather_docstring; -extern const char* reduce_docstring; -extern const char* scan_docstring; -extern const char* scatter_docstring; - -object all_gather(const communicator& comm, object value) -{ - std::vector values; - boost::parallel::mpi::all_gather(comm, value, values); - - boost::python::list l; - for (int i = 0; i < comm.size(); ++i) - l.append(values[i]); - return boost::python::tuple(l); -} - -object all_to_all(const communicator& comm, object in_values) -{ - // Build input values - std::vector in_values_vec(comm.size()); - object iterator = object(handle<>(PyObject_GetIter(in_values.ptr()))); - for (int i = 0; i < comm.size(); ++i) - in_values_vec[i] = object(handle<>(PyIter_Next(iterator.ptr()))); - - std::vector out_values_vec(comm.size()); - boost::parallel::mpi::all_to_all(comm, in_values_vec, out_values_vec); - - boost::python::list l; - for (int i = 0; i < comm.size(); ++i) - l.append(out_values_vec[i]); - return boost::python::tuple(l); -} - -object broadcast(const communicator& comm, object value, int root) -{ - boost::parallel::mpi::broadcast(comm, value, root); - return value; -} - -object gather(const communicator& comm, 
object value, int root) -{ - if (comm.rank() == root) { - std::vector values; - boost::parallel::mpi::gather(comm, value, values, root); - - boost::python::list l; - for (int i = 0; i < comm.size(); ++i) - l.append(values[i]); - return boost::python::tuple(l); - } else { - boost::parallel::mpi::gather(comm, value, root); - return object(); - } -} - -object reduce(const communicator& comm, object value, object op, int root) -{ - if (comm.rank() == root) { - object out_value; - boost::parallel::mpi::reduce(comm, value, out_value, op, root); - return out_value; - } else { - boost::parallel::mpi::reduce(comm, value, op, root); - return object(); - } -} - -object scatter(const communicator& comm, object values, int root) -{ - object result; - - if (comm.rank() == root) { - std::vector values_vec(comm.size()); - object iterator = object(handle<>(PyObject_GetIter(values.ptr()))); - for (int i = 0; i < comm.size(); ++i) - values_vec[i] = object(handle<>(PyIter_Next(iterator.ptr()))); - - boost::parallel::mpi::scatter(comm, values_vec, result, root); - } else { - boost::parallel::mpi::scatter(comm, result, root); - } - return result; -} - -void export_collectives() -{ - using boost::python::arg; - - def("all_reduce", - (object (*)(const communicator&, const object&, object))&all_reduce, - (arg("comm") = communicator(), arg("value"), arg("op")), - all_reduce_docstring); - def("all_gather", &all_gather, - (arg("comm") = communicator(), arg("value") = object()), - all_gather_docstring); - def("all_to_all", &all_to_all, - (arg("comm") = communicator(), arg("values") = object()), - all_to_all_docstring); - def("broadcast", &broadcast, - (arg("comm") = communicator(), arg("value") = object(), arg("root")), - broadcast_docstring); - def("gather", &gather, - (arg("comm") = communicator(), arg("value") = object(), arg("root")), - gather_docstring); - def("reduce", &reduce, - (arg("comm") = communicator(), arg("value"), arg("op"), - arg("root")), - reduce_docstring); - def("scan", - 
(object (*)(const communicator&, const object&, object))&scan, - (arg("comm") = communicator(), arg("value"), arg("op")), - scan_docstring); - def("scatter", &scatter, - (arg("comm") = communicator(), arg("values") = object(), arg("root")), - scatter_docstring); -} - -} } } } // end namespace boost::parallel::mpi::python diff --git a/src/mpi/python/datatypes.cpp b/src/mpi/python/datatypes.cpp deleted file mode 100644 index 50e35f2..0000000 --- a/src/mpi/python/datatypes.cpp +++ /dev/null @@ -1,25 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file datatypes.cpp - * - * This file provides datatypes support for Boost.MPI in Python. - */ -#include -#include - -namespace boost { namespace parallel { namespace mpi { namespace python { - -void export_datatypes() -{ - register_serialized(long(0), &PyInt_Type); - register_serialized(false, &PyBool_Type); - register_serialized(double(0.0), &PyFloat_Type); -} - -} } } } // end namespace boost::parallel::mpi::python diff --git a/src/mpi/python/documentation.cpp b/src/mpi/python/documentation.cpp deleted file mode 100644 index a17cb48..0000000 --- a/src/mpi/python/documentation.cpp +++ /dev/null @@ -1,581 +0,0 @@ -// (C) Copyright 2005 The Trustees of Indiana University. -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file documentation.cpp - * - * This file contains all of the documentation strings for the - * Boost.MPI Python bindings. 
- */ -namespace boost { namespace parallel { namespace mpi { namespace python { - -const char* module_docstring = - "The boost.parallel.mpi module contains Python wrappers for Boost.MPI.\n" - "Boost.MPI is a C++ interface to the Message Passing Interface 1.1,\n" - "a high-performance message passing library for parallel programming.\n" - "\n" - "This module supports the most commonly used subset of MPI 1.1. All\n" - "communication operations can transmit any Python object that can be\n" - "pickled and unpickled, along with C++-serialized data types and\n" - "separation of the structure of a data type from its content.\n" - "Collectives that have a user-supplied functions,\n" - "such as reduce() or scan(), accept arbitrary Python functions, and\n" - "all collectives can operate on any serializable or picklable data type.\n" - "\n" - "IMPORTANT MODULE DATA\n" - " any_source This constant may be used for the source parameter of\n" - " receive and probe operations to indicate that a\n" - " message may be received from any source.\n" - "\n" - " any_tag This constant may be used for the tag parameter of\n" - " receive or probe operations to indicate that a send\n" - " with any tag will be matched.\n" - "\n" - " collectives_tag Returns the reserved tag value used by the Boost.MPI\n" - " implementation for collective operations. Although\n" - " users are not permitted to use this tag to send or\n" - " receive messages with this tag, it may be useful when\n" - " monitoring communication patterns.\n" - "\n" - " host_rank If there is a host process, this is the rank of that\n" - " that process. Otherwise, this value will be None. MPI\n" - " does not define the meaning of a \"host\" process: \n" - " consult the documentation for your MPI implementation.\n" - "\n" - " io_rank The rank of a process that can perform input/output\n" - " via the standard facilities. 
If every process can\n" - " perform I/O using the standard facilities, this value\n" - " will be the same as any_source. If no process can\n" - " perform I/O, this value will be None.\n" - "\n" - " max_tag The maximum value that may be used for the tag\n" - " parameter of send/receive operations. This value will\n" - " be somewhat smaller than the value of MPI_TAG_UB,\n" - " because the Boost.MPI implementation reserves some\n" - " tags for collective operations.\n" - "\n" - " processor_name The name of this processor. The actual form of the\n" - " of the name is unspecified, but may be documented by\n" - " the underlying MPI implementation.\n" - "\n" - " rank The rank of this process in the \"world\" communicator.\n" - "\n" - " size The number of processes in the \"world\" communicator.\n" - " that process. Otherwise, this value will be None. MPI\n" - " does not define the meaning of a \"host\" process: \n" - "\n" - " world The \"world\" communicator from which all other\n" - " communicators will be derived. This is the equivalent\n" - " of MPI_COMM_WORLD.\n" - "\n" - "TRANSMITTING USER-DEFINED DATA\n" - " Boost.MPI can transmit user-defined data in several different ways.\n" - " Most importantly, it can transmit arbitrary Python objects by pickling\n" - " them at the sender and unpickling them at the receiver, allowing\n" - " arbitrarily complex Python data structures to interoperate with MPI.\n" - "\n" - " Boost.MPI also supports efficient serialization and transmission of\n" - " C++ objects (that have been exposed to Python) through its C++\n" - " interface. Any C++ type that provides (de-)serialization routines that\n" - " meet the requirements of the Boost.Serialization library is eligible\n" - " for this optimization, but the type must be registered in advance. 
To\n" - " register a C++ type, invoke the C++ function:\n" - " boost::parallel::mpi::python::register_serialized\n" - "\n" - " Finally, Boost.MPI supports separation of the structure of an object\n" - " from the data it stores, allowing the two pieces to be transmitted\n" - " separately. This \"skeleton/content\" mechanism, described in more\n" - " detail in a later section, is a communication optimization suitable\n" - " for problems with fixed data structures whose internal data changes\n" - " frequently.\n" - "\n" - "COLLECTIVES\n" - " Boost.MPI supports all of the MPI collectives (scatter, reduce, scan,\n" - " broadcast, etc.) for any type of data that can be transmitted with the\n" - " point-to-point communication operations. For the MPI collectives that\n" - " require a user-specified operation (e.g., reduce and scan), the\n" - " operation can be an arbitrary Python function. For instance, one could\n" - " concatenate strings with all_reduce:\n\n" - " mpi.all_reduce(my_string, lambda x,y: x + y)\n\n" - " The following module-level functions implement MPI collectives:\n" - " all_gather Gather the values from all processes.\n" - " all_reduce Combine the results from all processes.\n" - " all_to_all Every process sends data to every other process.\n" - " broadcast Broadcast data from one process to all other processes.\n" - " gather Gather the values from all processes to the root.\n" - " reduce Combine the results from all processes to the root.\n" - " scan Prefix reduction of the values from all processes.\n" - " scatter Scatter the values stored at the root to all processes.\n" - "\n" - "SKELETON/CONTENT MECHANISM\n" - " Boost.MPI provides a skeleton/content mechanism that allows the\n" - " transfer of large data structures to be split into two separate stages,\n" - " with the `skeleton' (or, `shape') of the data structure sent first and\n" - " the content (or, `data') of the data structure sent later, potentially\n" - " several times, so long as the 
structure has not changed since the\n" - " skeleton was transferred. The skeleton/content mechanism can improve\n" - " performance when the data structure is large and its shape is fixed,\n" - " because while the skeleton requires serialization (it has an unknown\n" - " size), the content transfer is fixed-size and can be done without\n" - " extra copies.\n" - "\n" - " To use the skeleton/content mechanism from Python, you must first\n" - " register the type of your data structure with the skeleton/content\n" - " mechanism *from C++*. The registration function is\n" - " boost::parallel::mpi::python::register_skeleton_and_content\n" - " and resides in the header.\n" - "\n" - " Once you have registered your C++ data structures, you can extract\n" - " the skeleton for an instance of that data structure with skeleton().\n" - " The resulting skeleton_proxy can be transmitted via the normal send\n" - " routine, e.g.,\n\n" - " mpi.world.send(1, 0, skeleton(my_data_structure))\n\n" - " skeleton_proxy objects can be received on the other end via recv(),\n" - " which stores a newly-created instance of your data structure with the\n" - " same `shape' as the sender in its `object' attribute:\n\n" - " shape = mpi.world.recv(0, 0)\n" - " my_data_structure = shape.object\n\n" - " Once the skeleton has been transmitted, the content (accessed via \n" - " get_content) can be transmitted in much the same way. 
Note, however,\n" - " that the receiver also specifies get_content(my_data_structure) in its\n" - " call to receive:\n\n" - " if mpi.rank == 0:\n" - " mpi.world.send(1, 0, get_content(my_data_structure))\n" - " else:\n" - " mpi.world.recv(0, 0, get_content(my_data_structure))\n\n" - " Of course, this transmission of content can occur repeatedly, if the\n" - " values in the data structure--but not its shape--changes.\n" - "\n" - " The skeleton/content mechanism is a structured way to exploit the\n" - " interaction between custom-built MPI datatypes and MPI_BOTTOM, to\n" - " eliminate extra buffer copies.\n" - "\n" - "C++/PYTHON MPI COMPATIBILITY\n" - " Boost.MPI is a C++ library whose facilities have been exposed to Python\n" - " via the Boost.Python library. Since the Boost.MPI Python bindings are\n" - " build directly on top of the C++ library, and nearly every feature of\n" - " C++ library is available in Python, hybrid C++/Python programs using\n" - " Boost.MPI can interact, e.g., sending a value from Python but receiving\n" - " that value in C++ (or vice versa). However, doing so requires some\n" - " care. Because Python objects are dynamically typed, Boost.MPI transfers\n" - " type information along with the serialized form of the object, so that\n" - " the object can be received even when its type is not known. This\n" - " mechanism differs from its C++ counterpart, where the static types of\n" - " transmitted values are always known.\n" - "\n" - " The only way to communicate between the C++ and Python views on \n" - " Boost.MPI is to traffic entirely in Python objects. For Python, this is\n" - " the normal state of affairs, so nothing will change. For C++, this\n" - " means sending and receiving values of type boost::python::object, from\n" - " the Boost.Python library. 
For instance, say we want to transmit an\n" - " integer value from Python:\n\n" - " comm.send(1, 0, 17)\n\n" - " In C++, we would receive that value into a Python object and then\n" - " `extract' an integer value:\n\n" - " boost::python::object value;\n" - " comm.recv(0, 0, value);\n" - " int int_value = boost::python::extract(value);\n\n" - " In the future, Boost.MPI will be extended to allow improved\n" - " interoperability with the C++ Boost.MPI and the C MPI bindings.\n" - ; - -/*********************************************************** - * environment documentation * - ***********************************************************/ -const char* environment_init_docstring = - "Initialize the MPI environment. Users should not need to call\n" - "this function directly, because the MPI environment will be\n" - "automatically initialized when the Boost.MPI module is loaded.\n"; - -const char* environment_finalize_docstring = - "Finalize (shut down) the MPI environment. Users only need to\n" - "invoke this function if MPI should be shut down before program\n" - "termination. Boost.MPI will automatically finalize the MPI\n" - "environment when the program exits.\n"; - -const char* environment_abort_docstring = - "Aborts all MPI processes and returns to the environment. The\n" - "precise behavior will be defined by the underlying MPI\n" - "implementation. 
This is equivalent to a call to MPI_Abort with\n" - "MPI_COMM_WORLD.\n" - "errcode is the error code to return from aborted processes.\n"; - -const char* environment_initialized_docstring = - "Determine if the MPI environment has already been initialized.\n"; - -const char* environment_finalized_docstring = - "Determine if the MPI environment has already been finalized.\n"; - -/*********************************************************** - * exception documentation * - ***********************************************************/ -const char* exception_docstring = - "Instances of this class will be thrown when an MPI error\n" - "occurs. MPI failures that trigger these exceptions may or may not\n" - "be recoverable, depending on the underlying MPI implementation.\n" - "Consult the documentation for your MPI implementation to determine\n" - "the effect of MPI errors.\n"; - -const char* exception_what_docstring = - "A description of the error that occured. At present, this refers\n" - "only to the name of the MPI routine that failed.\n"; - -const char* exception_routine_docstring = - "The name of the MPI routine that reported the error.\n"; - -const char* exception_result_code_docstring = - "The result code returned from the MPI routine that reported the\n" - "error.\n"; - -/*********************************************************** - * collectives documentation * - ***********************************************************/ -const char* all_gather_docstring = - "all_gather is a collective algorithm that collects the values\n" - "stored at each process into a tuple of values indexed by the\n" - "process number they came from. all_gather is (semantically) a\n" - "gather followed by a broadcast. The same tuple of values is\n" - "returned to all processes.\n"; - -const char* all_reduce_docstring = - "all_reduce is a collective algorithm that combines the values\n" - "stored by each process into a single value. 
The values can be\n" - "combined arbitrarily, specified via any function. The values\n" - "a1, a2, .., ap provided by p processors will be combined by the\n" - "binary function op into the result\n" - " op(a1, op(a2, ... op(ap-1,ap)))\n" - "that will be returned to all processes. This function is the\n" - "equivalent of calling all_gather() and then applying the built-in\n" - "reduce() function to the returned sequence. op is assumed to be\n" - "associative.\n"; - -const char* all_to_all_docstring = - "all_to_all is a collective algorithm that transmits values from\n" - "every process to every other process. On process i, the jth value\n" - "of the values sequence is sent to process j and placed in the ith\n" - "position of the tuple that will be returned from all_to_all.\n"; - -const char* broadcast_docstring = - "broadcast is a collective algorithm that transfers a value from an\n" - "arbitrary root process to every other process that is part of the\n" - "given communicator (comm). The root parameter must be the same for\n" - "every process. The value parameter need only be specified at the root\n" - "root. broadcast() returns the same broadcasted value to every process.\n"; - -const char* gather_docstring = - "gather is a collective algorithm that collects the values\n" - "stored at each process into a tuple of values at the root\n" - "process. This tuple is indexed by the process number that the\n" - "value came from, and will be returned only by the root process.\n" - "All other processes return None.\n"; - -const char* reduce_docstring = - "reduce is a collective algorithm that combines the values\n" - "stored by each process into a single value at the root. The\n" - "values can be combined arbitrarily, specified via any function.\n" - "The values a1, a2, .., ap provided by p processors will be\n" - "combined by the binary function op into the result\n" - " op(a1, op(a2, ... op(ap-1,ap)))\n" - "that will be returned on the root process. 
This function is the\n" - "equivalent of calling gather() to the root and then applying the\n" - "built-in reduce() function to the returned sequence. All non-root\n" - "processes return None. op is assumed to be associative.\n"; - -const char* scan_docstring = - "@c scan computes a prefix reduction of values from all processes.\n" - "It is a collective algorithm that combines the values stored by\n" - "each process with the values of all processes with a smaller rank.\n" - "The values can be arbitrarily combined, specified via a binary\n" - "function op. If each process i provides the value ai, then scan\n" - "returns op(a1, op(a2, ... op(ai-1, ai))) to the ith process. op is\n" - "assumed to be associative. This routine is the equivalent of an\n" - "all_gather(), followed by a built-in reduce() on the first i+1\n" - "values in the resulting sequence on processor i. op is assumed\n" - "to be associative.\n"; - -const char* scatter_docstring = - "scatter is a collective algorithm that scatters the values stored\n" - "in the root process (as a container with comm.size elements) to\n" - "all of the processes in the communicator. The values parameter \n" - "(only significant at the root) is indexed by the process number to\n" - "which the corresponding value will be sent. The value received by \n" - "each process is returned from scatter.\n"; - -/*********************************************************** - * communicator documentation * - ***********************************************************/ -const char* communicator_docstring = - "The communicator class abstracts a set of communicating\n" - "processes in MPI. 
All of the processes that belong to a certain\n" - "communicator can determine the size of the communicator, their rank\n" - "within the communicator, and communicate with any other processes\n" - "in the communicator.\n"; - -const char* communicator_default_constructor_docstring = - "Build a new Boost.MPI communicator for MPI_COMM_WORLD.\n"; - -const char* communicator_rank_docstring = - "Returns the rank of the process in the communicator, which will be a\n" - "value in [0, size).\n"; - -const char* communicator_size_docstring = - "Returns the number of processes in the communicator.\n"; - -const char* communicator_send_docstring = - "This routine executes a potentially blocking send with the given\n" - "tag to the process with rank dest. It can be received by the\n" - "destination process with a matching recv call. The value will be\n" - "transmitted in one of several ways:\n" - "\n" - " - For C++ objects registered via register_serialized(), the value\n" - " will be serialized and transmitted.\n" - "\n" - " - For skeleton_proxy objects, the skeleton of the object will be\n" - " serialized and transmitted.\n" - "\n" - " - For content objects, the content will be transmitted directly.\n" - " This content can be received by a matching recv/irecv call that\n" - " provides a suitable `buffer' argument.\n" - "\n" - " - For all other Python objects, the value will be pickled and\n" - " transmitted.\n"; - -const char* communicator_recv_docstring = - "This routine blocks until it receives a message from the process\n" - "source with the given tag. If the source parameter is not specified,\n" - "the message can be received from any process. 
Likewise, if the tag\n" - "parameter is not specified, a message with any tag can be received.\n" - "If return_status is True, returns a tuple containing the received\n" - "object followed by a status object describing the communication.\n" - "Otherwise, recv() returns just the received object.\n" - "\n" - "When receiving the content of a data type that has been sent separately\n" - "from its skeleton, user code must provide a value for the `buffer'\n" - "argument. This value should be the content object returned from\n" - "get_content().\n"; - -const char* communicator_isend_docstring = - "This routine executes a nonblocking send with the given\n" - "tag to the process with rank dest. It can be received by the\n" - "destination process with a matching recv call. The value will be\n" - "transmitted in the same way as with send().\n" - "This routine returns a request object, which can be used to query\n" - "when the transmission has completed, wait for its completion, or\n" - "cancel the transmission.\n"; - -const char* communicator_irecv_docstring = - "This routine initiates a non-blocking receive from the process\n" - "source with the given tag. If the source parameter is not specified,\n" - "the message can be received from any process. Likewise, if the tag\n" - "parameter is not specified, a message with any tag can be received.\n" - "This routine returns a request object, which can be used to query\n" - "when the transmission has completed, wait for its completion, or\n" - "cancel the transmission. The received value be accessible\n" - "through the `value' attribute of the request object once transmission\n" - "has completed.\n" - "\n" - "As with the recv() routine, when receiving the content of a data type\n" - "that has been sent separately from its skeleton, user code must provide\n" - "a value for the `buffer' argument. 
This value should be the content\n" - "object returned from get_content().\n"; - - const char* communicator_probe_docstring = - "This operation waits until a message matching (source, tag)\n" - "is available to be received. It then returns information about\n" - "that message. If source is omitted, a message from any process\n" - "will match. If tag is omitted, a message with any tag will match.\n" - "The actual source and tag can be retrieved from the returned status\n" - "object. To check if a message is available without blocking, use\n" - "iprobe.\n"; - -const char* communicator_iprobe_docstring = - "This operation determines if a message matching (source, tag) is\n" - "available to be received. If so, it returns information about that\n" - "message; otherwise, it returns None. If source is omitted, a message\n" - "from any process will match. If tag is omitted, a message with any\n" - "tag will match. The actual source and tag can be retrieved from the\n" - "returned status object. To wait for a message to become available, use\n" - "probe.\n"; - -const char* communicator_barrier_docstring = - "Wait for all processes within a communicator to reach the\n" - "barrier.\n"; - -const char* communicator_split_docstring = - "Split the communicator into multiple, disjoint communicators\n" - "each of which is based on a particular color. This is a\n" - "collective operation that returns a new communicator that is a\n" - "subgroup of this. This routine is functionally equivalent to\n" - "MPI_Comm_split.\n\n" - "color is the color of this process. All processes with the\n" - "same color value will be placed into the same group.\n\n" - "If provided, key is a key value that will be used to determine\n" - "the ordering of processes with the same color in the resulting\n" - "communicator. 
If omitted, the key will default to the rank of\n" - "the process in the current communicator.\n\n" - "Returns a new communicator containing all of the processes in\n" - "this communicator that have the same color.\n"; - -const char* communicator_abort_docstring = - "Makes a \"best attempt\" to abort all of the tasks in the group of\n" - "this communicator. Depending on the underlying MPI\n" - "implementation, this may either abort the entire program (and\n" - "possibly return errcode to the environment) or only abort\n" - "some processes, allowing the others to continue. Consult the\n" - "documentation for your MPI implementation. This is equivalent to\n" - "a call to MPI_Abort\n\n" - "errcode is the error code to return from aborted processes.\n"; - -/*********************************************************** - * request documentation * - ***********************************************************/ -const char* request_docstring = - "The request class contains information about a non-blocking send\n" - "or receive and will be returned from isend or irecv, respectively.\n" - "When a request object represents a completed irecv, the `value' \n" - "attribute will contain the received value.\n"; - -const char* request_wait_docstring = - "Wait until the communication associated with this request has\n" - "completed. For a request that is associated with an isend(), returns\n" - "a status object describing the communication. For an irecv()\n" - "operation, returns the received value by default. However, when\n" - "return_status=True, a (value, status) pair is returned by a.\n" - "completed irecv request.\n"; - -const char* request_test_docstring = - "Determine whether the communication associated with this request\n" - "has completed successfully. If so, returns the status object\n" - "describing the communication (for an isend request) or a tuple\n" - "containing the received value and a status object (for an irecv\n" - "request). 
Note that once test() returns a status object, the\n" - "request has completed and wait() should not be called.\n"; - -const char* request_cancel_docstring = - "Cancel a pending communication, assuming it has not already been\n" - "completed.\n"; - -/*********************************************************** - * skeleton/content documentation * - ***********************************************************/ -const char* object_without_skeleton_docstring = - "The object_without_skeleton class is an exception class used only\n" - "when the skeleton() or get_content() function is called with an\n" - "object that is not supported by the skeleton/content mechanism.\n" - "All C++ types for which skeletons and content can be transmitted\n" - "must be registered with the C++ routine:\n" - " boost::parallel::mpi::python::register_skeleton_and_content\n"; - -const char* object_without_skeleton_object_docstring = - "The object on which skeleton() or get_content() was invoked.\n"; - -const char* skeleton_proxy_docstring = - "The skeleton_proxy class is used to represent the skeleton of an\n" - "object. The skeleton_proxy can be used as the value parameter of\n" - "send() or isend() operations, but instead of transmitting the\n" - "entire object, only its skeleton (\"shape\") will be sent, without\n" - "the actual data. Its content can then be transmitted, separately.\n" - "\n" - "User code cannot generate skeleton_proxy instances directly. To\n" - "refer to the skeleton of an object, use skeleton(object). 
Skeletons\n" - "can also be received with the recv() and irecv() methods.\n" - "\n" - "Note that the skeleton/content mechanism can only be used with C++\n" - "types that have been explicitly registered.\n"; - -const char* skeleton_proxy_object_docstring = - "The actual object whose skeleton is represented by this proxy object.\n"; - -const char* content_docstring = - "The content is a proxy class that represents the content of an object,\n" - "which can be separately sent or received from its skeleton.\n" - "\n" - "User code cannot generate content instances directly. Call the\n" - "get_content() routine to retrieve the content proxy for a particular\n" - "object. The content instance can be used with any of the send() or\n" - "recv() variants. Note that get_content() can only be used with C++\n" - "data types that have been explicitly registered with the Python\n" - "skeleton/content mechanism.\n"; - -const char* skeleton_docstring = - "The skeleton function retrieves the skeleton_proxy for its object\n" - "parameter, allowing the transmission of the skeleton (or \"shape\")\n" - "of the object separately from its data. The skeleton/content mechanism\n" - "is useful when a large data structure remains structurally the same\n" - "throughout a computation, but its content (i.e., the values in the\n" - "structure) changes several times. Tranmission of the content part does\n" - "not require any serialization or unnecessary buffer copies, so it is\n" - "very efficient for large data structures.\n" - "\n" - "Only C++ types that have been explicitly registered with the Boost.MPI\n" - "Python library can be used with the skeleton/content mechanism. Use:\b" - " boost::parallel::mpi::python::register_skeleton_and_content\n"; - -const char* get_content_docstring = - "The get_content function retrieves the content for its object parameter,\n" - "allowing the transmission of the data in a data structure separately\n" - "from its skeleton (or \"shape\"). 
The skeleton/content mechanism\n" - "is useful when a large data structure remains structurally the same\n" - "throughout a computation, but its content (i.e., the values in the\n" - "structure) changes several times. Tranmission of the content part does\n" - "not require any serialization or unnecessary buffer copies, so it is\n" - "very efficient for large data structures.\n" - "\n" - "Only C++ types that have been explicitly registered with the Boost.MPI\n" - "Python library can be used with the skeleton/content mechanism. Use:\b" - " boost::parallel::mpi::python::register_skeleton_and_content\n"; - -/*********************************************************** - * status documentation * - ***********************************************************/ -const char* status_docstring = - "The status class stores information about a given message, including\n" - "its source, tag, and whether the message transmission was cancelled\n" - "or resulted in an error.\n"; - -const char* status_source_docstring = - "The source of the incoming message.\n"; - -const char* status_tag_docstring = - "The tag of the incoming message.\n"; - -const char* status_error_docstring = - "The error code associated with this transmission.\n"; - -const char* status_cancelled_docstring = - "Whether this transmission was cancelled.\n"; - -/*********************************************************** - * timer documentation * - ***********************************************************/ -const char* timer_docstring = - "The timer class is a simple wrapper around the MPI timing facilities.\n"; - -const char* timer_default_constructor_docstring = - "Initializes the timer. 
After this call, elapsed == 0.\n"; - -const char* timer_restart_docstring = - "Restart the timer, after which elapsed == 0.\n"; - -const char* timer_elapsed_docstring = - "The time elapsed since initialization or the last restart(),\n" - "whichever is more recent.\n"; - -const char* timer_elapsed_min_docstring = - "Returns the minimum non-zero value that elapsed may return\n" - "This is the resolution of the timer.\n"; - -const char* timer_elapsed_max_docstring = - "Return an estimate of the maximum possible value of elapsed. Note\n" - "that this routine may return too high a value on some systems.\n"; - -const char* timer_time_is_global_docstring = - "Determines whether the elapsed time values are global times or\n" - "local processor times.\n"; - -} } } } // end namespace boost::parallel::mpi::python diff --git a/src/mpi/python/exception.cpp b/src/mpi/python/exception.cpp deleted file mode 100644 index 5907d4d..0000000 --- a/src/mpi/python/exception.cpp +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (C) 2006 Douglas Gregor -// Copyright (C) 2005 The Trustees of Indiana University. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file exception.cpp - * - * This file reflects the Boost.MPI @c mpi_error class into - * Python. 
- */ -#include -#include -#include -#include -#include "utility.hpp" - -using namespace boost::python; -using namespace boost::parallel::mpi; - -namespace boost { namespace parallel { namespace mpi { namespace python { - -extern const char* exception_docstring; -extern const char* exception_what_docstring; -extern const char* exception_routine_docstring; -extern const char* exception_result_code_docstring; - -str exception_str(const exception& e) -{ - return str("MPI routine `" + std::string(e.routine()) + - "' returned error code " + - lexical_cast(e.result_code())); -} - -void export_exception() -{ - using boost::python::arg; - using boost::python::object; - - object type = - class_ - ("exception", exception_docstring, no_init) - .add_property("what", &exception::what, exception_what_docstring) - .add_property("routine", &exception::what, exception_routine_docstring) - .add_property("result_code", &exception::what, - exception_result_code_docstring) - .def("__str__", &exception_str) - ; - translate_exception::declare(type); -} - -} } } } // end namespace boost::parallel::mpi::python diff --git a/src/mpi/python/module.cpp b/src/mpi/python/module.cpp deleted file mode 100644 index 14b78f3..0000000 --- a/src/mpi/python/module.cpp +++ /dev/null @@ -1,53 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file module.cpp - * - * This file provides the top-level module for the Boost.MPI Python - * bindings. 
- */ -#include -#include - -using namespace boost::python; -using namespace boost::parallel::mpi; - -namespace boost { namespace parallel { namespace mpi { namespace python { - -extern void export_environment(); -extern void export_exception(); -extern void export_collectives(); -extern void export_communicator(); -extern void export_datatypes(); -extern void export_request(); -extern void export_status(); -extern void export_timer(); - -extern const char* module_docstring; - -BOOST_PYTHON_MODULE(mpi) -{ - // Setup module documentation - scope().attr("__doc__") = module_docstring; - scope().attr("__author__") = "Douglas Gregor "; - scope().attr("__date__") = "$LastChangedDate: 2006-07-16 15:25:47 -0400 (Sun, 16 Jul 2006) $"; - scope().attr("__version__") = "$Revision$"; - scope().attr("__copyright__") = "Copyright (C) 2006 Douglas Gregor"; - scope().attr("__license__") = "http://www.boost.org/LICENSE_1_0.txt"; - - export_environment(); - export_exception(); - export_communicator(); - export_collectives(); - export_datatypes(); - export_request(); - export_status(); - export_timer(); -} - -} } } } // end namespace boost::parallel::mpi::python diff --git a/src/mpi/python/py_communicator.cpp b/src/mpi/python/py_communicator.cpp deleted file mode 100644 index 9dd9837..0000000 --- a/src/mpi/python/py_communicator.cpp +++ /dev/null @@ -1,134 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file communicator.cpp - * - * This file reflects the Boost.MPI @c communicator class into - * Python. 
- */ -#include -#include -#include - -using namespace boost::python; -using namespace boost::parallel::mpi; - -namespace boost { namespace parallel { namespace mpi { namespace python { - -extern const char* communicator_docstring; -extern const char* communicator_default_constructor_docstring; -extern const char* communicator_rank_docstring; -extern const char* communicator_size_docstring; -extern const char* communicator_send_docstring; -extern const char* communicator_recv_docstring; -extern const char* communicator_isend_docstring; -extern const char* communicator_irecv_docstring; -extern const char* communicator_probe_docstring; -extern const char* communicator_iprobe_docstring; -extern const char* communicator_barrier_docstring; -extern const char* communicator_split_docstring; -extern const char* communicator_split_key_docstring; -extern const char* communicator_abort_docstring; - -object -communicator_recv(const communicator& comm, int source, int tag, - bool return_status) -{ - using boost::python::make_tuple; - - object result; - status stat = comm.recv(source, tag, result); - if (return_status) - return make_tuple(result, stat); - else - return result; -} - -object -communicator_irecv(const communicator& comm, int source, int tag) -{ - using boost::python::make_tuple; - - object result; - object req(comm.irecv(source, tag, result)); - req.attr("value") = result; - return req; -} - -object -communicator_iprobe(const communicator& comm, int source, int tag) -{ - if (boost::optional result = comm.iprobe(source, tag)) - return object(*result); - else - return object(); -} - -extern void export_skeleton_and_content(class_&); - -void export_communicator() -{ - using boost::python::arg; - using boost::python::object; - - class_ comm("communicator", communicator_docstring); - comm - .def(init<>()) - .add_property("rank", &communicator::rank, communicator_rank_docstring) - .add_property("size", &communicator::size, communicator_size_docstring) - .def("send", - 
(void (communicator::*)(int, int, const object&) const) - &communicator::send, - (arg("dest"), arg("tag") = 0, arg("value") = object()), - communicator_send_docstring) - .def("recv", &communicator_recv, - (arg("source") = any_source, arg("tag") = any_tag, - arg("return_status") = false), - communicator_recv_docstring) - .def("isend", - (request (communicator::*)(int, int, const object&) const) - &communicator::isend, - (arg("dest"), arg("tag") = 0, arg("value") = object()), - communicator_isend_docstring) - .def("irecv", &communicator_irecv, - (arg("source") = any_source, arg("tag") = any_tag), - communicator_irecv_docstring) - .def("probe", &communicator::probe, - (arg("source") = any_source, arg("tag") = any_tag), - communicator_probe_docstring) - .def("iprobe", &communicator_iprobe, - (arg("source") = any_source, arg("tag") = any_tag), - communicator_iprobe_docstring) - .def("barrier", &communicator::barrier, communicator_barrier_docstring) - .def("__nonzero__", &communicator::operator bool) - .def("split", - (communicator (communicator::*)(int) const)&communicator::split, - (arg("color")), communicator_split_docstring) - .def("split", - (communicator (communicator::*)(int, int) const)&communicator::split, - (arg("color"), arg("key"))) - .def("abort", &communicator::abort, arg("errcode"), - communicator_abort_docstring) - ; - - // Module-level attributes - scope().attr("any_source") = any_source; - scope().attr("any_tag") = any_tag; - - { - communicator world; - scope().attr("world") = world; - scope().attr("rank") = world.rank(); - scope().attr("size") = world.size(); - } - - // Export skeleton and content - export_skeleton_and_content(comm); -} - -} } } } // end namespace boost::parallel::mpi::python diff --git a/src/mpi/python/py_environment.cpp b/src/mpi/python/py_environment.cpp deleted file mode 100644 index 19817ea..0000000 --- a/src/mpi/python/py_environment.cpp +++ /dev/null @@ -1,111 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification 
and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file environment.cpp - * - * This file reflects the Boost.MPI "environment" class into Python - * methods at module level. - */ -#include -#include - -using namespace boost::python; -using namespace boost::parallel::mpi; - -namespace boost { namespace parallel { namespace mpi { namespace python { - -extern const char* environment_init_docstring; -extern const char* environment_finalize_docstring; -extern const char* environment_abort_docstring; -extern const char* environment_initialized_docstring; -extern const char* environment_finalized_docstring; - -/** - * The environment used by the Boost.MPI Python module. This will be - * zero-initialized before it is used. - */ -static environment* env; - -bool mpi_init(list python_argv, bool abort_on_exception) -{ - // If MPI is already initialized, do nothing. - if (environment::initialized()) - return false; - - // Convert Python argv into C-style argc/argv. 
- int my_argc = extract(python_argv.attr("__len__")()); - char** my_argv = new char*[my_argc]; - for (int arg = 0; arg < my_argc; ++arg) - my_argv[arg] = strdup(extract(python_argv[arg])); - - // Initialize MPI - int mpi_argc = my_argc; - char** mpi_argv = my_argv; - env = new environment(mpi_argc, mpi_argv, abort_on_exception); - - // If anything changed, convert C-style argc/argv into Python argv - if (mpi_argv != my_argv) - PySys_SetArgv(mpi_argc, mpi_argv); - - for (int arg = 0; arg < my_argc; ++arg) - free(my_argv[arg]); - delete [] my_argv; - - return true; -} - -void mpi_finalize() -{ - if (env) { - delete env; - env = 0; - } -} - -void export_environment() -{ - using boost::python::arg; - - def("init", mpi_init, (arg("argv"), arg("abort_on_exception") = true), - environment_init_docstring); - def("finalize", mpi_finalize, environment_finalize_docstring); - - // Setup initialization and finalization code - if (!environment::initialized()) { - // MPI_Init from sys.argv - object sys = object(handle<>(PyImport_ImportModule("sys"))); - mpi_init(extract(sys.attr("argv")), true); - - // Setup MPI_Finalize call when the program exits - object atexit = object(handle<>(PyImport_ImportModule("atexit"))); - object finalize = scope().attr("finalize"); - atexit.attr("register")(finalize); - } - - def("abort", &environment::abort, arg("errcode"), - environment_abort_docstring); - def("initialized", &environment::initialized, - environment_initialized_docstring); - def("finalized", &environment::finalized, - environment_finalized_docstring); - scope().attr("max_tag") = environment::max_tag(); - scope().attr("collectives_tag") = environment::collectives_tag(); - scope().attr("processor_name") = environment::processor_name(); - - if (optional host_rank = environment::host_rank()) - scope().attr("host_rank") = *host_rank; - else - scope().attr("host_rank") = object(); - - if (optional io_rank = environment::io_rank()) - scope().attr("io_rank") = *io_rank; - else - 
scope().attr("io_rank") = object(); -} - -} } } } // end namespace boost::parallel::mpi::python diff --git a/src/mpi/python/py_request.cpp b/src/mpi/python/py_request.cpp deleted file mode 100644 index fa72046..0000000 --- a/src/mpi/python/py_request.cpp +++ /dev/null @@ -1,64 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file request.cpp - * - * This file reflects the Boost.MPI @c request class into - * Python. - */ -#include -#include - -using namespace boost::python; -using namespace boost::parallel::mpi; - -namespace boost { namespace parallel { namespace mpi { namespace python { - -extern const char* request_docstring; -extern const char* request_wait_docstring; -extern const char* request_test_docstring; -extern const char* request_cancel_docstring; - -object request_wait(object req_obj) -{ - request& req = extract(req_obj)(); - status stat = req.wait(); - if (PyObject_HasAttrString(req_obj.ptr(), "value")) - return boost::python::make_tuple(stat, req_obj.attr("value")); - else - return object(stat); -} - -object request_test(object req_obj) -{ - request& req = extract(req_obj)(); - - if (optional stat = req.test()) - { - if (PyObject_HasAttrString(req_obj.ptr(), "value")) - return boost::python::make_tuple(stat, req_obj.attr("value")); - else - return object(stat); - } - else - return object(); -} - -void export_request() -{ - using boost::python::arg; - using boost::python::object; - - class_("request", request_docstring, no_init) - .def("wait", &request_wait, request_wait_docstring) - .def("test", &request_test, request_test_docstring) - .def("cancel", &request::cancel, request_cancel_docstring) - ; -} - -} } } } // end namespace boost::parallel::mpi::python diff --git a/src/mpi/python/py_timer.cpp b/src/mpi/python/py_timer.cpp 
deleted file mode 100644 index a902e01..0000000 --- a/src/mpi/python/py_timer.cpp +++ /dev/null @@ -1,48 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file timer.cpp - * - * This file reflects the Boost.MPI @c timer class into - * Python. - */ -#include -#include - -using namespace boost::python; -using namespace boost::parallel::mpi; - -namespace boost { namespace parallel { namespace mpi { namespace python { - -extern const char* timer_docstring; -extern const char* timer_default_constructor_docstring; -extern const char* timer_restart_docstring; -extern const char* timer_elapsed_docstring; -extern const char* timer_elapsed_min_docstring; -extern const char* timer_elapsed_max_docstring; -extern const char* timer_time_is_global_docstring; - -void export_timer() -{ - using boost::python::arg; - using boost::python::object; - - class_("timer", timer_docstring) - .def(init<>()) - .def("restart", &timer::restart, timer_restart_docstring) - .add_property("elapsed", &timer::elapsed, timer_elapsed_docstring) - .add_property("elapsed_min", &timer::elapsed_min, - timer_elapsed_min_docstring) - .add_property("elapsed_max", &timer::elapsed_max, - timer_elapsed_max_docstring) - .add_property("time_is_global", &timer::time_is_global, - timer_time_is_global_docstring) - ; -} - -} } } } // end namespace boost::parallel::mpi::python diff --git a/src/mpi/python/serialize.cpp b/src/mpi/python/serialize.cpp deleted file mode 100644 index 607afa7..0000000 --- a/src/mpi/python/serialize.cpp +++ /dev/null @@ -1,78 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file serialize.cpp - * - * This file provides Boost.Serialization support for Python objects. - */ -#include -#include -#include - -namespace boost { namespace python { - -struct pickle::data_t { - object module; - object dumps; - object loads; -}; - - -/// Data used for communicating with the Python `pickle' module. -pickle::data_t* pickle::data; - -str pickle::dumps(object obj, int protocol) -{ - if (!data) initialize_data(); - return extract((data->dumps)(obj, protocol)); -} - -object pickle::loads(str s) -{ - if (!data) initialize_data(); - return ((data->loads)(s)); -} - -void pickle::initialize_data() -{ - data = new data_t; - data->module = object(handle<>(PyImport_ImportModule("pickle"))); - data->dumps = data->module.attr("dumps"); - data->loads = data->module.attr("loads"); -} - -} } // end namespace boost::python - -BOOST_PYTHON_DIRECT_SERIALIZATION_ARCHIVE_IMPL( - ::boost::parallel::mpi::packed_iarchive, - ::boost::parallel::mpi::packed_oarchive) - -namespace boost { namespace parallel { namespace mpi { namespace python { namespace detail { - - boost::python::object skeleton_proxy_base_type; - - // A map from Python type objects to skeleton/content handlers - typedef std::map - skeleton_content_handlers_type; - skeleton_content_handlers_type skeleton_content_handlers; - - bool - skeleton_and_content_handler_registered(PyTypeObject* type) - { - return - skeleton_content_handlers.find(type) != skeleton_content_handlers.end(); - } - - void - register_skeleton_and_content_handler(PyTypeObject* type, - const skeleton_content_handler& handler) - { - skeleton_content_handlers[type] = handler; - } - -} } } } } // end namespace boost::parallel::mpi::python::detail diff --git a/src/mpi/python/skeleton_and_content.cpp b/src/mpi/python/skeleton_and_content.cpp deleted file mode 100644 index b5049f3..0000000 --- 
a/src/mpi/python/skeleton_and_content.cpp +++ /dev/null @@ -1,164 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file skeleton_and_content.cpp - * - * This file reflects the skeleton/content facilities into Python. - */ -#include -#include -#include -#include -#include -#include "utility.hpp" - -using namespace boost::python; -using namespace boost::parallel::mpi; - -namespace boost { namespace parallel { namespace mpi { namespace python { - -namespace detail { - typedef std::map - skeleton_content_handlers_type; - extern skeleton_content_handlers_type skeleton_content_handlers; -} - -/** - * An exception that will be thrown when the object passed to the - * Python version of skeleton() does not have a skeleton. - */ -struct object_without_skeleton : public std::exception { - explicit object_without_skeleton(object value) : value(value) { } - virtual ~object_without_skeleton() throw() { } - - object value; -}; - -str object_without_skeleton_str(const object_without_skeleton& e) -{ - return str("\nThe skeleton() or get_content() function was invoked for a Python\n" - "object that is not supported by the Boost.MPI skeleton/content\n" - "mechanism. To transfer objects via skeleton/content, you must\n" - "register the C++ type of this object with the C++ function:\n" - " boost::parallel::mpi::python::register_skeleton_and_content()\n" - "Object: " + str(e.value) + "\n"); -} - -/** - * Extract the "skeleton" from a Python object. In truth, all we're - * doing at this point is verifying that the object is a C++ type that - * has been registered for the skeleton/content mechanism. 
- */ -object skeleton(object value) -{ - PyTypeObject* type = value.ptr()->ob_type; - detail::skeleton_content_handlers_type::iterator pos = - detail::skeleton_content_handlers.find(type); - if (pos == detail::skeleton_content_handlers.end()) - throw object_without_skeleton(value); - else - return pos->second.get_skeleton_proxy(value); -} - -/** - * Extract the "content" from a Python object, which must be a C++ - * type that has been registered for the skeleton/content mechanism. - */ -content get_content(object value) -{ - PyTypeObject* type = value.ptr()->ob_type; - detail::skeleton_content_handlers_type::iterator pos = - detail::skeleton_content_handlers.find(type); - if (pos == detail::skeleton_content_handlers.end()) - throw object_without_skeleton(value); - else - return pos->second.get_content(value); -} - -/// Send the content part of a Python object. -void -communicator_send_content(const communicator& comm, int dest, int tag, - const content& c) -{ - comm.send(dest, tag, c.base()); -} - -/// Receive the content of a Python object. We return the object -/// received, not the content wrapper. -object -communicator_recv_content(const communicator& comm, int source, int tag, - const content& c, bool return_status) -{ - using boost::python::make_tuple; - - status stat = comm.recv(source, tag, c.base()); - if (return_status) - return make_tuple(c.object, stat); - else - return c.object; -} - -/// Receive the content of a Python object. The request object's value -/// attribute will reference the object whose content is being -/// received, not the content wrapper. 
-object -communicator_irecv_content(const communicator& comm, int source, int tag, - const content& c) -{ - using boost::python::make_tuple; - - object req(comm.irecv(source, tag, c.base())); - req.attr("value") = c.object; - return req; -} - -extern const char* object_without_skeleton_docstring; -extern const char* object_without_skeleton_object_docstring; -extern const char* skeleton_proxy_docstring; -extern const char* skeleton_proxy_object_docstring; -extern const char* content_docstring; -extern const char* skeleton_docstring; -extern const char* get_content_docstring; - -void export_skeleton_and_content(class_& comm) -{ - using boost::python::arg; - - // Expose the object_without_skeleton exception - object type = - class_ - ("object_without_skeleton", object_without_skeleton_docstring, no_init) - .def_readonly("object", &object_without_skeleton::value, - object_without_skeleton_object_docstring) - .def("__str__", &object_without_skeleton_str) - ; - translate_exception::declare(type); - - // Expose the Python variants of "skeleton_proxy" and "content", and - // their generator functions. - detail::skeleton_proxy_base_type = - class_("skeleton_proxy", skeleton_proxy_docstring, - no_init) - .def_readonly("object", &skeleton_proxy_base::object, - skeleton_proxy_object_docstring); - class_("content", content_docstring, no_init); - def("skeleton", &skeleton, arg("object"), skeleton_docstring); - def("get_content", &get_content, arg("object"), get_content_docstring); - - // Expose communicator send/recv operations for content. 
- comm - .def("send", communicator_send_content, - (arg("dest"), arg("tag") = 0, arg("value"))) - .def("recv", communicator_recv_content, - (arg("source") = any_source, arg("tag") = any_tag, arg("buffer"), - arg("return_status") = false)) - .def("irecv", communicator_irecv_content, - (arg("source") = any_source, arg("tag") = any_tag, arg("buffer"))); -} - -} } } } // end namespace boost::parallel::mpi::python diff --git a/src/mpi/python/status.cpp b/src/mpi/python/status.cpp deleted file mode 100644 index 5ba2ab3..0000000 --- a/src/mpi/python/status.cpp +++ /dev/null @@ -1,41 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file status.cpp - * - * This file reflects the Boost.MPI @c status class into - * Python. - */ -#include -#include - -using namespace boost::python; -using namespace boost::parallel::mpi; - -namespace boost { namespace parallel { namespace mpi { namespace python { - -extern const char* status_docstring; -extern const char* status_source_docstring; -extern const char* status_tag_docstring; -extern const char* status_error_docstring; -extern const char* status_cancelled_docstring; - -void export_status() -{ - using boost::python::arg; - using boost::python::object; - - class_("status", status_docstring, no_init) - .add_property("source", &status::source, status_source_docstring) - .add_property("tag", &status::tag, status_tag_docstring) - .add_property("error", &status::error, status_error_docstring) - .add_property("cancelled", &status::cancelled, status_cancelled_docstring) - ; -} - -} } } } // end namespace boost::parallel::mpi::python diff --git a/src/mpi/python/utility.hpp b/src/mpi/python/utility.hpp deleted file mode 100644 index a64ff14..0000000 --- a/src/mpi/python/utility.hpp +++ /dev/null @@ -1,43 +0,0 @@ -// (C) 
Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor -#ifndef BOOST_PARALLEL_MPI_PYTHON_UTILITY_HPP -#define BOOST_PARALLEL_MPI_PYTHON_UTILITY_HPP - -/** @file utility.hpp - * - * This file is a utility header for the Boost.MPI Python bindings. - */ -#include - -namespace boost { namespace parallel { namespace mpi { namespace python { - -template -class translate_exception -{ - explicit translate_exception(boost::python::object type) : type(type) { } - -public: - static void declare(boost::python::object type) - { - using boost::python::register_exception_translator; - register_exception_translator(translate_exception(type)); - } - - void operator()(const E& e) const - { - using boost::python::object; - PyErr_SetObject(type.ptr(), object(e).ptr()); - } - -private: - boost::python::object type; -}; - -} } } } // end namespace boost::parallel::mpi::python - -#endif // BOOST_PARALLEL_MPI_PYTHON_UTILITY_HPP diff --git a/src/mpi/request.cpp b/src/mpi/request.cpp deleted file mode 100644 index f0646f3..0000000 --- a/src/mpi/request.cpp +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright (C) 2006 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -#include -#include - -namespace boost { namespace parallel { namespace mpi { - -/*************************************************************************** - * request * - ***************************************************************************/ -request::request() - : m_handler(0), m_data() -{ - m_requests[0] = MPI_REQUEST_NULL; - m_requests[1] = MPI_REQUEST_NULL; -} - -status request::wait() -{ - if (m_handler) { - // This request is a receive for a serialized type. 
Use the - // handler to wait for completion. - return *m_handler(this, ra_wait); - } else if (m_requests[1] == MPI_REQUEST_NULL) { - // This request is either a send or a receive for a type with an - // associated MPI datatype, or a serialized datatype that has been - // packed into a single message. Just wait on the one receive/send - // and return the status to the user. - status result; - BOOST_MPI_CHECK_RESULT(MPI_Wait, (&m_requests[0], &result.m_status)); - return result; - } else { - // This request is a send of a serialized type, broken into two - // separate messages. Complete both sends at once. - MPI_Status stats[2]; - int error_code = MPI_Waitall(2, m_requests, stats); - if (error_code == MPI_ERR_IN_STATUS) { - // Dig out which status structure has the error, and use that - // one when throwing the exception. - if (stats[0].MPI_ERROR == MPI_SUCCESS - || stats[0].MPI_ERROR == MPI_ERR_PENDING) - boost::throw_exception(exception("MPI_Waitall", stats[1].MPI_ERROR)); - else - boost::throw_exception(exception("MPI_Waitall", stats[0].MPI_ERROR)); - } else if (error_code != MPI_SUCCESS) { - // There was an error somewhere in the MPI_Waitall call; throw - // an exception for it. - boost::throw_exception(exception("MPI_Waitall", error_code)); - } - - // No errors. Returns the first status structure. - status result; - result.m_status = stats[0]; - return result; - } -} - -optional request::test() -{ - if (m_handler) { - // This request is a receive for a serialized type. Use the - // handler to test for completion. - return m_handler(this, ra_test); - } else if (m_requests[1] == MPI_REQUEST_NULL) { - // This request is either a send or a receive for a type with an - // associated MPI datatype, or a serialized datatype that has been - // packed into a single message. Just test the one receive/send - // and return the status to the user if it has completed. 
- status result; - int flag = 0; - BOOST_MPI_CHECK_RESULT(MPI_Test, - (&m_requests[0], &flag, &result.m_status)); - return flag != 0? optional(result) : optional(); - } else { - // This request is a send of a serialized type, broken into two - // separate messages. We only get a result if both complete. - MPI_Status stats[2]; - int flag = 0; - int error_code = MPI_Testall(2, m_requests, &flag, stats); - if (error_code == MPI_ERR_IN_STATUS) { - // Dig out which status structure has the error, and use that - // one when throwing the exception. - if (stats[0].MPI_ERROR == MPI_SUCCESS - || stats[0].MPI_ERROR == MPI_ERR_PENDING) - boost::throw_exception(exception("MPI_Testall", stats[1].MPI_ERROR)); - else - boost::throw_exception(exception("MPI_Testall", stats[0].MPI_ERROR)); - } else if (error_code != MPI_SUCCESS) { - // There was an error somewhere in the MPI_Testall call; throw - // an exception for it. - boost::throw_exception(exception("MPI_Testall", error_code)); - } - - // No errors. Returns the second status structure if the send has - // completed. - if (flag != 0) { - status result; - result.m_status = stats[1]; - return result; - } else { - return optional(); - } - } -} - -void request::cancel() -{ - if (m_handler) { - m_handler(this, ra_cancel); - } else { - BOOST_MPI_CHECK_RESULT(MPI_Cancel, (&m_requests[0])); - if (m_requests[1] != MPI_REQUEST_NULL) - BOOST_MPI_CHECK_RESULT(MPI_Cancel, (&m_requests[1])); - } -} - -} } } // end namespace boost::parallel::mpi diff --git a/src/mpi/text_skeleton_iarchive.cpp b/src/mpi/text_skeleton_iarchive.cpp deleted file mode 100644 index 6c360d8..0000000 --- a/src/mpi/text_skeleton_iarchive.cpp +++ /dev/null @@ -1,24 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#define BOOST_ARCHIVE_SOURCE -#include -#include -#include - -#include -#include -#include - -namespace boost { namespace archive { - -// explicitly instantiate all required templates - -template class detail::archive_pointer_iserializer ; - -} } // end namespace boost::archive diff --git a/src/mpi/text_skeleton_oarchive.cpp b/src/mpi/text_skeleton_oarchive.cpp deleted file mode 100644 index dcc4357..0000000 --- a/src/mpi/text_skeleton_oarchive.cpp +++ /dev/null @@ -1,22 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#define BOOST_ARCHIVE_SOURCE -#include - -#include -#include -#include -#include - -namespace boost { namespace archive { -// explicitly instantiate all required templates - -template class detail::archive_pointer_oserializer ; - -} } // end namespace boost::archive diff --git a/src/mpi/timer.cpp b/src/mpi/timer.cpp deleted file mode 100644 index 44e61ba..0000000 --- a/src/mpi/timer.cpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -#include -#include - -namespace boost { namespace parallel { namespace mpi { - -bool timer::time_is_global() -{ - int* is_global; - int found = 0; - - BOOST_MPI_CHECK_RESULT(MPI_Attr_get, - (MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL, &is_global, - &found)); - if (!found) - return false; - else - return *is_global != 0; -} - -} } } /// end namespace boost::parallel::mpi diff --git a/src/mpi_datatype_cache.cpp b/src/mpi_datatype_cache.cpp deleted file mode 100644 index 748171c..0000000 --- a/src/mpi_datatype_cache.cpp +++ /dev/null @@ -1,16 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#include -#include - -namespace boost { namespace mpi { namespace detail { - -mpi_datatype_map mpi_datatype_cache; - -} } } diff --git a/src/mpi_datatype_oarchive.cpp b/src/mpi_datatype_oarchive.cpp deleted file mode 100644 index fc83ae7..0000000 --- a/src/mpi_datatype_oarchive.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#define BOOST_ARCHIVE_SOURCE -#include -#include - -namespace boost { namespace archive { namespace detail { -// explicitly instantiate all required template functions - -template class archive_pointer_oserializer ; - -} } } diff --git a/src/packed_iarchive.cpp b/src/packed_iarchive.cpp deleted file mode 100644 index ca7741d..0000000 --- a/src/packed_iarchive.cpp +++ /dev/null @@ -1,26 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#define BOOST_ARCHIVE_SOURCE -#include -#include -#include - -#include -#include -#include - -namespace boost { namespace archive { - -// explicitly instantiate all required templates - -template class basic_binary_iarchive ; -template class detail::archive_pointer_iserializer ; -//template class binary_iarchive_impl ; - -} } // end namespace boost::archive diff --git a/src/packed_oarchive.cpp b/src/packed_oarchive.cpp deleted file mode 100644 index 473caf1..0000000 --- a/src/packed_oarchive.cpp +++ /dev/null @@ -1,24 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#define BOOST_ARCHIVE_SOURCE -#include - -#include -#include -#include -#include - -namespace boost { namespace archive { -// explicitly instantiate all required templates - -template class detail::archive_pointer_oserializer ; -template class basic_binary_oarchive ; -//template class binary_oarchive_impl ; - -} } // end namespace boost::archive diff --git a/src/packed_skeleton_iarchive.cpp b/src/packed_skeleton_iarchive.cpp deleted file mode 100644 index a088783..0000000 --- a/src/packed_skeleton_iarchive.cpp +++ /dev/null @@ -1,26 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#define BOOST_ARCHIVE_SOURCE -#include -#include -#include - -#include -#include -#include - -namespace boost { namespace archive { - -// explicitly instantiate all required templates - -template class basic_binary_iarchive ; -template class detail::archive_pointer_iserializer ; -//template class binary_iarchive_impl ; - -} } // end namespace boost::archive diff --git a/src/packed_skeleton_oarchive.cpp b/src/packed_skeleton_oarchive.cpp deleted file mode 100644 index 9db105b..0000000 --- a/src/packed_skeleton_oarchive.cpp +++ /dev/null @@ -1,24 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#define BOOST_ARCHIVE_SOURCE -#include - -#include -#include -#include -#include - -namespace boost { namespace archive { -// explicitly instantiate all required templates - -template class detail::archive_pointer_oserializer ; -template class basic_binary_oarchive ; -//template class binary_oarchive_impl ; - -} } // end namespace boost::archive diff --git a/src/point_to_point.cpp b/src/point_to_point.cpp deleted file mode 100644 index 78351c4..0000000 --- a/src/point_to_point.cpp +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2005 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Message Passing Interface 1.1 -- Section 3. MPI Point-to-point - -/* There is the potential for optimization here. We could keep around - a "small message" buffer of size N that we just receive into by - default. If the message is N - sizeof(int) bytes or smaller, it can - just be sent with that buffer. If it's larger, we send the first N - - sizeof(int) bytes in the first packet followed by another - packet. The size of the second packet will be stored in an integer - at the end of the first packet. - - We will introduce this optimization later, when we have more - performance test cases and have met our functionality goals. 
*/ - -#include -#include -#include -#include - -namespace boost { namespace mpi { namespace detail { - -void -packed_archive_send(MPI_Comm comm, int dest, int tag, - const packed_oarchive& ar) -{ - const void* size = &ar.size(); - BOOST_MPI_CHECK_RESULT(MPI_Send, - (const_cast(size), 1, - get_mpi_datatype(ar.size()), - dest, tag, comm)); - BOOST_MPI_CHECK_RESULT(MPI_Send, - (const_cast(ar.address()), ar.size(), - MPI_PACKED, - dest, tag, comm)); -} - -int -packed_archive_isend(MPI_Comm comm, int dest, int tag, - const packed_oarchive& ar, - MPI_Request* out_requests, int num_out_requests) -{ - assert(num_out_requests >= 2); - const void* size = &ar.size(); - BOOST_MPI_CHECK_RESULT(MPI_Isend, - (const_cast(size), 1, - get_mpi_datatype(ar.size()), - dest, tag, comm, out_requests)); - BOOST_MPI_CHECK_RESULT(MPI_Isend, - (const_cast(ar.address()), ar.size(), - MPI_PACKED, - dest, tag, comm, out_requests + 1)); - - return 2; -} - -int -packed_archive_isend(MPI_Comm comm, int dest, int tag, - const packed_iarchive& ar, - MPI_Request* out_requests, int num_out_requests) -{ - assert(num_out_requests >= 2); - - const void* size = &ar.size(); - BOOST_MPI_CHECK_RESULT(MPI_Isend, - (const_cast(size), 1, - get_mpi_datatype(ar.size()), - dest, tag, comm, out_requests)); - BOOST_MPI_CHECK_RESULT(MPI_Isend, - (const_cast(ar.address()), ar.size(), - MPI_PACKED, - dest, tag, comm, out_requests + 1)); - - return 2; -} - -void -packed_archive_recv(MPI_Comm comm, int source, int tag, packed_iarchive& ar, - MPI_Status& status) -{ - std::size_t count; - BOOST_MPI_CHECK_RESULT(MPI_Recv, - (&count, 1, get_mpi_datatype(count), - source, tag, comm, &status)); - - // Prepare input buffer and receive the message - ar.resize(count); - BOOST_MPI_CHECK_RESULT(MPI_Recv, - (ar.address(), ar.size(), MPI_PACKED, - status.MPI_SOURCE, status.MPI_TAG, - comm, &status)); -} - -} } } // end namespace boost::mpi::detail diff --git a/src/python/collectives.cpp b/src/python/collectives.cpp deleted file mode 
100644 index fc4bf7b..0000000 --- a/src/python/collectives.cpp +++ /dev/null @@ -1,144 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file status.cpp - * - * This file reflects the Boost.MPI @c status class into - * Python. - */ -#include -#include -#include - -using namespace boost::python; -using namespace boost::mpi; - -namespace boost { namespace mpi { namespace python { - -extern const char* all_gather_docstring; -extern const char* all_reduce_docstring; -extern const char* all_to_all_docstring; -extern const char* broadcast_docstring; -extern const char* gather_docstring; -extern const char* reduce_docstring; -extern const char* scan_docstring; -extern const char* scatter_docstring; - -object all_gather(const communicator& comm, object value) -{ - std::vector values; - boost::mpi::all_gather(comm, value, values); - - boost::python::list l; - for (int i = 0; i < comm.size(); ++i) - l.append(values[i]); - return boost::python::tuple(l); -} - -object all_to_all(const communicator& comm, object in_values) -{ - // Build input values - std::vector in_values_vec(comm.size()); - object iterator = object(handle<>(PyObject_GetIter(in_values.ptr()))); - for (int i = 0; i < comm.size(); ++i) - in_values_vec[i] = object(handle<>(PyIter_Next(iterator.ptr()))); - - std::vector out_values_vec(comm.size()); - boost::mpi::all_to_all(comm, in_values_vec, out_values_vec); - - boost::python::list l; - for (int i = 0; i < comm.size(); ++i) - l.append(out_values_vec[i]); - return boost::python::tuple(l); -} - -object broadcast(const communicator& comm, object value, int root) -{ - boost::mpi::broadcast(comm, value, root); - return value; -} - -object gather(const communicator& comm, object value, int root) -{ - if (comm.rank() == root) { - std::vector 
values; - boost::mpi::gather(comm, value, values, root); - - boost::python::list l; - for (int i = 0; i < comm.size(); ++i) - l.append(values[i]); - return boost::python::tuple(l); - } else { - boost::mpi::gather(comm, value, root); - return object(); - } -} - -object reduce(const communicator& comm, object value, object op, int root) -{ - if (comm.rank() == root) { - object out_value; - boost::mpi::reduce(comm, value, out_value, op, root); - return out_value; - } else { - boost::mpi::reduce(comm, value, op, root); - return object(); - } -} - -object scatter(const communicator& comm, object values, int root) -{ - object result; - - if (comm.rank() == root) { - std::vector values_vec(comm.size()); - object iterator = object(handle<>(PyObject_GetIter(values.ptr()))); - for (int i = 0; i < comm.size(); ++i) - values_vec[i] = object(handle<>(PyIter_Next(iterator.ptr()))); - - boost::mpi::scatter(comm, values_vec, result, root); - } else { - boost::mpi::scatter(comm, result, root); - } - return result; -} - -void export_collectives() -{ - using boost::python::arg; - - def("all_reduce", - (object (*)(const communicator&, const object&, object))&all_reduce, - (arg("comm") = communicator(), arg("value"), arg("op")), - all_reduce_docstring); - def("all_gather", &all_gather, - (arg("comm") = communicator(), arg("value") = object()), - all_gather_docstring); - def("all_to_all", &all_to_all, - (arg("comm") = communicator(), arg("values") = object()), - all_to_all_docstring); - def("broadcast", &broadcast, - (arg("comm") = communicator(), arg("value") = object(), arg("root")), - broadcast_docstring); - def("gather", &gather, - (arg("comm") = communicator(), arg("value") = object(), arg("root")), - gather_docstring); - def("reduce", &reduce, - (arg("comm") = communicator(), arg("value"), arg("op"), - arg("root")), - reduce_docstring); - def("scan", - (object (*)(const communicator&, const object&, object))&scan, - (arg("comm") = communicator(), arg("value"), arg("op")), - 
scan_docstring); - def("scatter", &scatter, - (arg("comm") = communicator(), arg("values") = object(), arg("root")), - scatter_docstring); -} - -} } } // end namespace boost::mpi::python diff --git a/src/python/datatypes.cpp b/src/python/datatypes.cpp deleted file mode 100644 index 586fc03..0000000 --- a/src/python/datatypes.cpp +++ /dev/null @@ -1,25 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file datatypes.cpp - * - * This file provides datatypes support for Boost.MPI in Python. - */ -#include -#include - -namespace boost { namespace mpi { namespace python { - -void export_datatypes() -{ - register_serialized(long(0), &PyInt_Type); - register_serialized(false, &PyBool_Type); - register_serialized(double(0.0), &PyFloat_Type); -} - -} } } // end namespace boost::mpi::python diff --git a/src/python/documentation.cpp b/src/python/documentation.cpp deleted file mode 100644 index c1fd89a..0000000 --- a/src/python/documentation.cpp +++ /dev/null @@ -1,581 +0,0 @@ -// (C) Copyright 2005 The Trustees of Indiana University. -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file documentation.cpp - * - * This file contains all of the documentation strings for the - * Boost.MPI Python bindings. 
- */ -namespace boost { namespace mpi { namespace python { - -const char* module_docstring = - "The boost.mpi module contains Python wrappers for Boost.MPI.\n" - "Boost.MPI is a C++ interface to the Message Passing Interface 1.1,\n" - "a high-performance message passing library for parallel programming.\n" - "\n" - "This module supports the most commonly used subset of MPI 1.1. All\n" - "communication operations can transmit any Python object that can be\n" - "pickled and unpickled, along with C++-serialized data types and\n" - "separation of the structure of a data type from its content.\n" - "Collectives that have a user-supplied functions,\n" - "such as reduce() or scan(), accept arbitrary Python functions, and\n" - "all collectives can operate on any serializable or picklable data type.\n" - "\n" - "IMPORTANT MODULE DATA\n" - " any_source This constant may be used for the source parameter of\n" - " receive and probe operations to indicate that a\n" - " message may be received from any source.\n" - "\n" - " any_tag This constant may be used for the tag parameter of\n" - " receive or probe operations to indicate that a send\n" - " with any tag will be matched.\n" - "\n" - " collectives_tag Returns the reserved tag value used by the Boost.MPI\n" - " implementation for collective operations. Although\n" - " users are not permitted to use this tag to send or\n" - " receive messages with this tag, it may be useful when\n" - " monitoring communication patterns.\n" - "\n" - " host_rank If there is a host process, this is the rank of that\n" - " that process. Otherwise, this value will be None. MPI\n" - " does not define the meaning of a \"host\" process: \n" - " consult the documentation for your MPI implementation.\n" - "\n" - " io_rank The rank of a process that can perform input/output\n" - " via the standard facilities. If every process can\n" - " perform I/O using the standard facilities, this value\n" - " will be the same as any_source. 
If no process can\n" - " perform I/O, this value will be None.\n" - "\n" - " max_tag The maximum value that may be used for the tag\n" - " parameter of send/receive operations. This value will\n" - " be somewhat smaller than the value of MPI_TAG_UB,\n" - " because the Boost.MPI implementation reserves some\n" - " tags for collective operations.\n" - "\n" - " processor_name The name of this processor. The actual form of the\n" - " of the name is unspecified, but may be documented by\n" - " the underlying MPI implementation.\n" - "\n" - " rank The rank of this process in the \"world\" communicator.\n" - "\n" - " size The number of processes in the \"world\" communicator.\n" - " that process. Otherwise, this value will be None. MPI\n" - " does not define the meaning of a \"host\" process: \n" - "\n" - " world The \"world\" communicator from which all other\n" - " communicators will be derived. This is the equivalent\n" - " of MPI_COMM_WORLD.\n" - "\n" - "TRANSMITTING USER-DEFINED DATA\n" - " Boost.MPI can transmit user-defined data in several different ways.\n" - " Most importantly, it can transmit arbitrary Python objects by pickling\n" - " them at the sender and unpickling them at the receiver, allowing\n" - " arbitrarily complex Python data structures to interoperate with MPI.\n" - "\n" - " Boost.MPI also supports efficient serialization and transmission of\n" - " C++ objects (that have been exposed to Python) through its C++\n" - " interface. Any C++ type that provides (de-)serialization routines that\n" - " meet the requirements of the Boost.Serialization library is eligible\n" - " for this optimization, but the type must be registered in advance. To\n" - " register a C++ type, invoke the C++ function:\n" - " boost::mpi::python::register_serialized\n" - "\n" - " Finally, Boost.MPI supports separation of the structure of an object\n" - " from the data it stores, allowing the two pieces to be transmitted\n" - " separately. 
This \"skeleton/content\" mechanism, described in more\n" - " detail in a later section, is a communication optimization suitable\n" - " for problems with fixed data structures whose internal data changes\n" - " frequently.\n" - "\n" - "COLLECTIVES\n" - " Boost.MPI supports all of the MPI collectives (scatter, reduce, scan,\n" - " broadcast, etc.) for any type of data that can be transmitted with the\n" - " point-to-point communication operations. For the MPI collectives that\n" - " require a user-specified operation (e.g., reduce and scan), the\n" - " operation can be an arbitrary Python function. For instance, one could\n" - " concatenate strings with all_reduce:\n\n" - " mpi.all_reduce(my_string, lambda x,y: x + y)\n\n" - " The following module-level functions implement MPI collectives:\n" - " all_gather Gather the values from all processes.\n" - " all_reduce Combine the results from all processes.\n" - " all_to_all Every process sends data to every other process.\n" - " broadcast Broadcast data from one process to all other processes.\n" - " gather Gather the values from all processes to the root.\n" - " reduce Combine the results from all processes to the root.\n" - " scan Prefix reduction of the values from all processes.\n" - " scatter Scatter the values stored at the root to all processes.\n" - "\n" - "SKELETON/CONTENT MECHANISM\n" - " Boost.MPI provides a skeleton/content mechanism that allows the\n" - " transfer of large data structures to be split into two separate stages,\n" - " with the `skeleton' (or, `shape') of the data structure sent first and\n" - " the content (or, `data') of the data structure sent later, potentially\n" - " several times, so long as the structure has not changed since the\n" - " skeleton was transferred. 
The skeleton/content mechanism can improve\n" - " performance when the data structure is large and its shape is fixed,\n" - " because while the skeleton requires serialization (it has an unknown\n" - " size), the content transfer is fixed-size and can be done without\n" - " extra copies.\n" - "\n" - " To use the skeleton/content mechanism from Python, you must first\n" - " register the type of your data structure with the skeleton/content\n" - " mechanism *from C++*. The registration function is\n" - " boost::mpi::python::register_skeleton_and_content\n" - " and resides in the header.\n" - "\n" - " Once you have registered your C++ data structures, you can extract\n" - " the skeleton for an instance of that data structure with skeleton().\n" - " The resulting skeleton_proxy can be transmitted via the normal send\n" - " routine, e.g.,\n\n" - " mpi.world.send(1, 0, skeleton(my_data_structure))\n\n" - " skeleton_proxy objects can be received on the other end via recv(),\n" - " which stores a newly-created instance of your data structure with the\n" - " same `shape' as the sender in its `object' attribute:\n\n" - " shape = mpi.world.recv(0, 0)\n" - " my_data_structure = shape.object\n\n" - " Once the skeleton has been transmitted, the content (accessed via \n" - " get_content) can be transmitted in much the same way. 
Note, however,\n" - " that the receiver also specifies get_content(my_data_structure) in its\n" - " call to receive:\n\n" - " if mpi.rank == 0:\n" - " mpi.world.send(1, 0, get_content(my_data_structure))\n" - " else:\n" - " mpi.world.recv(0, 0, get_content(my_data_structure))\n\n" - " Of course, this transmission of content can occur repeatedly, if the\n" - " values in the data structure--but not its shape--changes.\n" - "\n" - " The skeleton/content mechanism is a structured way to exploit the\n" - " interaction between custom-built MPI datatypes and MPI_BOTTOM, to\n" - " eliminate extra buffer copies.\n" - "\n" - "C++/PYTHON MPI COMPATIBILITY\n" - " Boost.MPI is a C++ library whose facilities have been exposed to Python\n" - " via the Boost.Python library. Since the Boost.MPI Python bindings are\n" - " build directly on top of the C++ library, and nearly every feature of\n" - " C++ library is available in Python, hybrid C++/Python programs using\n" - " Boost.MPI can interact, e.g., sending a value from Python but receiving\n" - " that value in C++ (or vice versa). However, doing so requires some\n" - " care. Because Python objects are dynamically typed, Boost.MPI transfers\n" - " type information along with the serialized form of the object, so that\n" - " the object can be received even when its type is not known. This\n" - " mechanism differs from its C++ counterpart, where the static types of\n" - " transmitted values are always known.\n" - "\n" - " The only way to communicate between the C++ and Python views on \n" - " Boost.MPI is to traffic entirely in Python objects. For Python, this is\n" - " the normal state of affairs, so nothing will change. For C++, this\n" - " means sending and receiving values of type boost::python::object, from\n" - " the Boost.Python library. 
For instance, say we want to transmit an\n" - " integer value from Python:\n\n" - " comm.send(1, 0, 17)\n\n" - " In C++, we would receive that value into a Python object and then\n" - " `extract' an integer value:\n\n" - " boost::python::object value;\n" - " comm.recv(0, 0, value);\n" - " int int_value = boost::python::extract(value);\n\n" - " In the future, Boost.MPI will be extended to allow improved\n" - " interoperability with the C++ Boost.MPI and the C MPI bindings.\n" - ; - -/*********************************************************** - * environment documentation * - ***********************************************************/ -const char* environment_init_docstring = - "Initialize the MPI environment. Users should not need to call\n" - "this function directly, because the MPI environment will be\n" - "automatically initialized when the Boost.MPI module is loaded.\n"; - -const char* environment_finalize_docstring = - "Finalize (shut down) the MPI environment. Users only need to\n" - "invoke this function if MPI should be shut down before program\n" - "termination. Boost.MPI will automatically finalize the MPI\n" - "environment when the program exits.\n"; - -const char* environment_abort_docstring = - "Aborts all MPI processes and returns to the environment. The\n" - "precise behavior will be defined by the underlying MPI\n" - "implementation. 
This is equivalent to a call to MPI_Abort with\n" - "MPI_COMM_WORLD.\n" - "errcode is the error code to return from aborted processes.\n"; - -const char* environment_initialized_docstring = - "Determine if the MPI environment has already been initialized.\n"; - -const char* environment_finalized_docstring = - "Determine if the MPI environment has already been finalized.\n"; - -/*********************************************************** - * exception documentation * - ***********************************************************/ -const char* exception_docstring = - "Instances of this class will be thrown when an MPI error\n" - "occurs. MPI failures that trigger these exceptions may or may not\n" - "be recoverable, depending on the underlying MPI implementation.\n" - "Consult the documentation for your MPI implementation to determine\n" - "the effect of MPI errors.\n"; - -const char* exception_what_docstring = - "A description of the error that occured. At present, this refers\n" - "only to the name of the MPI routine that failed.\n"; - -const char* exception_routine_docstring = - "The name of the MPI routine that reported the error.\n"; - -const char* exception_result_code_docstring = - "The result code returned from the MPI routine that reported the\n" - "error.\n"; - -/*********************************************************** - * collectives documentation * - ***********************************************************/ -const char* all_gather_docstring = - "all_gather is a collective algorithm that collects the values\n" - "stored at each process into a tuple of values indexed by the\n" - "process number they came from. all_gather is (semantically) a\n" - "gather followed by a broadcast. The same tuple of values is\n" - "returned to all processes.\n"; - -const char* all_reduce_docstring = - "all_reduce is a collective algorithm that combines the values\n" - "stored by each process into a single value. 
The values can be\n" - "combined arbitrarily, specified via any function. The values\n" - "a1, a2, .., ap provided by p processors will be combined by the\n" - "binary function op into the result\n" - " op(a1, op(a2, ... op(ap-1,ap)))\n" - "that will be returned to all processes. This function is the\n" - "equivalent of calling all_gather() and then applying the built-in\n" - "reduce() function to the returned sequence. op is assumed to be\n" - "associative.\n"; - -const char* all_to_all_docstring = - "all_to_all is a collective algorithm that transmits values from\n" - "every process to every other process. On process i, the jth value\n" - "of the values sequence is sent to process j and placed in the ith\n" - "position of the tuple that will be returned from all_to_all.\n"; - -const char* broadcast_docstring = - "broadcast is a collective algorithm that transfers a value from an\n" - "arbitrary root process to every other process that is part of the\n" - "given communicator (comm). The root parameter must be the same for\n" - "every process. The value parameter need only be specified at the root\n" - "root. broadcast() returns the same broadcasted value to every process.\n"; - -const char* gather_docstring = - "gather is a collective algorithm that collects the values\n" - "stored at each process into a tuple of values at the root\n" - "process. This tuple is indexed by the process number that the\n" - "value came from, and will be returned only by the root process.\n" - "All other processes return None.\n"; - -const char* reduce_docstring = - "reduce is a collective algorithm that combines the values\n" - "stored by each process into a single value at the root. The\n" - "values can be combined arbitrarily, specified via any function.\n" - "The values a1, a2, .., ap provided by p processors will be\n" - "combined by the binary function op into the result\n" - " op(a1, op(a2, ... op(ap-1,ap)))\n" - "that will be returned on the root process. 
This function is the\n" - "equivalent of calling gather() to the root and then applying the\n" - "built-in reduce() function to the returned sequence. All non-root\n" - "processes return None. op is assumed to be associative.\n"; - -const char* scan_docstring = - "@c scan computes a prefix reduction of values from all processes.\n" - "It is a collective algorithm that combines the values stored by\n" - "each process with the values of all processes with a smaller rank.\n" - "The values can be arbitrarily combined, specified via a binary\n" - "function op. If each process i provides the value ai, then scan\n" - "returns op(a1, op(a2, ... op(ai-1, ai))) to the ith process. op is\n" - "assumed to be associative. This routine is the equivalent of an\n" - "all_gather(), followed by a built-in reduce() on the first i+1\n" - "values in the resulting sequence on processor i. op is assumed\n" - "to be associative.\n"; - -const char* scatter_docstring = - "scatter is a collective algorithm that scatters the values stored\n" - "in the root process (as a container with comm.size elements) to\n" - "all of the processes in the communicator. The values parameter \n" - "(only significant at the root) is indexed by the process number to\n" - "which the corresponding value will be sent. The value received by \n" - "each process is returned from scatter.\n"; - -/*********************************************************** - * communicator documentation * - ***********************************************************/ -const char* communicator_docstring = - "The communicator class abstracts a set of communicating\n" - "processes in MPI. 
All of the processes that belong to a certain\n" - "communicator can determine the size of the communicator, their rank\n" - "within the communicator, and communicate with any other processes\n" - "in the communicator.\n"; - -const char* communicator_default_constructor_docstring = - "Build a new Boost.MPI communicator for MPI_COMM_WORLD.\n"; - -const char* communicator_rank_docstring = - "Returns the rank of the process in the communicator, which will be a\n" - "value in [0, size).\n"; - -const char* communicator_size_docstring = - "Returns the number of processes in the communicator.\n"; - -const char* communicator_send_docstring = - "This routine executes a potentially blocking send with the given\n" - "tag to the process with rank dest. It can be received by the\n" - "destination process with a matching recv call. The value will be\n" - "transmitted in one of several ways:\n" - "\n" - " - For C++ objects registered via register_serialized(), the value\n" - " will be serialized and transmitted.\n" - "\n" - " - For skeleton_proxy objects, the skeleton of the object will be\n" - " serialized and transmitted.\n" - "\n" - " - For content objects, the content will be transmitted directly.\n" - " This content can be received by a matching recv/irecv call that\n" - " provides a suitable `buffer' argument.\n" - "\n" - " - For all other Python objects, the value will be pickled and\n" - " transmitted.\n"; - -const char* communicator_recv_docstring = - "This routine blocks until it receives a message from the process\n" - "source with the given tag. If the source parameter is not specified,\n" - "the message can be received from any process. 
Likewise, if the tag\n" - "parameter is not specified, a message with any tag can be received.\n" - "If return_status is True, returns a tuple containing the received\n" - "object followed by a status object describing the communication.\n" - "Otherwise, recv() returns just the received object.\n" - "\n" - "When receiving the content of a data type that has been sent separately\n" - "from its skeleton, user code must provide a value for the `buffer'\n" - "argument. This value should be the content object returned from\n" - "get_content().\n"; - -const char* communicator_isend_docstring = - "This routine executes a nonblocking send with the given\n" - "tag to the process with rank dest. It can be received by the\n" - "destination process with a matching recv call. The value will be\n" - "transmitted in the same way as with send().\n" - "This routine returns a request object, which can be used to query\n" - "when the transmission has completed, wait for its completion, or\n" - "cancel the transmission.\n"; - -const char* communicator_irecv_docstring = - "This routine initiates a non-blocking receive from the process\n" - "source with the given tag. If the source parameter is not specified,\n" - "the message can be received from any process. Likewise, if the tag\n" - "parameter is not specified, a message with any tag can be received.\n" - "This routine returns a request object, which can be used to query\n" - "when the transmission has completed, wait for its completion, or\n" - "cancel the transmission. The received value be accessible\n" - "through the `value' attribute of the request object once transmission\n" - "has completed.\n" - "\n" - "As with the recv() routine, when receiving the content of a data type\n" - "that has been sent separately from its skeleton, user code must provide\n" - "a value for the `buffer' argument. 
This value should be the content\n" - "object returned from get_content().\n"; - - const char* communicator_probe_docstring = - "This operation waits until a message matching (source, tag)\n" - "is available to be received. It then returns information about\n" - "that message. If source is omitted, a message from any process\n" - "will match. If tag is omitted, a message with any tag will match.\n" - "The actual source and tag can be retrieved from the returned status\n" - "object. To check if a message is available without blocking, use\n" - "iprobe.\n"; - -const char* communicator_iprobe_docstring = - "This operation determines if a message matching (source, tag) is\n" - "available to be received. If so, it returns information about that\n" - "message; otherwise, it returns None. If source is omitted, a message\n" - "from any process will match. If tag is omitted, a message with any\n" - "tag will match. The actual source and tag can be retrieved from the\n" - "returned status object. To wait for a message to become available, use\n" - "probe.\n"; - -const char* communicator_barrier_docstring = - "Wait for all processes within a communicator to reach the\n" - "barrier.\n"; - -const char* communicator_split_docstring = - "Split the communicator into multiple, disjoint communicators\n" - "each of which is based on a particular color. This is a\n" - "collective operation that returns a new communicator that is a\n" - "subgroup of this. This routine is functionally equivalent to\n" - "MPI_Comm_split.\n\n" - "color is the color of this process. All processes with the\n" - "same color value will be placed into the same group.\n\n" - "If provided, key is a key value that will be used to determine\n" - "the ordering of processes with the same color in the resulting\n" - "communicator. 
If omitted, the key will default to the rank of\n" - "the process in the current communicator.\n\n" - "Returns a new communicator containing all of the processes in\n" - "this communicator that have the same color.\n"; - -const char* communicator_abort_docstring = - "Makes a \"best attempt\" to abort all of the tasks in the group of\n" - "this communicator. Depending on the underlying MPI\n" - "implementation, this may either abort the entire program (and\n" - "possibly return errcode to the environment) or only abort\n" - "some processes, allowing the others to continue. Consult the\n" - "documentation for your MPI implementation. This is equivalent to\n" - "a call to MPI_Abort\n\n" - "errcode is the error code to return from aborted processes.\n"; - -/*********************************************************** - * request documentation * - ***********************************************************/ -const char* request_docstring = - "The request class contains information about a non-blocking send\n" - "or receive and will be returned from isend or irecv, respectively.\n" - "When a request object represents a completed irecv, the `value' \n" - "attribute will contain the received value.\n"; - -const char* request_wait_docstring = - "Wait until the communication associated with this request has\n" - "completed. For a request that is associated with an isend(), returns\n" - "a status object describing the communication. For an irecv()\n" - "operation, returns the received value by default. However, when\n" - "return_status=True, a (value, status) pair is returned by a.\n" - "completed irecv request.\n"; - -const char* request_test_docstring = - "Determine whether the communication associated with this request\n" - "has completed successfully. If so, returns the status object\n" - "describing the communication (for an isend request) or a tuple\n" - "containing the received value and a status object (for an irecv\n" - "request). 
Note that once test() returns a status object, the\n" - "request has completed and wait() should not be called.\n"; - -const char* request_cancel_docstring = - "Cancel a pending communication, assuming it has not already been\n" - "completed.\n"; - -/*********************************************************** - * skeleton/content documentation * - ***********************************************************/ -const char* object_without_skeleton_docstring = - "The object_without_skeleton class is an exception class used only\n" - "when the skeleton() or get_content() function is called with an\n" - "object that is not supported by the skeleton/content mechanism.\n" - "All C++ types for which skeletons and content can be transmitted\n" - "must be registered with the C++ routine:\n" - " boost::mpi::python::register_skeleton_and_content\n"; - -const char* object_without_skeleton_object_docstring = - "The object on which skeleton() or get_content() was invoked.\n"; - -const char* skeleton_proxy_docstring = - "The skeleton_proxy class is used to represent the skeleton of an\n" - "object. The skeleton_proxy can be used as the value parameter of\n" - "send() or isend() operations, but instead of transmitting the\n" - "entire object, only its skeleton (\"shape\") will be sent, without\n" - "the actual data. Its content can then be transmitted, separately.\n" - "\n" - "User code cannot generate skeleton_proxy instances directly. To\n" - "refer to the skeleton of an object, use skeleton(object). 
Skeletons\n" - "can also be received with the recv() and irecv() methods.\n" - "\n" - "Note that the skeleton/content mechanism can only be used with C++\n" - "types that have been explicitly registered.\n"; - -const char* skeleton_proxy_object_docstring = - "The actual object whose skeleton is represented by this proxy object.\n"; - -const char* content_docstring = - "The content is a proxy class that represents the content of an object,\n" - "which can be separately sent or received from its skeleton.\n" - "\n" - "User code cannot generate content instances directly. Call the\n" - "get_content() routine to retrieve the content proxy for a particular\n" - "object. The content instance can be used with any of the send() or\n" - "recv() variants. Note that get_content() can only be used with C++\n" - "data types that have been explicitly registered with the Python\n" - "skeleton/content mechanism.\n"; - -const char* skeleton_docstring = - "The skeleton function retrieves the skeleton_proxy for its object\n" - "parameter, allowing the transmission of the skeleton (or \"shape\")\n" - "of the object separately from its data. The skeleton/content mechanism\n" - "is useful when a large data structure remains structurally the same\n" - "throughout a computation, but its content (i.e., the values in the\n" - "structure) changes several times. Tranmission of the content part does\n" - "not require any serialization or unnecessary buffer copies, so it is\n" - "very efficient for large data structures.\n" - "\n" - "Only C++ types that have been explicitly registered with the Boost.MPI\n" - "Python library can be used with the skeleton/content mechanism. Use:\b" - " boost::mpi::python::register_skeleton_and_content\n"; - -const char* get_content_docstring = - "The get_content function retrieves the content for its object parameter,\n" - "allowing the transmission of the data in a data structure separately\n" - "from its skeleton (or \"shape\"). 
The skeleton/content mechanism\n" - "is useful when a large data structure remains structurally the same\n" - "throughout a computation, but its content (i.e., the values in the\n" - "structure) changes several times. Tranmission of the content part does\n" - "not require any serialization or unnecessary buffer copies, so it is\n" - "very efficient for large data structures.\n" - "\n" - "Only C++ types that have been explicitly registered with the Boost.MPI\n" - "Python library can be used with the skeleton/content mechanism. Use:\b" - " boost::mpi::python::register_skeleton_and_content\n"; - -/*********************************************************** - * status documentation * - ***********************************************************/ -const char* status_docstring = - "The status class stores information about a given message, including\n" - "its source, tag, and whether the message transmission was cancelled\n" - "or resulted in an error.\n"; - -const char* status_source_docstring = - "The source of the incoming message.\n"; - -const char* status_tag_docstring = - "The tag of the incoming message.\n"; - -const char* status_error_docstring = - "The error code associated with this transmission.\n"; - -const char* status_cancelled_docstring = - "Whether this transmission was cancelled.\n"; - -/*********************************************************** - * timer documentation * - ***********************************************************/ -const char* timer_docstring = - "The timer class is a simple wrapper around the MPI timing facilities.\n"; - -const char* timer_default_constructor_docstring = - "Initializes the timer. 
After this call, elapsed == 0.\n"; - -const char* timer_restart_docstring = - "Restart the timer, after which elapsed == 0.\n"; - -const char* timer_elapsed_docstring = - "The time elapsed since initialization or the last restart(),\n" - "whichever is more recent.\n"; - -const char* timer_elapsed_min_docstring = - "Returns the minimum non-zero value that elapsed may return\n" - "This is the resolution of the timer.\n"; - -const char* timer_elapsed_max_docstring = - "Return an estimate of the maximum possible value of elapsed. Note\n" - "that this routine may return too high a value on some systems.\n"; - -const char* timer_time_is_global_docstring = - "Determines whether the elapsed time values are global times or\n" - "local processor times.\n"; - -} } } // end namespace boost::mpi::python diff --git a/src/python/exception.cpp b/src/python/exception.cpp deleted file mode 100644 index e19c0eb..0000000 --- a/src/python/exception.cpp +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (C) 2006 Douglas Gregor -// Copyright (C) 2005 The Trustees of Indiana University. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file exception.cpp - * - * This file reflects the Boost.MPI @c mpi_error class into - * Python. 
- */ -#include -#include -#include -#include -#include "utility.hpp" - -using namespace boost::python; -using namespace boost::mpi; - -namespace boost { namespace mpi { namespace python { - -extern const char* exception_docstring; -extern const char* exception_what_docstring; -extern const char* exception_routine_docstring; -extern const char* exception_result_code_docstring; - -str exception_str(const exception& e) -{ - return str("MPI routine `" + std::string(e.routine()) + - "' returned error code " + - lexical_cast(e.result_code())); -} - -void export_exception() -{ - using boost::python::arg; - using boost::python::object; - - object type = - class_ - ("exception", exception_docstring, no_init) - .add_property("what", &exception::what, exception_what_docstring) - .add_property("routine", &exception::what, exception_routine_docstring) - .add_property("result_code", &exception::what, - exception_result_code_docstring) - .def("__str__", &exception_str) - ; - translate_exception::declare(type); -} - -} } } // end namespace boost::mpi::python diff --git a/src/python/module.cpp b/src/python/module.cpp deleted file mode 100644 index f365b7e..0000000 --- a/src/python/module.cpp +++ /dev/null @@ -1,53 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file module.cpp - * - * This file provides the top-level module for the Boost.MPI Python - * bindings. 
- */ -#include -#include - -using namespace boost::python; -using namespace boost::mpi; - -namespace boost { namespace mpi { namespace python { - -extern void export_environment(); -extern void export_exception(); -extern void export_collectives(); -extern void export_communicator(); -extern void export_datatypes(); -extern void export_request(); -extern void export_status(); -extern void export_timer(); - -extern const char* module_docstring; - -BOOST_PYTHON_MODULE(mpi) -{ - // Setup module documentation - scope().attr("__doc__") = module_docstring; - scope().attr("__author__") = "Douglas Gregor "; - scope().attr("__date__") = "$LastChangedDate: 2006-07-16 15:25:47 -0400 (Sun, 16 Jul 2006) $"; - scope().attr("__version__") = "$Revision$"; - scope().attr("__copyright__") = "Copyright (C) 2006 Douglas Gregor"; - scope().attr("__license__") = "http://www.boost.org/LICENSE_1_0.txt"; - - export_environment(); - export_exception(); - export_communicator(); - export_collectives(); - export_datatypes(); - export_request(); - export_status(); - export_timer(); -} - -} } } // end namespace boost::mpi::python diff --git a/src/python/py_communicator.cpp b/src/python/py_communicator.cpp deleted file mode 100644 index 4b7ce50..0000000 --- a/src/python/py_communicator.cpp +++ /dev/null @@ -1,134 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file communicator.cpp - * - * This file reflects the Boost.MPI @c communicator class into - * Python. 
- */ -#include -#include -#include - -using namespace boost::python; -using namespace boost::mpi; - -namespace boost { namespace mpi { namespace python { - -extern const char* communicator_docstring; -extern const char* communicator_default_constructor_docstring; -extern const char* communicator_rank_docstring; -extern const char* communicator_size_docstring; -extern const char* communicator_send_docstring; -extern const char* communicator_recv_docstring; -extern const char* communicator_isend_docstring; -extern const char* communicator_irecv_docstring; -extern const char* communicator_probe_docstring; -extern const char* communicator_iprobe_docstring; -extern const char* communicator_barrier_docstring; -extern const char* communicator_split_docstring; -extern const char* communicator_split_key_docstring; -extern const char* communicator_abort_docstring; - -object -communicator_recv(const communicator& comm, int source, int tag, - bool return_status) -{ - using boost::python::make_tuple; - - object result; - status stat = comm.recv(source, tag, result); - if (return_status) - return make_tuple(result, stat); - else - return result; -} - -object -communicator_irecv(const communicator& comm, int source, int tag) -{ - using boost::python::make_tuple; - - object result; - object req(comm.irecv(source, tag, result)); - req.attr("value") = result; - return req; -} - -object -communicator_iprobe(const communicator& comm, int source, int tag) -{ - if (boost::optional result = comm.iprobe(source, tag)) - return object(*result); - else - return object(); -} - -extern void export_skeleton_and_content(class_&); - -void export_communicator() -{ - using boost::python::arg; - using boost::python::object; - - class_ comm("communicator", communicator_docstring); - comm - .def(init<>()) - .add_property("rank", &communicator::rank, communicator_rank_docstring) - .add_property("size", &communicator::size, communicator_size_docstring) - .def("send", - (void (communicator::*)(int, int, 
const object&) const) - &communicator::send, - (arg("dest"), arg("tag") = 0, arg("value") = object()), - communicator_send_docstring) - .def("recv", &communicator_recv, - (arg("source") = any_source, arg("tag") = any_tag, - arg("return_status") = false), - communicator_recv_docstring) - .def("isend", - (request (communicator::*)(int, int, const object&) const) - &communicator::isend, - (arg("dest"), arg("tag") = 0, arg("value") = object()), - communicator_isend_docstring) - .def("irecv", &communicator_irecv, - (arg("source") = any_source, arg("tag") = any_tag), - communicator_irecv_docstring) - .def("probe", &communicator::probe, - (arg("source") = any_source, arg("tag") = any_tag), - communicator_probe_docstring) - .def("iprobe", &communicator_iprobe, - (arg("source") = any_source, arg("tag") = any_tag), - communicator_iprobe_docstring) - .def("barrier", &communicator::barrier, communicator_barrier_docstring) - .def("__nonzero__", &communicator::operator bool) - .def("split", - (communicator (communicator::*)(int) const)&communicator::split, - (arg("color")), communicator_split_docstring) - .def("split", - (communicator (communicator::*)(int, int) const)&communicator::split, - (arg("color"), arg("key"))) - .def("abort", &communicator::abort, arg("errcode"), - communicator_abort_docstring) - ; - - // Module-level attributes - scope().attr("any_source") = any_source; - scope().attr("any_tag") = any_tag; - - { - communicator world; - scope().attr("world") = world; - scope().attr("rank") = world.rank(); - scope().attr("size") = world.size(); - } - - // Export skeleton and content - export_skeleton_and_content(comm); -} - -} } } // end namespace boost::mpi::python diff --git a/src/python/py_environment.cpp b/src/python/py_environment.cpp deleted file mode 100644 index bcd95e4..0000000 --- a/src/python/py_environment.cpp +++ /dev/null @@ -1,111 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// 
License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file environment.cpp - * - * This file reflects the Boost.MPI "environment" class into Python - * methods at module level. - */ -#include -#include - -using namespace boost::python; -using namespace boost::mpi; - -namespace boost { namespace mpi { namespace python { - -extern const char* environment_init_docstring; -extern const char* environment_finalize_docstring; -extern const char* environment_abort_docstring; -extern const char* environment_initialized_docstring; -extern const char* environment_finalized_docstring; - -/** - * The environment used by the Boost.MPI Python module. This will be - * zero-initialized before it is used. - */ -static environment* env; - -bool mpi_init(list python_argv, bool abort_on_exception) -{ - // If MPI is already initialized, do nothing. - if (environment::initialized()) - return false; - - // Convert Python argv into C-style argc/argv. 
- int my_argc = extract(python_argv.attr("__len__")()); - char** my_argv = new char*[my_argc]; - for (int arg = 0; arg < my_argc; ++arg) - my_argv[arg] = strdup(extract(python_argv[arg])); - - // Initialize MPI - int mpi_argc = my_argc; - char** mpi_argv = my_argv; - env = new environment(mpi_argc, mpi_argv, abort_on_exception); - - // If anything changed, convert C-style argc/argv into Python argv - if (mpi_argv != my_argv) - PySys_SetArgv(mpi_argc, mpi_argv); - - for (int arg = 0; arg < my_argc; ++arg) - free(my_argv[arg]); - delete [] my_argv; - - return true; -} - -void mpi_finalize() -{ - if (env) { - delete env; - env = 0; - } -} - -void export_environment() -{ - using boost::python::arg; - - def("init", mpi_init, (arg("argv"), arg("abort_on_exception") = true), - environment_init_docstring); - def("finalize", mpi_finalize, environment_finalize_docstring); - - // Setup initialization and finalization code - if (!environment::initialized()) { - // MPI_Init from sys.argv - object sys = object(handle<>(PyImport_ImportModule("sys"))); - mpi_init(extract(sys.attr("argv")), true); - - // Setup MPI_Finalize call when the program exits - object atexit = object(handle<>(PyImport_ImportModule("atexit"))); - object finalize = scope().attr("finalize"); - atexit.attr("register")(finalize); - } - - def("abort", &environment::abort, arg("errcode"), - environment_abort_docstring); - def("initialized", &environment::initialized, - environment_initialized_docstring); - def("finalized", &environment::finalized, - environment_finalized_docstring); - scope().attr("max_tag") = environment::max_tag(); - scope().attr("collectives_tag") = environment::collectives_tag(); - scope().attr("processor_name") = environment::processor_name(); - - if (optional host_rank = environment::host_rank()) - scope().attr("host_rank") = *host_rank; - else - scope().attr("host_rank") = object(); - - if (optional io_rank = environment::io_rank()) - scope().attr("io_rank") = *io_rank; - else - 
scope().attr("io_rank") = object(); -} - -} } } // end namespace boost::mpi::python diff --git a/src/python/py_exception.cpp b/src/python/py_exception.cpp deleted file mode 100644 index e19c0eb..0000000 --- a/src/python/py_exception.cpp +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (C) 2006 Douglas Gregor -// Copyright (C) 2005 The Trustees of Indiana University. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file exception.cpp - * - * This file reflects the Boost.MPI @c mpi_error class into - * Python. - */ -#include -#include -#include -#include -#include "utility.hpp" - -using namespace boost::python; -using namespace boost::mpi; - -namespace boost { namespace mpi { namespace python { - -extern const char* exception_docstring; -extern const char* exception_what_docstring; -extern const char* exception_routine_docstring; -extern const char* exception_result_code_docstring; - -str exception_str(const exception& e) -{ - return str("MPI routine `" + std::string(e.routine()) + - "' returned error code " + - lexical_cast(e.result_code())); -} - -void export_exception() -{ - using boost::python::arg; - using boost::python::object; - - object type = - class_ - ("exception", exception_docstring, no_init) - .add_property("what", &exception::what, exception_what_docstring) - .add_property("routine", &exception::what, exception_routine_docstring) - .add_property("result_code", &exception::what, - exception_result_code_docstring) - .def("__str__", &exception_str) - ; - translate_exception::declare(type); -} - -} } } // end namespace boost::mpi::python diff --git a/src/python/py_request.cpp b/src/python/py_request.cpp deleted file mode 100644 index 2746265..0000000 --- a/src/python/py_request.cpp +++ /dev/null @@ -1,64 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and 
distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file request.cpp - * - * This file reflects the Boost.MPI @c request class into - * Python. - */ -#include -#include - -using namespace boost::python; -using namespace boost::mpi; - -namespace boost { namespace mpi { namespace python { - -extern const char* request_docstring; -extern const char* request_wait_docstring; -extern const char* request_test_docstring; -extern const char* request_cancel_docstring; - -object request_wait(object req_obj) -{ - request& req = extract(req_obj)(); - status stat = req.wait(); - if (PyObject_HasAttrString(req_obj.ptr(), "value")) - return boost::python::make_tuple(stat, req_obj.attr("value")); - else - return object(stat); -} - -object request_test(object req_obj) -{ - request& req = extract(req_obj)(); - - if (optional stat = req.test()) - { - if (PyObject_HasAttrString(req_obj.ptr(), "value")) - return boost::python::make_tuple(stat, req_obj.attr("value")); - else - return object(stat); - } - else - return object(); -} - -void export_request() -{ - using boost::python::arg; - using boost::python::object; - - class_("request", request_docstring, no_init) - .def("wait", &request_wait, request_wait_docstring) - .def("test", &request_test, request_test_docstring) - .def("cancel", &request::cancel, request_cancel_docstring) - ; -} - -} } } // end namespace boost::mpi::python diff --git a/src/python/py_timer.cpp b/src/python/py_timer.cpp deleted file mode 100644 index d33f694..0000000 --- a/src/python/py_timer.cpp +++ /dev/null @@ -1,48 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file timer.cpp - * - * This file reflects the Boost.MPI @c timer class into - * Python. - */ -#include -#include - -using namespace boost::python; -using namespace boost::mpi; - -namespace boost { namespace mpi { namespace python { - -extern const char* timer_docstring; -extern const char* timer_default_constructor_docstring; -extern const char* timer_restart_docstring; -extern const char* timer_elapsed_docstring; -extern const char* timer_elapsed_min_docstring; -extern const char* timer_elapsed_max_docstring; -extern const char* timer_time_is_global_docstring; - -void export_timer() -{ - using boost::python::arg; - using boost::python::object; - - class_("timer", timer_docstring) - .def(init<>()) - .def("restart", &timer::restart, timer_restart_docstring) - .add_property("elapsed", &timer::elapsed, timer_elapsed_docstring) - .add_property("elapsed_min", &timer::elapsed_min, - timer_elapsed_min_docstring) - .add_property("elapsed_max", &timer::elapsed_max, - timer_elapsed_max_docstring) - .add_property("time_is_global", &timer::time_is_global, - timer_time_is_global_docstring) - ; -} - -} } } // end namespace boost::mpi::python diff --git a/src/python/serialize.cpp b/src/python/serialize.cpp deleted file mode 100644 index 92004a3..0000000 --- a/src/python/serialize.cpp +++ /dev/null @@ -1,79 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file serialize.cpp - * - * This file provides Boost.Serialization support for Python objects. 
- */ -#include -#include -#include - -namespace boost { namespace python { - -struct pickle::data_t { - object module; - object dumps; - object loads; -}; - - -/// Data used for communicating with the Python `pickle' module. -pickle::data_t* pickle::data; - -str pickle::dumps(object obj, int protocol) -{ - if (!data) initialize_data(); - return extract((data->dumps)(obj, protocol)); -} - -object pickle::loads(str s) -{ - if (!data) initialize_data(); - return ((data->loads)(s)); -} - -void pickle::initialize_data() -{ - data = new data_t; - data->module = object(handle<>(PyImport_ImportModule("pickle"))); - data->dumps = data->module.attr("dumps"); - data->loads = data->module.attr("loads"); -} - -} } // end namespace boost::python - -BOOST_PYTHON_DIRECT_SERIALIZATION_ARCHIVE_IMPL( - ::boost::mpi::packed_iarchive, - ::boost::mpi::packed_oarchive) - -namespace boost { namespace mpi { namespace python { namespace detail { - - boost::python::object skeleton_proxy_base_type; - - // A map from Python type objects to skeleton/content handlers - typedef std::map - skeleton_content_handlers_type; - - BOOST_MPI_PYTHON_DECL skeleton_content_handlers_type skeleton_content_handlers; - - bool - skeleton_and_content_handler_registered(PyTypeObject* type) - { - return - skeleton_content_handlers.find(type) != skeleton_content_handlers.end(); - } - - void - register_skeleton_and_content_handler(PyTypeObject* type, - const skeleton_content_handler& handler) - { - skeleton_content_handlers[type] = handler; - } - -} } } } // end namespace boost::mpi::python::detail diff --git a/src/python/skeleton_and_content.cpp b/src/python/skeleton_and_content.cpp deleted file mode 100644 index 45a39d1..0000000 --- a/src/python/skeleton_and_content.cpp +++ /dev/null @@ -1,172 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file skeleton_and_content.cpp - * - * This file reflects the skeleton/content facilities into Python. - */ -#include -#include -#include -#include -#include -#include "utility.hpp" - -using namespace boost::python; -using namespace boost::mpi; - -namespace boost { namespace mpi { namespace python { - -namespace detail { - typedef std::map - skeleton_content_handlers_type; - -// We're actually importing skeleton_content_handlers from skeleton_and_content.cpp. -#if defined(BOOST_HAS_DECLSPEC) && (defined(BOOST_MPI_PYTHON_DYN_LINK) || defined(BOOST_ALL_DYN_LINK)) -# define BOOST_SC_DECL __declspec(dllimport) -#else -# define BOOST_SC_DECL -#endif - - extern BOOST_SC_DECL skeleton_content_handlers_type skeleton_content_handlers; -} - -/** - * An exception that will be thrown when the object passed to the - * Python version of skeleton() does not have a skeleton. - */ -struct object_without_skeleton : public std::exception { - explicit object_without_skeleton(object value) : value(value) { } - virtual ~object_without_skeleton() throw() { } - - object value; -}; - -str object_without_skeleton_str(const object_without_skeleton& e) -{ - return str("\nThe skeleton() or get_content() function was invoked for a Python\n" - "object that is not supported by the Boost.MPI skeleton/content\n" - "mechanism. To transfer objects via skeleton/content, you must\n" - "register the C++ type of this object with the C++ function:\n" - " boost::mpi::python::register_skeleton_and_content()\n" - "Object: " + str(e.value) + "\n"); -} - -/** - * Extract the "skeleton" from a Python object. In truth, all we're - * doing at this point is verifying that the object is a C++ type that - * has been registered for the skeleton/content mechanism. 
- */ -object skeleton(object value) -{ - PyTypeObject* type = value.ptr()->ob_type; - detail::skeleton_content_handlers_type::iterator pos = - detail::skeleton_content_handlers.find(type); - if (pos == detail::skeleton_content_handlers.end()) - throw object_without_skeleton(value); - else - return pos->second.get_skeleton_proxy(value); -} - -/** - * Extract the "content" from a Python object, which must be a C++ - * type that has been registered for the skeleton/content mechanism. - */ -content get_content(object value) -{ - PyTypeObject* type = value.ptr()->ob_type; - detail::skeleton_content_handlers_type::iterator pos = - detail::skeleton_content_handlers.find(type); - if (pos == detail::skeleton_content_handlers.end()) - throw object_without_skeleton(value); - else - return pos->second.get_content(value); -} - -/// Send the content part of a Python object. -void -communicator_send_content(const communicator& comm, int dest, int tag, - const content& c) -{ - comm.send(dest, tag, c.base()); -} - -/// Receive the content of a Python object. We return the object -/// received, not the content wrapper. -object -communicator_recv_content(const communicator& comm, int source, int tag, - const content& c, bool return_status) -{ - using boost::python::make_tuple; - - status stat = comm.recv(source, tag, c.base()); - if (return_status) - return make_tuple(c.object, stat); - else - return c.object; -} - -/// Receive the content of a Python object. The request object's value -/// attribute will reference the object whose content is being -/// received, not the content wrapper. 
-object -communicator_irecv_content(const communicator& comm, int source, int tag, - const content& c) -{ - using boost::python::make_tuple; - - object req(comm.irecv(source, tag, c.base())); - req.attr("value") = c.object; - return req; -} - -extern const char* object_without_skeleton_docstring; -extern const char* object_without_skeleton_object_docstring; -extern const char* skeleton_proxy_docstring; -extern const char* skeleton_proxy_object_docstring; -extern const char* content_docstring; -extern const char* skeleton_docstring; -extern const char* get_content_docstring; - -void export_skeleton_and_content(class_& comm) -{ - using boost::python::arg; - - // Expose the object_without_skeleton exception - object type = - class_ - ("object_without_skeleton", object_without_skeleton_docstring, no_init) - .def_readonly("object", &object_without_skeleton::value, - object_without_skeleton_object_docstring) - .def("__str__", &object_without_skeleton_str) - ; - translate_exception::declare(type); - - // Expose the Python variants of "skeleton_proxy" and "content", and - // their generator functions. - detail::skeleton_proxy_base_type = - class_("skeleton_proxy", skeleton_proxy_docstring, - no_init) - .def_readonly("object", &skeleton_proxy_base::object, - skeleton_proxy_object_docstring); - class_("content", content_docstring, no_init); - def("skeleton", &skeleton, arg("object"), skeleton_docstring); - def("get_content", &get_content, arg("object"), get_content_docstring); - - // Expose communicator send/recv operations for content. 
- comm - .def("send", communicator_send_content, - (arg("dest"), arg("tag") = 0, arg("value"))) - .def("recv", communicator_recv_content, - (arg("source") = any_source, arg("tag") = any_tag, arg("buffer"), - arg("return_status") = false)) - .def("irecv", communicator_irecv_content, - (arg("source") = any_source, arg("tag") = any_tag, arg("buffer"))); -} - -} } } // end namespace boost::mpi::python diff --git a/src/python/status.cpp b/src/python/status.cpp deleted file mode 100644 index 51e1d27..0000000 --- a/src/python/status.cpp +++ /dev/null @@ -1,41 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -/** @file status.cpp - * - * This file reflects the Boost.MPI @c status class into - * Python. - */ -#include -#include - -using namespace boost::python; -using namespace boost::mpi; - -namespace boost { namespace mpi { namespace python { - -extern const char* status_docstring; -extern const char* status_source_docstring; -extern const char* status_tag_docstring; -extern const char* status_error_docstring; -extern const char* status_cancelled_docstring; - -void export_status() -{ - using boost::python::arg; - using boost::python::object; - - class_("status", status_docstring, no_init) - .add_property("source", &status::source, status_source_docstring) - .add_property("tag", &status::tag, status_tag_docstring) - .add_property("error", &status::error, status_error_docstring) - .add_property("cancelled", &status::cancelled, status_cancelled_docstring) - ; -} - -} } } // end namespace boost::mpi::python diff --git a/src/python/utility.hpp b/src/python/utility.hpp deleted file mode 100644 index ed00167..0000000 --- a/src/python/utility.hpp +++ /dev/null @@ -1,43 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is 
subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor -#ifndef BOOST_MPI_PYTHON_UTILITY_HPP -#define BOOST_MPI_PYTHON_UTILITY_HPP - -/** @file utility.hpp - * - * This file is a utility header for the Boost.MPI Python bindings. - */ -#include - -namespace boost { namespace mpi { namespace python { - -template -class translate_exception -{ - explicit translate_exception(boost::python::object type) : type(type) { } - -public: - static void declare(boost::python::object type) - { - using boost::python::register_exception_translator; - register_exception_translator(translate_exception(type)); - } - - void operator()(const E& e) const - { - using boost::python::object; - PyErr_SetObject(type.ptr(), object(e).ptr()); - } - -private: - boost::python::object type; -}; - -} } } // end namespace boost::mpi::python - -#endif // BOOST_MPI_PYTHON_UTILITY_HPP diff --git a/src/request.cpp b/src/request.cpp deleted file mode 100644 index 9bc842f..0000000 --- a/src/request.cpp +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright (C) 2006 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -#include -#include - -namespace boost { namespace mpi { - -/*************************************************************************** - * request * - ***************************************************************************/ -request::request() - : m_handler(0), m_data() -{ - m_requests[0] = MPI_REQUEST_NULL; - m_requests[1] = MPI_REQUEST_NULL; -} - -status request::wait() -{ - if (m_handler) { - // This request is a receive for a serialized type. Use the - // handler to wait for completion. 
- return *m_handler(this, ra_wait); - } else if (m_requests[1] == MPI_REQUEST_NULL) { - // This request is either a send or a receive for a type with an - // associated MPI datatype, or a serialized datatype that has been - // packed into a single message. Just wait on the one receive/send - // and return the status to the user. - status result; - BOOST_MPI_CHECK_RESULT(MPI_Wait, (&m_requests[0], &result.m_status)); - return result; - } else { - // This request is a send of a serialized type, broken into two - // separate messages. Complete both sends at once. - MPI_Status stats[2]; - int error_code = MPI_Waitall(2, m_requests, stats); - if (error_code == MPI_ERR_IN_STATUS) { - // Dig out which status structure has the error, and use that - // one when throwing the exception. - if (stats[0].MPI_ERROR == MPI_SUCCESS - || stats[0].MPI_ERROR == MPI_ERR_PENDING) - boost::throw_exception(exception("MPI_Waitall", stats[1].MPI_ERROR)); - else - boost::throw_exception(exception("MPI_Waitall", stats[0].MPI_ERROR)); - } else if (error_code != MPI_SUCCESS) { - // There was an error somewhere in the MPI_Waitall call; throw - // an exception for it. - boost::throw_exception(exception("MPI_Waitall", error_code)); - } - - // No errors. Returns the first status structure. - status result; - result.m_status = stats[0]; - return result; - } -} - -optional request::test() -{ - if (m_handler) { - // This request is a receive for a serialized type. Use the - // handler to test for completion. - return m_handler(this, ra_test); - } else if (m_requests[1] == MPI_REQUEST_NULL) { - // This request is either a send or a receive for a type with an - // associated MPI datatype, or a serialized datatype that has been - // packed into a single message. Just test the one receive/send - // and return the status to the user if it has completed. - status result; - int flag = 0; - BOOST_MPI_CHECK_RESULT(MPI_Test, - (&m_requests[0], &flag, &result.m_status)); - return flag != 0? 
optional(result) : optional(); - } else { - // This request is a send of a serialized type, broken into two - // separate messages. We only get a result if both complete. - MPI_Status stats[2]; - int flag = 0; - int error_code = MPI_Testall(2, m_requests, &flag, stats); - if (error_code == MPI_ERR_IN_STATUS) { - // Dig out which status structure has the error, and use that - // one when throwing the exception. - if (stats[0].MPI_ERROR == MPI_SUCCESS - || stats[0].MPI_ERROR == MPI_ERR_PENDING) - boost::throw_exception(exception("MPI_Testall", stats[1].MPI_ERROR)); - else - boost::throw_exception(exception("MPI_Testall", stats[0].MPI_ERROR)); - } else if (error_code != MPI_SUCCESS) { - // There was an error somewhere in the MPI_Testall call; throw - // an exception for it. - boost::throw_exception(exception("MPI_Testall", error_code)); - } - - // No errors. Returns the second status structure if the send has - // completed. - if (flag != 0) { - status result; - result.m_status = stats[1]; - return result; - } else { - return optional(); - } - } -} - -void request::cancel() -{ - if (m_handler) { - m_handler(this, ra_cancel); - } else { - BOOST_MPI_CHECK_RESULT(MPI_Cancel, (&m_requests[0])); - if (m_requests[1] != MPI_REQUEST_NULL) - BOOST_MPI_CHECK_RESULT(MPI_Cancel, (&m_requests[1])); - } -} - -} } // end namespace boost::mpi diff --git a/src/text_skeleton_iarchive.cpp b/src/text_skeleton_iarchive.cpp deleted file mode 100644 index a4f1dc6..0000000 --- a/src/text_skeleton_iarchive.cpp +++ /dev/null @@ -1,24 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#define BOOST_ARCHIVE_SOURCE -#include -#include -#include - -#include -#include -#include - -namespace boost { namespace archive { - -// explicitly instantiate all required templates - -template class detail::archive_pointer_iserializer ; - -} } // end namespace boost::archive diff --git a/src/text_skeleton_oarchive.cpp b/src/text_skeleton_oarchive.cpp deleted file mode 100644 index 45cf13b..0000000 --- a/src/text_skeleton_oarchive.cpp +++ /dev/null @@ -1,22 +0,0 @@ -// (C) Copyright 2005 Matthias Troyer - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Matthias Troyer - -#define BOOST_ARCHIVE_SOURCE -#include - -#include -#include -#include -#include - -namespace boost { namespace archive { -// explicitly instantiate all required templates - -template class detail::archive_pointer_oserializer ; - -} } // end namespace boost::archive diff --git a/src/timer.cpp b/src/timer.cpp deleted file mode 100644 index 32d65b4..0000000 --- a/src/timer.cpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -#include -#include - -namespace boost { namespace mpi { - -bool timer::time_is_global() -{ - int* is_global; - int found = 0; - - BOOST_MPI_CHECK_RESULT(MPI_Attr_get, - (MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL, &is_global, - &found)); - if (!found) - return false; - else - return *is_global != 0; -} - -} } // end namespace boost::mpi diff --git a/test/Jamfile.v2 b/test/Jamfile.v2 deleted file mode 100644 index a95e123..0000000 --- a/test/Jamfile.v2 +++ /dev/null @@ -1,36 +0,0 @@ -# Support for the Message Passing Interface (MPI) -# -# (C) Copyright 2005, 2006 Trustees of Indiana University -# (C) Copyright 2005 Douglas Gregor -# -# Distributed under the Boost Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt.) -# -# Authors: Douglas Gregor -# Andrew Lumsdaine - -use-project /boost/mpi : ../build ; -project /boost/mpi/test ; -import mpi : mpi-test ; - -if [ mpi.configured ] -{ -test-suite mpi - : - [ mpi-test all_gather_test ] - [ mpi-test all_reduce_test ] - [ mpi-test all_to_all_test ] - [ mpi-test broadcast_test : : : 2 17 ] - [ mpi-test gather_test ] - [ mpi-test is_mpi_op_test : : : 1 ] - # Note: Microsoft MPI fails nonblocking_test on 1 processor - [ mpi-test nonblocking_test ] - [ mpi-test reduce_test ] - [ mpi-test ring_test : : : 2 3 4 7 8 13 17 ] - [ mpi-test scan_test ] - [ mpi-test scatter_test ] - # Note: Microsoft MPI fails all skeleton-content tests - [ mpi-test skeleton_content_test : : : 2 3 4 7 8 13 17 ] - [ mpi-test graph_topology_test : : : 2 7 13 ] - ; -} diff --git a/test/all_gather_test.cpp b/test/all_gather_test.cpp deleted file mode 100644 index a0543fd..0000000 --- a/test/all_gather_test.cpp +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (C) 2005-2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// A test of the all_gather() collective. -#include -#include -#include -#include -#include -#include "gps_position.hpp" -#include -#include -#include -#include - -using boost::mpi::communicator; - -using boost::mpi::packed_skeleton_iarchive; -using boost::mpi::packed_skeleton_oarchive; - -template -void -all_gather_test(const communicator& comm, Generator generator, - const char* kind) -{ - typedef typename Generator::result_type value_type; - value_type value = generator(comm.rank()); - - using boost::mpi::all_gather; - - std::vector values; - if (comm.rank() == 0) { - std::cout << "Gathering " << kind << "..."; - std::cout.flush(); - } - - all_gather(comm, value, values); - - std::vector expected_values; - for (int p = 0; p < comm.size(); ++p) - expected_values.push_back(generator(p)); - BOOST_CHECK(values == expected_values); - if (comm.rank() == 0 && values == expected_values) - std::cout << "OK." 
<< std::endl; - - (comm.barrier)(); -} - -// Generates integers to test with gather() -struct int_generator -{ - typedef int result_type; - - int operator()(int p) const { return 17 + p; } -}; - -// Generates GPS positions to test with gather() -struct gps_generator -{ - typedef gps_position result_type; - - gps_position operator()(int p) const - { - return gps_position(39 + p, 16, 20.2799); - } -}; - -struct string_generator -{ - typedef std::string result_type; - - std::string operator()(int p) const - { - std::string result = boost::lexical_cast(p); - result += " rosebud"; - if (p != 1) result += 's'; - return result; - } -}; - -struct string_list_generator -{ - typedef std::list result_type; - - std::list operator()(int p) const - { - std::list result; - for (int i = 0; i <= p; ++i) { - std::string value = boost::lexical_cast(i); - result.push_back(value); - } - return result; - } -}; - -int test_main(int argc, char* argv[]) -{ - boost::mpi::environment env(argc, argv); - communicator comm; - all_gather_test(comm, int_generator(), "integers"); - all_gather_test(comm, gps_generator(), "GPS positions"); - all_gather_test(comm, string_generator(), "string"); - all_gather_test(comm, string_list_generator(), "list of strings"); - return 0; -} diff --git a/test/all_reduce_test.cpp b/test/all_reduce_test.cpp deleted file mode 100644 index 566e441..0000000 --- a/test/all_reduce_test.cpp +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright (C) 2005, 2006 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// A test of the all_reduce() collective. -#include -#include -#include -#include -#include -#include -#include -#include -#include - -using boost::mpi::communicator; - -// A simple point class that we can build, add, compare, and -// serialize. 
-struct point -{ - point() : x(0), y(0), z(0) { } - point(int x, int y, int z) : x(x), y(y), z(z) { } - - int x; - int y; - int z; - - private: - template - void serialize(Archiver& ar, unsigned int /*version*/) - { - ar & x & y & z; - } - - friend class boost::serialization::access; -}; - -std::ostream& operator<<(std::ostream& out, const point& p) -{ - return out << p.x << ' ' << p.y << ' ' << p.z; -} - -bool operator==(const point& p1, const point& p2) -{ - return p1.x == p2.x && p1.y == p2.y && p1.z == p2.z; -} - -bool operator!=(const point& p1, const point& p2) -{ - return !(p1 == p2); -} - -point operator+(const point& p1, const point& p2) -{ - return point(p1.x + p2.x, p1.y + p2.y, p1.z + p2.z); -} - -namespace boost { namespace mpi { - - template <> - struct is_mpi_datatype : public mpl::true_ { }; - -} } // end namespace boost::mpi - -template -void -all_reduce_test(const communicator& comm, Generator generator, - const char* type_kind, Op op, const char* op_kind, - typename Generator::result_type init) -{ - typedef typename Generator::result_type value_type; - value_type value = generator(comm.rank()); - - using boost::mpi::all_reduce; - - if (comm.rank() == 0) { - std::cout << "Reducing to " << op_kind << " of " << type_kind << "..."; - std::cout.flush(); - } - - value_type result_value = all_reduce(comm, value, op); - - // Compute expected result - std::vector generated_values; - for (int p = 0; p < comm.size(); ++p) - generated_values.push_back(generator(p)); - value_type expected_result = std::accumulate(generated_values.begin(), - generated_values.end(), - init, op); - BOOST_CHECK(result_value == expected_result); - if (result_value == expected_result && comm.rank() == 0) - std::cout << "OK." 
<< std::endl; - - (comm.barrier)(); -} - -// Generates integers to test with all_reduce() -struct int_generator -{ - typedef int result_type; - - int_generator(int base = 1) : base(base) { } - - int operator()(int p) const { return base + p; } - - private: - int base; -}; - -// Generate points to test with all_reduce() -struct point_generator -{ - typedef point result_type; - - point_generator(point origin) : origin(origin) { } - - point operator()(int p) const - { - return point(origin.x + 1, origin.y + 1, origin.z + 1); - } - - private: - point origin; -}; - -struct string_generator -{ - typedef std::string result_type; - - std::string operator()(int p) const - { - std::string result = boost::lexical_cast(p); - result += " rosebud"; - if (p != 1) result += 's'; - return result; - } -}; - -struct secret_int_bit_and -{ - int operator()(int x, int y) const { return x & y; } -}; - -struct wrapped_int -{ - wrapped_int() : value(0) { } - explicit wrapped_int(int value) : value(value) { } - - template - void serialize(Archive& ar, unsigned int /* version */) - { - ar & value; - } - - int value; -}; - -wrapped_int operator+(const wrapped_int& x, const wrapped_int& y) -{ - return wrapped_int(x.value + y.value); -} - -bool operator==(const wrapped_int& x, const wrapped_int& y) -{ - return x.value == y.value; -} - -// Generates wrapped_its to test with all_reduce() -struct wrapped_int_generator -{ - typedef wrapped_int result_type; - - wrapped_int_generator(int base = 1) : base(base) { } - - wrapped_int operator()(int p) const { return wrapped_int(base + p); } - - private: - int base; -}; - -namespace boost { namespace mpi { - -// Make std::plus commutative. 
-template<> -struct is_commutative, wrapped_int> - : mpl::true_ { }; - -} } // end namespace boost::mpi - -int test_main(int argc, char* argv[]) -{ - using namespace boost::mpi; - environment env(argc, argv); - - communicator comm; - - // Built-in MPI datatypes with built-in MPI operations - all_reduce_test(comm, int_generator(), "integers", std::plus(), "sum", - 0); - all_reduce_test(comm, int_generator(), "integers", std::multiplies(), - "product", 1); - all_reduce_test(comm, int_generator(), "integers", maximum(), - "maximum", 0); - all_reduce_test(comm, int_generator(), "integers", minimum(), - "minimum", 2); - - // User-defined MPI datatypes with operations that have the - // same name as built-in operations. - all_reduce_test(comm, point_generator(point(0,0,0)), "points", - std::plus(), "sum", point()); - - // Built-in MPI datatypes with user-defined operations - all_reduce_test(comm, int_generator(17), "integers", secret_int_bit_and(), - "bitwise and", -1); - - // Arbitrary types with user-defined, commutative operations. - all_reduce_test(comm, wrapped_int_generator(17), "wrapped integers", - std::plus(), "sum", wrapped_int(0)); - - // Arbitrary types with (non-commutative) user-defined operations - all_reduce_test(comm, string_generator(), "strings", - std::plus(), "concatenation", std::string()); - - return 0; -} diff --git a/test/all_to_all_test.cpp b/test/all_to_all_test.cpp deleted file mode 100644 index 4863a2c..0000000 --- a/test/all_to_all_test.cpp +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (C) 2005, 2006 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// A test of the all_to_all() collective. 
-#include -#include -#include -#include -#include -#include "gps_position.hpp" -#include -#include -#include -#include - -using boost::mpi::communicator; - -using boost::mpi::packed_skeleton_iarchive; -using boost::mpi::packed_skeleton_oarchive; - -template -void -all_to_all_test(const communicator& comm, Generator generator, - const char* kind) -{ - typedef typename Generator::result_type value_type; - - using boost::mpi::all_to_all; - - std::vector in_values; - for (int p = 0; p < comm.size(); ++p) - in_values.push_back(generator((p + 1) * (comm.rank() + 1))); - - if (comm.rank() == 0) { - std::cout << "Performing all-to-all operation on " << kind << "..."; - std::cout.flush(); - } - std::vector out_values; - all_to_all(comm, in_values, out_values); - - for (int p = 0; p < comm.size(); ++p) { - BOOST_CHECK(out_values[p] == generator((p + 1) * (comm.rank() + 1))); - } - - if (comm.rank() == 0) { - std::cout << " done." << std::endl; - } - - (comm.barrier)(); -} - -// Generates integers to test with all_to_all() -struct int_generator -{ - typedef int result_type; - - int operator()(int p) const { return 17 + p; } -}; - -// Generates GPS positions to test with all_to_all() -struct gps_generator -{ - typedef gps_position result_type; - - gps_position operator()(int p) const - { - return gps_position(39 + p, 16, 20.2799); - } -}; - -struct string_generator -{ - typedef std::string result_type; - - std::string operator()(int p) const - { - std::string result = boost::lexical_cast(p); - result += " rosebud"; - if (p != 1) result += 's'; - return result; - } -}; - -struct string_list_generator -{ - typedef std::list result_type; - - std::list operator()(int p) const - { - std::list result; - for (int i = 0; i <= p; ++i) { - std::string value = boost::lexical_cast(i); - result.push_back(value); - } - return result; - } -}; - -int test_main(int argc, char* argv[]) -{ - boost::mpi::environment env(argc, argv); - - communicator comm; - all_to_all_test(comm, int_generator(), 
"integers"); - all_to_all_test(comm, gps_generator(), "GPS positions"); - all_to_all_test(comm, string_generator(), "string"); - all_to_all_test(comm, string_list_generator(), "list of strings"); - - return 0; -} diff --git a/test/broadcast_test.cpp b/test/broadcast_test.cpp deleted file mode 100644 index 426efd8..0000000 --- a/test/broadcast_test.cpp +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright (C) 2005, 2006 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// A test of the broadcast() collective. -#include -#include -#include -#include -#include -#include "gps_position.hpp" -#include -#include -#include -#include - -using boost::mpi::communicator; - -using boost::mpi::packed_skeleton_iarchive; -using boost::mpi::packed_skeleton_oarchive; - -template -void -broadcast_test(const communicator& comm, const T& bc_value, - const char* kind, int root = -1) -{ - if (root == -1) { - for (root = 0; root < comm.size(); ++root) - broadcast_test(comm, bc_value, kind, root); - } else { - using boost::mpi::broadcast; - - T value; - if (comm.rank() == root) { - value = bc_value; - std::cout << "Broadcasting " << kind << " from root " << root << "..."; - std::cout.flush(); - } - - broadcast(comm, value, root); - BOOST_CHECK(value == bc_value); - if (comm.rank() == root && value == bc_value) - std::cout << "OK." 
<< std::endl; - } - - (comm.barrier)(); -} - -void -test_skeleton_and_content(const communicator& comm, int root = 0) -{ - using boost::mpi::content; - using boost::mpi::get_content; - using boost::make_counting_iterator; - using boost::mpi::broadcast; - using boost::mpi::content; - using boost::mpi::get_content; - - typedef std::list::iterator iterator; - - int list_size = comm.size() + 7; - if (comm.rank() == root) { - // Fill in the seed data - std::list original_list; - for (int i = 0; i < list_size; ++i) - original_list.push_back(i); - - // Build up the skeleton - packed_skeleton_oarchive oa(comm); - oa << original_list; - - // Broadcast the skeleton - std::cout << "Broadcasting integer list skeleton from root " << root - << "..."; - broadcast(comm, oa, root); - std::cout << "OK." << std::endl; - - // Broadcast the content - std::cout << "Broadcasting integer list content from root " << root - << "..."; - { - content c = get_content(original_list); - broadcast(comm, c, root); - } - std::cout << "OK." << std::endl; - - // Reverse the list, broadcast the content again - std::reverse(original_list.begin(), original_list.end()); - std::cout << "Broadcasting reversed integer list content from root " - << root << "..."; - { - content c = get_content(original_list); - broadcast(comm, c, root); - } - std::cout << "OK." << std::endl; - } else { - // Allocate some useless data, to try to get the addresses of the - // list's used later to be different across processes. - std::list junk_list(comm.rank() * 3 + 1, 17); - - // Receive the skeleton - packed_skeleton_iarchive ia(comm); - broadcast(comm, ia, root); - - // Build up a list to match the skeleton, and make sure it has the - // right structure (we have no idea what the data will be). 
- std::list transferred_list; - ia >> transferred_list; - BOOST_CHECK((int)transferred_list.size() == list_size); - - // Receive the content and check it - broadcast(comm, get_content(transferred_list), root); - BOOST_CHECK(std::equal(make_counting_iterator(0), - make_counting_iterator(list_size), - transferred_list.begin())); - - // Receive the reversed content and check it - broadcast(comm, get_content(transferred_list), root); - BOOST_CHECK(std::equal(make_counting_iterator(0), - make_counting_iterator(list_size), - transferred_list.rbegin())); - } - - (comm.barrier)(); -} - -int test_main(int argc, char* argv[]) -{ - boost::mpi::environment env(argc, argv); - - communicator comm; - if (comm.size() == 1) { - std::cerr << "ERROR: Must run the broadcast test with more than one " - << "process." << std::endl; - MPI_Abort(comm, -1); - } - - // Check transfer of individual objects - broadcast_test(comm, 17, "integers"); - broadcast_test(comm, gps_position(39,16,20.2799), "GPS positions"); - broadcast_test(comm, gps_position(26,25,30.0), "GPS positions"); - broadcast_test(comm, std::string("Rosie"), "string"); - - std::list strings; - strings.push_back("Hello"); - strings.push_back("MPI"); - strings.push_back("World"); - broadcast_test(comm, strings, "list of strings"); - - test_skeleton_and_content(comm, 0); - test_skeleton_and_content(comm, 1); - return 0; -} diff --git a/test/gather_test.cpp b/test/gather_test.cpp deleted file mode 100644 index 42e92dc..0000000 --- a/test/gather_test.cpp +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (C) 2005, 2006 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// A test of the gather() collective. 
-#include -#include -#include -#include -#include -#include "gps_position.hpp" -#include -#include -#include -#include - -using boost::mpi::communicator; - -using boost::mpi::packed_skeleton_iarchive; -using boost::mpi::packed_skeleton_oarchive; - -template -void -gather_test(const communicator& comm, Generator generator, - const char* kind, int root = -1) -{ - typedef typename Generator::result_type value_type; - value_type value = generator(comm.rank()); - - if (root == -1) { - for (root = 0; root < comm.size(); ++root) - gather_test(comm, generator, kind, root); - } else { - using boost::mpi::gather; - - std::vector values; - if (comm.rank() == root) { - std::cout << "Gathering " << kind << " from root " << root << "..."; - std::cout.flush(); - } - - gather(comm, value, values, root); - - if (comm.rank() == root) { - std::vector expected_values; - for (int p = 0; p < comm.size(); ++p) - expected_values.push_back(generator(p)); - BOOST_CHECK(values == expected_values); - if (values == expected_values) - std::cout << "OK." 
<< std::endl; - } else { - BOOST_CHECK(values.empty()); - } - } - - (comm.barrier)(); -} - -// Generates integers to test with gather() -struct int_generator -{ - typedef int result_type; - - int operator()(int p) const { return 17 + p; } -}; - -// Generates GPS positions to test with gather() -struct gps_generator -{ - typedef gps_position result_type; - - gps_position operator()(int p) const - { - return gps_position(39 + p, 16, 20.2799); - } -}; - -struct string_generator -{ - typedef std::string result_type; - - std::string operator()(int p) const - { - std::string result = boost::lexical_cast(p); - result += " rosebud"; - if (p != 1) result += 's'; - return result; - } -}; - -struct string_list_generator -{ - typedef std::list result_type; - - std::list operator()(int p) const - { - std::list result; - for (int i = 0; i <= p; ++i) { - std::string value = boost::lexical_cast(i); - result.push_back(value); - } - return result; - } -}; - -int test_main(int argc, char* argv[]) -{ - boost::mpi::environment env(argc, argv); - - communicator comm; - gather_test(comm, int_generator(), "integers"); - gather_test(comm, gps_generator(), "GPS positions"); - gather_test(comm, string_generator(), "string"); - gather_test(comm, string_list_generator(), "list of strings"); - - return 0; -} diff --git a/test/gps_position.hpp b/test/gps_position.hpp deleted file mode 100644 index 109a3a9..0000000 --- a/test/gps_position.hpp +++ /dev/null @@ -1,61 +0,0 @@ -#ifndef GPS_POSITION_HPP -#define GPS_POSITION_HPP - -// Copyright Matthias Troyer -// 2005. Distributed under the Boost Software License, Version -// 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -#include -#include -#include -#include - -class gps_position -{ -private: - friend class boost::serialization::access; - // When the class Archive corresponds to an output archive, the - // & operator is defined similar to <<. 
Likewise, when the class Archive - // is a type of input archive the & operator is defined similar to >>. - template - void serialize(Archive & ar, const unsigned int version) - { - ar & degrees & minutes & seconds; - } - int degrees; - int minutes; - float seconds; -public: - gps_position(){}; - gps_position(int d, int m, float s) : - degrees(d), minutes(m), seconds(s) - {} - - friend bool operator==(const gps_position& x, const gps_position& y) - { - return (x.degrees == y.degrees - && x.minutes == y.minutes - && x.seconds == y.seconds); - } - - inline friend bool operator!=(const gps_position& x, const gps_position& y) - { - return !(x == y); - } -}; - - -namespace boost { namespace mpi { - - template <> - struct is_mpi_datatype - : public mpl::and_ - < - is_mpi_datatype, - is_mpi_datatype - > - {}; - -} } -#endif diff --git a/test/graph_topology_test.cpp b/test/graph_topology_test.cpp deleted file mode 100644 index d573f20..0000000 --- a/test/graph_topology_test.cpp +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (C) 2007 Trustees of Indiana University - -// Authors: Douglas Gregor -// Andrew Lumsdaine - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// A test of the communicator that passes data around a ring and -// verifies that the same data makes it all the way. Should test all -// of the various kinds of data that can be sent (primitive types, POD -// types, serializable objects, etc.) 
-#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include // for random_shuffle -#include -#include - -using boost::mpi::communicator; -using boost::mpi::graph_communicator; -using namespace boost; - -int test_main(int argc, char* argv[]) -{ - boost::function_requires< IncidenceGraphConcept >(); - boost::function_requires< AdjacencyGraphConcept >(); - boost::function_requires< VertexListGraphConcept >(); - boost::function_requires< EdgeListGraphConcept >(); - - double prob = 0.1; - - boost::mpi::environment env(argc, argv); - - communicator world; - - // Random number generator - minstd_rand gen; - - // Build a random graph with as many vertices as there are processes - typedef adjacency_list Graph; - sorted_erdos_renyi_iterator - first(gen, world.size(), prob), last; - Graph graph(first, last, world.size()); - - // Display the original graph - if (world.rank() == 0) { - std::cout << "Original, random graph:\n"; - BGL_FORALL_VERTICES(v, graph, Graph) { - BGL_FORALL_OUTEDGES(v, e, graph, Graph) { - std::cout << source(e, graph) << " -> " << target(e, graph) - << std::endl; - } - } - } - - // Create an arbitrary mapping from vertices to integers - typedef property_map::type GraphVertexIndexMap; - std::vector graph_alt_index_vec(num_vertices(graph)); - iterator_property_map - graph_alt_index(&graph_alt_index_vec[0], get(vertex_index, graph)); - - // Rank 0 will populate the alternative index vector - if (world.rank() == 0) { - int index = 0; - BGL_FORALL_VERTICES(v, graph, Graph) - put(graph_alt_index, v, index++); - - std::random_shuffle(graph_alt_index_vec.begin(), graph_alt_index_vec.end()); - } - broadcast(world, graph_alt_index_vec, 0); - - // Display the original graph with the remapping - if (world.rank() == 0) { - std::cout << "Original, random graph with remapped vertex numbers:\n"; - BGL_FORALL_VERTICES(v, graph, Graph) { - BGL_FORALL_OUTEDGES(v, e, graph, Graph) { - std::cout << get(graph_alt_index, 
source(e, graph)) << " -> " - << get(graph_alt_index, target(e, graph)) << std::endl; - } - } - } - - // Create a communicator with a topology equivalent to the graph - graph_communicator graph_comm(world, graph, graph_alt_index, false); - - // The communicator's topology should have the same number of - // vertices and edges and the original graph - BOOST_CHECK((int)num_vertices(graph) == num_vertices(graph_comm)); - BOOST_CHECK((int)num_edges(graph) == num_edges(graph_comm)); - - // Display the communicator graph - if (graph_comm.rank() == 0) { - std::cout << "Communicator graph:\n"; - BGL_FORALL_VERTICES(v, graph_comm, graph_communicator) { - BGL_FORALL_OUTEDGES(v, e, graph_comm, graph_communicator) { - std::cout << source(e, graph_comm) << " -> " << target(e, graph_comm) - << std::endl; - } - } - - std::cout << "Communicator graph via edges():\n"; - BGL_FORALL_EDGES(e, graph_comm, graph_communicator) - std::cout << source(e, graph_comm) << " -> " << target(e, graph_comm) - << std::endl; - } - (graph_comm.barrier)(); - - // Verify the isomorphism - if (graph_comm.rank() == 0) - std::cout << "Verifying isomorphism..." << std::endl; - BOOST_CHECK(verify_isomorphism(graph, graph_comm, graph_alt_index)); - - return 0; -} diff --git a/test/is_mpi_op_test.cpp b/test/is_mpi_op_test.cpp deleted file mode 100644 index 01f34fc..0000000 --- a/test/is_mpi_op_test.cpp +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2005-2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// A test of the is_mpi_op functionality. -#include -#include -#include -#include - -using namespace boost::mpi; -using namespace std; -using boost::is_base_and_derived; - -int test_main(int argc, char* argv[]) -{ - boost::mpi::environment env(argc, argv); - - // Check each predefined MPI_Op type that we support directly. 
- BOOST_CHECK((is_mpi_op, int>::op() == MPI_MAX)); - BOOST_CHECK((is_mpi_op, float>::op() == MPI_MIN)); - BOOST_CHECK((is_mpi_op, double>::op() == MPI_SUM)); - BOOST_CHECK((is_mpi_op, long>::op() == MPI_PROD)); - BOOST_CHECK((is_mpi_op, int>::op() == MPI_LAND)); - BOOST_CHECK((is_mpi_op, int>::op() == MPI_BAND)); - BOOST_CHECK((is_mpi_op, int>::op() == MPI_LOR)); - BOOST_CHECK((is_mpi_op, int>::op() == MPI_BOR)); - BOOST_CHECK((is_mpi_op, int>::op() == MPI_LXOR)); - BOOST_CHECK((is_mpi_op, int>::op() == MPI_BXOR)); - - return 0; -} diff --git a/test/nonblocking_test.cpp b/test/nonblocking_test.cpp deleted file mode 100644 index 1895d15..0000000 --- a/test/nonblocking_test.cpp +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright (C) 2006 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// A test of the nonblocking point-to-point operations. 
-#include -#include -#include -#include -#include "gps_position.hpp" -#include -#include -#include -#include -#include - -using boost::mpi::communicator; -using boost::mpi::request; -using boost::mpi::status; - -enum method_kind { - mk_wait_any, mk_test_any, mk_wait_all, mk_wait_all_keep, - mk_test_all, mk_test_all_keep, mk_wait_some, mk_wait_some_keep, - mk_test_some, mk_test_some_keep, - mk_all, // use to run all of the different methods - mk_all_except_test_all // use for serialized types -}; - -static char* method_kind_names[mk_all] = { - "wait_any", - "test_any", - "wait_all", - "wait_all (keep results)", - "test_all", - "test_all (keep results)", - "wait_some", - "wait_some (keep results)", - "test_some", - "test_some (keep results)" -}; - - -template -void -nonblocking_test(const communicator& comm, const T* values, int num_values, - const char* kind, method_kind method = mk_all) -{ - using boost::mpi::wait_any; - using boost::mpi::test_any; - using boost::mpi::wait_all; - using boost::mpi::test_all; - using boost::mpi::wait_some; - using boost::mpi::test_some; - - if (method == mk_all || method == mk_all_except_test_all) { - nonblocking_test(comm, values, num_values, kind, mk_wait_any); - nonblocking_test(comm, values, num_values, kind, mk_test_any); - nonblocking_test(comm, values, num_values, kind, mk_wait_all); - nonblocking_test(comm, values, num_values, kind, mk_wait_all_keep); - if (method == mk_all) { - nonblocking_test(comm, values, num_values, kind, mk_test_all); - nonblocking_test(comm, values, num_values, kind, mk_test_all_keep); - } - nonblocking_test(comm, values, num_values, kind, mk_wait_some); - nonblocking_test(comm, values, num_values, kind, mk_wait_some_keep); - nonblocking_test(comm, values, num_values, kind, mk_test_some); - nonblocking_test(comm, values, num_values, kind, mk_test_some_keep); - } else { - if (comm.rank() == 0) { - std::cout << "Testing " << method_kind_names[method] - << " with " << kind << "..."; - std::cout.flush(); - 
} - - typedef std::pair::iterator> - status_iterator_pair; - - T incoming_value; - std::vector incoming_values(num_values); - - std::vector reqs; - // Send/receive the first value - reqs.push_back(comm.isend((comm.rank() + 1) % comm.size(), 0, values[0])); - reqs.push_back(comm.irecv((comm.rank() + comm.size() - 1) % comm.size(), - 0, incoming_value)); - - if (method != mk_wait_any && method != mk_test_any) { -#ifndef LAM_MPI - // We've run into problems here (with 0-length messages) with - // LAM/MPI on Mac OS X and x86-86 Linux. Will investigate - // further at a later time, but the problem only seems to occur - // when using shared memory, not TCP. - - // Send/receive an empty message - reqs.push_back(comm.isend((comm.rank() + 1) % comm.size(), 1)); - reqs.push_back(comm.irecv((comm.rank() + comm.size() - 1) % comm.size(), - 1)); -#endif - - // Send/receive an array - reqs.push_back(comm.isend((comm.rank() + 1) % comm.size(), 2, values, - num_values)); - reqs.push_back(comm.irecv((comm.rank() + comm.size() - 1) % comm.size(), - 2, &incoming_values.front(), num_values)); - } - - switch (method) { - case mk_wait_any: - if (wait_any(reqs.begin(), reqs.end()).second == reqs.begin()) - reqs[1].wait(); - else - reqs[0].wait(); - break; - - case mk_test_any: - { - boost::optional result; - do { - result = test_any(reqs.begin(), reqs.end()); - } while (!result); - if (result->second == reqs.begin()) - reqs[1].wait(); - else - reqs[0].wait(); - break; - } - - case mk_wait_all: - wait_all(reqs.begin(), reqs.end()); - break; - - case mk_wait_all_keep: - { - std::vector stats; - wait_all(reqs.begin(), reqs.end(), std::back_inserter(stats)); - } - break; - - case mk_test_all: - while (!test_all(reqs.begin(), reqs.end())) { /* Busy wait */ } - break; - - case mk_test_all_keep: - { - std::vector stats; - while (!test_all(reqs.begin(), reqs.end(), std::back_inserter(stats))) - /* Busy wait */; - } - break; - - case mk_wait_some: - { - std::vector::iterator pos = reqs.end(); - 
do { - pos = wait_some(reqs.begin(), pos); - } while (pos != reqs.begin()); - } - break; - - case mk_wait_some_keep: - { - std::vector stats; - std::vector::iterator pos = reqs.end(); - do { - pos = wait_some(reqs.begin(), pos, std::back_inserter(stats)).second; - } while (pos != reqs.begin()); - } - break; - - case mk_test_some: - { - std::vector::iterator pos = reqs.end(); - do { - pos = test_some(reqs.begin(), pos); - } while (pos != reqs.begin()); - } - break; - - case mk_test_some_keep: - { - std::vector stats; - std::vector::iterator pos = reqs.end(); - do { - pos = test_some(reqs.begin(), pos, std::back_inserter(stats)).second; - } while (pos != reqs.begin()); - } - break; - - default: - BOOST_CHECK(false); - } - - if (comm.rank() == 0) { - bool okay = true; - - if (!((incoming_value == values[0]))) - okay = false; - - if (method != mk_wait_any && method != mk_test_any - && !std::equal(incoming_values.begin(), incoming_values.end(), - values)) - okay = false; - - if (okay) - std::cout << "OK." << std::endl; - else - std::cerr << "ERROR!" 
<< std::endl; - } - - BOOST_CHECK(incoming_value == values[0]); - - if (method != mk_wait_any && method != mk_test_any) - BOOST_CHECK(std::equal(incoming_values.begin(), incoming_values.end(), - values)); - } -} - -int test_main(int argc, char* argv[]) -{ - boost::mpi::environment env(argc, argv); - - communicator comm; - - int int_array[3] = {17, 42, 256}; - nonblocking_test(comm, int_array, 3, "integers"); - - gps_position gps_array[2] = { - gps_position(17, 42, .06), - gps_position(42, 17, .06) - }; - nonblocking_test(comm, gps_array, 2, "gps positions"); - - std::string string_array[2] = { "Hello", "World" }; - nonblocking_test(comm, string_array, 2, "strings", - mk_all_except_test_all); - - std::list lst_of_strings; - for (int i = 0; i < comm.size(); ++i) - lst_of_strings.push_back(boost::lexical_cast(i)); - - nonblocking_test(comm, &lst_of_strings, 1, "list of strings", - mk_all_except_test_all); - - return 0; -} diff --git a/test/python/all_gather_test.py b/test/python/all_gather_test.py deleted file mode 100644 index c2118dd..0000000 --- a/test/python/all_gather_test.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2006 Douglas Gregor . - -# Use, modification and distribution is subject to the Boost Software -# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -# http:#www.boost.org/LICENSE_1_0.txt) - -# Test all_gather() collective. - -import boost.parallel.mpi as mpi -from generators import * - -def all_gather_test(comm, generator, kind): - if comm.rank == 0: print ("Gathering %s..." % (kind,)), - my_value = generator(comm.rank) - result = mpi.all_gather(comm, my_value) - for p in range(0, comm.size): - assert result[p] == generator(p) - if comm.rank == 0: print "OK." 
- - return - -all_gather_test(mpi.world, int_generator, "integers") -all_gather_test(mpi.world, gps_generator, "GPS positions") -all_gather_test(mpi.world, string_generator, "strings") -all_gather_test(mpi.world, string_list_generator, "list of strings") diff --git a/test/python/all_reduce_test.py b/test/python/all_reduce_test.py deleted file mode 100644 index 5d8fab5..0000000 --- a/test/python/all_reduce_test.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2006 Douglas Gregor . - -# Use, modification and distribution is subject to the Boost Software -# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -# http:#www.boost.org/LICENSE_1_0.txt) - -# Test all_reduce() collective. - -import boost.parallel.mpi as mpi -from generators import * - -def all_reduce_test(comm, generator, kind, op, op_kind): - if comm.rank == 0: - print ("Reducing to %s of %s..." % (op_kind, kind)), - my_value = generator(comm.rank) - result = mpi.all_reduce(comm, my_value, op) - expected_result = generator(0); - for p in range(1, comm.size): - expected_result = op(expected_result, generator(p)) - - assert result == expected_result - if comm.rank == 0: - print "OK." - return - -all_reduce_test(mpi.world, int_generator, "integers", lambda x,y:x + y, "sum") -all_reduce_test(mpi.world, int_generator, "integers", lambda x,y:x * y, "product") -all_reduce_test(mpi.world, string_generator, "strings", lambda x,y:x + y, "concatenation") -all_reduce_test(mpi.world, string_list_generator, "list of strings", lambda x,y:x + y, "concatenation") diff --git a/test/python/all_to_all_test.py b/test/python/all_to_all_test.py deleted file mode 100644 index e7ebeff..0000000 --- a/test/python/all_to_all_test.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2006 Douglas Gregor . - -# Use, modification and distribution is subject to the Boost Software -# License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at -# http:#www.boost.org/LICENSE_1_0.txt) - -# Test all_to_all() collective. - -import boost.parallel.mpi as mpi -from generators import * - -def all_to_all_test(comm, generator, kind): - if comm.rank == 0: - print ("All-to-all transmission of %s..." % (kind,)), - - values = list() - for p in range(0, comm.size): - values.append(generator(p)) - result = mpi.all_to_all(comm, values) - - for p in range(0, comm.size): - assert result[p] == generator(comm.rank) - - if comm.rank == 0: print "OK." - return - -all_to_all_test(mpi.world, int_generator, "integers") -all_to_all_test(mpi.world, gps_generator, "GPS positions") -all_to_all_test(mpi.world, string_generator, "strings") -all_to_all_test(mpi.world, string_list_generator, "list of strings") diff --git a/test/python/broadcast_test.py b/test/python/broadcast_test.py deleted file mode 100644 index 3e2743e..0000000 --- a/test/python/broadcast_test.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2006 Douglas Gregor . - -# Use, modification and distribution is subject to the Boost Software -# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -# http:#www.boost.org/LICENSE_1_0.txt) - -# Test broadcast() collective. - -import boost.parallel.mpi as mpi - -def broadcast_test(comm, value, kind, root): - if comm.rank == root: - print ("Broadcasting %s from root %d..." % (kind, root)), - - got_value = mpi.broadcast(comm, value, root) - assert got_value == value - if comm.rank == root: - print "OK." 
- return - -broadcast_test(mpi.world, 17, 'integer', 0) -broadcast_test(mpi.world, 17, 'integer', 1) -broadcast_test(mpi.world, 'Hello, World!', 'string', 0) -broadcast_test(mpi.world, 'Hello, World!', 'string', 1) -broadcast_test(mpi.world, ['Hello', 'MPI', 'Python', 'World'], - 'list of strings', 0) -broadcast_test(mpi.world, ['Hello', 'MPI', 'Python', 'World'], - 'list of strings', 1) - diff --git a/test/python/gather_test.py b/test/python/gather_test.py deleted file mode 100644 index 4ee4621..0000000 --- a/test/python/gather_test.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (C) 2006 Douglas Gregor . - -# Use, modification and distribution is subject to the Boost Software -# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -# http:#www.boost.org/LICENSE_1_0.txt) - -# Test gather() collective. - -import boost.parallel.mpi as mpi -from generators import * - -def gather_test(comm, generator, kind, root): - if comm.rank == root: - print ("Gathering %s to root %d..." % (kind, root)), - my_value = generator(comm.rank) - result = mpi.gather(comm, my_value, root) - if comm.rank == root: - for p in range(0, comm.size): - assert result[p] == generator(p) - print "OK." - else: - assert result == None - return - -gather_test(mpi.world, int_generator, "integers", 0) -gather_test(mpi.world, int_generator, "integers", 1) -gather_test(mpi.world, gps_generator, "GPS positions", 0) -gather_test(mpi.world, gps_generator, "GPS positions", 1) -gather_test(mpi.world, string_generator, "strings", 0) -gather_test(mpi.world, string_generator, "strings", 1) -gather_test(mpi.world, string_list_generator, "list of strings", 0) -gather_test(mpi.world, string_list_generator, "list of strings", 1) diff --git a/test/python/generators.py b/test/python/generators.py deleted file mode 100644 index d9b55c6..0000000 --- a/test/python/generators.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2006 Douglas Gregor . 
- -# Use, modification and distribution is subject to the Boost Software -# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -# http:#www.boost.org/LICENSE_1_0.txt) - -# Value generators used in the Boost.MPI Python regression tests -def int_generator(p): - return 17 + p - -def gps_generator(p): - return (39 + p, 16, 20.2799) - -def string_generator(p): - result = "%d rosebud" % p; - if p != 1: result = result + 's' - return result - -def string_list_generator(p): - result = list() - for i in range(0,p): - result.append(str(i)) - return result diff --git a/test/python/reduce_test.py b/test/python/reduce_test.py deleted file mode 100644 index 536ac84..0000000 --- a/test/python/reduce_test.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2006 Douglas Gregor . - -# Use, modification and distribution is subject to the Boost Software -# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -# http:#www.boost.org/LICENSE_1_0.txt) - -# Test reduce() collective. - -import boost.parallel.mpi as mpi -from generators import * - -def reduce_test(comm, generator, kind, op, op_kind, root): - if comm.rank == root: - print ("Reducing to %s of %s at root %d..." % (op_kind, kind, root)), - my_value = generator(comm.rank) - result = mpi.reduce(comm, my_value, op, root) - if comm.rank == root: - expected_result = generator(0); - for p in range(1, comm.size): - expected_result = op(expected_result, generator(p)) - assert result == expected_result - print "OK." 
- else: - assert result == None - return - -reduce_test(mpi.world, int_generator, "integers", lambda x,y:x + y, "sum", 0) -reduce_test(mpi.world, int_generator, "integers", lambda x,y:x * y, "product", 1) -reduce_test(mpi.world, int_generator, "integers", min, "minimum", 0) -reduce_test(mpi.world, string_generator, "strings", lambda x,y:x + y, "concatenation", 0) -reduce_test(mpi.world, string_list_generator, "list of strings", lambda x,y:x + y, "concatenation", 0) diff --git a/test/python/ring_test.py b/test/python/ring_test.py deleted file mode 100644 index 303b5a9..0000000 --- a/test/python/ring_test.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2006 Douglas Gregor . - -# Use, modification and distribution is subject to the Boost Software -# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -# http:#www.boost.org/LICENSE_1_0.txt) - -# Test basic communication. - -import boost.parallel.mpi as mpi - -def ring_test(comm, value, kind, root): - next_peer = (comm.rank + 1) % comm.size; - prior_peer = (comm.rank + comm.size - 1) % comm.size; - - if comm.rank == root: - print ("Passing %s around a ring from root %d..." 
% (kind, root)), - comm.send(next_peer, 0, value) - (other_value, stat) = comm.recv(return_status = True) - assert value == other_value - assert stat.source == prior_peer - assert stat.tag == 0 - else: - msg = comm.probe() - other_value = comm.recv(msg.source, msg.tag) - assert value == other_value - comm.send(next_peer, 0, other_value) - - comm.barrier() - if comm.rank == root: - print "OK" - pass - -if mpi.world.size < 2: - print "ERROR: ring_test.py must be executed with more than one process" - mpi.world.abort(-1); - -ring_test(mpi.world, 17, 'integers', 0) -ring_test(mpi.world, 17, 'integers', 1) -ring_test(mpi.world, 'Hello, World!', 'string', 0) -ring_test(mpi.world, 'Hello, World!', 'string', 1) -ring_test(mpi.world, ['Hello', 'MPI', 'Python', 'World'], 'list of strings', 0) -ring_test(mpi.world, ['Hello', 'MPI', 'Python', 'World'], 'list of strings', 1) diff --git a/test/python/scan_test.py b/test/python/scan_test.py deleted file mode 100644 index 0cdc52e..0000000 --- a/test/python/scan_test.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2006 Douglas Gregor . - -# Use, modification and distribution is subject to the Boost Software -# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -# http:#www.boost.org/LICENSE_1_0.txt) - -# Test scan() collective. - -import boost.parallel.mpi as mpi -from generators import * - -def scan_test(comm, generator, kind, op, op_kind): - if comm.rank == 0: - print ("Prefix reduction to %s of %s..." % (op_kind, kind)), - my_value = generator(comm.rank) - result = mpi.scan(comm, my_value, op) - expected_result = generator(0); - for p in range(1, comm.rank+1): - expected_result = op(expected_result, generator(p)) - - assert result == expected_result - if comm.rank == 0: - print "OK." 
- return - -scan_test(mpi.world, int_generator, "integers", lambda x,y:x + y, "sum") -scan_test(mpi.world, int_generator, "integers", lambda x,y:x * y, "product") -scan_test(mpi.world, string_generator, "strings", lambda x,y:x + y, "concatenation") -scan_test(mpi.world, string_list_generator, "list of strings", lambda x,y:x + y, "concatenation") diff --git a/test/python/scatter_test.py b/test/python/scatter_test.py deleted file mode 100644 index 1f75576..0000000 --- a/test/python/scatter_test.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (C) 2006 Douglas Gregor . - -# Use, modification and distribution is subject to the Boost Software -# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -# http:#www.boost.org/LICENSE_1_0.txt) - -# Test scatter() collective. - -import boost.parallel.mpi as mpi -from generators import * - -def scatter_test(comm, generator, kind, root): - if comm.rank == root: - print ("Scattering %s from root %d..." % (kind, root)), - - if comm.rank == root: - values = list() - for p in range(0, comm.size): - values.append(generator(p)) - result = mpi.scatter(comm, values, root = root) - else: - result = mpi.scatter(comm, root = root); - - assert result == generator(comm.rank) - - if comm.rank == root: print "OK." 
- return - -scatter_test(mpi.world, int_generator, "integers", 0) -scatter_test(mpi.world, int_generator, "integers", 1) -scatter_test(mpi.world, gps_generator, "GPS positions", 0) -scatter_test(mpi.world, gps_generator, "GPS positions", 1) -scatter_test(mpi.world, string_generator, "strings", 0) -scatter_test(mpi.world, string_generator, "strings", 1) -scatter_test(mpi.world, string_list_generator, "list of strings", 0) -scatter_test(mpi.world, string_list_generator, "list of strings", 1) diff --git a/test/python/skeleton_content_test.cpp b/test/python/skeleton_content_test.cpp deleted file mode 100644 index 7ccae00..0000000 --- a/test/python/skeleton_content_test.cpp +++ /dev/null @@ -1,37 +0,0 @@ -// (C) Copyright 2006 Douglas Gregor - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Authors: Douglas Gregor - -#include -#include -#include -using namespace boost::python; - -template -boost::python::list list_to_python(const std::list& value) { - boost::python::list result; - for (typename std::list::const_iterator i = value.begin(); - i != value.end(); ++i) - result.append(*i); - return result; -} - -BOOST_PYTHON_MODULE(skeleton_content) -{ - using boost::python::arg; - - class_ >("list_int") - .def("push_back", &std::list::push_back, arg("value")) - .def("pop_back", &std::list::pop_back) - .def("reverse", &std::list::reverse) - .def(boost::python::self == boost::python::self) - .def(boost::python::self != boost::python::self) - .add_property("size", &std::list::size) - .def("to_python", &list_to_python); - - boost::parallel::mpi::python::register_skeleton_and_content >(); -} diff --git a/test/python/skeleton_content_test.py b/test/python/skeleton_content_test.py deleted file mode 100644 index 5a857cd..0000000 --- a/test/python/skeleton_content_test.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (C) 2006 Douglas Gregor 
. - -# Use, modification and distribution is subject to the Boost Software -# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -# http:#www.boost.org/LICENSE_1_0.txt) - -# Test skeleton/content - -import boost.parallel.mpi as mpi -import skeleton_content - -def test_skeleton_and_content(comm, root, manual_broadcast = True): - assert manual_broadcast - - # Setup data - list_size = comm.size + 7 - original_list = skeleton_content.list_int() - for i in range(0,list_size): - original_list.push_back(i) - - if comm.rank == root: - # Broadcast skeleton - print ("Broadcasting integer list skeleton from root %d..." % (root)), - if manual_broadcast: - for p in range(0,comm.size): - if p != comm.rank: - comm.send(p, 0, value = mpi.skeleton(original_list)) - print "OK." - - # Broadcast content - print ("Broadcasting integer list content from root %d..." % (root)), - if manual_broadcast: - for p in range(0,comm.size): - if p != comm.rank: - comm.send(p, 0, value = mpi.get_content(original_list)) - - print "OK." - - # Broadcast reversed content - original_list.reverse() - print ("Broadcasting reversed integer list content from root %d..." % (root)), - if manual_broadcast: - for p in range(0,comm.size): - if p != comm.rank: - comm.send(p, 0, value = mpi.get_content(original_list)) - - print "OK." - else: - # Allocate some useless data, to try to get the addresses of - # the underlying lists used later to be different across - # processors. 
- junk_list = skeleton_content.list_int() - for i in range(0,comm.rank * 3 + 1): - junk_list.push_back(i) - - # Receive the skeleton of the list - if manual_broadcast: - transferred_list_skeleton = comm.recv(root, 0) - assert transferred_list_skeleton.object.size == list_size - - # Receive the content and check it - transferred_list = transferred_list_skeleton.object - if manual_broadcast: - comm.recv(root, 0, mpi.get_content(transferred_list)) - assert transferred_list == original_list - - # Receive the content (again) and check it - original_list.reverse() - if manual_broadcast: - comm.recv(root, 0, mpi.get_content(transferred_list)) - assert transferred_list == original_list - - -test_skeleton_and_content(mpi.world, 0) -test_skeleton_and_content(mpi.world, 1) diff --git a/test/reduce_test.cpp b/test/reduce_test.cpp deleted file mode 100644 index d5538b1..0000000 --- a/test/reduce_test.cpp +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright (C) 2005, 2006 Douglas Gregor . - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// A test of the reduce() collective. -#include -#include -#include -#include -#include -#include -#include -#include -#include - -using boost::mpi::communicator; - -// A simple point class that we can build, add, compare, and -// serialize. 
-struct point -{ - point() : x(0), y(0), z(0) { } - point(int x, int y, int z) : x(x), y(y), z(z) { } - - int x; - int y; - int z; - - private: - template - void serialize(Archiver& ar, unsigned int /*version*/) - { - ar & x & y & z; - } - - friend class boost::serialization::access; -}; - -std::ostream& operator<<(std::ostream& out, const point& p) -{ - return out << p.x << ' ' << p.y << ' ' << p.z; -} - -bool operator==(const point& p1, const point& p2) -{ - return p1.x == p2.x && p1.y == p2.y && p1.z == p2.z; -} - -bool operator!=(const point& p1, const point& p2) -{ - return !(p1 == p2); -} - -point operator+(const point& p1, const point& p2) -{ - return point(p1.x + p2.x, p1.y + p2.y, p1.z + p2.z); -} - -namespace boost { namespace mpi { - - template <> - struct is_mpi_datatype : public mpl::true_ { }; - -} } // end namespace boost::mpi - -template -void -reduce_test(const communicator& comm, Generator generator, - const char* type_kind, Op op, const char* op_kind, - typename Generator::result_type init, - int root = -1) -{ - typedef typename Generator::result_type value_type; - value_type value = generator(comm.rank()); - - if (root == -1) { - for (root = 0; root < comm.size(); ++root) - reduce_test(comm, generator, type_kind, op, op_kind, init, root); - } else { - using boost::mpi::reduce; - - if (comm.rank() == root) { - std::cout << "Reducing to " << op_kind << " of " << type_kind - << " at root " << root << "..."; - std::cout.flush(); - - value_type result_value; - reduce(comm, value, result_value, op, root); - - // Compute expected result - std::vector generated_values; - for (int p = 0; p < comm.size(); ++p) - generated_values.push_back(generator(p)); - value_type expected_result = std::accumulate(generated_values.begin(), - generated_values.end(), - init, op); - BOOST_CHECK(result_value == expected_result); - if (result_value == expected_result) - std::cout << "OK." 
<< std::endl; - } else { - reduce(comm, value, op, root); - } - } - - (comm.barrier)(); -} - -// Generates integers to test with reduce() -struct int_generator -{ - typedef int result_type; - - int_generator(int base = 1) : base(base) { } - - int operator()(int p) const { return base + p; } - - private: - int base; -}; - -// Generate points to test with reduce() -struct point_generator -{ - typedef point result_type; - - point_generator(point origin) : origin(origin) { } - - point operator()(int p) const - { - return point(origin.x + 1, origin.y + 1, origin.z + 1); - } - - private: - point origin; -}; - -struct string_generator -{ - typedef std::string result_type; - - std::string operator()(int p) const - { - std::string result = boost::lexical_cast(p); - result += " rosebud"; - if (p != 1) result += 's'; - return result; - } -}; - -struct secret_int_bit_and -{ - int operator()(int x, int y) const { return x & y; } -}; - -struct wrapped_int -{ - wrapped_int() : value(0) { } - explicit wrapped_int(int value) : value(value) { } - - template - void serialize(Archive& ar, unsigned int /* version */) - { - ar & value; - } - - int value; -}; - -wrapped_int operator+(const wrapped_int& x, const wrapped_int& y) -{ - return wrapped_int(x.value + y.value); -} - -bool operator==(const wrapped_int& x, const wrapped_int& y) -{ - return x.value == y.value; -} - -// Generates wrapped_its to test with reduce() -struct wrapped_int_generator -{ - typedef wrapped_int result_type; - - wrapped_int_generator(int base = 1) : base(base) { } - - wrapped_int operator()(int p) const { return wrapped_int(base + p); } - - private: - int base; -}; - -namespace boost { namespace mpi { - -// Make std::plus commutative. 
-template<> -struct is_commutative, wrapped_int> - : mpl::true_ { }; - -} } // end namespace boost::mpi - -int test_main(int argc, char* argv[]) -{ - using namespace boost::mpi; - environment env(argc, argv); - - communicator comm; - - // Built-in MPI datatypes with built-in MPI operations - reduce_test(comm, int_generator(), "integers", std::plus(), "sum", 0); - reduce_test(comm, int_generator(), "integers", std::multiplies(), - "product", 1); - reduce_test(comm, int_generator(), "integers", maximum(), - "maximum", 0); - reduce_test(comm, int_generator(), "integers", minimum(), - "minimum", 2); - - // User-defined MPI datatypes with operations that have the - // same name as built-in operations. - reduce_test(comm, point_generator(point(0,0,0)), "points", - std::plus(), "sum", point()); - - // Built-in MPI datatypes with user-defined operations - reduce_test(comm, int_generator(17), "integers", secret_int_bit_and(), - "bitwise and", -1); - - // Arbitrary types with user-defined, commutative operations. - reduce_test(comm, wrapped_int_generator(17), "wrapped integers", - std::plus(), "sum", wrapped_int(0)); - - // Arbitrary types with (non-commutative) user-defined operations - reduce_test(comm, string_generator(), "strings", - std::plus(), "concatenation", std::string()); - - return 0; -} diff --git a/test/ring_test.cpp b/test/ring_test.cpp deleted file mode 100644 index 97897ec..0000000 --- a/test/ring_test.cpp +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (C) 2005, 2006 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// A test of the communicator that passes data around a ring and -// verifies that the same data makes it all the way. Should test all -// of the various kinds of data that can be sent (primitive types, POD -// types, serializable objects, etc.) 
-#include -#include -#include -#include -#include "gps_position.hpp" -#include -#include - -using boost::mpi::communicator; -using boost::mpi::status; - -template -void -ring_test(const communicator& comm, const T& pass_value, const char* kind, - int root = 0) -{ - T transferred_value; - - int rank = comm.rank(); - int size = comm.size(); - - if (rank == root) { - std::cout << "Passing " << kind << " around a ring from root " << root - << "..."; - comm.send((rank + 1) % size, 0, pass_value); - comm.recv((rank + size - 1) % size, 0, transferred_value); - BOOST_CHECK(transferred_value == pass_value); - if (transferred_value == pass_value) std::cout << " OK." << std::endl; - } else { - comm.recv((rank + size - 1) % size, 0, transferred_value); - BOOST_CHECK(transferred_value == pass_value); - comm.send((rank + 1) % size, 0, transferred_value); - } - - (comm.barrier)(); -} - - -template -void -ring_array_test(const communicator& comm, const T* pass_values, - int n, const char* kind, int root = 0) -{ - T* transferred_values = new T[n]; - int rank = comm.rank(); - int size = comm.size(); - - if (rank == root) { - - std::cout << "Passing " << kind << " array around a ring from root " - << root << "..."; - comm.send((rank + 1) % size, 0, pass_values, n); - comm.recv((rank + size - 1) % size, 0, transferred_values, n); - bool okay = std::equal(pass_values, pass_values + n, - transferred_values); - BOOST_CHECK(okay); - if (okay) std::cout << " OK." 
<< std::endl; - } else { - status stat = comm.probe(boost::mpi::any_source, 0); - boost::optional num_values = stat.template count(); - if (boost::mpi::is_mpi_datatype()) - BOOST_CHECK(num_values && *num_values == n); - else - BOOST_CHECK(!num_values || *num_values == n); - comm.recv(stat.source(), 0, transferred_values, n); - BOOST_CHECK(std::equal(pass_values, pass_values + n, - transferred_values)); - comm.send((rank + 1) % size, 0, transferred_values, n); - } - (comm.barrier)(); - delete [] transferred_values; -} - -int test_main(int argc, char* argv[]) -{ - boost::mpi::environment env(argc, argv); - - communicator comm; - if (comm.size() == 1) { - std::cerr << "ERROR: Must run the ring test with more than one process." - << std::endl; - MPI_Abort(comm, -1); - } - - // Check transfer of individual objects - ring_test(comm, 17, "integers", 0); - ring_test(comm, 17, "integers", 1); - ring_test(comm, gps_position(39,16,20.2799), "GPS positions", 0); - ring_test(comm, gps_position(26,25,30.0), "GPS positions", 1); - ring_test(comm, std::string("Rosie"), "string", 0); - - std::list strings; - strings.push_back("Hello"); - strings.push_back("MPI"); - strings.push_back("World"); - ring_test(comm, strings, "list of strings", 1); - - // Check transfer of arrays - int int_array[2] = { 17, 42 }; - ring_array_test(comm, int_array, 2, "integer", 1); - gps_position gps_position_array[2] = { - gps_position(39,16,20.2799), - gps_position(26,25,30.0) - }; - ring_array_test(comm, gps_position_array, 2, "GPS position", 1); - - std::string string_array[3] = { "Hello", "MPI", "World" }; - ring_array_test(comm, string_array, 3, "string", 0); - ring_array_test(comm, string_array, 3, "string", 1); - - return 0; -} diff --git a/test/scan_test.cpp b/test/scan_test.cpp deleted file mode 100644 index 7ba162a..0000000 --- a/test/scan_test.cpp +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright (C) 2005, 2006 Douglas Gregor . 
- -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// A test of the scan() collective. -#include -#include -#include -#include -#include -#include -#include -#include -#include - -using boost::mpi::communicator; - -// A simple point class that we can build, add, compare, and -// serialize. -struct point -{ - point() : x(0), y(0), z(0) { } - point(int x, int y, int z) : x(x), y(y), z(z) { } - - int x; - int y; - int z; - - private: - template - void serialize(Archiver& ar, unsigned int /*version*/) - { - ar & x & y & z; - } - - friend class boost::serialization::access; -}; - -std::ostream& operator<<(std::ostream& out, const point& p) -{ - return out << p.x << ' ' << p.y << ' ' << p.z; -} - -bool operator==(const point& p1, const point& p2) -{ - return p1.x == p2.x && p1.y == p2.y && p1.z == p2.z; -} - -bool operator!=(const point& p1, const point& p2) -{ - return !(p1 == p2); -} - -point operator+(const point& p1, const point& p2) -{ - return point(p1.x + p2.x, p1.y + p2.y, p1.z + p2.z); -} - -namespace boost { namespace mpi { - - template <> - struct is_mpi_datatype : public mpl::true_ { }; - -} } // end namespace boost::mpi - -template -void -scan_test(const communicator& comm, Generator generator, - const char* type_kind, Op op, const char* op_kind) -{ - typedef typename Generator::result_type value_type; - value_type value = generator(comm.rank()); - using boost::mpi::scan; - - if (comm.rank() == 0) { - std::cout << "Prefix reducing to " << op_kind << " of " << type_kind - << "..."; - std::cout.flush(); - } - - value_type result_value; - scan(comm, value, result_value, op); - BOOST_CHECK(scan(comm, value, op) == result_value); - - // Compute expected result - std::vector generated_values; - for (int p = 0; p < comm.size(); ++p) - generated_values.push_back(generator(p)); - std::vector expected_results(comm.size()); - 
std::partial_sum(generated_values.begin(), generated_values.end(), - expected_results.begin(), op); - BOOST_CHECK(result_value == expected_results[comm.rank()]); - if (comm.rank() == 0) std::cout << "Done." << std::endl; - - (comm.barrier)(); -} - -// Generates integers to test with scan() -struct int_generator -{ - typedef int result_type; - - int_generator(int base = 1) : base(base) { } - - int operator()(int p) const { return base + p; } - - private: - int base; -}; - -// Generate points to test with scan() -struct point_generator -{ - typedef point result_type; - - point_generator(point origin) : origin(origin) { } - - point operator()(int p) const - { - return point(origin.x + 1, origin.y + 1, origin.z + 1); - } - - private: - point origin; -}; - -struct string_generator -{ - typedef std::string result_type; - - std::string operator()(int p) const - { - std::string result = boost::lexical_cast(p); - result += " rosebud"; - if (p != 1) result += 's'; - return result; - } -}; - -struct secret_int_bit_and -{ - int operator()(int x, int y) const { return x & y; } -}; - -struct wrapped_int -{ - wrapped_int() : value(0) { } - explicit wrapped_int(int value) : value(value) { } - - template - void serialize(Archive& ar, unsigned int /* version */) - { - ar & value; - } - - int value; -}; - -wrapped_int operator+(const wrapped_int& x, const wrapped_int& y) -{ - return wrapped_int(x.value + y.value); -} - -bool operator==(const wrapped_int& x, const wrapped_int& y) -{ - return x.value == y.value; -} - -// Generates wrapped_its to test with scan() -struct wrapped_int_generator -{ - typedef wrapped_int result_type; - - wrapped_int_generator(int base = 1) : base(base) { } - - wrapped_int operator()(int p) const { return wrapped_int(base + p); } - - private: - int base; -}; - -namespace boost { namespace mpi { - -// Make std::plus commutative. 
-template<> -struct is_commutative, wrapped_int> - : mpl::true_ { }; - -} } // end namespace boost::mpi - -int test_main(int argc, char* argv[]) -{ - using namespace boost::mpi; - environment env(argc, argv); - - communicator comm; - - // Built-in MPI datatypes with built-in MPI operations - scan_test(comm, int_generator(), "integers", std::plus(), "sum"); - scan_test(comm, int_generator(), "integers", std::multiplies(), - "product"); - scan_test(comm, int_generator(), "integers", maximum(), - "maximum"); - scan_test(comm, int_generator(), "integers", minimum(), - "minimum"); - - // User-defined MPI datatypes with operations that have the - // same name as built-in operations. - scan_test(comm, point_generator(point(0,0,0)), "points", - std::plus(), "sum"); - - // Built-in MPI datatypes with user-defined operations - scan_test(comm, int_generator(17), "integers", secret_int_bit_and(), - "bitwise and"); - - // Arbitrary types with user-defined, commutative operations. - scan_test(comm, wrapped_int_generator(17), "wrapped integers", - std::plus(), "sum"); - - // Arbitrary types with (non-commutative) user-defined operations - scan_test(comm, string_generator(), "strings", - std::plus(), "concatenation"); - - return 0; -} diff --git a/test/scatter_test.cpp b/test/scatter_test.cpp deleted file mode 100644 index 3932e62..0000000 --- a/test/scatter_test.cpp +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (C) 2005, 2006 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// A test of the scatter() collective. 
-#include -#include -#include -#include -#include -#include "gps_position.hpp" -#include -#include -#include -#include - -using boost::mpi::communicator; - -using boost::mpi::packed_skeleton_iarchive; -using boost::mpi::packed_skeleton_oarchive; - -template -void -scatter_test(const communicator& comm, Generator generator, - const char* kind, int root = -1) -{ - typedef typename Generator::result_type value_type; - - if (root == -1) { - for (root = 0; root < comm.size(); ++root) - scatter_test(comm, generator, kind, root); - } else { - using boost::mpi::scatter; - - value_type value; - - if (comm.rank() == root) { - std::vector values; - - for (int p = 0; p < comm.size(); ++p) - values.push_back(generator(p)); - - if (comm.rank() == root) { - std::cout << "Scattering " << kind << " from root " << root << "..."; - std::cout.flush(); - } - - scatter(comm, values, value, root); - } else { - scatter(comm, value, root); - } - - BOOST_CHECK(value == generator(comm.rank())); - } - - (comm.barrier)(); -} - -// Generates integers to test with scatter() -struct int_generator -{ - typedef int result_type; - - int operator()(int p) const { return 17 + p; } -}; - -// Generates GPS positions to test with scatter() -struct gps_generator -{ - typedef gps_position result_type; - - gps_position operator()(int p) const - { - return gps_position(39 + p, 16, 20.2799); - } -}; - -struct string_generator -{ - typedef std::string result_type; - - std::string operator()(int p) const - { - std::string result = boost::lexical_cast(p); - result += " rosebud"; - if (p != 1) result += 's'; - return result; - } -}; - -struct string_list_generator -{ - typedef std::list result_type; - - std::list operator()(int p) const - { - std::list result; - for (int i = 0; i <= p; ++i) { - std::string value = boost::lexical_cast(i); - result.push_back(value); - } - return result; - } -}; - -int test_main(int argc, char* argv[]) -{ - boost::mpi::environment env(argc, argv); - - communicator comm; - 
scatter_test(comm, int_generator(), "integers"); - scatter_test(comm, gps_generator(), "GPS positions"); - scatter_test(comm, string_generator(), "string"); - scatter_test(comm, string_list_generator(), "list of strings"); - - return 0; -} diff --git a/test/skeleton_content_test.cpp b/test/skeleton_content_test.cpp deleted file mode 100644 index 36a2bc5..0000000 --- a/test/skeleton_content_test.cpp +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2005 Douglas Gregor. - -// Use, modification and distribution is subject to the Boost Software -// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// A test of the communicator that transmits skeletons and -// content for data types. -#include -#include -#include -#include -#include -#include -#include -#include -#include - -using boost::mpi::communicator; - -using boost::mpi::packed_skeleton_iarchive; -using boost::mpi::packed_skeleton_oarchive; - -void -test_skeleton_and_content(const communicator& comm, int root, - bool manual_broadcast) -{ - using boost::mpi::skeleton; - using boost::mpi::content; - using boost::mpi::get_content; - using boost::make_counting_iterator; - using boost::mpi::broadcast; - - typedef std::list::iterator iterator; - - int list_size = comm.size() + 7; - if (comm.rank() == root) { - // Fill in the seed data - std::list original_list; - for (int i = 0; i < list_size; ++i) - original_list.push_back(i); - - std::cout << "Broadcasting integer list skeleton from root " << root - << "..."; - if (manual_broadcast) { - // Broadcast the skeleton (manually) - for (int p = 0; p < comm.size(); ++p) - if (p != root) comm.send(p, 0, skeleton(original_list)); - } else { - broadcast(comm, skeleton(original_list), root); - } - std::cout << "OK." 
<< std::endl; - - // Broadcast the content (manually) - std::cout << "Broadcasting integer list content from root " << root - << "..."; - { - content c = get_content(original_list); - for (int p = 0; p < comm.size(); ++p) - if (p != root) comm.send(p, 1, c); - } - std::cout << "OK." << std::endl; - - // Reverse the list, broadcast the content again - std::reverse(original_list.begin(), original_list.end()); - std::cout << "Broadcasting reversed integer list content from root " - << root << "..."; - { - content c = get_content(original_list); - for (int p = 0; p < comm.size(); ++p) - if (p != root) comm.send(p, 2, c); - } - std::cout << "OK." << std::endl; - } else { - // Allocate some useless data, to try to get the addresses of the - // list's used later to be different across processes. - std::list junk_list(comm.rank() * 3 + 1, 17); - - // Receive the skeleton to build up the transferred list - std::list transferred_list; - if (manual_broadcast) { - comm.recv(root, 0, skeleton(transferred_list)); - } else { - broadcast(comm, skeleton(transferred_list), root); - } - BOOST_CHECK((int)transferred_list.size() == list_size); - - // Receive the content and check it - comm.recv(root, 1, get_content(transferred_list)); - BOOST_CHECK(std::equal(make_counting_iterator(0), - make_counting_iterator(list_size), - transferred_list.begin())); - - // Receive the reversed content and check it - comm.recv(root, 2, get_content(transferred_list)); - BOOST_CHECK(std::equal(make_counting_iterator(0), - make_counting_iterator(list_size), - transferred_list.rbegin())); - } - - (comm.barrier)(); -} - -void -test_skeleton_and_content_nonblocking(const communicator& comm, int root) -{ - using boost::mpi::skeleton; - using boost::mpi::content; - using boost::mpi::get_content; - using boost::make_counting_iterator; - using boost::mpi::broadcast; - using boost::mpi::request; - using boost::mpi::wait_all; - - typedef std::list::iterator iterator; - - int list_size = comm.size() + 7; - if 
(comm.rank() == root) { - // Fill in the seed data - std::list original_list; - for (int i = 0; i < list_size; ++i) - original_list.push_back(i); - - std::cout << "Non-blocking broadcast of integer list skeleton from root " << root - << "..."; - - // Broadcast the skeleton (manually) - { - std::vector reqs; - for (int p = 0; p < comm.size(); ++p) - if (p != root) - reqs.push_back(comm.isend(p, 0, skeleton(original_list))); - wait_all(reqs.begin(), reqs.end()); - } - std::cout << "OK." << std::endl; - - // Broadcast the content (manually) - std::cout << "Non-blocking broadcast of integer list content from root " << root - << "..."; - { - content c = get_content(original_list); - std::vector reqs; - for (int p = 0; p < comm.size(); ++p) - if (p != root) reqs.push_back(comm.isend(p, 1, c)); - wait_all(reqs.begin(), reqs.end()); - } - std::cout << "OK." << std::endl; - - // Reverse the list, broadcast the content again - std::reverse(original_list.begin(), original_list.end()); - std::cout << "Non-blocking broadcast of reversed integer list content from root " - << root << "..."; - { - std::vector reqs; - content c = get_content(original_list); - for (int p = 0; p < comm.size(); ++p) - if (p != root) reqs.push_back(comm.isend(p, 2, c)); - wait_all(reqs.begin(), reqs.end()); - } - std::cout << "OK." << std::endl; - } else { - // Allocate some useless data, to try to get the addresses of the - // list's used later to be different across processes. 
- std::list junk_list(comm.rank() * 3 + 1, 17); - - // Receive the skeleton to build up the transferred list - std::list transferred_list; - request req = comm.irecv(root, 0, skeleton(transferred_list)); - req.wait(); - BOOST_CHECK((int)transferred_list.size() == list_size); - - // Receive the content and check it - req = comm.irecv(root, 1, get_content(transferred_list)); - req.wait(); - BOOST_CHECK(std::equal(make_counting_iterator(0), - make_counting_iterator(list_size), - transferred_list.begin())); - - // Receive the reversed content and check it - req = comm.irecv(root, 2, get_content(transferred_list)); - req.wait(); - BOOST_CHECK(std::equal(make_counting_iterator(0), - make_counting_iterator(list_size), - transferred_list.rbegin())); - } - - (comm.barrier)(); -} - -int test_main(int argc, char* argv[]) -{ - boost::mpi::environment env(argc, argv); - - communicator comm; - if (comm.size() == 1) { - std::cerr << "ERROR: Must run the skeleton and content test with more " - "than one process." - << std::endl; - MPI_Abort(comm, -1); - } - - test_skeleton_and_content(comm, 0, true); - test_skeleton_and_content(comm, 0, false); - test_skeleton_and_content(comm, 1, true); - test_skeleton_and_content(comm, 1, false); - test_skeleton_and_content_nonblocking(comm, 0); - test_skeleton_and_content_nonblocking(comm, 1); - - return 0; -}