
Move MPI library to the top level

[SVN r36741]
Douglas Gregor
2007-01-17 02:48:51 +00:00
parent 5cbf8330a2
commit 28d9c668cb
104 changed files with 3393 additions and 744 deletions


@@ -14,27 +14,27 @@ import python ;
if [ mpi.configured ]
{
project boost/parallel
project boost/mpi
: source-location ../src
;
lib boost_mpi
:
mpi/broadcast.cpp
mpi/communicator.cpp
mpi/computation_tree.cpp
mpi/content_oarchive.cpp
mpi/environment.cpp
mpi/mpi_datatype_cache.cpp
mpi/mpi_datatype_oarchive.cpp
mpi/packed_iarchive.cpp
mpi/packed_oarchive.cpp
mpi/packed_skeleton_iarchive.cpp
mpi/packed_skeleton_oarchive.cpp
mpi/point_to_point.cpp
mpi/request.cpp
mpi/text_skeleton_oarchive.cpp
mpi/timer.cpp
broadcast.cpp
communicator.cpp
computation_tree.cpp
content_oarchive.cpp
environment.cpp
mpi_datatype_cache.cpp
mpi_datatype_oarchive.cpp
packed_iarchive.cpp
packed_oarchive.cpp
packed_skeleton_iarchive.cpp
packed_skeleton_oarchive.cpp
point_to_point.cpp
request.cpp
text_skeleton_oarchive.cpp
timer.cpp
: # Requirements
<library>../../serialization/build//boost_serialization
<library>/mpi//mpi [ mpi.extra-requirements ]
@@ -49,7 +49,7 @@ lib boost_mpi
{
lib boost_mpi_python
: # Sources
mpi/python/serialize.cpp
python/serialize.cpp
: # Requirements
<library>boost_mpi
<library>/boost/python//boost_python
@@ -57,23 +57,23 @@ lib boost_mpi
python-extension mpi
: # Sources
mpi/python/collectives.cpp
mpi/python/py_communicator.cpp
mpi/python/datatypes.cpp
mpi/python/documentation.cpp
mpi/python/py_environment.cpp
mpi/python/exception.cpp
mpi/python/module.cpp
mpi/python/py_request.cpp
mpi/python/skeleton_and_content.cpp
mpi/python/status.cpp
mpi/python/py_timer.cpp
python/collectives.cpp
python/py_communicator.cpp
python/datatypes.cpp
python/documentation.cpp
python/py_environment.cpp
python/exception.cpp
python/module.cpp
python/py_request.cpp
python/skeleton_and_content.cpp
python/status.cpp
python/py_timer.cpp
: # Requirements
<library>boost_mpi_python
<library>boost_mpi
<library>/boost/python//boost_python
;
}
}
}
else
{


@@ -2,32 +2,32 @@
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt.)
project boost/parallel/mpi ;
project boost/mpi ;
using quickbook ;
using doxygen ;
doxygen mpi_autodoc
: ../../../boost/parallel/mpi.hpp
../../../boost/parallel/mpi/allocator.hpp
../../../boost/parallel/mpi/collectives.hpp
../../../boost/parallel/mpi/collectives_fwd.hpp
../../../boost/parallel/mpi/communicator.hpp
../../../boost/parallel/mpi/config.hpp
../../../boost/parallel/mpi/datatype.hpp
../../../boost/parallel/mpi/datatype_fwd.hpp
../../../boost/parallel/mpi/environment.hpp
../../../boost/parallel/mpi/exception.hpp
../../../boost/parallel/mpi/nonblocking.hpp
../../../boost/parallel/mpi/operations.hpp
../../../boost/parallel/mpi/packed_iarchive.hpp
../../../boost/parallel/mpi/packed_oarchive.hpp
../../../boost/parallel/mpi/skeleton_and_content.hpp
../../../boost/parallel/mpi/skeleton_and_content_fwd.hpp
../../../boost/parallel/mpi/status.hpp
../../../boost/parallel/mpi/request.hpp
../../../boost/parallel/mpi/timer.hpp
../../../boost/parallel/mpi/python.hpp
: ../../../boost/mpi.hpp
../../../boost/mpi/allocator.hpp
../../../boost/mpi/collectives.hpp
../../../boost/mpi/collectives_fwd.hpp
../../../boost/mpi/communicator.hpp
../../../boost/mpi/config.hpp
../../../boost/mpi/datatype.hpp
../../../boost/mpi/datatype_fwd.hpp
../../../boost/mpi/environment.hpp
../../../boost/mpi/exception.hpp
../../../boost/mpi/nonblocking.hpp
../../../boost/mpi/operations.hpp
../../../boost/mpi/packed_iarchive.hpp
../../../boost/mpi/packed_oarchive.hpp
../../../boost/mpi/skeleton_and_content.hpp
../../../boost/mpi/skeleton_and_content_fwd.hpp
../../../boost/mpi/status.hpp
../../../boost/mpi/request.hpp
../../../boost/mpi/timer.hpp
../../../boost/mpi/python.hpp
: <doxygen:param>MACRO_EXPANSION=YES
<doxygen:param>MACRO_ONLY_PREDEF=YES
<doxygen:param>"PREDEFINED=BOOST_MPI_HAS_MEMORY_ALLOCATION= BOOST_MPI_HAS_NOARG_INITIALIZATION= MPI_VERSION=2 BOOST_MPI_DOXYGEN="


@@ -5,8 +5,8 @@
A generic, user-friendly interface to MPI, the Message
Passing Interface.
]
[id parallel/mpi]
[dirname parallel]
[id mpi]
[dirname mpi]
[license
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
@@ -75,8 +75,8 @@ underlying C MPI library. Boost.MPI currently supports:
types with send and receive operations, with blocking and
non-blocking interfaces.
* Collective communication: Boost.MPI supports collective
operations such as [funcref boost::parallel::mpi::reduce `reduce`]
and [funcref boost::parallel::mpi::gather `gather`] with both
operations such as [funcref boost::mpi::reduce `reduce`]
and [funcref boost::mpi::gather `gather`] with both
built-in and user-defined data types and function objects.
* MPI Datatypes: Boost.MPI can build MPI data types for
user-defined types using the _Serialization_ library.
@@ -87,7 +87,7 @@ underlying C MPI library. Boost.MPI currently supports:
structure needs to be transmitted many times.
Boost.MPI can be accessed either through its native C++ bindings, or
through its alternative, [link parallel/mpi.python Python interface].
through its alternative, [link mpi.python Python interface].
[endsect]
@@ -204,7 +204,7 @@ copy the resulting `bjam` executable some place convenient.
For many users of _LAM_, _MPICH_, or _OpenMPI_, configuration is
automatic. First, enter the directory
`libs/parallel/build`, then run `bjam` with no
`libs/mpi/build`, then run `bjam` with no
arguments. The automatic configuration requires that (1) your MPI
implementation provide a wrapper compiler `mpiCC` in your path, and
(2) that the default compiler for your system is the compiler
@@ -269,7 +269,7 @@ bjam -sMPICXX=/opt/lam-mpi-7.1.1/bin/mpiCC install
This command will install libraries into the Python executable prefix
(if Python is enabled), or `/usr`, by default. It should be run from
the `libs/parallel/build` subdirectory. There are several command-line
the `libs/mpi/build` subdirectory. There are several command-line
options that can change the installation location:
* `--exec-prefix=PATH`: Sets the executable prefix to
@@ -293,13 +293,13 @@ mpic++ -I/path/to/boost/mpi my_application.cpp -Llibdir \
]
[endsect]
If you plan to use the [link parallel/mpi.python Python bindings] for
If you plan to use the [link mpi.python Python bindings] for
Boost.MPI in conjunction with the C++ Boost.MPI, you will also need to
link against the boost_mpi_python library, e.g., by adding
`-lboost_mpi_python-gcc-mt-1_35` to your link command. This step will
only be necessary if you intend to [link parallel/mpi.python_user_data
only be necessary if you intend to [link mpi.python_user_data
register C++ types] or use the [link
parallel/mpi.python_skeleton_content skeleton/content mechanism] from
mpi.python_skeleton_content skeleton/content mechanism] from
within Python.
[section:testing Testing Boost.MPI]
@@ -308,11 +308,11 @@ If you would like to verify that Boost.MPI is working properly with
your compiler, platform, and MPI implementation, a self-contained test
suite is available. To use this test suite, you will need to first
configure Boost.Build for your MPI environment and then run `bjam
test` in `libs/parallel/test` (possibly with some extra options). For
test` in `libs/mpi/test` (possibly with some extra options). For
_LAM_, you will need to run `lamboot` before running `bjam`. For
_MPICH_, you may need to create a machine file and pass
`-sMPIRUN_FLAGS="-machinefile <filename>"` to Boost.Jam; see the
section on [link parallel/mpi.config configuration] for more
section on [link mpi.config configuration] for more
information. If testing succeeds, `bjam` will exit without errors.
[endsect]
@@ -325,28 +325,28 @@ A Boost.MPI program consists of many cooperating processes (possibly
running on different computers) that communicate among themselves by
passing messages. Boost.MPI is a library (as is the lower-level MPI),
not a language, so the first step in a Boost.MPI program is to create an
[classref boost::parallel::mpi::environment mpi::environment] object
[classref boost::mpi::environment mpi::environment] object
that initializes the MPI environment and enables communication among
the processes. The [classref boost::parallel::mpi::environment
the processes. The [classref boost::mpi::environment
mpi::environment] object is initialized with the program arguments
(which it may modify) in your main program. The creation of this
object initializes MPI, and its destruction will finalize MPI. In the
vast majority of Boost.MPI programs, an instance of [classref
boost::parallel::mpi::environment mpi::environment] will be declared
boost::mpi::environment mpi::environment] will be declared
in `main` at the very beginning of the program.
Communication with MPI always occurs over a *communicator*,
which can be created by simply default-constructing an object of type
[classref boost::parallel::mpi::communicator mpi::communicator]. This
[classref boost::mpi::communicator mpi::communicator]. This
communicator can then be queried to determine how many processes are
running (the "size" of the communicator) and to give a unique number
to each process, from zero to the size of the communicator (i.e., the
"rank" of the process):
#include <boost/parallel/mpi/environment.hpp>
#include <boost/parallel/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/communicator.hpp>
#include <iostream>
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
int main(int argc, char* argv[])
{
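Since the hunk above shows only the renamed headers, here is a minimal sketch of the complete program the surrounding prose describes (the body is reconstructed from the prose, using the new `boost/mpi` paths, not taken from the diff itself):

#include <boost/mpi/environment.hpp>
#include <boost/mpi/communicator.hpp>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);  // initializes MPI; its destructor finalizes MPI
  mpi::communicator world;           // default construction yields the "world" communicator
  std::cout << "I am process " << world.rank() << " of "
            << world.size() << "." << std::endl;
  return 0;
}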
@@ -391,10 +391,10 @@ same sender.
The following program uses two MPI processes to write "Hello, world!"
to the screen (`hello_world.cpp`):
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <iostream>
#include <boost/serialization/string.hpp>
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
int main(int argc, char* argv[])
{
@@ -422,9 +422,9 @@ processor (rank 1) using tag 0. The second processor prints the string
it receives, along with a comma, then passes the message "world" back
to processor 0 with a different tag. The first processor then writes
this message with the "!" and exits. All sends are accomplished with
the [memberref boost::parallel::mpi::communicator::send
the [memberref boost::mpi::communicator::send
communicator::send] method and all receives use a corresponding
[memberref boost::parallel::mpi::communicator::recv
[memberref boost::mpi::communicator::recv
communicator::recv] call.
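Condensed to its essentials, the exchange described above might look like the following sketch (the full program is `hello_world.cpp`; the diff shows only its changed headers):

#include <boost/mpi.hpp>
#include <boost/serialization/string.hpp>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;
  if (world.rank() == 0) {
    world.send(1, 0, std::string("Hello"));  // to rank 1, tag 0
    std::string msg;
    world.recv(1, 1, msg);                   // from rank 1, tag 1
    std::cout << msg << "!" << std::endl;
  } else {
    std::string msg;
    world.recv(0, 0, msg);
    std::cout << msg << ", ";
    std::cout.flush();
    world.send(0, 1, std::string("world"));
  }
  return 0;
}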
[section:nonblocking Non-blocking communication]
@@ -462,10 +462,10 @@ parallelism. We can use non-blocking communication to ensure that the
two messages are transmitted simultaneously
(`hello_world_nonblocking.cpp`):
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <iostream>
#include <boost/serialization/string.hpp>
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
int main(int argc, char* argv[])
{
@@ -492,20 +492,20 @@ two messages are transmitted simultaneously
}
We have replaced calls to the [memberref
boost::parallel::mpi::communicator::send communicator::send] and
[memberref boost::parallel::mpi::communicator::recv
boost::mpi::communicator::send communicator::send] and
[memberref boost::mpi::communicator::recv
communicator::recv] members with similar calls to their non-blocking
counterparts, [memberref boost::parallel::mpi::communicator::isend
counterparts, [memberref boost::mpi::communicator::isend
communicator::isend] and [memberref
boost::parallel::mpi::communicator::irecv communicator::irecv]. The
boost::mpi::communicator::irecv communicator::irecv]. The
prefix *i* indicates that the operations return immediately with a
[classref boost::parallel::mpi::request mpi::request] object, which
[classref boost::mpi::request mpi::request] object, which
allows one to query the status of a communication request (see the
[memberref boost::parallel::mpi::request::test test] method) or wait
[memberref boost::mpi::request::test test] method) or wait
until it has completed (see the [memberref
boost::parallel::mpi::request::wait wait] method). Multiple requests
boost::mpi::request::wait wait] method). Multiple requests
can be completed at the same time with the [funcref
boost::parallel::mpi::wait_all wait_all] operation.
boost::mpi::wait_all wait_all] operation.
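A condensed sketch of the same pattern, reconstructed from the prose (the diff shows only the changed headers of `hello_world_nonblocking.cpp`):

#include <boost/mpi.hpp>
#include <boost/serialization/string.hpp>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;  // assumes exactly two processes
  std::string out = world.rank() == 0 ? "Hello" : "world";
  std::string in;
  int other = (world.rank() + 1) % 2;
  mpi::request reqs[2];
  reqs[0] = world.isend(other, 0, out);  // both calls return immediately...
  reqs[1] = world.irecv(other, 0, in);
  mpi::wait_all(reqs, reqs + 2);         // ...so the two transfers can overlap
  std::cout << world.rank() << " received " << in << std::endl;
  return 0;
}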
If you run this program multiple times, you may see some strange
results: namely, some runs will produce:
@@ -586,23 +586,23 @@ amount of data stored at fixed field positions. When this is the case,
Boost.MPI can optimize their serialization and transmission to avoid
extraneous copy operations. To enable this optimization, users should
specialize the type trait [classref
boost::parallel::mpi::is_mpi_datatype `is_mpi_datatype`], e.g.:
boost::mpi::is_mpi_datatype `is_mpi_datatype`], e.g.:
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
template <>
struct is_mpi_datatype<gps_position> : mpl::true_ { };
}}}
} }
For composite traits, the specialization of [classref
boost::parallel::mpi::is_mpi_datatype `is_mpi_datatype`] may depend on
boost::mpi::is_mpi_datatype `is_mpi_datatype`] may depend on
`is_mpi_datatype` itself. For instance, a `boost::array` object is
fixed only when the type of the parameter it stores is fixed:
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
template <typename T, std::size_t N>
struct is_mpi_datatype<array<T, N> >
: public is_mpi_datatype<T> { };
}}}
} }
The redundant copy elimination optimization can only be applied when
the shape of the data type is completely fixed. Variable-length types
@@ -617,7 +617,7 @@ another. Built-in types can be transmitted without any extra effort;
library-defined types require the inclusion of a serialization header;
and user-defined types will require the addition of serialization
code. Fixed data types can be optimized for transmission using the
[classref boost::parallel::mpi::is_mpi_datatype `is_mpi_datatype`]
[classref boost::mpi::is_mpi_datatype `is_mpi_datatype`]
type trait.
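For example, a user-defined type becomes transmittable once it is serializable. A minimal sketch following the Boost.Serialization conventions (the members of `gps_position` here are illustrative):

#include <boost/serialization/access.hpp>

class gps_position
{
private:
  friend class boost::serialization::access;

  // For an output archive, operator& behaves like <<;
  // for an input archive, like >>.
  template<class Archive>
  void serialize(Archive& ar, const unsigned int version)
  {
    ar & degrees;
    ar & minutes;
    ar & seconds;
  }

  int degrees;
  int minutes;
  float seconds;
};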
[endsect]
@@ -625,7 +625,7 @@ type trait.
[section:collectives Collective operations]
[link parallel/mpi.point_to_point Point-to-point operations] are the
[link mpi.point_to_point Point-to-point operations] are the
core message passing primitives in Boost.MPI. However, many
message-passing applications also require higher-level communication
algorithms that combine or summarize the data stored on many different
@@ -634,17 +634,17 @@ processes. These algorithms support many common tasks such as
values on all processors" or "find the global minimum."
[section:broadcast Broadcast]
The [funcref boost::parallel::mpi::broadcast `broadcast`] algorithm is
The [funcref boost::mpi::broadcast `broadcast`] algorithm is
by far the simplest collective operation. It broadcasts a value from a
single process to all other processes within a [classref
boost::parallel::mpi::communicator communicator]. For instance, the
boost::mpi::communicator communicator]. For instance, the
following program broadcasts "Hello, World!" from process 0 to every
other process. (`hello_world_broadcast.cpp`)
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <iostream>
#include <boost/serialization/string.hpp>
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
int main(int argc, char* argv[])
{
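Again the hunk shows only the changed includes; a complete sketch of the broadcast example, reconstructed from the prose:

#include <boost/mpi.hpp>
#include <boost/serialization/string.hpp>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;
  std::string value;
  if (world.rank() == 0)
    value = "Hello, World!";
  // Every process calls broadcast; rank 0 is the root supplying the value.
  mpi::broadcast(world, value, 0);
  std::cout << "Process #" << world.rank() << " says " << value << std::endl;
  return 0;
}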
@@ -678,7 +678,7 @@ Process #6 says Hello, World!
[endsect]
[section:gather Gather]
The [funcref boost::parallel::mpi::gather `gather`] collective gathers
The [funcref boost::mpi::gather `gather`] collective gathers
the values produced by every process in a communicator into a vector
of values on the "root" process (specified by an argument to
`gather`). The /i/th element in the vector will correspond to the
@@ -688,10 +688,10 @@ random numbers are gathered at process 0 (the "root" in this case),
which prints out the values that correspond to each processor.
(`random_gather.cpp`)
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <iostream>
#include <cstdlib>
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
int main(int argc, char* argv[])
{
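A sketch of the gather calls themselves, reconstructed from the prose (only the root passes an output vector; the other processes supply just their values):

#include <boost/mpi.hpp>
#include <iostream>
#include <vector>
#include <cstdlib>
#include <ctime>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;

  std::srand(static_cast<unsigned>(std::time(0)) + world.rank());
  int my_number = std::rand();

  if (world.rank() == 0) {
    std::vector<int> all_numbers;
    mpi::gather(world, my_number, all_numbers, 0);  // root collects the vector
    for (int proc = 0; proc < world.size(); ++proc)
      std::cout << "Process #" << proc << " thought of "
                << all_numbers[proc] << std::endl;
  } else {
    mpi::gather(world, my_number, 0);               // non-roots send only
  }
  return 0;
}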
@@ -731,7 +731,7 @@ Process #6 thought of 650073868
The `gather` operation collects values from every process into a
vector at one process. If instead the values from every process need
to be collected into identical vectors on every process, use the
[funcref boost::parallel::mpi::all_gather `all_gather`] algorithm,
[funcref boost::mpi::all_gather `all_gather`] algorithm,
which is semantically equivalent to calling `gather` followed by a
`broadcast` of the resulting vector.
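A minimal sketch of the `all_gather` variant, which leaves the gathered vector on every process and so takes no root argument:

#include <boost/mpi.hpp>
#include <vector>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;
  std::vector<int> ranks;
  mpi::all_gather(world, world.rank(), ranks);
  // ranks now holds {0, 1, ..., size-1} on every process
  return 0;
}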
@@ -739,20 +739,20 @@ which is semantically equivalent to calling `gather` followed by a
[section:reduce Reduce]
The [funcref boost::parallel::mpi::reduce `reduce`] collective
The [funcref boost::mpi::reduce `reduce`] collective
summarizes the values from each process into a single value at the
user-specified "root" process. The Boost.MPI `reduce` operation is
similar in spirit to the STL _accumulate_ operation, because it takes
a sequence of values (one per process) and combines them via a
function object. For instance, we can randomly generate values in each
process and then compute the minimum value over all processes via a
call to [funcref boost::parallel::mpi::reduce `reduce`]
call to [funcref boost::mpi::reduce `reduce`]
(`random_min.cpp`):
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <iostream>
#include <cstdlib>
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
int main(int argc, char* argv[])
{
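As with `gather`, the hunk shows only the changed headers; a sketch of the reduction itself, reconstructed from the prose, using the [classref boost::mpi::minimum `minimum`] function object:

#include <boost/mpi.hpp>
#include <iostream>
#include <cstdlib>
#include <ctime>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;

  std::srand(static_cast<unsigned>(std::time(0)) + world.rank());
  int my_number = std::rand();

  if (world.rank() == 0) {
    int minimum;
    mpi::reduce(world, my_number, minimum, mpi::minimum<int>(), 0);
    std::cout << "The minimum value is " << minimum << std::endl;
  } else {
    mpi::reduce(world, my_number, mpi::minimum<int>(), 0);  // non-roots supply values only
  }
  return 0;
}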
@@ -780,11 +780,11 @@ value. Any associative binary function or function object will
work. For instance, to concatenate strings with `reduce` one could use
the function object `std::plus<std::string>` (`string_cat.cpp`):
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <iostream>
#include <string>
#include <boost/serialization/string.hpp>
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
int main(int argc, char* argv[])
{
@@ -819,23 +819,23 @@ The result is zero one two three four five six
Any kind of binary function object can be used with `reduce`. For
instance, there are many such function objects in the C++ standard
`<functional>` header and the Boost.MPI header
`<boost/parallel/mpi/operations.hpp>`. Or, you can create your own
`<boost/mpi/operations.hpp>`. Or, you can create your own
function object. Function objects used with `reduce` must be
associative, i.e. `f(x, f(y, z))` must be equivalent to `f(f(x, y),
z)`. If they are also commutative (i.e., `f(x, y) == f(y, x)`),
Boost.MPI can use a more efficient implementation of `reduce`. To
state that a function object is commutative, you will need to
specialize the class [classref boost::parallel::mpi::is_commutative
specialize the class [classref boost::mpi::is_commutative
`is_commutative`]. For instance, we could modify the previous example
by telling Boost.MPI that string concatenation is commutative:
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
template<>
struct is_commutative<std::plus<std::string>, std::string>
: mpl::true_ { };
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
By adding this code prior to `main()`, Boost.MPI will assume that
string concatenation is commutative and employ a different parallel
@@ -857,10 +857,10 @@ algorithm. Boost.MPI also performs direct mappings from function
objects in `<functional>` to `MPI_Op` values predefined by MPI (e.g.,
`MPI_SUM`, `MPI_MAX`); if you have your own function objects that can
take advantage of this mapping, see the class template [classref
boost::parallel::mpi::is_mpi_op `is_mpi_op`].
boost::mpi::is_mpi_op `is_mpi_op`].
Like [link parallel/mpi.gather `gather`], `reduce` has an "all"
variant called [funcref boost::parallel::mpi::all_reduce `all_reduce`]
Like [link mpi.gather `gather`], `reduce` has an "all"
variant called [funcref boost::mpi::all_reduce `all_reduce`]
that performs the reduction operation and broadcasts the result to all
processes. This variant is useful, for instance, in establishing
global minimum or maximum values.
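A minimal sketch of `all_reduce`, which returns the combined value on every process (again no root argument):

#include <boost/mpi.hpp>
#include <iostream>
#include <cstdlib>
#include <ctime>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;

  std::srand(static_cast<unsigned>(std::time(0)) + world.rank());
  int my_number = std::rand();

  // Every process receives the global minimum.
  int global_min = mpi::all_reduce(world, my_number, mpi::minimum<int>());
  std::cout << "Process #" << world.rank()
            << " sees minimum " << global_min << std::endl;
  return 0;
}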
@@ -881,7 +881,7 @@ communicators.
When the MPI environment is initialized, only the "world" communicator
(called `MPI_COMM_WORLD` in the MPI C and Fortran bindings) is
available. The "world" communicator, accessed by default-constructing
a [classref boost::parallel::mpi::communicator mpi::communicator]
a [classref boost::mpi::communicator mpi::communicator]
object, contains all of the MPI processes present when the program
begins execution. Other communicators can then be constructed by
duplicating or building subsets of the "world" communicator. For
@@ -889,11 +889,11 @@ instance, in the following program we split the processes into two
groups: one for processes generating data and the other for processes
that will collect the data. (`generate_collect.cpp`)
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <iostream>
#include <cstdlib>
#include <boost/serialization/vector.hpp>
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
enum message_tags {msg_data_packet, msg_broadcast_data, msg_finished};
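The key call is `communicator::split()`; a minimal sketch (the grouping rule mirrors the one used in `generate_collect.cpp`):

#include <boost/mpi.hpp>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;

  // Processes that pass the same "color" to split() land in the same subgroup.
  bool is_generator = world.rank() < 2 * world.size() / 3;
  mpi::communicator local = world.split(is_generator ? 0 : 1);

  std::cout << "World rank " << world.rank()
            << " has rank " << local.rank()
            << " in its subgroup." << std::endl;
  return 0;
}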
@@ -957,7 +957,7 @@ first serialize these data types into a buffer and then communicate
them; the receiver then copies the results into a buffer before
deserializing into an object on the other end. For some data types,
this overhead can be eliminated by using [classref
boost::parallel::mpi::is_mpi_datatype `is_mpi_datatype`]. However,
boost::mpi::is_mpi_datatype `is_mpi_datatype`]. However,
variable-length data types such as strings and lists cannot be MPI
data types.
@@ -983,9 +983,9 @@ startup, so the content of the list (i.e., the current sequence of
numbers) can be transmitted efficiently. The complete example is
available in `example/random_content.cpp`. We begin with the master
process (rank 0), which builds a list, communicates its structure via
a [funcref boost::parallel::mpi::skeleton `skeleton`], then repeatedly
a [funcref boost::mpi::skeleton `skeleton`], then repeatedly
generates random number sequences to be broadcast to the slave
processes via [classref boost::parallel::mpi::content `content`]:
processes via [classref boost::mpi::content `content`]:
// Generate the list and broadcast its structure
@@ -1008,7 +1008,7 @@ processes via [classref boost::parallel::mpi::content `content`]:
The slave processes have a very similar structure to the master. They
receive (via the [funcref boost::parallel::mpi::broadcast
receive (via the [funcref boost::mpi::broadcast
`broadcast()`] call) the skeleton of the data structure, then use it
to build their own lists of integers. In each iteration, they receive
via another `broadcast()` the new content in the data structure and
@@ -1037,11 +1037,11 @@ compute some property of the data:
The skeletons and content of any Serializable data type can be
transmitted either via the [memberref
boost::parallel::mpi::communicator::send `send`] and [memberref
boost::parallel::mpi::communicator::recv `recv`] members of the
[classref boost::parallel::mpi::communicator `communicator`] class
boost::mpi::communicator::send `send`] and [memberref
boost::mpi::communicator::recv `recv`] members of the
[classref boost::mpi::communicator `communicator`] class
(for point-to-point communicators) or broadcast via the [funcref
boost::parallel::mpi::broadcast `broadcast()`] collective. When
boost::mpi::broadcast `broadcast()`] collective. When
separating a data structure into a skeleton and content, be careful
not to modify the data structure (either on the sender side or the
receiver side) without transmitting the skeleton again. Boost.MPI can
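A minimal sketch of the calls involved, assuming a `std::list<int>` whose length is fixed after startup (reconstructed from the prose; the full example is `example/random_content.cpp`):

#include <boost/mpi.hpp>
#include <boost/serialization/list.hpp>
#include <list>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;

  std::list<int> l;
  if (world.rank() == 0)
    l.assign(100, 0);                        // the root builds the structure

  // Transmit the shape of the list once...
  mpi::broadcast(world, mpi::skeleton(l), 0);

  // ...then transmit only the values, as many times as needed.
  mpi::content c = mpi::get_content(l);
  mpi::broadcast(world, c, 0);
  return 0;
}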
@@ -1079,26 +1079,26 @@ interfaces to MPI, or for porting existing parallel programs to MPI.
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node50.html#Node50
`MPI_Cancel`]]
[[memberref boost::parallel::mpi::request::cancel
[[memberref boost::mpi::request::cancel
`request::cancel`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node35.html#Node35
`MPI_Get_count`]]
[[memberref boost::parallel::mpi::status::count `status::count`]]]
[[memberref boost::mpi::status::count `status::count`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node46.html#Node46
`MPI_Ibsend`]] [unsupported]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node50.html#Node50
`MPI_Iprobe`]]
[[memberref boost::parallel::mpi::communicator::iprobe `communicator::iprobe`]]]
[[memberref boost::mpi::communicator::iprobe `communicator::iprobe`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node46.html#Node46
`MPI_Irsend`]] [unsupported]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node46.html#Node46
`MPI_Isend`]]
[[memberref boost::parallel::mpi::communicator::isend
[[memberref boost::mpi::communicator::isend
`communicator::isend`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node46.html#Node46
@@ -1106,18 +1106,18 @@ interfaces to MPI, or for porting existing parallel programs to MPI.
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node46.html#Node46
`MPI_Irecv`]]
[[memberref boost::parallel::mpi::communicator::isend
[[memberref boost::mpi::communicator::irecv
`communicator::irecv`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node50.html#Node50
`MPI_Probe`]]
[[memberref boost::parallel::mpi::communicator::probe `communicator::probe`]]]
[[memberref boost::mpi::communicator::probe `communicator::probe`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node53.html#Node53
`MPI_PROC_NULL`]] [unsupported]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node34.html#Node34 `MPI_Recv`]]
[[memberref boost::parallel::mpi::communicator::recv
[[memberref boost::mpi::communicator::recv
`communicator::recv`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node51.html#Node51
@@ -1134,7 +1134,7 @@ interfaces to MPI, or for porting existing parallel programs to MPI.
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node31.html#Node31
`MPI_Send`]]
[[memberref boost::parallel::mpi::communicator::send
[[memberref boost::mpi::communicator::send
`communicator::send`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node52.html#Node52
@@ -1159,34 +1159,34 @@ interfaces to MPI, or for porting existing parallel programs to MPI.
`MPI_Startall`]] [unsupported]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node47.html#Node47
`MPI_Test`]] [[memberref boost::parallel::mpi::request::wait `request::test`]]]
`MPI_Test`]] [[memberref boost::mpi::request::test `request::test`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node47.html#Node47
`MPI_Testall`]] [[funcref boost::parallel::mpi::test_all `test_all`]]]
`MPI_Testall`]] [[funcref boost::mpi::test_all `test_all`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node47.html#Node47
`MPI_Testany`]] [[funcref boost::parallel::mpi::test_any `test_any`]]]
`MPI_Testany`]] [[funcref boost::mpi::test_any `test_any`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node47.html#Node47
`MPI_Testsome`]] [[funcref boost::parallel::mpi::test_some `test_some`]]]
`MPI_Testsome`]] [[funcref boost::mpi::test_some `test_some`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node50.html#Node50
`MPI_Test_cancelled`]]
[[memberref boost::parallel::mpi::status::cancelled
[[memberref boost::mpi::status::cancelled
`status::cancelled`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node47.html#Node47
`MPI_Wait`]] [[memberref boost::parallel::mpi::request::wait
`MPI_Wait`]] [[memberref boost::mpi::request::wait
`request::wait`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node47.html#Node47
`MPI_Waitall`]] [[funcref boost::parallel::mpi::wait_all `wait_all`]]]
`MPI_Waitall`]] [[funcref boost::mpi::wait_all `wait_all`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node47.html#Node47
`MPI_Waitany`]] [[funcref boost::parallel::mpi::wait_any `wait_any`]]]
`MPI_Waitany`]] [[funcref boost::mpi::wait_any `wait_any`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node47.html#Node47
`MPI_Waitsome`]] [[funcref boost::parallel::mpi::wait_some `wait_some`]]]
`MPI_Waitsome`]] [[funcref boost::mpi::wait_some `wait_some`]]]
]
Boost.MPI automatically maps C and C++ data types to their MPI
@@ -1209,7 +1209,7 @@ types and MPI datatype constants.
[[`MPI_LONG_DOUBLE`] [`long double`]]
[[`MPI_BYTE`] [unused]]
[[`MPI_PACKED`] [used internally for [link
parallel/mpi.user_data_types serialized data types]]]
mpi.user_data_types serialized data types]]]
[[`MPI_LONG_LONG_INT`] [`long long int`, if supported by compiler]]
[[`MPI_UNSIGNED_LONG_LONG_INT`] [`unsigned long long int`, if
supported by compiler]]
@@ -1224,10 +1224,10 @@ supported by compiler]]
Boost.MPI does not provide direct wrappers to the MPI derived
datatypes functionality. Instead, Boost.MPI relies on the
_Serialization_ library to construct MPI datatypes for user-defined
classes. The section on [link parallel/mpi.user_data_types user-defined
classes. The section on [link mpi.user_data_types user-defined
data types] describes this mechanism, which is used for types that
are marked as "MPI datatypes" using [classref
boost::parallel::mpi::is_mpi_datatype `is_mpi_datatype`].
boost::mpi::is_mpi_datatype `is_mpi_datatype`].
The derived datatypes table that follows describes which C++ types
correspond to the functionality of the C MPI's datatype
@@ -1285,8 +1285,8 @@ can later be transmitted via MPI and unpacked into separate values via
MPI's unpacking facilities. As with datatypes, Boost.MPI provides an
abstract interface to MPI's packing and unpacking facilities. In
particular, the two archive classes [classref
boost::parallel::mpi::packed_oarchive `packed_oarchive`] and [classref
boost::parallel::mpi::packed_iarchive `packed_iarchive`] can be used
boost::mpi::packed_oarchive `packed_oarchive`] and [classref
boost::mpi::packed_iarchive `packed_iarchive`] can be used
to pack or unpack a contiguous buffer using MPI's facilities.
[table Packing and unpacking
@@ -1294,14 +1294,14 @@ to pack or unpack a contiguous buffer using MPI's facilities.
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node62.html#Node62
`MPI_Pack`]] [[classref
boost::parallel::mpi::packed_oarchive `packed_oarchive`]]]
boost::mpi::packed_oarchive `packed_oarchive`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node62.html#Node62
`MPI_Pack_size`]] [used internally by Boost.MPI]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node62.html#Node62
`MPI_Unpack`]] [[classref
boost::parallel::mpi::packed_iarchive `packed_iarchive`]]]
boost::mpi::packed_iarchive `packed_iarchive`]]]
]
Boost.MPI supports a one-to-one mapping for most of the MPI
@@ -1313,47 +1313,47 @@ to do so.
[[C Function] [Boost.MPI Equivalent]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node73.html#Node73
`MPI_Allgather`]] [[funcref boost::parallel::mpi::all_gather `all_gather`]]]
`MPI_Allgather`]] [[funcref boost::mpi::all_gather `all_gather`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node73.html#Node73
`MPI_Allgatherv`]] [most uses supported by [funcref boost::parallel::mpi::all_gather `all_gather`]]]
`MPI_Allgatherv`]] [most uses supported by [funcref boost::mpi::all_gather `all_gather`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node82.html#Node82
`MPI_Allreduce`]] [[funcref boost::parallel::mpi::all_reduce `all_reduce`]]]
`MPI_Allreduce`]] [[funcref boost::mpi::all_reduce `all_reduce`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node75.html#Node75
`MPI_Alltoall`]] [[funcref boost::parallel::mpi::all_to_all `all_to_all`]]]
`MPI_Alltoall`]] [[funcref boost::mpi::all_to_all `all_to_all`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node75.html#Node75
`MPI_Alltoallv`]] [most uses supported by [funcref boost::parallel::mpi::all_to_all `all_to_all`]]]
`MPI_Alltoallv`]] [most uses supported by [funcref boost::mpi::all_to_all `all_to_all`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node66.html#Node66
`MPI_Barrier`]] [[memberref
boost::parallel::mpi::communicator::barrier `communicator::barrier`]]]
boost::mpi::communicator::barrier `communicator::barrier`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node67.html#Node67
`MPI_Bcast`]] [[funcref boost::parallel::mpi::broadcast `broadcast`]]]
`MPI_Bcast`]] [[funcref boost::mpi::broadcast `broadcast`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node69.html#Node69
`MPI_Gather`]] [[funcref boost::parallel::mpi::gather `gather`]]]
`MPI_Gather`]] [[funcref boost::mpi::gather `gather`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node69.html#Node69
`MPI_Gatherv`]] [most uses supported by [funcref boost::parallel::mpi::gather `gather`]]]
`MPI_Gatherv`]] [most uses supported by [funcref boost::mpi::gather `gather`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node77.html#Node77
`MPI_Reduce`]] [[funcref boost::parallel::mpi::reduce `reduce`]]]
`MPI_Reduce`]] [[funcref boost::mpi::reduce `reduce`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node83.html#Node83
`MPI_Reduce_scatter`]] [unsupported]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node84.html#Node84
`MPI_Scan`]] [[funcref boost::parallel::mpi::scan `scan`]]]
`MPI_Scan`]] [[funcref boost::mpi::scan `scan`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node71.html#Node71
`MPI_Scatter`]] [[funcref boost::parallel::mpi::scatter `scatter`]]]
`MPI_Scatter`]] [[funcref boost::mpi::scatter `scatter`]]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node71.html#Node71
`MPI_Scatterv`]] [most uses supported by [funcref boost::parallel::mpi::scatter `scatter`]]]
`MPI_Scatterv`]] [most uses supported by [funcref boost::mpi::scatter `scatter`]]]
]
Boost.MPI uses function objects to specify how reductions should occur
@@ -1368,15 +1368,15 @@ Boost.MPI.
[table Reduction operations
[[C Function/Constant] [Boost.MPI Equivalent]]
[[`MPI_BAND`] [[classref boost::parallel::mpi::bitwise_and `bitwise_and`]]]
[[`MPI_BOR`] [[classref boost::parallel::mpi::bitwise_or `bitwise_or`]]]
[[`MPI_BXOR`] [[classref boost::parallel::mpi::bitwise_xor `bitwise_xor`]]]
[[`MPI_BAND`] [[classref boost::mpi::bitwise_and `bitwise_and`]]]
[[`MPI_BOR`] [[classref boost::mpi::bitwise_or `bitwise_or`]]]
[[`MPI_BXOR`] [[classref boost::mpi::bitwise_xor `bitwise_xor`]]]
[[`MPI_LAND`] [`std::logical_and`]]
[[`MPI_LOR`] [`std::logical_or`]]
[[`MPI_LXOR`] [[classref boost::parallel::mpi::logical_xor `logical_xor`]]]
[[`MPI_MAX`] [[classref boost::parallel::mpi::maximum `maximum`]]]
[[`MPI_LXOR`] [[classref boost::mpi::logical_xor `logical_xor`]]]
[[`MPI_MAX`] [[classref boost::mpi::maximum `maximum`]]]
[[`MPI_MAXLOC`] [unsupported]]
[[`MPI_MIN`] [[classref boost::parallel::mpi::minimum `minimum`]]]
[[`MPI_MIN`] [[classref boost::mpi::minimum `minimum`]]]
[[`MPI_MINLOC`] [unsupported]]
[[`MPI_Op_create`] [used internally by Boost.MPI]]
[[`MPI_Op_free`] [used internally by Boost.MPI]]
@@ -1394,14 +1394,14 @@ Boost.MPI.
[python]
Boost.MPI provides an alternative MPI interface from the _Python_
programming language via the `boost.parallel.mpi` module. The
programming language via the `boost.mpi` module. The
Boost.MPI Python bindings, built on top of the C++ Boost.MPI using the
_BoostPython_ library, provide nearly all of the functionality of
Boost.MPI within a dynamic, object-oriented language.
The Boost.MPI Python module can be built and installed from the
`libs/parallel/build` directory. Just follow the [link
parallel/mpi.config configuration] and [link parallel/mpi.installation
`libs/mpi/build` directory. Just follow the [link
mpi.config configuration] and [link mpi.installation
installation] instructions for the C++ Boost.MPI. Once you have
installed the Python module, be sure that the installation location is
in your `PYTHONPATH`.
@@ -1411,10 +1411,10 @@ in your `PYTHONPATH`.
[python]
Getting started with the Boost.MPI Python module is as easy as
importing `boost.parallel.mpi`. Our first "Hello, World!" program is
importing `boost.mpi`. Our first "Hello, World!" program is
just two lines long:
import boost.parallel.mpi as mpi
import boost.mpi as mpi
print "I am process %d of %d." % (mpi.rank, mpi.size)
Go ahead and run this program with several processes. Be sure to
@@ -1438,7 +1438,7 @@ Point-to-point operations in Boost.MPI have nearly the same syntax in
Python as in C++. We can write a simple two-process Python program
that prints "Hello, world!" by transmitting Python strings:
import boost.parallel.mpi as mpi
import boost.mpi as mpi
if mpi.world.rank == 0:
mpi.world.send(1, 0, 'Hello')
@@ -1450,9 +1450,9 @@ that prints "Hello, world!" by transmitting Python strings:
mpi.world.send(0, 1, 'world')
There are only a few notable differences between this Python code and
the example [link parallel/mpi.point_to_point in the C++
the example [link mpi.point_to_point in the C++
tutorial]. First of all, we don't need to write any initialization
code in Python: just loading the `boost.parallel.mpi` module makes the
code in Python: just loading the `boost.mpi` module makes the
appropriate `MPI_Init` and `MPI_Finalize` calls. Second, we're passing
Python objects from one process to another through MPI. Any Python
object that can be pickled can be transmitted; the next section will
@@ -1464,7 +1464,7 @@ polymorphic.
When experimenting with Boost.MPI in Python, don't forget that help is
always available via `pydoc`: just pass the name of the module or
module entity on the command line (e.g., `pydoc
boost.parallel.mpi.communicator`) to receive complete reference
boost.mpi.communicator`) to receive complete reference
documentation. When in doubt, try it!
[endsect]
@@ -1480,11 +1480,11 @@ interface. Any C++ type that provides (de-)serialization routines that
meet the requirements of the Boost.Serialization library is eligible
for this optimization, but the type must be registered in advance. To
register a C++ type, invoke the C++ function [funcref
boost::parallel::mpi::python::register_serialized
boost::mpi::python::register_serialized
register_serialized]. If your C++ types come from other Python modules
(they probably will!), those modules will need to link against the
`boost_mpi` and `boost_mpi_python` libraries as described in the [link
parallel/mpi.installation installation section]. Note that you do
mpi.installation installation section]. Note that you do
*not* need to link against the Boost.MPI Python extension module.
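In sketch form, registration happens once in the C++ module-initialization code. The call form below is an assumption; take the exact signature of [funcref boost::mpi::python::register_serialized register_serialized] from the reference documentation, and note that `my_data` and `my_module` are purely illustrative:

#include <boost/mpi/python.hpp>
#include <boost/python.hpp>

// A hypothetical serializable C++ type exposed to Python.
struct my_data
{
  template<class Archive>
  void serialize(Archive& ar, const unsigned int /*version*/) { /* ... */ }
};

BOOST_PYTHON_MODULE(my_module)
{
  boost::python::class_<my_data>("my_data");
  // Ask Boost.MPI's Python layer to transmit my_data via its C++
  // serialization instead of pickling (call form assumed, not verified).
  boost::mpi::python::register_serialized(my_data());
}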
Finally, Boost.MPI supports separation of the structure of an object
@@ -1532,9 +1532,9 @@ extra copies.
To use the skeleton/content mechanism from Python, you must first
register the type of your data structure with the skeleton/content
mechanism *from C++*. The registration function is [funcref
boost::parallel::mpi::python::register_skeleton_and_content
boost::mpi::python::register_skeleton_and_content
register_skeleton_and_content] and resides in the [headerref
boost/parallel/mpi/python.hpp <boost/parallel/mpi/python.hpp>] header.
boost/mpi/python.hpp <boost/mpi/python.hpp>] header.
Once you have registered your C++ data structures, you can extract
the skeleton for an instance of that data structure with `skeleton()`.
@@ -1604,10 +1604,10 @@ interoperability with the C++ Boost.MPI and the C MPI bindings.
[endsect]
[section:pythonref Reference]
The Boost.MPI Python module, `boost.parallel.mpi`, has its own
[@boost.parallel.mpi.html reference documentation], which is also
The Boost.MPI Python module, `boost.mpi`, has its own
[@boost.mpi.html reference documentation], which is also
available using `pydoc` (from the command line) or
`help(boost.parallel.mpi)` (from the Python interpreter).
`help(boost.mpi)` (from the Python interpreter).
[endsect]
@@ -1626,7 +1626,7 @@ an extra level of manual bookkeeping; and passing a map from strings
to containers of strings is positively infuriating. The Parallel MPI
library allows all of these data types to be passed using the same
simple `send()` and `recv()` primitives. Likewise, collective
operations such as [funcref boost::parallel::mpi::reduce `reduce()`]
operations such as [funcref boost::mpi::reduce `reduce()`]
allow arbitrary data types and function objects, much like the C++
Standard Library would.
@@ -1634,7 +1634,7 @@ The higher-level abstractions provided for convenience must not have
an impact on the performance of the application. For instance, sending
an integer via `send` must be as efficient as a call to `MPI_Send`,
which means that it must be implemented by a simple call to
`MPI_Send`; likewise, an integer [funcref boost::parallel::mpi::reduce
`MPI_Send`; likewise, an integer [funcref boost::mpi::reduce
`reduce()`] using `std::plus<int>` must be implemented with a call to
`MPI_Reduce` on integers using the `MPI_SUM` operation: anything less
will impact performance. In essence, this is the "don't pay for what
@@ -1670,13 +1670,13 @@ raw MPI. We ran five different variants of the NetPIPE benchmark:
`Char` in place of the fundamental `char` type. The `Char` type
contains a single `char`, a `serialize()` method to make it
serializable, and specializes [classref
boost::parallel::mpi::is_mpi_datatype is_mpi_datatype] to force
boost::mpi::is_mpi_datatype is_mpi_datatype] to force
Boost.MPI to build a derived MPI data type for it.
# Boost.MPI (Serialized): NetPIPE modified to use a user-defined type
`Char` in place of the fundamental `char` type. This `Char` type
contains a single `char` and is serializable. Unlike the Datatypes
case, [classref boost::parallel::mpi::is_mpi_datatype
case, [classref boost::mpi::is_mpi_datatype
is_mpi_datatype] is *not* specialized, forcing Boost.MPI to perform
many, many serialization calls.
@@ -1689,7 +1689,7 @@ C++ Compiler, version 9.0, Boost 1.35.0 (prerelease), and
[@http://www.open-mpi.org/ Open MPI] version 1.1. The NetPIPE results
follow:
[$../../../libs/parallel/doc/netpipe.png]
[$../../../libs/mpi/doc/netpipe.png]
There are some observations we can make about these NetPIPE
results. First of all, the top two plots show that Boost.MPI performs
@@ -1712,7 +1712,7 @@ performance.
* *Boost 1.35.0*: Initial release, containing the following post-review changes
* Support for arrays in all collective operations
* Support default-construction of [classref boost::parallel::mpi::environment environment]
* Support default-construction of [classref boost::mpi::environment environment]
* *2006-09-21*: Boost.MPI accepted into Boost.
@@ -1727,4 +1727,4 @@ Boost.MPI that proved the usefulness of the Serialization library in
an MPI setting and the performance benefits of specialization in a C++
abstraction layer for MPI. Jeremy Siek managed the formal review of Boost.MPI.
[endsect]
[endsect]


@@ -7,11 +7,11 @@
// An example using Boost.MPI's split() operation on communicators to
// create separate data-generating processes and data-collecting
// processes.
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <iostream>
#include <cstdlib>
#include <boost/serialization/vector.hpp>
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
enum message_tags { msg_data_packet, msg_broadcast_data, msg_finished };


@@ -7,12 +7,12 @@
// An example using Boost.MPI's split() operation on communicators to
// create separate data-generating processes and data-collecting
// processes using boost::optional for broadcasting.
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <iostream>
#include <cstdlib>
#include <boost/serialization/vector.hpp>
#include <boost/serialization/optional.hpp>
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
enum message_tags { msg_data_packet, msg_finished };


@@ -6,10 +6,10 @@
// A simple Hello, world! example using Boost.MPI message passing.
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <iostream>
#include <boost/serialization/string.hpp> // Needed to send/receive strings!
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
int main(int argc, char* argv[])
{


@@ -6,10 +6,10 @@
// A simple Hello, world! example using Boost.MPI broadcast()
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <iostream>
#include <boost/serialization/string.hpp> // Needed to send/receive strings!
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
int main(int argc, char* argv[])
{


@@ -6,10 +6,10 @@
// A simple Hello, world! example using Boost.MPI message passing.
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <iostream>
#include <boost/serialization/string.hpp> // Needed to send/receive strings!
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
int main(int argc, char* argv[])
{


@@ -8,14 +8,14 @@
// data and others to aggregate the data
#include <iostream>
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <boost/random/parallel.hpp>
#include <boost/random.hpp>
#include <boost/foreach.hpp>
#include <iostream>
#include <cstdlib>
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
enum {sample_tag, sample_skeleton_tag, sample_broadcast_tag, quit_tag};


@@ -6,14 +6,14 @@
// An example using Boost.MPI's skeletons and content to optimize
// communication.
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <boost/serialization/list.hpp>
#include <algorithm>
#include <functional>
#include <numeric>
#include <iostream>
#include <stdlib.h>
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
int main(int argc, char* argv[])
{


@@ -6,10 +6,10 @@
// An example using Boost.MPI's gather()
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <iostream>
#include <cstdlib>
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
int main(int argc, char* argv[])
{


@@ -5,10 +5,10 @@
// http://www.boost.org/LICENSE_1_0.txt)
// An example using Boost.MPI's reduce() to compute a minimum value.
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <iostream>
#include <cstdlib>
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
int main(int argc, char* argv[])
{


@@ -8,10 +8,10 @@
// http://www.boost.org/LICENSE_1_0.txt)
// Performance test of the reduce() collective
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <boost/lexical_cast.hpp>
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
struct add_int {
int operator()(int x, int y) const { return x + y; }
@@ -35,9 +35,9 @@ inline wrapped_int operator+(wrapped_int x, wrapped_int y)
return wrapped_int(x.value + y.value);
}
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
template<> struct is_mpi_datatype<wrapped_int> : mpl::true_ { };
} } }
} }
struct serialized_int
{


@@ -5,11 +5,11 @@
// http://www.boost.org/LICENSE_1_0.txt)
// An example using Boost.MPI's reduce() to concatenate strings.
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <iostream>
#include <string>
#include <boost/serialization/string.hpp> // Important for sending strings!
namespace mpi = boost::parallel::mpi;
namespace mpi = boost::mpi;
/* Defining STRING_CONCAT_COMMUTATIVE lies to Boost.MPI by forcing it
* to assume that string concatenation is commutative, which it is
@@ -18,12 +18,12 @@ namespace mpi = boost::parallel::mpi;
* commutative.
*/
#ifdef STRING_CONCAT_COMMUTATIVE
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
template<>
struct is_commutative<std::plus<std::string>, std::string> : mpl::true_ { };
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif
int main(int argc, char* argv[])


@@ -17,14 +17,14 @@
#ifndef BOOST_MPI_HPP
#define BOOST_MPI_HPP
#include <boost/parallel/mpi/allocator.hpp>
#include <boost/parallel/mpi/collectives.hpp>
#include <boost/parallel/mpi/communicator.hpp>
#include <boost/parallel/mpi/datatype.hpp>
#include <boost/parallel/mpi/environment.hpp>
#include <boost/parallel/mpi/nonblocking.hpp>
#include <boost/parallel/mpi/operations.hpp>
#include <boost/parallel/mpi/skeleton_and_content.hpp>
#include <boost/parallel/mpi/timer.hpp>
#include <boost/mpi/allocator.hpp>
#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/nonblocking.hpp>
#include <boost/mpi/operations.hpp>
#include <boost/mpi/skeleton_and_content.hpp>
#include <boost/mpi/timer.hpp>
#endif // BOOST_MPI_HPP


@@ -9,16 +9,16 @@
* This header provides an STL-compliant allocator that uses the
* MPI-2 memory allocation facilities.
*/
#ifndef BOOST_PARALLEL_MPI_ALLOCATOR_HPP
#define BOOST_PARALLEL_MPI_ALLOCATOR_HPP
#ifndef BOOST_MPI_ALLOCATOR_HPP
#define BOOST_MPI_ALLOCATOR_HPP
#include <boost/parallel/mpi/config.hpp>
#include <boost/parallel/mpi/exception.hpp>
#include <boost/mpi/config.hpp>
#include <boost/mpi/exception.hpp>
#include <cstddef>
#include <memory>
#include <boost/limits.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
#if defined(BOOST_MPI_HAS_MEMORY_ALLOCATION)
template<typename T> class allocator;
@@ -205,6 +205,6 @@ inline bool operator!=(const allocator<T1>&, const allocator<T2>&) throw()
using std::allocator;
#endif
} } } /// end namespace boost::parallel::mpi
} } /// end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_ALLOCATOR_HPP
#endif // BOOST_MPI_ALLOCATOR_HPP


@@ -13,15 +13,15 @@
* processes within a communicator. The header @c collectives_fwd.hpp
* provides forward declarations for each of these operations. To
* include only specific collective algorithms, use the headers @c
* boost/parallel/mpi/collectives/algorithm_name.hpp.
* boost/mpi/collectives/algorithm_name.hpp.
*/
#ifndef BOOST_PARALLEL_MPI_COLLECTIVES_HPP
#define BOOST_PARALLEL_MPI_COLLECTIVES_HPP
#ifndef BOOST_MPI_COLLECTIVES_HPP
#define BOOST_MPI_COLLECTIVES_HPP
#include <boost/parallel/mpi/communicator.hpp>
#include <boost/mpi/communicator.hpp>
#include <vector>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
/**
* @brief Gather the values stored at every process into vectors of
@@ -528,18 +528,18 @@ template<typename T, typename Op>
void
scan(const communicator& comm, const T* in_values, int n, T* out_values, Op op);
} } } // end namespace boost::parallel::mpi
#endif // BOOST_PARALLEL_MPI_COLLECTIVES_HPP
} } // end namespace boost::mpi
#endif // BOOST_MPI_COLLECTIVES_HPP
#ifndef BOOST_PARALLEL_MPI_COLLECTIVES_FORWARD_ONLY
#ifndef BOOST_MPI_COLLECTIVES_FORWARD_ONLY
// Include implementations of each of the collectives
# include <boost/parallel/mpi/collectives/all_gather.hpp>
# include <boost/parallel/mpi/collectives/all_reduce.hpp>
# include <boost/parallel/mpi/collectives/all_to_all.hpp>
# include <boost/parallel/mpi/collectives/broadcast.hpp>
# include <boost/parallel/mpi/collectives/gather.hpp>
# include <boost/parallel/mpi/collectives/scatter.hpp>
# include <boost/parallel/mpi/collectives/reduce.hpp>
# include <boost/parallel/mpi/collectives/scan.hpp>
# include <boost/mpi/collectives/all_gather.hpp>
# include <boost/mpi/collectives/all_reduce.hpp>
# include <boost/mpi/collectives/all_to_all.hpp>
# include <boost/mpi/collectives/broadcast.hpp>
# include <boost/mpi/collectives/gather.hpp>
# include <boost/mpi/collectives/scatter.hpp>
# include <boost/mpi/collectives/reduce.hpp>
# include <boost/mpi/collectives/scan.hpp>
#endif


@@ -5,19 +5,19 @@
// http://www.boost.org/LICENSE_1_0.txt)
// Message Passing Interface 1.1 -- Section 4.7. Gather-to-all
#ifndef BOOST_PARALLEL_MPI_ALL_GATHER_HPP
#define BOOST_PARALLEL_MPI_ALL_GATHER_HPP
#ifndef BOOST_MPI_ALL_GATHER_HPP
#define BOOST_MPI_ALL_GATHER_HPP
#include <boost/parallel/mpi/exception.hpp>
#include <boost/parallel/mpi/datatype.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <vector>
#include <boost/serialization/vector.hpp>
// all_gather falls back to gather+broadcast in some cases
#include <boost/parallel/mpi/collectives/broadcast.hpp>
#include <boost/parallel/mpi/collectives/gather.hpp>
#include <boost/mpi/collectives/broadcast.hpp>
#include <boost/mpi/collectives/gather.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
namespace detail {
// We're all-gathering for a type that has an associated MPI
@@ -27,7 +27,7 @@ namespace detail {
all_gather_impl(const communicator& comm, const T* in_values, int n,
T* out_values, mpl::true_)
{
MPI_Datatype type = boost::parallel::mpi::get_mpi_datatype<T>();
MPI_Datatype type = boost::mpi::get_mpi_datatype<T>();
BOOST_MPI_CHECK_RESULT(MPI_Allgather,
(const_cast<T*>(in_values), n, type,
out_values, n, type, comm));
@@ -61,7 +61,7 @@ all_gather(const communicator& comm, const T& in_value,
std::vector<T>& out_values)
{
out_values.resize(comm.size());
::boost::parallel::mpi::all_gather(comm, &in_value, 1, &out_values[0]);
::boost::mpi::all_gather(comm, &in_value, 1, &out_values[0]);
}
template<typename T>
@@ -77,9 +77,9 @@ all_gather(const communicator& comm, const T* in_values, int n,
std::vector<T>& out_values)
{
out_values.resize(comm.size() * n);
::boost::parallel::mpi::all_gather(comm, in_values, n, &out_values[0]);
::boost::mpi::all_gather(comm, in_values, n, &out_values[0]);
}
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_ALL_GATHER_HPP
#endif // BOOST_MPI_ALL_GATHER_HPP


@@ -9,14 +9,14 @@
// Andrew Lumsdaine
// Message Passing Interface 1.1 -- Section 4.9.1. Reduce
#ifndef BOOST_PARALLEL_MPI_ALL_REDUCE_HPP
#define BOOST_PARALLEL_MPI_ALL_REDUCE_HPP
#ifndef BOOST_MPI_ALL_REDUCE_HPP
#define BOOST_MPI_ALL_REDUCE_HPP
// All-reduce falls back to reduce() + broadcast() in some cases.
#include <boost/parallel/mpi/collectives/broadcast.hpp>
#include <boost/parallel/mpi/collectives/reduce.hpp>
#include <boost/mpi/collectives/broadcast.hpp>
#include <boost/mpi/collectives/reduce.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
namespace detail {
/**********************************************************************
@@ -32,7 +32,7 @@ namespace detail {
{
BOOST_MPI_CHECK_RESULT(MPI_Allreduce,
(const_cast<T*>(in_values), out_values, n,
boost::parallel::mpi::get_mpi_datatype<T>(),
boost::mpi::get_mpi_datatype<T>(),
is_mpi_op<Op, T>::op(), comm));
}
@@ -51,7 +51,7 @@ namespace detail {
user_op<Op, T> mpi_op(op);
BOOST_MPI_CHECK_RESULT(MPI_Allreduce,
(const_cast<T*>(in_values), out_values, n,
boost::parallel::mpi::get_mpi_datatype<T>(),
boost::mpi::get_mpi_datatype<T>(),
mpi_op.get_mpi_op(), comm));
}
@@ -93,10 +93,10 @@ template<typename T, typename Op>
T all_reduce(const communicator& comm, const T& in_value, Op op)
{
T result;
::boost::parallel::mpi::all_reduce(comm, in_value, result, op);
::boost::mpi::all_reduce(comm, in_value, result, op);
return result;
}
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_ALL_REDUCE_HPP
#endif // BOOST_MPI_ALL_REDUCE_HPP


@@ -5,21 +5,21 @@
// http://www.boost.org/LICENSE_1_0.txt)
// Message Passing Interface 1.1 -- Section 4.8. All-to-all
#ifndef BOOST_PARALLEL_MPI_ALL_TO_ALL_HPP
#define BOOST_PARALLEL_MPI_ALL_TO_ALL_HPP
#ifndef BOOST_MPI_ALL_TO_ALL_HPP
#define BOOST_MPI_ALL_TO_ALL_HPP
#include <boost/parallel/mpi/exception.hpp>
#include <boost/parallel/mpi/datatype.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <vector>
#include <boost/parallel/mpi/packed_oarchive.hpp>
#include <boost/parallel/mpi/packed_iarchive.hpp>
#include <boost/parallel/mpi/communicator.hpp>
#include <boost/parallel/mpi/environment.hpp>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/assert.hpp>
#include <boost/parallel/mpi/collectives_fwd.hpp>
#include <boost/parallel/mpi/allocator.hpp>
#include <boost/mpi/collectives_fwd.hpp>
#include <boost/mpi/allocator.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
namespace detail {
// We're performing an all-to-all with a type that has an
@@ -124,7 +124,7 @@ all_to_all(const communicator& comm, const std::vector<T>& in_values,
{
BOOST_ASSERT((int)in_values.size() == comm.size());
out_values.resize(comm.size());
::boost::parallel::mpi::all_to_all(comm, &in_values[0], &out_values[0]);
::boost::mpi::all_to_all(comm, &in_values[0], &out_values[0]);
}
template<typename T>
@@ -141,9 +141,9 @@ all_to_all(const communicator& comm, const std::vector<T>& in_values, int n,
{
BOOST_ASSERT((int)in_values.size() == comm.size() * n);
out_values.resize(comm.size() * n);
::boost::parallel::mpi::all_to_all(comm, &in_values[0], n, &out_values[0]);
::boost::mpi::all_to_all(comm, &in_values[0], n, &out_values[0]);
}
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_ALL_TO_ALL_HPP
#endif // BOOST_MPI_ALL_TO_ALL_HPP
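
A sketch of the vector overload shown above, assuming one value per process (`world` is illustrative):

    // Rank r sends r*size+p to peer p and receives one value from every peer.
    #include <boost/mpi.hpp>
    #include <vector>

    int main(int argc, char* argv[])
    {
      boost::mpi::environment env(argc, argv);
      boost::mpi::communicator world;
      std::vector<int> outgoing(world.size()), incoming;
      for (int p = 0; p < world.size(); ++p)
        outgoing[p] = world.rank() * world.size() + p;  // value destined for rank p
      boost::mpi::all_to_all(world, outgoing, incoming); // incoming[p] came from rank p
      return 0;
    }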


@@ -5,15 +5,16 @@
// http://www.boost.org/LICENSE_1_0.txt)
// Message Passing Interface 1.1 -- Section 4.4. Broadcast
#ifndef BOOST_PARALLEL_MPI_BROADCAST_HPP
#define BOOST_PARALLEL_MPI_BROADCAST_HPP
#ifndef BOOST_MPI_BROADCAST_HPP
#define BOOST_MPI_BROADCAST_HPP
#include <boost/parallel/mpi/collectives_fwd.hpp>
#include <boost/parallel/mpi/exception.hpp>
#include <boost/parallel/mpi/datatype.hpp>
#include <boost/parallel/mpi/communicator.hpp>
#include <boost/mpi/collectives_fwd.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/mpi/communicator.hpp>
namespace boost { namespace mpi {
namespace boost { namespace parallel { namespace mpi {
/************************************************************************
* Specializations *
************************************************************************/
@@ -94,7 +95,7 @@ namespace detail {
{
BOOST_MPI_CHECK_RESULT(MPI_Bcast,
(values, n,
boost::parallel::mpi::get_mpi_datatype<T>(),
boost::mpi::get_mpi_datatype<T>(),
root, MPI_Comm(comm)));
}
@@ -133,12 +134,12 @@ void broadcast(const communicator& comm, T* values, int n, int root)
detail::broadcast_impl(comm, values, n, root, is_mpi_datatype<T>());
}
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
// If the user has already included skeleton_and_content.hpp, include
// the code to broadcast skeletons and content.
#ifdef BOOST_PARALLEL_MPI_SKELETON_AND_CONTENT_HPP
# include <boost/parallel/mpi/detail/broadcast_sc.hpp>
#ifdef BOOST_MPI_SKELETON_AND_CONTENT_HPP
# include <boost/mpi/detail/broadcast_sc.hpp>
#endif
#endif // BOOST_PARALLEL_MPI_BROADCAST_HPP
#endif // BOOST_MPI_BROADCAST_HPP
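
A minimal broadcast sketch against the renamed header; the serialization include is needed for std::string and is an assumption of this example, not part of the diff:

    #include <boost/mpi.hpp>
    #include <boost/serialization/string.hpp>
    #include <string>

    int main(int argc, char* argv[])
    {
      boost::mpi::environment env(argc, argv);
      boost::mpi::communicator world;
      std::string msg;
      if (world.rank() == 0) msg = "hello";
      boost::mpi::broadcast(world, msg, 0);  // afterwards every rank holds "hello"
      return 0;
    }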


@@ -5,20 +5,20 @@
// http://www.boost.org/LICENSE_1_0.txt)
// Message Passing Interface 1.1 -- Section 4.5. Gather
#ifndef BOOST_PARALLEL_MPI_GATHER_HPP
#define BOOST_PARALLEL_MPI_GATHER_HPP
#ifndef BOOST_MPI_GATHER_HPP
#define BOOST_MPI_GATHER_HPP
#include <boost/parallel/mpi/exception.hpp>
#include <boost/parallel/mpi/datatype.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <vector>
#include <boost/parallel/mpi/packed_oarchive.hpp>
#include <boost/parallel/mpi/packed_iarchive.hpp>
#include <boost/parallel/mpi/detail/point_to_point.hpp>
#include <boost/parallel/mpi/communicator.hpp>
#include <boost/parallel/mpi/environment.hpp>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/assert.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
namespace detail {
// We're gathering at the root for a type that has an associated MPI
@@ -102,7 +102,7 @@ gather(const communicator& comm, const T& in_value, std::vector<T>& out_values,
if (comm.rank() == root)
out_values.resize(comm.size());
::boost::parallel::mpi::gather(comm, in_value, &out_values[0], root);
::boost::mpi::gather(comm, in_value, &out_values[0], root);
}
template<typename T>
@@ -129,7 +129,7 @@ void
gather(const communicator& comm, const T* in_values, int n,
std::vector<T>& out_values, int root)
{
::boost::parallel::mpi::gather(comm, in_values, n, &out_values[0], root);
::boost::mpi::gather(comm, in_values, n, &out_values[0], root);
}
template<typename T>
@@ -140,6 +140,6 @@ void gather(const communicator& comm, const T* in_values, int n, int root)
}
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_GATHER_HPP
#endif // BOOST_MPI_GATHER_HPP


@@ -9,29 +9,29 @@
// Andrew Lumsdaine
// Message Passing Interface 1.1 -- Section 4.9.1. Reduce
#ifndef BOOST_PARALLEL_MPI_REDUCE_HPP
#define BOOST_PARALLEL_MPI_REDUCE_HPP
#ifndef BOOST_MPI_REDUCE_HPP
#define BOOST_MPI_REDUCE_HPP
#include <boost/parallel/mpi/exception.hpp>
#include <boost/parallel/mpi/datatype.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
// For (de-)serializing sends and receives
#include <boost/parallel/mpi/packed_oarchive.hpp>
#include <boost/parallel/mpi/packed_iarchive.hpp>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
// For packed_[io]archive sends and receives
#include <boost/parallel/mpi/detail/point_to_point.hpp>
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/parallel/mpi/communicator.hpp>
#include <boost/parallel/mpi/environment.hpp>
#include <boost/parallel/mpi/detail/computation_tree.hpp>
#include <boost/parallel/mpi/operations.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/detail/computation_tree.hpp>
#include <boost/mpi/operations.hpp>
#include <algorithm>
#include <exception>
#include <boost/assert.hpp>
#include <boost/scoped_array.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
/************************************************************************
@@ -51,7 +51,7 @@ namespace detail {
{
BOOST_MPI_CHECK_RESULT(MPI_Reduce,
(const_cast<T*>(in_values), out_values, n,
boost::parallel::mpi::get_mpi_datatype<T>(),
boost::mpi::get_mpi_datatype<T>(),
is_mpi_op<Op, T>::op(), root, comm));
}
@@ -64,7 +64,7 @@ namespace detail {
{
BOOST_MPI_CHECK_RESULT(MPI_Reduce,
(const_cast<T*>(in_values), 0, n,
boost::parallel::mpi::get_mpi_datatype<T>(),
boost::mpi::get_mpi_datatype<T>(),
is_mpi_op<Op, T>::op(), root, comm));
}
@@ -84,7 +84,7 @@ namespace detail {
user_op<Op, T> mpi_op(op);
BOOST_MPI_CHECK_RESULT(MPI_Reduce,
(const_cast<T*>(in_values), out_values, n,
boost::parallel::mpi::get_mpi_datatype<T>(),
boost::mpi::get_mpi_datatype<T>(),
mpi_op.get_mpi_op(), root, comm));
}
@@ -99,7 +99,7 @@ namespace detail {
user_op<Op, T> mpi_op(op);
BOOST_MPI_CHECK_RESULT(MPI_Reduce,
(const_cast<T*>(in_values), 0, n,
boost::parallel::mpi::get_mpi_datatype<T>(),
boost::mpi::get_mpi_datatype<T>(),
mpi_op.get_mpi_op(), root, comm));
}
@@ -352,6 +352,6 @@ void reduce(const communicator& comm, const T& in_value, Op op, int root)
is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_REDUCE_HPP
#endif // BOOST_MPI_REDUCE_HPP


@@ -9,28 +9,28 @@
// Andrew Lumsdaine
// Message Passing Interface 1.1 -- Section 4.9.1. Scan
#ifndef BOOST_PARALLEL_MPI_SCAN_HPP
#define BOOST_PARALLEL_MPI_SCAN_HPP
#ifndef BOOST_MPI_SCAN_HPP
#define BOOST_MPI_SCAN_HPP
#include <boost/parallel/mpi/exception.hpp>
#include <boost/parallel/mpi/datatype.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
// For (de-)serializing sends and receives
#include <boost/parallel/mpi/packed_oarchive.hpp>
#include <boost/parallel/mpi/packed_iarchive.hpp>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
// For packed_[io]archive sends and receives
#include <boost/parallel/mpi/detail/point_to_point.hpp>
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/parallel/mpi/communicator.hpp>
#include <boost/parallel/mpi/environment.hpp>
#include <boost/parallel/mpi/detail/computation_tree.hpp>
#include <boost/parallel/mpi/operations.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/detail/computation_tree.hpp>
#include <boost/mpi/operations.hpp>
#include <algorithm>
#include <exception>
#include <boost/assert.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
/************************************************************************
@@ -51,7 +51,7 @@ namespace detail {
{
BOOST_MPI_CHECK_RESULT(MPI_Scan,
(const_cast<T*>(in_values), out_values, n,
boost::parallel::mpi::get_mpi_datatype<T>(),
boost::mpi::get_mpi_datatype<T>(),
is_mpi_op<Op, T>::op(), comm));
}
@@ -70,7 +70,7 @@ namespace detail {
user_op<Op, T> mpi_op(op);
BOOST_MPI_CHECK_RESULT(MPI_Scan,
(const_cast<T*>(in_values), out_values, n,
boost::parallel::mpi::get_mpi_datatype<T>(),
boost::mpi::get_mpi_datatype<T>(),
mpi_op.get_mpi_op(), comm));
}
@@ -163,6 +163,6 @@ scan(const communicator& comm, const T& in_value, Op op)
return out_value;
}
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_SCAN_HPP
#endif // BOOST_MPI_SCAN_HPP


@@ -5,20 +5,20 @@
// http://www.boost.org/LICENSE_1_0.txt)
// Message Passing Interface 1.1 -- Section 4.6. Scatter
#ifndef BOOST_PARALLEL_MPI_SCATTER_HPP
#define BOOST_PARALLEL_MPI_SCATTER_HPP
#ifndef BOOST_MPI_SCATTER_HPP
#define BOOST_MPI_SCATTER_HPP
#include <boost/parallel/mpi/exception.hpp>
#include <boost/parallel/mpi/datatype.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <vector>
#include <boost/parallel/mpi/packed_oarchive.hpp>
#include <boost/parallel/mpi/packed_iarchive.hpp>
#include <boost/parallel/mpi/detail/point_to_point.hpp>
#include <boost/parallel/mpi/communicator.hpp>
#include <boost/parallel/mpi/environment.hpp>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/assert.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
namespace detail {
// We're scattering from the root for a type that has an associated MPI
@@ -112,9 +112,9 @@ scatter(const communicator& comm, const std::vector<T>& in_values, T& out_value,
int root)
{
if (comm.rank() == root)
::boost::parallel::mpi::scatter(comm, &in_values[0], out_value, root);
::boost::mpi::scatter(comm, &in_values[0], out_value, root);
else
::boost::parallel::mpi::scatter(comm, static_cast<const T*>(0), out_value,
::boost::mpi::scatter(comm, static_cast<const T*>(0), out_value,
root);
}
@@ -143,9 +143,9 @@ scatter(const communicator& comm, const std::vector<T>& in_values,
T* out_values, int n, int root)
{
if (comm.rank() == root)
::boost::parallel::mpi::scatter(comm, &in_values[0], out_values, n, root);
::boost::mpi::scatter(comm, &in_values[0], out_values, n, root);
else
::boost::parallel::mpi::scatter(comm, static_cast<const T*>(0), out_values,
::boost::mpi::scatter(comm, static_cast<const T*>(0), out_values,
n, root);
}
@@ -156,6 +156,6 @@ void scatter(const communicator& comm, T* out_values, int n, int root)
detail::scatter_impl(comm, out_values, n, root, is_mpi_datatype<T>());
}
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_SCATTER_HPP
#endif // BOOST_MPI_SCATTER_HPP
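
A sketch of the vector-to-scalar scatter overload above (the values are illustrative; only the root's input vector is read):

    #include <boost/mpi.hpp>
    #include <vector>

    int main(int argc, char* argv[])
    {
      boost::mpi::environment env(argc, argv);
      boost::mpi::communicator world;
      std::vector<int> all;
      if (world.rank() == 0)                      // root supplies one value per rank
        for (int p = 0; p < world.size(); ++p)
          all.push_back(p * p);
      int mine = 0;
      boost::mpi::scatter(world, all, mine, 0);   // rank p receives p*p
      return 0;
    }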


@@ -11,13 +11,13 @@
* This header provides forward declarations for all of the
* collective operations contained in the header @c collectives.hpp.
*/
#ifndef BOOST_PARALLEL_MPI_COLLECTIVES_FWD_HPP
#define BOOST_PARALLEL_MPI_COLLECTIVES_FWD_HPP
#ifndef BOOST_MPI_COLLECTIVES_FWD_HPP
#define BOOST_MPI_COLLECTIVES_FWD_HPP
/// INTERNAL ONLY
#define BOOST_PARALLEL_MPI_COLLECTIVES_FORWARD_ONLY
#include <boost/parallel/mpi/collectives.hpp>
#undef BOOST_PARALLEL_MPI_COLLECTIVES_FORWARD_ONLY
#define BOOST_MPI_COLLECTIVES_FORWARD_ONLY
#include <boost/mpi/collectives.hpp>
#undef BOOST_MPI_COLLECTIVES_FORWARD_ONLY
#endif // BOOST_PARALLEL_MPI_COLLECTIVES_FWD_HPP
#endif // BOOST_MPI_COLLECTIVES_FWD_HPP


@@ -10,32 +10,32 @@
* of all communication within Boost.MPI, and provides point-to-point
* communication operations.
*/
#ifndef BOOST_PARALLEL_MPI_COMMUNICATOR_HPP
#define BOOST_PARALLEL_MPI_COMMUNICATOR_HPP
#ifndef BOOST_MPI_COMMUNICATOR_HPP
#define BOOST_MPI_COMMUNICATOR_HPP
#include <boost/parallel/mpi/exception.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/optional.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/parallel/mpi/datatype.hpp>
#include <boost/mpi/datatype.hpp>
#include <utility>
#include <iterator>
#include <stdexcept> // for std::range_error
// For (de-)serializing sends and receives
#include <boost/parallel/mpi/packed_oarchive.hpp>
#include <boost/parallel/mpi/packed_iarchive.hpp>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
// For (de-)serializing skeletons and content
#include <boost/parallel/mpi/skeleton_and_content_fwd.hpp>
#include <boost/mpi/skeleton_and_content_fwd.hpp>
// For (de-)serializing arrays
#include <boost/serialization/array.hpp>
#include <boost/parallel/mpi/detail/point_to_point.hpp>
#include <boost/parallel/mpi/status.hpp>
#include <boost/parallel/mpi/request.hpp>
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/mpi/status.hpp>
#include <boost/mpi/request.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
/**
* @brief A constant representing "any process."
@@ -1572,12 +1572,12 @@ communicator::irecv<content>(int source, int tag,
}
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
// If the user has already included skeleton_and_content.hpp, include
// the code to send/receive skeletons and content.
#ifdef BOOST_PARALLEL_MPI_SKELETON_AND_CONTENT_HPP
# include <boost/parallel/mpi/detail/communicator_sc.hpp>
#ifdef BOOST_MPI_SKELETON_AND_CONTENT_HPP
# include <boost/mpi/detail/communicator_sc.hpp>
#endif
#endif // BOOST_PARALLEL_MPI_COMMUNICATOR_HPP
#endif // BOOST_MPI_COMMUNICATOR_HPP
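
A point-to-point sketch against the renamed communicator class, assuming a run with at least two processes (`payload` and the tag value 0 are illustrative):

    #include <boost/mpi.hpp>
    #include <boost/serialization/string.hpp>
    #include <string>

    int main(int argc, char* argv[])
    {
      boost::mpi::environment env(argc, argv);
      boost::mpi::communicator world;
      if (world.rank() == 0) {
        std::string payload("ping");
        world.send(1, 0, payload);                        // dest, tag, value
      } else if (world.rank() == 1) {
        std::string payload;
        boost::mpi::status s = world.recv(0, 0, payload); // source, tag, value
      }
      return 0;
    }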


@@ -9,8 +9,8 @@
* This header provides MPI configuration details that expose the
* capabilities of the underlying MPI implementation.
*/
#ifndef BOOST_PARALLEL_MPI_CONFIG_HPP
#define BOOST_PARALLEL_MPI_CONFIG_HPP
#ifndef BOOST_MPI_CONFIG_HPP
#define BOOST_MPI_CONFIG_HPP
#include <mpi.h>
@@ -47,4 +47,4 @@
// Configuration for MPICH
#endif
#endif // BOOST_PARALLEL_MPI_CONFIG_HPP
#endif // BOOST_MPI_CONFIG_HPP


@@ -14,18 +14,18 @@
*
* This header provides the mapping from C++ types to MPI data types.
*/
#ifndef BOOST_PARALLEL_MPI_DATATYPE_HPP
#define BOOST_PARALLEL_MPI_DATATYPE_HPP
#ifndef BOOST_MPI_DATATYPE_HPP
#define BOOST_MPI_DATATYPE_HPP
#include <boost/parallel/mpi/datatype_fwd.hpp>
#include <boost/mpi/datatype_fwd.hpp>
#include <mpi.h>
#include <boost/config.hpp>
#include <boost/mpl/bool.hpp>
#include <boost/mpl/or.hpp>
#include <boost/parallel/mpi/detail/mpi_datatype_cache.hpp>
#include <boost/mpi/detail/mpi_datatype_cache.hpp>
#include <boost/mpl/assert.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
/**
* @brief Type trait that determines if there exists a built-in
@@ -142,10 +142,10 @@ struct is_mpi_builtin_datatype
* mpl::true_:
*
* @code
* namespace boost { namespace parallel { namespace mpi {
* namespace boost { namespace mpi {
* template<> struct is_mpi_datatype<point>
* : public mpl::true_ { };
* } } }
* } }
* @endcode
*/
template<typename T>
@@ -288,6 +288,6 @@ struct is_mpi_datatype<bool>
: boost::mpl::bool_<true>
{};
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_MPI_DATATYPE_HPP
#endif // BOOST_MPI_MPI_DATATYPE_HPP
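
Expanding the doc snippet above into a self-contained sketch; the `point` struct is hypothetical:

    #include <boost/mpi/datatype.hpp>

    struct point {
      double x, y, z;
      template<class Archive>
      void serialize(Archive& ar, const unsigned int /*version*/)
      { ar & x & y & z; }
    };

    namespace boost { namespace mpi {
      // Promise that point maps to a fixed MPI datatype, so it is sent
      // directly instead of being serialized into a packed buffer.
      template<> struct is_mpi_datatype<point> : public mpl::true_ { };
    } }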


@@ -11,12 +11,12 @@
* user-defined C++ classes that need to specialize @c
* is_mpi_datatype.
*/
#ifndef BOOST_PARALLEL_MPI_DATATYPE_FWD_HPP
#define BOOST_PARALLEL_MPI_DATATYPE_FWD_HPP
#ifndef BOOST_MPI_DATATYPE_FWD_HPP
#define BOOST_MPI_DATATYPE_FWD_HPP
#include <mpi.h>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
template<typename T> struct is_mpi_builtin_datatype;
template<typename T> struct is_mpi_integer_datatype;
@@ -27,6 +27,6 @@ template<typename T> struct is_mpi_byte_datatype;
template<typename T> struct is_mpi_datatype;
template<typename T> MPI_Datatype get_mpi_datatype(const T& x = T());
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_MPI_DATATYPE_FWD_HPP
#endif // BOOST_MPI_MPI_DATATYPE_FWD_HPP


@@ -8,10 +8,10 @@
// This header may only be included after both the broadcast.hpp and
// and skeleton_and_content.hpp headers have been included.
#ifndef BOOST_PARALLEL_MPI_BROADCAST_SC_HPP
#define BOOST_PARALLEL_MPI_BROADCAST_SC_HPP
#ifndef BOOST_MPI_BROADCAST_SC_HPP
#define BOOST_MPI_BROADCAST_SC_HPP
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
template<typename T>
inline void
@@ -36,6 +36,6 @@ broadcast(const communicator& comm, const skeleton_proxy<T>& proxy, int root)
}
}
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_BROADCAST_SC_HPP
#endif // BOOST_MPI_BROADCAST_SC_HPP


@@ -8,10 +8,10 @@
// This header should be included only after both communicator.hpp and
// skeleton_and_content.hpp have been included.
#ifndef BOOST_PARALLEL_MPI_COMMUNICATOR_SC_HPP
#define BOOST_PARALLEL_MPI_COMMUNICATOR_SC_HPP
#ifndef BOOST_MPI_COMMUNICATOR_SC_HPP
#define BOOST_MPI_COMMUNICATOR_SC_HPP
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
template<typename T>
void
@@ -90,7 +90,7 @@ namespace detail {
};
}
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_COMMUNICATOR_SC_HPP
#endif // BOOST_MPI_COMMUNICATOR_SC_HPP


@@ -6,10 +6,10 @@
// Compute parents, children, levels, etc. to effect a parallel
// computation tree.
#ifndef BOOST_PARALLEL_MPI_COMPUTATION_TREE_HPP
#define BOOST_PARALLEL_MPI_COMPUTATION_TREE_HPP
#ifndef BOOST_MPI_COMPUTATION_TREE_HPP
#define BOOST_MPI_COMPUTATION_TREE_HPP
namespace boost { namespace parallel { namespace mpi { namespace detail {
namespace boost { namespace mpi { namespace detail {
/**
* @brief Aids tree-based parallel collective algorithms.
@@ -81,6 +81,6 @@ class computation_tree
int level_;
};
} } } } // end namespace boost::parallel::mpi::detail
} } } // end namespace boost::mpi::detail
#endif // BOOST_PARALLEL_MPI_COMPUTATION_TREE_HPP
#endif // BOOST_MPI_COMPUTATION_TREE_HPP


@@ -6,16 +6,16 @@
// Authors: Matthias Troyer
#ifndef BOOST_PARALLEL_MPI_DETAIL_CONTENT_OARCHIVE_HPP
#define BOOST_PARALLEL_MPI_DETAIL_CONTENT_OARCHIVE_HPP
#ifndef BOOST_MPI_DETAIL_CONTENT_OARCHIVE_HPP
#define BOOST_MPI_DETAIL_CONTENT_OARCHIVE_HPP
#include <boost/archive/detail/auto_link_archive.hpp>
#include <boost/archive/basic_archive.hpp>
#include <boost/parallel/mpi/detail/ignore_skeleton_oarchive.hpp>
#include <boost/parallel/mpi/detail/mpi_datatype_primitive.hpp>
#include <boost/parallel/mpi/datatype.hpp>
#include <boost/mpi/detail/ignore_skeleton_oarchive.hpp>
#include <boost/mpi/detail/mpi_datatype_primitive.hpp>
#include <boost/mpi/datatype.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
namespace detail {
// an archive wrapper that stores only the data members but not the
@@ -56,6 +56,6 @@ const content get_content(const T& x)
return ar.get_content();
}
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_DETAIL_CONTENT_OARCHIVE_HPP
#endif // BOOST_MPI_DETAIL_CONTENT_OARCHIVE_HPP


@@ -8,10 +8,10 @@
#include <boost/serialization/array.hpp>
#ifndef BOOST_PARALLEL_MPI_DETAIL_FORWARD_IPRIMITIVE_HPP
#define BOOST_PARALLEL_MPI_DETAIL_FORWARD_IPRIMITIVE_HPP
#ifndef BOOST_MPI_DETAIL_FORWARD_IPRIMITIVE_HPP
#define BOOST_MPI_DETAIL_FORWARD_IPRIMITIVE_HPP
namespace boost { namespace parallel { namespace mpi { namespace detail {
namespace boost { namespace mpi { namespace detail {
/// @brief a minimal input archive, which forwards reading to another archive
///
@@ -67,6 +67,6 @@ private:
implementation_archive_type& implementation_archive;
};
} } } } // end namespace boost::parallel::mpi::detail
} } } // end namespace boost::mpi::detail
#endif // BOOST_PARALLEL_MPI_DETAIL_FORWARD_IPRIMITIVE_HPP
#endif // BOOST_MPI_DETAIL_FORWARD_IPRIMITIVE_HPP


@@ -6,13 +6,13 @@
// Authors: Matthias Troyer
#ifndef BOOST_PARALLEL_MPI_DETAIL_FORWARD_OPRIMITIVE_HPP
#define BOOST_PARALLEL_MPI_DETAIL_FORWARD_OPRIMITIVE_HPP
#ifndef BOOST_MPI_DETAIL_FORWARD_OPRIMITIVE_HPP
#define BOOST_MPI_DETAIL_FORWARD_OPRIMITIVE_HPP
#include <boost/config.hpp>
#include <boost/serialization/array.hpp>
namespace boost { namespace parallel { namespace mpi { namespace detail {
namespace boost { namespace mpi { namespace detail {
/// @brief a minimal output archive, which forwards saving to another archive
///
@@ -68,6 +68,6 @@ private:
implementation_archive_type& implementation_archive;
};
} } } } // end namespace boost::parallel::mpi::detail
} } } // end namespace boost::mpi::detail
#endif // BOOST_PARALLEL_MPI_DETAIL_FORWARD_OPRIMITIVE_HPP
#endif // BOOST_MPI_DETAIL_FORWARD_OPRIMITIVE_HPP


@@ -6,8 +6,8 @@
// Authors: Matthias Troyer
#ifndef BOOST_PARALLEL_MPI_DETAIL_FORWARD_SKELETON_IARCHIVE_HPP
#define BOOST_PARALLEL_MPI_DETAIL_FORWARD_SKELETON_IARCHIVE_HPP
#ifndef BOOST_MPI_DETAIL_FORWARD_SKELETON_IARCHIVE_HPP
#define BOOST_MPI_DETAIL_FORWARD_SKELETON_IARCHIVE_HPP
#include <boost/pfto.hpp>
@@ -18,7 +18,7 @@
#include <boost/serialization/collection_size_type.hpp>
#include <boost/archive/array/iarchive.hpp>
namespace boost { namespace parallel { namespace mpi { namespace detail {
namespace boost { namespace mpi { namespace detail {
template<class Archive, class ImplementationArchive>
class forward_skeleton_iarchive
@@ -73,6 +73,6 @@ protected:
};
} } } } // end namespace boost::parallel::mpi::detail
} } } // end namespace boost::mpi::detail
#endif // BOOST_PARALLEL_MPI_DETAIL_FORWARD_STRUCTURE_IARCHIVE_HPP
#endif // BOOST_MPI_DETAIL_FORWARD_STRUCTURE_IARCHIVE_HPP


@@ -6,8 +6,8 @@
// Authors: Matthias Troyer
#ifndef BOOST_PARALLEL_MPI_DETAIL_FORWARD_SKELETON_OARCHIVE_HPP
#define BOOST_PARALLEL_MPI_DETAIL_FORWARD_SKELETON_OARCHIVE_HPP
#ifndef BOOST_MPI_DETAIL_FORWARD_SKELETON_OARCHIVE_HPP
#define BOOST_MPI_DETAIL_FORWARD_SKELETON_OARCHIVE_HPP
#include <boost/pfto.hpp>
@@ -18,7 +18,7 @@
#include <boost/serialization/collection_size_type.hpp>
#include <boost/archive/array/oarchive.hpp>
namespace boost { namespace parallel { namespace mpi { namespace detail {
namespace boost { namespace mpi { namespace detail {
template<class Archive, class ImplementationArchive>
class forward_skeleton_oarchive
@@ -74,6 +74,6 @@ protected:
implementation_archive_type& implementation_archive;
};
} } } } // end namespace boost::parallel::mpi::detail
} } } // end namespace boost::mpi::detail
#endif // BOOST_PARALLEL_MPI_DETAIL_FORWARD_STRUCTURE_OARCHIVE_HPP
#endif // BOOST_MPI_DETAIL_FORWARD_STRUCTURE_OARCHIVE_HPP


@@ -6,15 +6,15 @@
// Authors: Matthias Troyer
#ifndef BOOST_PARALLEL_MPI_DETAIL_IGNORE_IPRIMITIVE_HPP
#define BOOST_PARALLEL_MPI_DETAIL_IGNORE_IPRIMITIVE_HPP
#ifndef BOOST_MPI_DETAIL_IGNORE_IPRIMITIVE_HPP
#define BOOST_MPI_DETAIL_IGNORE_IPRIMITIVE_HPP
#include <boost/config.hpp>
#include <boost/parallel/mpi/datatype.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/serialization/array.hpp>
namespace boost { namespace parallel { namespace mpi { namespace detail {
namespace boost { namespace mpi { namespace detail {
/// @brief a minimal input archive, which ignores any load
///
@@ -56,6 +56,6 @@ public:
}
};
} } } } // end namespace boost::parallel::mpi::detail
} } } // end namespace boost::mpi::detail
#endif // BOOST_PARALLEL_MPI_DETAIL_IGNORE_IPRIMITIVE_HPP
#endif // BOOST_MPI_DETAIL_IGNORE_IPRIMITIVE_HPP


@@ -6,14 +6,14 @@
// Authors: Matthias Troyer
#ifndef BOOST_PARALLEL_MPI_DETAIL_IGNORE_OPRIMITIVE_HPP
#define BOOST_PARALLEL_MPI_DETAIL_IGNORE_OPRIMITIVE_HPP
#ifndef BOOST_MPI_DETAIL_IGNORE_OPRIMITIVE_HPP
#define BOOST_MPI_DETAIL_IGNORE_OPRIMITIVE_HPP
#include <boost/config.hpp>
#include <boost/parallel/mpi/datatype.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/serialization/array.hpp>
namespace boost { namespace parallel { namespace mpi { namespace detail {
namespace boost { namespace mpi { namespace detail {
/// @brief a minimal output archive, which ignores any save
///
@@ -57,6 +57,6 @@ public:
}
};
} } } } // end namespace boost::parallel::mpi::detail
} } } // end namespace boost::mpi::detail
#endif // BOOST_PARALLEL_MPI_DETAIL_IGNORE_OPRIMITIVE_HPP
#endif // BOOST_MPI_DETAIL_IGNORE_OPRIMITIVE_HPP


@@ -6,8 +6,8 @@
// Authors: Matthias Troyer
#ifndef BOOST_PARALLEL_MPI_DETAIL_IGNORE_SKELETON_OARCHIVE_HPP
#define BOOST_PARALLEL_MPI_DETAIL_IGNORE_SKELETON_OARCHIVE_HPP
#ifndef BOOST_MPI_DETAIL_IGNORE_SKELETON_OARCHIVE_HPP
#define BOOST_MPI_DETAIL_IGNORE_SKELETON_OARCHIVE_HPP
#include <boost/pfto.hpp>
@@ -17,7 +17,7 @@
#include <boost/archive/detail/oserializer.hpp>
#include <boost/serialization/collection_size_type.hpp>
namespace boost { namespace parallel { namespace mpi { namespace detail {
namespace boost { namespace mpi { namespace detail {
template<class Archive>
class ignore_skeleton_oarchive
@@ -65,6 +65,6 @@ BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(serialization::collection_size_type)
};
} } } } // end namespace boost::parallel::mpi::detail
} } } // end namespace boost::mpi::detail
#endif // BOOST_PARALLEL_MPI_DETAIL_IGNORE_SKELETON_OARCHIVE_HPP
#endif // BOOST_MPI_DETAIL_IGNORE_SKELETON_OARCHIVE_HPP


@@ -6,19 +6,19 @@
// Authors: Matthias Troyer
#ifndef BOOST_PARALLEL_MPI_DETAIL_TYPE_MPI_DATATYPE_CACHE_HPP
#define BOOST_PARALLEL_MPI_DETAIL_TYPE_MPI_DATATYPE_CACHE_HPP
#ifndef BOOST_MPI_DETAIL_TYPE_MPI_DATATYPE_CACHE_HPP
#define BOOST_MPI_DETAIL_TYPE_MPI_DATATYPE_CACHE_HPP
#include <boost/parallel/mpi/datatype_fwd.hpp>
#include <boost/parallel/mpi/detail/mpi_datatype_oarchive.hpp>
#include <boost/parallel/mpi/exception.hpp>
#include <boost/mpi/datatype_fwd.hpp>
#include <boost/mpi/detail/mpi_datatype_oarchive.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/utility/enable_if.hpp>
#include <boost/mpl/assert.hpp>
#include <boost/noncopyable.hpp>
#include <map>
#include <typeinfo>
namespace boost { namespace parallel { namespace mpi { namespace detail {
namespace boost { namespace mpi { namespace detail {
/// @brief comparison function object for two std::type_info pointers
///
@@ -92,7 +92,7 @@ private:
extern mpi_datatype_map mpi_datatype_cache;
} } } } // end namespace boost::parallel::mpi::detail
} } } // end namespace boost::mpi::detail
#endif // BOOST_PARALLEL_MPI_DETAIL_TYPE_MPI_DATATYPE_CACHE_HPP
#endif // BOOST_MPI_DETAIL_TYPE_MPI_DATATYPE_CACHE_HPP


@@ -6,18 +6,18 @@
// Authors: Matthias Troyer
#ifndef BOOST_PARALLEL_MPI_DETAIL_MPI_DATATYPE_OARCHIVE_HPP
#define BOOST_PARALLEL_MPI_DETAIL_MPI_DATATYPE_OARCHIVE_HPP
#ifndef BOOST_MPI_DETAIL_MPI_DATATYPE_OARCHIVE_HPP
#define BOOST_MPI_DETAIL_MPI_DATATYPE_OARCHIVE_HPP
#include <boost/archive/detail/oserializer.hpp>
#include <boost/archive/detail/auto_link_archive.hpp>
#include <boost/archive/basic_archive.hpp>
#include <boost/parallel/mpi/detail/ignore_skeleton_oarchive.hpp>
#include <boost/parallel/mpi/detail/mpi_datatype_primitive.hpp>
#include <boost/parallel/mpi/datatype_fwd.hpp>
#include <boost/mpi/detail/ignore_skeleton_oarchive.hpp>
#include <boost/mpi/detail/mpi_datatype_primitive.hpp>
#include <boost/mpi/datatype_fwd.hpp>
#include <boost/mpl/assert.hpp>
namespace boost { namespace parallel { namespace mpi { namespace detail {
namespace boost { namespace mpi { namespace detail {
// an archive wrapper that stores only the data members but not the
@@ -38,6 +38,6 @@ public:
}
};
} } } } // end namespace boost::parallel::mpi::detail
} } } // end namespace boost::mpi::detail
#endif // BOOST_PARALLEL_MPI_DETAIL_MPI_DATATYPE_OARCHIVE_HPP
#endif // BOOST_MPI_DETAIL_MPI_DATATYPE_OARCHIVE_HPP


@@ -6,8 +6,8 @@
// Authors: Matthias Troyer
#ifndef BOOST_PARALLEL_MPI_DETAIL_MPI_DATATYPE_OPRIMITIVE_HPP
#define BOOST_PARALLEL_MPI_DETAIL_MPI_DATATYPE_OPRIMITIVE_HPP
#ifndef BOOST_MPI_DETAIL_MPI_DATATYPE_OPRIMITIVE_HPP
#define BOOST_MPI_DETAIL_MPI_DATATYPE_OPRIMITIVE_HPP
#include <mpi.h>
#include <cstddef> // size_t
@@ -19,8 +19,8 @@ namespace std{
} // namespace std
#endif
#include <boost/parallel/mpi/datatype_fwd.hpp>
#include <boost/parallel/mpi/exception.hpp>
#include <boost/mpi/datatype_fwd.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/throw_exception.hpp>
#include <boost/assert.hpp>
#include <boost/mpl/placeholders.hpp>
@@ -30,7 +30,7 @@ namespace std{
#include <iostream>
#include <vector>
namespace boost { namespace parallel { namespace mpi { namespace detail {
namespace boost { namespace mpi { namespace detail {
/////////////////////////////////////////////////////////////////////////
// class mpi_data_type_oprimitive - creation of custom MPI data types
@@ -62,7 +62,7 @@ public:
void save_array(serialization::array<T> const& x, unsigned int /* version */)
{
if (x.count())
save_impl(x.address(), boost::parallel::mpi::get_mpi_datatype(*x.address()), x.count());
save_impl(x.address(), boost::mpi::get_mpi_datatype(*x.address()), x.count());
}
typedef is_mpi_datatype<mpl::_1> use_array_optimization;
@@ -93,7 +93,7 @@ public:
template<class T>
void save(const T & t)
{
save_impl(&t, boost::parallel::mpi::get_mpi_datatype(t), 1);
save_impl(&t, boost::mpi::get_mpi_datatype(t), 1);
}
private:
@@ -122,7 +122,7 @@ private:
};
} } } } // end namespace boost::parallel::mpi::detail
} } } // end namespace boost::mpi::detail
#endif // BOOST_PARALLEL_MPI_DETAIL_MPI_DATATYPE_OPRIMITIVE_HPP
#endif // BOOST_MPI_DETAIL_MPI_DATATYPE_OPRIMITIVE_HPP


@@ -6,22 +6,22 @@
// Authors: Matthias Troyer
#ifndef BOOST_PARALLEL_MPI_PACKED_IPRIMITIVE_HPP
#define BOOST_PARALLEL_MPI_PACKED_IPRIMITIVE_HPP
#ifndef BOOST_MPI_PACKED_IPRIMITIVE_HPP
#define BOOST_MPI_PACKED_IPRIMITIVE_HPP
#include <mpi.h>
#include <iostream>
#include <cstddef> // size_t
#include <boost/config.hpp>
#include <boost/parallel/mpi/datatype.hpp>
#include <boost/parallel/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/assert.hpp>
#include <boost/serialization/array.hpp>
#include <boost/serialization/detail/get_data.hpp>
#include <vector>
#include <boost/parallel/mpi/allocator.hpp>
#include <boost/mpi/allocator.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
/// deserialization using MPI_Unpack
@@ -114,6 +114,6 @@ private:
int position;
};
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_PACKED_IPRIMITIVE_HPP
#endif // BOOST_MPI_PACKED_IPRIMITIVE_HPP


@@ -6,23 +6,23 @@
// Authors: Matthias Troyer
#ifndef BOOST_PARALLEL_MPI_PACKED_OPRIMITIVE_HPP
#define BOOST_PARALLEL_MPI_PACKED_OPRIMITIVE_HPP
#ifndef BOOST_MPI_PACKED_OPRIMITIVE_HPP
#define BOOST_MPI_PACKED_OPRIMITIVE_HPP
#include <mpi.h>
#include <iostream>
#include <cstddef> // size_t
#include <boost/config.hpp>
#include <boost/parallel/mpi/datatype.hpp>
#include <boost/parallel/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/serialization/detail/get_data.hpp>
#include <boost/serialization/array.hpp>
#include <boost/assert.hpp>
#include <vector>
#include <boost/parallel/mpi/allocator.hpp>
#include <boost/mpi/allocator.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
/// serialization using MPI_Pack
@@ -110,6 +110,6 @@ private:
MPI_Comm comm;
};
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_PACKED_OPRIMITIVE_HPP
#endif // BOOST_MPI_PACKED_OPRIMITIVE_HPP


@@ -5,14 +5,14 @@
// http://www.boost.org/LICENSE_1_0.txt)
// Message Passing Interface 1.1 -- Section 3. MPI Point-to-point
#ifndef BOOST_PARALLEL_MPI_DETAIL_POINT_TO_POINT_HPP
#define BOOST_PARALLEL_MPI_DETAIL_POINT_TO_POINT_HPP
#ifndef BOOST_MPI_DETAIL_POINT_TO_POINT_HPP
#define BOOST_MPI_DETAIL_POINT_TO_POINT_HPP
// For (de-)serializing sends and receives
#include <boost/parallel/mpi/packed_oarchive.hpp>
#include <boost/parallel/mpi/packed_iarchive.hpp>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
namespace boost { namespace parallel { namespace mpi { namespace detail {
namespace boost { namespace mpi { namespace detail {
/** Sends a packed archive using MPI_Send. */
void
@@ -46,6 +46,6 @@ void
packed_archive_recv(MPI_Comm comm, int source, int tag, packed_iarchive& ar,
MPI_Status& status);
} } } } // end namespace boost::parallel::mpi::detail
} } } // end namespace boost::mpi::detail
#endif // BOOST_PARALLEL_MPI_DETAIL_POINT_TO_POINT_HPP
#endif // BOOST_MPI_DETAIL_POINT_TO_POINT_HPP


@@ -6,16 +6,16 @@
// Authors: Matthias Troyer
#ifndef BOOST_PARALLEL_MPI_TEXT_SKELETON_OARCHIVE_HPP
#define BOOST_PARALLEL_MPI_TEXT_SKELETON_OARCHIVE_HPP
#ifndef BOOST_MPI_TEXT_SKELETON_OARCHIVE_HPP
#define BOOST_MPI_TEXT_SKELETON_OARCHIVE_HPP
#include <boost/archive/detail/auto_link_archive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/parallel/mpi/detail/forward_skeleton_oarchive.hpp>
#include <boost/parallel/mpi/detail/ignore_oprimitive.hpp>
#include <boost/mpi/detail/forward_skeleton_oarchive.hpp>
#include <boost/mpi/detail/ignore_oprimitive.hpp>
#include <boost/archive/array/oarchive.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
// an archive that writes a text skeleton into a stream
@@ -33,7 +33,7 @@ private:
boost::archive::text_oarchive skeleton_archive_;
};
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_TEXT_SKELETON_OARCHIVE_HPP
#endif // BOOST_MPI_TEXT_SKELETON_OARCHIVE_HPP


@@ -10,15 +10,15 @@
* routines to initialize, finalize, and query the status of the
* Boost.MPI environment.
*/
#ifndef BOOST_PARALLEL_MPI_ENVIRONMENT_HPP
#define BOOST_PARALLEL_MPI_ENVIRONMENT_HPP
#ifndef BOOST_MPI_ENVIRONMENT_HPP
#define BOOST_MPI_ENVIRONMENT_HPP
#include <boost/noncopyable.hpp>
#include <boost/optional.hpp>
#include <string>
#include <boost/parallel/mpi/config.hpp>
#include <boost/mpi/config.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
/** @brief Initialize, finalize, and query the MPI environment.
*
@@ -196,6 +196,6 @@ private:
static const int num_reserved_tags = 1;
};
} } } /// end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_ENVIRONMENT_HPP
#endif // BOOST_MPI_ENVIRONMENT_HPP
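
The canonical hello-world under the new layout, as a sketch (the output formatting is illustrative):

    #include <boost/mpi.hpp>
    #include <iostream>

    int main(int argc, char* argv[])
    {
      boost::mpi::environment env(argc, argv); // initializes MPI; finalizes on destruction
      boost::mpi::communicator world;
      std::cout << "process " << world.rank() << " of " << world.size() << std::endl;
      return 0;
    }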


@@ -10,15 +10,15 @@
* the user and macros that translate MPI error codes into Boost.MPI
* exceptions.
*/
#ifndef BOOST_PARALLEL_MPI_EXCEPTION_HPP
#define BOOST_PARALLEL_MPI_EXCEPTION_HPP
#ifndef BOOST_MPI_EXCEPTION_HPP
#define BOOST_MPI_EXCEPTION_HPP
#include <mpi.h>
#include <exception>
#include <boost/config.hpp>
#include <boost/throw_exception.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
/** @brief Catch-all exception class for MPI errors.
*
@@ -80,10 +80,10 @@ class exception : public std::exception
{ \
int _check_result = MPIFunc Args; \
if (_check_result != MPI_SUCCESS) \
boost::throw_exception(boost::parallel::mpi::exception(#MPIFunc, \
boost::throw_exception(boost::mpi::exception(#MPIFunc, \
_check_result)); \
}
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_EXCEPTION_HPP
#endif // BOOST_MPI_EXCEPTION_HPP
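
A sketch of how the renamed macro turns an MPI error code into an exception (the wrapper function is hypothetical):

    #include <boost/mpi/exception.hpp>
    #include <mpi.h>

    void checked_barrier(MPI_Comm comm)
    {
      // Calls MPI_Barrier(comm); throws boost::mpi::exception on any
      // result other than MPI_SUCCESS.
      BOOST_MPI_CHECK_RESULT(MPI_Barrier, (comm));
    }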


@@ -9,8 +9,8 @@
* This header defines operations for completing non-blocking
* communication requests.
*/
#ifndef BOOST_PARALLEL_MPI_NONBLOCKING_HPP
#define BOOST_PARALLEL_MPI_NONBLOCKING_HPP
#ifndef BOOST_MPI_NONBLOCKING_HPP
#define BOOST_MPI_NONBLOCKING_HPP
#include <mpi.h>
#include <vector>
@@ -19,11 +19,11 @@
#include <utility> // for std::pair
#include <algorithm> // for iter_swap, reverse
#include <boost/static_assert.hpp>
#include <boost/parallel/mpi/request.hpp>
#include <boost/parallel/mpi/status.hpp>
#include <boost/parallel/mpi/exception.hpp>
#include <boost/mpi/request.hpp>
#include <boost/mpi/status.hpp>
#include <boost/mpi/exception.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
/**
* @brief Wait until any non-blocking request has completed.
@@ -726,7 +726,7 @@ test_some(BidirectionalIterator first, BidirectionalIterator last)
return start_of_completed;
}
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_NONBLOCKING_HPP
#endif // BOOST_MPI_NONBLOCKING_HPP
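
A ring-exchange sketch combining isend/irecv with wait_all from this header (the neighbor arithmetic and tag are illustrative):

    #include <boost/mpi.hpp>
    #include <vector>

    int main(int argc, char* argv[])
    {
      boost::mpi::environment env(argc, argv);
      boost::mpi::communicator world;
      int right = (world.rank() + 1) % world.size();
      int left  = (world.rank() + world.size() - 1) % world.size();
      int outgoing = world.rank(), incoming = -1;
      std::vector<boost::mpi::request> reqs;
      reqs.push_back(world.isend(right, 0, outgoing));
      reqs.push_back(world.irecv(left, 0, incoming));
      boost::mpi::wait_all(reqs.begin(), reqs.end()); // block until both complete
      return 0;
    }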


@@ -15,18 +15,18 @@
* several new function object types not present in the standard @c
* <functional> header that have direct mappings to @c MPI_Op.
*/
#ifndef BOOST_PARALLEL_MPI_IS_MPI_OP_HPP
#define BOOST_PARALLEL_MPI_IS_MPI_OP_HPP
#ifndef BOOST_MPI_IS_MPI_OP_HPP
#define BOOST_MPI_IS_MPI_OP_HPP
#include <mpi.h>
#include <boost/mpl/bool.hpp>
#include <boost/mpl/if.hpp>
#include <boost/mpl/and.hpp>
#include <boost/parallel/mpi/datatype.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/utility/enable_if.hpp>
#include <functional>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
template<typename Op, typename T> struct is_mpi_op;
@@ -169,7 +169,7 @@ struct bitwise_xor : public std::binary_function<T, T, T>
* that map onto operations that have @c MPI_Op equivalences, such as
* bitwise OR, logical and, or maximum. However, users are encouraged
* to use the standard function objects in the @c functional and @c
* boost/parallel/mpi/operations.hpp headers whenever possible. For
* boost/mpi/operations.hpp headers whenever possible. For
* function objects that are class templates with a single template
* parameter, it may be easier to specialize @c is_builtin_mpi_op.
*/
@@ -317,6 +317,6 @@ namespace detail {
} // end namespace detail
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_GET_MPI_OP_HPP
#endif // BOOST_MPI_GET_MPI_OP_HPP
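
A sketch using one of the function objects this header supplies; because maximum<int> maps onto MPI_MAX, no user-defined MPI_Op needs to be built:

    #include <boost/mpi.hpp>

    int main(int argc, char* argv[])
    {
      boost::mpi::environment env(argc, argv);
      boost::mpi::communicator world;
      int biggest = 0;
      boost::mpi::reduce(world, world.rank(), biggest,
                         boost::mpi::maximum<int>(), 0); // root 0 receives size-1
      return 0;
    }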


@@ -15,16 +15,16 @@
* transmitted via MPI and then be unpacked either via the facilities
* in @c packed_oarchive.hpp or @c MPI_Unpack.
*/
#ifndef BOOST_PARALLEL_MPI_PACKED_IARCHIVE_HPP
#define BOOST_PARALLEL_MPI_PACKED_IARCHIVE_HPP
#ifndef BOOST_MPI_PACKED_IARCHIVE_HPP
#define BOOST_MPI_PACKED_IARCHIVE_HPP
#include <boost/parallel/mpi/datatype.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/archive/detail/auto_link_archive.hpp>
#include <boost/archive/basic_binary_iarchive.hpp>
#include <boost/parallel/mpi/detail/packed_iprimitive.hpp>
#include <boost/mpi/detail/packed_iprimitive.hpp>
#include <boost/assert.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
/** @brief An archive that unpacks binary data from an MPI buffer.
*
@@ -84,9 +84,8 @@ private:
buffer_type internal_buffer_;
};
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
BOOST_BROKEN_COMPILER_TYPE_TRAITS_SPECIALIZATION(parallel::mpi::packed_iarchive)
#endif // BOOST_PARALLEL_MPI_PACKED_IARCHIVE_HPP
#endif // BOOST_MPI_PACKED_IARCHIVE_HPP


@@ -15,15 +15,15 @@
* typically received via MPI and have been packed either via the
* facilities in @c packed_iarchive.hpp or @c MPI_Pack.
*/
#ifndef BOOST_PARALLEL_MPI_PACKED_OARCHIVE_HPP
#define BOOST_PARALLEL_MPI_PACKED_OARCHIVE_HPP
#ifndef BOOST_MPI_PACKED_OARCHIVE_HPP
#define BOOST_MPI_PACKED_OARCHIVE_HPP
#include <boost/parallel/mpi/datatype.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/archive/detail/auto_link_archive.hpp>
#include <boost/archive/basic_binary_oarchive.hpp>
#include <boost/parallel/mpi/detail/packed_oprimitive.hpp>
#include <boost/mpi/detail/packed_oprimitive.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
/** @brief An archive that packs binary data into an MPI buffer.
*
@@ -80,6 +80,6 @@ private:
buffer_type internal_buffer_;
};
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_PACKED_OARCHIVE_HPP
#endif // BOOST_MPI_PACKED_OARCHIVE_HPP


@@ -5,8 +5,8 @@
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
#ifndef BOOST_PARALLEL_MPI_PYTHON_HPP
#define BOOST_PARALLEL_MPI_PYTHON_HPP
#ifndef BOOST_MPI_PYTHON_HPP
#define BOOST_MPI_PYTHON_HPP
#include <boost/python/object.hpp>
@@ -20,7 +20,7 @@
*
*/
namespace boost { namespace parallel { namespace mpi { namespace python {
namespace boost { namespace mpi { namespace python {
/**
* @brief Register the type T for direct serialization within Boost.MPI
@@ -67,13 +67,13 @@ template<typename T>
void
register_skeleton_and_content(const T& value = T(), PyTypeObject* type = 0);
} } } } // end namespace boost::parallel::mpi::python
} } } // end namespace boost::mpi::python
#ifndef BOOST_PARALLEL_MPI_PYTHON_FORWARD_ONLY
# include <boost/parallel/mpi/python/serialize.hpp>
# include <boost/parallel/mpi/python/skeleton_and_content.hpp>
#ifndef BOOST_MPI_PYTHON_FORWARD_ONLY
# include <boost/mpi/python/serialize.hpp>
# include <boost/mpi/python/skeleton_and_content.hpp>
#else
# undef BOOST_PARALLEL_MPI_PYTHON_FORWARD_ONLY
# undef BOOST_MPI_PYTHON_FORWARD_ONLY
#endif
#endif // BOOST_PARALLEL_MPI_PYTHON_HPP
#endif // BOOST_MPI_PYTHON_HPP


@@ -17,8 +17,8 @@
* registered with register_serialized(), objects are directly
* serialized for transmission, skipping the pickling step.
*/
#ifndef BOOST_PARALLEL_MPI_PYTHON_SERIALIZE_HPP
#define BOOST_PARALLEL_MPI_PYTHON_SERIALIZE_HPP
#ifndef BOOST_MPI_PYTHON_SERIALIZE_HPP
#define BOOST_MPI_PYTHON_SERIALIZE_HPP
#include <boost/python/object.hpp>
#include <boost/python/str.hpp>
@@ -39,8 +39,8 @@
#include <boost/type_traits/is_fundamental.hpp>
#define BOOST_PARALLEL_MPI_PYTHON_FORWARD_ONLY
#include <boost/parallel/mpi/python.hpp>
#define BOOST_MPI_PYTHON_FORWARD_ONLY
#include <boost/mpi/python.hpp>
/************************************************************************
* Boost.Python Serialization Section *
@@ -509,16 +509,16 @@ serialize(Archive& ar, boost::python::object& obj, const unsigned int version)
/************************************************************************
* Boost.MPI-Specific Section *
************************************************************************/
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
class packed_iarchive;
class packed_oarchive;
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
BOOST_PYTHON_DIRECT_SERIALIZATION_ARCHIVE(
::boost::parallel::mpi::packed_iarchive,
::boost::parallel::mpi::packed_oarchive)
::boost::mpi::packed_iarchive,
::boost::mpi::packed_oarchive)
namespace boost { namespace parallel { namespace mpi { namespace python {
namespace boost { namespace mpi { namespace python {
template<typename T>
void
@@ -528,6 +528,6 @@ register_serialized(const T& value, PyTypeObject* type)
register_serialized<packed_iarchive, packed_oarchive>(value, type);
}
} } } } // end namespace boost::parallel::mpi::python
} } } // end namespace boost::mpi::python
#endif // BOOST_PARALLEL_MPI_PYTHON_SERIALIZE_HPP
#endif // BOOST_MPI_PYTHON_SERIALIZE_HPP
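
A sketch of registering a C++ type for direct (non-pickled) transmission from Python; `my_type` and `my_module` are hypothetical, and my_type is assumed to be serializable via Boost.Serialization:

    #include <boost/python.hpp>
    #include <boost/mpi/python.hpp>

    struct my_type { /* assumed serializable via Boost.Serialization */ };

    BOOST_PYTHON_MODULE(my_module)
    {
      boost::python::class_<my_type>("my_type");
      // Skip pickling: my_type instances travel through packed MPI archives.
      boost::mpi::python::register_serialized(my_type());
    }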


@@ -5,22 +5,22 @@
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
#ifndef BOOST_PARALLEL_MPI_PYTHON_SKELETON_AND_CONTENT_HPP
#define BOOST_PARALLEL_MPI_PYTHON_SKELETON_AND_CONTENT_HPP
#ifndef BOOST_MPI_PYTHON_SKELETON_AND_CONTENT_HPP
#define BOOST_MPI_PYTHON_SKELETON_AND_CONTENT_HPP
/** @file skeleton_and_content.hpp
*
* This file reflects the skeleton/content facilities into Python.
*/
#include <boost/python.hpp>
#include <boost/parallel/mpi.hpp>
#include <boost/mpi.hpp>
#include <boost/function/function1.hpp>
#define BOOST_PARALLEL_MPI_PYTHON_FORWARD_ONLY
#include <boost/parallel/mpi/python.hpp>
#include <boost/parallel/mpi/python/serialize.hpp>
#define BOOST_MPI_PYTHON_FORWARD_ONLY
#include <boost/mpi/python.hpp>
#include <boost/mpi/python/serialize.hpp>
namespace boost { namespace parallel { namespace mpi { namespace python {
namespace boost { namespace mpi { namespace python {
/**
* INTERNAL ONLY
@@ -29,9 +29,9 @@ namespace boost { namespace parallel { namespace mpi { namespace python {
* retrieved from get_content. This wrapper is only needed to store a
* copy of the Python object on which get_content() was called.
*/
class content : public boost::parallel::mpi::content
class content : public boost::mpi::content
{
typedef boost::parallel::mpi::content inherited;
typedef boost::mpi::content inherited;
public:
content(const inherited& base, boost::python::object object)
@@ -142,7 +142,7 @@ namespace detail {
{
content operator()(object value_obj) {
T& value = extract<T&>(value_obj)();
return content(boost::parallel::mpi::get_content(value), value_obj);
return content(boost::mpi::get_content(value), value_obj);
}
};
@@ -204,6 +204,6 @@ void register_skeleton_and_content(const T& value, PyTypeObject* type)
detail::register_skeleton_and_content_handler(type, handler);
}
} } } } // end namespace boost::parallel::mpi::python
} } } // end namespace boost::mpi::python
#endif // BOOST_PARALLEL_MPI_PYTHON_SKELETON_AND_CONTENT_HPP
#endif // BOOST_MPI_PYTHON_SKELETON_AND_CONTENT_HPP


@@ -9,15 +9,15 @@
* This header defines the class @c request, which contains a request
* for non-blocking communication.
*/
#ifndef BOOST_PARALLEL_MPI_REQUEST_HPP
#define BOOST_PARALLEL_MPI_REQUEST_HPP
#ifndef BOOST_MPI_REQUEST_HPP
#define BOOST_MPI_REQUEST_HPP
#include <mpi.h>
#include <boost/optional.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/parallel/mpi/packed_iarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
class status;
class communicator;
@@ -97,6 +97,6 @@ class request
friend class communicator;
};
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_REQUEST_HPP
#endif // BOOST_MPI_REQUEST_HPP


@@ -20,20 +20,20 @@
* local computation (serialization need only be performed once for
* the content).
*/
#ifndef BOOST_PARALLEL_MPI_SKELETON_AND_CONTENT_HPP
#define BOOST_PARALLEL_MPI_SKELETON_AND_CONTENT_HPP
#ifndef BOOST_MPI_SKELETON_AND_CONTENT_HPP
#define BOOST_MPI_SKELETON_AND_CONTENT_HPP
#include <mpi.h>
#include <boost/archive/detail/auto_link_archive.hpp>
#include <boost/parallel/mpi/packed_iarchive.hpp>
#include <boost/parallel/mpi/packed_oarchive.hpp>
#include <boost/parallel/mpi/detail/forward_skeleton_iarchive.hpp>
#include <boost/parallel/mpi/detail/forward_skeleton_oarchive.hpp>
#include <boost/parallel/mpi/detail/ignore_iprimitive.hpp>
#include <boost/parallel/mpi/detail/ignore_oprimitive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/detail/forward_skeleton_iarchive.hpp>
#include <boost/mpi/detail/forward_skeleton_oarchive.hpp>
#include <boost/mpi/detail/ignore_iprimitive.hpp>
#include <boost/mpi/detail/ignore_oprimitive.hpp>
#include <boost/shared_ptr.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
/**
* @brief A proxy that requests that the skeleton of an object be
@@ -356,21 +356,21 @@ private:
packed_oarchive skeleton_archive_;
};
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#include <boost/parallel/mpi/detail/content_oarchive.hpp>
#include <boost/mpi/detail/content_oarchive.hpp>
// For any headers that have provided declarations based on forward
// declarations of the contents of this header, include definitions
// for those declarations. This means that the inclusion of
// skeleton_and_content.hpp enables the use of skeleton/content
// transmission throughout the library.
#ifdef BOOST_PARALLEL_MPI_BROADCAST_HPP
# include <boost/parallel/mpi/detail/broadcast_sc.hpp>
#ifdef BOOST_MPI_BROADCAST_HPP
# include <boost/mpi/detail/broadcast_sc.hpp>
#endif
#ifdef BOOST_PARALLEL_MPI_COMMUNICATOR_HPP
# include <boost/parallel/mpi/detail/communicator_sc.hpp>
#ifdef BOOST_MPI_COMMUNICATOR_HPP
# include <boost/mpi/detail/communicator_sc.hpp>
#endif
#endif // BOOST_PARALLEL_MPI_SKELETON_AND_CONTENT_HPP
#endif // BOOST_MPI_SKELETON_AND_CONTENT_HPP
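
A sketch of the skeleton/content idiom this header enables: transmit the structure once, then retransmit only the data (the std::list payload is illustrative):

    #include <boost/mpi.hpp>
    #include <boost/mpi/skeleton_and_content.hpp>
    #include <boost/serialization/list.hpp>
    #include <list>

    int main(int argc, char* argv[])
    {
      boost::mpi::environment env(argc, argv);
      boost::mpi::communicator world;
      std::list<int> values(100, 0);
      boost::mpi::broadcast(world, boost::mpi::skeleton(values), 0); // structure, once
      boost::mpi::content c = boost::mpi::get_content(values);
      boost::mpi::broadcast(world, c, 0);                            // data, reusable
      return 0;
    }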


@@ -11,13 +11,13 @@
* This header contains all of the forward declarations required to
* transmit the skeletons of data structures and the content of data
* structures separately. To actually transmit skeletons or content,
* include the header @c boost/parallel/mpi/skeleton_and_content.hpp.
* include the header @c boost/mpi/skeleton_and_content.hpp.
*/
#ifndef BOOST_PARALLEL_MPI_SKELETON_AND_CONTENT_FWD_HPP
#define BOOST_PARALLEL_MPI_SKELETON_AND_CONTENT_FWD_HPP
#ifndef BOOST_MPI_SKELETON_AND_CONTENT_FWD_HPP
#define BOOST_MPI_SKELETON_AND_CONTENT_FWD_HPP
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
template <class T> struct skeleton_proxy;
template <class T> const skeleton_proxy<T> skeleton(T& x);
@@ -26,6 +26,6 @@ template <class T> const content get_content(const T& x);
class packed_skeleton_iarchive;
class packed_skeleton_oarchive;
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_SKELETON_AND_CONTENT_FWD_HPP
#endif // BOOST_MPI_SKELETON_AND_CONTENT_FWD_HPP


@@ -9,13 +9,13 @@
* This header defines the class @c status, which reports on the
* results of point-to-point communication.
*/
#ifndef BOOST_PARALLEL_MPI_STATUS_HPP
#define BOOST_PARALLEL_MPI_STATUS_HPP
#ifndef BOOST_MPI_STATUS_HPP
#define BOOST_MPI_STATUS_HPP
#include <mpi.h>
#include <boost/optional.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
class request;
class communicator;
@@ -100,6 +100,6 @@ class status
};
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_STATUS_HPP
#endif // BOOST_MPI_STATUS_HPP


@@ -9,13 +9,13 @@
* This header provides the @c timer class, which provides access to
* the MPI timers.
*/
#ifndef BOOST_PARALLEL_MPI_TIMER_HPP
#define BOOST_PARALLEL_MPI_TIMER_HPP
#ifndef BOOST_MPI_TIMER_HPP
#define BOOST_MPI_TIMER_HPP
#include <mpi.h>
#include <boost/limits.hpp>
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
/** @brief A simple timer that provides access to the MPI timing
* facilities.
@@ -86,6 +86,6 @@ inline double timer::elapsed_min() const
return MPI_Wtick();
}
} } } /// end namespace boost::parallel::mpi
} } // end namespace boost::mpi
#endif // BOOST_PARALLEL_MPI_TIMER_HPP
#endif // BOOST_MPI_TIMER_HPP
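
A sketch timing a barrier with the renamed timer class (what gets timed is illustrative):

    #include <boost/mpi.hpp>
    #include <boost/mpi/timer.hpp>
    #include <iostream>

    int main(int argc, char* argv[])
    {
      boost::mpi::environment env(argc, argv);
      boost::mpi::communicator world;
      boost::mpi::timer t;                 // wraps MPI_Wtime
      world.barrier();
      if (world.rank() == 0)
        std::cout << "barrier took " << t.elapsed() << "s" << std::endl;
      return 0;
    }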

src/broadcast.cpp (new file)

@@ -0,0 +1,151 @@
// Copyright 2005 Douglas Gregor.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Message Passing Interface 1.1 -- Section 4.4. Broadcast
#include <boost/mpi/collectives/broadcast.hpp>
#include <boost/mpi/skeleton_and_content.hpp>
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/mpi/environment.hpp>
namespace boost { namespace mpi {
template<>
void
broadcast<const packed_oarchive>(const communicator& comm,
const packed_oarchive& oa,
int root)
{
// Only the root can broadcast the packed_oarchive
assert(comm.rank() == root);
int size = comm.size();
if (size < 2) return;
// Use the tag reserved for collective operations
int tag = environment::collectives_tag();
// Broadcast data to all nodes
std::vector<MPI_Request> requests(size * 2);
int num_requests = 0;
for (int dest = 0; dest < size; ++dest) {
if (dest != root) {
// Build up send requests for each child send.
num_requests += detail::packed_archive_isend(comm, dest, tag, oa,
&requests[num_requests], 2);
}
}
// Complete all of the sends
BOOST_MPI_CHECK_RESULT(MPI_Waitall,
(num_requests, &requests[0], MPI_STATUSES_IGNORE));
}
template<>
void
broadcast<packed_oarchive>(const communicator& comm, packed_oarchive& oa,
int root)
{
broadcast(comm, const_cast<const packed_oarchive&>(oa), root);
}
template<>
void
broadcast<packed_iarchive>(const communicator& comm, packed_iarchive& ia,
int root)
{
int size = comm.size();
if (size < 2) return;
// Use the tag reserved for collective operations
int tag = environment::collectives_tag();
// Receive data from the root.
if (comm.rank() != root) {
MPI_Status status;
detail::packed_archive_recv(comm, root, tag, ia, status);
} else {
// Broadcast data to all nodes
std::vector<MPI_Request> requests(size * 2);
int num_requests = 0;
for (int dest = 0; dest < size; ++dest) {
if (dest != root) {
// Build up send requests for each child send.
num_requests += detail::packed_archive_isend(comm, dest, tag, ia,
&requests[num_requests],
2);
}
}
// Complete all of the sends
BOOST_MPI_CHECK_RESULT(MPI_Waitall,
(num_requests, &requests[0], MPI_STATUSES_IGNORE));
}
}
template<>
void
broadcast<const packed_skeleton_oarchive>(const communicator& comm,
const packed_skeleton_oarchive& oa,
int root)
{
broadcast(comm, oa.get_skeleton(), root);
}
template<>
void
broadcast<packed_skeleton_oarchive>(const communicator& comm,
packed_skeleton_oarchive& oa, int root)
{
broadcast(comm, oa.get_skeleton(), root);
}
template<>
void
broadcast<packed_skeleton_iarchive>(const communicator& comm,
packed_skeleton_iarchive& ia, int root)
{
broadcast(comm, ia.get_skeleton(), root);
}
template<>
void broadcast<content>(const communicator& comm, content& c, int root)
{
broadcast(comm, const_cast<const content&>(c), root);
}
template<>
void broadcast<const content>(const communicator& comm, const content& c,
int root)
{
#ifdef LAM_MPI
if (comm.size() < 2)
return;
// Some versions of LAM/MPI behave badly when broadcasting using
// MPI_BOTTOM, so we'll instead use manual send/recv operations.
if (comm.rank() == root) {
for (int p = 0; p < comm.size(); ++p) {
if (p != root) {
BOOST_MPI_CHECK_RESULT(MPI_Send,
(MPI_BOTTOM, 1, c.get_mpi_datatype(),
p, environment::collectives_tag(), comm));
}
}
} else {
BOOST_MPI_CHECK_RESULT(MPI_Recv,
(MPI_BOTTOM, 1, c.get_mpi_datatype(),
root, environment::collectives_tag(),
comm, MPI_STATUS_IGNORE));
}
#else
BOOST_MPI_CHECK_RESULT(MPI_Bcast,
(MPI_BOTTOM, 1, c.get_mpi_datatype(),
root, comm));
#endif
}
} } // end namespace boost::mpi

src/communicator.cpp (new file)

@@ -0,0 +1,267 @@
// Copyright (C) 2005, 2006 Douglas Gregor.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/skeleton_and_content.hpp>
#include <boost/mpi/detail/point_to_point.hpp>
namespace boost { namespace mpi {
/***************************************************************************
* status *
***************************************************************************/
bool status::cancelled()
{
int flag = 0;
BOOST_MPI_CHECK_RESULT(MPI_Test_cancelled, (&m_status, &flag));
return flag != 0;
}
/***************************************************************************
* communicator *
***************************************************************************/
communicator::communicator()
{
comm_ptr.reset(new MPI_Comm(MPI_COMM_WORLD));
}
communicator::communicator(const MPI_Comm& comm, comm_create_kind kind)
{
if (comm == MPI_COMM_NULL)
/* MPI_COMM_NULL indicates that the communicator is not usable. */
return;
switch (kind) {
case comm_duplicate:
{
MPI_Comm newcomm;
BOOST_MPI_CHECK_RESULT(MPI_Comm_dup, (comm, &newcomm));
comm_ptr.reset(new MPI_Comm(newcomm), comm_free());
MPI_Errhandler_set(newcomm, MPI_ERRORS_RETURN);
break;
}
case comm_take_ownership:
comm_ptr.reset(new MPI_Comm(comm), comm_free());
break;
case comm_attach:
comm_ptr.reset(new MPI_Comm(comm));
break;
}
}
int communicator::size() const
{
int size_;
BOOST_MPI_CHECK_RESULT(MPI_Comm_size, (MPI_Comm(*this), &size_));
return size_;
}
int communicator::rank() const
{
int rank_;
BOOST_MPI_CHECK_RESULT(MPI_Comm_rank, (MPI_Comm(*this), &rank_));
return rank_;
}
void communicator::send(int dest, int tag) const
{
BOOST_MPI_CHECK_RESULT(MPI_Send,
(MPI_BOTTOM, 0, MPI_PACKED,
dest, tag, MPI_Comm(*this)));
}
status communicator::recv(int source, int tag) const
{
status stat;
BOOST_MPI_CHECK_RESULT(MPI_Recv,
(MPI_BOTTOM, 0, MPI_PACKED,
source, tag, MPI_Comm(*this), &stat.m_status));
return stat;
}
optional<status> communicator::iprobe(int source, int tag) const
{
typedef optional<status> result_type;
status stat;
int flag;
BOOST_MPI_CHECK_RESULT(MPI_Iprobe,
(source, tag, MPI_Comm(*this), &flag,
&stat.m_status));
if (flag) return stat;
else return result_type();
}
status communicator::probe(int source, int tag) const
{
typedef optional<status> result_type;
status stat;
BOOST_MPI_CHECK_RESULT(MPI_Probe,
(source, tag, MPI_Comm(*this), &stat.m_status));
return stat;
}
void (communicator::barrier)() const
{
BOOST_MPI_CHECK_RESULT(MPI_Barrier, (MPI_Comm(*this)));
}
communicator::operator MPI_Comm() const
{
if (comm_ptr) return *comm_ptr;
else return MPI_COMM_NULL;
}
communicator communicator::split(int color) const
{
return split(color, rank());
}
communicator communicator::split(int color, int key) const
{
MPI_Comm newcomm;
BOOST_MPI_CHECK_RESULT(MPI_Comm_split,
(MPI_Comm(*this), color, key, &newcomm));
return communicator(newcomm, comm_take_ownership);
}
void communicator::abort(int errcode) const
{
BOOST_MPI_CHECK_RESULT(MPI_Abort, (MPI_Comm(*this), errcode));
}
/*************************************************************
* archived send/recv *
*************************************************************/
template<>
void
communicator::send<packed_oarchive>(int dest, int tag,
const packed_oarchive& ar) const
{
detail::packed_archive_send(MPI_Comm(*this), dest, tag, ar);
}
template<>
void
communicator::send<packed_skeleton_oarchive>
(int dest, int tag, const packed_skeleton_oarchive& ar) const
{
this->send(dest, tag, ar.get_skeleton());
}
template<>
void communicator::send<content>(int dest, int tag, const content& c) const
{
BOOST_MPI_CHECK_RESULT(MPI_Send,
(MPI_BOTTOM, 1, c.get_mpi_datatype(),
dest, tag, MPI_Comm(*this)));
}
template<>
status
communicator::recv<packed_iarchive>(int source, int tag,
packed_iarchive& ar) const
{
status stat;
detail::packed_archive_recv(MPI_Comm(*this), source, tag, ar,
stat.m_status);
return stat;
}
template<>
status
communicator::recv<packed_skeleton_iarchive>
(int source, int tag, packed_skeleton_iarchive& ar) const
{
return this->recv(source, tag, ar.get_skeleton());
}
template<>
status
communicator::recv<const content>(int source, int tag, const content& c) const
{
status stat;
BOOST_MPI_CHECK_RESULT(MPI_Recv,
(MPI_BOTTOM, 1, c.get_mpi_datatype(),
source, tag, MPI_Comm(*this), &stat.m_status));
return stat;
}
/*************************************************************
* non-blocking send/recv *
*************************************************************/
template<>
request
communicator::isend<packed_oarchive>(int dest, int tag,
const packed_oarchive& ar) const
{
request req;
detail::packed_archive_isend(MPI_Comm(*this), dest, tag, ar,
&req.m_requests[0], 2);
return req;
}
template<>
request
communicator::isend<packed_skeleton_oarchive>
(int dest, int tag, const packed_skeleton_oarchive& ar) const
{
return this->isend(dest, tag, ar.get_skeleton());
}
template<>
request communicator::isend<content>(int dest, int tag, const content& c) const
{
request req;
BOOST_MPI_CHECK_RESULT(MPI_Isend,
(MPI_BOTTOM, 1, c.get_mpi_datatype(),
dest, tag, MPI_Comm(*this), &req.m_requests[0]));
return req;
}
request communicator::isend(int dest, int tag) const
{
request req;
BOOST_MPI_CHECK_RESULT(MPI_Isend,
(MPI_BOTTOM, 0, MPI_PACKED,
dest, tag, MPI_Comm(*this), &req.m_requests[0]));
return req;
}
template<>
request
communicator::irecv<packed_skeleton_iarchive>
(int source, int tag, packed_skeleton_iarchive& ar) const
{
return this->irecv(source, tag, ar.get_skeleton());
}
template<>
request
communicator::irecv<const content>(int source, int tag,
const content& c) const
{
request req;
BOOST_MPI_CHECK_RESULT(MPI_Irecv,
(MPI_BOTTOM, 1, c.get_mpi_datatype(),
source, tag, MPI_Comm(*this), &req.m_requests[0]));
return req;
}
request communicator::irecv(int source, int tag) const
{
request req;
BOOST_MPI_CHECK_RESULT(MPI_Irecv,
(MPI_BOTTOM, 0, MPI_PACKED,
source, tag, MPI_Comm(*this), &req.m_requests[0]));
return req;
}
} } // end namespace boost::mpi
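As a usage illustration for the API defined above (a sketch, not part of the commit; it assumes at least two processes), point-to-point operations and split() compose as follows:

#include <boost/mpi.hpp>
#include <string>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  if (world.rank() == 0) {
    world.send(1, 0, std::string("Hello"));
  } else if (world.rank() == 1) {
    std::string msg;
    boost::mpi::status s = world.recv(0, 0, msg);  // s.source() == 0
  }

  // Disjoint sub-communicators keyed by rank parity; ordering within
  // each half defaults to the rank in `world'.
  boost::mpi::communicator half = world.split(world.rank() % 2);
  return 0;
}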

src/computation_tree.cpp Normal file

@@ -0,0 +1,72 @@
// Copyright (C) 2005 Douglas Gregor.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Compute parents, children, levels, etc. to effect a parallel
// computation tree.
#include <boost/mpi/detail/computation_tree.hpp>
namespace boost { namespace mpi { namespace detail {
int computation_tree::default_branching_factor = 3;
computation_tree
::computation_tree(int rank, int size, int root, int branching_factor)
: rank(rank), size(size), root(root),
branching_factor_(branching_factor > 1 ? branching_factor
/* default */: default_branching_factor),
level_(0)
{
// The position in the tree, once we've adjusted for non-zero
// roots.
int n = (rank + size - root) % size;
int sum = 0;
int term = 1;
/* The level is the smallest value of k such that
f^0 + f^1 + ... + f^k > n
for branching factor f and index n in the tree. */
while (sum <= n) {
++level_;
term *= branching_factor_;
sum += term;
}
}
int computation_tree::level_index(int n) const
{
int sum = 0;
int term = 1;
while (n--) {
sum += term;
term *= branching_factor_;
}
return sum;
}
int computation_tree::parent() const
{
if (rank == root) return rank;
int n = rank + size - 1 - root;
return ((n % size / branching_factor_) + root) % size;
}
int computation_tree::child_begin() const
{
// Zero-based index of this node
int n = (rank + size - root) % size;
// Compute the index of the child (in a zero-based tree)
int child_index = level_index(level_ + 1)
+ branching_factor_ * (n - level_index(level_));
if (child_index >= size) return root;
else return (child_index + root) % size;
}
} } } // end namespace boost::mpi::detail
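To make the index arithmetic above concrete (an illustration, not part of the commit; it assumes parent() and child_begin() are public, as declared in computation_tree.hpp): with the default branching factor f = 3, level k begins at level_index(k), so levels start at indices 0, 1, 4, 13, 40, ... Node 7 therefore sits at level 2, its parent is (7 - 1) / 3 = 2, and its first child is level_index(3) + 3 * (7 - level_index(2)) = 13 + 9 = 22:

#include <cassert>
#include <boost/mpi/detail/computation_tree.hpp>

int main()
{
  // rank 7 of 40 processes, root 0, branching factor 3
  boost::mpi::detail::computation_tree tree(7, 40, 0, 3);
  assert(tree.parent() == 2);        // (7 - 1) / 3
  assert(tree.child_begin() == 22);  // 13 + 3 * (7 - 4)
  return 0;
}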

src/content_oarchive.cpp Normal file

@@ -0,0 +1,18 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#define BOOST_ARCHIVE_SOURCE
#include <boost/archive/impl/archive_pointer_oserializer.ipp>
#include <boost/mpi/skeleton_and_content.hpp>
namespace boost { namespace archive { namespace detail {
// explicitly instantiate all required template functions
template class archive_pointer_oserializer<mpi::detail::content_oarchive> ;
} } }

src/environment.cpp Normal file

@@ -0,0 +1,123 @@
// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor@gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Message Passing Interface 1.1 -- 7.1.1. Environmental Inquiries
#include <boost/mpi/environment.hpp>
#include <boost/mpi/exception.hpp>
#include <cassert>
#include <exception>
#include <stdexcept>
namespace boost { namespace mpi {
#ifdef BOOST_MPI_HAS_NOARG_INITIALIZATION
environment::environment(bool abort_on_exception)
: i_initialized(false),
abort_on_exception(abort_on_exception)
{
if (!initialized()) {
BOOST_MPI_CHECK_RESULT(MPI_Init, (0, 0));
i_initialized = true;
}
MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
}
#endif
environment::environment(int& argc, char** &argv, bool abort_on_exception)
: i_initialized(false),
abort_on_exception(abort_on_exception)
{
if (!initialized()) {
BOOST_MPI_CHECK_RESULT(MPI_Init, (&argc, &argv));
i_initialized = true;
}
MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
}
environment::~environment()
{
if (i_initialized) {
if (std::uncaught_exception() && abort_on_exception) {
abort(-1);
} else if (!finalized()) {
BOOST_MPI_CHECK_RESULT(MPI_Finalize, ());
}
}
}
void environment::abort(int errcode)
{
BOOST_MPI_CHECK_RESULT(MPI_Abort, (MPI_COMM_WORLD, errcode));
}
bool environment::initialized()
{
int flag;
BOOST_MPI_CHECK_RESULT(MPI_Initialized, (&flag));
return flag != 0;
}
bool environment::finalized()
{
int flag;
BOOST_MPI_CHECK_RESULT(MPI_Finalized, (&flag));
return flag != 0;
}
int environment::max_tag()
{
int* max_tag_value;
int found = 0;
BOOST_MPI_CHECK_RESULT(MPI_Attr_get,
(MPI_COMM_WORLD, MPI_TAG_UB, &max_tag_value, &found));
assert(found != 0);
return *max_tag_value - num_reserved_tags;
}
int environment::collectives_tag()
{
return max_tag() + 1;
}
optional<int> environment::host_rank()
{
int* host;
int found = 0;
BOOST_MPI_CHECK_RESULT(MPI_Attr_get,
(MPI_COMM_WORLD, MPI_HOST, &host, &found));
if (!found || *host == MPI_PROC_NULL)
return optional<int>();
else
return *host;
}
optional<int> environment::io_rank()
{
int* io;
int found = 0;
BOOST_MPI_CHECK_RESULT(MPI_Attr_get,
(MPI_COMM_WORLD, MPI_IO, &io, &found));
if (!found || *io == MPI_PROC_NULL)
return optional<int>();
else
return *io;
}
std::string environment::processor_name()
{
char name[MPI_MAX_PROCESSOR_NAME];
int len;
BOOST_MPI_CHECK_RESULT(MPI_Get_processor_name, (name, &len));
return std::string(name, len);
}
} } // end namespace boost::mpi
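The usual way these environment routines are exercised (a minimal sketch, not part of this commit):

#include <boost/mpi/environment.hpp>
#include <boost/mpi/communicator.hpp>
#include <iostream>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);   // MPI_Init, unless already done
  boost::mpi::communicator world;
  std::cout << "rank " << world.rank() << " on "
            << boost::mpi::environment::processor_name() << std::endl;
  return 0;                                  // ~environment runs MPI_Finalize
}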

src/mpi_datatype_cache.cpp Normal file

@@ -0,0 +1,16 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#include <boost/archive/impl/archive_pointer_oserializer.ipp>
#include <boost/mpi/detail/mpi_datatype_cache.hpp>
namespace boost { namespace mpi { namespace detail {
mpi_datatype_map mpi_datatype_cache;
} } }

src/mpi_datatype_oarchive.cpp Normal file

@@ -0,0 +1,18 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#define BOOST_ARCHIVE_SOURCE
#include <boost/archive/impl/archive_pointer_oserializer.ipp>
#include <boost/mpi/detail/mpi_datatype_oarchive.hpp>
namespace boost { namespace archive { namespace detail {
// explicitly instantiate all required template functions
template class archive_pointer_oserializer<mpi::detail::mpi_datatype_oarchive> ;
} } }

src/packed_iarchive.cpp Normal file

@@ -0,0 +1,26 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#define BOOST_ARCHIVE_SOURCE
#include <boost/archive/impl/archive_pointer_iserializer.ipp>
#include <boost/archive/impl/archive_pointer_oserializer.ipp>
#include <boost/mpi/packed_iarchive.hpp>
#include <boost/archive/binary_iarchive.hpp>
#include <boost/archive/impl/basic_binary_iprimitive.ipp>
#include <boost/archive/impl/basic_binary_iarchive.ipp>
namespace boost { namespace archive {
// explicitly instantiate all required templates
template class basic_binary_iarchive<mpi::packed_iarchive> ;
template class detail::archive_pointer_iserializer<mpi::packed_iarchive> ;
//template class binary_iarchive_impl<mpi_packed_iarchive> ;
} } // end namespace boost::archive

src/packed_oarchive.cpp Normal file

@@ -0,0 +1,24 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#define BOOST_ARCHIVE_SOURCE
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/archive/binary_oarchive.hpp>
#include <boost/archive/impl/basic_binary_oprimitive.ipp>
#include <boost/archive/impl/basic_binary_oarchive.ipp>
#include <boost/archive/impl/archive_pointer_oserializer.ipp>
namespace boost { namespace archive {
// explicitly instantiate all required templates
template class detail::archive_pointer_oserializer<mpi::packed_oarchive> ;
template class basic_binary_oarchive<mpi::packed_oarchive> ;
//template class binary_oarchive_impl<mpi_packed_oarchive> ;
} } // end namespace boost::archive

src/packed_skeleton_iarchive.cpp Normal file

@@ -0,0 +1,26 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#define BOOST_ARCHIVE_SOURCE
#include <boost/archive/impl/archive_pointer_iserializer.ipp>
#include <boost/archive/impl/archive_pointer_oserializer.ipp>
#include <boost/mpi/skeleton_and_content.hpp>
#include <boost/archive/binary_iarchive.hpp>
#include <boost/archive/impl/basic_binary_iprimitive.ipp>
#include <boost/archive/impl/basic_binary_iarchive.ipp>
namespace boost { namespace archive {
// explicitly instantiate all required templates
template class basic_binary_iarchive<mpi::packed_skeleton_iarchive> ;
template class detail::archive_pointer_iserializer<mpi::packed_skeleton_iarchive> ;
//template class binary_iarchive_impl<packed_skeleton_iarchive> ;
} } // end namespace boost::archive

src/packed_skeleton_oarchive.cpp Normal file

@@ -0,0 +1,24 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#define BOOST_ARCHIVE_SOURCE
#include <boost/mpi/skeleton_and_content.hpp>
#include <boost/archive/binary_oarchive.hpp>
#include <boost/archive/impl/basic_binary_oprimitive.ipp>
#include <boost/archive/impl/basic_binary_oarchive.ipp>
#include <boost/archive/impl/archive_pointer_oserializer.ipp>
namespace boost { namespace archive {
// explicitly instantiate all required templates
template class detail::archive_pointer_oserializer<mpi::packed_skeleton_oarchive> ;
template class basic_binary_oarchive<mpi::packed_skeleton_oarchive> ;
//template class binary_oarchive_impl<mpi_packed_oarchive> ;
} } // end namespace boost::archive

src/point_to_point.cpp Normal file

@@ -0,0 +1,97 @@
// Copyright 2005 Douglas Gregor.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Message Passing Interface 1.1 -- Section 3. MPI Point-to-point
/* There is the potential for optimization here. We could keep around
a "small message" buffer of size N that we just receive into by
default. If the message is N - sizeof(int) bytes or smaller, it can
just be sent with that buffer. If it's larger, we send the first N
- sizeof(int) bytes in the first packet followed by another
packet. The size of the second packet will be stored in an integer
at the end of the first packet.
We will introduce this optimization later, when we have more
performance test cases and have met our functionality goals. */
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/mpi/exception.hpp>
#include <cassert>
namespace boost { namespace mpi { namespace detail {
void
packed_archive_send(MPI_Comm comm, int dest, int tag,
const packed_oarchive& ar)
{
const void* size = &ar.size();
BOOST_MPI_CHECK_RESULT(MPI_Send,
(const_cast<void*>(size), 1,
get_mpi_datatype<std::size_t>(), dest, tag, comm));
BOOST_MPI_CHECK_RESULT(MPI_Send,
(const_cast<void*>(ar.address()), ar.size(),
MPI_PACKED,
dest, tag, comm));
}
int
packed_archive_isend(MPI_Comm comm, int dest, int tag,
const packed_oarchive& ar,
MPI_Request* out_requests, int num_out_requests)
{
assert(num_out_requests >= 2);
const void* size = &ar.size();
BOOST_MPI_CHECK_RESULT(MPI_Isend,
(const_cast<void*>(size), 1,
get_mpi_datatype<std::size_t>(),
dest, tag, comm, out_requests));
BOOST_MPI_CHECK_RESULT(MPI_Isend,
(const_cast<void*>(ar.address()), ar.size(),
MPI_PACKED,
dest, tag, comm, out_requests + 1));
return 2;
}
int
packed_archive_isend(MPI_Comm comm, int dest, int tag,
const packed_iarchive& ar,
MPI_Request* out_requests, int num_out_requests)
{
assert(num_out_requests >= 2);
const void* size = &ar.size();
BOOST_MPI_CHECK_RESULT(MPI_Isend,
(const_cast<void*>(size), 1,
get_mpi_datatype<std::size_t>(),
dest, tag, comm, out_requests));
BOOST_MPI_CHECK_RESULT(MPI_Isend,
(const_cast<void*>(ar.address()), ar.size(),
MPI_PACKED,
dest, tag, comm, out_requests + 1));
return 2;
}
void
packed_archive_recv(MPI_Comm comm, int source, int tag, packed_iarchive& ar,
MPI_Status& status)
{
std::size_t count;
BOOST_MPI_CHECK_RESULT(MPI_Recv,
(&count, 1, get_mpi_datatype<std::size_t>(),
source, tag, comm, &status));
// Prepare input buffer and receive the message
ar.resize(count);
BOOST_MPI_CHECK_RESULT(MPI_Recv,
(ar.address(), ar.size(), MPI_PACKED,
status.MPI_SOURCE, status.MPI_TAG,
comm, &status));
}
} } } // end namespace boost::mpi::detail
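The header comment above defers a "small message" optimization. A hypothetical sketch of the send side, under the assumptions stated in that comment (a fixed first-packet size N, with the second packet's size stored in the integer at the end of the first packet; none of these names exist in Boost.MPI):

#include <mpi.h>
#include <cstring>

static const int N = 4096;                       // assumed fixed packet size

void small_message_send(MPI_Comm comm, int dest, int tag,
                        const char* data, int size)
{
  char packet[N] = {0};
  const int payload = N - static_cast<int>(sizeof(int));
  const int first   = size < payload ? size : payload;
  const int rest    = size - first;              // second packet size, maybe 0
  std::memcpy(packet, data, first);
  std::memcpy(packet + payload, &rest, sizeof(int));
  MPI_Send(packet, N, MPI_BYTE, dest, tag, comm);
  if (rest > 0)
    MPI_Send(const_cast<char*>(data) + first, rest, MPI_BYTE,
             dest, tag, comm);
}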

src/python/collectives.cpp Normal file

@@ -0,0 +1,144 @@
// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
/** @file collectives.cpp
*
* This file reflects the Boost.MPI collective operations into
* Python.
*/
#include <boost/python.hpp>
#include <boost/mpi.hpp>
#include <boost/mpi/python/serialize.hpp>
using namespace boost::python;
using namespace boost::mpi;
namespace boost { namespace mpi { namespace python {
extern const char* all_gather_docstring;
extern const char* all_reduce_docstring;
extern const char* all_to_all_docstring;
extern const char* broadcast_docstring;
extern const char* gather_docstring;
extern const char* reduce_docstring;
extern const char* scan_docstring;
extern const char* scatter_docstring;
object all_gather(const communicator& comm, object value)
{
std::vector<object> values;
boost::mpi::all_gather(comm, value, values);
boost::python::list l;
for (int i = 0; i < comm.size(); ++i)
l.append(values[i]);
return boost::python::tuple(l);
}
object all_to_all(const communicator& comm, object in_values)
{
// Build input values
std::vector<object> in_values_vec(comm.size());
object iterator = object(handle<>(PyObject_GetIter(in_values.ptr())));
for (int i = 0; i < comm.size(); ++i)
in_values_vec[i] = object(handle<>(PyIter_Next(iterator.ptr())));
std::vector<object> out_values_vec(comm.size());
boost::mpi::all_to_all(comm, in_values_vec, out_values_vec);
boost::python::list l;
for (int i = 0; i < comm.size(); ++i)
l.append(out_values_vec[i]);
return boost::python::tuple(l);
}
object broadcast(const communicator& comm, object value, int root)
{
boost::mpi::broadcast(comm, value, root);
return value;
}
object gather(const communicator& comm, object value, int root)
{
if (comm.rank() == root) {
std::vector<object> values;
boost::mpi::gather(comm, value, values, root);
boost::python::list l;
for (int i = 0; i < comm.size(); ++i)
l.append(values[i]);
return boost::python::tuple(l);
} else {
boost::mpi::gather(comm, value, root);
return object();
}
}
object reduce(const communicator& comm, object value, object op, int root)
{
if (comm.rank() == root) {
object out_value;
boost::mpi::reduce(comm, value, out_value, op, root);
return out_value;
} else {
boost::mpi::reduce(comm, value, op, root);
return object();
}
}
object scatter(const communicator& comm, object values, int root)
{
object result;
if (comm.rank() == root) {
std::vector<object> values_vec(comm.size());
object iterator = object(handle<>(PyObject_GetIter(values.ptr())));
for (int i = 0; i < comm.size(); ++i)
values_vec[i] = object(handle<>(PyIter_Next(iterator.ptr())));
boost::mpi::scatter(comm, values_vec, result, root);
} else {
boost::mpi::scatter(comm, result, root);
}
return result;
}
void export_collectives()
{
using boost::python::arg;
def("all_reduce",
(object (*)(const communicator&, const object&, object))&all_reduce,
(arg("comm") = communicator(), arg("value"), arg("op")),
all_reduce_docstring);
def("all_gather", &all_gather,
(arg("comm") = communicator(), arg("value") = object()),
all_gather_docstring);
def("all_to_all", &all_to_all,
(arg("comm") = communicator(), arg("values") = object()),
all_to_all_docstring);
def("broadcast", &broadcast,
(arg("comm") = communicator(), arg("value") = object(), arg("root")),
broadcast_docstring);
def("gather", &gather,
(arg("comm") = communicator(), arg("value") = object(), arg("root")),
gather_docstring);
def("reduce", &reduce,
(arg("comm") = communicator(), arg("value"), arg("op"),
arg("root")),
reduce_docstring);
def("scan",
(object (*)(const communicator&, const object&, object))&scan,
(arg("comm") = communicator(), arg("value"), arg("op")),
scan_docstring);
def("scatter", &scatter,
(arg("comm") = communicator(), arg("values") = object(), arg("root")),
scatter_docstring);
}
} } } // end namespace boost::mpi::python
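These wrappers forward to the C++ collectives on boost::python::object values; the underlying C++ calls look like this (a usage sketch, not part of this commit):

#include <boost/mpi.hpp>
#include <functional>
#include <string>
#include <vector>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  // gather: each rank contributes one value; rank 0 receives them all.
  std::vector<std::string> names;
  boost::mpi::gather(world, boost::mpi::environment::processor_name(),
                     names, 0);

  // all_reduce: combine one value per rank with a binary operation.
  int sum = 0;
  boost::mpi::all_reduce(world, world.rank(), sum, std::plus<int>());
  return 0;
}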

src/python/datatypes.cpp Normal file

@@ -0,0 +1,25 @@
// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
/** @file datatypes.cpp
*
* This file provides datatypes support for Boost.MPI in Python.
*/
#include <boost/mpi/python/serialize.hpp>
#include <boost/mpi.hpp>
namespace boost { namespace mpi { namespace python {
void export_datatypes()
{
register_serialized(long(0), &PyInt_Type);
register_serialized(false, &PyBool_Type);
register_serialized(double(0.0), &PyFloat_Type);
}
} } } // end namespace boost::mpi::python

src/python/documentation.cpp Normal file

@@ -0,0 +1,581 @@
// (C) Copyright 2005 The Trustees of Indiana University.
// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
/** @file documentation.cpp
*
* This file contains all of the documentation strings for the
* Boost.MPI Python bindings.
*/
namespace boost { namespace mpi { namespace python {
const char* module_docstring =
"The boost.mpi module contains Python wrappers for Boost.MPI.\n"
"Boost.MPI is a C++ interface to the Message Passing Interface 1.1,\n"
"a high-performance message passing library for parallel programming.\n"
"\n"
"This module supports the most commonly used subset of MPI 1.1. All\n"
"communication operations can transmit any Python object that can be\n"
"pickled and unpickled, along with C++-serialized data types and\n"
"separation of the structure of a data type from its content.\n"
"Collectives that have a user-supplied functions,\n"
"such as reduce() or scan(), accept arbitrary Python functions, and\n"
"all collectives can operate on any serializable or picklable data type.\n"
"\n"
"IMPORTANT MODULE DATA\n"
" any_source This constant may be used for the source parameter of\n"
" receive and probe operations to indicate that a\n"
" message may be received from any source.\n"
"\n"
" any_tag This constant may be used for the tag parameter of\n"
" receive or probe operations to indicate that a send\n"
" with any tag will be matched.\n"
"\n"
" collectives_tag Returns the reserved tag value used by the Boost.MPI\n"
" implementation for collective operations. Although\n"
" users are not permitted to use this tag to send or\n"
" receive messages with this tag, it may be useful when\n"
" monitoring communication patterns.\n"
"\n"
" host_rank If there is a host process, this is the rank of that\n"
" that process. Otherwise, this value will be None. MPI\n"
" does not define the meaning of a \"host\" process: \n"
" consult the documentation for your MPI implementation.\n"
"\n"
" io_rank The rank of a process that can perform input/output\n"
" via the standard facilities. If every process can\n"
" perform I/O using the standard facilities, this value\n"
" will be the same as any_source. If no process can\n"
" perform I/O, this value will be None.\n"
"\n"
" max_tag The maximum value that may be used for the tag\n"
" parameter of send/receive operations. This value will\n"
" be somewhat smaller than the value of MPI_TAG_UB,\n"
" because the Boost.MPI implementation reserves some\n"
" tags for collective operations.\n"
"\n"
" processor_name The name of this processor. The actual form of the\n"
" of the name is unspecified, but may be documented by\n"
" the underlying MPI implementation.\n"
"\n"
" rank The rank of this process in the \"world\" communicator.\n"
"\n"
" size The number of processes in the \"world\" communicator.\n"
" that process. Otherwise, this value will be None. MPI\n"
" does not define the meaning of a \"host\" process: \n"
"\n"
" world The \"world\" communicator from which all other\n"
" communicators will be derived. This is the equivalent\n"
" of MPI_COMM_WORLD.\n"
"\n"
"TRANSMITTING USER-DEFINED DATA\n"
" Boost.MPI can transmit user-defined data in several different ways.\n"
" Most importantly, it can transmit arbitrary Python objects by pickling\n"
" them at the sender and unpickling them at the receiver, allowing\n"
" arbitrarily complex Python data structures to interoperate with MPI.\n"
"\n"
" Boost.MPI also supports efficient serialization and transmission of\n"
" C++ objects (that have been exposed to Python) through its C++\n"
" interface. Any C++ type that provides (de-)serialization routines that\n"
" meet the requirements of the Boost.Serialization library is eligible\n"
" for this optimization, but the type must be registered in advance. To\n"
" register a C++ type, invoke the C++ function:\n"
" boost::mpi::python::register_serialized\n"
"\n"
" Finally, Boost.MPI supports separation of the structure of an object\n"
" from the data it stores, allowing the two pieces to be transmitted\n"
" separately. This \"skeleton/content\" mechanism, described in more\n"
" detail in a later section, is a communication optimization suitable\n"
" for problems with fixed data structures whose internal data changes\n"
" frequently.\n"
"\n"
"COLLECTIVES\n"
" Boost.MPI supports all of the MPI collectives (scatter, reduce, scan,\n"
" broadcast, etc.) for any type of data that can be transmitted with the\n"
" point-to-point communication operations. For the MPI collectives that\n"
" require a user-specified operation (e.g., reduce and scan), the\n"
" operation can be an arbitrary Python function. For instance, one could\n"
" concatenate strings with all_reduce:\n\n"
" mpi.all_reduce(my_string, lambda x,y: x + y)\n\n"
" The following module-level functions implement MPI collectives:\n"
" all_gather Gather the values from all processes.\n"
" all_reduce Combine the results from all processes.\n"
" all_to_all Every process sends data to every other process.\n"
" broadcast Broadcast data from one process to all other processes.\n"
" gather Gather the values from all processes to the root.\n"
" reduce Combine the results from all processes to the root.\n"
" scan Prefix reduction of the values from all processes.\n"
" scatter Scatter the values stored at the root to all processes.\n"
"\n"
"SKELETON/CONTENT MECHANISM\n"
" Boost.MPI provides a skeleton/content mechanism that allows the\n"
" transfer of large data structures to be split into two separate stages,\n"
" with the `skeleton' (or, `shape') of the data structure sent first and\n"
" the content (or, `data') of the data structure sent later, potentially\n"
" several times, so long as the structure has not changed since the\n"
" skeleton was transferred. The skeleton/content mechanism can improve\n"
" performance when the data structure is large and its shape is fixed,\n"
" because while the skeleton requires serialization (it has an unknown\n"
" size), the content transfer is fixed-size and can be done without\n"
" extra copies.\n"
"\n"
" To use the skeleton/content mechanism from Python, you must first\n"
" register the type of your data structure with the skeleton/content\n"
" mechanism *from C++*. The registration function is\n"
" boost::mpi::python::register_skeleton_and_content\n"
" and resides in the <boost/mpi/python.hpp> header.\n"
"\n"
" Once you have registered your C++ data structures, you can extract\n"
" the skeleton for an instance of that data structure with skeleton().\n"
" The resulting skeleton_proxy can be transmitted via the normal send\n"
" routine, e.g.,\n\n"
" mpi.world.send(1, 0, skeleton(my_data_structure))\n\n"
" skeleton_proxy objects can be received on the other end via recv(),\n"
" which stores a newly-created instance of your data structure with the\n"
" same `shape' as the sender in its `object' attribute:\n\n"
" shape = mpi.world.recv(0, 0)\n"
" my_data_structure = shape.object\n\n"
" Once the skeleton has been transmitted, the content (accessed via \n"
" get_content) can be transmitted in much the same way. Note, however,\n"
" that the receiver also specifies get_content(my_data_structure) in its\n"
" call to receive:\n\n"
" if mpi.rank == 0:\n"
" mpi.world.send(1, 0, get_content(my_data_structure))\n"
" else:\n"
" mpi.world.recv(0, 0, get_content(my_data_structure))\n\n"
" Of course, this transmission of content can occur repeatedly, if the\n"
" values in the data structure--but not its shape--changes.\n"
"\n"
" The skeleton/content mechanism is a structured way to exploit the\n"
" interaction between custom-built MPI datatypes and MPI_BOTTOM, to\n"
" eliminate extra buffer copies.\n"
"\n"
"C++/PYTHON MPI COMPATIBILITY\n"
" Boost.MPI is a C++ library whose facilities have been exposed to Python\n"
" via the Boost.Python library. Since the Boost.MPI Python bindings are\n"
" build directly on top of the C++ library, and nearly every feature of\n"
" C++ library is available in Python, hybrid C++/Python programs using\n"
" Boost.MPI can interact, e.g., sending a value from Python but receiving\n"
" that value in C++ (or vice versa). However, doing so requires some\n"
" care. Because Python objects are dynamically typed, Boost.MPI transfers\n"
" type information along with the serialized form of the object, so that\n"
" the object can be received even when its type is not known. This\n"
" mechanism differs from its C++ counterpart, where the static types of\n"
" transmitted values are always known.\n"
"\n"
" The only way to communicate between the C++ and Python views on \n"
" Boost.MPI is to traffic entirely in Python objects. For Python, this is\n"
" the normal state of affairs, so nothing will change. For C++, this\n"
" means sending and receiving values of type boost::python::object, from\n"
" the Boost.Python library. For instance, say we want to transmit an\n"
" integer value from Python:\n\n"
" comm.send(1, 0, 17)\n\n"
" In C++, we would receive that value into a Python object and then\n"
" `extract' an integer value:\n\n"
" boost::python::object value;\n"
" comm.recv(0, 0, value);\n"
" int int_value = boost::python::extract<int>(value);\n\n"
" In the future, Boost.MPI will be extended to allow improved\n"
" interoperability with the C++ Boost.MPI and the C MPI bindings.\n"
;
/***********************************************************
* environment documentation *
***********************************************************/
const char* environment_init_docstring =
"Initialize the MPI environment. Users should not need to call\n"
"this function directly, because the MPI environment will be\n"
"automatically initialized when the Boost.MPI module is loaded.\n";
const char* environment_finalize_docstring =
"Finalize (shut down) the MPI environment. Users only need to\n"
"invoke this function if MPI should be shut down before program\n"
"termination. Boost.MPI will automatically finalize the MPI\n"
"environment when the program exits.\n";
const char* environment_abort_docstring =
"Aborts all MPI processes and returns to the environment. The\n"
"precise behavior will be defined by the underlying MPI\n"
"implementation. This is equivalent to a call to MPI_Abort with\n"
"MPI_COMM_WORLD.\n"
"errcode is the error code to return from aborted processes.\n";
const char* environment_initialized_docstring =
"Determine if the MPI environment has already been initialized.\n";
const char* environment_finalized_docstring =
"Determine if the MPI environment has already been finalized.\n";
/***********************************************************
* exception documentation *
***********************************************************/
const char* exception_docstring =
"Instances of this class will be thrown when an MPI error\n"
"occurs. MPI failures that trigger these exceptions may or may not\n"
"be recoverable, depending on the underlying MPI implementation.\n"
"Consult the documentation for your MPI implementation to determine\n"
"the effect of MPI errors.\n";
const char* exception_what_docstring =
"A description of the error that occured. At present, this refers\n"
"only to the name of the MPI routine that failed.\n";
const char* exception_routine_docstring =
"The name of the MPI routine that reported the error.\n";
const char* exception_result_code_docstring =
"The result code returned from the MPI routine that reported the\n"
"error.\n";
/***********************************************************
* collectives documentation *
***********************************************************/
const char* all_gather_docstring =
"all_gather is a collective algorithm that collects the values\n"
"stored at each process into a tuple of values indexed by the\n"
"process number they came from. all_gather is (semantically) a\n"
"gather followed by a broadcast. The same tuple of values is\n"
"returned to all processes.\n";
const char* all_reduce_docstring =
"all_reduce is a collective algorithm that combines the values\n"
"stored by each process into a single value. The values can be\n"
"combined arbitrarily, specified via any function. The values\n"
"a1, a2, .., ap provided by p processors will be combined by the\n"
"binary function op into the result\n"
" op(a1, op(a2, ... op(ap-1,ap)))\n"
"that will be returned to all processes. This function is the\n"
"equivalent of calling all_gather() and then applying the built-in\n"
"reduce() function to the returned sequence. op is assumed to be\n"
"associative.\n";
const char* all_to_all_docstring =
"all_to_all is a collective algorithm that transmits values from\n"
"every process to every other process. On process i, the jth value\n"
"of the values sequence is sent to process j and placed in the ith\n"
"position of the tuple that will be returned from all_to_all.\n";
const char* broadcast_docstring =
"broadcast is a collective algorithm that transfers a value from an\n"
"arbitrary root process to every other process that is part of the\n"
"given communicator (comm). The root parameter must be the same for\n"
"every process. The value parameter need only be specified at the root\n"
"root. broadcast() returns the same broadcasted value to every process.\n";
const char* gather_docstring =
"gather is a collective algorithm that collects the values\n"
"stored at each process into a tuple of values at the root\n"
"process. This tuple is indexed by the process number that the\n"
"value came from, and will be returned only by the root process.\n"
"All other processes return None.\n";
const char* reduce_docstring =
"reduce is a collective algorithm that combines the values\n"
"stored by each process into a single value at the root. The\n"
"values can be combined arbitrarily, specified via any function.\n"
"The values a1, a2, .., ap provided by p processors will be\n"
"combined by the binary function op into the result\n"
" op(a1, op(a2, ... op(ap-1,ap)))\n"
"that will be returned on the root process. This function is the\n"
"equivalent of calling gather() to the root and then applying the\n"
"built-in reduce() function to the returned sequence. All non-root\n"
"processes return None. op is assumed to be associative.\n";
const char* scan_docstring =
"@c scan computes a prefix reduction of values from all processes.\n"
"It is a collective algorithm that combines the values stored by\n"
"each process with the values of all processes with a smaller rank.\n"
"The values can be arbitrarily combined, specified via a binary\n"
"function op. If each process i provides the value ai, then scan\n"
"returns op(a1, op(a2, ... op(ai-1, ai))) to the ith process. op is\n"
"assumed to be associative. This routine is the equivalent of an\n"
"all_gather(), followed by a built-in reduce() on the first i+1\n"
"values in the resulting sequence on processor i. op is assumed\n"
"to be associative.\n";
const char* scatter_docstring =
"scatter is a collective algorithm that scatters the values stored\n"
"in the root process (as a container with comm.size elements) to\n"
"all of the processes in the communicator. The values parameter \n"
"(only significant at the root) is indexed by the process number to\n"
"which the corresponding value will be sent. The value received by \n"
"each process is returned from scatter.\n";
/***********************************************************
* communicator documentation *
***********************************************************/
const char* communicator_docstring =
"The communicator class abstracts a set of communicating\n"
"processes in MPI. All of the processes that belong to a certain\n"
"communicator can determine the size of the communicator, their rank\n"
"within the communicator, and communicate with any other processes\n"
"in the communicator.\n";
const char* communicator_default_constructor_docstring =
"Build a new Boost.MPI communicator for MPI_COMM_WORLD.\n";
const char* communicator_rank_docstring =
"Returns the rank of the process in the communicator, which will be a\n"
"value in [0, size).\n";
const char* communicator_size_docstring =
"Returns the number of processes in the communicator.\n";
const char* communicator_send_docstring =
"This routine executes a potentially blocking send with the given\n"
"tag to the process with rank dest. It can be received by the\n"
"destination process with a matching recv call. The value will be\n"
"transmitted in one of several ways:\n"
"\n"
" - For C++ objects registered via register_serialized(), the value\n"
" will be serialized and transmitted.\n"
"\n"
" - For skeleton_proxy objects, the skeleton of the object will be\n"
" serialized and transmitted.\n"
"\n"
" - For content objects, the content will be transmitted directly.\n"
" This content can be received by a matching recv/irecv call that\n"
" provides a suitable `buffer' argument.\n"
"\n"
" - For all other Python objects, the value will be pickled and\n"
" transmitted.\n";
const char* communicator_recv_docstring =
"This routine blocks until it receives a message from the process\n"
"source with the given tag. If the source parameter is not specified,\n"
"the message can be received from any process. Likewise, if the tag\n"
"parameter is not specified, a message with any tag can be received.\n"
"If return_status is True, returns a tuple containing the received\n"
"object followed by a status object describing the communication.\n"
"Otherwise, recv() returns just the received object.\n"
"\n"
"When receiving the content of a data type that has been sent separately\n"
"from its skeleton, user code must provide a value for the `buffer'\n"
"argument. This value should be the content object returned from\n"
"get_content().\n";
const char* communicator_isend_docstring =
"This routine executes a nonblocking send with the given\n"
"tag to the process with rank dest. It can be received by the\n"
"destination process with a matching recv call. The value will be\n"
"transmitted in the same way as with send().\n"
"This routine returns a request object, which can be used to query\n"
"when the transmission has completed, wait for its completion, or\n"
"cancel the transmission.\n";
const char* communicator_irecv_docstring =
"This routine initiates a non-blocking receive from the process\n"
"source with the given tag. If the source parameter is not specified,\n"
"the message can be received from any process. Likewise, if the tag\n"
"parameter is not specified, a message with any tag can be received.\n"
"This routine returns a request object, which can be used to query\n"
"when the transmission has completed, wait for its completion, or\n"
"cancel the transmission. The received value be accessible\n"
"through the `value' attribute of the request object once transmission\n"
"has completed.\n"
"\n"
"As with the recv() routine, when receiving the content of a data type\n"
"that has been sent separately from its skeleton, user code must provide\n"
"a value for the `buffer' argument. This value should be the content\n"
"object returned from get_content().\n";
const char* communicator_probe_docstring =
"This operation waits until a message matching (source, tag)\n"
"is available to be received. It then returns information about\n"
"that message. If source is omitted, a message from any process\n"
"will match. If tag is omitted, a message with any tag will match.\n"
"The actual source and tag can be retrieved from the returned status\n"
"object. To check if a message is available without blocking, use\n"
"iprobe.\n";
const char* communicator_iprobe_docstring =
"This operation determines if a message matching (source, tag) is\n"
"available to be received. If so, it returns information about that\n"
"message; otherwise, it returns None. If source is omitted, a message\n"
"from any process will match. If tag is omitted, a message with any\n"
"tag will match. The actual source and tag can be retrieved from the\n"
"returned status object. To wait for a message to become available, use\n"
"probe.\n";
const char* communicator_barrier_docstring =
"Wait for all processes within a communicator to reach the\n"
"barrier.\n";
const char* communicator_split_docstring =
"Split the communicator into multiple, disjoint communicators\n"
"each of which is based on a particular color. This is a\n"
"collective operation that returns a new communicator that is a\n"
"subgroup of this. This routine is functionally equivalent to\n"
"MPI_Comm_split.\n\n"
"color is the color of this process. All processes with the\n"
"same color value will be placed into the same group.\n\n"
"If provided, key is a key value that will be used to determine\n"
"the ordering of processes with the same color in the resulting\n"
"communicator. If omitted, the key will default to the rank of\n"
"the process in the current communicator.\n\n"
"Returns a new communicator containing all of the processes in\n"
"this communicator that have the same color.\n";
const char* communicator_abort_docstring =
"Makes a \"best attempt\" to abort all of the tasks in the group of\n"
"this communicator. Depending on the underlying MPI\n"
"implementation, this may either abort the entire program (and\n"
"possibly return errcode to the environment) or only abort\n"
"some processes, allowing the others to continue. Consult the\n"
"documentation for your MPI implementation. This is equivalent to\n"
"a call to MPI_Abort\n\n"
"errcode is the error code to return from aborted processes.\n";
/***********************************************************
* request documentation *
***********************************************************/
const char* request_docstring =
"The request class contains information about a non-blocking send\n"
"or receive and will be returned from isend or irecv, respectively.\n"
"When a request object represents a completed irecv, the `value' \n"
"attribute will contain the received value.\n";
const char* request_wait_docstring =
"Wait until the communication associated with this request has\n"
"completed. For a request that is associated with an isend(), returns\n"
"a status object describing the communication. For an irecv()\n"
"operation, returns the received value by default. However, when\n"
"return_status=True, a (value, status) pair is returned by a.\n"
"completed irecv request.\n";
const char* request_test_docstring =
"Determine whether the communication associated with this request\n"
"has completed successfully. If so, returns the status object\n"
"describing the communication (for an isend request) or a tuple\n"
"containing the received value and a status object (for an irecv\n"
"request). Note that once test() returns a status object, the\n"
"request has completed and wait() should not be called.\n";
const char* request_cancel_docstring =
"Cancel a pending communication, assuming it has not already been\n"
"completed.\n";
/***********************************************************
* skeleton/content documentation *
***********************************************************/
const char* object_without_skeleton_docstring =
"The object_without_skeleton class is an exception class used only\n"
"when the skeleton() or get_content() function is called with an\n"
"object that is not supported by the skeleton/content mechanism.\n"
"All C++ types for which skeletons and content can be transmitted\n"
"must be registered with the C++ routine:\n"
" boost::mpi::python::register_skeleton_and_content\n";
const char* object_without_skeleton_object_docstring =
"The object on which skeleton() or get_content() was invoked.\n";
const char* skeleton_proxy_docstring =
"The skeleton_proxy class is used to represent the skeleton of an\n"
"object. The skeleton_proxy can be used as the value parameter of\n"
"send() or isend() operations, but instead of transmitting the\n"
"entire object, only its skeleton (\"shape\") will be sent, without\n"
"the actual data. Its content can then be transmitted, separately.\n"
"\n"
"User code cannot generate skeleton_proxy instances directly. To\n"
"refer to the skeleton of an object, use skeleton(object). Skeletons\n"
"can also be received with the recv() and irecv() methods.\n"
"\n"
"Note that the skeleton/content mechanism can only be used with C++\n"
"types that have been explicitly registered.\n";
const char* skeleton_proxy_object_docstring =
"The actual object whose skeleton is represented by this proxy object.\n";
const char* content_docstring =
"The content is a proxy class that represents the content of an object,\n"
"which can be separately sent or received from its skeleton.\n"
"\n"
"User code cannot generate content instances directly. Call the\n"
"get_content() routine to retrieve the content proxy for a particular\n"
"object. The content instance can be used with any of the send() or\n"
"recv() variants. Note that get_content() can only be used with C++\n"
"data types that have been explicitly registered with the Python\n"
"skeleton/content mechanism.\n";
const char* skeleton_docstring =
"The skeleton function retrieves the skeleton_proxy for its object\n"
"parameter, allowing the transmission of the skeleton (or \"shape\")\n"
"of the object separately from its data. The skeleton/content mechanism\n"
"is useful when a large data structure remains structurally the same\n"
"throughout a computation, but its content (i.e., the values in the\n"
"structure) changes several times. Tranmission of the content part does\n"
"not require any serialization or unnecessary buffer copies, so it is\n"
"very efficient for large data structures.\n"
"\n"
"Only C++ types that have been explicitly registered with the Boost.MPI\n"
"Python library can be used with the skeleton/content mechanism. Use:\b"
" boost::mpi::python::register_skeleton_and_content\n";
const char* get_content_docstring =
"The get_content function retrieves the content for its object parameter,\n"
"allowing the transmission of the data in a data structure separately\n"
"from its skeleton (or \"shape\"). The skeleton/content mechanism\n"
"is useful when a large data structure remains structurally the same\n"
"throughout a computation, but its content (i.e., the values in the\n"
"structure) changes several times. Tranmission of the content part does\n"
"not require any serialization or unnecessary buffer copies, so it is\n"
"very efficient for large data structures.\n"
"\n"
"Only C++ types that have been explicitly registered with the Boost.MPI\n"
"Python library can be used with the skeleton/content mechanism. Use:\b"
" boost::mpi::python::register_skeleton_and_content\n";
/***********************************************************
* status documentation *
***********************************************************/
const char* status_docstring =
"The status class stores information about a given message, including\n"
"its source, tag, and whether the message transmission was cancelled\n"
"or resulted in an error.\n";
const char* status_source_docstring =
"The source of the incoming message.\n";
const char* status_tag_docstring =
"The tag of the incoming message.\n";
const char* status_error_docstring =
"The error code associated with this transmission.\n";
const char* status_cancelled_docstring =
"Whether this transmission was cancelled.\n";
/***********************************************************
* timer documentation *
***********************************************************/
const char* timer_docstring =
"The timer class is a simple wrapper around the MPI timing facilities.\n";
const char* timer_default_constructor_docstring =
"Initializes the timer. After this call, elapsed == 0.\n";
const char* timer_restart_docstring =
"Restart the timer, after which elapsed == 0.\n";
const char* timer_elapsed_docstring =
"The time elapsed since initialization or the last restart(),\n"
"whichever is more recent.\n";
const char* timer_elapsed_min_docstring =
"Returns the minimum non-zero value that elapsed may return\n"
"This is the resolution of the timer.\n";
const char* timer_elapsed_max_docstring =
"Return an estimate of the maximum possible value of elapsed. Note\n"
"that this routine may return too high a value on some systems.\n";
const char* timer_time_is_global_docstring =
"Determines whether the elapsed time values are global times or\n"
"local processor times.\n";
} } } // end namespace boost::mpi::python
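The registration hooks these docstrings mention, register_serialized and register_skeleton_and_content, are invoked from C++ when building an extension module. A sketch (the module name and the point type are illustrative assumptions, not part of this commit):

#include <boost/mpi/python.hpp>
#include <boost/python.hpp>
#include <boost/serialization/vector.hpp>
#include <vector>

// A user-defined, Boost.Serialization-enabled type (illustrative).
struct point {
  double x, y;
  template<typename Archive>
  void serialize(Archive& ar, const unsigned int /*version*/) { ar & x & y; }
};

BOOST_PYTHON_MODULE(my_extension)
{
  boost::python::class_<point>("point")
    .def_readwrite("x", &point::x)
    .def_readwrite("y", &point::y);

  // Let point values cross send()/recv() without pickling.
  boost::mpi::python::register_serialized(point());

  // Enable skeleton()/get_content() for std::vector<int>.
  boost::mpi::python::register_skeleton_and_content(std::vector<int>());
}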

src/python/exception.cpp Normal file

@@ -0,0 +1,55 @@
// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Copyright (C) 2005 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
/** @file exception.cpp
*
* This file reflects the Boost.MPI @c mpi_error class into
* Python.
*/
#include <boost/python.hpp>
#include <boost/mpi/exception.hpp>
#include <string>
#include <boost/lexical_cast.hpp>
#include "utility.hpp"
using namespace boost::python;
using namespace boost::mpi;
namespace boost { namespace mpi { namespace python {
extern const char* exception_docstring;
extern const char* exception_what_docstring;
extern const char* exception_routine_docstring;
extern const char* exception_result_code_docstring;
str exception_str(const exception& e)
{
return str("MPI routine `" + std::string(e.routine()) +
"' returned error code " +
lexical_cast<std::string>(e.result_code()));
}
void export_exception()
{
using boost::python::arg;
using boost::python::object;
object type =
class_<exception>
("exception", exception_docstring, no_init)
.add_property("what", &exception::what, exception_what_docstring)
.add_property("routine", &exception::what, exception_routine_docstring)
.add_property("result_code", &exception::what,
exception_result_code_docstring)
.def("__str__", &exception_str)
;
translate_exception<exception>::declare(type);
}
} } } // end namespace boost::mpi::python

src/python/module.cpp Normal file

@@ -0,0 +1,53 @@
// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
/** @file module.cpp
*
* This file provides the top-level module for the Boost.MPI Python
* bindings.
*/
#include <boost/python.hpp>
#include <boost/mpi.hpp>
using namespace boost::python;
using namespace boost::mpi;
namespace boost { namespace mpi { namespace python {
extern void export_environment();
extern void export_exception();
extern void export_collectives();
extern void export_communicator();
extern void export_datatypes();
extern void export_request();
extern void export_status();
extern void export_timer();
extern const char* module_docstring;
BOOST_PYTHON_MODULE(mpi)
{
// Setup module documentation
scope().attr("__doc__") = module_docstring;
scope().attr("__author__") = "Douglas Gregor <doug.gregor@gmail.com>";
scope().attr("__date__") = "$LastChangedDate: 2006-07-16 15:25:47 -0400 (Sun, 16 Jul 2006) $";
scope().attr("__version__") = "$Revision$";
scope().attr("__copyright__") = "Copyright (C) 2006 Douglas Gregor";
scope().attr("__license__") = "http://www.boost.org/LICENSE_1_0.txt";
export_environment();
export_exception();
export_communicator();
export_collectives();
export_datatypes();
export_request();
export_status();
export_timer();
}
} } } // end namespace boost::mpi::python

src/python/py_communicator.cpp Normal file

@@ -0,0 +1,134 @@
// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
/** @file communicator.cpp
*
* This file reflects the Boost.MPI @c communicator class into
* Python.
*/
#include <boost/python.hpp>
#include <boost/mpi.hpp>
#include <boost/mpi/python/serialize.hpp>
using namespace boost::python;
using namespace boost::mpi;
namespace boost { namespace mpi { namespace python {
extern const char* communicator_docstring;
extern const char* communicator_default_constructor_docstring;
extern const char* communicator_rank_docstring;
extern const char* communicator_size_docstring;
extern const char* communicator_send_docstring;
extern const char* communicator_recv_docstring;
extern const char* communicator_isend_docstring;
extern const char* communicator_irecv_docstring;
extern const char* communicator_probe_docstring;
extern const char* communicator_iprobe_docstring;
extern const char* communicator_barrier_docstring;
extern const char* communicator_split_docstring;
extern const char* communicator_split_key_docstring;
extern const char* communicator_abort_docstring;
object
communicator_recv(const communicator& comm, int source, int tag,
bool return_status)
{
using boost::python::make_tuple;
object result;
status stat = comm.recv(source, tag, result);
if (return_status)
return make_tuple(result, stat);
else
return result;
}
object
communicator_irecv(const communicator& comm, int source, int tag)
{
using boost::python::make_tuple;
object result;
object req(comm.irecv(source, tag, result));
req.attr("value") = result;
return req;
}
object
communicator_iprobe(const communicator& comm, int source, int tag)
{
if (boost::optional<status> result = comm.iprobe(source, tag))
return object(*result);
else
return object();
}
extern void export_skeleton_and_content(class_<communicator>&);
void export_communicator()
{
using boost::python::arg;
using boost::python::object;
class_<communicator> comm("communicator", communicator_docstring);
comm
.def(init<>())
.add_property("rank", &communicator::rank, communicator_rank_docstring)
.add_property("size", &communicator::size, communicator_size_docstring)
.def("send",
(void (communicator::*)(int, int, const object&) const)
&communicator::send<object>,
(arg("dest"), arg("tag") = 0, arg("value") = object()),
communicator_send_docstring)
.def("recv", &communicator_recv,
(arg("source") = any_source, arg("tag") = any_tag,
arg("return_status") = false),
communicator_recv_docstring)
.def("isend",
(request (communicator::*)(int, int, const object&) const)
&communicator::isend<object>,
(arg("dest"), arg("tag") = 0, arg("value") = object()),
communicator_isend_docstring)
.def("irecv", &communicator_irecv,
(arg("source") = any_source, arg("tag") = any_tag),
communicator_irecv_docstring)
.def("probe", &communicator::probe,
(arg("source") = any_source, arg("tag") = any_tag),
communicator_probe_docstring)
.def("iprobe", &communicator_iprobe,
(arg("source") = any_source, arg("tag") = any_tag),
communicator_iprobe_docstring)
.def("barrier", &communicator::barrier, communicator_barrier_docstring)
.def("__nonzero__", &communicator::operator bool)
.def("split",
(communicator (communicator::*)(int) const)&communicator::split,
(arg("color")), communicator_split_docstring)
.def("split",
(communicator (communicator::*)(int, int) const)&communicator::split,
(arg("color"), arg("key")))
.def("abort", &communicator::abort, arg("errcode"),
communicator_abort_docstring)
;
// Module-level attributes
scope().attr("any_source") = any_source;
scope().attr("any_tag") = any_tag;
{
communicator world;
scope().attr("world") = world;
scope().attr("rank") = world.rank();
scope().attr("size") = world.size();
}
// Export skeleton and content
export_skeleton_and_content(comm);
}
} } } // end namespace boost::mpi::python
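A hedged sketch of the point-to-point interface exposed above, using the keyword names from the arg() declarations. recv() with return_status=True yields a (value, status) pair, while irecv() returns a request whose wait() pairs the eventual status with the received object (see request_wait() in py_request.cpp below). The `mpi` import name is assumed as before.

import mpi

if mpi.rank == 0:
    # any picklable Python object can be sent
    mpi.world.send(dest=1, tag=17, value=["a", "list", "of", "strings"])
    reply, stat = mpi.world.recv(source=1, tag=18, return_status=True)
elif mpi.rank == 1:
    req = mpi.world.irecv(source=0, tag=17)
    stat, value = req.wait()          # blocks; req.value holds the data
    mpi.world.send(dest=0, tag=18, value=value)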


@@ -0,0 +1,111 @@
// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
/** @file environment.cpp
*
* This file reflects the Boost.MPI "environment" class into Python
* methods at module level.
*/
#include <boost/python.hpp>
#include <boost/mpi.hpp>
#include <cstdlib>   // free()
#include <cstring>   // strdup()
using namespace boost::python;
using namespace boost::mpi;
namespace boost { namespace mpi { namespace python {
extern const char* environment_init_docstring;
extern const char* environment_finalize_docstring;
extern const char* environment_abort_docstring;
extern const char* environment_initialized_docstring;
extern const char* environment_finalized_docstring;
/**
* The environment used by the Boost.MPI Python module. This will be
* zero-initialized before it is used.
*/
static environment* env;
bool mpi_init(list python_argv, bool abort_on_exception)
{
// If MPI is already initialized, do nothing.
if (environment::initialized())
return false;
// Convert Python argv into C-style argc/argv.
int my_argc = extract<int>(python_argv.attr("__len__")());
char** my_argv = new char*[my_argc];
for (int arg = 0; arg < my_argc; ++arg)
my_argv[arg] = strdup(extract<const char*>(python_argv[arg]));
// Initialize MPI
int mpi_argc = my_argc;
char** mpi_argv = my_argv;
env = new environment(mpi_argc, mpi_argv, abort_on_exception);
// If anything changed, convert C-style argc/argv into Python argv
if (mpi_argv != my_argv)
PySys_SetArgv(mpi_argc, mpi_argv);
for (int arg = 0; arg < my_argc; ++arg)
free(my_argv[arg]);
delete [] my_argv;
return true;
}
void mpi_finalize()
{
if (env) {
delete env;
env = 0;
}
}
void export_environment()
{
using boost::python::arg;
def("init", mpi_init, (arg("argv"), arg("abort_on_exception") = true),
environment_init_docstring);
def("finalize", mpi_finalize, environment_finalize_docstring);
// Set up initialization and finalization code
if (!environment::initialized()) {
// MPI_Init from sys.argv
object sys = object(handle<>(PyImport_ImportModule("sys")));
mpi_init(extract<list>(sys.attr("argv")), true);
// Set up the MPI_Finalize call for when the program exits
object atexit = object(handle<>(PyImport_ImportModule("atexit")));
object finalize = scope().attr("finalize");
atexit.attr("register")(finalize);
}
def("abort", &environment::abort, arg("errcode"),
environment_abort_docstring);
def("initialized", &environment::initialized,
environment_initialized_docstring);
def("finalized", &environment::finalized,
environment_finalized_docstring);
scope().attr("max_tag") = environment::max_tag();
scope().attr("collectives_tag") = environment::collectives_tag();
scope().attr("processor_name") = environment::processor_name();
if (optional<int> host_rank = environment::host_rank())
scope().attr("host_rank") = *host_rank;
else
scope().attr("host_rank") = object();
if (optional<int> io_rank = environment::io_rank())
scope().attr("io_rank") = *io_rank;
else
scope().attr("io_rank") = object();
}
} } } // end namespace boost::mpi::python
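Note that export_environment() initializes MPI from sys.argv at import time and registers finalize() with atexit, so an explicit init()/finalize() pair is normally unnecessary. A small sketch of the module-level functions and attributes, under the same `mpi` import assumption as above:

import mpi                       # importing already called MPI_Init

assert mpi.initialized() and not mpi.finalized()
print "running on", mpi.processor_name
# finalize() is registered with atexit, but may also be called early:
# mpi.finalize()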

src/python/py_request.cpp

@@ -0,0 +1,64 @@
// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
/** @file request.cpp
*
* This file reflects the Boost.MPI @c request class into
* Python.
*/
#include <boost/python.hpp>
#include <boost/mpi.hpp>
using namespace boost::python;
using namespace boost::mpi;
namespace boost { namespace mpi { namespace python {
extern const char* request_docstring;
extern const char* request_wait_docstring;
extern const char* request_test_docstring;
extern const char* request_cancel_docstring;
object request_wait(object req_obj)
{
request& req = extract<request&>(req_obj)();
status stat = req.wait();
if (PyObject_HasAttrString(req_obj.ptr(), "value"))
return boost::python::make_tuple(stat, req_obj.attr("value"));
else
return object(stat);
}
object request_test(object req_obj)
{
request& req = extract<request&>(req_obj)();
if (optional<status> stat = req.test())
{
if (PyObject_HasAttrString(req_obj.ptr(), "value"))
return boost::python::make_tuple(*stat, req_obj.attr("value")); // dereference the optional
else
return object(*stat);
}
else
return object();
}
void export_request()
{
using boost::python::arg;
using boost::python::object;
class_<request>("request", request_docstring, no_init)
.def("wait", &request_wait, request_wait_docstring)
.def("test", &request_test, request_test_docstring)
.def("cancel", &request::cancel, request_cancel_docstring)
;
}
} } } // end namespace boost::mpi::python
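As the helpers above show, wait() and test() return a (status, value) tuple whenever the request carries a `value` attribute (as requests produced by irecv() do), and test() returns None while the operation is still pending. A sketch under the same import assumption:

import mpi

req = mpi.world.irecv(source=mpi.any_source, tag=mpi.any_tag)
result = req.test()              # None if the receive has not completed yet
if result is None:
    stat, value = req.wait()     # block until completion
else:
    stat, value = result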

src/python/py_timer.cpp

@@ -0,0 +1,48 @@
// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
/** @file timer.cpp
*
* This file reflects the Boost.MPI @c timer class into
* Python.
*/
#include <boost/python.hpp>
#include <boost/mpi/timer.hpp>
using namespace boost::python;
using namespace boost::mpi;
namespace boost { namespace mpi { namespace python {
extern const char* timer_docstring;
extern const char* timer_default_constructor_docstring;
extern const char* timer_restart_docstring;
extern const char* timer_elapsed_docstring;
extern const char* timer_elapsed_min_docstring;
extern const char* timer_elapsed_max_docstring;
extern const char* timer_time_is_global_docstring;
void export_timer()
{
using boost::python::arg;
using boost::python::object;
class_<timer>("timer", timer_docstring)
.def(init<>())
.def("restart", &timer::restart, timer_restart_docstring)
.add_property("elapsed", &timer::elapsed, timer_elapsed_docstring)
.add_property("elapsed_min", &timer::elapsed_min,
timer_elapsed_min_docstring)
.add_property("elapsed_max", &timer::elapsed_max,
timer_elapsed_max_docstring)
.add_property("time_is_global", &timer::time_is_global,
timer_time_is_global_docstring)
;
}
} } } // end namespace boost::mpi::python
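The elapsed times are exposed as read-only properties rather than methods; a brief sketch (do_some_work() is a hypothetical placeholder):

import mpi

t = mpi.timer()                  # starts timing on construction
do_some_work()                   # hypothetical workload
print "%g seconds elapsed" % t.elapsed
t.restart()                      # reset the reference point
if t.time_is_global:
    print "MPI_Wtime is synchronized across all processes"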

src/python/serialize.cpp

@@ -0,0 +1,78 @@
// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
/** @file serialize.cpp
*
* This file provides Boost.Serialization support for Python objects.
*/
#include <boost/mpi/python/serialize.hpp>
#include <boost/mpi/python/skeleton_and_content.hpp>
#include <boost/mpi.hpp>
#include <map>       // for the skeleton/content handler map
namespace boost { namespace python {
struct pickle::data_t {
object module;
object dumps;
object loads;
};
/// Data used for communicating with the Python `pickle' module.
pickle::data_t* pickle::data;
str pickle::dumps(object obj, int protocol)
{
if (!data) initialize_data();
return extract<str>((data->dumps)(obj, protocol));
}
object pickle::loads(str s)
{
if (!data) initialize_data();
return ((data->loads)(s));
}
void pickle::initialize_data()
{
data = new data_t;
data->module = object(handle<>(PyImport_ImportModule("pickle")));
data->dumps = data->module.attr("dumps");
data->loads = data->module.attr("loads");
}
} } // end namespace boost::python
BOOST_PYTHON_DIRECT_SERIALIZATION_ARCHIVE_IMPL(
::boost::mpi::packed_iarchive,
::boost::mpi::packed_oarchive)
namespace boost { namespace mpi { namespace python { namespace detail {
boost::python::object skeleton_proxy_base_type;
// A map from Python type objects to skeleton/content handlers
typedef std::map<PyTypeObject*, skeleton_content_handler>
skeleton_content_handlers_type;
skeleton_content_handlers_type skeleton_content_handlers;
bool
skeleton_and_content_handler_registered(PyTypeObject* type)
{
return
skeleton_content_handlers.find(type) != skeleton_content_handlers.end();
}
void
register_skeleton_and_content_handler(PyTypeObject* type,
const skeleton_content_handler& handler)
{
skeleton_content_handlers[type] = handler;
}
} } } } // end namespace boost::mpi::python::detail


@@ -0,0 +1,164 @@
// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
/** @file skeleton_and_content.cpp
*
* This file reflects the skeleton/content facilities into Python.
*/
#include <boost/mpi/python/skeleton_and_content.hpp>
#include <boost/mpi/python/serialize.hpp>
#include <boost/python/list.hpp>
#include <typeinfo>
#include <list>
#include <map>       // for skeleton_content_handlers
#include "utility.hpp"
using namespace boost::python;
using namespace boost::mpi;
namespace boost { namespace mpi { namespace python {
namespace detail {
typedef std::map<PyTypeObject*, skeleton_content_handler>
skeleton_content_handlers_type;
extern skeleton_content_handlers_type skeleton_content_handlers;
}
/**
* An exception that will be thrown when the object passed to the
* Python version of skeleton() does not have a skeleton.
*/
struct object_without_skeleton : public std::exception {
explicit object_without_skeleton(object value) : value(value) { }
virtual ~object_without_skeleton() throw() { }
object value;
};
str object_without_skeleton_str(const object_without_skeleton& e)
{
return str("\nThe skeleton() or get_content() function was invoked for a Python\n"
"object that is not supported by the Boost.MPI skeleton/content\n"
"mechanism. To transfer objects via skeleton/content, you must\n"
"register the C++ type of this object with the C++ function:\n"
" boost::mpi::python::register_skeleton_and_content()\n"
"Object: " + str(e.value) + "\n");
}
/**
* Extract the "skeleton" from a Python object. In truth, all we're
* doing at this point is verifying that the object is a C++ type that
* has been registered for the skeleton/content mechanism.
*/
object skeleton(object value)
{
PyTypeObject* type = value.ptr()->ob_type;
detail::skeleton_content_handlers_type::iterator pos =
detail::skeleton_content_handlers.find(type);
if (pos == detail::skeleton_content_handlers.end())
throw object_without_skeleton(value);
else
return pos->second.get_skeleton_proxy(value);
}
/**
* Extract the "content" from a Python object, which must be a C++
* type that has been registered for the skeleton/content mechanism.
*/
content get_content(object value)
{
PyTypeObject* type = value.ptr()->ob_type;
detail::skeleton_content_handlers_type::iterator pos =
detail::skeleton_content_handlers.find(type);
if (pos == detail::skeleton_content_handlers.end())
throw object_without_skeleton(value);
else
return pos->second.get_content(value);
}
/// Send the content part of a Python object.
void
communicator_send_content(const communicator& comm, int dest, int tag,
const content& c)
{
comm.send(dest, tag, c.base());
}
/// Receive the content of a Python object. We return the object
/// received, not the content wrapper.
object
communicator_recv_content(const communicator& comm, int source, int tag,
const content& c, bool return_status)
{
using boost::python::make_tuple;
status stat = comm.recv(source, tag, c.base());
if (return_status)
return make_tuple(c.object, stat);
else
return c.object;
}
/// Receive the content of a Python object. The request object's value
/// attribute will reference the object whose content is being
/// received, not the content wrapper.
object
communicator_irecv_content(const communicator& comm, int source, int tag,
const content& c)
{
using boost::python::make_tuple;
object req(comm.irecv(source, tag, c.base()));
req.attr("value") = c.object;
return req;
}
extern const char* object_without_skeleton_docstring;
extern const char* object_without_skeleton_object_docstring;
extern const char* skeleton_proxy_docstring;
extern const char* skeleton_proxy_object_docstring;
extern const char* content_docstring;
extern const char* skeleton_docstring;
extern const char* get_content_docstring;
void export_skeleton_and_content(class_<communicator>& comm)
{
using boost::python::arg;
// Expose the object_without_skeleton exception
object type =
class_<object_without_skeleton>
("object_without_skeleton", object_without_skeleton_docstring, no_init)
.def_readonly("object", &object_without_skeleton::value,
object_without_skeleton_object_docstring)
.def("__str__", &object_without_skeleton_str)
;
translate_exception<object_without_skeleton>::declare(type);
// Expose the Python variants of "skeleton_proxy" and "content", and
// their generator functions.
detail::skeleton_proxy_base_type =
class_<skeleton_proxy_base>("skeleton_proxy", skeleton_proxy_docstring,
no_init)
.def_readonly("object", &skeleton_proxy_base::object,
skeleton_proxy_object_docstring);
class_<content>("content", content_docstring, no_init);
def("skeleton", &skeleton, arg("object"), skeleton_docstring);
def("get_content", &get_content, arg("object"), get_content_docstring);
// Expose communicator send/recv operations for content.
comm
.def("send", communicator_send_content,
(arg("dest"), arg("tag") = 0, arg("value")))
.def("recv", communicator_recv_content,
(arg("source") = any_source, arg("tag") = any_tag, arg("buffer"),
arg("return_status") = false))
.def("irecv", communicator_irecv_content,
(arg("source") = any_source, arg("tag") = any_tag, arg("buffer")));
}
} } } // end namespace boost::mpi::python
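A hedged sketch of the resulting Python-side workflow: the skeleton travels once, after which the content can be re-sent cheaply. skeleton() and get_content() raise the object_without_skeleton exception exposed above for any type not registered through register_skeleton_and_content(); `vec` below is hypothetical and stands for an instance of some registered C++ type.

import mpi

if mpi.rank == 0:
    mpi.world.send(dest=1, tag=0, value=mpi.skeleton(vec))     # structure
    mpi.world.send(dest=1, tag=1, value=mpi.get_content(vec))  # data
elif mpi.rank == 1:
    proxy = mpi.world.recv(source=0, tag=0)     # a skeleton_proxy
    # receive the data directly into the object the skeleton rebuilt
    mpi.world.recv(source=0, tag=1, buffer=mpi.get_content(proxy.object))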

src/python/status.cpp

@@ -0,0 +1,41 @@
// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
/** @file status.cpp
*
* This file reflects the Boost.MPI @c status class into
* Python.
*/
#include <boost/python.hpp>
#include <boost/mpi.hpp>
using namespace boost::python;
using namespace boost::mpi;
namespace boost { namespace mpi { namespace python {
extern const char* status_docstring;
extern const char* status_source_docstring;
extern const char* status_tag_docstring;
extern const char* status_error_docstring;
extern const char* status_cancelled_docstring;
void export_status()
{
using boost::python::arg;
using boost::python::object;
class_<status>("status", status_docstring, no_init)
.add_property("source", &status::source, status_source_docstring)
.add_property("tag", &status::tag, status_tag_docstring)
.add_property("error", &status::error, status_error_docstring)
.add_property("cancelled", &status::cancelled, status_cancelled_docstring)
;
}
} } } // end namespace boost::mpi::python
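Combined with return_status=True on communicator.recv(), these read-only properties let Python code inspect where a message came from; a brief sketch:

import mpi

msg, stat = mpi.world.recv(source=mpi.any_source, tag=mpi.any_tag,
                           return_status=True)
print "message %r from rank %d, tag %d" % (msg, stat.source, stat.tag)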

src/python/utility.hpp

@@ -0,0 +1,43 @@
// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
#ifndef BOOST_MPI_PYTHON_UTILITY_HPP
#define BOOST_MPI_PYTHON_UTILITY_HPP
/** @file utility.hpp
*
* This file is a utility header for the Boost.MPI Python bindings.
*/
#include <boost/python.hpp>
namespace boost { namespace mpi { namespace python {
template<typename E>
class translate_exception
{
explicit translate_exception(boost::python::object type) : type(type) { }
public:
static void declare(boost::python::object type)
{
using boost::python::register_exception_translator;
register_exception_translator<E>(translate_exception(type));
}
void operator()(const E& e) const
{
using boost::python::object;
PyErr_SetObject(type.ptr(), object(e).ptr());
}
private:
boost::python::object type;
};
} } } // end namespace boost::mpi::python
#endif // BOOST_MPI_PYTHON_UTILITY_HPP

src/request.cpp

@@ -0,0 +1,120 @@
// Copyright (C) 2006 Douglas Gregor.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/mpi/request.hpp>
#include <boost/mpi/status.hpp>
#include <boost/mpi/exception.hpp>     // BOOST_MPI_CHECK_RESULT, mpi::exception
#include <boost/throw_exception.hpp>   // boost::throw_exception
namespace boost { namespace mpi {
/***************************************************************************
* request *
***************************************************************************/
request::request()
: m_handler(0), m_data()
{
m_requests[0] = MPI_REQUEST_NULL;
m_requests[1] = MPI_REQUEST_NULL;
}
status request::wait()
{
if (m_handler) {
// This request is a receive for a serialized type. Use the
// handler to wait for completion.
return *m_handler(this, ra_wait);
} else if (m_requests[1] == MPI_REQUEST_NULL) {
// This request is either a send or a receive for a type with an
// associated MPI datatype, or a serialized datatype that has been
// packed into a single message. Just wait on the one receive/send
// and return the status to the user.
status result;
BOOST_MPI_CHECK_RESULT(MPI_Wait, (&m_requests[0], &result.m_status));
return result;
} else {
// This request is a send of a serialized type, broken into two
// separate messages. Complete both sends at once.
MPI_Status stats[2];
int error_code = MPI_Waitall(2, m_requests, stats);
if (error_code == MPI_ERR_IN_STATUS) {
// Dig out which status structure has the error, and use that
// one when throwing the exception.
if (stats[0].MPI_ERROR == MPI_SUCCESS
|| stats[0].MPI_ERROR == MPI_ERR_PENDING)
boost::throw_exception(exception("MPI_Waitall", stats[1].MPI_ERROR));
else
boost::throw_exception(exception("MPI_Waitall", stats[0].MPI_ERROR));
} else if (error_code != MPI_SUCCESS) {
// There was an error somewhere in the MPI_Waitall call; throw
// an exception for it.
boost::throw_exception(exception("MPI_Waitall", error_code));
}
// No errors. Returns the first status structure.
status result;
result.m_status = stats[0];
return result;
}
}
optional<status> request::test()
{
if (m_handler) {
// This request is a receive for a serialized type. Use the
// handler to test for completion.
return m_handler(this, ra_test);
} else if (m_requests[1] == MPI_REQUEST_NULL) {
// This request is either a send or a receive for a type with an
// associated MPI datatype, or a serialized datatype that has been
// packed into a single message. Just test the one receive/send
// and return the status to the user if it has completed.
status result;
int flag = 0;
BOOST_MPI_CHECK_RESULT(MPI_Test,
(&m_requests[0], &flag, &result.m_status));
return flag != 0? optional<status>(result) : optional<status>();
} else {
// This request is a send of a serialized type, broken into two
// separate messages. We only get a result if both complete.
MPI_Status stats[2];
int flag = 0;
int error_code = MPI_Testall(2, m_requests, &flag, stats);
if (error_code == MPI_ERR_IN_STATUS) {
// Dig out which status structure has the error, and use that
// one when throwing the exception.
if (stats[0].MPI_ERROR == MPI_SUCCESS
|| stats[0].MPI_ERROR == MPI_ERR_PENDING)
boost::throw_exception(exception("MPI_Testall", stats[1].MPI_ERROR));
else
boost::throw_exception(exception("MPI_Testall", stats[0].MPI_ERROR));
} else if (error_code != MPI_SUCCESS) {
// There was an error somewhere in the MPI_Testall call; throw
// an exception for it.
boost::throw_exception(exception("MPI_Testall", error_code));
}
// No errors. Returns the second status structure if the send has
// completed.
if (flag != 0) {
status result;
result.m_status = stats[1];
return result;
} else {
return optional<status>();
}
}
}
void request::cancel()
{
if (m_handler) {
m_handler(this, ra_cancel);
} else {
BOOST_MPI_CHECK_RESULT(MPI_Cancel, (&m_requests[0]));
if (m_requests[1] != MPI_REQUEST_NULL)
BOOST_MPI_CHECK_RESULT(MPI_Cancel, (&m_requests[1]));
}
}
} } // end namespace boost::mpi


@@ -0,0 +1,24 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#define BOOST_ARCHIVE_SOURCE
#include <boost/archive/impl/archive_pointer_iserializer.ipp>
#include <boost/archive/impl/archive_pointer_oserializer.ipp>
#include <boost/mpi/text_skeleton_iarchive.hpp>
#include <boost/archive/binary_iarchive.hpp>
#include <boost/archive/impl/basic_binary_iprimitive.ipp>
#include <boost/archive/impl/basic_binary_iarchive.ipp>
namespace boost { namespace archive {
// explicitly instantiate all required templates
template class detail::archive_pointer_iserializer<mpi::text_skeleton_iarchive> ;
} } // end namespace boost::archive


@@ -0,0 +1,22 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#define BOOST_ARCHIVE_SOURCE
#include <boost/mpi/detail/text_skeleton_oarchive.hpp>
#include <boost/archive/binary_oarchive.hpp>
#include <boost/archive/impl/basic_binary_oprimitive.ipp>
#include <boost/archive/impl/basic_binary_oarchive.ipp>
#include <boost/archive/impl/archive_pointer_oserializer.ipp>
namespace boost { namespace archive {
// explicitly instantiate all required templates
template class detail::archive_pointer_oserializer<mpi::text_skeleton_oarchive> ;
} } // end namespace boost::archive

src/timer.cpp

@@ -0,0 +1,25 @@
// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/mpi/timer.hpp>
#include <boost/mpi/exception.hpp>
namespace boost { namespace mpi {
bool timer::time_is_global()
{
int* is_global;
int found = 0;
BOOST_MPI_CHECK_RESULT(MPI_Attr_get,
(MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL, &is_global,
&found));
if (!found)
return false;
else
return *is_global != 0;
}
} } // end namespace boost::mpi


@@ -5,9 +5,9 @@
// http://www.boost.org/LICENSE_1_0.txt)
// A test of the all_gather() collective.
#include <boost/parallel/mpi/collectives/all_gather.hpp>
#include <boost/parallel/mpi/environment.hpp>
#include <boost/parallel/mpi/communicator.hpp>
#include <boost/mpi/collectives/all_gather.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/test/minimal.hpp>
#include <algorithm>
#include "gps_position.hpp"
@@ -16,10 +16,10 @@
#include <boost/iterator/counting_iterator.hpp>
#include <boost/lexical_cast.hpp>
using boost::parallel::mpi::communicator;
using boost::mpi::communicator;
using boost::parallel::mpi::packed_skeleton_iarchive;
using boost::parallel::mpi::packed_skeleton_oarchive;
using boost::mpi::packed_skeleton_iarchive;
using boost::mpi::packed_skeleton_oarchive;
template<typename Generator>
void
@@ -29,7 +29,7 @@ all_gather_test(const communicator& comm, Generator generator,
typedef typename Generator::result_type value_type;
value_type value = generator(comm.rank());
using boost::parallel::mpi::all_gather;
using boost::mpi::all_gather;
std::vector<value_type> values;
if (comm.rank() == 0) {
@@ -98,7 +98,7 @@ struct string_list_generator
int test_main(int argc, char* argv[])
{
boost::parallel::mpi::environment env(argc, argv);
boost::mpi::environment env(argc, argv);
communicator comm;
all_gather_test(comm, int_generator(), "integers");
all_gather_test(comm, gps_generator(), "GPS positions");


@@ -5,9 +5,9 @@
// http://www.boost.org/LICENSE_1_0.txt)
// A test of the all_reduce() collective.
#include <boost/parallel/mpi/collectives/all_reduce.hpp>
#include <boost/parallel/mpi/communicator.hpp>
#include <boost/parallel/mpi/environment.hpp>
#include <boost/mpi/collectives/all_reduce.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/test/minimal.hpp>
#include <algorithm>
#include <boost/serialization/string.hpp>
@@ -15,7 +15,7 @@
#include <boost/lexical_cast.hpp>
#include <numeric>
using boost::parallel::mpi::communicator;
using boost::mpi::communicator;
// A simple point class that we can build, add, compare, and
// serialize.
@@ -58,12 +58,12 @@ point operator+(const point& p1, const point& p2)
return point(p1.x + p2.x, p1.y + p2.y, p1.z + p2.z);
}
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
template <>
struct is_mpi_datatype<point> : public mpl::true_ { };
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
template<typename Generator, typename Op>
void
@@ -74,7 +74,7 @@ all_reduce_test(const communicator& comm, Generator generator,
typedef typename Generator::result_type value_type;
value_type value = generator(comm.rank());
using boost::parallel::mpi::all_reduce;
using boost::mpi::all_reduce;
if (comm.rank() == 0) {
std::cout << "Reducing to " << op_kind << " of " << type_kind << "...";
@@ -181,18 +181,18 @@ struct wrapped_int_generator
int base;
};
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
// Make std::plus<wrapped_int> commutative.
template<>
struct is_commutative<std::plus<wrapped_int>, wrapped_int>
: mpl::true_ { };
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
int test_main(int argc, char* argv[])
{
using namespace boost::parallel::mpi;
using namespace boost::mpi;
environment env(argc, argv);
communicator comm;


@@ -5,9 +5,9 @@
// http://www.boost.org/LICENSE_1_0.txt)
// A test of the all_to_all() collective.
#include <boost/parallel/mpi/collectives/all_to_all.hpp>
#include <boost/parallel/mpi/communicator.hpp>
#include <boost/parallel/mpi/environment.hpp>
#include <boost/mpi/collectives/all_to_all.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/test/minimal.hpp>
#include <algorithm>
#include "gps_position.hpp"
@@ -16,10 +16,10 @@
#include <boost/iterator/counting_iterator.hpp>
#include <boost/lexical_cast.hpp>
using boost::parallel::mpi::communicator;
using boost::mpi::communicator;
using boost::parallel::mpi::packed_skeleton_iarchive;
using boost::parallel::mpi::packed_skeleton_oarchive;
using boost::mpi::packed_skeleton_iarchive;
using boost::mpi::packed_skeleton_oarchive;
template<typename Generator>
void
@@ -28,7 +28,7 @@ all_to_all_test(const communicator& comm, Generator generator,
{
typedef typename Generator::result_type value_type;
using boost::parallel::mpi::all_to_all;
using boost::mpi::all_to_all;
std::vector<value_type> in_values;
for (int p = 0; p < comm.size(); ++p)
@@ -93,7 +93,7 @@ struct string_list_generator
int test_main(int argc, char* argv[])
{
boost::parallel::mpi::environment env(argc, argv);
boost::mpi::environment env(argc, argv);
communicator comm;
all_to_all_test(comm, int_generator(), "integers");


@@ -5,21 +5,21 @@
// http://www.boost.org/LICENSE_1_0.txt)
// A test of the broadcast() collective.
#include <boost/parallel/mpi/collectives/broadcast.hpp>
#include <boost/parallel/mpi/communicator.hpp>
#include <boost/parallel/mpi/environment.hpp>
#include <boost/mpi/collectives/broadcast.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/test/minimal.hpp>
#include <algorithm>
#include "gps_position.hpp"
#include <boost/serialization/string.hpp>
#include <boost/serialization/list.hpp>
#include <boost/parallel/mpi/skeleton_and_content.hpp>
#include <boost/mpi/skeleton_and_content.hpp>
#include <boost/iterator/counting_iterator.hpp>
using boost::parallel::mpi::communicator;
using boost::mpi::communicator;
using boost::parallel::mpi::packed_skeleton_iarchive;
using boost::parallel::mpi::packed_skeleton_oarchive;
using boost::mpi::packed_skeleton_iarchive;
using boost::mpi::packed_skeleton_oarchive;
template<typename T>
void
@@ -30,7 +30,7 @@ broadcast_test(const communicator& comm, const T& bc_value,
for (root = 0; root < comm.size(); ++root)
broadcast_test(comm, bc_value, kind, root);
} else {
using boost::parallel::mpi::broadcast;
using boost::mpi::broadcast;
T value;
if (comm.rank() == root) {
@@ -51,12 +51,12 @@ broadcast_test(const communicator& comm, const T& bc_value,
void
test_skeleton_and_content(const communicator& comm, int root = 0)
{
using boost::parallel::mpi::content;
using boost::parallel::mpi::get_content;
using boost::mpi::content;
using boost::mpi::get_content;
using boost::make_counting_iterator;
using boost::parallel::mpi::broadcast;
using boost::parallel::mpi::content;
using boost::parallel::mpi::get_content;
using boost::mpi::broadcast;
using boost::mpi::content;
using boost::mpi::get_content;
typedef std::list<int>::iterator iterator;
@@ -128,7 +128,7 @@ test_skeleton_and_content(const communicator& comm, int root = 0)
int test_main(int argc, char* argv[])
{
boost::parallel::mpi::environment env(argc, argv);
boost::mpi::environment env(argc, argv);
communicator comm;
if (comm.size() == 1) {


@@ -5,9 +5,9 @@
// http://www.boost.org/LICENSE_1_0.txt)
// A test of the gather() collective.
#include <boost/parallel/mpi/collectives/gather.hpp>
#include <boost/parallel/mpi/communicator.hpp>
#include <boost/parallel/mpi/environment.hpp>
#include <boost/mpi/collectives/gather.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/test/minimal.hpp>
#include <algorithm>
#include "gps_position.hpp"
@@ -16,10 +16,10 @@
#include <boost/iterator/counting_iterator.hpp>
#include <boost/lexical_cast.hpp>
using boost::parallel::mpi::communicator;
using boost::mpi::communicator;
using boost::parallel::mpi::packed_skeleton_iarchive;
using boost::parallel::mpi::packed_skeleton_oarchive;
using boost::mpi::packed_skeleton_iarchive;
using boost::mpi::packed_skeleton_oarchive;
template<typename Generator>
void
@@ -33,7 +33,7 @@ gather_test(const communicator& comm, Generator generator,
for (root = 0; root < comm.size(); ++root)
gather_test(comm, generator, kind, root);
} else {
using boost::parallel::mpi::gather;
using boost::mpi::gather;
std::vector<value_type> values;
if (comm.rank() == root) {
@@ -107,7 +107,7 @@ struct string_list_generator
int test_main(int argc, char* argv[])
{
boost::parallel::mpi::environment env(argc, argv);
boost::mpi::environment env(argc, argv);
communicator comm;
gather_test(comm, int_generator(), "integers");


@@ -5,18 +5,18 @@
// http://www.boost.org/LICENSE_1_0.txt)
// A test of the is_mpi_op functionality.
#include <boost/parallel/mpi/operations.hpp>
#include <boost/parallel/mpi/environment.hpp>
#include <boost/mpi/operations.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/type_traits/is_base_and_derived.hpp>
#include <boost/test/minimal.hpp>
using namespace boost::parallel::mpi;
using namespace boost::mpi;
using namespace std;
using boost::is_base_and_derived;
int test_main(int argc, char* argv[])
{
boost::parallel::mpi::environment env(argc, argv);
boost::mpi::environment env(argc, argv);
// Check each predefined MPI_Op type that we support directly.
BOOST_CHECK((is_mpi_op<maximum<int>, int>::op() == MPI_MAX));


@@ -5,9 +5,9 @@
// http://www.boost.org/LICENSE_1_0.txt)
// A test of the nonblocking point-to-point operations.
#include <boost/parallel/mpi/nonblocking.hpp>
#include <boost/parallel/mpi/communicator.hpp>
#include <boost/parallel/mpi/environment.hpp>
#include <boost/mpi/nonblocking.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/test/minimal.hpp>
#include "gps_position.hpp"
#include <boost/lexical_cast.hpp>
@@ -16,9 +16,9 @@
#include <iterator>
#include <algorithm>
using boost::parallel::mpi::communicator;
using boost::parallel::mpi::request;
using boost::parallel::mpi::status;
using boost::mpi::communicator;
using boost::mpi::request;
using boost::mpi::status;
enum method_kind {
mk_wait_any, mk_test_any, mk_wait_all, mk_wait_all_keep,
@@ -47,12 +47,12 @@ void
nonblocking_test(const communicator& comm, const T* values, int num_values,
const char* kind, method_kind method = mk_all)
{
using boost::parallel::mpi::wait_any;
using boost::parallel::mpi::test_any;
using boost::parallel::mpi::wait_all;
using boost::parallel::mpi::test_all;
using boost::parallel::mpi::wait_some;
using boost::parallel::mpi::test_some;
using boost::mpi::wait_any;
using boost::mpi::test_any;
using boost::mpi::wait_all;
using boost::mpi::test_all;
using boost::mpi::wait_some;
using boost::mpi::test_some;
if (method == mk_all || method == mk_all_except_test_all) {
nonblocking_test(comm, values, num_values, kind, mk_wait_any);
@@ -219,7 +219,7 @@ nonblocking_test(const communicator& comm, const T* values, int num_values,
int test_main(int argc, char* argv[])
{
boost::parallel::mpi::environment env(argc, argv);
boost::mpi::environment env(argc, argv);
communicator comm;


@@ -5,9 +5,9 @@
// http://www.boost.org/LICENSE_1_0.txt)
// A test of the reduce() collective.
#include <boost/parallel/mpi/collectives/reduce.hpp>
#include <boost/parallel/mpi/communicator.hpp>
#include <boost/parallel/mpi/environment.hpp>
#include <boost/mpi/collectives/reduce.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/test/minimal.hpp>
#include <algorithm>
#include <boost/serialization/string.hpp>
@@ -15,7 +15,7 @@
#include <boost/lexical_cast.hpp>
#include <numeric>
using boost::parallel::mpi::communicator;
using boost::mpi::communicator;
// A simple point class that we can build, add, compare, and
// serialize.
@@ -58,12 +58,12 @@ point operator+(const point& p1, const point& p2)
return point(p1.x + p2.x, p1.y + p2.y, p1.z + p2.z);
}
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
template <>
struct is_mpi_datatype<point> : public mpl::true_ { };
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
template<typename Generator, typename Op>
void
@@ -79,7 +79,7 @@ reduce_test(const communicator& comm, Generator generator,
for (root = 0; root < comm.size(); ++root)
reduce_test(comm, generator, type_kind, op, op_kind, init, root);
} else {
using boost::parallel::mpi::reduce;
using boost::mpi::reduce;
if (comm.rank() == root) {
std::cout << "Reducing to " << op_kind << " of " << type_kind
@@ -191,18 +191,18 @@ struct wrapped_int_generator
int base;
};
namespace boost { namespace parallel { namespace mpi {
namespace boost { namespace mpi {
// Make std::plus<wrapped_int> commutative.
template<>
struct is_commutative<std::plus<wrapped_int>, wrapped_int>
: mpl::true_ { };
} } } // end namespace boost::parallel::mpi
} } // end namespace boost::mpi
int test_main(int argc, char* argv[])
{
using namespace boost::parallel::mpi;
using namespace boost::mpi;
environment env(argc, argv);
communicator comm;

Some files were not shown because too many files have changed in this diff.