Added boost header

This commit is contained in:
Christophe Riccio
2012-01-08 01:26:07 +00:00
parent 9c3faaca40
commit c7d752cdf8
8946 changed files with 1732316 additions and 0 deletions

210
test/external/boost/mpi/allocator.hpp vendored Normal file
View File

@@ -0,0 +1,210 @@
// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/** @file allocator.hpp
*
* This header provides an STL-compliant allocator that uses the
* MPI-2 memory allocation facilities.
*/
#ifndef BOOST_MPI_ALLOCATOR_HPP
#define BOOST_MPI_ALLOCATOR_HPP
#include <boost/mpi/config.hpp>
#include <boost/mpi/exception.hpp>
#include <cstddef>
#include <memory>
#include <boost/limits.hpp>
namespace boost { namespace mpi {
#if defined(BOOST_MPI_HAS_MEMORY_ALLOCATION)
template<typename T> class allocator;
/** @brief Allocator specialization for @c void value types.
*
* The @c void specialization of @c allocator is useful only for
* rebinding to another, different value type.
*/
template<>
class BOOST_MPI_DECL allocator<void>
{
public:
  /// There are no objects of type @c void; the value type exists only
  /// so that this specialization can be rebound to a concrete type.
  typedef void value_type;
  /// Pointer to untyped storage.
  typedef void* pointer;
  /// Pointer to constant untyped storage.
  typedef const void* const_pointer;

  /// Obtain an allocator for a different, concrete value type @c U.
  template <class U>
  struct rebind
  {
    typedef allocator<U> other;
  };
};
/** @brief Standard Library-compliant allocator for the MPI-2 memory
* allocation routines.
*
* This allocator provides a standard C++ interface to the @c
* MPI_Alloc_mem and @c MPI_Free_mem routines of MPI-2. It is
* intended to be used with the containers in the Standard Library
* (@c vector, in particular) in cases where the contents of the
* container will be directly transmitted via MPI. This allocator is
* also used internally by the library for character buffers that
* will be used in the transmission of data.
*
* The @c allocator class template only provides MPI memory
* allocation when the underlying MPI implementation is either MPI-2
* compliant or is known to provide @c MPI_Alloc_mem and @c
* MPI_Free_mem as extensions. When the MPI memory allocation
* routines are not available, @c allocator is brought in directly
* from namespace @c std, so that standard allocators are used
* throughout. The macro @c BOOST_MPI_HAS_MEMORY_ALLOCATION will be
* defined when the MPI-2 memory allocation facilities are available.
*/
template<typename T>
class BOOST_MPI_DECL allocator
{
public:
/// Holds the size of objects
typedef std::size_t size_type;
/// Holds the number of elements between two pointers
typedef std::ptrdiff_t difference_type;
/// A pointer to an object of type @c T
typedef T* pointer;
/// A pointer to a constant object of type @c T
typedef const T* const_pointer;
/// A reference to an object of type @c T
typedef T& reference;
/// A reference to a constant object of type @c T
typedef const T& const_reference;
/// The type of memory allocated by this allocator
typedef T value_type;
/** @brief Retrieve the type of an allocator similar to this
* allocator but for a different value type.
*/
template <typename U>
struct rebind
{
typedef allocator<U> other;
};
/** Default-construct an allocator. */
// The allocator is stateless, so all special members are trivial.
allocator() throw() { }
/** Copy-construct an allocator. */
allocator(const allocator&) throw() { }
/**
* Copy-construct an allocator from another allocator for a
* different value type.
*/
template <typename U>
allocator(const allocator<U>&) throw() { }
/** Destroy an allocator. */
~allocator() throw() { }
/** Returns the address of object @p x. */
pointer address(reference x) const
{
return &x;
}
/** Returns the address of object @p x. */
const_pointer address(const_reference x) const
{
return &x;
}
/**
* Allocate enough memory for @p n elements of type @c T.
*
* Memory is obtained from @c MPI_Alloc_mem rather than the global
* heap. The @c hint parameter mirrors the standard allocator
* interface but is ignored; @c MPI_Alloc_mem receives only
* @c MPI_INFO_NULL.
*
* @param n The number of elements for which memory should be
* allocated.
*
* @return a pointer to the newly-allocated memory
*/
pointer allocate(size_type n, allocator<void>::const_pointer /*hint*/ = 0)
{
pointer result;
// BOOST_MPI_CHECK_RESULT (from boost/mpi/exception.hpp) verifies
// that the MPI call succeeded; on failure it reports the error
// instead of returning an invalid pointer.
BOOST_MPI_CHECK_RESULT(MPI_Alloc_mem,
(static_cast<MPI_Aint>(n * sizeof(T)),
MPI_INFO_NULL,
&result));
return result;
}
/**
* Deallocate memory referred to by the pointer @c p.
*
* @param p The pointer whose memory should be deallocated. This
* pointer shall have been returned from the @c allocate() function
* and not have already been freed.
*/
// The element count is not needed: MPI_Free_mem releases the whole
// region from its base pointer.
void deallocate(pointer p, size_type /*n*/)
{
BOOST_MPI_CHECK_RESULT(MPI_Free_mem, (p));
}
/**
* Returns the maximum number of elements that can be allocated
* with @c allocate().
*/
size_type max_size() const throw()
{
// The parentheses around "max" suppress expansion of a max()
// function-style macro (e.g. the one defined by <windows.h>).
return (std::numeric_limits<std::size_t>::max)() / sizeof(T);
}
/** Construct a copy of @p val at the location referenced by @c p. */
void construct(pointer p, const T& val)
{
// Placement new: the storage was already obtained via allocate().
new ((void *)p) T(val);
}
/** Destroy the object referenced by @c p. */
void destroy(pointer p)
{
// Explicit destructor call; deallocate() releases the storage
// separately.
((T*)p)->~T();
}
};
/** @brief Compare two allocators for equality.
*
* Since MPI allocators have no state, all MPI allocators are equal.
*
* @returns @c true
*/
template<typename T1, typename T2>
inline bool operator==(const allocator<T1>&, const allocator<T2>&) throw()
{
  // MPI allocators carry no state, so any two instances are
  // interchangeable: memory allocated through one may be released
  // through another.
  return true;
}
/** @brief Compare two allocators for inequality.
*
* Since MPI allocators have no state, all MPI allocators are equal.
*
* @returns @c false
*/
template<typename T1, typename T2>
inline bool operator!=(const allocator<T1>&, const allocator<T2>&) throw()
{
  // Stateless allocators are always equal, so inequality never holds.
  return false;
}
#else
// Bring in the default allocator from namespace std.
using std::allocator;
#endif
} } /// end namespace boost::mpi
#endif // BOOST_MPI_ALLOCATOR_HPP

545
test/external/boost/mpi/collectives.hpp vendored Normal file
View File

@@ -0,0 +1,545 @@
// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor -at- gmail.com>.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Message Passing Interface 1.1 -- Section 4. MPI Collectives
/** @file collectives.hpp
*
* This header contains MPI collective operations, which implement
* various parallel algorithms that require the coordination of all
* processes within a communicator. The header @c collectives_fwd.hpp
* provides forward declarations for each of these operations. To
* include only specific collective algorithms, use the headers @c
* boost/mpi/collectives/algorithm_name.hpp.
*/
#ifndef BOOST_MPI_COLLECTIVES_HPP
#define BOOST_MPI_COLLECTIVES_HPP
#include <boost/mpi/communicator.hpp>
#include <vector>
namespace boost { namespace mpi {
/**
* @brief Gather the values stored at every process into vectors of
* values from each process.
*
* @c all_gather is a collective algorithm that collects the values
* stored at each process into a vector of values indexed by the
* process number they came from. The type @c T of the values may be
* any type that is serializable or has an associated MPI data type.
*
* When the type @c T has an associated MPI data type, this routine
* invokes @c MPI_Allgather to gather the values.
*
* @param comm The communicator over which the all-gather will
* occur.
*
* @param in_value The value to be transmitted by each process. To
* gather an array of values, @c in_values points to the @c n local
* values to be transmitted.
*
* @param out_values A vector or pointer to storage that will be
* populated with the values from each process, indexed by the
* process ID number. If it is a vector, the vector will be resized
* accordingly.
*/
template<typename T>
void
all_gather(const communicator& comm, const T& in_value,
std::vector<T>& out_values);
/**
* \overload
*/
template<typename T>
void
all_gather(const communicator& comm, const T& in_value, T* out_values);
/**
* \overload
*/
template<typename T>
void
all_gather(const communicator& comm, const T* in_values, int n,
std::vector<T>& out_values);
/**
* \overload
*/
template<typename T>
void
all_gather(const communicator& comm, const T* in_values, int n, T* out_values);
/**
* @brief Combine the values stored by each process into a single
* value available to all processes.
*
* @c all_reduce is a collective algorithm that combines the values
* stored by each process into a single value available to all
* processes. The values are combined in a user-defined way,
* specified via a function object. The type @c T of the values may
* be any type that is serializable or has an associated MPI data
* type. One can think of this operation as a @c all_gather, followed
* by an @c std::accumulate() over the gather values and using the
* operation @c op.
*
* When the type @c T has an associated MPI data type, this routine
* invokes @c MPI_Allreduce to perform the reduction. If possible,
* built-in MPI operations will be used; otherwise, @c all_reduce()
* will create a custom MPI_Op for the call to MPI_Allreduce.
*
* @param comm The communicator over which the reduction will
* occur.
*
* @param in_value The local value to be combined with the local
* values of every other process. For reducing arrays, @c in_values
* is a pointer to the local values to be reduced and @c n is the
* number of values to reduce. See @c reduce for more information.
*
* @param out_value Will receive the result of the reduction
* operation. If this parameter is omitted, the outgoing value will
* instead be returned.
*
* @param op The binary operation that combines two values of type
* @c T and returns a third value of type @c T. For types @c T that have
* associated MPI data types, @c op will either be translated into
* an @c MPI_Op (via @c MPI_Op_create) or, if possible, mapped
* directly to a built-in MPI operation. See @c is_mpi_op in the @c
* operations.hpp header for more details on this mapping. For any
* non-built-in operation, commutativity will be determined by the
* @c is_commutative trait (also in @c operations.hpp): users are
* encouraged to mark commutative operations as such, because it
* gives the implementation additional latitude to optimize the
* reduction operation.
*
* @returns If no @p out_value parameter is supplied, returns the
* result of the reduction operation.
*/
template<typename T, typename Op>
void
all_reduce(const communicator& comm, const T& in_value, T& out_value, Op op);
/**
* \overload
*/
template<typename T, typename Op>
T all_reduce(const communicator& comm, const T& in_value, Op op);
/**
* \overload
*/
template<typename T, typename Op>
void
all_reduce(const communicator& comm, const T* in_values, int n, T* out_values,
Op op);
/**
* @brief Send data from every process to every other process.
*
* @c all_to_all is a collective algorithm that transmits @c p values
* from every process to every other process. On process i, jth value
* of the @p in_values vector is sent to process j and placed in the
* ith position of the @p out_values vector in process @p j. The type
* @c T of the values may be any type that is serializable or has an
* associated MPI data type. If @c n is provided, then arrays of @p n
* values will be transferred from one process to another.
*
* When the type @c T has an associated MPI data type, this routine
* invokes @c MPI_Alltoall to scatter the values.
*
* @param comm The communicator over which the all-to-all
* communication will occur.
*
* @param in_values A vector or pointer to storage that contains
* the values to send to each process, indexed by the process ID
* number.
*
* @param out_values A vector or pointer to storage that will be
* updated to contain the values received from other processes. The
* jth value in @p out_values will come from the process with rank j.
*/
template<typename T>
void
all_to_all(const communicator& comm, const std::vector<T>& in_values,
std::vector<T>& out_values);
/**
* \overload
*/
template<typename T>
void all_to_all(const communicator& comm, const T* in_values, T* out_values);
/**
* \overload
*/
template<typename T>
void
all_to_all(const communicator& comm, const std::vector<T>& in_values, int n,
std::vector<T>& out_values);
/**
* \overload
*/
template<typename T>
void
all_to_all(const communicator& comm, const T* in_values, int n, T* out_values);
/**
* @brief Broadcast a value from a root process to all other
* processes.
*
* @c broadcast is a collective algorithm that transfers a value from
* an arbitrary @p root process to every other process that is part of
* the given communicator. The @c broadcast algorithm can transmit any
* Serializable value, values that have associated MPI data types,
* packed archives, skeletons, and the content of skeletons; see the
* @c send primitive for communicators for a complete list. The type
* @c T shall be the same for all processes that are a part of the
* communicator @p comm, unless packed archives are being transferred:
* with packed archives, the root sends a @c packed_oarchive or @c
* packed_skeleton_oarchive whereas the other processes receive a
* @c packed_iarchive or @c packed_skeleton_iarchive, respectively.
*
* When the type @c T has an associated MPI data type, this routine
* invokes @c MPI_Bcast to perform the broadcast.
*
* @param comm The communicator over which the broadcast will
* occur.
*
* @param value The value (or values, if @p n is provided) to be
* transmitted (if the rank of @p comm is equal to @p root) or
* received (if the rank of @p comm is not equal to @p root). When
* the @p value is a @c skeleton_proxy, only the skeleton of the
* object will be broadcast. In this case, the @p root will build a
* skeleton from the object held in the proxy and all of the
* non-roots will reshape the objects held in their proxies based on
* the skeleton sent from the root.
*
* @param n When supplied, the number of values that the pointer @p
* values points to, for broadcasting an array of values. The value
* of @p n must be the same for all processes in @p comm.
*
* @param root The rank/process ID of the process that will be
* transmitting the value.
*/
template<typename T>
void broadcast(const communicator& comm, T& value, int root);
/**
* \overload
*/
template<typename T>
void broadcast(const communicator& comm, T* values, int n, int root);
/**
* \overload
*/
template<typename T>
void broadcast(const communicator& comm, skeleton_proxy<T>& value, int root);
/**
* \overload
*/
template<typename T>
void
broadcast(const communicator& comm, const skeleton_proxy<T>& value, int root);
/**
* @brief Gather the values stored at every process into a vector at
* the root process.
*
* @c gather is a collective algorithm that collects the values
* stored at each process into a vector of values at the @p root
* process. This vector is indexed by the process number that the
* value came from. The type @c T of the values may be any type that
* is serializable or has an associated MPI data type.
*
* When the type @c T has an associated MPI data type, this routine
* invokes @c MPI_Gather to gather the values.
*
* @param comm The communicator over which the gather will occur.
*
* @param in_value The value to be transmitted by each process. For
* gathering arrays of values, @c in_values points to storage for
* @c n*comm.size() values.
*
* @param out_values A vector or pointer to storage that will be
* populated with the values from each process, indexed by the
* process ID number. If it is a vector, it will be resized
* accordingly. For non-root processes, this parameter may be
* omitted. If it is still provided, however, it will be unchanged.
*
* @param root The process ID number that will collect the
* values. This value must be the same on all processes.
*/
template<typename T>
void
gather(const communicator& comm, const T& in_value, std::vector<T>& out_values,
int root);
/**
* \overload
*/
template<typename T>
void
gather(const communicator& comm, const T& in_value, T* out_values, int root);
/**
* \overload
*/
template<typename T>
void gather(const communicator& comm, const T& in_value, int root);
/**
* \overload
*/
template<typename T>
void
gather(const communicator& comm, const T* in_values, int n,
std::vector<T>& out_values, int root);
/**
* \overload
*/
template<typename T>
void
gather(const communicator& comm, const T* in_values, int n, T* out_values,
int root);
/**
* \overload
*/
template<typename T>
void gather(const communicator& comm, const T* in_values, int n, int root);
/**
* @brief Scatter the values stored at the root to all processes
* within the communicator.
*
* @c scatter is a collective algorithm that scatters the values
* stored in the @p root process (inside a vector) to all of the
* processes in the communicator. The vector @p out_values (only
* significant at the @p root) is indexed by the process number to
* which the corresponding value will be sent. The type @c T of the
* values may be any type that is serializable or has an associated
* MPI data type.
*
* When the type @c T has an associated MPI data type, this routine
* invokes @c MPI_Scatter to scatter the values.
*
* @param comm The communicator over which the gather will occur.
*
* @param in_values A vector or pointer to storage that will contain
* the values to send to each process, indexed by the process rank.
* For non-root processes, this parameter may be omitted. If it is
* still provided, however, it will be unchanged.
*
* @param out_value The value received by each process. When
* scattering an array of values, @p out_values points to the @p n
* values that will be received by each process.
*
* @param root The process ID number that will scatter the
* values. This value must be the same on all processes.
*/
template<typename T>
void
scatter(const communicator& comm, const std::vector<T>& in_values, T& out_value,
int root);
/**
* \overload
*/
template<typename T>
void
scatter(const communicator& comm, const T* in_values, T& out_value, int root);
/**
* \overload
*/
template<typename T>
void scatter(const communicator& comm, T& out_value, int root);
/**
* \overload
*/
template<typename T>
void
scatter(const communicator& comm, const std::vector<T>& in_values,
T* out_values, int n, int root);
/**
* \overload
*/
template<typename T>
void
scatter(const communicator& comm, const T* in_values, T* out_values, int n,
int root);
/**
* \overload
*/
template<typename T>
void scatter(const communicator& comm, T* out_values, int n, int root);
/**
* @brief Combine the values stored by each process into a single
* value at the root.
*
* @c reduce is a collective algorithm that combines the values
* stored by each process into a single value at the @c root. The
* values can be combined arbitrarily, specified via a function
* object. The type @c T of the values may be any type that is
* serializable or has an associated MPI data type. One can think of
* this operation as a @c gather to the @p root, followed by an @c
* std::accumulate() over the gathered values and using the operation
* @c op.
*
* When the type @c T has an associated MPI data type, this routine
* invokes @c MPI_Reduce to perform the reduction. If possible,
* built-in MPI operations will be used; otherwise, @c reduce() will
* create a custom MPI_Op for the call to MPI_Reduce.
*
* @param comm The communicator over which the reduction will
* occur.
*
* @param in_value The local value to be combined with the local
* values of every other process. For reducing arrays, @c in_values
* contains a pointer to the local values. In this case, @c n is
* the number of values that will be reduced. Reduction occurs
* independently for each of the @p n values referenced by @p
* in_values, e.g., calling reduce on an array of @p n values is
* like calling @c reduce @p n separate times, one for each
* location in @p in_values and @p out_values.
*
* @param out_value Will receive the result of the reduction
* operation, but only for the @p root process. Non-root processes
* may omit this parameter; if they choose to supply the parameter,
* it will be unchanged. For reducing arrays, @c out_values
* contains a pointer to the storage for the output values.
*
* @param op The binary operation that combines two values of type
* @c T into a third value of type @c T. For types @c T that have
* associated MPI data types, @c op will either be translated into
* an @c MPI_Op (via @c MPI_Op_create) or, if possible, mapped
* directly to a built-in MPI operation. See @c is_mpi_op in the @c
* operations.hpp header for more details on this mapping. For any
* non-built-in operation, commutativity will be determined by the
* @c is_commutative trait (also in @c operations.hpp): users are
* encouraged to mark commutative operations as such, because it
* gives the implementation additional latitude to optimize the
* reduction operation.
*
* @param root The process ID number that will receive the final,
* combined value. This value must be the same on all processes.
*/
template<typename T, typename Op>
void
reduce(const communicator& comm, const T& in_value, T& out_value, Op op,
int root);
/**
* \overload
*/
template<typename T, typename Op>
void reduce(const communicator& comm, const T& in_value, Op op, int root);
/**
* \overload
*/
template<typename T, typename Op>
void
reduce(const communicator& comm, const T* in_values, int n, T* out_values,
Op op, int root);
/**
* \overload
*/
template<typename T, typename Op>
void
reduce(const communicator& comm, const T* in_values, int n, Op op, int root);
/**
* @brief Compute a prefix reduction of values from all processes in
* the communicator.
*
* @c scan is a collective algorithm that combines the values stored
* by each process with the values of all processes with a smaller
* rank. The values can be arbitrarily combined, specified via a
* function object @p op. The type @c T of the values may be any type
* that is serializable or has an associated MPI data type. One can
* think of this operation as a @c gather to some process, followed
* by an @c std::partial_sum() over the gathered values using the
* operation @c op. The ith process returns the ith value emitted by
* @c std::partial_sum().
*
* When the type @c T has an associated MPI data type, this routine
* invokes @c MPI_Scan to perform the reduction. If possible,
* built-in MPI operations will be used; otherwise, @c scan() will
* create a custom @c MPI_Op for the call to MPI_Scan.
*
* @param comm The communicator over which the prefix reduction
* will occur.
*
* @param in_value The local value to be combined with the local
* values of other processes. For the array variant, the @c
* in_values parameter points to the @c n local values that will be
* combined.
*
* @param out_value If provided, the ith process will receive the
* value @c op(in_value[0], op(in_value[1], op(..., in_value[i])
* ... )). For the array variant, @c out_values contains a pointer
* to storage for the @c n output values. The prefix reduction
* occurs independently for each of the @p n values referenced by
* @p in_values, e.g., calling scan on an array of @p n values is
* like calling @c scan @p n separate times, one for each location
* in @p in_values and @p out_values.
*
* @param op The binary operation that combines two values of type
* @c T into a third value of type @c T. For types @c T that have
* associated MPI data types, @c op will either be translated into
* an @c MPI_Op (via @c MPI_Op_create) or, if possible, mapped
* directly to a built-in MPI operation. See @c is_mpi_op in the @c
* operations.hpp header for more details on this mapping. For any
* non-built-in operation, commutativity will be determined by the
* @c is_commutative trait (also in @c operations.hpp).
*
* @returns If no @p out_value parameter is provided, returns the
* result of prefix reduction.
*/
template<typename T, typename Op>
void
scan(const communicator& comm, const T& in_value, T& out_value, Op op);
/**
* \overload
*/
template<typename T, typename Op>
T
scan(const communicator& comm, const T& in_value, Op op);
/**
* \overload
*/
template<typename T, typename Op>
void
scan(const communicator& comm, const T* in_values, int n, T* out_values, Op op);
} } // end namespace boost::mpi
#endif // BOOST_MPI_COLLECTIVES_HPP
#ifndef BOOST_MPI_COLLECTIVES_FORWARD_ONLY
// Include implementations of each of the collectives
# include <boost/mpi/collectives/all_gather.hpp>
# include <boost/mpi/collectives/all_reduce.hpp>
# include <boost/mpi/collectives/all_to_all.hpp>
# include <boost/mpi/collectives/broadcast.hpp>
# include <boost/mpi/collectives/gather.hpp>
# include <boost/mpi/collectives/scatter.hpp>
# include <boost/mpi/collectives/reduce.hpp>
# include <boost/mpi/collectives/scan.hpp>
#endif

View File

@@ -0,0 +1,82 @@
// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor -at- gmail.com>.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Message Passing Interface 1.1 -- Section 4.7. Gather-to-all
#ifndef BOOST_MPI_ALL_GATHER_HPP
#define BOOST_MPI_ALL_GATHER_HPP
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <vector>
#include <boost/serialization/vector.hpp>
// all_gather falls back to gather+broadcast in some cases
#include <boost/mpi/collectives/broadcast.hpp>
#include <boost/mpi/collectives/gather.hpp>
namespace boost { namespace mpi {
namespace detail {
// We're all-gathering for a type that has an associated MPI
// datatype, so we'll use MPI_Gather to do all of the work.
template<typename T>
void
all_gather_impl(const communicator& comm, const T* in_values, int n,
T* out_values, mpl::true_)
{
// Every rank contributes n elements and receives n elements from each
// rank; MPI_Allgather handles the exchange directly.
MPI_Datatype type = boost::mpi::get_mpi_datatype<T>(*in_values);
// const_cast is required because the MPI-1 C bindings take a
// non-const send buffer; MPI does not modify it.
BOOST_MPI_CHECK_RESULT(MPI_Allgather,
(const_cast<T*>(in_values), n, type,
out_values, n, type, comm));
}
// We're all-gathering for a type that has no associated MPI
// type. So, we'll do a manual gather followed by a broadcast.
template<typename T>
void
all_gather_impl(const communicator& comm, const T* in_values, int n,
T* out_values, mpl::false_)
{
// Serialized types cannot go through MPI_Allgather, so emulate it:
// gather everything at rank 0, then broadcast the combined buffer
// (comm.size() * n elements) back to every rank.
gather(comm, in_values, n, out_values, 0);
broadcast(comm, out_values, comm.size() * n, 0);
}
} // end namespace detail
// Single-value overload writing into caller-provided storage:
// out_values must have room for comm.size() elements.
template<typename T>
inline void
all_gather(const communicator& comm, const T& in_value, T* out_values)
{
// Dispatch on whether T has an associated MPI datatype.
detail::all_gather_impl(comm, &in_value, 1, out_values, is_mpi_datatype<T>());
}
// Single-value overload writing into a vector, which is resized to
// one slot per rank before taking its buffer address.
template<typename T>
void
all_gather(const communicator& comm, const T& in_value,
std::vector<T>& out_values)
{
out_values.resize(comm.size());
::boost::mpi::all_gather(comm, &in_value, 1, &out_values[0]);
}
// Array overload: each rank contributes n values into caller-provided
// storage of comm.size() * n elements.
template<typename T>
inline void
all_gather(const communicator& comm, const T* in_values, int n, T* out_values)
{
detail::all_gather_impl(comm, in_values, n, out_values, is_mpi_datatype<T>());
}
// Array overload writing into a vector, resized to comm.size() * n
// before its buffer is handed to the pointer overload.
template<typename T>
void
all_gather(const communicator& comm, const T* in_values, int n,
std::vector<T>& out_values)
{
out_values.resize(comm.size() * n);
::boost::mpi::all_gather(comm, in_values, n, &out_values[0]);
}
} } // end namespace boost::mpi
#endif // BOOST_MPI_ALL_GATHER_HPP

View File

@@ -0,0 +1,102 @@
// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Copyright (C) 2004 The Trustees of Indiana University
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
// Message Passing Interface 1.1 -- Section 4.9.1. Reduce
#ifndef BOOST_MPI_ALL_REDUCE_HPP
#define BOOST_MPI_ALL_REDUCE_HPP
// All-reduce falls back to reduce() + broadcast() in some cases.
#include <boost/mpi/collectives/broadcast.hpp>
#include <boost/mpi/collectives/reduce.hpp>
namespace boost { namespace mpi {
namespace detail {
/**********************************************************************
* Simple reduction with MPI_Allreduce *
**********************************************************************/
// We are reducing for a type that has an associated MPI
// datatype and operation, so we'll use MPI_Allreduce directly.
template<typename T, typename Op>
void
all_reduce_impl(const communicator& comm, const T* in_values, int n,
T* out_values, Op /*op*/, mpl::true_ /*is_mpi_op*/,
mpl::true_ /*is_mpi_datatype*/)
{
// Both the datatype and the operation map to MPI built-ins, so a
// single MPI_Allreduce suffices; the Op object itself is unused.
// const_cast: the MPI-1 C bindings take a non-const send buffer.
BOOST_MPI_CHECK_RESULT(MPI_Allreduce,
(const_cast<T*>(in_values), out_values, n,
boost::mpi::get_mpi_datatype<T>(*in_values),
(is_mpi_op<Op, T>::op()), comm));
}
/**********************************************************************
* User-defined reduction with MPI_Allreduce *
**********************************************************************/
// We are reducing at the root for a type that has an associated MPI
// datatype but with a custom operation. We'll use MPI_Reduce
// directly, but we'll need to create an MPI_Op manually.
template<typename T, typename Op>
void
all_reduce_impl(const communicator& comm, const T* in_values, int n,
T* out_values, Op op, mpl::false_ /*is_mpi_op*/,
mpl::true_ /*is_mpi_datatype*/)
{
// The datatype is MPI-native but the operation is user-defined:
// user_op wraps op in an MPI_Op for the duration of the call and
// presumably releases it on destruction (RAII) — see user_op's
// definition for the exact lifetime.
user_op<Op, T> mpi_op(op);
BOOST_MPI_CHECK_RESULT(MPI_Allreduce,
(const_cast<T*>(in_values), out_values, n,
boost::mpi::get_mpi_datatype<T>(*in_values),
mpi_op.get_mpi_op(), comm));
}
/**********************************************************************
* User-defined, tree-based reduction for non-MPI data types *
**********************************************************************/
// We are reducing at the root for a type that has no associated MPI
// datatype and operation, so we'll use a simple tree-based
// algorithm.
template<typename T, typename Op>
void
all_reduce_impl(const communicator& comm, const T* in_values, int n,
T* out_values, Op op, mpl::false_ /*is_mpi_op*/,
mpl::false_ /*is_mpi_datatype*/)
{
// No MPI datatype available: emulate all-reduce as a reduce to
// rank 0 followed by a broadcast of the n combined values.
reduce(comm, in_values, n, out_values, op, 0);
broadcast(comm, out_values, n, 0);
}
} // end namespace detail
// Array overload: reduces n values element-wise across all ranks,
// dispatching on whether Op and T map to MPI built-ins.
template<typename T, typename Op>
inline void
all_reduce(const communicator& comm, const T* in_values, int n, T* out_values,
Op op)
{
detail::all_reduce_impl(comm, in_values, n, out_values, op,
is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}
// Single-value overload: treats the value as a one-element array.
template<typename T, typename Op>
inline void
all_reduce(const communicator& comm, const T& in_value, T& out_value, Op op)
{
detail::all_reduce_impl(comm, &in_value, 1, &out_value, op,
is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}
// Value-returning overload: result is default-constructed and then
// completely overwritten by the reduction (requires T to be
// default-constructible).
template<typename T, typename Op>
T all_reduce(const communicator& comm, const T& in_value, Op op)
{
T result;
::boost::mpi::all_reduce(comm, in_value, result, op);
return result;
}
} } // end namespace boost::mpi
#endif // BOOST_MPI_ALL_REDUCE_HPP

View File

@@ -0,0 +1,153 @@
// Copyright (C) 2005, 2006 Douglas Gregor.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Message Passing Interface 1.1 -- Section 4.8. All-to-all
#ifndef BOOST_MPI_ALL_TO_ALL_HPP
#define BOOST_MPI_ALL_TO_ALL_HPP
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <vector>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/assert.hpp>
#include <boost/mpi/collectives_fwd.hpp>
#include <boost/mpi/allocator.hpp>
namespace boost { namespace mpi {
namespace detail {
// We're performing an all-to-all with a type that has an
// associated MPI datatype, so we'll use MPI_Alltoall to do all of
// the work.
template<typename T>
void
all_to_all_impl(const communicator& comm, const T* in_values, int n,
T* out_values, mpl::true_)
{
// T has an MPI datatype: a single MPI_Alltoall moves n elements from
// every rank to every other rank.
MPI_Datatype type = get_mpi_datatype<T>(*in_values);
// const_cast: the MPI-1 C bindings take a non-const send buffer.
BOOST_MPI_CHECK_RESULT(MPI_Alltoall,
(const_cast<T*>(in_values), n, type,
out_values, n, type, comm));
}
// We're performing an all-to-all with a type that does not have an
// associated MPI datatype, so we'll need to serialize
// it. Unfortunately, this means that we cannot use MPI_Alltoall, so
// we'll just have to send individual messages to the other
// processes.
// Serialized-type path: pack per-destination archives into one byte
// buffer, exchange byte counts, then exchange the bytes with
// MPI_Alltoallv and unpack on the receiving side. The rank's own data
// never travels over MPI; it is copied locally.
template<typename T>
void
all_to_all_impl(const communicator& comm, const T* in_values, int n,
T* out_values, mpl::false_)
{
int size = comm.size();
int rank = comm.rank();
// The amount of data to be sent to each process
std::vector<int> send_sizes(size);
// The displacements for each outgoing value.
std::vector<int> send_disps(size);
// The buffer that will store all of the outgoing values
// (allocator<char> places it in MPI-allocated memory).
std::vector<char, allocator<char> > outgoing;
// Pack the buffer with all of the outgoing values.
for (int dest = 0; dest < size; ++dest) {
// Keep track of the displacements
send_disps[dest] = outgoing.size();
// Our own value will never be transmitted, so don't pack it.
if (dest != rank) {
packed_oarchive oa(comm, outgoing);
for (int i = 0; i < n; ++i)
oa << in_values[dest * n + i];
}
// Keep track of the sizes
send_sizes[dest] = outgoing.size() - send_disps[dest];
}
// Determine how much data each process will receive.
// (int has an MPI datatype, so this recursive call takes the
// MPI_Alltoall fast path above — no infinite recursion.)
std::vector<int> recv_sizes(size);
all_to_all(comm, send_sizes, recv_sizes);
// Prepare a buffer to receive the incoming data.
std::vector<int> recv_disps(size);
int sum = 0;
for (int src = 0; src < size; ++src) {
recv_disps[src] = sum;
sum += recv_sizes[src];
}
// Allocate at least one byte so &incoming[0] below is valid even
// when nothing will be received.
std::vector<char, allocator<char> > incoming(sum > 0? sum : 1);
// Make sure we don't try to reference an empty vector
if (outgoing.empty())
outgoing.push_back(0);
// Transmit the actual data
BOOST_MPI_CHECK_RESULT(MPI_Alltoallv,
(&outgoing[0], &send_sizes[0],
&send_disps[0], MPI_PACKED,
&incoming[0], &recv_sizes[0],
&recv_disps[0], MPI_PACKED,
comm));
// Deserialize data from the iarchive
for (int src = 0; src < size; ++src) {
if (src == rank)
// Local data was never packed; copy it straight across.
std::copy(in_values + src * n, in_values + (src + 1) * n,
out_values + src * n);
else {
// Each source's archive starts at its receive displacement
// within the shared incoming buffer.
packed_iarchive ia(comm, incoming, boost::archive::no_header,
recv_disps[src]);
for (int i = 0; i < n; ++i)
ia >> out_values[src * n + i];
}
}
}
} // end namespace detail
template<typename T>
inline void
all_to_all(const communicator& comm, const T* in_values, T* out_values)
{
detail::all_to_all_impl(comm, in_values, 1, out_values, is_mpi_datatype<T>());
}
// Vector convenience overload: one value per destination process.
template<typename T>
void
all_to_all(const communicator& comm, const std::vector<T>& in_values,
           std::vector<T>& out_values)
{
  const int nprocs = comm.size();
  BOOST_ASSERT(static_cast<int>(in_values.size()) == nprocs);
  out_values.resize(nprocs);
  ::boost::mpi::all_to_all(comm, &in_values[0], &out_values[0]);
}
template<typename T>
inline void
all_to_all(const communicator& comm, const T* in_values, int n, T* out_values)
{
detail::all_to_all_impl(comm, in_values, n, out_values, is_mpi_datatype<T>());
}
// Vector convenience overload: n values per destination process.
template<typename T>
void
all_to_all(const communicator& comm, const std::vector<T>& in_values, int n,
           std::vector<T>& out_values)
{
  const int total = comm.size() * n;
  BOOST_ASSERT(static_cast<int>(in_values.size()) == total);
  out_values.resize(total);
  ::boost::mpi::all_to_all(comm, &in_values[0], n, &out_values[0]);
}
} } // end namespace boost::mpi
#endif // BOOST_MPI_ALL_TO_ALL_HPP

View File

@@ -0,0 +1,145 @@
// Copyright (C) 2005, 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Message Passing Interface 1.1 -- Section 4.4. Broadcast
#ifndef BOOST_MPI_BROADCAST_HPP
#define BOOST_MPI_BROADCAST_HPP
#include <boost/mpi/collectives_fwd.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/mpi/communicator.hpp>
namespace boost { namespace mpi {
/************************************************************************
* Specializations *
************************************************************************/
/**
* INTERNAL ONLY
*/
template<>
BOOST_MPI_DECL void
broadcast<const packed_oarchive>(const communicator& comm,
const packed_oarchive& oa,
int root);
/**
* INTERNAL ONLY
*/
template<>
BOOST_MPI_DECL void
broadcast<packed_oarchive>(const communicator& comm, packed_oarchive& oa,
int root);
/**
* INTERNAL ONLY
*/
template<>
BOOST_MPI_DECL void
broadcast<packed_iarchive>(const communicator& comm, packed_iarchive& ia,
int root);
/**
* INTERNAL ONLY
*/
template<>
BOOST_MPI_DECL void
broadcast<const packed_skeleton_oarchive>(const communicator& comm,
const packed_skeleton_oarchive& oa,
int root);
/**
* INTERNAL ONLY
*/
template<>
void
broadcast<packed_skeleton_oarchive>(const communicator& comm,
packed_skeleton_oarchive& oa, int root);
/**
* INTERNAL ONLY
*/
template<>
void
broadcast<packed_skeleton_iarchive>(const communicator& comm,
packed_skeleton_iarchive& ia, int root);
/**
* INTERNAL ONLY
*/
template<>
void broadcast<content>(const communicator& comm, content& c, int root);
/**
* INTERNAL ONLY
*/
template<>
void broadcast<const content>(const communicator& comm, const content& c,
int root);
/************************************************************************
* broadcast() implementation *
************************************************************************/
namespace detail {
  // We're sending a type that has an associated MPI datatype, so
  // we'll use MPI_Bcast to do all of the work.
  template<typename T>
  void
  broadcast_impl(const communicator& comm, T* values, int n, int root,
                 mpl::true_)
  {
    BOOST_MPI_CHECK_RESULT(MPI_Bcast,
                           (values, n,
                            boost::mpi::get_mpi_datatype<T>(*values),
                            root, MPI_Comm(comm)));
  }

  // We're sending a type that does not have an associated MPI
  // datatype, so we'll need to serialize it. Unfortunately, this
  // means that we cannot use MPI_Bcast, so we'll just send from the
  // root to everyone else.
  template<typename T>
  void
  broadcast_impl(const communicator& comm, T* values, int n, int root,
                 mpl::false_)
  {
    if (comm.rank() == root) {
      // Pack all n values into one archive and broadcast that; the
      // packed_oarchive case is handled by the broadcast
      // specializations declared above.
      packed_oarchive oa(comm);
      for (int i = 0; i < n; ++i)
        oa << values[i];
      broadcast(comm, oa, root);
    } else {
      // Receive the packed archive and unpack the values in place.
      packed_iarchive ia(comm);
      broadcast(comm, ia, root);
      for (int i = 0; i < n; ++i)
        ia >> values[i];
    }
  }
} // end namespace detail
template<typename T>
void broadcast(const communicator& comm, T& value, int root)
{
detail::broadcast_impl(comm, &value, 1, root, is_mpi_datatype<T>());
}
template<typename T>
void broadcast(const communicator& comm, T* values, int n, int root)
{
detail::broadcast_impl(comm, values, n, root, is_mpi_datatype<T>());
}
} } // end namespace boost::mpi
// If the user has already included skeleton_and_content.hpp, include
// the code to broadcast skeletons and content.
#ifdef BOOST_MPI_SKELETON_AND_CONTENT_HPP
# include <boost/mpi/detail/broadcast_sc.hpp>
#endif
#endif // BOOST_MPI_BROADCAST_HPP

View File

@@ -0,0 +1,147 @@
// Copyright (C) 2005, 2006 Douglas Gregor.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Message Passing Interface 1.1 -- Section 4.5. Gather
#ifndef BOOST_MPI_GATHER_HPP
#define BOOST_MPI_GATHER_HPP
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <vector>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/assert.hpp>
namespace boost { namespace mpi {
namespace detail {
  // We're gathering at the root for a type that has an associated MPI
  // datatype, so we'll use MPI_Gather to do all of the work.
  template<typename T>
  void
  gather_impl(const communicator& comm, const T* in_values, int n,
              T* out_values, int root, mpl::true_)
  {
    MPI_Datatype type = get_mpi_datatype<T>(*in_values);
    BOOST_MPI_CHECK_RESULT(MPI_Gather,
                           (const_cast<T*>(in_values), n, type,
                            out_values, n, type, root, comm));
  }

  // We're gathering from a non-root for a type that has an associated MPI
  // datatype, so we'll use MPI_Gather to do all of the work. The
  // receive buffer is significant only at the root, so a null pointer
  // is passed in its place here.
  template<typename T>
  void
  gather_impl(const communicator& comm, const T* in_values, int n, int root,
              mpl::true_)
  {
    MPI_Datatype type = get_mpi_datatype<T>(*in_values);
    BOOST_MPI_CHECK_RESULT(MPI_Gather,
                           (const_cast<T*>(in_values), n, type,
                            0, n, type, root, comm));
  }

  // We're gathering at the root for a type that does not have an
  // associated MPI datatype, so we'll need to serialize
  // it. Unfortunately, this means that we cannot use MPI_Gather, so
  // we'll just have all of the non-root nodes send individual
  // messages to the root. Serialization is handled by the
  // communicator's array recv.
  template<typename T>
  void
  gather_impl(const communicator& comm, const T* in_values, int n,
              T* out_values, int root, mpl::false_)
  {
    int tag = environment::collectives_tag();
    int size = comm.size();

    // Block src of out_values receives rank src's values; our own
    // block is copied locally, never transmitted.
    for (int src = 0; src < size; ++src) {
      if (src == root)
        std::copy(in_values, in_values + n, out_values + n * src);
      else
        comm.recv(src, tag, out_values + n * src, n);
    }
  }

  // We're gathering at a non-root for a type that does not have an
  // associated MPI datatype, so we'll need to serialize
  // it. Unfortunately, this means that we cannot use MPI_Gather, so
  // we'll just have all of the non-root nodes send individual
  // messages to the root.
  template<typename T>
  void
  gather_impl(const communicator& comm, const T* in_values, int n, int root,
              mpl::false_)
  {
    int tag = environment::collectives_tag();
    comm.send(root, tag, in_values, n);
  }
} // end namespace detail
template<typename T>
void
gather(const communicator& comm, const T& in_value, T* out_values, int root)
{
if (comm.rank() == root)
detail::gather_impl(comm, &in_value, 1, out_values, root,
is_mpi_datatype<T>());
else
detail::gather_impl(comm, &in_value, 1, root, is_mpi_datatype<T>());
}
template<typename T>
void gather(const communicator& comm, const T& in_value, int root)
{
BOOST_ASSERT(comm.rank() != root);
detail::gather_impl(comm, &in_value, 1, root, is_mpi_datatype<T>());
}
// Gather one value per process into a vector at the root; the vector
// is resized (and written) only on the root process.
template<typename T>
void
gather(const communicator& comm, const T& in_value, std::vector<T>& out_values,
       int root)
{
  if (comm.rank() != root) {
    ::boost::mpi::gather(comm, in_value, root);
    return;
  }
  out_values.resize(comm.size());
  ::boost::mpi::gather(comm, in_value, &out_values[0], root);
}
template<typename T>
void
gather(const communicator& comm, const T* in_values, int n, T* out_values,
int root)
{
if (comm.rank() == root)
detail::gather_impl(comm, in_values, n, out_values, root,
is_mpi_datatype<T>());
else
detail::gather_impl(comm, in_values, n, root, is_mpi_datatype<T>());
}
template<typename T>
void
gather(const communicator& comm, const T* in_values, int n,
std::vector<T>& out_values, int root)
{
::boost::mpi::gather(comm, in_values, n, &out_values[0], root);
}
template<typename T>
void gather(const communicator& comm, const T* in_values, int n, int root)
{
BOOST_ASSERT(comm.rank() != root);
detail::gather_impl(comm, in_values, n, root, is_mpi_datatype<T>());
}
} } // end namespace boost::mpi
#endif // BOOST_MPI_GATHER_HPP

View File

@@ -0,0 +1,357 @@
// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor@gmail.com>.
// Copyright (C) 2004 The Trustees of Indiana University
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
// Message Passing Interface 1.1 -- Section 4.9.1. Reduce
#ifndef BOOST_MPI_REDUCE_HPP
#define BOOST_MPI_REDUCE_HPP
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
// For (de-)serializing sends and receives
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
// For packed_[io]archive sends and receives
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/detail/computation_tree.hpp>
#include <boost/mpi/operations.hpp>
#include <algorithm>
#include <exception>
#include <boost/assert.hpp>
#include <boost/scoped_array.hpp>
namespace boost { namespace mpi {
/************************************************************************
* Implementation details *
************************************************************************/
namespace detail {
  /**********************************************************************
   * Simple reduction with MPI_Reduce                                   *
   **********************************************************************/
  // We are reducing at the root for a type that has an associated MPI
  // datatype and operation, so we'll use MPI_Reduce directly.
  // Note: the op argument itself is unused; the MPI_Op is recovered
  // statically via is_mpi_op<Op, T>::op().
  template<typename T, typename Op>
  void
  reduce_impl(const communicator& comm, const T* in_values, int n,
              T* out_values, Op op, int root, mpl::true_ /*is_mpi_op*/,
              mpl::true_/*is_mpi_datatype*/)
  {
    BOOST_MPI_CHECK_RESULT(MPI_Reduce,
                           (const_cast<T*>(in_values), out_values, n,
                            boost::mpi::get_mpi_datatype<T>(*in_values),
                            (is_mpi_op<Op, T>::op()), root, comm));
  }

  // We are reducing to the root for a type that has an associated MPI
  // datatype and operation, so we'll use MPI_Reduce directly. The
  // receive buffer is significant only at the root, hence the null
  // pointer here.
  template<typename T, typename Op>
  void
  reduce_impl(const communicator& comm, const T* in_values, int n, Op op,
              int root, mpl::true_ /*is_mpi_op*/, mpl::true_/*is_mpi_datatype*/)
  {
    BOOST_MPI_CHECK_RESULT(MPI_Reduce,
                           (const_cast<T*>(in_values), 0, n,
                            boost::mpi::get_mpi_datatype<T>(*in_values),
                            (is_mpi_op<Op, T>::op()), root, comm));
  }

  /**********************************************************************
   * User-defined reduction with MPI_Reduce                             *
   **********************************************************************/

  // We are reducing at the root for a type that has an associated MPI
  // datatype but with a custom operation. We'll use MPI_Reduce
  // directly, but we'll need to create an MPI_Op manually.
  template<typename T, typename Op>
  void
  reduce_impl(const communicator& comm, const T* in_values, int n,
              T* out_values, Op op, int root, mpl::false_ /*is_mpi_op*/,
              mpl::true_/*is_mpi_datatype*/)
  {
    // user_op wraps the function object op as an MPI_Op for the
    // duration of this call.
    user_op<Op, T> mpi_op(op);
    BOOST_MPI_CHECK_RESULT(MPI_Reduce,
                           (const_cast<T*>(in_values), out_values, n,
                            boost::mpi::get_mpi_datatype<T>(*in_values),
                            mpi_op.get_mpi_op(), root, comm));
  }

  // We are reducing to the root for a type that has an associated MPI
  // datatype but with a custom operation. We'll use MPI_Reduce
  // directly, but we'll need to create an MPI_Op manually.
  template<typename T, typename Op>
  void
  reduce_impl(const communicator& comm, const T* in_values, int n, Op op,
              int root, mpl::false_/*is_mpi_op*/, mpl::true_/*is_mpi_datatype*/)
  {
    user_op<Op, T> mpi_op(op);
    BOOST_MPI_CHECK_RESULT(MPI_Reduce,
                           (const_cast<T*>(in_values), 0, n,
                            boost::mpi::get_mpi_datatype<T>(*in_values),
                            mpi_op.get_mpi_op(), root, comm));
  }

  /**********************************************************************
   * User-defined, tree-based reduction for non-MPI data types          *
   **********************************************************************/

  // Commutative reduction. Used by both root and non-root processes:
  // each process folds its children's partial results into out_values
  // and, if it is not the tree's root, forwards the combined result
  // to its parent.
  template<typename T, typename Op>
  void
  tree_reduce_impl(const communicator& comm, const T* in_values, int n,
                   T* out_values, Op op, int root,
                   mpl::true_ /*is_commutative*/)
  {
    // Start from our own contribution.
    std::copy(in_values, in_values + n, out_values);

    int size = comm.size();
    int rank = comm.rank();

    // The computation tree we will use.
    detail::computation_tree tree(rank, size, root);

    int tag = environment::collectives_tag();
    MPI_Status status;
    int children = 0;
    // Receive from up to branching_factor() children (child ranks wrap
    // modulo size; the loop stops early if the wrap reaches root) and
    // fold each child's partial result into ours.
    for (int child = tree.child_begin();
         children < tree.branching_factor() && child != root;
         ++children, child = (child + 1) % size) {
      // Receive archive
      packed_iarchive ia(comm);
      detail::packed_archive_recv(comm, child, tag, ia, status);
      T incoming;
      for (int i = 0; i < n; ++i) {
        ia >> incoming;
        out_values[i] = op(out_values[i], incoming);
      }
    }

    // For non-roots, send the result to the parent.
    if (tree.parent() != rank) {
      packed_oarchive oa(comm);
      for (int i = 0; i < n; ++i)
        oa << out_values[i];
      detail::packed_archive_send(comm, tree.parent(), tag, oa);
    }
  }

  // Commutative reduction from a non-root. The reduction runs into a
  // scratch buffer: a non-root never keeps the result, it only
  // forwards partial results up the tree.
  template<typename T, typename Op>
  void
  tree_reduce_impl(const communicator& comm, const T* in_values, int n, Op op,
                   int root, mpl::true_ /*is_commutative*/)
  {
    scoped_array<T> results(new T[n]);
    detail::tree_reduce_impl(comm, in_values, n, results.get(), op, root,
                             mpl::true_());
  }

  // Non-commutative reduction, at the root. The processes form a
  // binary tree over rank order (left subtree = ranks below the node,
  // right subtree = ranks above it), and partial results are always
  // combined left-to-right so the operation need not be commutative.
  template<typename T, typename Op>
  void
  tree_reduce_impl(const communicator& comm, const T* in_values, int n,
                   T* out_values, Op op, int root,
                   mpl::false_ /*is_commutative*/)
  {
    int tag = environment::collectives_tag();

    // The root's children are the midpoints of the rank ranges below
    // and above it.
    int left_child = root / 2;
    int right_child = (root + comm.size()) / 2;

    MPI_Status status;
    if (left_child != root) {
      // Receive value from the left child and merge it with the value
      // we had incoming. The left subtree's result is the left operand,
      // preserving rank order for non-commutative ops.
      packed_iarchive ia(comm);
      detail::packed_archive_recv(comm, left_child, tag, ia, status);
      T incoming;
      for (int i = 0; i < n; ++i) {
        ia >> incoming;
        out_values[i] = op(incoming, in_values[i]);
      }
    } else {
      // There was no left value, so copy our incoming value.
      std::copy(in_values, in_values + n, out_values);
    }

    if (right_child != root) {
      // Receive value from the right child and merge it with the
      // value we had incoming.
      packed_iarchive ia(comm);
      detail::packed_archive_recv(comm, right_child, tag, ia, status);
      T incoming;
      for (int i = 0; i < n; ++i) {
        ia >> incoming;
        out_values[i] = op(out_values[i], incoming);
      }
    }
  }

  // Non-commutative reduction from a non-root: locate our position in
  // the binary tree by walking down from the root, merge our subtree's
  // results in rank order, then send the combined value to our parent.
  template<typename T, typename Op>
  void
  tree_reduce_impl(const communicator& comm, const T* in_values, int n, Op op,
                   int root, mpl::false_ /*is_commutative*/)
  {
    int size = comm.size();
    int rank = comm.rank();

    int tag = environment::collectives_tag();

    // Determine our parents and children in the commutative binary
    // computation tree. Each step halves the rank interval
    // [left_bound, right_bound) containing us.
    int grandparent = root;
    int parent = root;
    int left_bound = 0;
    int right_bound = size;
    int left_child, right_child;
    do {
      left_child = (left_bound + parent) / 2;
      right_child = (parent + right_bound) / 2;

      if (rank < parent) {
        // Go left.
        grandparent = parent;
        right_bound = parent;
        parent = left_child;
      } else if (rank > parent) {
        // Go right.
        grandparent = parent;
        left_bound = parent + 1;
        parent = right_child;
      } else {
        // We've found the parent
        break;
      }
    } while (true);

    // Our parent is the grandparent of our children. This is a slight
    // abuse of notation, but it makes the send-to-parent below make
    // more sense.
    parent = grandparent;

    MPI_Status status;
    // Scratch buffer for the combined result of our subtree.
    scoped_array<T> out_values(new T[n]);
    if (left_child != rank) {
      // Receive value from the left child and merge it with the value
      // we had incoming.
      packed_iarchive ia(comm);
      detail::packed_archive_recv(comm, left_child, tag, ia, status);
      T incoming;
      for (int i = 0; i < n; ++i) {
        ia >> incoming;
        out_values[i] = op(incoming, in_values[i]);
      }
    } else {
      // There was no left value, so copy our incoming value.
      std::copy(in_values, in_values + n, out_values.get());
    }

    if (right_child != rank) {
      // Receive value from the right child and merge it with the
      // value we had incoming.
      packed_iarchive ia(comm);
      detail::packed_archive_recv(comm, right_child, tag, ia, status);
      T incoming;
      for (int i = 0; i < n; ++i) {
        ia >> incoming;
        out_values[i] = op(out_values[i], incoming);
      }
    }

    // Send the combined value to our parent.
    packed_oarchive oa(comm);
    for (int i = 0; i < n; ++i)
      oa << out_values[i];
    detail::packed_archive_send(comm, parent, tag, oa);
  }

  // We are reducing at the root for a type that has no associated MPI
  // datatype and operation, so we'll use a simple tree-based
  // algorithm.
  template<typename T, typename Op>
  void
  reduce_impl(const communicator& comm, const T* in_values, int n,
              T* out_values, Op op, int root, mpl::false_ /*is_mpi_op*/,
              mpl::false_ /*is_mpi_datatype*/)
  {
    detail::tree_reduce_impl(comm, in_values, n, out_values, op, root,
                             is_commutative<Op, T>());
  }

  // We are reducing to the root for a type that has no associated MPI
  // datatype and operation, so we'll use a simple tree-based
  // algorithm.
  template<typename T, typename Op>
  void
  reduce_impl(const communicator& comm, const T* in_values, int n, Op op,
              int root, mpl::false_ /*is_mpi_op*/,
              mpl::false_ /*is_mpi_datatype*/)
  {
    detail::tree_reduce_impl(comm, in_values, n, op, root,
                             is_commutative<Op, T>());
  }
} // end namespace detail
template<typename T, typename Op>
void
reduce(const communicator& comm, const T* in_values, int n, T* out_values,
Op op, int root)
{
if (comm.rank() == root)
detail::reduce_impl(comm, in_values, n, out_values, op, root,
is_mpi_op<Op, T>(), is_mpi_datatype<T>());
else
detail::reduce_impl(comm, in_values, n, op, root,
is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}
template<typename T, typename Op>
void
reduce(const communicator& comm, const T* in_values, int n, Op op, int root)
{
BOOST_ASSERT(comm.rank() != root);
detail::reduce_impl(comm, in_values, n, op, root,
is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}
template<typename T, typename Op>
void
reduce(const communicator& comm, const T& in_value, T& out_value, Op op,
int root)
{
if (comm.rank() == root)
detail::reduce_impl(comm, &in_value, 1, &out_value, op, root,
is_mpi_op<Op, T>(), is_mpi_datatype<T>());
else
detail::reduce_impl(comm, &in_value, 1, op, root,
is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}
template<typename T, typename Op>
void reduce(const communicator& comm, const T& in_value, Op op, int root)
{
BOOST_ASSERT(comm.rank() != root);
detail::reduce_impl(comm, &in_value, 1, op, root,
is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}
} } // end namespace boost::mpi
#endif // BOOST_MPI_REDUCE_HPP

View File

@@ -0,0 +1,168 @@
// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor@gmail.com>.
// Copyright (C) 2004 The Trustees of Indiana University
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
// Message Passing Interface 1.1 -- Section 4.9.1. Scan
#ifndef BOOST_MPI_SCAN_HPP
#define BOOST_MPI_SCAN_HPP
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
// For (de-)serializing sends and receives
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
// For packed_[io]archive sends and receives
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/detail/computation_tree.hpp>
#include <boost/mpi/operations.hpp>
#include <algorithm>
#include <exception>
#include <boost/assert.hpp>
namespace boost { namespace mpi {
/************************************************************************
* Implementation details *
************************************************************************/
namespace detail {
  /**********************************************************************
   * Simple prefix reduction with MPI_Scan                              *
   **********************************************************************/

  // We are performing prefix reduction for a type that has an
  // associated MPI datatype and operation, so we'll use MPI_Scan
  // directly. Note: the op argument itself is unused; the MPI_Op is
  // recovered statically via is_mpi_op<Op, T>::op().
  template<typename T, typename Op>
  void
  scan_impl(const communicator& comm, const T* in_values, int n, T* out_values,
            Op op, mpl::true_ /*is_mpi_op*/, mpl::true_ /*is_mpi_datatype*/)
  {
    BOOST_MPI_CHECK_RESULT(MPI_Scan,
                           (const_cast<T*>(in_values), out_values, n,
                            boost::mpi::get_mpi_datatype<T>(*in_values),
                            (is_mpi_op<Op, T>::op()), comm));
  }

  /**********************************************************************
   * User-defined prefix reduction with MPI_Scan                        *
   **********************************************************************/

  // We are performing prefix reduction for a type that has an
  // associated MPI datatype but with a custom operation. We'll use
  // MPI_Scan directly, but we'll need to create an MPI_Op manually.
  template<typename T, typename Op>
  void
  scan_impl(const communicator& comm, const T* in_values, int n, T* out_values,
            Op op, mpl::false_ /*is_mpi_op*/, mpl::true_ /*is_mpi_datatype*/)
  {
    // user_op wraps the function object op as an MPI_Op for the
    // duration of this call.
    user_op<Op, T> mpi_op(op);
    BOOST_MPI_CHECK_RESULT(MPI_Scan,
                           (const_cast<T*>(in_values), out_values, n,
                            boost::mpi::get_mpi_datatype<T>(*in_values),
                            mpi_op.get_mpi_op(), comm));
  }

  /**********************************************************************
   * User-defined, tree-based reduction for non-MPI data types          *
   **********************************************************************/

  // Recursive inclusive-scan over the rank interval [lower, upper):
  // each half scans itself, then the last rank of the lower half sends
  // its final prefix to every rank in the upper half, which folds that
  // value in from the left. Called by every rank in [lower, upper).
  template<typename T, typename Op>
  void
  upper_lower_scan(const communicator& comm, const T* in_values, int n,
                   T* out_values, Op& op, int lower, int upper)
  {
    int tag = environment::collectives_tag();
    int rank = comm.rank();

    if (lower + 1 == upper) {
      // Base case: a single process; its prefix is its own values.
      std::copy(in_values, in_values + n, out_values);
    } else {
      int middle = (lower + upper) / 2;

      if (rank < middle) {
        // Lower half
        upper_lower_scan(comm, in_values, n, out_values, op, lower, middle);

        // If we're the last process in the lower half, send our values
        // to everyone in the upper half.
        if (rank == middle - 1) {
          packed_oarchive oa(comm);
          for (int i = 0; i < n; ++i)
            oa << out_values[i];

          for (int p = middle; p < upper; ++p)
            comm.send(p, tag, oa);
        }
      } else {
        // Upper half
        upper_lower_scan(comm, in_values, n, out_values, op, middle, upper);

        // Receive value from the last process in the lower half.
        packed_iarchive ia(comm);
        comm.recv(middle - 1, tag, ia);

        // Combine value that came from the left with our value. The
        // left value is the left operand, preserving rank order for
        // non-commutative ops.
        T left_value;
        for (int i = 0; i < n; ++i)
          {
            ia >> left_value;
            out_values[i] = op(left_value, out_values[i]);
          }
      }
    }
  }

  // We are performing prefix reduction for a type that has no
  // associated MPI datatype and operation, so we'll use a simple
  // upper/lower algorithm.
  template<typename T, typename Op>
  inline void
  scan_impl(const communicator& comm, const T* in_values, int n, T* out_values,
            Op op, mpl::false_ /*is_mpi_op*/, mpl::false_/*is_mpi_datatype*/)
  {
    upper_lower_scan(comm, in_values, n, out_values, op, 0, comm.size());
  }
} // end namespace detail
template<typename T, typename Op>
inline void
scan(const communicator& comm, const T& in_value, T& out_value, Op op)
{
detail::scan_impl(comm, &in_value, 1, &out_value, op,
is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}
template<typename T, typename Op>
inline void
scan(const communicator& comm, const T* in_values, int n, T* out_values, Op op)
{
detail::scan_impl(comm, in_values, n, out_values, op,
is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}
template<typename T, typename Op>
inline T
scan(const communicator& comm, const T& in_value, Op op)
{
T out_value;
detail::scan_impl(comm, &in_value, 1, &out_value, op,
is_mpi_op<Op, T>(), is_mpi_datatype<T>());
return out_value;
}
} } // end namespace boost::mpi
#endif // BOOST_MPI_SCAN_HPP

View File

@@ -0,0 +1,161 @@
// Copyright (C) 2005, 2006 Douglas Gregor.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Message Passing Interface 1.1 -- Section 4.6. Scatter
#ifndef BOOST_MPI_SCATTER_HPP
#define BOOST_MPI_SCATTER_HPP
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <vector>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/assert.hpp>
namespace boost { namespace mpi {
namespace detail {
  // We're scattering from the root for a type that has an associated MPI
  // datatype, so we'll use MPI_Scatter to do all of the work.
  //
  // in_values holds comm.size() blocks of n values; block i is
  // delivered to rank i.
  template<typename T>
  void
  scatter_impl(const communicator& comm, const T* in_values, T* out_values,
               int n, int root, mpl::true_)
  {
    MPI_Datatype type = get_mpi_datatype<T>(*in_values);
    BOOST_MPI_CHECK_RESULT(MPI_Scatter,
                           (const_cast<T*>(in_values), n, type,
                            out_values, n, type, root, comm));
  }

  // We're scattering from a non-root for a type that has an associated MPI
  // datatype, so we'll use MPI_Scatter to do all of the work. The send
  // buffer is significant only at the root, so a null pointer is
  // passed in its place here.
  template<typename T>
  void
  scatter_impl(const communicator& comm, T* out_values, int n, int root,
               mpl::true_)
  {
    MPI_Datatype type = get_mpi_datatype<T>(*out_values);
    BOOST_MPI_CHECK_RESULT(MPI_Scatter,
                           (0, n, type,
                            out_values, n, type,
                            root, comm));
  }

  // We're scattering from the root for a type that does not have an
  // associated MPI datatype, so we'll need to serialize
  // it. Unfortunately, this means that we cannot use MPI_Scatter, so
  // we'll just have the root send individual messages to the other
  // processes.
  template<typename T>
  void
  scatter_impl(const communicator& comm, const T* in_values, T* out_values,
               int n, int root, mpl::false_)
  {
    int tag = environment::collectives_tag();
    int size = comm.size();

    for (int dest = 0; dest < size; ++dest) {
      if (dest == root) {
        // Our own values will never be transmitted: just copy them.
        std::copy(in_values + dest * n, in_values + (dest + 1) * n, out_values);
      } else {
        // Send archive
        packed_oarchive oa(comm);
        for (int i = 0; i < n; ++i)
          oa << in_values[dest * n + i];
        detail::packed_archive_send(comm, dest, tag, oa);
      }
    }
  }

  // We're scattering to a non-root for a type that does not have an
  // associated MPI datatype, so we'll need to de-serialize
  // it. Unfortunately, this means that we cannot use MPI_Scatter, so
  // we'll just have all of the non-root nodes send individual
  // messages to the root.
  template<typename T>
  void
  scatter_impl(const communicator& comm, T* out_values, int n, int root,
               mpl::false_)
  {
    int tag = environment::collectives_tag();

    // Receive our block from the root and unpack it.
    packed_iarchive ia(comm);
    MPI_Status status;
    detail::packed_archive_recv(comm, root, tag, ia, status);
    for (int i = 0; i < n; ++i)
      ia >> out_values[i];
  }
} // end namespace detail
template<typename T>
void
scatter(const communicator& comm, const T* in_values, T& out_value, int root)
{
if (comm.rank() == root)
detail::scatter_impl(comm, in_values, &out_value, 1, root,
is_mpi_datatype<T>());
else
detail::scatter_impl(comm, &out_value, 1, root, is_mpi_datatype<T>());
}
// Vector convenience overload: scatter one value per process from the
// root's vector. Only the root reads the send buffer, so non-root
// processes pass a null pointer.
template<typename T>
void
scatter(const communicator& comm, const std::vector<T>& in_values, T& out_value,
        int root)
{
  const T* send
    = comm.rank() == root ? &in_values[0] : static_cast<const T*>(0);
  ::boost::mpi::scatter<T>(comm, send, out_value, root);
}
template<typename T>
void scatter(const communicator& comm, T& out_value, int root)
{
BOOST_ASSERT(comm.rank() != root);
detail::scatter_impl(comm, &out_value, 1, root, is_mpi_datatype<T>());
}
template<typename T>
void
scatter(const communicator& comm, const T* in_values, T* out_values, int n,
int root)
{
if (comm.rank() == root)
detail::scatter_impl(comm, in_values, out_values, n, root,
is_mpi_datatype<T>());
else
detail::scatter_impl(comm, out_values, n, root, is_mpi_datatype<T>());
}
// Vector convenience overload: scatter n values per process from the
// root's vector. Only the root reads the send buffer, so non-root
// processes pass a null pointer.
template<typename T>
void
scatter(const communicator& comm, const std::vector<T>& in_values,
        T* out_values, int n, int root)
{
  const T* send
    = comm.rank() == root ? &in_values[0] : static_cast<const T*>(0);
  ::boost::mpi::scatter(comm, send, out_values, n, root);
}
template<typename T>
void scatter(const communicator& comm, T* out_values, int n, int root)
{
BOOST_ASSERT(comm.rank() != root);
detail::scatter_impl(comm, out_values, n, root, is_mpi_datatype<T>());
}
} } // end namespace boost::mpi
#endif // BOOST_MPI_SCATTER_HPP

View File

@@ -0,0 +1,23 @@
// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor -at- gmail.com>.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Message Passing Interface 1.1 -- Section 4. MPI Collectives
/** @file collectives_fwd.hpp
*
* This header provides forward declarations for all of the
* collective operations contained in the header @c collectives.hpp.
*/
#ifndef BOOST_MPI_COLLECTIVES_FWD_HPP
#define BOOST_MPI_COLLECTIVES_FWD_HPP
/// INTERNAL ONLY
#define BOOST_MPI_COLLECTIVES_FORWARD_ONLY
#include <boost/mpi/collectives.hpp>
#undef BOOST_MPI_COLLECTIVES_FORWARD_ONLY
#endif // BOOST_MPI_COLLECTIVES_FWD_HPP

1725
test/external/boost/mpi/communicator.hpp vendored Normal file

File diff suppressed because it is too large Load Diff

107
test/external/boost/mpi/config.hpp vendored Normal file
View File

@@ -0,0 +1,107 @@
// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/** @file config.hpp
*
* This header provides MPI configuration details that expose the
* capabilities of the underlying MPI implementation, and provides
* auto-linking support on Windows.
*/
#ifndef BOOST_MPI_CONFIG_HPP
#define BOOST_MPI_CONFIG_HPP
/* Force MPICH not to define SEEK_SET, SEEK_CUR, and SEEK_END, which
conflict with the versions in <stdio.h> and <cstdio>. */
#define MPICH_IGNORE_CXX_SEEK 1
#include <mpi.h>
#include <boost/config.hpp>
/** @brief Define this macro to avoid expensive MPI_Pack/Unpack calls on
* homogeneous machines.
*/
//#define BOOST_MPI_HOMOGENEOUS
// If this is an MPI-2 implementation, define configuration macros for
// the features we are interested in.
#if defined(MPI_VERSION) && MPI_VERSION == 2
/** @brief Determine if the MPI implementation has support for memory
* allocation.
*
* This macro will be defined when the underlying MPI implementation
* has support for the MPI-2 memory allocation routines @c
* MPI_Alloc_mem and @c MPI_Free_mem. When defined, the @c allocator
* class template will provide Standard Library-compliant access to
* these memory-allocation routines.
*/
# define BOOST_MPI_HAS_MEMORY_ALLOCATION
/** @brief Determine if the MPI implementation has supports initialization
* without command-line arguments.
*
* This macro will be defined when the underlying implementation
* supports initialization of MPI without passing along command-line
* arguments, e.g., @c MPI_Init(NULL, NULL). When defined, the @c
* environment class will provide a default constructor. This macro is
* always defined for MPI-2 implementations. */
# define BOOST_MPI_HAS_NOARG_INITIALIZATION
#endif
#if defined(MPIAPI)
# define BOOST_MPI_CALLING_CONVENTION MPIAPI
#else
/** @brief Specifies the calling convention that will be used for callbacks
* from the underlying C MPI.
*
* This is a Windows-specific macro, which will be used internally to state
* the calling convention of any function that is to be used as a callback
* from MPI. For example, the internally-defined functions that are used in
* a call to @c MPI_Op_create. This macro is likely only to be useful to
* users that wish to bypass Boost.MPI, registering their own callbacks in
* certain cases, e.g., through @c MPI_Op_create.
*/
# define BOOST_MPI_CALLING_CONVENTION
#endif
#if defined(LAM_MPI)
// Configuration for LAM/MPI
# define BOOST_MPI_HAS_MEMORY_ALLOCATION
# define BOOST_MPI_HAS_NOARG_INITIALIZATION
#elif defined(MPICH_NAME)
// Configuration for MPICH
#endif
/*****************************************************************************
* *
* DLL import/export options *
* *
*****************************************************************************/
#if defined(BOOST_HAS_DECLSPEC) && (defined(BOOST_MPI_DYN_LINK) || defined(BOOST_ALL_DYN_LINK)) && !defined(BOOST_MPI_STATIC_LINK)
# if defined(BOOST_MPI_SOURCE)
# define BOOST_MPI_DECL __declspec(dllexport)
# define BOOST_MPI_BUILD_DLL
# else
# define BOOST_MPI_DECL __declspec(dllimport)
# endif
#endif
#ifndef BOOST_MPI_DECL
# define BOOST_MPI_DECL
#endif
#if !defined(BOOST_MPI_NO_LIB) && !defined(BOOST_MPI_SOURCE) && !defined(BOOST_ALL_NO_LIB) && defined(__cplusplus)
# define BOOST_LIB_NAME boost_mpi
# if defined(BOOST_MPI_DYN_LINK) || defined(BOOST_ALL_DYN_LINK)
# define BOOST_DYN_LINK
# endif
# ifdef BOOST_MPI_DIAG
# define BOOST_LIB_DIAGNOSTIC
# endif
# include <boost/config/auto_link.hpp>
#endif
#endif // BOOST_MPI_CONFIG_HPP

361
test/external/boost/mpi/datatype.hpp vendored Normal file
View File

@@ -0,0 +1,361 @@
// Copyright 2004 The Trustees of Indiana University.
// Copyright 2005 Matthias Troyer.
// Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
// Matthias Troyer
/** @file datatype.hpp
*
* This header provides the mapping from C++ types to MPI data types.
*/
#ifndef BOOST_MPI_DATATYPE_HPP
#define BOOST_MPI_DATATYPE_HPP
#include <boost/mpi/config.hpp>
#include <boost/mpi/datatype_fwd.hpp>
#include <mpi.h>
#include <boost/config.hpp>
#include <boost/mpl/bool.hpp>
#include <boost/mpl/or.hpp>
#include <boost/mpl/and.hpp>
#include <boost/mpi/detail/mpi_datatype_cache.hpp>
#include <boost/mpl/assert.hpp>
#include <boost/archive/basic_archive.hpp>
#include <boost/serialization/item_version_type.hpp>
#include <utility> // for std::pair
namespace boost { namespace mpi {
/**
* @brief Type trait that determines if there exists a built-in
* integer MPI data type for a given C++ type.
*
 * This type trait determines when there is a direct mapping from a
* C++ type to an MPI data type that is classified as an integer data
* type. See @c is_mpi_builtin_datatype for general information about
* built-in MPI data types.
*/
// Primary template: false for all T; specific types are marked true
// via the BOOST_MPI_DATATYPE specializations below.
template<typename T>
struct is_mpi_integer_datatype
  : public boost::mpl::false_ { };
/**
* @brief Type trait that determines if there exists a built-in
* floating point MPI data type for a given C++ type.
*
 * This type trait determines when there is a direct mapping from a
* C++ type to an MPI data type that is classified as a floating
* point data type. See @c is_mpi_builtin_datatype for general
* information about built-in MPI data types.
*/
// Primary template: false for all T; specific types are marked true
// via the BOOST_MPI_DATATYPE specializations below.
template<typename T>
struct is_mpi_floating_point_datatype
  : public boost::mpl::false_ { };
/**
* @brief Type trait that determines if there exists a built-in
* logical MPI data type for a given C++ type.
*
 * This type trait determines when there is a direct mapping from a
* C++ type to an MPI data type that is classified as an logical data
* type. See @c is_mpi_builtin_datatype for general information about
* built-in MPI data types.
*/
// Primary template: false for all T; specializations mark logical
// MPI types as true.
template<typename T>
struct is_mpi_logical_datatype
  : public boost::mpl::false_ { };
/**
* @brief Type trait that determines if there exists a built-in
* complex MPI data type for a given C++ type.
*
 * This type trait determines when there is a direct mapping from a
* C++ type to an MPI data type that is classified as an complex data
* type. See @c is_mpi_builtin_datatype for general information about
* built-in MPI data types.
*/
// Primary template: false for all T; specializations mark complex
// MPI types as true.
template<typename T>
struct is_mpi_complex_datatype
  : public boost::mpl::false_ { };
/**
* @brief Type trait that determines if there exists a built-in
* byte MPI data type for a given C++ type.
*
 * This type trait determines when there is a direct mapping from a
* C++ type to an MPI data type that is classified as an byte data
* type. See @c is_mpi_builtin_datatype for general information about
* built-in MPI data types.
*/
// Primary template: false for all T; specializations mark byte
// MPI types as true.
template<typename T>
struct is_mpi_byte_datatype
  : public boost::mpl::false_ { };
/** @brief Type trait that determines if there exists a built-in MPI
* data type for a given C++ type.
*
* This type trait determines when there is a direct mapping from a
* C++ type to an MPI type. For instance, the C++ @c int type maps
* directly to the MPI type @c MPI_INT. When there is a direct
* mapping from the type @c T to an MPI type, @c
* is_mpi_builtin_datatype will derive from @c mpl::true_ and the MPI
* data type will be accessible via @c get_mpi_datatype.
*
* In general, users should not need to specialize this
* trait. However, if you have an additional C++ type that can map
 * directly to one of MPI's built-in types, specialize either this
* trait or one of the traits corresponding to categories of MPI data
* types (@c is_mpi_integer_datatype, @c
* is_mpi_floating_point_datatype, @c is_mpi_logical_datatype, @c
* is_mpi_complex_datatype, or @c is_mpi_builtin_datatype). @c
* is_mpi_builtin_datatype derives @c mpl::true_ if any of the traits
* corresponding to MPI data type categories derived @c mpl::true_.
*/
// A type maps to a built-in MPI datatype iff it falls into one of the
// five built-in categories (integer, floating point, logical, complex,
// or byte) declared above.
template<typename T>
struct is_mpi_builtin_datatype
  : boost::mpl::or_<is_mpi_integer_datatype<T>,
                    is_mpi_floating_point_datatype<T>,
                    is_mpi_logical_datatype<T>,
                    is_mpi_complex_datatype<T>,
                    is_mpi_byte_datatype<T> >
{
};
/** @brief Type trait that determines if a C++ type can be mapped to
* an MPI data type.
*
* This type trait determines if it is possible to build an MPI data
* type that represents a C++ data type. When this is the case, @c
* is_mpi_datatype derives @c mpl::true_ and the MPI data type will
* be accessible via @c get_mpi_datatype.
* For any C++ type that maps to a built-in MPI data type (see @c
 * is_mpi_builtin_datatype), @c is_mpi_datatype is trivially
* true. However, any POD ("Plain Old Data") type containing types
* that themselves can be represented by MPI data types can itself be
* represented as an MPI data type. For instance, a @c point3d class
* containing three @c double values can be represented as an MPI
* data type. To do so, first make the data type Serializable (using
* the Boost.Serialization library); then, specialize the @c
* is_mpi_datatype trait for the point type so that it will derive @c
* mpl::true_:
*
* @code
* namespace boost { namespace mpi {
* template<> struct is_mpi_datatype<point>
* : public mpl::true_ { };
* } }
* @endcode
*/
// By default, a type can be mapped to an MPI datatype exactly when it
// has a built-in mapping; users may specialize this trait for their
// own serializable POD-like types (see the documentation above).
template<typename T>
struct is_mpi_datatype
  : public is_mpi_builtin_datatype<T>
{
};
/** @brief Returns an MPI data type for a C++ type.
*
* The function creates an MPI data type for the given object @c
* x. The first time it is called for a class @c T, the MPI data type
* is created and cached. Subsequent calls for objects of the same
* type @c T return the cached MPI data type. The type @c T must
* allow creation of an MPI data type. That is, it must be
* Serializable and @c is_mpi_datatype<T> must derive @c mpl::true_.
*
* For fundamental MPI types, a copy of the MPI data type of the MPI
* library is returned.
*
* Note that since the data types are cached, the caller should never
* call @c MPI_Type_free() for the MPI data type returned by this
* call.
*
* @param x for an optimized call, a constructed object of the type
* should be passed; otherwise, an object will be
* default-constructed.
*
* @returns The MPI data type corresponding to type @c T.
*/
template<typename T> MPI_Datatype get_mpi_datatype(const T& x)
{
  // Compile-time guard: T must be mappable to an MPI datatype.
  BOOST_MPL_ASSERT((is_mpi_datatype<T>));
  // Derived datatypes are built once and cached; the cache owns the
  // MPI_Datatype, so callers must not MPI_Type_free() it.
  return detail::mpi_datatype_cache().datatype(x);
}
// Don't parse this part when we're generating Doxygen documentation.
#ifndef BOOST_MPI_DOXYGEN
/// INTERNAL ONLY
#define BOOST_MPI_DATATYPE(CppType, MPIType, Kind) \
template<> \
inline MPI_Datatype \
get_mpi_datatype< CppType >(const CppType&) { return MPIType; } \
\
template<> \
struct BOOST_JOIN(is_mpi_,BOOST_JOIN(Kind,_datatype))< CppType > \
: boost::mpl::bool_<true> \
{}
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(packed, MPI_PACKED, builtin);
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(char, MPI_CHAR, builtin);
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(short, MPI_SHORT, integer);
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(int, MPI_INT, integer);
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(long, MPI_LONG, integer);
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(float, MPI_FLOAT, floating_point);
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(double, MPI_DOUBLE, floating_point);
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(long double, MPI_LONG_DOUBLE, floating_point);
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(unsigned char, MPI_UNSIGNED_CHAR, builtin);
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(unsigned short, MPI_UNSIGNED_SHORT, integer);
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(unsigned, MPI_UNSIGNED, integer);
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(unsigned long, MPI_UNSIGNED_LONG, integer);
/// INTERNAL ONLY
#define BOOST_MPI_LIST2(A, B) A, B
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(std::pair<BOOST_MPI_LIST2(float, int)>, MPI_FLOAT_INT,
builtin);
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(std::pair<BOOST_MPI_LIST2(double, int)>, MPI_DOUBLE_INT,
builtin);
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(std::pair<BOOST_MPI_LIST2(long double, int)>,
MPI_LONG_DOUBLE_INT, builtin);
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(std::pair<BOOST_MPI_LIST2(long, int>), MPI_LONG_INT,
builtin);
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(std::pair<BOOST_MPI_LIST2(short, int>), MPI_SHORT_INT,
builtin);
/// INTERNAL ONLY
BOOST_MPI_DATATYPE(std::pair<BOOST_MPI_LIST2(int, int>), MPI_2INT, builtin);
#undef BOOST_MPI_LIST2
/// specialization of is_mpi_datatype for pairs
// A pair is an MPI datatype iff both of its members are.
template <class T, class U>
struct is_mpi_datatype<std::pair<T,U> >
  : public mpl::and_<is_mpi_datatype<T>,is_mpi_datatype<U> >
{
};
// Define wchar_t specialization of is_mpi_datatype, if possible.
#if !defined(BOOST_NO_INTRINSIC_WCHAR_T) && \
(defined(MPI_WCHAR) || (defined(MPI_VERSION) && MPI_VERSION >= 2))
BOOST_MPI_DATATYPE(wchar_t, MPI_WCHAR, builtin);
#endif
// Define long long or __int64 specialization of is_mpi_datatype, if possible.
#if defined(BOOST_HAS_LONG_LONG) && \
(defined(MPI_LONG_LONG_INT) || (defined(MPI_VERSION) && MPI_VERSION >= 2))
BOOST_MPI_DATATYPE(long long, MPI_LONG_LONG_INT, builtin);
#elif defined(BOOST_HAS_MS_INT64) && \
(defined(MPI_LONG_LONG_INT) || (defined(MPI_VERSION) && MPI_VERSION >= 2))
BOOST_MPI_DATATYPE(__int64, MPI_LONG_LONG_INT, builtin);
#endif
// Define unsigned long long or unsigned __int64 specialization of
// is_mpi_datatype, if possible. We separate this from the check for
// the (signed) long long/__int64 because some MPI implementations
// (e.g., MPICH-MX) have MPI_LONG_LONG_INT but not
// MPI_UNSIGNED_LONG_LONG.
#if defined(BOOST_HAS_LONG_LONG) && \
(defined(MPI_UNSIGNED_LONG_LONG) \
|| (defined(MPI_VERSION) && MPI_VERSION >= 2))
BOOST_MPI_DATATYPE(unsigned long long, MPI_UNSIGNED_LONG_LONG, builtin);
#elif defined(BOOST_HAS_MS_INT64) && \
(defined(MPI_UNSIGNED_LONG_LONG) \
|| (defined(MPI_VERSION) && MPI_VERSION >= 2))
BOOST_MPI_DATATYPE(unsigned __int64, MPI_UNSIGNED_LONG_LONG, builtin);
#endif
// Define signed char specialization of is_mpi_datatype, if possible.
#if defined(MPI_SIGNED_CHAR) || (defined(MPI_VERSION) && MPI_VERSION >= 2)
BOOST_MPI_DATATYPE(signed char, MPI_SIGNED_CHAR, builtin);
#endif
#endif // Doxygen
namespace detail {
  // Builds (and commits) a contiguous MPI datatype covering the raw
  // bytes of a bool, since MPI has no built-in boolean type.
  inline MPI_Datatype build_mpi_datatype_for_bool()
  {
    MPI_Datatype type;
    MPI_Type_contiguous(sizeof(bool), MPI_BYTE, &type);
    MPI_Type_commit(&type);
    return type;
  }
}
/// Support for bool. There is no corresponding MPI_BOOL.
/// INTERNAL ONLY
template<>
inline MPI_Datatype get_mpi_datatype<bool>(const bool&)
{
  // Built lazily on first use and cached for the process lifetime.
  // NOTE(review): function-local static initialization is not
  // thread-safe pre-C++11 — presumably fine for typical MPI usage;
  // confirm if called concurrently before first use.
  static MPI_Datatype type = detail::build_mpi_datatype_for_bool();
  return type;
}
/// INTERNAL ONLY
// bool is transmissible (via the datatype built above), even though it
// is not a built-in MPI datatype.
template<>
struct is_mpi_datatype<bool>
  : boost::mpl::bool_<true>
{};
#ifndef BOOST_MPI_DOXYGEN
// direct support for special primitive data types of the serialization library
BOOST_MPI_DATATYPE(boost::archive::library_version_type, get_mpi_datatype(uint_least16_t()), integer);
BOOST_MPI_DATATYPE(boost::archive::version_type, get_mpi_datatype(uint_least8_t()), integer);
BOOST_MPI_DATATYPE(boost::archive::class_id_type, get_mpi_datatype(int_least16_t()), integer);
BOOST_MPI_DATATYPE(boost::archive::class_id_reference_type, get_mpi_datatype(int_least16_t()), integer);
BOOST_MPI_DATATYPE(boost::archive::class_id_optional_type, get_mpi_datatype(int_least16_t()), integer);
BOOST_MPI_DATATYPE(boost::archive::object_id_type, get_mpi_datatype(uint_least32_t()), integer);
BOOST_MPI_DATATYPE(boost::archive::object_reference_type, get_mpi_datatype(uint_least32_t()), integer);
BOOST_MPI_DATATYPE(boost::archive::tracking_type, get_mpi_datatype(bool()), builtin);
BOOST_MPI_DATATYPE(boost::serialization::collection_size_type, get_mpi_datatype(std::size_t()), integer);
BOOST_MPI_DATATYPE(boost::serialization::item_version_type, get_mpi_datatype(uint_least8_t()), integer);
#endif // Doxygen
} } // end namespace boost::mpi
// direct support for special primitive data types of the serialization library
// in the case of homogeneous systems
// define a macro to make explicit designation of this more transparent
#define BOOST_IS_MPI_DATATYPE(T) \
namespace boost { \
namespace mpi { \
template<> \
struct is_mpi_datatype< T > : mpl::true_ {}; \
}} \
/**/
#endif // BOOST_MPI_DATATYPE_HPP

View File

@@ -0,0 +1,36 @@
// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/** @file datatype_fwd.hpp
*
* This header provides forward declarations for the contents of the
* header @c datatype.hpp. It is expected to be used primarily by
* user-defined C++ classes that need to specialize @c
* is_mpi_datatype.
*/
#ifndef BOOST_MPI_DATATYPE_FWD_HPP
#define BOOST_MPI_DATATYPE_FWD_HPP
#include <boost/mpi/config.hpp>
namespace boost { namespace mpi {
template<typename T> struct is_mpi_builtin_datatype;
template<typename T> struct is_mpi_integer_datatype;
template<typename T> struct is_mpi_floating_point_datatype;
template<typename T> struct is_mpi_logical_datatype;
template<typename T> struct is_mpi_complex_datatype;
template<typename T> struct is_mpi_byte_datatype;
template<typename T> struct is_mpi_datatype;
template<typename T> MPI_Datatype get_mpi_datatype(const T& x);
// Convenience overload: returns the MPI datatype for T using a
// default-constructed value (requires T to be default-constructible).
template<typename T> MPI_Datatype get_mpi_datatype()
{ return get_mpi_datatype(T());}
/// a dummy data type giving MPI_PACKED as its MPI_Datatype
struct packed {};
} } // end namespace boost::mpi
#endif // BOOST_MPI_DATATYPE_FWD_HPP

View File

@@ -0,0 +1,121 @@
// (C) Copyright 2005-2007 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#ifndef BOOST_MPI_BINARY_BUFFER_IPRIMITIVE_HPP
#define BOOST_MPI_BINARY_BUFFER_IPRIMITIVE_HPP
#include <mpi.h>
#include <iostream>
#include <cstddef> // size_t
#include <boost/config.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/assert.hpp>
#include <boost/mpl/assert.hpp>
#include <boost/serialization/array.hpp>
#include <boost/serialization/is_bitwise_serializable.hpp>
#include <vector>
#include <boost/mpi/allocator.hpp>
#include <cstring> // for memcpy
namespace boost { namespace mpi {
/// deserialization using MPI_Unpack
/// deserialization by raw memcpy out of a contiguous character buffer
class BOOST_MPI_DECL binary_buffer_iprimitive
{
public:
  /// the type of the buffer from which the data is unpacked upon deserialization
  typedef std::vector<char, allocator<char> > buffer_type;

  /// @param b the buffer to unpack from
  /// @param position byte offset at which unpacking starts
  /// The MPI_Comm argument is unused; it mirrors the interface of the
  /// MPI_Unpack-based primitive so the two are interchangeable.
  binary_buffer_iprimitive(buffer_type & b, MPI_Comm const &, int position = 0)
   : buffer_(b),
     position(position)
  {
  }

  /// Mutable pointer to the start of the underlying buffer.
  void* address ()
  {
    return &buffer_.front();
  }

  void const* address () const
  {
    return &buffer_.front();
  }

  /// Current buffer size in bytes.  Refreshes the cached size_ member
  /// and returns a reference to it, so callers can hold a reference
  /// that stays current across calls.
  const std::size_t& size() const
  {
    return size_ = buffer_.size();
  }

  void resize(std::size_t s)
  {
    buffer_.resize(s);
  }

  void load_binary(void *address, std::size_t count)
  {
    load_impl(address,count);
  }

  // fast loading of arrays of fundamental types
  template<class T>
  void load_array(serialization::array<T> const& x, unsigned int /* file_version */)
  {
    // Only bitwise-serializable element types may be byte-copied.
    BOOST_MPL_ASSERT((serialization::is_bitwise_serializable<BOOST_DEDUCED_TYPENAME remove_const<T>::type>));
    if (x.count())
      load_impl(x.address(), sizeof(T)*x.count());
  }

  typedef serialization::is_bitwise_serializable<mpl::_1> use_array_optimization;

  template<class T>
  void load(serialization::array<T> const& x)
  {
    load_array(x,0u);
  }

  // default loading of primitives by bitwise copy.
  template<class T>
  void load( T & t)
  {
    BOOST_MPL_ASSERT((serialization::is_bitwise_serializable<BOOST_DEDUCED_TYPENAME remove_const<T>::type>));
    load_impl(&t, sizeof(T));
  }

  template<class CharType>
  void load(std::basic_string<CharType> & s)
  {
    unsigned int l;
    load(l);
    // borland de-allocator fixup
#if BOOST_WORKAROUND(_RWSTD_VER, BOOST_TESTED_AT(20101))
    if(NULL != s.data())
#endif
    s.resize(l);
    // note breaking a rule here - could be a problem on some platform
    // NOTE(review): reads l bytes, not l*sizeof(CharType).  This matches
    // the corresponding writer, but wide-character strings would not
    // round-trip fully — confirm only narrow strings are expected.
    load_impl(const_cast<char *>(s.data()),l);
  }

private:
  /// Copy @p l raw bytes from the current read offset into @p p.
  void load_impl(void * p, int l)
  {
    // BOOST_ASSERT instead of bare assert(), for consistency with the
    // rest of Boost (<boost/assert.hpp> is already included above).
    BOOST_ASSERT(position+l<=static_cast<int>(buffer_.size()));
    std::memcpy(p,&buffer_[position],l);
    position += l;
  }

  buffer_type & buffer_;
  mutable std::size_t size_;  // cache backing the reference returned by size()
  int position;               // current read offset in bytes
};
} } // end namespace boost::mpi
#endif // BOOST_MPI_BINARY_BUFFER_IPRIMITIVE_HPP

View File

@@ -0,0 +1,104 @@
// (C) Copyright 2005-2007 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#ifndef BOOST_MPI_BINARY_BUFFER_OPRIMITIVE_HPP
#define BOOST_MPI_BINARY_BUFFER_OPRIMITIVE_HPP
#include <mpi.h>
#include <iostream>
#include <cstddef> // size_t
#include <boost/config.hpp>
#include <boost/serialization/array.hpp>
#include <boost/serialization/is_bitwise_serializable.hpp>
#include <boost/assert.hpp>
#include <boost/mpl/assert.hpp>
#include <vector>
#include <boost/mpi/allocator.hpp>
#include <boost/mpl/always.hpp>
#include <boost/type_traits/remove_const.hpp>
namespace boost { namespace mpi {
/// serialization using binary copy into a buffer
/// serialization using binary copy into a buffer
class BOOST_MPI_DECL binary_buffer_oprimitive
{
public:
  /// the type of the buffer into which the data is packed upon serialization
  typedef std::vector<char, allocator<char> > buffer_type;

  /// The MPI_Comm argument is unused; it mirrors the interface of the
  /// MPI_Pack-based primitive so the two are interchangeable.
  binary_buffer_oprimitive(buffer_type & b, MPI_Comm const &)
   : buffer_(b)
  {
  }

  /// Pointer to the start of the packed data.
  void const * address() const
  {
    return &buffer_.front();
  }

  /// Current packed size in bytes.  Refreshes the cached size_ member
  /// and returns a reference to it, so callers can hold a reference
  /// that stays current across calls.
  const std::size_t& size() const
  {
    return size_ = buffer_.size();
  }

  /// Append @p count raw bytes starting at @p address.
  void save_binary(void const *address, std::size_t count)
  {
    save_impl(address,count);
  }

  // fast saving of arrays
  template<class T>
  void save_array(serialization::array<T> const& x, unsigned int /* file_version */)
  {
    // Only bitwise-serializable element types may be byte-copied.
    BOOST_MPL_ASSERT((serialization::is_bitwise_serializable<BOOST_DEDUCED_TYPENAME remove_const<T>::type>));
    if (x.count())
      save_impl(x.address(), x.count()*sizeof(T));
  }

  template<class T>
  void save(serialization::array<T> const& x)
  {
    save_array(x,0u);
  }

  typedef serialization::is_bitwise_serializable<mpl::_1> use_array_optimization;

  // default saving of primitives.
  template<class T>
  void save(const T & t)
  {
    BOOST_MPL_ASSERT((serialization::is_bitwise_serializable<BOOST_DEDUCED_TYPENAME remove_const<T>::type>));
    save_impl(&t, sizeof(T));
  }

  template<class CharType>
  void save(const std::basic_string<CharType> &s)
  {
    unsigned int l = static_cast<unsigned int>(s.size());
    save(l);
    // NOTE(review): appends s.size() bytes, not s.size()*sizeof(CharType).
    // The matching loader reads the same count, so the pair is
    // self-consistent for narrow strings — confirm wide-character
    // strings are not expected here.
    save_impl(s.data(),s.size());
  }

private:
  /// Append @p l raw bytes from @p p to the end of the buffer.
  void save_impl(void const * p, int l)
  {
    char const* ptr = reinterpret_cast<char const*>(p);
    buffer_.insert(buffer_.end(),ptr,ptr+l);
  }

  buffer_type& buffer_;
  mutable std::size_t size_;  // cache backing the reference returned by size()
};
} } // end namespace boost::mpi
#endif // BOOST_MPI_BINARY_BUFFER_OPRIMITIVE_HPP

View File

@@ -0,0 +1,41 @@
// Copyright (C) 2005, 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Allows broadcast of skeletons via proxy.
// This header may only be included after both the broadcast.hpp and
// and skeleton_and_content.hpp headers have been included.
#ifndef BOOST_MPI_BROADCAST_SC_HPP
#define BOOST_MPI_BROADCAST_SC_HPP
namespace boost { namespace mpi {
template<typename T>
inline void
broadcast(const communicator& comm, skeleton_proxy<T>& proxy, int root)
{
const skeleton_proxy<T>& const_proxy(proxy);
broadcast(comm, const_proxy, root);
}
template<typename T>
void
broadcast(const communicator& comm, const skeleton_proxy<T>& proxy, int root)
{
if (comm.rank() == root) {
packed_skeleton_oarchive oa(comm);
oa << proxy.object;
broadcast(comm, oa, root);
} else {
packed_skeleton_iarchive ia(comm);
broadcast(comm, ia, root);
ia >> proxy.object;
}
}
} } // end namespace boost::mpi
#endif // BOOST_MPI_BROADCAST_SC_HPP

View File

@@ -0,0 +1,96 @@
// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Skeleton and content support for communicators
// This header should be included only after both communicator.hpp and
// skeleton_and_content.hpp have been included.
#ifndef BOOST_MPI_COMMUNICATOR_SC_HPP
#define BOOST_MPI_COMMUNICATOR_SC_HPP
namespace boost { namespace mpi {
// Sends the skeleton (structure, not data) of proxy.object: the
// skeleton is packed into an archive, which is sent as a message.
template<typename T>
void
communicator::send(int dest, int tag, const skeleton_proxy<T>& proxy) const
{
  packed_skeleton_oarchive ar(*this);
  ar << proxy.object;
  send(dest, tag, ar);
}
// Receives a skeleton into proxy.object: the packed skeleton archive
// is received as a message, then unpacked into the proxied object.
template<typename T>
status
communicator::recv(int source, int tag, const skeleton_proxy<T>& proxy) const
{
  packed_skeleton_iarchive ar(*this);
  status result = recv(source, tag, ar);
  ar >> proxy.object;
  return result;
}
// Non-const-proxy overload; identical in behavior to the const-proxy
// recv above (the proxied object is reached through proxy.object).
template<typename T>
status communicator::recv(int source, int tag, skeleton_proxy<T>& proxy) const
{
  packed_skeleton_iarchive ar(*this);
  status result = recv(source, tag, ar);
  ar >> proxy.object;
  return result;
}
// Non-blocking send of a skeleton.  The archive must outlive this
// call, so ownership is transferred to the request (result.m_data),
// keeping the packed buffer alive until the send completes.
template<typename T>
request
communicator::isend(int dest, int tag, const skeleton_proxy<T>& proxy) const
{
  shared_ptr<packed_skeleton_oarchive>
    archive(new packed_skeleton_oarchive(*this));
  *archive << proxy.object;
  request result = isend(dest, tag, *archive);
  result.m_data = archive;
  return result;
}
namespace detail {
  // irecv state for receiving a skeleton: holds the archive into which
  // the raw message is received and the proxy whose object the
  // skeleton is applied to once the receive completes.
  template<typename T>
  struct serialized_irecv_data<const skeleton_proxy<T> >
  {
    serialized_irecv_data(const communicator& comm, int source, int tag,
                          skeleton_proxy<T> proxy)
      : comm(comm), source(source), tag(tag), isa(comm),
        ia(isa.get_skeleton()), proxy(proxy) { }

    // Applies the received skeleton to proxy.object and records a
    // count of one (a single skeleton was received).
    void deserialize(status& stat)
    {
      isa >> proxy.object;
      stat.m_count = 1;
    }

    communicator comm;
    int source;
    int tag;
    std::size_t count;
    packed_skeleton_iarchive isa;
    packed_iarchive& ia;  // refers to isa's underlying skeleton archive
    skeleton_proxy<T> proxy;
  };

  // Non-const proxies are received exactly like const ones.
  template<typename T>
  struct serialized_irecv_data<skeleton_proxy<T> >
    : public serialized_irecv_data<const skeleton_proxy<T> >
  {
    typedef serialized_irecv_data<const skeleton_proxy<T> > inherited;

    serialized_irecv_data(const communicator& comm, int source, int tag,
                          const skeleton_proxy<T>& proxy)
      : inherited(comm, source, tag, proxy) { }
  };
}
} } // end namespace boost::mpi
#endif // BOOST_MPI_COMMUNICATOR_SC_HPP

View File

@@ -0,0 +1,86 @@
// Copyright (C) 2005 Douglas Gregor.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Compute parents, children, levels, etc. to effect a parallel
// computation tree.
#ifndef BOOST_MPI_COMPUTATION_TREE_HPP
#define BOOST_MPI_COMPUTATION_TREE_HPP
namespace boost { namespace mpi { namespace detail {
/**
* @brief Aids tree-based parallel collective algorithms.
*
* Objects of this type
*/
class computation_tree
{
 public:
  // NOTE(review): branching_factor == -1 presumably selects
  // default_branching_factor — confirm against the constructor's
  // definition (not visible in this header).
  computation_tree(int rank, int size, int root, int branching_factor = -1);

  /// Returns the branching factor of the tree.
  int branching_factor() const { return branching_factor_; }

  /// Returns the level in the tree on which this process resides.
  int level() const { return level_; }

  /**
   * Returns the index corresponding to the n^th level of the tree.
   *
   * @param n The level in the tree whose index will be returned.
   */
  int level_index(int n) const;

  /**
   * @brief Returns the parent of this process.
   *
   * @returns If this process is the root, returns itself. Otherwise,
   * returns the process number that is the parent in the computation
   * tree.
   */
  int parent() const;

  /// Returns the index for the first child of this process.
  int child_begin() const;

  /**
   * @brief The default branching factor within the computation tree.
   *
   * This is the default branching factor for the computation tree, to
   * be used by any computation tree that does not fix the branching
   * factor itself. The default is initialized to 3, but may be
   * changed by the application so long as all processes have the same
   * branching factor.
   */
  static int default_branching_factor;

 protected:
  /// The rank of this process in the computation tree.
  int rank;

  /// The number of processes participating in the computation tree.
  int size;

  /// The process number that is acting as the root in the computation
  /// tree.
  int root;

  /**
   * @brief The branching factor within the computation tree.
   *
   * This is the default number of children that each node in a
   * computation tree will have. This value will be used for
   * collective operations that use tree-based algorithms.
   */
  int branching_factor_;

  /// The level in the tree at which this process resides.
  int level_;
};
} } } // end namespace boost::mpi::detail
#endif // BOOST_MPI_COMPUTATION_TREE_HPP

View File

@@ -0,0 +1,66 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#ifndef BOOST_MPI_DETAIL_CONTENT_OARCHIVE_HPP
#define BOOST_MPI_DETAIL_CONTENT_OARCHIVE_HPP
#include <boost/archive/detail/auto_link_archive.hpp>
#include <boost/archive/basic_archive.hpp>
#include <boost/mpi/detail/ignore_skeleton_oarchive.hpp>
#include <boost/mpi/detail/mpi_datatype_primitive.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/archive/detail/register_archive.hpp>
namespace boost { namespace mpi {
namespace detail {
// an archive wrapper that stores only the data members but not the
// special types defined by the serialization library
// to define the data skeletons (classes, pointers, container sizes, ...)
class BOOST_MPI_DECL content_oarchive
  : public mpi_datatype_primitive,
    public ignore_skeleton_oarchive<content_oarchive>
{
public:
  content_oarchive()
   : committed(false)
  {}

  /// Returns the content (MPI datatype) describing the data members
  /// serialized into this archive.  The datatype is built on the
  /// first call and cached; subsequent calls return the cached value.
  content get_content()
  {
    if (!committed)
    {
      // create the content holder only once
      c=this->get_mpi_datatype();
      committed=true;
    }
    return c;
  }

private:
  bool committed;  // true once c has been built
  content c;       // cached content descriptor
};
} // end namespace detail
// Builds the content (an MPI datatype describing the data members of
// @p x) by serializing x through a content_oarchive.
template <class T>
const content get_content(const T& x)
{
  detail::content_oarchive ar;
  ar << x;
  return ar.get_content();
}
} } // end namespace boost::mpi
// required by export
BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::detail::content_oarchive)
BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::detail::ignore_skeleton_oarchive<boost::mpi::detail::content_oarchive>)
BOOST_SERIALIZATION_USE_ARRAY_OPTIMIZATION(boost::mpi::detail::content_oarchive)
#endif // BOOST_MPI_DETAIL_CONTENT_OARCHIVE_HPP

View File

@@ -0,0 +1,72 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#include <boost/serialization/array.hpp>
#ifndef BOOST_MPI_DETAIL_FORWARD_IPRIMITIVE_HPP
#define BOOST_MPI_DETAIL_FORWARD_IPRIMITIVE_HPP
namespace boost { namespace mpi { namespace detail {
/// @brief a minimal input archive, which forwards reading to another archive
///
/// This class template is designed to use the loading facilities of another
/// input archive (the "implementation archive", whose type is specified by
/// the template argument) to handle serialization of primitive types,
/// while serialization for specific types can be overridden independently
/// of that archive.
template <class ImplementationArchive>
class forward_iprimitive
{
public:
  /// the type of the archive to which the loading of primitive types will be forwarded
  typedef ImplementationArchive implementation_archive_type;

  /// the constructor takes a reference to the implementation archive used for loading primitive types
  forward_iprimitive(implementation_archive_type& ar)
   : implementation_archive(ar)
  {}

  /// binary loading is forwarded to the implementation archive
  void load_binary(void * address, std::size_t count )
  {
    implementation_archive.load_binary(address,count);
  }

  /// loading of arrays is forwarded to the implementation archive
  template<class T>
  void load_array(serialization::array<T> & x, unsigned int file_version )
  {
    implementation_archive.load_array(x,file_version);
  }

  /// the array-optimization policy is inherited from the implementation archive
  typedef typename ImplementationArchive::use_array_optimization use_array_optimization;

#ifndef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
  friend class archive::load_access;
protected:
#else
public:
#endif

  /// loading of primitives is forwarded to the implementation archive
  template<class T>
  void load(T & t)
  {
    implementation_archive >> t;
  }

private:
  implementation_archive_type& implementation_archive;
};
} } } // end namespace boost::mpi::detail
#endif // BOOST_MPI_DETAIL_FORWARD_IPRIMITIVE_HPP

View File

@@ -0,0 +1,73 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#ifndef BOOST_MPI_DETAIL_FORWARD_OPRIMITIVE_HPP
#define BOOST_MPI_DETAIL_FORWARD_OPRIMITIVE_HPP
#include <boost/config.hpp>
#include <boost/serialization/array.hpp>
namespace boost { namespace mpi { namespace detail {
/// @brief a minimal output archive, which forwards saving to another archive
///
/// This class template is designed to use the saving facilities of another
/// output archive (the "implementation archive", whose type is specified by
/// the template argument, to handle serialization of primitive types,
/// while serialization for specific types can be overriden independently
/// of that archive.
template <class ImplementationArchive>
class forward_oprimitive
{
public:
/// the type of the archive to which the saving of primitive types will be forwarded
typedef ImplementationArchive implementation_archive_type;
/// the constructor takes a reference to the implementation archive used for saving primitive types
/// (the reference is stored; the implementation archive must outlive this object)
forward_oprimitive(implementation_archive_type& ar)
: implementation_archive(ar)
{}
/// binary saving is forwarded to the implementation archive
void save_binary(const void * address, std::size_t count)
{
implementation_archive.save_binary(address,count);
}
/// saving of arrays is forwarded to the implementation archive
template<class T>
void save_array(serialization::array<T> const& x, unsigned int file_version )
{
implementation_archive.save_array(x,file_version);
}
/// array optimization is decided by the implementation archive, not by this wrapper
typedef typename ImplementationArchive::use_array_optimization use_array_optimization;
#ifndef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
// keep save() protected and let the serialization library reach it via save_access;
// compilers lacking member-template friends must leave it public instead
friend class archive::save_access;
protected:
#else
public:
#endif
/// saving of primitives is forwarded to the implementation archive
/// (note: via operator<<, i.e. through the implementation archive's full
/// serialization path, not its raw save())
template<class T>
void save(const T & t)
{
implementation_archive << t;
}
private:
/// the archive that actually performs all saving
implementation_archive_type& implementation_archive;
};
} } } // end namespace boost::mpi::detail
#endif // BOOST_MPI_DETAIL_FORWARD_OPRIMITIVE_HPP

View File

@@ -0,0 +1,84 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#ifndef BOOST_MPI_DETAIL_FORWARD_SKELETON_IARCHIVE_HPP
#define BOOST_MPI_DETAIL_FORWARD_SKELETON_IARCHIVE_HPP
#include <boost/serialization/pfto.hpp>
#include <boost/archive/detail/auto_link_archive.hpp>
#include <boost/archive/detail/iserializer.hpp>
#include <boost/archive/detail/interface_iarchive.hpp>
#include <boost/archive/detail/common_iarchive.hpp>
#include <boost/serialization/collection_size_type.hpp>
namespace boost { namespace mpi { namespace detail {
/// @brief an input archive that forwards the serialization-library
/// bookkeeping types (class ids, object ids, versions, tracking flags,
/// collection sizes) to another "implementation" archive, while user data
/// goes through the usual common_iarchive dispatch.
template<class Archive, class ImplementationArchive>
class forward_skeleton_iarchive
: public archive::detail::common_iarchive<Archive>
{
public:
/// the archive type that receives all forwarded bookkeeping loads
typedef ImplementationArchive implementation_archive_type;
/// stores a reference to @p ar; no archive header is read (archive::no_header)
forward_skeleton_iarchive(implementation_archive_type& ar)
: archive::detail::common_iarchive<Archive>(archive::no_header),
implementation_archive(ar)
{
}
#ifdef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
public:
#else
friend class archive::detail::interface_iarchive<Archive>;
friend class archive::load_access;
protected:
#endif
// intermediate level to support override of operators
// for templates in the absence of partial function
// template ordering
template<class T>
void load_override(T & t, BOOST_PFTO int)
{
archive::load(* this->This(), t);
}
// generates exact-match (non-template) overloads that forward a given
// bookkeeping type straight to the implementation archive; these are
// preferred over the template overload above
#define BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(T) \
void load_override(T & t , int) \
{ \
implementation_archive >> t; \
}
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_id_optional_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::version_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_id_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_id_reference_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::object_id_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::tracking_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_name_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(serialization::collection_size_type)
// strings are part of the skeleton only by their length: read the size
// from the implementation archive and resize; the characters themselves
// are never loaded here
void load_override(std::string & s , int)
{
serialization::collection_size_type length(s.size());
load_override(length,0);
s.resize(length);
}
#undef BOOST_ARCHIVE_FORWARD_IMPLEMENTATION
protected:
/// the actual archive used to serialize the information we actually want to store
implementation_archive_type& implementation_archive;
};
} } } // end namespace boost::mpi::detail
#endif // BOOST_MPI_DETAIL_FORWARD_SKELETON_IARCHIVE_HPP

View File

@@ -0,0 +1,83 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#ifndef BOOST_MPI_DETAIL_FORWARD_SKELETON_OARCHIVE_HPP
#define BOOST_MPI_DETAIL_FORWARD_SKELETON_OARCHIVE_HPP
#include <boost/serialization/pfto.hpp>
#include <boost/archive/detail/auto_link_archive.hpp>
#include <boost/archive/detail/oserializer.hpp>
#include <boost/archive/detail/interface_oarchive.hpp>
#include <boost/archive/detail/common_oarchive.hpp>
#include <boost/serialization/collection_size_type.hpp>
namespace boost { namespace mpi { namespace detail {
/// @brief an output archive that forwards the serialization-library
/// bookkeeping types (class ids, object ids, versions, tracking flags,
/// collection sizes) to another "implementation" archive, while user data
/// goes through the usual common_oarchive dispatch.
template<class Archive, class ImplementationArchive>
class forward_skeleton_oarchive
: public archive::detail::common_oarchive<Archive>
{
public:
/// the archive type that receives all forwarded bookkeeping saves
typedef ImplementationArchive implementation_archive_type;
/// stores a reference to @p ar; no archive header is written (archive::no_header)
forward_skeleton_oarchive(implementation_archive_type& ar)
: archive::detail::common_oarchive<Archive>(archive::no_header),
implementation_archive(ar)
{
}
#ifdef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
public:
#else
friend class archive::detail::interface_oarchive<Archive>;
friend class archive::save_access;
protected:
#endif
// intermediate level to support override of operators
// for templates in the absence of partial function
// template ordering
template<class T>
void save_override(T const& t, BOOST_PFTO int)
{
archive::save(* this->This(), t);
}
// generates exact-match (non-template) overloads that forward a given
// bookkeeping type straight to the implementation archive; these are
// preferred over the template overload above
#define BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(T) \
void save_override(T const & t , int) \
{ \
implementation_archive << t; \
}
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_id_optional_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::version_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_id_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_id_reference_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::object_id_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::object_reference_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::tracking_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_name_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(serialization::collection_size_type)
// strings are part of the skeleton only by their length: write the size
// to the implementation archive; the characters themselves are never saved
void save_override(std::string const & t , int)
{
save_override(serialization::collection_size_type(t.size()),0);
}
#undef BOOST_ARCHIVE_FORWARD_IMPLEMENTATION
protected:
/// the actual archive used to serialize the information we actually want to store
implementation_archive_type& implementation_archive;
};
} } } // end namespace boost::mpi::detail
#endif // BOOST_MPI_DETAIL_FORWARD_SKELETON_OARCHIVE_HPP

View File

@@ -0,0 +1,54 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#ifndef BOOST_MPI_DETAIL_IGNORE_IPRIMITIVE_HPP
#define BOOST_MPI_DETAIL_IGNORE_IPRIMITIVE_HPP
#include <boost/config.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/serialization/array.hpp>
namespace boost { namespace mpi { namespace detail {
/// @brief a minimal input archive, which ignores any load
///
/// This class implements a minimal input archive, probably an input archive
/// archetype, doing nothing at any load. It's use, besides acting as an
/// archetype is as a base class to implement special archives that ignore
/// loading of most types
class ignore_iprimitive
{
public:
/// a trivial default constructor
ignore_iprimitive()
{}

/// don't do anything when loading binary data
void load_binary(void *, std::size_t )
{}

/// don't do anything when loading arrays
template<class T>
void load_array(serialization::array<T> &, unsigned int )
{}

/// the array optimization query is answered by the MPI datatype trait
typedef is_mpi_datatype<mpl::_1> use_array_optimization;

/// don't do anything when loading primitive types
/// (parameter left unnamed, consistent with the other no-op members,
/// so that -Wunused-parameter builds stay clean)
template<class T>
void load(T &)
{
}
};
} } } // end namespace boost::mpi::detail
#endif // BOOST_MPI_DETAIL_IGNORE_IPRIMITIVE_HPP

View File

@@ -0,0 +1,62 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#ifndef BOOST_MPI_DETAIL_IGNORE_OPRIMITIVE_HPP
#define BOOST_MPI_DETAIL_IGNORE_OPRIMITIVE_HPP
#include <boost/config.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/serialization/array.hpp>
namespace boost { namespace mpi { namespace detail {
/// @brief a minimal output archive, which ignores any save
///
/// This class implements a minimal output archive, probably an output archive
/// archetype, doing nothing at any save. It's use, besides acting as an
/// archetype is as a base class to implement special archives that ignore
/// saving of most types
class ignore_oprimitive
{
public:
/// a trivial default constructor
ignore_oprimitive()
{}

/// don't do anything when saving binary data
void save_binary(const void *, std::size_t )
{
}

/// don't do anything when saving arrays
template<class T>
void save_array(serialization::array<T> const&, unsigned int )
{
}

/// the array optimization query is answered by the MPI datatype trait
typedef is_mpi_datatype<mpl::_1> use_array_optimization;
#ifndef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
// keep save() protected and let the serialization library reach it via
// save_access; compilers lacking member-template friends must leave it public
friend class archive::save_access;
protected:
#else
public:
#endif
/// don't do anything when saving primitive types
/// (parameter left unnamed, consistent with the other no-op members,
/// so that -Wunused-parameter builds stay clean)
template<class T>
void save(const T &)
{
}
};
} } } // end namespace boost::mpi::detail
#endif // BOOST_MPI_DETAIL_IGNORE_OPRIMITIVE_HPP

View File

@@ -0,0 +1,79 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#ifndef BOOST_MPI_DETAIL_IGNORE_SKELETON_OARCHIVE_HPP
#define BOOST_MPI_DETAIL_IGNORE_SKELETON_OARCHIVE_HPP
#include <boost/serialization/pfto.hpp>
#include <boost/archive/detail/auto_link_archive.hpp>
#include <boost/archive/detail/common_oarchive.hpp>
#include <boost/archive/basic_archive.hpp>
#include <boost/archive/detail/oserializer.hpp>
#include <boost/serialization/collection_size_type.hpp>
#include <boost/serialization/array.hpp>
#include <boost/serialization/item_version_type.hpp>
namespace boost { namespace mpi { namespace detail {
/// @brief an output archive base that discards all serialization-library
/// bookkeeping types; derived archives (CRTP parameter @c Archive) see only
/// the actual user data.
template<class Archive>
class ignore_skeleton_oarchive
: public archive::detail::common_oarchive<Archive>
{
public:
/// no archive header is written (archive::no_header)
ignore_skeleton_oarchive()
: archive::detail::common_oarchive<Archive>(archive::no_header)
{
}
#ifdef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
public:
#else
friend class archive::detail::interface_oarchive<Archive>;
friend class archive::save_access;
protected:
#endif
// intermediate level to support override of operators
// for templates in the absence of partial function
// template ordering
template<class T>
void save_override(T const& t, BOOST_PFTO int)
{
archive::save(* this->This(), t);
}
// generates exact-match (non-template) no-op overloads for each
// bookkeeping type; these are preferred over the template overload above
#define BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(T) \
void save_override(T const & , int) \
{}
BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::class_id_optional_type)
BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::version_type)
BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::library_version_type)
BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::class_id_type)
BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::class_id_reference_type)
BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::object_id_type)
BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::object_reference_type)
BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::tracking_type)
BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::class_name_type)
BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(serialization::collection_size_type)
BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(serialization::item_version_type)
// strings are not bookkeeping: forward their characters as an array so the
// derived archive's array handling sees the contents (empty strings are skipped)
void save_override(std::string const & s, int)
{
if (s.size())
save_override(serialization::make_array(s.data(),s.size()),0);
}
#undef BOOST_ARCHIVE_IGNORE_IMPLEMENTATION
};
} } } // end namespace boost::mpi::detail
#endif // BOOST_MPI_DETAIL_IGNORE_SKELETON_OARCHIVE_HPP

View File

@@ -0,0 +1,99 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#ifndef BOOST_MPI_DETAIL_TYPE_MPI_DATATYPE_CACHE_HPP
#define BOOST_MPI_DETAIL_TYPE_MPI_DATATYPE_CACHE_HPP
#include <boost/mpi/datatype_fwd.hpp>
#include <boost/mpi/detail/mpi_datatype_oarchive.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/utility/enable_if.hpp>
#include <boost/mpl/assert.hpp>
#include <boost/noncopyable.hpp>
#include <typeinfo>
// The std::type_info::before function in Visual C++ 8.0 (and probably earlier)
// incorrectly returns an "int" instead of a "bool". Then the compiler has the
// audacity to complain when that "int" is converted to a "bool". Silence
// this warning.
#ifdef BOOST_MSVC
# pragma warning(push)
# pragma warning(disable : 4800)
#endif
namespace boost { namespace mpi { namespace detail {
/// @brief strict-weak-ordering functor over std::type_info pointers
///
/// Delegates to std::type_info::before(), which defines an
/// implementation-specific collation order over types; suitable as the
/// comparator of an associative container keyed by type_info pointers.
struct type_info_compare
{
bool operator()(std::type_info const* first, std::type_info const* second) const
{
bool const first_precedes_second = first->before(*second);
return first_precedes_second;
}
};
/// @brief a map of MPI data types, indexed by their type_info
///
/// Builtin MPI datatypes are returned directly; for other MPI-serializable
/// types a derived MPI datatype is built once (via mpi_datatype_oarchive)
/// and cached, keyed by the type's std::type_info.
class BOOST_MPI_DECL mpi_datatype_map
: public boost::noncopyable
{
// pimpl: the map storage and its synchronization (if any) live in the
// out-of-line implementation
struct implementation;
implementation *impl;
public:
mpi_datatype_map();
~mpi_datatype_map();
/// overload selected for builtin MPI datatypes: no caching needed,
/// the datatype is obtained directly from the trait
template <class T>
MPI_Datatype datatype(const T& x = T(), typename boost::enable_if<is_mpi_builtin_datatype<T> >::type* =0)
{
return get_mpi_datatype<T>(x);
}
/// overload selected for non-builtin MPI-serializable types: look the
/// datatype up in the cache, building and committing it on first use
template <class T>
MPI_Datatype datatype(const T& x =T(), typename boost::disable_if<is_mpi_builtin_datatype<T> >::type* =0 )
{
BOOST_MPL_ASSERT((is_mpi_datatype<T>));
// check whether the type already exists
std::type_info const* t = &typeid(T);
MPI_Datatype datatype = get(t);
if (datatype == MPI_DATATYPE_NULL) {
// need to create a type
mpi_datatype_oarchive ar(x);
datatype = ar.get_mpi_datatype();
set(t, datatype);
}
return datatype;
}
/// discard all cached datatypes
void clear();
private:
// cache accessors, defined out of line with the implementation struct
MPI_Datatype get(const std::type_info* t);
void set(const std::type_info* t, MPI_Datatype datatype);
};
/// Retrieve the MPI datatype cache
BOOST_MPI_DECL mpi_datatype_map& mpi_datatype_cache();
} } } // end namespace boost::mpi::detail
#ifdef BOOST_MSVC
# pragma warning(pop)
#endif
#endif // BOOST_MPI_DETAIL_TYPE_MPI_DATATYPE_CACHE_HPP

View File

@@ -0,0 +1,78 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#ifndef BOOST_MPI_DETAIL_MPI_DATATYPE_OARCHIVE_HPP
#define BOOST_MPI_DETAIL_MPI_DATATYPE_OARCHIVE_HPP
#include <boost/type_traits/is_enum.hpp>
#include <boost/mpl/bool.hpp>
#include <boost/archive/detail/oserializer.hpp>
#include <boost/archive/detail/auto_link_archive.hpp>
#include <boost/archive/basic_archive.hpp>
#include <boost/mpi/detail/ignore_skeleton_oarchive.hpp>
#include <boost/mpi/detail/mpi_datatype_primitive.hpp>
#include <boost/mpi/datatype_fwd.hpp>
#include <boost/mpl/assert.hpp>
#include <boost/static_assert.hpp>
#include <boost/integer.hpp>
#include <boost/archive/detail/register_archive.hpp>
namespace boost { namespace mpi { namespace detail {
// an archive wrapper that stores only the data members but not the
// special types defined by the serialization library
// to define the data skeletons (classes, pointers, container sizes, ...)
//
// Serializing an object through this archive records the address, MPI
// datatype and length of every primitive member (via mpi_datatype_primitive),
// from which a derived MPI datatype can then be built.
class mpi_datatype_oarchive
: public mpi_datatype_primitive,
public ignore_skeleton_oarchive<mpi_datatype_oarchive>
{
public:
/// record the layout of @p x: registers its base address, then serializes
/// it so every primitive member is captured
template <class T>
mpi_datatype_oarchive(const T& x)
: mpi_datatype_primitive(&x) // register address
{
BOOST_MPL_ASSERT((is_mpi_datatype<T>));
*this << x; // serialize the object
}
// intermediate level to support override of operators
// for templates in the absence of partial function
// template ordering
template<class T>
void save_override(T const& t, BOOST_PFTO int)
{
// enums need special treatment (see save_enum below); everything else
// goes through the skeleton-ignoring base
save_enum(t,boost::is_enum<T>());
}
/// non-enum case: defer to the base archive's dispatch
template<class T>
void save_enum(T const& t, mpl::false_)
{
ignore_skeleton_oarchive<mpi_datatype_oarchive>::save_override(t, 0);
}
/// enum case: record the enum as the integer type of identical size,
/// since enums themselves have no MPI datatype
template<class T>
void save_enum(T const& t, mpl::true_)
{
// select the right sized integer for the enum
typedef typename boost::uint_t<8*sizeof(T)>::least int_type;
BOOST_STATIC_ASSERT((sizeof(T)==sizeof(int_type)));
this->save(*reinterpret_cast<int_type const*>(&t));
}
};
} } } // end namespace boost::mpi::detail
// required by export
BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::detail::mpi_datatype_oarchive)
BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::detail::ignore_skeleton_oarchive<boost::mpi::detail::mpi_datatype_oarchive>)
BOOST_SERIALIZATION_USE_ARRAY_OPTIMIZATION(boost::mpi::detail::mpi_datatype_oarchive)
#endif // BOOST_MPI_DETAIL_MPI_DATATYPE_OARCHIVE_HPP

View File

@@ -0,0 +1,128 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#ifndef BOOST_MPI_DETAIL_MPI_DATATYPE_OPRIMITIVE_HPP
#define BOOST_MPI_DETAIL_MPI_DATATYPE_OPRIMITIVE_HPP
#include <boost/mpi/config.hpp>
#include <cstddef> // size_t
#include <boost/config.hpp>
#if defined(BOOST_NO_STDC_NAMESPACE)
namespace std{
using ::size_t;
} // namespace std
#endif
#include <boost/mpi/datatype_fwd.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/throw_exception.hpp>
#include <boost/assert.hpp>
#include <boost/mpl/placeholders.hpp>
#include <boost/serialization/array.hpp>
#include <boost/serialization/detail/get_data.hpp>
#include <stdexcept>
#include <iostream>
#include <vector>
namespace boost { namespace mpi { namespace detail {
/////////////////////////////////////////////////////////////////////////
// class mpi_data_type_oprimitive - creation of custom MPI data types
class mpi_datatype_primitive
{
public:
// trivial default constructor
mpi_datatype_primitive()
: is_committed(false),
origin(0)
{}
// record the base address of the object whose layout is being described;
// all member addresses saved later are stored relative to this origin
// NOTE(review): MPI_Address is the MPI-1 spelling (superseded by
// MPI_Get_address in MPI-2) -- confirm against the targeted MPI standard
mpi_datatype_primitive(void const* orig)
: is_committed(false),
origin()
{
BOOST_MPI_CHECK_RESULT(MPI_Address,(const_cast<void*>(orig), &origin));
}
// raw bytes are described as a block of MPI_BYTE
void save_binary(void const *address, std::size_t count)
{
save_impl(address,MPI_BYTE,count);
}
// fast saving of arrays of MPI types
template<class T>
void save_array(serialization::array<T> const& x, unsigned int /* version */)
{
if (x.count())
save_impl(x.address(), boost::mpi::get_mpi_datatype(*x.address()), x.count());
}
typedef is_mpi_datatype<mpl::_1> use_array_optimization;
// create and return the custom MPI data type
// (built lazily from the recorded addresses/types/lengths on first call,
// then committed and reused)
MPI_Datatype get_mpi_datatype()
{
if (!is_committed)
{
BOOST_MPI_CHECK_RESULT(MPI_Type_struct,
(
addresses.size(),
boost::serialization::detail::get_data(lengths),
boost::serialization::detail::get_data(addresses),
boost::serialization::detail::get_data(types),
&datatype_
));
BOOST_MPI_CHECK_RESULT(MPI_Type_commit,(&datatype_));
is_committed = true;
}
return datatype_;
}
// default saving of primitives.
template<class T>
void save(const T & t)
{
save_impl(&t, boost::mpi::get_mpi_datatype(t), 1);
}
private:
// append one (address, type, length) record; addresses are stored as
// displacements from the origin registered at construction
void save_impl(void const * p, MPI_Datatype t, int l)
{
// once the datatype is committed its description must not change
BOOST_ASSERT ( !is_committed );
// store address, type and length
MPI_Aint a;
BOOST_MPI_CHECK_RESULT(MPI_Address,(const_cast<void*>(p), &a));
addresses.push_back(a-origin);
types.push_back(t);
lengths.push_back(l);
}
// parallel arrays describing the struct layout, in the order members
// were serialized
std::vector<MPI_Aint> addresses;
std::vector<MPI_Datatype> types;
std::vector<int> lengths;
bool is_committed;
// valid only after the first get_mpi_datatype() call (guarded by is_committed)
MPI_Datatype datatype_;
MPI_Aint origin;
};
} } } // end namespace boost::mpi::detail
#endif // BOOST_MPI_DETAIL_MPI_DATATYPE_OPRIMITIVE_HPP

View File

@@ -0,0 +1,116 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#ifndef BOOST_MPI_PACKED_IPRIMITIVE_HPP
#define BOOST_MPI_PACKED_IPRIMITIVE_HPP
#include <boost/mpi/config.hpp>
#include <cstddef> // size_t
#include <boost/config.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/assert.hpp>
#include <boost/serialization/array.hpp>
#include <boost/serialization/detail/get_data.hpp>
#include <vector>
#include <boost/mpi/allocator.hpp>
namespace boost { namespace mpi {
/// deserialization using MPI_Unpack
class BOOST_MPI_DECL packed_iprimitive
{
public:
/// the type of the buffer from which the data is unpacked upon deserialization
typedef std::vector<char, allocator<char> > buffer_type;
/// unpack from buffer @p b on communicator @p comm, starting at offset
/// @p position; the buffer reference is stored, not copied
packed_iprimitive(buffer_type & b, MPI_Comm const & comm, int position = 0)
: buffer_(b),
comm(comm),
position(position)
{
}
void* address ()
{
return &buffer_[0];
}
void const* address () const
{
return &buffer_[0];
}
// returns a reference to a mutable cached copy of the buffer size
// (refreshed from the buffer on every call)
const std::size_t& size() const
{
return size_ = buffer_.size();
}
void resize(std::size_t s)
{
buffer_.resize(s);
}
/// raw bytes are unpacked as MPI_BYTE
void load_binary(void *address, std::size_t count)
{
load_impl(address,MPI_BYTE,count);
}
// fast saving of arrays of fundamental types
template<class T>
void load_array(serialization::array<T> const& x, unsigned int /* file_version */)
{
if (x.count())
load_impl(x.address(), get_mpi_datatype(*x.address()), x.count());
}
/*
template<class T>
void load(serialization::array<T> const& x)
{
load_array(x,0u);
}
*/
typedef is_mpi_datatype<mpl::_1> use_array_optimization;
// default saving of primitives.
template<class T>
void load( T & t)
{
load_impl(&t, get_mpi_datatype(t), 1);
}
// strings are encoded as a length followed by the raw characters
template<class CharType>
void load(std::basic_string<CharType> & s)
{
unsigned int l;
load(l);
s.resize(l);
// note breaking a rule here - could be a problem on some platform
// (writing through const_cast'ed data() assumes contiguous, writable
// string storage)
load_impl(const_cast<CharType *>(s.data()),get_mpi_datatype(CharType()),l);
}
private:
// unpack l elements of MPI datatype t into p, advancing the running
// position within the buffer
void load_impl(void * p, MPI_Datatype t, int l)
{
BOOST_MPI_CHECK_RESULT(MPI_Unpack,
(const_cast<char*>(boost::serialization::detail::get_data(buffer_)), buffer_.size(), &position, p, l, t, comm));
}
buffer_type & buffer_;
// cache for size(); mutable so the const accessor can refresh it
mutable std::size_t size_;
MPI_Comm comm;
// current unpack offset within buffer_
int position;
};
} } // end namespace boost::mpi
#endif // BOOST_MPI_PACKED_IPRIMITIVE_HPP

View File

@@ -0,0 +1,114 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#ifndef BOOST_MPI_PACKED_OPRIMITIVE_HPP
#define BOOST_MPI_PACKED_OPRIMITIVE_HPP
#include <boost/mpi/config.hpp>
#include <cstddef> // size_t
#include <boost/config.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/serialization/detail/get_data.hpp>
#include <boost/serialization/array.hpp>
#include <boost/assert.hpp>
#include <vector>
#include <boost/mpi/allocator.hpp>
namespace boost { namespace mpi {
/// serialization using MPI::Pack
class BOOST_MPI_DECL packed_oprimitive
{
public:
/// the type of the buffer into which the data is packed upon serialization
typedef std::vector<char, allocator<char> > buffer_type;
/// pack into buffer @p b on communicator @p comm; the buffer reference is
/// stored, not copied, and data is appended at its current end
packed_oprimitive(buffer_type & b, MPI_Comm const & comm)
: buffer_(b),
comm(comm)
{
}
void const * address() const
{
return &buffer_[0];
}
// returns a reference to a mutable cached copy of the buffer size
// (refreshed from the buffer on every call)
const std::size_t& size() const
{
return size_ = buffer_.size();
}
/// raw bytes are packed as MPI_BYTE
void save_binary(void const *address, std::size_t count)
{
save_impl(address,MPI_BYTE,count);
}
// fast saving of arrays
template<class T>
void save_array(serialization::array<T> const& x, unsigned int /* file_version */)
{
if (x.count())
save_impl(x.address(), get_mpi_datatype(*x.address()), x.count());
}
typedef is_mpi_datatype<mpl::_1> use_array_optimization;
#ifndef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
// keep save() protected and let the serialization library reach it via
// save_access; compilers lacking member-template friends must leave it public
friend class archive::save_access;
protected:
#else
public:
#endif
// default saving of primitives.
template<class T>
void save(const T & t)
{
save_impl(&t, get_mpi_datatype<T>(t), 1);
}
// strings are encoded as a length followed by the raw characters
template<class CharType>
void save(const std::basic_string<CharType> &s)
{
unsigned int l = static_cast<unsigned int>(s.size());
save(l);
save_impl(s.data(),get_mpi_datatype(CharType()),s.size());
}
private:
// pack l elements of MPI datatype t from p onto the end of the buffer,
// growing it by MPI_Pack_size's upper bound and trimming afterwards
void save_impl(void const * p, MPI_Datatype t, int l)
{
// allocate enough memory
int memory_needed;
BOOST_MPI_CHECK_RESULT(MPI_Pack_size,(l,t,comm,&memory_needed));
int position = buffer_.size();
buffer_.resize(position + memory_needed);
// pack the data into the buffer
BOOST_MPI_CHECK_RESULT(MPI_Pack,
(const_cast<void*>(p), l, t, boost::serialization::detail::get_data(buffer_), buffer_.size(), &position, comm));
// reduce the buffer size if needed
// (MPI_Pack_size is an upper bound; position is the actual packed end)
BOOST_ASSERT(std::size_t(position) <= buffer_.size());
if (std::size_t(position) < buffer_.size())
buffer_.resize(position);
}
buffer_type& buffer_;
// cache for size(); mutable so the const accessor can refresh it
mutable std::size_t size_;
MPI_Comm comm;
};
} } // end namespace boost::mpi
#endif // BOOST_MPI_PACKED_OPRIMITIVE_HPP

View File

@@ -0,0 +1,52 @@
// Copyright 2005 Douglas Gregor.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Message Passing Interface 1.1 -- Section 3. MPI Point-to-point
#ifndef BOOST_MPI_DETAIL_POINT_TO_POINT_HPP
#define BOOST_MPI_DETAIL_POINT_TO_POINT_HPP
// For (de-)serializing sends and receives
#include <boost/mpi/config.hpp>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
namespace boost { namespace mpi { namespace detail {
/** Sends a packed archive using MPI_Send.
 *
 * @param comm The communicator over which to send.
 * @param dest Rank of the destination process.
 * @param tag  Message tag.
 * @param ar   The packed archive whose buffer is transmitted.
 */
BOOST_MPI_DECL void
packed_archive_send(MPI_Comm comm, int dest, int tag,
const packed_oarchive& ar);
/** Sends a packed archive using MPI_Isend.
 *
 * This routine may split sends into multiple packets. The MPI_Request
 * for each packet will be placed into the out_requests array, up to
 * num_out_requests packets. The number of packets sent will be
 * returned from the function.
 *
 * @pre num_out_requests >= 2
 */
BOOST_MPI_DECL int
packed_archive_isend(MPI_Comm comm, int dest, int tag,
const packed_oarchive& ar,
MPI_Request* out_requests, int num_out_requests);
/**
 * \overload
 *
 * Same as above, but the payload is taken from a packed input archive
 * (e.g. for forwarding data that was previously received).
 */
BOOST_MPI_DECL int
packed_archive_isend(MPI_Comm comm, int dest, int tag,
const packed_iarchive& ar,
MPI_Request* out_requests, int num_out_requests);
/** Receives a packed archive using MPI_Recv.
 *
 * @param comm   The communicator over which to receive.
 * @param source Rank of the sending process.
 * @param tag    Message tag.
 * @param ar     Archive that receives the packed data.
 * @param status Filled in with the receive status.
 */
BOOST_MPI_DECL void
packed_archive_recv(MPI_Comm comm, int source, int tag, packed_iarchive& ar,
MPI_Status& status);
} } } // end namespace boost::mpi::detail
#endif // BOOST_MPI_DETAIL_POINT_TO_POINT_HPP

View File

@@ -0,0 +1,50 @@
// (C) Copyright 2005 Matthias Troyer
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
#ifndef BOOST_MPI_TEXT_SKELETON_OARCHIVE_HPP
#define BOOST_MPI_TEXT_SKELETON_OARCHIVE_HPP
#include <boost/archive/detail/auto_link_archive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/mpi/detail/forward_skeleton_oarchive.hpp>
#include <boost/mpi/detail/ignore_oprimitive.hpp>
#include <boost/archive/detail/register_archive.hpp>
namespace boost { namespace mpi {
// an archive that writes a text skeleton into a stream
//
// Combines ignore_oprimitive (primitives are discarded) with
// forward_skeleton_oarchive (structure bookkeeping is forwarded to an
// embedded boost::archive::text_oarchive writing to the given stream),
// so only the skeleton of the data ends up in the stream.
class text_skeleton_oarchive
: public detail::ignore_oprimitive,
public detail::forward_skeleton_oarchive<text_skeleton_oarchive,boost::archive::text_oarchive>
{
public:
/// write the skeleton to stream @p s; @p flags are passed to the
/// underlying text_oarchive
// NOTE: skeleton_archive_ is declared after the base class, so the base
// receives a reference to a member that is constructed afterwards; the
// base only stores the reference, so this is safe, but the declaration
// order must not be changed.
text_skeleton_oarchive(std::ostream & s, unsigned int flags = 0)
: detail::forward_skeleton_oarchive<text_skeleton_oarchive,boost::archive::text_oarchive>(skeleton_archive_)
, skeleton_archive_(s,flags)
{}
private:
/// the text archive that actually writes the skeleton data
boost::archive::text_oarchive skeleton_archive_;
};
namespace detail {
// named typedef so the base archive specialization can be registered
// with BOOST_SERIALIZATION_REGISTER_ARCHIVE below
typedef boost::mpi::detail::forward_skeleton_oarchive<boost::mpi::text_skeleton_oarchive,boost::archive::text_oarchive> type3;
}
} } // end namespace boost::mpi
// required by export
BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::text_skeleton_oarchive)
BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::detail::type3)
#endif // BOOST_MPI_TEXT_SKELETON_OARCHIVE_HPP

201
test/external/boost/mpi/environment.hpp vendored Normal file
View File

@@ -0,0 +1,201 @@
// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/** @file environment.hpp
*
* This header provides the @c environment class, which provides
* routines to initialize, finalization, and query the status of the
* Boost MPI environment.
*/
#ifndef BOOST_MPI_ENVIRONMENT_HPP
#define BOOST_MPI_ENVIRONMENT_HPP
#include <boost/mpi/config.hpp>
#include <boost/noncopyable.hpp>
#include <boost/optional.hpp>
#include <string>
namespace boost { namespace mpi {
/** @brief Initialize, finalize, and query the MPI environment.
*
* The @c environment class is used to initialize, finalize, and
* query the MPI environment. It will typically be used in the @c
* main() function of a program, which will create a single instance
* of @c environment initialized with the arguments passed to the
* program:
*
* @code
* int main(int argc, char* argv[])
* {
* mpi::environment env(argc, argv);
* }
* @endcode
*
* The instance of @c environment will initialize MPI (by calling @c
* MPI_Init) in its constructor and finalize MPI (by calling @c
* MPI_Finalize for normal termination or @c MPI_Abort for an
* uncaught exception) in its destructor.
*
* The use of @c environment is not mandatory. Users may choose to
* invoke @c MPI_Init and @c MPI_Finalize manually. In this case, no
* @c environment object is needed. If one is created, however, it
* will do nothing on either construction or destruction.
*/
class BOOST_MPI_DECL environment : noncopyable {
public:
#ifdef BOOST_MPI_HAS_NOARG_INITIALIZATION
/** Initialize the MPI environment.
*
* If the MPI environment has not already been initialized,
* initializes MPI with a call to @c MPI_Init. Since this
* constructor does not take command-line arguments (@c argc and @c
* argv), it is only available when the underlying MPI
* implementation supports calling @c MPI_Init with @c NULL
* arguments, indicated by the macro @c
* BOOST_MPI_HAS_NOARG_INITIALIZATION.
*
* @param abort_on_exception When true, this object will abort the
* program if it is destructed due to an uncaught exception.
*/
explicit environment(bool abort_on_exception = true);
#endif
/** Initialize the MPI environment.
*
* If the MPI environment has not already been initialized,
* initializes MPI with a call to @c MPI_Init.
*
* @param argc The number of arguments provided in @p argv, as
* passed into the program's @c main function.
*
* @param argv The array of argument strings passed to the program
* via @c main.
*
* @param abort_on_exception When true, this object will abort the
* program if it is destructed due to an uncaught exception.
*/
environment(int& argc, char** &argv, bool abort_on_exception = true);
/** Shuts down the MPI environment.
*
* If this @c environment object was used to initialize the MPI
* environment, and the MPI environment has not already been shut
* down (finalized), this destructor will shut down the MPI
* environment. Under normal circumstances, this only involves
* invoking @c MPI_Finalize. However, if destruction is the result
* of an uncaught exception and the @c abort_on_exception parameter
* of the constructor had the value @c true, this destructor will
* invoke @c MPI_Abort with @c MPI_COMM_WORLD to abort the entire
* MPI program with a result code of -1.
*/
~environment();
/** Abort all MPI processes.
*
* Aborts all MPI processes and returns to the environment. The
* precise behavior will be defined by the underlying MPI
* implementation. This is equivalent to a call to @c MPI_Abort
* with @c MPI_COMM_WORLD.
*
* @param errcode The error code to return to the environment.
* @returns Will not return.
*/
static void abort(int errcode);
/** Determine if the MPI environment has already been initialized.
*
* This routine is equivalent to a call to @c MPI_Initialized.
*
* @returns @c true if the MPI environment has been initialized.
*/
static bool initialized();
/** Determine if the MPI environment has already been finalized.
*
* The routine is equivalent to a call to @c MPI_Finalized.
*
* @returns @c true if the MPI environment has been finalized.
*/
static bool finalized();
/** Retrieves the maximum tag value.
*
* Returns the maximum value that may be used for the @c tag
* parameter of send/receive operations. This value will be
* somewhat smaller than the value of @c MPI_TAG_UB, because the
* Boost.MPI implementation reserves some tags for collective
* operations.
*
* @returns the maximum tag value.
*/
static int max_tag();
/** The tag value used for collective operations.
*
* Returns the reserved tag value used by the Boost.MPI
* implementation for collective operations. Although users are not
* permitted to use this tag to send or receive messages, it may be
* useful when monitoring communication patterns.
*
* @returns the tag value used for collective operations.
*/
static int collectives_tag();
/** Retrieves the rank of the host process, if one exists.
*
* If there is a host process, this routine returns the rank of
* that process. Otherwise, it returns an empty @c
* optional<int>. MPI does not define the meaning of a "host"
* process: consult the documentation for the MPI
* implementation. This routine examines the @c MPI_HOST attribute
* of @c MPI_COMM_WORLD.
*
* @returns The rank of the host process, if one exists.
*/
static optional<int> host_rank();
/** Retrieves the rank of a process that can perform input/output.
*
* This routine returns the rank of a process that can perform
* input/output via the standard C and C++ I/O facilities. If every
* process can perform I/O using the standard facilities, this
* routine will return @c any_source; if no process can perform
* I/O, this routine will return no value (an empty @c
* optional). This routine examines the @c MPI_IO attribute of @c
* MPI_COMM_WORLD.
*
* @returns the rank of the process that can perform I/O, @c
* any_source if every process can perform I/O, or no value if no
* process can perform I/O.
*/
static optional<int> io_rank();
/** Retrieve the name of this processor.
*
* This routine returns the name of this processor. The actual form
* of the name is unspecified, but may be documented by the
* underlying MPI implementation. This routine is implemented as a
* call to @c MPI_Get_processor_name.
*
* @returns the name of this processor.
*/
static std::string processor_name();
private:
/// Whether this environment object called MPI_Init
bool i_initialized;
/// Whether the destructor should invoke MPI_Abort when it runs as the
/// result of an uncaught exception (see the abort_on_exception
/// constructor parameter).
bool abort_on_exception;
/// The number of reserved tags. These are the tags Boost.MPI keeps for
/// its own use (e.g. collectives_tag()); max_tag() is reduced accordingly.
static const int num_reserved_tags = 1;
};
} } // end namespace boost::mpi
#endif // BOOST_MPI_ENVIRONMENT_HPP

104
test/external/boost/mpi/exception.hpp vendored Normal file
View File

@@ -0,0 +1,104 @@
// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor -at- gmail.com>.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/** @file exception.hpp
*
* This header provides exception classes that report MPI errors to
* the user and macros that translate MPI error codes into Boost.MPI
* exceptions.
*/
#ifndef BOOST_MPI_EXCEPTION_HPP
#define BOOST_MPI_EXCEPTION_HPP
#include <boost/mpi/config.hpp>
#include <exception>
#include <string>
#include <boost/config.hpp>
#include <boost/throw_exception.hpp>
namespace boost { namespace mpi {
/** @brief Catch-all exception class for MPI errors.
*
* Instances of this class will be thrown when an MPI error
* occurs. MPI failures that trigger these exceptions may or may not
* be recoverable, depending on the underlying MPI
* implementation. Consult the documentation for your MPI
* implementation to determine the effect of MPI errors.
*/
class BOOST_MPI_DECL exception : public std::exception
{
public:
/**
* Build a new @c exception object.
*
* @param routine The MPI routine in which the error
* occurred. This should be a pointer to a string constant: it
* will not be copied.
*
* @param result_code The result code returned from the MPI
* routine that aborted with an error.
*/
exception(const char* routine, int result_code);
virtual ~exception() throw();
/**
* A description of the error that occurred.
*/
virtual const char * what () const throw ()
{
// The formatted message is stored in this object, so the returned
// pointer remains valid for the lifetime of the exception.
return this->message.c_str();
}
/** Retrieve the name of the MPI routine that reported the error. */
const char* routine() const { return routine_; }
/**
* @brief Retrieve the result code returned from the MPI routine
* that reported the error.
*/
int result_code() const { return result_code_; }
/**
* @brief Returns the MPI error class associated with the error that
* triggered this exception.
*/
int error_class() const
{
// Map the implementation-specific result code onto one of the
// standard MPI error classes.
int result;
MPI_Error_class(result_code_, &result);
return result;
}
protected:
/// The MPI routine that triggered the error
const char* routine_;
/// The failed result code reported by the MPI implementation.
int result_code_;
/// The formatted error message
std::string message;
};
/**
* Call the MPI routine MPIFunc with arguments Args (surrounded by
* parentheses). If the result is not MPI_SUCCESS, use
* boost::throw_exception to throw an exception or abort, depending on
* BOOST_NO_EXCEPTIONS.
*/
// Wrapped in do { } while (0) so the macro expands to exactly one statement:
// the bare-brace form (`{ ... }`) breaks when the macro is used with a
// trailing semicolon inside an if/else, e.g.
//   if (cond) BOOST_MPI_CHECK_RESULT(f, (args)); else g();
#define BOOST_MPI_CHECK_RESULT( MPIFunc, Args )                         \
 do {                                                                   \
   int _check_result = MPIFunc Args;                                    \
   if (_check_result != MPI_SUCCESS)                                    \
     boost::throw_exception(boost::mpi::exception(#MPIFunc,             \
                                                  _check_result));      \
 } while (0)
} } // end namespace boost::mpi
#endif // BOOST_MPI_EXCEPTION_HPP

View File

@@ -0,0 +1,575 @@
// Copyright (C) 2007 Trustees of Indiana University
// Authors: Douglas Gregor
// Andrew Lumsdaine
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/** @file graph_communicator.hpp
*
* This header defines facilities to support MPI communicators with
* graph topologies, using the graph interface defined by the Boost
* Graph Library. One can construct a communicator whose topology is
* described by any graph meeting the requirements of the Boost Graph
* Library's graph concepts. Likewise, any communicator that has a
* graph topology can be viewed as a graph by the Boost Graph
* Library, permitting one to use the BGL's graph algorithms on the
* process topology.
*/
#ifndef BOOST_MPI_GRAPH_COMMUNICATOR_HPP
#define BOOST_MPI_GRAPH_COMMUNICATOR_HPP
#include <boost/mpi/communicator.hpp>
#include <vector>
#include <utility>
// Headers required to implement graph topologies
#include <boost/graph/graph_traits.hpp>
#include <boost/graph/properties.hpp>
#include <boost/property_map/property_map.hpp>
#include <boost/iterator/counting_iterator.hpp>
#include <boost/graph/iteration_macros.hpp>
#include <boost/shared_array.hpp>
#include <boost/assert.hpp>
namespace boost { namespace mpi {
/**
* @brief An MPI communicator with a graph topology.
*
* A @c graph_communicator is a communicator whose topology is
* expressed as a graph. Graph communicators have the same
* functionality as (intra)communicators, but also allow one to query
* the relationships among processes. Those relationships are
* expressed via a graph, using the interface defined by the Boost
* Graph Library. The @c graph_communicator class meets the
* requirements of the BGL Graph, Incidence Graph, Adjacency Graph,
* Vertex List Graph, and Edge List Graph concepts.
*/
class BOOST_MPI_DECL graph_communicator : public communicator
{
friend class communicator;
/**
* INTERNAL ONLY
*
* Construct a graph communicator given a shared pointer to the
* underlying MPI_Comm. This operation is used for "casting" from a
* communicator to a graph communicator.
*/
explicit graph_communicator(const shared_ptr<MPI_Comm>& comm_ptr)
{
// Adopt the communicator handle *before* querying it: the topology
// check below converts *this to MPI_Comm, which reads this->comm_ptr.
// (In the original ordering the check ran on an unset communicator.)
this->comm_ptr = comm_ptr;
#ifndef BOOST_DISABLE_ASSERTS
// Verify that the adopted communicator really carries a graph topology.
int status;
BOOST_MPI_CHECK_RESULT(MPI_Topo_test, ((MPI_Comm)*this, &status));
BOOST_ASSERT(status == MPI_GRAPH);
#endif
}
public:
/**
* Build a new Boost.MPI graph communicator based on the MPI
* communicator @p comm with graph topology.
*
* @p comm may be any valid MPI communicator. If @p comm is
* MPI_COMM_NULL, an empty communicator (that cannot be used for
* communication) is created and the @p kind parameter is
* ignored. Otherwise, the @p kind parameter determines how the
* Boost.MPI communicator will be related to @p comm:
*
* - If @p kind is @c comm_duplicate, duplicate @c comm to create
* a new communicator. This new communicator will be freed when
* the Boost.MPI communicator (and all copies of it) is
* destroyed. This option is only permitted if the underlying MPI
* implementation supports MPI 2.0; duplication of
* intercommunicators is not available in MPI 1.x.
*
* - If @p kind is @c comm_take_ownership, take ownership of @c
* comm. It will be freed automatically when all of the Boost.MPI
* communicators go out of scope.
*
* - If @p kind is @c comm_attach, this Boost.MPI communicator
* will reference the existing MPI communicator @p comm but will
* not free @p comm when the Boost.MPI communicator goes out of
* scope. This option should only be used when the communicator is
* managed by the user.
*/
graph_communicator(const MPI_Comm& comm, comm_create_kind kind)
: communicator(comm, kind)
{
#ifndef BOOST_DISABLE_ASSERTS
// The base-class constructor has installed the handle; confirm it
// actually has a graph topology.
int status;
BOOST_MPI_CHECK_RESULT(MPI_Topo_test, ((MPI_Comm)*this, &status));
BOOST_ASSERT(status == MPI_GRAPH);
#endif
}
/**
* Create a new communicator whose topology is described by the
* given graph. The indices of the vertices in the graph will be
* assumed to be the ranks of the processes within the
* communicator. There may be fewer vertices in the graph than
* there are processes in the communicator; in this case, the
* resulting communicator will be a NULL communicator.
*
* @param comm The communicator that the new, graph communicator
* will be based on.
*
* @param graph Any type that meets the requirements of the
* Incidence Graph and Vertex List Graph concepts from the Boost Graph
* Library. The structure of this graph will become the topology
* of the communicator that is returned.
*
* @param reorder Whether MPI is permitted to re-order the process
* ranks within the returned communicator, to better optimize
* communication. If false, the ranks of each process in the
* returned communicator will match precisely the rank of that process
* within the original communicator.
*/
template<typename Graph>
explicit
graph_communicator(const communicator& comm, const Graph& graph,
bool reorder = false);
/**
* Create a new communicator whose topology is described by the
* given graph. The rank map (@p rank) gives the mapping from
* vertices in the graph to ranks within the communicator. There
* may be fewer vertices in the graph than there are processes in
* the communicator; in this case, the resulting communicator will
* be a NULL communicator.
*
* @param comm The communicator that the new, graph communicator
* will be based on. The ranks in @c rank refer to the processes in
* this communicator.
*
* @param graph Any type that meets the requirements of the
* Incidence Graph and Vertex List Graph concepts from the Boost Graph
* Library. The structure of this graph will become the topology
* of the communicator that is returned.
*
* @param rank This map translates vertices in the @c graph into
* ranks within the current communicator. It must be a Readable
* Property Map (see the Boost Property Map library) whose key type
* is the vertex type of the @p graph and whose value type is @c
* int.
*
* @param reorder Whether MPI is permitted to re-order the process
* ranks within the returned communicator, to better optimize
* communication. If false, the ranks of each process in the
* returned communicator will match precisely the rank of that process
* within the original communicator.
*/
template<typename Graph, typename RankMap>
explicit
graph_communicator(const communicator& comm, const Graph& graph,
RankMap rank, bool reorder = false);
protected:
/**
* INTERNAL ONLY
*
* Used by the constructors to create the new communicator with a
* graph topology.
*/
template<typename Graph, typename RankMap>
void
setup_graph(const communicator& comm, const Graph& graph, RankMap rank,
bool reorder);
};
/****************************************************************************
* Implementation Details *
****************************************************************************/
template<typename Graph>
graph_communicator::graph_communicator(const communicator& comm,
const Graph& graph,
bool reorder)
{
// Delegate to setup_graph, using the graph's built-in vertex_index
// property map as the vertex-to-rank mapping.
this->setup_graph(comm, graph, get(vertex_index, graph), reorder);
}
template<typename Graph, typename RankMap>
graph_communicator::graph_communicator(const communicator& comm,
const Graph& graph,
RankMap rank, bool reorder)
{
// Delegate to setup_graph with the caller-supplied vertex-to-rank map.
this->setup_graph(comm, graph, rank, reorder);
}
template<typename Graph, typename RankMap>
void
graph_communicator::setup_graph(const communicator& comm, const Graph& graph,
RankMap rank, bool reorder)
{
typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;
// Build a mapping from ranks to vertices
std::vector<vertex_descriptor> vertex_with_rank(num_vertices(graph));
if (vertex_with_rank.empty())
return;
BGL_FORALL_VERTICES_T(v, graph, Graph)
vertex_with_rank[get(rank, v)] = v;
// Build the representation of the graph required by
// MPI_Graph_create.
std::vector<int> indices(num_vertices(graph));
std::vector<int> edges;
int nvertices = indices.size();
for (int vertex_index = 0; vertex_index < nvertices; ++vertex_index) {
vertex_descriptor v = vertex_with_rank[vertex_index];
BGL_FORALL_OUTEDGES_T(v, e, graph, Graph)
edges.push_back(get(rank, target(e, graph)));
indices[vertex_index] = edges.size();
}
// Create the new communicator
MPI_Comm newcomm;
BOOST_MPI_CHECK_RESULT(MPI_Graph_create,
((MPI_Comm)comm,
nvertices,
&indices[0],
edges.empty()? (int*)0 : &edges[0],
reorder,
&newcomm));
this->comm_ptr.reset(new MPI_Comm(newcomm), comm_free());
}
/****************************************************************************
* Communicator with Graph Topology as BGL Graph *
****************************************************************************/
namespace detail {
/**
* INTERNAL ONLY
*
* The iterator used to access the outgoing edges within a
* communicator's graph topology.
*/
class comm_out_edge_iterator
: public iterator_facade<comm_out_edge_iterator,
std::pair<int, int>,
random_access_traversal_tag,
const std::pair<int, int>&,
int>
{
public:
/// Constructs a singular iterator.
comm_out_edge_iterator() { }
/// Iterates over the out-edges of @p source. The targets of those edges
/// are stored in @p neighbors; @p index is the current offset into it.
comm_out_edge_iterator(int source, shared_array<int> neighbors, int index)
: edge(source, -1), neighbors(neighbors), index(index) { }
protected:
friend class boost::iterator_core_access;
const std::pair<int, int>& dereference() const
{
// The source rank was fixed at construction; only the target half of
// the cached edge needs to be refreshed before handing out a reference.
edge.second = neighbors[index];
return edge;
}
bool equal(const comm_out_edge_iterator& other) const
{
// Same source vertex and same offset => same edge. The neighbor array
// itself need not be compared, as it is determined by the source.
return (edge.first == other.edge.first
&& index == other.index);
}
void increment() { ++index; }
void decrement() { --index; }
void advance(int n) { index += n; }
int distance_to(const comm_out_edge_iterator& other) const
{
return other.index - index;
}
/// Cached (source, target) pair returned by dereference(); mutable so the
/// const dereference() can update the target in place.
mutable std::pair<int, int> edge;
/// Targets of the source vertex's out-edges.
shared_array<int> neighbors;
/// Current offset into @c neighbors.
int index;
};
/**
* INTERNAL ONLY
*
* The iterator used to access the adjacent vertices within a
* communicator's graph topology.
*/
class comm_adj_iterator
: public iterator_facade<comm_adj_iterator,
                         int,
                         random_access_traversal_tag,
                         int,
                         int>
{
public:
  /// Constructs a singular iterator.
  comm_adj_iterator() { }

  /// Walks the neighbor-rank array @p adj_list starting at offset @p pos.
  comm_adj_iterator(shared_array<int> adj_list, int pos)
    : adj_list(adj_list), pos(pos) { }

protected:
  friend class boost::iterator_core_access;

  // iterator_facade hooks: a plain random-access walk over adj_list.
  int dereference() const { return adj_list[pos]; }

  bool equal(const comm_adj_iterator& other) const
  {
    // shared_array comparison is pointer identity, matching the intent:
    // iterators over the same array at the same offset are equal.
    return adj_list == other.adj_list && pos == other.pos;
  }

  void increment() { ++pos; }
  void decrement() { --pos; }
  void advance(int n) { pos += n; }

  int distance_to(const comm_adj_iterator& other) const
  {
    return other.pos - pos;
  }

  shared_array<int> adj_list; ///< Ranks adjacent to some vertex.
  int pos;                    ///< Current offset into @c adj_list.
};
/**
* INTERNAL ONLY
*
* The iterator used to access the edges in a communicator's graph
* topology.
*/
class comm_edge_iterator
: public iterator_facade<comm_edge_iterator,
std::pair<int, int>,
forward_traversal_tag,
const std::pair<int, int>&,
int>
{
public:
/// Constructs a singular iterator.
comm_edge_iterator() { }
/// Constructor for a past-the-end iterator
comm_edge_iterator(int nedges) : edge_index(nedges) { }
/// Iterates over all edges of the topology. @p indices is the MPI graph
/// "index" array (indices[i] = cumulative edge count of vertices 0..i)
/// and @p edges lists every edge target, as in MPI_Graph_create.
comm_edge_iterator(shared_array<int> indices, shared_array<int> edges)
: indices(indices), edges(edges), edge_index(0), edge(0, 0)
{ }
protected:
friend class boost::iterator_core_access;
const std::pair<int, int>& dereference() const
{
// Advance the cached source vertex past every vertex whose edge range
// has already been exhausted (edge_index has reached its cumulative
// count), then read the target of the current edge.
while (edge_index == indices[edge.first])
++edge.first;
edge.second = edges[edge_index];
return edge;
}
bool equal(const comm_edge_iterator& other) const
{
// Only the global edge offset is compared; this is what lets the
// nedges-only past-the-end constructor work.
return edge_index == other.edge_index;
}
void increment()
{
++edge_index;
}
/// Cumulative out-degree array (MPI graph-topology "index" array).
shared_array<int> indices;
/// Flattened list of edge targets.
shared_array<int> edges;
/// Offset of the current edge within @c edges.
int edge_index;
/// Cached (source, target) pair; mutable so dereference() can update it.
mutable std::pair<int, int> edge;
};
} // end namespace detail
// Incidence Graph requirements
/**
* @brief Returns the source vertex from an edge in the graph topology
* of a communicator.
*/
inline int source(const std::pair<int, int>& edge, const graph_communicator&)
{
  // An edge descriptor is a (source rank, target rank) pair; the
  // communicator parameter exists only for overload resolution.
  const int source_rank = edge.first;
  return source_rank;
}
/**
* @brief Returns the target vertex from an edge in the graph topology
* of a communicator.
*/
inline int target(const std::pair<int, int>& edge, const graph_communicator&)
{
  // An edge descriptor is a (source rank, target rank) pair; the
  // communicator parameter exists only for overload resolution.
  const int target_rank = edge.second;
  return target_rank;
}
/**
* @brief Returns an iterator range containing all of the edges
* outgoing from the given vertex in a graph topology of a
* communicator.
*/
std::pair<detail::comm_out_edge_iterator, detail::comm_out_edge_iterator>
out_edges(int vertex, const graph_communicator& comm); // non-inline: defined in the Boost.MPI library sources
/**
* @brief Returns the out-degree of a vertex in the graph topology of
* a communicator.
*/
int out_degree(int vertex, const graph_communicator& comm); // non-inline: defined in the Boost.MPI library sources
// Adjacency Graph requirements
/**
* @brief Returns an iterator range containing all of the neighbors of
* the given vertex in the communicator's graph topology.
*/
std::pair<detail::comm_adj_iterator, detail::comm_adj_iterator>
adjacent_vertices(int vertex, const graph_communicator& comm); // non-inline: defined in the Boost.MPI library sources
// Vertex List Graph requirements
/**
* @brief Returns an iterator range that contains all of the vertices
* with the communicator's graph topology, i.e., all of the process
* ranks in the communicator.
*/
inline std::pair<counting_iterator<int>, counting_iterator<int> >
vertices(const graph_communicator& comm)
{
  // The vertices are exactly the process ranks 0..size()-1, so a pair of
  // counting iterators enumerates them without allocating any storage.
  counting_iterator<int> first(0);
  counting_iterator<int> last(comm.size());
  return std::make_pair(first, last);
}
/**
* @brief Returns the number of vertices within the graph topology of
* the communicator, i.e., the number of processes in the
* communicator.
*/
// One vertex per process, so the vertex count is the communicator's size.
inline int num_vertices(const graph_communicator& comm) { return comm.size(); }
// Edge List Graph requirements
/**
* @brief Returns an iterator range that contains all of the edges
* with the communicator's graph topology.
*/
std::pair<detail::comm_edge_iterator, detail::comm_edge_iterator>
edges(const graph_communicator& comm); // non-inline: defined in the Boost.MPI library sources
/**
* @brief Returns the number of edges in the communicator's graph
* topology.
*/
int num_edges(const graph_communicator& comm); // non-inline: defined in the Boost.MPI library sources
// Property Graph requirements
/**
* @brief Returns a property map that maps from vertices in a
* communicator's graph topology to their index values.
*
* Since the vertices are ranks in the communicator, the returned
* property map is the identity property map.
*/
inline identity_property_map get(vertex_index_t, const graph_communicator&)
{
// A vertex *is* its own index (both are the process rank), so the
// identity map suffices.
return identity_property_map();
}
/**
* @brief Returns the index of a vertex in the communicator's graph
* topology.
*
* Since the vertices are ranks in the communicator, this is the
* identity function.
*/
inline int get(vertex_index_t, const graph_communicator&, int vertex)
{
// A vertex is its own index: both are the process rank.
return vertex;
}
} } // end namespace boost::mpi
namespace boost {
/**
* @brief Traits structure that allows a communicator with graph
* topology to be viewed as a graph by the Boost Graph Library.
*
* The specialization of @c graph_traits for an MPI communicator
* allows a communicator with graph topology to be viewed as a
* graph. An MPI communicator with graph topology meets the
* requirements of the Graph, Incidence Graph, Adjacency Graph, Vertex
* List Graph, and Edge List Graph concepts from the Boost Graph
* Library.
*/
template<>
struct graph_traits<mpi::graph_communicator> {
// Graph concept requirements
typedef int vertex_descriptor;                 // a vertex is a process rank
typedef std::pair<int, int> edge_descriptor;   // (source rank, target rank)
typedef directed_tag directed_category;
typedef disallow_parallel_edge_tag edge_parallel_category;
/**
* INTERNAL ONLY
*
* Tag type declaring every graph concept this "graph" models.
*/
struct traversal_category
: incidence_graph_tag,
adjacency_graph_tag,
vertex_list_graph_tag,
edge_list_graph_tag
{
};
/**
* @brief Returns a vertex descriptor that can never refer to any
* valid vertex.
*/
// -1 can never be a process rank, so it is a safe sentinel.
static vertex_descriptor null_vertex() { return -1; }
// Incidence Graph requirements
typedef mpi::detail::comm_out_edge_iterator out_edge_iterator;
typedef int degree_size_type;
// Adjacency Graph requirements
typedef mpi::detail::comm_adj_iterator adjacency_iterator;
// Vertex List Graph requirements
typedef counting_iterator<int> vertex_iterator;
typedef int vertices_size_type;
// Edge List Graph requirements
typedef mpi::detail::comm_edge_iterator edge_iterator;
typedef int edges_size_type;
};
// Property Graph requirements
/**
* INTERNAL ONLY
*/
template<>
struct property_map<mpi::graph_communicator, vertex_index_t>
{
// Both the mutable and const map types are the identity map: a vertex
// (process rank) is its own index.
typedef identity_property_map type;
typedef identity_property_map const_type;
};
} // end namespace boost
#endif // BOOST_MPI_GRAPH_COMMUNICATOR_HPP

340
test/external/boost/mpi/group.hpp vendored Normal file
View File

@@ -0,0 +1,340 @@
// Copyright (C) 2007 Trustees of Indiana University
// Authors: Douglas Gregor
// Andrew Lumsdaine
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/** @file group.hpp
*
* This header defines the @c group class, which allows one to
* manipulate and query groups of processes.
*/
#ifndef BOOST_MPI_GROUP_HPP
#define BOOST_MPI_GROUP_HPP
#include <boost/mpi/exception.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/optional.hpp>
#include <vector>
namespace boost { namespace mpi {
/**
* @brief A @c group is a representation of a subset of the processes
* within a @c communicator.
*
* The @c group class allows one to create arbitrary subsets of the
* processes within a communicator. One can compute the union,
* intersection, or difference of two groups, or create new groups by
* specifically including or excluding certain processes. Given a
* group, one can create a new communicator containing only the
* processes in that group.
*/
class BOOST_MPI_DECL group
{
public:
/**
* @brief Constructs an empty group.
*/
group() : group_ptr() { }
/**
* @brief Constructs a group from an @c MPI_Group.
*
* This routine allows one to construct a Boost.MPI @c group from a
* C @c MPI_Group. The @c group object can (optionally) adopt the @c
* MPI_Group, after which point the @c group object becomes
* responsible for freeing the @c MPI_Group when the last copy of @c
* group disappears.
*
* @param in_group The @c MPI_Group used to construct this @c group.
*
* @param adopt Whether the @c group should adopt the @c
* MPI_Group. When true, the @c group object (or one of its copies)
* will free the group (via @c MPI_Group_free) when the last copy is
* destroyed. Otherwise, the user is responsible for calling @c
* MPI_Group_free.
*/
group(const MPI_Group& in_group, bool adopt);
/**
* @brief Determine the rank of the calling process in the group.
*
* This routine is equivalent to @c MPI_Group_rank.
*
* @returns The rank of the calling process in the group, which will
* be a value in [0, size()). If the calling process is not in the
* group, returns an empty value.
*/
optional<int> rank() const;
/**
* @brief Determine the number of processes in the group.
*
* This routine is equivalent to @c MPI_Group_size.
*
* @returns The number of processes in the group.
*/
int size() const;
/**
* @brief Translates the ranks from one group into the ranks of the
* same processes in another group.
*
* This routine translates each of the integer rank values in the
* iterator range @c [first, last) from the current group into rank
* values of the corresponding processes in @p to_group. The
* corresponding rank values are written via the output iterator @c
* out. When there is no correspondence between a rank in the
* current group and a rank in @c to_group, the value @c
* MPI_UNDEFINED is written to the output iterator.
*
* @param first Beginning of the iterator range of ranks in the
* current group.
*
* @param last Past the end of the iterator range of ranks in the
* current group.
*
* @param to_group The group that we are translating ranks to.
*
* @param out The output iterator to which the translated ranks will
* be written.
*
* @returns the output iterator, which points one step past the last
* rank written.
*/
template<typename InputIterator, typename OutputIterator>
OutputIterator translate_ranks(InputIterator first, InputIterator last,
const group& to_group, OutputIterator out);
/**
* @brief Determines whether the group is non-empty.
*
* @returns True if the group is not empty, false if it is empty.
*/
operator bool() const { return (bool)group_ptr; }
/**
* @brief Retrieves the underlying @c MPI_Group associated with this
* group.
*
* @returns The @c MPI_Group handle manipulated by this object. If
* this object represents the empty group, returns @c
* MPI_GROUP_EMPTY.
*/
operator MPI_Group() const
{
if (group_ptr)
return *group_ptr;
else
return MPI_GROUP_EMPTY;
}
/**
* @brief Creates a new group including a subset of the processes
* in the current group.
*
* This routine creates a new @c group which includes only those
* processes in the current group that are listed in the integer
* iterator range @c [first, last). Equivalent to @c
* MPI_Group_incl.
*
* @param first The beginning of the iterator range of ranks to include.
*
* @param last Past the end of the iterator range of ranks to include.
*
* @returns A new group containing those processes with ranks @c
* [first, last) in the current group.
*/
template<typename InputIterator>
group include(InputIterator first, InputIterator last);
/**
* @brief Creates a new group from all of the processes in the
* current group, excluding a specific subset of the processes.
*
* This routine creates a new @c group which includes all of the
* processes in the current group except those whose ranks are
* listed in the integer iterator range @c [first,
* last). Equivalent to @c MPI_Group_excl.
*
* @param first The beginning of the iterator range of ranks to exclude.
*
* @param last Past the end of the iterator range of ranks to exclude.
*
* @returns A new group containing all of the processes in the
* current group except those processes with ranks @c [first, last)
* in the current group.
*/
template<typename InputIterator>
group exclude(InputIterator first, InputIterator last);
protected:
/**
* INTERNAL ONLY
*
* Function object that frees an MPI group and deletes the
* memory associated with it. Intended to be used as a deleter with
* shared_ptr.
*/
struct group_free
{
void operator()(MPI_Group* comm) const
{
// Only free the group while MPI is still running; after
// MPI_Finalize the handle is no longer valid and freeing it
// would itself be an MPI error.
int finalized;
BOOST_MPI_CHECK_RESULT(MPI_Finalized, (&finalized));
if (!finalized)
BOOST_MPI_CHECK_RESULT(MPI_Group_free, (comm));
delete comm;
}
};
/**
* The underlying MPI group. This is a shared pointer, so the actual
* MPI group which will be shared among all related instances of the
* @c group class. When there are no more such instances, the group
* will be automatically freed.
*/
shared_ptr<MPI_Group> group_ptr;
};
/**
* @brief Determines whether two process groups are identical.
*
* Equivalent to calling @c MPI_Group_compare and checking whether the
* result is @c MPI_IDENT.
*
* @returns True when the two process groups contain the same
* processes in the same order.
*/
BOOST_MPI_DECL bool operator==(const group& g1, const group& g2);
/**
* @brief Determines whether two process groups are not identical.
*
* Equivalent to calling @c MPI_Group_compare and checking whether the
* result is not @c MPI_IDENT.
*
* @returns False when the two process groups contain the same
* processes in the same order.
*/
inline bool operator!=(const group& g1, const group& g2)
{
// Defined in terms of operator== so the two can never disagree.
return !(g1 == g2);
}
/**
* @brief Computes the union of two process groups.
*
* This routine returns a new @c group that contains all processes
* that are either in group @c g1 or in group @c g2 (or both). The
* processes that are in @c g1 will be first in the resulting group,
* followed by the processes from @c g2 (but not also in @c
* g1). Equivalent to @c MPI_Group_union.
*/
BOOST_MPI_DECL group operator|(const group& g1, const group& g2);
/**
* @brief Computes the intersection of two process groups.
*
* This routine returns a new @c group that contains all processes
* that are in group @c g1 and in group @c g2, ordered in the same way
* as @c g1. Equivalent to @c MPI_Group_intersection.
*/
BOOST_MPI_DECL group operator&(const group& g1, const group& g2);
/**
* @brief Computes the difference between two process groups.
*
* This routine returns a new @c group that contains all processes
* that are in group @c g1 but not in group @c g2, ordered in the same way
* as @c g1. Equivalent to @c MPI_Group_difference.
*/
BOOST_MPI_DECL group operator-(const group& g1, const group& g2);
/************************************************************************
* Implementation details *
************************************************************************/
template<typename InputIterator, typename OutputIterator>
OutputIterator
group::translate_ranks(InputIterator first, InputIterator last,
                       const group& to_group, OutputIterator out)
{
  // Materialize the input ranks so MPI can be handed a contiguous buffer.
  std::vector<int> source_ranks(first, last);
  if (source_ranks.empty())
    return out;

  std::vector<int> translated(source_ranks.size());
  BOOST_MPI_CHECK_RESULT(MPI_Group_translate_ranks,
                         ((MPI_Group)*this,
                          source_ranks.size(),
                          &source_ranks[0],
                          (MPI_Group)to_group,
                          &translated[0]));

  // Stream the translated ranks through the caller's output iterator.
  typedef std::vector<int>::size_type size_type;
  for (size_type i = 0; i < translated.size(); ++i)
    *out++ = translated[i];
  return out;
}
/**
 * INTERNAL ONLY
 *
 * Specialization of translate_ranks that handles the one case where
 * we can avoid any memory allocation or copying: with int* iterators
 * the input is already a contiguous int buffer that can be handed to
 * MPI directly. (Declaration only; defined out of line.)
 */
template<>
BOOST_MPI_DECL int*
group::translate_ranks(int* first, int* last, const group& to_group, int* out);
/**
 * Construct a new group containing only those processes whose ranks,
 * relative to this group, appear in the sequence [first, last).
 * Wraps @c MPI_Group_incl; the resulting MPI group is adopted.
 */
template<typename InputIterator>
group group::include(InputIterator first, InputIterator last)
{
  // An empty rank list yields the empty group.
  if (first == last)
    return group();

  std::vector<int> rank_list(first, last);
  MPI_Group newgroup;
  BOOST_MPI_CHECK_RESULT(MPI_Group_incl,
                         ((MPI_Group)*this, rank_list.size(),
                          &rank_list[0], &newgroup));
  return group(newgroup, /*adopt=*/true);
}
/**
 * INTERNAL ONLY
 *
 * Specialization of group::include that handles the one case where we
 * can avoid any memory allocation or copying before creating the
 * group: an int* range is already contiguous and can be passed to MPI
 * directly. (Declaration only; defined out of line.)
 */
template<> BOOST_MPI_DECL group group::include(int* first, int* last);
/**
 * Construct a new group containing every process of this group except
 * those whose ranks appear in the sequence [first, last).
 * Wraps @c MPI_Group_excl; the resulting MPI group is adopted.
 */
template<typename InputIterator>
group group::exclude(InputIterator first, InputIterator last)
{
  // Excluding nothing yields the empty group (mirrors include()).
  if (first == last)
    return group();

  std::vector<int> rank_list(first, last);
  MPI_Group newgroup;
  BOOST_MPI_CHECK_RESULT(MPI_Group_excl,
                         ((MPI_Group)*this, rank_list.size(),
                          &rank_list[0], &newgroup));
  return group(newgroup, /*adopt=*/true);
}
/**
 * INTERNAL ONLY
 *
 * Specialization of group::exclude that handles the one case where we
 * can avoid any memory allocation or copying before creating the
 * group: an int* range is already contiguous and can be passed to MPI
 * directly. (Declaration only; defined out of line.)
 */
template<> BOOST_MPI_DECL group group::exclude(int* first, int* last);
} } // end namespace boost::mpi
#endif // BOOST_MPI_GROUP_HPP

// ===== diff-viewer artifact: start of new file (165 lines) — intercommunicator.hpp follows =====
// Copyright (C) 2007 The Trustees of Indiana University.
// Authors: Douglas Gregor
// Andrew Lumsdaine
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/** @file intercommunicator.hpp
*
* This header defines the @c intercommunicator class, which permits
* communication between different process groups.
*/
#ifndef BOOST_MPI_INTERCOMMUNICATOR_HPP
#define BOOST_MPI_INTERCOMMUNICATOR_HPP
#include <boost/mpi/communicator.hpp>
namespace boost { namespace mpi {
/**
* INTERNAL ONLY
*
* Forward declaration of the MPI "group" representation, for use in
* the description of the @c intercommunicator class.
*/
class group;
/**
* @brief Communication facilities among processes in different
* groups.
*
* The @c intercommunicator class provides communication facilities
* among processes from different groups. An intercommunicator is
* always associated with two process groups: one "local" process
* group, containing the process that initiates an MPI operation
* (e.g., the sender in a @c send operation), and one "remote" process
* group, containing the process that is the target of the MPI
* operation.
*
* While intercommunicators have essentially the same point-to-point
* operations as intracommunicators (the latter communicate only
* within a single process group), all communication with
* intercommunicators occurs between the processes in the local group
* and the processes in the remote group; communication within a group
* must use a different (intra-)communicator.
*
*/
class BOOST_MPI_DECL intercommunicator : public communicator
{
private:
// Grants communicator access to the private shared_ptr constructor
// below, used when "casting" a communicator to an intercommunicator.
friend class communicator;
/**
 * INTERNAL ONLY
 *
 * Construct an intercommunicator given a shared pointer to the
 * underlying MPI_Comm. This operation is used for "casting" from a
 * communicator to an intercommunicator.
 */
explicit intercommunicator(const shared_ptr<MPI_Comm>& cp)
{
// Adopt the shared communicator handle; comm_ptr is inherited from
// the communicator base class.
this->comm_ptr = cp;
}
public:
/**
 * Build a new Boost.MPI intercommunicator based on the MPI
 * intercommunicator @p comm.
 *
 * @p comm may be any valid MPI intercommunicator. If @p comm is
 * MPI_COMM_NULL, an empty communicator (that cannot be used for
 * communication) is created and the @p kind parameter is
 * ignored. Otherwise, the @p kind parameter determines how the
 * Boost.MPI communicator will be related to @p comm:
 *
 *   - If @p kind is @c comm_duplicate, duplicate @c comm to create
 *   a new communicator. This new communicator will be freed when
 *   the Boost.MPI communicator (and all copies of it) is
 *   destroyed. This option is only permitted if the underlying MPI
 *   implementation supports MPI 2.0; duplication of
 *   intercommunicators is not available in MPI 1.x.
 *
 *   - If @p kind is @c comm_take_ownership, take ownership of @c
 *   comm. It will be freed automatically when all of the Boost.MPI
 *   communicators go out of scope.
 *
 *   - If @p kind is @c comm_attach, this Boost.MPI communicator
 *   will reference the existing MPI communicator @p comm but will
 *   not free @p comm when the Boost.MPI communicator goes out of
 *   scope. This option should only be used when the communicator is
 *   managed by the user.
 */
intercommunicator(const MPI_Comm& comm, comm_create_kind kind)
: communicator(comm, kind) { }
/**
 * Constructs a new intercommunicator whose local group is @p local
 * and whose remote group is @p peer. The intercommunicator can then
 * be used to communicate between processes in the two groups. This
 * constructor is equivalent to a call to @c MPI_Intercomm_create.
 *
 * @param local The intracommunicator containing all of the
 * processes that will go into the local group.
 *
 * @param local_leader The rank within the @p local
 * intracommunicator that will serve as its leader.
 *
 * @param peer The intracommunicator containing all of the processes
 * that will go into the remote group.
 *
 * @param remote_leader The rank within the @p peer group that will
 * serve as its leader.
 */
intercommunicator(const communicator& local, int local_leader,
                  const communicator& peer, int remote_leader);
/**
 * Returns the size of the local group, i.e., the number of local
 * processes that are part of the group. Delegates to the base
 * class's size(), which reports the local group for an intercomm.
 */
int local_size() const { return this->size(); }
/**
 * Returns the local group, containing all of the local processes in
 * this intercommunicator.
 */
boost::mpi::group local_group() const;
/**
 * Returns the rank of this process within the local group.
 * Delegates to the base class's rank().
 */
int local_rank() const { return this->rank(); }
/**
 * Returns the size of the remote group, i.e., the number of
 * processes that are part of the remote group.
 */
int remote_size() const;
/**
 * Returns the remote group, containing all of the remote processes
 * in this intercommunicator.
 */
boost::mpi::group remote_group() const;
/**
 * Merge the local and remote groups in this intercommunicator into
 * a new intracommunicator containing the union of the processes in
 * both groups. This method is equivalent to @c MPI_Intercomm_merge.
 *
 * @param high Whether the processes in this group should have the
 * higher rank numbers than the processes in the other group. Each
 * of the processes within a particular group shall have the same
 * "high" value.
 *
 * @returns the new, merged intracommunicator
 */
communicator merge(bool high) const;
};
} } // end namespace boost::mpi
#endif // BOOST_MPI_INTERCOMMUNICATOR_HPP

// ===== diff-viewer artifact: start of new file (732 lines) — test/external/boost/mpi/nonblocking.hpp (vendored) =====
// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/** @file nonblocking.hpp
*
* This header defines operations for completing non-blocking
* communication requests.
*/
#ifndef BOOST_MPI_NONBLOCKING_HPP
#define BOOST_MPI_NONBLOCKING_HPP
#include <boost/mpi/config.hpp>
#include <vector>
#include <iterator> // for std::iterator_traits
#include <boost/optional.hpp>
#include <utility> // for std::pair
#include <algorithm> // for iter_swap, reverse
#include <boost/static_assert.hpp>
#include <boost/mpi/request.hpp>
#include <boost/mpi/status.hpp>
#include <boost/mpi/exception.hpp>
namespace boost { namespace mpi {
/**
* @brief Wait until any non-blocking request has completed.
*
* This routine takes in a set of requests stored in the iterator
* range @c [first,last) and waits until any of these requests has
* been completed. It provides functionality equivalent to
* @c MPI_Waitany.
*
* @param first The iterator that denotes the beginning of the
* sequence of request objects.
*
* @param last The iterator that denotes the end of the sequence of
* request objects. This may not be equal to @c first.
*
* @returns A pair containing the status object that corresponds to
* the completed operation and the iterator referencing the completed
* request.
*/
template<typename ForwardIterator>
std::pair<status, ForwardIterator>
wait_any(ForwardIterator first, ForwardIterator last)
{
  using std::advance;
  // The range must be non-empty: there is no status to return otherwise.
  BOOST_ASSERT(first != last);
  typedef typename std::iterator_traits<ForwardIterator>::difference_type
    difference_type;
  // Busy-wait over the range, repeatedly testing each request, until one
  // completes. "Trivial" requests (a single bare MPI_Request with no
  // handler) can instead be handed to MPI_Waitany in bulk.
  bool all_trivial_requests = true;
  difference_type n = 0;
  ForwardIterator current = first;
  while (true) {
    // Check if we have found a completed request. If so, return it.
    if (optional<status> result = current->test())
      return std::make_pair(*result, current);
    // Check if this request (and all others before it) are "trivial"
    // requests, e.g., they can be represented with a single
    // MPI_Request.
    all_trivial_requests =
      all_trivial_requests
      && !current->m_handler
      && current->m_requests[1] == MPI_REQUEST_NULL;
    // Move to the next request.
    ++n;
    if (++current == last) {
      // We have reached the end of the list. If all requests thus far
      // have been trivial, we can call MPI_Waitany directly, because
      // it may be more efficient than our busy-wait semantics.
      if (all_trivial_requests) {
        std::vector<MPI_Request> requests;
        requests.reserve(n);
        for (current = first; current != last; ++current)
          requests.push_back(current->m_requests[0]);
        // Let MPI wait until one of these operations completes.
        int index;
        status stat;
        BOOST_MPI_CHECK_RESULT(MPI_Waitany,
                               (n, &requests[0], &index, &stat.m_status));
        // We don't have a notion of empty requests or status objects,
        // so this is an error.
        if (index == MPI_UNDEFINED)
          boost::throw_exception(exception("MPI_Waitany", MPI_ERR_REQUEST));
        // Find the iterator corresponding to the completed request.
        current = first;
        advance(current, index);
        // Write back the (now completed) handle MPI may have updated.
        current->m_requests[0] = requests[index];
        return std::make_pair(stat, current);
      }
      // There are some nontrivial requests, so we must continue our
      // busy waiting loop.
      n = 0;
      current = first;
      all_trivial_requests = true;
    }
  }
  // We cannot ever get here
  BOOST_ASSERT(false);
}
/**
* @brief Test whether any non-blocking request has completed.
*
* This routine takes in a set of requests stored in the iterator
* range @c [first,last) and tests whether any of these requests has
* been completed. This routine is similar to @c wait_any, but will
* not block waiting for requests to completed. It provides
* functionality equivalent to @c MPI_Testany.
*
* @param first The iterator that denotes the beginning of the
* sequence of request objects.
*
* @param last The iterator that denotes the end of the sequence of
* request objects.
*
* @returns If any outstanding requests have completed, a pair
* containing the status object that corresponds to the completed
* operation and the iterator referencing the completed
* request. Otherwise, an empty @c optional<>.
*/
/**
 * Test every request in [first, last) exactly once and return the
 * first completed one (with its status), or an empty optional if none
 * has completed. Non-blocking analogue of @c wait_any.
 */
template<typename ForwardIterator>
optional<std::pair<status, ForwardIterator> >
test_any(ForwardIterator first, ForwardIterator last)
{
  // BUG FIX: the previous loop advanced "first" while testing (and
  // returning) the never-advanced iterator "current", so only the
  // request at the initial position was ever tested. Advance and
  // return the iterator actually being tested.
  for (ForwardIterator current = first; current != last; ++current) {
    // Check if we have found a completed request. If so, return it.
    if (optional<status> result = current->test())
      return std::make_pair(*result, current);
  }
  // We found nothing
  return optional<std::pair<status, ForwardIterator> >();
}
/**
* @brief Wait until all non-blocking requests have completed.
*
* This routine takes in a set of requests stored in the iterator
* range @c [first,last) and waits until all of these requests have
* been completed. It provides functionality equivalent to
* @c MPI_Waitall.
*
* @param first The iterator that denotes the beginning of the
* sequence of request objects.
*
* @param last The iterator that denotes the end of the sequence of
* request objects.
*
* @param out If provided, an output iterator through which the
* status of each request will be emitted. The @c status objects are
* emitted in the same order as the requests are retrieved from
* @c [first,last).
*
* @returns If an @p out parameter was provided, the value @c out
* after all of the @c status objects have been emitted.
*/
/**
 * Wait until every request in [first, last) has completed, emitting a
 * status per request (in range order) through @p out; returns @p out
 * advanced past the emitted statuses. Uses MPI_Waitall directly when
 * every request is "trivial" (single MPI_Request, no handler).
 */
template<typename ForwardIterator, typename OutputIterator>
OutputIterator
wait_all(ForwardIterator first, ForwardIterator last, OutputIterator out)
{
  typedef typename std::iterator_traits<ForwardIterator>::difference_type
    difference_type;
  using std::distance;
  difference_type num_outstanding_requests = distance(first, last);
  // Completion results and flags, indexed by position in the range.
  std::vector<status> results(num_outstanding_requests);
  std::vector<bool> completed(num_outstanding_requests);
  while (num_outstanding_requests > 0) {
    bool all_trivial_requests = true;
    difference_type idx = 0;
    for (ForwardIterator current = first; current != last; ++current, ++idx) {
      if (!completed[idx]) {
        if (optional<status> stat = current->test()) {
          // This outstanding request has been completed. We're done.
          results[idx] = *stat;
          completed[idx] = true;
          --num_outstanding_requests;
          all_trivial_requests = false;
        } else {
          // Check if this request (and all others before it) are "trivial"
          // requests, e.g., they can be represented with a single
          // MPI_Request.
          all_trivial_requests =
            all_trivial_requests
            && !current->m_handler
            && current->m_requests[1] == MPI_REQUEST_NULL;
        }
      }
    }
    // If we have yet to fulfill any requests and all of the requests
    // are trivial (i.e., require only a single MPI_Request to be
    // fulfilled), call MPI_Waitall directly.
    if (all_trivial_requests
        && num_outstanding_requests == (difference_type)results.size()) {
      std::vector<MPI_Request> requests;
      requests.reserve(num_outstanding_requests);
      for (ForwardIterator current = first; current != last; ++current)
        requests.push_back(current->m_requests[0]);
      // Let MPI wait until all of these operations completes.
      std::vector<MPI_Status> stats(num_outstanding_requests);
      BOOST_MPI_CHECK_RESULT(MPI_Waitall,
                             (num_outstanding_requests, &requests[0],
                              &stats[0]));
      // Emit the statuses in request order.
      for (std::vector<MPI_Status>::iterator i = stats.begin();
           i != stats.end(); ++i, ++out) {
        status stat;
        stat.m_status = *i;
        *out = stat;
      }
      return out;
    }
    // NOTE: a dead store "all_trivial_requests = false;" used to sit
    // here; the flag is a loop-local re-initialized at the top of every
    // iteration, so the assignment had no effect and was removed.
  }
  return std::copy(results.begin(), results.end(), out);
}
/**
* \overload
*/
template<typename ForwardIterator>
void
wait_all(ForwardIterator first, ForwardIterator last)
{
  typedef typename std::iterator_traits<ForwardIterator>::difference_type
    difference_type;
  using std::distance;
  difference_type num_outstanding_requests = distance(first, last);
  // Per-position completion flags; no statuses are collected in this
  // overload.
  std::vector<bool> completed(num_outstanding_requests);
  while (num_outstanding_requests > 0) {
    bool all_trivial_requests = true;
    difference_type idx = 0;
    for (ForwardIterator current = first; current != last; ++current, ++idx) {
      if (!completed[idx]) {
        if (optional<status> stat = current->test()) {
          // This outstanding request has been completed.
          completed[idx] = true;
          --num_outstanding_requests;
          all_trivial_requests = false;
        } else {
          // Check if this request (and all others before it) are "trivial"
          // requests, e.g., they can be represented with a single
          // MPI_Request.
          all_trivial_requests =
            all_trivial_requests
            && !current->m_handler
            && current->m_requests[1] == MPI_REQUEST_NULL;
        }
      }
    }
    // If we have yet to fulfill any requests and all of the requests
    // are trivial (i.e., require only a single MPI_Request to be
    // fulfilled), call MPI_Waitall directly.
    if (all_trivial_requests
        && num_outstanding_requests == (difference_type)completed.size()) {
      std::vector<MPI_Request> requests;
      requests.reserve(num_outstanding_requests);
      for (ForwardIterator current = first; current != last; ++current)
        requests.push_back(current->m_requests[0]);
      // Let MPI wait until all of these operations completes.
      BOOST_MPI_CHECK_RESULT(MPI_Waitall,
                             (num_outstanding_requests, &requests[0],
                              MPI_STATUSES_IGNORE));
      // Signal completion
      num_outstanding_requests = 0;
    }
  }
}
/**
* @brief Tests whether all non-blocking requests have completed.
*
* This routine takes in a set of requests stored in the iterator
* range @c [first,last) and determines whether all of these requests
* have been completed. However, due to limitations of the underlying
* MPI implementation, if any of the requests refers to a
* non-blocking send or receive of a serialized data type, @c
* test_all will always return the equivalent of @c false (i.e., the
* requests cannot all be finished at this time). This routine
* performs the same functionality as @c wait_all, except that this
* routine will not block. This routine provides functionality
* equivalent to @c MPI_Testall.
*
* @param first The iterator that denotes the beginning of the
* sequence of request objects.
*
* @param last The iterator that denotes the end of the sequence of
* request objects.
*
* @param out If provided and all requests hav been completed, an
* output iterator through which the status of each request will be
* emitted. The @c status objects are emitted in the same order as
* the requests are retrieved from @c [first,last).
*
* @returns If an @p out parameter was provided, the value @c out
* after all of the @c status objects have been emitted (if all
* requests were completed) or an empty @c optional<>. If no @p out
* parameter was provided, returns @c true if all requests have
* completed or @c false otherwise.
*/
/**
 * Test whether all requests in [first, last) have completed. If any
 * request is non-trivial, or MPI_Testall reports incompletion, returns
 * an empty optional; otherwise emits one status per request through
 * @p out and returns the advanced iterator.
 */
template<typename ForwardIterator, typename OutputIterator>
optional<OutputIterator>
test_all(ForwardIterator first, ForwardIterator last, OutputIterator out)
{
  std::vector<MPI_Request> requests;
  for (; first != last; ++first) {
    // If we have a non-trivial request, then no requests can be
    // completed.
    if (first->m_handler || first->m_requests[1] != MPI_REQUEST_NULL)
      return optional<OutputIterator>();
    requests.push_back(first->m_requests[0]);
  }
  // FIX: an empty range is trivially complete; previously we fell
  // through to &requests[0]/&stats[0] on empty vectors, which is
  // undefined behavior. Skip the MPI call and emit no statuses.
  if (requests.empty())
    return out;
  int flag = 0;
  int n = requests.size();
  std::vector<MPI_Status> stats(n);
  BOOST_MPI_CHECK_RESULT(MPI_Testall, (n, &requests[0], &flag, &stats[0]));
  if (flag) {
    // Everything completed: convert and emit the raw MPI statuses.
    for (int i = 0; i < n; ++i, ++out) {
      status stat;
      stat.m_status = stats[i];
      *out = stat;
    }
    return out;
  } else {
    return optional<OutputIterator>();
  }
}
/**
* \overload
*/
template<typename ForwardIterator>
bool
test_all(ForwardIterator first, ForwardIterator last)
{
std::vector<MPI_Request> requests;
for (; first != last; ++first) {
// If we have a non-trivial request, then no requests can be
// completed.
if (first->m_handler || first->m_requests[1] != MPI_REQUEST_NULL)
return false;
requests.push_back(first->m_requests[0]);
}
int flag = 0;
int n = requests.size();
BOOST_MPI_CHECK_RESULT(MPI_Testall,
(n, &requests[0], &flag, MPI_STATUSES_IGNORE));
return flag != 0;
}
/**
* @brief Wait until some non-blocking requests have completed.
*
* This routine takes in a set of requests stored in the iterator
* range @c [first,last) and waits until at least one of the requests
* has completed. It then completes all of the requests it can,
* partitioning the input sequence into pending requests followed by
* completed requests. If an output iterator is provided, @c status
* objects will be emitted for each of the completed requests. This
* routine provides functionality equivalent to @c MPI_Waitsome.
*
* @param first The iterator that denotes the beginning of the
* sequence of request objects.
*
* @param last The iterator that denotes the end of the sequence of
* request objects. This may not be equal to @c first.
*
* @param out If provided, the @c status objects corresponding to
* completed requests will be emitted through this output iterator.
* @returns If the @p out parameter was provided, a pair containing
* the output iterator @p out after all of the @c status objects have
* been written through it and an iterator referencing the first
* completed request. If no @p out parameter was provided, only the
* iterator referencing the first completed request will be emitted.
*/
template<typename BidirectionalIterator, typename OutputIterator>
std::pair<OutputIterator, BidirectionalIterator>
wait_some(BidirectionalIterator first, BidirectionalIterator last,
          OutputIterator out)
{
  using std::advance;
  // Nothing to wait for: no statuses are emitted.
  if (first == last)
    return std::make_pair(out, first);
  typedef typename std::iterator_traits<BidirectionalIterator>::difference_type
    difference_type;
  // Busy-wait, partitioning the range into [first, start_of_completed)
  // pending and [start_of_completed, last) completed; completed requests
  // are swapped to the tail as they finish.
  bool all_trivial_requests = true;
  difference_type n = 0;
  BidirectionalIterator current = first;
  BidirectionalIterator start_of_completed = last;
  while (true) {
    // Check if we have found a completed request.
    if (optional<status> result = current->test()) {
      using std::iter_swap;
      // Emit the resulting status object
      *out++ = *result;
      // We're expanding the set of completed requests
      --start_of_completed;
      if (current == start_of_completed) {
        // If we have hit the end of the list of pending
        // requests. Finish up by fixing the order of the completed
        // set to match the order in which we emitted status objects,
        // then return.
        std::reverse(start_of_completed, last);
        return std::make_pair(out, start_of_completed);
      }
      // Swap the request we just completed with the last request that
      // has not yet been tested. (current is deliberately not advanced:
      // the swapped-in request is tested next.)
      iter_swap(current, start_of_completed);
      continue;
    }
    // Check if this request (and all others before it) are "trivial"
    // requests, e.g., they can be represented with a single
    // MPI_Request.
    all_trivial_requests =
      all_trivial_requests
      && !current->m_handler
      && current->m_requests[1] == MPI_REQUEST_NULL;
    // Move to the next request.
    ++n;
    if (++current == start_of_completed) {
      if (start_of_completed != last) {
        // We have satisfied some requests. Make the order of the
        // completed requests match that of the status objects we've
        // already emitted and we're done.
        std::reverse(start_of_completed, last);
        return std::make_pair(out, start_of_completed);
      }
      // We have reached the end of the list. If all requests thus far
      // have been trivial, we can call MPI_Waitsome directly, because
      // it may be more efficient than our busy-wait semantics.
      if (all_trivial_requests) {
        std::vector<MPI_Request> requests;
        std::vector<int> indices(n);
        std::vector<MPI_Status> stats(n);
        requests.reserve(n);
        for (current = first; current != last; ++current)
          requests.push_back(current->m_requests[0]);
        // Let MPI wait until some of these operations complete.
        int num_completed;
        BOOST_MPI_CHECK_RESULT(MPI_Waitsome,
                               (n, &requests[0], &num_completed, &indices[0],
                                &stats[0]));
        // Translate the index-based result of MPI_Waitsome into a
        // partitioning on the requests.
        int current_offset = 0;
        current = first;
        for (int index = 0; index < num_completed; ++index, ++out) {
          using std::iter_swap;
          // Move "current" to the request object at this index
          advance(current, indices[index] - current_offset);
          current_offset = indices[index];
          // Emit the status object
          status stat;
          stat.m_status = stats[index];
          *out = stat;
          // Finish up the request and swap it into the "completed
          // requests" partition.
          current->m_requests[0] = requests[indices[index]];
          --start_of_completed;
          iter_swap(current, start_of_completed);
        }
        // We have satisfied some requests. Make the order of the
        // completed requests match that of the status objects we've
        // already emitted and we're done.
        std::reverse(start_of_completed, last);
        return std::make_pair(out, start_of_completed);
      }
      // There are some nontrivial requests, so we must continue our
      // busy waiting loop.
      // NOTE(review): unlike wait_any, all_trivial_requests is not reset
      // to true here, so the MPI_Waitsome fast path can never be taken on
      // a later pass — looks like a missed reset; confirm before changing.
      n = 0;
      current = first;
    }
  }
  // We cannot ever get here
  BOOST_ASSERT(false);
}
/**
* \overload
*/
template<typename BidirectionalIterator>
BidirectionalIterator
wait_some(BidirectionalIterator first, BidirectionalIterator last)
{
  using std::advance;
  // Nothing to wait for.
  if (first == last)
    return first;
  typedef typename std::iterator_traits<BidirectionalIterator>::difference_type
    difference_type;
  // Busy-wait, partitioning the range into [first, start_of_completed)
  // pending and [start_of_completed, last) completed (statuses are
  // discarded in this overload).
  bool all_trivial_requests = true;
  difference_type n = 0;
  BidirectionalIterator current = first;
  BidirectionalIterator start_of_completed = last;
  while (true) {
    // Check if we have found a completed request.
    if (optional<status> result = current->test()) {
      using std::iter_swap;
      // We're expanding the set of completed requests
      --start_of_completed;
      // If we have hit the end of the list of pending requests, we're
      // done.
      if (current == start_of_completed)
        return start_of_completed;
      // Swap the request we just completed with the last request that
      // has not yet been tested. (current is deliberately not advanced:
      // the swapped-in request is tested next.)
      iter_swap(current, start_of_completed);
      continue;
    }
    // Check if this request (and all others before it) are "trivial"
    // requests, e.g., they can be represented with a single
    // MPI_Request.
    all_trivial_requests =
      all_trivial_requests
      && !current->m_handler
      && current->m_requests[1] == MPI_REQUEST_NULL;
    // Move to the next request.
    ++n;
    if (++current == start_of_completed) {
      // If we have satisfied some requests, we're done.
      if (start_of_completed != last)
        return start_of_completed;
      // We have reached the end of the list. If all requests thus far
      // have been trivial, we can call MPI_Waitsome directly, because
      // it may be more efficient than our busy-wait semantics.
      if (all_trivial_requests) {
        std::vector<MPI_Request> requests;
        std::vector<int> indices(n);
        requests.reserve(n);
        for (current = first; current != last; ++current)
          requests.push_back(current->m_requests[0]);
        // Let MPI wait until some of these operations complete.
        int num_completed;
        BOOST_MPI_CHECK_RESULT(MPI_Waitsome,
                               (n, &requests[0], &num_completed, &indices[0],
                                MPI_STATUSES_IGNORE));
        // Translate the index-based result of MPI_Waitsome into a
        // partitioning on the requests.
        int current_offset = 0;
        current = first;
        for (int index = 0; index < num_completed; ++index) {
          using std::iter_swap;
          // Move "current" to the request object at this index
          advance(current, indices[index] - current_offset);
          current_offset = indices[index];
          // Finish up the request and swap it into the "completed
          // requests" partition.
          current->m_requests[0] = requests[indices[index]];
          --start_of_completed;
          iter_swap(current, start_of_completed);
        }
        // We have satisfied some requests, so we are done.
        return start_of_completed;
      }
      // There are some nontrivial requests, so we must continue our
      // busy waiting loop.
      // NOTE(review): as in the status-emitting overload, the trivial
      // flag is never reset to true here, so the MPI_Waitsome fast path
      // cannot trigger on later passes — confirm before changing.
      n = 0;
      current = first;
    }
  }
  // We cannot ever get here
  BOOST_ASSERT(false);
}
/**
* @brief Test whether some non-blocking requests have completed.
*
* This routine takes in a set of requests stored in the iterator
* range @c [first,last) and tests to see if any of the requests has
* completed. It completes all of the requests it can, partitioning
* the input sequence into pending requests followed by completed
* requests. If an output iterator is provided, @c status objects
* will be emitted for each of the completed requests. This routine
* is similar to @c wait_some, but does not wait until any requests
* have completed. This routine provides functionality equivalent to
* @c MPI_Testsome.
*
* @param first The iterator that denotes the beginning of the
* sequence of request objects.
*
* @param last The iterator that denotes the end of the sequence of
* request objects. This may not be equal to @c first.
*
* @param out If provided, the @c status objects corresponding to
* completed requests will be emitted through this output iterator.
* @returns If the @p out parameter was provided, a pair containing
* the output iterator @p out after all of the @c status objects have
* been written through it and an iterator referencing the first
* completed request. If no @p out parameter was provided, only the
* iterator referencing the first completed request will be emitted.
*/
/**
 * Single, non-blocking sweep over [first, last): each completed request
 * has its status emitted through @p out and is swapped into a
 * "completed" partition at the tail of the range. Returns the advanced
 * output iterator and the start of the completed partition.
 */
template<typename BidirectionalIterator, typename OutputIterator>
std::pair<OutputIterator, BidirectionalIterator>
test_some(BidirectionalIterator first, BidirectionalIterator last,
          OutputIterator out)
{
  BidirectionalIterator pending = first;
  BidirectionalIterator completed_begin = last;
  while (pending != completed_begin) {
    optional<status> result = pending->test();
    if (!result) {
      // Still pending; move on to the next request.
      ++pending;
      continue;
    }
    using std::iter_swap;
    // Emit the status, then grow the completed partition at the tail and
    // swap this request into it. "pending" is not advanced: the
    // swapped-in request is tested next.
    *out++ = *result;
    --completed_begin;
    iter_swap(pending, completed_begin);
  }
  // Put the completed partition back into the order in which the status
  // objects were emitted.
  std::reverse(completed_begin, last);
  return std::make_pair(out, completed_begin);
}
/**
* \overload
*/
/**
 * Single, non-blocking sweep over [first, last): each completed request
 * is swapped into a "completed" partition at the tail of the range
 * (statuses are discarded). Returns the start of that partition.
 */
template<typename BidirectionalIterator>
BidirectionalIterator
test_some(BidirectionalIterator first, BidirectionalIterator last)
{
  BidirectionalIterator pending = first;
  BidirectionalIterator completed_begin = last;
  while (pending != completed_begin) {
    optional<status> result = pending->test();
    if (!result) {
      // Still pending; move on to the next request.
      ++pending;
      continue;
    }
    using std::iter_swap;
    // Grow the completed partition at the tail and swap this request
    // into it. "pending" is not advanced: the swapped-in request is
    // tested next.
    --completed_begin;
    iter_swap(pending, completed_begin);
  }
  return completed_begin;
}
} } // end namespace boost::mpi
#endif // BOOST_MPI_NONBLOCKING_HPP

// ===== diff-viewer artifact: start of new file (322 lines) — test/external/boost/mpi/operations.hpp (vendored) =====
// Copyright (C) 2004 The Trustees of Indiana University.
// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
/** @file operations.hpp
*
* This header provides a mapping from function objects to @c MPI_Op
* constants used in MPI collective operations. It also provides
* several new function object types not present in the standard @c
* <functional> header that have direct mappings to @c MPI_Op.
*/
#ifndef BOOST_MPI_IS_MPI_OP_HPP
#define BOOST_MPI_IS_MPI_OP_HPP
#include <boost/mpi/config.hpp>
#include <boost/mpl/bool.hpp>
#include <boost/mpl/if.hpp>
#include <boost/mpl/and.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/utility/enable_if.hpp>
#include <functional>
namespace boost { namespace mpi {
template<typename Op, typename T> struct is_mpi_op;
/**
* @brief Determine if a function object type is commutative.
*
* This trait determines if an operation @c Op is commutative when
* applied to values of type @c T. Parallel operations such as @c
* reduce and @c prefix_sum can be implemented more efficiently with
* commutative operations. To mark an operation as commutative, users
* should specialize @c is_commutative and derive from the class @c
* mpl::true_.
*/
// Primary template: operations are assumed non-commutative unless a
// user specialization derives from mpl::true_.
template<typename Op, typename T>
struct is_commutative : mpl::false_ { };
/**************************************************************************
* Function objects for MPI operations not in <functional> header *
**************************************************************************/
/**
* @brief Compute the maximum of two values.
*
* This binary function object computes the maximum of the two values
* it is given. When used with MPI and a type @c T that has an
* associated, built-in MPI data type, translates to @c MPI_MAX.
*/
template<typename T>
struct maximum
{
  // Typedefs formerly inherited from std::binary_function, which was
  // deprecated in C++11 and removed in C++17. Spelling them out keeps
  // the public interface identical in every language mode.
  typedef T first_argument_type;
  typedef T second_argument_type;
  typedef T result_type;

  /** @returns the maximum of x and y (x on ties; uses only operator<). */
  const T& operator()(const T& x, const T& y) const
  {
    return x < y? y : x;
  }
};
/**
* @brief Compute the minimum of two values.
*
* This binary function object computes the minimum of the two values
* it is given. When used with MPI and a type @c T that has an
* associated, built-in MPI data type, translates to @c MPI_MIN.
*/
template<typename T>
struct minimum
{
  // Typedefs formerly inherited from std::binary_function, which was
  // deprecated in C++11 and removed in C++17. Spelling them out keeps
  // the public interface identical in every language mode.
  typedef T first_argument_type;
  typedef T second_argument_type;
  typedef T result_type;

  /** @returns the minimum of x and y (x on ties; uses only operator<). */
  const T& operator()(const T& x, const T& y) const
  {
    return x < y? x : y;
  }
};
/**
 * @brief Compute the bitwise AND of two integral values.
 *
 * This binary function object computes the bitwise AND of the two
 * values it is given. When used with MPI and a type @c T that has an
 * associated, built-in MPI data type, translates to @c MPI_BAND.
 */
template<typename T>
struct bitwise_and
{
  // Adaptable-binary-function typedefs, provided directly instead of
  // inheriting from std::binary_function (removed in C++17).
  typedef T first_argument_type;
  typedef T second_argument_type;
  typedef T result_type;

  /** @returns @c x & y. */
  T operator()(const T& x, const T& y) const
  {
    return x & y;
  }
};
/**
 * @brief Compute the bitwise OR of two integral values.
 *
 * This binary function object computes the bitwise OR of the two
 * values it is given. When used with MPI and a type @c T that has an
 * associated, built-in MPI data type, translates to @c MPI_BOR.
 */
template<typename T>
struct bitwise_or
{
  // Adaptable-binary-function typedefs, provided directly instead of
  // inheriting from std::binary_function (removed in C++17).
  typedef T first_argument_type;
  typedef T second_argument_type;
  typedef T result_type;

  /** @returns the @c x | y. */
  T operator()(const T& x, const T& y) const
  {
    return x | y;
  }
};
/**
 * @brief Compute the logical exclusive OR of two integral values.
 *
 * This binary function object computes the logical exclusive of the
 * two values it is given. When used with MPI and a type @c T that has
 * an associated, built-in MPI data type, translates to @c MPI_LXOR.
 */
template<typename T>
struct logical_xor
{
  // Adaptable-binary-function typedefs, provided directly instead of
  // inheriting from std::binary_function (removed in C++17).
  typedef T first_argument_type;
  typedef T second_argument_type;
  typedef T result_type;

  /** @returns the logical exclusive OR of x and y. */
  T operator()(const T& x, const T& y) const
  {
    // True exactly when one operand is truthy and the other is not;
    // the bool result converts to T (1 or 0 for arithmetic types).
    return (x || y) && !(x && y);
  }
};
/**
 * @brief Compute the bitwise exclusive OR of two integral values.
 *
 * This binary function object computes the bitwise exclusive OR of
 * the two values it is given. When used with MPI and a type @c T that
 * has an associated, built-in MPI data type, translates to @c
 * MPI_BXOR.
 */
template<typename T>
struct bitwise_xor
{
  // Adaptable-binary-function typedefs, provided directly instead of
  // inheriting from std::binary_function (removed in C++17).
  typedef T first_argument_type;
  typedef T second_argument_type;
  typedef T result_type;

  /** @returns @c x ^ y. */
  T operator()(const T& x, const T& y) const
  {
    return x ^ y;
  }
};
/**************************************************************************
* MPI_Op queries *
**************************************************************************/
/**
 * @brief Determine if a function object has an associated @c MPI_Op.
 *
 * This trait determines if a function object type @c Op, when used
 * with argument type @c T, has an associated @c MPI_Op. If so, @c
 * is_mpi_op<Op,T> will derive from @c mpl::true_ and will
 * contain a static member function @c op that takes no arguments but
 * returns the associated @c MPI_Op value. For instance, @c
 * is_mpi_op<std::plus<int>,int>::op() returns @c MPI_SUM.
 *
 * Users may specialize @c is_mpi_op for any other class templates
 * that map onto operations that have @c MPI_Op equivalences, such as
 * bitwise OR, logical and, or maximum. However, users are encouraged
 * to use the standard function objects in the @c functional and @c
 * boost/mpi/operations.hpp headers whenever possible. For
 * function objects that are class templates with a single template
 * parameter, it may be easier to specialize @c is_builtin_mpi_op.
 */
// Primary template: no associated MPI_Op.
template<typename Op, typename T>
struct is_mpi_op : public mpl::false_ { };
/// INTERNAL ONLY: maximum<T> maps to MPI_MAX for integer and
/// floating-point datatypes.
template<typename T>
struct is_mpi_op<maximum<T>, T>
  : public boost::mpl::or_<is_mpi_integer_datatype<T>,
                           is_mpi_floating_point_datatype<T> >
{
  static MPI_Op op() { return MPI_MAX; }
};

/// INTERNAL ONLY: minimum<T> maps to MPI_MIN for integer and
/// floating-point datatypes.
template<typename T>
struct is_mpi_op<minimum<T>, T>
  : public boost::mpl::or_<is_mpi_integer_datatype<T>,
                           is_mpi_floating_point_datatype<T> >
{
  static MPI_Op op() { return MPI_MIN; }
};

/// INTERNAL ONLY: std::plus<T> maps to MPI_SUM for integer,
/// floating-point, and complex datatypes.
template<typename T>
struct is_mpi_op<std::plus<T>, T>
  : public boost::mpl::or_<is_mpi_integer_datatype<T>,
                           is_mpi_floating_point_datatype<T>,
                           is_mpi_complex_datatype<T> >
{
  static MPI_Op op() { return MPI_SUM; }
};

/// INTERNAL ONLY: std::multiplies<T> maps to MPI_PROD for integer,
/// floating-point, and complex datatypes.
template<typename T>
struct is_mpi_op<std::multiplies<T>, T>
  : public boost::mpl::or_<is_mpi_integer_datatype<T>,
                           is_mpi_floating_point_datatype<T>,
                           is_mpi_complex_datatype<T> >
{
  static MPI_Op op() { return MPI_PROD; }
};

/// INTERNAL ONLY: std::logical_and<T> maps to MPI_LAND for integer
/// and logical datatypes.
template<typename T>
struct is_mpi_op<std::logical_and<T>, T>
  : public boost::mpl::or_<is_mpi_integer_datatype<T>,
                           is_mpi_logical_datatype<T> >
{
  static MPI_Op op() { return MPI_LAND; }
};

/// INTERNAL ONLY: std::logical_or<T> maps to MPI_LOR for integer
/// and logical datatypes.
template<typename T>
struct is_mpi_op<std::logical_or<T>, T>
  : public boost::mpl::or_<is_mpi_integer_datatype<T>,
                           is_mpi_logical_datatype<T> >
{
  static MPI_Op op() { return MPI_LOR; }
};

/// INTERNAL ONLY: logical_xor<T> maps to MPI_LXOR for integer and
/// logical datatypes.
template<typename T>
struct is_mpi_op<logical_xor<T>, T>
  : public boost::mpl::or_<is_mpi_integer_datatype<T>,
                           is_mpi_logical_datatype<T> >
{
  static MPI_Op op() { return MPI_LXOR; }
};

/// INTERNAL ONLY: bitwise_and<T> maps to MPI_BAND for integer and
/// byte datatypes.
template<typename T>
struct is_mpi_op<bitwise_and<T>, T>
  : public boost::mpl::or_<is_mpi_integer_datatype<T>,
                           is_mpi_byte_datatype<T> >
{
  static MPI_Op op() { return MPI_BAND; }
};

/// INTERNAL ONLY: bitwise_or<T> maps to MPI_BOR for integer and
/// byte datatypes.
template<typename T>
struct is_mpi_op<bitwise_or<T>, T>
  : public boost::mpl::or_<is_mpi_integer_datatype<T>,
                           is_mpi_byte_datatype<T> >
{
  static MPI_Op op() { return MPI_BOR; }
};

/// INTERNAL ONLY: bitwise_xor<T> maps to MPI_BXOR for integer and
/// byte datatypes.
template<typename T>
struct is_mpi_op<bitwise_xor<T>, T>
  : public boost::mpl::or_<is_mpi_integer_datatype<T>,
                           is_mpi_byte_datatype<T> >
{
  static MPI_Op op() { return MPI_BXOR; }
};
namespace detail {
  // A helper class used to create user-defined MPI_Ops. RAII wrapper:
  // the constructor registers the operation with MPI_Op_create and the
  // destructor releases it with MPI_Op_free.
  //
  // NOTE(review): op_ptr is a static member shared by all instances of
  // user_op<Op, T>, so only one user_op per (Op, T) pair can safely be
  // live at a time — confirm callers never overlap two instances.
  template<typename Op, typename T>
  class user_op
  {
  public:
    // Registers `perform` as the MPI reduction callback; commutativity
    // is taken from the is_commutative<Op, T> trait.
    explicit user_op(Op& op)
    {
      BOOST_MPI_CHECK_RESULT(MPI_Op_create,
                             (&user_op<Op, T>::perform,
                              is_commutative<Op, T>::value,
                              &mpi_op));
      op_ptr = &op;
    }

    ~user_op()
    {
      if (std::uncaught_exception()) {
        // Ignore failure cases: there are obviously other problems
        // already, and we don't want to cause program termination if
        // MPI_Op_free fails.
        MPI_Op_free(&mpi_op);
      } else {
        BOOST_MPI_CHECK_RESULT(MPI_Op_free, (&mpi_op));
      }
    }

    // The MPI handle to pass to reduction calls (e.g. MPI_Reduce).
    MPI_Op& get_mpi_op()
    {
      return mpi_op;
    }

  private:
    MPI_Op mpi_op;
    static Op* op_ptr;

    // Callback invoked by MPI: combines invec[i] with outvec[i] via the
    // user's function object, writing the results back into outvec.
    // NOTE(review): uses std::transform — assumes <algorithm> is pulled
    // in by an earlier include; confirm.
    static void BOOST_MPI_CALLING_CONVENTION perform(void* vinvec, void* voutvec, int* plen, MPI_Datatype*)
    {
      T* invec = static_cast<T*>(vinvec);
      T* outvec = static_cast<T*>(voutvec);
      std::transform(invec, invec + *plen, outvec, outvec, *op_ptr);
    }
  };

  template<typename Op, typename T> Op* user_op<Op, T>::op_ptr = 0;
} // end namespace detail
} } // end namespace boost::mpi
#endif // BOOST_MPI_GET_MPI_OP_HPP

View File

@@ -0,0 +1,146 @@
// (C) Copyright 2005 Matthias Troyer
// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
// Douglas Gregor
/** @file packed_iarchive.hpp
 *
 * This header provides the facilities for unpacking Serializable
 * data types from a buffer using @c MPI_Unpack. The buffers are
 * typically received via MPI and have been packed either via the
 * facilities in @c packed_oarchive.hpp or @c MPI_Pack.
 */
#ifndef BOOST_MPI_PACKED_IARCHIVE_HPP
#define BOOST_MPI_PACKED_IARCHIVE_HPP
#include <boost/mpi/datatype.hpp>
#include <boost/archive/detail/auto_link_archive.hpp>
#include <boost/archive/detail/common_iarchive.hpp>
#include <boost/archive/shared_ptr_helper.hpp>
#include <boost/mpi/detail/packed_iprimitive.hpp>
#include <boost/mpi/detail/binary_buffer_iprimitive.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/collection_size_type.hpp>
#include <boost/serialization/item_version_type.hpp>
#include <boost/assert.hpp>
namespace boost { namespace mpi {
// Select the input primitive implementation: a raw binary buffer copy
// when BOOST_MPI_HOMOGENEOUS is defined, otherwise the MPI_Unpack-based
// packed primitive.
#ifdef BOOST_MPI_HOMOGENEOUS
typedef binary_buffer_iprimitive iprimitive;
#else
typedef packed_iprimitive iprimitive;
#endif
/** @brief An archive that unpacks binary data from an MPI buffer.
 *
 * The @c packed_iarchive class is an Archiver (as in the
 * Boost.Serialization library) that unpacks binary data from a
 * buffer received via MPI. It can operate on any Serializable data
 * type; through @c iprimitive it uses the @c MPI_Unpack facilities of
 * the underlying MPI implementation to perform deserialization.
 */
class BOOST_MPI_DECL packed_iarchive
  : public iprimitive
  , public archive::detail::common_iarchive<packed_iarchive>
  , public archive::detail::shared_ptr_helper
{
public:
  /**
   * Construct a @c packed_iarchive that reads from the given
   * user-supplied buffer.
   *
   * @param comm The communicator over which this archive's data was
   * (or will be) received.
   *
   * @param b A user-defined buffer that contains the binary
   * representation of serialized objects to be loaded.
   *
   * @param flags Control the serialization of the data types. Refer
   * to the Boost.Serialization documentation before changing the
   * default flags.
   *
   * @param position Set the offset into buffer @p b at which
   * deserialization will begin.
   */
  packed_iarchive(MPI_Comm const & comm, buffer_type & b, unsigned int flags = boost::archive::no_header, int position = 0)
        : iprimitive(b,comm,position),
          archive::detail::common_iarchive<packed_iarchive>(flags)
        {}

  /**
   * Construct a @c packed_iarchive that reads from an internally
   * managed buffer.
   *
   * @param comm The communicator over which this archive's data will
   * be received.
   *
   * @param s The size of the buffer to be received.
   *
   * @param flags Control the serialization of the data types. Refer
   * to the Boost.Serialization documentation before changing the
   * default flags.
   */
  // NOTE(review): internal_buffer_ is declared after the bases, so the
  // base iprimitive receives a reference to a not-yet-constructed
  // member. This is safe only if iprimitive merely stores the
  // reference in its constructor — confirm.
  packed_iarchive
          ( MPI_Comm const & comm , std::size_t s=0,
           unsigned int flags = boost::archive::no_header)
         : iprimitive(internal_buffer_,comm)
         , archive::detail::common_iarchive<packed_iarchive>(flags)
         , internal_buffer_(s)
        {}

  // Load everything else in the usual way, forwarding on to the base class.
  template<class T>
  void load_override(T& x, int version, mpl::false_)
  {
    archive::detail::common_iarchive<packed_iarchive>::load_override(x,version);
  }

  // Load it directly using the primitives.
  template<class T>
  void load_override(T& x, int /*version*/, mpl::true_)
  {
    iprimitive::load(x);
  }

  // Load all supported datatypes directly; dispatch on whether the
  // array optimization applies to (const-stripped) T.
  template<class T>
  void load_override(T& x, int version)
  {
    typedef typename mpl::apply1<use_array_optimization
      , BOOST_DEDUCED_TYPENAME remove_const<T>::type
    >::type use_optimized;
    load_override(x, version, use_optimized());
  }

  // Input archives need to ignore the optional class-id information.
  void load_override(archive::class_id_optional_type & /*t*/, int){}

  // Read a class name as a std::string and copy it into the
  // fixed-size class_name_type buffer.
  // NOTE(review): relies on std::memcpy, i.e. <cstring> being pulled
  // in transitively — confirm.
  void load_override(archive::class_name_type & t, int)
  {
    std::string cn;
    cn.reserve(BOOST_SERIALIZATION_MAX_KEY_SIZE);
    * this->This() >> cn;
    std::memcpy(t, cn.data(), cn.size());
    // borland tweak
    t.t[cn.size()] = '\0';
  }

private:
  /// An internal buffer to be used when the user does not supply his
  /// own buffer.
  buffer_type internal_buffer_;
};
} } // end namespace boost::mpi
BOOST_BROKEN_COMPILER_TYPE_TRAITS_SPECIALIZATION(boost::mpi::packed_iarchive)
BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::packed_iarchive)
BOOST_SERIALIZATION_USE_ARRAY_OPTIMIZATION(boost::mpi::packed_iarchive)
#endif // BOOST_MPI_PACKED_IARCHIVE_HPP

View File

@@ -0,0 +1,134 @@
// (C) Copyright 2005 Matthias Troyer
// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
// Douglas Gregor
/** @file packed_oarchive.hpp
 *
 * This header provides the facilities for packing Serializable data
 * types into a buffer using @c MPI_Pack. The buffers can then be
 * transmitted via MPI and then be unpacked either via the facilities
 * in @c packed_iarchive.hpp or @c MPI_Unpack.
 */
#ifndef BOOST_MPI_PACKED_OARCHIVE_HPP
#define BOOST_MPI_PACKED_OARCHIVE_HPP
#include <boost/mpi/datatype.hpp>
#include <boost/archive/detail/auto_link_archive.hpp>
#include <boost/archive/detail/common_oarchive.hpp>
#include <boost/archive/shared_ptr_helper.hpp>
#include <boost/mpi/detail/packed_oprimitive.hpp>
#include <boost/mpi/detail/binary_buffer_oprimitive.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/collection_size_type.hpp>
#include <boost/serialization/item_version_type.hpp>
namespace boost { namespace mpi {
// Select the output primitive implementation: a raw binary buffer copy
// when BOOST_MPI_HOMOGENEOUS is defined, otherwise the MPI_Pack-based
// packed primitive.
#ifdef BOOST_MPI_HOMOGENEOUS
typedef binary_buffer_oprimitive oprimitive;
#else
typedef packed_oprimitive oprimitive;
#endif
/** @brief An archive that packs binary data into an MPI buffer.
 *
 * The @c packed_oarchive class is an Archiver (as in the
 * Boost.Serialization library) that packs binary data into a buffer
 * for transmission via MPI. It can operate on any Serializable data
 * type; through @c oprimitive it uses the @c MPI_Pack facilities of
 * the underlying MPI implementation to perform serialization.
 */
class BOOST_MPI_DECL packed_oarchive
  : public oprimitive
  , public archive::detail::common_oarchive<packed_oarchive>
  , public archive::detail::shared_ptr_helper
{
public:
  /**
   * Construct a @c packed_oarchive that writes into the given
   * user-supplied buffer.
   *
   * @param comm The communicator over which this archive will be
   * sent.
   *
   * @param b A user-defined buffer that will be filled with the
   * binary representation of serialized objects.
   *
   * @param flags Control the serialization of the data types. Refer
   * to the Boost.Serialization documentation before changing the
   * default flags.
   */
  packed_oarchive( MPI_Comm const & comm, buffer_type & b, unsigned int flags = boost::archive::no_header)
         : oprimitive(b,comm),
           archive::detail::common_oarchive<packed_oarchive>(flags)
        {}

  /**
   * Construct a @c packed_oarchive that writes into an internally
   * managed buffer.
   *
   * @param comm The communicator over which this archive will be
   * sent.
   *
   * @param flags Control the serialization of the data types. Refer
   * to the Boost.Serialization documentation before changing the
   * default flags.
   */
  // NOTE(review): internal_buffer_ is declared after the bases, so
  // oprimitive receives a reference to a not-yet-constructed member;
  // safe only if the constructor merely stores the reference — confirm.
  packed_oarchive ( MPI_Comm const & comm, unsigned int flags =  boost::archive::no_header)
         : oprimitive(internal_buffer_,comm),
           archive::detail::common_oarchive<packed_oarchive>(flags)
        {}

  // Save everything else in the usual way, forwarding on to the base class.
  template<class T>
  void save_override(T const& x, int version, mpl::false_)
  {
    archive::detail::common_oarchive<packed_oarchive>::save_override(x,version);
  }

  // Save it directly using the primitives.
  template<class T>
  void save_override(T const& x, int /*version*/, mpl::true_)
  {
    oprimitive::save(x);
  }

  // Save all supported datatypes directly; dispatch on whether the
  // array optimization applies to T.
  template<class T>
  void save_override(T const& x, int version)
  {
    typedef typename mpl::apply1<use_array_optimization,T>::type use_optimized;
    save_override(x, version, use_optimized());
  }

  // Output archives skip the optional class-id information.
  void save_override(const archive::class_id_optional_type & /*t*/, int){}

  // Explicitly convert to std::string to avoid compile ambiguities.
  void save_override(const archive::class_name_type & t, int){
    const std::string s(t);
    * this->This() << s;
  }

private:
  /// An internal buffer to be used when the user does not supply his
  /// own buffer.
  buffer_type internal_buffer_;
};
} } // end namespace boost::mpi
// required by export
BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::packed_oarchive)
BOOST_SERIALIZATION_USE_ARRAY_OPTIMIZATION(boost::mpi::packed_oarchive)
#endif // BOOST_MPI_PACKED_OARCHIVE_HPP

79
test/external/boost/mpi/python.hpp vendored Normal file
View File

@@ -0,0 +1,79 @@
// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
#ifndef BOOST_MPI_PYTHON_HPP
#define BOOST_MPI_PYTHON_HPP
#include <boost/python/object.hpp>
/** @file python.hpp
*
* This header interacts with the Python bindings for Boost.MPI. The
* routines in this header can be used to register user-defined and
* library-defined data types with Boost.MPI for efficient
* (de-)serialization and separate transmission of skeletons and
* content.
*
*/
namespace boost { namespace mpi { namespace python {
/**
 * @brief Register the type T for direct serialization within Boost.MPI.
 *
 * The @c register_serialized function registers a C++ type for direct
 * serialization within Boost.MPI. Direct serialization elides the use
 * of the Python @c pickle package when serializing Python objects
 * that represent C++ values. Direct serialization can be beneficial
 * both to improve serialization performance (Python pickling can be
 * very inefficient) and to permit serialization for Python-wrapped
 * C++ objects that do not support pickling.
 *
 * @param value A sample value of the type @c T. This may be used
 * to compute the Python type associated with the C++ type @c T.
 *
 * @param type The Python type associated with the C++ type @c
 * T. If not provided, it will be computed from the same value @p
 * value.
 */
template<typename T>
void
register_serialized(const T& value = T(), PyTypeObject* type = 0);
/**
 * @brief Registers a type for use with the skeleton/content mechanism
 * in Python.
 *
 * The skeleton/content mechanism can only be used from Python with
 * C++ types that have previously been registered via a call to this
 * function. Both the sender and the receiver must register the
 * type. It is permitted to call this function multiple times for the
 * same type @c T, but only one call per process per type is
 * required. The type @c T must be Serializable.
 *
 * @param value A sample object of type T that will be used to
 * determine the Python type associated with T, if @p type is not
 * specified.
 *
 * @param type The Python type associated with the C++ type @c
 * T. If not provided, it will be computed from the same value @p
 * value.
 */
template<typename T>
void
register_skeleton_and_content(const T& value = T(), PyTypeObject* type = 0);
} } } // end namespace boost::mpi::python
#ifndef BOOST_MPI_PYTHON_FORWARD_ONLY
# include <boost/mpi/python/serialize.hpp>
# include <boost/mpi/python/skeleton_and_content.hpp>
#else
# undef BOOST_MPI_PYTHON_FORWARD_ONLY
#endif
#endif // BOOST_MPI_PYTHON_HPP

View File

@@ -0,0 +1,47 @@
// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/** @file config.hpp
*
* This header provides auto-linking and configuration support for the
* Boost.MPI python bindings.
*/
#ifndef BOOST_MPI_PYTHON_CONFIG_HPP
#define BOOST_MPI_PYTHON_CONFIG_HPP
#include <boost/mpi/config.hpp>
/*****************************************************************************
* *
* DLL import/export options *
* *
*****************************************************************************/
// Define BOOST_MPI_PYTHON_DECL as dllexport when building the library
// as a DLL and dllimport when consuming it; static builds opt out via
// BOOST_MPI_PYTHON_STATIC_LINK.
#if defined(BOOST_HAS_DECLSPEC) && (defined(BOOST_MPI_PYTHON_DYN_LINK) || defined(BOOST_ALL_DYN_LINK)) && !defined(BOOST_MPI_PYTHON_STATIC_LINK)
#  if defined(BOOST_MPI_PYTHON_SOURCE)
#     define BOOST_MPI_PYTHON_DECL __declspec(dllexport)
#     define BOOST_MPI_PYTHON_BUILD_DLL
#  else
#     define BOOST_MPI_PYTHON_DECL __declspec(dllimport)
#  endif
#endif

// Fall back to an empty declaration specifier on platforms/builds
// without declspec-style linkage.
#ifndef BOOST_MPI_PYTHON_DECL
#  define BOOST_MPI_PYTHON_DECL
#endif

// Boost auto-linking: request the boost_mpi_python library unless the
// user (or this build) opted out.
#if !defined(BOOST_MPI_PYTHON_NO_LIB) && !defined(BOOST_MPI_PYTHON_SOURCE) && !defined(BOOST_ALL_NO_LIB)
#  define BOOST_LIB_NAME boost_mpi_python
#  if defined(BOOST_MPI_PYTHON_DYN_LINK) || defined(BOOST_ALL_DYN_LINK)
#    define BOOST_DYN_LINK
#  endif
#  ifdef BOOST_MPI_PYTHON_DIAG
#    define BOOST_LIB_DIAGNOSTIC
#  endif
#  include <boost/config/auto_link.hpp>
#endif
#endif // BOOST_MPI_PYTHON_CONFIG_HPP

View File

@@ -0,0 +1,539 @@
// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
/** @file serialize.hpp
*
* This file provides Boost.Serialization support for Python objects
* within Boost.MPI. Python objects can be serialized in one of two
* ways. The default serialization method involves using the Python
* "pickle" module to pickle the Python objects, transmits the
* pickled representation, and unpickles the result when
* received. For C++ types that have been exposed to Python and
* registered with register_serialized(), objects are directly
 * serialized for transmission, skipping the pickling step.
*/
#ifndef BOOST_MPI_PYTHON_SERIALIZE_HPP
#define BOOST_MPI_PYTHON_SERIALIZE_HPP
#include <boost/mpi/python/config.hpp>
#include <boost/python/object.hpp>
#include <boost/python/str.hpp>
#include <boost/python/extract.hpp>
#include <memory>
#include <map>
#include <vector>
#include <boost/function/function3.hpp>
#include <boost/mpl/bool.hpp>
#include <boost/mpl/if.hpp>
#include <boost/serialization/split_free.hpp>
#include <boost/serialization/array.hpp>
#include <boost/assert.hpp>
#include <boost/type_traits/is_fundamental.hpp>
#define BOOST_MPI_PYTHON_FORWARD_ONLY
#include <boost/mpi/python.hpp>
/************************************************************************
* Boost.Python Serialization Section *
************************************************************************/
#if !defined(BOOST_NO_SFINAE) && !defined(BOOST_NO_IS_CONVERTIBLE)
/**
 * @brief Declare IArchiver and OArchiver as Boost.Serialization
 * archives that can be used for Python objects.
 *
 * This macro can only be expanded from the global namespace. It only
 * requires that the archivers be forward-declared. IArchiver and
 * OArchiver will only support serialization of Python objects by
 * pickling them. If the archiver pair should also support "direct"
 * serialization (for C++ types), use
 * BOOST_PYTHON_DIRECT_SERIALIZATION_ARCHIVE instead.
 */
#  define BOOST_PYTHON_SERIALIZATION_ARCHIVE(IArchiver, OArchiver)        \
namespace boost { namespace python { namespace api {    \
  template<typename R, typename T>                      \
  struct enable_binary< IArchiver , R, T> {};           \
                                                        \
  template<typename R, typename T>                      \
  struct enable_binary< OArchiver , R, T> {};           \
} } }
# else
#  define BOOST_PYTHON_SERIALIZATION_ARCHIVE(IArchiver, OArchiver)
#endif
/**
 * @brief Declare IArchiver and OArchiver as Boost.Serialization
 * archives that can be used for Python objects and C++ objects
 * wrapped in Python.
 *
 * This macro can only be expanded from the global namespace. It only
 * requires that IArchiver and OArchiver be forward-declared. However,
 * note that you will also need to write
 * BOOST_PYTHON_DIRECT_SERIALIZATION_ARCHIVE_IMPL(IArchiver,
 * OArchiver) in one of your translation units to provide the
 * out-of-line definition of the serialization table accessor.
 */
#define BOOST_PYTHON_DIRECT_SERIALIZATION_ARCHIVE(IArchiver, OArchiver)   \
BOOST_PYTHON_SERIALIZATION_ARCHIVE(IArchiver, OArchiver)                  \
namespace boost { namespace python { namespace detail {                   \
  template<>                                                              \
  BOOST_MPI_PYTHON_DECL direct_serialization_table< IArchiver , OArchiver >& \
  get_direct_serialization_table< IArchiver , OArchiver >();              \
}                                                                         \
                                                                          \
template<>                                                                \
struct has_direct_serialization< IArchiver , OArchiver> : mpl::true_ { }; \
                                                                          \
template<>                                                                \
struct output_archiver< IArchiver > { typedef OArchiver type; };          \
                                                                          \
template<>                                                                \
struct input_archiver< OArchiver > { typedef IArchiver type; };           \
} }
/**
 * @brief Define the implementation for Boost.Serialization archivers
 * that can be used for Python objects and C++ objects wrapped in
 * Python.
 *
 * This macro can only be expanded from the global namespace. It only
 * requires that IArchiver and OArchiver be forward-declared. Before
 * using this macro, you will need to declare IArchiver and OArchiver
 * as direct serialization archives with
 * BOOST_PYTHON_DIRECT_SERIALIZATION_ARCHIVE(IArchiver, OArchiver).
 */
#define BOOST_PYTHON_DIRECT_SERIALIZATION_ARCHIVE_IMPL(IArchiver, OArchiver) \
namespace boost { namespace python { namespace detail {                   \
  template                                                                \
  class BOOST_MPI_PYTHON_DECL direct_serialization_table< IArchiver , OArchiver >; \
                                                                          \
  template<>                                                              \
  BOOST_MPI_PYTHON_DECL                                                   \
  direct_serialization_table< IArchiver , OArchiver >&                    \
  get_direct_serialization_table< IArchiver , OArchiver >( )              \
  {                                                                       \
    static direct_serialization_table< IArchiver, OArchiver > table;      \
    return table;                                                         \
  }                                                                       \
} } }
namespace boost { namespace python {
/**
 * INTERNAL ONLY
 *
 * Provides access to the Python "pickle" module from within C++.
 */
class BOOST_MPI_PYTHON_DECL pickle {
  struct data_t;

public:
  // Pickle @p obj; @p protocol matches the pickle module's protocol
  // argument (-1 selects the highest available protocol).
  static str dumps(object obj, int protocol = -1);

  // Reconstruct an object from a pickled string.
  static object loads(str s);

private:
  // Lazily initializes the cached handle to the pickle module.
  static void initialize_data();
  static data_t* data;
};
/**
 * @brief Whether the input/output archiver pair has "direct"
 * serialization for C++ objects exposed in Python.
 *
 * Users do not typically need to specialize this trait, as it will be
 * specialized as part of the macro
 * BOOST_PYTHON_DIRECT_SERIALIZATION_ARCHIVE.
 */
// Primary template: assume no direct serialization.
template<typename IArchiver, typename OArchiver>
struct has_direct_serialization : mpl::false_ { };
/**
 * @brief A metafunction that determines the output archiver for the
 * given input archiver.
 *
 * Users do not typically need to specialize this trait, as it will be
 * specialized as part of the macro
 * BOOST_PYTHON_DIRECT_SERIALIZATION_ARCHIVE.
 */
// Primary template intentionally empty; specializations supply ::type.
template<typename IArchiver> struct output_archiver { };

/**
 * @brief A metafunction that determines the input archiver for the
 * given output archiver.
 *
 * Users do not typically need to specialize this trait, as it will be
 * specialized as part of the macro
 * BOOST_PYTHON_DIRECT_SERIALIZATION_ARCHIVE.
 *
 */
// Primary template intentionally empty; specializations supply ::type.
template<typename OArchiver> struct input_archiver { };
namespace detail {
/**
 * INTERNAL ONLY
 *
 * This class contains the direct-serialization code for the given
 * IArchiver/OArchiver pair. It is intended to be used as a
 * singleton class, and will be accessed when (de-)serializing a
 * Boost.Python object with an archiver that supports direct
 * serializations. Do not create instances of this class directly:
 * instead, use get_direct_serialization_table.
 */
template<typename IArchiver, typename OArchiver>
class BOOST_MPI_PYTHON_DECL direct_serialization_table
{
public:
  // Callable that writes a Python object into an OArchiver.
  typedef boost::function3<void, OArchiver&, const object&, const unsigned int>
    saver_t;
  // Callable that reads a Python object out of an IArchiver.
  typedef boost::function3<void, IArchiver&, object&, const unsigned int>
    loader_t;
  // Maps a Python type to its (descriptor, saver) pair; descriptors
  // start at 1 so that 0 can mean "no direct serializer — pickled".
  typedef std::map<PyTypeObject*, std::pair<int, saver_t> > savers_t;
  typedef std::map<int, loader_t> loaders_t;

  /**
   * Retrieve the saver (serializer) associated with the Python
   * object @p obj.
   *
   * @param obj The object we want to save. Only its (Python) type
   * is important.
   *
   * @param descriptor The value of the descriptor associated to
   * the returned saver. Will be set to zero if no saver was found
   * for @p obj.
   *
   * @returns a function object that can be used to serialize this
   * object (and other objects of the same type), if possible. If
   * no saver can be found, returns an empty function object.
   */
  saver_t saver(const object& obj, int& descriptor)
  {
    typename savers_t::iterator pos = savers.find(obj.ptr()->ob_type);
    if (pos != savers.end()) {
      descriptor = pos->second.first;
      return pos->second.second;
    }
    else {
      descriptor = 0;
      return saver_t();
    }
  }

  /**
   * Retrieve the loader (deserializer) associated with the given
   * descriptor.
   *
   * @param descriptor The descriptor number provided by saver()
   * when determining the saver for this type.
   *
   * @returns a function object that can be used to deserialize an
   * object whose type is the same as that corresponding to the
   * descriptor. If the descriptor is unknown, the return value
   * will be an empty function object.
   */
  loader_t loader(int descriptor)
  {
    typename loaders_t::iterator pos = loaders.find(descriptor);
    if (pos != loaders.end())
      return pos->second;
    else
      return loader_t();
  }

  /**
   * Register the type T for direct serialization using the default
   * extract/construct based saver and loader.
   *
   * @param value A sample value of the type @c T. This may be used
   * to compute the Python type associated with the C++ type @c T.
   *
   * @param type The Python type associated with the C++ type @c
   * T. If not provided, it will be computed from the same value @p
   * value.
   */
  template<typename T>
  void register_type(const T& value = T(), PyTypeObject* type = 0)
  {
    // If the user did not provide us with a Python type, figure it
    // out for ourselves.
    if (!type) {
      object obj(value);
      type = obj.ptr()->ob_type;
    }

    register_type(default_saver<T>(), default_loader<T>(type), value, type);
  }

  /**
   * Register the type T for direct serialization with user-supplied
   * saver and loader function objects.
   *
   * @param saver A function object that will serialize a
   * Boost.Python object (that represents a C++ object of type @c
   * T) to an @c OArchive.
   *
   * @param loader A function object that will deserialize from an
   * @c IArchive into a Boost.Python object that represents a C++
   * object of type @c T.
   *
   * @param value A sample value of the type @c T. This may be used
   * to compute the Python type associated with the C++ type @c T.
   *
   * @param type The Python type associated with the C++ type @c
   * T. If not provided, it will be computed from the same value @p
   * value.
   */
  template<typename T>
  void register_type(const saver_t& saver, const loader_t& loader,
                     const T& value = T(), PyTypeObject* type = 0)
  {
    // If the user did not provide us with a Python type, figure it
    // out for ourselves.
    if (!type) {
      object obj(value);
      type = obj.ptr()->ob_type;
    }

    // Descriptors are 1-based; registering the same type twice is a
    // no-op.
    int descriptor = savers.size() + 1;
    if (savers.find(type) != savers.end())
      return;

    savers[type] = std::make_pair(descriptor, saver);
    loaders[descriptor] = loader;
  }

protected:
  // Default saver: extract the C++ value from the Python object and
  // stream it into the archive.
  template<typename T>
  struct default_saver {
    void operator()(OArchiver& ar, const object& obj, const unsigned int) {
      T value = extract<T>(obj)();
      ar << value;
    }
  };

  // Default loader: deserialize in place when the target object
  // already wraps a T of the expected Python type; otherwise build a
  // fresh T and wrap it.
  template<typename T>
  struct default_loader {
    default_loader(PyTypeObject* type) : type(type) { }

    void operator()(IArchiver& ar, object& obj, const unsigned int) {
      // If we can, extract the object in place.
      if (!is_fundamental<T>::value && obj && obj.ptr()->ob_type == type) {
        ar >> extract<T&>(obj)();
      } else {
        T value;
        ar >> value;
        obj = object(value);
      }
    }

  private:
    PyTypeObject* type;
  };

  savers_t savers;
  loaders_t loaders;
};
/**
 * @brief Retrieve the direct-serialization table for an
 * IArchiver/OArchiver pair.
 *
 * This function is responsible for returning a reference to the
 * singleton direct-serialization table. Its primary template is
 * left undefined, to force the use of an explicit specialization
 * with a definition in a single translation unit. Use the macro
 * BOOST_PYTHON_DIRECT_SERIALIZATION_ARCHIVE_IMPL to define this
 * explicit specialization.
 */
template<typename IArchiver, typename OArchiver>
direct_serialization_table<IArchiver, OArchiver>&
get_direct_serialization_table();
} // end namespace detail
/**
* @brief Register the type T for direct serialization.
*
* The @c register_serialized function registers a C++ type for direct
* serialization with the given @c IArchiver/@c OArchiver pair. Direct
* serialization elides the use of the Python @c pickle package when
* serializing Python objects that represent C++ values. Direct
* serialization can be beneficial both to improve serialization
* performance (Python pickling can be very inefficient) and to permit
* serialization for Python-wrapped C++ objects that do not support
* pickling.
*
* @param value A sample value of the type @c T. This may be used
* to compute the Python type associated with the C++ type @c T.
*
* @param type The Python type associated with the C++ type @c
* T. If not provided, it will be computed from the same value @p
* value.
*/
template<typename IArchiver, typename OArchiver, typename T>
void
register_serialized(const T& value = T(), PyTypeObject* type = 0)
{
detail::direct_serialization_table<IArchiver, OArchiver>& table =
detail::get_direct_serialization_table<IArchiver, OArchiver>();
table.register_type(value, type);
}
namespace detail {
/// Save a Python object by pickling it.
///
/// Writes a length prefix followed by the raw bytes of the pickled
/// representation produced by the Python "pickle" module.
template<typename Archiver>
void
save_impl(Archiver& ar, const boost::python::object& obj,
          const unsigned int /*version*/,
          mpl::false_ /*has_direct_serialization*/)
{
  boost::python::str py_string = boost::python::pickle::dumps(obj);
  int len = boost::python::extract<int>(py_string.attr("__len__")());
  const char* string = boost::python::extract<const char*>(py_string);
  // The pickle data is binary; `len` (not strlen) determines how many
  // bytes are written.
  ar << len << boost::serialization::make_array(string, len);
}
/// Try to save a Python object by directly serializing it; fall back
/// on pickling if required.
///
/// The wire format is the saver's descriptor (0 meaning "pickled")
/// followed by the payload, so load_impl can dispatch symmetrically.
template<typename Archiver>
void
save_impl(Archiver& ar, const boost::python::object& obj,
          const unsigned int version,
          mpl::true_ /*has_direct_serialization*/)
{
  typedef Archiver OArchiver;
  typedef typename input_archiver<OArchiver>::type IArchiver;
  typedef typename direct_serialization_table<IArchiver, OArchiver>::saver_t
    saver_t;

  direct_serialization_table<IArchiver, OArchiver>& table =
    get_direct_serialization_table<IArchiver, OArchiver>();

  int descriptor = 0;
  if (saver_t saver = table.saver(obj, descriptor)) {
    ar << descriptor;
    saver(ar, obj, version);
  } else {
    // Pickle it: descriptor is 0 here, signalling the pickled path.
    ar << descriptor;
    detail::save_impl(ar, obj, version, mpl::false_());
  }
}
/// Load a Python object by unpickling it
template<typename Archiver>
void
load_impl(Archiver& ar, boost::python::object& obj,
const unsigned int /*version*/,
mpl::false_ /*has_direct_serialization*/)
{
int len;
ar >> len;
std::auto_ptr<char> string(new char[len]);
ar >> boost::serialization::make_array(string.get(), len);
boost::python::str py_string(string.get(), len);
obj = boost::python::pickle::loads(py_string);
}
/// Deserialize a Python object, dispatching on the descriptor written
/// by the saver: a nonzero descriptor selects a registered direct
/// loader, zero means the payload is a pickle stream.
template<typename Archiver>
void
load_impl(Archiver& ar, boost::python::object& obj,
          const unsigned int version,
          mpl::true_ /*has_direct_serialization*/)
{
  typedef Archiver IArchiver;
  typedef typename output_archiver<IArchiver>::type OArchiver;
  typedef typename direct_serialization_table<IArchiver, OArchiver>::loader_t
    loader_t;

  int descriptor;
  ar >> descriptor;

  if (!descriptor) {
    // Descriptor 0: the object was pickled.
    detail::load_impl(ar, obj, version, mpl::false_());
    return;
  }

  direct_serialization_table<IArchiver, OArchiver>& table =
    get_direct_serialization_table<IArchiver, OArchiver>();
  loader_t loader = table.loader(descriptor);
  BOOST_ASSERT(loader);
  loader(ar, obj, version);
}
} // end namespace detail
/// Save a Python object to the given archive, using direct
/// serialization when it is available for this archiver pair.
template<typename Archiver>
void
save(Archiver& ar, const boost::python::object& obj,
     const unsigned int version)
{
  typedef typename input_archiver<Archiver>::type in_archiver_type;
  detail::save_impl(ar, obj, version,
                    has_direct_serialization<in_archiver_type, Archiver>());
}
template<typename Archiver>
void
load(Archiver& ar, boost::python::object& obj,
const unsigned int version)
{
typedef Archiver IArchiver;
typedef typename output_archiver<IArchiver>::type OArchiver;
detail::load_impl(ar, obj, version,
has_direct_serialization<IArchiver, OArchiver>());
}
/// Split serialization of boost::python::object into the separate
/// save() and load() free functions defined above.
template<typename Archive>
inline void
serialize(Archive& ar, boost::python::object& obj, const unsigned int version)
{
  boost::serialization::split_free(ar, obj, version);
}
} } // end namespace boost::python
/************************************************************************
* Boost.MPI-Specific Section *
************************************************************************/
namespace boost { namespace mpi {
class packed_iarchive;
class packed_oarchive;
} } // end namespace boost::mpi
BOOST_PYTHON_DIRECT_SERIALIZATION_ARCHIVE(
::boost::mpi::packed_iarchive,
::boost::mpi::packed_oarchive)
namespace boost { namespace mpi { namespace python {
template<typename T>
void
register_serialized(const T& value, PyTypeObject* type)
{
using boost::python::register_serialized;
register_serialized<packed_iarchive, packed_oarchive>(value, type);
}
} } } // end namespace boost::mpi::python
#endif // BOOST_MPI_PYTHON_SERIALIZE_HPP

View File

@@ -0,0 +1,209 @@
// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
#ifndef BOOST_MPI_PYTHON_SKELETON_AND_CONTENT_HPP
#define BOOST_MPI_PYTHON_SKELETON_AND_CONTENT_HPP
/** @file skeleton_and_content.hpp
*
* This file reflects the skeleton/content facilities into Python.
*/
#include <boost/python.hpp>
#include <boost/mpi.hpp>
#include <boost/function/function1.hpp>
#define BOOST_MPI_PYTHON_FORWARD_ONLY
#include <boost/mpi/python.hpp>
#include <boost/mpi/python/serialize.hpp>
namespace boost { namespace mpi { namespace python {
/**
* INTERNAL ONLY
*
* This @c content class is a wrapper around the C++ "content"
* retrieved from get_content. This wrapper is only needed to store a
* copy of the Python object on which get_content() was called.
*/
class content : public boost::mpi::content
{
  typedef boost::mpi::content inherited;
 public:
  // Wrap a C++ content descriptor, keeping the originating Python
  // object alive for as long as the content may be transmitted.
  content(const inherited& base, boost::python::object object)
    : inherited(base), object(object) { }
  // Access the wrapped boost::mpi::content.
  inherited& base() { return *this; }
  const inherited& base() const { return *this; }
  // The Python object from which this content was extracted.
  boost::python::object object;
};
/**
* INTERNAL ONLY
*
* A class specific to the Python bindings that mimics the behavior of
* the skeleton_proxy<T> template. In the case of Python skeletons, we
* only need to know the object (and its type) to transmit the
* skeleton. This is the only user-visible skeleton proxy type,
* although instantiations of its derived classes (@c
* skeleton_proxy<T>) will be returned from the Python skeleton()
* function.
*/
class skeleton_proxy_base
{
public:
  // Remember the Python object whose skeleton will be transmitted.
  skeleton_proxy_base(const boost::python::object& object) : object(object) { }
  // The object this proxy refers to.
  boost::python::object object;
};
/**
* INTERNAL ONLY
*
* The templated @c skeleton_proxy class represents a skeleton proxy
* in Python. The only data is stored in the @c skeleton_proxy_base
* class (which is the type actually exposed as @c skeleton_proxy in
* Python). However, the type of @c skeleton_proxy<T> is important for
* (de-)serialization of @c skeleton_proxy<T>'s for transmission.
*/
template<typename T>
class skeleton_proxy : public skeleton_proxy_base
{
public:
  // All state lives in skeleton_proxy_base; this derived type exists
  // only so the C++ type T can be recovered during (de-)serialization.
  skeleton_proxy(const boost::python::object& object)
    : skeleton_proxy_base(object) { }
};
namespace detail {
using boost::python::object;
using boost::python::extract;
extern BOOST_MPI_DECL boost::python::object skeleton_proxy_base_type;
// Saver functor: packs the skeleton of the wrapped C++ object of type
// T (held in the proxy's "object" attribute) into the given archive.
template<typename T>
struct skeleton_saver
{
  void
  operator()(packed_oarchive& ar, const object& obj, const unsigned int)
  {
    packed_skeleton_oarchive pso(ar);
    pso << extract<T&>(obj.attr("object"))();
  }
};
// Loader functor: reshapes the C++ object wrapped by a
// skeleton_proxy<T> from the skeleton stored in the archive.
template<typename T>
struct skeleton_loader
{
  void
  operator()(packed_iarchive& ar, object& obj, const unsigned int)
  {
    packed_skeleton_iarchive psi(ar);
    // If obj is not already a skeleton_proxy<T>, replace it with a
    // proxy around a default-constructed T before unpacking into it.
    extract<skeleton_proxy<T>&> proxy(obj);
    if (!proxy.check())
      obj = object(skeleton_proxy<T>(object(T())));
    psi >> extract<T&>(obj.attr("object"))();
  }
};
/**
* The @c skeleton_content_handler structure contains all of the
* information required to extract a skeleton and content from a
* Python object with a certain C++ type.
*/
struct skeleton_content_handler {
  // Wraps a Python object in the appropriate skeleton_proxy<T>.
  function1<object, const object&> get_skeleton_proxy;
  // Extracts the (MPI datatype) content of a Python-wrapped C++ object.
  function1<content, const object&> get_content;
};
/**
* A function object that extracts the skeleton from of a Python
* object, which is actually a wrapped C++ object of type T.
*/
template<typename T>
struct do_get_skeleton_proxy
{
  // Wrap the given Python object (a wrapped C++ T) in a typed proxy.
  object operator()(object value) {
    return object(skeleton_proxy<T>(value));
  }
};
/**
* A function object that extracts the content of a Python object,
* which is actually a wrapped C++ object of type T.
*/
template<typename T>
struct do_get_content
{
  // Build a content descriptor for the wrapped C++ value; the Python
  // object is stored alongside to keep the value alive.
  content operator()(object value_obj) {
    T& value = extract<T&>(value_obj)();
    return content(boost::mpi::get_content(value), value_obj);
  }
};
/**
* Determine if a skeleton and content handler for @p type has
* already been registered.
*/
BOOST_MPI_PYTHON_DECL bool
skeleton_and_content_handler_registered(PyTypeObject* type);
/**
* Register a skeleton/content handler with a particular Python type
* (which actually wraps a C++ type).
*/
BOOST_MPI_PYTHON_DECL void
register_skeleton_and_content_handler(PyTypeObject*,
const skeleton_content_handler&);
} // end namespace detail
/**
 * @brief Enable skeleton/content transmission for the C++ type @c T
 * as exposed to Python.
 *
 * Registers (1) a Python-visible @c skeleton_proxy<T> class, (2) the
 * saver/loader pair used to (de-)serialize skeletons through that
 * proxy, and (3) the handlers that extract a proxy or the content
 * from a Python object. Idempotent: a type is only registered once.
 *
 * @param value a sample value used to deduce the Python type when
 * @p type is null.
 * @param type the Python type to register; computed from @p value if 0.
 */
template<typename T>
void register_skeleton_and_content(const T& value, PyTypeObject* type)
{
  using boost::python::detail::direct_serialization_table;
  using boost::python::detail::get_direct_serialization_table;
  using namespace boost::python;
  // Determine the type
  if (!type)
    type = object(value).ptr()->ob_type;
  // Don't re-register the same type.
  if (detail::skeleton_and_content_handler_registered(type))
    return;
  // Register the skeleton proxy type, nested inside the Python
  // skeleton_proxy base class's scope.
  {
    boost::python::scope proxy_scope(detail::skeleton_proxy_base_type);
    std::string name("skeleton_proxy<");
    name += typeid(T).name();
    name += ">";
    class_<skeleton_proxy<T>, bases<skeleton_proxy_base> >(name.c_str(),
                                                           no_init);
  }
  // Register the saver and loader for the associated skeleton and
  // proxy, to allow (de-)serialization of skeletons via the proxy.
  direct_serialization_table<packed_iarchive, packed_oarchive>& table =
    get_direct_serialization_table<packed_iarchive, packed_oarchive>();
  table.register_type(detail::skeleton_saver<T>(),
                      detail::skeleton_loader<T>(),
                      skeleton_proxy<T>(object(value)));
  // Register the rest of the skeleton/content mechanism, including
  // handlers that extract a skeleton proxy from a Python object and
  // extract the content from a Python object.
  detail::skeleton_content_handler handler;
  handler.get_skeleton_proxy = detail::do_get_skeleton_proxy<T>();
  handler.get_content = detail::do_get_content<T>();
  detail::register_skeleton_and_content_handler(type, handler);
}
} } } // end namespace boost::mpi::python
#endif // BOOST_MPI_PYTHON_SKELETON_AND_CONTENT_HPP

102
test/external/boost/mpi/request.hpp vendored Normal file
View File

@@ -0,0 +1,102 @@
// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/** @file request.hpp
*
* This header defines the class @c request, which contains a request
* for non-blocking communication.
*/
#ifndef BOOST_MPI_REQUEST_HPP
#define BOOST_MPI_REQUEST_HPP
#include <boost/mpi/config.hpp>
#include <boost/optional.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/mpi/packed_iarchive.hpp>
namespace boost { namespace mpi {
class status;
class communicator;
/**
* @brief A request for a non-blocking send or receive.
*
* This structure contains information about a non-blocking send or
* receive and will be returned from @c isend or @c irecv,
* respectively.
*/
class BOOST_MPI_DECL request
{
 public:
  /**
   * Constructs a NULL request.
   */
  request();
  /**
   * Wait until the communication associated with this request has
   * completed, then return a @c status object describing the
   * communication.
   */
  status wait();
  /**
   * Determine whether the communication associated with this request
   * has completed successfully. If so, returns the @c status object
   * describing the communication. Otherwise, returns an empty @c
   * optional<> to indicate that the communication has not completed
   * yet. Note that once @c test() returns a @c status object, the
   * request has completed and @c wait() should not be called.
   */
  optional<status> test();
  /**
   * Cancel a pending communication, assuming it has not already been
   * completed.
   */
  void cancel();
 private:
  enum request_action { ra_wait, ra_test, ra_cancel };
  // Dispatch hook type used by wait()/test()/cancel() for requests
  // that need special handling (e.g. receives of serialized data).
  typedef optional<status> (*handler_type)(request* self,
                                           request_action action);
  /**
   * INTERNAL ONLY
   *
   * Handles the non-blocking receive of a serialized value.
   */
  template<typename T>
  static optional<status>
  handle_serialized_irecv(request* self, request_action action);
  /**
   * INTERNAL ONLY
   *
   * Handles the non-blocking receive of an array of serialized values.
   */
  template<typename T>
  static optional<status>
  handle_serialized_array_irecv(request* self, request_action action);
 public: // template friends are not portable
  /// INTERNAL ONLY
  /// Two MPI request slots; presumably one for a header/size message
  /// and one for the payload of serialized transfers -- TODO confirm.
  MPI_Request m_requests[2];
  /// INTERNAL ONLY
  /// Null for plain MPI-datatype transfers; set for serialized ones.
  handler_type m_handler;
  /// INTERNAL ONLY
  /// Keeps alive any auxiliary state owned by the handler.
  shared_ptr<void> m_data;
  friend class communicator;
};
} } // end namespace boost::mpi
#endif // BOOST_MPI_REQUEST_HPP

View File

@@ -0,0 +1,392 @@
// (C) Copyright 2005 Matthias Troyer
// (C) Copyright 2006 Douglas Gregor <doug.gregor -at gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Matthias Troyer
// Douglas Gregor
/** @file skeleton_and_content.hpp
*
* This header provides facilities that allow the structure of data
* types (called the "skeleton") to be transmitted and received
* separately from the content stored in those data types. These
* facilities are useful when the data in a stable data structure
* (e.g., a mesh or a graph) will need to be transmitted
* repeatedly. In this case, transmitting the skeleton only once
* saves both communication effort (it need not be sent again) and
* local computation (serialization need only be performed once for
* the content).
*/
#ifndef BOOST_MPI_SKELETON_AND_CONTENT_HPP
#define BOOST_MPI_SKELETON_AND_CONTENT_HPP
#include <boost/mpi/config.hpp>
#include <boost/archive/detail/auto_link_archive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/detail/forward_skeleton_iarchive.hpp>
#include <boost/mpi/detail/forward_skeleton_oarchive.hpp>
#include <boost/mpi/detail/ignore_iprimitive.hpp>
#include <boost/mpi/detail/ignore_oprimitive.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/archive/detail/register_archive.hpp>
namespace boost { namespace mpi {
/**
* @brief A proxy that requests that the skeleton of an object be
* transmitted.
*
* The @c skeleton_proxy is a lightweight proxy object used to
* indicate that the skeleton of an object, not the object itself,
* should be transmitted. It can be used with the @c send and @c recv
* operations of communicators or the @c broadcast collective. When a
* @c skeleton_proxy is sent, Boost.MPI generates a description
* containing the structure of the stored object. When that skeleton
* is received, the receiving object is reshaped to match the
* structure. Once the skeleton of an object as been transmitted, its
* @c content can be transmitted separately (often several times)
* without changing the structure of the object.
*/
template <class T>
struct BOOST_MPI_DECL skeleton_proxy
{
  /**
   *  Constructs a @c skeleton_proxy that references object @p x.
   *
   *  @param x the object whose structure will be transmitted or
   *  altered.
   */
  skeleton_proxy(T& x)
   : object(x)
  {}
  /// The referenced object; it must outlive this proxy.
  T& object;
};
/**
* @brief Create a skeleton proxy object.
*
* This routine creates an instance of the skeleton_proxy class. It
* will typically be used when calling @c send, @c recv, or @c
* broadcast, to indicate that only the skeleton (structure) of an
* object should be transmitted and not its contents.
*
* @param x the object whose structure will be transmitted.
*
* @returns a skeleton_proxy object referencing @p x
*/
template <class T>
inline const skeleton_proxy<T> skeleton(T& x)
{
  // Wrap x in a lightweight proxy; no copy of x is made.
  return skeleton_proxy<T>(x);
}
namespace detail {
/// @brief a class holding an MPI datatype
/// INTERNAL ONLY
/// the type is freed upon destruction
class BOOST_MPI_DECL mpi_datatype_holder : public boost::noncopyable
{
public:
  // Default construction: holds no committed type, so the destructor
  // will not attempt to free anything.
  mpi_datatype_holder()
   : is_committed(false)
  {}
  // Takes ownership of t; committed indicates whether MPI_Type_commit
  // has already been executed for it.
  mpi_datatype_holder(MPI_Datatype t, bool committed = true)
   : d(t)
   , is_committed(committed)
  {}
  void commit()
  {
    BOOST_MPI_CHECK_RESULT(MPI_Type_commit,(&d));
    is_committed=true;
  }
  MPI_Datatype get_mpi_datatype() const
  {
    return d;
  }
  // Frees the datatype unless MPI has already been finalized.
  // NOTE(review): BOOST_MPI_CHECK_RESULT may report failure from
  // inside a destructor -- confirm the macro's failure behavior here.
  ~mpi_datatype_holder()
  {
    int finalized=0;
    BOOST_MPI_CHECK_RESULT(MPI_Finalized,(&finalized));
    if (!finalized && is_committed)
      BOOST_MPI_CHECK_RESULT(MPI_Type_free,(&d));
  }
private:
  MPI_Datatype d;
  bool is_committed;
};
} // end namespace detail
/** @brief A proxy object that transfers the content of an object
* without its structure.
*
* The @c content class indicates that Boost.MPI should transmit or
* receive the content of an object, but without any information
* about the structure of the object. It is only meaningful to
* transmit the content of an object after the receiver has already
* received the skeleton for the same object.
*
* Most users will not use @c content objects directly. Rather, they
* will invoke @c send, @c recv, or @c broadcast operations using @c
* get_content().
*/
class BOOST_MPI_DECL content
{
public:
  /**
   *  Constructs an empty @c content object. This object will not be
   *  useful for any Boost.MPI operations until it is reassigned.
   */
  content() {}
  /**
   *  This routine initializes the @c content object with an MPI data
   *  type that refers to the content of an object without its structure.
   *
   *  @param d the MPI data type referring to the content of the object.
   *
   *  @param committed @c true indicates that @c MPI_Type_commit has
   *  already been executed for the data type @p d.
   */
  content(MPI_Datatype d, bool committed=true)
   : holder(new detail::mpi_datatype_holder(d,committed))
  {}
  /**
   *  Replace the MPI data type referencing the content of an object.
   *  The new data type @p d is treated as already committed.
   *
   *  @param d the new MPI data type referring to the content of the
   *  object.
   *
   *  @returns *this
   */
  const content& operator=(MPI_Datatype d)
  {
    holder.reset(new detail::mpi_datatype_holder(d));
    return *this;
  }
  /**
   * Retrieve the MPI data type that refers to the content of the
   * object.
   *
   * @returns the MPI data type, which should only be transmitted or
   * received using @c MPI_BOTTOM as the address.
   */
  MPI_Datatype get_mpi_datatype() const
  {
    return holder->get_mpi_datatype();
  }
  /**
   *  Commit the MPI data type referring to the content of the
   *  object.
   */
  void commit()
  {
    holder->commit();
  }
private:
  /// Shared so that all copies of a @c content object refer to (and
  /// eventually free) the same underlying MPI datatype exactly once.
  boost::shared_ptr<detail::mpi_datatype_holder> holder;
};
/** @brief Returns the content of an object, suitable for transmission
* via Boost.MPI.
*
* The function creates an absolute MPI datatype for the object,
* where all offsets are counted from the address 0 (a.k.a. @c
* MPI_BOTTOM) instead of the address @c &x of the object. This
* allows the creation of MPI data types for complex data structures
* containing pointers, such as linked lists or trees.
*
* The disadvantage, compared to relative MPI data types is that for
* each object a new MPI data type has to be created.
*
* The contents of an object can only be transmitted when the
* receiver already has an object with the same structure or shape as
* the sender. To accomplish this, first transmit the skeleton of the
* object using, e.g., @c skeleton() or @c skeleton_proxy.
*
* The type @c T has to allow creation of an absolute MPI data type
* (content).
*
* @param x the object for which the content will be transmitted.
*
* @returns the content of the object @p x, which can be used for
* transmission via @c send, @c recv, or @c broadcast.
*/
template <class T> const content get_content(const T& x);
/** @brief An archiver that reconstructs a data structure based on the
* binary skeleton stored in a buffer.
*
* The @c packed_skeleton_iarchive class is an Archiver (as in the
* Boost.Serialization library) that can construct the the shape of a
* data structure based on a binary skeleton stored in a buffer. The
* @c packed_skeleton_iarchive is typically used by the receiver of a
* skeleton, to prepare a data structure that will eventually receive
* content separately.
*
* Users will not generally need to use @c packed_skeleton_iarchive
* directly. Instead, use @c skeleton or @c get_skeleton.
*/
class BOOST_MPI_DECL packed_skeleton_iarchive
  : public detail::ignore_iprimitive,
    public detail::forward_skeleton_iarchive<packed_skeleton_iarchive,packed_iarchive>
{
 public:
  /**
   *  Construct a @c packed_skeleton_iarchive for the given
   *  communicator.
   *
   *  @param comm The communicator over which this archive will be
   *  transmitted.
   *
   *  @param flags Control the serialization of the skeleton. Refer to
   *  the Boost.Serialization documentation before changing the
   *  default flags.
   */
  packed_skeleton_iarchive(MPI_Comm const & comm,
                           unsigned int flags = boost::archive::no_header)
    : detail::forward_skeleton_iarchive<packed_skeleton_iarchive,packed_iarchive>(skeleton_archive_)
    , skeleton_archive_(comm,flags)
  {}
  /**
   *  Construct a @c packed_skeleton_iarchive that unpacks a skeleton
   *  from the given @p archive.
   *
   *  @param archive the archive from which the skeleton will be
   *  unpacked.
   *
   */
  // In this mode the user's archive is forwarded to; the internal
  // skeleton_archive_ member is still constructed but unused.
  explicit packed_skeleton_iarchive(packed_iarchive & archive)
    : detail::forward_skeleton_iarchive<packed_skeleton_iarchive,packed_iarchive>(archive)
    , skeleton_archive_(MPI_COMM_WORLD, boost::archive::no_header)
  {}
  /**
   *  Retrieve the archive corresponding to this skeleton.
   */
  const packed_iarchive& get_skeleton() const
  {
    return this->implementation_archive;
  }
  /**
   *  Retrieve the archive corresponding to this skeleton.
   */
  packed_iarchive& get_skeleton()
  {
    return this->implementation_archive;
  }
 private:
  /// Store the actual archive that holds the structure, unless the
  /// user overrides this with their own archive.
  packed_iarchive skeleton_archive_;
};
/** @brief An archiver that records the binary skeleton of a data
* structure into a buffer.
*
* The @c packed_skeleton_oarchive class is an Archiver (as in the
* Boost.Serialization library) that can record the shape of a data
* structure (called the "skeleton") into a binary representation
* stored in a buffer. The @c packed_skeleton_oarchive is typically
* used by the send of a skeleton, to pack the skeleton of a data
* structure for transmission separately from the content.
*
* Users will not generally need to use @c packed_skeleton_oarchive
* directly. Instead, use @c skeleton or @c get_skeleton.
*/
class BOOST_MPI_DECL packed_skeleton_oarchive
  : public detail::ignore_oprimitive,
    public detail::forward_skeleton_oarchive<packed_skeleton_oarchive,packed_oarchive>
{
 public:
  /**
   *  Construct a @c packed_skeleton_oarchive for the given
   *  communicator.
   *
   *  @param comm The communicator over which this archive will be
   *  transmitted.
   *
   *  @param flags Control the serialization of the skeleton. Refer to
   *  the Boost.Serialization documentation before changing the
   *  default flags.
   */
  packed_skeleton_oarchive(MPI_Comm const & comm,
                           unsigned int flags = boost::archive::no_header)
    : detail::forward_skeleton_oarchive<packed_skeleton_oarchive,packed_oarchive>(skeleton_archive_)
    , skeleton_archive_(comm,flags)
  {}
  /**
   *  Construct a @c packed_skeleton_oarchive that packs a skeleton
   *  into the given @p archive.
   *
   *  @param archive the archive to which the skeleton will be packed.
   *
   */
  // In this mode the user's archive is forwarded to; the internal
  // skeleton_archive_ member is still constructed but unused.
  explicit packed_skeleton_oarchive(packed_oarchive & archive)
    : detail::forward_skeleton_oarchive<packed_skeleton_oarchive,packed_oarchive>(archive)
    , skeleton_archive_(MPI_COMM_WORLD, boost::archive::no_header)
  {}
  /**
   *  Retrieve the archive corresponding to this skeleton.
   */
  const packed_oarchive& get_skeleton() const
  {
    return this->implementation_archive;
  }
  /**
   *  Retrieve the archive corresponding to this skeleton.
   *
   *  Non-const overload added for consistency with
   *  @c packed_skeleton_iarchive, which provides both accessors.
   */
  packed_oarchive& get_skeleton()
  {
    return this->implementation_archive;
  }
 private:
  /// Store the actual archive that holds the structure.
  packed_oarchive skeleton_archive_;
};
namespace detail {
typedef boost::mpi::detail::forward_skeleton_oarchive<boost::mpi::packed_skeleton_oarchive,boost::mpi::packed_oarchive> type1;
typedef boost::mpi::detail::forward_skeleton_iarchive<boost::mpi::packed_skeleton_iarchive,boost::mpi::packed_iarchive> type2;
}
} } // end namespace boost::mpi
#include <boost/mpi/detail/content_oarchive.hpp>
// For any headers that have provided declarations based on forward
// declarations of the contents of this header, include definitions
// for those declarations. This means that the inclusion of
// skeleton_and_content.hpp enables the use of skeleton/content
// transmission throughout the library.
#ifdef BOOST_MPI_BROADCAST_HPP
# include <boost/mpi/detail/broadcast_sc.hpp>
#endif
#ifdef BOOST_MPI_COMMUNICATOR_HPP
# include <boost/mpi/detail/communicator_sc.hpp>
#endif
// required by export
BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::packed_skeleton_oarchive)
BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::packed_skeleton_iarchive)
BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::detail::type1)
BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::detail::type2)
BOOST_SERIALIZATION_USE_ARRAY_OPTIMIZATION(boost::mpi::packed_skeleton_oarchive)
BOOST_SERIALIZATION_USE_ARRAY_OPTIMIZATION(boost::mpi::packed_skeleton_iarchive)
#endif // BOOST_MPI_SKELETON_AND_CONTENT_HPP

View File

@@ -0,0 +1,31 @@
// (C) Copyright 2006 Douglas Gregor <doug.gregor -at gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
/** @file skeleton_and_content_fwd.hpp
*
* This header contains all of the forward declarations required to
* use transmit skeletons of data structures and the content of data
* structures separately. To actually transmit skeletons or content,
* include the header @c boost/mpi/skeleton_and_content.hpp.
*/
#ifndef BOOST_MPI_SKELETON_AND_CONTENT_FWD_HPP
#define BOOST_MPI_SKELETON_AND_CONTENT_FWD_HPP
namespace boost { namespace mpi {
template <class T> struct skeleton_proxy;
template <class T> const skeleton_proxy<T> skeleton(T& x);
class content;
template <class T> const content get_content(const T& x);
class packed_skeleton_iarchive;
class packed_skeleton_oarchive;
} } // end namespace boost::mpi
#endif // BOOST_MPI_SKELETON_AND_CONTENT_FWD_HPP

107
test/external/boost/mpi/status.hpp vendored Normal file
View File

@@ -0,0 +1,107 @@
// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/** @file status.hpp
*
* This header defines the class @c status, which reports on the
* results of point-to-point communication.
*/
#ifndef BOOST_MPI_STATUS_HPP
#define BOOST_MPI_STATUS_HPP
#include <boost/mpi/config.hpp>
#include <boost/optional.hpp>
namespace boost { namespace mpi {
class request;
class communicator;
/** @brief Contains information about a message that has been or can
* be received.
*
* This structure contains status information about messages that
* have been received (with @c communicator::recv) or can be received
* (returned from @c communicator::probe or @c
* communicator::iprobe). It permits access to the source of the
* message, message tag, error code (rarely used), or the number of
* elements that have been transmitted.
*/
class BOOST_MPI_DECL status
{
 public:
  // m_count == -1 marks the cached element count as not yet computed.
  status() : m_count(-1) { }
  status(MPI_Status const& s) : m_status(s), m_count(-1) {}
  /**
   * Retrieve the source of the message.
   */
  int source() const { return m_status.MPI_SOURCE; }
  /**
   * Retrieve the message tag.
   */
  int tag() const { return m_status.MPI_TAG; }
  /**
   * Retrieve the error code.
   */
  int error() const { return m_status.MPI_ERROR; }
  /**
   * Determine whether the communication associated with this object
   * has been successfully cancelled.
   */
  bool cancelled() const;
  /**
   * Determines the number of elements of type @c T contained in the
   * message. The type @c T must have an associated data type, i.e.,
   * @c is_mpi_datatype<T> must derive @c mpl::true_. In cases where
   * the type @c T does not match the transmitted type, this routine
   * will return an empty @c optional<int>.
   *
   * @returns the number of @c T elements in the message, if it can be
   * determined.
   */
  template<typename T> optional<int> count() const;
  /**
   * References the underlying @c MPI_Status
   */
  operator MPI_Status&() { return m_status; }
  /**
   * References the underlying @c MPI_Status
   */
  operator const MPI_Status&() const { return m_status; }
 private:
  /**
   * INTERNAL ONLY
   *
   * count() implementation for types with an associated MPI datatype.
   */
  template<typename T> optional<int> count_impl(mpl::true_) const;
  /**
   * INTERNAL ONLY
   *
   * count() implementation for other (serialized) types.
   */
  template<typename T> optional<int> count_impl(mpl::false_) const;
 public: // friend templates are not portable
  /// INTERNAL ONLY
  mutable MPI_Status m_status;
  /// INTERNAL ONLY
  /// Cached element count; -1 until computed -- presumably filled in
  /// by count() or the friend classes. TODO confirm.
  mutable int m_count;
  friend class communicator;
  friend class request;
};
} } // end namespace boost::mpi
#endif // BOOST_MPI_STATUS_HPP

91
test/external/boost/mpi/timer.hpp vendored Normal file
View File

@@ -0,0 +1,91 @@
// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/** @file timer.hpp
*
* This header provides the @c timer class, which provides access to
* the MPI timers.
*/
#ifndef BOOST_MPI_TIMER_HPP
#define BOOST_MPI_TIMER_HPP
#include <boost/mpi/config.hpp>
#include <boost/limits.hpp>
namespace boost { namespace mpi {
/** @brief A simple timer that provides access to the MPI timing
* facilities.
*
* The @c timer class is a simple wrapper around the MPI timing
* facilities that mimics the interface of the Boost Timer library.
*/
class BOOST_MPI_DECL timer {
 public:
  /** Initializes the timer
   *
   * @post @c elapsed() == 0
   */
  timer();
  /** Restart the timer.
   *
   * @post @c elapsed() == 0
   */
  void restart();
  /** Return the amount of time that has elapsed since the last
   *  construction or reset, in seconds.
   */
  double elapsed() const;
  /** Return an estimate of the maximum possible value of
   *  elapsed(). Note that this routine may return too high a value on
   *  some systems.
   */
  double elapsed_max() const;
  /** Returns the minimum non-zero value that @c elapsed() may
   *  return. This is the resolution of the timer.
   */
  double elapsed_min() const;
  /** Determines whether the elapsed time values are global times or
      local processor times. */
  static bool time_is_global();
 private:
  // MPI_Wtime() value captured at construction or the last restart().
  double start_time;
}; // timer
// Start timing immediately upon construction.
inline timer::timer()
{
  restart();
}
// Reset the reference point to the current MPI wall-clock time.
inline void timer::restart()
{
  start_time = MPI_Wtime();
}
// Seconds of wall-clock time since construction or the last restart().
inline double timer::elapsed() const
{
  return MPI_Wtime() - start_time;
}
// Upper bound on elapsed(); the extra parentheses around max defend
// against a same-named macro (e.g. from <windows.h>).
inline double timer::elapsed_max() const
{
  return (std::numeric_limits<double>::max)();
}
// Timer resolution, as reported by MPI.
inline double timer::elapsed_min() const
{
  return MPI_Wtick();
}
} } // end namespace boost::mpi
#endif // BOOST_MPI_TIMER_HPP