Added boost header
test/external/boost/mpi/collectives/all_gather.hpp (vendored, new file, 82 lines)
@@ -0,0 +1,82 @@
// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor -at- gmail.com>.

// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// Message Passing Interface 1.1 -- Section 4.7. Gather-to-all
#ifndef BOOST_MPI_ALL_GATHER_HPP
#define BOOST_MPI_ALL_GATHER_HPP

#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <vector>
#include <boost/serialization/vector.hpp>

// all_gather falls back to gather+broadcast in some cases
#include <boost/mpi/collectives/broadcast.hpp>
#include <boost/mpi/collectives/gather.hpp>

namespace boost { namespace mpi {

namespace detail {
  // We're all-gathering for a type that has an associated MPI
  // datatype, so we'll use MPI_Allgather to do all of the work.
  template<typename T>
  void
  all_gather_impl(const communicator& comm, const T* in_values, int n,
                  T* out_values, mpl::true_)
  {
    MPI_Datatype type = boost::mpi::get_mpi_datatype<T>(*in_values);
    BOOST_MPI_CHECK_RESULT(MPI_Allgather,
                           (const_cast<T*>(in_values), n, type,
                            out_values, n, type, comm));
  }

  // We're all-gathering for a type that has no associated MPI
  // type. So, we'll do a manual gather followed by a broadcast.
  template<typename T>
  void
  all_gather_impl(const communicator& comm, const T* in_values, int n,
                  T* out_values, mpl::false_)
  {
    gather(comm, in_values, n, out_values, 0);
    broadcast(comm, out_values, comm.size() * n, 0);
  }
} // end namespace detail

template<typename T>
inline void
all_gather(const communicator& comm, const T& in_value, T* out_values)
{
  detail::all_gather_impl(comm, &in_value, 1, out_values, is_mpi_datatype<T>());
}

template<typename T>
void
all_gather(const communicator& comm, const T& in_value,
           std::vector<T>& out_values)
{
  out_values.resize(comm.size());
  ::boost::mpi::all_gather(comm, &in_value, 1, &out_values[0]);
}

template<typename T>
inline void
all_gather(const communicator& comm, const T* in_values, int n, T* out_values)
{
  detail::all_gather_impl(comm, in_values, n, out_values, is_mpi_datatype<T>());
}

template<typename T>
void
all_gather(const communicator& comm, const T* in_values, int n,
           std::vector<T>& out_values)
{
  out_values.resize(comm.size() * n);
  ::boost::mpi::all_gather(comm, in_values, n, &out_values[0]);
}

} } // end namespace boost::mpi

#endif // BOOST_MPI_ALL_GATHER_HPP
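A minimal usage sketch of the all_gather() overloads declared above (not part of the vendored header); it assumes Boost.MPI is linked against an MPI implementation, the program is launched under an MPI runner such as mpirun, and the gathered payload is purely illustrative:

#include <boost/mpi.hpp>
#include <iostream>
#include <vector>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);  // initializes and finalizes MPI
  boost::mpi::communicator world;

  // Every process contributes its rank; afterwards every process holds
  // the ranks of all processes, in rank order (vector overload above).
  std::vector<int> ranks;
  boost::mpi::all_gather(world, world.rank(), ranks);

  if (world.rank() == 0)
    std::cout << "gathered " << ranks.size() << " values\n";
  return 0;
}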
test/external/boost/mpi/collectives/all_reduce.hpp (vendored, new file, 102 lines)
@@ -0,0 +1,102 @@
// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Copyright (C) 2004 The Trustees of Indiana University

// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// Authors: Douglas Gregor
//          Andrew Lumsdaine

// Message Passing Interface 1.1 -- Section 4.9.1. Reduce
#ifndef BOOST_MPI_ALL_REDUCE_HPP
#define BOOST_MPI_ALL_REDUCE_HPP

// All-reduce falls back to reduce() + broadcast() in some cases.
#include <boost/mpi/collectives/broadcast.hpp>
#include <boost/mpi/collectives/reduce.hpp>

namespace boost { namespace mpi {

namespace detail {
  /**********************************************************************
   * Simple reduction with MPI_Allreduce                                *
   **********************************************************************/
  // We are reducing for a type that has an associated MPI
  // datatype and operation, so we'll use MPI_Allreduce directly.
  template<typename T, typename Op>
  void
  all_reduce_impl(const communicator& comm, const T* in_values, int n,
                  T* out_values, Op /*op*/, mpl::true_ /*is_mpi_op*/,
                  mpl::true_ /*is_mpi_datatype*/)
  {
    BOOST_MPI_CHECK_RESULT(MPI_Allreduce,
                           (const_cast<T*>(in_values), out_values, n,
                            boost::mpi::get_mpi_datatype<T>(*in_values),
                            (is_mpi_op<Op, T>::op()), comm));
  }

  /**********************************************************************
   * User-defined reduction with MPI_Allreduce                          *
   **********************************************************************/
  // We are reducing for a type that has an associated MPI datatype but
  // with a custom operation. We'll use MPI_Allreduce directly, but
  // we'll need to create an MPI_Op manually.
  template<typename T, typename Op>
  void
  all_reduce_impl(const communicator& comm, const T* in_values, int n,
                  T* out_values, Op op, mpl::false_ /*is_mpi_op*/,
                  mpl::true_ /*is_mpi_datatype*/)
  {
    user_op<Op, T> mpi_op(op);
    BOOST_MPI_CHECK_RESULT(MPI_Allreduce,
                           (const_cast<T*>(in_values), out_values, n,
                            boost::mpi::get_mpi_datatype<T>(*in_values),
                            mpi_op.get_mpi_op(), comm));
  }

  /**********************************************************************
   * User-defined, tree-based reduction for non-MPI data types          *
   **********************************************************************/
  // We are reducing for a type that has no associated MPI datatype and
  // operation, so we fall back to a reduce() to the root followed by a
  // broadcast().
  template<typename T, typename Op>
  void
  all_reduce_impl(const communicator& comm, const T* in_values, int n,
                  T* out_values, Op op, mpl::false_ /*is_mpi_op*/,
                  mpl::false_ /*is_mpi_datatype*/)
  {
    reduce(comm, in_values, n, out_values, op, 0);
    broadcast(comm, out_values, n, 0);
  }
} // end namespace detail

template<typename T, typename Op>
inline void
all_reduce(const communicator& comm, const T* in_values, int n, T* out_values,
           Op op)
{
  detail::all_reduce_impl(comm, in_values, n, out_values, op,
                          is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}

template<typename T, typename Op>
inline void
all_reduce(const communicator& comm, const T& in_value, T& out_value, Op op)
{
  detail::all_reduce_impl(comm, &in_value, 1, &out_value, op,
                          is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}

template<typename T, typename Op>
T all_reduce(const communicator& comm, const T& in_value, Op op)
{
  T result;
  ::boost::mpi::all_reduce(comm, in_value, result, op);
  return result;
}

} } // end namespace boost::mpi

#endif // BOOST_MPI_ALL_REDUCE_HPP
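A hedged sketch of the value-returning all_reduce() overload above (not from this commit). It assumes the same Boost.MPI/MPI setup as before; std::plus<int> maps to a built-in MPI operation here, so the MPI_Allreduce branch is taken rather than the reduce()+broadcast() fallback:

#include <boost/mpi.hpp>
#include <functional>
#include <iostream>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  // Sum of all ranks, available on every process after the call.
  int sum = boost::mpi::all_reduce(world, world.rank(), std::plus<int>());

  std::cout << "rank " << world.rank() << " sees sum " << sum << "\n";
  return 0;
}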
test/external/boost/mpi/collectives/all_to_all.hpp (vendored, new file, 153 lines)
@@ -0,0 +1,153 @@
// Copyright (C) 2005, 2006 Douglas Gregor.

// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// Message Passing Interface 1.1 -- Section 4.8. All-to-all
#ifndef BOOST_MPI_ALL_TO_ALL_HPP
#define BOOST_MPI_ALL_TO_ALL_HPP

#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <vector>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/assert.hpp>
#include <boost/mpi/collectives_fwd.hpp>
#include <boost/mpi/allocator.hpp>

namespace boost { namespace mpi {

namespace detail {
  // We're performing an all-to-all with a type that has an
  // associated MPI datatype, so we'll use MPI_Alltoall to do all of
  // the work.
  template<typename T>
  void
  all_to_all_impl(const communicator& comm, const T* in_values, int n,
                  T* out_values, mpl::true_)
  {
    MPI_Datatype type = get_mpi_datatype<T>(*in_values);
    BOOST_MPI_CHECK_RESULT(MPI_Alltoall,
                           (const_cast<T*>(in_values), n, type,
                            out_values, n, type, comm));
  }

  // We're performing an all-to-all with a type that does not have an
  // associated MPI datatype, so we'll need to serialize
  // it. Unfortunately, this means that we cannot use MPI_Alltoall, so
  // we'll just have to send individual messages to the other
  // processes.
  template<typename T>
  void
  all_to_all_impl(const communicator& comm, const T* in_values, int n,
                  T* out_values, mpl::false_)
  {
    int size = comm.size();
    int rank = comm.rank();

    // The amount of data to be sent to each process
    std::vector<int> send_sizes(size);

    // The displacements for each outgoing value.
    std::vector<int> send_disps(size);

    // The buffer that will store all of the outgoing values
    std::vector<char, allocator<char> > outgoing;

    // Pack the buffer with all of the outgoing values.
    for (int dest = 0; dest < size; ++dest) {
      // Keep track of the displacements
      send_disps[dest] = outgoing.size();

      // Our own value will never be transmitted, so don't pack it.
      if (dest != rank) {
        packed_oarchive oa(comm, outgoing);
        for (int i = 0; i < n; ++i)
          oa << in_values[dest * n + i];
      }

      // Keep track of the sizes
      send_sizes[dest] = outgoing.size() - send_disps[dest];
    }

    // Determine how much data each process will receive.
    std::vector<int> recv_sizes(size);
    all_to_all(comm, send_sizes, recv_sizes);

    // Prepare a buffer to receive the incoming data.
    std::vector<int> recv_disps(size);
    int sum = 0;
    for (int src = 0; src < size; ++src) {
      recv_disps[src] = sum;
      sum += recv_sizes[src];
    }
    std::vector<char, allocator<char> > incoming(sum > 0? sum : 1);

    // Make sure we don't try to reference an empty vector
    if (outgoing.empty())
      outgoing.push_back(0);

    // Transmit the actual data
    BOOST_MPI_CHECK_RESULT(MPI_Alltoallv,
                           (&outgoing[0], &send_sizes[0],
                            &send_disps[0], MPI_PACKED,
                            &incoming[0], &recv_sizes[0],
                            &recv_disps[0], MPI_PACKED,
                            comm));

    // Deserialize data from the iarchive
    for (int src = 0; src < size; ++src) {
      if (src == rank)
        std::copy(in_values + src * n, in_values + (src + 1) * n,
                  out_values + src * n);
      else {
        packed_iarchive ia(comm, incoming, boost::archive::no_header,
                           recv_disps[src]);
        for (int i = 0; i < n; ++i)
          ia >> out_values[src * n + i];
      }
    }
  }
} // end namespace detail

template<typename T>
inline void
all_to_all(const communicator& comm, const T* in_values, T* out_values)
{
  detail::all_to_all_impl(comm, in_values, 1, out_values, is_mpi_datatype<T>());
}

template<typename T>
void
all_to_all(const communicator& comm, const std::vector<T>& in_values,
           std::vector<T>& out_values)
{
  BOOST_ASSERT((int)in_values.size() == comm.size());
  out_values.resize(comm.size());
  ::boost::mpi::all_to_all(comm, &in_values[0], &out_values[0]);
}

template<typename T>
inline void
all_to_all(const communicator& comm, const T* in_values, int n, T* out_values)
{
  detail::all_to_all_impl(comm, in_values, n, out_values, is_mpi_datatype<T>());
}

template<typename T>
void
all_to_all(const communicator& comm, const std::vector<T>& in_values, int n,
           std::vector<T>& out_values)
{
  BOOST_ASSERT((int)in_values.size() == comm.size() * n);
  out_values.resize(comm.size() * n);
  ::boost::mpi::all_to_all(comm, &in_values[0], n, &out_values[0]);
}

} } // end namespace boost::mpi

#endif // BOOST_MPI_ALL_TO_ALL_HPP
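A small sketch of the vector overload of all_to_all() above (illustrative only, not part of the commit); each rank prepares one value per destination and receives one value per source:

#include <boost/mpi.hpp>
#include <vector>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  // outgoing[i] is sent to rank i; incoming[i] arrives from rank i.
  std::vector<int> outgoing(world.size()), incoming;
  for (int dest = 0; dest < world.size(); ++dest)
    outgoing[dest] = 100 * world.rank() + dest;  // arbitrary payload

  boost::mpi::all_to_all(world, outgoing, incoming);
  return 0;
}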
test/external/boost/mpi/collectives/broadcast.hpp (vendored, new file, 145 lines)
@@ -0,0 +1,145 @@
// Copyright (C) 2005, 2006 Douglas Gregor <doug.gregor -at- gmail.com>.

// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// Message Passing Interface 1.1 -- Section 4.4. Broadcast
#ifndef BOOST_MPI_BROADCAST_HPP
#define BOOST_MPI_BROADCAST_HPP

#include <boost/mpi/collectives_fwd.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/mpi/communicator.hpp>

namespace boost { namespace mpi {

/************************************************************************
 * Specializations                                                      *
 ************************************************************************/

/**
 * INTERNAL ONLY
 */
template<>
BOOST_MPI_DECL void
broadcast<const packed_oarchive>(const communicator& comm,
                                 const packed_oarchive& oa,
                                 int root);

/**
 * INTERNAL ONLY
 */
template<>
BOOST_MPI_DECL void
broadcast<packed_oarchive>(const communicator& comm, packed_oarchive& oa,
                           int root);

/**
 * INTERNAL ONLY
 */
template<>
BOOST_MPI_DECL void
broadcast<packed_iarchive>(const communicator& comm, packed_iarchive& ia,
                           int root);

/**
 * INTERNAL ONLY
 */
template<>
BOOST_MPI_DECL void
broadcast<const packed_skeleton_oarchive>(const communicator& comm,
                                          const packed_skeleton_oarchive& oa,
                                          int root);

/**
 * INTERNAL ONLY
 */
template<>
void
broadcast<packed_skeleton_oarchive>(const communicator& comm,
                                    packed_skeleton_oarchive& oa, int root);

/**
 * INTERNAL ONLY
 */
template<>
void
broadcast<packed_skeleton_iarchive>(const communicator& comm,
                                    packed_skeleton_iarchive& ia, int root);

/**
 * INTERNAL ONLY
 */
template<>
void broadcast<content>(const communicator& comm, content& c, int root);

/**
 * INTERNAL ONLY
 */
template<>
void broadcast<const content>(const communicator& comm, const content& c,
                              int root);

/************************************************************************
 * broadcast() implementation                                           *
 ************************************************************************/
namespace detail {
  // We're sending a type that has an associated MPI datatype, so
  // we'll use MPI_Bcast to do all of the work.
  template<typename T>
  void
  broadcast_impl(const communicator& comm, T* values, int n, int root,
                 mpl::true_)
  {
    BOOST_MPI_CHECK_RESULT(MPI_Bcast,
                           (values, n,
                            boost::mpi::get_mpi_datatype<T>(*values),
                            root, MPI_Comm(comm)));
  }

  // We're sending a type that does not have an associated MPI
  // datatype, so we'll need to serialize it. Unfortunately, this
  // means that we cannot use MPI_Bcast, so we'll just send from the
  // root to everyone else.
  template<typename T>
  void
  broadcast_impl(const communicator& comm, T* values, int n, int root,
                 mpl::false_)
  {
    if (comm.rank() == root) {
      packed_oarchive oa(comm);
      for (int i = 0; i < n; ++i)
        oa << values[i];
      broadcast(comm, oa, root);
    } else {
      packed_iarchive ia(comm);
      broadcast(comm, ia, root);
      for (int i = 0; i < n; ++i)
        ia >> values[i];
    }
  }
} // end namespace detail

template<typename T>
void broadcast(const communicator& comm, T& value, int root)
{
  detail::broadcast_impl(comm, &value, 1, root, is_mpi_datatype<T>());
}

template<typename T>
void broadcast(const communicator& comm, T* values, int n, int root)
{
  detail::broadcast_impl(comm, values, n, root, is_mpi_datatype<T>());
}

} } // end namespace boost::mpi

// If the user has already included skeleton_and_content.hpp, include
// the code to broadcast skeletons and content.
#ifdef BOOST_MPI_SKELETON_AND_CONTENT_HPP
# include <boost/mpi/detail/broadcast_sc.hpp>
#endif

#endif // BOOST_MPI_BROADCAST_HPP
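A usage sketch of broadcast() above (not part of the vendored file). Broadcasting a std::string exercises the serialized (packed archive) branch of broadcast_impl; the extra serialization header is assumed to be the usual requirement for sending std::string through Boost.MPI:

#include <boost/mpi.hpp>
#include <boost/serialization/string.hpp>
#include <iostream>
#include <string>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::string message;
  if (world.rank() == 0)
    message = "hello from the root";   // only the root fills the value in

  boost::mpi::broadcast(world, message, 0);
  std::cout << "rank " << world.rank() << ": " << message << "\n";
  return 0;
}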
test/external/boost/mpi/collectives/gather.hpp (vendored, new file, 147 lines)
@@ -0,0 +1,147 @@
// Copyright (C) 2005, 2006 Douglas Gregor.

// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// Message Passing Interface 1.1 -- Section 4.5. Gather
#ifndef BOOST_MPI_GATHER_HPP
#define BOOST_MPI_GATHER_HPP

#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <vector>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/assert.hpp>

namespace boost { namespace mpi {

namespace detail {
  // We're gathering at the root for a type that has an associated MPI
  // datatype, so we'll use MPI_Gather to do all of the work.
  template<typename T>
  void
  gather_impl(const communicator& comm, const T* in_values, int n,
              T* out_values, int root, mpl::true_)
  {
    MPI_Datatype type = get_mpi_datatype<T>(*in_values);
    BOOST_MPI_CHECK_RESULT(MPI_Gather,
                           (const_cast<T*>(in_values), n, type,
                            out_values, n, type, root, comm));
  }

  // We're gathering from a non-root for a type that has an associated MPI
  // datatype, so we'll use MPI_Gather to do all of the work.
  template<typename T>
  void
  gather_impl(const communicator& comm, const T* in_values, int n, int root,
              mpl::true_)
  {
    MPI_Datatype type = get_mpi_datatype<T>(*in_values);
    BOOST_MPI_CHECK_RESULT(MPI_Gather,
                           (const_cast<T*>(in_values), n, type,
                            0, n, type, root, comm));
  }

  // We're gathering at the root for a type that does not have an
  // associated MPI datatype, so we'll need to serialize
  // it. Unfortunately, this means that we cannot use MPI_Gather, so
  // we'll just have all of the non-root nodes send individual
  // messages to the root.
  template<typename T>
  void
  gather_impl(const communicator& comm, const T* in_values, int n,
              T* out_values, int root, mpl::false_)
  {
    int tag = environment::collectives_tag();
    int size = comm.size();

    for (int src = 0; src < size; ++src) {
      if (src == root)
        std::copy(in_values, in_values + n, out_values + n * src);
      else
        comm.recv(src, tag, out_values + n * src, n);
    }
  }

  // We're gathering at a non-root for a type that does not have an
  // associated MPI datatype, so we'll need to serialize
  // it. Unfortunately, this means that we cannot use MPI_Gather, so
  // we'll just have all of the non-root nodes send individual
  // messages to the root.
  template<typename T>
  void
  gather_impl(const communicator& comm, const T* in_values, int n, int root,
              mpl::false_)
  {
    int tag = environment::collectives_tag();
    comm.send(root, tag, in_values, n);
  }
} // end namespace detail

template<typename T>
void
gather(const communicator& comm, const T& in_value, T* out_values, int root)
{
  if (comm.rank() == root)
    detail::gather_impl(comm, &in_value, 1, out_values, root,
                        is_mpi_datatype<T>());
  else
    detail::gather_impl(comm, &in_value, 1, root, is_mpi_datatype<T>());
}

template<typename T>
void gather(const communicator& comm, const T& in_value, int root)
{
  BOOST_ASSERT(comm.rank() != root);
  detail::gather_impl(comm, &in_value, 1, root, is_mpi_datatype<T>());
}

template<typename T>
void
gather(const communicator& comm, const T& in_value, std::vector<T>& out_values,
       int root)
{
  if (comm.rank() == root) {
    out_values.resize(comm.size());
    ::boost::mpi::gather(comm, in_value, &out_values[0], root);
  } else {
    ::boost::mpi::gather(comm, in_value, root);
  }
}

template<typename T>
void
gather(const communicator& comm, const T* in_values, int n, T* out_values,
       int root)
{
  if (comm.rank() == root)
    detail::gather_impl(comm, in_values, n, out_values, root,
                        is_mpi_datatype<T>());
  else
    detail::gather_impl(comm, in_values, n, root, is_mpi_datatype<T>());
}

template<typename T>
void
gather(const communicator& comm, const T* in_values, int n,
       std::vector<T>& out_values, int root)
{
  ::boost::mpi::gather(comm, in_values, n, &out_values[0], root);
}

template<typename T>
void gather(const communicator& comm, const T* in_values, int n, int root)
{
  BOOST_ASSERT(comm.rank() != root);
  detail::gather_impl(comm, in_values, n, root, is_mpi_datatype<T>());
}


} } // end namespace boost::mpi

#endif // BOOST_MPI_GATHER_HPP
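A brief sketch of the rooted gather() above (illustrative, not from the commit); only the root ends up with the gathered vector, while the other ranks contribute their value and receive nothing:

#include <boost/mpi.hpp>
#include <iostream>
#include <vector>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  // The vector overload above resizes 'all' on the root only.
  std::vector<int> all;
  boost::mpi::gather(world, world.rank() * world.rank(), all, 0);

  if (world.rank() == 0)
    std::cout << "root gathered " << all.size() << " values\n";
  return 0;
}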
test/external/boost/mpi/collectives/reduce.hpp (vendored, new file, 357 lines)
@@ -0,0 +1,357 @@
// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor@gmail.com>.
// Copyright (C) 2004 The Trustees of Indiana University

// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// Authors: Douglas Gregor
//          Andrew Lumsdaine

// Message Passing Interface 1.1 -- Section 4.9.1. Reduce
#ifndef BOOST_MPI_REDUCE_HPP
#define BOOST_MPI_REDUCE_HPP

#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>

// For (de-)serializing sends and receives
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>

// For packed_[io]archive sends and receives
#include <boost/mpi/detail/point_to_point.hpp>

#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/detail/computation_tree.hpp>
#include <boost/mpi/operations.hpp>
#include <algorithm>
#include <exception>
#include <boost/assert.hpp>
#include <boost/scoped_array.hpp>

namespace boost { namespace mpi {


/************************************************************************
 * Implementation details                                               *
 ************************************************************************/
namespace detail {
  /**********************************************************************
   * Simple reduction with MPI_Reduce                                   *
   **********************************************************************/
  // We are reducing at the root for a type that has an associated MPI
  // datatype and operation, so we'll use MPI_Reduce directly.
  template<typename T, typename Op>
  void
  reduce_impl(const communicator& comm, const T* in_values, int n,
              T* out_values, Op op, int root, mpl::true_ /*is_mpi_op*/,
              mpl::true_/*is_mpi_datatype*/)
  {
    BOOST_MPI_CHECK_RESULT(MPI_Reduce,
                           (const_cast<T*>(in_values), out_values, n,
                            boost::mpi::get_mpi_datatype<T>(*in_values),
                            (is_mpi_op<Op, T>::op()), root, comm));
  }

  // We are reducing to the root for a type that has an associated MPI
  // datatype and operation, so we'll use MPI_Reduce directly.
  template<typename T, typename Op>
  void
  reduce_impl(const communicator& comm, const T* in_values, int n, Op op,
              int root, mpl::true_ /*is_mpi_op*/, mpl::true_/*is_mpi_datatype*/)
  {
    BOOST_MPI_CHECK_RESULT(MPI_Reduce,
                           (const_cast<T*>(in_values), 0, n,
                            boost::mpi::get_mpi_datatype<T>(*in_values),
                            (is_mpi_op<Op, T>::op()), root, comm));
  }

  /**********************************************************************
   * User-defined reduction with MPI_Reduce                             *
   **********************************************************************/

  // We are reducing at the root for a type that has an associated MPI
  // datatype but with a custom operation. We'll use MPI_Reduce
  // directly, but we'll need to create an MPI_Op manually.
  template<typename T, typename Op>
  void
  reduce_impl(const communicator& comm, const T* in_values, int n,
              T* out_values, Op op, int root, mpl::false_ /*is_mpi_op*/,
              mpl::true_/*is_mpi_datatype*/)
  {
    user_op<Op, T> mpi_op(op);
    BOOST_MPI_CHECK_RESULT(MPI_Reduce,
                           (const_cast<T*>(in_values), out_values, n,
                            boost::mpi::get_mpi_datatype<T>(*in_values),
                            mpi_op.get_mpi_op(), root, comm));
  }

  // We are reducing to the root for a type that has an associated MPI
  // datatype but with a custom operation. We'll use MPI_Reduce
  // directly, but we'll need to create an MPI_Op manually.
  template<typename T, typename Op>
  void
  reduce_impl(const communicator& comm, const T* in_values, int n, Op op,
              int root, mpl::false_/*is_mpi_op*/, mpl::true_/*is_mpi_datatype*/)
  {
    user_op<Op, T> mpi_op(op);
    BOOST_MPI_CHECK_RESULT(MPI_Reduce,
                           (const_cast<T*>(in_values), 0, n,
                            boost::mpi::get_mpi_datatype<T>(*in_values),
                            mpi_op.get_mpi_op(), root, comm));
  }

  /**********************************************************************
   * User-defined, tree-based reduction for non-MPI data types          *
   **********************************************************************/

  // Commutative reduction
  template<typename T, typename Op>
  void
  tree_reduce_impl(const communicator& comm, const T* in_values, int n,
                   T* out_values, Op op, int root,
                   mpl::true_ /*is_commutative*/)
  {
    std::copy(in_values, in_values + n, out_values);

    int size = comm.size();
    int rank = comm.rank();

    // The computation tree we will use.
    detail::computation_tree tree(rank, size, root);

    int tag = environment::collectives_tag();

    MPI_Status status;
    int children = 0;
    for (int child = tree.child_begin();
         children < tree.branching_factor() && child != root;
         ++children, child = (child + 1) % size) {
      // Receive archive
      packed_iarchive ia(comm);
      detail::packed_archive_recv(comm, child, tag, ia, status);

      T incoming;
      for (int i = 0; i < n; ++i) {
        ia >> incoming;
        out_values[i] = op(out_values[i], incoming);
      }
    }

    // For non-roots, send the result to the parent.
    if (tree.parent() != rank) {
      packed_oarchive oa(comm);
      for (int i = 0; i < n; ++i)
        oa << out_values[i];
      detail::packed_archive_send(comm, tree.parent(), tag, oa);
    }
  }

  // Commutative reduction from a non-root.
  template<typename T, typename Op>
  void
  tree_reduce_impl(const communicator& comm, const T* in_values, int n, Op op,
                   int root, mpl::true_ /*is_commutative*/)
  {
    scoped_array<T> results(new T[n]);
    detail::tree_reduce_impl(comm, in_values, n, results.get(), op, root,
                             mpl::true_());
  }

  // Non-commutative reduction
  template<typename T, typename Op>
  void
  tree_reduce_impl(const communicator& comm, const T* in_values, int n,
                   T* out_values, Op op, int root,
                   mpl::false_ /*is_commutative*/)
  {
    int tag = environment::collectives_tag();

    int left_child = root / 2;
    int right_child = (root + comm.size()) / 2;

    MPI_Status status;
    if (left_child != root) {
      // Receive value from the left child and merge it with the value
      // we had incoming.
      packed_iarchive ia(comm);
      detail::packed_archive_recv(comm, left_child, tag, ia, status);
      T incoming;
      for (int i = 0; i < n; ++i) {
        ia >> incoming;
        out_values[i] = op(incoming, in_values[i]);
      }
    } else {
      // There was no left value, so copy our incoming value.
      std::copy(in_values, in_values + n, out_values);
    }

    if (right_child != root) {
      // Receive value from the right child and merge it with the
      // value we had incoming.
      packed_iarchive ia(comm);
      detail::packed_archive_recv(comm, right_child, tag, ia, status);
      T incoming;
      for (int i = 0; i < n; ++i) {
        ia >> incoming;
        out_values[i] = op(out_values[i], incoming);
      }
    }
  }

  // Non-commutative reduction from a non-root.
  template<typename T, typename Op>
  void
  tree_reduce_impl(const communicator& comm, const T* in_values, int n, Op op,
                   int root, mpl::false_ /*is_commutative*/)
  {
    int size = comm.size();
    int rank = comm.rank();

    int tag = environment::collectives_tag();

    // Determine our parents and children in the commutative binary
    // computation tree.
    int grandparent = root;
    int parent = root;
    int left_bound = 0;
    int right_bound = size;
    int left_child, right_child;
    do {
      left_child = (left_bound + parent) / 2;
      right_child = (parent + right_bound) / 2;

      if (rank < parent) {
        // Go left.
        grandparent = parent;
        right_bound = parent;
        parent = left_child;
      } else if (rank > parent) {
        // Go right.
        grandparent = parent;
        left_bound = parent + 1;
        parent = right_child;
      } else {
        // We've found the parent
        break;
      }
    } while (true);

    // Our parent is the grandparent of our children. This is a slight
    // abuse of notation, but it makes the send-to-parent below make
    // more sense.
    parent = grandparent;

    MPI_Status status;
    scoped_array<T> out_values(new T[n]);
    if (left_child != rank) {
      // Receive value from the left child and merge it with the value
      // we had incoming.
      packed_iarchive ia(comm);
      detail::packed_archive_recv(comm, left_child, tag, ia, status);
      T incoming;
      for (int i = 0; i < n; ++i) {
        ia >> incoming;
        out_values[i] = op(incoming, in_values[i]);
      }
    } else {
      // There was no left value, so copy our incoming value.
      std::copy(in_values, in_values + n, out_values.get());
    }

    if (right_child != rank) {
      // Receive value from the right child and merge it with the
      // value we had incoming.
      packed_iarchive ia(comm);
      detail::packed_archive_recv(comm, right_child, tag, ia, status);
      T incoming;
      for (int i = 0; i < n; ++i) {
        ia >> incoming;
        out_values[i] = op(out_values[i], incoming);
      }
    }

    // Send the combined value to our parent.
    packed_oarchive oa(comm);
    for (int i = 0; i < n; ++i)
      oa << out_values[i];
    detail::packed_archive_send(comm, parent, tag, oa);
  }

  // We are reducing at the root for a type that has no associated MPI
  // datatype and operation, so we'll use a simple tree-based
  // algorithm.
  template<typename T, typename Op>
  void
  reduce_impl(const communicator& comm, const T* in_values, int n,
              T* out_values, Op op, int root, mpl::false_ /*is_mpi_op*/,
              mpl::false_ /*is_mpi_datatype*/)
  {
    detail::tree_reduce_impl(comm, in_values, n, out_values, op, root,
                             is_commutative<Op, T>());
  }

  // We are reducing to the root for a type that has no associated MPI
  // datatype and operation, so we'll use a simple tree-based
  // algorithm.
  template<typename T, typename Op>
  void
  reduce_impl(const communicator& comm, const T* in_values, int n, Op op,
              int root, mpl::false_ /*is_mpi_op*/,
              mpl::false_ /*is_mpi_datatype*/)
  {
    detail::tree_reduce_impl(comm, in_values, n, op, root,
                             is_commutative<Op, T>());
  }
} // end namespace detail

template<typename T, typename Op>
void
reduce(const communicator& comm, const T* in_values, int n, T* out_values,
       Op op, int root)
{
  if (comm.rank() == root)
    detail::reduce_impl(comm, in_values, n, out_values, op, root,
                        is_mpi_op<Op, T>(), is_mpi_datatype<T>());
  else
    detail::reduce_impl(comm, in_values, n, op, root,
                        is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}

template<typename T, typename Op>
void
reduce(const communicator& comm, const T* in_values, int n, Op op, int root)
{
  BOOST_ASSERT(comm.rank() != root);

  detail::reduce_impl(comm, in_values, n, op, root,
                      is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}

template<typename T, typename Op>
void
reduce(const communicator& comm, const T& in_value, T& out_value, Op op,
       int root)
{
  if (comm.rank() == root)
    detail::reduce_impl(comm, &in_value, 1, &out_value, op, root,
                        is_mpi_op<Op, T>(), is_mpi_datatype<T>());
  else
    detail::reduce_impl(comm, &in_value, 1, op, root,
                        is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}

template<typename T, typename Op>
void reduce(const communicator& comm, const T& in_value, Op op, int root)
{
  BOOST_ASSERT(comm.rank() != root);

  detail::reduce_impl(comm, &in_value, 1, op, root,
                      is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}

} } // end namespace boost::mpi

#endif // BOOST_MPI_REDUCE_HPP
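A minimal sketch of the single-value reduce() overload above (not part of the vendored header); boost::mpi::maximum<int> is one of the predefined operations from operations.hpp, so the plain MPI_Reduce path is taken and only the root observes the result:

#include <boost/mpi.hpp>
#include <iostream>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  int highest = 0;   // meaningful on the root only after the call
  boost::mpi::reduce(world, world.rank(), highest,
                     boost::mpi::maximum<int>(), 0);

  if (world.rank() == 0)
    std::cout << "highest rank: " << highest << "\n";
  return 0;
}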
test/external/boost/mpi/collectives/scan.hpp (vendored, new file, 168 lines)
@@ -0,0 +1,168 @@
// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor@gmail.com>.
// Copyright (C) 2004 The Trustees of Indiana University

// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// Authors: Douglas Gregor
//          Andrew Lumsdaine

// Message Passing Interface 1.1 -- Section 4.9.1. Scan
#ifndef BOOST_MPI_SCAN_HPP
#define BOOST_MPI_SCAN_HPP

#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>

// For (de-)serializing sends and receives
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>

// For packed_[io]archive sends and receives
#include <boost/mpi/detail/point_to_point.hpp>

#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/detail/computation_tree.hpp>
#include <boost/mpi/operations.hpp>
#include <algorithm>
#include <exception>
#include <boost/assert.hpp>

namespace boost { namespace mpi {


/************************************************************************
 * Implementation details                                               *
 ************************************************************************/
namespace detail {
  /**********************************************************************
   * Simple prefix reduction with MPI_Scan                              *
   **********************************************************************/

  // We are performing prefix reduction for a type that has an
  // associated MPI datatype and operation, so we'll use MPI_Scan
  // directly.
  template<typename T, typename Op>
  void
  scan_impl(const communicator& comm, const T* in_values, int n, T* out_values,
            Op op, mpl::true_ /*is_mpi_op*/, mpl::true_ /*is_mpi_datatype*/)
  {
    BOOST_MPI_CHECK_RESULT(MPI_Scan,
                           (const_cast<T*>(in_values), out_values, n,
                            boost::mpi::get_mpi_datatype<T>(*in_values),
                            (is_mpi_op<Op, T>::op()), comm));
  }

  /**********************************************************************
   * User-defined prefix reduction with MPI_Scan                        *
   **********************************************************************/

  // We are performing prefix reduction for a type that has an
  // associated MPI datatype but with a custom operation. We'll use
  // MPI_Scan directly, but we'll need to create an MPI_Op manually.
  template<typename T, typename Op>
  void
  scan_impl(const communicator& comm, const T* in_values, int n, T* out_values,
            Op op, mpl::false_ /*is_mpi_op*/, mpl::true_ /*is_mpi_datatype*/)
  {
    user_op<Op, T> mpi_op(op);
    BOOST_MPI_CHECK_RESULT(MPI_Scan,
                           (const_cast<T*>(in_values), out_values, n,
                            boost::mpi::get_mpi_datatype<T>(*in_values),
                            mpi_op.get_mpi_op(), comm));
  }

  /**********************************************************************
   * User-defined, tree-based reduction for non-MPI data types          *
   **********************************************************************/

  template<typename T, typename Op>
  void
  upper_lower_scan(const communicator& comm, const T* in_values, int n,
                   T* out_values, Op& op, int lower, int upper)
  {
    int tag = environment::collectives_tag();
    int rank = comm.rank();

    if (lower + 1 == upper) {
      std::copy(in_values, in_values + n, out_values);
    } else {
      int middle = (lower + upper) / 2;

      if (rank < middle) {
        // Lower half
        upper_lower_scan(comm, in_values, n, out_values, op, lower, middle);

        // If we're the last process in the lower half, send our values
        // to everyone in the upper half.
        if (rank == middle - 1) {
          packed_oarchive oa(comm);
          for (int i = 0; i < n; ++i)
            oa << out_values[i];

          for (int p = middle; p < upper; ++p)
            comm.send(p, tag, oa);
        }
      } else {
        // Upper half
        upper_lower_scan(comm, in_values, n, out_values, op, middle, upper);

        // Receive value from the last process in the lower half.
        packed_iarchive ia(comm);
        comm.recv(middle - 1, tag, ia);

        // Combine value that came from the left with our value
        T left_value;
        for (int i = 0; i < n; ++i)
        {
          ia >> left_value;
          out_values[i] = op(left_value, out_values[i]);
        }
      }
    }
  }

  // We are performing prefix reduction for a type that has no
  // associated MPI datatype and operation, so we'll use a simple
  // upper/lower algorithm.
  template<typename T, typename Op>
  inline void
  scan_impl(const communicator& comm, const T* in_values, int n, T* out_values,
            Op op, mpl::false_ /*is_mpi_op*/, mpl::false_/*is_mpi_datatype*/)
  {
    upper_lower_scan(comm, in_values, n, out_values, op, 0, comm.size());
  }
} // end namespace detail


template<typename T, typename Op>
inline void
scan(const communicator& comm, const T& in_value, T& out_value, Op op)
{
  detail::scan_impl(comm, &in_value, 1, &out_value, op,
                    is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}

template<typename T, typename Op>
inline void
scan(const communicator& comm, const T* in_values, int n, T* out_values, Op op)
{
  detail::scan_impl(comm, in_values, n, out_values, op,
                    is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}

template<typename T, typename Op>
inline T
scan(const communicator& comm, const T& in_value, Op op)
{
  T out_value;
  detail::scan_impl(comm, &in_value, 1, &out_value, op,
                    is_mpi_op<Op, T>(), is_mpi_datatype<T>());
  return out_value;
}

} } // end namespace boost::mpi

#endif // BOOST_MPI_SCAN_HPP
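A short sketch of the value-returning scan() overload above (illustrative only); with std::plus<int> the result on rank r is the inclusive prefix sum of the contributions from ranks 0 through r:

#include <boost/mpi.hpp>
#include <functional>
#include <iostream>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  // Rank r contributes r + 1, so it receives 1 + 2 + ... + (r + 1).
  int prefix = boost::mpi::scan(world, world.rank() + 1, std::plus<int>());

  std::cout << "rank " << world.rank() << " prefix sum " << prefix << "\n";
  return 0;
}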
test/external/boost/mpi/collectives/scatter.hpp (vendored, new file, 161 lines)
@@ -0,0 +1,161 @@
// Copyright (C) 2005, 2006 Douglas Gregor.

// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// Message Passing Interface 1.1 -- Section 4.6. Scatter
#ifndef BOOST_MPI_SCATTER_HPP
#define BOOST_MPI_SCATTER_HPP

#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <vector>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/assert.hpp>

namespace boost { namespace mpi {

namespace detail {
  // We're scattering from the root for a type that has an associated MPI
  // datatype, so we'll use MPI_Scatter to do all of the work.
  template<typename T>
  void
  scatter_impl(const communicator& comm, const T* in_values, T* out_values,
               int n, int root, mpl::true_)
  {
    MPI_Datatype type = get_mpi_datatype<T>(*in_values);
    BOOST_MPI_CHECK_RESULT(MPI_Scatter,
                           (const_cast<T*>(in_values), n, type,
                            out_values, n, type, root, comm));
  }

  // We're scattering from a non-root for a type that has an associated MPI
  // datatype, so we'll use MPI_Scatter to do all of the work.
  template<typename T>
  void
  scatter_impl(const communicator& comm, T* out_values, int n, int root,
               mpl::true_)
  {
    MPI_Datatype type = get_mpi_datatype<T>(*out_values);
    BOOST_MPI_CHECK_RESULT(MPI_Scatter,
                           (0, n, type,
                            out_values, n, type,
                            root, comm));
  }

  // We're scattering from the root for a type that does not have an
  // associated MPI datatype, so we'll need to serialize
  // it. Unfortunately, this means that we cannot use MPI_Scatter, so
  // we'll just have the root send individual messages to the other
  // processes.
  template<typename T>
  void
  scatter_impl(const communicator& comm, const T* in_values, T* out_values,
               int n, int root, mpl::false_)
  {
    int tag = environment::collectives_tag();
    int size = comm.size();

    for (int dest = 0; dest < size; ++dest) {
      if (dest == root) {
        // Our own values will never be transmitted: just copy them.
        std::copy(in_values + dest * n, in_values + (dest + 1) * n, out_values);
      } else {
        // Send archive
        packed_oarchive oa(comm);
        for (int i = 0; i < n; ++i)
          oa << in_values[dest * n + i];
        detail::packed_archive_send(comm, dest, tag, oa);
      }
    }
  }

  // We're scattering to a non-root for a type that does not have an
  // associated MPI datatype, so we'll need to de-serialize
  // it. Unfortunately, this means that we cannot use MPI_Scatter, so
  // each non-root process just receives the individual message the
  // root sent to it.
  template<typename T>
  void
  scatter_impl(const communicator& comm, T* out_values, int n, int root,
               mpl::false_)
  {
    int tag = environment::collectives_tag();

    packed_iarchive ia(comm);
    MPI_Status status;
    detail::packed_archive_recv(comm, root, tag, ia, status);
    for (int i = 0; i < n; ++i)
      ia >> out_values[i];
  }
} // end namespace detail

template<typename T>
void
scatter(const communicator& comm, const T* in_values, T& out_value, int root)
{
  if (comm.rank() == root)
    detail::scatter_impl(comm, in_values, &out_value, 1, root,
                         is_mpi_datatype<T>());
  else
    detail::scatter_impl(comm, &out_value, 1, root, is_mpi_datatype<T>());
}

template<typename T>
void
scatter(const communicator& comm, const std::vector<T>& in_values, T& out_value,
        int root)
{
  if (comm.rank() == root)
    ::boost::mpi::scatter<T>(comm, &in_values[0], out_value, root);
  else
    ::boost::mpi::scatter<T>(comm, static_cast<const T*>(0), out_value,
                             root);
}

template<typename T>
void scatter(const communicator& comm, T& out_value, int root)
{
  BOOST_ASSERT(comm.rank() != root);
  detail::scatter_impl(comm, &out_value, 1, root, is_mpi_datatype<T>());
}

template<typename T>
void
scatter(const communicator& comm, const T* in_values, T* out_values, int n,
        int root)
{
  if (comm.rank() == root)
    detail::scatter_impl(comm, in_values, out_values, n, root,
                         is_mpi_datatype<T>());
  else
    detail::scatter_impl(comm, out_values, n, root, is_mpi_datatype<T>());
}

template<typename T>
void
scatter(const communicator& comm, const std::vector<T>& in_values,
        T* out_values, int n, int root)
{
  if (comm.rank() == root)
    ::boost::mpi::scatter(comm, &in_values[0], out_values, n, root);
  else
    ::boost::mpi::scatter(comm, static_cast<const T*>(0), out_values,
                          n, root);
}

template<typename T>
void scatter(const communicator& comm, T* out_values, int n, int root)
{
  BOOST_ASSERT(comm.rank() != root);
  detail::scatter_impl(comm, out_values, n, root, is_mpi_datatype<T>());
}

} } // end namespace boost::mpi

#endif // BOOST_MPI_SCATTER_HPP
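A closing sketch of the vector-to-value scatter() overload above (not taken from the commit); only the root needs to populate the input vector, and every rank receives the element at its own index:

#include <boost/mpi.hpp>
#include <iostream>
#include <vector>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::vector<int> payload;
  if (world.rank() == 0)
    for (int p = 0; p < world.size(); ++p)
      payload.push_back(10 * p);   // illustrative per-rank values

  int mine = 0;
  boost::mpi::scatter(world, payload, mine, 0);
  std::cout << "rank " << world.rank() << " received " << mine << "\n";
  return 0;
}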