Added boost header

This commit is contained in:
Christophe Riccio
2012-01-08 01:26:07 +00:00
parent 9c3faaca40
commit c7d752cdf8
8946 changed files with 1732316 additions and 0 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,148 @@
// Copyright (C) 2007 Douglas Gregor
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// This file contains code for the distributed adjacency list's
// message handlers. It should not be included directly by users.
#ifndef BOOST_GRAPH_DISTRIBUTED_ADJLIST_HANDLERS_HPP
#define BOOST_GRAPH_DISTRIBUTED_ADJLIST_HANDLERS_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/parallel/simple_trigger.hpp>
#include <boost/graph/parallel/detail/untracked_pair.hpp>
namespace boost {
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
void
PBGL_DISTRIB_ADJLIST_TYPE::
setup_triggers()
{
using boost::graph::parallel::simple_trigger;
// Bind each distributed-adjacency-list message tag to the member function
// that services it: simple_trigger registers, with this graph's process
// group, a handler that invokes the given handle_* member on `this`
// whenever a message carrying the corresponding tag arrives.
simple_trigger(process_group_, msg_add_vertex_with_property, this,
&adjacency_list::handle_add_vertex_with_property);
simple_trigger(process_group_, msg_add_vertex_with_property_and_reply, this,
&adjacency_list::handle_add_vertex_with_property_and_reply);
simple_trigger(process_group_, msg_add_edge, this,
&adjacency_list::handle_add_edge);
simple_trigger(process_group_, msg_add_edge_with_reply, this,
&adjacency_list::handle_add_edge_with_reply);
simple_trigger(process_group_, msg_add_edge_with_property, this,
&adjacency_list::handle_add_edge_with_property);
simple_trigger(process_group_, msg_add_edge_with_property_and_reply, this,
&adjacency_list::handle_add_edge_with_property_and_reply);
simple_trigger(process_group_, msg_nonlocal_edge, this,
&adjacency_list::handle_nonlocal_edge);
simple_trigger(process_group_, msg_remove_edge, this,
&adjacency_list::handle_remove_edge);
}
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
void
PBGL_DISTRIB_ADJLIST_TYPE::
handle_add_vertex_with_property(int source, int tag,
const vertex_property_type& data,
trigger_receive_context)
{
vertex_descriptor v(this->processor(),
add_vertex(this->build_vertex_property(data),
this->base()));
if (on_add_vertex)
on_add_vertex(v, *this);
}
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
typename PBGL_DISTRIB_ADJLIST_TYPE::local_vertex_descriptor
PBGL_DISTRIB_ADJLIST_TYPE::
handle_add_vertex_with_property_and_reply(int source, int tag,
const vertex_property_type& data,
trigger_receive_context)
{
// Trigger handler: add a vertex built from the transmitted property and
// reply with its local descriptor, from which the requesting process can
// form a global descriptor. Note: no lookup is performed -- the vertex is
// added unconditionally. (An earlier comment here, "Try to find a vertex
// with this name", was stale copy-paste from named-vertex code.)
local_vertex_descriptor local_v
= add_vertex(this->build_vertex_property(data), this->base());
vertex_descriptor v(processor(), local_v);
// Fire the user-supplied vertex-addition callback, if one is installed.
if (on_add_vertex)
on_add_vertex(v, *this);
return local_v;
}
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
void
PBGL_DISTRIB_ADJLIST_TYPE::
handle_add_edge(int source, int tag, const msg_add_edge_data& data,
trigger_receive_context)
{
add_edge(vertex_descriptor(processor(), data.source),
data.target, *this);
}
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
boost::parallel::detail::untracked_pair<typename PBGL_DISTRIB_ADJLIST_TYPE::edge_descriptor, bool>
PBGL_DISTRIB_ADJLIST_TYPE::
handle_add_edge_with_reply(int source, int tag, const msg_add_edge_data& data,
trigger_receive_context)
{
// Trigger handler: add an edge whose source vertex is owned by this
// process and reply with the (edge descriptor, inserted?) pair.
// NOTE(review): the reply travels as an untracked_pair (see
// parallel/detail/untracked_pair.hpp); presumably this avoids the usual
// message-tracking/serialization bookkeeping for std::pair -- confirm there.
std::pair<typename PBGL_DISTRIB_ADJLIST_TYPE::edge_descriptor, bool> p =
add_edge(vertex_descriptor(processor(), data.source),data.target, *this);
return p;
}
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
void
PBGL_DISTRIB_ADJLIST_TYPE::
handle_add_edge_with_property(int source, int tag,
const msg_add_edge_with_property_data& data,
trigger_receive_context)
{
add_edge(vertex_descriptor(processor(), data.source),
data.target, data.get_property(), *this);
}
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
boost::parallel::detail::untracked_pair<typename PBGL_DISTRIB_ADJLIST_TYPE::edge_descriptor, bool>
PBGL_DISTRIB_ADJLIST_TYPE::
handle_add_edge_with_property_and_reply
(int source, int tag,
const msg_add_edge_with_property_data& data,
trigger_receive_context)
{
// Trigger handler: add an edge (with its transmitted property) whose
// source vertex is owned by this process, and reply with the resulting
// (edge descriptor, inserted?) pair, converted to an untracked_pair.
std::pair<typename PBGL_DISTRIB_ADJLIST_TYPE::edge_descriptor, bool> p =
add_edge(vertex_descriptor(processor(), data.source),
data.target, data.get_property(), *this);
return p;
}
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
void
PBGL_DISTRIB_ADJLIST_TYPE::
handle_nonlocal_edge(int source, int tag,
const msg_nonlocal_edge_data& data,
trigger_receive_context)
{
// Trigger handler: record an edge whose other endpoint lives on the
// sending process. The directed_selector() tag dispatches to the
// directedness-specific overload of add_remote_edge.
add_remote_edge(data, source, directed_selector());
}
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
void
PBGL_DISTRIB_ADJLIST_TYPE::
handle_remove_edge(int source, int tag,
const msg_remove_edge_data& data,
trigger_receive_context)
{
// Trigger handler: erase the local storage for an edge removed by the
// sending process. The directed_selector() tag dispatches to the
// directedness-specific overload of remove_local_edge.
remove_local_edge(data, source, directed_selector());
}
}
#endif // BOOST_GRAPH_DISTRIBUTED_ADJLIST_HANDLERS_HPP

View File

@@ -0,0 +1,319 @@
// Copyright (C) 2007 Douglas Gregor
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// This file contains code for the distributed adjacency list's
// initializations. It should not be included directly by users.
#ifndef BOOST_GRAPH_DISTRIBUTED_ADJLIST_INITIALIZE_HPP
#define BOOST_GRAPH_DISTRIBUTED_ADJLIST_INITIALIZE_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
namespace boost {
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
template<typename EdgeIterator>
void
PBGL_DISTRIB_ADJLIST_TYPE::
initialize(EdgeIterator first, EdgeIterator last,
           vertices_size_type, const base_distribution_type& distribution,
           vecS)
{
  // Initialize a vecS-vertex-list graph from an edge list (no edge
  // properties). With vecS storage, vertex descriptors are computable
  // directly from vertex numbers via the distribution, so each process
  // simply adds the edges whose source vertex it owns; targets may be
  // remote and are reached by message.
  process_id_type id = process_id(process_group_);
  while (first != last) {
    // static_cast (rather than the previous C-style cast) for consistency
    // with the edge-property overload of initialize() below.
    if (static_cast<process_id_type>(distribution(first->first)) == id) {
      vertex_descriptor source(id, distribution.local(first->first));
      vertex_descriptor target(distribution(first->second),
                               distribution.local(first->second));
      add_edge(source, target, *this);
    }
    ++first;
  }
  // Ensure all remotely-targeted add_edge messages are delivered before
  // construction is considered complete.
  synchronize(process_group_);
}
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
template<typename EdgeIterator, typename EdgePropertyIterator>
void
PBGL_DISTRIB_ADJLIST_TYPE::
initialize(EdgeIterator first, EdgeIterator last,
           EdgePropertyIterator ep_iter,
           vertices_size_type, const base_distribution_type& distribution,
           vecS)
{
  // Initialize a vecS-vertex-list graph from an edge list plus a parallel
  // iterator of edge properties. vecS storage lets every process compute
  // vertex descriptors arithmetically from the distribution, so no
  // descriptor exchange is needed.
  const process_id_type self = process_id(process_group_);
  for (; first != last; ++first, ++ep_iter) {
    // Only the owner of the source vertex inserts this edge.
    if (static_cast<process_id_type>(distribution(first->first)) != self)
      continue;
    vertex_descriptor u(self, distribution.local(first->first));
    vertex_descriptor w(distribution(first->second),
                        distribution.local(first->second));
    add_edge(u, w, *ep_iter, *this);
  }
  // Deliver any remote add_edge messages before returning.
  synchronize(process_group_);
}
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
template<typename EdgeIterator, typename EdgePropertyIterator,
typename VertexListS>
void
PBGL_DISTRIB_ADJLIST_TYPE::
initialize(EdgeIterator first, EdgeIterator last,
EdgePropertyIterator ep_iter,
vertices_size_type n, const base_distribution_type& distribution,
VertexListS)
{
// Initialize a graph whose vertex list is NOT vecS from an edge list plus
// a parallel iterator of edge properties. Unlike the vecS overloads,
// remote vertex descriptors cannot be computed from vertex numbers here,
// so they must be requested from their owners (via all-to-all exchanges)
// before the affected ("delayed") edges can be added.
using boost::parallel::inplace_all_to_all;
typedef vertices_size_type vertex_number_t;
typedef typename std::iterator_traits<EdgePropertyIterator>::value_type
edge_property_init_t;
// (source descriptor, target vertex number) for an edge we must defer.
typedef std::pair<vertex_descriptor, vertex_number_t>
st_pair;
typedef std::pair<st_pair, edge_property_init_t> delayed_edge_t;
process_group_type pg = process_group();
process_id_type id = process_id(pg);
// Vertex indices: map each local vertex number to its local descriptor.
std::vector<local_vertex_descriptor> index_to_vertex;
index_to_vertex.reserve(num_vertices(*this));
BGL_FORALL_VERTICES_T(v, base(), inherited)
index_to_vertex.push_back(v);
// The list of edges we can't add immediately.
std::vector<delayed_edge_t> delayed_edges;
// Per-destination lists of remote vertex numbers whose descriptors we need.
std::vector<std::vector<vertex_number_t> > descriptor_requests;
descriptor_requests.resize(num_processes(pg));
// Add all of the edges we can, up to the point where we run
// into a descriptor we don't know.
while (first != last) {
if (distribution(first->first) == id) {
if (distribution(first->second) != id) break;
vertex_descriptor source
(id, index_to_vertex[distribution.local(first->first)]);
vertex_descriptor target
(distribution(first->second),
index_to_vertex[distribution.local(first->second)]);
add_edge(source, target, *ep_iter, *this);
}
++first;
++ep_iter;
}
// Queue all of the remaining edges and determine the set of
// descriptors we need to know about.
while (first != last) {
if (distribution(first->first) == id) {
vertex_descriptor source
(id, index_to_vertex[distribution.local(first->first)]);
process_id_type dest = distribution(first->second);
if (dest != id) {
descriptor_requests[dest]
.push_back(distribution.local(first->second));
// Compact request list if we need to
if (descriptor_requests[dest].size() >
distribution.block_size(dest, n)) {
std::sort(descriptor_requests[dest].begin(),
descriptor_requests[dest].end());
descriptor_requests[dest].erase(
std::unique(descriptor_requests[dest].begin(),
descriptor_requests[dest].end()),
descriptor_requests[dest].end());
}
}
// Save the edge for later
delayed_edges.push_back
(delayed_edge_t(st_pair(source, first->second), *ep_iter));
}
++first;
++ep_iter;
}
// Compact descriptor requests: sorted + unique, which also enables the
// binary search (lower_bound) used when resolving delayed edges below.
for (process_id_type dest = 0; dest < num_processes(pg); ++dest) {
std::sort(descriptor_requests[dest].begin(),
descriptor_requests[dest].end());
descriptor_requests[dest].erase(
std::unique(descriptor_requests[dest].begin(),
descriptor_requests[dest].end()),
descriptor_requests[dest].end());
}
// Send out all of the descriptor requests
std::vector<std::vector<vertex_number_t> > in_descriptor_requests;
in_descriptor_requests.resize(num_processes(pg));
inplace_all_to_all(pg, descriptor_requests, in_descriptor_requests);
// Reply to all of the descriptor requests: responses are positionally
// aligned with the (sorted) request lists.
std::vector<std::vector<local_vertex_descriptor> >
descriptor_responses;
descriptor_responses.resize(num_processes(pg));
for (process_id_type dest = 0; dest < num_processes(pg); ++dest) {
for (std::size_t i = 0; i < in_descriptor_requests[dest].size(); ++i) {
local_vertex_descriptor v =
index_to_vertex[in_descriptor_requests[dest][i]];
descriptor_responses[dest].push_back(v);
}
in_descriptor_requests[dest].clear();
}
in_descriptor_requests.clear();
inplace_all_to_all(pg, descriptor_responses);
// Add the queued edges
for(typename std::vector<delayed_edge_t>::iterator i
= delayed_edges.begin(); i != delayed_edges.end(); ++i) {
process_id_type dest = distribution(i->first.second);
local_vertex_descriptor tgt_local;
if (dest == id) {
tgt_local = index_to_vertex[distribution.local(i->first.second)];
} else {
// `requests` is sorted/unique, so lower_bound gives the position of
// this target's request, and hence the index of its response.
std::vector<vertex_number_t>& requests = descriptor_requests[dest];
typename std::vector<vertex_number_t>::iterator pos =
std::lower_bound(requests.begin(), requests.end(),
distribution.local(i->first.second));
tgt_local = descriptor_responses[dest][pos - requests.begin()];
}
add_edge(i->first.first, vertex_descriptor(dest, tgt_local),
i->second, *this);
}
synchronize(process_group_);
}
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
template<typename EdgeIterator, typename VertexListS>
void
PBGL_DISTRIB_ADJLIST_TYPE::
initialize(EdgeIterator first, EdgeIterator last,
vertices_size_type n, const base_distribution_type& distribution,
VertexListS)
{
// Initialize a graph whose vertex list is NOT vecS from a plain edge list
// (no edge properties). Same protocol as the edge-property overload above:
// remote vertex descriptors are requested from their owners via all-to-all
// exchanges before the deferred edges can be added.
using boost::parallel::inplace_all_to_all;
typedef vertices_size_type vertex_number_t;
// (source descriptor, target vertex number) for an edge we must defer.
typedef std::pair<vertex_descriptor, vertex_number_t> delayed_edge_t;
process_group_type pg = process_group();
process_id_type id = process_id(pg);
// Vertex indices: map each local vertex number to its local descriptor.
std::vector<local_vertex_descriptor> index_to_vertex;
index_to_vertex.reserve(num_vertices(*this));
BGL_FORALL_VERTICES_T(v, base(), inherited)
index_to_vertex.push_back(v);
// The list of edges we can't add immediately.
std::vector<delayed_edge_t> delayed_edges;
// Per-destination lists of remote vertex numbers whose descriptors we need.
std::vector<std::vector<vertex_number_t> > descriptor_requests;
descriptor_requests.resize(num_processes(pg));
// Add all of the edges we can, up to the point where we run
// into a descriptor we don't know.
while (first != last) {
if (distribution(first->first) == id) {
if (distribution(first->second) != id) break;
vertex_descriptor source
(id, index_to_vertex[distribution.local(first->first)]);
vertex_descriptor target
(distribution(first->second),
index_to_vertex[distribution.local(first->second)]);
add_edge(source, target, *this);
}
++first;
}
// Queue all of the remaining edges and determine the set of
// descriptors we need to know about.
while (first != last) {
if (distribution(first->first) == id) {
vertex_descriptor source
(id, index_to_vertex[distribution.local(first->first)]);
process_id_type dest = distribution(first->second);
if (dest != id) {
descriptor_requests[dest]
.push_back(distribution.local(first->second));
// Compact request list if we need to
if (descriptor_requests[dest].size() >
distribution.block_size(dest, n)) {
std::sort(descriptor_requests[dest].begin(),
descriptor_requests[dest].end());
descriptor_requests[dest].erase(
std::unique(descriptor_requests[dest].begin(),
descriptor_requests[dest].end()),
descriptor_requests[dest].end());
}
}
// Save the edge for later
delayed_edges.push_back(delayed_edge_t(source, first->second));
}
++first;
}
// Compact descriptor requests: sorted + unique, which also enables the
// binary search (lower_bound) used when resolving delayed edges below.
for (process_id_type dest = 0; dest < num_processes(pg); ++dest) {
std::sort(descriptor_requests[dest].begin(),
descriptor_requests[dest].end());
descriptor_requests[dest].erase(
std::unique(descriptor_requests[dest].begin(),
descriptor_requests[dest].end()),
descriptor_requests[dest].end());
}
// Send out all of the descriptor requests
std::vector<std::vector<vertex_number_t> > in_descriptor_requests;
in_descriptor_requests.resize(num_processes(pg));
inplace_all_to_all(pg, descriptor_requests, in_descriptor_requests);
// Reply to all of the descriptor requests: responses are positionally
// aligned with the (sorted) request lists.
std::vector<std::vector<local_vertex_descriptor> >
descriptor_responses;
descriptor_responses.resize(num_processes(pg));
for (process_id_type dest = 0; dest < num_processes(pg); ++dest) {
for (std::size_t i = 0; i < in_descriptor_requests[dest].size(); ++i) {
local_vertex_descriptor v =
index_to_vertex[in_descriptor_requests[dest][i]];
descriptor_responses[dest].push_back(v);
}
in_descriptor_requests[dest].clear();
}
in_descriptor_requests.clear();
inplace_all_to_all(pg, descriptor_responses);
// Add the queued edges
for(typename std::vector<delayed_edge_t>::iterator i
= delayed_edges.begin(); i != delayed_edges.end(); ++i) {
process_id_type dest = distribution(i->second);
local_vertex_descriptor tgt_local;
if (dest == id) {
tgt_local = index_to_vertex[distribution.local(i->second)];
} else {
// `requests` is sorted/unique, so lower_bound gives the position of
// this target's request, and hence the index of its response.
std::vector<vertex_number_t>& requests = descriptor_requests[dest];
typename std::vector<vertex_number_t>::iterator pos =
std::lower_bound(requests.begin(), requests.end(),
distribution.local(i->second));
tgt_local = descriptor_responses[dest][pos - requests.begin()];
}
add_edge(i->first, vertex_descriptor(dest, tgt_local), *this);
}
synchronize(process_group_);
}
} // end namespace boost
#endif // BOOST_GRAPH_DISTRIBUTED_ADJLIST_INITIALIZE_HPP

View File

@@ -0,0 +1,393 @@
// Copyright (C) 2005-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
//
// Implements redistribution of vertices for a distributed adjacency
// list. This file should not be included by users. It will be
// included by the distributed adjacency list header.
//
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/pending/container_traits.hpp>
namespace boost { namespace detail { namespace parallel {
/* This structure contains a (vertex or edge) descriptor that is being
moved from one processor to another. It contains the properties for
that descriptor (if any).
*/
template<typename Descriptor, typename DescriptorProperty>
struct redistributed_descriptor : maybe_store_property<DescriptorProperty>
{
typedef maybe_store_property<DescriptorProperty> inherited;
// Default constructor required for deserialization into containers.
redistributed_descriptor() { }
redistributed_descriptor(const Descriptor& v, const DescriptorProperty& p)
: inherited(p), descriptor(v) { }
// The descriptor being shipped; its property lives in the base class.
Descriptor descriptor;
private:
friend class boost::serialization::access;
template<typename Archiver>
void serialize(Archiver& ar, unsigned int /*version*/)
{
// Serialize the (possibly absent) property via the base class, and the
// descriptor via unsafe_serialize (raw bitwise transfer of the handle).
ar & boost::serialization::base_object<inherited>(*this)
& unsafe_serialize(descriptor);
}
};
/* Predicate that returns true if the target has migrated. */
template<typename VertexProcessorMap, typename Graph>
struct target_migrated_t
{
typedef typename graph_traits<Graph>::vertex_descriptor Vertex;
typedef typename graph_traits<Graph>::edge_descriptor Edge;
target_migrated_t(VertexProcessorMap vertex_to_processor, const Graph& g)
: vertex_to_processor(vertex_to_processor), g(g) { }
// True when the target of edge `e` is now assigned (by the
// vertex->processor map) to a process other than the one recorded in the
// edge's target-processor property.
bool operator()(Edge e) const
{
typedef global_descriptor<Vertex> DVertex;
processor_id_type owner = get(edge_target_processor_id, g, e);
return get(vertex_to_processor, DVertex(owner, target(e, g))) != owner;
}
private:
VertexProcessorMap vertex_to_processor;
const Graph& g;
};
template<typename VertexProcessorMap, typename Graph>
inline target_migrated_t<VertexProcessorMap, Graph>
target_migrated(VertexProcessorMap vertex_to_processor, const Graph& g)
{
  // Convenience factory that deduces the predicate's template arguments.
  typedef target_migrated_t<VertexProcessorMap, Graph> result_type;
  return result_type(vertex_to_processor, g);
}
/* Predicate that returns true if the source of an in-edge has migrated. */
template<typename VertexProcessorMap, typename Graph>
struct source_migrated_t
{
typedef typename graph_traits<Graph>::vertex_descriptor Vertex;
typedef typename graph_traits<Graph>::edge_descriptor Edge;
source_migrated_t(VertexProcessorMap vertex_to_processor, const Graph& g)
: vertex_to_processor(vertex_to_processor), g(g) { }
bool operator()(stored_in_edge<Edge> e) const
{
return get(vertex_to_processor, DVertex(e.source_processor, source(e.e, g)))
!= e.source_processor;
}
private:
VertexProcessorMap vertex_to_processor;
const Graph& g;
};
template<typename VertexProcessorMap, typename Graph>
inline source_migrated_t<VertexProcessorMap, Graph>
source_migrated(VertexProcessorMap vertex_to_processor, const Graph& g)
{
  // Convenience factory that deduces the predicate's template arguments.
  typedef source_migrated_t<VertexProcessorMap, Graph> result_type;
  return result_type(vertex_to_processor, g);
}
/* Predicate that returns true if the target has migrated. */
template<typename VertexProcessorMap, typename Graph>
struct source_or_target_migrated_t
{
typedef typename graph_traits<Graph>::edge_descriptor Edge;
source_or_target_migrated_t(VertexProcessorMap vertex_to_processor,
const Graph& g)
: vertex_to_processor(vertex_to_processor), g(g) { }
// True when either endpoint of edge `e` is now assigned (by the
// vertex->processor map) to a process other than its current owner.
bool operator()(Edge e) const
{
return get(vertex_to_processor, source(e, g)) != source(e, g).owner
|| get(vertex_to_processor, target(e, g)) != target(e, g).owner;
}
private:
VertexProcessorMap vertex_to_processor;
const Graph& g;
};
template<typename VertexProcessorMap, typename Graph>
inline source_or_target_migrated_t<VertexProcessorMap, Graph>
source_or_target_migrated(VertexProcessorMap vertex_to_processor,
                          const Graph& g)
{
  // Convenience factory that deduces the predicate's template arguments.
  return source_or_target_migrated_t<VertexProcessorMap, Graph>
           (vertex_to_processor, g);
}
} } // end of namespace detail::parallel
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
template<typename VertexProcessorMap>
void
PBGL_DISTRIB_ADJLIST_TYPE
::request_in_neighbors(vertex_descriptor v,
VertexProcessorMap vertex_to_processor,
bidirectionalS)
{
// Bidirectional specialization: ask the vertex->processor ghost-cell map
// for the new location of every in-neighbor of v, so the values are
// available after the next synchronize().
BGL_FORALL_INEDGES_T(v, e, *this, graph_type)
request(vertex_to_processor, source(e, *this));
}
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
template<typename VertexProcessorMap>
void
PBGL_DISTRIB_ADJLIST_TYPE
::remove_migrated_in_edges(vertex_descriptor v,
VertexProcessorMap vertex_to_processor,
bidirectionalS)
{
// Bidirectional specialization: erase from v's stored in-edge list every
// in-edge whose source vertex has been reassigned to another process.
graph_detail::erase_if(get(vertex_in_edges, base())[v.local],
source_migrated(vertex_to_processor, base()));
}
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
template<typename VertexProcessorMap>
void
PBGL_DISTRIB_ADJLIST_TYPE
::redistribute(VertexProcessorMap vertex_to_processor)
{
// Collective operation: move every vertex (with its property) to the
// process named by vertex_to_processor, rebuild all edges against the new
// descriptors, and clear the old distribution. Must be called by all
// processes in the group; costs O(|V| + |E|) communication.
using boost::parallel::inplace_all_to_all;
// When we have stable descriptors, we only move those descriptors
// that actually need to be moved. Otherwise, we essentially have to
// regenerate the entire graph.
const bool has_stable_descriptors =
is_same<typename config_type::vertex_list_selector, listS>::value
|| is_same<typename config_type::vertex_list_selector, setS>::value
|| is_same<typename config_type::vertex_list_selector, multisetS>::value;
typedef detail::parallel::redistributed_descriptor<vertex_descriptor,
vertex_property_type>
redistributed_vertex;
typedef detail::parallel::redistributed_descriptor<edge_descriptor,
edge_property_type>
redistributed_edge;
// NOTE(review): this typedef is never referenced in this function.
typedef std::pair<vertices_size_type, edges_size_type> num_relocated_pair;
vertex_iterator vi, vi_end;
edge_iterator ei, ei_end;
process_group_type pg = process_group();
// Initial synchronization makes sure that we have all of our ducks
// in a row. We don't want any outstanding add/remove messages
// coming in mid-redistribution!
synchronize(process_group_);
// We cannot cope with eviction of ghost cells
vertex_to_processor.set_max_ghost_cells(0);
process_id_type p = num_processes(pg);
// Send vertices and edges to the processor where they will
// actually reside. This requires O(|V| + |E|) communication
std::vector<std::vector<redistributed_vertex> > redistributed_vertices(p);
std::vector<std::vector<redistributed_edge> > redistributed_edges(p);
// Build the sets of relocated vertices for each process and then do
// an all-to-all transfer.
for (boost::tie(vi, vi_end) = vertices(*this); vi != vi_end; ++vi) {
if (!has_stable_descriptors
|| get(vertex_to_processor, *vi) != vi->owner) {
redistributed_vertices[get(vertex_to_processor, *vi)]
.push_back(redistributed_vertex(*vi, get(vertex_all_t(), base(),
vi->local)));
}
// When our descriptors are stable, we need to determine which
// adjacent descriptors are stable to determine which edges will
// be removed.
if (has_stable_descriptors) {
BGL_FORALL_OUTEDGES_T(*vi, e, *this, graph_type)
request(vertex_to_processor, target(e, *this));
request_in_neighbors(*vi, vertex_to_processor, directed_selector());
}
}
inplace_all_to_all(pg, redistributed_vertices);
// If we have stable descriptors, we need to know where our neighbor
// vertices are moving.
if (has_stable_descriptors)
synchronize(vertex_to_processor);
// Build the sets of relocated edges for each process and then do
// an all-to-all transfer.
for (boost::tie(ei, ei_end) = edges(*this); ei != ei_end; ++ei) {
vertex_descriptor src = source(*ei, *this);
vertex_descriptor tgt = target(*ei, *this);
if (!has_stable_descriptors
|| get(vertex_to_processor, src) != src.owner
|| get(vertex_to_processor, tgt) != tgt.owner)
redistributed_edges[get(vertex_to_processor, source(*ei, *this))]
.push_back(redistributed_edge(*ei, get(edge_all_t(), base(),
ei->local)));
}
inplace_all_to_all(pg, redistributed_edges);
// A mapping from old vertex descriptors to new vertex
// descriptors. This is an STL map partly because I'm too lazy to
// build a real property map (which is hard in the general case) but
// also because it won't try to look in the graph itself, because
// the keys are all vertex descriptors that have been invalidated.
std::map<vertex_descriptor, vertex_descriptor> old_to_new_vertex_map;
if (has_stable_descriptors) {
// Clear out all vertices and edges that will have moved. There
// are several stages to this.
// First, eliminate all outgoing edges from the (local) vertices
// that have been moved or whose targets have been moved.
BGL_FORALL_VERTICES_T(v, *this, graph_type) {
if (get(vertex_to_processor, v) != v.owner) {
clear_out_edges(v.local, base());
clear_in_edges_local(v, directed_selector());
} else {
remove_out_edge_if(v.local,
target_migrated(vertex_to_processor, base()),
base());
remove_migrated_in_edges(v, vertex_to_processor, directed_selector());
}
}
// Next, eliminate locally-stored edges that have migrated (for
// undirected graphs).
graph_detail::erase_if(local_edges_,
source_or_target_migrated(vertex_to_processor, *this));
// Eliminate vertices that have migrated
for (boost::tie(vi, vi_end) = vertices(*this); vi != vi_end; /* in loop */) {
if (get(vertex_to_processor, *vi) != vi->owner)
remove_vertex((*vi++).local, base());
else {
// Add the identity relation for vertices that have not migrated
old_to_new_vertex_map[*vi] = *vi;
++vi;
}
}
} else {
// Clear out the local graph: the entire graph is in transit
clear();
}
// Add the new vertices to the graph. When we do so, update the old
// -> new vertex mapping both locally and for the owner of the "old"
// vertex.
{
typedef std::pair<vertex_descriptor, vertex_descriptor> mapping_pair;
std::vector<std::vector<mapping_pair> > mappings(p);
for (process_id_type src = 0; src < p; ++src) {
for (typename std::vector<redistributed_vertex>::iterator vi =
redistributed_vertices[src].begin();
vi != redistributed_vertices[src].end(); ++vi) {
vertex_descriptor new_vertex =
add_vertex(vi->get_property(), *this);
old_to_new_vertex_map[vi->descriptor] = new_vertex;
mappings[vi->descriptor.owner].push_back(mapping_pair(vi->descriptor,
new_vertex));
}
redistributed_vertices[src].clear();
}
inplace_all_to_all(pg, mappings);
// Add the mappings we were sent into the old->new map.
for (process_id_type src = 0; src < p; ++src)
old_to_new_vertex_map.insert(mappings[src].begin(), mappings[src].end());
}
// Get old->new vertex mappings for all of the vertices we need to
// know about.
// TBD: An optimization here might involve sending the
// request-response pairs without an explicit request step (for
// bidirectional and undirected graphs). However, it may not matter
// all that much given the cost of redistribution.
{
std::vector<std::vector<vertex_descriptor> > vertex_map_requests(p);
std::vector<std::vector<vertex_descriptor> > vertex_map_responses(p);
// We need to know about all of the vertices incident on edges
// that have been relocated to this processor. Tell each processor
// what each other processor needs to know.
for (process_id_type src = 0; src < p; ++src)
for (typename std::vector<redistributed_edge>::iterator ei =
redistributed_edges[src].begin();
ei != redistributed_edges[src].end(); ++ei) {
vertex_descriptor need_vertex = target(ei->descriptor, *this);
if (old_to_new_vertex_map.find(need_vertex)
== old_to_new_vertex_map.end())
{
// Insert the identity mapping as a "seen" marker; it is overwritten
// with the real mapping when the response arrives below.
old_to_new_vertex_map[need_vertex] = need_vertex;
vertex_map_requests[need_vertex.owner].push_back(need_vertex);
}
}
inplace_all_to_all(pg,
vertex_map_requests,
vertex_map_responses);
// Process the requests made for vertices we own. Then perform yet
// another all-to-all swap. This one matches the requests we've
// made to the responses we were given.
for (process_id_type src = 0; src < p; ++src)
for (typename std::vector<vertex_descriptor>::iterator vi =
vertex_map_responses[src].begin();
vi != vertex_map_responses[src].end(); ++vi)
*vi = old_to_new_vertex_map[*vi];
inplace_all_to_all(pg, vertex_map_responses);
// Matching the requests to the responses, update the old->new
// vertex map for all of the vertices we will need to know.
for (process_id_type src = 0; src < p; ++src) {
typedef typename std::vector<vertex_descriptor>::size_type size_type;
for (size_type i = 0; i < vertex_map_requests[src].size(); ++i) {
old_to_new_vertex_map[vertex_map_requests[src][i]] =
vertex_map_responses[src][i];
}
}
}
// Add edges to the graph by mapping the source and target.
for (process_id_type src = 0; src < p; ++src) {
for (typename std::vector<redistributed_edge>::iterator ei =
redistributed_edges[src].begin();
ei != redistributed_edges[src].end(); ++ei) {
add_edge(old_to_new_vertex_map[source(ei->descriptor, *this)],
old_to_new_vertex_map[target(ei->descriptor, *this)],
ei->get_property(),
*this);
}
redistributed_edges[src].clear();
}
// Be sure that edge-addition messages are received now, completing
// the graph.
synchronize(process_group_);
// The old distribution no longer describes vertex placement.
this->distribution().clear();
detail::parallel::maybe_initialize_vertex_indices(vertices(base()),
get(vertex_index, base()));
}
} // end namespace boost

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,372 @@
// Copyright (C) 2005-2008 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_DISTRIBUTED_BOMAN_ET_AL_GRAPH_COLORING_HPP
#define BOOST_GRAPH_DISTRIBUTED_BOMAN_ET_AL_GRAPH_COLORING_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/graph_traits.hpp>
#include <boost/graph/parallel/algorithm.hpp>
#include <boost/property_map/property_map.hpp>
#include <boost/graph/parallel/process_group.hpp>
#include <functional>
#include <vector>
#include <utility>
#include <boost/graph/iteration_macros.hpp>
#include <boost/optional.hpp>
#include <boost/assert.hpp>
#include <boost/graph/parallel/container_traits.hpp>
#include <boost/graph/properties.hpp>
#ifdef PBGL_ACCOUNTING
# include <boost/graph/accounting.hpp>
#endif // PBGL_ACCOUNTING
namespace boost { namespace graph { namespace distributed {
/**************************************************************************
* This source file implements the distributed graph coloring algorithm *
* by Boman et al in: *
* *
* Erik G. Boman, Doruk Bozdag, Umit Catalyurek, Assefaw H. Gebremedhin,*
* and Fredrik Manne. A Scalable Parallel Graph Coloring Algorithm for *
* Distributed Memory Computers. [unpublished preprint?] *
* *
**************************************************************************/
#ifdef PBGL_ACCOUNTING
// Performance counters collected when PBGL_ACCOUNTING is defined; filled
// in by boman_et_al_graph_coloring and dumped via print().
struct boman_et_al_graph_coloring_stats_t
{
/* The size of the blocks to step through (i.e., the parameter s). */
std::size_t block_size;
/* Total wall-clock time used by the algorithm.*/
accounting::time_type execution_time;
/* The number of conflicts that occurred during execution. */
std::size_t conflicts;
/* The number of supersteps. */
std::size_t supersteps;
/* The number of colors used. */
std::size_t num_colors;
// Write the collected statistics to `out` in a key = value format;
// "(P)" marks an input parameter, "(R)" a result.
template<typename OutputStream>
void print(OutputStream& out)
{
out << "Problem = \"Coloring\"\n"
<< "Algorithm = \"Boman et al\"\n"
<< "Function = boman_et_al_graph_coloring\n"
<< "(P) Block size = " << block_size << "\n"
<< "Wall clock time = " << accounting::print_time(execution_time)
<< "\nConflicts = " << conflicts << "\n"
<< "Supersteps = " << supersteps << "\n"
<< "(R) Colors = " << num_colors << "\n";
}
};
// Single (per-translation-unit) instance the algorithm records into.
static boman_et_al_graph_coloring_stats_t boman_et_al_graph_coloring_stats;
#endif
namespace detail {
// Reduction operation for the distributed color property map: the nullary
// form supplies the default value for unknown keys (the maximal value of
// T, i.e. the "no color" sentinel), and the ternary form resolves
// conflicting local/remote values by keeping the incoming value y.
template<typename T>
struct graph_coloring_reduce
{
BOOST_STATIC_CONSTANT(bool, non_default_resolver = true);
template<typename Key>
T operator()(const Key&) const { return (std::numeric_limits<T>::max)(); }
template<typename Key> T operator()(const Key&, T, T y) const { return y; }
};
}
template<typename Color>
struct first_fit_color
{
  // Return the smallest color whose slot in `marked` does not carry the
  // stamp `marked_true`; every color at or beyond marked.size() is
  // implicitly unmarked, so marked.size() is returned when all slots are
  // stamped.
  template<typename T>
  Color operator()(const std::vector<T>& marked, T marked_true)
  {
    const Color bound = (Color)marked.size();
    Color candidate = 0;
    while (candidate < bound) {
      if (marked[candidate] != marked_true)
        break;
      ++candidate;
    }
    return candidate;
  }
};
/**
 * Distributed graph coloring following Boman et al.
 *
 * Each process colors its local vertices in blocks of @p s vertices,
 * exchanging boundary colors between blocks. A vertex whose color
 * collides with a neighbor owned by another process gives up its color
 * when @p ordering selects it, and is retried on the next outer round;
 * the loop continues until no process has vertices left to color.
 *
 * @param g            the distributed graph to color
 * @param color        distributed property map that receives each vertex's color
 * @param s            block size: vertices colored between communication steps
 * @param choose_color functor picking a color from the marked-colors vector
 * @param ordering     binary predicate deciding which endpoint of a
 *                     conflicting edge must surrender its color
 * @param vertex_index map from local vertices to indices
 * @returns the number of colors used (reduced over all processes)
 */
template<typename DistributedGraph, typename ColorMap, typename ChooseColor,
         typename VertexOrdering, typename VertexIndexMap>
typename property_traits<ColorMap>::value_type
boman_et_al_graph_coloring
  (const DistributedGraph& g,
   ColorMap color,
   typename graph_traits<DistributedGraph>::vertices_size_type s,
   ChooseColor choose_color,
   VertexOrdering ordering, VertexIndexMap vertex_index)
{
  using namespace boost::graph::parallel;
  using boost::parallel::all_reduce;

  typename property_map<DistributedGraph, vertex_owner_t>::const_type
    owner = get(vertex_owner, g);

  typedef typename process_group_type<DistributedGraph>::type
    process_group_type;
  typedef typename process_group_type::process_id_type process_id_type;
  typedef typename graph_traits<DistributedGraph>::vertex_descriptor Vertex;
  typedef typename graph_traits<DistributedGraph>::edge_descriptor Edge;
  typedef typename graph_traits<DistributedGraph>::vertices_size_type
    vertices_size_type;
  typedef typename property_traits<ColorMap>::value_type color_type;
  typedef unsigned long long iterations_type;
  typedef typename std::vector<Vertex>::iterator vertex_set_iterator;
  // Messages with tag 17 carry a (vertex, color-of-that-vertex) pair.
  typedef std::pair<Vertex, color_type> message_type;

#ifdef PBGL_ACCOUNTING
  boman_et_al_graph_coloring_stats.block_size = s;
  boman_et_al_graph_coloring_stats.execution_time = accounting::get_time();
  boman_et_al_graph_coloring_stats.conflicts = 0;
  boman_et_al_graph_coloring_stats.supersteps = 0;
#endif

  // Initialize color map: every vertex starts at the "no color" sentinel.
  color_type no_color = (std::numeric_limits<color_type>::max)();
  BGL_FORALL_VERTICES_T(v, g, DistributedGraph)
    put(color, v, no_color);
  color.set_reduce(detail::graph_coloring_reduce<color_type>());

  // Determine if we'll be using synchronous or asynchronous communication.
  typedef typename process_group_type::communication_category
    communication_category;
  static const bool asynchronous =
    is_convertible<communication_category, immediate_process_group_tag>::value;
  process_group_type pg = process_group(g);

  // U_i <- V_i : all local vertices still need a color.
  std::vector<Vertex> vertices_to_color(vertices(g).first, vertices(g).second);

  // iter_num stamps entries of `marked` and outer_iter_num stamps entries
  // of `marked_conflicting`, so neither vector needs clearing per pass.
  iterations_type iter_num = 1, outer_iter_num = 1;
  std::vector<iterations_type> marked;
  std::vector<iterations_type> marked_conflicting(num_vertices(g), 0);
  std::vector<bool> sent_to_processors;

  // Number of size-s blocks covering the local vertices, maximized over
  // all processes so everyone executes the same number of supersteps.
  std::size_t rounds = vertices_to_color.size() / s
    + (vertices_to_color.size() % s == 0? 0 : 1);
  rounds = all_reduce(pg, rounds, boost::parallel::maximum<std::size_t>());

#ifdef PBGL_GRAPH_COLORING_DEBUG
  std::cerr << "Number of rounds = " << rounds << std::endl;
#endif

  while (rounds > 0) {
    if (!vertices_to_color.empty()) {
      // Set of conflicting vertices
      std::vector<Vertex> conflicting_vertices;

      vertex_set_iterator first = vertices_to_color.begin();
      while (first != vertices_to_color.end()) {
        // For each subset of size s (or smaller for the last subset)
        vertex_set_iterator start = first;
        for (vertices_size_type counter = s;
             first != vertices_to_color.end() && counter > 0;
             ++first, --counter) {
          // This vertex hasn't been sent to anyone yet
          sent_to_processors.assign(num_processes(pg), false);
          sent_to_processors[process_id(pg)] = true;

          // Mark all of the colors that we see among the neighbors.
          BGL_FORALL_OUTEDGES_T(*first, e, g, DistributedGraph) {
            color_type k = get(color, target(e, g));
            if (k != no_color) {
              if (k >= (color_type)marked.size()) marked.resize(k + 1, 0);
              marked[k] = iter_num;
            }
          }

          // Find a color for this vertex
          put(color, *first, choose_color(marked, iter_num));

#ifdef PBGL_GRAPH_COLORING_DEBUG
          std::cerr << "Chose color " << get(color, *first) << " for vertex "
                    << *first << std::endl;
#endif

          // Send this vertex's color to the owner of the edge target,
          // at most once per process (tracked via sent_to_processors).
          BGL_FORALL_OUTEDGES_T(*first, e, g, DistributedGraph) {
            if (!sent_to_processors[get(owner, target(e, g))]) {
              send(pg, get(owner, target(e, g)), 17,
                   message_type(source(e, g), get(color, source(e, g))));
              sent_to_processors[get(owner, target(e, g))] = true;
            }
          }

          ++iter_num;
        }

        // Synchronize for non-immediate process groups.
        if (!asynchronous) {
          --rounds;
          synchronize(pg);
        }

        // Receive boundary colors from other processors
        while (optional<std::pair<process_id_type, int> > stp = probe(pg)) {
          BOOST_ASSERT(stp->second == 17);
          message_type msg;
          receive(pg, stp->first, stp->second, msg);
          cache(color, msg.first, msg.second);
#ifdef PBGL_GRAPH_COLORING_DEBUG
          std::cerr << "Cached color " << msg.second << " for vertex "
                    << msg.first << std::endl;
#endif
        }

        // Compute the set of conflicting vertices
        // [start, first) contains all vertices in this subset
        for (vertex_set_iterator vi = start; vi != first; ++vi) {
          Vertex v = *vi;
          BGL_FORALL_OUTEDGES_T(v, e, g, DistributedGraph) {
            Vertex w = target(e, g);
            if (get(owner, w) != process_id(pg) // boundary vertex
                && marked_conflicting[get(vertex_index, v)] != outer_iter_num
                && get(color, v) == get(color, w)
                && ordering(v, w)) {
              // v loses the tie (ordering(v, w) holds): uncolor it and
              // retry in a later outer round.
              conflicting_vertices.push_back(v);
              marked_conflicting[get(vertex_index, v)] = outer_iter_num;
              put(color, v, no_color);
#ifdef PBGL_GRAPH_COLORING_DEBUG
              std::cerr << "Vertex " << v << " has a conflict with vertex "
                        << w << std::endl;
#endif
              break;
            }
          }
        }

#ifdef PBGL_ACCOUNTING
        boman_et_al_graph_coloring_stats.conflicts +=
          conflicting_vertices.size();
#endif
      }

      if (asynchronous) synchronize(pg);
      else {
        // Burn off any supersteps this process did not itself need.
        while (rounds > 0) {
          synchronize(pg);
          --rounds;
        }
      }
      conflicting_vertices.swap(vertices_to_color);
      ++outer_iter_num;
    } else {
      // Nothing left to color locally: still take part in the remaining
      // supersteps so the other processes can progress.
      if (asynchronous) synchronize(pg);
      else {
        while (rounds > 0) {
          synchronize(pg);
          --rounds;
        }
      }
    }

    // Receive boundary colors from other processors
    while (optional<std::pair<process_id_type, int> > stp = probe(pg)) {
      BOOST_ASSERT(stp->second == 17);
      message_type msg;
      receive(pg, stp->first, stp->second, msg);
      cache(color, msg.first, msg.second);
    }

    // Recompute the (globally maximized) round count from the vertices
    // that conflicted and must be recolored.
    rounds = vertices_to_color.size() / s
      + (vertices_to_color.size() % s == 0? 0 : 1);
    rounds = all_reduce(pg, rounds, boost::parallel::maximum<std::size_t>());

#ifdef PBGL_ACCOUNTING
    ++boman_et_al_graph_coloring_stats.supersteps;
#endif
  }

  // Determine the number of colors used.
  color_type num_colors = 0;
  BGL_FORALL_VERTICES_T(v, g, DistributedGraph) {
    color_type k = get(color, v);
    BOOST_ASSERT(k != no_color);
    if (k != no_color) {
      if (k >= (color_type)marked.size()) marked.resize(k + 1, 0); // TBD: perf?
      if (marked[k] != iter_num) {
        marked[k] = iter_num;
        ++num_colors;
      }
    }
  }

  num_colors =
    all_reduce(pg, num_colors, boost::parallel::maximum<color_type>());
#ifdef PBGL_ACCOUNTING
  boman_et_al_graph_coloring_stats.execution_time =
    accounting::get_time() - boman_et_al_graph_coloring_stats.execution_time;

  // NOTE(review): `conflicts` is a std::size_t but is reduced with
  // std::plus<color_type>; if color_type is narrower this may truncate.
  boman_et_al_graph_coloring_stats.conflicts =
    all_reduce(pg, boman_et_al_graph_coloring_stats.conflicts,
               std::plus<color_type>());
  boman_et_al_graph_coloring_stats.num_colors = num_colors;
#endif

  return num_colors;
}
// Convenience overload: forwards to the primary overload, supplying the
// graph's interior vertex_index property as the vertex index map.
template<typename DistributedGraph, typename ColorMap, typename ChooseColor,
         typename VertexOrdering>
inline typename property_traits<ColorMap>::value_type
boman_et_al_graph_coloring
  (const DistributedGraph& g, ColorMap color,
   typename graph_traits<DistributedGraph>::vertices_size_type s,
   ChooseColor choose_color, VertexOrdering ordering)
{
  return boman_et_al_graph_coloring(g, color, s, choose_color, ordering,
                                    get(vertex_index, g));
}
// Convenience overload: conflicts are resolved with std::less over the
// vertex descriptors as the vertex ordering.
template<typename DistributedGraph, typename ColorMap, typename ChooseColor>
inline typename property_traits<ColorMap>::value_type
boman_et_al_graph_coloring
  (const DistributedGraph& g,
   ColorMap color,
   typename graph_traits<DistributedGraph>::vertices_size_type s,
   ChooseColor choose_color)
{
  return boman_et_al_graph_coloring
    (g, color, s, choose_color,
     std::less<typename graph_traits<DistributedGraph>::vertex_descriptor>());
}
// Convenience overload: colors with block size s (default 100) and the
// first-fit color chooser for the color map's value type.
template<typename DistributedGraph, typename ColorMap>
inline typename property_traits<ColorMap>::value_type
boman_et_al_graph_coloring
  (const DistributedGraph& g,
   ColorMap color,
   typename graph_traits<DistributedGraph>::vertices_size_type s = 100)
{
  return boman_et_al_graph_coloring
    (g, color, s,
     first_fit_color<typename property_traits<ColorMap>::value_type>());
}
} } } // end namespace boost::graph::distributed
namespace boost { namespace graph {
using distributed::boman_et_al_graph_coloring;
} } // end namespace boost::graph
#endif // BOOST_GRAPH_DISTRIBUTED_BOMAN_ET_AL_GRAPH_COLORING_HPP

View File

@@ -0,0 +1,164 @@
// Copyright 2004 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_PARALLEL_BFS_HPP
#define BOOST_GRAPH_PARALLEL_BFS_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/breadth_first_search.hpp>
#include <boost/graph/overloading.hpp>
#include <boost/graph/distributed/concepts.hpp>
#include <boost/graph/distributed/detail/filtered_queue.hpp>
#include <boost/graph/distributed/queue.hpp>
#include <boost/dynamic_bitset.hpp>
#include <boost/pending/queue.hpp>
#include <boost/graph/parallel/properties.hpp>
#include <boost/graph/parallel/container_traits.hpp>
namespace boost {
namespace detail {
/** @brief A unary predicate that decides when to push into a
 * breadth-first search queue.
 *
 * Stores a color map used to make the decision: given a key whose
 * color is white, it darkens the color to gray and returns true (the
 * value should be pushed); for any non-white color it returns false
 * so the vertex is ignored.
 */
template<typename ColorMap>
struct darken_and_push
{
  typedef typename property_traits<ColorMap>::key_type argument_type;
  typedef bool result_type;

  explicit darken_and_push(const ColorMap& color) : color(color) { }

  bool operator()(const argument_type& value) const
  {
    typedef color_traits<typename property_traits<ColorMap>::value_type>
      Color;
    // Only a white vertex may be pushed; darken it to gray so it is
    // never accepted a second time.
    if (get(color, value) != Color::white())
      return false;
    put(color, value, Color::gray());
    return true;
  }

  ColorMap color;
};
// Predicate that answers true the first time it is queried for a given
// key and false on every later query. Visits are recorded in a bitset
// indexed through the supplied index map.
template<typename IndexMap>
struct has_not_been_seen
{
  typedef bool result_type;

  has_not_been_seen() { }

  has_not_been_seen(std::size_t n, IndexMap index_map)
    : seen(n), index_map(index_map) {}

  template<typename Key>
  result_type operator()(Key key)
  {
    // Record the key as seen and report whether this was its first visit.
    std::size_t pos = get(index_map, key);
    bool already_seen = seen[pos];
    seen[pos] = true;
    return !already_seen;
  }

  void swap(has_not_been_seen& other)
  {
    using std::swap;
    swap(seen, other.seen);
    swap(index_map, other.index_map);
  }

 private:
  dynamic_bitset<> seen;
  IndexMap index_map;
};
// ADL-visible free swap for has_not_been_seen; forwards to member swap.
template<typename IndexMap>
inline void
swap(has_not_been_seen<IndexMap>& x, has_not_been_seen<IndexMap>& y)
{
  x.swap(y);
}
// Helper used when the caller supplied an explicit queue through the
// buffer named parameter: run the distributed BFS with that queue.
template <class DistributedGraph, class ColorMap, class BFSVisitor,
          class BufferRef, class VertexIndexMap>
inline void
parallel_bfs_helper
  (DistributedGraph& g,
   typename graph_traits<DistributedGraph>::vertex_descriptor s,
   ColorMap color,
   BFSVisitor vis,
   BufferRef Q,
   VertexIndexMap)
{
  // Give the distributed color map its vertex-color role and set its
  // consistency model to 0 -- NOTE(review): presumably this disables
  // automatic ghost-cell consistency because BFS updates colors itself;
  // confirm against the distributed property map documentation.
  set_property_map_role(vertex_color, color);
  color.set_consistency_model(0);
  // Q.ref is the caller's buffer, used directly as the BFS queue.
  breadth_first_search(g, s, Q.ref, vis, color);
}
// Helper used when no buffer was supplied (the named-parameter lookup
// yielded error_property_not_found): construct the default distributed
// queue and run the BFS with it.
template <class DistributedGraph, class ColorMap, class BFSVisitor,
          class VertexIndexMap>
void parallel_bfs_helper
  (DistributedGraph& g,
   typename graph_traits<DistributedGraph>::vertex_descriptor s,
   ColorMap color,
   BFSVisitor vis,
   error_property_not_found,
   VertexIndexMap vertex_index)
{
  using boost::graph::parallel::process_group;

  typedef graph_traits<DistributedGraph> Traits;
  typedef typename Traits::vertex_descriptor Vertex;
  typedef typename boost::graph::parallel::process_group_type<DistributedGraph>::type
    process_group_type;

  // See the buffer-taking overload: role + consistency model 0 on the
  // distributed color map before the traversal starts.
  set_property_map_role(vertex_color, color);
  color.set_consistency_model(0);

  // Buffer default: a distributed queue of vertices, partitioned by
  // owning process, that accepts a vertex only the first time it is
  // pushed (darken_and_push turns an accepted vertex gray).
  typedef typename property_map<DistributedGraph, vertex_owner_t>
    ::const_type vertex_owner_map;
  typedef boost::graph::distributed::distributed_queue<
            process_group_type, vertex_owner_map, queue<Vertex>,
            detail::darken_and_push<ColorMap> > queue_t;
  queue_t Q(process_group(g),
            get(vertex_owner, g),
            detail::darken_and_push<ColorMap>(color));
  breadth_first_search(g, s, Q, vis, color);
}
// Dispatcher for distributed graphs (enabled only when DistributedGraph
// models distributed_graph_tag): extracts the buffer and vertex-index
// named parameters and forwards to the parallel_bfs_helper overload
// selected by whether a buffer was actually supplied.
template <class DistributedGraph, class ColorMap, class BFSVisitor,
          class P, class T, class R>
void bfs_helper
  (DistributedGraph& g,
   typename graph_traits<DistributedGraph>::vertex_descriptor s,
   ColorMap color,
   BFSVisitor vis,
   const bgl_named_params<P, T, R>& params,
   BOOST_GRAPH_ENABLE_IF_MODELS(DistributedGraph, distributed_graph_tag,
                                void)*)
{
  parallel_bfs_helper
    (g, s, color, vis, get_param(params, buffer_param_t()),
     choose_const_pmap(get_param(params, vertex_index), g, vertex_index));
}
}
}
#endif // BOOST_GRAPH_PARALLEL_BFS_HPP

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,216 @@
// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
//
// Distributed graph concepts and helpers
//
#ifndef BOOST_GRAPH_DISTRIBUTED_CONCEPTS_HPP
#define BOOST_GRAPH_DISTRIBUTED_CONCEPTS_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/version.hpp>
#include <boost/graph/graph_traits.hpp>
#include <boost/graph/graph_concepts.hpp>
#if BOOST_VERSION >= 103500
# include <boost/concept/detail/concept_def.hpp>
#endif
namespace boost {
class distributed_graph_tag { };
class distributed_vertex_list_graph_tag { };
class distributed_edge_list_graph_tag { };
#if BOOST_VERSION >= 103500
namespace concepts {
#endif
#if BOOST_VERSION < 103500
// Concept check (pre-1.35 Boost.ConceptCheck style) for a distributed
// graph whose vertices can be enumerated: requires Graph, a multi-pass
// vertex iterator, and a traversal category convertible to
// distributed_vertex_list_graph_tag, then exercises vertices()/
// num_vertices() on mutable and const graphs.
template <class G>
struct DistributedVertexListGraphConcept
{
  typedef typename graph_traits<G>::vertex_iterator vertex_iterator;
  typedef typename graph_traits<G>::vertices_size_type vertices_size_type;
  typedef typename graph_traits<G>::traversal_category
    traversal_category;
  void constraints() {
    function_requires< GraphConcept<G> >();
    function_requires< MultiPassInputIteratorConcept<vertex_iterator> >();
    function_requires< ConvertibleConcept<traversal_category,
      distributed_vertex_list_graph_tag> >();

#ifdef BOOST_VECTOR_AS_GRAPH_GRAPH_ADL_HACK
    // dwa 2003/7/11 -- This clearly shouldn't be necessary, but if
    // you want to use vector_as_graph, it is!  I'm sure the graph
    // library leaves these out all over the place.  Probably a
    // redesign involving specializing a template with a static
    // member function is in order :(
    using boost::vertices;
#endif
    // Expression checks: these statements only need to compile.
    p = vertices(g);
    v = *p.first;
    const_constraints(g);
  }
  void const_constraints(const G& cg) {
#ifdef BOOST_VECTOR_AS_GRAPH_GRAPH_ADL_HACK
    // dwa 2003/7/11 -- This clearly shouldn't be necessary, but if
    // you want to use vector_as_graph, it is!  I'm sure the graph
    // library leaves these out all over the place.  Probably a
    // redesign involving specializing a template with a static
    // member function is in order :(
    using boost::vertices;
#endif
    // Same expressions must also be valid on a const graph.
    p = vertices(cg);
    v = *p.first;
    V = num_vertices(cg);
  }
  std::pair<vertex_iterator,vertex_iterator> p;
  typename graph_traits<G>::vertex_descriptor v;
  G g;
  vertices_size_type V;
};
// Concept check (pre-1.35 Boost.ConceptCheck style) for a distributed
// graph whose edges can be enumerated: requires Graph, a multi-pass
// edge iterator, a regular (default-constructible, equality-comparable,
// assignable) edge descriptor, and a traversal category convertible to
// distributed_edge_list_graph_tag; exercises edges()/source()/target()/
// num_edges() on mutable and const graphs.
template <class G>
struct DistributedEdgeListGraphConcept
{
  typedef typename graph_traits<G>::edge_descriptor edge_descriptor;
  typedef typename graph_traits<G>::edge_iterator edge_iterator;
  typedef typename graph_traits<G>::edges_size_type edges_size_type;
  typedef typename graph_traits<G>::traversal_category
    traversal_category;
  void constraints() {
    function_requires< GraphConcept<G> >();
    function_requires< MultiPassInputIteratorConcept<edge_iterator> >();
    function_requires< DefaultConstructibleConcept<edge_descriptor> >();
    function_requires< EqualityComparableConcept<edge_descriptor> >();
    function_requires< AssignableConcept<edge_descriptor> >();
    function_requires< ConvertibleConcept<traversal_category,
      distributed_edge_list_graph_tag> >();

    // Expression checks: these statements only need to compile.
    p = edges(g);
    e = *p.first;
    u = source(e, g);
    v = target(e, g);
    const_constraints(g);
  }
  void const_constraints(const G& cg) {
    // Same expressions must also be valid on a const graph.
    p = edges(cg);
    E = num_edges(cg);
    e = *p.first;
    u = source(e, cg);
    v = target(e, cg);
  }
  std::pair<edge_iterator,edge_iterator> p;
  typename graph_traits<G>::vertex_descriptor u, v;
  typename graph_traits<G>::edge_descriptor e;
  edges_size_type E;
  G g;
};
#else
// Boost 1.35+ concept definition mirroring
// DistributedVertexListGraphConcept above: the destructor body holds the
// usage expressions the modeling type must support.
BOOST_concept(DistributedVertexListGraph,(G))
  : Graph<G>
{
  typedef typename graph_traits<G>::vertex_iterator vertex_iterator;
  typedef typename graph_traits<G>::vertices_size_type vertices_size_type;
  typedef typename graph_traits<G>::traversal_category
    traversal_category;
  ~DistributedVertexListGraph() {
    BOOST_CONCEPT_ASSERT((MultiPassInputIterator<vertex_iterator>));
    BOOST_CONCEPT_ASSERT((Convertible<traversal_category,
      distributed_vertex_list_graph_tag>));

#ifdef BOOST_VECTOR_AS_GRAPH_GRAPH_ADL_HACK
    // dwa 2003/7/11 -- This clearly shouldn't be necessary, but if
    // you want to use vector_as_graph, it is!  I'm sure the graph
    // library leaves these out all over the place.  Probably a
    // redesign involving specializing a template with a static
    // member function is in order :(
    using boost::vertices;
#endif
    // Expression checks: these statements only need to compile.
    p = vertices(g);
    v = *p.first;
    const_constraints(g);
  }
  void const_constraints(const G& cg) {
#ifdef BOOST_VECTOR_AS_GRAPH_GRAPH_ADL_HACK
    // dwa 2003/7/11 -- This clearly shouldn't be necessary, but if
    // you want to use vector_as_graph, it is!  I'm sure the graph
    // library leaves these out all over the place.  Probably a
    // redesign involving specializing a template with a static
    // member function is in order :(
    using boost::vertices;
#endif
    // Same expressions must also be valid on a const graph.
    p = vertices(cg);
    v = *p.first;
    V = num_vertices(cg);
  }
  std::pair<vertex_iterator,vertex_iterator> p;
  typename graph_traits<G>::vertex_descriptor v;
  G g;
  vertices_size_type V;
};
// Boost 1.35+ concept definition mirroring
// DistributedEdgeListGraphConcept above: the destructor body holds the
// usage expressions the modeling type must support.
BOOST_concept(DistributedEdgeListGraph,(G))
  : Graph<G>
{
  typedef typename graph_traits<G>::edge_descriptor edge_descriptor;
  typedef typename graph_traits<G>::edge_iterator edge_iterator;
  typedef typename graph_traits<G>::edges_size_type edges_size_type;
  typedef typename graph_traits<G>::traversal_category
    traversal_category;
  ~DistributedEdgeListGraph() {
    BOOST_CONCEPT_ASSERT((MultiPassInputIterator<edge_iterator>));
    BOOST_CONCEPT_ASSERT((DefaultConstructible<edge_descriptor>));
    BOOST_CONCEPT_ASSERT((EqualityComparable<edge_descriptor>));
    BOOST_CONCEPT_ASSERT((Assignable<edge_descriptor>));
    BOOST_CONCEPT_ASSERT((Convertible<traversal_category,
      distributed_edge_list_graph_tag>));

    // Expression checks: these statements only need to compile.
    p = edges(g);
    e = *p.first;
    u = source(e, g);
    v = target(e, g);
    const_constraints(g);
  }
  void const_constraints(const G& cg) {
    // Same expressions must also be valid on a const graph.
    p = edges(cg);
    E = num_edges(cg);
    e = *p.first;
    u = source(e, cg);
    v = target(e, cg);
  }
  std::pair<edge_iterator,edge_iterator> p;
  typename graph_traits<G>::vertex_descriptor u, v;
  typename graph_traits<G>::edge_descriptor e;
  edges_size_type E;
  G g;
};
#endif
#if BOOST_VERSION >= 103500
} // end namespace concepts
using concepts::DistributedVertexListGraphConcept;
using concepts::DistributedEdgeListGraphConcept;
#endif
} // end namespace boost
#if BOOST_VERSION >= 103500
# include <boost/concept/detail/concept_undef.hpp>
#endif
#endif // BOOST_GRAPH_DISTRIBUTED_CONCEPTS_HPP

View File

@@ -0,0 +1,769 @@
// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Nick Edmonds
// Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_PARALLEL_CC_HPP
#define BOOST_GRAPH_PARALLEL_CC_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/detail/is_sorted.hpp>
#include <boost/assert.hpp>
#include <boost/property_map/property_map.hpp>
#include <boost/property_map/parallel/caching_property_map.hpp>
#include <boost/graph/parallel/algorithm.hpp>
#include <boost/pending/indirect_cmp.hpp>
#include <boost/graph/graph_traits.hpp>
#include <boost/graph/overloading.hpp>
#include <boost/graph/distributed/concepts.hpp>
#include <boost/graph/parallel/properties.hpp>
#include <boost/graph/distributed/local_subgraph.hpp>
#include <boost/graph/connected_components.hpp>
#include <boost/graph/named_function_params.hpp>
#include <boost/graph/parallel/process_group.hpp>
#include <boost/optional.hpp>
#include <functional>
#include <algorithm>
#include <vector>
#include <list>
#include <boost/graph/parallel/container_traits.hpp>
#include <boost/graph/iteration_macros.hpp>
#define PBGL_IN_PLACE_MERGE /* In place merge instead of sorting */
//#define PBGL_SORT_ASSERT /* Assert sorted for in place merge */
/* Explicit sychronization in pointer doubling step? */
#define PBGL_EXPLICIT_SYNCH
//#define PBGL_CONSTRUCT_METAGRAPH
#ifdef PBGL_CONSTRUCT_METAGRAPH
# define MAX_VERTICES_IN_METAGRAPH 10000
#endif
namespace boost { namespace graph { namespace distributed {
namespace cc_detail {
enum connected_components_message {
edges_msg, req_parents_msg, parents_msg, root_adj_msg
};
// Wraps a distributed-graph vertex descriptor so it can be stored as an
// internal vertex property of the (sequential) meta-graph and shipped
// between processes via Boost.Serialization.
template <typename Vertex>
struct metaVertex {
  metaVertex() {}
  metaVertex(const Vertex& v) : name(v) {}

  // Serialization support used when transmitting this property.
  template<typename Archiver>
  void serialize(Archiver& ar, const unsigned int /*version*/)
  {
    ar & name;
  }

  // The distributed-graph vertex this meta-graph vertex stands for.
  Vertex name;
};
#ifdef PBGL_CONSTRUCT_METAGRAPH
// Build meta-graph on result of local connected components: gather every
// process's component roots and their (parent-resolved) remote
// adjacencies on process 0, run sequential connected_components on that
// meta-graph, and write the resulting final parents back through `p`.
// `[r, r_end)` are this process's component roots; `adj` maps each root
// to its remote adjacent vertices.
template <typename Graph, typename ParentMap, typename RootIterator,
          typename AdjacencyMap>
void
build_local_metagraph(const Graph& g, ParentMap p, RootIterator r,
                      RootIterator r_end, AdjacencyMap& adj)
{
  // TODO: Static assert that AdjacencyMap::value_type is std::vector<vertex_descriptor>

  typedef typename boost::graph::parallel::process_group_type<Graph>::type
    process_group_type;
  typedef typename process_group_type::process_id_type process_id_type;
  typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;

  BOOST_STATIC_ASSERT((is_same<typename AdjacencyMap::mapped_type,
                               std::vector<vertex_descriptor> >::value));

  using boost::graph::parallel::process_group;

  process_group_type pg = process_group(g);
  process_id_type id = process_id(pg);

  if (id != 0) {
    // Send component roots and their associated edges to P0. Each
    // message is a vector whose first element is the root and whose
    // remaining elements are the parents of its remote adjacencies.
    for ( ; r != r_end; ++r ) {
      std::vector<vertex_descriptor> adjs(1, *r); // Root
      adjs.reserve(adjs.size() + adj[*r].size());
      for (typename std::vector<vertex_descriptor>::iterator iter = adj[*r].begin();
           iter != adj[*r].end(); ++iter)
        adjs.push_back(get(p, *iter)); // Adjacencies

      send(pg, 0, root_adj_msg, adjs);
    }
  }

  synchronize(pg);

  if (id == 0) {
    typedef metaVertex<vertex_descriptor> VertexProperties;
    typedef boost::adjacency_list<vecS, vecS, undirectedS,
      VertexProperties> metaGraph;
    typedef typename graph_traits<metaGraph>::vertex_descriptor
      meta_vertex_descriptor;

    std::map<vertex_descriptor, meta_vertex_descriptor> vertex_map;
    std::vector<std::pair<vertex_descriptor, vertex_descriptor> > edges;

    // Receive remote roots and edges
    while (optional<std::pair<process_id_type, int> > m = probe(pg)) {
      BOOST_ASSERT(m->second == root_adj_msg);

      std::vector<vertex_descriptor> adjs;
      receive(pg, m->first, m->second, adjs);

      // adjs[0] is the root; the rest are its adjacencies (see sender).
      vertex_map[adjs[0]] = graph_traits<metaGraph>::null_vertex();
      for (typename std::vector<vertex_descriptor>::iterator iter
             = ++adjs.begin(); iter != adjs.end(); ++iter)
        edges.push_back(std::make_pair(adjs[0], *iter));
    }

    // Add local roots and edges
    for ( ; r != r_end; ++r ) {
      vertex_map[*r] = graph_traits<metaGraph>::null_vertex();
      edges.reserve(edges.size() + adj[*r].size());
      for (typename std::vector<vertex_descriptor>::iterator iter = adj[*r].begin();
           iter != adj[*r].end(); ++iter)
        edges.push_back(std::make_pair(*r, get(p, *iter)));
    }

    // Build local meta-graph
    metaGraph mg;

    // Add vertices with property to map back to distributed graph vertex
    for (typename std::map<vertex_descriptor, meta_vertex_descriptor>::iterator
           iter = vertex_map.begin(); iter != vertex_map.end(); ++iter)
      vertex_map[iter->first]
        = add_vertex(metaVertex<vertex_descriptor>(iter->first), mg);

    // Build meta-vertex map
    typename property_map<metaGraph, vertex_descriptor VertexProperties::*>::type
      metaVertexMap = get(&VertexProperties::name, mg);

    typename std::vector<std::pair<vertex_descriptor, vertex_descriptor> >
      ::iterator edge_iter = edges.begin();
    for ( ; edge_iter != edges.end(); ++edge_iter)
      add_edge(vertex_map[edge_iter->first], vertex_map[edge_iter->second], mg);

    edges.clear();

    // Call connected_components on it
    typedef typename property_map<metaGraph, vertex_index_t>::type
      meta_index_map_type;
    meta_index_map_type meta_index = get(vertex_index, mg);

    std::vector<std::size_t> mg_component_vec(num_vertices(mg));
    typedef iterator_property_map<std::vector<std::size_t>::iterator,
                                  meta_index_map_type>
      meta_components_map_type;
    meta_components_map_type mg_component(mg_component_vec.begin(),
                                          meta_index);
    std::size_t num_comp = connected_components(mg, mg_component);

    // Update Parent pointers: pick the lowest-index meta-vertex of each
    // component as its representative root.
    std::vector<meta_vertex_descriptor> roots(num_comp, graph_traits<metaGraph>::null_vertex());

    BGL_FORALL_VERTICES_T(v, mg, metaGraph) {
      size_t component = get(mg_component, v);
      if (roots[component] == graph_traits<metaGraph>::null_vertex() ||
          get(meta_index, v) < get(meta_index, roots[component]))
        roots[component] = v;
    }

    // Set all the local parent pointers
    BGL_FORALL_VERTICES_T(v, mg, metaGraph) {
      // Problem in value being put (3rd parameter)
      put(p, get(metaVertexMap, v), get(metaVertexMap, roots[get(mg_component, v)]));
    }
  }

  // Flush the parent writes made on P0 out to the owning processes.
  synchronize(p);
}
#endif
/* Function object used at each root to prune its adjacency list:
   removes vertices that already belong to this root's component
   (their parent is the root) as well as the root itself. */
template <typename Vertex, typename ParentMap>
class cull_adjacency_list
{
 public:
  cull_adjacency_list(const Vertex v, const ParentMap p) : v(v), p(p) {}

  // True when x should be removed: x's parent is this root, or x is the
  // root itself.
  bool operator() (const Vertex x)
  {
    const bool in_this_component = (get(p, x) == v);
    const bool is_the_root = (x == v);
    return in_this_component || is_the_root;
  }

 private:
  const Vertex v;
  const ParentMap p;
};
/* Comparison operator used to choose targets for hooking s.t. vertices
   that are hooked to are evenly distributed across processors: order
   primarily by local index, breaking ties by owning process. */
template <typename OwnerMap, typename LocalMap>
class hashed_vertex_compare
{
 public:
  hashed_vertex_compare (const OwnerMap& o, const LocalMap& l)
    : owner(o), local(l) { }

  template <typename Vertex>
  bool operator() (const Vertex x, const Vertex y)
  {
    // Distinct local indices decide the order outright ...
    if (get(local, x) != get(local, y))
      return get(local, x) < get(local, y);
    // ... otherwise fall back to comparing the owning processes.
    return (get(owner, x) < get(owner, y));
  }

 private:
  OwnerMap   owner;
  LocalMap   local;
};
#ifdef PBGL_EXPLICIT_SYNCH
// Explicit-synchronization variant of fetching remote parent values:
// `parent_requests[i]` lists the vertices whose parent this process
// needs from process i. Three phases, each ending in a synchronize:
// send requests, answer incoming requests, then collect the replies and
// store them into `p`.
template <typename Graph, typename ParentMap, typename VertexList>
void
request_parent_map_entries(const Graph& g, ParentMap p,
                           std::vector<VertexList>& parent_requests)
{
  typedef typename boost::graph::parallel::process_group_type<Graph>
    ::type process_group_type;
  typedef typename process_group_type::process_id_type process_id_type;
  typedef typename graph_traits<Graph>::vertex_descriptor
    vertex_descriptor;

  process_group_type pg = process_group(g);

  /*
    This should probably be send_oob_with_reply, especially when Dave
    finishes prefetch-batching
  */

  // Send root requests
  for (process_id_type i = 0; i < num_processes(pg); ++i) {
    if (!parent_requests[i].empty()) {
      std::vector<vertex_descriptor> reqs(parent_requests[i].begin(),
                                          parent_requests[i].end());
      send(pg, i, req_parents_msg, reqs);
    }
  }

  synchronize(pg);

  // Receive root requests and reply to them
  while (optional<std::pair<process_id_type, int> > m = probe(pg)) {
    std::vector<vertex_descriptor> requests;
    receive(pg, m->first, m->second, requests);

    // Overwrite each requested vertex in place with its parent, then
    // ship the same vector back as the reply.
    for (std::size_t i = 0; i < requests.size(); ++i)
      requests[i] = get(p, requests[i]);
    send(pg, m->first, parents_msg, requests);
  }

  synchronize(pg);

  // Receive requested parents; replies from process i arrive in the same
  // order as parent_requests[i], so walk the two in lock step.
  std::vector<vertex_descriptor> responses;
  for (process_id_type i = 0; i < num_processes(pg); ++i) {
    if (!parent_requests[i].empty()) {
      receive(pg, i, parents_msg, responses);
      std::size_t parent_idx = 0;
      for (typename VertexList::iterator v = parent_requests[i].begin();
           v != parent_requests[i].end(); ++v, ++parent_idx)
        put(p, *v, responses[parent_idx]);
    }
  }
}
#endif
template<typename DistributedGraph, typename ParentMap>
void
parallel_connected_components(DistributedGraph& g, ParentMap p)
{
using boost::connected_components;
typedef typename graph_traits<DistributedGraph>::adjacency_iterator
adjacency_iterator;
typedef typename graph_traits<DistributedGraph>::out_edge_iterator
out_edge_iterator;
typedef typename graph_traits<DistributedGraph>::edge_iterator
edge_iterator;
typedef typename graph_traits<DistributedGraph>::vertex_descriptor
vertex_descriptor;
typedef typename graph_traits<DistributedGraph>::edge_descriptor
edge_descriptor;
typedef typename boost::graph::parallel::process_group_type<DistributedGraph>
::type process_group_type;
typedef typename process_group_type::process_id_type process_id_type;
using boost::graph::parallel::process_group;
process_group_type pg = process_group(g);
process_id_type id = process_id(pg);
// TODO (NGE): Should old_roots, roots, and completed_roots be std::list
adjacency_iterator av1, av2;
std::vector<vertex_descriptor> old_roots;
typename std::vector<vertex_descriptor>::iterator liter;
typename std::vector<vertex_descriptor>::iterator aliter;
typename std::map<vertex_descriptor,
std::vector<vertex_descriptor> > adj;
typedef typename property_map<DistributedGraph, vertex_owner_t>::const_type
OwnerMap;
OwnerMap owner = get(vertex_owner, g);
typedef typename property_map<DistributedGraph, vertex_local_t>::const_type
LocalMap;
LocalMap local = get(vertex_local, g);
// We need to hold on to all of the parent pointers
p.set_max_ghost_cells(0);
//
// STAGE 1 : Compute local components
//
local_subgraph<const DistributedGraph> ls(g);
typedef typename property_map<local_subgraph<const DistributedGraph>,
vertex_index_t>::type local_index_map_type;
local_index_map_type local_index = get(vertex_index, ls);
// Compute local connected components
std::vector<std::size_t> ls_components_vec(num_vertices(ls));
typedef iterator_property_map<std::vector<std::size_t>::iterator,
local_index_map_type>
ls_components_map_type;
ls_components_map_type ls_component(ls_components_vec.begin(),
local_index);
std::size_t num_comp = connected_components(ls, ls_component);
std::vector<vertex_descriptor>
roots(num_comp, graph_traits<DistributedGraph>::null_vertex());
BGL_FORALL_VERTICES_T(v, g, DistributedGraph) {
size_t component = get(ls_component, v);
if (roots[component] == graph_traits<DistributedGraph>::null_vertex() ||
get(local_index, v) < get(local_index, roots[component]))
roots[component] = v;
}
// Set all the local parent pointers
BGL_FORALL_VERTICES_T(v, g, DistributedGraph) {
put(p, v, roots[get(ls_component, v)]);
}
if (num_processes(pg) == 1) return;
// Build adjacency list for all roots
BGL_FORALL_VERTICES_T(v, g, DistributedGraph) {
std::vector<vertex_descriptor>& my_adj = adj[get(p, v)];
for (boost::tie(av1, av2) = adjacent_vertices(v, g);
av1 != av2; ++av1) {
if (get(owner, *av1) != id) my_adj.push_back(*av1);
}
}
// For all vertices adjacent to a local vertex get p(v)
for ( liter = roots.begin(); liter != roots.end(); ++liter ) {
std::vector<vertex_descriptor>& my_adj = adj[*liter];
for ( aliter = my_adj.begin(); aliter != my_adj.end(); ++aliter )
request(p, *aliter);
}
synchronize(p);
// Update adjacency list at root to make sure all adjacent
// vertices are roots of remote components
for ( liter = roots.begin(); liter != roots.end(); ++liter )
{
std::vector<vertex_descriptor>& my_adj = adj[*liter];
for ( aliter = my_adj.begin(); aliter != my_adj.end(); ++aliter )
*aliter = get(p, *aliter);
my_adj.erase
(std::remove_if(my_adj.begin(), my_adj.end(),
cull_adjacency_list<vertex_descriptor,
ParentMap>(*liter, p) ),
my_adj.end());
// This sort needs to be here to make sure the initial
// adjacency list is sorted
std::sort(my_adj.begin(), my_adj.end(), std::less<vertex_descriptor>());
my_adj.erase(std::unique(my_adj.begin(), my_adj.end()), my_adj.end());
}
// Get p(v) for the new adjacent roots
p.clear();
for ( liter = roots.begin(); liter != roots.end(); ++liter ) {
std::vector<vertex_descriptor>& my_adj = adj[*liter];
for ( aliter = my_adj.begin(); aliter != my_adj.end(); ++aliter )
request(p, *aliter);
}
#ifdef PBGL_EXPLICIT_SYNCH
synchronize(p);
#endif
// Lastly, remove roots with no adjacent vertices, this is
// unnecessary but will speed up sparse graphs
for ( liter = roots.begin(); liter != roots.end(); /*in loop*/)
{
if ( adj[*liter].empty() )
liter = roots.erase(liter);
else
++liter;
}
#ifdef PBGL_CONSTRUCT_METAGRAPH
/* TODO: If the number of roots is sufficiently small, we can
use a 'problem folding' approach like we do in MST
to gather all the roots and their adjacencies on one proc
and solve for the connected components of the meta-graph */
using boost::parallel::all_reduce;
std::size_t num_roots = all_reduce(pg, roots.size(), std::plus<std::size_t>());
if (num_roots < MAX_VERTICES_IN_METAGRAPH) {
build_local_metagraph(g, p, roots.begin(), roots.end(), adj);
// For each vertex in g, p(v) = p(p(v)), assign parent of leaf
// vertices from first step to final parent
BGL_FORALL_VERTICES_T(v, g, DistributedGraph) {
put(p, v, get(p, get(p, v)));
}
synchronize(p);
return;
}
#endif
//
// Parallel Phase
//
std::vector<vertex_descriptor> completed_roots;
hashed_vertex_compare<OwnerMap, LocalMap> v_compare(owner, local);
bool any_hooked;
vertex_descriptor new_root;
std::size_t steps = 0;
do {
++steps;
// Pull in new parents for hooking phase
synchronize(p);
//
// Hooking
//
bool hooked = false;
completed_roots.clear();
for ( liter = roots.begin(); liter != roots.end(); )
{
new_root = graph_traits<DistributedGraph>::null_vertex();
std::vector<vertex_descriptor>& my_adj = adj[*liter];
for ( aliter = my_adj.begin(); aliter != my_adj.end(); ++aliter )
// try to hook to better adjacent vertex
if ( v_compare( get(p, *aliter), *liter ) )
new_root = get(p, *aliter);
if ( new_root != graph_traits<DistributedGraph>::null_vertex() )
{
hooked = true;
put(p, *liter, new_root);
old_roots.push_back(*liter);
completed_roots.push_back(*liter);
liter = roots.erase(liter);
}
else
++liter;
}
//
// Pointer jumping, perform until new roots determined
//
// TODO: Implement cycle reduction rules to reduce this from
// O(n) to O(log n) [n = cycle length]
bool all_done;
std::size_t parent_root_count;
std::size_t double_steps = 0;
do {
++double_steps;
#ifndef PBGL_EXPLICIT_SYNCH
// Get p(p(v)) for all old roots, and p(v) for all current roots
for ( liter = old_roots.begin(); liter != old_roots.end(); ++liter )
request(p, get(p, *liter));
synchronize(p);
#else
// Build root requests
typedef std::set<vertex_descriptor> VertexSet;
std::vector<VertexSet> parent_requests(num_processes(pg));
for ( liter = old_roots.begin(); liter != old_roots.end(); ++liter )
{
vertex_descriptor p1 = *liter;
if (get(owner, p1) != id) parent_requests[get(owner, p1)].insert(p1);
vertex_descriptor p2 = get(p, p1);
if (get(owner, p2) != id) parent_requests[get(owner, p2)].insert(p2);
}
request_parent_map_entries(g, p, parent_requests);
#endif
// Perform a pointer jumping step on all old roots
for ( liter = old_roots.begin(); liter != old_roots.end(); ++liter )
put(p, *liter, get(p, get(p, *liter)));
// make sure the parent of all old roots is itself a root
parent_root_count = 0;
for ( liter = old_roots.begin(); liter != old_roots.end(); ++liter )
if ( get(p, *liter) == get(p, get(p, *liter)) )
parent_root_count++;
bool done = parent_root_count == old_roots.size();
all_reduce(pg, &done, &done+1, &all_done,
std::logical_and<bool>());
} while ( !all_done );
#ifdef PARALLEL_BGL_DEBUG
if (id == 0) std::cerr << double_steps << " doubling steps.\n";
#endif
//
// Add adjacent vertices of just completed roots to adjacent
// vertex list at new parent
//
typename std::vector<vertex_descriptor> outgoing_edges;
for ( liter = completed_roots.begin(); liter != completed_roots.end();
++liter )
{
vertex_descriptor new_parent = get(p, *liter);
if ( get(owner, new_parent) == id )
{
std::vector<vertex_descriptor>& my_adj = adj[new_parent];
my_adj.reserve(my_adj.size() + adj[*liter].size());
my_adj.insert( my_adj.end(),
adj[*liter].begin(), adj[*liter].end() );
#ifdef PBGL_IN_PLACE_MERGE
#ifdef PBGL_SORT_ASSERT
BOOST_ASSERT(::boost::detail::is_sorted(my_adj.begin(),
my_adj.end() - adj[*liter].size(),
std::less<vertex_descriptor>()));
BOOST_ASSERT(::boost::detail::is_sorted(my_adj.end() - adj[*liter].size(),
my_adj.end(),
std::less<vertex_descriptor>()));
#endif
std::inplace_merge(my_adj.begin(),
my_adj.end() - adj[*liter].size(),
my_adj.end(),
std::less<vertex_descriptor>());
#endif
}
else if ( adj[*liter].begin() != adj[*liter].end() )
{
outgoing_edges.clear();
outgoing_edges.reserve(adj[*liter].size() + 1);
// First element is the destination of the adjacency list
outgoing_edges.push_back(new_parent);
outgoing_edges.insert(outgoing_edges.end(),
adj[*liter].begin(), adj[*liter].end() );
send(pg, get(owner, new_parent), edges_msg, outgoing_edges);
adj[*liter].clear();
}
}
synchronize(pg);
// Receive edges sent by remote nodes and add them to the
// indicated vertex's adjacency list
while (optional<std::pair<process_id_type, int> > m
= probe(pg))
{
std::vector<vertex_descriptor> incoming_edges;
receive(pg, m->first, edges_msg, incoming_edges);
typename std::vector<vertex_descriptor>::iterator aviter
= incoming_edges.begin();
++aviter;
std::vector<vertex_descriptor>& my_adj = adj[incoming_edges[0]];
my_adj.reserve(my_adj.size() + incoming_edges.size() - 1);
my_adj.insert( my_adj.end(), aviter, incoming_edges.end() );
#ifdef PBGL_IN_PLACE_MERGE
std::size_t num_incoming_edges = incoming_edges.size();
#ifdef PBGL_SORT_ASSERT
BOOST_ASSERT(::boost::detail::is_sorted(my_adj.begin(),
my_adj.end() - (num_incoming_edges-1),
std::less<vertex_descriptor>()));
BOOST_ASSERT(::boost::detail::is_sorted(my_adj.end() - (num_incoming_edges-1),
my_adj.end(),
std::less<vertex_descriptor>()));
#endif
std::inplace_merge(my_adj.begin(),
my_adj.end() - (num_incoming_edges - 1),
my_adj.end(),
std::less<vertex_descriptor>());
#endif
}
// Remove any adjacent vertices that are in the same component
// as a root from that root's list
for ( liter = roots.begin(); liter != roots.end(); ++liter )
{
// We can probably get away without sorting and removing
// duplicates Though sorting *may* cause root
// determination to occur faster by choosing the root with
// the most potential to hook to at each step
std::vector<vertex_descriptor>& my_adj = adj[*liter];
my_adj.erase
(std::remove_if(my_adj.begin(), my_adj.end(),
cull_adjacency_list<vertex_descriptor,
ParentMap>(*liter, p) ),
my_adj.end());
#ifndef PBGL_IN_PLACE_MERGE
std::sort(my_adj.begin(), my_adj.end(),
std::less<vertex_descriptor>() );
#endif
my_adj.erase(std::unique(my_adj.begin(), my_adj.end()), my_adj.end());
}
// Reduce result of empty root list test
all_reduce(pg, &hooked, &hooked+1, &any_hooked,
std::logical_or<bool>());
} while ( any_hooked );
#ifdef PARALLEL_BGL_DEBUG
if (id == 0) std::cerr << steps << " iterations.\n";
#endif
//
// Finalize
//
// For each vertex in g, p(v) = p(p(v)), assign parent of leaf
// vertices from first step to final parent
BGL_FORALL_VERTICES_T(v, g, DistributedGraph) {
put(p, v, get(p, get(p, v)));
}
synchronize(p);
}
} // end namespace cc_detail
// Compute dense component numbers from a parent (root) map: every
// process gathers the set of roots, the roots are numbered, and each
// local vertex is labelled through ComponentMap c with its root's
// number.  Returns the total number of components.
template<typename Graph, typename ParentMap, typename ComponentMap>
typename property_traits<ComponentMap>::value_type
number_components_from_parents(const Graph& g, ParentMap p, ComponentMap c)
{
  typedef typename graph_traits<Graph>::vertex_descriptor
    vertex_descriptor;
  typedef typename boost::graph::parallel::process_group_type<Graph>::type
    process_group_type;
  typedef typename property_traits<ComponentMap>::value_type
    ComponentMapType;
  process_group_type pg = process_group(g);
  /* Build list of roots */
  // Collect the distinct parents of local vertices.  The linear
  // std::find preserves first-seen order (a std::set would change the
  // resulting numbering).
  std::vector<vertex_descriptor> my_roots, all_roots;
  BGL_FORALL_VERTICES_T(v, g, Graph) {
    if( std::find( my_roots.begin(), my_roots.end(), get(p, v) )
        == my_roots.end() )
      my_roots.push_back( get(p, v) );
  }
  all_gather(pg, my_roots.begin(), my_roots.end(), all_roots);
  /* Number components */
  std::map<vertex_descriptor, ComponentMapType> comp_numbers;
  ComponentMapType c_num = 0;
  // Compute component numbers in first-seen order over the gathered
  // list.  NOTE(review): this presumes all_gather yields the same
  // sequence on every process so the numbering is globally consistent
  // -- confirm against the process-group all_gather semantics.
  for (std::size_t i = 0; i < all_roots.size(); i++ )
    if ( comp_numbers.count(all_roots[i]) == 0 )
      comp_numbers[all_roots[i]] = c_num++;
  // Broadcast component numbers: label each local vertex with its
  // root's component number.
  BGL_FORALL_VERTICES_T(v, g, Graph) {
    put( c, v, comp_numbers[get(p, v)] );
  }
  // Broadcast number of components: rank 0 sends the final count to
  // every other rank point-to-point.
  if (process_id(pg) == 0) {
    typedef typename process_group_type::process_size_type
      process_size_type;
    for (process_size_type dest = 1, n = num_processes(pg);
          dest != n; ++dest)
      send(pg, dest, 0, c_num);
  }
  synchronize(pg);
  if (process_id(pg) != 0) receive(pg, 0, 0, c_num);
  // Flush any outstanding ghost-cell updates in the component map.
  synchronize(c);
  return c_num;
}
template<typename Graph, typename ParentMap>
int
number_components_from_parents(const Graph& g, ParentMap p,
dummy_property_map)
{
using boost::parallel::all_reduce;
// Count local roots.
int num_roots = 0;
BGL_FORALL_VERTICES_T(v, g, Graph)
if (get(p, v) == v) ++num_roots;
return all_reduce(g.process_group(), num_roots, std::plus<int>());
}
// Distributed connected components: compute a parent (root) map with
// the parallel hooking/pointer-jumping algorithm, then turn the roots
// into dense component numbers stored in c.  Returns the number of
// components.
template<typename Graph, typename ComponentMap, typename ParentMap>
typename property_traits<ComponentMap>::value_type
connected_components
  (const Graph& g, ComponentMap c, ParentMap p
   BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph, distributed_graph_tag))
{
  // Phase 1: every vertex ends up pointing at its component's root.
  cc_detail::parallel_connected_components(g, p);

  // Phase 2: translate root vertices into dense component numbers.
  typename property_traits<ComponentMap>::value_type n_components =
    number_components_from_parents(g, p, c);
  return n_components;
}
// Convenience overload that fabricates the parent map on the fly for
// callers who only want component numbers.  Parents are kept in a
// temporary vector indexed through the graph's vertex_index map.
template<typename Graph, typename ComponentMap>
typename property_traits<ComponentMap>::value_type
connected_components
  ( const Graph& g, ComponentMap c
    BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph, distributed_graph_tag) )
{
  typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;

  // Backing store for the parent of each local vertex.
  std::vector<vertex_descriptor> parent_storage(num_vertices(g));
  return connected_components
           (g, c,
            make_iterator_property_map(parent_storage.begin(),
                                       get(vertex_index, g)));
}
} // end namespace distributed
using distributed::connected_components;
} // end namespace graph
using graph::distributed::connected_components;
} // end namespace boost
#endif // BOOST_GRAPH_PARALLEL_CC_HPP

View File

@@ -0,0 +1,409 @@
// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Brian Barrett
// Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_PARALLEL_CC_PS_HPP
#define BOOST_GRAPH_PARALLEL_CC_PS_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/assert.hpp>
#include <boost/property_map/property_map.hpp>
#include <boost/graph/parallel/algorithm.hpp>
#include <boost/pending/indirect_cmp.hpp>
#include <boost/graph/graph_traits.hpp>
#include <boost/graph/overloading.hpp>
#include <boost/graph/distributed/concepts.hpp>
#include <boost/graph/parallel/properties.hpp>
#include <boost/graph/parallel/process_group.hpp>
#include <boost/optional.hpp>
#include <algorithm>
#include <vector>
#include <queue>
#include <limits>
#include <map>
#include <boost/graph/parallel/container_traits.hpp>
#include <boost/graph/iteration_macros.hpp>
// Connected components algorithm based on a parallel search.
//
// Every N nodes starts a parallel search from the first vertex in
// their local vertex list during the first superstep (the other nodes
// remain idle during the first superstep to reduce the number of
// conflicts in numbering the components). At each superstep, all new
// component mappings from remote nodes are handled. If there is no
// work from remote updates, a new vertex is removed from the local
// list and added to the work queue.
//
// Components are allocated from the component_value_allocator object,
// which ensures that a given component number is unique in the
// system, currently by using the rank and number of processes to
// stride allocations.
//
// When two components are discovered to actually be the same
// component, a mapping is created in the collisions object.  The
// lower component number is preferred in the resolution, so component
// numbering resolution is consistent.  After the search has exhausted
// all vertices in the graph, the mapping is shared with all
// processes, and they independently resolve the component mapping (so
// O((N * NP) + (V * NP)) work, in O(N + V) time, where N is the
// number of mappings and V is the number of local vertices).  This
// phase can likely be significantly sped up if a clever algorithm for
// the reduction can be found.
namespace boost { namespace graph { namespace distributed {
namespace cc_ps_detail {
// Local object for allocating component numbers. There are two
// places this happens in the code, and I was getting sick of them
// getting out of sync. Components are not tightly packed in
// numbering, but are numbered to ensure each rank has its own
// independent sets of numberings.
// Hands out component numbers that are globally unique without any
// communication: rank r draws from the arithmetic sequence
// r, r + P, r + 2P, ... where P is the number of processes.
template<typename component_value_type>
class component_value_allocator {
public:
  component_value_allocator(int num, int size) :
    next_slot(0), num(num), size(size)
  {
  }

  // Return the next unused component number for this rank.
  component_value_type allocate(void)
  {
    component_value_type value = num + (next_slot * size);
    ++next_slot;
    return value;
  }

private:
  component_value_type next_slot; // how many numbers handed out so far
  int num;                        // this process's rank
  int size;                       // total number of processes
};
// Map of the "collisions" between component names in the global
// component mapping.  To make cleanup easier, component numbers
// are added, pointing to themselves, when a new component is
// found.  In order to make the results deterministic, the lower
// component number is always taken.  The resolver will drill
// through the map until it finds a component entry that points to
// itself as the next value, allowing some cleanup to happen at
// update() time.  Attempts are also made to update the mapping
// when new entries are created.
//
// Note that there's an assumption that the entire mapping is
// shared during the end of the algorithm, but before component
// name resolution.
// Tracks which component names have "collided", i.e. have been
// discovered to denote the same component.  Every freshly allocated
// name is first mapped to itself; whenever two names meet, both are
// pointed at the smaller of the two so resolution is deterministic.
// update() reads the table after uniqify() has flattened the chains
// into single-lookup form.  The full mapping is assumed to be shared
// across processes before names are resolved.
template<typename component_value_type>
class collision_map {
public:
  collision_map() : num_unique(0)
  {
  }

  // Register a brand-new component name, pointing at itself.  A
  // separate overload so we can assert no mapping already exists.
  void add(const component_value_type &a)
  {
    BOOST_ASSERT(collisions.count(a) == 0);
    collisions[a] = a;
  }

  // Record that names a and b denote one and the same component,
  // always resolving toward the smaller name.
  void add(const component_value_type &a, const component_value_type &b)
  {
    component_value_type low = a;
    component_value_type high = b;
    if (high < low) std::swap(low, high);

    if (collisions.count(high) != 0 && collisions[high] != low) {
      component_value_type prev = collisions[high];
      if (low < prev) {
        collisions[prev] = low;
        collisions[high] = low;
      } else {
        collisions[low] = prev;
        collisions[high] = prev;
      }
    } else {
      collisions[high] = low;
    }
  }

  // Resolve a name to its final component number.  Only valid after
  // uniqify() has flattened the table.
  component_value_type update(component_value_type a)
  {
    BOOST_ASSERT(num_unique > 0);
    BOOST_ASSERT(collisions.count(a) != 0);
    return collisions[a];
  }

  // Flatten the collision chains so update() is a single lookup,
  // counting the unique (self-mapped) components along the way.
  void uniqify(void)
  {
    typedef typename std::map<component_value_type, component_value_type>::iterator
      map_iterator;

    const map_iterator last = collisions.end();
    for (map_iterator pos = collisions.begin(); pos != last; ++pos) {
      if (pos->first == pos->second) {
        num_unique++;
      } else {
        // Keys are visited in increasing order, so the entry we chain
        // through has already been flattened.
        pos->second = collisions[pos->second];
      }
    }
  }

  // Number of distinct components; valid only after uniqify().
  int unique(void)
  {
    BOOST_ASSERT(num_unique > 0);
    return num_unique;
  }

  // Flatten the mapping into (key, value) pairs laid out back-to-back
  // in a vector, suitable for communication.
  std::vector<component_value_type> serialize(void)
  {
    std::vector<component_value_type> flat;

    typedef typename std::map<component_value_type, component_value_type>::iterator
      map_iterator;
    for (map_iterator pos = collisions.begin(); pos != collisions.end(); ++pos) {
      flat.push_back(pos->first);
      flat.push_back(pos->second);
    }

    return flat;
  }

private:
  std::map<component_value_type, component_value_type> collisions;
  int num_unique;
};
// Resolver invoked by the distributed property map when a remote
// process writes a component value into one of our local vertices.
// It may add entries to the collision map and, on a vertex's first
// touch, pushes the vertex onto the local work queue.  Local updates
// are handled separately, in the main loop (below).
// BWB - FIX ME - don't need graph anymore - can pull from key value of Component Map.
template<typename ComponentMap, typename work_queue>
struct update_reducer {
  BOOST_STATIC_CONSTANT(bool, non_default_resolver = false);

  typedef typename property_traits<ComponentMap>::value_type component_value_type;
  typedef typename property_traits<ComponentMap>::key_type vertex_descriptor;

  // q and collisions are owned by the calling algorithm and must
  // outlive this reducer; pg_id is this process's rank.
  update_reducer(work_queue *q,
                 cc_ps_detail::collision_map<component_value_type> *collisions,
                 processor_id_type pg_id) :
    q(q), collisions(collisions), pg_id(pg_id)
  {
  }

  // Ghost cell initialization routine.  This should never be
  // called in this implementation.
  template<typename K>
  component_value_type operator()(const K&) const
  {
    return component_value_type(0);
  }

  // Resolver for remote updates.  If the vertex is still "infinite"
  // (untouched), adopt the incoming component number and queue the
  // vertex for exploration.  If it already has a component, keep the
  // existing value -- which name wins here does not matter, since the
  // cleanup phase touches every vertex anyway -- but record a
  // collision entry so we know the two components are the same.
  component_value_type operator()(const vertex_descriptor &v,
                                  const component_value_type& current,
                                  const component_value_type& update) const
  {
    const component_value_type max = (std::numeric_limits<component_value_type>::max)();
    component_value_type ret = current;
    if (max == current) {
      q->push(v);
      ret = update;
    } else if (current != update) {
      collisions->add(current, update);
    }
    return ret;
  }

  // The property map can in theory call the resolver with a local
  // descriptor in addition to the standard global descriptor.  As
  // far as I can tell, this code path is never taken in this
  // implementation, but it must exist to compile.  We just make a
  // global descriptor and call the "real" operator().
  template<typename K>
  component_value_type operator()(const K& v,
                                  const component_value_type& current,
                                  const component_value_type& update) const
  {
    return (*this)(vertex_descriptor(pg_id, v), current, update);
  }

private:
  work_queue *q;
  collision_map<component_value_type> *collisions;
  boost::processor_id_type pg_id;
};
} // namespace cc_ps_detail
// Connected components via parallel search (see the file comment):
// each process grows components outward from seed vertices, remote
// writes are folded in by cc_ps_detail::update_reducer, and colliding
// component names are reconciled globally at the end.  Fills c with
// component numbers and returns the number of components.
template<typename Graph, typename ComponentMap>
typename property_traits<ComponentMap>::value_type
connected_components_ps(const Graph& g, ComponentMap c)
{
  using boost::graph::parallel::process_group;

  typedef typename property_traits<ComponentMap>::value_type component_value_type;
  typedef typename graph_traits<Graph>::vertex_iterator vertex_iterator;
  typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;
  typedef typename boost::graph::parallel::process_group_type<Graph>
    ::type process_group_type;
  typedef typename process_group_type::process_id_type process_id_type;
  typedef typename property_map<Graph, vertex_owner_t>
    ::const_type vertex_owner_map;
  typedef std::queue<vertex_descriptor> work_queue;

  // "Infinite" component number, marking vertices not yet reached.
  static const component_value_type max_component =
    (std::numeric_limits<component_value_type>::max)();
  typename property_map<Graph, vertex_owner_t>::const_type
    owner = get(vertex_owner, g);

  // standard who am i? stuff
  process_group_type pg = process_group(g);
  process_id_type id = process_id(pg);

  // Initialize every vertex to have infinite component number
  BGL_FORALL_VERTICES_T(v, g, Graph) put(c, v, max_component);

  vertex_iterator current, end;
  boost::tie(current, end) = vertices(g);

  // cva allocates component numbers unique across all ranks;
  // collisions records names later found to be the same component.
  cc_ps_detail::component_value_allocator<component_value_type> cva(process_id(pg), num_processes(pg));
  cc_ps_detail::collision_map<component_value_type> collisions;
  work_queue q;  // this is intentionally a local data structure
  c.set_reduce(cc_ps_detail::update_reducer<ComponentMap, work_queue>(&q, &collisions, id));

  // add starting work: seed the queue with local vertices up to and
  // including the first one that has at least one outgoing edge.
  // NOTE(review): this loop dereferences and advances 'current'
  // without checking it against 'end' -- it looks like a rank owning
  // no vertices (or only isolated ones) would run past the end of its
  // vertex range; confirm callers guarantee otherwise.
  while (true) {
      bool useful_found = false;
      component_value_type val = cva.allocate();
      put(c, *current, val);
      collisions.add(val);
      q.push(*current);
      if (0 != out_degree(*current, g)) useful_found = true;
      ++current;
      if (useful_found) break;
  }

  // Run the loop until everyone in the system is done
  bool global_done = false;
  while (!global_done) {
    // drain queue of work for this superstep
    while (!q.empty()) {
      vertex_descriptor v = q.front();
      q.pop();
      // iterate through outedges of the vertex currently being
      // examined, setting their component to our component.  There
      // is no way to end up in the queue without having a component
      // number already.
      BGL_FORALL_ADJ_T(v, peer, g, Graph) {
        component_value_type my_component = get(c, v);

        // update other vertex with our component information.
        // Resolver will handle remote collisions as well as whether
        // to put the vertex on the work queue or not.  We have to
        // handle local collisions and work queue management
        if (id == get(owner, peer)) {
          if (max_component == get(c, peer)) {
            // First touch: adopt our component and explore later.
            put(c, peer, my_component);
            q.push(peer);
          } else if (my_component != get(c, peer)) {
            // Two named components met: record the collision.
            collisions.add(my_component, get(c, peer));
          }
        } else {
          // Remote vertex: the owner's update_reducer resolves it.
          put(c, peer, my_component);
        }
      }
    }

    // synchronize / start a new superstep.
    synchronize(pg);
    global_done = all_reduce(pg, (q.empty() && (current == end)), boost::parallel::minimum<bool>());

    // If the queue is currently empty, add something to do to start
    // the current superstep (supersteps start at the sync, not at
    // the top of the while loop as one might expect).  Down at the
    // bottom of the while loop so that not everyone starts the
    // algorithm with something to do, to try to reduce component
    // name conflicts
    if (q.empty()) {
      bool useful_found = false;
      for ( ; current != end && !useful_found ; ++current) {
        if (max_component == get(c, *current)) {
          component_value_type val = cva.allocate();
          put(c, *current, val);
          collisions.add(val);
          q.push(*current);
          if (0 != out_degree(*current, g)) useful_found = true;
        }
      }
    }
  }

  // share component mappings: every rank contributes its collision
  // table so that all ranks can resolve names identically.
  std::vector<component_value_type> global;
  std::vector<component_value_type> mine = collisions.serialize();
  all_gather(pg, mine.begin(), mine.end(), global);
  for (size_t i = 0 ; i < global.size() ; i += 2) {
    collisions.add(global[i], global[i + 1]);
  }
  collisions.uniqify();

  // update the component mappings: replace every provisional name
  // with its final resolved component number.
  BGL_FORALL_VERTICES_T(v, g, Graph) {
    put(c, v, collisions.update(get(c, v)));
  }

  return collisions.unique();
}
} // end namespace distributed
} // end namespace graph
} // end namespace boost
#endif // BOOST_GRAPH_PARALLEL_CC_PS_HPP

View File

@@ -0,0 +1,665 @@
// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
/**************************************************************************
* This source file implements the variation on Dijkstra's algorithm *
* presented by Crauser et al. in: *
* *
* Andreas Crauser, Kurt Mehlhorn, Ulrich Meyer, and Peter *
* Sanders. A Parallelization of Dijkstra's Shortest Path *
* Algorithm. In Lubos Brim, Jozef Gruska, and Jiri Zlatuska, *
* editors, Mathematical Foundations of Computer Science (MFCS), *
* volume 1450 of Lecture Notes in Computer Science, pages *
* 722--731, 1998. Springer. *
* *
* This implementation is, however, restricted to the distributed-memory *
* case, where the work is distributed by virtue of the vertices being *
* distributed. In a shared-memory (single address space) context, we *
* would want to add an explicit balancing step. *
**************************************************************************/
#ifndef BOOST_GRAPH_CRAUSER_ET_AL_SHORTEST_PATHS_HPP
#define BOOST_GRAPH_CRAUSER_ET_AL_SHORTEST_PATHS_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/assert.hpp>
#include <boost/graph/distributed/detail/dijkstra_shortest_paths.hpp>
#include <boost/graph/parallel/algorithm.hpp>
#include <functional>
#include <boost/graph/iteration_macros.hpp>
#include <boost/property_map/property_map_iterator.hpp>
#include <boost/type_traits/is_same.hpp>
#include <algorithm>
#include <boost/property_map/parallel/caching_property_map.hpp>
#include <boost/pending/indirect_cmp.hpp>
#include <boost/graph/distributed/detail/remote_update_set.hpp>
#include <vector>
#include <boost/graph/breadth_first_search.hpp>
#include <boost/graph/dijkstra_shortest_paths.hpp>
#include <boost/graph/parallel/container_traits.hpp>
#ifdef PBGL_ACCOUNTING
# include <boost/graph/accounting.hpp>
# include <numeric>
#endif // PBGL_ACCOUNTING
#ifdef MUTABLE_QUEUE
# include <boost/pending/mutable_queue.hpp>
#endif
namespace boost { namespace graph { namespace distributed {
#ifdef PBGL_ACCOUNTING
// Accounting statistics for crauser_et_al_shortest_paths, collected
// only when PBGL_ACCOUNTING is defined.
struct crauser_et_al_shortest_paths_stats_t
{
  /* Total wall-clock time used by the algorithm.*/
  accounting::time_type execution_time;
  /* The number of vertices deleted in each superstep. */
  std::vector<std::size_t> deleted_vertices;

  // Pretty-print the collected statistics to the given stream.
  template<typename OutputStream>
  void print(OutputStream& out)
  {
    double avg_deletions = std::accumulate(deleted_vertices.begin(),
                                           deleted_vertices.end(),
                                           0.0);
    // Guard the division: if print() is called before any superstep
    // was recorded, dividing by zero would report "nan" here.
    if (!deleted_vertices.empty())
      avg_deletions /= deleted_vertices.size();

    out << "Problem = \"Single-Source Shortest Paths\"\n"
        << "Algorithm = \"Crauser et al\"\n"
        << "Function = crauser_et_al_shortest_paths\n"
        << "Wall clock time = " << accounting::print_time(execution_time)
        << "\nSupersteps = " << deleted_vertices.size() << "\n"
        << "Avg. deletions per superstep = " << avg_deletions << "\n";
  }
};
static crauser_et_al_shortest_paths_stats_t crauser_et_al_shortest_paths_stats;
#endif
namespace detail {
/************************************************************************
* Function objects that perform distance comparisons modified by the *
* minimum or maximum edge weights. *
************************************************************************/
// IN criterion of Crauser et al.: orders vertices by their tentative
// distance combined with the negated minimum weight over incoming
// edges, under the user-supplied Combine/Compare operations.
template<typename Vertex, typename DistanceMap, typename MinInWeightMap,
         typename Combine, typename Compare>
struct min_in_distance_compare
  : std::binary_function<Vertex, Vertex, bool>
{
  min_in_distance_compare(DistanceMap d, MinInWeightMap m,
                          Combine combine, Compare compare)
    : distances(d), min_in(m), combine_fn(combine), compare_fn(compare)
  {
  }

  // True when lhs's adjusted key precedes rhs's under compare_fn.
  bool operator()(const Vertex& lhs, const Vertex& rhs) const
  {
    return compare_fn(combine_fn(get(distances, lhs), -get(min_in, lhs)),
                      combine_fn(get(distances, rhs), -get(min_in, rhs)));
  }

private:
  DistanceMap distances;
  MinInWeightMap min_in;
  Combine combine_fn;
  Compare compare_fn;
};
// OUT criterion of Crauser et al.: orders vertices by their tentative
// distance combined with the minimum weight over outgoing edges,
// under the user-supplied Combine/Compare operations.
template<typename Vertex, typename DistanceMap, typename MinOutWeightMap,
         typename Combine, typename Compare>
struct min_out_distance_compare
  : std::binary_function<Vertex, Vertex, bool>
{
  min_out_distance_compare(DistanceMap d, MinOutWeightMap m,
                           Combine combine, Compare compare)
    : distances(d), min_out(m), combine_fn(combine), compare_fn(compare)
  {
  }

  // True when lhs's adjusted key precedes rhs's under compare_fn.
  bool operator()(const Vertex& lhs, const Vertex& rhs) const
  {
    return compare_fn(combine_fn(get(distances, lhs), get(min_out, lhs)),
                      combine_fn(get(distances, rhs), get(min_out, rhs)));
  }

private:
  DistanceMap distances;
  MinOutWeightMap min_out;
  Combine combine_fn;
  Compare compare_fn;
};
/************************************************************************/
/************************************************************************
* Dijkstra queue that implements Crauser et al.'s criteria. This queue *
* actually stores three separate priority queues, to help expose all *
* vertices that can be processed in a single phase. *
************************************************************************/
template<typename Graph, typename Combine,
typename Compare, typename VertexIndexMap, typename DistanceMap,
typename PredecessorMap, typename MinOutWeightMap,
typename MinInWeightMap>
class crauser_et_al_dijkstra_queue
: public graph::detail::remote_update_set<
crauser_et_al_dijkstra_queue<
Graph, Combine, Compare, VertexIndexMap, DistanceMap,
PredecessorMap, MinOutWeightMap, MinInWeightMap>,
typename boost::graph::parallel::process_group_type<Graph>::type,
typename dijkstra_msg_value<DistanceMap, PredecessorMap>::type,
typename property_map<Graph, vertex_owner_t>::const_type>
{
typedef typename graph_traits<Graph>::vertex_descriptor
vertex_descriptor;
typedef crauser_et_al_dijkstra_queue self_type;
typedef dijkstra_msg_value<DistanceMap, PredecessorMap> msg_value_creator;
typedef typename msg_value_creator::type msg_value_type;
typedef typename graph_traits<Graph>::vertices_size_type
vertices_size_type;
typedef typename property_map<Graph, vertex_owner_t>::const_type
OwnerPropertyMap;
typedef typename boost::graph::parallel::process_group_type<Graph>::type
process_group_type;
typedef graph::detail::remote_update_set<self_type, process_group_type,
msg_value_type, OwnerPropertyMap>
inherited;
// Priority queue for tentative distances
typedef indirect_cmp<DistanceMap, Compare> dist_queue_compare_type;
typedef typename property_traits<DistanceMap>::value_type distance_type;
#ifdef MUTABLE_QUEUE
typedef mutable_queue<vertex_descriptor, std::vector<vertex_descriptor>,
dist_queue_compare_type, VertexIndexMap> dist_queue_type;
#else
typedef relaxed_heap<vertex_descriptor, dist_queue_compare_type,
VertexIndexMap> dist_queue_type;
#endif // MUTABLE_QUEUE
// Priority queue for OUT criteria
typedef min_out_distance_compare<vertex_descriptor, DistanceMap,
MinOutWeightMap, Combine, Compare>
out_queue_compare_type;
#ifdef MUTABLE_QUEUE
typedef mutable_queue<vertex_descriptor, std::vector<vertex_descriptor>,
out_queue_compare_type, VertexIndexMap> out_queue_type;
#else
typedef relaxed_heap<vertex_descriptor, out_queue_compare_type,
VertexIndexMap> out_queue_type;
#endif // MUTABLE_QUEUE
// Priority queue for IN criteria
typedef min_in_distance_compare<vertex_descriptor, DistanceMap,
MinInWeightMap, Combine, Compare>
in_queue_compare_type;
#ifdef MUTABLE_QUEUE
typedef mutable_queue<vertex_descriptor, std::vector<vertex_descriptor>,
in_queue_compare_type, VertexIndexMap> in_queue_type;
#else
typedef relaxed_heap<vertex_descriptor, in_queue_compare_type,
VertexIndexMap> in_queue_type;
#endif // MUTABLE_QUEUE
typedef typename process_group_type::process_id_type process_id_type;
public:
typedef typename dist_queue_type::size_type size_type;
typedef typename dist_queue_type::value_type value_type;
crauser_et_al_dijkstra_queue(const Graph& g,
const Combine& combine,
const Compare& compare,
const VertexIndexMap& id,
const DistanceMap& distance_map,
const PredecessorMap& predecessor_map,
const MinOutWeightMap& min_out_weight,
const MinInWeightMap& min_in_weight)
: inherited(boost::graph::parallel::process_group(g), get(vertex_owner, g)),
dist_queue(num_vertices(g),
dist_queue_compare_type(distance_map, compare),
id),
out_queue(num_vertices(g),
out_queue_compare_type(distance_map, min_out_weight,
combine, compare),
id),
in_queue(num_vertices(g),
in_queue_compare_type(distance_map, min_in_weight,
combine, compare),
id),
g(g),
distance_map(distance_map),
predecessor_map(predecessor_map),
min_out_weight(min_out_weight),
min_in_weight(min_in_weight),
min_distance(0),
min_out_distance(0)
#ifdef PBGL_ACCOUNTING
, local_deletions(0)
#endif
{ }
void push(const value_type& x)
{
msg_value_type msg_value =
msg_value_creator::create(get(distance_map, x),
predecessor_value(get(predecessor_map, x)));
inherited::update(x, msg_value);
}
void update(const value_type& x) { push(x); }
// Remove the vertex most recently selected as top() from all three
// queues. The three queues always contain the same vertex set; they
// differ only in their priority criteria.
void pop()
{
  // Remove from distance queue
  dist_queue.remove(top_vertex);

  // Remove from OUT queue
  out_queue.remove(top_vertex);

  // Remove from IN queue
  in_queue.remove(top_vertex);

#ifdef PBGL_ACCOUNTING
  ++local_deletions;
#endif
}
vertex_descriptor& top() { return top_vertex; }
const vertex_descriptor& top() const { return top_vertex; }
// Returns true when the whole distributed computation has terminated
// (no process can delete any more vertices). May block: if no local
// vertex currently satisfies the Crauser deletion criteria, the call
// synchronizes repeatedly until either a suitable vertex appears or
// do_synchronize() reports global termination.
bool empty()
{
  inherited::collect();

  // If there are no suitable messages, wait until we get something
  while (!has_suitable_vertex()) {
    if (do_synchronize()) return true;
  }
  // A suitable vertex was found (and cached in top_vertex), so the
  // queue is not globally empty.
  return false;
}
// Perform one global synchronization step: exchange pending messages,
// then compute (via all_reduce) the global minimum tentative distance
// and the global minimum OUT-criterion bound. Returns true when the
// global minimum distance is "infinity", i.e. every process's queue is
// effectively empty and the algorithm has terminated.
bool do_synchronize()
{
  using boost::parallel::all_reduce;
  using boost::parallel::minimum;

  inherited::synchronize();

  // TBD: could use combine here, but then we need to stop using
  // minimum<distance_type>() as the function object.

  // local_distances[0]: smallest tentative distance in the local queue;
  // local_distances[1]: smallest (distance + min outgoing weight),
  // i.e. the local contribution to the OUT-criterion threshold.
  distance_type local_distances[2];
  local_distances[0] =
    dist_queue.empty()? (std::numeric_limits<distance_type>::max)()
    : get(distance_map, dist_queue.top());
  local_distances[1] =
    out_queue.empty()? (std::numeric_limits<distance_type>::max)()
    : (get(distance_map, out_queue.top())
       + get(min_out_weight, out_queue.top()));

  distance_type distances[2];
  all_reduce(this->process_group, local_distances, local_distances + 2,
             distances, minimum<distance_type>());
  min_distance = distances[0];
  min_out_distance = distances[1];

#ifdef PBGL_ACCOUNTING
  std::size_t deletions = 0;
  all_reduce(this->process_group, &local_deletions, &local_deletions + 1,
             &deletions, std::plus<std::size_t>());
  if (process_id(this->process_group) == 0) {
    crauser_et_al_shortest_paths_stats.deleted_vertices.push_back(deletions);
  }
  local_deletions = 0;
  // NOTE(review): asserts some vertex was deleted globally since the
  // last synchronization — presumably termination is detected before
  // a zero-deletion round can occur; confirm against the algorithm.
  BOOST_ASSERT(deletions > 0);
#endif

  return min_distance == (std::numeric_limits<distance_type>::max)();
}
private:
// Pass a real predecessor vertex through unchanged.
vertex_descriptor predecessor_value(vertex_descriptor v) const
{ return v; }
// Overload selected when the predecessor map is a dummy_property_map
// (the caller does not record predecessors): substitute null_vertex().
vertex_descriptor
predecessor_value(property_traits<dummy_property_map>::reference) const
{ return graph_traits<Graph>::null_vertex(); }
// Test whether some local vertex satisfies the Crauser OUT or IN
// deletion criterion against the last globally-reduced thresholds.
// Side effect: caches the candidate in the mutable member top_vertex,
// which top()/pop() consume. top_vertex is only meaningful when this
// function returned true.
bool has_suitable_vertex() const
{
  if (!dist_queue.empty()) {
    top_vertex = dist_queue.top();
    // OUT criterion: distance no greater than the global bound
    // min(dist(u) + min outgoing weight of u).
    if (get(distance_map, dist_queue.top()) <= min_out_distance)
      return true;
  }

  if (!in_queue.empty()) {
    // IN criterion: dist(v) - (min incoming weight of v) is no
    // greater than the global minimum tentative distance.
    top_vertex = in_queue.top();
    return (get(distance_map, top_vertex)
            - get(min_in_weight, top_vertex)) <= min_distance;
  }

  return false;
}
public:
// Message handler: a remote (or local) process reports a tentative
// distance for a vertex this process owns. The tie-break on equal
// distances favors the locally-produced value so that the owner's
// choice is stable.
void
receive_update(process_id_type source, vertex_descriptor vertex,
               distance_type distance)
{
  // Update the queue if the received distance is better than
  // the distance we know locally
  if (distance < get(distance_map, vertex)
      || (distance == get(distance_map, vertex)
          && source == process_id(this->process_group))) {
    // Update the local distance map
    put(distance_map, vertex, distance);

    // Keep all three queues in sync: insert the vertex if it is new,
    // otherwise re-heapify its existing entries.
    bool is_in_queue = dist_queue.contains(vertex);

    if (!is_in_queue) {
      dist_queue.push(vertex);
      out_queue.push(vertex);
      in_queue.push(vertex);
    }
    else {
      dist_queue.update(vertex);
      out_queue.update(vertex);
      in_queue.update(vertex);
    }
  }
}
// Message handler for (distance, predecessor) pairs: record the
// predecessor whenever the reported distance is at least as good as
// the one known locally, then delegate to the scalar handler for the
// distance/queue update.
void
receive_update(process_id_type source, vertex_descriptor vertex,
               std::pair<distance_type, vertex_descriptor> p)
{
  if (!(p.first <= get(distance_map, vertex)))
    return;

  put(predecessor_map, vertex, p.second);
  receive_update(source, vertex, p.first);
}
private:
dist_queue_type dist_queue;
out_queue_type out_queue;
in_queue_type in_queue;
mutable value_type top_vertex;
const Graph& g;
DistanceMap distance_map;
PredecessorMap predecessor_map;
MinOutWeightMap min_out_weight;
MinInWeightMap min_in_weight;
distance_type min_distance;
distance_type min_out_distance;
#ifdef PBGL_ACCOUNTING
std::size_t local_deletions;
#endif
};
/************************************************************************/
/************************************************************************
* Initialize the property map that contains the minimum incoming edge *
* weight for each vertex. There are separate implementations for *
* directed, bidirectional, and undirected graph. *
************************************************************************/
// Compute, for every vertex, the minimum weight over its *incoming*
// edges, using only out-edge traversal: each process pushes candidate
// weights at the targets of its out-edges, and the distributed
// property map resolves the winners during synchronize().
// Vertices with no incoming edge end up with weight 0 (not inf).
// NOTE(review): the `compare` parameter is unused here; the ordering
// is hard-coded to operator< — confirm whether custom comparators
// should be honored as in the other overloads.
template<typename Graph, typename MinInWeightMap, typename WeightMap,
         typename Inf, typename Compare>
void
initialize_min_in_weights(const Graph& g, MinInWeightMap min_in_weight,
                          WeightMap weight, Inf inf, Compare compare,
                          directed_tag, incidence_graph_tag)
{
  // Send minimum weights off to the owners; presumably the
  // vertex_distance role configures a minimum-reduction on the
  // distributed property map — confirm.
  set_property_map_role(vertex_distance, min_in_weight);
  BGL_FORALL_VERTICES_T(v, g, Graph) {
    BGL_FORALL_OUTEDGES_T(v, e, g, Graph) {
      if (get(weight, e) < get(min_in_weight, target(e, g)))
        put(min_in_weight, target(e, g), get(weight, e));
    }
  }

  using boost::graph::parallel::process_group;
  synchronize(process_group(g));

  // Replace any infinities with zeros
  BGL_FORALL_VERTICES_T(v, g, Graph) {
    if (get(min_in_weight, v) == inf) put(min_in_weight, v, 0);
  }
}
// Bidirectional-graph variant. A direct in-edge scan (preserved below
// under #if 0 with its debugging output) would be preferable, but it
// assumes in-edge properties are available locally, which is not
// guaranteed for distributed graphs; so this currently forwards to
// the incidence-graph (out-edge push) implementation.
template<typename Graph, typename MinInWeightMap, typename WeightMap,
         typename Inf, typename Compare>
void
initialize_min_in_weights(const Graph& g, MinInWeightMap min_in_weight,
                          WeightMap weight, Inf inf, Compare compare,
                          directed_tag, bidirectional_graph_tag)
{
#if 0
  typename property_map<Graph, vertex_local_t>::const_type
    local = get(vertex_local, g);

  // This code assumes that the properties of the in-edges are
  // available locally. This is not necessarily the case, so don't
  // do this yet.
  set_property_map_role(vertex_distance, min_in_weight);
  BGL_FORALL_VERTICES_T(v, g, Graph) {
    if (in_edges(v, g).first != in_edges(v, g).second) {
      std::cerr << "weights(" << g.distribution().global(get(local, v))
                << ") = ";
      BGL_FORALL_INEDGES_T(v, e, g, Graph) {
        std::cerr << get(weight, e) << ' ';
      }
      std::cerr << std::endl;
      put(min_in_weight, v,
          *std::min_element
          (make_property_map_iterator(weight, in_edges(v, g).first),
           make_property_map_iterator(weight, in_edges(v, g).second),
           compare));
    } else {
      put(min_in_weight, v, 0);
    }
    std::cerr << "miw(" << g.distribution().global(get(local, v)) << ") = "
              << get(min_in_weight, v) << std::endl;
  }
#else
  initialize_min_in_weights(g, min_in_weight, weight, inf, compare,
                            directed_tag(), incidence_graph_tag());
#endif
}
// Undirected variant: incoming weights coincide with outgoing weights,
// and the caller reuses the min-out-weight map instead, so there is
// nothing to compute here.
template<typename Graph, typename MinInWeightMap, typename WeightMap,
         typename Inf, typename Compare>
inline void
initialize_min_in_weights(const Graph&, MinInWeightMap, WeightMap, Inf,
                          Compare, undirected_tag, bidirectional_graph_tag)
{
  // In weights are the same as out weights, so do nothing
}
/************************************************************************/
/************************************************************************
* Initialize the property map that contains the minimum outgoing edge *
* weight for each vertex. *
************************************************************************/
// Compute, for every vertex, the minimum weight among its outgoing
// edges (per `compare`) and store it in min_out_weight. Vertices with
// no out-edges are left untouched. Throws negative_edge if any such
// minimum is below zero, since the algorithm requires non-negative
// weights.
template<typename Graph, typename MinOutWeightMap, typename WeightMap,
         typename Compare>
void
initialize_min_out_weights(const Graph& g, MinOutWeightMap min_out_weight,
                           WeightMap weight, Compare compare)
{
  typedef typename property_traits<WeightMap>::value_type weight_type;

  BGL_FORALL_VERTICES_T(v, g, Graph) {
    typename graph_traits<Graph>::out_edge_iterator
      first = out_edges(v, g).first,
      last = out_edges(v, g).second;
    if (first == last)
      continue;  // no out-edges: leave the stored value alone

    // Track the smallest weight seen so far; keeping the first of
    // equal minima matches std::min_element semantics.
    weight_type best = get(weight, *first);
    for (++first; first != last; ++first) {
      weight_type w = get(weight, *first);
      if (compare(w, best)) best = w;
    }
    put(min_out_weight, v, best);

    if (get(min_out_weight, v) < weight_type(0))
      boost::throw_exception(negative_edge());
  }
}
/************************************************************************/
} // end namespace detail
// Full-parameter entry point for the distributed Crauser et al.
// single-source shortest-paths algorithm. Precomputes the per-vertex
// minimum outgoing/incoming edge weights (the OUT and IN deletion
// criteria), initializes distances and predecessors, then runs a
// distributed BFS driven by the crauser_et_al_dijkstra_queue.
// The interface mirrors the usual named-parameter Dijkstra set:
// compare/combine order and accumulate distances, inf/zero are the
// identity values, and vis is the user's Dijkstra visitor.
template<typename DistributedGraph, typename DijkstraVisitor,
         typename PredecessorMap, typename DistanceMap, typename WeightMap,
         typename IndexMap, typename ColorMap, typename Compare,
         typename Combine, typename DistInf, typename DistZero>
void
crauser_et_al_shortest_paths
  (const DistributedGraph& g,
   typename graph_traits<DistributedGraph>::vertex_descriptor s,
   PredecessorMap predecessor, DistanceMap distance, WeightMap weight,
   IndexMap index_map, ColorMap color_map,
   Compare compare, Combine combine, DistInf inf, DistZero zero,
   DijkstraVisitor vis)
{
  typedef typename boost::graph::parallel::process_group_type<DistributedGraph>::type
    process_group_type;
  typedef typename process_group_type::process_id_type process_id_type;
  typedef typename graph_traits<DistributedGraph>::vertex_descriptor
    Vertex;
  typedef typename graph_traits<DistributedGraph>::vertices_size_type
    vertices_size_type;

#ifdef PBGL_ACCOUNTING
  crauser_et_al_shortest_paths_stats.deleted_vertices.clear();
  crauser_et_al_shortest_paths_stats.execution_time = accounting::get_time();
#endif

  // Property map that stores the lowest edge weight outgoing from
  // each vertex. If a vertex has no out-edges, the stored weight
  // is zero.
  typedef typename property_traits<WeightMap>::value_type weight_type;
  typedef iterator_property_map<weight_type*, IndexMap> MinOutWeightMap;
  std::vector<weight_type> min_out_weights_vec(num_vertices(g), inf);
  MinOutWeightMap min_out_weight(&min_out_weights_vec.front(), index_map);
  detail::initialize_min_out_weights(g, min_out_weight, weight, compare);

  // Property map that stores the lowest edge weight incoming to
  // each vertex. For undirected graphs, this will just be a
  // shallow copy of the version for outgoing edges, so only a
  // 1-element placeholder vector is allocated in that case.
  typedef typename graph_traits<DistributedGraph>::directed_category
    directed_category;
  const bool is_undirected =
    is_same<directed_category, undirected_tag>::value;
  typedef MinOutWeightMap MinInWeightMap;
  std::vector<weight_type>
    min_in_weights_vec(is_undirected? 1 : num_vertices(g), inf);
  MinInWeightMap min_in_weight(&min_in_weights_vec.front(), index_map);
  typedef typename graph_traits<DistributedGraph>::traversal_category
    category;
  detail::initialize_min_in_weights(g, min_in_weight, weight, inf, compare,
                                    directed_category(), category());

  // Initialize local portion of property maps: every vertex starts at
  // "infinite" distance and is its own predecessor.
  typename graph_traits<DistributedGraph>::vertex_iterator ui, ui_end;
  for (boost::tie(ui, ui_end) = vertices(g); ui != ui_end; ++ui) {
    put(distance, *ui, inf);
    put(predecessor, *ui, *ui);
  }
  put(distance, s, zero);

  // Dijkstra Queue implementing the Crauser OUT/IN deletion criteria
  typedef detail::crauser_et_al_dijkstra_queue
    <DistributedGraph, Combine, Compare, IndexMap, DistanceMap,
     PredecessorMap, MinOutWeightMap, MinInWeightMap>
    Queue;

  Queue Q(g, combine, compare, index_map, distance, predecessor,
          min_out_weight, is_undirected? min_out_weight : min_in_weight);

  // Parallel Dijkstra visitor: adapts the user's visitor and performs
  // edge relaxation through caching property maps.
  ::boost::detail::dijkstra_bfs_visitor<
      DijkstraVisitor, Queue, WeightMap,
      boost::parallel::caching_property_map<PredecessorMap>,
      boost::parallel::caching_property_map<DistanceMap>, Combine, Compare
    > bfs_vis(vis, Q, weight,
              boost::parallel::make_caching_property_map(predecessor),
              boost::parallel::make_caching_property_map(distance),
              combine, compare, zero);

  set_property_map_role(vertex_color, color_map);
  set_property_map_role(vertex_distance, distance);

  breadth_first_search(g, s, Q, bfs_vis, color_map);

#ifdef PBGL_ACCOUNTING
  crauser_et_al_shortest_paths_stats.execution_time =
    accounting::get_time() - crauser_et_al_shortest_paths_stats.execution_time;
#endif
}
template<typename DistributedGraph, typename PredecessorMap,
typename DistanceMap, typename WeightMap>
void
crauser_et_al_shortest_paths
(const DistributedGraph& g,
typename graph_traits<DistributedGraph>::vertex_descriptor s,
PredecessorMap predecessor, DistanceMap distance, WeightMap weight)
{
typedef typename property_traits<DistanceMap>::value_type distance_type;
std::vector<default_color_type> colors(num_vertices(g), white_color);
crauser_et_al_shortest_paths(g, s, predecessor, distance, weight,
get(vertex_index, g),
make_iterator_property_map(&colors[0],
get(vertex_index, g)),
std::less<distance_type>(),
closed_plus<distance_type>(),
(std::numeric_limits<distance_type>::max)(),
distance_type(),
dijkstra_visitor<>());
}
// Minimal overload: additionally defaults the weight map to the
// graph's internal edge_weight property.
template<typename DistributedGraph, typename PredecessorMap,
         typename DistanceMap>
void
crauser_et_al_shortest_paths
  (const DistributedGraph& g,
   typename graph_traits<DistributedGraph>::vertex_descriptor s,
   PredecessorMap predecessor, DistanceMap distance)
{
  crauser_et_al_shortest_paths
    (g, s, predecessor, distance, get(edge_weight, g));
}
} // end namespace distributed
#ifdef PBGL_ACCOUNTING
using distributed::crauser_et_al_shortest_paths_stats;
#endif
using distributed::crauser_et_al_shortest_paths;
} } // end namespace boost::graph
#endif // BOOST_GRAPH_CRAUSER_ET_AL_SHORTEST_PATHS_HPP

View File

@@ -0,0 +1,938 @@
// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
/**
* This header implements four distributed algorithms to compute
* the minimum spanning tree (actually, minimum spanning forest) of a
* graph. All of the algorithms were implemented as specified in the
* paper by Dehne and Gotz:
*
* Frank Dehne and Silvia Gotz. Practical Parallel Algorithms for Minimum
* Spanning Trees. In Symposium on Reliable Distributed Systems,
* pages 366--371, 1998.
*
* There are four algorithm variants implemented.
*/
#ifndef BOOST_DEHNE_GOTZ_MIN_SPANNING_TREE_HPP
#define BOOST_DEHNE_GOTZ_MIN_SPANNING_TREE_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/graph_traits.hpp>
#include <boost/property_map/property_map.hpp>
#include <vector>
#include <boost/graph/parallel/algorithm.hpp>
#include <boost/limits.hpp>
#include <utility>
#include <boost/pending/disjoint_sets.hpp>
#include <boost/pending/indirect_cmp.hpp>
#include <boost/property_map/parallel/caching_property_map.hpp>
#include <boost/graph/vertex_and_edge_range.hpp>
#include <boost/graph/kruskal_min_spanning_tree.hpp>
#include <boost/iterator/counting_iterator.hpp>
#include <boost/iterator/transform_iterator.hpp>
#include <boost/graph/parallel/container_traits.hpp>
#include <boost/graph/parallel/detail/untracked_pair.hpp>
#include <cmath>
namespace boost { namespace graph { namespace distributed {
namespace detail {
/**
* Binary function object type that selects the (edge, weight) pair
* with the minimum weight. Used within a Boruvka merge step to select
* the candidate edges incident to each supervertex.
*/
/**
 * Binary function object that selects whichever (edge, weight) pair
 * has the smaller weight; on ties the second argument wins. Used in
 * a Boruvka merge step to pick candidate edges per supervertex.
 */
struct smaller_weighted_edge
{
  template<typename Edge, typename Weight>
  std::pair<Edge, Weight>
  operator()(const std::pair<Edge, Weight>& lhs,
             const std::pair<Edge, Weight>& rhs) const
  {
    if (lhs.second < rhs.second)
      return lhs;
    return rhs;
  }
};
/**
* Unary predicate that determines if the source and target vertices
* of the given edge have the same representative within a disjoint
* sets data structure. Used to indicate when an edge is now a
* self-loop because of supervertex merging in Boruvka's algorithm.
*/
/**
 * Unary predicate: true when both endpoints of an edge resolve to the
 * same supervertex representative in the disjoint-sets structure,
 * i.e. the edge has become a self-loop through supervertex merging.
 */
template<typename DisjointSets, typename Graph>
class do_has_same_supervertex
{
public:
  typedef typename graph_traits<Graph>::edge_descriptor edge_descriptor;

  // Holds both the disjoint-sets structure and the graph by reference.
  do_has_same_supervertex(DisjointSets& dset, const Graph& g)
    : dset_(dset), g_(g) { }

  bool operator()(edge_descriptor e)
  {
    const bool same_rep =
      dset_.find_set(source(e, g_)) == dset_.find_set(target(e, g_));
    return same_rep;
  }

private:
  DisjointSets& dset_;
  const Graph& g_;
};
/**
* Build a @ref do_has_same_supervertex object.
*/
/**
 * Factory for @ref do_has_same_supervertex; deduces the template
 * arguments from the call site.
 */
template<typename DisjointSets, typename Graph>
inline do_has_same_supervertex<DisjointSets, Graph>
has_same_supervertex(DisjointSets& dset, const Graph& g)
{
  typedef do_has_same_supervertex<DisjointSets, Graph> predicate_type;
  return predicate_type(dset, g);
}
/** \brief A single distributed Boruvka merge step.
*
* A distributed Boruvka merge step involves computing (globally)
* the minimum weight edges incident on each supervertex and then
* merging supervertices along these edges. Once supervertices are
* merged, self-loops are eliminated.
*
* The set of parameters passed to this algorithm is large, and
* considering this algorithm in isolation there are several
* redundancies. However, the more asymptotically efficient
* distributed MSF algorithms require mixing Boruvka steps with the
* merging of local MSFs (implemented in
* merge_local_minimum_spanning_trees_step): the interaction of the
* two algorithms mandates the addition of these parameters.
*
* \param pg The process group over which communication should be
* performed. Within the distributed Boruvka algorithm, this will be
* equivalent to \code process_group(g); however, in the context of
* the mixed MSF algorithms, the process group @p pg will be a
* (non-strict) process subgroup of \code process_group(g).
*
* \param g The underlying graph on which the MSF is being
* computed. The type of @p g must model DistributedGraph, but there
* are no other requirements because the edge and (super)vertex
* lists are passed separately.
*
* \param weight_map Property map containing the weights of each
* edge. The type of this property map must model
* ReadablePropertyMap and must support caching.
*
* \param out An output iterator that will be written with the set
* of edges selected to build the MSF. Every process within the
* process group @p pg will receive all edges in the MSF.
*
* \param dset Disjoint sets data structure mapping from vertices in
* the graph @p g to their representative supervertex.
*
* \param supervertex_map Mapping from supervertex descriptors to
* indices.
*
* \param supervertices A vector containing all of the
* supervertices. Will be modified to include only the remaining
* supervertices after merging occurs.
*
* \param edge_list The list of edges that remain in the graph. This
* list will be pruned to remove self-loops once MSF edges have been
* found.
*/
// One distributed Boruvka merge step (see the documentation comment
// above). Protocol: (B1) each process scans its local edge list and
// records, per supervertex, the lightest incident edge; (B2a) an
// all_reduce combines these into global minima, so every process sees
// the same candidate set; (B2b) every process then replays the same
// sequential merge, keeping the results consistent without further
// communication; (B3) edges that became self-loops and absorbed
// supervertices are pruned.
template<typename ProcessGroup, typename Graph, typename WeightMap,
         typename OutputIterator, typename RankMap, typename ParentMap,
         typename SupervertexMap, typename Vertex, typename EdgeList>
OutputIterator
boruvka_merge_step(ProcessGroup pg, const Graph& g, WeightMap weight_map,
                   OutputIterator out,
                   disjoint_sets<RankMap, ParentMap>& dset,
                   SupervertexMap supervertex_map,
                   std::vector<Vertex>& supervertices,
                   EdgeList& edge_list)
{
  typedef typename graph_traits<Graph>::vertex_descriptor
    vertex_descriptor;
  typedef typename graph_traits<Graph>::vertices_size_type
    vertices_size_type;
  typedef typename graph_traits<Graph>::edge_descriptor edge_descriptor;
  typedef typename EdgeList::iterator edge_iterator;
  typedef typename property_traits<WeightMap>::value_type
    weight_type;
  // untracked_pair serializes without the usual tracking overhead,
  // making the all_reduce below cheaper.
  typedef boost::parallel::detail::untracked_pair<edge_descriptor,
                                                  weight_type> w_edge;
  typedef typename property_traits<SupervertexMap>::value_type
    supervertex_index;

  smaller_weighted_edge min_edge;
  weight_type inf = (std::numeric_limits<weight_type>::max)();

  // Renumber the supervertices so they index candidate_edges densely
  for (std::size_t i = 0; i < supervertices.size(); ++i)
    put(supervertex_map, supervertices[i], i);

  // BSP-B1: Find local minimum-weight edges for each supervertex.
  // A weight of `inf` marks "no candidate found".
  std::vector<w_edge> candidate_edges(supervertices.size(),
                                      w_edge(edge_descriptor(), inf));
  for (edge_iterator ei = edge_list.begin(); ei != edge_list.end(); ++ei) {
    weight_type w = get(weight_map, *ei);
    supervertex_index u =
      get(supervertex_map, dset.find_set(source(*ei, g)));
    supervertex_index v =
      get(supervertex_map, dset.find_set(target(*ei, g)));

    if (u != v) {
      candidate_edges[u] = min_edge(candidate_edges[u], w_edge(*ei, w));
      candidate_edges[v] = min_edge(candidate_edges[v], w_edge(*ei, w));
    }
  }

  // BSP-B2 (a): Compute global minimum edges for each supervertex
  all_reduce(pg,
             &candidate_edges[0],
             &candidate_edges[0] + candidate_edges.size(),
             &candidate_edges[0], min_edge);

  // BSP-B2 (b): Use the edges to compute sequentially the new
  // connected components and emit the edges. Every process executes
  // this identical loop on identical data.
  for (vertices_size_type i = 0; i < candidate_edges.size(); ++i) {
    if (candidate_edges[i].second != inf) {
      edge_descriptor e = candidate_edges[i].first;
      vertex_descriptor u = dset.find_set(source(e, g));
      vertex_descriptor v = dset.find_set(target(e, g));
      if (u != v) {
        // Emit the edge, but cache the weight so everyone knows it
        cache(weight_map, e, candidate_edges[i].second);
        *out++ = e;

        // Link the two supervertices
        dset.link(u, v);

        // Whichever vertex was reparented will be removed from the
        // list of supervertices.
        vertex_descriptor victim = u;
        if (dset.find_set(u) == u) victim = v;
        supervertices[get(supervertex_map, victim)] =
          graph_traits<Graph>::null_vertex();
      }
    }
  }

  // BSP-B3: Eliminate self-loops
  edge_list.erase(std::remove_if(edge_list.begin(), edge_list.end(),
                                 has_same_supervertex(dset, g)),
                  edge_list.end());

  // TBD: might also eliminate multiple edges between supervertices
  // when the edges do not have the best weight, but this is not
  // strictly necessary.

  // Eliminate supervertices that have been absorbed (marked with
  // null_vertex() above).
  supervertices.erase(std::remove(supervertices.begin(),
                                  supervertices.end(),
                                  graph_traits<Graph>::null_vertex()),
                      supervertices.end());

  return out;
}
/**
* An edge descriptor adaptor that reroutes the source and target
* edges to different vertices, but retains the original edge
* descriptor for, e.g., property maps. This is used when we want to
* turn a set of edges in the overall graph into a set of edges
* between supervertices.
*/
/**
 * Edge descriptor adaptor that reroutes source/target to supervertex
 * representatives while retaining the original descriptor `e` for
 * property-map lookups. Implicitly convertible back to the original
 * edge descriptor; equality compares the underlying edge only.
 */
template<typename Graph>
struct supervertex_edge_descriptor
{
  typedef supervertex_edge_descriptor self_type;
  typedef typename graph_traits<Graph>::vertex_descriptor Vertex;
  typedef typename graph_traits<Graph>::edge_descriptor Edge;

  Vertex source;   // supervertex representative of the source
  Vertex target;   // supervertex representative of the target
  Edge e;          // original edge, used for property access

  // Allow use anywhere a plain edge descriptor is expected.
  operator Edge() const { return e; }

  friend inline bool operator==(const self_type& x, const self_type& y)
  { return x.e == y.e; }
  friend inline bool operator!=(const self_type& x, const self_type& y)
  { return x.e != y.e; }
};
// Free function `source` for supervertex edges: yields the rerouted
// source supervertex rather than the original edge's source.
template<typename Graph>
inline typename supervertex_edge_descriptor<Graph>::Vertex
source(supervertex_edge_descriptor<Graph> sedge, const Graph&)
{
  return sedge.source;
}
// Free function `target` for supervertex edges: yields the rerouted
// target supervertex rather than the original edge's target.
template<typename Graph>
inline typename supervertex_edge_descriptor<Graph>::Vertex
target(supervertex_edge_descriptor<Graph> sedge, const Graph&)
{
  return sedge.target;
}
/**
* Build a supervertex edge descriptor from a normal edge descriptor
* using the given disjoint sets data structure to identify
* supervertex representatives.
*/
/**
 * Function object that lifts an ordinary edge descriptor into a
 * supervertex edge descriptor, using the disjoint-sets structure to
 * find each endpoint's representative.
 */
template<typename Graph, typename DisjointSets>
struct build_supervertex_edge_descriptor
{
  typedef typename graph_traits<Graph>::vertex_descriptor Vertex;
  typedef typename graph_traits<Graph>::edge_descriptor Edge;

  typedef Edge argument_type;
  typedef supervertex_edge_descriptor<Graph> result_type;

  // Default-constructed instances are unusable placeholders.
  build_supervertex_edge_descriptor() : graph_(0), dsets_(0) { }

  build_supervertex_edge_descriptor(const Graph& g, DisjointSets& dsets)
    : graph_(&g), dsets_(&dsets) { }

  result_type operator()(argument_type e) const
  {
    // Reroute the endpoints to their supervertex representatives but
    // keep the original descriptor for property-map lookups.
    result_type mapped;
    mapped.e = e;
    mapped.source = dsets_->find_set(source(e, *graph_));
    mapped.target = dsets_->find_set(target(e, *graph_));
    return mapped;
  }

private:
  const Graph* graph_;
  DisjointSets* dsets_;
};
/**
 * Factory for @ref build_supervertex_edge_descriptor; deduces the
 * template arguments from the call site.
 */
template<typename Graph, typename DisjointSets>
inline build_supervertex_edge_descriptor<Graph, DisjointSets>
make_supervertex_edge_descriptor(const Graph& g, DisjointSets& dsets)
{
  typedef build_supervertex_edge_descriptor<Graph, DisjointSets> mapper_type;
  return mapper_type(g, dsets);
}
/**
 * Identity function object: returns its argument unchanged. Used as
 * the edge mapper when no supervertex remapping is required.
 */
template<typename T>
struct identity_function
{
  typedef T argument_type;
  typedef T result_type;

  result_type operator()(argument_type value) const { return value; }
};
/**
 * Stateful predicate implementing Kruskal's acceptance test over a
 * pre-sorted edge list: returns true ("not an MSF edge") when the
 * edge's endpoints already share a component; otherwise links the two
 * components and returns false.
 *
 * NOTE(review): `dset` is held by value, and std::remove_if may copy
 * its predicate — presumably the disjoint-sets structure shares its
 * underlying rank/parent property-map storage so copies stay
 * consistent; confirm before changing the copy semantics.
 */
template<typename Graph, typename DisjointSets, typename EdgeMapper>
class is_not_msf_edge
{
  typedef typename graph_traits<Graph>::vertex_descriptor Vertex;
  typedef typename graph_traits<Graph>::edge_descriptor Edge;

public:
  is_not_msf_edge(const Graph& g, DisjointSets dset, EdgeMapper edge_mapper)
    : g(g), dset(dset), edge_mapper(edge_mapper) { }

  bool operator()(Edge e)
  {
    // edge_mapper may translate e into a supervertex edge first.
    Vertex u = dset.find_set(source(edge_mapper(e), g));
    Vertex v = dset.find_set(target(edge_mapper(e), g));
    if (u == v) return true;
    else {
      // Accept the edge into the forest: merge the two components.
      dset.link(u, v);
      return false;
    }
  }

private:
  const Graph& g;
  DisjointSets dset;
  EdgeMapper edge_mapper;
};
/**
 * Run Kruskal's algorithm over an edge list that is already sorted by
 * increasing weight, erasing (in place) every edge that does not
 * belong to the minimum spanning forest. The disjoint-sets structure
 * is built fresh from [first_vertex, last_vertex).
 *
 * Note the predicate passed to std::remove_if mutates the
 * disjoint-sets state as it accepts edges; correctness relies on
 * remove_if applying it exactly once per element in order.
 */
template<typename Graph, typename ForwardIterator, typename EdgeList,
         typename EdgeMapper, typename RankMap, typename ParentMap>
void
sorted_mutating_kruskal(const Graph& g,
                        ForwardIterator first_vertex,
                        ForwardIterator last_vertex,
                        EdgeList& edge_list, EdgeMapper edge_mapper,
                        RankMap rank_map, ParentMap parent_map)
{
  typedef disjoint_sets<RankMap, ParentMap> DisjointSets;

  // Build and initialize disjoint-sets data structure
  DisjointSets dset(rank_map, parent_map);
  for (ForwardIterator v = first_vertex; v != last_vertex; ++v)
    dset.make_set(*v);

  is_not_msf_edge<Graph, DisjointSets, EdgeMapper>
    remove_non_msf_edges(g, dset, edge_mapper);
  edge_list.erase(std::remove_if(edge_list.begin(), edge_list.end(),
                                 remove_non_msf_edges),
                  edge_list.end());
}
/**
* Merge local minimum spanning forests from p processes into
* minimum spanning forests on p/D processes (where D is the tree
* factor, currently fixed at 3), eliminating unnecessary edges in
* the process.
*
* As with @ref boruvka_merge_step, this routine has many
* parameters, not all of which make sense within the limited
* context of this routine. The parameters are required for the
* Boruvka and local MSF merging steps to interoperate.
*
* \param pg The process group on which local minimum spanning
* forests should be merged. The top (D-1)p/D processes will be
* eliminated, and a new process subgroup containing p/D processors
* will be returned. The value D is a constant factor that is
* currently fixed to 3.
*
* \param g The underlying graph whose MSF is being computed. It must model
* the DistributedGraph concept.
*
* \param first_vertex Iterator to the first vertex in the graph
* that should be considered. While the local MSF merging algorithm
* typically operates on the entire vertex set, within the hybrid
* distributed MSF algorithms this will refer to the first
* supervertex.
*
* \param last_vertex The past-the-end iterator for the vertex list.
*
* \param edge_list The list of local edges that will be
* considered. For the p/D processes that remain, this list will
* contain edges in the MSF known to the vertex after other
* processes' edge lists have been merged. The edge list must be
* sorted in order of increasing weight.
*
* \param weight Property map containing the weights of each
* edge. The type of this property map must model
* ReadablePropertyMap and must support caching.
*
* \param global_index Mapping from vertex descriptors to a global
* index. The type must model ReadablePropertyMap.
*
* \param edge_mapper A function object that can remap edge descriptors
* in the edge list to any alternative edge descriptor. This
* function object will be the identity function when a pure merging
* of local MSFs is required, but may be a mapping to a supervertex
* edge when the local MSF merging occurs on a supervertex
* graph. This function object saves us the trouble of having to
* build a supervertex graph adaptor.
*
* \param already_local_msf True when the edge list already
* constitutes a local MSF. If false, Kruskal's algorithm will first
* be applied to the local edge list to select MSF edges.
*
* \returns The process subgroup containing the remaining p/D
* processes. If the size of this process group is greater than one,
* the MSF edges contained in the edge list do not constitute an MSF
* for the entire graph.
*/
// One round of the local-MSF merging scheme (see the documentation
// comment above). The p processes form a tree of fan-in D (= 3): the
// first p/D processes receive and merge their children's sorted MSF
// edge lists and re-run Kruskal; the rest send their edges to their
// parent (id % procs_left) and drop out. Returns the subgroup of the
// surviving p/D processes.
template<typename ProcessGroup, typename Graph, typename ForwardIterator,
         typename EdgeList, typename WeightMap, typename GlobalIndexMap,
         typename EdgeMapper>
ProcessGroup
merge_local_minimum_spanning_trees_step(ProcessGroup pg,
                                        const Graph& g,
                                        ForwardIterator first_vertex,
                                        ForwardIterator last_vertex,
                                        EdgeList& edge_list,
                                        WeightMap weight,
                                        GlobalIndexMap global_index,
                                        EdgeMapper edge_mapper,
                                        bool already_local_msf)
{
  typedef typename ProcessGroup::process_id_type process_id_type;
  typedef typename EdgeList::value_type edge_descriptor;
  typedef typename property_traits<WeightMap>::value_type weight_type;
  typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;

  // The tree factor, often called "D"
  process_id_type const tree_factor = 3;
  process_id_type num_procs = num_processes(pg);
  process_id_type id = process_id(pg);
  // Number of processes that survive this round (ceiling division).
  process_id_type procs_left = (num_procs + tree_factor - 1) / tree_factor;
  std::size_t n = std::size_t(last_vertex - first_vertex);

  if (!already_local_msf) {
    // Compute local minimum spanning forest. We only care about the
    // edges in the MSF, because only edges in the local MSF can be in
    // the global MSF.
    std::vector<std::size_t> ranks(n);
    std::vector<vertex_descriptor> parents(n);
    detail::sorted_mutating_kruskal
      (g, first_vertex, last_vertex,
       edge_list, edge_mapper,
       make_iterator_property_map(ranks.begin(), global_index),
       make_iterator_property_map(parents.begin(), global_index));
  }

  // Wire format for transmitted edges: (edge, weight) pairs, so the
  // receiver can seed its weight-map cache.
  typedef std::pair<edge_descriptor, weight_type> w_edge;

  // Order edges based on their weights.
  indirect_cmp<WeightMap, std::less<weight_type> > cmp_edge_weight(weight);

  if (id < procs_left) {
    // The p/D processes that remain will receive local MSF edges from
    // D-1 other processes.
    synchronize(pg);
    for (process_id_type from_id = procs_left + id; from_id < num_procs;
         from_id += procs_left) {
      // Receive the number of edges in the child's MSF, then (if
      // nonzero) the edges themselves.
      std::size_t num_incoming_edges;
      receive(pg, from_id, 0, num_incoming_edges);
      if (num_incoming_edges > 0) {
        std::vector<w_edge> incoming_edges(num_incoming_edges);
        receive(pg, from_id, 1, &incoming_edges[0], num_incoming_edges);

        // Append the incoming edges (caching their weights), then
        // restore the overall sort with a single inplace_merge —
        // both halves are already sorted by weight.
        edge_list.reserve(edge_list.size() + num_incoming_edges);
        for (std::size_t i = 0; i < num_incoming_edges; ++i) {
          cache(weight, incoming_edges[i].first, incoming_edges[i].second);
          edge_list.push_back(incoming_edges[i].first);
        }
        std::inplace_merge(edge_list.begin(),
                           edge_list.end() - num_incoming_edges,
                           edge_list.end(),
                           cmp_edge_weight);
      }
    }

    // Compute the local MSF from union of the edges in the MSFs of
    // all children.
    std::vector<std::size_t> ranks(n);
    std::vector<vertex_descriptor> parents(n);
    detail::sorted_mutating_kruskal
      (g, first_vertex, last_vertex,
       edge_list, edge_mapper,
       make_iterator_property_map(ranks.begin(), global_index),
       make_iterator_property_map(parents.begin(), global_index));
  } else {
    // The (D-1)p/D processes that are dropping out of further
    // computations merely send their MSF edges to their parent
    // process in the process tree.
    send(pg, id % procs_left, 0, edge_list.size());
    if (edge_list.size() > 0) {
      std::vector<w_edge> outgoing_edges;
      outgoing_edges.reserve(edge_list.size());
      for (std::size_t i = 0; i < edge_list.size(); ++i) {
        outgoing_edges.push_back(std::make_pair(edge_list[i],
                                                get(weight, edge_list[i])));
      }
      send(pg, id % procs_left, 1, &outgoing_edges[0],
           outgoing_edges.size());
    }

    synchronize(pg);
  }

  // Return a process subgroup containing the p/D parent processes
  return process_subgroup(pg,
                          make_counting_iterator(process_id_type(0)),
                          make_counting_iterator(procs_left));
}
} // end namespace detail
// ---------------------------------------------------------------------
// Dense Boruvka MSF algorithm
// ---------------------------------------------------------------------
// Dense Boruvka distributed MSF: first prune the local edge set down
// to a local MSF with sequential Kruskal, then repeat distributed
// Boruvka merge steps until the supervertex count stops shrinking.
// Every process receives all MSF edges through `out`.
template<typename Graph, typename WeightMap, typename OutputIterator,
         typename VertexIndexMap, typename RankMap, typename ParentMap,
         typename SupervertexMap>
OutputIterator
dense_boruvka_minimum_spanning_tree(const Graph& g, WeightMap weight_map,
                                    OutputIterator out,
                                    VertexIndexMap index_map,
                                    RankMap rank_map, ParentMap parent_map,
                                    SupervertexMap supervertex_map)
{
  using boost::graph::parallel::process_group;

  // The algorithm requires traversal of the full vertex list.
  typedef typename graph_traits<Graph>::traversal_category traversal_category;

  BOOST_STATIC_ASSERT((is_convertible<traversal_category*,
                                      vertex_list_graph_tag*>::value));

  typedef typename graph_traits<Graph>::vertices_size_type vertices_size_type;
  typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;
  typedef typename graph_traits<Graph>::vertex_iterator vertex_iterator;
  typedef typename graph_traits<Graph>::edge_descriptor edge_descriptor;

  // Don't throw away cached edge weights
  weight_map.set_max_ghost_cells(0);

  // Initialize the disjoint sets structures: initially every vertex
  // is its own supervertex.
  disjoint_sets<RankMap, ParentMap> dset(rank_map, parent_map);
  vertex_iterator vi, vi_end;
  for (boost::tie(vi, vi_end) = vertices(g); vi != vi_end; ++vi)
    dset.make_set(*vi);

  std::vector<vertex_descriptor> supervertices;
  supervertices.assign(vertices(g).first, vertices(g).second);

  // Use Kruskal's algorithm to find the minimum spanning forest
  // considering only the local edges. The resulting edges are not
  // necessarily going to be in the final minimum spanning
  // forest. However, any edge not part of the local MSF cannot be a
  // part of the global MSF, so we should have eliminated some edges
  // from consideration.
  std::vector<edge_descriptor> edge_list;
  kruskal_minimum_spanning_tree
    (make_vertex_and_edge_range(g, vertices(g).first, vertices(g).second,
                                edges(g).first, edges(g).second),
     std::back_inserter(edge_list),
     boost::weight_map(weight_map).
     vertex_index_map(index_map));

  // While the number of supervertices is decreasing, keep executing
  // Boruvka steps to identify additional MSF edges. This loop will
  // execute log |V| times.
  vertices_size_type old_num_supervertices;
  do {
    old_num_supervertices = supervertices.size();
    out = detail::boruvka_merge_step(process_group(g), g,
                                     weight_map, out,
                                     dset, supervertex_map, supervertices,
                                     edge_list);
  } while (supervertices.size() < old_num_supervertices);

  return out;
}
template<typename Graph, typename WeightMap, typename OutputIterator,
typename VertexIndex>
OutputIterator
dense_boruvka_minimum_spanning_tree(const Graph& g, WeightMap weight_map,
OutputIterator out, VertexIndex i_map)
{
typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;
std::vector<std::size_t> ranks(num_vertices(g));
std::vector<vertex_descriptor> parents(num_vertices(g));
std::vector<std::size_t> supervertices(num_vertices(g));
return dense_boruvka_minimum_spanning_tree
(g, weight_map, out, i_map,
make_iterator_property_map(ranks.begin(), i_map),
make_iterator_property_map(parents.begin(), i_map),
make_iterator_property_map(supervertices.begin(), i_map));
}
template<typename Graph, typename WeightMap, typename OutputIterator>
OutputIterator
dense_boruvka_minimum_spanning_tree(const Graph& g, WeightMap weight_map,
OutputIterator out)
{
return dense_boruvka_minimum_spanning_tree(g, weight_map, out,
get(vertex_index, g));
}
// ---------------------------------------------------------------------
// Merge local MSFs MSF algorithm
// ---------------------------------------------------------------------
// Merging-local-MSFs distributed minimum spanning forest.
//
// Each process computes a local MSF with Kruskal's algorithm, then
// the per-process edge lists are repeatedly merged onto fewer and
// fewer processes until process 0 holds the complete MSF, which is
// then emitted to "out" (on process 0 only).
//
//   weight       - distributed property map of edge weights
//   global_index - maps each vertex to a global index
template<typename Graph, typename WeightMap, typename OutputIterator,
         typename GlobalIndexMap>
OutputIterator
merge_local_minimum_spanning_trees(const Graph& g, WeightMap weight,
                                   OutputIterator out,
                                   GlobalIndexMap global_index)
{
  using boost::graph::parallel::process_group_type;
  using boost::graph::parallel::process_group;
  typedef typename graph_traits<Graph>::traversal_category traversal_category;
  // The algorithm iterates over all vertices, so require a
  // vertex-list graph.
  BOOST_STATIC_ASSERT((is_convertible<traversal_category*,
                                      vertex_list_graph_tag*>::value));
  typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;
  typedef typename graph_traits<Graph>::edge_descriptor edge_descriptor;
  // Don't throw away cached edge weights
  weight.set_max_ghost_cells(0);
  // Compute the initial local minimum spanning forests
  std::vector<edge_descriptor> edge_list;
  kruskal_minimum_spanning_tree
    (make_vertex_and_edge_range(g, vertices(g).first, vertices(g).second,
                                edges(g).first, edges(g).second),
     std::back_inserter(edge_list),
     boost::weight_map(weight).vertex_index_map(global_index));
  // Merge the local MSFs from p processes into p/D processes,
  // reducing the number of processes in each step. Continue looping
  // until either (a) the current process drops out or (b) only one
  // process remains in the group. This loop will execute log_D p
  // times.
  typename process_group_type<Graph>::type pg = process_group(g);
  while (pg && num_processes(pg) > 1) {
    pg = detail::merge_local_minimum_spanning_trees_step
           (pg, g, vertices(g).first, vertices(g).second,
            edge_list, weight, global_index,
            detail::identity_function<edge_descriptor>(), true);
  }
  // Only process 0 has the entire edge list, so emit it to the output
  // iterator.
  if (pg && process_id(pg) == 0) {
    out = std::copy(edge_list.begin(), edge_list.end(), out);
  }
  synchronize(process_group(g));
  return out;
}
template<typename Graph, typename WeightMap, typename OutputIterator>
inline OutputIterator
merge_local_minimum_spanning_trees(const Graph& g, WeightMap weight,
OutputIterator out)
{
return merge_local_minimum_spanning_trees(g, weight, out,
get(vertex_index, g));
}
// ---------------------------------------------------------------------
// Boruvka-then-merge MSF algorithm
// ---------------------------------------------------------------------
// Boruvka-then-merge distributed minimum spanning forest.
//
// Runs Boruvka merge steps until the number of supervertices drops
// to roughly |V| / (log_D p)^2, then finishes by merging the local
// MSFs of the supervertex graph across processes. Edges are emitted
// to "out" during the Boruvka phase on every process; the final
// supervertex-graph MSF edges are emitted only on process 0.
//
//   weight          - distributed property map of edge weights
//   index           - maps each vertex to a global index
//   rank_map,
//   parent_map      - storage for the disjoint-sets (union-find) structure
//   supervertex_map - maps each vertex to its supervertex index
template<typename Graph, typename WeightMap, typename OutputIterator,
         typename GlobalIndexMap, typename RankMap, typename ParentMap,
         typename SupervertexMap>
OutputIterator
boruvka_then_merge(const Graph& g, WeightMap weight, OutputIterator out,
                   GlobalIndexMap index, RankMap rank_map,
                   ParentMap parent_map, SupervertexMap supervertex_map)
{
  using std::log;
  using boost::graph::parallel::process_group_type;
  using boost::graph::parallel::process_group;
  typedef typename graph_traits<Graph>::traversal_category traversal_category;
  BOOST_STATIC_ASSERT((is_convertible<traversal_category*,
                                      vertex_list_graph_tag*>::value));
  typedef typename graph_traits<Graph>::vertices_size_type vertices_size_type;
  typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;
  typedef typename graph_traits<Graph>::vertex_iterator vertex_iterator;
  typedef typename graph_traits<Graph>::edge_descriptor edge_descriptor;
  // Don't throw away cached edge weights
  weight.set_max_ghost_cells(0);
  // Compute the initial local minimum spanning forests
  std::vector<edge_descriptor> edge_list;
  kruskal_minimum_spanning_tree
    (make_vertex_and_edge_range(g, vertices(g).first, vertices(g).second,
                                edges(g).first, edges(g).second),
     std::back_inserter(edge_list),
     boost::weight_map(weight).
     vertex_index_map(index));
  // Initialize the disjoint sets structures for Boruvka steps
  disjoint_sets<RankMap, ParentMap> dset(rank_map, parent_map);
  vertex_iterator vi, vi_end;
  for (boost::tie(vi, vi_end) = vertices(g); vi != vi_end; ++vi)
    dset.make_set(*vi);
  // Construct the initial set of supervertices (all vertices)
  std::vector<vertex_descriptor> supervertices;
  supervertices.assign(vertices(g).first, vertices(g).second);
  // Continue performing Boruvka merge steps until the number of
  // supervertices reaches |V| / (log_D p)^2.
  const std::size_t tree_factor = 3; // TBD: same as above! should be param
  double log_d_p = log((double)num_processes(process_group(g)))
    / log((double)tree_factor);
  vertices_size_type target_supervertices =
    vertices_size_type(num_vertices(g) / (log_d_p * log_d_p));
  vertices_size_type old_num_supervertices;
  while (supervertices.size() > target_supervertices) {
    old_num_supervertices = supervertices.size();
    out = detail::boruvka_merge_step(process_group(g), g,
                                     weight, out, dset,
                                     supervertex_map, supervertices,
                                     edge_list);
    // A step that makes no progress means the MSF is already
    // complete, so we are done.
    if (supervertices.size() == old_num_supervertices)
      return out;
  }
  // Renumber the supervertices
  for (std::size_t i = 0; i < supervertices.size(); ++i)
    put(supervertex_map, supervertices[i], i);
  // Merge local MSFs on the supervertices. (D-1)p/D processors drop
  // out each iteration, so this loop executes log_D p times.
  typename process_group_type<Graph>::type pg = process_group(g);
  // have_msf is false only for the first merge step; it is passed
  // through to merge_local_minimum_spanning_trees_step.
  bool have_msf = false;
  while (pg && num_processes(pg) > 1) {
    pg = detail::merge_local_minimum_spanning_trees_step
           (pg, g, supervertices.begin(), supervertices.end(),
            edge_list, weight, supervertex_map,
            detail::make_supervertex_edge_descriptor(g, dset),
            have_msf);
    have_msf = true;
  }
  // Only process 0 has the complete list of _supervertex_ MST edges,
  // so emit those to the output iterator. This is not the complete
  // list of edges in the MSF, however: the Boruvka steps in the
  // beginning of the algorithm emitted any edges used to merge
  // supervertices.
  if (pg && process_id(pg) == 0)
    out = std::copy(edge_list.begin(), edge_list.end(), out);
  synchronize(process_group(g));
  return out;
}
template<typename Graph, typename WeightMap, typename OutputIterator,
typename GlobalIndexMap>
inline OutputIterator
boruvka_then_merge(const Graph& g, WeightMap weight, OutputIterator out,
GlobalIndexMap index)
{
typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;
typedef typename graph_traits<Graph>::vertices_size_type vertices_size_type;
std::vector<vertices_size_type> ranks(num_vertices(g));
std::vector<vertex_descriptor> parents(num_vertices(g));
std::vector<vertices_size_type> supervertex_indices(num_vertices(g));
return boruvka_then_merge
(g, weight, out, index,
make_iterator_property_map(ranks.begin(), index),
make_iterator_property_map(parents.begin(), index),
make_iterator_property_map(supervertex_indices.begin(), index));
}
template<typename Graph, typename WeightMap, typename OutputIterator>
inline OutputIterator
boruvka_then_merge(const Graph& g, WeightMap weight, OutputIterator out)
{ return boruvka_then_merge(g, weight, out, get(vertex_index, g)); }
// ---------------------------------------------------------------------
// Boruvka-mixed-merge MSF algorithm
// ---------------------------------------------------------------------
// Boruvka-mixed-merge distributed minimum spanning forest.
//
// Alternates one Boruvka merge step with one merging of local MSFs,
// shrinking both the supervertex set and the process group each
// iteration. Edges discovered by Boruvka steps are emitted on every
// participating process; the remaining supervertex-graph MSF edges
// are emitted only on process 0.
//
//   weight          - distributed property map of edge weights
//   index           - maps each vertex to a global index
//   rank_map,
//   parent_map      - storage for the disjoint-sets (union-find) structure
//   supervertex_map - maps each vertex to its supervertex index
template<typename Graph, typename WeightMap, typename OutputIterator,
         typename GlobalIndexMap, typename RankMap, typename ParentMap,
         typename SupervertexMap>
OutputIterator
boruvka_mixed_merge(const Graph& g, WeightMap weight, OutputIterator out,
                    GlobalIndexMap index, RankMap rank_map,
                    ParentMap parent_map, SupervertexMap supervertex_map)
{
  using boost::graph::parallel::process_group_type;
  using boost::graph::parallel::process_group;
  typedef typename graph_traits<Graph>::traversal_category traversal_category;
  BOOST_STATIC_ASSERT((is_convertible<traversal_category*,
                                      vertex_list_graph_tag*>::value));
  typedef typename graph_traits<Graph>::vertices_size_type vertices_size_type;
  typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;
  typedef typename graph_traits<Graph>::vertex_iterator vertex_iterator;
  typedef typename graph_traits<Graph>::edge_descriptor edge_descriptor;
  // Don't throw away cached edge weights
  weight.set_max_ghost_cells(0);
  // Initialize the disjoint sets structures for Boruvka steps
  disjoint_sets<RankMap, ParentMap> dset(rank_map, parent_map);
  vertex_iterator vi, vi_end;
  for (boost::tie(vi, vi_end) = vertices(g); vi != vi_end; ++vi)
    dset.make_set(*vi);
  // Construct the initial set of supervertices (all vertices)
  std::vector<vertex_descriptor> supervertices;
  supervertices.assign(vertices(g).first, vertices(g).second);
  // Compute the initial local minimum spanning forests
  std::vector<edge_descriptor> edge_list;
  kruskal_minimum_spanning_tree
    (make_vertex_and_edge_range(g, vertices(g).first, vertices(g).second,
                                edges(g).first, edges(g).second),
     std::back_inserter(edge_list),
     boost::weight_map(weight).
     vertex_index_map(index));
  // With a single process the local MSF is already the answer.
  if (num_processes(process_group(g)) == 1) {
    return std::copy(edge_list.begin(), edge_list.end(), out);
  }
  // Like the merging local MSFs algorithm and the Boruvka-then-merge
  // algorithm, each iteration of this loop reduces the number of
  // processes by a constant factor D, and therefore we require log_D
  // p iterations. Note also that the number of edges in the edge list
  // decreases geometrically, giving us an efficient distributed MSF
  // algorithm.
  typename process_group_type<Graph>::type pg = process_group(g);
  vertices_size_type old_num_supervertices;
  while (pg && num_processes(pg) > 1) {
    // A single Boruvka step. If this doesn't change anything, we're done
    old_num_supervertices = supervertices.size();
    out = detail::boruvka_merge_step(pg, g, weight, out, dset,
                                     supervertex_map, supervertices,
                                     edge_list);
    if (old_num_supervertices == supervertices.size()) {
      edge_list.clear();
      break;
    }
    // Renumber the supervertices
    for (std::size_t i = 0; i < supervertices.size(); ++i)
      put(supervertex_map, supervertices[i], i);
    // A single merging of local MSTs, which reduces the number of
    // processes we're using by a constant factor D.
    pg = detail::merge_local_minimum_spanning_trees_step
           (pg, g, supervertices.begin(), supervertices.end(),
            edge_list, weight, supervertex_map,
            detail::make_supervertex_edge_descriptor(g, dset),
            true);
  }
  // Only process 0 has the complete edge list, so emit it for the
  // user. Note that the edge list only contains the MSF edges in the
  // final supervertex graph: all of the other edges were used to
  // merge supervertices and have been emitted by the Boruvka steps,
  // although only process 0 has received the complete set.
  if (pg && process_id(pg) == 0)
    out = std::copy(edge_list.begin(), edge_list.end(), out);
  synchronize(process_group(g));
  return out;
}
template<typename Graph, typename WeightMap, typename OutputIterator,
typename GlobalIndexMap>
inline OutputIterator
boruvka_mixed_merge(const Graph& g, WeightMap weight, OutputIterator out,
GlobalIndexMap index)
{
typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;
typedef typename graph_traits<Graph>::vertices_size_type vertices_size_type;
std::vector<vertices_size_type> ranks(num_vertices(g));
std::vector<vertex_descriptor> parents(num_vertices(g));
std::vector<vertices_size_type> supervertex_indices(num_vertices(g));
return boruvka_mixed_merge
(g, weight, out, index,
make_iterator_property_map(ranks.begin(), index),
make_iterator_property_map(parents.begin(), index),
make_iterator_property_map(supervertex_indices.begin(), index));
}
template<typename Graph, typename WeightMap, typename OutputIterator>
inline OutputIterator
boruvka_mixed_merge(const Graph& g, WeightMap weight, OutputIterator out)
{ return boruvka_mixed_merge(g, weight, out, get(vertex_index, g)); }
} // end namespace distributed
using distributed::dense_boruvka_minimum_spanning_tree;
using distributed::merge_local_minimum_spanning_trees;
using distributed::boruvka_then_merge;
using distributed::boruvka_mixed_merge;
} } // end namespace boost::graph
#endif // BOOST_DEHNE_GOTZ_MIN_SPANNING_TREE_HPP

View File

@@ -0,0 +1,513 @@
// Copyright (C) 2007 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
/**************************************************************************
* This source file implements the Delta-stepping algorithm: *
* *
* Ulrich Meyer and Peter Sanders. Parallel Shortest Path for Arbitrary *
* Graphs. In Proceedings from the 6th International Euro-Par *
* Conference on Parallel Processing, pages 461--470, 2000. *
* *
* Ulrich Meyer, Peter Sanders: [Delta]-stepping: A Parallelizable *
* Shortest Path Algorithm. J. Algorithms 49(1): 114-152, 2003. *
* *
* There are several potential optimizations we could still perform for *
* this algorithm implementation: *
* *
* - Implement "shortcuts", which bound the number of reinsertions *
* in a single bucket (to one). The computation of shortcuts looks *
* expensive in a distributed-memory setting, but it could be *
 *     amortized over many queries.                                      *
* *
* - The size of the "buckets" data structure can be bounded to *
* max_edge_weight/delta buckets, if we treat the buckets as a *
* circular array. *
* *
* - If we partition light/heavy edges ahead of time, we could improve *
* relaxation performance by only iterating over the right portion *
* of the out-edge list each time. *
* *
* - Implement a more sophisticated algorithm for guessing "delta", *
* based on the shortcut-finding algorithm. *
**************************************************************************/
#ifndef BOOST_GRAPH_DELTA_STEPPING_SHORTEST_PATHS_HPP
#define BOOST_GRAPH_DELTA_STEPPING_SHORTEST_PATHS_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/config.hpp>
#include <boost/graph/graph_traits.hpp>
#include <boost/property_map/property_map.hpp>
#include <boost/graph/iteration_macros.hpp>
#include <limits>
#include <list>
#include <vector>
#include <boost/graph/parallel/container_traits.hpp>
#include <boost/graph/parallel/properties.hpp>
#include <boost/graph/distributed/detail/dijkstra_shortest_paths.hpp>
#include <utility> // for std::pair
#include <functional> // for std::logical_or
#include <boost/graph/parallel/algorithm.hpp> // for all_reduce
#include <cassert>
#include <algorithm> // for std::min, std::max
#include <boost/graph/parallel/simple_trigger.hpp>
#ifdef PBGL_DELTA_STEPPING_DEBUG
# include <iostream> // for std::cerr
#endif
namespace boost { namespace graph { namespace distributed {
// Implementation of the Meyer-Sanders delta-stepping single-source
// shortest-paths algorithm on a distributed graph. Construction
// installs the msg_relax trigger on this object's process group;
// run(s) performs the actual computation, writing results through
// the predecessor and distance property maps.
template<typename Graph, typename PredecessorMap, typename DistanceMap,
         typename EdgeWeightMap>
class delta_stepping_impl {
  typedef typename graph_traits<Graph>::vertex_descriptor Vertex;
  typedef typename graph_traits<Graph>::degree_size_type Degree;
  // Distance type: the value type of the edge-weight map.
  typedef typename property_traits<EdgeWeightMap>::value_type Dist;
  typedef typename boost::graph::parallel::process_group_type<Graph>::type
    ProcessGroup;
  // A bucket is a list of local vertices; std::list allows splice()
  // to move a vertex between buckets without invalidating the
  // iterators stored in position_in_bucket.
  typedef std::list<Vertex> Bucket;
  typedef typename Bucket::iterator BucketIterator;
  typedef typename std::vector<Bucket*>::size_type BucketIndex;
  // Payload type of a relaxation message (distance only, or
  // distance + predecessor, depending on the predecessor map).
  typedef detail::dijkstra_msg_value<DistanceMap, PredecessorMap> MessageValue;
  enum {
    // Relax a remote vertex. The message contains a pair<Vertex,
    // MessageValue>, the first part of which is the vertex whose
    // tentative distance is being relaxed and the second part
    // contains either the new distance (if there is no predecessor
    // map) or a pair with the distance and predecessor.
    msg_relax
  };
public:
  // Construct with an explicit bucket width (delta).
  delta_stepping_impl(const Graph& g,
                      PredecessorMap predecessor,
                      DistanceMap distance,
                      EdgeWeightMap weight,
                      Dist delta);
  // Construct with a heuristically chosen delta (see the
  // constructor definition for the heuristic).
  delta_stepping_impl(const Graph& g,
                      PredecessorMap predecessor,
                      DistanceMap distance,
                      EdgeWeightMap weight);
  // Compute shortest paths from source vertex s.
  void run(Vertex s);
private:
  // Relax the edge (u, v), creating a new best path of distance x.
  void relax(Vertex u, Vertex v, Dist x);
  // Synchronize all of the processes, by receiving all messages that
  // have not yet been received.
  void synchronize();
  // Handle a relax message that contains only the target and
  // distance. This kind of message will be received when the
  // predecessor map is a dummy_property_map.
  void handle_relax_message(Vertex v, Dist x) { relax(v, v, x); }
  // Handle a relax message that contains the source (predecessor),
  // target, and distance. This kind of message will be received when
  // the predecessor map is not a dummy_property_map.
  void handle_relax_message(Vertex v, const std::pair<Dist, Vertex>& p)
  { relax(p.second, v, p.first); }
  // Setup triggers for msg_relax messages
  void setup_triggers();
  // Trigger callback: unpack a msg_relax payload and forward it to
  // the matching handle_relax_message overload.
  void handle_msg_relax(int /*source*/, int /*tag*/,
                        const std::pair<Vertex, typename MessageValue::type>& data,
                        trigger_receive_context)
  { handle_relax_message(data.first, data.second); }
  const Graph& g;
  PredecessorMap predecessor;
  DistanceMap distance;
  EdgeWeightMap weight;
  // Bucket width: bucket i covers distances [i*delta, (i+1)*delta).
  Dist delta;
  ProcessGroup pg;
  typename property_map<Graph, vertex_owner_t>::const_type owner;
  typename property_map<Graph, vertex_local_t>::const_type local;
  // A "property map" that contains the position of each vertex in
  // whatever bucket it resides in.
  std::vector<BucketIterator> position_in_bucket;
  // Bucket data structure. The ith bucket contains all local vertices
  // with (tentative) distance in the range [i*delta,
  // (i+1)*delta).
  std::vector<Bucket*> buckets;
  // This "dummy" list is used only so that we can initialize the
  // position_in_bucket property map with non-singular iterators. This
  // won't matter for most implementations of the C++ Standard
  // Library, but it avoids undefined behavior and allows us to run
  // with library "debug modes".
  std::list<Vertex> dummy_list;
  // A "property map" that states which vertices have been deleted
  // from the bucket in this iteration.
  std::vector<bool> vertex_was_deleted;
};
// Construct with a caller-supplied bucket width. The process group
// is created from the graph's own group with this object attached as
// a distributed object, and the msg_relax trigger is installed.
template<typename Graph, typename PredecessorMap, typename DistanceMap,
         typename EdgeWeightMap>
delta_stepping_impl<Graph, PredecessorMap, DistanceMap, EdgeWeightMap>::
delta_stepping_impl(const Graph& g,
                    PredecessorMap predecessor,
                    DistanceMap distance,
                    EdgeWeightMap weight,
                    Dist delta)
  : g(g),
    predecessor(predecessor),
    distance(distance),
    weight(weight),
    delta(delta),
    pg(boost::graph::parallel::process_group_adl(g), attach_distributed_object()),
    owner(get(vertex_owner, g)),
    local(get(vertex_local, g))
{
  setup_triggers();
}
// Construct with a heuristically chosen bucket width: the global
// maximum edge weight divided by the global maximum out-degree, a
// guess that works well for random graphs. The process group is
// created from the graph's own group with this object attached as a
// distributed object, and the msg_relax trigger is installed.
template<typename Graph, typename PredecessorMap, typename DistanceMap,
         typename EdgeWeightMap>
delta_stepping_impl<Graph, PredecessorMap, DistanceMap, EdgeWeightMap>::
delta_stepping_impl(const Graph& g,
                    PredecessorMap predecessor,
                    DistanceMap distance,
                    EdgeWeightMap weight)
  : g(g),
    predecessor(predecessor),
    distance(distance),
    weight(weight),
    pg(boost::graph::parallel::process_group_adl(g), attach_distributed_object()),
    owner(get(vertex_owner, g)),
    local(get(vertex_local, g))
{
  using boost::parallel::all_reduce;
  using boost::parallel::maximum;
  using std::max;
  // Compute the maximum edge weight and degree
  Dist max_edge_weight = 0;
  Degree max_degree = 0;
  BGL_FORALL_VERTICES_T(u, g, Graph) {
    max_degree = max BOOST_PREVENT_MACRO_SUBSTITUTION (max_degree, out_degree(u, g));
    BGL_FORALL_OUTEDGES_T(u, e, g, Graph)
      max_edge_weight = max BOOST_PREVENT_MACRO_SUBSTITUTION (max_edge_weight, get(weight, e));
  }
  max_edge_weight = all_reduce(pg, max_edge_weight, maximum<Dist>());
  max_degree = all_reduce(pg, max_degree, maximum<Degree>());
  // Take a guess at delta, based on what works well for random
  // graphs. Guard against an edgeless graph: there max_degree is
  // zero and the division would be undefined behavior for integral
  // Dist types (or produce NaN for floating-point Dist).
  if (max_degree > 0)
    delta = max_edge_weight / max_degree;
  else
    delta = 1;
  if (delta == 0)
    delta = 1;
  setup_triggers();
}
// Run delta-stepping from source vertex s. Bucket i holds local
// vertices whose tentative distance lies in [i*delta, (i+1)*delta).
// Each round, all processes agree (via all_reduce) on the globally
// smallest non-empty bucket, exhaust it by repeatedly relaxing light
// edges (weight <= delta), and then relax the heavy edges (weight >
// delta) of every vertex removed from that bucket. The loop ends
// when no process has a non-empty bucket left.
template<typename Graph, typename PredecessorMap, typename DistanceMap,
         typename EdgeWeightMap>
void
delta_stepping_impl<Graph, PredecessorMap, DistanceMap, EdgeWeightMap>::
run(Vertex s)
{
  Dist inf = (std::numeric_limits<Dist>::max)();
  // None of the vertices are stored in the bucket.
  position_in_bucket.clear();
  position_in_bucket.resize(num_vertices(g), dummy_list.end());
  // None of the vertices have been deleted
  vertex_was_deleted.clear();
  vertex_was_deleted.resize(num_vertices(g), false);
  // No path from s to any other vertex, yet
  BGL_FORALL_VERTICES_T(v, g, Graph)
    put(distance, v, inf);
  // The distance to the starting node is zero
  if (get(owner, s) == process_id(pg))
    // Put "s" into its bucket (bucket 0)
    relax(s, s, 0);
  else
    // Note that we know the distance to s is zero
    cache(distance, s, 0);
  // max_bucket is the sentinel meaning "no non-empty bucket here".
  BucketIndex max_bucket = (std::numeric_limits<BucketIndex>::max)();
  BucketIndex current_bucket = 0;
  do {
    // Synchronize with all of the other processes.
    synchronize();
    // Find the next bucket that has something in it.
    while (current_bucket < buckets.size()
           && (!buckets[current_bucket] || buckets[current_bucket]->empty()))
      ++current_bucket;
    if (current_bucket >= buckets.size())
      current_bucket = max_bucket;
#ifdef PBGL_DELTA_STEPPING_DEBUG
    std::cerr << "#" << process_id(pg) << ": lowest bucket is #"
              << current_bucket << std::endl;
#endif
    // Find the smallest bucket (over all processes) that has vertices
    // that need to be processed.
    using boost::parallel::all_reduce;
    using boost::parallel::minimum;
    current_bucket = all_reduce(pg, current_bucket, minimum<BucketIndex>());
    if (current_bucket == max_bucket)
      // There are no non-empty buckets in any process; exit.
      break;
#ifdef PBGL_DELTA_STEPPING_DEBUG
    if (process_id(pg) == 0)
      std::cerr << "Processing bucket #" << current_bucket << std::endl;
#endif
    // Contains the set of vertices that have been deleted in the
    // relaxation of "light" edges. Note that we keep track of which
    // vertices were deleted with the property map
    // "vertex_was_deleted".
    std::vector<Vertex> deleted_vertices;
    // Repeatedly relax light edges
    bool nonempty_bucket;
    do {
      // Someone has work to do in this bucket.
      if (current_bucket < buckets.size() && buckets[current_bucket]) {
        Bucket& bucket = *buckets[current_bucket];
        // For each element in the bucket
        while (!bucket.empty()) {
          Vertex u = bucket.front();
#ifdef PBGL_DELTA_STEPPING_DEBUG
          std::cerr << "#" << process_id(pg) << ": processing vertex "
                    << get(vertex_global, g, u).second << "@"
                    << get(vertex_global, g, u).first
                    << std::endl;
#endif
          // Remove u from the front of the bucket
          bucket.pop_front();
          // Insert u into the set of deleted vertices, if it hasn't
          // been done already.
          if (!vertex_was_deleted[get(local, u)]) {
            vertex_was_deleted[get(local, u)] = true;
            deleted_vertices.push_back(u);
          }
          // Relax each light edge.
          Dist u_dist = get(distance, u);
          BGL_FORALL_OUTEDGES_T(u, e, g, Graph)
            if (get(weight, e) <= delta) // light edge
              relax(u, target(e, g), u_dist + get(weight, e));
        }
      }
      // Synchronize with all of the other processes.
      synchronize();
      // Is the bucket empty now?
      nonempty_bucket = (current_bucket < buckets.size()
                         && buckets[current_bucket]
                         && !buckets[current_bucket]->empty());
      // Keep going while any process still has work in this bucket:
      // relaxations may have reinserted vertices.
    } while (all_reduce(pg, nonempty_bucket, std::logical_or<bool>()));
    // Relax heavy edges for each of the vertices that we previously
    // deleted.
    for (typename std::vector<Vertex>::iterator iter = deleted_vertices.begin();
         iter != deleted_vertices.end(); ++iter) {
      // Relax each heavy edge.
      Vertex u = *iter;
      Dist u_dist = get(distance, u);
      BGL_FORALL_OUTEDGES_T(u, e, g, Graph)
        if (get(weight, e) > delta) // heavy edge
          relax(u, target(e, g), u_dist + get(weight, e));
    }
    // Go to the next bucket: the current bucket must already be empty.
    ++current_bucket;
  } while (true);
  // Delete all of the buckets.
  for (typename std::vector<Bucket*>::iterator iter = buckets.begin();
       iter != buckets.end(); ++iter) {
    if (*iter) {
      delete *iter;
      *iter = 0;
    }
  }
}
// Relax edge (u, v) with candidate distance x. If x improves on v's
// tentative distance and v is local, v is placed into (or moved
// into) the bucket for x and its predecessor/distance records are
// updated. If v is remote, a msg_relax message is sent to v's owner
// and x is cached locally.
template<typename Graph, typename PredecessorMap, typename DistanceMap,
         typename EdgeWeightMap>
void
delta_stepping_impl<Graph, PredecessorMap, DistanceMap, EdgeWeightMap>::
relax(Vertex u, Vertex v, Dist x)
{
#ifdef PBGL_DELTA_STEPPING_DEBUG
  std::cerr << "#" << process_id(pg) << ": relax("
            << get(vertex_global, g, u).second << "@"
            << get(vertex_global, g, u).first << ", "
            << get(vertex_global, g, v).second << "@"
            << get(vertex_global, g, v).first << ", "
            << x << ")" << std::endl;
#endif
  if (x < get(distance, v)) {
    // We're relaxing the edge to vertex v.
    if (get(owner, v) == process_id(pg)) {
      // Compute the new bucket index for v
      BucketIndex new_index = static_cast<BucketIndex>(x / delta);
      // Make sure there is enough room in the buckets data structure.
      if (new_index >= buckets.size()) buckets.resize(new_index + 1, 0);
      // Make sure that we have allocated the bucket itself.
      if (!buckets[new_index]) buckets[new_index] = new Bucket;
      if (get(distance, v) != (std::numeric_limits<Dist>::max)()
          && !vertex_was_deleted[get(local, v)]) {
        // We're moving v from an old bucket into a new one. Compute
        // the old index, then splice it in. splice() reuses the list
        // node, so position_in_bucket[v] stays valid.
        BucketIndex old_index
          = static_cast<BucketIndex>(get(distance, v) / delta);
        buckets[new_index]->splice(buckets[new_index]->end(),
                                   *buckets[old_index],
                                   position_in_bucket[get(local, v)]);
      } else {
        // We're inserting v into a bucket for the first time. Put it
        // at the end.
        buckets[new_index]->push_back(v);
      }
      // v is now at the last position in the new bucket
      position_in_bucket[get(local, v)] = buckets[new_index]->end();
      --position_in_bucket[get(local, v)];
      // Update predecessor and tentative distance information
      put(predecessor, v, u);
      put(distance, v, x);
    } else {
#ifdef PBGL_DELTA_STEPPING_DEBUG
      std::cerr << "#" << process_id(pg) << ": sending relax("
                << get(vertex_global, g, u).second << "@"
                << get(vertex_global, g, u).first << ", "
                << get(vertex_global, g, v).second << "@"
                << get(vertex_global, g, v).first << ", "
                << x << ") to " << get(owner, v) << std::endl;
#endif
      // The vertex is remote: send a request to the vertex's owner
      send(pg, get(owner, v), msg_relax,
           std::make_pair(v, MessageValue::create(x, u)));
      // Cache tentative distance information
      cache(distance, v, x);
    }
  }
}
// Synchronize at the process-group level. Incoming msg_relax
// messages are handled by the trigger installed in setup_triggers;
// the commented-out code below is the pre-trigger explicit receive
// loop, kept for reference.
template<typename Graph, typename PredecessorMap, typename DistanceMap,
         typename EdgeWeightMap>
void
delta_stepping_impl<Graph, PredecessorMap, DistanceMap, EdgeWeightMap>::
synchronize()
{
  using boost::graph::parallel::synchronize;
  // Synchronize at the process group level.
  synchronize(pg);
  // Receive any relaxation request messages.
//   typedef typename ProcessGroup::process_id_type process_id_type;
//   while (optional<std::pair<process_id_type, int> > stp = probe(pg)) {
//     // Receive the relaxation message
//     assert(stp->second == msg_relax);
//     std::pair<Vertex, typename MessageValue::type> data;
//     receive(pg, stp->first, stp->second, data);
//     // Turn it into a "relax" call
//     handle_relax_message(data.first, data.second);
//   }
}
// Register handle_msg_relax as the trigger for msg_relax messages,
// so incoming relaxation requests are handled as they are received.
template<typename Graph, typename PredecessorMap, typename DistanceMap,
         typename EdgeWeightMap>
void
delta_stepping_impl<Graph, PredecessorMap, DistanceMap, EdgeWeightMap>::
setup_triggers()
{
  using boost::graph::parallel::simple_trigger;
  simple_trigger(pg, msg_relax, this,
                 &delta_stepping_impl::handle_msg_relax);
}
template<typename Graph, typename PredecessorMap, typename DistanceMap,
typename EdgeWeightMap>
void
delta_stepping_shortest_paths
(const Graph& g,
typename graph_traits<Graph>::vertex_descriptor s,
PredecessorMap predecessor, DistanceMap distance, EdgeWeightMap weight,
typename property_traits<EdgeWeightMap>::value_type delta)
{
// The "distance" map needs to act like one, retrieving the default
// value of infinity.
set_property_map_role(vertex_distance, distance);
// Construct the implementation object, which will perform all of
// the actual work.
delta_stepping_impl<Graph, PredecessorMap, DistanceMap, EdgeWeightMap>
impl(g, predecessor, distance, weight, delta);
// Run the delta-stepping algorithm. The results will show up in
// "predecessor" and "weight".
impl.run(s);
}
template<typename Graph, typename PredecessorMap, typename DistanceMap,
typename EdgeWeightMap>
void
delta_stepping_shortest_paths
(const Graph& g,
typename graph_traits<Graph>::vertex_descriptor s,
PredecessorMap predecessor, DistanceMap distance, EdgeWeightMap weight)
{
// The "distance" map needs to act like one, retrieving the default
// value of infinity.
set_property_map_role(vertex_distance, distance);
// Construct the implementation object, which will perform all of
// the actual work.
delta_stepping_impl<Graph, PredecessorMap, DistanceMap, EdgeWeightMap>
impl(g, predecessor, distance, weight);
// Run the delta-stepping algorithm. The results will show up in
// "predecessor" and "weight".
impl.run(s);
}
} } } // end namespace boost::graph::distributed
#endif // BOOST_GRAPH_DELTA_STEPPING_SHORTEST_PATHS_HPP

View File

@@ -0,0 +1,307 @@
// Copyright (C) 2004-2008 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_DISTRIBUTED_DFS_HPP
#define BOOST_GRAPH_DISTRIBUTED_DFS_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/graph_traits.hpp>
#include <boost/property_map/property_map.hpp>
#include <boost/graph/overloading.hpp>
#include <boost/graph/properties.hpp>
#include <boost/graph/distributed/concepts.hpp>
#include <boost/static_assert.hpp>
#include <boost/assert.hpp>
#include <boost/graph/parallel/process_group.hpp>
#include <boost/graph/parallel/container_traits.hpp>
namespace boost {
namespace graph { namespace distributed { namespace detail {
/**
 * Distributed depth-first search over an undirected distributed graph.
 *
 * A single "center of activity" token is passed between processes via
 * out-of-band messages: the process owning the current vertex extends
 * the DFS tree along one unexplored edge at a time
 * (shift_center_of_activity), returns to the parent when a vertex is
 * finished (return_msg), and broadcasts visited notifications so other
 * processes can mark already-discovered neighbors gray without an
 * extra round trip.  Termination is signalled by a done_msg propagated
 * down a binary tree of process ids.
 *
 * NOTE(review): this appears to follow the Cidon/Tsin style of
 * distributed DFS (see the public tsin_depth_first_visit entry
 * points) — confirm against the Parallel BGL documentation.
 */
template<typename DistributedGraph, typename ColorMap, typename ParentMap,
         typename ExploreMap, typename VertexIndexMap, typename DFSVisitor>
class parallel_dfs
{
  typedef typename graph_traits<DistributedGraph>::vertex_iterator
    vertex_iterator;
  typedef typename graph_traits<DistributedGraph>::vertex_descriptor
    vertex_descriptor;
  typedef typename graph_traits<DistributedGraph>::out_edge_iterator
    out_edge_iterator;

  typedef typename boost::graph::parallel::process_group_type<DistributedGraph>
    ::type process_group_type;
  typedef typename process_group_type::process_id_type process_id_type;

  /**
   * The first vertex in the pair is the local node (i) and the
   * second vertex in the pair is the (possibly remote) node (j).
   */
  typedef boost::parallel::detail::untracked_pair<vertex_descriptor, vertex_descriptor> vertex_pair;

  typedef typename property_traits<ColorMap>::value_type color_type;
  typedef color_traits<color_type> Color;

  // Message types (values spaced apart; transmitted as raw ints)
  enum { discover_msg = 10, return_msg = 50, visited_msg = 100 , done_msg = 150};

public:
  /**
   * Capture the graph, the exterior property maps, and the visitor.
   * next_out_edge records, per local vertex, the next unexplored
   * out-edge so the search can resume where it left off when the
   * center of activity returns to that vertex.
   */
  parallel_dfs(const DistributedGraph& g, ColorMap color,
               ParentMap parent, ExploreMap explore,
               VertexIndexMap index_map, DFSVisitor vis)
    : g(g), color(color), parent(parent), explore(explore),
      index_map(index_map), vis(vis), pg(process_group(g)),
      owner(get(vertex_owner, g)), next_out_edge(num_vertices(g))
  { }

  /**
   * Run the distributed DFS from source vertex @p s.  Every process
   * must call this collectively; each spins in a message loop until a
   * done_msg arrives.
   */
  void run(vertex_descriptor s)
  {
    // Initialize every local vertex: white, self-parent, self-explore,
    // and reset its out-edge cursor.
    vertex_iterator vi, vi_end;
    for (boost::tie(vi, vi_end) = vertices(g); vi != vi_end; ++vi) {
      put(color, *vi, Color::white());
      put(parent, *vi, *vi);
      put(explore, *vi, *vi);
      next_out_edge[get(index_map, *vi)] = out_edges(*vi, g).first;
      vis.initialize_vertex(*vi, g);
    }

    vis.start_vertex(s, g);

    // Only the owner of s seeds the search, by sending itself the
    // initial discover message (s discovered from s).
    if (get(owner, s) == process_id(pg)) {
      send_oob(pg, get(owner, s), discover_msg, vertex_pair(s, s));
    }

    // Message-driven main loop; poll(true) blocks until a message arrives.
    bool done = false;
    while (!done) {
      std::pair<process_id_type, int> msg = *pg.poll(true);

      switch (msg.second) {
      case discover_msg:  // the center of activity moved to local vertex p.first
        {
          vertex_pair p;
          receive_oob(pg, msg.first, msg.second, p);

          if (p.first != p.second) {
            // delete j from nomessage(j)
            if (get(color, p.second) != Color::black())
              local_put(color, p.second, Color::gray());

            // If p.second was exactly the edge we were exploring from
            // p.first, resume exploration there instead of discovering.
            if (recover(p)) break;
          }

          if (get(color, p.first) == Color::white()) {
            put(color, p.first, Color::gray());
            put(parent, p.first, p.second);

            vis.discover_vertex(p.first, g);

            // Try to push the center of activity along a tree edge;
            // returns true only when the whole search terminated.
            if (shift_center_of_activity(p.first)) break;

            out_edge_iterator ei, ei_end;
            for (boost::tie(ei,ei_end) = out_edges(p.first, g); ei != ei_end; ++ei)
            {
              // Notify everyone who may not know that the source
              // vertex has been visited. They can then mark the
              // corresponding color map entry gray.
              if (get(parent, p.first) != target(*ei, g)
                  && get(explore, p.first) != target(*ei, g)) {
                vertex_pair visit(target(*ei, g), p.first);

                send_oob(pg, get(owner, target(*ei, g)), visited_msg, visit);
              }
            }
          }
        }
        break;

      case visited_msg:  // a neighbor announces that p.first was visited from p.second
        {
          vertex_pair p;
          receive_oob(pg, msg.first, msg.second, p);

          // delete j from nomessage(j)
          if (get(color, p.second) != Color::black())
            local_put(color, p.second, Color::gray());

          recover(p);
        }
        break;

      case return_msg:  // child p.second finished; activity returns to p.first
        {
          vertex_pair p;
          receive_oob(pg, msg.first, msg.second, p);

          // delete j from nomessage(i)
          local_put(color, p.second, Color::black());

          shift_center_of_activity(p.first);
        }
        break;

      case done_msg:
        {
          receive_oob(pg, msg.first, msg.second, done);

          // Propagate done message downward in tree
          done = true;
          process_id_type id = process_id(pg);
          process_id_type left = 2*id + 1;
          process_id_type right = left + 1;
          if (left < num_processes(pg))
            send_oob(pg, left, done_msg, done);
          if (right < num_processes(pg))
            send_oob(pg, right, done_msg, done);
        }
        break;

      default:
        BOOST_ASSERT(false);
      }
    }
  }

private:
  /**
   * If the reported visit (p.second) is exactly the edge currently
   * being explored from p.first, resume exploration from p.first.
   * Returns true when that resumption finished the entire search.
   */
  bool recover(const vertex_pair& p)
  {
    if (get(explore, p.first) == p.second) {
      return shift_center_of_activity(p.first);
    }
    else
      return false;
  }

  /**
   * Advance i's out-edge cursor to the next white neighbor and hand
   * the center of activity to it (discover_msg).  When no candidate
   * remains, finish i: blacken it, return to the parent, or — if i is
   * the root — announce global termination.  Returns true only in the
   * terminating (root-finished) case.
   */
  bool shift_center_of_activity(vertex_descriptor i)
  {
    for (out_edge_iterator ei = next_out_edge[get(index_map, i)],
                           ei_end = out_edges(i, g).second;
         ei != ei_end; ++ei) {
      vis.examine_edge(*ei, g);

      vertex_descriptor k = target(*ei, g);
      color_type target_color = get(color, k);
      if (target_color == Color::black()) vis.forward_or_cross_edge(*ei, g);
      else if (target_color == Color::gray()) vis.back_edge(*ei, g);
      else {
        // White target: make it the explored edge and pass the token.
        put(explore, i, k);
        vis.tree_edge(*ei, g);
        vertex_pair p(k, i);
        send_oob(pg, get(owner, k), discover_msg, p);
        // Remember the position after this edge for the next resume.
        next_out_edge[get(index_map, i)] = ++ei;
        return false;
      }
    }

    // No more edges to explore: vertex i is finished.
    next_out_edge[get(index_map, i)] = out_edges(i, g).second;
    put(explore, i, i);
    put(color, i, Color::black());
    vis.finish_vertex(i, g);

    if (get(parent, i) == i) {
      // i is the DFS root: tell process 0 the search is complete.
      send_oob(pg, 0, done_msg, true);
      return true;
    }
    else {
      // Hand the center of activity back to the parent.
      vertex_pair ret(get(parent, i), i);
      send_oob(pg, get(owner, ret.first), return_msg, ret);
    }
    return false;
  }

  const DistributedGraph& g;
  ColorMap color;          ///< white = undiscovered, gray = discovered, black = finished
  ParentMap parent;        ///< DFS tree parent (self for roots/uninitialized)
  ExploreMap explore;      ///< the neighbor currently being explored from each vertex
  VertexIndexMap index_map;
  DFSVisitor vis;
  process_group_type pg;
  typename property_map<DistributedGraph, vertex_owner_t>::const_type owner;
  std::vector<out_edge_iterator> next_out_edge; ///< per-vertex resume cursor
};
} // end namespace detail
/**
 * Perform a distributed depth-first visit of the undirected graph
 * @p g starting at @p s, using caller-supplied color, parent and
 * explore property maps.  Collective: every process must call this.
 */
template<typename DistributedGraph, typename ColorMap, typename ParentMap,
         typename ExploreMap, typename VertexIndexMap, typename DFSVisitor>
void
tsin_depth_first_visit
  (const DistributedGraph& g,
   typename graph_traits<DistributedGraph>::vertex_descriptor s,
   DFSVisitor vis, ColorMap color, ParentMap parent, ExploreMap explore,
   VertexIndexMap index_map)
{
  typedef typename graph_traits<DistributedGraph>::directed_category
    directed_category;
  // This algorithm is only defined for undirected graphs.
  BOOST_STATIC_ASSERT(
    (is_convertible<directed_category, undirected_tag>::value));

  // Declare the role of the color map so the property-map layer can
  // apply the appropriate consistency model during the traversal.
  set_property_map_role(vertex_color, color);
  graph::distributed::detail::parallel_dfs
    <DistributedGraph, ColorMap, ParentMap, ExploreMap, VertexIndexMap,
     DFSVisitor> do_dfs(g, color, parent, explore, index_map, vis);
  do_dfs.run(s);

  // Make sure all processes leave together.
  using boost::graph::parallel::process_group;
  synchronize(process_group(g));
}
template<typename DistributedGraph, typename DFSVisitor,
typename VertexIndexMap>
void
tsin_depth_first_visit
(const DistributedGraph& g,
typename graph_traits<DistributedGraph>::vertex_descriptor s,
DFSVisitor vis,
VertexIndexMap index_map)
{
typedef typename graph_traits<DistributedGraph>::vertex_descriptor
vertex_descriptor;
std::vector<default_color_type> colors(num_vertices(g));
std::vector<vertex_descriptor> parent(num_vertices(g));
std::vector<vertex_descriptor> explore(num_vertices(g));
tsin_depth_first_visit
(g, s,
vis,
make_iterator_property_map(colors.begin(), index_map),
make_iterator_property_map(parent.begin(), index_map),
make_iterator_property_map(explore.begin(), index_map),
index_map);
}
template<typename DistributedGraph, typename DFSVisitor,
typename VertexIndexMap>
void
tsin_depth_first_visit
(const DistributedGraph& g,
typename graph_traits<DistributedGraph>::vertex_descriptor s,
DFSVisitor vis)
{
tsin_depth_first_visit(g, s, vis, get(vertex_index, g));
}
} // end namespace distributed
using distributed::tsin_depth_first_visit;
} // end namespace graph
/**
 * Distributed-graph overload of depth_first_visit: forwards to the
 * Tsin DFS using the graph's interior vertex_index map.  Collective
 * operation; only valid for undirected distributed graphs (enforced
 * by a static assertion inside tsin_depth_first_visit).
 */
template<typename DistributedGraph, typename DFSVisitor>
void
depth_first_visit
  (const DistributedGraph& g,
   typename graph_traits<DistributedGraph>::vertex_descriptor s,
   DFSVisitor vis)
{
  graph::tsin_depth_first_visit(g, s, vis, get(vertex_index, g));
}
} // end namespace boost
#endif // BOOST_GRAPH_DISTRIBUTED_DFS_HPP

View File

@@ -0,0 +1,50 @@
// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_PARALLEL_DIJKSTRA_DETAIL_HPP
#define BOOST_GRAPH_PARALLEL_DIJKSTRA_DETAIL_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/property_map/property_map.hpp>
namespace boost { namespace graph { namespace distributed { namespace detail {
/**********************************************************************
* Dijkstra queue message data *
**********************************************************************/
template<typename DistanceMap, typename PredecessorMap>
class dijkstra_msg_value
{
typedef typename property_traits<DistanceMap>::value_type distance_type;
typedef typename property_traits<PredecessorMap>::value_type
predecessor_type;
public:
typedef std::pair<distance_type, predecessor_type> type;
static type create(distance_type dist, predecessor_type pred)
{ return std::make_pair(dist, pred); }
};
/**
 * Specialization for when no predecessor map is recorded
 * (dummy_property_map): the message carries only the distance.
 */
template<typename DistanceMap>
class dijkstra_msg_value<DistanceMap, dummy_property_map>
{
  typedef typename property_traits<DistanceMap>::key_type vertex_type;

public:
  /// The wire type is just the distance value.
  typedef typename property_traits<DistanceMap>::value_type type;

  /// The predecessor argument is accepted but discarded.
  static type create(type dist, vertex_type)
  {
    return dist;
  }
};
/**********************************************************************/
} } } } // end namespace boost::graph::distributed::detail
#endif // BOOST_GRAPH_PARALLEL_DIJKSTRA_DETAIL_HPP

View File

@@ -0,0 +1,108 @@
// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_FILTERED_QUEUE_HPP
#define BOOST_FILTERED_QUEUE_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <algorithm>
namespace boost {
/** Queue adaptor that filters elements pushed into the queue
* according to some predicate.
*/
template<typename Buffer, typename Predicate>
class filtered_queue
{
public:
typedef Buffer buffer_type;
typedef Predicate predicate_type;
typedef typename Buffer::value_type value_type;
typedef typename Buffer::size_type size_type;
/**
* Constructs a new filtered queue with an initial buffer and a
* predicate.
*
* @param buffer the initial buffer
* @param pred the predicate
*/
explicit
filtered_queue(const buffer_type& buffer = buffer_type(),
const predicate_type& pred = predicate_type())
: buffer(buffer), pred(pred) {}
/** Push a value into the queue.
*
* If the predicate returns @c true for @p x, pushes @p x into the
* buffer.
*/
void push(const value_type& x) { if (pred(x)) buffer.push(x); }
/** Pop the front element off the buffer.
*
* @pre @c !empty()
*/
void pop() { buffer.pop(); }
/** Retrieve the front (top) element in the buffer.
*
* @pre @c !empty()
*/
value_type& top() { return buffer.top(); }
/**
* \overload
*/
const value_type& top() const { return buffer.top(); }
/** Determine the number of elements in the buffer. */
size_type size() const { return buffer.size(); }
/** Determine if the buffer is empty. */
bool empty() const { return buffer.empty(); }
/** Get a reference to the underlying buffer. */
buffer_type& base() { return buffer; }
const buffer_type& base() const { return buffer; }
/** Swap the contents of this with @p other. */
void swap(filtered_queue& other)
{
using std::swap;
swap(buffer, other.buffer);
swap(pred, other.pred);
}
private:
buffer_type buffer;
predicate_type pred;
};
/** Factory: build a filtered_queue, deducing its type from the
 *  buffer and predicate arguments. */
template<typename Buffer, typename Predicate>
inline filtered_queue<Buffer, Predicate>
make_filtered_queue(const Buffer& buffer, const Predicate& pred)
{
  return filtered_queue<Buffer, Predicate>(buffer, pred);
}
/** Free-function swap for filtered_queue, delegating to the
 *  (symmetric) member swap. */
template<typename Buffer, typename Predicate>
inline void
swap(filtered_queue<Buffer, Predicate>& x,
     filtered_queue<Buffer, Predicate>& y)
{
  y.swap(x);
}
} // end namespace boost
#endif // BOOST_FILTERED_QUEUE_HPP

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,177 @@
// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
#include <boost/optional.hpp>
#include <cassert>
#include <boost/graph/parallel/algorithm.hpp>
#include <boost/graph/parallel/process_group.hpp>
#include <functional>
#include <algorithm>
#include <boost/graph/parallel/simple_trigger.hpp>
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
namespace boost { namespace graph { namespace distributed {
/// Construct with an explicit initial buffer.  In non-polling mode a
/// per-destination outgoing buffer is allocated so pushes can be
/// batched until the next synchronization.
template<BOOST_DISTRIBUTED_QUEUE_PARMS>
BOOST_DISTRIBUTED_QUEUE_TYPE::
distributed_queue(const ProcessGroup& process_group, const OwnerMap& owner,
                  const Buffer& buffer, bool polling)
  : process_group(process_group, attach_distributed_object()),
    owner(owner),
    buffer(buffer),
    polling(polling)
{
  if (!polling)
    outgoing_buffers.reset(
      new outgoing_buffers_t(num_processes(process_group)));
  setup_triggers();
}

/// Construct with an initial buffer and a predicate that filters
/// incoming pushes (applied on the receiving side).
template<BOOST_DISTRIBUTED_QUEUE_PARMS>
BOOST_DISTRIBUTED_QUEUE_TYPE::
distributed_queue(const ProcessGroup& process_group, const OwnerMap& owner,
                  const Buffer& buffer, const UnaryPredicate& pred,
                  bool polling)
  : process_group(process_group, attach_distributed_object()),
    owner(owner),
    buffer(buffer),
    pred(pred),
    polling(polling)
{
  if (!polling)
    outgoing_buffers.reset(
      new outgoing_buffers_t(num_processes(process_group)));
  setup_triggers();
}

/// Construct with a predicate only; the buffer is default-constructed.
template<BOOST_DISTRIBUTED_QUEUE_PARMS>
BOOST_DISTRIBUTED_QUEUE_TYPE::
distributed_queue(const ProcessGroup& process_group, const OwnerMap& owner,
                  const UnaryPredicate& pred, bool polling)
  : process_group(process_group, attach_distributed_object()),
    owner(owner),
    pred(pred),
    polling(polling)
{
  if (!polling)
    outgoing_buffers.reset(
      new outgoing_buffers_t(num_processes(process_group)));
  setup_triggers();
}
template<BOOST_DISTRIBUTED_QUEUE_PARMS>
void
BOOST_DISTRIBUTED_QUEUE_TYPE::push(const value_type& x)
{
typename ProcessGroup::process_id_type dest = get(owner, x);
if (outgoing_buffers)
outgoing_buffers->at(dest).push_back(x);
else if (dest == process_id(process_group))
buffer.push(x);
else
send(process_group, get(owner, x), msg_push, x);
}
/// Collective emptiness test: spins (synchronizing with all other
/// processes) until either the local buffer becomes nonempty or a
/// global reduction shows every buffer is empty with no messages in
/// transit.
template<BOOST_DISTRIBUTED_QUEUE_PARMS>
bool
BOOST_DISTRIBUTED_QUEUE_TYPE::empty() const
{
  /* Processes will stay here until the buffer is nonempty or
     synchronization with the other processes indicates that all local
     buffers are empty (and no messages are in transit).
   */
  while (buffer.empty() && !do_synchronize()) ;

  return buffer.empty();
}
/// Local size after settling: calls empty() first for its
/// synchronization side effect so pending incoming pushes are
/// reflected in the count.
template<BOOST_DISTRIBUTED_QUEUE_PARMS>
typename BOOST_DISTRIBUTED_QUEUE_TYPE::size_type
BOOST_DISTRIBUTED_QUEUE_TYPE::size() const
{
  empty();
  return buffer.size();
}
/// Register message handlers: single-value pushes (msg_push) and
/// batched pushes (msg_multipush) dispatch to the corresponding
/// handle_* members.
template<BOOST_DISTRIBUTED_QUEUE_PARMS>
void BOOST_DISTRIBUTED_QUEUE_TYPE::setup_triggers()
{
  using boost::graph::parallel::simple_trigger;

  simple_trigger(process_group, msg_push, this,
                 &distributed_queue::handle_push);
  simple_trigger(process_group, msg_multipush, this,
                 &distributed_queue::handle_multipush);
}
/// msg_push handler: enqueue the received value locally, subject to
/// the receiver-side predicate.
template<BOOST_DISTRIBUTED_QUEUE_PARMS>
void
BOOST_DISTRIBUTED_QUEUE_TYPE::
handle_push(int /*source*/, int /*tag*/, const value_type& value,
            trigger_receive_context)
{
  if (pred(value)) buffer.push(value);
}
/// msg_multipush handler: enqueue each value from a batched push,
/// filtering each through the receiver-side predicate.
template<BOOST_DISTRIBUTED_QUEUE_PARMS>
void
BOOST_DISTRIBUTED_QUEUE_TYPE::
handle_multipush(int /*source*/, int /*tag*/,
                 const std::vector<value_type>& values,
                 trigger_receive_context)
{
  for (std::size_t i = 0; i < values.size(); ++i)
    if (pred(values[i])) buffer.push(values[i]);
}
/// Collective synchronization step: flush any batched outgoing pushes
/// (delivering self-addressed ones directly), synchronize the process
/// group, then all-reduce the local buffer sizes.  Returns true when
/// every process's buffer is empty (global termination).
template<BOOST_DISTRIBUTED_QUEUE_PARMS>
bool
BOOST_DISTRIBUTED_QUEUE_TYPE::do_synchronize() const
{
#ifdef PBGL_ACCOUNTING
  ++num_synchronizations;
#endif

  using boost::parallel::all_reduce;
  using std::swap;

  typedef typename ProcessGroup::process_id_type process_id_type;

  if (outgoing_buffers) {
    // Transfer all of the push requests
    process_id_type id = process_id(process_group);
    process_id_type np = num_processes(process_group);
    for (process_id_type dest = 0; dest < np; ++dest) {
      outgoing_buffer_t& outgoing = outgoing_buffers->at(dest);
      std::size_t size = outgoing.size();
      if (size != 0) {
        if (dest != id) {
          // Remote destination: one batched message.
          send(process_group, dest, msg_multipush, outgoing);
        } else {
          // Self-addressed pushes go straight into the local buffer.
          for (std::size_t i = 0; i < size; ++i)
            buffer.push(outgoing[i]);
        }
        outgoing.clear();
      }
    }
  }
  synchronize(process_group);

  // Global termination test: sum of all local buffer sizes.
  unsigned local_size = buffer.size();
  unsigned global_size =
    all_reduce(process_group, local_size, std::plus<unsigned>());
  return global_size == 0;
}
} } } // end namespace boost::graph::distributed

View File

@@ -0,0 +1,259 @@
// Copyright (C) 2005-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_DETAIL_REMOTE_UPDATE_SET_HPP
#define BOOST_GRAPH_DETAIL_REMOTE_UPDATE_SET_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/parallel/process_group.hpp>
#include <boost/type_traits/is_convertible.hpp>
#include <vector>
#include <boost/assert.hpp>
#include <boost/optional.hpp>
#include <queue>
namespace boost { namespace graph { namespace detail {
/// Thin forwarding helper so remote_update_set members can call
/// synchronize() without spelling out the namespace at each site.
template<typename ProcessGroup>
void do_synchronize(ProcessGroup& group)
{
  using boost::parallel::synchronize;
  synchronize(group);
}
/// Tag: updates are queued locally and delivered at synchronization.
struct remote_set_queued {};
/// Tag: updates are sent as soon as update() is called.
struct remote_set_immediate {};

/// Selects the messaging semantics for a process group: BSP-style
/// groups get queued delivery, everything else immediate delivery.
template<typename ProcessGroup>
class remote_set_semantics
{
  BOOST_STATIC_CONSTANT
    (bool,
     queued = (is_convertible<
                 typename ProcessGroup::communication_category,
                 parallel::bsp_process_group_tag>::value));

public:
  typedef typename mpl::if_c<queued,
                             remote_set_queued,
                             remote_set_immediate>::type type;
};

// Primary template; specialized below on the Semantics tag.
template<typename Derived, typename ProcessGroup, typename Value,
         typename OwnerMap,
         typename Semantics = typename remote_set_semantics<ProcessGroup>::type>
class remote_update_set;
/**********************************************************************
 * Remote updating set that queues messages until synchronization     *
 **********************************************************************/
template<typename Derived, typename ProcessGroup, typename Value,
         typename OwnerMap>
class remote_update_set<Derived, ProcessGroup, Value, OwnerMap,
                        remote_set_queued>
{
  typedef typename property_traits<OwnerMap>::key_type Key;
  typedef std::vector<std::pair<Key, Value> > Updates;
  typedef typename Updates::size_type updates_size_type;
  typedef typename Updates::value_type updates_pair_type;

public:
private:
  typedef typename ProcessGroup::process_id_type process_id_type;

  enum message_kind {
    /** Message containing the number of updates that will be sent in
     * a msg_updates message that will immediately follow. This
     * message will contain a single value of type
     * updates_size_type.
     */
    msg_num_updates,

    /** Contains (key, value) pairs with all of the updates from a
     * particular source. The number of updates is variable, but will
     * be provided in a msg_num_updates message that immediately
     * precedes this message.
     *
     */
    msg_updates
  };

  /// Message handler installed on the process group; pairs each
  /// msg_num_updates count with the msg_updates batch that follows it.
  struct handle_messages
  {
    explicit
    handle_messages(remote_update_set* self, const ProcessGroup& pg)
      : self(self), update_sizes(num_processes(pg), 0) { }

    void operator()(process_id_type source, int tag)
    {
      switch(tag) {
      case msg_num_updates:
        {
          // Receive the # of updates
          updates_size_type num_updates;
          receive(self->process_group, source, tag, num_updates);

          update_sizes[source] = num_updates;
        }
        break;

      case msg_updates:
        {
          // The count for this source must have arrived first.
          updates_size_type num_updates = update_sizes[source];
          BOOST_ASSERT(num_updates);

          // Receive the actual updates
          std::vector<updates_pair_type> updates(num_updates);
          receive(self->process_group, source, msg_updates, &updates[0],
                  num_updates);

          // Send updates to derived "receive_update" member
          Derived* derived = static_cast<Derived*>(self);
          for (updates_size_type u = 0; u < num_updates; ++u)
            derived->receive_update(source, updates[u].first, updates[u].second);

          update_sizes[source] = 0;
        }
        break;
      };
    }

  private:
    remote_update_set* self;
    std::vector<updates_size_type> update_sizes; ///< pending count per source
  };
  friend struct handle_messages;

protected:
  remote_update_set(const ProcessGroup& pg, const OwnerMap& owner)
    : process_group(pg, handle_messages(this, pg)),
      updates(num_processes(pg)), owner(owner) {
  }

  /// Apply a local update directly; queue a remote update until the
  /// next synchronize().
  void update(const Key& key, const Value& value)
  {
    if (get(owner, key) == process_id(process_group)) {
      Derived* derived = static_cast<Derived*>(this);
      derived->receive_update(get(owner, key), key, value);
    }
    else {
      updates[get(owner, key)].push_back(std::make_pair(key, value));
    }
  }

  /// Nothing to do in queued semantics; delivery happens in synchronize().
  void collect() { }

  /// Flush every queued batch (count message, then the batch itself)
  /// and synchronize the process group.
  void synchronize()
  {
    // Emit all updates and then remove them
    process_id_type num_processes = updates.size();
    for (process_id_type p = 0; p < num_processes; ++p) {
      if (!updates[p].empty()) {
        send(process_group, p, msg_num_updates, updates[p].size());
        send(process_group, p, msg_updates,
             &updates[p].front(), updates[p].size());
        updates[p].clear();
      }
    }

    do_synchronize(process_group);
  }

  ProcessGroup process_group;

private:
  std::vector<Updates> updates; ///< outgoing (key,value) batches per destination
  OwnerMap owner;
};
/**********************************************************************
 * Remote updating set that sends messages immediately                *
 **********************************************************************/
template<typename Derived, typename ProcessGroup, typename Value,
         typename OwnerMap>
class remote_update_set<Derived, ProcessGroup, Value, OwnerMap,
                        remote_set_immediate>
{
  typedef typename property_traits<OwnerMap>::key_type Key;
  typedef std::pair<Key, Value> update_pair_type;
  typedef typename std::vector<update_pair_type>::size_type updates_size_type;

public:
  typedef typename ProcessGroup::process_id_type process_id_type;

private:
  enum message_kind {
    /** Contains a (key, value) pair that will be updated. */
    msg_update
  };

  /// Message handler: each msg_update carries exactly one (key, value)
  /// pair, which is handed straight to the derived class.
  struct handle_messages
  {
    explicit handle_messages(remote_update_set* self, const ProcessGroup& pg)
      : self(self)
    { update_sizes.resize(num_processes(pg), 0); }

    void operator()(process_id_type source, int tag)
    {
      // Only msg_update is expected in immediate mode.
      BOOST_ASSERT(tag == msg_update);
      update_pair_type update;
      receive(self->process_group, source, tag, update);

      // Send update to derived "receive_update" member
      Derived* derived = static_cast<Derived*>(self);
      derived->receive_update(source, update.first, update.second);
    }

  private:
    // NOTE(review): update_sizes is never read in this specialization;
    // it appears to be a leftover from the queued variant.
    std::vector<updates_size_type> update_sizes;
    remote_update_set* self;
  };
  friend struct handle_messages;

protected:
  remote_update_set(const ProcessGroup& pg, const OwnerMap& owner)
    : process_group(pg, handle_messages(this, pg)), owner(owner) { }

  /// Apply a local update directly; send a remote update right away.
  void update(const Key& key, const Value& value)
  {
    if (get(owner, key) == process_id(process_group)) {
      Derived* derived = static_cast<Derived*>(this);
      derived->receive_update(get(owner, key), key, value);
    }
    else
      send(process_group, get(owner, key), msg_update,
           update_pair_type(key, value));
  }

  /// Drain any msg_update messages that have already arrived.
  void collect()
  {
    typedef std::pair<process_id_type, int> probe_type;
    handle_messages handler(this, process_group);
    while (optional<probe_type> stp = probe(process_group))
      if (stp->second == msg_update) handler(stp->first, stp->second);
  }

  void synchronize()
  {
    do_synchronize(process_group);
  }

  ProcessGroup process_group;
  OwnerMap owner;
};
} } } // end namespace boost::graph::detail
#endif // BOOST_GRAPH_DETAIL_REMOTE_UPDATE_SET_HPP

View File

@@ -0,0 +1,84 @@
// -*- C++ -*-
// Copyright (C) 2007 Douglas Gregor <doug.gregor@gmail.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_GRAPH_DISTRIBUTED_TAG_ALLOCATOR_HPP
#define BOOST_GRAPH_DISTRIBUTED_TAG_ALLOCATOR_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <vector>
namespace boost { namespace graph { namespace distributed { namespace detail {
/**
 * \brief The tag allocator allows clients to request unique tags that
 * can be used for one-time communications.
 *
 * The tag allocator hands out tag values from a predefined maximum
 * (given in the constructor) moving downward. Tags are provided one
 * at a time via a @c token. When the @c token goes out of scope, the
 * tag is returned and may be reallocated. These tags should be used,
 * for example, for one-time communication of values.
 */
class tag_allocator {
public:
  class token;
  friend class token;

  /**
   * Construct a new tag allocator that provides unique tags starting
   * with the value @p top_tag and moving lower, as necessary.
   */
  explicit tag_allocator(int top_tag) : bottom(top_tag) { }

  /**
   * Retrieve a new tag. The token itself holds onto the tag, which
   * will be released when the token is destroyed.
   */
  token get_tag();

private:
  int bottom;             ///< next fresh (never-used) tag value
  std::vector<int> freed; ///< previously used tags available for reuse
};
/**
 * A token used to represent an allocated tag.
 */
class tag_allocator::token {
public:
  /// Transfer ownership of the tag from @p other.
  token(const token& other);

  /// De-allocate the tag, if this token still owns it.
  ~token();

  /// Retrieve the tag allocated for this task.
  operator int() const { return tag_; }

private:
  /// Create a token with a specific tag from the given tag_allocator
  token(tag_allocator* allocator, int tag)
    : allocator(allocator), tag_(tag) { }

  /// Undefined: tokens are not copy-assignable
  token& operator=(const token&);

  /// The allocator from which this tag was allocated.
  tag_allocator* allocator;

  /// The stored tag flag. If -1, this token does not own the tag.
  /// Mutable so the copy constructor can disown a const source token
  /// when transferring ownership.
  mutable int tag_;

  friend class tag_allocator;
};
} } } } // end namespace boost::graph::distributed::detail
#endif // BOOST_GRAPH_DISTRIBUTED_TAG_ALLOCATOR_HPP

View File

@@ -0,0 +1,205 @@
// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_PARALLEL_DIJKSTRA_HPP
#define BOOST_GRAPH_PARALLEL_DIJKSTRA_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/dijkstra_shortest_paths.hpp>
#include <boost/graph/overloading.hpp>
#include <boost/graph/distributed/concepts.hpp>
#include <boost/graph/parallel/properties.hpp>
#include <boost/graph/distributed/crauser_et_al_shortest_paths.hpp>
#include <boost/graph/distributed/eager_dijkstra_shortest_paths.hpp>
namespace boost {
namespace graph { namespace detail {
/// Second-level dispatcher on the lookahead named parameter: when a
/// lookahead value was supplied (any type other than
/// error_property_not_found), run the eager algorithm with it.
template<typename Lookahead>
struct parallel_dijkstra_impl2
{
  template<typename DistributedGraph, typename DijkstraVisitor,
           typename PredecessorMap, typename DistanceMap,
           typename WeightMap, typename IndexMap, typename ColorMap,
           typename Compare, typename Combine, typename DistInf,
           typename DistZero>
  static void
  run(const DistributedGraph& g,
      typename graph_traits<DistributedGraph>::vertex_descriptor s,
      PredecessorMap predecessor, DistanceMap distance,
      typename property_traits<DistanceMap>::value_type lookahead,
      WeightMap weight, IndexMap index_map, ColorMap color_map,
      Compare compare, Combine combine, DistInf inf, DistZero zero,
      DijkstraVisitor vis)
  {
    eager_dijkstra_shortest_paths(g, s, predecessor, distance, lookahead,
                                  weight, index_map, color_map, compare,
                                  combine, inf, zero, vis);
  }
};
/// Specialization for the case where no lookahead parameter was
/// given: fall back to the Crauser et al. formulation, ignoring the
/// (absent) lookahead argument.
template<>
struct parallel_dijkstra_impl2< ::boost::detail::error_property_not_found >
{
  template<typename DistributedGraph, typename DijkstraVisitor,
           typename PredecessorMap, typename DistanceMap,
           typename WeightMap, typename IndexMap, typename ColorMap,
           typename Compare, typename Combine, typename DistInf,
           typename DistZero>
  static void
  run(const DistributedGraph& g,
      typename graph_traits<DistributedGraph>::vertex_descriptor s,
      PredecessorMap predecessor, DistanceMap distance,
      ::boost::detail::error_property_not_found,
      WeightMap weight, IndexMap index_map, ColorMap color_map,
      Compare compare, Combine combine, DistInf inf, DistZero zero,
      DijkstraVisitor vis)
  {
    crauser_et_al_shortest_paths(g, s, predecessor, distance, weight,
                                 index_map, color_map, compare, combine,
                                 inf, zero, vis);
  }
};
/// First-level dispatcher on the vertex_color named parameter: when a
/// user-supplied color map is present, pass it straight through to
/// the lookahead dispatcher.
template<typename ColorMap>
struct parallel_dijkstra_impl
{
  template<typename DistributedGraph, typename DijkstraVisitor,
           typename PredecessorMap, typename DistanceMap,
           typename Lookahead, typename WeightMap, typename IndexMap,
           typename Compare, typename Combine,
           typename DistInf, typename DistZero>
  static void
  run(const DistributedGraph& g,
      typename graph_traits<DistributedGraph>::vertex_descriptor s,
      PredecessorMap predecessor, DistanceMap distance,
      Lookahead lookahead,
      WeightMap weight, IndexMap index_map, ColorMap color_map,
      Compare compare, Combine combine, DistInf inf, DistZero zero,
      DijkstraVisitor vis)
  {
    graph::detail::parallel_dijkstra_impl2<Lookahead>
      ::run(g, s, predecessor, distance, lookahead, weight, index_map,
            color_map, compare, combine, inf, zero, vis);
  }
};
/// Specialization for the case where no color map was given: build a
/// default exterior color map over a local vector and run with that.
/// Relies on the BGL_FORALL_* iteration macros being available.
template<>
struct parallel_dijkstra_impl< ::boost::detail::error_property_not_found >
{
private:
  template<typename DistributedGraph, typename DijkstraVisitor,
           typename PredecessorMap, typename DistanceMap,
           typename Lookahead, typename WeightMap, typename IndexMap,
           typename ColorMap, typename Compare, typename Combine,
           typename DistInf, typename DistZero>
  static void
  run_impl(const DistributedGraph& g,
           typename graph_traits<DistributedGraph>::vertex_descriptor s,
           PredecessorMap predecessor, DistanceMap distance,
           Lookahead lookahead, WeightMap weight, IndexMap index_map,
           ColorMap color_map, Compare compare, Combine combine,
           DistInf inf, DistZero zero, DijkstraVisitor vis)
  {
    // Whiten remote targets too, so ghost-cell entries start clean.
    BGL_FORALL_VERTICES_T(u, g, DistributedGraph)
      BGL_FORALL_OUTEDGES_T(u, e, g, DistributedGraph)
        local_put(color_map, target(e, g), white_color);

    graph::detail::parallel_dijkstra_impl2<Lookahead>
      ::run(g, s, predecessor, distance, lookahead, weight, index_map,
            color_map, compare, combine, inf, zero, vis);
  }

public:
  template<typename DistributedGraph, typename DijkstraVisitor,
           typename PredecessorMap, typename DistanceMap,
           typename Lookahead, typename WeightMap, typename IndexMap,
           typename Compare, typename Combine,
           typename DistInf, typename DistZero>
  static void
  run(const DistributedGraph& g,
      typename graph_traits<DistributedGraph>::vertex_descriptor s,
      PredecessorMap predecessor, DistanceMap distance,
      Lookahead lookahead, WeightMap weight, IndexMap index_map,
      ::boost::detail::error_property_not_found,
      Compare compare, Combine combine, DistInf inf, DistZero zero,
      DijkstraVisitor vis)
  {
    typedef typename graph_traits<DistributedGraph>::vertices_size_type
      vertices_size_type;

    // Default color storage: one entry per local vertex, all white.
    vertices_size_type n = num_vertices(g);
    std::vector<default_color_type> colors(n, white_color);

    run_impl(g, s, predecessor, distance, lookahead, weight, index_map,
             make_iterator_property_map(colors.begin(), index_map),
             compare, combine, inf, zero, vis);
  }
};
} } // end namespace graph::detail
/** Dijkstra's single-source shortest paths algorithm for distributed
 * graphs (named-parameter entry point).
 *
 * Also implements the heuristics of:
 *
 *   Andreas Crauser, Kurt Mehlhorn, Ulrich Meyer, and Peter
 *   Sanders. A Parallelization of Dijkstra's Shortest Path
 *   Algorithm. In Lubos Brim, Jozef Gruska, and Jiri Zlatuska,
 *   editors, Mathematical Foundations of Computer Science (MFCS),
 *   volume 1450 of Lecture Notes in Computer Science, pages
 *   722--731, 1998. Springer.
 *
 * Dispatches on the types of the lookahead and vertex_color named
 * parameters via parallel_dijkstra_impl/parallel_dijkstra_impl2.
 *
 * Fix: the original also constructed a local default color vector and
 * an iterator_property_map over it that were never used — the
 * parallel_dijkstra_impl specialization already builds its own default
 * color map when none was supplied.  That dead code has been removed;
 * behavior is unchanged.
 */
template<typename DistributedGraph, typename DijkstraVisitor,
         typename PredecessorMap, typename DistanceMap,
         typename WeightMap, typename IndexMap, typename Compare,
         typename Combine, typename DistInf, typename DistZero,
         typename T, typename Tag, typename Base>
inline
void
dijkstra_shortest_paths
  (const DistributedGraph& g,
   typename graph_traits<DistributedGraph>::vertex_descriptor s,
   PredecessorMap predecessor, DistanceMap distance, WeightMap weight,
   IndexMap index_map,
   Compare compare, Combine combine, DistInf inf, DistZero zero,
   DijkstraVisitor vis,
   const bgl_named_params<T, Tag, Base>& params
   BOOST_GRAPH_ENABLE_IF_MODELS_PARM(DistributedGraph,distributed_graph_tag))
{
  // The static type of the vertex_color named parameter (or
  // error_property_not_found when it is absent) selects the
  // appropriate dispatcher specialization.
  typedef typename property_value< bgl_named_params<T, Tag, Base>,
    vertex_color_t>::type color_map_type;

  graph::detail::parallel_dijkstra_impl<color_map_type>
    ::run(g, s, predecessor, distance,
          get_param(params, lookahead_t()),
          weight, index_map,
          get_param(params, vertex_color),
          compare, combine, inf, zero, vis);
}
} // end namespace boost
#endif // BOOST_GRAPH_PARALLEL_DIJKSTRA_HPP

View File

@@ -0,0 +1,154 @@
// Copyright (C) 2005-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Peter Gottschling
// Douglas Gregor
// Andrew Lumsdaine
#include <boost/graph/iteration_macros.hpp>
#include <boost/property_map/parallel/global_index_map.hpp>
#ifndef BOOST_GRAPH_DISTRIBUTED_GRAPH_UTILITY_INCLUDE
#define BOOST_GRAPH_DISTRIBUTED_GRAPH_UTILITY_INCLUDE
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
namespace boost { namespace graph {
template <class Property, class Graph>
void property_on_inedges(Property p, const Graph& g)
{
BGL_FORALL_VERTICES_T(u, g, Graph)
BGL_FORALL_INEDGES_T(u, e, g, Graph)
request(p, e);
synchronize(p);
}
// For reverse graphs
template <class Property, class Graph>
void property_on_outedges(Property p, const Graph& g)
{
BGL_FORALL_VERTICES_T(u, g, Graph)
BGL_FORALL_OUTEDGES_T(u, e, g, Graph)
request(p, e);
synchronize(p);
}
template <class Property, class Graph>
void property_on_successors(Property p, const Graph& g)
{
BGL_FORALL_VERTICES_T(u, g, Graph)
BGL_FORALL_OUTEDGES_T(u, e, g, Graph)
request(p, target(e, g));
synchronize(p);
}
template <class Property, class Graph>
void property_on_predecessors(Property p, const Graph& g)
{
BGL_FORALL_VERTICES_T(u, g, Graph)
BGL_FORALL_INEDGES_T(u, e, g, Graph)
request(p, source(e, g));
synchronize(p);
}
// Like successors and predecessors but saves one synchronize (and a call)
template <class Property, class Graph>
void property_on_adjacents(Property p, const Graph& g)
{
BGL_FORALL_VERTICES_T(u, g, Graph) {
BGL_FORALL_OUTEDGES_T(u, e, g, Graph)
request(p, target(e, g));
BGL_FORALL_INEDGES_T(u, e, g, Graph)
request(p, source(e, g));
}
synchronize(p);
}
// Copy a per-vertex property: for every vertex u, read `p_in` at u and
// store the value into `p_out` at u.
template <class PropertyIn, class PropertyOut, class Graph>
void copy_vertex_property(PropertyIn p_in, PropertyOut p_out, Graph& g)
{
  BGL_FORALL_VERTICES_T(u, g, Graph)
    // Fixed: previously read get(p_in, g), i.e. keyed the property map
    // on the graph object instead of the vertex being copied.
    put(p_out, u, get(p_in, u));
}
// Copy a per-edge property: for every edge e, read `p_in` at e and
// store the value into `p_out` at e.
template <class PropertyIn, class PropertyOut, class Graph>
void copy_edge_property(PropertyIn p_in, PropertyOut p_out, Graph& g)
{
  BGL_FORALL_EDGES_T(e, g, Graph)
    // Fixed: previously read get(p_in, g), i.e. keyed the property map
    // on the graph object instead of the edge being copied.
    put(p_out, e, get(p_in, e));
}
namespace distributed {
// Define global_index<Graph> global(graph);
// Then global(v) returns global index of v
template <typename Graph>
struct global_index
{
  // Maps from local vertex index / per-vertex global descriptor to a
  // globally unique integer index, built collectively over the graph's
  // process group.
  typedef typename property_map<Graph, vertex_index_t>::const_type
  VertexIndexMap;
  typedef typename property_map<Graph, vertex_global_t>::const_type
  VertexGlobalMap;
  // Collective constructor: builds the underlying global index map from
  // the graph's process group, local vertex count, and its vertex_index
  // and vertex_global property maps.
  explicit global_index(Graph const& g)
    : global_index_map(process_group(g), num_vertices(g), get(vertex_index, g),
                       get(vertex_global, g)) {}
  // Returns the global index of vertex v.
  int operator() (typename graph_traits<Graph>::vertex_descriptor v)
  { return get(global_index_map, v); }
protected:
  boost::parallel::global_index_map<VertexIndexMap, VertexGlobalMap>
  global_index_map;
};
// Reducer for distributed property maps whose values should be summed
// across processes: the unary overload supplies the default (zero)
// value for an unseen key, and the ternary overload combines the local
// and remote contributions by addition.
template<typename T>
struct additive_reducer {
  BOOST_STATIC_CONSTANT(bool, non_default_resolver = true);

  template<typename K>
  T operator()(const K&) const { return T(0); }

  template<typename K>
  T operator()(const K&, const T& local, const T& remote) const
  {
    return local + remote;
  }
};
// Reducer for distributed property maps that keeps the minimum of the
// local and remote values.  The default for an unseen key is the
// largest representable T, so any real value wins the comparison.
template <typename T>
struct choose_min_reducer {
  BOOST_STATIC_CONSTANT(bool, non_default_resolver = true);

  template<typename K>
  T operator()(const K&) const { return (std::numeric_limits<T>::max)(); }

  template<typename K>
  T operator()(const K&, const T& x, const T& y) const
  {
    if (x < y)
      return x;
    return y;
  }
};
// To use a property map syntactically like a function
// Adapts a property map so it can be used syntactically like a
// function: property_map_reader<PM> f(pm); f(key) is get(pm, key).
template <typename PropertyMap>
struct property_map_reader
{
  explicit property_map_reader(PropertyMap map) : map_(map) {}

  template <typename T>
  typename PropertyMap::value_type
  operator() (const T& key)
  {
    return get(map_, key);
  }

private:
  PropertyMap map_;
};
} // namespace distributed
}} // namespace boost::graph
#endif // BOOST_GRAPH_DISTRIBUTED_GRAPH_UTILITY_INCLUDE

View File

@@ -0,0 +1,447 @@
// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
/**************************************************************************
* This source file implements a variation on distributed Dijkstra's *
* algorithm that can expose additional parallelism by permitting *
* vertices within a certain distance from the minimum to be processed, *
* even though they may not be at their final distance. This can *
* introduce looping, but the algorithm will still terminate so long as *
* there are no negative loops. *
**************************************************************************/
#ifndef BOOST_GRAPH_EAGER_DIJKSTRA_SHORTEST_PATHS_HPP
#define BOOST_GRAPH_EAGER_DIJKSTRA_SHORTEST_PATHS_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/assert.hpp>
#include <boost/graph/distributed/detail/dijkstra_shortest_paths.hpp>
#include <boost/property_map/parallel/caching_property_map.hpp>
#include <boost/pending/indirect_cmp.hpp>
#include <boost/graph/distributed/detail/remote_update_set.hpp>
#include <vector>
#include <boost/graph/breadth_first_search.hpp>
#include <boost/graph/dijkstra_shortest_paths.hpp>
#include <boost/graph/parallel/container_traits.hpp>
#ifdef PBGL_ACCOUNTING
# include <boost/graph/accounting.hpp>
# include <numeric>
#endif // PBGL_ACCOUNTING
#ifdef MUTABLE_QUEUE
# include <boost/pending/mutable_queue.hpp>
#endif
namespace boost { namespace graph { namespace distributed {
#ifdef PBGL_ACCOUNTING
// Accounting record for one run of eager_dijkstra_shortest_paths,
// collected only when PBGL_ACCOUNTING is defined.
struct eager_dijkstra_shortest_paths_stats_t
{
  /* The value of the lookahead parameter. */
  double lookahead;
  /* Total wall-clock time used by the algorithm.*/
  accounting::time_type execution_time;
  /* The number of vertices deleted in each superstep. */
  std::vector<std::size_t> deleted_vertices;
  // Print a human-readable summary of the collected statistics.
  template<typename OutputStream>
  void print(OutputStream& out)
  {
    double avg_deletions = std::accumulate(deleted_vertices.begin(),
                                           deleted_vertices.end(),
                                           0.0);
    // NOTE(review): if no supersteps were recorded this divides by
    // zero and prints NaN -- acceptable for debug output, but confirm.
    avg_deletions /= deleted_vertices.size();
    out << "Problem = \"Single-Source Shortest Paths\"\n"
        << "Algorithm = \"Eager Dijkstra\"\n"
        << "Function = eager_dijkstra_shortest_paths\n"
        << "(P) Lookahead = " << lookahead << "\n"
        << "Wall clock time = " << accounting::print_time(execution_time)
        << "\nSupersteps = " << deleted_vertices.size() << "\n"
        << "Avg. deletions per superstep = " << avg_deletions << "\n";
  }
};
// One statistics record per translation unit (internal linkage).
static eager_dijkstra_shortest_paths_stats_t eager_dijkstra_shortest_paths_stats;
#endif
namespace detail {
// Borrowed from BGL's dijkstra_shortest_paths
// BFS visitor that turns breadth_first_search into (eager, distributed)
// Dijkstra: edges are relaxed in examine_edge and the priority queue is
// updated when a relaxation succeeds.  Adapted from the sequential BGL
// dijkstra_shortest_paths visitor.
template <class UniformCostVisitor, class Queue,
          class WeightMap, class PredecessorMap, class DistanceMap,
          class BinaryFunction, class BinaryPredicate>
struct parallel_dijkstra_bfs_visitor : bfs_visitor<>
{
  typedef typename property_traits<DistanceMap>::value_type distance_type;
  // vis: user's Dijkstra visitor; Q: distributed priority queue;
  // w/p/d: weight, predecessor and distance maps; combine/compare
  // implement the (min, +) semiring; zero detects negative weights.
  parallel_dijkstra_bfs_visitor(UniformCostVisitor vis, Queue& Q,
                                WeightMap w, PredecessorMap p, DistanceMap d,
                                BinaryFunction combine, BinaryPredicate compare,
                                distance_type zero)
    : m_vis(vis), m_Q(Q), m_weight(w), m_predecessor(p), m_distance(d),
      m_combine(combine), m_compare(compare), m_zero(zero) { }
  template <class Vertex, class Graph>
  void initialize_vertex(Vertex u, Graph& g)
  { m_vis.initialize_vertex(u, g); }
  template <class Vertex, class Graph>
  void discover_vertex(Vertex u, Graph& g) { m_vis.discover_vertex(u, g); }
  template <class Vertex, class Graph>
  void examine_vertex(Vertex u, Graph& g) { m_vis.examine_vertex(u, g); }
  /* Since the eager formulation of Parallel Dijkstra's algorithm can
     loop, we may relax on *any* edge, not just those associated with
     white and gray targets. */
  template <class Edge, class Graph>
  void examine_edge(Edge e, Graph& g) {
    // Dijkstra requires non-negative weights.
    if (m_compare(get(m_weight, e), m_zero))
      boost::throw_exception(negative_edge());
    m_vis.examine_edge(e, g);
    // Wrap the maps in caching property maps so that reads/writes of
    // remote vertices during relax() hit a local cache rather than
    // issuing remote operations.
    boost::parallel::caching_property_map<PredecessorMap> c_pred(m_predecessor);
    boost::parallel::caching_property_map<DistanceMap> c_dist(m_distance);
    distance_type old_distance = get(c_dist, target(e, g));
    bool m_decreased = relax(e, g, m_weight, c_pred, c_dist,
                             m_combine, m_compare);
    /* On x86 Linux with optimization, we sometimes get into a
       horrible case where m_decreased is true but the distance hasn't
       actually changed. This occurs when the comparison inside
       relax() occurs with the 80-bit precision of the x87 floating
       point unit, but the difference is lost when the resulting
       values are written back to lower-precision memory (e.g., a
       double). With the eager Dijkstra's implementation, this results
       in looping. */
    if (m_decreased && old_distance != get(c_dist, target(e, g))) {
      m_Q.update(target(e, g));
      m_vis.edge_relaxed(e, g);
    } else
      m_vis.edge_not_relaxed(e, g);
  }
  template <class Vertex, class Graph>
  void finish_vertex(Vertex u, Graph& g) { m_vis.finish_vertex(u, g); }
  UniformCostVisitor m_vis;
  Queue& m_Q;
  WeightMap m_weight;
  PredecessorMap m_predecessor;
  DistanceMap m_distance;
  BinaryFunction m_combine;
  BinaryPredicate m_compare;
  distance_type m_zero;
};
/**********************************************************************
* Dijkstra queue that implements arbitrary "lookahead" *
**********************************************************************/
// Distributed priority queue implementing arbitrary "lookahead":
// empty() returns false as long as the local top vertex is within
// `lookahead` of the global minimum tentative distance, permitting
// speculative (possibly repeated) processing of vertices.  Remote
// distance updates arrive through the remote_update_set base class and
// are applied in receive_update().
template<typename Graph, typename Combine, typename Compare,
         typename VertexIndexMap, typename DistanceMap,
         typename PredecessorMap>
class lookahead_dijkstra_queue
  : public graph::detail::remote_update_set<
             lookahead_dijkstra_queue<
               Graph, Combine, Compare, VertexIndexMap, DistanceMap,
               PredecessorMap>,
             typename boost::graph::parallel::process_group_type<Graph>::type,
             typename dijkstra_msg_value<DistanceMap, PredecessorMap>::type,
             typename property_map<Graph, vertex_owner_t>::const_type>
{
  typedef typename graph_traits<Graph>::vertex_descriptor
    vertex_descriptor;
  typedef lookahead_dijkstra_queue self_type;
  typedef typename boost::graph::parallel::process_group_type<Graph>::type
    process_group_type;
  typedef dijkstra_msg_value<DistanceMap, PredecessorMap> msg_value_creator;
  typedef typename msg_value_creator::type msg_value_type;
  typedef typename property_map<Graph, vertex_owner_t>::const_type
    OwnerPropertyMap;
  typedef graph::detail::remote_update_set<self_type, process_group_type,
                                           msg_value_type, OwnerPropertyMap>
    inherited;
  // Priority queue for tentative distances
  typedef indirect_cmp<DistanceMap, Compare> queue_compare_type;
  typedef typename property_traits<DistanceMap>::value_type distance_type;
#ifdef MUTABLE_QUEUE
  typedef mutable_queue<vertex_descriptor, std::vector<vertex_descriptor>,
                        queue_compare_type, VertexIndexMap> queue_type;
#else
  typedef relaxed_heap<vertex_descriptor, queue_compare_type,
                       VertexIndexMap> queue_type;
#endif // MUTABLE_QUEUE
  typedef typename process_group_type::process_id_type process_id_type;
public:
  typedef vertex_descriptor value_type;
  lookahead_dijkstra_queue(const Graph& g,
                           const Combine& combine,
                           const Compare& compare,
                           const VertexIndexMap& id,
                           const DistanceMap& distance_map,
                           const PredecessorMap& predecessor_map,
                           distance_type lookahead)
    : inherited(boost::graph::parallel::process_group(g), get(vertex_owner, g)),
      queue(num_vertices(g), queue_compare_type(distance_map, compare), id),
      distance_map(distance_map),
      predecessor_map(predecessor_map),
      min_distance(0),
      lookahead(lookahead)
#ifdef PBGL_ACCOUNTING
    , local_deletions(0)
#endif
  { }
  // Route the vertex (with its current distance/predecessor) to its
  // owning process; a local push becomes a receive_update on the owner.
  void push(const value_type& x)
  {
    msg_value_type msg_value =
      msg_value_creator::create(get(distance_map, x),
                                predecessor_value(get(predecessor_map, x)));
    inherited::update(x, msg_value);
  }
  void update(const value_type& x) { push(x); }
  void pop()
  {
    queue.pop();
#ifdef PBGL_ACCOUNTING
    ++local_deletions;
#endif
  }
  value_type& top() { return queue.top(); }
  const value_type& top() const { return queue.top(); }
  // Collective-ish emptiness test: returns true only when *all*
  // processes have exhausted their queues; otherwise blocks (via
  // do_synchronize) until this process has a vertex within the
  // lookahead window.
  bool empty()
  {
    inherited::collect();
    // If there are no suitable messages, wait until we get something
    while (!has_suitable_vertex()) {
      if (do_synchronize()) return true;
    }
    // Return true only if nobody has any messages; false if we
    // have suitable messages
    return false;
  }
private:
  // When a real predecessor map is in use, pass the predecessor along
  // in the message...
  vertex_descriptor predecessor_value(vertex_descriptor v) const
  { return v; }
  // ...but when it is a dummy_property_map, send null_vertex instead.
  vertex_descriptor
  predecessor_value(property_traits<dummy_property_map>::reference) const
  { return graph_traits<Graph>::null_vertex(); }
  // A vertex is "suitable" if its tentative distance lies within the
  // lookahead window above the global minimum.
  bool has_suitable_vertex() const
  {
    return (!queue.empty()
            && get(distance_map, queue.top()) <= min_distance + lookahead);
  }
  // Synchronize with the other processes and recompute the global
  // minimum tentative distance.  Returns true iff every process's
  // queue is empty (global termination).
  bool do_synchronize()
  {
    using boost::parallel::all_reduce;
    using boost::parallel::minimum;
    inherited::synchronize();
    // TBD: could use combine here, but then we need to stop using
    // minimum<distance_type>() as the function object.
    distance_type local_distance =
      queue.empty()? (std::numeric_limits<distance_type>::max)()
      : get(distance_map, queue.top());
    all_reduce(this->process_group, &local_distance, &local_distance + 1,
               &min_distance, minimum<distance_type>());
#ifdef PBGL_ACCOUNTING
    std::size_t deletions = 0;
    all_reduce(this->process_group, &local_deletions, &local_deletions + 1,
               &deletions, std::plus<std::size_t>());
    if (process_id(this->process_group) == 0)
      eager_dijkstra_shortest_paths_stats.deleted_vertices
        .push_back(deletions);
    local_deletions = 0;
    // NOTE(review): asserts that at least one vertex was deleted per
    // superstep -- verify this invariant actually holds for all runs.
    BOOST_ASSERT(deletions > 0);
#endif
    return min_distance == (std::numeric_limits<distance_type>::max)();
  }
public:
  // Handler for a distance-only update message: keep the better (or
  // equal) distance and (re)queue the vertex.
  void
  receive_update(process_id_type source, vertex_descriptor vertex,
                 distance_type distance)
  {
    // Update the queue if the received distance is better than
    // the distance we know locally
    if (distance <= get(distance_map, vertex)) {
      // Update the local distance map
      put(distance_map, vertex, distance);
      bool is_in_queue = queue.contains(vertex);
      if (!is_in_queue)
        queue.push(vertex);
      else
        queue.update(vertex);
    }
  }
  // Handler for a (distance, predecessor) update message: record the
  // predecessor, then defer to the distance-only handler.
  void
  receive_update(process_id_type source, vertex_descriptor vertex,
                 std::pair<distance_type, vertex_descriptor> p)
  {
    if (p.first <= get(distance_map, vertex)) {
      put(predecessor_map, vertex, p.second);
      receive_update(source, vertex, p.first);
    }
  }
private:
  queue_type queue;
  DistanceMap distance_map;
  PredecessorMap predecessor_map;
  // Global minimum tentative distance from the last synchronization.
  distance_type min_distance;
  distance_type lookahead;
#ifdef PBGL_ACCOUNTING
  std::size_t local_deletions;
#endif
};
/**********************************************************************/
} // end namespace detail
// Full-parameter overload of eager Dijkstra: initializes the distance
// and predecessor maps, builds the lookahead queue and the Dijkstra
// BFS visitor, and drives the computation via breadth_first_search.
template<typename DistributedGraph, typename DijkstraVisitor,
         typename PredecessorMap, typename DistanceMap, typename WeightMap,
         typename IndexMap, typename ColorMap, typename Compare,
         typename Combine, typename DistInf, typename DistZero>
void
eager_dijkstra_shortest_paths
  (const DistributedGraph& g,
   typename graph_traits<DistributedGraph>::vertex_descriptor s,
   PredecessorMap predecessor, DistanceMap distance,
   typename property_traits<DistanceMap>::value_type lookahead,
   WeightMap weight, IndexMap index_map, ColorMap color_map,
   Compare compare, Combine combine, DistInf inf, DistZero zero,
   DijkstraVisitor vis)
{
  // NOTE(review): the three typedefs below are unused in this function
  // body -- candidates for removal.
  typedef typename boost::graph::parallel::process_group_type<DistributedGraph>::type
    process_group_type;
  typedef typename graph_traits<DistributedGraph>::vertex_descriptor
    Vertex;
  typedef typename graph_traits<DistributedGraph>::vertices_size_type
    vertices_size_type;
#ifdef PBGL_ACCOUNTING
  eager_dijkstra_shortest_paths_stats.deleted_vertices.clear();
  eager_dijkstra_shortest_paths_stats.lookahead = lookahead;
  eager_dijkstra_shortest_paths_stats.execution_time = accounting::get_time();
#endif
  // Initialize local portion of property maps: every vertex starts at
  // "infinite" distance with itself as predecessor...
  typename graph_traits<DistributedGraph>::vertex_iterator ui, ui_end;
  for (boost::tie(ui, ui_end) = vertices(g); ui != ui_end; ++ui) {
    put(distance, *ui, inf);
    put(predecessor, *ui, *ui);
  }
  // ...except the source, which starts at distance zero.
  put(distance, s, zero);
  // Dijkstra Queue
  typedef detail::lookahead_dijkstra_queue
    <DistributedGraph, Combine, Compare, IndexMap, DistanceMap,
     PredecessorMap> Queue;
  Queue Q(g, combine, compare, index_map, distance,
          predecessor, lookahead);
  // Parallel Dijkstra visitor
  detail::parallel_dijkstra_bfs_visitor
    <DijkstraVisitor, Queue, WeightMap, PredecessorMap, DistanceMap, Combine,
     Compare> bfs_vis(vis, Q, weight, predecessor, distance, combine, compare,
                      zero);
  // Tag the maps with their roles so the distributed BFS machinery
  // knows how to reduce/ghost them.
  set_property_map_role(vertex_color, color_map);
  set_property_map_role(vertex_distance, distance);
  breadth_first_search(g, s, Q, bfs_vis, color_map);
#ifdef PBGL_ACCOUNTING
  eager_dijkstra_shortest_paths_stats.execution_time =
    accounting::get_time()
    - eager_dijkstra_shortest_paths_stats.execution_time;
#endif
}
// Convenience overload: supplies defaults for the index map, color map,
// comparison/combination functors, distance bounds, and visitor, then
// forwards to the full-parameter overload.
template<typename DistributedGraph, typename DijkstraVisitor,
         typename PredecessorMap, typename DistanceMap, typename WeightMap>
void
eager_dijkstra_shortest_paths
  (const DistributedGraph& g,
   typename graph_traits<DistributedGraph>::vertex_descriptor s,
   PredecessorMap predecessor, DistanceMap distance,
   typename property_traits<DistanceMap>::value_type lookahead,
   WeightMap weight)
{
  typedef typename property_traits<DistanceMap>::value_type distance_type;
  std::vector<default_color_type> colors(num_vertices(g), white_color);
  // The full overload takes (..., inf, zero, vis): `inf` is the value
  // every vertex's distance is initialized to and `zero` is assigned to
  // the source.  Fixed: the previous code passed distance_type() as
  // `inf` and max() as `zero`, i.e. in swapped order, which initialized
  // all distances to 0 and the source to infinity.
  eager_dijkstra_shortest_paths(g, s, predecessor, distance, lookahead, weight,
                                get(vertex_index, g),
                                make_iterator_property_map(&colors[0],
                                                           get(vertex_index,
                                                               g)),
                                std::less<distance_type>(),
                                closed_plus<distance_type>(),
                                (std::numeric_limits<distance_type>::max)(),
                                distance_type(),
                                dijkstra_visitor<>());
}
template<typename DistributedGraph, typename DijkstraVisitor,
typename PredecessorMap, typename DistanceMap>
void
eager_dijkstra_shortest_paths
(const DistributedGraph& g,
typename graph_traits<DistributedGraph>::vertex_descriptor s,
PredecessorMap predecessor, DistanceMap distance,
typename property_traits<DistanceMap>::value_type lookahead)
{
eager_dijkstra_shortest_paths(g, s, predecessor, distance, lookahead,
get(edge_weight, g));
}
} // end namespace distributed
#ifdef PBGL_ACCOUNTING
using distributed::eager_dijkstra_shortest_paths_stats;
#endif
using distributed::eager_dijkstra_shortest_paths;
} } // end namespace boost::graph
#endif // BOOST_GRAPH_EAGER_DIJKSTRA_SHORTEST_PATHS_HPP

View File

@@ -0,0 +1,51 @@
// Copyright (C) 2004-2008 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Nick Edmonds
// Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_DISTRIBUTED_FILTERED_GRAPH_HPP
#define BOOST_DISTRIBUTED_FILTERED_GRAPH_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/parallel/process_group.hpp>
#include <boost/graph/filtered_graph.hpp>
namespace boost {
namespace graph {
namespace parallel {
/// Retrieve the process group from a filtered graph
template<typename Graph, typename EdgePredicate, typename VertexPredicate>
struct process_group_type<filtered_graph<Graph, EdgePredicate, VertexPredicate> >
  : process_group_type<Graph> { };
// A const filtered graph uses the same process group type as its
// underlying graph as well.
template<typename Graph, typename EdgePredicate, typename VertexPredicate>
struct process_group_type<const filtered_graph<Graph, EdgePredicate, VertexPredicate> >
  : process_group_type<Graph> { };
}
}
/// Retrieve the process group from a filtered graph
template<typename Graph, typename EdgePredicate, typename VertexPredicate>
inline typename graph::parallel::process_group_type<Graph>::type
process_group(filtered_graph<Graph, EdgePredicate, VertexPredicate> const& g) {
  // A filtered graph has no process group of its own; forward the
  // request to the graph it wraps.
  const Graph& underlying = g.m_g;
  return process_group(underlying);
}
/// Forward vertex() to vertex() of the base graph
template <typename Graph, typename EdgePredicate, typename VertexPredicate>
typename graph_traits<Graph>::vertex_descriptor
vertex(typename graph_traits<Graph>::vertices_size_type i,
       filtered_graph<Graph, EdgePredicate, VertexPredicate> const& g)
{
  // Vertex lookup by index is delegated directly to the underlying
  // (unfiltered) graph.
  return vertex(i, g.m_g);
}
}
#endif // BOOST_DISTRIBUTED_FILTERED_GRAPH_HPP

View File

@@ -0,0 +1,384 @@
// Copyright (C) 2005-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_DISTRIBUTED_FRUCHTERMAN_REINGOLD_HPP
#define BOOST_GRAPH_DISTRIBUTED_FRUCHTERMAN_REINGOLD_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/fruchterman_reingold.hpp>
namespace boost { namespace graph { namespace distributed {
// Maps between processor IDs and (x, y) positions in a 2-D tiled
// display.  When `flip` is true (the default), the row coordinate is
// mirrored vertically, so row 0 of the process layout corresponds to
// the last display row.
class simple_tiling
{
 public:
  simple_tiling(int columns, int rows, bool flip = true)
    : columns(columns), rows(rows), flip(flip)
  {
  }

  // Convert from a position (x, y) in the tiled display into a
  // processor ID number
  int operator()(int x, int y) const
  {
    return flip? (rows - y - 1) * columns + x : y * columns + x;
  }

  // Convert from a process ID to a position (x, y) in the tiled
  // display.  (Fixed: now const -- it only reads the tiling
  // parameters, and previously could not be called through a
  // const simple_tiling.)
  std::pair<int, int> operator()(int id) const
  {
    int my_col = id % columns;
    int my_row = flip? rows - (id / columns) - 1 : id / columns;
    return std::make_pair(my_col, my_row);
  }

  int columns, rows;

 private:
  bool flip;
};
// Force pairs function object that does nothing
// Force-pairs function object that computes nothing; use it to skip
// the non-local force computation entirely.
struct no_force_pairs
{
  template<typename Graph, typename ApplyForce>
  void operator()(const Graph&, const ApplyForce&)
  {
    // intentionally empty
  }
};
// Computes force pairs in the distributed case.
// Wraps a local force-pairs functor (and optionally a non-local one)
// so that each layout iteration first exchanges remote displacements
// and positions before computing forces.  The ordering of the calls in
// operator() (flush, synchronize, reset, then forces) is significant.
template<typename PositionMap, typename DisplacementMap, typename LocalForces,
         typename NonLocalForces = no_force_pairs>
class distributed_force_pairs_proxy
{
 public:
  distributed_force_pairs_proxy(const PositionMap& position,
                                const DisplacementMap& displacement,
                                const LocalForces& local_forces,
                                const NonLocalForces& nonlocal_forces = NonLocalForces())
    : position(position), displacement(displacement),
      local_forces(local_forces), nonlocal_forces(nonlocal_forces)
  {
  }
  template<typename Graph, typename ApplyForce>
  void operator()(const Graph& g, ApplyForce apply_force)
  {
    // Flush remote displacements
    displacement.flush();
    // Receive updated positions for all of our neighbors
    synchronize(position);
    // Reset remote displacements
    displacement.reset();
    // Compute local repulsive forces
    local_forces(g, apply_force);
    // Compute neighbor repulsive forces
    nonlocal_forces(g, apply_force);
  }
 protected:
  PositionMap position;
  DisplacementMap displacement;
  LocalForces local_forces;
  NonLocalForces nonlocal_forces;
};
// Factory helper: builds a distributed_force_pairs_proxy (with the
// default no-op non-local force functor) deducing the map and functor
// types from the arguments.
template<typename PositionMap, typename DisplacementMap, typename LocalForces>
inline
distributed_force_pairs_proxy<PositionMap, DisplacementMap, LocalForces>
make_distributed_force_pairs(const PositionMap& position,
                             const DisplacementMap& displacement,
                             const LocalForces& local_forces)
{
  return distributed_force_pairs_proxy<PositionMap, DisplacementMap,
                                       LocalForces>(position, displacement,
                                                    local_forces);
}
// Factory helper: builds a distributed_force_pairs_proxy with an
// explicit non-local force functor, deducing all types from the
// arguments.
template<typename PositionMap, typename DisplacementMap, typename LocalForces,
         typename NonLocalForces>
inline
distributed_force_pairs_proxy<PositionMap, DisplacementMap, LocalForces,
                              NonLocalForces>
make_distributed_force_pairs(const PositionMap& position,
                             const DisplacementMap& displacement,
                             const LocalForces& local_forces,
                             const NonLocalForces& nonlocal_forces)
{
  return distributed_force_pairs_proxy<PositionMap, DisplacementMap,
                                       LocalForces, NonLocalForces>
           (position, displacement, local_forces, nonlocal_forces);
}
// Compute nonlocal force pairs based on the shared borders with
// adjacent tiles.
// Computes non-local force pairs between vertices that lie near the
// shared borders of adjacent tiles in a tiled display: border vertices
// are exchanged with the (up to four) neighboring processes and forces
// are applied between each local border vertex and each received
// neighbor vertex.
template<typename PositionMap>
class neighboring_tiles_force_pairs
{
 public:
  typedef typename property_traits<PositionMap>::value_type Point;
  typedef typename point_traits<Point>::component_type Dim;
  enum bucket_position { left, top, right, bottom, end_position };
  neighboring_tiles_force_pairs(PositionMap position, Point origin,
                                Point extent, simple_tiling tiling)
    : position(position), origin(origin), extent(extent), tiling(tiling)
  {
  }
  template<typename Graph, typename ApplyForce>
  void operator()(const Graph& g, ApplyForce apply_force)
  {
    // TBD: Do this some smarter way
    if (tiling.columns == 1 && tiling.rows == 1)
      return;
    typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;
#ifndef BOOST_NO_STDC_NAMESPACE
    using std::sqrt;
#endif // BOOST_NO_STDC_NAMESPACE
    // Border width: twice the ideal spring length k = sqrt(area/n).
    // TBD: num_vertices(g) should be the global number of vertices?
    Dim two_k = Dim(2) * sqrt(extent[0] * extent[1] / num_vertices(g));
    std::vector<vertex_descriptor> my_vertices[4];
    std::vector<vertex_descriptor> neighbor_vertices[4];
    // Compute cutoff positions
    Dim cutoffs[4];
    cutoffs[left] = origin[0] + two_k;
    cutoffs[top] = origin[1] + two_k;
    cutoffs[right] = origin[0] + extent[0] - two_k;
    cutoffs[bottom] = origin[1] + extent[1] - two_k;
    // Compute neighbors: -1 marks "no neighbor on that side".
    typename PositionMap::process_group_type pg = position.process_group();
    std::pair<int, int> my_tile = tiling(process_id(pg));
    int neighbors[4] = { -1, -1, -1, -1 } ;
    if (my_tile.first > 0)
      neighbors[left] = tiling(my_tile.first - 1, my_tile.second);
    if (my_tile.second > 0)
      neighbors[top] = tiling(my_tile.first, my_tile.second - 1);
    if (my_tile.first < tiling.columns - 1)
      neighbors[right] = tiling(my_tile.first + 1, my_tile.second);
    if (my_tile.second < tiling.rows - 1)
      neighbors[bottom] = tiling(my_tile.first, my_tile.second + 1);
    // Sort vertices along the edges into buckets (a vertex can land in
    // several buckets, e.g. near a corner).
    BGL_FORALL_VERTICES_T(v, g, Graph) {
      if (position[v][0] <= cutoffs[left]) my_vertices[left].push_back(v);
      if (position[v][1] <= cutoffs[top]) my_vertices[top].push_back(v);
      if (position[v][0] >= cutoffs[right]) my_vertices[right].push_back(v);
      if (position[v][1] >= cutoffs[bottom]) my_vertices[bottom].push_back(v);
    }
    // Send vertices to neighbors, and gather our neighbors' vertices.
    // Tag 0 carries the count, tag 1 the vertex data.
    bucket_position pos;
    for (pos = left; pos < end_position; pos = bucket_position(pos + 1)) {
      if (neighbors[pos] != -1) {
        send(pg, neighbors[pos], 0, my_vertices[pos].size());
        if (!my_vertices[pos].empty())
          send(pg, neighbors[pos], 1,
               &my_vertices[pos].front(), my_vertices[pos].size());
      }
    }
    // Pass messages around
    synchronize(pg);
    // Receive neighboring vertices
    for (pos = left; pos < end_position; pos = bucket_position(pos + 1)) {
      if (neighbors[pos] != -1) {
        std::size_t incoming_vertices;
        receive(pg, neighbors[pos], 0, incoming_vertices);
        if (incoming_vertices) {
          neighbor_vertices[pos].resize(incoming_vertices);
          receive(pg, neighbors[pos], 1, &neighbor_vertices[pos].front(),
                  incoming_vertices);
        }
      }
    }
    // For each neighboring vertex, we need to get its current position
    for (pos = left; pos < end_position; pos = bucket_position(pos + 1))
      for (typename std::vector<vertex_descriptor>::iterator i =
             neighbor_vertices[pos].begin();
           i != neighbor_vertices[pos].end();
           ++i)
        request(position, *i);
    synchronize(position);
    // Apply forces in adjacent bins. This is O(n^2) in the worst
    // case. Oh well.
    for (pos = left; pos < end_position; pos = bucket_position(pos + 1)) {
      for (typename std::vector<vertex_descriptor>::iterator i =
             my_vertices[pos].begin();
           i != my_vertices[pos].end();
           ++i)
        for (typename std::vector<vertex_descriptor>::iterator j =
               neighbor_vertices[pos].begin();
             j != neighbor_vertices[pos].end();
             ++j)
          apply_force(*i, *j);
    }
  }
 protected:
  PositionMap position;
  Point origin;
  Point extent;
  simple_tiling tiling;
};
// Factory helper: builds a neighboring_tiles_force_pairs, deducing the
// PositionMap type from the argument.
template<typename PositionMap>
inline neighboring_tiles_force_pairs<PositionMap>
make_neighboring_tiles_force_pairs
  (PositionMap position,
   typename property_traits<PositionMap>::value_type origin,
   typename property_traits<PositionMap>::value_type extent,
   simple_tiling tiling)
{
  typedef neighboring_tiles_force_pairs<PositionMap> result_type;
  return result_type(position, origin, extent, tiling);
}
// Wraps a cooling schedule so that each cooling step first synchronizes
// the displacement map's process group, accumulating the displacements
// computed on each processor before the temperature is advanced.
template<typename DisplacementMap, typename Cooling>
class distributed_cooling_proxy
{
 public:
  typedef typename Cooling::result_type result_type;
  distributed_cooling_proxy(const DisplacementMap& displacement,
                            const Cooling& cooling)
    : displacement(displacement), cooling(cooling)
  {
  }
  result_type operator()()
  {
    // Accumulate displacements computed on each processor
    synchronize(displacement.data->process_group);
    // Allow the underlying cooling to occur
    return cooling();
  }
 protected:
  DisplacementMap displacement;
  Cooling cooling;
};
// Factory helper: builds a distributed_cooling_proxy, deducing the map
// and cooling-schedule types from the arguments.
template<typename DisplacementMap, typename Cooling>
inline distributed_cooling_proxy<DisplacementMap, Cooling>
make_distributed_cooling(const DisplacementMap& displacement,
                         const Cooling& cooling)
{
  return distributed_cooling_proxy<DisplacementMap, Cooling>(displacement,
                                                             cooling);
}
// Reducer for distributed displacement maps: remote contributions are
// combined by component-wise (2-D) vector addition, and an unseen key
// defaults to a value-initialized Point.
template<typename Point>
struct point_accumulating_reducer {
  BOOST_STATIC_CONSTANT(bool, non_default_resolver = true);

  template<typename K>
  Point operator()(const K&) const { return Point(); }

  template<typename K>
  Point operator()(const K&, const Point& lhs, const Point& rhs) const
  {
    return Point(lhs[0] + rhs[0], lhs[1] + rhs[1]);
  }
};
// Distributed Fruchterman-Reingold layout: sets up reduction on the
// displacement map, requests neighbor positions, and then runs the
// sequential algorithm with the force-pairs and cooling functors
// wrapped in their distributed proxies.
template<typename Graph, typename PositionMap,
         typename AttractiveForce, typename RepulsiveForce,
         typename ForcePairs, typename Cooling, typename DisplacementMap>
void
fruchterman_reingold_force_directed_layout
  (const Graph& g,
   PositionMap position,
   typename property_traits<PositionMap>::value_type const& origin,
   typename property_traits<PositionMap>::value_type const& extent,
   AttractiveForce attractive_force,
   RepulsiveForce repulsive_force,
   ForcePairs force_pairs,
   Cooling cool,
   DisplacementMap displacement)
{
  typedef typename property_traits<PositionMap>::value_type Point;
  // Reduction in the displacement map involves summing the forces
  displacement.set_reduce(point_accumulating_reducer<Point>());
  // We need to track the positions of all of our neighbors
  BGL_FORALL_VERTICES_T(u, g, Graph)
    BGL_FORALL_ADJ_T(u, v, g, Graph)
      request(position, v);
  // Invoke the "sequential" Fruchterman-Reingold implementation
  boost::fruchterman_reingold_force_directed_layout
    (g, position, origin, extent,
     attractive_force, repulsive_force,
     make_distributed_force_pairs(position, displacement, force_pairs),
     make_distributed_cooling(displacement, cool),
     displacement);
}
// Overload with a simple_tiling: in addition to the local force pairs,
// non-local repulsive forces are computed against border vertices of
// the neighboring tiles.
template<typename Graph, typename PositionMap,
         typename AttractiveForce, typename RepulsiveForce,
         typename ForcePairs, typename Cooling, typename DisplacementMap>
void
fruchterman_reingold_force_directed_layout
  (const Graph& g,
   PositionMap position,
   typename property_traits<PositionMap>::value_type const& origin,
   typename property_traits<PositionMap>::value_type const& extent,
   AttractiveForce attractive_force,
   RepulsiveForce repulsive_force,
   ForcePairs force_pairs,
   Cooling cool,
   DisplacementMap displacement,
   simple_tiling tiling)
{
  typedef typename property_traits<PositionMap>::value_type Point;
  // Reduction in the displacement map involves summing the forces
  displacement.set_reduce(point_accumulating_reducer<Point>());
  // We need to track the positions of all of our neighbors
  BGL_FORALL_VERTICES_T(u, g, Graph)
    BGL_FORALL_ADJ_T(u, v, g, Graph)
      request(position, v);
  // Invoke the "sequential" Fruchterman-Reingold implementation,
  // chaining the tile-border force pairs after the caller's own.
  boost::fruchterman_reingold_force_directed_layout
    (g, position, origin, extent,
     attractive_force, repulsive_force,
     make_distributed_force_pairs
       (position, displacement, force_pairs,
        make_neighboring_tiles_force_pairs(position, origin, extent, tiling)),
     make_distributed_cooling(displacement, cool),
     displacement);
}
} } } // end namespace boost::graph::distributed
#endif // BOOST_GRAPH_DISTRIBUTED_FRUCHTERMAN_REINGOLD_HPP

View File

@@ -0,0 +1,294 @@
// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_PARALLEL_GRAPHVIZ_HPP
#define BOOST_GRAPH_PARALLEL_GRAPHVIZ_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/graph_traits.hpp>
#include <boost/graph/distributed/concepts.hpp>
#include <boost/property_map/property_map.hpp>
#include <boost/graph/graphviz.hpp>
#include <boost/type_traits/is_base_and_derived.hpp>
#include <boost/type_traits/is_same.hpp>
#include <fstream>
#include <sstream>
#include <iostream>
#include <string>
#include <boost/graph/parallel/container_traits.hpp>
#include <boost/graph/parallel/process_group.hpp>
#include <boost/property_map/parallel/global_index_map.hpp>
namespace boost {
// Graph-property writer that labels a process's subgraph with its owner,
// e.g. "p0", "p1", ... Used as the default GraphPropertiesWriter for
// distributed write_graphviz.
template<typename Graph>
struct graph_id_writer
{
  explicit graph_id_writer(const Graph& graph) : graph_(graph) { }

  // Emit the cluster-level "label" attribute for this process.
  void operator()(std::ostream& out)
  {
    out << " label=\"p" << process_id(graph_.process_group()) << "\";\n";
  }

 private:
  const Graph& graph_;
};
// Property writer that "paints" a vertex or edge according to the integer
// stored for it in a property map: values inside the fixed palette below
// become fill colors, values past the palette become a numeric label.
template<typename NumberMap>
struct paint_by_number_writer
{
  explicit paint_by_number_writer(NumberMap number) : number_(number) { }

  template<typename Descriptor>
  void operator()(std::ostream& out, Descriptor key)
  {
    // Fixed color palette; indices beyond it fall back to a text label.
    static const char* color_names[] = {
      "blue",
      "brown",
      "cyan",
      "darkgreen",
      "darkorchid",
      "darksalmon",
      "darkviolet",
      "deeppink",
      "gold3",
      "green",
      "magenta",
      "navy",
      "red",
      "yellow",
      "palegreen",
      "gray65",
      "gray21",
      "bisque2",
      "greenyellow",
      "indianred4",
      "lightblue2",
      "mediumspringgreen",
      "orangered",
      "orange"
    };
    const int palette_size = sizeof(color_names) / sizeof(color_names[0]);

    if (get(number_, key) < palette_size) {
      // Paint with the palette color selected by the map value.
      out << " [ style=\"filled\", fillcolor=\"" << color_names[get(number_, key)]
          << "\" ]";
    } else {
      // Out of palette range: show the raw number instead.
      out << " [ label=\"(" << get(number_, key) << ")\" ]";
    }
  }

 private:
  NumberMap number_;
};
// Convenience factory that deduces NumberMap for paint_by_number_writer.
template<typename NumberMap>
inline paint_by_number_writer<NumberMap>
paint_by_number(NumberMap number)
{
  typedef paint_by_number_writer<NumberMap> writer_type;
  return writer_type(number);
}
// Write a distributed graph in GraphViz DOT format. Every process renders
// its local vertices and edges into a "subgraph cluster_<pid>" fragment;
// the fragments are then gathered at process 0, which alone writes to
// `out`. All processes must call this collectively.
//
// vpw/epw/gpw are property writers invoked per vertex / per edge / once
// per local subgraph, respectively (standard BGL graphviz writer concept).
template<typename Graph, typename VertexPropertiesWriter,
         typename EdgePropertiesWriter, typename GraphPropertiesWriter>
void
write_graphviz(std::ostream& out,
               const Graph& g,
               VertexPropertiesWriter vpw,
               EdgePropertiesWriter epw,
               GraphPropertiesWriter gpw
               BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph,distributed_graph_tag))
{
  typedef typename graph_traits<Graph>::directed_category directed_category;
  // NOTE(review): vertices_size_type and process_id_type are declared but
  // unused in this function.
  typedef typename graph_traits<Graph>::vertices_size_type vertices_size_type;
  typedef typename boost::graph::parallel::process_group_type<Graph>::type
    process_group_type;
  typedef typename process_group_type::process_id_type process_id_type;
  typedef typename property_map<Graph, vertex_index_t>::const_type
    VertexIndexMap;
  typedef typename property_map<Graph, vertex_global_t>::const_type
    VertexGlobalMap;

  // Choose DOT dialect based on the graph's directedness.
  static const bool is_undirected
    = (is_base_and_derived<undirected_tag, directed_category>::value
       || is_same<undirected_tag, directed_category>::value);
  static const char* graph_kind = is_undirected? "graph" : "digraph";
  static const char* edge_kind = is_undirected? "--" : "->";

  using boost::graph::parallel::process_group;
  process_group_type pg = process_group(g);

  // Maps local vertices to globally-unique indices, so node names "n<idx>"
  // are consistent across all processes' fragments.
  parallel::global_index_map<VertexIndexMap, VertexGlobalMap>
    global_index(pg, num_vertices(g), get(vertex_index, g),
                 get(vertex_global, g));

  // Render this process's fragment into a string buffer first; only
  // process 0 touches `out`.
  std::ostringstream local_graph_out;

  local_graph_out << " subgraph cluster_" << process_id(pg) << " {\n";
  gpw(local_graph_out);

  // Emit one node statement per local vertex.
  typename graph_traits<Graph>::vertex_iterator vi, vi_end;
  for (boost::tie(vi, vi_end) = vertices(g); vi != vi_end; ++vi) {
    int global_idx = get(global_index, *vi);
    local_graph_out << " n" << global_idx;
    vpw(local_graph_out, *vi);
    local_graph_out << ";\n";
  }
  local_graph_out << " }\n\n";

  // Emit one edge statement per local edge (endpoints by global index, so
  // edges may reference nodes declared in another process's cluster).
  typename graph_traits<Graph>::edge_iterator ei, ei_end;
  for (boost::tie(ei, ei_end) = edges(g); ei != ei_end; ++ei) {
    int source_idx = get(global_index, source(*ei, g));
    int target_idx = get(global_index, target(*ei, g));
    local_graph_out << " n" << source_idx << " " << edge_kind << " n"
                    << target_idx;
    epw(local_graph_out, *ei);
    local_graph_out << ";\n";
  }

  if (process_id(pg) == 0) {
    // Root: write the header and own fragment, then collect every other
    // process's fragment (message tag 0 = length, tag 1 = payload).
    out << graph_kind << " g {\n";
    out << local_graph_out.str();
    synchronize(pg);
    for (int i = 1; i < num_processes(pg); ++i) {
      int len;
      receive(pg, i, 0, len);
      char* data = new char [len+1];
      data[len] = 0;  // NUL-terminate: payload is sent without terminator
      receive(pg, i, 1, data, len);
      out << std::endl << data;
      delete [] data;
    }
    out << "}\n";
  } else {
    // Non-root: ship length then payload to process 0.
    std::string result_str = local_graph_out.str();
    const char* data = result_str.c_str();
    int len = result_str.length();
    send(pg, 0, 0, len);
    send(pg, 0, 1, data, len);
    synchronize(pg);
  }
  // NOTE(review): sends above are delivered by the synchronize() steps;
  // the extra rounds below appear to flush/align all processes before
  // returning — confirm against mpi_process_group's batching semantics.
  synchronize(pg);
  synchronize(pg);
  synchronize(pg);
}
template<typename Graph, typename VertexPropertiesWriter,
typename EdgePropertiesWriter>
inline void
write_graphviz(std::ostream& out,
const Graph& g,
VertexPropertiesWriter vpw,
EdgePropertiesWriter epw
BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph,distributed_graph_tag))
{
write_graphviz(out, g, vpw, epw, graph_id_writer<Graph>(g));
}
template<typename Graph, typename VertexPropertiesWriter>
inline void
write_graphviz(std::ostream& out,
const Graph& g,
VertexPropertiesWriter vpw
BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph,distributed_graph_tag))
{
write_graphviz(out, g, vpw, default_writer());
}
template<typename Graph>
inline void
write_graphviz(std::ostream& out, const Graph& g
BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph,distributed_graph_tag))
{
write_graphviz(out, g, default_writer());
}
// File-name variant: only process 0 opens the named file; other processes
// pass std::cout, which the stream overload never writes to on non-root
// ranks (they only send their fragments to the root).
template<typename Graph, typename VertexPropertiesWriter,
         typename EdgePropertiesWriter, typename GraphPropertiesWriter>
void
write_graphviz(const std::string& filename,
               const Graph& g,
               VertexPropertiesWriter vpw,
               EdgePropertiesWriter epw,
               GraphPropertiesWriter gpw
               BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph,distributed_graph_tag))
{
  if (process_id(g.process_group()) != 0) {
    write_graphviz(std::cout, g, vpw, epw, gpw);
    return;
  }
  std::ofstream out(filename.c_str());
  write_graphviz(out, g, vpw, epw, gpw);
}
// File-name variant without a graph-property writer; see the five-argument
// overload for the root/non-root stream convention.
template<typename Graph, typename VertexPropertiesWriter,
         typename EdgePropertiesWriter>
void
write_graphviz(const std::string& filename,
               const Graph& g,
               VertexPropertiesWriter vpw,
               EdgePropertiesWriter epw
               BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph,distributed_graph_tag))
{
  if (process_id(g.process_group()) != 0) {
    write_graphviz(std::cout, g, vpw, epw);
    return;
  }
  std::ofstream out(filename.c_str());
  write_graphviz(out, g, vpw, epw);
}
// File-name variant with only a vertex-property writer.
template<typename Graph, typename VertexPropertiesWriter>
void
write_graphviz(const std::string& filename,
               const Graph& g,
               VertexPropertiesWriter vpw
               BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph,distributed_graph_tag))
{
  if (process_id(g.process_group()) != 0) {
    write_graphviz(std::cout, g, vpw);
    return;
  }
  std::ofstream out(filename.c_str());
  write_graphviz(out, g, vpw);
}
// File-name variant with no property writers at all.
template<typename Graph>
void
write_graphviz(const std::string& filename, const Graph& g
               BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph,distributed_graph_tag))
{
  if (process_id(g.process_group()) != 0) {
    write_graphviz(std::cout, g);
    return;
  }
  std::ofstream out(filename.c_str());
  write_graphviz(out, g);
}
template<typename Graph>
void
write_graphviz(std::ostream& out, const Graph& g,
const dynamic_properties& dp,
const std::string& node_id = "node_id"
BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph,distributed_graph_tag))
{
write_graphviz
(out, g,
/*vertex_writer=*/dynamic_vertex_properties_writer(dp, node_id),
/*edge_writer=*/dynamic_properties_writer(dp));
}
} // end namespace boost
#endif // BOOST_GRAPH_PARALLEL_GRAPHVIZ_HPP

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,175 @@
// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_LOCAL_SUBGRAPH_HPP
#define BOOST_GRAPH_LOCAL_SUBGRAPH_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/graph_traits.hpp>
#include <boost/graph/filtered_graph.hpp>
#include <boost/type_traits/is_same.hpp>
#include <boost/type_traits/is_base_and_derived.hpp>
#include <boost/graph/parallel/container_traits.hpp>
namespace boost {
namespace graph { namespace detail {
// Optionally, virtually derive from a base class.
// The primary template is declared but never defined; only the two boolean
// specializations below are instantiated.
template<bool Derive, typename Base> struct derive_from_if;
// Derive == true: (virtually) inherit from Base.
template<typename Base> struct derive_from_if<true, Base> : virtual Base {};
// Derive == false: empty class, no inheritance.
template<typename Base> struct derive_from_if<false, Base> {};
// Derives (virtually) from NewBase iff Tag is OldBase or derives from it.
// Used below to propagate individual traversal-category tags from the
// underlying graph to local_subgraph selectively.
template<typename NewBase, typename Tag, typename OldBase = NewBase>
struct derive_from_if_tag_is :
    derive_from_if<(is_base_and_derived<OldBase, Tag>::value
                    || is_same<OldBase, Tag>::value),
                   NewBase>
{
};
} } // end namespace graph::detail
template<typename DistributedGraph>
class is_local_edge
{
public:
typedef bool result_type;
typedef typename graph_traits<DistributedGraph>::edge_descriptor
argument_type;
is_local_edge() : g(0) {}
is_local_edge(DistributedGraph& g) : g(&g), owner(get(vertex_owner, g)) {}
// Since either the source or target vertex must be local, the
// equivalence of their owners indicates a local edge.
result_type operator()(const argument_type& e) const
{ return get(owner, source(e, *g)) == get(owner, target(e, *g)); }
private:
DistributedGraph* g;
typename property_map<DistributedGraph, vertex_owner_t>::const_type owner;
};
// Predicate: true when a vertex is stored on (owned by) this process.
template<typename DistributedGraph>
class is_local_vertex
{
public:
  typedef bool result_type;
  typedef typename graph_traits<DistributedGraph>::vertex_descriptor
    argument_type;

  is_local_vertex() : g(0) {}
  is_local_vertex(DistributedGraph& g) : g(&g), owner(get(vertex_owner, g)) { }

  // A vertex is local exactly when its owner equals this process's id.
  // (NOTE(review): the original comment here was copy-pasted from
  // is_local_edge and incorrectly talked about edges.)
  result_type operator()(const argument_type& v) const
  {
    return get(owner, v) == process_id(process_group(*g));
  }

private:
  DistributedGraph* g;
  typename property_map<DistributedGraph, vertex_owner_t>::const_type owner;
};
// View of the purely-local portion of a distributed graph: a filtered_graph
// that keeps only vertices owned by this process and edges whose endpoints
// share an owner. Iterating it therefore never triggers communication.
template<typename DistributedGraph>
class local_subgraph
  : public filtered_graph<DistributedGraph,
                          is_local_edge<DistributedGraph>,
                          is_local_vertex<DistributedGraph> >
{
  typedef filtered_graph<DistributedGraph,
                         is_local_edge<DistributedGraph>,
                         is_local_vertex<DistributedGraph> >
    inherited;
  typedef typename graph_traits<DistributedGraph>::traversal_category
    inherited_category;

 public:
  // Forward each traversal capability of the underlying graph, and expose
  // the distributed vertex/edge-list capabilities as their sequential
  // counterparts (the local view is a sequential graph).
  struct traversal_category :
    graph::detail::derive_from_if_tag_is<incidence_graph_tag,
                                         inherited_category>,
    graph::detail::derive_from_if_tag_is<adjacency_graph_tag,
                                         inherited_category>,
    graph::detail::derive_from_if_tag_is<vertex_list_graph_tag,
                                         inherited_category>,
    graph::detail::derive_from_if_tag_is<edge_list_graph_tag,
                                         inherited_category>,
    graph::detail::derive_from_if_tag_is<vertex_list_graph_tag,
                                         inherited_category,
                                         distributed_vertex_list_graph_tag>,
    graph::detail::derive_from_if_tag_is<edge_list_graph_tag,
                                         inherited_category,
                                         distributed_edge_list_graph_tag>
  { };

  // Build the local view over g; the filter predicates capture g.
  local_subgraph(DistributedGraph& g)
    : inherited(g,
                is_local_edge<DistributedGraph>(g),
                is_local_vertex<DistributedGraph>(g)),
      g(g)
  {
  }

  // Distributed Container
  typedef typename boost::graph::parallel::process_group_type<DistributedGraph>::type
    process_group_type;

  // NOTE(review): both overloads return a reference to the result of
  // process_group(g); if that free function returns by value this binds a
  // reference to a temporary — confirm against the process-group traits.
  process_group_type& process_group()
  {
    using boost::graph::parallel::process_group;
    return process_group(g);
  }
  const process_group_type& process_group() const
  {
    using boost::graph::parallel::process_group;
    return boost::graph::parallel::process_group(g);
  }

  // Access the underlying distributed graph.
  DistributedGraph& base() { return g; }
  const DistributedGraph& base() const { return g; }

 private:
  DistributedGraph& g;
};
// Property maps of a local_subgraph are exactly those of the underlying
// distributed graph.
template<typename DistributedGraph, typename PropertyTag>
class property_map<local_subgraph<DistributedGraph>, PropertyTag>
  : public property_map<DistributedGraph, PropertyTag> { };

// For a const view, both `type` and `const_type` are the underlying
// graph's const property map.
template<typename DistributedGraph, typename PropertyTag>
class property_map<local_subgraph<const DistributedGraph>, PropertyTag>
{
 public:
  typedef typename property_map<DistributedGraph, PropertyTag>::const_type
    type;
  typedef type const_type;
};
// get() on a local_subgraph simply delegates to the underlying graph.
template<typename PropertyTag, typename DistributedGraph>
inline typename property_map<local_subgraph<DistributedGraph>, PropertyTag>::type
get(PropertyTag p, local_subgraph<DistributedGraph>& g)
{ return get(p, g.base()); }

// Const overload: delegates likewise.
template<typename PropertyTag, typename DistributedGraph>
inline typename property_map<local_subgraph<DistributedGraph>, PropertyTag>
  ::const_type
get(PropertyTag p, const local_subgraph<DistributedGraph>& g)
{ return get(p, g.base()); }
// Factory helper: deduces DistributedGraph for local_subgraph.
template<typename DistributedGraph>
inline local_subgraph<DistributedGraph>
make_local_subgraph(DistributedGraph& g)
{
  typedef local_subgraph<DistributedGraph> result_type;
  return result_type(g);
}
} // end namespace boost
#endif // BOOST_GRAPH_LOCAL_SUBGRAPH_HPP

View File

@@ -0,0 +1,809 @@
// Copyright (C) 2004-2008 The Trustees of Indiana University.
// Copyright (C) 2007 Douglas Gregor
// Copyright (C) 2007 Matthias Troyer <troyer@boost-consulting.com>
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Matthias Troyer
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_DISTRIBUTED_MPI_PROCESS_GROUP
#define BOOST_GRAPH_DISTRIBUTED_MPI_PROCESS_GROUP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
//#define NO_SPLIT_BATCHES
#define SEND_OOB_BSEND
#include <boost/optional.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/weak_ptr.hpp>
#include <utility>
#include <memory>
#include <boost/function/function1.hpp>
#include <boost/function/function2.hpp>
#include <boost/function/function0.hpp>
#include <boost/mpi.hpp>
#include <boost/graph/parallel/process_group.hpp>
#include <boost/utility/enable_if.hpp>
namespace boost { namespace graph { namespace distributed {
// Process group tags
// Capability tag identifying mpi_process_group within the process-group
// tag hierarchy (refines the linear process-group capability).
struct mpi_process_group_tag : virtual parallel::linear_process_group_tag { };
class mpi_process_group
{
  // Pimpl: the shared, per-group state lives in impl (defined out of line)
  // and is shared by all copies of this process group via impl_ below.
  struct impl;

 public:
  /// Number of tags available to each data structure.
  static const int max_tags = 256;

  /**
   * The type of a "receive" handler, that will be provided with
   * (source, tag) pairs when a message is received. Users can provide a
   * receive handler for a distributed data structure, for example, to
   * automatically pick up and respond to messages as needed.
   */
  typedef function<void(int source, int tag)> receiver_type;

  /**
   * The type of a handler for the on-synchronize event, which will be
   * executed at the beginning of synchronize().
   */
  typedef function0<void> on_synchronize_event_type;

  /// Used as a tag to help create an "empty" process group.
  struct create_empty {};

  /// The type used to buffer message data
  typedef boost::mpi::packed_oprimitive::buffer_type buffer_type;

  /// The type used to identify a process
  typedef int process_id_type;

  /// The type used to count the number of processes
  typedef int process_size_type;

  /// The type of communicator used to transmit data via MPI
  typedef boost::mpi::communicator communicator_type;

  /// Classification of the capabilities of this process group
  struct communication_category
    : virtual parallel::bsp_process_group_tag,
      virtual mpi_process_group_tag { };

  // TBD: We can eliminate the "source" field and possibly the
  // "offset" field.
  struct message_header {
    /// The process that sent the message
    process_id_type source;
    /// The message tag
    int tag;
    /// The offset of the message into the buffer
    std::size_t offset;
    /// The length of the message in the buffer, in bytes
    std::size_t bytes;

    // Serialize all four fields (used when shipping batches).
    template <class Archive>
    void serialize(Archive& ar, int)
    {
      ar & source & tag & offset & bytes;
    }
  };

  /**
   * Stores the outgoing messages for a particular processor.
   *
   * @todo Evaluate whether we should use a deque instance, which
   * would reduce could reduce the cost of "sending" messages but
   * increases the time spent in the synchronization step.
   */
  struct outgoing_messages {
    outgoing_messages() {}
    ~outgoing_messages() {}

    /// One header per batched message, in send order.
    std::vector<message_header> headers;
    /// Serialized payloads of all batched messages, back-to-back.
    buffer_type buffer;

    template <class Archive>
    void serialize(Archive& ar, int)
    {
      ar & headers & buffer;
    }

    /// Constant-time exchange of contents with another batch.
    void swap(outgoing_messages& x)
    {
      headers.swap(x.headers);
      buffer.swap(x.buffer);
    }
  };

 private:
  /**
   * Virtual base from which every trigger will be launched. See @c
   * trigger_launcher for more information.
   */
  class trigger_base : boost::noncopyable
  {
   public:
    explicit trigger_base(int tag) : tag_(tag) { }

    /// Retrieve the tag associated with this trigger
    int tag() const { return tag_; }

    virtual ~trigger_base() { }

    /**
     * Invoked to receive a message that matches a particular trigger.
     *
     * @param source the source of the message
     * @param tag the (local) tag of the message
     * @param context the context under which the trigger is being
     * invoked
     */
    virtual void
    receive(mpi_process_group const& pg, int source, int tag,
            trigger_receive_context context, int block=-1) const = 0;

   protected:
    // The message tag associated with this trigger
    int tag_;
  };

  /**
   * Launches a specific handler in response to a trigger. This
   * function object wraps up the handler function object and a buffer
   * for incoming data.
   */
  template<typename Type, typename Handler>
  class trigger_launcher : public trigger_base
  {
   public:
    explicit trigger_launcher(mpi_process_group& self, int tag,
                              const Handler& handler)
      : trigger_base(tag), self(self), handler(handler)
    {}

    void
    receive(mpi_process_group const& pg, int source, int tag,
            trigger_receive_context context, int block=-1) const;

   private:
    // The process group this trigger is registered with.
    mpi_process_group& self;
    // mutable: receive() is const but may invoke a stateful handler.
    mutable Handler handler;
  };

  /**
   * Launches a specific handler with a message reply in response to a
   * trigger. This function object wraps up the handler function
   * object and a buffer for incoming data.
   */
  template<typename Type, typename Handler>
  class reply_trigger_launcher : public trigger_base
  {
   public:
    explicit reply_trigger_launcher(mpi_process_group& self, int tag,
                                    const Handler& handler)
      : trigger_base(tag), self(self), handler(handler)
    {}

    void
    receive(mpi_process_group const& pg, int source, int tag,
            trigger_receive_context context, int block=-1) const;

   private:
    mpi_process_group& self;
    mutable Handler handler;
  };

  // Launcher used by global_trigger(): the handler is shared across the
  // whole process group rather than a single block.
  template<typename Type, typename Handler>
  class global_trigger_launcher : public trigger_base
  {
   public:
    explicit global_trigger_launcher(mpi_process_group& self, int tag,
                                     const Handler& handler)
      : trigger_base(tag), handler(handler)
    {
    }

    void
    receive(mpi_process_group const& pg, int source, int tag,
            trigger_receive_context context, int block=-1) const;

   private:
    mutable Handler handler;
    // TBD: do not forget to cancel any outstanding Irecv when deleted,
    // if we decide to use Irecv
  };

  // Variant of global_trigger_launcher that pre-posts a receive of
  // buffer_size bytes (see prepare_receive) when constructed.
  template<typename Type, typename Handler>
  class global_irecv_trigger_launcher : public trigger_base
  {
   public:
    explicit global_irecv_trigger_launcher(mpi_process_group& self, int tag,
                                           const Handler& handler, int sz)
      : trigger_base(tag), handler(handler), buffer_size(sz)
    {
      prepare_receive(self,tag);
    }

    void
    receive(mpi_process_group const& pg, int source, int tag,
            trigger_receive_context context, int block=-1) const;

   private:
    // Post (or re-post, when force) the receive for this trigger's tag.
    void prepare_receive(mpi_process_group const& pg, int tag, bool force=false) const;
    Handler handler;
    int buffer_size;
    // TBD: do not forget to cancel any outstanding Irecv when deleted,
    // if we decide to use Irecv
  };

 public:
  /**
   * Construct a new BSP process group from an MPI communicator. The
   * MPI communicator will be duplicated to create a new communicator
   * for this process group to use.
   */
  mpi_process_group(communicator_type parent_comm = communicator_type());

  /**
   * Construct a new BSP process group from an MPI communicator. The
   * MPI communicator will be duplicated to create a new communicator
   * for this process group to use. This constructor allows to tune the
   * size of message batches.
   *
   * @param num_headers The maximum number of headers in a message batch
   *
   * @param buffer_size The maximum size of the message buffer in a batch.
   *
   */
  mpi_process_group( std::size_t num_headers, std::size_t buffer_size,
                     communicator_type parent_comm = communicator_type());

  /**
   * Construct a copy of the BSP process group for a new distributed
   * data structure. This data structure will synchronize with all
   * other members of the process group's equivalence class (including
   * @p other), but will have its own set of tags.
   *
   * @param other The process group that this new process group will
   * be based on, using a different set of tags within the same
   * communication and synchronization space.
   *
   * @param handler A message handler that will be passed (source,
   * tag) pairs for each message received by this data
   * structure. The handler is expected to receive the messages
   * immediately. The handler can be changed after-the-fact by
   * calling @c replace_handler.
   *
   * @param out_of_band_receive An anachronism. TODO: remove this.
   */
  mpi_process_group(const mpi_process_group& other,
                    const receiver_type& handler,
                    bool out_of_band_receive = false);

  /**
   * Construct a copy of the BSP process group for a new distributed
   * data structure. This data structure will synchronize with all
   * other members of the process group's equivalence class (including
   * @p other), but will have its own set of tags.
   */
  mpi_process_group(const mpi_process_group& other,
                    attach_distributed_object,
                    bool out_of_band_receive = false);

  /**
   * Create an "empty" process group, with no information. This is an
   * internal routine that users should never need.
   */
  explicit mpi_process_group(create_empty) {}

  /**
   * Destroys this copy of the process group.
   */
  ~mpi_process_group();

  /**
   * Replace the current message handler with a new message handler.
   *
   * @param handle The new message handler.
   * @param out_of_band_receive An anachronism: remove this
   */
  void replace_handler(const receiver_type& handler,
                       bool out_of_band_receive = false);

  /**
   * Turns this process group into the process group for a new
   * distributed data structure or object, allocating its own tag
   * block.
   */
  void make_distributed_object();

  /**
   * Replace the handler to be invoked at the beginning of synchronize.
   */
  void
  replace_on_synchronize_handler(const on_synchronize_event_type& handler = 0);

  /**
   * Return the block number of the current data structure. A value of
   * 0 indicates that this particular instance of the process group is
   * not associated with any distributed data structure.
   */
  int my_block_number() const { return block_num? *block_num : 0; }

  /**
   * Encode a block number/tag pair into a single encoded tag for
   * transmission.
   */
  int encode_tag(int block_num, int tag) const
  { return block_num * max_tags + tag; }

  /**
   * Decode an encoded tag into a block number/tag pair.
   */
  std::pair<int, int> decode_tag(int encoded_tag) const
  { return std::make_pair(encoded_tag / max_tags, encoded_tag % max_tags); }

  // @todo Actually write up the friend declarations so these could be
  // private.

  // private:

  /** Allocate a block of tags for this instance. The block should not
   * have been allocated already, e.g., my_block_number() ==
   * 0. Returns the newly-allocated block number.
   */
  int allocate_block(bool out_of_band_receive = false);

  /** Potentially emit a receive event out of band. Returns true if an event
   *  was actually sent, false otherwise.
   */
  bool maybe_emit_receive(int process, int encoded_tag) const;

  /** Emit a receive event. Returns true if an event was actually
   * sent, false otherwise.
   */
  bool emit_receive(int process, int encoded_tag) const;

  /** Emit an on-synchronize event to all block handlers. */
  void emit_on_synchronize() const;

  /** Retrieve a reference to the stored receiver in this block. */
  template<typename Receiver>
  Receiver* get_receiver();

  // send_impl: dispatched on whether T maps directly to an MPI datatype
  // (mpl::true_) or must be serialized (mpl::false_).
  template<typename T>
  void
  send_impl(int dest, int tag, const T& value,
            mpl::true_ /*is_mpi_datatype*/) const;

  template<typename T>
  void
  send_impl(int dest, int tag, const T& value,
            mpl::false_ /*is_mpi_datatype*/) const;

  // Array send for serialized (non-MPI-datatype) element types.
  template<typename T>
  typename disable_if<boost::mpi::is_mpi_datatype<T>, void>::type
  array_send_impl(int dest, int tag, const T values[], std::size_t n) const;

  // receive_impl: same datatype/serialization dispatch as send_impl.
  template<typename T>
  bool
  receive_impl(int source, int tag, T& value,
               mpl::true_ /*is_mpi_datatype*/) const;

  template<typename T>
  bool
  receive_impl(int source, int tag, T& value,
               mpl::false_ /*is_mpi_datatype*/) const;

  // Receive an array of values
  template<typename T>
  typename disable_if<boost::mpi::is_mpi_datatype<T>, bool>::type
  array_receive_impl(int source, int tag, T* values, std::size_t& n) const;

  // Check (without receiving) for a pending message; empty if none.
  optional<std::pair<mpi_process_group::process_id_type, int> > probe() const;

  // End-of-superstep synchronization: delivers all batched messages.
  void synchronize() const;

  // True when this copy is attached to group state (i.e. not "empty").
  operator bool() { return impl_; }

  // NOTE(review): presumably returns this group without its block
  // association — confirm against the out-of-line definition.
  mpi_process_group base() const;

  /**
   * Create a new trigger for a specific message tag. Triggers handle
   * out-of-band messaging, and the handler itself will be called
   * whenever a message is available. The handler itself accepts four
   * arguments: the source of the message, the message tag (which will
   * be the same as @p tag), the message data (of type @c Type), and a
   * boolean flag that states whether the message was received
   * out-of-band. The last will be @c true for out-of-band receives,
   * or @c false for receives at the end of a synchronization step.
   */
  template<typename Type, typename Handler>
  void trigger(int tag, const Handler& handler);

  /**
   * Create a new trigger for a specific message tag, along with a way
   * to send a reply with data back to the sender. Triggers handle
   * out-of-band messaging, and the handler itself will be called
   * whenever a message is available. The handler itself accepts four
   * arguments: the source of the message, the message tag (which will
   * be the same as @p tag), the message data (of type @c Type), and a
   * boolean flag that states whether the message was received
   * out-of-band. The last will be @c true for out-of-band receives,
   * or @c false for receives at the end of a synchronization
   * step. The handler also returns a value, which will be routed back
   * to the sender.
   */
  template<typename Type, typename Handler>
  void trigger_with_reply(int tag, const Handler& handler);

  // Group-wide trigger; nonzero buffer_size selects the Irecv launcher.
  template<typename Type, typename Handler>
  void global_trigger(int tag, const Handler& handler, std::size_t buffer_size=0);

  /**
   * Poll for any out-of-band messages. This routine will check if any
   * out-of-band messages are available. Those that are available will
   * be handled immediately, if possible.
   *
   * @returns if an out-of-band message has been received, but we are
   * unable to actually receive the message, a (source, tag) pair will
   * be returned. Otherwise, returns an empty optional.
   *
   * @param wait When true, we should block until a message comes in.
   *
   * @param synchronizing whether we are currently synchronizing the
   * process group
   */
  optional<std::pair<int, int> >
  poll(bool wait = false, int block = -1, bool synchronizing = false) const;

  /**
   * Determines the context of the trigger currently executing. If
   * multiple triggers are executing (recursively), then the context
   * for the most deeply nested trigger will be returned. If no
   * triggers are executing, returns @c trc_none. This might be used,
   * for example, to determine whether a reply to a message should
   * itself be sent out-of-band or whether it can go via the normal,
   * slower communication route.
   */
  trigger_receive_context trigger_context() const;

  /// INTERNAL ONLY
  void receive_batch(process_id_type source, outgoing_messages& batch) const;

  /// INTERNAL ONLY
  ///
  /// Determine the actual communicator and tag will be used for a
  /// transmission with the given tag.
  std::pair<boost::mpi::communicator, int>
  actual_communicator_and_tag(int tag, int block) const;

  /// set the size of the message buffer used for buffered oob sends
  static void set_message_buffer_size(std::size_t s);

  /// get the size of the message buffer used for buffered oob sends
  static std::size_t message_buffer_size();

  // Saved MPI attached-buffer state (see MPI_Buffer_attach/detach),
  // restored when the message buffer size changes.
  static int old_buffer_size;
  static void* old_buffer;

 private:
  // Register a trigger launcher for (tag, block).
  void install_trigger(int tag, int block,
                       shared_ptr<trigger_base> const& launcher);

  // Service outstanding MPI requests for the given block (-1 = all).
  void poll_requests(int block=-1) const;

  // send a batch if the buffer is full now or would get full
  void maybe_send_batch(process_id_type dest) const;

  // actually send a batch
  void send_batch(process_id_type dest, outgoing_messages& batch) const;
  void send_batch(process_id_type dest) const;

  // Serialize pending headers into the outgoing buffers.
  void pack_headers() const;

  /**
   * Process a batch of incoming messages immediately.
   *
   * @param source the source of these messages
   */
  void process_batch(process_id_type source) const;
  void receive_batch(boost::mpi::status& status) const;

  //void free_finished_sends() const;

  /// Status messages used internally by the process group
  enum status_messages {
    /// the first of the reserved message tags
    msg_reserved_first = 126,
    /// Sent from a processor when sending batched messages
    msg_batch = 126,
    /// Sent from a processor when sending large batched messages, larger than
    /// the maximum buffer size for messages to be received by MPI_Irecv
    msg_large_batch = 127,
    /// Sent from a source processor to everyone else when that
    /// processor has entered the synchronize() function.
    msg_synchronizing = 128,
    /// the last of the reserved message tags
    msg_reserved_last = 128
  };

  /**
   * Description of a block of tags associated to a particular
   * distributed data structure. This structure will live as long as
   * the distributed data structure is around, and will be used to
   * help send messages to the data structure.
   */
  struct block_type
  {
    block_type() { }

    /// Handler for receive events
    receiver_type on_receive;

    /// Handler executed at the start of  synchronization
    on_synchronize_event_type on_synchronize;

    /// Individual message triggers. Note: at present, this vector is
    /// indexed by the (local) tag of the trigger.  Any tags that
    /// don't have triggers will have NULL pointers in that spot.
    std::vector<shared_ptr<trigger_base> > triggers;
  };

  /**
   * Data structure containing all of the blocks for the distributed
   * data structures attached to a process group.
   */
  typedef std::vector<block_type*> blocks_type;

  /// Iterator into @c blocks_type.
  typedef blocks_type::iterator block_iterator;

  /**
   * Deleter used to deallocate a block when its distributed data
   * structure is destroyed. This type will be used as the deleter for
   * @c block_num.
   */
  struct deallocate_block;

  // Attached MPI buffer used for buffered out-of-band sends.
  static std::vector<char> message_buffer;

 public:
  /**
   * Data associated with the process group and all of its attached
   * distributed data structures.
   */
  shared_ptr<impl> impl_;

  /**
   * When non-null, indicates that this copy of the process group is
   * associated with a particular distributed data structure. The
   * integer value contains the block number (a value > 0) associated
   * with that data structure. The deleter for this @c shared_ptr is a
   * @c deallocate_block object that will deallocate the associated
   * block in @c impl_->blocks.
   */
  shared_ptr<int> block_num;

  /**
   * Rank of this process, to avoid having to call rank() repeatedly.
   */
  int rank;

  /**
   * Number of processes in this process group, to avoid having to
   * call communicator::size() repeatedly.
   */
  int size;
};
/// Rank of this process within the group (cached in the group object).
inline mpi_process_group::process_id_type
process_id(const mpi_process_group& pg)
{ return pg.rank; }

/// Number of processes in the group (cached in the group object).
inline mpi_process_group::process_size_type
num_processes(const mpi_process_group& pg)
{ return pg.size; }

/// The MPI communicator underlying this process group.
mpi_process_group::communicator_type communicator(const mpi_process_group& pg);

// Point-to-point send of a single value (batched; delivered by
// synchronization or trigger handling).
template<typename T>
void
send(const mpi_process_group& pg, mpi_process_group::process_id_type dest,
     int tag, const T& value);

// Send a sequence of values.
template<typename InputIterator>
void
send(const mpi_process_group& pg, mpi_process_group::process_id_type dest,
     int tag, InputIterator first, InputIterator last);

// Pointer-range convenience overloads: forward as (first, count).
template<typename T>
inline void
send(const mpi_process_group& pg, mpi_process_group::process_id_type dest,
     int tag, T* first, T* last)
{ send(pg, dest, tag, first, last - first); }

template<typename T>
inline void
send(const mpi_process_group& pg, mpi_process_group::process_id_type dest,
     int tag, const T* first, const T* last)
{ send(pg, dest, tag, first, last - first); }

// Receive a value with the given tag from any source; returns the source.
template<typename T>
mpi_process_group::process_id_type
receive(const mpi_process_group& pg, int tag, T& value);

// Receive a value with the given tag from a specific source.
template<typename T>
mpi_process_group::process_id_type
receive(const mpi_process_group& pg,
        mpi_process_group::process_id_type source, int tag, T& value);

// Non-blocking check for an incoming message; (source, tag) if available.
optional<std::pair<mpi_process_group::process_id_type, int> >
probe(const mpi_process_group& pg);

// Collective: end the current superstep and deliver batched messages.
void synchronize(const mpi_process_group& pg);

// Collective reduction of [first, last) into out using bin_op.
template<typename T, typename BinaryOperation>
T*
all_reduce(const mpi_process_group& pg, T* first, T* last, T* out,
           BinaryOperation bin_op);

// Collective prefix operation of [first, last) into out using bin_op.
template<typename T, typename BinaryOperation>
T*
scan(const mpi_process_group& pg, T* first, T* last, T* out,
     BinaryOperation bin_op);

// Collective: gather every process's [first, last) into out.
template<typename InputIterator, typename T>
void
all_gather(const mpi_process_group& pg,
           InputIterator first, InputIterator last, std::vector<T>& out);

// Collective: build a subgroup containing the process ids in [first, last).
template<typename InputIterator>
mpi_process_group
process_subgroup(const mpi_process_group& pg,
                 InputIterator first, InputIterator last);

// Collective: broadcast val from root to all processes.
template<typename T>
void
broadcast(const mpi_process_group& pg, T& val,
          mpi_process_group::process_id_type root);
/*******************************************************************
* Out-of-band communication *
*******************************************************************/
/** Send a single out-of-band message to process @p dest.
 *
 * This overload is selected for types that map directly onto an MPI
 * datatype; the value is transmitted without serialization.  The
 * communicator and the translated tag for the transfer come from
 * @c actual_communicator_and_tag (@p block selects which tag block;
 * -1 requests the default).
 *
 * The call completes locally before returning: a nonblocking
 * MPI_Isend is started and then polled to completion, with
 * @c pg.poll() invoked on every iteration so that incoming messages
 * continue to be serviced while we wait.
 */
template<typename T>
typename enable_if<boost::mpi::is_mpi_datatype<T> >::type
send_oob(const mpi_process_group& pg, mpi_process_group::process_id_type dest,
         int tag, const T& value, int block=-1)
{
  using boost::mpi::get_mpi_datatype;

  // Determine the actual message tag we will use for the send, and which
  // communicator we will use.
  std::pair<boost::mpi::communicator, int> actual
    = pg.actual_communicator_and_tag(tag, block);

#ifdef SEND_OOB_BSEND
  // When a message buffer is configured, a buffered send completes
  // locally without having to poll for completion.
  if (mpi_process_group::message_buffer_size()) {
    MPI_Bsend(const_cast<T*>(&value), 1, get_mpi_datatype<T>(value), dest,
              actual.second, actual.first);
    return;
  }
#endif
  MPI_Request request;
  MPI_Isend(const_cast<T*>(&value), 1, get_mpi_datatype<T>(value), dest,
            actual.second, actual.first, &request);

  // Poll the process group while waiting for our own send to finish.
  int done=0;
  do {
    pg.poll();
    MPI_Test(&request,&done,MPI_STATUS_IGNORE);
  } while (!done);
}
/** Send a single out-of-band message to process @p dest (serialized form).
 *
 * This overload is selected for types without a direct MPI datatype:
 * the value is serialized into a packed archive and transmitted as
 * MPI_PACKED bytes.  As in the datatype overload, the nonblocking
 * send is polled to completion so incoming messages keep being
 * serviced while we wait.
 */
template<typename T>
typename disable_if<boost::mpi::is_mpi_datatype<T> >::type
send_oob(const mpi_process_group& pg, mpi_process_group::process_id_type dest,
         int tag, const T& value, int block=-1)
{
  using boost::mpi::packed_oarchive;

  // Determine the actual message tag we will use for the send, and which
  // communicator we will use.
  std::pair<boost::mpi::communicator, int> actual
    = pg.actual_communicator_and_tag(tag, block);

  // Serialize the data into a buffer
  packed_oarchive out(actual.first);
  out << value;
  std::size_t size = out.size();

  // Send the actual message data
#ifdef SEND_OOB_BSEND
  if (mpi_process_group::message_buffer_size()) {
    MPI_Bsend(const_cast<void*>(out.address()), size, MPI_PACKED,
              dest, actual.second, actual.first);
    return;
  }
#endif
  MPI_Request request;
  MPI_Isend(const_cast<void*>(out.address()), size, MPI_PACKED,
            dest, actual.second, actual.first, &request);

  // Poll the process group while waiting for our own send to finish.
  int done=0;
  do {
    pg.poll();
    MPI_Test(&request,&done,MPI_STATUS_IGNORE);
  } while (!done);
}
/// Receive a single out-of-band message from @p source (MPI-datatype form).
template<typename T>
typename enable_if<boost::mpi::is_mpi_datatype<T> >::type
receive_oob(const mpi_process_group& pg,
            mpi_process_group::process_id_type source, int tag, T& value, int block=-1);

/// Receive a single out-of-band message from @p source (serialized form).
template<typename T>
typename disable_if<boost::mpi::is_mpi_datatype<T> >::type
receive_oob(const mpi_process_group& pg,
            mpi_process_group::process_id_type source, int tag, T& value, int block=-1);

/// Send an out-of-band message to @p dest and wait for the matching
/// reply, stored in @p reply_value (reply is an MPI datatype).
template<typename SendT, typename ReplyT>
typename enable_if<boost::mpi::is_mpi_datatype<ReplyT> >::type
send_oob_with_reply(const mpi_process_group& pg,
                    mpi_process_group::process_id_type dest,
                    int tag, const SendT& send_value, ReplyT& reply_value,
                    int block = -1);

/// \overload for reply types that require serialization.
template<typename SendT, typename ReplyT>
typename disable_if<boost::mpi::is_mpi_datatype<ReplyT> >::type
send_oob_with_reply(const mpi_process_group& pg,
                    mpi_process_group::process_id_type dest,
                    int tag, const SendT& send_value, ReplyT& reply_value,
                    int block = -1);
} } } // end namespace boost::graph::distributed
// Declare message_header as bitwise-serializable so Boost.Serialization
// may copy it byte-for-byte.
BOOST_IS_BITWISE_SERIALIZABLE(boost::graph::distributed::mpi_process_group::message_header)
namespace boost { namespace mpi {
    // Expose message_header to Boost.MPI as a type with an associated
    // MPI datatype, so it can be transmitted without packing.
    template<>
    struct is_mpi_datatype<boost::graph::distributed::mpi_process_group::message_header> : mpl::true_ { };
} } // end namespace boost::mpi
namespace std {
/// optimized swap for outgoing messages: exchanges internal buffers
/// via the member swap instead of copying.
inline void
swap(boost::graph::distributed::mpi_process_group::outgoing_messages& x,
     boost::graph::distributed::mpi_process_group::outgoing_messages& y)
{
  x.swap(y);
}
}
// Serialize outgoing_messages with no class/version metadata
// (object_serializable) and no object tracking (track_never).
BOOST_CLASS_IMPLEMENTATION(boost::graph::distributed::mpi_process_group::outgoing_messages,object_serializable)
BOOST_CLASS_TRACKING(boost::graph::distributed::mpi_process_group::outgoing_messages,track_never)
#include <boost/graph/distributed/detail/mpi_process_group.ipp>
#endif // BOOST_PARALLEL_MPI_MPI_PROCESS_GROUP_HPP

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,116 @@
// Copyright (C) 2006-2010 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Jeremiah Willcock
// Andrew Lumsdaine
// Distributed version of the one-bit color map
#ifndef BOOST_DISTRIBUTED_ONE_BIT_COLOR_MAP_HPP
#define BOOST_DISTRIBUTED_ONE_BIT_COLOR_MAP_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/one_bit_color_map.hpp>
#include <boost/property_map/parallel/distributed_property_map.hpp>
#include <boost/property_map/parallel/local_property_map.hpp>
namespace boost {
/**
 * Specialization of one_bit_color_map for distributed graphs whose
 * index map is a local_property_map.  The colors themselves are kept
 * in a local one_bit_color_map, which is wrapped in a distributed
 * property map over the index map's process group.
 */
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
class one_bit_color_map<local_property_map<ProcessGroup,GlobalMap,StorageMap> >
  : public parallel::distributed_property_map<ProcessGroup, GlobalMap,
                                              one_bit_color_map<StorageMap> >
{
  typedef one_bit_color_map<StorageMap> local_map;

  typedef parallel::distributed_property_map<ProcessGroup, GlobalMap,
                                             local_map >
    inherited;

  typedef local_property_map<ProcessGroup, GlobalMap, StorageMap>
    index_map_type;

public:
  /**
   * Build a color map with storage for @p initial_size local values,
   * distributed according to @p index.
   */
  one_bit_color_map(std::size_t initial_size,
                    const index_map_type& index = index_map_type())
    : inherited(index.process_group(), index.global(),
                local_map(initial_size, index.base())) { }

  /// Access this map as the distributed property map it derives from.
  inherited& base() { return *this; }
  const inherited& base() const { return *this; }
};
/// Read the color associated with @p key by forwarding the request to
/// the distributed property map that the color map derives from.
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
inline one_bit_color_type
get(one_bit_color_map<local_property_map<ProcessGroup,GlobalMap,StorageMap> >
      const& pm,
    typename property_traits<GlobalMap>::key_type key)
{
  typedef parallel::distributed_property_map<
            ProcessGroup, GlobalMap, one_bit_color_map<StorageMap> >
    delegate_type;
  const delegate_type& delegate = pm.base();
  return get(delegate, key);
}
/// Store @p value as the color for @p key by forwarding the request to
/// the distributed property map that the color map derives from.
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
inline void
put(one_bit_color_map<local_property_map<ProcessGroup,GlobalMap,StorageMap> >
      const& pm,
    typename property_traits<GlobalMap>::key_type key,
    one_bit_color_type value)
{
  typedef parallel::distributed_property_map<
            ProcessGroup, GlobalMap, one_bit_color_map<StorageMap> >
    delegate_type;
  const delegate_type& delegate = pm.base();
  put(delegate, key, value);
}
/**
 * Specialization of one_bit_color_map for graphs whose index map is
 * itself a distributed property map.  The colors are kept in a local
 * one_bit_color_map wrapped in a distributed property map over the
 * index map's process group and global map.
 */
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
class one_bit_color_map<parallel::distributed_property_map<
                          ProcessGroup, GlobalMap, StorageMap> >
  : public parallel::distributed_property_map<
             ProcessGroup, GlobalMap, one_bit_color_map<StorageMap> >
{
  typedef one_bit_color_map<StorageMap> local_map;

  typedef parallel::distributed_property_map<ProcessGroup,GlobalMap,local_map>
    inherited;

  typedef parallel::distributed_property_map<ProcessGroup, GlobalMap,
                                             StorageMap>
    index_map_type;

public:
  /**
   * Build a color map with storage for @p initial_size local values,
   * distributed according to @p index.
   */
  one_bit_color_map(std::size_t initial_size,
                    const index_map_type& index = index_map_type())
    : inherited(index.process_group(), index.global(),
                local_map(initial_size, index.base())) { }

  /// Access this map as the distributed property map it derives from.
  inherited& base() { return *this; }
  const inherited& base() const { return *this; }
};
/// Read the color for @p key; forwards to the underlying distributed
/// property map returned by pm.base().
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
inline one_bit_color_type
get(one_bit_color_map<
      parallel::distributed_property_map<
        ProcessGroup, GlobalMap, one_bit_color_map<StorageMap> > > const& pm,
    typename property_traits<GlobalMap>::key_type key)
{
  return get(pm.base(), key);
}
/// Store @p value as the color for @p key; forwards to the underlying
/// distributed property map returned by pm.base().
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
inline void
put(one_bit_color_map<
      parallel::distributed_property_map<
        ProcessGroup, GlobalMap, one_bit_color_map<StorageMap> > > const& pm,
    typename property_traits<GlobalMap>::key_type key,
    one_bit_color_type value)
{
  put(pm.base(), key, value);
}
} // end namespace boost
#endif // BOOST_DISTRIBUTED_ONE_BIT_COLOR_MAP_HPP

View File

@@ -0,0 +1,226 @@
// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Copyright (C) 2002 Brad King and Douglas Gregor
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
// Brian Barrett
#ifndef BOOST_PARALLEL_GRAPH_PAGE_RANK_HPP
#define BOOST_PARALLEL_GRAPH_PAGE_RANK_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/assert.hpp>
#include <boost/graph/overloading.hpp>
#include <boost/graph/page_rank.hpp>
#include <boost/graph/distributed/concepts.hpp>
#include <boost/property_map/parallel/distributed_property_map.hpp>
#include <boost/property_map/parallel/caching_property_map.hpp>
#include <boost/graph/parallel/algorithm.hpp>
#include <boost/graph/parallel/container_traits.hpp>
// #define WANT_MPI_ONESIDED 1
namespace boost { namespace graph { namespace distributed {
namespace detail {
#ifdef WANT_MPI_ONESIDED
/**
 * One iteration of PageRank using MPI-2 one-sided communication.
 *
 * Every local vertex first folds the damping factor and its out-degree
 * into its own rank, then pushes that value to each adjacent vertex
 * with MPI_Accumulate(MPI_SUM) into the target window @p to_win.  The
 * access epoch is closed with MPI_Win_fence, after which @p from_rank
 * is reset to (1 - damping) so it can serve as the accumulation target
 * of the next iteration.
 */
template<typename Graph, typename RankMap, typename owner_map_t>
void page_rank_step(const Graph& g, RankMap from_rank, MPI_Win to_win,
                    typename property_traits<RankMap>::value_type damping,
                    owner_map_t owner)
{
  typedef typename property_traits<RankMap>::value_type rank_type;

  int me, ret;
  MPI_Comm_rank(MPI_COMM_WORLD, &me);

  // MPI_Accumulate is not required to store the value of the data
  // being sent, only the address.  The value of the memory location
  // must not change until the end of the access epoch, meaning the
  // call to MPI_Fence.  We therefore store the updated value back
  // into the from_rank map before the accumulate rather than using
  // a temporary.  We're going to reset the values in the from_rank
  // before the end of page_rank_step() anyway, so this isn't a huge
  // deal.  But MPI-2 One-sided is an abomination.
  BGL_FORALL_VERTICES_T(u, g, Graph) {
    put(from_rank, u, (damping * get(from_rank, u) / out_degree(u, g)));
    BGL_FORALL_ADJ_T(u, v, g, Graph) {
      ret = MPI_Accumulate(&(from_rank[u]),
                           1, MPI_DOUBLE,
                           get(owner, v), local(v),
                           1, MPI_DOUBLE, MPI_SUM, to_win);
      BOOST_ASSERT(MPI_SUCCESS == ret);
    }
  }
  MPI_Win_fence(0, to_win);

  // Set new rank maps for the other map.  Do this now to get around
  // the stupid synchronization rules of MPI-2 One-sided
  BGL_FORALL_VERTICES_T(v, g, Graph) put(from_rank, v, rank_type(1 - damping));
}
#endif
/** Reduction used by the distributed rank property maps.
 *
 * The one-argument form supplies the default value (a zero rank) for a
 * key with no value yet; the three-argument form combines a local and
 * an incoming contribution by summing them.
 */
template<typename T>
struct rank_accumulate_reducer {
  BOOST_STATIC_CONSTANT(bool, non_default_resolver = true);

  /// Default value for an unseen key: a zero rank.
  template<typename Key>
  T operator()(const Key&) const { return T(0); }

  /// Combine two rank contributions by accumulation.
  template<typename Key>
  T operator()(const Key&, const T& lhs, const T& rhs) const
  { return lhs + rhs; }
};
} // end namespace detail
/**
 * Distributed PageRank driver.
 *
 * Initializes every vertex's rank to 1/n, then alternates
 * page_rank_step between @p rank_map and @p rank_map2 until the
 * @p done predicate is satisfied.  If the final ranks ended up in
 * @p rank_map2, they are copied back into @p rank_map.
 *
 * Fix: the second MPI_Win_set_name previously named @c win again;
 * it now names @c win2 ("rank_map2_win") as intended.
 *
 * @param g         the distributed graph
 * @param rank_map  receives the final rank of each local vertex
 * @param done      termination predicate, invoked as done(map, g)
 * @param damping   PageRank damping factor
 * @param n         total number of vertices
 * @param rank_map2 scratch rank map for the alternating iteration
 */
template<typename Graph, typename RankMap, typename Done, typename RankMap2>
void
page_rank_impl(const Graph& g, RankMap rank_map, Done done,
               typename property_traits<RankMap>::value_type damping,
               typename graph_traits<Graph>::vertices_size_type n,
               RankMap2 rank_map2)
{
  typedef typename property_traits<RankMap>::value_type rank_type;

  int me;
  MPI_Comm_rank(MPI_COMM_WORLD, &me);

  typedef typename property_map<Graph, vertex_owner_t>
    ::const_type vertex_owner_map;
  typename property_map<Graph, vertex_owner_t>::const_type
    owner = get(vertex_owner, g);

  typedef typename boost::graph::parallel::process_group_type<Graph>
    ::type process_group_type;
  typedef typename process_group_type::process_id_type process_id_type;

  process_group_type pg = process_group(g);
  process_id_type id = process_id(pg);
  BOOST_ASSERT(me == id);

  // Every vertex starts with an equal share of the total rank.
  rank_type initial_rank = rank_type(rank_type(1) / n);
  BGL_FORALL_VERTICES_T(v, g, Graph) put(rank_map, v, initial_rank);

#ifdef WANT_MPI_ONESIDED

  BOOST_ASSERT(sizeof(rank_type) == sizeof(double));

  bool to_map_2 = true;
  MPI_Win win, win2;

  MPI_Win_create(&(rank_map[*(vertices(g).first)]),
                 sizeof(double) * num_vertices(g),
                 sizeof(double),
                 MPI_INFO_NULL, MPI_COMM_WORLD, &win);
  MPI_Win_set_name(win, "rank_map_win");
  MPI_Win_create(&(rank_map2[*(vertices(g).first)]),
                 sizeof(double) * num_vertices(g),
                 sizeof(double),
                 MPI_INFO_NULL, MPI_COMM_WORLD, &win2);
  // Fixed: name the second window, not the first one twice.
  MPI_Win_set_name(win2, "rank_map2_win");

  // set initial rank maps for the first iteration...
  BGL_FORALL_VERTICES_T(v, g, Graph) put(rank_map2, v, rank_type(1 - damping));

  MPI_Win_fence(0, win);
  MPI_Win_fence(0, win2);

  while ((to_map_2 && !done(rank_map, g)) ||
         (!to_map_2 && !done(rank_map2, g))) {
    if (to_map_2) {
      graph::distributed::detail::page_rank_step(g, rank_map, win2, damping, owner);
      to_map_2 = false;
    } else {
      graph::distributed::detail::page_rank_step(g, rank_map2, win, damping, owner);
      to_map_2 = true;
    }
  }
  synchronize(boost::graph::parallel::process_group(g));

  MPI_Win_free(&win);
  MPI_Win_free(&win2);

#else
  // The ranks accumulate after each step.
  rank_map.set_reduce(detail::rank_accumulate_reducer<rank_type>());
  rank_map2.set_reduce(detail::rank_accumulate_reducer<rank_type>());
  rank_map.set_consistency_model(boost::parallel::cm_flush | boost::parallel::cm_reset);
  rank_map2.set_consistency_model(boost::parallel::cm_flush | boost::parallel::cm_reset);

  bool to_map_2 = true;
  while ((to_map_2 && !done(rank_map, g)) ||
         (!to_map_2 && !done(rank_map2, g))) {
    /**
     * PageRank can implemented slightly more efficiently on a
     * bidirectional graph than on an incidence graph. However,
     * distributed PageRank requires that we have the rank of the
     * source vertex available locally, so we force the incidence
     * graph implementation, which pushes rank from source to
     * target.
     */
    typedef incidence_graph_tag category;
    if (to_map_2) {
      graph::detail::page_rank_step(g, rank_map, rank_map2, damping,
                                    category());
      to_map_2 = false;
    } else {
      graph::detail::page_rank_step(g, rank_map2, rank_map, damping,
                                    category());
      to_map_2 = true;
    }
    using boost::graph::parallel::process_group;
    synchronize(process_group(g));
  }

  rank_map.reset();
#endif

  // If the last step wrote into rank_map2, copy the result back.
  if (!to_map_2)
    BGL_FORALL_VERTICES_T(v, g, Graph) put(rank_map, v, get(rank_map2, v));
}
/**
 * Distributed PageRank entry point (enabled only for graphs modeling
 * the distributed-graph concept).
 *
 * @param rank_map  receives the final rank of each vertex
 * @param done      termination predicate, invoked as done(map, g)
 * @param damping   PageRank damping factor
 * @param n         total number of vertices
 * @param rank_map2 scratch rank map for the alternating iteration
 */
template<typename Graph, typename RankMap, typename Done, typename RankMap2>
void
page_rank(const Graph& g, RankMap rank_map, Done done,
          typename property_traits<RankMap>::value_type damping,
          typename graph_traits<Graph>::vertices_size_type n,
          RankMap2 rank_map2
          BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph, distributed_graph_tag))
{
  // The parentheses suppress argument-dependent lookup, forcing the
  // call to resolve to the page_rank_impl declared in this namespace.
  (page_rank_impl)(g, rank_map, done, damping, n, rank_map2);
}
/**
 * Repeatedly remove vertices with no outgoing edges ("dangling links")
 * from a distributed graph.
 *
 * Removing a vertex may leave its predecessors dangling, so the sweep
 * is repeated until an iteration removes nothing (the vertex count
 * stops shrinking).
 */
template<typename MutableGraph>
void
remove_dangling_links(MutableGraph& g
                      BOOST_GRAPH_ENABLE_IF_MODELS_PARM(MutableGraph,
                                                        distributed_graph_tag))
{
  typename graph_traits<MutableGraph>::vertices_size_type old_n;
  do {
    old_n = num_vertices(g);

    typename graph_traits<MutableGraph>::vertex_iterator vi, vi_end;
    for (boost::tie(vi, vi_end) = vertices(g); vi != vi_end; /* in loop */) {
      // Advance the iterator before mutating so we never hold an
      // iterator to the removed vertex.  NOTE(review): remove_vertex
      // can still invalidate other vertex iterators for some storage
      // selectors (e.g. vecS) -- confirm the distributed graph types
      // used with this function keep the loop valid.
      typename graph_traits<MutableGraph>::vertex_descriptor v = *vi++;
      if (out_degree(v, g) == 0) {
        clear_vertex(v, g);
        remove_vertex(v, g);
      }
    }
  } while (num_vertices(g) < old_n);
}
} // end namespace distributed
using distributed::page_rank;
using distributed::remove_dangling_links;
} } // end namespace boost::graph
#endif // BOOST_PARALLEL_GRAPH_PAGE_RANK_HPP

View File

@@ -0,0 +1,278 @@
// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_DISTRIBUTED_QUEUE_HPP
#define BOOST_GRAPH_DISTRIBUTED_QUEUE_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/parallel/process_group.hpp>
#include <boost/optional.hpp>
#include <boost/shared_ptr.hpp>
#include <vector>
namespace boost { namespace graph { namespace distributed {
/// Unary predicate that accepts every value it is given.
struct always_push
{
  /// Always true: nothing is ever filtered out of the queue.
  template<typename T>
  bool operator()(const T&) const
  {
    return true;
  }
};
/** A distributed queue adaptor.
*
* Class template @c distributed_queue implements a distributed queue
* across a process group. The distributed queue is an adaptor over an
* existing (local) queue, which must model the @ref Buffer
* concept. Each process stores a distinct copy of the local queue,
* from which it draws or removes elements via the @ref pop and @ref
* top members.
*
* The value type of the local queue must be a model of the @ref
* GlobalDescriptor concept. The @ref push operation of the
* distributed queue passes (via a message) the value to its owning
* processor. Thus, the elements within a particular local queue are
* guaranteed to have the process owning that local queue as an owner.
*
* Synchronization of distributed queues occurs in the @ref empty and
* @ref size functions, which will only return "empty" values (true or
* 0, respectively) when the entire distributed queue is empty. If the
* local queue is empty but the distributed queue is not, the
* operation will block until either condition changes. When the @ref
* size function of a nonempty queue returns, it returns the size of
* the local queue. These semantics were selected so that sequential
* code that processes elements in the queue via the following idiom
* can be parallelized via introduction of a distributed queue:
*
* distributed_queue<...> Q;
* Q.push(x);
* while (!Q.empty()) {
* // do something, that may push a value onto Q
* }
*
* In the parallel version, the initial @ref push operation will place
* the value @c x onto its owner's queue. All processes will
* synchronize at the call to empty, and only the process owning @c x
* will be allowed to execute the loop (@ref Q.empty() returns
* false). This iteration may in turn push values onto other remote
* queues, so when that process finishes execution of the loop body
* and all processes synchronize again in @ref empty, more processes
 * may have nonempty local queues to execute. Once all local queues
 * are empty, @ref Q.empty() returns @c true for all processes.
*
* The distributed queue can receive messages at two different times:
* during synchronization and when polling @ref empty. Messages are
* always received during synchronization, to ensure that accurate
 * local queue sizes can be determined. However, whether @ref empty
* should poll for messages is specified as an option to the
* constructor. Polling may be desired when the order in which
* elements in the queue are processed is not important, because it
* permits fewer synchronization steps and less communication
* overhead. However, when more strict ordering guarantees are
* required, polling may be semantically incorrect. By disabling
* polling, one ensures that parallel execution using the idiom above
* will not process an element at a later "level" before an earlier
* "level".
*
* The distributed queue nearly models the @ref Buffer
* concept. However, the @ref push routine does not necessarily
* increase the result of @c size() by one (although the size of the
* global queue does increase by one).
*/
template<typename ProcessGroup, typename OwnerMap, typename Buffer,
         typename UnaryPredicate = always_push>
class distributed_queue
{
  typedef distributed_queue self_type;

  // Message tags used between the local queues of the process group.
  enum {
    /** Message indicating a remote push. The message contains a
     * single value x of type value_type that is to be pushed on the
     * receiver's queue.
     */
    msg_push,
    /** Push many elements at once. */
    msg_multipush
  };

 public:
  typedef ProcessGroup                     process_group_type;
  typedef Buffer                           buffer_type;
  typedef typename buffer_type::value_type value_type;
  typedef typename buffer_type::size_type  size_type;

  /** Construct a new distributed queue.
   *
   * Build a new distributed queue that communicates over the given @p
   * process_group, whose local queue is initialized via @p buffer and
   * which may or may not poll for messages.
   */
  explicit
  distributed_queue(const ProcessGroup& process_group,
                    const OwnerMap& owner,
                    const Buffer& buffer,
                    bool polling = false);

  /** Construct a new distributed queue.
   *
   * Build a new distributed queue that communicates over the given @p
   * process_group, whose local queue is initialized via @p buffer,
   * whose pushes are filtered through @p pred, and which may or may
   * not poll for messages.
   */
  explicit
  distributed_queue(const ProcessGroup& process_group = ProcessGroup(),
                    const OwnerMap& owner = OwnerMap(),
                    const Buffer& buffer = Buffer(),
                    const UnaryPredicate& pred = UnaryPredicate(),
                    bool polling = false);

  /** Construct a new distributed queue.
   *
   * Build a new distributed queue that communicates over the given @p
   * process_group, whose local queue is default-initialized and which
   * may or may not poll for messages.
   */
  distributed_queue(const ProcessGroup& process_group, const OwnerMap& owner,
                    const UnaryPredicate& pred, bool polling = false);

  /** Virtual destructor required with virtual functions. */
  virtual ~distributed_queue() {}

  /** Push an element onto the distributed queue.
   *
   * The element will be sent to its owner process to be added to that
   * process's local queue. If polling is enabled for this queue and
   * the owner process is the current process, the value will be
   * immediately pushed onto the local queue.
   *
   * Complexity: O(1) messages of size O(sizeof(value_type)) will be
   * transmitted.
   */
  void push(const value_type& x);

  /** Pop an element off the local queue.
   *
   * @pre @c !empty()
   */
  void pop() { buffer.pop(); }

  /**
   * Return the element at the top of the local queue.
   *
   * @pre @c !empty()
   */
  value_type& top() { return buffer.top(); }

  /**
   * \overload
   */
  const value_type& top() const { return buffer.top(); }

  /** Determine if the queue is empty.
   *
   * When the local queue is nonempty, returns @c false. If the local
   * queue is empty, synchronizes with all other processes in the
   * process group until either (1) the local queue becomes nonempty
   * (returns @c false) (2) the entire distributed queue is empty
   * (returns @c true).
   */
  bool empty() const;

  /** Determine the size of the local queue.
   *
   * The behavior of this routine is equivalent to the behavior of
   * @ref empty, except that when @ref empty returns false this
   * function returns the size of the local queue and when @ref empty
   * returns true this function returns zero.
   */
  size_type size() const;

  // private:
  /** Synchronize the distributed queue and determine if all queues
   * are empty.
   *
   * \returns \c true when all local queues are empty, or false if at least
   * one of the local queues is nonempty.
   * Defined as virtual for derived classes like depth_limited_distributed_queue.
   */
  virtual bool do_synchronize() const;

 private:
  // Register the msg_push/msg_multipush handlers with the process group.
  void setup_triggers();

  // Message handlers
  /// Handle a msg_push: a single incoming value to be pushed on this
  /// process's queue.
  void
  handle_push(int source, int tag, const value_type& value,
              trigger_receive_context);

  /// Handle a msg_multipush: a batch of incoming values.
  void
  handle_multipush(int source, int tag, const std::vector<value_type>& values,
                   trigger_receive_context);

  // Communication medium; mutable so the const members empty()/size(),
  // which synchronize, can use it.
  mutable ProcessGroup process_group;

  // Maps a value to the process that owns it (the target of a push).
  OwnerMap owner;

  // The local queue; mutable so values received during synchronization
  // in const members can be stored.
  mutable Buffer buffer;

  // Predicate consulted on pushes (see the predicate constructor).
  UnaryPredicate pred;

  // Whether this queue polls for messages (see class documentation).
  bool polling;

  // Staging buffers for outgoing values -- presumably one per process,
  // batched into msg_multipush messages; verify in queue.ipp.
  typedef std::vector<value_type> outgoing_buffer_t;
  typedef std::vector<outgoing_buffer_t> outgoing_buffers_t;
  shared_ptr<outgoing_buffers_t> outgoing_buffers;
};
/// Helper macro containing the normal names for the template
/// parameters to distributed_queue.
#define BOOST_DISTRIBUTED_QUEUE_PARMS \
typename ProcessGroup, typename OwnerMap, typename Buffer, \
typename UnaryPredicate
/// Helper macro containing the normal template-id for
/// distributed_queue.
#define BOOST_DISTRIBUTED_QUEUE_TYPE \
distributed_queue<ProcessGroup, OwnerMap, Buffer, UnaryPredicate>
/** Synchronize all processes involved with the given distributed queue.
 *
 * This function will synchronize all of the local queues for a given
 * distributed queue, by ensuring that no additional messages are in
 * transit. It is rarely required by the user, because most
 * synchronization of distributed queues occurs via the @c empty or @c
 * size methods.
 */
template<BOOST_DISTRIBUTED_QUEUE_PARMS>
inline void
synchronize(const BOOST_DISTRIBUTED_QUEUE_TYPE& Q)
{ Q.do_synchronize(); }  // delegates to the queue's virtual synchronizer
/** Convenience factory that deduces the distributed_queue type from
 * its arguments, saving callers from spelling out the template
 * parameters explicitly.
 */
template<typename ProcessGroup, typename OwnerMap, typename Buffer>
inline distributed_queue<ProcessGroup, OwnerMap, Buffer>
make_distributed_queue(const ProcessGroup& process_group,
                       const OwnerMap& owner,
                       const Buffer& buffer,
                       bool polling = false)
{
  return distributed_queue<ProcessGroup, OwnerMap, Buffer>
           (process_group, owner, buffer, polling);
}
} } } // end namespace boost::graph::distributed
#include <boost/graph/distributed/detail/queue.ipp>
#undef BOOST_DISTRIBUTED_QUEUE_TYPE
#undef BOOST_DISTRIBUTED_QUEUE_PARMS
#endif // BOOST_GRAPH_DISTRIBUTED_QUEUE_HPP

View File

@@ -0,0 +1,38 @@
// Copyright (C) 2005-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Nick Edmonds
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_DISTRIBUTED_REVERSE_GRAPH_HPP
#define BOOST_GRAPH_DISTRIBUTED_REVERSE_GRAPH_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/reverse_graph.hpp>
#include <boost/graph/parallel/container_traits.hpp>
namespace boost {
namespace graph {
namespace parallel {
/// Retrieve the process group from a reverse graph: a reverse_graph
/// shares the process group of the graph it adapts.
template<typename Graph, typename GraphRef>
struct process_group_type<reverse_graph<Graph, GraphRef> >
  : process_group_type<Graph> { };
}
}
/// Retrieve the process group from a reverse graph
template<typename Graph, typename GraphRef>
inline typename graph::parallel::process_group_type<Graph>::type
process_group(reverse_graph<Graph, GraphRef> const& g) {
  // reverse_graph is only an adaptor: forward to the wrapped graph.
  return process_group(g.m_g);
}
} // namespace boost
#endif

View File

@@ -0,0 +1,164 @@
// Copyright 2004, 2005 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Nick Edmonds
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_DISTRIBUTED_RMAT_GENERATOR_HPP
#define BOOST_GRAPH_DISTRIBUTED_RMAT_GENERATOR_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/assert.hpp>
#include <boost/graph/parallel/algorithm.hpp>
#include <boost/graph/parallel/process_group.hpp>
#include <math.h>
namespace boost {
// Memory-scalable (amount of memory required will scale down
// linearly as the number of processes increases) generator, which
// requires an MPI process group. Run-time is slightly worse than
// the unique rmat generator. Edge list generated is sorted and
// unique.
template<typename ProcessGroup, typename Distribution,
         typename RandomGenerator, typename Graph>
class scalable_rmat_iterator
{
  typedef typename graph_traits<Graph>::directed_category directed_category;
  typedef typename graph_traits<Graph>::vertices_size_type vertices_size_type;
  typedef typename graph_traits<Graph>::edges_size_type edges_size_type;

public:
  typedef std::input_iterator_tag iterator_category;
  typedef std::pair<vertices_size_type, vertices_size_type> value_type;
  typedef const value_type& reference;
  typedef const value_type* pointer;
  typedef void difference_type;

  /// No argument constructor, set to terminating condition.
  scalable_rmat_iterator()
    : gen(), done(true)
  { }

  /** Initialize for edge generation.
   *
   * Generates (collectively, across the process group) m unique R-MAT
   * edges over n vertices with quadrant probabilities a, b, c, d, and
   * keeps only the edges incident on a vertex owned by this process.
   *
   * Fix over the original: if this process ends up owning no edges,
   * the iterator is immediately placed in the terminating state
   * instead of reading values.back() from an empty vector (which was
   * undefined behavior).
   */
  scalable_rmat_iterator(ProcessGroup pg, Distribution distrib,
                         RandomGenerator& gen, vertices_size_type n,
                         edges_size_type m, double a, double b, double c,
                         double d, bool permute_vertices = true)
    : gen(), done(false)
  {
    BOOST_ASSERT(a + b + c + d == 1);
    int id = process_id(pg);

    this->gen.reset(new uniform_01<RandomGenerator>(gen));

    std::vector<vertices_size_type> vertexPermutation;
    if (permute_vertices)
      generate_permutation_vector(gen, vertexPermutation, n);

    int SCALE = int(floor(log(double(n))/log(2.)));

    std::map<value_type, bool> edge_map;

    edges_size_type generated = 0, local_edges = 0;
    do {
      edges_size_type tossed = 0;
      do {
        vertices_size_type u, v;
        boost::tie(u, v) = generate_edge(this->gen, n, SCALE, a, b, c, d);

        if (permute_vertices) {
          u = vertexPermutation[u];
          v = vertexPermutation[v];
        }

        // Lowest vertex number always comes first (this
        // means we don't have to worry about i->j and j->i
        // being in the edge list)
        if (u > v && is_same<directed_category, undirected_tag>::value)
          std::swap(u, v);

        if (distrib(u) == id || distrib(v) == id) {
          if (edge_map.find(std::make_pair(u, v)) == edge_map.end()) {
            edge_map[std::make_pair(u, v)] = true;
            local_edges++;
          } else {
            tossed++;

            // special case - if both u and v are on same
            // proc, ++ twice, since we divide by two (to
            // cover the two process case)
            if (distrib(u) == id && distrib(v) == id)
              tossed++;
          }
        }
        generated++;
      } while (generated < m);
      tossed = all_reduce(pg, tossed, boost::parallel::sum<vertices_size_type>());
      generated -= (tossed / 2);
    } while (generated < m);
    // NGE - Asking for more than n^2 edges will result in an infinite loop here
    //       Asking for a value too close to n^2 edges may as well

    values.reserve(local_edges);
    typename std::map<value_type, bool>::reverse_iterator em_end = edge_map.rend();
    for (typename std::map<value_type, bool>::reverse_iterator em_i = edge_map.rbegin();
         em_i != em_end ;
         ++em_i) {
      values.push_back(em_i->first);
    }

    // Guard against this process owning no edges at all; otherwise
    // values.back() on an empty vector would be undefined behavior.
    if (!values.empty()) {
      current = values.back();
      values.pop_back();
    } else
      done = true;
  }

  reference operator*() const { return current; }
  pointer operator->() const { return &current; }

  scalable_rmat_iterator& operator++()
  {
    if (!values.empty()) {
      current = values.back();
      values.pop_back();
    } else
      done = true;

    return *this;
  }

  scalable_rmat_iterator operator++(int)
  {
    scalable_rmat_iterator temp(*this);
    ++(*this);
    return temp;
  }

  bool operator==(const scalable_rmat_iterator& other) const
  {
    // Two iterators compare equal only when both are exhausted.
    return values.empty() && other.values.empty() && done && other.done;
  }

  bool operator!=(const scalable_rmat_iterator& other) const
  { return !(*this == other); }

private:
  // Parameters
  shared_ptr<uniform_01<RandomGenerator> > gen;

  // Internal data structures
  std::vector<value_type> values;   // locally-owned edges, consumed from the back
  value_type current;               // edge currently exposed by operator*
  bool done;                        // true once all local edges have been yielded
};
} // end namespace boost
#endif // BOOST_GRAPH_DISTRIBUTED_RMAT_GENERATOR_HPP

View File

@@ -0,0 +1,36 @@
// Copyright (C) 2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_DISTRIBUTED_SELECTOR_HPP
#define BOOST_GRAPH_DISTRIBUTED_SELECTOR_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
namespace boost {
/* The default local selector for a distributedS selector. */
struct defaultS {};

/**
 * Selector that specifies that the graph should be distributed
 * among different processes organized based on the given process
 * group.
 */
template<typename ProcessGroup, typename LocalS = defaultS,
         typename DistributionS = defaultS>
struct distributedS
{
  typedef ProcessGroup process_group_type;  // processes the graph spans
  typedef LocalS local_selector;            // selector for the local portion
  typedef DistributionS distribution;       // distribution policy selector
};
}
#endif // BOOST_GRAPH_DISTRIBUTED_SELECTOR_HPP

View File

@@ -0,0 +1,114 @@
// Copyright Daniel Wallin 2007. Use, modification and distribution is
// subject to the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_SHUFFLED_DISTRIBUTION_070923_HPP
#define BOOST_SHUFFLED_DISTRIBUTION_070923_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
# include <boost/assert.hpp>
# include <boost/iterator/counting_iterator.hpp>
# include <vector>
namespace boost { namespace graph { namespace distributed {
// Distribution adaptor that layers a permutation ("shuffle") on top of an
// arbitrary base distribution.  Process ids computed by the base are
// translated forward through mapping_; queries that take a process id
// translate it back through reverse_mapping before delegating to the base.
template <class BaseDistribution>
struct shuffled_distribution : BaseDistribution
{
    typedef std::size_t size_type;

    // Construct with the identity permutation over the processes of pg.
    template <class ProcessGroup>
    shuffled_distribution(ProcessGroup const& pg, BaseDistribution const& base)
      : BaseDistribution(base)
      , n(num_processes(pg))
      , mapping_(make_counting_iterator(size_type(0)), make_counting_iterator(n))
      , reverse_mapping(mapping_)
    {}

    // The current permutation (base process id -> shuffled process id).
    std::vector<size_type> const& mapping() const
    {
        return mapping_;
    }

    // Install a new permutation.  [first, last) is expected to be a
    // permutation of [0, n) — TODO confirm with callers.
    template <class InputIterator>
    void assign_mapping(InputIterator first, InputIterator last)
    {
        mapping_.assign(first, last);
        BOOST_ASSERT(mapping_.size() == n);
        reverse_mapping.resize(mapping_.size());

        // Build the inverse permutation.
        for (size_type idx = 0; idx < mapping_.size(); ++idx)
            reverse_mapping[mapping_[idx]] = idx;
    }

    // Access the underlying (unshuffled) distribution.
    BaseDistribution& base()
    {
        return *this;
    }

    BaseDistribution const& base() const
    {
        return *this;
    }

    template <class ProcessID>
    size_type block_size(ProcessID id, size_type n) const
    {
        // Translate the shuffled id back to the base's id space.
        return base().block_size(reverse_mapping[id], n);
    }

    template <class T>
    size_type operator()(T const& value) const
    {
        // Map the value with the base, then shuffle the resulting id.
        return mapping_[base()(value)];
    }

    template <class ProcessID>
    size_type start(ProcessID id) const
    {
        return base().start(reverse_mapping[id]);
    }

    size_type local(size_type i) const
    {
        return base().local(i);
    }

    size_type global(size_type i) const
    {
        return base().global(i);
    }

    template <class ProcessID>
    size_type global(ProcessID id, size_type n) const
    {
        return base().global(reverse_mapping[id], n);
    }

    // Only the base distribution is serialized; the permutation is
    // reconstructed/assigned separately.
    template <class Archive>
    void serialize(Archive& ar, unsigned long /*version*/)
    {
        ar & serialization::make_nvp("base", base());
    }

    void clear()
    {
        base().clear();
    }

private:
    size_type n;
    std::vector<size_type> mapping_;
    std::vector<size_type> reverse_mapping;
};
}}} // namespace boost::graph::distributed
#endif // BOOST_SHUFFLED_DISTRIBUTION_070923_HPP

View File

@@ -0,0 +1,186 @@
// Copyright (C) 2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_DISTRIBUTED_ST_CONNECTED_HPP
#define BOOST_GRAPH_DISTRIBUTED_ST_CONNECTED_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/graph_traits.hpp>
#include <boost/graph/two_bit_color_map.hpp>
#include <boost/graph/distributed/queue.hpp>
#include <boost/pending/queue.hpp>
#include <boost/graph/iteration_macros.hpp>
#include <boost/graph/parallel/container_traits.hpp>
#include <boost/property_map/property_map.hpp>
#include <boost/graph/parallel/algorithm.hpp>
#include <utility>
#include <boost/optional.hpp>
namespace boost { namespace graph { namespace distributed {
namespace detail {
/* Reduction functor used by all_reduce: ANDs the first components
   ("all queues empty") and ORs the second components ("someone found a
   collision"). */
struct pair_and_or
{
  std::pair<bool, bool>
  operator()(std::pair<bool, bool> lhs, std::pair<bool, bool> rhs) const
  {
    bool all_first = lhs.first && rhs.first;
    bool any_second = lhs.second || rhs.second;
    return std::make_pair(all_first, any_second);
  }
};
} // end namespace detail
/**
 * Distributed s-t connectivity test via bidirectional search.
 *
 * Grows one frontier from @p s (colored gray) and one from @p t (colored
 * green) in alternating supersteps over the graph's process group; the two
 * vertices are connected iff the differently-colored frontiers ever
 * collide.  Runs collectively: every process in the group must call it.
 *
 * @param g      the distributed graph
 * @param s      the source vertex
 * @param t      the target vertex
 * @param color  distributed color map (holds white/gray/green/black)
 * @param owner  property map from vertex to owning process id
 * @returns true iff s and t lie in the same connected component
 */
template<typename DistributedGraph, typename ColorMap, typename OwnerMap>
bool
st_connected(const DistributedGraph& g,
             typename graph_traits<DistributedGraph>::vertex_descriptor s,
             typename graph_traits<DistributedGraph>::vertex_descriptor t,
             ColorMap color, OwnerMap owner)
{
  using boost::graph::parallel::process_group;
  using boost::graph::parallel::process_group_type;
  using boost::parallel::all_reduce;

  typedef typename property_traits<ColorMap>::value_type Color;
  typedef color_traits<Color> ColorTraits;
  typedef typename process_group_type<DistributedGraph>::type ProcessGroup;
  typedef typename ProcessGroup::process_id_type ProcessID;
  typedef typename graph_traits<DistributedGraph>::vertex_descriptor Vertex;

  // Set all vertices to white (unvisited)
  BGL_FORALL_VERTICES_T(v, g, DistributedGraph)
    put(color, v, ColorTraits::white());

  // "color" plays the role of a color map, with no synchronization: each
  // process maintains its own view and conflicts are resolved by the
  // explicit message exchange below.
  set_property_map_role(vertex_color, color);
  color.set_consistency_model(0);

  // Vertices found from the source are gray
  put(color, s, ColorTraits::gray());

  // Vertices found from the target are green
  put(color, t, ColorTraits::green());

  ProcessGroup pg = process_group(g);
  ProcessID rank = process_id(pg);

  // Build a local queue, seeded with whichever of s, t this process owns
  queue<Vertex> Q;
  if (get(owner, s) == rank) Q.push(s);
  if (get(owner, t) == rank) Q.push(t);

  // Holds locally-owned discoveries for the NEXT superstep
  queue<Vertex> other_Q;

  while (true) {
    bool found = false;

    // Process all vertices in the local queue
    while (!found && !Q.empty()) {
      Vertex u = Q.top(); Q.pop();
      Color u_color = get(color, u);

      BGL_FORALL_OUTEDGES_T(u, e, g, DistributedGraph) {
        Vertex v = target(e, g);
        Color v_color = get(color, v);
        if (v_color == ColorTraits::white()) {
          // We have not seen "v" before; mark it with the same color as u.
          // NOTE(review): this inner u_color shadows the outer one; u's
          // color cannot have changed since the outer read (only white
          // vertices are recolored in this loop), so the two are equal.
          Color u_color = get(color, u);
          put(color, v, u_color);

          // Either push v into the local queue or send it off to its
          // owner (message tag 0; payload pairs v with "is gray?").
          ProcessID v_owner = get(owner, v);
          if (v_owner == rank)
            other_Q.push(v);
          else
            send(pg, v_owner, 0,
                 std::make_pair(v, u_color == ColorTraits::gray()));
        } else if (v_color != ColorTraits::black() && u_color != v_color) {
          // Colors have collided. We're done!
          found = true;
          break;
        }
      }

      // u is done, so mark it black
      put(color, u, ColorTraits::black());
    }

    // Ensure that all transmitted messages have been received.
    synchronize(pg);

    // Move all of the send-to-self values into the local Q.
    other_Q.swap(Q);

    if (!found) {
      // Receive all messages
      while (optional<std::pair<ProcessID, int> > msg = probe(pg)) {
        std::pair<Vertex, bool> data;
        receive(pg, msg->first, msg->second, data);

        // Determine the colors of u and v, the source and target
        // vertices (v is local).
        Vertex v = data.first;
        Color v_color = get(color, v);
        Color u_color = data.second? ColorTraits::gray() : ColorTraits::green();
        if (v_color == ColorTraits::white()) {
          // v had no color before, so give it u's color and push it
          // into the queue.
          Q.push(v);
          put(color, v, u_color);
        } else if (v_color != ColorTraits::black() && u_color != v_color) {
          // Colors have collided. We're done!
          found = true;
          break;
        }
      }
    }

    // Combine across processes: first = all queues empty, second = any
    // process found a collision.
    std::pair<bool, bool> results = all_reduce(pg,
            boost::parallel::detail::make_untracked_pair(Q.empty(), found),
            detail::pair_and_or());

    // If someone found the answer, we're done!
    if (results.second)
      return true;

    // If all queues are empty, we're done.
    if (results.first)
      return false;
  }
}
/**
 * Convenience overload of st_connected that obtains vertex ownership from
 * the graph's internal vertex_owner property map.
 */
template<typename DistributedGraph, typename ColorMap>
inline bool
st_connected(const DistributedGraph& g,
             typename graph_traits<DistributedGraph>::vertex_descriptor s,
             typename graph_traits<DistributedGraph>::vertex_descriptor t,
             ColorMap color)
{
  return st_connected(g, s, t, color, get(vertex_owner, g));
}
/**
 * Convenience overload of st_connected that allocates a temporary
 * two-bit color map (four colors) indexed by the graph's vertex_index
 * property map.
 */
template<typename DistributedGraph>
inline bool
st_connected(const DistributedGraph& g,
             typename graph_traits<DistributedGraph>::vertex_descriptor s,
             typename graph_traits<DistributedGraph>::vertex_descriptor t)
{
  return st_connected(g, s, t,
                      make_two_bit_color_map(num_vertices(g),
                                             get(vertex_index, g)));
}
} } } // end namespace boost::graph::distributed
#endif // BOOST_GRAPH_DISTRIBUTED_ST_CONNECTED_HPP

View File

@@ -0,0 +1,988 @@
// Copyright (C) 2004-2008 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Nick Edmonds
// Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_GRAPH_DISTRIBUTED_SCC_HPP
#define BOOST_GRAPH_DISTRIBUTED_SCC_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
// #define PBGL_SCC_DEBUG
#include <boost/assert.hpp>
#include <boost/property_map/property_map.hpp>
#include <boost/property_map/parallel/distributed_property_map.hpp>
#include <boost/property_map/parallel/caching_property_map.hpp>
#include <boost/graph/parallel/algorithm.hpp>
#include <boost/graph/parallel/process_group.hpp>
#include <boost/graph/distributed/queue.hpp>
#include <boost/graph/distributed/filtered_graph.hpp>
#include <boost/pending/indirect_cmp.hpp>
#include <boost/graph/breadth_first_search.hpp>
#include <boost/graph/graph_traits.hpp>
#include <boost/graph/overloading.hpp>
#include <boost/graph/distributed/concepts.hpp>
#include <boost/graph/distributed/local_subgraph.hpp>
#include <boost/graph/parallel/properties.hpp>
#include <boost/graph/named_function_params.hpp>
#include <boost/graph/random.hpp>
#include <boost/graph/distributed/reverse_graph.hpp>
#include <boost/optional.hpp>
#include <boost/graph/distributed/detail/filtered_queue.hpp>
#include <boost/graph/distributed/adjacency_list.hpp>
#ifdef PBGL_SCC_DEBUG
#include <iostream>
#include <cstdlib>
#include <iomanip>
#include <sys/time.h>
#include <boost/graph/distributed/graphviz.hpp> // for ostringstream
#endif
#include <vector>
#include <map>
#include <boost/graph/parallel/container_traits.hpp>
#ifdef PBGL_SCC_DEBUG
# include <boost/graph/accounting.hpp>
#endif /* PBGL_SCC_DEBUG */
// If your graph is likely to have large numbers of small strongly connected
// components then running the sequential SCC algorithm on the local subgraph
// and filtering components with no remote edges may increase performance
// #define FILTER_LOCAL_COMPONENTS
namespace boost { namespace graph { namespace distributed { namespace detail {
/* Scratch vertex sets accumulated per BFS start vertex during the
   divide-and-conquer step: BFS predecessors, BFS successors, their
   intersection (the SCC of the start vertex), and their union. */
template<typename vertex_descriptor>
struct v_sets{
  std::vector<vertex_descriptor> pred, succ, intersect, ps_union;
};
/* Serialize a collection of vertex sets into one flat vector.  Each set
   is preceded by a null_vertex() sentinel so unmarshal_set() can recover
   the set boundaries.  The input is taken by const reference (it is only
   read) to avoid copying the potentially large nested vectors. */
template<typename Graph>
void
marshal_set( const std::vector<std::vector<typename graph_traits<Graph>::vertex_descriptor> >& in,
             std::vector<typename graph_traits<Graph>::vertex_descriptor>& out )
{
  for( std::size_t i = 0; i < in.size(); ++i ) {
    // Sentinel marking the start of the next set
    out.insert( out.end(), graph_traits<Graph>::null_vertex() );
    out.insert( out.end(), in[i].begin(), in[i].end() );
  }
}
/* Un-serialize vertex set */
template<typename Graph>
void
unmarshal_set( std::vector<typename graph_traits<Graph>::vertex_descriptor> in,
std::vector<std::vector<typename graph_traits<Graph>::vertex_descriptor> >& out )
{
typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;
typedef typename graph_traits<Graph>::vertex_iterator vertex_iterator;
while( !in.empty() ) {
typename std::vector<vertex_descriptor>::iterator end
= std::find( in.begin(), in.end(), graph_traits<Graph>::null_vertex() );
if( end == in.begin() )
in.erase( in.begin() );
else {
out.push_back(std::vector<vertex_descriptor>());
out[out.size() - 1].insert( out[out.size() - 1].end(), in.begin(), end );
in.erase( in.begin(), end );
}
}
}
/* Membership predicate over a vertex subset.  Holds a non-owning pointer
   to the set; a default-constructed instance holds no set and must not
   be invoked. */
template <typename Set>
struct in_subset {
  in_subset() : subset_(0) { }
  in_subset(const Set& s) : subset_(&s) { }

  // True iff x belongs to the bound subset.
  template <typename Elt>
  bool operator()(const Elt& x) const {
    return subset_->find(x) != subset_->end();
  }

private:
  const Set* subset_;
};
/* Readable property map that maps every key to itself.  Used when the
   forward and reverse graphs share vertex descriptors, so no translation
   table between them is required. */
template<typename T>
struct vertex_identity_property_map
  : public boost::put_get_helper<T, vertex_identity_property_map<T> >
{
  typedef T key_type;
  typedef T value_type;
  typedef T reference;
  typedef boost::readable_property_map_tag category;

  /// Identity lookup: the key is the value.
  inline value_type operator[](const key_type& v) const { return v; }
  /// No cached state to discard.
  inline void clear() { }
};
/* The identity map carries no distributed state, so synchronizing it is
   a no-op. */
template <typename T>
inline void synchronize( vertex_identity_property_map<T> & ) { }
/* BFS visitor for SCC: propagates the label of the BFS start vertex
   along tree edges, so every reached vertex records which start vertex
   discovered it. */
template<typename Graph, typename SourceMap>
struct scc_discovery_visitor : bfs_visitor<>
{
  scc_discovery_visitor(SourceMap& labels)
    : labels_(labels) {}

  template<typename Edge>
  void tree_edge(Edge e, const Graph& g)
  {
    // The target inherits the label of the vertex that discovered it.
    put(labels_, target(e, g), get(labels_, source(e, g)));
  }

private:
  SourceMap& labels_;
};
} } } } /* End namespace boost::graph::distributed::detail */
namespace boost { namespace graph { namespace distributed {
/* Message tags for the Fleischer-Hendrickson-Pinar SCC implementation.
   Each payload message is preceded by a *_size_msg carrying the element
   count so the receiver can presize its buffer. */
enum fhp_message_tags { fhp_edges_size_msg, fhp_add_edges_msg, fhp_pred_size_msg,
                        fhp_pred_msg, fhp_succ_size_msg, fhp_succ_msg };
/**
 * Distributed strong components via the Fleischer-Hendrickson-Pinar
 * divide-and-conquer scheme: pick start vertices, run a forward BFS (in
 * g) and a backward BFS (in the reverse graph gr) from each; the
 * intersection of the reached sets is one SCC, and the remaining
 * vertices split into three independent subproblems (set minus union,
 * pred minus intersection, succ minus intersection), which are iterated
 * on until no sets remain.  Collective: all processes in g's process
 * group must call it.
 *
 * @param g                the (forward) distributed graph
 * @param c                property map receiving, per vertex, the
 *                         representative vertex of its SCC
 * @param gr               the reverse graph of g
 * @param fr               map from g's vertices to gr's vertices
 * @param rf               map from gr's vertices back to g's vertices
 * @param vertex_index_map vertex index map; reused for both g and gr
 *                         (the code relies on them sharing indices)
 */
template<typename Graph, typename ReverseGraph,
         typename VertexComponentMap, typename IsoMapFR, typename IsoMapRF,
         typename VertexIndexMap>
void
fleischer_hendrickson_pinar_strong_components(const Graph& g,
                                              VertexComponentMap c,
                                              const ReverseGraph& gr,
                                              IsoMapFR fr, IsoMapRF rf,
                                              VertexIndexMap vertex_index_map)
{
  typedef typename graph_traits<Graph>::vertex_iterator vertex_iterator;
  typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;
  typedef typename graph_traits<ReverseGraph>::vertex_iterator rev_vertex_iterator;
  typedef typename graph_traits<ReverseGraph>::vertex_descriptor rev_vertex_descriptor;
  typedef typename boost::graph::parallel::process_group_type<Graph>::type
    process_group_type;
  typedef typename process_group_type::process_id_type process_id_type;
  typedef iterator_property_map<typename std::vector<vertex_descriptor>::iterator,
                                VertexIndexMap> ParentMap;
  typedef iterator_property_map<typename std::vector<default_color_type>::iterator,
                                VertexIndexMap> ColorMap;
  typedef iterator_property_map<typename std::vector<vertex_descriptor>::iterator,
                                VertexIndexMap> Rev_ParentMap;
  typedef std::vector<std::pair<vertex_descriptor, vertex_descriptor> > VertexPairVec;

  typedef typename property_map<Graph, vertex_owner_t>::const_type
    OwnerMap;

  OwnerMap owner = get(vertex_owner, g);

  using boost::graph::parallel::process_group;
  process_group_type pg = process_group(g);
  process_id_type id = process_id(pg);
  int num_procs = num_processes(pg);
  int n = 0;

  // Total vertex count across all processes
  int my_n = num_vertices(g);
  all_reduce(pg, &my_n, &my_n+1, &n, std::plus<int>());

  //
  // Initialization
  //

#ifdef PBGL_SCC_DEBUG
  accounting::time_type start = accounting::get_time();
#endif

  vertex_iterator vstart, vend;
  rev_vertex_iterator rev_vstart, rev_vend;
  std::vector<std::vector<vertex_descriptor> > vertex_sets, new_vertex_sets;

  vertex_sets.push_back(std::vector<vertex_descriptor>());

  // Remove vertices that do not have at least one in edge and one out edge
  // (such vertices cannot be in a nontrivial SCC)
  new_vertex_sets.push_back(std::vector<vertex_descriptor>());
  for( boost::tie(vstart, vend) = vertices(g); vstart != vend; vstart++ )
    if( out_degree( get(fr, *vstart), gr) > 0 && out_degree(*vstart, g) > 0 )
      new_vertex_sets[0].push_back( *vstart );

  // Perform sequential SCC on local subgraph, filter all components with external
  // edges, mark remaining components and remove them from vertex_sets
#ifdef FILTER_LOCAL_COMPONENTS
  // This doesn't actually speed up SCC in connected graphs it seems, but it does work
  // and may help in the case where there are lots of small strong components.
  {
    local_subgraph<const Graph> ls(g);
    typedef typename property_map<local_subgraph<const Graph>, vertex_index_t>::type
      local_index_map_type;
    local_index_map_type local_index = get(vertex_index, ls);

    std::vector<int> ls_components_vec(num_vertices(ls));
    typedef iterator_property_map<std::vector<int>::iterator, local_index_map_type>
      ls_components_map_type;
    ls_components_map_type ls_component(ls_components_vec.begin(), local_index);

    int num_comp = boost::strong_components(ls, ls_component);

    // Create map of components
    std::map<int, std::vector<vertex_descriptor> > local_comp_map;
    typedef typename graph_traits<local_subgraph<const Graph> >::vertex_iterator ls_vertex_iterator;
    ls_vertex_iterator vstart, vend;
    for( boost::tie(vstart,vend) = vertices(ls); vstart != vend; vstart++ )
      local_comp_map[get(ls_component, *vstart)].push_back( *vstart );

    // Filter components that have no non-local edges
    typedef typename graph_traits<Graph>::adjacency_iterator adjacency_iterator;
    typedef typename graph_traits<ReverseGraph>::adjacency_iterator rev_adjacency_iterator;
    adjacency_iterator abegin, aend;
    rev_adjacency_iterator rev_abegin, rev_aend;
    for( std::size_t i = 0; i < num_comp; ++i ) {
      bool local = true;
      for( std::size_t j = 0; j < local_comp_map[i].size(); j++ ) {
        for( boost::tie(abegin,aend) = adjacent_vertices(local_comp_map[i][j], g);
             abegin != aend; abegin++ )
          if( get(owner, *abegin) != id ) {
            local = false;
            break;
          }

        if( local )
          for( boost::tie(rev_abegin,rev_aend) = adjacent_vertices(get(fr, local_comp_map[i][j]), gr);
               rev_abegin != rev_aend; rev_abegin++ )
            if( get(owner, *rev_abegin) != id ) {
              local = false;
              break;
            }

        if( !local ) break;
      }

      if( local ) // Mark and remove from new_vertex_sets
        for( std::size_t j = 0; j < local_comp_map[i].size(); j++ ) {
          put( c, local_comp_map[i][j], local_comp_map[i][0] );
          typename std::vector<vertex_descriptor>::iterator pos =
            std::find(new_vertex_sets[0].begin(), new_vertex_sets[0].end(), local_comp_map[i][j]);
          if( pos != new_vertex_sets[0].end() )
            new_vertex_sets[0].erase(pos);
        }
    }
  }
#endif // FILTER_LOCAL_COMPONENTS

  // Every process contributes its surviving vertices; vertex_sets[0]
  // becomes the globally-known initial working set
  all_gather( pg, new_vertex_sets[0].begin(), new_vertex_sets[0].end(), vertex_sets[0] );
  new_vertex_sets.clear();

#ifdef PBGL_SCC_DEBUG
  accounting::time_type end = accounting::get_time();
  if(id == 0)
    std::cerr << "Trim local SCCs time = " << accounting::print_time(end - start) << " seconds.\n";
#endif

  if( vertex_sets[0].empty() ) return;

  //
  // Recursively determine SCCs
  //

#ifdef PBGL_SCC_DEBUG
  int iterations = 0;
#endif

  // Only need to be able to map starting vertices for BFS from now on
  fr.clear();

  do {

#ifdef PBGL_SCC_DEBUG
  if(id == 0) {
    printf("\n\nIteration %d:\n\n", iterations++);

    if( iterations > 1 ) {
      end = accounting::get_time();
      std::cerr << "Running main loop destructors time = " << accounting::print_time(end - start) << " seconds.\n";
    }

    start = accounting::get_time();
  }
#endif

    // Get forward->reverse mappings for BFS start vertices
    // (ghost-cell fetch: the get() requests are satisfied by synchronize)
    for (std::size_t i = 0; i < vertex_sets.size(); ++i)
      get(fr, vertex_sets[i][0]);
    synchronize(fr);

    // Determine local vertices to start BFS from: sets are dealt out to
    // processes round-robin (set i belongs to process i % num_procs)
    std::vector<vertex_descriptor> local_start;
    for( std::size_t i = id; i < vertex_sets.size(); i += num_procs )
      local_start.push_back(vertex_sets[i][0]);

    if( local_start.empty() )
      local_start.push_back(vertex_sets[0][0]);

    // Make filtered graphs
    typedef std::set<vertex_descriptor> VertexSet;
    typedef std::set<rev_vertex_descriptor> Rev_VertexSet;

    VertexSet filter_set_g;
    Rev_VertexSet filter_set_gr;
    typename VertexSet::iterator fs;

    int active_vertices = 0;
    for (std::size_t i = 0; i < vertex_sets.size(); i++)
      active_vertices += vertex_sets[i].size();

    // This is a completely random bound: only bother filtering when the
    // active sets are a small fraction of the whole graph
    if ( active_vertices < 0.05*n ) {
      // TODO: This set insertion is ridiculously inefficient, make it an in-place-merge?
      for (std::size_t i = 0; i < vertex_sets.size(); i++)
        filter_set_g.insert(vertex_sets[i].begin(), vertex_sets[i].end());

      for (fs = filter_set_g.begin(); fs != filter_set_g.end(); ++fs )
        filter_set_gr.insert(get(fr, *fs));
    }

    filtered_graph<const Graph, keep_all, detail::in_subset<VertexSet> >
      fg(g, keep_all(), detail::in_subset<VertexSet>(filter_set_g));

    filtered_graph<const ReverseGraph, keep_all, detail::in_subset<VertexSet> >
      fgr(gr, keep_all(), detail::in_subset<VertexSet>(filter_set_gr));

    // Add additional starting vertices to BFS queue
    typedef filtered_queue<queue<vertex_descriptor>, boost::detail::has_not_been_seen<VertexIndexMap> >
      local_queue_type;
    typedef boost::graph::distributed::distributed_queue<process_group_type, OwnerMap, local_queue_type>
      queue_t;

    typedef typename property_map<ReverseGraph, vertex_owner_t>::const_type
      RevOwnerMap;

    typedef filtered_queue<queue<rev_vertex_descriptor>, boost::detail::has_not_been_seen<VertexIndexMap> >
      rev_local_queue_type;
    typedef boost::graph::distributed::distributed_queue<process_group_type, RevOwnerMap, rev_local_queue_type>
      rev_queue_t;

    queue_t Q(process_group(g),
              owner,
              make_filtered_queue(queue<vertex_descriptor>(),
                                  boost::detail::has_not_been_seen<VertexIndexMap>
                                  (num_vertices(g), vertex_index_map)),
              false);

    rev_queue_t Qr(process_group(gr),
                   get(vertex_owner, gr),
                   make_filtered_queue(queue<rev_vertex_descriptor>(),
                                       boost::detail::has_not_been_seen<VertexIndexMap>
                                       (num_vertices(gr), vertex_index_map)),
                   false);

    // local_start[0] is passed to breadth_first_search below; the rest
    // are pushed here so all local BFS trees grow in one pass
    for( std::size_t i = 1; i < local_start.size(); ++i ) {
      Q.push(local_start[i]);
      Qr.push(get(fr, local_start[i]));
    }

#ifdef PBGL_SCC_DEBUG
  end = accounting::get_time();
  if(id == 0)
    std::cerr << " Initialize BFS time = " << accounting::print_time(end - start) << " seconds.\n";
  start = accounting::get_time();
#endif

#ifdef PBGL_SCC_DEBUG
  accounting::time_type start2 = accounting::get_time();
#endif

    // Forward BFS
    std::vector<default_color_type> color_map_s(num_vertices(g));
    ColorMap color_map(color_map_s.begin(), vertex_index_map);
    std::vector<vertex_descriptor> succ_map_s(num_vertices(g), graph_traits<Graph>::null_vertex());
    ParentMap succ_map(succ_map_s.begin(), vertex_index_map);

    // Each start vertex is its own label; scc_discovery_visitor
    // propagates these labels along tree edges
    for( std::size_t i = 0; i < vertex_sets.size(); ++i )
      put(succ_map, vertex_sets[i][0], vertex_sets[i][0]);

#ifdef PBGL_SCC_DEBUG
  accounting::time_type end2 = accounting::get_time();
  if(id == 0)
    std::cerr << " Initialize forward BFS time = " << accounting::print_time(end2 - start2) << " seconds.\n";
#endif

    if (active_vertices < 0.05*n)
      breadth_first_search(fg, local_start[0], Q,
                           detail::scc_discovery_visitor<filtered_graph<const Graph, keep_all,
                                                                        detail::in_subset<VertexSet> >, ParentMap>
                           (succ_map),
                           color_map);
    else
      breadth_first_search(g, local_start[0], Q,
                           detail::scc_discovery_visitor<const Graph, ParentMap>(succ_map),
                           color_map);

#ifdef PBGL_SCC_DEBUG
  start2 = accounting::get_time();
#endif

    // Reverse BFS
    color_map.clear(); // reuse color map since g and gr have same vertex index
    std::vector<vertex_descriptor> pred_map_s(num_vertices(gr), graph_traits<Graph>::null_vertex());
    Rev_ParentMap pred_map(pred_map_s.begin(), vertex_index_map);

    for( std::size_t i = 0; i < vertex_sets.size(); ++i )
      put(pred_map, get(fr, vertex_sets[i][0]), vertex_sets[i][0]);

#ifdef PBGL_SCC_DEBUG
  end2 = accounting::get_time();
  if(id == 0)
    std::cerr << " Initialize reverse BFS time = " << accounting::print_time(end2 - start2) << " seconds.\n";
#endif

    if (active_vertices < 0.05*n)
      breadth_first_search(fgr, get(fr, local_start[0]),
                           Qr,
                           detail::scc_discovery_visitor<filtered_graph<const ReverseGraph, keep_all,
                                                                        detail::in_subset<Rev_VertexSet> >, Rev_ParentMap>
                           (pred_map),
                           color_map);
    else
      breadth_first_search(gr, get(fr, local_start[0]),
                           Qr,
                           detail::scc_discovery_visitor<const ReverseGraph, Rev_ParentMap>(pred_map),
                           color_map);

#ifdef PBGL_SCC_DEBUG
  end = accounting::get_time();
  if(id == 0)
    std::cerr << " Perform forward and reverse BFS time = " << accounting::print_time(end - start) << " seconds.\n";
  start = accounting::get_time();
#endif

    // Send predecessors and successors discovered by this proc to the proc responsible for
    // this BFS tree
    typedef struct detail::v_sets<vertex_descriptor> Vsets;
    std::map<vertex_descriptor, Vsets> set_map;

    std::map<vertex_descriptor, int> dest_map;

    std::vector<VertexPairVec> successors(num_procs);
    std::vector<VertexPairVec> predecessors(num_procs);

    // Calculate destinations for messages (same round-robin assignment
    // used when choosing local_start above)
    for (std::size_t i = 0; i < vertex_sets.size(); ++i)
      dest_map[vertex_sets[i][0]] = i % num_procs;

    for( boost::tie(vstart, vend) = vertices(g); vstart != vend; vstart++ ) {
      vertex_descriptor v = get(succ_map, *vstart);
      if( v != graph_traits<Graph>::null_vertex() ) {
        if (dest_map[v] == id)
          set_map[v].succ.push_back(*vstart);
        else
          successors[dest_map[v]].push_back( std::make_pair(v, *vstart) );
      }
    }

    for( boost::tie(rev_vstart, rev_vend) = vertices(gr); rev_vstart != rev_vend; rev_vstart++ ) {
      vertex_descriptor v = get(pred_map, *rev_vstart);
      if( v != graph_traits<Graph>::null_vertex() ) {
        if (dest_map[v] == id)
          set_map[v].pred.push_back(get(rf, *rev_vstart));
        else
          predecessors[dest_map[v]].push_back( std::make_pair(v, get(rf, *rev_vstart)) );
      }
    }

    // Send predecessor and successor messages
    for (process_id_type i = 0; i < num_procs; ++i) {
      if (!successors[i].empty()) {
        send(pg, i, fhp_succ_size_msg, successors[i].size());
        send(pg, i, fhp_succ_msg, &successors[i][0], successors[i].size());
      }
      if (!predecessors[i].empty()) {
        send(pg, i, fhp_pred_size_msg, predecessors[i].size());
        send(pg, i, fhp_pred_msg, &predecessors[i][0], predecessors[i].size());
      }
    }
    synchronize(pg);

    // Receive predecessor and successor messages and handle them
    while (optional<std::pair<process_id_type, int> > m = probe(pg)) {
      BOOST_ASSERT(m->second == fhp_succ_size_msg || m->second == fhp_pred_size_msg);
      std::size_t num_requests;
      receive(pg, m->first, m->second, num_requests);
      VertexPairVec requests(num_requests);
      if (m->second == fhp_succ_size_msg) {
        receive(pg, m->first, fhp_succ_msg, &requests[0],
                num_requests);

        std::map<vertex_descriptor, int> added;
        for (std::size_t i = 0; i < requests.size(); ++i) {
          set_map[requests[i].first].succ.push_back(requests[i].second);
          added[requests[i].first]++;
        }

        // If order of vertex traversal in vertices() is std::less<vertex_descriptor>,
        // then the successor sets will be in order
        for (std::size_t i = 0; i < local_start.size(); ++i)
          if (added[local_start[i]] > 0)
            std::inplace_merge(set_map[local_start[i]].succ.begin(),
                               set_map[local_start[i]].succ.end() - added[local_start[i]],
                               set_map[local_start[i]].succ.end(),
                               std::less<vertex_descriptor>());
      } else {
        receive(pg, m->first, fhp_pred_msg, &requests[0],
                num_requests);

        std::map<vertex_descriptor, int> added;
        for (std::size_t i = 0; i < requests.size(); ++i) {
          set_map[requests[i].first].pred.push_back(requests[i].second);
          added[requests[i].first]++;
        }

        // Only merge here if rf is the identity map (descriptors are
        // already in g's order); otherwise the sets are sorted later
        if (boost::is_same<detail::vertex_identity_property_map<vertex_descriptor>, IsoMapRF>::value)
          for (std::size_t i = 0; i < local_start.size(); ++i)
            if (added[local_start[i]] > 0)
              std::inplace_merge(set_map[local_start[i]].pred.begin(),
                                 set_map[local_start[i]].pred.end() - added[local_start[i]],
                                 set_map[local_start[i]].pred.end(),
                                 std::less<vertex_descriptor>());
      }
    }

#ifdef PBGL_SCC_DEBUG
  end = accounting::get_time();
  if(id == 0)
    std::cerr << " All gather successors and predecessors time = " << accounting::print_time(end - start) << " seconds.\n";
  start = accounting::get_time();
#endif

    //
    // Filter predecessor and successor sets and perform set arithmetic
    //
    new_vertex_sets.clear();

    if( std::size_t(id) < vertex_sets.size() ) { //If this proc has one or more unique starting points
      for( std::size_t i = 0; i < local_start.size(); ++i ) {
        vertex_descriptor v = local_start[i];

        // Replace this sort with an in-place merges during receive step if possible
        if (!boost::is_same<detail::vertex_identity_property_map<vertex_descriptor>, IsoMapRF>::value)
          std::sort(set_map[v].pred.begin(), set_map[v].pred.end(), std::less<vertex_descriptor>());

        // Limit predecessor and successor sets to members of the original set
        std::vector<vertex_descriptor> temp;

        std::set_intersection( vertex_sets[id + i*num_procs].begin(), vertex_sets[id + i*num_procs].end(),
                               set_map[v].pred.begin(), set_map[v].pred.end(),
                               back_inserter(temp),
                               std::less<vertex_descriptor>());
        set_map[v].pred.clear();
        std::swap(set_map[v].pred, temp);

        std::set_intersection( vertex_sets[id + i*num_procs].begin(), vertex_sets[id + i*num_procs].end(),
                               set_map[v].succ.begin(), set_map[v].succ.end(),
                               back_inserter(temp),
                               std::less<vertex_descriptor>());
        set_map[v].succ.clear();
        std::swap(set_map[v].succ, temp);

        // Intersection(pred, succ) -- this is the SCC containing v
        std::set_intersection(set_map[v].pred.begin(), set_map[v].pred.end(),
                              set_map[v].succ.begin(), set_map[v].succ.end(),
                              back_inserter(set_map[v].intersect),
                              std::less<vertex_descriptor>());

        // Union(pred, succ)
        std::set_union(set_map[v].pred.begin(), set_map[v].pred.end(),
                       set_map[v].succ.begin(), set_map[v].succ.end(),
                       back_inserter(set_map[v].ps_union),
                       std::less<vertex_descriptor>());

        new_vertex_sets.push_back(std::vector<vertex_descriptor>());
        // Original set - Union(pred, succ)
        std::set_difference(vertex_sets[id + i*num_procs].begin(), vertex_sets[id + i*num_procs].end(),
                            set_map[v].ps_union.begin(), set_map[v].ps_union.end(),
                            back_inserter(new_vertex_sets[new_vertex_sets.size() - 1]),
                            std::less<vertex_descriptor>());

        set_map[v].ps_union.clear();

        new_vertex_sets.push_back(std::vector<vertex_descriptor>());
        // Pred - Intersect(pred, succ)
        std::set_difference(set_map[v].pred.begin(), set_map[v].pred.end(),
                            set_map[v].intersect.begin(), set_map[v].intersect.end(),
                            back_inserter(new_vertex_sets[new_vertex_sets.size() - 1]),
                            std::less<vertex_descriptor>());

        set_map[v].pred.clear();

        new_vertex_sets.push_back(std::vector<vertex_descriptor>());
        // Succ - Intersect(pred, succ)
        std::set_difference(set_map[v].succ.begin(), set_map[v].succ.end(),
                            set_map[v].intersect.begin(), set_map[v].intersect.end(),
                            back_inserter(new_vertex_sets[new_vertex_sets.size() - 1]),
                            std::less<vertex_descriptor>());

        set_map[v].succ.clear();

        // Label SCC just identified with the 'first' vertex in that SCC
        for( std::size_t j = 0; j < set_map[v].intersect.size(); j++ )
          put(c, set_map[v].intersect[j], set_map[v].intersect[0]);

        set_map[v].intersect.clear();
      }
    }

#ifdef PBGL_SCC_DEBUG
  end = accounting::get_time();
  if(id == 0)
    std::cerr << " Perform set arithemetic time = " << accounting::print_time(end - start) << " seconds.\n";
  start = accounting::get_time();
#endif

    // Remove sets of size 1 from new_vertex_sets (singleton sets are
    // trivial SCCs; they are labeled in the final pass below)
    typename std::vector<std::vector<vertex_descriptor> >::iterator vviter;
    for( vviter = new_vertex_sets.begin(); vviter != new_vertex_sets.end(); /*in loop*/ )
      if( (*vviter).size() < 2 )
        vviter = new_vertex_sets.erase( vviter );
      else
        vviter++;

    // All gather new sets and recur (gotta marshal and unmarshal sets first)
    vertex_sets.clear();
    std::vector<vertex_descriptor> serial_sets, all_serial_sets;
    detail::marshal_set<Graph>( new_vertex_sets, serial_sets );
    all_gather( pg, serial_sets.begin(), serial_sets.end(), all_serial_sets );
    detail::unmarshal_set<Graph>( all_serial_sets, vertex_sets );

#ifdef PBGL_SCC_DEBUG
  end = accounting::get_time();
  if(id == 0) {
    std::cerr << " Serialize and gather new vertex sets time = " << accounting::print_time(end - start) << " seconds.\n\n\n";
    printf("Vertex sets: %d\n", (int)vertex_sets.size() );
    for( std::size_t i = 0; i < vertex_sets.size(); ++i )
      printf(" %d: %d\n", i, (int)vertex_sets[i].size() );
  }
  start = accounting::get_time();
#endif

    // HACK!?! -- This would be more properly implemented as a topological sort
    // Remove vertices without an edge to another vertex in the set and an edge from another
    // vertex in the set
    typedef typename graph_traits<Graph>::out_edge_iterator out_edge_iterator;
    out_edge_iterator estart, eend;
    typedef typename graph_traits<ReverseGraph>::out_edge_iterator r_out_edge_iterator;
    r_out_edge_iterator restart, reend;
    for (std::size_t i = 0; i < vertex_sets.size(); ++i) {
      std::vector<vertex_descriptor> new_set;
      for (std::size_t j = 0; j < vertex_sets[i].size(); ++j) {
        vertex_descriptor v = vertex_sets[i][j];
        if (get(owner, v) == id) {
          // Scan for an out-edge staying inside the set...
          boost::tie(estart, eend) = out_edges(v, g);
          while (estart != eend && find(vertex_sets[i].begin(), vertex_sets[i].end(),
                                        target(*estart,g)) == vertex_sets[i].end()) estart++;
          if (estart != eend) {
            // ...and an in-edge (out-edge in gr) from inside the set
            boost::tie(restart, reend) = out_edges(get(fr, v), gr);
            while (restart != reend && find(vertex_sets[i].begin(), vertex_sets[i].end(),
                                            get(rf, target(*restart,gr))) == vertex_sets[i].end()) restart++;
            if (restart != reend)
              new_set.push_back(v);
          }
        }
      }
      vertex_sets[i].clear();
      all_gather(pg, new_set.begin(), new_set.end(), vertex_sets[i]);
      std::sort(vertex_sets[i].begin(), vertex_sets[i].end(), std::less<vertex_descriptor>());
    }
#ifdef PBGL_SCC_DEBUG
  end = accounting::get_time();
  if(id == 0)
    std::cerr << " Trim vertex sets time = " << accounting::print_time(end - start) << " seconds.\n\n\n";
  start = accounting::get_time();
#endif

  } while ( !vertex_sets.empty() );

  // Label vertices not in a SCC as their own SCC
  for( boost::tie(vstart, vend) = vertices(g); vstart != vend; vstart++ )
    if( get(c, *vstart) == graph_traits<Graph>::null_vertex() )
      put(c, *vstart, *vstart);

  synchronize(c);
}
// Builds gr as an edge-reversed copy of the distributed graph g and fills in
// the vertex isomorphisms between the two graphs:
//   fr: vertex of g  -> corresponding vertex of gr (forward -> reverse)
//   rf: vertex of gr -> corresponding vertex of g  (reverse -> forward)
// Edges whose reversed source is owned by another process are batched and
// shipped to the owner with the fhp_* message tags.
template<typename Graph, typename ReverseGraph, typename IsoMap>
void
build_reverse_graph( const Graph& g, ReverseGraph& gr, IsoMap& fr, IsoMap& rf )
{
typedef typename graph_traits<Graph>::vertex_iterator vertex_iterator;
typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;
typedef typename graph_traits<Graph>::out_edge_iterator out_edge_iterator;
typedef typename boost::graph::parallel::process_group_type<Graph>::type process_group_type;
typedef typename process_group_type::process_id_type process_id_type;
typedef std::vector<std::pair<vertex_descriptor, vertex_descriptor> > VertexPairVec;
typedef typename graph_traits<Graph>::directed_category directed_category;
typename property_map<Graph, vertex_owner_t>::const_type
owner = get(vertex_owner, g);
process_group_type pg = process_group(g);
process_id_type id = process_id(pg);
int n;
vertex_iterator vstart, vend;
int num_procs = num_processes(pg);
vertex_descriptor v;
out_edge_iterator oestart, oeend;
// Create one vertex in gr for every local vertex of g and record the
// mapping in both directions.
for( boost::tie(vstart, vend) = vertices(g); vstart != vend; vstart++ )
{
v = add_vertex(gr);
put(fr, *vstart, v);
put(rf, v, *vstart);
}
// Give gr the same vertex distribution as g so ownership lines up.
gr.distribution() = g.distribution();
// Compute the global vertex count as the sum of the local counts.
int my_n = num_vertices(g);
all_reduce(pg, &my_n, &my_n+1, &n, std::plus<int>());
// Touch fr for every global vertex so its ghost cells get populated,
// then synchronize to make the forward->reverse mapping globally usable.
for (int i = 0; i < n; ++i)
get(fr, vertex(i,g));
synchronize(fr);
// Add edges to gr
// Each local edge (u,v) of g becomes (fr[v], fr[u]) in gr.
std::vector<std::pair<vertex_descriptor, vertex_descriptor> > new_edges;
for( boost::tie(vstart, vend) = vertices(g); vstart != vend; vstart++ )
for( boost::tie(oestart, oeend) = out_edges(*vstart, g); oestart != oeend; oestart++ )
new_edges.push_back( std::make_pair(get(fr, target(*oestart,g)), get(fr, source(*oestart, g))) );
std::vector<VertexPairVec> edge_requests(num_procs);
typename std::vector<std::pair<vertex_descriptor, vertex_descriptor> >::iterator iter;
// Insert locally-owned reversed edges directly; queue the rest for the
// process that owns the reversed edge's source.
for( iter = new_edges.begin(); iter != new_edges.end(); iter++ ) {
std::pair<vertex_descriptor, vertex_descriptor> p1 = *iter;
if( get(owner, p1.first ) == id )
add_edge( p1.first, p1.second, gr );
else
edge_requests[get(owner, p1.first)].push_back(p1);
}
new_edges.clear();
// Send edge addition requests
for (process_id_type p = 0; p < num_procs; ++p) {
if (!edge_requests[p].empty()) {
VertexPairVec reqs(edge_requests[p].begin(), edge_requests[p].end());
send(pg, p, fhp_edges_size_msg, reqs.size());
send(pg, p, fhp_add_edges_msg, &reqs[0], reqs.size());
}
}
synchronize(pg);
// Receive edge addition requests and handle them
while (optional<std::pair<process_id_type, int> > m = probe(pg)) {
// A size message always precedes its payload message from the same sender.
BOOST_ASSERT(m->second == fhp_edges_size_msg);
std::size_t num_requests;
receive(pg, m->first, m->second, num_requests);
VertexPairVec requests(num_requests);
receive(pg, m->first, fhp_add_edges_msg, &requests[0],
num_requests);
for( std::size_t i = 0; i < requests.size(); ++i )
add_edge( requests[i].first, requests[i].second, gr );
}
synchronize(gr);
}
// Translates the per-vertex SCC "root" labelling in r into dense component
// numbers [0, num_components) written into c, and returns the component
// count.  Every process gathers the complete set of roots, so all processes
// compute the same numbering; the count is additionally sent from process 0
// so every process returns a consistent value.
template<typename Graph, typename VertexComponentMap, typename ComponentMap>
typename property_traits<ComponentMap>::value_type
number_components(const Graph& g, VertexComponentMap r, ComponentMap c)
{
typedef typename boost::graph::parallel::process_group_type<Graph>::type process_group_type;
typedef typename graph_traits<Graph>::vertex_iterator vertex_iterator;
typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;
typedef typename property_traits<ComponentMap>::value_type ComponentMapType;
std::vector<vertex_descriptor> my_roots, all_roots;
vertex_iterator vstart, vend;
// Collect the distinct roots among the local vertices (linear scan per
// vertex; my_roots stays small relative to the vertex count).
for( boost::tie(vstart, vend) = vertices(g); vstart != vend; vstart++ )
if( find( my_roots.begin(), my_roots.end(), get(r, *vstart) ) == my_roots.end() )
my_roots.push_back( get(r, *vstart) );
// Every process receives every process's roots, in the same order.
all_gather( process_group(g), my_roots.begin(), my_roots.end(), all_roots );
/* Number components */
std::map<vertex_descriptor, ComponentMapType> comp_numbers;
ComponentMapType c_num = 0;
// Compute component numbers
for (std::size_t i = 0; i < all_roots.size(); ++i )
if ( comp_numbers.count(all_roots[i]) == 0 )
comp_numbers[all_roots[i]] = c_num++;
// Broadcast component numbers
for( boost::tie(vstart, vend) = vertices(g); vstart != vend; vstart++ )
put( c, *vstart, comp_numbers[get(r,*vstart)] );
// Broadcast number of components
if (process_id(process_group(g)) == 0) {
typedef typename process_group_type::process_size_type
process_size_type;
for (process_size_type dest = 1, n = num_processes(process_group(g));
dest != n; ++dest)
send(process_group(g), dest, 0, c_num);
}
synchronize(process_group(g));
if (process_id(process_group(g)) != 0) receive(process_group(g), 0, 0, c_num);
synchronize(c);
return c_num;
}
// Incidence-graph variant: the graph exposes only out-edges, so an explicit
// distributed reverse graph must be materialized before running the FHP
// algorithm.  Writes component numbers into c and returns their count.
template<typename Graph, typename ComponentMap, typename VertexComponentMap,
typename VertexIndexMap>
typename property_traits<ComponentMap>::value_type
fleischer_hendrickson_pinar_strong_components_impl
(const Graph& g,
ComponentMap c,
VertexComponentMap r,
VertexIndexMap vertex_index_map,
incidence_graph_tag)
{
typedef typename graph_traits<Graph>::vertex_iterator vertex_iterator;
typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;
typedef iterator_property_map<typename std::vector<vertex_descriptor>::iterator,
VertexIndexMap> IsoMap;
typename boost::graph::parallel::process_group_type<Graph>::type pg = process_group(g);
#ifdef PBGL_SCC_DEBUG
accounting::time_type start = accounting::get_time();
#endif
// The reverse graph shares g's process group and uses vecS vertex storage.
typedef adjacency_list<listS,
distributedS<typename boost::graph::parallel::process_group_type<Graph>::type, vecS>,
directedS > ReverseGraph;
ReverseGraph gr(0, pg);
// Storage for the vertex isomorphisms between g and gr, indexed by
// vertex_index_map.
std::vector<vertex_descriptor> fr_s(num_vertices(g));
std::vector<vertex_descriptor> rf_s(num_vertices(g));
IsoMap fr(fr_s.begin(), vertex_index_map); // fr = forward->reverse
IsoMap rf(rf_s.begin(), vertex_index_map); // rf = reverse->forward
build_reverse_graph(g, gr, fr, rf);
#ifdef PBGL_SCC_DEBUG
accounting::time_type end = accounting::get_time();
if(process_id(process_group(g)) == 0)
std::cerr << "Reverse graph initialization time = " << accounting::print_time(end - start) << " seconds.\n";
#endif
// Label each vertex with its SCC root in r, then densify into c.
fleischer_hendrickson_pinar_strong_components(g, r, gr, fr, rf,
vertex_index_map);
typename property_traits<ComponentMap>::value_type c_num = number_components(g, r, c);
return c_num;
}
// Bidirectional-graph variant: in-edges are already available, so the
// reverse graph is just a zero-copy adaptor over g and the vertex
// isomorphisms are identity maps.  Writes component numbers into c and
// returns their count.
template<typename Graph, typename ComponentMap, typename VertexComponentMap,
typename VertexIndexMap>
typename property_traits<ComponentMap>::value_type
fleischer_hendrickson_pinar_strong_components_impl
(const Graph& g,
ComponentMap c,
VertexComponentMap r,
VertexIndexMap vertex_index_map,
bidirectional_graph_tag)
{
typedef typename graph_traits<Graph>::vertex_descriptor vertex_t;
// View of g with all edges reversed; no copy is made.
reverse_graph<Graph> reversed(g);
// Vertices keep their identity between g and its reversed view.
detail::vertex_identity_property_map<vertex_t> forward_to_reverse;
detail::vertex_identity_property_map<vertex_t> reverse_to_forward;
fleischer_hendrickson_pinar_strong_components(g, r, reversed,
forward_to_reverse,
reverse_to_forward,
vertex_index_map);
// Convert the root labelling in r into dense component numbers in c.
return number_components(g, r, c);
}
// Entry point for the distributed Fleischer-Hendrickson-Pinar strong
// components algorithm.  Writes a dense component number for each vertex
// into c and returns the number of strongly connected components.
template<typename Graph, typename ComponentMap, typename VertexIndexMap>
inline typename property_traits<ComponentMap>::value_type
fleischer_hendrickson_pinar_strong_components
(const Graph& g,
ComponentMap c,
VertexIndexMap vertex_index_map)
{
typedef typename graph_traits<Graph>::vertex_descriptor vertex_t;
typedef iterator_property_map<typename std::vector<vertex_t>::iterator,
VertexIndexMap> RootMap;
typename boost::graph::parallel::process_group_type<Graph>::type pg
= process_group(g);
// With a single process there is nothing to distribute: run the
// sequential BGL algorithm on the local subgraph.
if (num_processes(pg) == 1) {
local_subgraph<const Graph> local_view(g);
return boost::strong_components(local_view, c);
}
// Intermediate labelling: each vertex's SCC root, initially null.
std::vector<vertex_t> root_storage(num_vertices(g),
graph_traits<Graph>::null_vertex());
RootMap roots(root_storage.begin(), vertex_index_map);
// Dispatch on the traversal category: bidirectional graphs serve as
// their own reverse graph, incidence graphs need an explicit one.
return fleischer_hendrickson_pinar_strong_components_impl
(g, c, roots, vertex_index_map,
typename graph_traits<Graph>::traversal_category());
}
// Convenience overload that uses the graph's internal vertex_index map.
template<typename Graph, typename ComponentMap>
inline typename property_traits<ComponentMap>::value_type
fleischer_hendrickson_pinar_strong_components(const Graph& g,
ComponentMap c)
{
typename property_map<Graph, vertex_index_t>::const_type
index_map = get(vertex_index, g);
return fleischer_hendrickson_pinar_strong_components(g, c, index_map);
}
} // end namespace distributed
using distributed::fleischer_hendrickson_pinar_strong_components;
} // end namespace graph
// Named-parameter overload of strong_components for distributed graphs.
// The named parameters are currently ignored; the call forwards to the
// distributed FHP implementation.
template<class Graph, class ComponentMap, class P, class T, class R>
inline typename property_traits<ComponentMap>::value_type
strong_components
(const Graph& g, ComponentMap comp,
const bgl_named_params<P, T, R>&
BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph, distributed_graph_tag))
{
return graph::fleischer_hendrickson_pinar_strong_components(g, comp);
}
// Plain overload of strong_components for distributed graphs; enabled only
// when Graph models distributed_graph_tag so the sequential overload is
// untouched.
template<class Graph, class ComponentMap>
inline typename property_traits<ComponentMap>::value_type
strong_components
(const Graph& g, ComponentMap comp
BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph, distributed_graph_tag))
{
return graph::fleischer_hendrickson_pinar_strong_components(g, comp);
}
} /* end namespace boost */
#endif // BOOST_GRAPH_DISTRIBUTED_SCC_HPP

View File

@@ -0,0 +1,116 @@
// Copyright (C) 2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Jeremiah Willcock
// Andrew Lumsdaine
// Distributed version of the two-bit color map
#ifndef BOOST_DISTRIBUTED_TWO_BIT_COLOR_MAP_HPP
#define BOOST_DISTRIBUTED_TWO_BIT_COLOR_MAP_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/two_bit_color_map.hpp>
#include <boost/property_map/parallel/distributed_property_map.hpp>
#include <boost/property_map/parallel/local_property_map.hpp>
namespace boost {
// Distributed specialization of two_bit_color_map for graphs whose vertex
// index map is a local_property_map.  The colors are held in a local
// two_bit_color_map<StorageMap> on each process, wrapped in a
// distributed_property_map keyed by the global index.
// Fix: parameter name "inital_size" was misspelled; renamed to
// "initial_size" (parameter names are not part of the call interface).
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
class two_bit_color_map<local_property_map<ProcessGroup,GlobalMap,StorageMap> >
: public parallel::distributed_property_map<ProcessGroup, GlobalMap,
two_bit_color_map<StorageMap> >
{
typedef two_bit_color_map<StorageMap> local_map;
typedef parallel::distributed_property_map<ProcessGroup, GlobalMap,
local_map >
inherited;
typedef local_property_map<ProcessGroup, GlobalMap, StorageMap>
index_map_type;
public:
// initial_size: number of local vertices to allocate color storage for.
two_bit_color_map(std::size_t initial_size,
const index_map_type& index = index_map_type())
: inherited(index.process_group(), index.global(),
local_map(initial_size, index.base())) { }
// Expose the underlying distributed property map.
inherited& base() { return *this; }
const inherited& base() const { return *this; }
};
// Reads a color from the distributed two-bit color map by delegating to
// its distributed_property_map base.
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
inline two_bit_color_type
get(two_bit_color_map<local_property_map<ProcessGroup,GlobalMap,StorageMap> >
const& pm,
typename property_traits<GlobalMap>::key_type key)
{
return get(pm.base(), key);
}
// Writes a color into the distributed two-bit color map by delegating to
// its distributed_property_map base.
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
inline void
put(two_bit_color_map<local_property_map<ProcessGroup,GlobalMap,StorageMap> >
const& pm,
typename property_traits<GlobalMap>::key_type key,
two_bit_color_type value)
{
put(pm.base(), key, value);
}
// Distributed specialization of two_bit_color_map for graphs whose vertex
// index map is itself a distributed_property_map.  As above, colors live in
// a local two_bit_color_map<StorageMap> per process.
// Fix: parameter name "inital_size" was misspelled; renamed to
// "initial_size" (parameter names are not part of the call interface).
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
class two_bit_color_map<parallel::distributed_property_map<
ProcessGroup, GlobalMap, StorageMap> >
: public parallel::distributed_property_map<
ProcessGroup, GlobalMap, two_bit_color_map<StorageMap> >
{
typedef two_bit_color_map<StorageMap> local_map;
typedef parallel::distributed_property_map<ProcessGroup,GlobalMap,local_map>
inherited;
typedef parallel::distributed_property_map<ProcessGroup, GlobalMap,
StorageMap>
index_map_type;
public:
// initial_size: number of local vertices to allocate color storage for.
two_bit_color_map(std::size_t initial_size,
const index_map_type& index = index_map_type())
: inherited(index.process_group(), index.global(),
local_map(initial_size, index.base())) { }
// Expose the underlying distributed property map.
inherited& base() { return *this; }
const inherited& base() const { return *this; }
};
// Reads a color by delegating to the distributed_property_map base.
// NOTE(review): the third template argument in the map type here is
// two_bit_color_map<StorageMap>, not plain StorageMap as in the class
// specialization above — presumably intentional to match the base type,
// but worth confirming against callers.
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
inline two_bit_color_type
get(two_bit_color_map<
parallel::distributed_property_map<
ProcessGroup, GlobalMap, two_bit_color_map<StorageMap> > > const& pm,
typename property_traits<GlobalMap>::key_type key)
{
return get(pm.base(), key);
}
// Writes a color by delegating to the distributed_property_map base.
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
inline void
put(two_bit_color_map<
parallel::distributed_property_map<
ProcessGroup, GlobalMap, two_bit_color_map<StorageMap> > > const& pm,
typename property_traits<GlobalMap>::key_type key,
two_bit_color_type value)
{
put(pm.base(), key, value);
}
} // end namespace boost
#endif // BOOST_DISTRIBUTED_TWO_BIT_COLOR_MAP_HPP

View File

@@ -0,0 +1,85 @@
// Copyright (C) 2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
// This file contains the "unsafe_serialize" routine, which transforms
// types they may not be serializable (such as void*) into
// serializable equivalents.
#ifndef PBGL_UNSAFE_SERIALIZE_HPP
#define PBGL_UNSAFE_SERIALIZE_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/mpi/datatype.hpp>
#include <boost/serialization/is_bitwise_serializable.hpp>
#include <boost/mpl/bool.hpp>
#include <boost/mpl/if.hpp>
#include <boost/cstdint.hpp>
#include <boost/static_assert.hpp>
#include <boost/type_traits.hpp>
#include <utility>
// void* has no serialization of its own; declare it bitwise-serializable so
// it can travel in messages as raw bytes.
BOOST_IS_BITWISE_SERIALIZABLE(void*)
namespace boost { namespace mpi {
// Tell Boost.MPI that void* maps onto a native MPI datatype.
template<> struct is_mpi_datatype<void*> : mpl::true_ { };
} } // end namespace boost::mpi
namespace boost {
// Select an integer type the same size as a pointer (preferring int, then
// long, then intmax_t); pointers are reinterpreted as this type when they
// must be serialized.
typedef mpl::if_c<(sizeof(int) == sizeof(void*)),
int,
mpl::if_c<(sizeof(long) == sizeof(void*)),
long,
mpl::if_c<(sizeof(void*) <= sizeof(boost::intmax_t)),
boost::intmax_t,
void>::type
>::type
>::type ptr_serialize_type;
// Fail at compile time if no integer type can hold a pointer.
BOOST_STATIC_ASSERT ((!boost::is_void<ptr_serialize_type>::value));
// unsafe_serialize passes ordinary values through unchanged...
template<typename T> inline T& unsafe_serialize(T& x) { return x; }
// ...but reinterprets a void* as its integer stand-in so that it becomes
// serializable.
inline ptr_serialize_type& unsafe_serialize(void*& x)
{ return reinterpret_cast<ptr_serialize_type&>(x); }
// Force Boost.MPI to serialize a void* like a ptr_serialize_type
namespace mpi {
// The argument is unused; only the type drives the datatype selection.
template<> inline MPI_Datatype get_mpi_datatype<void*>(void* const& x)
{
return get_mpi_datatype<ptr_serialize_type>();
}
}
// A pair whose members are serialized through unsafe_serialize, allowing
// pairs that contain void* to be transmitted.
template<typename T, typename U>
struct unsafe_pair
{
T first;
U second;
// Default-constructed members are left uninitialized, like std::pair
// of trivial types.
unsafe_pair() { }
unsafe_pair(const T& t, const U& u) : first(t), second(u) { }
unsafe_pair(const std::pair<T, U>& p) : first(p.first), second(p.second) { }
// Serialize both members via their "unsafe" (pointer-capable) forms.
template<typename Archiver>
void serialize(Archiver& ar, const unsigned /*version*/)
{
ar & unsafe_serialize(first);
ar & unsafe_serialize(second);
}
};
// Lexicographic ordering, identical to std::pair's operator<.
template<typename T, typename U>
bool operator<(unsafe_pair<T,U> const& x, unsafe_pair<T,U> const& y)
{
if (x.first < y.first) return true;
if (y.first < x.first) return false;
return x.second < y.second;
}
} // end namespace boost
#endif // PBGL_UNSAFE_SERIALIZE_HPP

View File

@@ -0,0 +1,403 @@
// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
#ifndef BOOST_VERTEX_LIST_ADAPTOR_HPP
#define BOOST_VERTEX_LIST_ADAPTOR_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/graph_traits.hpp>
#include <vector>
#include <boost/shared_ptr.hpp>
#include <boost/property_map/property_map.hpp>
#include <boost/graph/parallel/algorithm.hpp>
#include <boost/graph/parallel/container_traits.hpp>
#include <boost/property_map/vector_property_map.hpp>
namespace boost { namespace graph {
// --------------------------------------------------------------------------
// Global index map built from a distribution
// --------------------------------------------------------------------------
// Readable property map that computes a vertex's global index from its
// owning process and local index, using the distribution's global()
// function.  All members are public so the free get() below can reach them.
template<typename Distribution, typename OwnerPropertyMap,
typename LocalPropertyMap>
class distribution_global_index_map
{
public:
typedef std::size_t value_type;
typedef value_type reference;
typedef typename property_traits<OwnerPropertyMap>::key_type key_type;
typedef readable_property_map_tag category;
distribution_global_index_map(const Distribution& distribution,
const OwnerPropertyMap& owner,
const LocalPropertyMap& local)
: distribution_(distribution), owner(owner), local(local) { }
// The distribution that maps (owner, local index) -> global index.
Distribution distribution_;
// Per-vertex owning process.
OwnerPropertyMap owner;
// Per-vertex local index on its owner.
LocalPropertyMap local;
};
// Computes the global index of x as distribution.global(owner(x), local(x)).
template<typename Distribution, typename OwnerPropertyMap,
typename LocalPropertyMap>
inline
typename distribution_global_index_map<Distribution, OwnerPropertyMap,
LocalPropertyMap>::value_type
get(const distribution_global_index_map<Distribution, OwnerPropertyMap,
LocalPropertyMap>& p,
typename distribution_global_index_map<Distribution, OwnerPropertyMap,
LocalPropertyMap>::key_type x)
{
using boost::get;
return p.distribution_.global(get(p.owner, x), get(p.local, x));
}
// Convenience factory: builds a distribution_global_index_map for graph g
// from its vertex_owner and vertex_local property maps.
template<typename Graph, typename Distribution>
inline
distribution_global_index_map<
Distribution,
typename property_map<Graph, vertex_owner_t>::const_type,
typename property_map<Graph, vertex_local_t>::const_type>
make_distribution_global_index_map(const Graph& g, const Distribution& d)
{
typedef distribution_global_index_map<
Distribution,
typename property_map<Graph, vertex_owner_t>::const_type,
typename property_map<Graph, vertex_local_t>::const_type>
result_type;
return result_type(d, get(vertex_owner, g), get(vertex_local, g));
}
// --------------------------------------------------------------------------
// Global index map built from a distributed index map and list of vertices
// --------------------------------------------------------------------------
// Wraps a distributed index map so it retains the index of every key it has
// ever seen (unbounded ghost cells), which a global index map requires.
template<typename IndexMap>
class stored_global_index_map : public IndexMap
{
public:
typedef readable_property_map_tag category;
stored_global_index_map(const IndexMap& index_map) : IndexMap(index_map) {
// When we have a global index, we need to always have the indices
// of every key we've seen
this->set_max_ghost_cells(0);
}
};
// --------------------------------------------------------------------------
// Global index map support code
// --------------------------------------------------------------------------
namespace detail {
// Generic case: ordinary index maps need no initialization.
template<typename PropertyMap, typename ForwardIterator>
inline void
initialize_global_index_map(const PropertyMap&,
ForwardIterator, ForwardIterator)
{ }
// Stored maps: assign consecutive indices 0, 1, ... to the vertices in
// [first, last), recording each one locally.
template<typename IndexMap, typename ForwardIterator>
void
initialize_global_index_map(stored_global_index_map<IndexMap>& p,
ForwardIterator first, ForwardIterator last)
{
typedef typename property_traits<IndexMap>::value_type index_type;
index_type next_index = 0;
for (; first != last; ++first)
local_put(p, *first, next_index++);
}
}
// --------------------------------------------------------------------------
// Adapts a Distributed Vertex List Graph to a Vertex List Graph
// --------------------------------------------------------------------------
// Presents a distributed graph as an ordinary Vertex List Graph by gathering
// the complete vertex set onto every process at construction time.  All
// other graph operations forward to the underlying graph.
template<typename Graph, typename GlobalIndexMap>
class vertex_list_adaptor : public graph_traits<Graph>
{
typedef graph_traits<Graph> inherited;
typedef typename inherited::traversal_category base_traversal_category;
public:
typedef typename inherited::vertex_descriptor vertex_descriptor;
typedef typename std::vector<vertex_descriptor>::iterator vertex_iterator;
typedef typename std::vector<vertex_descriptor>::size_type
vertices_size_type;
// The adaptor adds VertexListGraph on top of whatever g already models.
struct traversal_category
: public virtual base_traversal_category,
public virtual vertex_list_graph_tag {};
vertex_list_adaptor(const Graph& g,
const GlobalIndexMap& index_map = GlobalIndexMap())
: g(&g), index_map(index_map)
{
using boost::vertices;
// Gather every process's vertices into a single shared vector.
all_vertices_.reset(new std::vector<vertex_descriptor>());
all_gather(process_group(), vertices(g).first, vertices(g).second,
*all_vertices_);
// Stored index maps are numbered here from the gathered order;
// other map kinds are left untouched.
detail::initialize_global_index_map(this->index_map,
all_vertices_->begin(),
all_vertices_->end());
}
// The underlying distributed graph.
const Graph& base() const { return *g; }
// --------------------------------------------------------------------------
// Distributed Container
// --------------------------------------------------------------------------
typedef typename boost::graph::parallel::process_group_type<Graph>::type
process_group_type;
process_group_type process_group() const
{
using boost::graph::parallel::process_group;
return process_group(*g);
}
// Iterate over the gathered (global) vertex set.
std::pair<vertex_iterator, vertex_iterator> vertices() const
{ return std::make_pair(all_vertices_->begin(), all_vertices_->end()); }
vertices_size_type num_vertices() const { return all_vertices_->size(); }
GlobalIndexMap get_index_map() const { return index_map; }
private:
const Graph* g;
GlobalIndexMap index_map;
// Shared so copies of the adaptor reuse the gathered vertex list.
shared_ptr<std::vector<vertex_descriptor> > all_vertices_;
};
// Factory: adapts g using an explicitly supplied global index map.
template<typename Graph, typename GlobalIndexMap>
inline vertex_list_adaptor<Graph, GlobalIndexMap>
make_vertex_list_adaptor(const Graph& g, const GlobalIndexMap& index_map)
{ return vertex_list_adaptor<Graph, GlobalIndexMap>(g, index_map); }
namespace detail {
// Computes the default global index map type for a graph: a stored map
// over a vector_property_map keyed by the graph's local vertex_index.
template<typename Graph>
class default_global_index_map
{
typedef typename graph_traits<Graph>::vertices_size_type value_type;
typedef typename property_map<Graph, vertex_index_t>::const_type local_map;
public:
typedef vector_property_map<value_type, local_map> distributed_map;
typedef stored_global_index_map<distributed_map> type;
};
}
// Factory: adapts g using a freshly constructed default global index map
// (numbered by the adaptor's constructor from the gathered vertex order).
template<typename Graph>
inline
vertex_list_adaptor<Graph,
typename detail::default_global_index_map<Graph>::type>
make_vertex_list_adaptor(const Graph& g)
{
typedef typename detail::default_global_index_map<Graph>::type
GlobalIndexMap;
typedef typename detail::default_global_index_map<Graph>::distributed_map
DistributedMap;
typedef vertex_list_adaptor<Graph, GlobalIndexMap> result_type;
return result_type(g,
GlobalIndexMap(DistributedMap(num_vertices(g),
get(vertex_index, g))));
}
// --------------------------------------------------------------------------
// Incidence Graph
// --------------------------------------------------------------------------
// All incidence-graph operations forward to the adapted graph.
template<typename Graph, typename GlobalIndexMap>
inline typename vertex_list_adaptor<Graph, GlobalIndexMap>::vertex_descriptor
source(typename vertex_list_adaptor<Graph, GlobalIndexMap>::edge_descriptor e,
const vertex_list_adaptor<Graph, GlobalIndexMap>& g)
{ return source(e, g.base()); }
template<typename Graph, typename GlobalIndexMap>
inline typename vertex_list_adaptor<Graph, GlobalIndexMap>::vertex_descriptor
target(typename vertex_list_adaptor<Graph, GlobalIndexMap>::edge_descriptor e,
const vertex_list_adaptor<Graph, GlobalIndexMap>& g)
{ return target(e, g.base()); }
template<typename Graph, typename GlobalIndexMap>
inline
std::pair<typename vertex_list_adaptor<Graph, GlobalIndexMap>::out_edge_iterator,
typename vertex_list_adaptor<Graph, GlobalIndexMap>::out_edge_iterator>
out_edges(typename vertex_list_adaptor<Graph, GlobalIndexMap>::vertex_descriptor v,
const vertex_list_adaptor<Graph, GlobalIndexMap>& g)
{ return out_edges(v, g.base()); }
template<typename Graph, typename GlobalIndexMap>
inline typename vertex_list_adaptor<Graph, GlobalIndexMap>::degree_size_type
out_degree(typename vertex_list_adaptor<Graph, GlobalIndexMap>::vertex_descriptor v,
const vertex_list_adaptor<Graph, GlobalIndexMap>& g)
{ return out_degree(v, g.base()); }
// --------------------------------------------------------------------------
// Bidirectional Graph
// --------------------------------------------------------------------------
// All bidirectional-graph operations forward to the adapted graph.
template<typename Graph, typename GlobalIndexMap>
inline
std::pair<typename vertex_list_adaptor<Graph, GlobalIndexMap>::in_edge_iterator,
typename vertex_list_adaptor<Graph, GlobalIndexMap>::in_edge_iterator>
in_edges(typename vertex_list_adaptor<Graph, GlobalIndexMap>::vertex_descriptor v,
const vertex_list_adaptor<Graph, GlobalIndexMap>& g)
{ return in_edges(v, g.base()); }
template<typename Graph, typename GlobalIndexMap>
inline typename vertex_list_adaptor<Graph, GlobalIndexMap>::degree_size_type
in_degree(typename vertex_list_adaptor<Graph, GlobalIndexMap>::vertex_descriptor v,
const vertex_list_adaptor<Graph, GlobalIndexMap>& g)
{ return in_degree(v, g.base()); }
template<typename Graph, typename GlobalIndexMap>
inline typename vertex_list_adaptor<Graph, GlobalIndexMap>::degree_size_type
degree(typename vertex_list_adaptor<Graph, GlobalIndexMap>::vertex_descriptor v,
const vertex_list_adaptor<Graph, GlobalIndexMap>& g)
{ return degree(v, g.base()); }
// --------------------------------------------------------------------------
// Adjacency Graph
// --------------------------------------------------------------------------
// Forwards to the adapted graph.
template<typename Graph, typename GlobalIndexMap>
inline
std::pair<typename vertex_list_adaptor<Graph, GlobalIndexMap>::adjacency_iterator,
typename vertex_list_adaptor<Graph, GlobalIndexMap>::adjacency_iterator>
adjacent_vertices(typename vertex_list_adaptor<Graph, GlobalIndexMap>::vertex_descriptor v,
const vertex_list_adaptor<Graph, GlobalIndexMap>& g)
{ return adjacent_vertices(v, g.base()); }
// --------------------------------------------------------------------------
// Vertex List Graph
// --------------------------------------------------------------------------
// These use the adaptor's gathered global vertex set, not the base graph's
// local one — this is the adaptor's whole purpose.
template<typename Graph, typename GlobalIndexMap>
inline
std::pair<typename vertex_list_adaptor<Graph, GlobalIndexMap>::vertex_iterator,
typename vertex_list_adaptor<Graph, GlobalIndexMap>::vertex_iterator>
vertices(const vertex_list_adaptor<Graph, GlobalIndexMap>& g)
{ return g.vertices(); }
template<typename Graph, typename GlobalIndexMap>
inline
typename vertex_list_adaptor<Graph, GlobalIndexMap>::vertices_size_type
num_vertices(const vertex_list_adaptor<Graph, GlobalIndexMap>& g)
{ return g.num_vertices(); }
// --------------------------------------------------------------------------
// Edge List Graph
// --------------------------------------------------------------------------
// Edge-list operations forward to the adapted graph.
template<typename Graph, typename GlobalIndexMap>
inline
std::pair<typename vertex_list_adaptor<Graph, GlobalIndexMap>::edge_iterator,
typename vertex_list_adaptor<Graph, GlobalIndexMap>::edge_iterator>
edges(const vertex_list_adaptor<Graph, GlobalIndexMap>& g)
{ return edges(g.base()); }
template<typename Graph, typename GlobalIndexMap>
inline
typename vertex_list_adaptor<Graph, GlobalIndexMap>::edges_size_type
num_edges(const vertex_list_adaptor<Graph, GlobalIndexMap>& g)
{ return num_edges(g.base()); }
// --------------------------------------------------------------------------
// Property Graph
// --------------------------------------------------------------------------
// Property-map access forwards to the adapted graph for every tag except
// vertex_index, which is handled separately below.
template<typename PropertyTag, typename Graph, typename GlobalIndexMap>
inline typename property_map<Graph, PropertyTag>::type
get(PropertyTag p, vertex_list_adaptor<Graph, GlobalIndexMap>& g)
{ return get(p, g.base()); }
template<typename PropertyTag, typename Graph, typename GlobalIndexMap>
inline typename property_map<Graph, PropertyTag>::const_type
get(PropertyTag p, const vertex_list_adaptor<Graph, GlobalIndexMap>& g)
{ return get(p, g.base()); }
// Read a single property value for key x.
template<typename PropertyTag, typename Graph, typename GlobalIndexMap>
inline typename property_traits<
typename property_map<Graph, PropertyTag>::type
>::value_type
get(PropertyTag p, const vertex_list_adaptor<Graph, GlobalIndexMap>& g,
typename property_traits<
typename property_map<Graph, PropertyTag>::type
>::key_type const& x)
{ return get(p, g.base(), x); }
// Write a single property value for key x.
template<typename PropertyTag, typename Graph, typename GlobalIndexMap>
inline void
put(PropertyTag p, vertex_list_adaptor<Graph, GlobalIndexMap>& g,
typename property_traits<
typename property_map<Graph, PropertyTag>::type
>::key_type const& x,
typename property_traits<
typename property_map<Graph, PropertyTag>::type
>::value_type const& v)
{ return put(p, g.base(), x, v); }
// --------------------------------------------------------------------------
// Property Graph: vertex_index property
// --------------------------------------------------------------------------
// vertex_index resolves to the adaptor's global index map rather than the
// base graph's local index map.
template<typename Graph, typename GlobalIndexMap>
inline GlobalIndexMap
get(vertex_index_t, const vertex_list_adaptor<Graph, GlobalIndexMap>& g)
{ return g.get_index_map(); }
template<typename Graph, typename GlobalIndexMap>
inline typename vertex_list_adaptor<Graph, GlobalIndexMap>::vertices_size_type
get(vertex_index_t, const vertex_list_adaptor<Graph, GlobalIndexMap>& g,
typename vertex_list_adaptor<Graph, GlobalIndexMap>::vertex_descriptor x)
{ return get(g.get_index_map(), x); }
// --------------------------------------------------------------------------
// Adjacency Matrix Graph
// --------------------------------------------------------------------------
// Forwards to the adapted graph.
template<typename Graph, typename GlobalIndexMap>
std::pair<typename vertex_list_adaptor<Graph, GlobalIndexMap>::edge_descriptor,
bool>
edge(typename vertex_list_adaptor<Graph, GlobalIndexMap>::vertex_descriptor u,
typename vertex_list_adaptor<Graph, GlobalIndexMap>::vertex_descriptor v,
vertex_list_adaptor<Graph, GlobalIndexMap>& g)
{ return edge(u, v, g.base()); }
} } // end namespace boost::graph
namespace boost {
// --------------------------------------------------------------------------
// Property Graph: vertex_index property
// --------------------------------------------------------------------------
// property_map metafunction specializations: the adaptor's vertex_index
// type is its GlobalIndexMap, for both const and non-const graph types.
template<typename Graph, typename GlobalIndexMap>
class property_map<vertex_index_t,
graph::vertex_list_adaptor<Graph, GlobalIndexMap> >
{
public:
typedef GlobalIndexMap type;
typedef type const_type;
};
template<typename Graph, typename GlobalIndexMap>
class property_map<vertex_index_t,
const graph::vertex_list_adaptor<Graph, GlobalIndexMap> >
{
public:
typedef GlobalIndexMap type;
typedef type const_type;
};
// Hoist the adaptor and its factories into namespace boost for convenience.
using graph::distribution_global_index_map;
using graph::make_distribution_global_index_map;
using graph::stored_global_index_map;
using graph::make_vertex_list_adaptor;
using graph::vertex_list_adaptor;
} // end namespace boost
#endif // BOOST_VERTEX_LIST_ADAPTOR_HPP