Derecho  0.9
Distributed systems toolkit for RDMA
group_impl.hpp
1 
8 #include <spdlog/async.h>
9 #include <spdlog/sinks/rotating_file_sink.h>
10 #include <spdlog/sinks/stdout_color_sinks.h>
11 
12 #include "../group.hpp"
14 #include "derecho_internal.hpp"
15 #include "make_kind_map.hpp"
16 #include <derecho/utils/logger.hpp>
17 
18 namespace derecho {
19 
20 template <typename SubgroupType>
21 auto& _Group::get_subgroup(uint32_t subgroup_num) {
22  if(auto gptr = dynamic_cast<GroupProjection<SubgroupType>*>(this)) {
23  return gptr->get_subgroup(subgroup_num);
24  } else
25  throw derecho_exception("Error: this top-level group contains no subgroups for the selected type.");
26 }
27 
28 template <typename SubgroupType>
29 auto& _Group::get_nonmember_subgroup(uint32_t subgroup_num) {
30  if(auto gptr = dynamic_cast<GroupProjection<SubgroupType>*>(this)) {
31  return gptr->get_nonmember_subgroup(subgroup_num);
32  } else
33  throw derecho_exception("Error: this top-level group contains no subgroups for the selected type.");
34 }
35 
36 template <typename SubgroupType>
37 std::vector<std::vector<node_id_t>> _Group::get_subgroup_members(uint32_t subgroup_index) {
38  if(auto gptr = dynamic_cast<GroupProjection<SubgroupType>*>(this)) {
39  return gptr->get_subgroup_members(subgroup_index);
40  } else
41  throw derecho_exception("Error: this top-level group contains no subgroups for the selected type.");
42 }
43 
44 template <typename SubgroupType>
45 std::size_t _Group::get_number_of_shards(uint32_t subgroup_index) {
46  if(auto gptr = dynamic_cast<GroupProjection<SubgroupType>*>(this)) {
47  return gptr->get_number_of_shards(subgroup_index);
48  } else
49  throw derecho_exception("Error: this top-level group contains no subgroups for the selected type.");
50 }
51 
52 template <typename ReplicatedType>
53 Replicated<ReplicatedType>&
54 GroupProjection<ReplicatedType>::get_subgroup(uint32_t subgroup_num) {
55  void* ret{nullptr};
56  set_replicated_pointer(std::type_index{typeid(ReplicatedType)}, subgroup_num,
57  &ret);
58  return *((Replicated<ReplicatedType>*)ret);
59 }
60 
61 template <typename ReplicatedType>
62 ExternalCaller<ReplicatedType>&
63 GroupProjection<ReplicatedType>::get_nonmember_subgroup(uint32_t subgroup_num) {
64  void* ret{nullptr};
65  set_external_caller_pointer(std::type_index{typeid(ReplicatedType)}, subgroup_num,
66  &ret);
67  return *((ExternalCaller<ReplicatedType>*)ret);
68 }
69 
70 template <typename ReplicatedType>
71 std::vector<std::vector<node_id_t>>
72 GroupProjection<ReplicatedType>::get_subgroup_members(uint32_t subgroup_index) {
73  return get_view_manager().get_subgroup_members(get_index_of_type(typeid(ReplicatedType)), subgroup_index);
74 }
75 
76 template <typename ReplicatedType>
77 std::size_t
78 GroupProjection<ReplicatedType>::get_number_of_shards(uint32_t subgroup_index) {
79  return get_view_manager().get_number_of_shards_in_subgroup(get_index_of_type(typeid(ReplicatedType)), subgroup_index);
80 }
81 
82 template <typename... ReplicatedTypes>
83 void Group<ReplicatedTypes...>::set_replicated_pointer(std::type_index type,
84  uint32_t subgroup_num,
85  void** ret) {
86  ((*ret = (type == std::type_index{typeid(ReplicatedTypes)}
87  ? &get_subgroup<ReplicatedTypes>(subgroup_num)
88  : *ret)),
89  ...);
90 }
91 
92 template <typename... ReplicatedTypes>
93 uint32_t Group<ReplicatedTypes...>::get_index_of_type(const std::type_info& ti) {
94  assert_always((std::type_index{ti} == std::type_index{typeid(ReplicatedTypes)} || ... || false));
95  return (((std::type_index{ti} == std::type_index{typeid(ReplicatedTypes)}) ? //
96  (index_of_type<ReplicatedTypes, ReplicatedTypes...>)
97  : 0)
98  + ... + 0);
99  //return index_of_type<SubgroupType, ReplicatedTypes...>;
100 }
101 
102 template <typename... ReplicatedTypes>
103 ViewManager& Group<ReplicatedTypes...>::get_view_manager() {
104  return view_manager;
105 }
106 
107 template <typename... ReplicatedTypes>
108 void Group<ReplicatedTypes...>::set_external_caller_pointer(std::type_index type,
109  uint32_t subgroup_num,
110  void** ret) {
111  ((*ret = (type == std::type_index{typeid(ReplicatedTypes)}
112  ? &get_nonmember_subgroup<ReplicatedTypes>(subgroup_num)
113  : *ret)),
114  ...);
115 }
116 
117 /* The primary constructor; the two-argument constructor below delegates to it */
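// In outline, this constructor: (1) opens a TCP connection to the configured
// leader unless this node is itself the starting leader, (2) constructs the
// ViewManager to join (or restart) the group, (3) repeatedly constructs the
// local Replicated<T> objects and receives their state from prior shard leaders
// until an initial view commits, and (4) sets up multicast groups, RPC
// connections, and the predicate, listener, and persistence threads.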
118 template <typename... ReplicatedTypes>
119 Group<ReplicatedTypes...>::Group(const CallbackSet& callbacks,
120  const SubgroupInfo& subgroup_info,
121  IDeserializationContext* deserialization_context,
122  std::vector<view_upcall_t> _view_upcalls,
123  Factory<ReplicatedTypes>... factories)
124  : my_id(getConfUInt32(CONF_DERECHO_LOCAL_ID)),
125  is_starting_leader((getConfString(CONF_DERECHO_LOCAL_IP) == getConfString(CONF_DERECHO_LEADER_IP))
126  && (getConfUInt16(CONF_DERECHO_GMS_PORT) == getConfUInt16(CONF_DERECHO_LEADER_GMS_PORT))),
127  leader_connection([&]() -> std::optional<tcp::socket> {
128  if(!is_starting_leader) {
129  return tcp::socket(getConfString(CONF_DERECHO_LEADER_IP), getConfUInt16(CONF_DERECHO_LEADER_GMS_PORT));
130  }
131  return std::nullopt;
132  }()),
133  user_deserialization_context(deserialization_context),
134  persistence_manager(objects_by_subgroup_id, callbacks.local_persistence_callback),
135  //Initially empty, all connections are added in the new view callback
136  tcp_sockets(std::make_shared<tcp::tcp_connections>(my_id, std::map<node_id_t, std::pair<ip_addr_t, uint16_t>>{{my_id, {getConfString(CONF_DERECHO_LOCAL_IP), getConfUInt16(CONF_DERECHO_RPC_PORT)}}})),
137  view_manager([&]() {
138  if(is_starting_leader) {
139  return ViewManager(subgroup_info,
140  {std::type_index(typeid(ReplicatedTypes))...},
141  std::disjunction_v<has_persistent_fields<ReplicatedTypes>...>,
142  tcp_sockets, objects_by_subgroup_id,
143  persistence_manager.get_callbacks(),
144  _view_upcalls);
145  } else {
146  return ViewManager(leader_connection.value(),
147  subgroup_info,
148  {std::type_index(typeid(ReplicatedTypes))...},
149  std::disjunction_v<has_persistent_fields<ReplicatedTypes>...>,
150  tcp_sockets, objects_by_subgroup_id,
151  persistence_manager.get_callbacks(),
152  _view_upcalls);
153  }
154  }()),
155  rpc_manager(view_manager, deserialization_context),
156  factories(make_kind_map(factories...)) {
157  //State transfer must complete before an initial view can commit, and must retry if the view is aborted
158  bool initial_view_confirmed = false;
159  while(!initial_view_confirmed) {
160  //This might be the shard leaders from the previous view,
161  //or the nodes with the longest logs in their shard if we're doing total restart,
162  //or empty if this is the first View of a new group
163  const vector_int64_2d& old_shard_leaders = view_manager.get_old_shard_leaders();
164  //As a side effect, construct_objects filters old_shard_leaders to just the leaders
165  //this node needs to receive object state from
166  std::set<std::pair<subgroup_id_t, node_id_t>> subgroups_and_leaders_to_receive
167  = construct_objects<ReplicatedTypes...>(view_manager.get_current_view_const().get(),
168  old_shard_leaders);
169  //These functions are no-ops if we're not doing total restart
170  view_manager.truncate_logs();
171  view_manager.send_logs();
172  receive_objects(subgroups_and_leaders_to_receive);
173  if(is_starting_leader) {
174  bool leader_has_quorum = true;
175  initial_view_confirmed = view_manager.leader_prepare_initial_view(leader_has_quorum);
176  if(!leader_has_quorum) {
177  //If quorum was lost due to failures during the prepare message,
178  //stop here and wait for more nodes to rejoin before going back to state-transfer
179  view_manager.await_rejoining_nodes(my_id);
180  }
181  } else {
182  //This will wait for a new view to be sent if the view was aborted
183  initial_view_confirmed = view_manager.check_view_committed(leader_connection.value());
184  }
185  }
186  if(is_starting_leader) {
187  //In restart mode, once a prepare is successful, send a commit
188  //(this function does nothing if we're not doing total restart)
189  view_manager.leader_commit_initial_view();
190  }
191  //Once the initial view is committed, we can make RDMA connections
192  view_manager.initialize_multicast_groups(callbacks);
193  rpc_manager.create_connections();
194  //This function registers some new-view upcalls to view_manager, so it must come before finish_setup()
195  set_up_components();
196  view_manager.finish_setup();
197  //Start all the predicates and listeners threads
198  rpc_manager.start_listening();
199  view_manager.start();
200  persistence_manager.start();
201 }
202 
203 //Delegating constructor: supplies default (empty) callbacks and view upcalls
204 template <typename... ReplicatedTypes>
205 Group<ReplicatedTypes...>::Group(const SubgroupInfo& subgroup_info, Factory<ReplicatedTypes>... factories)
206  : Group({}, subgroup_info, nullptr, {}, factories...) {}
207 
208 template <typename... ReplicatedTypes>
209 Group<ReplicatedTypes...>::~Group() {
210  // shutdown the persistence manager
211  // TODO-discussion:
212  // Will a node be able to come back once it leaves? if not, maybe we should
213  // shut it down on leave().
214  persistence_manager.shutdown(true);
215  tcp_sockets->destroy();
216 }
217 
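// construct_objects examines every subgroup of type FirstType in curr_view:
// it builds a Replicated<FirstType> for each shard this node has joined (or an
// ExternalCaller<FirstType> where it is not a member), discards handles for
// shards it has left, and returns the set of (subgroup ID, prior shard leader)
// pairs whose object state still has to be received, before recursing over
// RestTypes.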
218 template <typename... ReplicatedTypes>
219 template <typename FirstType, typename... RestTypes>
220 std::set<std::pair<subgroup_id_t, node_id_t>> Group<ReplicatedTypes...>::construct_objects(
221  const View& curr_view,
222  const vector_int64_2d& old_shard_leaders) {
223  std::set<std::pair<subgroup_id_t, node_id_t>> subgroups_to_receive;
224  if(!curr_view.is_adequately_provisioned) {
225  return subgroups_to_receive;
226  }
227  //The numeric type ID of this subgroup type is its position in the ordered list of subgroup types
228  const subgroup_type_id_t subgroup_type_id = index_of_type<FirstType, ReplicatedTypes...>;
229  const auto& subgroup_ids = curr_view.subgroup_ids_by_type_id.at(subgroup_type_id);
230  for(uint32_t subgroup_index = 0; subgroup_index < subgroup_ids.size(); ++subgroup_index) {
231  subgroup_id_t subgroup_id = subgroup_ids.at(subgroup_index);
232  // Find out if this node is in any shard of this subgroup
233  bool in_subgroup = false;
234  uint32_t num_shards = curr_view.subgroup_shard_views.at(subgroup_id).size();
235  for(uint32_t shard_num = 0; shard_num < num_shards; ++shard_num) {
236  const std::vector<node_id_t>& members = curr_view.subgroup_shard_views.at(subgroup_id).at(shard_num).members;
237  //"If this node is in subview->members for this shard"
238  if(std::find(members.begin(), members.end(), my_id) != members.end()) {
239  in_subgroup = true;
240  // This node may have been re-assigned from a different shard, in which
241  // case we should delete the old shard's object state
242  auto old_object = replicated_objects.template get<FirstType>().find(subgroup_index);
243  if(old_object != replicated_objects.template get<FirstType>().end() && old_object->second.get_shard_num() != shard_num) {
244  dbg_default_debug("Deleting old Replicated Object state for type {}; I was reassigned from shard {} to shard {}",
245  typeid(FirstType).name(), old_object->second.get_shard_num(), shard_num);
246  // also erase from objects_by_subgroup_id
247  objects_by_subgroup_id.erase(subgroup_id);
248  replicated_objects.template get<FirstType>().erase(old_object);
249  }
250  //If we don't have a Replicated<T> for this (type, subgroup index), we just became a member of the shard
251  if(replicated_objects.template get<FirstType>().count(subgroup_index) == 0) {
252  //Determine if there is existing state for this shard that will need to be received
253  bool has_previous_leader = old_shard_leaders.size() > subgroup_id
254  && old_shard_leaders[subgroup_id].size() > shard_num
255  && old_shard_leaders[subgroup_id][shard_num] > -1
256  && old_shard_leaders[subgroup_id][shard_num] != my_id;
257  if(has_previous_leader) {
258  subgroups_to_receive.emplace(subgroup_id, old_shard_leaders[subgroup_id][shard_num]);
259  }
260  if(has_previous_leader && !has_persistent_fields<FirstType>::value) {
261  /* Construct an "empty" Replicated<T>, since all of T's state will
262  * be received from the leader and there are no logs to update */
263  replicated_objects.template get<FirstType>().emplace(
264  subgroup_index, Replicated<FirstType>(subgroup_type_id, my_id,
265  subgroup_id, subgroup_index,
266  shard_num, rpc_manager, this));
267  } else {
268  replicated_objects.template get<FirstType>().emplace(
269  subgroup_index, Replicated<FirstType>(subgroup_type_id, my_id,
270  subgroup_id, subgroup_index, shard_num, rpc_manager,
271  factories.template get<FirstType>(), this));
272  }
273  // Store a reference to the Replicated<T> just constructed
274  objects_by_subgroup_id.emplace(subgroup_id,
275  replicated_objects.template get<FirstType>().at(subgroup_index));
276  break; // This node can be in at most one shard, so stop here
277  }
278  }
279  }
280  if(!in_subgroup) {
281  // If we have a Replicated<T> for the subgroup, but we're no longer a member, delete it
282  auto old_object = replicated_objects.template get<FirstType>().find(subgroup_index);
283  if(old_object != replicated_objects.template get<FirstType>().end()) {
284  dbg_default_debug("Deleting old Replicated Object state (of type {}) for subgroup {} because this node is no longer a member",
285  typeid(FirstType).name(), subgroup_index);
286  objects_by_subgroup_id.erase(subgroup_id);
287  replicated_objects.template get<FirstType>().erase(old_object);
288  }
289  // Create an ExternalCaller for the subgroup if we don't already have one
290  external_callers.template get<FirstType>().emplace(
291  subgroup_index, ExternalCaller<FirstType>(subgroup_type_id,
292  my_id, subgroup_id, rpc_manager));
293  }
294  }
295  return functional_insert(subgroups_to_receive, construct_objects<RestTypes...>(curr_view, old_shard_leaders));
296 }
297 
298 template <typename... ReplicatedTypes>
299 void Group<ReplicatedTypes...>::set_up_components() {
300  //Give PersistenceManager a reference to ViewManager to break the circular dependency
301  persistence_manager.set_view_manager(view_manager);
302  //Now that MulticastGroup is constructed, tell it about RPCManager's message handler
303  SharedLockedReference<View> curr_view = view_manager.get_current_view();
304  curr_view.get().multicast_group->register_rpc_callback([this](subgroup_id_t subgroup, node_id_t sender, char* buf, uint32_t size) {
305  rpc_manager.rpc_message_handler(subgroup, sender, buf, size);
306  });
307  view_manager.add_view_upcall([this](const View& new_view) {
308  rpc_manager.new_view_callback(new_view);
309  });
310  //ViewManager must call back to Group after a view change in order to call construct_objects,
311  //since ViewManager doesn't know the template parameters
312  view_manager.register_initialize_objects_upcall([this](node_id_t my_id, const View& view,
313  const vector_int64_2d& old_shard_leaders) {
314  std::set<std::pair<subgroup_id_t, node_id_t>> subgroups_and_leaders
315  = construct_objects<ReplicatedTypes...>(view, old_shard_leaders);
316  receive_objects(subgroups_and_leaders);
317  });
318 }
319 
320 template <typename... ReplicatedTypes>
321 template <typename SubgroupType>
322 Replicated<SubgroupType>& Group<ReplicatedTypes...>::get_subgroup(uint32_t subgroup_index) {
323  if(!view_manager.get_current_view().get().is_adequately_provisioned) {
324  throw subgroup_provisioning_exception("View is inadequately provisioned because subgroup provisioning failed!");
325  }
326  try {
327  return replicated_objects.template get<SubgroupType>().at(subgroup_index);
328  } catch(std::out_of_range& ex) {
329  throw invalid_subgroup_exception("Not a member of the requested subgroup.");
330  }
331 }
332 
333 template <typename... ReplicatedTypes>
334 template <typename SubgroupType>
335 ExternalCaller<SubgroupType>& Group<ReplicatedTypes...>::get_nonmember_subgroup(uint32_t subgroup_index) {
336  try {
337  return external_callers.template get<SubgroupType>().at(subgroup_index);
338  } catch(std::out_of_range& ex) {
339  throw invalid_subgroup_exception("No ExternalCaller exists for the requested subgroup; this node may be a member of the subgroup");
340  }
341 }
342 
343 template <typename... ReplicatedTypes>
344 template <typename SubgroupType>
345 ShardIterator<SubgroupType> Group<ReplicatedTypes...>::get_shard_iterator(uint32_t subgroup_index) {
346  try {
347  auto& EC = external_callers.template get<SubgroupType>().at(subgroup_index);
348  View& curr_view = view_manager.get_current_view().get();
349  auto subgroup_id = curr_view.subgroup_ids_by_type_id.at(index_of_type<SubgroupType, ReplicatedTypes...>)
350  .at(subgroup_index);
351  const auto& shard_subviews = curr_view.subgroup_shard_views.at(subgroup_id);
352  std::vector<node_id_t> shard_reps(shard_subviews.size());
353  for(uint32_t i = 0; i < shard_subviews.size(); ++i) {
354  // for shard iteration to be possible, each shard must contain at least one member
355  shard_reps[i] = shard_subviews[i].members.at(0);
356  }
357  return ShardIterator<SubgroupType>(EC, shard_reps);
358  } catch(std::out_of_range& ex) {
359  throw invalid_subgroup_exception("No ExternalCaller exists for the requested subgroup; this node may be a member of the subgroup");
360  }
361 }
362 
363 template <typename... ReplicatedTypes>
364 void Group<ReplicatedTypes...>::receive_objects(const std::set<std::pair<subgroup_id_t, node_id_t>>& subgroups_and_leaders) {
365  //This will receive one object from each shard leader in ascending order of subgroup ID
366  for(const auto& subgroup_and_leader : subgroups_and_leaders) {
367  LockedReference<std::unique_lock<std::mutex>, tcp::socket> leader_socket
368  = tcp_sockets->get_socket(subgroup_and_leader.second);
369  ReplicatedObject& subgroup_object = objects_by_subgroup_id.at(subgroup_and_leader.first);
370  if(subgroup_object.is_persistent()) {
371  int64_t log_tail_length = subgroup_object.get_minimum_latest_persisted_version();
372  dbg_default_debug("Sending log tail length of {} for subgroup {} to node {}.",
373  log_tail_length, subgroup_and_leader.first, subgroup_and_leader.second);
374  leader_socket.get().write(log_tail_length);
375  }
376  dbg_default_debug("Receiving Replicated Object state for subgroup {} from node {}",
377  subgroup_and_leader.first, subgroup_and_leader.second);
378  std::size_t buffer_size;
379  bool success = leader_socket.get().read(buffer_size);
380  assert_always(success);
381  char* buffer = new char[buffer_size];
382  success = leader_socket.get().read(buffer, buffer_size);
383  assert_always(success);
384  subgroup_object.receive_object(buffer);
385  delete[] buffer;
386  }
387  dbg_default_debug("Done receiving all Replicated Objects from subgroup leaders");
388 }
389 
390 template <typename... ReplicatedTypes>
391 void Group<ReplicatedTypes...>::report_failure(const node_id_t who) {
392  view_manager.report_failure(who);
393 }
394 
395 template <typename... ReplicatedTypes>
396 void Group<ReplicatedTypes...>::leave(bool group_shutdown) {
397  if(group_shutdown) {
398  view_manager.silence();
399  view_manager.barrier_sync();
400  }
401  view_manager.leave();
402 }
403 
404 template <typename... ReplicatedTypes>
405 std::vector<node_id_t> Group<ReplicatedTypes...>::get_members() {
406  return view_manager.get_members();
407 }
408 
409 template <typename... ReplicatedTypes>
410 template <typename SubgroupType>
411 std::vector<std::vector<node_id_t>> Group<ReplicatedTypes...>::get_subgroup_members(uint32_t subgroup_index) {
412  return view_manager.get_subgroup_members(index_of_type<SubgroupType, ReplicatedTypes...>, subgroup_index);
413 }
414 template <typename... ReplicatedTypes>
415 template <typename SubgroupType>
416 int32_t Group<ReplicatedTypes...>::get_my_shard(uint32_t subgroup_index) {
417  return view_manager.get_my_shard(index_of_type<SubgroupType, ReplicatedTypes...>, subgroup_index);
418 }
419 
420 template <typename... ReplicatedTypes>
421 std::int32_t Group<ReplicatedTypes...>::get_my_rank() {
422  return view_manager.get_my_rank();
423 }
424 
425 template <typename... ReplicatedTypes>
426 node_id_t Group<ReplicatedTypes...>::get_my_id() {
427  return my_id;
428 }
429 
430 template <typename... ReplicatedTypes>
431 void Group<ReplicatedTypes...>::barrier_sync() {
432  view_manager.barrier_sync();
433 }
434 
435 template <typename... ReplicatedTypes>
436 void Group<ReplicatedTypes...>::debug_print_status() const {
437  view_manager.debug_print_status();
438 }
439 
440 } /* namespace derecho */
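
For orientation, here is a minimal usage sketch of the Group API implemented above, written from the application's side. TestObject and make_membership_policy() are hypothetical application-supplied names (a replicated type and a helper that builds the subgroup membership function), the header path reflects the assumed Derecho 0.9 install layout, and only the derecho::Group member functions called below are actually defined in this file.

#include <derecho/core/group.hpp>  // assumed install path for the public Group header
#include <memory>

void example_member_process() {
    // Application-provided membership policy and object factory (placeholders).
    derecho::SubgroupInfo subgroup_info(make_membership_policy());
    auto test_factory = [](persistent::PersistentRegistry*) {
        return std::make_unique<TestObject>();
    };

    // Primary constructor (line 119): become the starting leader or contact the
    // configured leader, complete state transfer, and start the RPC,
    // view-management, and persistence threads before returning.
    derecho::Group<TestObject> group({}, subgroup_info, nullptr, {}, test_factory);

    // Shard members obtain a Replicated<TestObject> handle (line 322); a node
    // outside every shard of the subgroup would call get_nonmember_subgroup()
    // instead (line 335).
    derecho::Replicated<TestObject>& handle = group.get_subgroup<TestObject>();
    (void)handle;

    auto members = group.get_members();  // current group membership (line 405)
    (void)members;
    group.barrier_sync();                // wait for all members (line 431)
    group.leave(true);                   // coordinated group-wide shutdown (line 396)
}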