Derecho 0.9
Distributed systems toolkit for RDMA
subgroup_functions.cpp
#include <algorithm>
#include <cassert>
#include <vector>

#include <derecho/conf/conf.hpp>
#include <derecho/core/derecho_modes.hpp>
#include <derecho/core/subgroup_functions.hpp>
#include <derecho/core/view.hpp>

namespace derecho {

subgroup_allocation_map_t one_subgroup_entire_view(const std::vector<std::type_index>& subgroup_type_order,
                                                   const std::unique_ptr<View>& prev_view, View& curr_view) {
    subgroup_allocation_map_t subgroup_layouts;
    //There should really be only one subgroup type, but this could work
    //if you want every subgroup type to overlap and contain the entire view
    for(const auto& subgroup_type : subgroup_type_order) {
        subgroup_layouts.emplace(subgroup_type, subgroup_shard_layout_t{1});
        subgroup_layouts[subgroup_type][0].emplace_back(curr_view.make_subview(curr_view.members));
        curr_view.next_unassigned_rank = curr_view.members.size();
    }
    return subgroup_layouts;
}

subgroup_allocation_map_t one_subgroup_entire_view_raw(const std::vector<std::type_index>& subgroup_type_order,
                                                       const std::unique_ptr<View>& prev_view, View& curr_view) {
    subgroup_allocation_map_t subgroup_layouts;
    for(const auto& subgroup_type : subgroup_type_order) {
        subgroup_layouts.emplace(subgroup_type, subgroup_shard_layout_t{1});
        subgroup_layouts[subgroup_type][0].emplace_back(curr_view.make_subview(curr_view.members, Mode::UNORDERED));
        curr_view.next_unassigned_rank = curr_view.members.size();
    }
    return subgroup_layouts;
}
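
/* Usage sketch (illustrative, not part of the original file): both functions
 * above already have the shard_view_generator_t signature, so either can be
 * passed to a SubgroupInfo when constructing a Group, e.g.:
 *
 *     derecho::SubgroupInfo subgroup_info(&derecho::one_subgroup_entire_view);
 *
 * Every member of each new View then lands in one un-sharded subgroup spanning
 * the whole group, in ORDERED mode (or UNORDERED for the _raw variant).
 */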

ShardAllocationPolicy flexible_even_shards(const std::string& profile) {
    const std::string conf_profile_prefix = "SUBGROUP/" + profile + "/";
    int num_shards = getConfUInt32(conf_profile_prefix + num_shards_profile_field);
    int min_nodes = getConfUInt32(conf_profile_prefix + min_nodes_profile_field);
    int max_nodes = getConfUInt32(conf_profile_prefix + max_nodes_profile_field);
    return flexible_even_shards(num_shards, min_nodes, max_nodes, profile);
}
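
/* Config sketch (illustrative): the overload above pulls its parameters from
 * the node's configuration under "SUBGROUP/<profile>/". Assuming the
 * *_profile_field constants name the keys "num_shards", "min_nodes", and
 * "max_nodes", a hypothetical profile "CACHE" could be configured as:
 *
 *     [SUBGROUP/CACHE]
 *     num_shards = 2
 *     min_nodes = 1
 *     max_nodes = 3
 *
 * making flexible_even_shards("CACHE") equivalent to
 * flexible_even_shards(2, 1, 3, "CACHE").
 */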

ShardAllocationPolicy flexible_even_shards(int num_shards, int min_nodes_per_shard,
                                           int max_nodes_per_shard, const std::string& profile) {
    return ShardAllocationPolicy{
            num_shards, true, min_nodes_per_shard, max_nodes_per_shard, Mode::ORDERED, profile, {}, {}, {}, {}};
}

ShardAllocationPolicy fixed_even_shards(int num_shards, int nodes_per_shard,
                                        const std::string& profile) {
    return ShardAllocationPolicy{
            num_shards, true, nodes_per_shard, nodes_per_shard, Mode::ORDERED, profile, {}, {}, {}, {}};
}

ShardAllocationPolicy raw_fixed_even_shards(int num_shards, int nodes_per_shard,
                                            const std::string& profile) {
    return ShardAllocationPolicy{
            num_shards, true, nodes_per_shard, nodes_per_shard, Mode::UNORDERED, profile, {}, {}, {}, {}};
}
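
/* Illustrative comparison (not in the original file): the three helpers above
 * differ only in flexibility and delivery mode:
 *
 *     //Two shards that can shrink to 2 or grow to 4 nodes, ordered delivery
 *     ShardAllocationPolicy flexible = flexible_even_shards(2, 2, 4);
 *     //Two shards of exactly 3 nodes each, totally ordered delivery
 *     ShardAllocationPolicy fixed = fixed_even_shards(2, 3);
 *     //Same fixed layout, but raw/unordered delivery
 *     ShardAllocationPolicy raw = raw_fixed_even_shards(2, 3);
 */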

ShardAllocationPolicy custom_shard_policy(const std::vector<Mode>& delivery_modes_by_shard,
                                          const std::vector<std::string>& profiles_by_shard) {
    std::vector<int> min_nodes_by_shard;
    std::vector<int> max_nodes_by_shard;
    for(const std::string& profile : profiles_by_shard) {
        const std::string conf_profile_prefix = "SUBGROUP/" + profile + "/";
        min_nodes_by_shard.emplace_back(getConfUInt32(conf_profile_prefix + min_nodes_profile_field));
        max_nodes_by_shard.emplace_back(getConfUInt32(conf_profile_prefix + max_nodes_profile_field));
    }
    return custom_shards_policy(min_nodes_by_shard, max_nodes_by_shard,
                                delivery_modes_by_shard, profiles_by_shard);
}

ShardAllocationPolicy custom_shards_policy(const std::vector<int>& min_nodes_by_shard,
                                           const std::vector<int>& max_nodes_by_shard,
                                           const std::vector<Mode>& delivery_modes_by_shard,
                                           const std::vector<std::string>& profiles_by_shard) {
    return ShardAllocationPolicy{static_cast<int>(min_nodes_by_shard.size()), false, -1, -1,
                                 Mode::ORDERED, "default", min_nodes_by_shard, max_nodes_by_shard,
                                 delivery_modes_by_shard, profiles_by_shard};
}
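
/* Worked example (illustrative): a 3-shard subgroup where shard 0 may grow
 * from 2 to 5 nodes while shards 1 and 2 stay fixed at 2, and shard 2 uses
 * unordered delivery. num_shards is inferred from the vector length:
 *
 *     ShardAllocationPolicy mixed = custom_shards_policy(
 *             {2, 2, 2},  //min nodes per shard
 *             {5, 2, 2},  //max nodes per shard
 *             {Mode::ORDERED, Mode::ORDERED, Mode::UNORDERED},
 *             {"default", "default", "default"});
 */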

SubgroupAllocationPolicy one_subgroup_policy(const ShardAllocationPolicy& policy) {
    return SubgroupAllocationPolicy{1, true, {policy}};
}

SubgroupAllocationPolicy identical_subgroups_policy(int num_subgroups, const ShardAllocationPolicy& subgroup_policy) {
    return SubgroupAllocationPolicy{num_subgroups, true, {subgroup_policy}};
}
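
/* Usage sketch (illustrative): combining the policy helpers with the
 * DefaultSubgroupAllocator, assuming a hypothetical replicated type Cache:
 *
 *     DefaultSubgroupAllocator allocator(
 *             {{std::type_index(typeid(Cache)),
 *               identical_subgroups_policy(2, fixed_even_shards(3, 2))}});
 *
 * This requests 2 identically-sharded Cache subgroups, each with 3 shards of
 * exactly 2 nodes, so every adequate View needs at least 12 members.
 */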

void DefaultSubgroupAllocator::compute_standard_memberships(
        const std::vector<std::type_index>& subgroup_type_order,
        const std::unique_ptr<View>& prev_view,
        View& curr_view,
        subgroup_allocation_map_t& subgroup_layouts) const {
    //First, determine how many nodes each shard can have based on their policies
    std::map<std::type_index, std::vector<std::vector<uint32_t>>> shard_sizes
            = compute_standard_shard_sizes(subgroup_type_order, prev_view, curr_view);
    //Now we can go through and actually allocate nodes to each shard,
    //knowing exactly how many nodes they will get
    if(!prev_view) {
        for(const auto& subgroup_type : subgroup_type_order) {
            //Ignore cross-product-allocated types
            if(!std::holds_alternative<SubgroupAllocationPolicy>(policies.at(subgroup_type))) {
                continue;
            }
            subgroup_layouts[subgroup_type] =
                    allocate_standard_subgroup_type(subgroup_type, curr_view, shard_sizes);
        }
    } else {
        for(uint32_t subgroup_type_id = 0; subgroup_type_id < subgroup_type_order.size();
            ++subgroup_type_id) {
            //Iterate with an explicit counter because the index doubles as the subgroup type ID
            const std::type_index& subgroup_type = subgroup_type_order[subgroup_type_id];
            if(!std::holds_alternative<SubgroupAllocationPolicy>(policies.at(subgroup_type))) {
                continue;
            }
            subgroup_layouts[subgroup_type] =
                    update_standard_subgroup_type(subgroup_type, subgroup_type_id,
                                                  prev_view, curr_view, shard_sizes);
        }
    }
}

std::map<std::type_index, std::vector<std::vector<uint32_t>>>
DefaultSubgroupAllocator::compute_standard_shard_sizes(
        const std::vector<std::type_index>& subgroup_type_order,
        const std::unique_ptr<View>& prev_view,
        const View& curr_view) const {
    //First, determine how many nodes we will need for a minimal allocation
    int nodes_needed = 0;
    std::map<std::type_index, std::vector<std::vector<uint32_t>>> shard_sizes;
    for(uint32_t subgroup_type_id = 0; subgroup_type_id < subgroup_type_order.size(); ++subgroup_type_id) {
        const std::type_index& subgroup_type = subgroup_type_order[subgroup_type_id];
        //Get the policy for this subgroup type, if and only if it's a "standard" policy
        if(!std::holds_alternative<SubgroupAllocationPolicy>(policies.at(subgroup_type))) {
            continue;
        }
        const SubgroupAllocationPolicy& subgroup_type_policy
                = std::get<SubgroupAllocationPolicy>(policies.at(subgroup_type));

        shard_sizes.emplace(subgroup_type,
                            std::vector<std::vector<uint32_t>>(subgroup_type_policy.num_subgroups));
        for(int subgroup_num = 0; subgroup_num < subgroup_type_policy.num_subgroups; ++subgroup_num) {
            const ShardAllocationPolicy& sharding_policy
                    = subgroup_type_policy.identical_subgroups
                              ? subgroup_type_policy.shard_policy_by_subgroup[0]
                              : subgroup_type_policy.shard_policy_by_subgroup[subgroup_num];
            shard_sizes[subgroup_type][subgroup_num].resize(sharding_policy.num_shards);
            for(int shard_num = 0; shard_num < sharding_policy.num_shards; ++shard_num) {
                int min_shard_size = sharding_policy.even_shards
                                             ? sharding_policy.min_nodes_per_shard
                                             : sharding_policy.min_num_nodes_by_shard[shard_num];
                //If there was a previous view, we must include all non-failed nodes from that view
                if(prev_view) {
                    const subgroup_id_t previous_assignment_offset
                            = prev_view->subgroup_ids_by_type_id.at(subgroup_type_id)[0];
                    const SubView& previous_shard_assignment
                            = prev_view->subgroup_shard_views[previous_assignment_offset + subgroup_num]
                                                             [shard_num];
                    int num_nonfailed_nodes = 0;
                    for(std::size_t rank = 0; rank < previous_shard_assignment.members.size(); ++rank) {
                        if(curr_view.rank_of(previous_shard_assignment.members[rank]) != -1) {
                            num_nonfailed_nodes++;
                        }
                    }
                    if(num_nonfailed_nodes > min_shard_size) {
                        min_shard_size = num_nonfailed_nodes;
                    }
                }
                shard_sizes[subgroup_type][subgroup_num][shard_num] = min_shard_size;
                nodes_needed += min_shard_size;
            }
        }
    }
    //At this point we know whether the View has enough members,
    //so throw the exception if it will be inadequate
    if(nodes_needed > curr_view.num_members) {
        throw subgroup_provisioning_exception();
    }
    //Now go back and add one node to each shard evenly, until either they reach max size
    //or we run out of members in curr_view
    bool done_adding = false;
    while(!done_adding) {
        //Starts at true; any shard that is still below its maximum resets it to false
        bool all_at_max = true;
        for(const auto& subgroup_type : subgroup_type_order) {
            if(!std::holds_alternative<SubgroupAllocationPolicy>(policies.at(subgroup_type))) {
                continue;
            }
            const SubgroupAllocationPolicy& subgroup_type_policy
                    = std::get<SubgroupAllocationPolicy>(policies.at(subgroup_type));
            for(int subgroup_num = 0; subgroup_num < subgroup_type_policy.num_subgroups; ++subgroup_num) {
                const ShardAllocationPolicy& sharding_policy
                        = subgroup_type_policy.identical_subgroups
                                  ? subgroup_type_policy.shard_policy_by_subgroup[0]
                                  : subgroup_type_policy.shard_policy_by_subgroup[subgroup_num];
                for(int shard_num = 0; shard_num < sharding_policy.num_shards; ++shard_num) {
                    uint32_t max_shard_members = sharding_policy.even_shards
                                                         ? sharding_policy.max_nodes_per_shard
                                                         : sharding_policy.max_num_nodes_by_shard[shard_num];
                    if(nodes_needed >= curr_view.num_members) {
                        done_adding = true;
                        break;
                    }
                    if(shard_sizes[subgroup_type][subgroup_num][shard_num] < max_shard_members) {
                        shard_sizes[subgroup_type][subgroup_num][shard_num]++;
                        nodes_needed++;
                    }
                    all_at_max = all_at_max
                                 && shard_sizes[subgroup_type][subgroup_num][shard_num]
                                            == max_shard_members;
                }
            }
        }
        if(all_at_max) {
            done_adding = true;
        }
    }
    return shard_sizes;
}
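
/* Worked example of the two-phase sizing above (illustrative): with one
 * subgroup of 2 flexible shards, min 2 / max 4 nodes each, and a 7-member
 * View, phase one reserves the minimums {2, 2} (nodes_needed = 4); the
 * round-robin phase then grows the shards {3, 3} -> {4, 3} before running out
 * of members, so the final sizes are {4, 3}. With only a 3-member View,
 * nodes_needed (4) would exceed num_members and the exception is thrown.
 */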

subgroup_shard_layout_t DefaultSubgroupAllocator::allocate_standard_subgroup_type(
        const std::type_index subgroup_type,
        View& curr_view,
        const std::map<std::type_index, std::vector<std::vector<uint32_t>>>& shard_sizes) const {
    //The size of shard_sizes[subgroup_type] is the number of subgroups of this type
    subgroup_shard_layout_t subgroup_allocation(shard_sizes.at(subgroup_type).size());
    for(uint32_t subgroup_num = 0; subgroup_num < subgroup_allocation.size(); ++subgroup_num) {
        //The size of shard_sizes[subgroup_type][subgroup_num] is the number of shards
        for(uint32_t shard_num = 0; shard_num < shard_sizes.at(subgroup_type)[subgroup_num].size();
            ++shard_num) {
            uint32_t shard_size = shard_sizes.at(subgroup_type)[subgroup_num][shard_num];
            //Grab the next shard_size nodes
            std::vector<node_id_t> desired_nodes(&curr_view.members[curr_view.next_unassigned_rank],
                                                 &curr_view.members[curr_view.next_unassigned_rank + shard_size]);
            curr_view.next_unassigned_rank += shard_size;
            //Figure out what the Mode policy for this shard is
            const SubgroupAllocationPolicy& subgroup_type_policy
                    = std::get<SubgroupAllocationPolicy>(policies.at(subgroup_type));
            const ShardAllocationPolicy& sharding_policy
                    = subgroup_type_policy.identical_subgroups
                              ? subgroup_type_policy.shard_policy_by_subgroup[0]
                              : subgroup_type_policy.shard_policy_by_subgroup[subgroup_num];
            Mode delivery_mode = sharding_policy.even_shards
                                         ? sharding_policy.shards_mode
                                         : sharding_policy.modes_by_shard[shard_num];
            std::string profile = sharding_policy.shards_profile;
            if(!sharding_policy.even_shards) {
                profile = sharding_policy.profiles_by_shard[shard_num];
            }
            //Put the SubView at the end of subgroup_allocation[subgroup_num]
            //Since we go through shards in order, this is at index shard_num
            subgroup_allocation[subgroup_num].emplace_back(
                    curr_view.make_subview(desired_nodes, delivery_mode, {}, profile));
        }
    }
    return subgroup_allocation;
}
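
/* Illustrative trace: with members {10, 20, 30, 40, 50}, next_unassigned_rank
 * starting at 0, and computed shard sizes {3, 2}, the loop above slices the
 * member list contiguously: shard 0 gets {10, 20, 30}, shard 1 gets {40, 50},
 * and next_unassigned_rank ends at 5. The allocation order is deterministic,
 * which keeps the layouts that each node computes independently identical.
 */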

subgroup_shard_layout_t DefaultSubgroupAllocator::update_standard_subgroup_type(
        const std::type_index subgroup_type,
        const subgroup_type_id_t subgroup_type_id,
        const std::unique_ptr<View>& prev_view,
        View& curr_view,
        const std::map<std::type_index, std::vector<std::vector<uint32_t>>>& shard_sizes) const {
    /* Subgroups of the same type will have contiguous IDs because they were created in order.
     * So the previous assignment is the slice of the previous subgroup_shard_views vector
     * starting at the first subgroup's ID, and extending for num_subgroups entries.
     */
    const subgroup_id_t previous_assignment_offset = prev_view->subgroup_ids_by_type_id.at(subgroup_type_id)[0];
    subgroup_shard_layout_t next_assignment(shard_sizes.at(subgroup_type).size());
    for(uint32_t subgroup_num = 0; subgroup_num < next_assignment.size(); ++subgroup_num) {
        //The size of shard_sizes[subgroup_type][subgroup_num] is the number of shards
        for(uint32_t shard_num = 0; shard_num < shard_sizes.at(subgroup_type)[subgroup_num].size();
            ++shard_num) {
            const SubView& previous_shard_assignment
                    = prev_view->subgroup_shard_views[previous_assignment_offset + subgroup_num]
                                                     [shard_num];
            std::vector<node_id_t> next_shard_members;
            std::vector<int> next_is_sender;
            uint32_t allocated_shard_size = shard_sizes.at(subgroup_type)[subgroup_num][shard_num];
            //Add all the non-failed nodes from the previous assignment
            for(std::size_t rank = 0; rank < previous_shard_assignment.members.size(); ++rank) {
                if(curr_view.rank_of(previous_shard_assignment.members[rank]) == -1) {
                    continue;
                }
                next_shard_members.push_back(previous_shard_assignment.members[rank]);
                next_is_sender.push_back(previous_shard_assignment.is_sender[rank]);
            }
            //Add additional members if needed
            while(next_shard_members.size() < allocated_shard_size) {
                //This must be true if compute_standard_shard_sizes said our view was adequate
                assert(curr_view.next_unassigned_rank < (int)curr_view.members.size());
                next_shard_members.push_back(curr_view.members[curr_view.next_unassigned_rank]);
                curr_view.next_unassigned_rank++;
                //All members start out as senders with the default allocator
                next_is_sender.push_back(true);
            }
            next_assignment[subgroup_num].emplace_back(curr_view.make_subview(next_shard_members,
                                                                              previous_shard_assignment.mode,
                                                                              next_is_sender,
                                                                              previous_shard_assignment.profile));
        }
    }
    return next_assignment;
}
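
/* Illustrative trace: if a shard was {10, 20, 30} in prev_view and node 20
 * failed, the loop above first carries over the survivors {10, 30} with their
 * old sender flags, then (if the computed shard size is still 3) appends the
 * next unassigned member of curr_view, yielding e.g. {10, 30, 40}. Mode and
 * profile are inherited from the previous assignment.
 */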

void DefaultSubgroupAllocator::compute_cross_product_memberships(
        const std::vector<std::type_index>& subgroup_type_order,
        const std::unique_ptr<View>& prev_view,
        View& curr_view,
        subgroup_allocation_map_t& subgroup_layouts) const {
    for(uint32_t subgroup_type_id = 0; subgroup_type_id < subgroup_type_order.size(); ++subgroup_type_id) {
        //Iterate with an explicit counter because the index doubles as the subgroup type ID
        const std::type_index& subgroup_type = subgroup_type_order[subgroup_type_id];
        //Only consider CrossProductPolicy subgroup types
        if(!std::holds_alternative<CrossProductPolicy>(policies.at(subgroup_type))) {
            continue;
        }
        const CrossProductPolicy& cross_product_policy
                = std::get<CrossProductPolicy>(policies.at(subgroup_type));
        /* This function runs after compute_standard_memberships, so the source and
         * target subgroups will have entries in subgroup_layouts.
         * Check to make sure the source and target subgroup types actually
         * provisioned enough subgroups for the subgroup index to make sense. */
        if(cross_product_policy.source_subgroup.second
                   >= subgroup_layouts[cross_product_policy.source_subgroup.first].size()
           || cross_product_policy.target_subgroup.second
                      >= subgroup_layouts[cross_product_policy.target_subgroup.first].size()) {
            throw subgroup_provisioning_exception();
        }

        const std::vector<SubView>& source_subgroup_layout
                = subgroup_layouts[cross_product_policy.source_subgroup.first]
                                  [cross_product_policy.source_subgroup.second];
        const std::vector<SubView>& target_subgroup_layout
                = subgroup_layouts[cross_product_policy.target_subgroup.first]
                                  [cross_product_policy.target_subgroup.second];

        /* Ignore prev_view and next_unassigned_rank, because this subgroup's assignment is based
         * entirely on the source and target subgroups, and doesn't provision any new nodes. */
        int num_source_members = 0;
        for(const auto& shard_view : source_subgroup_layout) {
            num_source_members += shard_view.members.size();
        }
        int num_target_shards = target_subgroup_layout.size();
        //Each subgroup will have only one shard, since they'll all overlap, so there are source * target subgroups
        subgroup_shard_layout_t assignment(num_source_members * num_target_shards);
        //We want a list of all members of the source subgroup, "flattened" out of shards, but we don't have that;
        //instead, iterate through the source's shards in order and keep a consistent index
        int source_member_index = 0;
        for(std::size_t source_shard = 0; source_shard < source_subgroup_layout.size(); ++source_shard) {
            for(const auto& source_node : source_subgroup_layout[source_shard].members) {
                for(int target_shard = 0; target_shard < num_target_shards; ++target_shard) {
                    const SubView& target_shard_view = target_subgroup_layout[target_shard];
                    std::vector<node_id_t> desired_nodes(target_shard_view.members.size() + 1);
                    desired_nodes[0] = source_node;
                    std::copy(target_shard_view.members.begin(),
                              target_shard_view.members.end(),
                              desired_nodes.begin() + 1);
                    std::vector<int> sender_flags(desired_nodes.size(), false);
                    sender_flags[0] = true;
                    //The vector at this subgroup's index will be default initialized, so push_back a single shard
                    assignment[source_member_index * num_target_shards + target_shard].push_back(
                            curr_view.make_subview(desired_nodes, Mode::ORDERED, sender_flags));
                    //Now, to send from source_member_index to target_shard, we can use the subgroup at
                    //source_member_index * num_target_shards + target_shard
                }
                source_member_index++;
            }
        }
        subgroup_layouts[subgroup_type] = std::move(assignment);
    }
}
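
/* Worked example of the cross-product arithmetic (illustrative): with a
 * 3-member source subgroup and a target subgroup of 2 shards, the code above
 * creates 3 * 2 = 6 single-shard subgroups. The subgroup for source member i
 * and target shard j sits at index i * 2 + j; it contains that source node
 * (the only sender) followed by target shard j's members, letting the source
 * node multicast to that shard without joining it.
 */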

subgroup_allocation_map_t DefaultSubgroupAllocator::operator()(
        const std::vector<std::type_index>& subgroup_type_order,
        const std::unique_ptr<View>& prev_view,
        View& curr_view) const {
    subgroup_allocation_map_t subgroup_allocations;
    compute_standard_memberships(subgroup_type_order, prev_view, curr_view, subgroup_allocations);
    compute_cross_product_memberships(subgroup_type_order, prev_view, curr_view, subgroup_allocations);
    return subgroup_allocations;
}
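
/* Usage sketch (illustrative, not part of the original file): the allocator is
 * itself a shard_view_generator_t, so it can back a SubgroupInfo directly.
 * Assuming hypothetical replicated types Cache and LoadBalancer:
 *
 *     SubgroupInfo subgroup_info(DefaultSubgroupAllocator(
 *             {{std::type_index(typeid(Cache)),
 *               one_subgroup_policy(flexible_even_shards(2, 2, 4))},
 *              {std::type_index(typeid(LoadBalancer)),
 *               one_subgroup_policy(fixed_even_shards(1, 3))}}));
 *
 * Standard policies are allocated first, then any CrossProductPolicy entries,
 * exactly as the two compute_* calls above are ordered.
 */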

}  // namespace derecho