pacemaker  2.1.2-ada5c3b36
Scalable High-Availability cluster resource manager
pcmk_sched_native.c
Go to the documentation of this file.
1 /*
2  * Copyright 2004-2021 the Pacemaker project contributors
3  *
4  * The version control history for this file may have further details.
5  *
6  * This source code is licensed under the GNU General Public License version 2
7  * or later (GPLv2+) WITHOUT ANY WARRANTY.
8  */
9 
10 #include <crm_internal.h>
11 
12 #include <stdbool.h>
13 
14 #include <crm/pengine/rules.h>
15 #include <crm/msg_xml.h>
17 #include <pacemaker-internal.h>
18 #include <crm/services.h>
19 
20 #include "libpacemaker_private.h"
21 
22 // The controller removes the resource from the CIB, making this redundant
23 // #define DELETE_THEN_REFRESH 1
24 
25 #define INFINITY_HACK (INFINITY * -100)
26 
27 #define VARIANT_NATIVE 1
28 #include <lib/pengine/variant.h>
29 
30 extern bool pcmk__is_daemon;
31 
32 static void Recurring(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
33  pe_working_set_t *data_set);
34 static void RecurringOp(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
35  xmlNode *operation, pe_working_set_t *data_set);
36 static void Recurring_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
37  pe_working_set_t *data_set);
38 static void RecurringOp_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
39  xmlNode *operation, pe_working_set_t *data_set);
40 
41 void ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set);
42 gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set);
43 gboolean StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
44 gboolean StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
45 gboolean DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
46 gboolean PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional,
47  pe_working_set_t * data_set);
48 gboolean RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
49 gboolean NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
50 
51 /* This array says what the *next* role should be when transitioning from one
52  * role to another. For example going from Stopped to Promoted, the next role is
53  * RSC_ROLE_UNPROMOTED, because the resource must be started before being promoted.
54  * The current state then becomes Started, which is fed into this array again,
55  * giving a next role of RSC_ROLE_PROMOTED.
56  */
57 static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
58 /* Current state Next state*/
59 /* Unknown Stopped Started Unpromoted Promoted */
65 };
66 
67 typedef gboolean (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next,
68  gboolean optional,
69  pe_working_set_t *data_set);
70 
71 // This array picks the function needed to transition from one role to another
/* This array picks the function needed to transition from one role to another.
 * Indexed as [current role][next role]; each entry is the action to schedule
 * for that single step (a multi-step transition is driven by repeatedly
 * consulting rsc_state_matrix for the intermediate role).
 * RoleError marks transitions that should never be requested; NullOp marks
 * transitions that require no action.
 */
static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* Current state       Next state */
/*                 Unknown    Stopped    Started    Unpromoted  Promoted */
/* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, },
/* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, },
/* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, },
/* Unpromoted */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, },
/* Promoted */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp, },
};
81 
/* Clear the given node-weight flag group in the caller's flag variable,
 * logging the change at trace level with the resource's ID for context.
 *
 * Fix: the original body hard-coded the caller's variable name "flags" and
 * silently ignored the nw_flags parameter; it now uses (nw_flags) as intended.
 * Existing callers pass a local named "flags", so behavior is unchanged.
 */
#define clear_node_weights_flags(nw_flags, nw_rsc, flags_to_clear) do {     \
        (nw_flags) = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,    \
                                          "Node weight", (nw_rsc)->id,      \
                                          (nw_flags), (flags_to_clear),     \
                                          #flags_to_clear);                 \
    } while (0)
87 
/*!
 * \internal
 * \brief Choose a node for a resource and assign the resource to it
 *
 * Considers (in order): utilization-based preference, an explicitly preferred
 * node, the highest-weighted allowed node, and (as a tie-breaker) the node the
 * resource is already running on. Finally assigns via native_assign_node().
 *
 * \param[in,out] rsc       Resource to assign
 * \param[in]     prefer    Node to prefer, if any (may be overridden by
 *                          process_utilization())
 * \param[in,out] data_set  Cluster working set
 *
 * \return TRUE if the resource ended up assigned to a node, else FALSE
 */
static gboolean
native_choose_node(pe_resource_t * rsc, pe_node_t * prefer, pe_working_set_t * data_set)
{
    GList *nodes = NULL;
    pe_node_t *chosen = NULL;
    pe_node_t *best = NULL;
    int multiple = 1;   // Number of nodes tied for the best weight
    int length = 0;
    gboolean result = FALSE;

    // May replace *prefer based on utilization (capacity) considerations
    process_utilization(rsc, &prefer, data_set);

    // Already decided (not provisional): report whether it got a node
    if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
        return rsc->allocated_to ? TRUE : FALSE;
    }

    // Sort allowed nodes by weight
    if (rsc->allowed_nodes) {
        length = g_hash_table_size(rsc->allowed_nodes);
    }
    if (length > 0) {
        nodes = g_hash_table_get_values(rsc->allowed_nodes);
        nodes = sort_nodes_by_weight(nodes, pe__current_node(rsc), data_set);

        // First node in sorted list has the best score
        best = g_list_nth_data(nodes, 0);
    }

    if (prefer && nodes) {
        // The preferred node must still be in the allowed-node table
        chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);

        if (chosen == NULL) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
                         prefer->details->uname, rsc->id);

        /* Favor the preferred node as long as its weight is at least as good as
         * the best allowed node's.
         *
         * An alternative would be to favor the preferred node even if the best
         * node is better, when the best node's weight is less than INFINITY.
         */
        } else if ((chosen->weight < 0) || (chosen->weight < best->weight)) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
                         chosen->details->uname, rsc->id);
            chosen = NULL;

        } else if (!can_run_resources(chosen)) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
                         chosen->details->uname, rsc->id);
            chosen = NULL;

        } else {
            pe_rsc_trace(rsc,
                         "Chose preferred node %s for %s (ignoring %d candidates)",
                         chosen->details->uname, rsc->id, length);
        }
    }

    if ((chosen == NULL) && nodes) {
        /* Either there is no preferred node, or the preferred node is not
         * available, but there are other nodes allowed to run the resource.
         */

        chosen = best;
        pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates",
                     chosen ? chosen->details->uname : "<none>", rsc->id, length);

        if (!pe_rsc_is_unique_clone(rsc->parent)
            && chosen && (chosen->weight > 0) && can_run_resources(chosen)) {
            /* If the resource is already running on a node, prefer that node if
             * it is just as good as the chosen node.
             *
             * We don't do this for unique clone instances, because
             * distribute_children() has already assigned instances to their
             * running nodes when appropriate, and if we get here, we don't want
             * remaining unallocated instances to prefer a node that's already
             * running another instance.
             */
            pe_node_t *running = pe__current_node(rsc);

            if (running && (can_run_resources(running) == FALSE)) {
                pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
                             rsc->id, running->details->uname);
            } else if (running) {
                // Walk the remaining sorted nodes that tie with the best weight
                for (GList *iter = nodes->next; iter; iter = iter->next) {
                    pe_node_t *tmp = (pe_node_t *) iter->data;

                    if (tmp->weight != chosen->weight) {
                        // The nodes are sorted by weight, so no more are equal
                        break;
                    }
                    if (tmp->details == running->details) {
                        // Scores are equal, so prefer the current node
                        chosen = tmp;
                    }
                    multiple++;
                }
            }
        }
    }

    if (multiple > 1) {
        // Log ties; a tie at INFINITY suggests a configuration problem
        static char score[33];
        int log_level = (chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO;

        score2char_stack(chosen->weight, score, sizeof(score));
        do_crm_log(log_level,
                   "Chose node %s for %s from %d nodes with score %s",
                   chosen->details->uname, rsc->id, multiple, score);
    }

    // chosen may be NULL here, meaning the resource cannot run anywhere
    result = native_assign_node(rsc, chosen, FALSE);
    g_list_free(nodes);
    return result;
}
203 
212 static int
213 best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
214  const char *value)
215 {
216  GHashTableIter iter;
217  pe_node_t *node = NULL;
218  int best_score = -INFINITY;
219  const char *best_node = NULL;
220 
221  // Find best allowed node with matching attribute
222  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
223  while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
224 
225  if ((node->weight > best_score) && can_run_resources(node)
226  && pcmk__str_eq(value, pe_node_attribute_raw(node, attr), pcmk__str_casei)) {
227 
228  best_score = node->weight;
229  best_node = node->details->uname;
230  }
231  }
232 
233  if (!pcmk__str_eq(attr, CRM_ATTR_UNAME, pcmk__str_casei)) {
234  if (best_node == NULL) {
235  crm_info("No allowed node for %s matches node attribute %s=%s",
236  rsc->id, attr, value);
237  } else {
238  crm_info("Allowed node %s for %s had best score (%d) "
239  "of those matching node attribute %s=%s",
240  best_node, rsc->id, best_score, attr, value);
241  }
242  }
243  return best_score;
244 }
245 
/*!
 * \internal
 * \brief Add resource's colocation matches to a node table, scaled by factor
 *
 * For each node in \p nodes, look up the best score among \p rsc's allowed
 * nodes that share the node's value for \p attr, scale it by \p factor, and
 * add the result to the node's weight. With \p only_positive, nodes whose
 * weight would go negative are filtered (marked unusable) instead.
 *
 * \param[in,out] nodes          Table of nodes whose weights to update
 * \param[in]     rsc            Resource whose allowed nodes supply scores
 * \param[in]     attr           Node attribute to match on (NULL = node name)
 * \param[in]     factor         Scaling factor (may be negative)
 * \param[in]     only_positive  If true, disallow negative resulting weights
 */
static void
add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc,
                              const char *attr, float factor,
                              bool only_positive)
{
    GHashTableIter iter;
    pe_node_t *node = NULL;

    if (attr == NULL) {
        attr = CRM_ATTR_UNAME;
    }

    // Iterate through each node
    g_hash_table_iter_init(&iter, nodes);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
        float weight_f = 0;
        int weight = 0;
        int score = 0;
        int new_score = 0;

        // Best score among rsc's allowed nodes with this node's attr value
        score = best_node_score_matching_attr(rsc, attr,
                                              pe_node_attribute_raw(node, attr));

        if ((factor < 0) && (score < 0)) {
            /* Negative preference for a node with a negative score
             * should not become a positive preference.
             *
             * @TODO Consider filtering only if weight is -INFINITY
             */
            crm_trace("%s: Filtering %d + %f * %d (double negative disallowed)",
                      node->details->uname, node->weight, factor, score);
            continue;
        }

        // INFINITY_HACK marks nodes already filtered by an earlier pass
        if (node->weight == INFINITY_HACK) {
            crm_trace("%s: Filtering %d + %f * %d (node was marked unusable)",
                      node->details->uname, node->weight, factor, score);
            continue;
        }

        weight_f = factor * score;

        // Round the number; see http://c-faq.com/fp/round.html
        weight = (int) ((weight_f < 0)? (weight_f - 0.5) : (weight_f + 0.5));

        /* Small factors can obliterate the small scores that are often actually
         * used in configurations. If the score and factor are nonzero, ensure
         * that the result is nonzero as well.
         */
        if ((weight == 0) && (score != 0)) {
            if (factor > 0.0) {
                weight = 1;
            } else if (factor < 0.0) {
                weight = -1;
            }
        }

        // Add with INFINITY clamping
        new_score = pe__add_scores(weight, node->weight);

        if (only_positive && (new_score < 0) && (node->weight > 0)) {
            // Previously-positive node went negative: mark it unusable
            crm_trace("%s: Filtering %d + %f * %d = %d "
                      "(negative disallowed, marking node unusable)",
                      node->details->uname, node->weight, factor, score,
                      new_score);
            node->weight = INFINITY_HACK;
            continue;
        }

        if (only_positive && (new_score < 0) && (node->weight == 0)) {
            // Zero-weight node would go negative: skip the update
            crm_trace("%s: Filtering %d + %f * %d = %d (negative disallowed)",
                      node->details->uname, node->weight, factor, score,
                      new_score);
            continue;
        }

        crm_trace("%s: %d + %f * %d = %d", node->details->uname,
                  node->weight, factor, score, new_score);
        node->weight = new_score;
    }
}
340 
341 static inline bool
342 is_nonempty_group(pe_resource_t *rsc)
343 {
344  return rsc && (rsc->variant == pe_group) && (rsc->children != NULL);
345 }
346 
362 GHashTable *
363 pcmk__native_merge_weights(pe_resource_t *rsc, const char *primary_id,
364  GHashTable *nodes, const char *attr, float factor,
365  uint32_t flags)
366 {
367  GHashTable *work = NULL;
368 
369  // Avoid infinite recursion
370  if (pcmk_is_set(rsc->flags, pe_rsc_merging)) {
371  pe_rsc_info(rsc, "%s: Breaking dependency loop at %s",
372  primary_id, rsc->id);
373  return nodes;
374  }
376 
378  if (is_nonempty_group(rsc)) {
379  GList *last = g_list_last(rsc->children);
380  pe_resource_t *last_rsc = last->data;
381 
382  pe_rsc_trace(rsc, "%s: Merging scores from group %s "
383  "using last member %s (at %.6f)",
384  primary_id, rsc->id, last_rsc->id, factor);
385  work = pcmk__native_merge_weights(last_rsc, primary_id, NULL, attr,
386  factor, flags);
387  } else {
389  }
391 
392  } else if (is_nonempty_group(rsc)) {
393  /* The first member of the group will recursively incorporate any
394  * constraints involving other members (including the group internal
395  * colocation).
396  *
397  * @TODO The indirect colocations from the dependent group's other
398  * members will be incorporated at full strength rather than by
399  * factor, so the group's combined stickiness will be treated as
400  * (factor + (#members - 1)) * stickiness. It is questionable what
401  * the right approach should be.
402  */
403  pe_rsc_trace(rsc, "%s: Merging scores from first member of group %s "
404  "(at %.6f)", primary_id, rsc->id, factor);
405  work = pcmk__copy_node_table(nodes);
406  work = pcmk__native_merge_weights(rsc->children->data, primary_id, work,
407  attr, factor, flags);
408 
409  } else {
410  pe_rsc_trace(rsc, "%s: Merging scores from %s (at %.6f)",
411  primary_id, rsc->id, factor);
412  work = pcmk__copy_node_table(nodes);
413  add_node_scores_matching_attr(work, rsc, attr, factor,
415  }
416 
417  if (can_run_any(work)) {
418  GList *gIter = NULL;
419  int multiplier = (factor < 0)? -1 : 1;
420 
422  gIter = rsc->rsc_cons;
423  pe_rsc_trace(rsc,
424  "Checking additional %d optional '%s with' constraints",
425  g_list_length(gIter), rsc->id);
426 
427  } else if (is_nonempty_group(rsc)) {
428  pe_resource_t *last_rsc = g_list_last(rsc->children)->data;
429 
430  gIter = last_rsc->rsc_cons_lhs;
431  pe_rsc_trace(rsc, "Checking additional %d optional 'with group %s' "
432  "constraints using last member %s",
433  g_list_length(gIter), rsc->id, last_rsc->id);
434 
435  } else {
436  gIter = rsc->rsc_cons_lhs;
437  pe_rsc_trace(rsc,
438  "Checking additional %d optional 'with %s' constraints",
439  g_list_length(gIter), rsc->id);
440  }
441 
442  for (; gIter != NULL; gIter = gIter->next) {
443  pe_resource_t *other = NULL;
444  pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
445 
447  other = constraint->primary;
448  } else if (!pcmk__colocation_has_influence(constraint, NULL)) {
449  continue;
450  } else {
451  other = constraint->dependent;
452  }
453 
454  pe_rsc_trace(rsc, "Optionally merging score of '%s' constraint (%s with %s)",
455  constraint->id, constraint->dependent->id,
456  constraint->primary->id);
457  work = pcmk__native_merge_weights(other, primary_id, work,
458  constraint->node_attribute,
459  multiplier * constraint->score / (float) INFINITY,
461  pe__show_node_weights(true, NULL, primary_id, work, rsc->cluster);
462  }
463 
464  } else if (pcmk_is_set(flags, pe_weights_rollback)) {
465  pe_rsc_info(rsc, "%s: Rolling back optional scores from %s",
466  primary_id, rsc->id);
467  g_hash_table_destroy(work);
469  return nodes;
470  }
471 
472 
474  pe_node_t *node = NULL;
475  GHashTableIter iter;
476 
477  g_hash_table_iter_init(&iter, work);
478  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
479  if (node->weight == INFINITY_HACK) {
480  node->weight = 1;
481  }
482  }
483  }
484 
485  if (nodes) {
486  g_hash_table_destroy(nodes);
487  }
488 
490  return work;
491 }
492 
493 pe_node_t *
495  pe_working_set_t *data_set)
496 {
497  GList *gIter = NULL;
498 
499  if (rsc->parent && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) {
500  /* never allocate children on their own */
501  pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id,
502  rsc->parent->id);
503  rsc->parent->cmds->allocate(rsc->parent, prefer, data_set);
504  }
505 
506  if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
507  return rsc->allocated_to;
508  }
509 
510  if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
511  pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
512  return NULL;
513  }
514 
516  pe__show_node_weights(true, rsc, "Pre-alloc", rsc->allowed_nodes, data_set);
517 
518  for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
519  pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
520 
521  GHashTable *archive = NULL;
522  pe_resource_t *primary = constraint->primary;
523 
524  if ((constraint->dependent_role >= RSC_ROLE_PROMOTED)
525  || (constraint->score < 0 && constraint->score > -INFINITY)) {
526  archive = pcmk__copy_node_table(rsc->allowed_nodes);
527  }
528 
529  pe_rsc_trace(rsc,
530  "%s: Allocating %s first (constraint=%s score=%d role=%s)",
531  rsc->id, primary->id, constraint->id,
532  constraint->score, role2text(constraint->dependent_role));
533  primary->cmds->allocate(primary, NULL, data_set);
534  rsc->cmds->rsc_colocation_lh(rsc, primary, constraint, data_set);
535  if (archive && can_run_any(rsc->allowed_nodes) == FALSE) {
536  pe_rsc_info(rsc, "%s: Rolling back scores from %s",
537  rsc->id, primary->id);
538  g_hash_table_destroy(rsc->allowed_nodes);
539  rsc->allowed_nodes = archive;
540  archive = NULL;
541  }
542  if (archive) {
543  g_hash_table_destroy(archive);
544  }
545  }
546 
547  pe__show_node_weights(true, rsc, "Post-coloc", rsc->allowed_nodes, data_set);
548 
549  for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
550  pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
551 
552  if (!pcmk__colocation_has_influence(constraint, NULL)) {
553  continue;
554  }
555  pe_rsc_trace(rsc, "Merging score of '%s' constraint (%s with %s)",
556  constraint->id, constraint->dependent->id,
557  constraint->primary->id);
558  rsc->allowed_nodes = constraint->dependent->cmds->merge_weights(
559  constraint->dependent, rsc->id, rsc->allowed_nodes,
560  constraint->node_attribute, constraint->score / (float) INFINITY,
562  }
563 
564  if (rsc->next_role == RSC_ROLE_STOPPED) {
565  pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
566  /* make sure it doesn't come up again */
567  resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set);
568 
569  } else if(rsc->next_role > rsc->role
570  && !pcmk_is_set(data_set->flags, pe_flag_have_quorum)
571  && data_set->no_quorum_policy == no_quorum_freeze) {
572  crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
573  rsc->id, role2text(rsc->role), role2text(rsc->next_role));
574  pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze");
575  }
576 
578  rsc, __func__, rsc->allowed_nodes, data_set);
582  }
583 
584  if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
585  const char *reason = NULL;
586  pe_node_t *assign_to = NULL;
587 
588  pe__set_next_role(rsc, rsc->role, "unmanaged");
589  assign_to = pe__current_node(rsc);
590  if (assign_to == NULL) {
591  reason = "inactive";
592  } else if (rsc->role == RSC_ROLE_PROMOTED) {
593  reason = "promoted";
594  } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
595  reason = "failed";
596  } else {
597  reason = "active";
598  }
599  pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
600  (assign_to? assign_to->details->uname : "no node"), reason);
601  native_assign_node(rsc, assign_to, TRUE);
602 
603  } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
604  pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
605  native_assign_node(rsc, NULL, TRUE);
606 
607  } else if (pcmk_is_set(rsc->flags, pe_rsc_provisional)
608  && native_choose_node(rsc, prefer, data_set)) {
609  pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
610  rsc->allocated_to->details->uname);
611 
612  } else if (rsc->allocated_to == NULL) {
613  if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
614  pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
615  } else if (rsc->running_on != NULL) {
616  pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
617  }
618 
619  } else {
620  pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
621  rsc->allocated_to->details->uname);
622  }
623 
625 
626  if (rsc->is_remote_node) {
627  pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);
628 
629  CRM_ASSERT(remote_node != NULL);
630  if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) {
631  crm_trace("Setting Pacemaker Remote node %s to ONLINE",
632  remote_node->details->id);
633  remote_node->details->online = TRUE;
634  /* We shouldn't consider an unseen remote-node unclean if we are going
635  * to try and connect to it. Otherwise we get an unnecessary fence */
636  if (remote_node->details->unseen == TRUE) {
637  remote_node->details->unclean = FALSE;
638  }
639 
640  } else {
641  crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)",
642  remote_node->details->id, role2text(rsc->next_role),
643  (rsc->allocated_to? "" : "un"));
644  remote_node->details->shutdown = TRUE;
645  }
646  }
647 
648  return rsc->allocated_to;
649 }
650 
651 static gboolean
652 is_op_dup(pe_resource_t *rsc, const char *name, guint interval_ms)
653 {
654  gboolean dup = FALSE;
655  const char *id = NULL;
656  const char *value = NULL;
657  xmlNode *operation = NULL;
658  guint interval2_ms = 0;
659 
660  CRM_ASSERT(rsc);
661  for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
662  operation = pcmk__xe_next(operation)) {
663 
664  if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
665  value = crm_element_value(operation, "name");
666  if (!pcmk__str_eq(value, name, pcmk__str_casei)) {
667  continue;
668  }
669 
670  value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
671  interval2_ms = crm_parse_interval_spec(value);
672  if (interval_ms != interval2_ms) {
673  continue;
674  }
675 
676  if (id == NULL) {
677  id = ID(operation);
678 
679  } else {
680  pcmk__config_err("Operation %s is duplicate of %s (do not use "
681  "same name and interval combination more "
682  "than once per resource)", ID(operation), id);
683  dup = TRUE;
684  }
685  }
686  }
687 
688  return dup;
689 }
690 
691 static bool
692 op_cannot_recur(const char *name)
693 {
695 }
696 
697 static void
698 RecurringOp(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
699  xmlNode * operation, pe_working_set_t * data_set)
700 {
701  char *key = NULL;
702  const char *name = NULL;
703  const char *role = NULL;
704  const char *interval_spec = NULL;
705  const char *node_uname = node? node->details->uname : "n/a";
706 
707  guint interval_ms = 0;
708  pe_action_t *mon = NULL;
709  gboolean is_optional = TRUE;
710  GList *possible_matches = NULL;
711 
712  CRM_ASSERT(rsc);
713 
714  /* Only process for the operations without role="Stopped" */
715  role = crm_element_value(operation, "role");
716  if (role && text2role(role) == RSC_ROLE_STOPPED) {
717  return;
718  }
719 
720  interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
721  interval_ms = crm_parse_interval_spec(interval_spec);
722  if (interval_ms == 0) {
723  return;
724  }
725 
726  name = crm_element_value(operation, "name");
727  if (is_op_dup(rsc, name, interval_ms)) {
728  crm_trace("Not creating duplicate recurring action %s for %dms %s",
729  ID(operation), interval_ms, name);
730  return;
731  }
732 
733  if (op_cannot_recur(name)) {
734  pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
735  ID(operation), name);
736  return;
737  }
738 
739  key = pcmk__op_key(rsc->id, name, interval_ms);
740  if (find_rsc_op_entry(rsc, key) == NULL) {
741  crm_trace("Not creating recurring action %s for disabled resource %s",
742  ID(operation), rsc->id);
743  free(key);
744  return;
745  }
746 
747  pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s",
748  ID(operation), rsc->id, role2text(rsc->next_role), node_uname);
749 
750  if (start != NULL) {
751  pe_rsc_trace(rsc, "Marking %s %s due to %s", key,
752  pcmk_is_set(start->flags, pe_action_optional)? "optional" : "mandatory",
753  start->uuid);
754  is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional);
755  } else {
756  pe_rsc_trace(rsc, "Marking %s optional", key);
757  is_optional = TRUE;
758  }
759 
760  /* start a monitor for an already active resource */
761  possible_matches = find_actions_exact(rsc->actions, key, node);
762  if (possible_matches == NULL) {
763  is_optional = FALSE;
764  pe_rsc_trace(rsc, "Marking %s mandatory: not active", key);
765 
766  } else {
767  GList *gIter = NULL;
768 
769  for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) {
770  pe_action_t *op = (pe_action_t *) gIter->data;
771 
773  is_optional = FALSE;
774  break;
775  }
776  }
777  g_list_free(possible_matches);
778  }
779 
780  if (((rsc->next_role == RSC_ROLE_PROMOTED) && (role == NULL))
781  || (role != NULL && text2role(role) != rsc->next_role)) {
782  int log_level = LOG_TRACE;
783  const char *result = "Ignoring";
784 
785  if (is_optional) {
786  char *after_key = NULL;
787  pe_action_t *cancel_op = NULL;
788 
789  // It's running, so cancel it
790  log_level = LOG_INFO;
791  result = "Cancelling";
792  cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
793 
794  switch (rsc->role) {
795  case RSC_ROLE_UNPROMOTED:
796  case RSC_ROLE_STARTED:
797  if (rsc->next_role == RSC_ROLE_PROMOTED) {
798  after_key = promote_key(rsc);
799 
800  } else if (rsc->next_role == RSC_ROLE_STOPPED) {
801  after_key = stop_key(rsc);
802  }
803 
804  break;
805  case RSC_ROLE_PROMOTED:
806  after_key = demote_key(rsc);
807  break;
808  default:
809  break;
810  }
811 
812  if (after_key) {
813  pcmk__new_ordering(rsc, NULL, cancel_op, rsc, after_key, NULL,
814  pe_order_runnable_left, data_set);
815  }
816  }
817 
818  do_crm_log(log_level, "%s action %s (%s vs. %s)",
819  result, key, role ? role : role2text(RSC_ROLE_UNPROMOTED),
820  role2text(rsc->next_role));
821 
822  free(key);
823  return;
824  }
825 
826  mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set);
827  key = mon->uuid;
828  if (is_optional) {
829  pe_rsc_trace(rsc, "%s\t %s (optional)", node_uname, mon->uuid);
830  }
831 
832  if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) {
833  pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)",
834  node_uname, mon->uuid);
836 
837  } else if (node == NULL || node->details->online == FALSE || node->details->unclean) {
838  pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
839  node_uname, mon->uuid);
841 
842  } else if (!pcmk_is_set(mon->flags, pe_action_optional)) {
843  pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s",
844  mon->task, interval_ms / 1000, rsc->id, node_uname);
845  }
846 
847  if (rsc->next_role == RSC_ROLE_PROMOTED) {
848  char *running_promoted = pcmk__itoa(PCMK_OCF_RUNNING_PROMOTED);
849 
850  add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_promoted);
851  free(running_promoted);
852  }
853 
854  if ((node == NULL) || pcmk_is_set(rsc->flags, pe_rsc_managed)) {
855  pcmk__new_ordering(rsc, start_key(rsc), NULL, NULL, strdup(key), mon,
857  data_set);
858 
859  pcmk__new_ordering(rsc, reload_key(rsc), NULL, NULL, strdup(key), mon,
861  data_set);
862 
863  if (rsc->next_role == RSC_ROLE_PROMOTED) {
864  pcmk__new_ordering(rsc, promote_key(rsc), NULL, rsc, NULL, mon,
866  data_set);
867 
868  } else if (rsc->role == RSC_ROLE_PROMOTED) {
869  pcmk__new_ordering(rsc, demote_key(rsc), NULL, rsc, NULL, mon,
871  data_set);
872  }
873  }
874 }
875 
876 static void
877 Recurring(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
878 {
879  if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
880  (node == NULL || node->details->maintenance == FALSE)) {
881  xmlNode *operation = NULL;
882 
883  for (operation = pcmk__xe_first_child(rsc->ops_xml);
884  operation != NULL;
885  operation = pcmk__xe_next(operation)) {
886 
887  if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
888  RecurringOp(rsc, start, node, operation, data_set);
889  }
890  }
891  }
892 }
893 
894 static void
895 RecurringOp_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
896  xmlNode * operation, pe_working_set_t * data_set)
897 {
898  char *key = NULL;
899  const char *name = NULL;
900  const char *role = NULL;
901  const char *interval_spec = NULL;
902  const char *node_uname = node? node->details->uname : "n/a";
903 
904  guint interval_ms = 0;
905  GList *possible_matches = NULL;
906  GList *gIter = NULL;
907 
908  /* Only process for the operations with role="Stopped" */
909  role = crm_element_value(operation, "role");
910  if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) {
911  return;
912  }
913 
914  interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
915  interval_ms = crm_parse_interval_spec(interval_spec);
916  if (interval_ms == 0) {
917  return;
918  }
919 
920  name = crm_element_value(operation, "name");
921  if (is_op_dup(rsc, name, interval_ms)) {
922  crm_trace("Not creating duplicate recurring action %s for %dms %s",
923  ID(operation), interval_ms, name);
924  return;
925  }
926 
927  if (op_cannot_recur(name)) {
928  pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
929  ID(operation), name);
930  return;
931  }
932 
933  key = pcmk__op_key(rsc->id, name, interval_ms);
934  if (find_rsc_op_entry(rsc, key) == NULL) {
935  crm_trace("Not creating recurring action %s for disabled resource %s",
936  ID(operation), rsc->id);
937  free(key);
938  return;
939  }
940 
941  // @TODO add support
942  if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
943  crm_notice("Ignoring %s (recurring monitors for Stopped role are "
944  "not supported for anonymous clones)",
945  ID(operation));
946  return;
947  }
948 
949  pe_rsc_trace(rsc,
950  "Creating recurring action %s for %s in role %s on nodes where it should not be running",
951  ID(operation), rsc->id, role2text(rsc->next_role));
952 
953  /* if the monitor exists on the node where the resource will be running, cancel it */
954  if (node != NULL) {
955  possible_matches = find_actions_exact(rsc->actions, key, node);
956  if (possible_matches) {
957  pe_action_t *cancel_op = NULL;
958 
959  g_list_free(possible_matches);
960 
961  cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
962 
963  if ((rsc->next_role == RSC_ROLE_STARTED)
964  || (rsc->next_role == RSC_ROLE_UNPROMOTED)) {
965  /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */
966  /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */
967  pcmk__new_ordering(rsc, NULL, cancel_op, rsc, start_key(rsc),
968  NULL, pe_order_runnable_left, data_set);
969  }
970 
971  pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s",
972  key, role, role2text(rsc->next_role), node_uname);
973  }
974  }
975 
976  for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
977  pe_node_t *stop_node = (pe_node_t *) gIter->data;
978  const char *stop_node_uname = stop_node->details->uname;
979  gboolean is_optional = TRUE;
980  gboolean probe_is_optional = TRUE;
981  gboolean stop_is_optional = TRUE;
982  pe_action_t *stopped_mon = NULL;
983  char *rc_inactive = NULL;
984  GList *probe_complete_ops = NULL;
985  GList *stop_ops = NULL;
986  GList *local_gIter = NULL;
987 
988  if (node && pcmk__str_eq(stop_node_uname, node_uname, pcmk__str_casei)) {
989  continue;
990  }
991 
992  pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s",
993  ID(operation), rsc->id, crm_str(stop_node_uname));
994 
995  /* start a monitor for an already stopped resource */
996  possible_matches = find_actions_exact(rsc->actions, key, stop_node);
997  if (possible_matches == NULL) {
998  pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key,
999  crm_str(stop_node_uname));
1000  is_optional = FALSE;
1001  } else {
1002  pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key,
1003  crm_str(stop_node_uname));
1004  is_optional = TRUE;
1005  g_list_free(possible_matches);
1006  }
1007 
1008  stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set);
1009 
1010  rc_inactive = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
1011  add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
1012  free(rc_inactive);
1013 
1014  if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1015  GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS,
1016  FALSE);
1017  GList *pIter = NULL;
1018 
1019  for (pIter = probes; pIter != NULL; pIter = pIter->next) {
1020  pe_action_t *probe = (pe_action_t *) pIter->data;
1021 
1022  order_actions(probe, stopped_mon, pe_order_runnable_left);
1023  crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname);
1024  }
1025 
1026  g_list_free(probes);
1027  }
1028 
1029  if (probe_complete_ops) {
1030  g_list_free(probe_complete_ops);
1031  }
1032 
1033  stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE);
1034 
1035  for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) {
1036  pe_action_t *stop = (pe_action_t *) local_gIter->data;
1037 
1038  if (!pcmk_is_set(stop->flags, pe_action_optional)) {
1039  stop_is_optional = FALSE;
1040  }
1041 
1042  if (!pcmk_is_set(stop->flags, pe_action_runnable)) {
1043  crm_debug("%s\t %s (cancelled : stop un-runnable)",
1044  crm_str(stop_node_uname), stopped_mon->uuid);
1046  }
1047 
1048  if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1049  pcmk__new_ordering(rsc, stop_key(rsc), stop, NULL, strdup(key),
1050  stopped_mon,
1052  data_set);
1053  }
1054 
1055  }
1056 
1057  if (stop_ops) {
1058  g_list_free(stop_ops);
1059  }
1060 
1061  if (is_optional == FALSE && probe_is_optional && stop_is_optional
1062  && !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1063  pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged",
1064  key, crm_str(stop_node_uname));
1066  }
1067 
1068  if (pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
1069  pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid);
1070  }
1071 
1072  if (stop_node->details->online == FALSE || stop_node->details->unclean) {
1073  pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
1074  crm_str(stop_node_uname), stopped_mon->uuid);
1076  }
1077 
1078  if (pcmk_is_set(stopped_mon->flags, pe_action_runnable)
1079  && !pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
1080  crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task,
1081  interval_ms / 1000, rsc->id, crm_str(stop_node_uname));
1082  }
1083  }
1084 
1085  free(key);
1086 }
1087 
1088 static void
1089 Recurring_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
1090 {
1091  if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
1092  (node == NULL || node->details->maintenance == FALSE)) {
1093  xmlNode *operation = NULL;
1094 
1095  for (operation = pcmk__xe_first_child(rsc->ops_xml);
1096  operation != NULL;
1097  operation = pcmk__xe_next(operation)) {
1098 
1099  if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
1100  RecurringOp_Stopped(rsc, start, node, operation, data_set);
1101  }
1102  }
1103  }
1104 }
1105 
/* Schedule the actions needed for a live migration of rsc from node
 * 'current' to node 'chosen'.
 *
 * Creates the start/stop pair plus a migrate_to action (unless this
 * continues a partial migration, in which case migrate_to already ran) and
 * a migrate_from action, orders probes/migrate_to/migrate_from relative to
 * the stop and start, and records the migration source and target in the
 * actions' meta-attributes.
 *
 * NOTE(review): several lines of this listing are elided (flag set/clear
 * calls and some ordering-flag arguments); they are marked below. Confirm
 * against the upstream file before relying on exact ordering flags.
 */
static void
handle_migration_actions(pe_resource_t * rsc, pe_node_t *current, pe_node_t *chosen, pe_working_set_t * data_set)
{
    pe_action_t *migrate_to = NULL;
    pe_action_t *migrate_from = NULL;
    pe_action_t *start = NULL;
    pe_action_t *stop = NULL;
    // nonzero partial_migration_target means we are resuming an interrupted migration
    gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;

    pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s . partial migration = %s",
                 rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE");
    start = start_action(rsc, chosen, TRUE);
    stop = stop_action(rsc, current, TRUE);

    // A partial migration already performed migrate_to; only migrate_from remains
    if (partial == FALSE) {
        migrate_to = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
                                   RSC_MIGRATE, current, TRUE, TRUE, data_set);
    }

    migrate_from = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
                                 RSC_MIGRATED, chosen, TRUE, TRUE, data_set);

    if ((migrate_to && migrate_from) || (migrate_from && partial)) {

        /* NOTE(review): lines elided here in this listing (presumably flag
         * updates on start/stop -- confirm upstream) */

        // This is easier than trying to delete it from the graph
        /* NOTE(review): one line elided here in this listing */

        /* order probes before migrations */
        if (partial) {
            /* NOTE(review): one line elided here in this listing */
            migrate_from->needs = start->needs;

            pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
                               rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
                               NULL, pe_order_optional, data_set);

        } else {
            /* NOTE(review): lines elided here in this listing */
            migrate_to->needs = start->needs;

            pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
                               rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
                               NULL, pe_order_optional, data_set);
            // migrate_to on the source must precede migrate_from on the target
            pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0), NULL,
                               rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
                               NULL,
                               /* NOTE(review): ordering-flags argument elided */
                               data_set);
        }

        // The stop on the source and start on the target both follow migrate_from
        pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
                           rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                           /* NOTE(review): ordering-flags argument elided */
                           data_set);
        pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
                           rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
                           /* NOTE(review): ordering-flags argument elided */
                           data_set);
    }

    if (migrate_to) {
        add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
        /* NOTE(review): one line elided here (presumably recording the
         * migration target meta-attribute as well -- confirm upstream) */

        /* Pacemaker Remote connections don't require pending to be recorded in
         * the CIB. We can reduce CIB writes by not setting PENDING for them.
         */
        if (rsc->is_remote_node == FALSE) {
            /* migrate_to takes place on the source node, but can
             * have an effect on the target node depending on how
             * the agent is written. Because of this, we have to maintain
             * a record that the migrate_to occurred, in case the source node
             * loses membership while the migrate_to action is still in-flight.
             */
            add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
        }
    }

    if (migrate_from) {
        add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
        add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
    }
}
1193 
/* Create all actions needed for a primitive resource's transition to its
 * next role: stops, starts, demotes, promotes, migration actions, and
 * recurring monitors, based on where it is active versus where it was
 * allocated.
 *
 * NOTE(review): the signature line is elided in this listing; presumably
 * native_create_actions(pe_resource_t *rsc, pe_working_set_t *data_set) --
 * confirm against upstream. Several other lines are elided below and are
 * marked where noticed.
 */
void
{
    pe_action_t *start = NULL;
    pe_node_t *chosen = NULL;   // node the resource was allocated to (may be NULL)
    pe_node_t *current = NULL;  // node the resource is currently active on
    gboolean need_stop = FALSE;
    bool need_promote = FALSE;
    gboolean is_moving = FALSE;
    gboolean allow_migrate = pcmk_is_set(rsc->flags, pe_rsc_allow_migrate)? TRUE : FALSE;

    GList *gIter = NULL;
    unsigned int num_all_active = 0;
    unsigned int num_clean_active = 0;
    bool multiply_active = FALSE;
    enum rsc_role_e role = RSC_ROLE_UNKNOWN;
    enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;

    CRM_ASSERT(rsc);
    chosen = rsc->allocated_to;
    next_role = rsc->next_role;
    if (next_role == RSC_ROLE_UNKNOWN) {
        // No explicit next role: infer it from whether a node was allocated
        pe__set_next_role(rsc,
                          (chosen == NULL)? RSC_ROLE_STOPPED : RSC_ROLE_STARTED,
                          "allocation");
    }
    pe_rsc_trace(rsc, "Creating all actions for %s transition from %s to %s (%s) on %s",
                 rsc->id, role2text(rsc->role), role2text(rsc->next_role),
                 ((next_role == RSC_ROLE_UNKNOWN)? "implicit" : "explicit"),
                 ((chosen == NULL)? "no node" : chosen->details->uname));

    current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);

    // Stop (and optionally clean up) the resource wherever a migration left it behind
    for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
        pe_node_t *dangling_source = (pe_node_t *) gIter->data;

        pe_action_t *stop = NULL;

        pe_rsc_trace(rsc, "Creating stop action %sfor %s on %s due to dangling migration",
                     pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)? "and cleanup " : "",
                     rsc->id, dangling_source->details->uname);
        stop = stop_action(rsc, dangling_source, FALSE);
        /* NOTE(review): one line elided here in this listing */
        if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) {
            DeleteRsc(rsc, dangling_source, FALSE, data_set);
        }
    }

    if ((num_all_active == 2) && (num_clean_active == 2) && chosen
        /* NOTE(review): one condition line elided here in this listing */
        && (current->details == rsc->partial_migration_source->details)
        && (chosen->details == rsc->partial_migration_target->details)) {

        /* The chosen node is still the migration target from a partial
         * migration. Attempt to continue the migration instead of recovering
         * by stopping the resource everywhere and starting it on a single node.
         */
        pe_rsc_trace(rsc, "Will attempt to continue with partial migration "
                     "to target %s from %s",
                     /* NOTE(review): argument lines elided in this listing */

    } else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
        /* If a resource has "requires" set to nothing or quorum, don't consider
         * it active on unclean nodes (similar to how all resources behave when
         * stonith-enabled is false). We can start such resources elsewhere
         * before fencing completes, and if we considered the resource active on
         * the failed node, we would attempt recovery for being active on
         * multiple nodes.
         */
        multiply_active = (num_clean_active > 1);
    } else {
        multiply_active = (num_all_active > 1);
    }

    if (multiply_active) {
        /* NOTE(review): an inner if-statement line is elided here in this
         * listing (the dangling closing brace below belongs to it) */
            // Migration was in progress, but we've chosen a different target
            crm_notice("Resource %s can no longer migrate from %s to %s "
                       "(will stop on both nodes)",
                       /* NOTE(review): argument lines elided in this listing */

        } else {
            const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);

            // Resource was (possibly) incorrectly multiply active
            pe_proc_err("%s resource %s might be active on %u nodes (%s)",
                        crm_str(class), rsc->id, num_all_active,
                        recovery2text(rsc->recovery_type));
            crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
        }

        if (rsc->recovery_type == recovery_stop_start) {
            need_stop = TRUE;
        }

        /* If by chance a partial migration is in process, but the migration
         * target is not chosen still, clear all partial migration data.
         */
        /* NOTE(review): one line elided here in this listing */
        allow_migrate = FALSE;
    }

    if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
        pe_rsc_trace(rsc, "Creating start action for %s to represent already pending start",
                     rsc->id);
        start = start_action(rsc, chosen, TRUE);
        /* NOTE(review): one line elided here in this listing */
    }

    if (current && chosen && current->details != chosen->details) {
        // Active on a different node than allocated: this is a move
        pe_rsc_trace(rsc, "Moving %s from %s to %s",
                     rsc->id, crm_str(current->details->uname),
                     crm_str(chosen->details->uname));
        is_moving = TRUE;
        need_stop = TRUE;

    } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
        if (pcmk_is_set(rsc->flags, pe_rsc_stop)) {
            // Failure recovery requires a full stop
            need_stop = TRUE;
            pe_rsc_trace(rsc, "Recovering %s", rsc->id);
        } else {
            pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
            if (rsc->next_role == RSC_ROLE_PROMOTED) {
                need_promote = TRUE;
            }
        }

    } else if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
        pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
        need_stop = TRUE;

    } else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
        pe_rsc_trace(rsc, "Creating start action for promoted resource %s",
                     rsc->id);
        start = start_action(rsc, chosen, TRUE);
        if (!pcmk_is_set(start->flags, pe_action_optional)) {
            // Recovery of a promoted resource
            pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
            need_stop = TRUE;
        }
    }

    /* Create any additional actions required when bringing resource down and
     * back up to same level.
     */
    role = rsc->role;
    // Walk the role state matrix downward to STOPPED, creating each step
    while (role != RSC_ROLE_STOPPED) {
        next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
        pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
                     (need_stop? "required" : "optional"), rsc->id,
                     role2text(role), role2text(next_role));
        if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) {
            break;
        }
        role = next_role;
    }


    // Walk back up to the current role (if the resource had to come down)
    while ((rsc->role <= rsc->next_role) && (role != rsc->role)
           && !pcmk_is_set(rsc->flags, pe_rsc_block)) {
        bool required = need_stop;

        next_role = rsc_state_matrix[role][rsc->role];
        if ((next_role == RSC_ROLE_PROMOTED) && need_promote) {
            required = true;
        }
        pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
                     (required? "required" : "optional"), rsc->id,
                     role2text(role), role2text(next_role));
        if (rsc_action_matrix[role][next_role](rsc, chosen, !required,
                                               data_set) == FALSE) {
            break;
        }
        role = next_role;
    }
    role = rsc->role;

    /* Required steps from this role to the next */
    while (role != rsc->next_role) {
        next_role = rsc_state_matrix[role][rsc->next_role];
        pe_rsc_trace(rsc, "Creating action to take %s from %s to %s (ending at %s)",
                     rsc->id, role2text(role), role2text(next_role),
                     role2text(rsc->next_role));
        if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) {
            break;
        }
        role = next_role;
    }

    if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
        pe_rsc_trace(rsc, "Not creating recurring monitors for blocked resource %s",
                     rsc->id);

    } else if ((rsc->next_role != RSC_ROLE_STOPPED)
               || !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
        pe_rsc_trace(rsc, "Creating recurring monitors for %s resource %s",
                     ((rsc->next_role == RSC_ROLE_STOPPED)? "unmanaged" : "active"),
                     rsc->id);
        start = start_action(rsc, chosen, TRUE);
        Recurring(rsc, start, chosen, data_set);
        Recurring_Stopped(rsc, start, chosen, data_set);

    } else {
        pe_rsc_trace(rsc, "Creating recurring monitors for inactive resource %s",
                     rsc->id);
        Recurring_Stopped(rsc, NULL, NULL, data_set);
    }

    /* if we are stuck in a partial migration, where the target
     * of the partial migration no longer matches the chosen target.
     * A full stop/start is required */
    if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) {
        pe_rsc_trace(rsc, "Not allowing partial migration of %s to continue",
                     rsc->id);
        allow_migrate = FALSE;

    } else if (!is_moving || !pcmk_is_set(rsc->flags, pe_rsc_managed)
               || pcmk_any_flags_set(rsc->flags,
                                     /* NOTE(review): flags argument line elided */
               || (current && current->details->unclean)
               || rsc->next_role < RSC_ROLE_STARTED) {

        allow_migrate = FALSE;
    }

    if (allow_migrate) {
        handle_migration_actions(rsc, current, chosen, data_set);
    }
}
1425 
1426 static void
1427 rsc_avoids_remote_nodes(pe_resource_t *rsc)
1428 {
1429  GHashTableIter iter;
1430  pe_node_t *node = NULL;
1431  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
1432  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
1433  if (node->details->remote_rsc) {
1434  node->weight = -INFINITY;
1435  }
1436  }
1437 }
1438 
1453 static GList *
1454 allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set)
1455 {
1456  GList *allowed_nodes = NULL;
1457 
1458  if (rsc->allowed_nodes) {
1459  allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
1460  }
1461 
1462  if (!pcmk__is_daemon) {
1463  allowed_nodes = g_list_sort(allowed_nodes, sort_node_uname);
1464  }
1465 
1466  return allowed_nodes;
1467 }
1468 
/* Create implicit ordering and colocation constraints for a primitive
 * resource: restart ordering, promotable-role ordering, unfencing,
 * utilization ("load stopped") orderings, and container/remote-node
 * relationships.
 *
 * NOTE(review): the signature line is elided in this listing; presumably
 * native_internal_constraints(pe_resource_t *rsc, pe_working_set_t *data_set)
 * -- confirm against upstream. Several other lines are elided below and
 * marked where noticed.
 */
void
{
    /* This function is on the critical path and worth optimizing as much as possible */

    pe_resource_t *top = NULL;
    GList *allowed_nodes = NULL;
    bool check_unfencing = FALSE;
    bool check_utilization = FALSE;

    if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
        pe_rsc_trace(rsc,
                     "Skipping native constraints for unmanaged resource: %s",
                     rsc->id);
        return;
    }

    top = uber_parent(rsc); // used by elided condition lines below, presumably

    // Whether resource requires unfencing
    check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device)
        /* NOTE(review): remaining condition lines elided in this listing */

    // Whether a non-default placement strategy is used
    check_utilization = (g_hash_table_size(rsc->utilization) > 0)
                        && !pcmk__str_eq(data_set->placement_strategy,
                                         "default", pcmk__str_casei);

    // Order stops before starts (i.e. restart)
    pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                       rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
                       /* NOTE(review): ordering-flags line elided */
                       data_set);

    // Promotable ordering: demote before stop, start before promote
    /* NOTE(review): the opening if-condition line is elided in this listing */
        || (rsc->role > RSC_ROLE_UNPROMOTED)) {

        pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL,
                           rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                           /* NOTE(review): flags/data_set line elided */

        pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
                           rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL,
                           pe_order_runnable_left, data_set);
    }

    // Don't clear resource history if probing on same node
    /* NOTE(review): the first line of this ordering call is elided */
                       NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0),
                       /* NOTE(review): ordering-flags line elided */
                       data_set);

    // Certain checks need allowed nodes
    if (check_unfencing || check_utilization || rsc->container) {
        allowed_nodes = allowed_nodes_as_list(rsc, data_set);
    }

    if (check_unfencing) {
        /* Check if the node needs to be unfenced first */

        for (GList *item = allowed_nodes; item; item = item->next) {
            pe_node_t *node = item->data;
            pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);

            crm_debug("Ordering any stops of %s before %s, and any starts after",
                      rsc->id, unfence->uuid);

            /*
             * It would be more efficient to order clone resources once,
             * rather than order each instance, but ordering the instance
             * allows us to avoid unnecessary dependencies that might conflict
             * with user constraints.
             *
             * @TODO: This constraint can still produce a transition loop if the
             * resource has a stop scheduled on the node being unfenced, and
             * there is a user ordering constraint to start some other resource
             * (which will be ordered after the unfence) before stopping this
             * resource. An example is "start some slow-starting cloned service
             * before stopping an associated virtual IP that may be moving to
             * it":
             * stop this -> unfencing -> start that -> stop this
             */
            pcmk__new_ordering(rsc, stop_key(rsc), NULL,
                               NULL, strdup(unfence->uuid), unfence,
                               /* NOTE(review): flags/data_set line elided */

            pcmk__new_ordering(NULL, strdup(unfence->uuid), unfence,
                               rsc, start_key(rsc), NULL,
                               /* NOTE(review): ordering-flags line elided */
                               data_set);
        }
    }

    if (check_utilization) {
        GList *gIter = NULL;

        pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s",
                     rsc->id, data_set->placement_strategy);

        // Order this resource's stop before the "load stopped" pseudo-op of
        // each node it is currently running on
        for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
            pe_node_t *current = (pe_node_t *) gIter->data;

            char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
                                                        current->details->uname);
            pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);

            if (load_stopped->node == NULL) {
                load_stopped->node = pe__copy_node(current);
                /* NOTE(review): one line elided here in this listing */
            }

            pcmk__new_ordering(rsc, stop_key(rsc), NULL, NULL,
                               load_stopped_task, load_stopped, pe_order_load,
                               data_set);
        }

        // Order each candidate node's "load stopped" pseudo-op before any
        // start or migrate_to of this resource on that node
        for (GList *item = allowed_nodes; item; item = item->next) {
            pe_node_t *next = item->data;
            char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
                                                        next->details->uname);
            pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);

            if (load_stopped->node == NULL) {
                load_stopped->node = pe__copy_node(next);
                /* NOTE(review): one line elided here in this listing */
            }

            pcmk__new_ordering(NULL, strdup(load_stopped_task), load_stopped,
                               rsc, start_key(rsc), NULL, pe_order_load,
                               data_set);

            pcmk__new_ordering(NULL, strdup(load_stopped_task), load_stopped,
                               rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
                               NULL, pe_order_load, data_set);

            free(load_stopped_task);
        }
    }

    if (rsc->container) {
        pe_resource_t *remote_rsc = NULL;

        if (rsc->is_remote_node) {
            // rsc is the implicit remote connection for a guest or bundle node

            /* Do not allow a guest resource to live on a Pacemaker Remote node,
             * to avoid nesting remotes. However, allow bundles to run on remote
             * nodes.
             */
            /* NOTE(review): the guarding if-line is elided in this listing */
                rsc_avoids_remote_nodes(rsc->container);
            }

            /* If someone cleans up a guest or bundle node's container, we will
             * likely schedule a (re-)probe of the container and recovery of the
             * connection. Order the connection stop after the container probe,
             * so that if we detect the container running, we will trigger a new
             * transition and avoid the unnecessary recovery.
             */
            /* NOTE(review): the first line of this ordering call is elided */
                                                 RSC_STOP, pe_order_optional, data_set);

        /* A user can specify that a resource must start on a Pacemaker Remote
         * node by explicitly configuring it with the container=NODENAME
         * meta-attribute. This is of questionable merit, since location
         * constraints can accomplish the same thing. But we support it, so here
         * we check whether a resource (that is not itself a remote connection)
         * has container set to a remote node or guest node resource.
         */
        } else if (rsc->container->is_remote_node) {
            remote_rsc = rsc->container;
        } else {
            remote_rsc = pe__resource_contains_guest_node(data_set,
                                                          rsc->container);
        }

        if (remote_rsc) {
            /* Force the resource on the Pacemaker Remote node instead of
             * colocating the resource with the container resource.
             */
            for (GList *item = allowed_nodes; item; item = item->next) {
                pe_node_t *node = item->data;

                if (node->details->remote_rsc != remote_rsc) {
                    node->weight = -INFINITY;
                }
            }

        } else {
            /* This resource is either a filler for a container that does NOT
             * represent a Pacemaker Remote node, or a Pacemaker Remote
             * connection resource for a guest node or bundle.
             */
            int score;

            crm_trace("Order and colocate %s relative to its container %s",
                      rsc->id, rsc->container->id);

            /* NOTE(review): the first line of this ordering call is elided */
                               pcmk__op_key(rsc->container->id, RSC_START, 0),
                               NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0),
                               NULL,
                               /* NOTE(review): ordering-flags line elided */
                               data_set);

            pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                               rsc->container,
                               pcmk__op_key(rsc->container->id, RSC_STOP, 0),
                               NULL, pe_order_implies_first, data_set);

            /* NOTE(review): the guarding if-line is elided in this listing */
                score = 10000;    /* Highly preferred but not essential */
            } else {
                score = INFINITY; /* Force them to run on the same host */
            }
            pcmk__new_colocation("resource-with-container", NULL, score, rsc,
                                 rsc->container, NULL, NULL, true, data_set);
        }
    }

    if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
        /* don't allow remote nodes to run stonith devices
         * or remote connection resources.*/
        rsc_avoids_remote_nodes(rsc);
    }
    g_list_free(allowed_nodes);
}
1698 
/* Apply a colocation constraint from the dependent primitive's point of
 * view, after validating both endpoints, by delegating to the primary
 * resource's variant-specific rsc_colocation_rh() method.
 *
 * NOTE(review): the signature line and first parameter line are elided in
 * this listing; presumably
 * native_rsc_colocation_lh(pe_resource_t *dependent, pe_resource_t *primary,
 * ...) -- confirm against upstream.
 */
void
                          pcmk__colocation_t *constraint,
                          pe_working_set_t *data_set)
{
    // A colocation without both endpoints cannot be applied
    if (dependent == NULL) {
        pe_err("dependent was NULL for %s", constraint->id);
        return;

    } else if (constraint->primary == NULL) {
        pe_err("primary was NULL for %s", constraint->id);
        return;
    }

    pe_rsc_trace(dependent,
                 "Processing colocation constraint between %s and %s",
                 dependent->id, primary->id);

    // Delegate through the primary's method table
    primary->cmds->rsc_colocation_rh(dependent, primary, constraint, data_set);
}
1719 
/* Apply a colocation constraint where the primary is a primitive: determine
 * how the constraint affects the dependent (via pcmk__colocation_affects())
 * and then adjust either the dependent's priority or its allowed-node
 * weights accordingly.
 *
 * NOTE(review): the signature line and first parameter line are elided in
 * this listing; presumably
 * native_rsc_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary,
 * ...). The switch case labels are also elided -- confirm against upstream.
 */
void
                          pcmk__colocation_t *constraint,
                          pe_working_set_t *data_set)
{
    enum pcmk__coloc_affects filter_results;

    CRM_ASSERT((dependent != NULL) && (primary != NULL));
    filter_results = pcmk__colocation_affects(dependent, primary, constraint,
                                              false);
    pe_rsc_trace(dependent, "%s %s with %s (%s, score=%d, filter=%d)",
                 ((constraint->score > 0)? "Colocating" : "Anti-colocating"),
                 dependent->id, primary->id, constraint->id, constraint->score,
                 filter_results);

    switch (filter_results) {
        /* NOTE(review): case label elided in this listing */
            pcmk__apply_coloc_to_priority(dependent, primary, constraint);
            break;
        /* NOTE(review): case label elided in this listing */
            pcmk__apply_coloc_to_weights(dependent, primary, constraint);
            break;
        default:
            return;
    }
}
1747 
/* Return a primitive's action flags unchanged: unlike collective resources,
 * primitives need no node-specific flag adjustment.
 *
 * NOTE(review): the signature line is elided in this listing; presumably
 * native_action_flags(pe_action_t *action, pe_node_t *node) -- confirm
 * against upstream.
 */
enum pe_action_flags
{
    return action->flags;
}
1753 
1754 static inline bool
1755 is_primitive_action(pe_action_t *action)
1756 {
1757  return action && action->rsc && (action->rsc->variant == pe_native);
1758 }
1759 
/* Clear a flag on an action and, when the action belongs to a different
 * resource than the 'reason' action, record a human-readable reason for the
 * change via pe_action_set_reason(). The third argument passed there is
 * TRUE only for pe_action_migrate_runnable -- presumably an
 * "overwrite existing reason" toggle; confirm against pe_action_set_reason().
 */
#define clear_action_flag_because(action, flag, reason) do { \
        if (pcmk_is_set((action)->flags, (flag))) { \
            pe__clear_action_flags(action, flag); \
            if ((action)->rsc != (reason)->rsc) { \
                char *reason_text = pe__action2reason((reason), (flag)); \
                pe_action_set_reason((action), reason_text, \
                                     ((flag) == pe_action_migrate_runnable)); \
                free(reason_text); \
            } \
        } \
    } while (0)
1779 
/* Handle a restart-type ordering between 'first' and 'then' on primitive
 * resources: if 'then' is required, or is an unrunnable action on the same
 * resource, make the pair consistent ('first' becomes required; 'then'
 * becomes unrunnable when a required 'first' cannot run).
 *
 * NOTE(review): several lines are elided in this listing (a condition line
 * and the flag-clearing calls inside the if-bodies); marked below. Confirm
 * against upstream before relying on exact flag updates.
 */
static void
handle_restart_ordering(pe_action_t *first, pe_action_t *then,
                        enum pe_action_flags filter)
{
    const char *reason = NULL;

    CRM_ASSERT(is_primitive_action(first));
    CRM_ASSERT(is_primitive_action(then));

    // We need to update the action in two cases:

    // ... if 'then' is required
    if (pcmk_is_set(filter, pe_action_optional)
        && !pcmk_is_set(then->flags, pe_action_optional)) {
        reason = "restart";
    }

    /* ... if 'then' is unrunnable action on same resource (if a resource
     * should restart but can't start, we still want to stop)
     */
    if (pcmk_is_set(filter, pe_action_runnable)
        /* NOTE(review): one condition line elided here in this listing */
        && pcmk_is_set(then->rsc->flags, pe_rsc_managed)
        && (first->rsc == then->rsc)) {
        reason = "stop";
    }

    if (reason == NULL) {
        return; // neither case applies; nothing to update
    }

    pe_rsc_trace(first->rsc, "Handling %s -> %s for %s",
                 first->uuid, then->uuid, reason);

    // Make 'first' required if it is runnable
    if (pcmk_is_set(first->flags, pe_action_runnable)) {
        /* NOTE(review): line elided here in this listing */
    }

    // Make 'first' required if 'then' is required
    if (!pcmk_is_set(then->flags, pe_action_optional)) {
        /* NOTE(review): line elided here in this listing */
    }

    // Make 'first' unmigratable if 'then' is unmigratable
    /* NOTE(review): lines elided here in this listing (the dangling
     * closing brace below belongs to the elided if) */
    }

    // Make 'then' unrunnable if 'first' is required but unrunnable
    if (!pcmk_is_set(first->flags, pe_action_optional)
        && !pcmk_is_set(first->flags, pe_action_runnable)) {
        /* NOTE(review): line elided here in this listing */
    }
}
1846 
/* Update a 'first'/'then' action pair's flags to enforce an ordering of the
 * given type, returning graph flags indicating which of the two actions (if
 * any) changed.
 *
 * NOTE(review): the signature line is elided in this listing; presumably
 * native_update_actions(pe_action_t *first, pe_action_t *then,
 * pe_node_t *node, ...). Many other lines (mostly if-condition lines and
 * clear_action_flag_because() calls) are elided below and marked where
 * noticed -- confirm against upstream before relying on exact flag logic.
 */
enum pe_graph_flags
                      enum pe_action_flags flags, enum pe_action_flags filter,
                      enum pe_ordering type, pe_working_set_t *data_set)
{
    /* flags == get_action_flags(first, then_node) called from update_action() */
    enum pe_graph_flags changed = pe_graph_none;
    enum pe_action_flags then_flags = then->flags;   // snapshot to detect changes
    enum pe_action_flags first_flags = first->flags; // snapshot to detect changes

    if (type & pe_order_asymmetrical) {
        pe_resource_t *then_rsc = then->rsc;
        enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0;

        if (!then_rsc) {
            /* ignore */
        } else if ((then_rsc_role == RSC_ROLE_STOPPED) && pcmk__str_eq(then->task, RSC_STOP, pcmk__str_casei)) {
            /* ignore... if 'then' is supposed to be stopped after 'first', but
             * then is already stopped, there is nothing to be done when non-symmetrical. */
        } else if ((then_rsc_role >= RSC_ROLE_STARTED)
                   && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)
                   /* NOTE(review): one condition line elided here */
                   && then->node
                   && pcmk__list_of_1(then_rsc->running_on)
                   && then->node->details == ((pe_node_t *) then_rsc->running_on->data)->details) {
            /* Ignore. If 'then' is supposed to be started after 'first', but
             * 'then' is already started, there is nothing to be done when
             * asymmetrical -- unless the start is mandatory, which indicates
             * the resource is restarting, and the ordering is still needed.
             */
        } else if (!(first->flags & pe_action_runnable)) {
            /* prevent 'then' action from happening if 'first' is not runnable and
             * 'then' has not yet occurred. */
            /* NOTE(review): lines elided here in this listing */
        } else {
            /* ignore... then is allowed to start/stop if it wants to. */
        }
    }

    /* NOTE(review): the opening if-condition line is elided here */
        && !pcmk_is_set(then_flags, pe_action_optional)) {
        // Then is required, and implies first should be, too

        if (pcmk_is_set(filter, pe_action_optional)
            /* NOTE(review): one condition line elided here */
            && pcmk_is_set(first_flags, pe_action_optional)) {
            /* NOTE(review): line elided here */
        }

        /* NOTE(review): lines elided here in this listing (the dangling
         * brace below belongs to an elided if) */
        }
    }

    /* NOTE(review): an opening if-statement line is elided here */
        if ((filter & pe_action_optional) &&
            ((then->flags & pe_action_optional) == FALSE) &&
            (then->rsc != NULL) && (then->rsc->role == RSC_ROLE_PROMOTED)) {

            /* NOTE(review): lines elided here */

            /* NOTE(review): lines elided here; 'then)' below is the tail of
             * an elided call */
                                          then);
            }
        }
    }

    /* NOTE(review): the opening if-condition line is elided here */
        && pcmk_is_set(filter, pe_action_optional)) {

        if (((then->flags & pe_action_migrate_runnable) == FALSE) ||
            ((then->flags & pe_action_runnable) == FALSE)) {
            /* NOTE(review): line elided here */
        }

        if ((then->flags & pe_action_optional) == 0) {
            /* NOTE(review): line elided here */
        }
    }

    if ((type & pe_order_pseudo_left)
        && pcmk_is_set(filter, pe_action_optional)) {

        if ((first->flags & pe_action_runnable) == FALSE) {
            /* NOTE(review): lines elided here */
        }
    }

    /* NOTE(review): the opening if-condition line is elided here */
        && pcmk_is_set(filter, pe_action_runnable)
        /* NOTE(review): lines elided here */

    /* NOTE(review): lines elided here */
    }

    /* NOTE(review): the opening if-condition line is elided here */
        && pcmk_is_set(filter, pe_action_optional)
        /* NOTE(review): lines elided here */

        /* NOTE(review): line elided here */
    }

    /* NOTE(review): an opening if-statement line is elided here
     * (presumably checking for a restart-type ordering) */
        handle_restart_ordering(first, then, filter);
    }

    if (then_flags != then->flags) {
        // 'then' changed: record it and re-evaluate where needed
        pe__set_graph_flags(changed, first, pe_graph_updated_then);
        pe_rsc_trace(then->rsc,
                     "%s on %s: flags are now 0x%.6x (was 0x%.6x) "
                     "because of 'first' %s (0x%.6x)",
                     then->uuid,
                     then->node? then->node->details->uname : "no node",
                     then->flags, then_flags, first->uuid, first->flags);

        if(then->rsc && then->rsc->parent) {
            /* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */
            update_action(then, data_set);
        }
    }

    if (first_flags != first->flags) {
        /* NOTE(review): line elided here (presumably recording that 'first'
         * was updated in 'changed') */
        pe_rsc_trace(first->rsc,
                     "%s on %s: flags are now 0x%.6x (was 0x%.6x) "
                     "because of 'then' %s (0x%.6x)",
                     first->uuid,
                     first->node? first->node->details->uname : "no node",
                     first->flags, first_flags, then->uuid, then->flags);
    }

    return changed;
}
1989 
/* Apply a location constraint to a primitive resource by delegating to the
 * common implementation.
 *
 * NOTE(review): the signature line is elided in this listing; presumably
 * native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint) --
 * confirm against upstream.
 */
void
{
    pcmk__apply_location(constraint, rsc);
}
1995 
/* Add this resource's scheduled actions to the transition graph, then
 * recurse into any child resources via their variant-specific expand method.
 *
 * NOTE(review): the signature line is elided in this listing; presumably
 * native_expand(pe_resource_t *rsc, pe_working_set_t *data_set) -- confirm
 * against upstream.
 */
void
{
    GList *gIter = NULL;

    CRM_ASSERT(rsc);
    pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);

    // Emit a graph element for each action scheduled for this resource
    for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
        pe_action_t *action = (pe_action_t *) gIter->data;

        crm_trace("processing action %d for rsc=%s", action->id, rsc->id);
        graph_element_from_action(action, data_set);
    }

    // Recurse into children through their method tables
    for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
        pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;

        child_rsc->cmds->expand(child_rsc, data_set);
    }
}
2017 
/* Verify that a non-optional stop exists for the resource being processed.
 *
 * When the current node is unclean the stop will be a pseudo-op, so no check
 * is made. Otherwise a NULL or still-optional 'stop' action at this point
 * indicates a scheduler bug: it is logged with the caller-supplied line
 * number and asserted. Relies on 'rsc', 'current', and 'stop' being in
 * scope at the call site.
 */
#define STOP_SANITY_ASSERT(lineno) do { \
        if(current && current->details->unclean) { \
            /* It will be a pseudo op */ \
        } else if(stop == NULL) { \
            crm_err("%s:%d: No stop action exists for %s", \
                    __func__, lineno, rsc->id); \
            CRM_ASSERT(stop != NULL); \
        } else if (pcmk_is_set(stop->flags, pe_action_optional)) { \
            crm_err("%s:%d: Action %s is still optional", \
                    __func__, lineno, stop->uuid); \
            CRM_ASSERT(!pcmk_is_set(stop->flags, pe_action_optional)); \
        } \
    } while(0)
2031 
2032 void
2034 {
2035  pcmk__output_t *out = data_set->priv;
2036 
2037  pe_node_t *next = NULL;
2038  pe_node_t *current = NULL;
2039 
2040  gboolean moving = FALSE;
2041 
2042  if(rsc->variant == pe_container) {
2043  pcmk__bundle_log_actions(rsc, data_set);
2044  return;
2045  }
2046 
2047  if (rsc->children) {
2048  g_list_foreach(rsc->children, (GFunc) LogActions, data_set);
2049  return;
2050  }
2051 
2052  next = rsc->allocated_to;
2053  if (rsc->running_on) {
2054  current = pe__current_node(rsc);
2055  if (rsc->role == RSC_ROLE_STOPPED) {
2056  /*
2057  * This can occur when resources are being recovered
2058  * We fiddle with the current role in native_create_actions()
2059  */
2060  rsc->role = RSC_ROLE_STARTED;
2061  }
2062  }
2063 
2064  if ((current == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
2065  /* Don't log stopped orphans */
2066  return;
2067  }
2068 
2069  out->message(out, "rsc-action", rsc, current, next, moving);
2070 }
2071 
2072 gboolean
2073 StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2074 {
2075  GList *gIter = NULL;
2076 
2077  CRM_ASSERT(rsc);
2078  pe_rsc_trace(rsc, "%s", rsc->id);
2079 
2080  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2081  pe_node_t *current = (pe_node_t *) gIter->data;
2082  pe_action_t *stop;
2083 
2084  if (rsc->partial_migration_target) {
2085  if (rsc->partial_migration_target->details == current->details) {
2086  pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname,
2087  next->details->uname, rsc->id);
2088  continue;
2089  } else {
2090  pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id);
2091  optional = FALSE;
2092  }
2093  }
2094 
2095  pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname);
2096  stop = stop_action(rsc, current, optional);
2097 
2098  if(rsc->allocated_to == NULL) {
2099  pe_action_set_reason(stop, "node availability", TRUE);
2100  }
2101 
2102  if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
2104  }
2105 
2106  if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) {
2107  DeleteRsc(rsc, current, optional, data_set);
2108  }
2109 
2111  pe_action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, FALSE, data_set);
2112 
2113  order_actions(stop, unfence, pe_order_implies_first);
2114  if (!pcmk__node_unfenced(current)) {
2115  pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname);
2116  }
2117  }
2118  }
2119 
2120  return TRUE;
2121 }
2122 
2123 gboolean
2124 StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2125 {
2126  pe_action_t *start = NULL;
2127 
2128  CRM_ASSERT(rsc);
2129  pe_rsc_trace(rsc, "%s on %s %d %d", rsc->id, next ? next->details->uname : "N/A", optional, next ? next->weight : 0);
2130  start = start_action(rsc, next, TRUE);
2131 
2132  pcmk__order_vs_unfence(rsc, next, start, pe_order_implies_then, data_set);
2133 
2134  if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) {
2136  }
2137 
2138 
2139  return TRUE;
2140 }
2141 
2142 gboolean
2143 PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2144 {
2145  GList *gIter = NULL;
2146  gboolean runnable = TRUE;
2147  GList *action_list = NULL;
2148 
2149  CRM_ASSERT(rsc);
2150  CRM_CHECK(next != NULL, return FALSE);
2151  pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);
2152 
2153  action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);
2154 
2155  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2156  pe_action_t *start = (pe_action_t *) gIter->data;
2157 
2158  if (!pcmk_is_set(start->flags, pe_action_runnable)) {
2159  runnable = FALSE;
2160  }
2161  }
2162  g_list_free(action_list);
2163 
2164  if (runnable) {
2165  promote_action(rsc, next, optional);
2166  return TRUE;
2167  }
2168 
2169  pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);
2170 
2171  action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE);
2172 
2173  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2174  pe_action_t *promote = (pe_action_t *) gIter->data;
2175 
2177  }
2178 
2179  g_list_free(action_list);
2180  return TRUE;
2181 }
2182 
2183 gboolean
2184 DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2185 {
2186  GList *gIter = NULL;
2187 
2188  CRM_ASSERT(rsc);
2189  pe_rsc_trace(rsc, "%s", rsc->id);
2190 
2191  /* CRM_CHECK(rsc->next_role == RSC_ROLE_UNPROMOTED, return FALSE); */
2192  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2193  pe_node_t *current = (pe_node_t *) gIter->data;
2194 
2195  pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A");
2196  demote_action(rsc, current, optional);
2197  }
2198  return TRUE;
2199 }
2200 
2201 gboolean
2202 RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2203 {
2204  CRM_ASSERT(rsc);
2205  crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A");
2206  CRM_CHECK(FALSE, return FALSE);
2207  return FALSE;
2208 }
2209 
2210 gboolean
2211 NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2212 {
2213  CRM_ASSERT(rsc);
2214  pe_rsc_trace(rsc, "%s", rsc->id);
2215  return FALSE;
2216 }
2217 
2218 gboolean
2219 DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set)
2220 {
2221  if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
2222  pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname);
2223  return FALSE;
2224 
2225  } else if (node == NULL) {
2226  pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id);
2227  return FALSE;
2228 
2229  } else if (node->details->unclean || node->details->online == FALSE) {
2230  pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id,
2231  node->details->uname);
2232  return FALSE;
2233  }
2234 
2235  crm_notice("Removing %s from %s", rsc->id, node->details->uname);
2236 
2237  delete_action(rsc, node, optional);
2238 
2241  data_set);
2242 
2245  data_set);
2246 
2247  return TRUE;
2248 }
2249 
2250 gboolean
2252  gboolean force, pe_working_set_t * data_set)
2253 {
2255  char *key = NULL;
2256  pe_action_t *probe = NULL;
2257  pe_node_t *running = NULL;
2258  pe_node_t *allowed = NULL;
2259  pe_resource_t *top = uber_parent(rsc);
2260 
2261  static const char *rc_promoted = NULL;
2262  static const char *rc_inactive = NULL;
2263 
2264  if (rc_inactive == NULL) {
2265  rc_inactive = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
2266  rc_promoted = pcmk__itoa(PCMK_OCF_RUNNING_PROMOTED);
2267  }
2268 
2269  CRM_CHECK(node != NULL, return FALSE);
2270  if (!force && !pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
2271  pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id);
2272  return FALSE;
2273  }
2274 
2275  if (pe__is_guest_or_remote_node(node)) {
2276  const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
2277 
2278  if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
2279  pe_rsc_trace(rsc,
2280  "Skipping probe for %s on %s because Pacemaker Remote nodes cannot run stonith agents",
2281  rsc->id, node->details->id);
2282  return FALSE;
2283  } else if (pe__is_guest_node(node)
2284  && pe__resource_contains_guest_node(data_set, rsc)) {
2285  pe_rsc_trace(rsc,
2286  "Skipping probe for %s on %s because guest nodes cannot run resources containing guest nodes",
2287  rsc->id, node->details->id);
2288  return FALSE;
2289  } else if (rsc->is_remote_node) {
2290  pe_rsc_trace(rsc,
2291  "Skipping probe for %s on %s because Pacemaker Remote nodes cannot host remote connections",
2292  rsc->id, node->details->id);
2293  return FALSE;
2294  }
2295  }
2296 
2297  if (rsc->children) {
2298  GList *gIter = NULL;
2299  gboolean any_created = FALSE;
2300 
2301  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2302  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
2303 
2304  any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set)
2305  || any_created;
2306  }
2307 
2308  return any_created;
2309 
2310  } else if ((rsc->container) && (!rsc->is_remote_node)) {
2311  pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id);
2312  return FALSE;
2313  }
2314 
2315  if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
2316  pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id);
2317  return FALSE;
2318  }
2319 
2320  // Check whether resource is already known on node
2321  if (!force && g_hash_table_lookup(rsc->known_on, node->details->id)) {
2322  pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname);
2323  return FALSE;
2324  }
2325 
2326  allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
2327 
2328  if (rsc->exclusive_discover || top->exclusive_discover) {
2329  if (allowed == NULL) {
2330  /* exclusive discover is enabled and this node is not in the allowed list. */
2331  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, A", rsc->id, node->details->id);
2332  return FALSE;
2333  } else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
2334  /* exclusive discover is enabled and this node is not marked
2335  * as a node this resource should be discovered on */
2336  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, B", rsc->id, node->details->id);
2337  return FALSE;
2338  }
2339  }
2340 
2341  if(allowed == NULL && node->rsc_discover_mode == pe_discover_never) {
2342  /* If this node was allowed to host this resource it would
2343  * have been explicitly added to the 'allowed_nodes' list.
2344  * However it wasn't and the node has discovery disabled, so
2345  * no need to probe for this resource.
2346  */
2347  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, C", rsc->id, node->details->id);
2348  return FALSE;
2349  }
2350 
2351  if (allowed && allowed->rsc_discover_mode == pe_discover_never) {
2352  /* this resource is marked as not needing to be discovered on this node */
2353  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, discovery mode", rsc->id, node->details->id);
2354  return FALSE;
2355  }
2356 
2357  if (pe__is_guest_node(node)) {
2358  pe_resource_t *remote = node->details->remote_rsc->container;
2359 
2360  if(remote->role == RSC_ROLE_STOPPED) {
2361  /* If the container is stopped, then we know anything that
2362  * might have been inside it is also stopped and there is
2363  * no need to probe.
2364  *
2365  * If we don't know the container's state on the target
2366  * either:
2367  *
2368  * - the container is running, the transition will abort
2369  * and we'll end up in a different case next time, or
2370  *
2371  * - the container is stopped
2372  *
2373  * Either way there is no need to probe.
2374  *
2375  */
2376  if(remote->allocated_to
2377  && g_hash_table_lookup(remote->known_on, remote->allocated_to->details->id) == NULL) {
2378  /* For safety, we order the 'rsc' start after 'remote'
2379  * has been probed.
2380  *
2381  * Using 'top' helps for groups, but we may need to
2382  * follow the start's ordering chain backwards.
2383  */
2384  pcmk__new_ordering(remote,
2385  pcmk__op_key(remote->id, RSC_STATUS, 0),
2386  NULL, top,
2387  pcmk__op_key(top->id, RSC_START, 0), NULL,
2388  pe_order_optional, data_set);
2389  }
2390  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopped",
2391  rsc->id, node->details->id, remote->id);
2392  return FALSE;
2393 
2394  /* Here we really we want to check if remote->stop is required,
2395  * but that information doesn't exist yet
2396  */
2397  } else if(node->details->remote_requires_reset
2398  || node->details->unclean
2399  || pcmk_is_set(remote->flags, pe_rsc_failed)
2400  || remote->next_role == RSC_ROLE_STOPPED
2401  || (remote->allocated_to
2402  && pe_find_node(remote->running_on, remote->allocated_to->details->uname) == NULL)
2403  ) {
2404  /* The container is stopping or restarting, don't start
2405  * 'rsc' until 'remote' stops as this also implies that
2406  * 'rsc' is stopped - avoiding the need to probe
2407  */
2408  pcmk__new_ordering(remote, pcmk__op_key(remote->id, RSC_STOP, 0),
2409  NULL, top, pcmk__op_key(top->id, RSC_START, 0),
2410  NULL, pe_order_optional, data_set);
2411  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopping, restarting or moving",
2412  rsc->id, node->details->id, remote->id);
2413  return FALSE;
2414 /* } else {
2415  * The container is running so there is no problem probing it
2416  */
2417  }
2418  }
2419 
2420  key = pcmk__op_key(rsc->id, RSC_STATUS, 0);
2421  probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set);
2423 
2424  pcmk__order_vs_unfence(rsc, node, probe, pe_order_optional, data_set);
2425 
2426  /*
2427  * We need to know if it's running_on (not just known_on) this node
2428  * to correctly determine the target rc.
2429  */
2430  running = pe_find_node_id(rsc->running_on, node->details->id);
2431  if (running == NULL) {
2432  add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
2433 
2434  } else if (rsc->role == RSC_ROLE_PROMOTED) {
2435  add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_promoted);
2436  }
2437 
2438  crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role),
2440 
2441  if (pcmk__is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) {
2442  top = rsc;
2443  } else {
2444  crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id);
2445  }
2446 
2447  if (!pcmk_is_set(probe->flags, pe_action_runnable)
2448  && (rsc->running_on == NULL)) {
2449  /* Prevent the start from occurring if rsc isn't active, but
2450  * don't cause it to stop if it was active already
2451  */
2453  }
2454 
2455  pcmk__new_ordering(rsc, NULL, probe, top,
2456  pcmk__op_key(top->id, RSC_START, 0), NULL, flags,
2457  data_set);
2458 
2459  // Order the probe before any agent reload
2460  pcmk__new_ordering(rsc, NULL, probe, top, reload_key(rsc), NULL,
2461  pe_order_optional, data_set);
2462 
2463 #if 0
2464  // complete is always null currently
2465  if (!pcmk__is_unfence_device(rsc, data_set)) {
2466  /* Normally rsc.start depends on probe complete which depends
2467  * on rsc.probe. But this can't be the case for fence devices
2468  * with unfencing, as it would create graph loops.
2469  *
2470  * So instead we explicitly order 'rsc.probe then rsc.start'
2471  */
2472  order_actions(probe, complete, pe_order_implies_then);
2473  }
2474 #endif
2475  return TRUE;
2476 }
2477 
2478 void
2480 {
2481  GList *gIter = NULL;
2482  pe_action_t *reload = NULL;
2483 
2484  if (rsc->children) {
2485  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2486  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
2487 
2488  ReloadRsc(child_rsc, node, data_set);
2489  }
2490  return;
2491 
2492  } else if (rsc->variant > pe_native) {
2493  /* Complex resource with no children */
2494  return;
2495 
2496  } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
2497  pe_rsc_trace(rsc, "%s: unmanaged", rsc->id);
2498  return;
2499 
2500  } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
2501  /* We don't need to specify any particular actions here, normal failure
2502  * recovery will apply.
2503  */
2504  pe_rsc_trace(rsc, "%s: preventing agent reload because failed",
2505  rsc->id);
2506  return;
2507 
2508  } else if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
2509  /* If a resource's configuration changed while a start was pending,
2510  * force a full restart.
2511  */
2512  pe_rsc_trace(rsc, "%s: preventing agent reload because start pending",
2513  rsc->id);
2514  stop_action(rsc, node, FALSE);
2515  return;
2516 
2517  } else if (node == NULL) {
2518  pe_rsc_trace(rsc, "%s: not active", rsc->id);
2519  return;
2520  }
2521 
2522  pe_rsc_trace(rsc, "Processing %s", rsc->id);
2524 
2525  reload = custom_action(rsc, reload_key(rsc), CRMD_ACTION_RELOAD_AGENT, node,
2526  FALSE, TRUE, data_set);
2527  pe_action_set_reason(reload, "resource definition change", FALSE);
2528 
2529  pcmk__new_ordering(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
2531  data_set);
2532  pcmk__new_ordering(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
2534  data_set);
2535 }
2536 
2537 void
2538 native_append_meta(pe_resource_t * rsc, xmlNode * xml)
2539 {
2540  char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
2541  pe_resource_t *parent;
2542 
2543  if (value) {
2544  char *name = NULL;
2545 
2547  crm_xml_add(xml, name, value);
2548  free(name);
2549  }
2550 
2551  value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
2552  if (value) {
2553  char *name = NULL;
2554 
2556  crm_xml_add(xml, name, value);
2557  free(name);
2558  }
2559 
2560  for (parent = rsc; parent != NULL; parent = parent->parent) {
2561  if (parent->container) {
2563  }
2564  }
2565 }
Services API.
pe_action_t * pe_cancel_op(pe_resource_t *rsc, const char *name, guint interval_ms, pe_node_t *node, pe_working_set_t *data_set)
#define LOG_TRACE
Definition: logging.h:36
#define CRM_CHECK(expr, failure_action)
Definition: logging.h:225
pe_node_t * pe_find_node(GList *node_list, const char *uname)
Definition: status.c:434
void pcmk__bundle_log_actions(pe_resource_t *rsc, pe_working_set_t *data_set)
enum rsc_start_requirement needs
Definition: pe_types.h:420
enum pe_quorum_policy no_quorum_policy
Definition: pe_types.h:149
#define RSC_STOP
Definition: crm.h:204
void native_rsc_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary, pcmk__colocation_t *constraint, pe_working_set_t *data_set)
#define crm_notice(fmt, args...)
Definition: logging.h:359
GHashTable * known_on
Definition: pe_types.h:367
xmlNode * ops_xml
Definition: pe_types.h:326
bool pe__is_guest_or_remote_node(const pe_node_t *node)
Definition: remote.c:41
#define pe_rsc_debug(rsc, fmt, args...)
Definition: internal.h:19
gboolean unseen
Definition: pe_types.h:218
#define INFINITY
Definition: crm.h:99
GList * rsc_cons
Definition: pe_types.h:357
#define LOAD_STOPPED
Service active and promoted.
Definition: results.h:170
#define pe__set_action_flags(action, flags_to_set)
Definition: internal.h:59
#define pe__show_node_weights(level, rsc, text, nodes, data_set)
Definition: internal.h:353
pcmk__coloc_affects
#define promote_action(rsc, node, optional)
Definition: internal.h:393
G_GNUC_INTERNAL bool pcmk__node_unfenced(pe_node_t *node)
GList * sort_nodes_by_weight(GList *nodes, pe_node_t *active_node, pe_working_set_t *data_set)
#define stop_action(rsc, node, optional)
Definition: internal.h:377
pe_node_t * pe__find_active_on(const pe_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean)
Definition: complex.c:999
pe_resource_t * container
Definition: pe_types.h:380
bool pcmk__strcase_any_of(const char *s,...) G_GNUC_NULL_TERMINATED
Definition: strings.c:931
pe_node_t * partial_migration_source
Definition: pe_types.h:365
int(* message)(pcmk__output_t *out, const char *message_id,...)
enum rsc_role_e role
Definition: pe_types.h:370
gboolean PromoteRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
GList * children
Definition: pe_types.h:377
resource_alloc_functions_t * cmds
Definition: pe_types.h:334
#define pe_rsc_stop
Definition: pe_types.h:262
#define delete_action(rsc, node, optional)
Definition: internal.h:367
#define pe_flag_remove_after_stop
Definition: pe_types.h:110
G_GNUC_INTERNAL void pcmk__new_ordering(pe_resource_t *lh_rsc, char *lh_task, pe_action_t *lh_action, pe_resource_t *rh_rsc, char *rh_task, pe_action_t *rh_action, enum pe_ordering type, pe_working_set_t *data_set)
pe_resource_t * rsc
Definition: pe_types.h:410
#define XML_RSC_ATTR_INCARNATION
Definition: msg_xml.h:225
enum rsc_role_e next_role
Definition: pe_types.h:371
#define INFINITY_HACK
gboolean exclusive_discover
Definition: pe_types.h:352
#define reload_key(rsc)
Definition: internal.h:381
#define pcmk__config_err(fmt...)
gboolean StartRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
pe_resource_t * remote_rsc
Definition: pe_types.h:230
GHashTable * meta
Definition: pe_types.h:373
gboolean native_assign_node(pe_resource_t *rsc, pe_node_t *chosen, gboolean force)
#define pe_rsc_unique
Definition: pe_types.h:254
void LogActions(pe_resource_t *rsc, pe_working_set_t *data_set)
char * score2char_stack(int score, char *buf, size_t len)
Definition: utils.c:101
Service safely stopped.
Definition: results.h:169
resource_object_functions_t * fns
Definition: pe_types.h:333
#define clear_node_weights_flags(nw_flags, nw_rsc, flags_to_clear)
#define XML_LRM_ATTR_INTERVAL
Definition: msg_xml.h:291
#define RSC_DELETE
Definition: crm.h:195
const char * crm_xml_add(xmlNode *node, const char *name, const char *value)
Create an XML attribute with specified name and value.
Definition: nvpair.c:323
pe_node_t * pe__copy_node(const pe_node_t *this_node)
Definition: utils.c:142
pe_resource_t * dependent
enum pe_graph_flags native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type, pe_working_set_t *data_set)
void resource_location(pe_resource_t *rsc, pe_node_t *node, int score, const char *tag, pe_working_set_t *data_set)
Definition: utils.c:1691
GList * rsc_cons_lhs
Definition: pe_types.h:356
enum crm_ais_msg_types type
Definition: cpg.c:48
#define demote_key(rsc)
Definition: internal.h:402
pe_node_t * partial_migration_target
Definition: pe_types.h:364
#define RSC_START
Definition: crm.h:201
pe_node_t *(* allocate)(pe_resource_t *, pe_node_t *, pe_working_set_t *)
gboolean RoleError(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
pe_node_t * allocated_to
Definition: pe_types.h:363
#define pe_rsc_allow_remote_remotes
Definition: pe_types.h:264
gboolean can_run_resources(const pe_node_t *node)
#define pe_flag_have_quorum
Definition: pe_types.h:94
G_GNUC_INTERNAL void pcmk__apply_coloc_to_priority(pe_resource_t *dependent, pe_resource_t *primary, pcmk__colocation_t *constraint)
#define pe_proc_err(fmt...)
Definition: internal.h:32
gboolean remote_requires_reset
Definition: pe_types.h:224
#define RSC_MIGRATE
Definition: crm.h:198
char * crm_meta_name(const char *field)
Definition: utils.c:511
const char * action
Definition: pcmk_fence.c:30
G_GNUC_INTERNAL void pcmk__new_colocation(const char *id, const char *node_attr, int score, pe_resource_t *dependent, pe_resource_t *primary, const char *dependent_role, const char *primary_role, bool influence, pe_working_set_t *data_set)
#define pe__set_resource_flags(resource, flags_to_set)
Definition: internal.h:47
GList * nodes
Definition: pe_types.h:157
#define clear_action_flag_because(action, flag, reason)
#define pe_flag_stop_everything
Definition: pe_types.h:105
#define demote_action(rsc, node, optional)
Definition: internal.h:403
#define pe_rsc_provisional
Definition: pe_types.h:258
const char * role2text(enum rsc_role_e role)
Definition: common.c:459
int weight
Definition: pe_types.h:241
#define pe_rsc_merging
Definition: pe_types.h:260
GList * dangling_migrations
Definition: pe_types.h:378
#define CRMD_ACTION_RELOAD_AGENT
Definition: crm.h:172
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
Definition: utils.c:2347
gboolean(* create_probe)(pe_resource_t *, pe_node_t *, pe_action_t *, gboolean, pe_working_set_t *)
gboolean DemoteRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
pe_action_flags
Definition: pe_types.h:291
#define pe_rsc_allow_migrate
Definition: pe_types.h:273
#define pe_rsc_failed
Definition: pe_types.h:267
pe_action_t * get_pseudo_op(const char *name, pe_working_set_t *data_set)
Definition: utils.c:1952
#define crm_debug(fmt, args...)
Definition: logging.h:362
void native_expand(pe_resource_t *rsc, pe_working_set_t *data_set)
gboolean(* rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
pe_resource_t * uber_parent(pe_resource_t *rsc)
Definition: complex.c:903
pe_resource_t * pe__resource_contains_guest_node(const pe_working_set_t *data_set, const pe_resource_t *rsc)
Definition: remote.c:66
#define XML_RSC_ATTR_CONTAINER
Definition: msg_xml.h:241
void native_internal_constraints(pe_resource_t *rsc, pe_working_set_t *data_set)
const char * crm_element_value(const xmlNode *data, const char *name)
Retrieve the value of an XML attribute.
Definition: nvpair.c:529
bool pe__is_guest_node(const pe_node_t *node)
Definition: remote.c:33
void(* rsc_colocation_lh)(pe_resource_t *, pe_resource_t *, pcmk__colocation_t *, pe_working_set_t *)
pe_resource_t * primary
#define stop_key(rsc)
Definition: internal.h:376
void native_append_meta(pe_resource_t *rsc, xmlNode *xml)
#define pe_rsc_start_pending
Definition: pe_types.h:269
char * task
Definition: pe_types.h:414
gboolean update_action(pe_action_t *action, pe_working_set_t *data_set)
#define pe__clear_action_flags(action, flags_to_clear)
Definition: internal.h:68
#define CRM_ATTR_UNAME
Definition: crm.h:114
#define crm_trace(fmt, args...)
Definition: logging.h:363
#define do_crm_log(level, fmt, args...)
Log a message.
Definition: logging.h:166
#define promote_key(rsc)
Definition: internal.h:392
char * crm_strdup_printf(char const *format,...) G_GNUC_PRINTF(1
void process_utilization(pe_resource_t *rsc, pe_node_t **prefer, pe_working_set_t *data_set)
GHashTable * meta
Definition: pe_types.h:424
#define pcmk_is_set(g, f)
Convenience alias for pcmk_all_flags_set(), to check single flag.
Definition: util.h:114
struct pe_node_shared_s * details
Definition: pe_types.h:244
enum rsc_recovery_type recovery_type
Definition: pe_types.h:336
pe_node_t * node
Definition: pe_types.h:411
#define pe_rsc_needs_fencing
Definition: pe_types.h:280
gboolean order_actions(pe_action_t *lh_action, pe_action_t *rh_action, enum pe_ordering order)
Definition: utils.c:1905
unsigned long long flags
Definition: pe_types.h:348
const char * uname
Definition: pe_types.h:209
#define pe_rsc_promotable
Definition: pe_types.h:256
void(* expand)(pe_resource_t *, pe_working_set_t *)
#define pcmk__order_resource_actions(lh_rsc, lh_task, rh_rsc, rh_task, flags, data_set)
xmlNode * find_rsc_op_entry(pe_resource_t *rsc, const char *key)
Definition: utils.c:1439
bool pcmk__is_daemon
Definition: logging.c:47
#define pe_flag_stonith_enabled
Definition: pe_types.h:98
const char * pe_node_attribute_raw(pe_node_t *node, const char *name)
Definition: common.c:635
#define pe__set_graph_flags(graph_flags, gr_action, flags_to_set)
Definition: internal.h:125
GList * actions
Definition: pe_types.h:359
pe_graph_flags
Definition: pe_types.h:283
GHashTable * utilization
Definition: pe_types.h:375
#define XML_RSC_ATTR_TARGET_ROLE
Definition: msg_xml.h:233
#define XML_LRM_ATTR_MIGRATE_TARGET
Definition: msg_xml.h:323
#define XML_RSC_ATTR_REMOTE_NODE
Definition: msg_xml.h:244
char * uuid
Definition: pe_types.h:415
void(* rsc_colocation_rh)(pe_resource_t *, pe_resource_t *, pcmk__colocation_t *, pe_working_set_t *)
enum pe_action_flags(* action_flags)(pe_action_t *, pe_node_t *)
#define pe_rsc_allocating
Definition: pe_types.h:259
enum rsc_role_e text2role(const char *role)
Definition: common.c:488
enum pe_obj_types variant
Definition: pe_types.h:331
void native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
const char * placement_strategy
Definition: pe_types.h:144
int rsc_discover_mode
Definition: pe_types.h:245
gboolean can_run_any(GHashTable *nodes)
const char * id
Definition: pe_types.h:208
char * pcmk__op_key(const char *rsc_id, const char *op_type, guint interval_ms)
Generate an operation key (RESOURCE_ACTION_INTERVAL)
Definition: operations.c:45
#define pe_rsc_fence_device
Definition: pe_types.h:255
GHashTable * pcmk__copy_node_table(GHashTable *nodes)
pe_node_t * pe_find_node_id(GList *node_list, const char *id)
Definition: status.c:418
void native_rsc_colocation_lh(pe_resource_t *dependent, pe_resource_t *primary, pcmk__colocation_t *constraint, pe_working_set_t *data_set)
#define PCMK_RESOURCE_CLASS_STONITH
Definition: services.h:45
enum pe_action_flags native_action_flags(pe_action_t *action, pe_node_t *node)
enum rsc_role_e(* state)(const pe_resource_t *, gboolean)
Definition: pe_types.h:53
void ReloadRsc(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set)
gboolean is_remote_node
Definition: pe_types.h:351
void add_hash_param(GHashTable *hash, const char *name, const char *value)
Definition: common.c:579
#define start_action(rsc, node, optional)
Definition: internal.h:383
#define CRM_META
Definition: crm.h:78
int pe__add_scores(int score1, int score2)
Definition: common.c:516
#define crm_err(fmt, args...)
Definition: logging.h:357
G_GNUC_INTERNAL void pcmk__order_vs_unfence(pe_resource_t *rsc, pe_node_t *node, pe_action_t *action, enum pe_ordering order, pe_working_set_t *data_set)
#define CRM_ASSERT(expr)
Definition: results.h:42
#define RSC_STATUS
Definition: crm.h:215
char guint crm_parse_interval_spec(const char *input)
Parse milliseconds from a Pacemaker interval specification.
Definition: utils.c:314
#define pe_rsc_reload
Definition: pe_types.h:263
#define RSC_PROMOTE
Definition: crm.h:207
void pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why)
Definition: complex.c:1116
gboolean StopRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
This structure contains everything that makes up a single output formatter.
pe_node_t * pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer, pe_working_set_t *data_set)
#define pe_rsc_needs_unfencing
Definition: pe_types.h:281
void native_create_actions(pe_resource_t *rsc, pe_working_set_t *data_set)
gboolean shutdown
Definition: pe_types.h:219
#define pe__clear_resource_flags(resource, flags_to_clear)
Definition: internal.h:53
#define crm_str(x)
Definition: logging.h:383
rsc_role_e
Possible roles that a resource can be in.
Definition: common.h:92
GList * running_on
Definition: pe_types.h:366
#define pe_rsc_block
Definition: pe_types.h:250
enum pe_action_flags flags
Definition: pe_types.h:419
gboolean maintenance
Definition: pe_types.h:222
#define pe_rsc_maintenance
Definition: pe_types.h:276
pe_working_set_t * cluster
Definition: pe_types.h:328
const char * node_attribute
#define XML_OP_ATTR_PENDING
Definition: msg_xml.h:258
gboolean DeleteRsc(pe_resource_t *rsc, pe_node_t *node, gboolean optional, pe_working_set_t *data_set)
#define pe_flag_have_stonith_resource
Definition: pe_types.h:99
#define RSC_ROLE_MAX
Definition: common.h:108
G_GNUC_INTERNAL enum pcmk__coloc_affects pcmk__colocation_affects(pe_resource_t *dependent, pe_resource_t *primary, pcmk__colocation_t *constraint, bool preview)
GList * find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
Definition: utils.c:1604
#define pe_flag_enable_unfencing
Definition: pe_types.h:100
#define pe_rsc_trace(rsc, fmt, args...)
Definition: internal.h:20
G_GNUC_INTERNAL void pcmk__apply_coloc_to_weights(pe_resource_t *dependent, pe_resource_t *primary, pcmk__colocation_t *constraint)
#define pe__set_order_flags(order_flags, flags_to_set)
Definition: internal.h:111
#define start_key(rsc)
Definition: internal.h:382
#define ID(x)
Definition: msg_xml.h:456
unsigned long long flags
Definition: pe_types.h:146
#define pe_err(fmt...)
Definition: internal.h:22
GList * pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node, const char *task, bool require_node)
Find all actions of given type for a resource.
Definition: utils.c:1643
char * name
Definition: pcmk_fence.c:31
#define XML_LRM_ATTR_MIGRATE_SOURCE
Definition: msg_xml.h:322
gboolean native_create_probe(pe_resource_t *rsc, pe_node_t *node, pe_action_t *complete, gboolean force, pe_working_set_t *data_set)
#define CRM_OP_LRM_DELETE
Definition: crm.h:151
gint sort_node_uname(gconstpointer a, gconstpointer b)
Definition: utils.c:218
gboolean unclean
Definition: pe_types.h:217
#define pe_flag_show_scores
Definition: pe_types.h:133
void graph_element_from_action(pe_action_t *action, pe_working_set_t *data_set)
#define crm_info(fmt, args...)
Definition: logging.h:360
#define pe_rsc_managed
Definition: pe_types.h:249
#define pe_rsc_orphan
Definition: pe_types.h:248
pe_ordering
Definition: pe_types.h:483
gboolean online
Definition: pe_types.h:213
G_GNUC_INTERNAL void pcmk__apply_location(pe__location_t *constraint, pe_resource_t *rsc)
uint64_t flags
Definition: remote.c:149
#define XML_ATTR_TE_TARGET_RC
Definition: msg_xml.h:402
G_GNUC_INTERNAL bool pcmk__is_unfence_device(const pe_resource_t *rsc, const pe_working_set_t *data_set)
pe_resource_t * parent
Definition: pe_types.h:329
pe_action_t * pe_fence_op(pe_node_t *node, const char *op, bool optional, const char *reason, bool priority_delay, pe_working_set_t *data_set)
Definition: utils.c:2135
GHashTable * pcmk__native_merge_weights(pe_resource_t *rsc, const char *primary_id, GHashTable *nodes, const char *attr, float factor, uint32_t flags)
GHashTable *(* merge_weights)(pe_resource_t *, const char *, GHashTable *, const char *, float, enum pe_weights)
#define RSC_DEMOTE
Definition: crm.h:209
#define pe_rsc_info(rsc, fmt, args...)
Definition: internal.h:18
gboolean NullOp(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
#define XML_AGENT_ATTR_CLASS
Definition: msg_xml.h:266
char * id
Definition: pe_types.h:322
pe_action_t * custom_action(pe_resource_t *rsc, char *key, const char *task, pe_node_t *on_node, gboolean optional, gboolean foo, pe_working_set_t *data_set)
Create or update an action object.
Definition: utils.c:731
GHashTable * allowed_nodes
Definition: pe_types.h:368
#define RSC_MIGRATED
Definition: crm.h:199
#define pe_flag_startup_probes
Definition: pe_types.h:115