pacemaker 2.1.6-6fdc9deea29
Scalable High-Availability cluster resource manager
pcmk_scheduler.c
/*
 * Copyright 2004-2023 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>

#include <glib.h>

#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"

CRM_TRACE_INIT_DATA(pacemaker);

/*!
 * \internal
 * \brief Do deferred action checks after allocation
 *
 * \param[in,out] rsc     Resource containing parameters to check
 * \param[in,out] node    Node that action was run on
 * \param[in]     rsc_op  Action history entry
 * \param[in]     check   Type of deferred check to do
 */
static void
check_params(pe_resource_t *rsc, pe_node_t *node, const xmlNode *rsc_op,
             enum pe_check_parameters check)
{
    const char *reason = NULL;
    op_digest_cache_t *digest_data = NULL;

    switch (check) {
        case pe_check_active:
            if (pcmk__check_action_config(rsc, node, rsc_op)
                && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL)) {
                reason = "action definition changed";
            }
            break;

        case pe_check_last_failure:
            digest_data = rsc_action_digest_cmp(rsc, rsc_op, node,
                                                rsc->cluster);
            switch (digest_data->rc) {
                case RSC_DIGEST_UNKNOWN:
                    crm_trace("Resource %s history entry %s on %s has "
                              "no digest to compare",
                              rsc->id, ID(rsc_op), node->details->id);
                    break;
                case RSC_DIGEST_MATCH:
                    break;
                default:
                    reason = "resource parameters have changed";
                    break;
            }
            break;
    }
    if (reason != NULL) {
        pe__clear_failcount(rsc, node, reason, rsc->cluster);
    }
}

/*!
 * \internal
 * \brief Check whether a resource has failcount clearing scheduled on a node
 *
 * \return true if \p rsc has failcount clearing scheduled on \p node
 */
static bool
failcount_clear_action_exists(const pe_node_t *node, const pe_resource_t *rsc)
{
    GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE);

    if (list != NULL) {
        g_list_free(list);
        return true;
    }
    return false;
}

/*!
 * \internal
 * \brief Ban a resource from a node if it reached its failure threshold there
 *
 * \param[in,out] rsc   Resource to check
 * \param[in]     node  Node to check \p rsc on
 */
static void
check_failure_threshold(pe_resource_t *rsc, const pe_node_t *node)
{
    // If this is a collective resource, apply recursively to children instead
    if (rsc->children != NULL) {
        g_list_foreach(rsc->children, (GFunc) check_failure_threshold,
                       (gpointer) node);
        return;

    } else if (failcount_clear_action_exists(node, rsc)) {
        /* Don't force the resource away from this node due to a failcount
         * that's going to be cleared.
         *
         * @TODO Failcount clearing can be scheduled in
         * pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
         * schedule_resource_actions() via check_params(). This runs well before
         * then, so it cannot detect those, meaning we might check the migration
         * threshold when we shouldn't. Worst case, we stop or move the
         * resource, then move it back in the next transition.
         */
        return;

    } else {
        pe_resource_t *failed = NULL;

        if (pcmk__threshold_reached(rsc, node, &failed)) {
            resource_location(failed, node, -INFINITY, "__fail_limit__",
                              rsc->cluster);
        }
    }
}

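/* Illustrative configuration sketch (not part of this file): the threshold
 * checked above comes from the migration-threshold resource meta-attribute.
 * With the hypothetical fragment below, a third failure of "my-ip" on a node
 * triggers the -INFINITY "__fail_limit__" score, banning the resource from
 * that node until the fail count expires (failure-timeout) or is cleared.
 *
 *   <primitive id="my-ip" class="ocf" provider="heartbeat" type="IPaddr2">
 *     <meta_attributes id="my-ip-meta">
 *       <nvpair id="my-ip-mt" name="migration-threshold" value="3"/>
 *       <nvpair id="my-ip-ft" name="failure-timeout" value="10min"/>
 *     </meta_attributes>
 *   </primitive>
 */
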
/*!
 * \internal
 * \brief Apply a resource's exclusive discovery setting to a node
 *
 * \param[in,out] rsc   Resource to check
 * \param[in]     node  Node to check \p rsc on
 */
static void
apply_exclusive_discovery(pe_resource_t *rsc, const pe_node_t *node)
{
    if (rsc->exclusive_discover
        || pe__const_top_resource(rsc, false)->exclusive_discover) {
        pe_node_t *match = NULL;

        // If this is a collective resource, apply recursively to children
        g_list_foreach(rsc->children, (GFunc) apply_exclusive_discovery,
                       (gpointer) node);

        match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
        if ((match != NULL)
            && (match->rsc_discover_mode != pe_discover_exclusive)) {
            match->weight = -INFINITY;
        }
    }
}

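/* Illustrative configuration sketch (not part of this file): exclusive
 * discovery is enabled with resource-discovery="exclusive" on a location
 * constraint (IDs below are made up). Probing and placement of "db-rsc" are
 * then limited to the constraint's nodes; apply_exclusive_discovery() sets
 * every other node's score to -INFINITY.
 *
 *   <rsc_location id="loc-db" rsc="db-rsc" node="node1" score="100"
 *                 resource-discovery="exclusive"/>
 */
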
/*!
 * \internal
 * \brief Apply stickiness to a resource if appropriate
 *
 * \param[in,out] rsc       Resource to check for stickiness
 * \param[in,out] data_set  Cluster working set
 */
static void
apply_stickiness(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    pe_node_t *node = NULL;

    // If this is a collective resource, apply recursively to children instead
    if (rsc->children != NULL) {
        g_list_foreach(rsc->children, (GFunc) apply_stickiness, data_set);
        return;
    }

    /* A resource is sticky if it is managed, has stickiness configured, and is
     * active on a single node.
     */
    if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
        || (rsc->stickiness < 1) || !pcmk__list_of_1(rsc->running_on)) {
        return;
    }

    node = rsc->running_on->data;

    /* In a symmetric cluster, stickiness can always be used. In an
     * asymmetric cluster, we have to check whether the resource is still
     * allowed on the node, so we don't keep the resource somewhere it is no
     * longer explicitly enabled.
     */
    if (!pcmk_is_set(rsc->cluster->flags, pe_flag_symmetric_cluster)
        && (pe_hash_table_lookup(rsc->allowed_nodes,
                                 node->details->id) == NULL)) {
        pe_rsc_debug(rsc,
                     "Ignoring %s stickiness because the cluster is "
                     "asymmetric and %s is not explicitly allowed",
                     rsc->id, pe__node_name(node));
        return;
    }

    pe_rsc_debug(rsc, "Resource %s has %d stickiness on %s",
                 rsc->id, rsc->stickiness, pe__node_name(node));
    resource_location(rsc, node, rsc->stickiness, "stickiness", data_set);
}

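/* Illustrative configuration sketch (not part of this file): stickiness is
 * the resource-stickiness meta-attribute, often set cluster-wide through
 * rsc_defaults. With the hypothetical fragment below, every active resource
 * receives a +100 preference for the node it is currently running on.
 *
 *   <rsc_defaults>
 *     <meta_attributes id="rsc-defaults-meta">
 *       <nvpair id="rsc-defaults-stickiness"
 *               name="resource-stickiness" value="100"/>
 *     </meta_attributes>
 *   </rsc_defaults>
 */
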
/*!
 * \internal
 * \brief Apply shutdown locks for all resources as appropriate
 */
static void
apply_shutdown_locks(pe_working_set_t *data_set)
{
    if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
        return;
    }
    for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
        pe_resource_t *rsc = (pe_resource_t *) iter->data;

        rsc->cmds->shutdown_lock(rsc);
    }
}

/*!
 * \internal
 * \brief Calculate the number of available nodes in the cluster
 */
static void
count_available_nodes(pe_working_set_t *data_set)
{
    if (pcmk_is_set(data_set->flags, pe_flag_no_compat)) {
        return;
    }

    // @COMPAT for API backward compatibility only (cluster does not use value)
    for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
        pe_node_t *node = (pe_node_t *) iter->data;

        if ((node != NULL) && (node->weight >= 0) && node->details->online
            && (node->details->type != node_ping)) {
            data_set->max_valid_nodes++;
        }
    }
    crm_trace("Online node count: %d", data_set->max_valid_nodes);
}

/*
 * \internal
 * \brief Apply node-specific scheduling criteria
 *
 * After the CIB has been unpacked, process node-specific scheduling criteria
 * including shutdown locks, location constraints, resource stickiness,
 * migration thresholds, and exclusive resource discovery.
 */
static void
apply_node_criteria(pe_working_set_t *data_set)
{
    crm_trace("Applying node-specific scheduling criteria");
    apply_shutdown_locks(data_set);
    count_available_nodes(data_set);
    pcmk__apply_locations(data_set);
    g_list_foreach(data_set->resources, (GFunc) apply_stickiness, data_set);

    for (GList *node_iter = data_set->nodes; node_iter != NULL;
         node_iter = node_iter->next) {
        for (GList *rsc_iter = data_set->resources; rsc_iter != NULL;
             rsc_iter = rsc_iter->next) {
            pe_node_t *node = (pe_node_t *) node_iter->data;
            pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;

            check_failure_threshold(rsc, node);
            apply_exclusive_discovery(rsc, node);
        }
    }
}

/*!
 * \internal
 * \brief Allocate resources to nodes
 */
static void
allocate_resources(pe_working_set_t *data_set)
{
    GList *iter = NULL;

    crm_trace("Allocating resources to nodes");

    if (!pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei)) {
        pcmk__sort_resources(data_set);
    }
    pcmk__show_node_capacities("Original", data_set);

    if (pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
        /* Allocate remote connection resources first (which will also allocate
         * any colocation dependencies). If the connection is migrating, always
         * prefer the partial migration target.
         */
        for (iter = data_set->resources; iter != NULL; iter = iter->next) {
            pe_resource_t *rsc = (pe_resource_t *) iter->data;

            if (rsc->is_remote_node) {
                pe_rsc_trace(rsc, "Allocating remote connection resource '%s'",
                             rsc->id);
                rsc->cmds->assign(rsc, rsc->partial_migration_target);
            }
        }
    }

    /* now do the rest of the resources */
    for (iter = data_set->resources; iter != NULL; iter = iter->next) {
        pe_resource_t *rsc = (pe_resource_t *) iter->data;

        if (!rsc->is_remote_node) {
            pe_rsc_trace(rsc, "Allocating %s resource '%s'",
                         crm_element_name(rsc->xml), rsc->id);
            rsc->cmds->assign(rsc, NULL);
        }
    }

    pcmk__show_node_capacities("Remaining", data_set);
}

/*!
 * \internal
 * \brief Schedule fail count clearing on online nodes if resource is orphaned
 */
static void
clear_failcounts_if_orphaned(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
        return;
    }
    crm_trace("Clear fail counts for orphaned resource %s", rsc->id);

    /* There's no need to recurse into rsc->children because those
     * should just be unallocated clone instances.
     */

    for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
        pe_node_t *node = (pe_node_t *) iter->data;
        pe_action_t *clear_op = NULL;

        if (!node->details->online) {
            continue;
        }
        if (pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL) == 0) {
            continue;
        }

        clear_op = pe__clear_failcount(rsc, node, "it is orphaned", data_set);

        /* We can't use order_action_then_stop() here because its
         * pe_order_preserve breaks things
         */
        pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
                           NULL, pe_order_optional, data_set);
    }
}

/*!
 * \internal
 * \brief Schedule any resource actions needed
 */
static void
schedule_resource_actions(pe_working_set_t *data_set)
{
    // Process deferred action checks
    pe__foreach_param_check(data_set, check_params);
    pe__free_param_checks(data_set);

    if (pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
        crm_trace("Scheduling probes");
        pcmk__schedule_probes(data_set);
    }

    if (pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
        g_list_foreach(data_set->resources,
                       (GFunc) clear_failcounts_if_orphaned, data_set);
    }

    crm_trace("Scheduling resource actions");
    for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
        pe_resource_t *rsc = (pe_resource_t *) iter->data;

        rsc->cmds->create_actions(rsc);
    }
}

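/* Sketch (an assumption for illustration, not code from this file): rsc->cmds
 * points at the method table for the resource variant (primitive, group,
 * clone, or bundle), so the loop above dispatches create_actions()
 * polymorphically. A hypothetical variant would supply its own callbacks:
 *
 *   static resource_alloc_functions_t my_variant_cmds = {
 *       .assign         = my_assign,
 *       .create_actions = my_create_actions,
 *       .shutdown_lock  = my_shutdown_lock,
 *       // ... remaining callbacks ...
 *   };
 */
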
/*!
 * \internal
 * \brief Check whether a resource or any of its descendants are managed
 *
 * \return true if the resource or any descendant is managed, otherwise false
 */
static bool
is_managed(const pe_resource_t *rsc)
{
    if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
        return true;
    }
    for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
        if (is_managed((pe_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

/*!
 * \internal
 * \brief Check whether any resources in the cluster are managed
 *
 * \return true if any resource is managed, otherwise false
 */
static bool
any_managed_resources(const pe_working_set_t *data_set)
{
    for (const GList *iter = data_set->resources;
         iter != NULL; iter = iter->next) {
        if (is_managed((const pe_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

/*!
 * \internal
 * \brief Check whether a node requires fencing
 */
static bool
needs_fencing(const pe_node_t *node, bool have_managed,
              const pe_working_set_t *data_set)
{
    return have_managed && node->details->unclean
           && pe_can_fence(data_set, node);
}

/*!
 * \internal
 * \brief Check whether a node requires shutdown
 */
static bool
needs_shutdown(const pe_node_t *node)
{
    if (pe__is_guest_or_remote_node(node)) {
        /* Do not send shutdown actions for Pacemaker Remote nodes.
         * @TODO We might come up with a good use for this in the future.
         */
        return false;
    }
    return node->details->online && node->details->shutdown;
}

/*!
 * \internal
 * \brief Track and order non-DC fencing
 *
 * \return (Possibly new) head of list
 */
static GList *
add_nondc_fencing(GList *list, pe_action_t *action,
                  const pe_working_set_t *data_set)
{
    if (!pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)
        && (list != NULL)) {
        /* Concurrent fencing is disabled, so order each non-DC
         * fencing in a chain. If there is any DC fencing or
         * shutdown, it will be ordered after the last action in the
         * chain later.
         */
        order_actions((pe_action_t *) list->data, action, pe_order_optional);
    }
    return g_list_prepend(list, action);
}

/*!
 * \internal
 * \brief Schedule a node for fencing
 */
static pe_action_t *
schedule_fencing(pe_node_t *node, pe_working_set_t *data_set)
{
    pe_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
                                       FALSE, data_set);

    pe_warn("Scheduling node %s for fencing", pe__node_name(node));
    pcmk__order_vs_fence(fencing, data_set);
    return fencing;
}

/*!
 * \internal
 * \brief Schedule fencing or shutdowns for any nodes that require it
 */
static void
schedule_fencing_and_shutdowns(pe_working_set_t *data_set)
{
    pe_action_t *dc_down = NULL;
    bool integrity_lost = false;
    bool have_managed = any_managed_resources(data_set);
    GList *fencing_ops = NULL;
    GList *shutdown_ops = NULL;

    crm_trace("Scheduling fencing and shutdowns as needed");
    if (!have_managed) {
        crm_notice("No fencing will be done until there are resources to manage");
    }

    // Check each node for whether it needs fencing or shutdown
    for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
        pe_node_t *node = (pe_node_t *) iter->data;
        pe_action_t *fencing = NULL;

        /* Guest nodes are "fenced" by recovering their container resource,
         * so handle them separately.
         */
        if (pe__is_guest_node(node)) {
            if (node->details->remote_requires_reset && have_managed
                && pe_can_fence(data_set, node)) {
                pcmk__fence_guest(node);
            }
            continue;
        }

        if (needs_fencing(node, have_managed, data_set)) {
            fencing = schedule_fencing(node, data_set);

            // Track DC and non-DC fence actions separately
            if (node->details->is_dc) {
                dc_down = fencing;
            } else {
                fencing_ops = add_nondc_fencing(fencing_ops, fencing, data_set);
            }

        } else if (needs_shutdown(node)) {
            pe_action_t *down_op = pcmk__new_shutdown_action(node);

            // Track DC and non-DC shutdown actions separately
            if (node->details->is_dc) {
                dc_down = down_op;
            } else {
                shutdown_ops = g_list_prepend(shutdown_ops, down_op);
            }
        }

        if ((fencing == NULL) && node->details->unclean) {
            integrity_lost = true;
            pe_warn("Node %s is unclean but cannot be fenced",
                    pe__node_name(node));
        }
    }

    if (integrity_lost) {
        if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
            pe_warn("Resource functionality and data integrity cannot be "
                    "guaranteed (configure, enable, and test fencing to "
                    "correct this)");

        } else if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
            crm_notice("Unclean nodes will not be fenced until quorum is "
                       "attained or no-quorum-policy is set to ignore");
        }
    }

    if (dc_down != NULL) {
        /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
         * DC elections. However, we don't want to order non-DC shutdowns before
         * a DC *fencing*, because even though we don't want a node that's
         * shutting down to become DC, the DC fencing could be ordered before a
         * clone stop that's also ordered before the shutdowns, thus leading to
         * a graph loop.
         */
        if (pcmk__str_eq(dc_down->task, CRM_OP_SHUTDOWN, pcmk__str_none)) {
            pcmk__order_after_each(dc_down, shutdown_ops);
        }

        // Order any non-DC fencing before any DC fencing or shutdown

        if (pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)) {
            /* With concurrent fencing, order each non-DC fencing action
             * separately before any DC fencing or shutdown.
             */
            pcmk__order_after_each(dc_down, fencing_ops);
        } else if (fencing_ops != NULL) {
            /* Without concurrent fencing, the non-DC fencing actions are
             * already ordered relative to each other, so we just need to order
             * the DC fencing after the last action in the chain (which is the
             * first item in the list).
             */
            order_actions((pe_action_t *) fencing_ops->data, dc_down,
                          pe_order_optional);
        }
    }
    g_list_free(fencing_ops);
    g_list_free(shutdown_ops);
}

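/* Worked example (not part of this file): suppose non-DC nodes A, B, and C
 * plus the DC all need fencing. Without concurrent fencing,
 * add_nondc_fencing() chains the non-DC actions as they are created, and the
 * code above orders the DC fencing after the head of the list (the action
 * added last):
 *
 *   fence A -> fence B -> fence C -> fence DC
 *
 * With pe_flag_concurrent_fencing set, A, B, and C are instead fenced in
 * parallel, and only the DC fencing is ordered after each of them.
 */
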
static void
log_resource_details(pe_working_set_t *data_set)
{
    pcmk__output_t *out = data_set->priv;
    GList *all = NULL;

    /* We need a list of nodes that we are allowed to output information for.
     * This is necessary because out->message for all the resource-related
     * messages expects such a list, due to the `crm_mon --node=` feature. Here,
     * we just make it a list of all the nodes.
     */
    all = g_list_prepend(all, (gpointer) "*");

    for (GList *item = data_set->resources; item != NULL; item = item->next) {
        pe_resource_t *rsc = (pe_resource_t *) item->data;

        // Log all resources except inactive orphans
        if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
            || (rsc->role != RSC_ROLE_STOPPED)) {
            out->message(out, crm_map_element_name(rsc->xml), 0, rsc, all, all);
        }
    }

    g_list_free(all);
}

static void
log_all_actions(pe_working_set_t *data_set)
{
    /* This only ever outputs to the log, so ignore whatever output object was
     * previously set and just log instead.
     */
    pcmk__output_t *prev_out = data_set->priv;
    pcmk__output_t *out = NULL;

    if (pcmk__log_output_new(&out) != pcmk_rc_ok) {
        return;
    }

    pe__register_messages(out);
    pcmk__register_lib_messages(out);
    pcmk__output_set_log_level(out, LOG_NOTICE);
    data_set->priv = out;

    out->begin_list(out, NULL, NULL, "Actions");
    pcmk__output_actions(data_set);
    out->end_list(out);
    out->finish(out, CRM_EX_OK, true, NULL);
    pcmk__output_free(out);

    data_set->priv = prev_out;
}

/*!
 * \internal
 * \brief Log unrunnable actions
 */
static void
log_unrunnable_actions(const pe_working_set_t *data_set)
{
    const uint64_t flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;

    crm_trace("Required but unrunnable actions:");
    for (const GList *iter = data_set->actions;
         iter != NULL; iter = iter->next) {

        const pe_action_t *action = (const pe_action_t *) iter->data;

        if (!pcmk_any_flags_set(action->flags, flags)) {
            pcmk__log_action("\t", action, true);
        }
    }
}

/*!
 * \internal
 * \brief Unpack the CIB for scheduling
 *
 * \param[in,out] cib       CIB XML to unpack
 * \param[in]     flags     Working set flags to set in addition to defaults
 * \param[in,out] data_set  Cluster working set
 */
static void
unpack_cib(xmlNode *cib, unsigned long long flags, pe_working_set_t *data_set)
{
    const char* localhost_save = NULL;

    if (pcmk_is_set(data_set->flags, pe_flag_have_status)) {
        crm_trace("Reusing previously calculated cluster status");
        pe__set_working_set_flags(data_set, flags);
        return;
    }

    if (data_set->localhost) {
        localhost_save = data_set->localhost;
    }

    CRM_ASSERT(cib != NULL);
    crm_trace("Calculating cluster status");

    /* This will zero the entire struct without freeing anything first, so
     * callers should never call pcmk__schedule_actions() with a populated data
     * set unless pe_flag_have_status is set (i.e. cluster_status() was
     * previously called, whether directly or via pcmk__schedule_actions()).
     */
    set_working_set_defaults(data_set);

    if (localhost_save) {
        data_set->localhost = localhost_save;
    }

    pe__set_working_set_flags(data_set, flags);
    data_set->input = cib;
    cluster_status(data_set); // Sets pe_flag_have_status
}

/*!
 * \internal
 * \brief Run the scheduler for a given CIB
 *
 * \param[in,out] cib       CIB XML to use as scheduler input
 * \param[in]     flags     Working set flags to set in addition to defaults
 * \param[in,out] data_set  Cluster working set
 */
void
pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
                       pe_working_set_t *data_set)
{
    unpack_cib(cib, flags, data_set);
    pcmk__set_allocation_methods(data_set);
    pcmk__apply_node_health(data_set);
    pcmk__unpack_constraints(data_set);
    if (pcmk_is_set(data_set->flags, pe_flag_check_config)) {
        return;
    }

    if (!pcmk_is_set(data_set->flags, pe_flag_quick_location)
        && pcmk__is_daemon) {
        log_resource_details(data_set);
    }

    apply_node_criteria(data_set);

    if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
        return;
    }

    pcmk__create_internal_constraints(data_set);
    pcmk__handle_rsc_config_changes(data_set);
    allocate_resources(data_set);
    schedule_resource_actions(data_set);

    /* Remote ordering constraints need to happen prior to calculating fencing
     * because it is one more place we can mark nodes as needing fencing.
     */
    pcmk__order_remote_connection_actions(data_set);

    schedule_fencing_and_shutdowns(data_set);
    pcmk__apply_orderings(data_set);
    log_all_actions(data_set);
    pcmk__create_graph(data_set);

    if (get_crm_log_level() == LOG_TRACE) {
        log_unrunnable_actions(data_set);
    }
}
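
/* Minimal usage sketch (not part of this file; the input file name and flag
 * choice are assumptions for illustration). A scheduler client such as
 * crm_simulate drives this entry point roughly as follows:
 *
 *   #include <crm/common/xml.h>
 *   #include <crm/pengine/status.h>
 *
 *   pe_working_set_t *data_set = pe_new_working_set();
 *   xmlNode *cib = filename2xml("cib.xml");
 *
 *   if ((data_set != NULL) && (cib != NULL)) {
 *       pcmk__schedule_actions(cib, pe_flag_no_compat, data_set);
 *       // data_set->graph now holds the computed transition graph
 *   }
 *   pe_free_working_set(data_set); // also frees the unpacked input CIB
 */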