/*
 * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
 *
 * This source code is licensed under the GNU Lesser General Public License
 * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <ctype.h>

#include <crm/pengine/rules.h>
#include <crm/pengine/status.h>
#include <crm/pengine/internal.h>
#include <unpack.h>
#include <crm/msg_xml.h>

#define VARIANT_CONTAINER 1
#include "./variant.h"

void tuple_free(container_grouping_t *tuple);

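/* A note on next_ip() below: it walks the bundle's IP range by incrementing
 * the fourth octet of the previously allocated IPv4 address, rolling over
 * into the third octet once the fourth passes 253 (e.g. "10.0.0.254" becomes
 * "10.0.1.1"), and returning NULL once the third octet itself passes 253.
 * Only IPv4 is handled; IPv6 input makes sscanf() match fewer than four
 * fields, which also yields NULL (see the TODO).
 */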
static char *
next_ip(const char *last_ip)
{
    unsigned int oct1 = 0;
    unsigned int oct2 = 0;
    unsigned int oct3 = 0;
    unsigned int oct4 = 0;
    int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4);

    if (rc != 4) {
        /* @TODO check for IPv6 */
        return NULL;

    } else if (oct3 > 253) {
        return NULL;

    } else if (oct4 > 253) {
        ++oct3;
        oct4 = 1;

    } else {
        ++oct4;
    }

    return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
}

static int
allocate_ip(container_variant_data_t *data, container_grouping_t *tuple, char *buffer, int max)
{
    if(data->ip_range_start == NULL) {
        return 0;

    } else if(data->ip_last) {
        tuple->ipaddr = next_ip(data->ip_last);

    } else {
        tuple->ipaddr = strdup(data->ip_range_start);
    }

    data->ip_last = tuple->ipaddr;
#if 0
    return snprintf(buffer, max, " --add-host=%s-%d:%s --link %s-docker-%d:%s-link-%d",
                    data->prefix, tuple->offset, tuple->ipaddr,
                    data->prefix, tuple->offset, data->prefix, tuple->offset);
#else
    if (data->type == PE_CONTAINER_TYPE_DOCKER || data->type == PE_CONTAINER_TYPE_PODMAN) {
        if (data->add_host == FALSE) {
            return 0;
        }
        return snprintf(buffer, max, " --add-host=%s-%d:%s",
                        data->prefix, tuple->offset, tuple->ipaddr);
    } else if (data->type == PE_CONTAINER_TYPE_RKT) {
        return snprintf(buffer, max, " --hosts-entry=%s=%s-%d",
                        tuple->ipaddr, data->prefix, tuple->offset);
    } else {
        return 0;
    }
#endif
}
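/* For example, with prefix "httpd", offset 0, and address 10.1.2.3, the
 * docker/podman branch above emits " --add-host=httpd-0:10.1.2.3" so the
 * replica can resolve its own bundle host name, while the rkt branch emits
 * the equivalent " --hosts-entry=10.1.2.3=httpd-0".
 */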

static xmlNode *
create_resource(const char *name, const char *provider, const char *kind)
{
    xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);

    crm_xml_add(rsc, XML_ATTR_ID, name);
    crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
    crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
    crm_xml_add(rsc, XML_ATTR_TYPE, kind);

    return rsc;
}

/*!
 * \internal
 * \brief Check whether this container's networking is sufficient for the
 *        cluster to manage a resource inside it: either an IP range is
 *        defined, or a control port is defined (which requires
 *        replicas-per-host to be 1)
 */
static bool
valid_network(container_variant_data_t *data)
{
    if(data->ip_range_start) {
        return TRUE;
    }
    if(data->control_port) {
        if(data->replicas_per_host > 1) {
            pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
            data->replicas_per_host = 1;
            /* @TODO to be sure: clear_bit(rsc->flags, pe_rsc_unique); */
        }
        return TRUE;
    }
    return FALSE;
}

static bool
create_ip_resource(
    resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
    pe_working_set_t * data_set)
{
    if(data->ip_range_start) {
        char *id = NULL;
        xmlNode *xml_ip = NULL;
        xmlNode *xml_obj = NULL;

        id = crm_strdup_printf("%s-ip-%s", data->prefix, tuple->ipaddr);
        crm_xml_sanitize_id(id);
        xml_ip = create_resource(id, "heartbeat", "IPaddr2");
        free(id);

        xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
        crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);

        crm_create_nvpair_xml(xml_obj, NULL, "ip", tuple->ipaddr);
        if(data->host_network) {
            crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
        }

        if(data->host_netmask) {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "cidr_netmask", data->host_netmask);

        } else {
            crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
        }

        xml_obj = create_xml_node(xml_ip, "operations");
        crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);

        // TODO: Other ops? Timeouts and intervals from underlying resource?

        if (common_unpack(xml_ip, &tuple->ip, parent, data_set) == false) {
            return FALSE;
        }

        parent->children = g_list_append(parent->children, tuple->ip);
    }
    return TRUE;
}
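/* A minimal sketch of the primitive generated above, assuming prefix
 * "httpd", replica 0, and address 10.1.2.3 (the op id is abbreviated):
 *
 *   <primitive id="httpd-ip-10.1.2.3" class="ocf" provider="heartbeat"
 *              type="IPaddr2">
 *     <instance_attributes id="httpd-attributes-0">
 *       <nvpair name="ip" value="10.1.2.3"/>
 *       <nvpair name="cidr_netmask" value="32"/>
 *     </instance_attributes>
 *     <operations>
 *       <op name="monitor" interval="60s" id="..."/>
 *     </operations>
 *   </primitive>
 */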

static bool
create_docker_resource(
    resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
    pe_working_set_t * data_set)
{
    int offset = 0, max = 4096;
    char *buffer = calloc(1, max+1);

    int doffset = 0, dmax = 1024;
    char *dbuffer = calloc(1, dmax+1);

    char *id = NULL;
    xmlNode *xml_docker = NULL;
    xmlNode *xml_obj = NULL;

    id = crm_strdup_printf("%s-docker-%d", data->prefix, tuple->offset);
    crm_xml_sanitize_id(id);
    xml_docker = create_resource(id, "heartbeat", "docker");
    free(id);

    xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS);
    crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);

    crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
    crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
    crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
    crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);

    offset += snprintf(buffer+offset, max-offset, " --restart=no");

    /* Set a container hostname only if we have an IP to map it to.
     * The user can set -h or --uts=host themselves if they want a nicer
     * name for logs, but this keeps applications happy that need their
     * hostname to match the IP they bind to.
     */
    if (data->ip_range_start != NULL) {
        offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
                           data->prefix, tuple->offset);
    }

    offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");

    if(data->docker_network) {
//        offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr);
        offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network);
    }

    if(data->control_port) {
        offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
    } else {
        offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
    }

    for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
        container_mount_t *mount = pIter->data;

        if(mount->flags) {
            char *source = crm_strdup_printf(
                "%s/%s-%d", mount->source, data->prefix, tuple->offset);

            if(doffset > 0) {
                doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
            }
            doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
            offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
            free(source);

        } else {
            offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
        }
        if(mount->options) {
            offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
        }
    }

    for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
        container_port_t *port = pIter->data;

        if(tuple->ipaddr) {
            offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
                               tuple->ipaddr, port->source, port->target);
        } else if(safe_str_neq(data->docker_network, "host")) {
            // No need to do port mapping if net=host
            offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
        }
    }

    if(data->docker_run_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options);
    }

    if(data->docker_host_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options);
    }

    crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
    free(buffer);

    crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
    free(dbuffer);

    if(tuple->child) {
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "run_cmd", data->docker_run_command);
        } else {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "run_cmd", SBIN_DIR "/pacemaker-remoted");
        }

        /* TODO: Allow users to specify their own?
         *
         * We just want to know if the container is alive, we'll
         * monitor the child independently
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
        /* } else if(child && data->untrusted) {
         *     Support this use-case?
         *
         *     The ability to have resources started/stopped by us, but
         *     unable to set attributes, etc.
         *
         *     Arguably it would be better to control API access with ACLs,
         *     as with "normal" remote nodes
         *
         *     crm_create_nvpair_xml(xml_obj, NULL,
         *                           "run_cmd",
         *                           "/usr/libexec/pacemaker/pacemaker-execd");
         *     crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
         *                           "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
         */
    } else {
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "run_cmd", data->docker_run_command);
        }

        /* TODO: Allow users to specify their own?
         *
         * We don't know what's in the container, so we just want
         * to know if it is alive
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
    }

    xml_obj = create_xml_node(xml_docker, "operations");
    crm_create_op_xml(xml_obj, ID(xml_docker), "monitor", "60s", NULL);

    // TODO: Other ops? Timeouts and intervals from underlying resource?
    if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) {
        return FALSE;
    }
    parent->children = g_list_append(parent->children, tuple->docker);
    return TRUE;
}
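/* Taken together, one replica's run_opts ends up looking roughly like this
 * single line (illustrative values; the authkey mount and default remote
 * port are added by container_unpack() below):
 *
 *   --restart=no -h httpd-0 -e PCMK_stderr=1 -e PCMK_remote_port=3121
 *   -v /etc/pacemaker/authkey:/etc/pacemaker/authkey
 *   -p 10.1.2.3:80:80 --add-host=httpd-0:10.1.2.3
 *
 * which the docker resource agent passes straight to `docker run`.
 */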

static bool
create_podman_resource(resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
                       pe_working_set_t * data_set)
{
    int offset = 0, max = 4096;
    char *buffer = calloc(1, max+1);

    int doffset = 0, dmax = 1024;
    char *dbuffer = calloc(1, dmax+1);

    char *id = NULL;
    xmlNode *xml_podman = NULL;
    xmlNode *xml_obj = NULL;

    id = crm_strdup_printf("%s-podman-%d", data->prefix, tuple->offset);
    crm_xml_sanitize_id(id);
    xml_podman = create_resource(id, "heartbeat", "podman");
    free(id);

    xml_obj = create_xml_node(xml_podman, XML_TAG_ATTR_SETS);
    crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);

    crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
    crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
    crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
    crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);

    // FIXME: (bandini 2018-08) podman has no restart policies
    //offset += snprintf(buffer+offset, max-offset, " --restart=no");

    /* Set a container hostname only if we have an IP to map it to.
     * The user can set -h or --uts=host themselves if they want a nicer
     * name for logs, but this keeps applications happy that need their
     * hostname to match the IP they bind to.
     */
    if (data->ip_range_start != NULL) {
        offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
                           data->prefix, tuple->offset);
    }

    offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");

    if(data->docker_network) {
        // FIXME: (bandini 2018-08) podman has no support for --link-local-ip
        //offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr);
        offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network);
    }

    if(data->control_port) {
        offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
    } else {
        offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
    }

    for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
        container_mount_t *mount = pIter->data;

        if(mount->flags) {
            char *source = crm_strdup_printf(
                "%s/%s-%d", mount->source, data->prefix, tuple->offset);

            if(doffset > 0) {
                doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
            }
            doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
            offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
            free(source);

        } else {
            offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
        }
        if(mount->options) {
            offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
        }
    }

    for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
        container_port_t *port = pIter->data;

        if(tuple->ipaddr) {
            offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
                               tuple->ipaddr, port->source, port->target);
        } else if(safe_str_neq(data->docker_network, "host")) {
            // No need to do port mapping if net=host
            offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
        }
    }

    if(data->docker_run_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options);
    }

    if(data->docker_host_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options);
    }

    crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
    free(buffer);

    crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
    free(dbuffer);

    if(tuple->child) {
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "run_cmd", data->docker_run_command);
        } else {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "run_cmd", SBIN_DIR "/pacemaker-remoted");
        }

        /* TODO: Allow users to specify their own?
         *
         * We just want to know if the container is alive, we'll
         * monitor the child independently
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
        /* } else if(child && data->untrusted) {
         *     Support this use-case?
         *
         *     The ability to have resources started/stopped by us, but
         *     unable to set attributes, etc.
         *
         *     Arguably it would be better to control API access with ACLs,
         *     as with "normal" remote nodes
         *
         *     crm_create_nvpair_xml(xml_obj, NULL,
         *                           "run_cmd",
         *                           "/usr/libexec/pacemaker/pacemaker-execd");
         *     crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
         *                           "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
         */
    } else {
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "run_cmd", data->docker_run_command);
        }

        /* TODO: Allow users to specify their own?
         *
         * We don't know what's in the container, so we just want
         * to know if it is alive
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
    }

    xml_obj = create_xml_node(xml_podman, "operations");
    crm_create_op_xml(xml_obj, ID(xml_podman), "monitor", "60s", NULL);

    // TODO: Other ops? Timeouts and intervals from underlying resource?
    if (common_unpack(xml_podman, &tuple->docker, parent, data_set) == FALSE) {
        return FALSE;
    }
    parent->children = g_list_append(parent->children, tuple->docker);
    return TRUE;
}

static bool
create_rkt_resource(
    resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
    pe_working_set_t * data_set)
{
    int offset = 0, max = 4096;
    char *buffer = calloc(1, max+1);

    int doffset = 0, dmax = 1024;
    char *dbuffer = calloc(1, dmax+1);

    char *id = NULL;
    xmlNode *xml_docker = NULL;
    xmlNode *xml_obj = NULL;

    int volid = 0;

    id = crm_strdup_printf("%s-rkt-%d", data->prefix, tuple->offset);
    crm_xml_sanitize_id(id);
    xml_docker = create_resource(id, "heartbeat", "rkt");
    free(id);

    xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS);
    crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);

    crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
    crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", "true");
    crm_create_nvpair_xml(xml_obj, NULL, "force_kill", "false");
    crm_create_nvpair_xml(xml_obj, NULL, "reuse", "false");

    /* Set a container hostname only if we have an IP to map it to.
     * The user can set -h or --uts=host themselves if they want a nicer
     * name for logs, but this keeps applications happy that need their
     * hostname to match the IP they bind to.
     */
    if (data->ip_range_start != NULL) {
        offset += snprintf(buffer+offset, max-offset, " --hostname=%s-%d",
                           data->prefix, tuple->offset);
    }

    offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_stderr=1");

    if(data->docker_network) {
//        offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr);
        offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network);
    }

    if(data->control_port) {
        offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%s", data->control_port);
    } else {
        offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
    }

    for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
        container_mount_t *mount = pIter->data;

        if(mount->flags) {
            char *source = crm_strdup_printf(
                "%s/%s-%d", mount->source, data->prefix, tuple->offset);

            if(doffset > 0) {
                doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
            }
            doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
            offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, source);
            if(mount->options) {
                offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
            }
            offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
            free(source);

        } else {
            offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, mount->source);
            if(mount->options) {
                offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
            }
            offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
        }
        volid++;
    }

    for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
        container_port_t *port = pIter->data;

        if(tuple->ipaddr) {
            offset += snprintf(buffer+offset, max-offset, " --port=%s:%s:%s",
                               port->target, tuple->ipaddr, port->source);
        } else {
            offset += snprintf(buffer+offset, max-offset, " --port=%s:%s", port->target, port->source);
        }
    }

    if(data->docker_run_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options);
    }

    if(data->docker_host_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options);
    }

    crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
    free(buffer);

    crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
    free(dbuffer);

    if(tuple->child) {
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->docker_run_command);
        } else {
            crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                  SBIN_DIR "/pacemaker-remoted");
        }

        /* TODO: Allow users to specify their own?
         *
         * We just want to know if the container is alive, we'll
         * monitor the child independently
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
        /* } else if(child && data->untrusted) {
         *     Support this use-case?
         *
         *     The ability to have resources started/stopped by us, but
         *     unable to set attributes, etc.
         *
         *     Arguably it would be better to control API access with ACLs,
         *     as with "normal" remote nodes
         *
         *     crm_create_nvpair_xml(xml_obj, NULL,
         *                           "run_cmd",
         *                           "/usr/libexec/pacemaker/pacemaker-execd");
         *     crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
         *                           "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
         */
    } else {
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                  data->docker_run_command);
        }

        /* TODO: Allow users to specify their own?
         *
         * We don't know what's in the container, so we just want
         * to know if it is alive
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
    }

    xml_obj = create_xml_node(xml_docker, "operations");
    crm_create_op_xml(xml_obj, ID(xml_docker), "monitor", "60s", NULL);

    // TODO: Other ops? Timeouts and intervals from underlying resource?

    if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) {
        return FALSE;
    }
    parent->children = g_list_append(parent->children, tuple->docker);
    return TRUE;
}

/*!
 * \internal
 * \brief Ban a node from a resource's (and its children's) allowed nodes list
 */
static void
disallow_node(resource_t *rsc, const char *uname)
{
    gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);

    if (match) {
        ((pe_node_t *) match)->weight = -INFINITY;
        ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
    }
    if (rsc->children) {
        GListPtr child;

        for (child = rsc->children; child != NULL; child = child->next) {
            disallow_node((resource_t *) (child->data), uname);
        }
    }
}

static bool
create_remote_resource(
    resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
    pe_working_set_t * data_set)
{
    if (tuple->child && valid_network(data)) {
        GHashTableIter gIter;
        GListPtr rsc_iter = NULL;
        node_t *node = NULL;
        xmlNode *xml_remote = NULL;
        char *id = crm_strdup_printf("%s-%d", data->prefix, tuple->offset);
        char *port_s = NULL;
        const char *uname = NULL;
        const char *connect_name = NULL;

        if (remote_id_conflict(id, data_set)) {
            free(id);
            // The biggest hammer we have
            id = crm_strdup_printf("pcmk-internal-%s-remote-%d", tuple->child->id, tuple->offset);
            CRM_ASSERT(remote_id_conflict(id, data_set) == FALSE);
        }

        /* REMOTE_CONTAINER_HACK: Using "#uname" as the server name when the
         * connection does not have its own IP is a magic string that we use to
         * support nested remotes (i.e. a bundle running on a remote node).
         */
        connect_name = (tuple->ipaddr? tuple->ipaddr : "#uname");

        if (data->control_port == NULL) {
            port_s = crm_itoa(DEFAULT_REMOTE_PORT);
        }

        /* This sets tuple->docker as tuple->remote's container, which is
         * similar to what happens with guest nodes. This is how the PE knows
         * that the bundle node is fenced by recovering docker, and that
         * remote should be ordered relative to docker.
         */
        xml_remote = pe_create_remote_xml(NULL, id, tuple->docker->id,
                                          NULL, NULL, NULL,
                                          connect_name, (data->control_port?
                                          data->control_port : port_s));
        free(port_s);

        /* Abandon our created ID, and pull the copy from the XML, because we
         * need something that will get freed during data set cleanup to use as
         * the node ID and uname.
         */
        free(id);
        id = NULL;
        uname = ID(xml_remote);

        /* Ensure a node has been created for the guest (it may have already
         * been, if it has a permanent node attribute), and ensure its weight is
         * -INFINITY so no other resources can run on it.
         */
        node = pe_find_node(data_set->nodes, uname);
        if (node == NULL) {
            node = pe_create_node(uname, uname, "remote", "-INFINITY",
                                  data_set);
        } else {
            node->weight = -INFINITY;
        }
        node->rsc_discover_mode = pe_discover_never;

        /* unpack_remote_nodes() ensures that each remote node and guest node
         * has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
         * Unfortunately, a bundle has to be mostly unpacked before it's obvious
         * what nodes will be needed, so we do it just above.
         *
         * Worse, that means that the node may have been utilized while
         * unpacking other resources, without our weight correction. The most
         * likely place for this to happen is when common_unpack() calls
         * resource_location() to set a default score in symmetric clusters.
         * This adds a node *copy* to each resource's allowed nodes, and these
         * copies will have the wrong weight.
         *
         * As a hacky workaround, fix those copies here.
         *
         * @TODO Possible alternative: ensure bundles are unpacked before other
         * resources, so the weight is correct before any copies are made.
         */
        for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) {
            disallow_node((resource_t *) (rsc_iter->data), uname);
        }

        tuple->node = node_copy(node);
        tuple->node->weight = 500;
        tuple->node->rsc_discover_mode = pe_discover_exclusive;

        /* Ensure the node shows up as allowed and with the correct discovery set */
        if (tuple->child->allowed_nodes != NULL) {
            g_hash_table_destroy(tuple->child->allowed_nodes);
        }
        tuple->child->allowed_nodes = g_hash_table_new_full(crm_str_hash,
                                                            g_str_equal, NULL,
                                                            free);
        g_hash_table_insert(tuple->child->allowed_nodes, (gpointer) tuple->node->details->id, node_copy(tuple->node));

        {
            node_t *copy = node_copy(tuple->node);
            copy->weight = -INFINITY;
            g_hash_table_insert(tuple->child->parent->allowed_nodes, (gpointer) tuple->node->details->id, copy);
        }
        if (common_unpack(xml_remote, &tuple->remote, parent, data_set) == FALSE) {
            return FALSE;
        }

        g_hash_table_iter_init(&gIter, tuple->remote->allowed_nodes);
        while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
            if(is_remote_node(node)) {
                /* Remote resources can only run on 'normal' cluster nodes */
                node->weight = -INFINITY;
            }
        }

        tuple->node->details->remote_rsc = tuple->remote;

        // Ensure is_container_remote_node() functions correctly immediately
        tuple->remote->container = tuple->docker;

        /* A bundle's #kind is closer to "container" (guest node) than the
         * "remote" set by pe_create_node().
         */
        g_hash_table_insert(tuple->node->details->attrs,
                            strdup(CRM_ATTR_KIND), strdup("container"));

        /* One effect of this is that setup_container() will add
         * tuple->remote to tuple->docker's fillers, which will make
         * rsc_contains_remote_node() true for tuple->docker.
         *
         * tuple->child does NOT get added to tuple->docker's fillers.
         * The only noticeable effect if it did would be for its fail count to
         * be taken into account when checking tuple->docker's migration
         * threshold.
         */
        parent->children = g_list_append(parent->children, tuple->remote);
    }
    return TRUE;
}

static bool
create_container(
    resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
    pe_working_set_t * data_set)
{

    if (data->type == PE_CONTAINER_TYPE_DOCKER &&
        create_docker_resource(parent, data, tuple, data_set) == FALSE) {
        return FALSE;
    }
    if (data->type == PE_CONTAINER_TYPE_PODMAN &&
        create_podman_resource(parent, data, tuple, data_set) == FALSE) {
        return FALSE;
    }
    if (data->type == PE_CONTAINER_TYPE_RKT &&
        create_rkt_resource(parent, data, tuple, data_set) == FALSE) {
        return FALSE;
    }

    if(create_ip_resource(parent, data, tuple, data_set) == FALSE) {
        return FALSE;
    }
    if(create_remote_resource(parent, data, tuple, data_set) == FALSE) {
        return FALSE;
    }
    if(tuple->child && tuple->ipaddr) {
        add_hash_param(tuple->child->meta, "external-ip", tuple->ipaddr);
    }

    if(tuple->remote) {
        /*
         * Allow the remote connection resource to be allocated to a
         * different node than the one on which the docker container
         * is active.
         *
         * This makes it possible to have Pacemaker Remote nodes running
         * containers with pacemaker-remoted inside in order to start
         * services inside those containers.
         */
        set_bit(tuple->remote->flags, pe_rsc_allow_remote_remotes);
    }

    return TRUE;
}
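/* In mount_add() below, a nonzero "flags" value marks a mount created from
 * source-dir-root: each replica then gets its own subdirectory,
 * "<source>/<prefix>-<offset>", as seen in the create_*_resource()
 * functions above.
 */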

static void
mount_add(container_variant_data_t *container_data, const char *source,
          const char *target, const char *options, int flags)
{
    container_mount_t *mount = calloc(1, sizeof(container_mount_t));

    mount->source = strdup(source);
    mount->target = strdup(target);
    if (options) {
        mount->options = strdup(options);
    }
    mount->flags = flags;
    container_data->mounts = g_list_append(container_data->mounts, mount);
}

static void mount_free(container_mount_t *mount)
{
    free(mount->source);
    free(mount->target);
    free(mount->options);
    free(mount);
}

static void port_free(container_port_t *port)
{
    free(port->source);
    free(port->target);
    free(port);
}

static container_grouping_t *
tuple_for_remote(resource_t *remote)
{
    resource_t *top = remote;
    container_variant_data_t *container_data = NULL;

    if (top == NULL) {
        return NULL;
    }

    while (top->parent != NULL) {
        top = top->parent;
    }

    get_container_variant_data(container_data, top);
    for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
        container_grouping_t *tuple = (container_grouping_t *)gIter->data;
        if(tuple->remote == remote) {
            return tuple;
        }
    }
    CRM_LOG_ASSERT(FALSE);
    return NULL;
}

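/* container_fix_remote_addr() below reports whether a remote connection
 * resource uses the REMOTE_CONTAINER_HACK: an ocf:pacemaker:remote primitive
 * whose "addr" parameter is the magic string "#uname", meaning the real
 * address must be substituted later (see container_fix_remote_addr_in()).
 */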
bool
container_fix_remote_addr(resource_t *rsc)
{
    const char *name;
    const char *value;
    const char *attr_list[] = {
        XML_ATTR_TYPE,
        XML_AGENT_ATTR_CLASS,
        XML_AGENT_ATTR_PROVIDER,
    };
    const char *value_list[] = {
        "remote",
        PCMK_RESOURCE_CLASS_OCF,
        "pacemaker"
    };

    if(rsc == NULL) {
        return FALSE;
    }

    name = "addr";
    value = g_hash_table_lookup(rsc->parameters, name);
    if (safe_str_eq(value, "#uname") == FALSE) {
        return FALSE;
    }

    for (int lpc = 0; lpc < DIMOF(attr_list); lpc++) {
        value = crm_element_value(rsc->xml, attr_list[lpc]);
        if (safe_str_eq(value, value_list[lpc]) == FALSE) {
            return FALSE;
        }
    }
    return TRUE;
}

const char *
container_fix_remote_addr_in(resource_t *rsc, xmlNode *xml, const char *field)
{
    // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside

    pe_node_t *node = NULL;
    container_grouping_t *tuple = NULL;

    if(container_fix_remote_addr(rsc) == FALSE) {
        return NULL;
    }

    tuple = tuple_for_remote(rsc);
    if(tuple == NULL) {
        return NULL;
    }

    node = tuple->docker->allocated_to;
    if (node == NULL) {
        /* If it won't be running anywhere after the
         * transition, go with where it's running now.
         */
        node = pe__current_node(tuple->docker);
    }

    if(node == NULL) {
        crm_trace("Cannot determine address for bundle connection %s", rsc->id);
        return NULL;
    }

    crm_trace("Setting address for bundle connection %s to bundle host %s",
              rsc->id, node->details->uname);
    if(xml != NULL && field != NULL) {
        crm_xml_add(xml, field, node->details->uname);
    }

    return node->details->uname;
}

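/* container_unpack() below parses a <bundle> definition from the CIB. A
 * minimal sketch of the kind of configuration it expects (illustrative
 * values):
 *
 *   <bundle id="httpd-bundle">
 *     <docker image="pcmk:httpd" replicas="3"/>
 *     <network ip-range-start="10.1.2.3" host-netmask="24">
 *       <port-mapping id="httpd-port" port="80"/>
 *     </network>
 *     <storage>
 *       <storage-mapping id="httpd-root" source-dir="/srv/www"
 *                        target-dir="/var/www/html"/>
 *     </storage>
 *     <primitive id="httpd" class="ocf" provider="heartbeat" type="apache"/>
 *   </bundle>
 *
 * The <docker> element may instead be <podman> or <rkt>.
 */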
gboolean
container_unpack(resource_t *rsc, pe_working_set_t * data_set)
{
    const char *value = NULL;
    xmlNode *xml_obj = NULL;
    xmlNode *xml_resource = NULL;
    container_variant_data_t *container_data = NULL;

    CRM_ASSERT(rsc != NULL);
    pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);

    container_data = calloc(1, sizeof(container_variant_data_t));
    rsc->variant_opaque = container_data;
    container_data->prefix = strdup(rsc->id);

    xml_obj = first_named_child(rsc->xml, "docker");
    if (xml_obj != NULL) {
        container_data->type = PE_CONTAINER_TYPE_DOCKER;
    } else {
        xml_obj = first_named_child(rsc->xml, "rkt");
        if (xml_obj != NULL) {
            container_data->type = PE_CONTAINER_TYPE_RKT;
        } else {
            xml_obj = first_named_child(rsc->xml, "podman");
            if (xml_obj != NULL) {
                container_data->type = PE_CONTAINER_TYPE_PODMAN;
            } else {
                return FALSE;
            }
        }
    }

    value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX);
    if (value == NULL) {
        // @COMPAT deprecated since 2.0.0
        value = crm_element_value(xml_obj, "masters");
    }
    container_data->promoted_max = crm_parse_int(value, "0");
    if (container_data->promoted_max < 0) {
        pe_err("%s for %s must be nonnegative integer, using 0",
               XML_RSC_ATTR_PROMOTED_MAX, rsc->id);
        container_data->promoted_max = 0;
    }

    value = crm_element_value(xml_obj, "replicas");
    if ((value == NULL) && container_data->promoted_max) {
        container_data->replicas = container_data->promoted_max;
    } else {
        container_data->replicas = crm_parse_int(value, "1");
    }
    if (container_data->replicas < 1) {
        pe_err("'replicas' for %s must be positive integer, using 1", rsc->id);
        container_data->replicas = 1;
    }

    /*
     * Communication between containers on the same host via the
     * floating IPs only works if docker is started with:
     *   --userland-proxy=false --ip-masq=false
     */
    value = crm_element_value(xml_obj, "replicas-per-host");
    container_data->replicas_per_host = crm_parse_int(value, "1");
    if (container_data->replicas_per_host < 1) {
        pe_err("'replicas-per-host' for %s must be positive integer, using 1",
               rsc->id);
        container_data->replicas_per_host = 1;
    }
    if (container_data->replicas_per_host == 1) {
        clear_bit(rsc->flags, pe_rsc_unique);
    }

    container_data->docker_run_command = crm_element_value_copy(xml_obj, "run-command");
    container_data->docker_run_options = crm_element_value_copy(xml_obj, "options");
    container_data->image = crm_element_value_copy(xml_obj, "image");
    container_data->docker_network = crm_element_value_copy(xml_obj, "network");

    xml_obj = first_named_child(rsc->xml, "network");
    if(xml_obj) {

        container_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
        container_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
        container_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
        container_data->control_port = crm_element_value_copy(xml_obj, "control-port");
        value = crm_element_value(xml_obj, "add-host");
        if (check_boolean(value) == FALSE) {
            container_data->add_host = TRUE;
        } else {
            crm_str_to_boolean(value, &container_data->add_host);
        }

        for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
             xml_child = __xml_next_element(xml_child)) {

            container_port_t *port = calloc(1, sizeof(container_port_t));
            port->source = crm_element_value_copy(xml_child, "port");

            if(port->source == NULL) {
                port->source = crm_element_value_copy(xml_child, "range");
            } else {
                port->target = crm_element_value_copy(xml_child, "internal-port");
            }

            if(port->source != NULL && strlen(port->source) > 0) {
                if(port->target == NULL) {
                    port->target = strdup(port->source);
                }
                container_data->ports = g_list_append(container_data->ports, port);

            } else {
                pe_err("Invalid port directive %s", ID(xml_child));
                port_free(port);
            }
        }
    }

    xml_obj = first_named_child(rsc->xml, "storage");
    for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
         xml_child = __xml_next_element(xml_child)) {

        const char *source = crm_element_value(xml_child, "source-dir");
        const char *target = crm_element_value(xml_child, "target-dir");
        const char *options = crm_element_value(xml_child, "options");
        int flags = 0;

        if (source == NULL) {
            source = crm_element_value(xml_child, "source-dir-root");
            flags = 1;
        }

        if (source && target) {
            mount_add(container_data, source, target, options, flags);
        } else {
            pe_err("Invalid mount directive %s", ID(xml_child));
        }
    }

    xml_obj = first_named_child(rsc->xml, "primitive");
    if (xml_obj && valid_network(container_data)) {
        char *value = NULL;
        xmlNode *xml_set = NULL;

        xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);

        /* @COMPAT We no longer use the <master> tag, but we need to keep it as
         * part of the resource name, so that bundles don't restart in a rolling
         * upgrade. (It also avoids needing to change regression tests.)
         */
        crm_xml_set_id(xml_resource, "%s-%s", container_data->prefix,
                       (container_data->promoted_max? "master"
                       : (const char *)xml_resource->name));

        xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
        crm_xml_set_id(xml_set, "%s-%s-meta", container_data->prefix, xml_resource->name);

        crm_create_nvpair_xml(xml_set, NULL,
                              XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE);

        value = crm_itoa(container_data->replicas);
        crm_create_nvpair_xml(xml_set, NULL,
                              XML_RSC_ATTR_INCARNATION_MAX, value);
        free(value);

        value = crm_itoa(container_data->replicas_per_host);
        crm_create_nvpair_xml(xml_set, NULL,
                              XML_RSC_ATTR_INCARNATION_NODEMAX, value);
        free(value);

        crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE,
                              (container_data->replicas_per_host > 1)?
                              XML_BOOLEAN_TRUE : XML_BOOLEAN_FALSE);

        if (container_data->promoted_max) {
            crm_create_nvpair_xml(xml_set, NULL,
                                  XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE);

            value = crm_itoa(container_data->promoted_max);
            crm_create_nvpair_xml(xml_set, NULL,
                                  XML_RSC_ATTR_PROMOTED_MAX, value);
            free(value);
        }

        //crm_xml_add(xml_obj, XML_ATTR_ID, container_data->prefix);
        add_node_copy(xml_resource, xml_obj);

    } else if(xml_obj) {
        pe_err("Cannot control %s inside %s without either ip-range-start or control-port",
               rsc->id, ID(xml_obj));
        return FALSE;
    }

    if(xml_resource) {
        int lpc = 0;
        GListPtr childIter = NULL;
        resource_t *new_rsc = NULL;
        container_port_t *port = NULL;

        int offset = 0, max = 1024;
        char *buffer = NULL;

        if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) {
            pe_err("Failed unpacking resource %s", ID(rsc->xml));
            if (new_rsc != NULL && new_rsc->fns != NULL) {
                new_rsc->fns->free(new_rsc);
            }
            return FALSE;
        }

        container_data->child = new_rsc;

        /* Currently, we always map the default authentication key location
         * into the same location inside the container.
         *
         * Ideally, we would respect the host's PCMK_authkey_location, but:
         * - it may be different on different nodes;
         * - the actual connection will do extra checking to make sure the key
         *   file exists and is readable, which we can't do here on the DC;
         * - tools such as crm_resource and crm_simulate may not have the same
         *   environment variables as the cluster, causing operation digests to
         *   differ.
         *
         * Always using the default location inside the container is fine,
         * because we control the pacemaker_remote environment, and it avoids
         * having to pass another environment variable to the container.
         *
         * @TODO A better solution may be to have only pacemaker_remote use the
         * environment variable, and have the cluster nodes use a new
         * cluster option for key location. This would introduce the limitation
         * of the location being the same on all cluster nodes, but that's
         * reasonable.
         */
        mount_add(container_data, DEFAULT_REMOTE_KEY_LOCATION,
                  DEFAULT_REMOTE_KEY_LOCATION, NULL, 0);

        mount_add(container_data, CRM_BUNDLE_DIR, "/var/log", NULL, 1);

        port = calloc(1, sizeof(container_port_t));
        if(container_data->control_port) {
            port->source = strdup(container_data->control_port);
        } else {
            /* If we wanted to respect PCMK_remote_port, we could use
             * crm_default_remote_port() here and elsewhere in this file instead
             * of DEFAULT_REMOTE_PORT.
             *
             * However, it gains nothing, since we control both the container
             * environment and the connection resource parameters, and the user
             * can use a different port if desired by setting control-port.
             */
            port->source = crm_itoa(DEFAULT_REMOTE_PORT);
        }
        port->target = strdup(port->source);
        container_data->ports = g_list_append(container_data->ports, port);

        buffer = calloc(1, max+1);
        for(childIter = container_data->child->children; childIter != NULL; childIter = childIter->next) {
            container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t));
            tuple->child = childIter->data;
            tuple->child->exclusive_discover = TRUE;
            tuple->offset = lpc++;

            // Ensure the child's notify gets set based on the underlying primitive's value
            if(is_set(tuple->child->flags, pe_rsc_notify)) {
                set_bit(container_data->child->flags, pe_rsc_notify);
            }

            offset += allocate_ip(container_data, tuple, buffer+offset, max-offset);
            container_data->tuples = g_list_append(container_data->tuples, tuple);
            container_data->attribute_target = g_hash_table_lookup(tuple->child->meta, XML_RSC_ATTR_TARGET);
        }
        container_data->docker_host_options = buffer;
        if(container_data->attribute_target) {
            g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET), strdup(container_data->attribute_target));
            g_hash_table_replace(container_data->child->meta, strdup(XML_RSC_ATTR_TARGET), strdup(container_data->attribute_target));
        }

    } else {
        // Just a naked container, no pacemaker-remote
        int offset = 0, max = 1024;
        char *buffer = calloc(1, max+1);

        for(int lpc = 0; lpc < container_data->replicas; lpc++) {
            container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t));
            tuple->offset = lpc;
            offset += allocate_ip(container_data, tuple, buffer+offset, max-offset);
            container_data->tuples = g_list_append(container_data->tuples, tuple);
        }

        container_data->docker_host_options = buffer;
    }

    for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
        container_grouping_t *tuple = (container_grouping_t *)gIter->data;
        if (create_container(rsc, container_data, tuple, data_set) == FALSE) {
            pe_err("Failed unpacking resource %s", rsc->id);
            rsc->fns->free(rsc);
            return FALSE;
        }
    }

    if(container_data->child) {
        rsc->children = g_list_append(rsc->children, container_data->child);
    }
    return TRUE;
}

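/* tuple_rsc_active() below is a tri-state helper for container_active():
 * TRUE means this member settles the question (one active member suffices
 * when "all" is FALSE), FALSE settles it the other way (one inactive member
 * suffices when "all" is TRUE), and -1 means keep checking the replica's
 * other members.
 */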
static int
tuple_rsc_active(resource_t *rsc, gboolean all)
{
    if (rsc) {
        gboolean child_active = rsc->fns->active(rsc, all);

        if (child_active && !all) {
            return TRUE;
        } else if (!child_active && all) {
            return FALSE;
        }
    }
    return -1;
}

gboolean
container_active(resource_t * rsc, gboolean all)
{
    container_variant_data_t *container_data = NULL;
    GListPtr iter = NULL;

    get_container_variant_data(container_data, rsc);
    for (iter = container_data->tuples; iter != NULL; iter = iter->next) {
        container_grouping_t *tuple = (container_grouping_t *)(iter->data);
        int rsc_active;

        rsc_active = tuple_rsc_active(tuple->ip, all);
        if (rsc_active >= 0) {
            return (gboolean) rsc_active;
        }

        rsc_active = tuple_rsc_active(tuple->child, all);
        if (rsc_active >= 0) {
            return (gboolean) rsc_active;
        }

        rsc_active = tuple_rsc_active(tuple->docker, all);
        if (rsc_active >= 0) {
            return (gboolean) rsc_active;
        }

        rsc_active = tuple_rsc_active(tuple->remote, all);
        if (rsc_active >= 0) {
            return (gboolean) rsc_active;
        }
    }

    /* If "all" is TRUE, we've already checked that no resources were inactive,
     * so return TRUE; if "all" is FALSE, we didn't find any active resources,
     * so return FALSE.
     */
    return all;
}

/*!
 * \brief Get the child resource of the bundle replica assigned to a node
 *
 * \param[in] bundle  Bundle to check
 * \param[in] node    Node to check
 *
 * \return Child resource of the replica assigned to \p node, if any
 */
resource_t *
find_container_child(const resource_t *bundle, const node_t *node)
{
    container_variant_data_t *container_data = NULL;
    CRM_ASSERT(bundle && node);

    get_container_variant_data(container_data, bundle);
    for (GListPtr gIter = container_data->tuples; gIter != NULL;
         gIter = gIter->next) {
        container_grouping_t *tuple = (container_grouping_t *)gIter->data;

        CRM_ASSERT(tuple && tuple->node);
        if (tuple->node->details == node->details) {
            return tuple->child;
        }
    }
    return NULL;
}

static void
print_rsc_in_list(resource_t *rsc, const char *pre_text, long options,
                  void *print_data)
{
    if (rsc != NULL) {
        if (options & pe_print_html) {
            status_print("<li>");
        }
        rsc->fns->print(rsc, pre_text, options, print_data);
        if (options & pe_print_html) {
            status_print("</li>\n");
        }
    }
}

static const char*
container_type_as_string(enum container_type t)
{
    if (t == PE_CONTAINER_TYPE_DOCKER) {
        return PE_CONTAINER_TYPE_DOCKER_S;
    } else if (t == PE_CONTAINER_TYPE_RKT) {
        return PE_CONTAINER_TYPE_RKT_S;
    } else if (t == PE_CONTAINER_TYPE_PODMAN) {
        return PE_CONTAINER_TYPE_PODMAN_S;
    } else {
        return PE_CONTAINER_TYPE_UNKNOWN_S;
    }
}

static void
container_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data)
{
    container_variant_data_t *container_data = NULL;
    char *child_text = NULL;
    CRM_CHECK(rsc != NULL, return);

    if (pre_text == NULL) {
        pre_text = "";
    }
    child_text = crm_concat(pre_text, " ", ' ');

    get_container_variant_data(container_data, rsc);

    status_print("%s<bundle ", pre_text);
    status_print("id=\"%s\" ", rsc->id);

    // Always lowercase the container technology type for use as XML value
    status_print("type=\"");
    for (const char *c = container_type_as_string(container_data->type);
         *c; ++c) {
        status_print("%c", tolower(*c));
    }
    status_print("\" ");

    status_print("image=\"%s\" ", container_data->image);
    status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique)? "true" : "false");
    status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
    status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
    status_print(">\n");

    for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
        container_grouping_t *tuple = (container_grouping_t *)gIter->data;

        CRM_ASSERT(tuple);
        status_print("%s    <replica id=\"%d\">\n", pre_text, tuple->offset);
        print_rsc_in_list(tuple->ip, child_text, options, print_data);
        print_rsc_in_list(tuple->child, child_text, options, print_data);
        print_rsc_in_list(tuple->docker, child_text, options, print_data);
        print_rsc_in_list(tuple->remote, child_text, options, print_data);
        status_print("%s    </replica>\n", pre_text);
    }
    status_print("%s</bundle>\n", pre_text);
    free(child_text);
}

static void
tuple_print(container_grouping_t * tuple, const char *pre_text, long options, void *print_data)
{
    node_t *node = NULL;
    resource_t *rsc = tuple->child;

    int offset = 0;
    char buffer[LINE_MAX];

    if(rsc == NULL) {
        rsc = tuple->docker;
    }

    if(tuple->remote) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->remote));
    } else {
        offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->docker));
    }
    if(tuple->ipaddr) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", tuple->ipaddr);
    }

    node = pe__current_node(tuple->docker);
    common_print(rsc, pre_text, buffer, node, options, print_data);
}

void
container_print(resource_t * rsc, const char *pre_text, long options, void *print_data)
{
    container_variant_data_t *container_data = NULL;
    char *child_text = NULL;
    CRM_CHECK(rsc != NULL, return);

    if (options & pe_print_xml) {
        container_print_xml(rsc, pre_text, options, print_data);
        return;
    }

    get_container_variant_data(container_data, rsc);

    if (pre_text == NULL) {
        pre_text = " ";
    }

    status_print("%s%s container%s: %s [%s]%s%s\n",
                 pre_text, container_type_as_string(container_data->type),
                 container_data->replicas>1?" set":"", rsc->id, container_data->image,
                 is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                 is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
    if (options & pe_print_html) {
        status_print("<br />\n<ul>\n");
    }

    for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
        container_grouping_t *tuple = (container_grouping_t *)gIter->data;

        CRM_ASSERT(tuple);
        if (options & pe_print_html) {
            status_print("<li>");
        }

        if (is_set(options, pe_print_implicit)) {
            child_text = crm_strdup_printf("     %s", pre_text);
            if(g_list_length(container_data->tuples) > 1) {
                status_print("  %sReplica[%d]\n", pre_text, tuple->offset);
            }
            if (options & pe_print_html) {
                status_print("<br />\n<ul>\n");
            }
            print_rsc_in_list(tuple->ip, child_text, options, print_data);
            print_rsc_in_list(tuple->docker, child_text, options, print_data);
            print_rsc_in_list(tuple->remote, child_text, options, print_data);
            print_rsc_in_list(tuple->child, child_text, options, print_data);
            if (options & pe_print_html) {
                status_print("</ul>\n");
            }
        } else {
            child_text = crm_strdup_printf("%s  ", pre_text);
            tuple_print(tuple, child_text, options, print_data);
        }
        free(child_text);

        if (options & pe_print_html) {
            status_print("</li>\n");
        }
    }
    if (options & pe_print_html) {
        status_print("</ul>\n");
    }
}

void
tuple_free(container_grouping_t *tuple)
{
    if(tuple == NULL) {
        return;
    }

    if(tuple->node) {
        free(tuple->node);
        tuple->node = NULL;
    }

    if(tuple->ip) {
        free_xml(tuple->ip->xml);
        tuple->ip->xml = NULL;
        tuple->ip->fns->free(tuple->ip);
        tuple->ip = NULL;
    }
    if(tuple->docker) {
        free_xml(tuple->docker->xml);
        tuple->docker->xml = NULL;
        tuple->docker->fns->free(tuple->docker);
        tuple->docker = NULL;
    }
    if(tuple->remote) {
        free_xml(tuple->remote->xml);
        tuple->remote->xml = NULL;
        tuple->remote->fns->free(tuple->remote);
        tuple->remote = NULL;
    }
    free(tuple->ipaddr);
    free(tuple);
}

void
container_free(resource_t *rsc)
{
    container_variant_data_t *container_data = NULL;
    CRM_CHECK(rsc != NULL, return);

    get_container_variant_data(container_data, rsc);
    pe_rsc_trace(rsc, "Freeing %s", rsc->id);

    free(container_data->prefix);
    free(container_data->image);
    free(container_data->control_port);
    free(container_data->host_network);
    free(container_data->host_netmask);
    free(container_data->ip_range_start);
    free(container_data->docker_network);
    free(container_data->docker_run_options);
    free(container_data->docker_run_command);
    free(container_data->docker_host_options);

    g_list_free_full(container_data->tuples, (GDestroyNotify)tuple_free);
    g_list_free_full(container_data->mounts, (GDestroyNotify)mount_free);
    g_list_free_full(container_data->ports, (GDestroyNotify)port_free);
    g_list_free(rsc->children);

    if(container_data->child) {
        free_xml(container_data->child->xml);
        container_data->child->xml = NULL;
        container_data->child->fns->free(container_data->child);
    }
    common_free(rsc);
}

enum rsc_role_e
container_resource_state(const resource_t * rsc, gboolean current)
{
    enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
    return container_role;
}

/*!
 * \brief Get the number of configured replicas in a bundle
 *
 * \param[in] rsc  Bundle resource
 *
 * \return Number of configured replicas, or 0 on error
 */
int
pe_bundle_replicas(const resource_t *rsc)
{
    if ((rsc == NULL) || (rsc->variant != pe_container)) {
        return 0;
    } else {
        container_variant_data_t *container_data = NULL;

        get_container_variant_data(container_data, rsc);
        return container_data->replicas;
    }
}