2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2016 Flavius Anton
5 * Copyright (c) 2016 Mihai Tiganus
6 * Copyright (c) 2016-2019 Mihai Carabas
7 * Copyright (c) 2017-2019 Darius Mihai
8 * Copyright (c) 2017-2019 Elena Mihailescu
9 * Copyright (c) 2018-2019 Sergiu Weisz
10 * All rights reserved.
11 * The bhyve-snapshot feature was developed under sponsorships
12 * from Matthew Grooms.
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
23 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 #include <sys/types.h>
37 #ifndef WITHOUT_CAPSICUM
38 #include <sys/capsicum.h>
41 #include <sys/socket.h>
46 #ifndef WITHOUT_CAPSICUM
47 #include <capsicum_helpers.h>
61 #include <pthread_np.h>
64 #include <sys/ioctl.h>
66 #include <machine/vmm.h>
67 #ifndef WITHOUT_CAPSICUM
68 #include <machine/vmm_dev.h>
70 #include <machine/vmm_snapshot.h>
76 #include "amd64/atkbdc.h"
/* Number of guest vCPUs; defined elsewhere in bhyve. */
93 extern int guest_ncpus;
/* Cached terminal size for the progress bar; refreshed by winch_handler(). */
95 static struct winsize winsize;
/* Previous SIGWINCH handler, restored after a memory snapshot completes. */
96 static sig_t old_winch_handler;
/* Size multipliers.  NOTE(review): KB is presumably defined on an elided line. */
99 #define MB (1024UL * KB)
100 #define GB (1024UL * MB)
/* Guest memory is copied to/from the snapshot file in 4 MB chunks. */
102 #define SNAPSHOT_CHUNK (4 * MB)
/* Size of the static buffer used to render the progress bar line. */
103 #define PROG_BUF_SZ (8192)
/* Scratch buffer size for serializing kernel-struct and device state. */
105 #define SNAPSHOT_BUFFER_SIZE (40 * MB)
/* Keys used in the JSON (.meta) metadata file produced/consumed below. */
107 #define JSON_KERNEL_ARR_KEY "kern_structs"
108 #define JSON_DEV_ARR_KEY "devices"
109 #define JSON_BASIC_METADATA_KEY "basic metadata"
110 #define JSON_SNAPSHOT_REQ_KEY "device"
111 #define JSON_SIZE_KEY "size"
112 #define JSON_FILE_OFFSET_KEY "file_offset"
114 #define JSON_NCPUS_KEY "ncpus"
115 #define JSON_VMNAME_KEY "vmname"
116 #define JSON_MEMSIZE_KEY "memsize"
117 #define JSON_MEMFLAGS_KEY "memflags"
/*
 * Body of a min/max-style helper macro (its #define line is elided from this
 * view).  The __typeof__ temporaries avoid evaluating each argument twice.
 */
121 __typeof__ (a) _a = (a); \
122 __typeof__ (b) _b = (b); \
/*
 * Kernel structures saved/restored through vm_snapshot_req().  Each entry
 * pairs the JSON name used in the metadata file with the kernel snapshot
 * request identifier.  NOTE(review): some entries may be elided from this view.
 */
126 static const struct vm_snapshot_kern_info snapshot_kern_structs[] = {
127 { "vhpet", STRUCT_VHPET },
129 { "vioapic", STRUCT_VIOAPIC },
130 { "vlapic", STRUCT_VLAPIC },
131 { "vmcx", STRUCT_VMCX },
132 { "vatpit", STRUCT_VATPIT },
133 { "vatpic", STRUCT_VATPIC },
134 { "vpmtmr", STRUCT_VPMTMR },
135 { "vrtc", STRUCT_VRTC },
/*
 * vCPU bookkeeping for checkpointing: which vCPUs exist (vcpus_active) and
 * which are currently parked (vcpus_suspended), all protected by vcpu_lock.
 * checkpoint_active is true while a checkpoint is in progress; vCPU threads
 * block on vcpus_can_run until it clears (see vm_vcpu_pause/vm_vcpu_resume).
 */
138 static cpuset_t vcpus_active, vcpus_suspended;
139 static pthread_mutex_t vcpu_lock = PTHREAD_MUTEX_INITIALIZER;
140 static pthread_cond_t vcpus_idle = PTHREAD_COND_INITIALIZER;
141 static pthread_cond_t vcpus_can_run = PTHREAD_COND_INITIALIZER;
142 static bool checkpoint_active;
145 * TODO: Harden this function and all of its callers since 'base_str' is a user
/*
 * Concatenate 'base_str' and 'ext' into a freshly malloc()ed, NUL-terminated
 * string.  Lengths are bounded by NAME_MAX via strnlen().  Returns the new
 * string; error paths (overlong name, allocation failure) appear to return
 * NULL on lines elided from this view.  Caller owns and frees the result.
 */
149 strcat_extension(const char *base_str, const char *ext)
152 size_t base_len, ext_len;
154 base_len = strnlen(base_str, NAME_MAX);
155 ext_len = strnlen(ext, NAME_MAX);
/* Reject combined names longer than NAME_MAX. */
157 if (base_len + ext_len > NAME_MAX) {
158 EPRINTLN("Filename exceeds maximum length.");
/* +1 for the terminating NUL. */
162 res = malloc(base_len + ext_len + 1);
164 EPRINTLN("Failed to allocate memory: %s", strerror(errno));
168 memcpy(res, base_str, base_len);
169 memcpy(res + base_len, ext, ext_len);
170 res[base_len + ext_len] = 0;
/*
 * Release all resources held by a restore_state: the kernel-data mapping,
 * the two file descriptors and the UCL parser/object.  Safe to call on a
 * partially-initialized state (each member is checked before release).
 */
176 destroy_restore_state(struct restore_state *rstate)
178 if (rstate == NULL) {
179 EPRINTLN("Attempting to destroy NULL restore struct.");
/* kdata_map is initialized to MAP_FAILED in load_restore_file(). */
183 if (rstate->kdata_map != MAP_FAILED)
184 munmap(rstate->kdata_map, rstate->kdata_len);
/* NOTE(review): fd > 0 treats fd 0 as "unset"; would also skip a valid fd 0. */
186 if (rstate->kdata_fd > 0)
187 close(rstate->kdata_fd);
188 if (rstate->vmmem_fd > 0)
189 close(rstate->vmmem_fd);
191 if (rstate->meta_root_obj != NULL)
192 ucl_object_unref(rstate->meta_root_obj);
193 if (rstate->meta_parser != NULL)
194 ucl_parser_free(rstate->meta_parser);
/*
 * Open the guest-RAM snapshot file read-only and record its fd and size in
 * 'rstate'.  Rejects empty files.  Error paths close the fd (cleanup label
 * partially elided from this view).
 */
198 load_vmmem_file(const char *filename, struct restore_state *rstate)
203 rstate->vmmem_fd = open(filename, O_RDONLY);
204 if (rstate->vmmem_fd < 0) {
205 perror("Failed to open restore file");
209 err = fstat(rstate->vmmem_fd, &sb);
211 perror("Failed to stat restore file");
215 if (sb.st_size == 0) {
216 fprintf(stderr, "Restore file is empty.\n");
220 rstate->vmmem_len = sb.st_size;
/* Error cleanup: close the fd if it was opened. */
225 if (rstate->vmmem_fd > 0)
226 close(rstate->vmmem_fd);
/*
 * Open the kernel-data snapshot file read-only, record its size, and map it
 * read-only into memory (rstate->kdata_map / kdata_len).  Rejects empty
 * files; unmaps/closes on failure (cleanup partially elided from this view).
 */
231 load_kdata_file(const char *filename, struct restore_state *rstate)
236 rstate->kdata_fd = open(filename, O_RDONLY);
237 if (rstate->kdata_fd < 0) {
238 perror("Failed to open kernel data file");
242 err = fstat(rstate->kdata_fd, &sb);
244 perror("Failed to stat kernel data file");
248 if (sb.st_size == 0) {
249 fprintf(stderr, "Kernel data file is empty.\n");
253 rstate->kdata_len = sb.st_size;
254 rstate->kdata_map = mmap(NULL, rstate->kdata_len, PROT_READ,
255 MAP_SHARED, rstate->kdata_fd, 0);
256 if (rstate->kdata_map == MAP_FAILED) {
257 perror("Failed to map restore file");
/* Error cleanup: close the fd if it was opened. */
264 if (rstate->kdata_fd > 0)
265 close(rstate->kdata_fd);
/*
 * Parse the JSON (.meta) metadata file with libucl and store the parser and
 * root object in 'rstate' (both released later by destroy_restore_state()).
 * On failure, frees the parser and returns an error (exact return values are
 * on lines elided from this view).
 */
270 load_metadata_file(const char *filename, struct restore_state *rstate)
273 struct ucl_parser *parser;
276 parser = ucl_parser_new(UCL_PARSER_DEFAULT);
277 if (parser == NULL) {
278 fprintf(stderr, "Failed to initialize UCL parser.\n");
280 goto err_load_metadata;
283 err = ucl_parser_add_file(parser, filename);
285 fprintf(stderr, "Failed to parse metadata file: '%s'\n",
288 goto err_load_metadata;
291 obj = ucl_parser_get_object(parser);
293 fprintf(stderr, "Failed to parse object.\n");
295 goto err_load_metadata;
/* Ownership of both parser and root object transfers to rstate. */
298 rstate->meta_parser = parser;
299 rstate->meta_root_obj = (ucl_object_t *)obj;
/* err_load_metadata: release the parser on any failure above. */
305 ucl_parser_free(parser);
/*
 * Top-level restore loader: given the checkpoint base filename, load the
 * guest RAM file (<filename>), kernel data (<filename>.kern) and metadata
 * (<filename>.meta) into 'rstate'.  On any failure the partially-loaded
 * state is torn down via destroy_restore_state().
 */
310 load_restore_file(const char *filename, struct restore_state *rstate)
313 char *kdata_filename = NULL, *meta_filename = NULL;
315 assert(filename != NULL);
316 assert(rstate != NULL);
/* Start from a clean state; MAP_FAILED marks "no mapping yet". */
318 memset(rstate, 0, sizeof(*rstate));
319 rstate->kdata_map = MAP_FAILED;
321 err = load_vmmem_file(filename, rstate);
323 fprintf(stderr, "Failed to load guest RAM file.\n");
327 kdata_filename = strcat_extension(filename, ".kern");
328 if (kdata_filename == NULL) {
329 fprintf(stderr, "Failed to construct kernel data filename.\n");
333 err = load_kdata_file(kdata_filename, rstate);
335 fprintf(stderr, "Failed to load guest kernel data file.\n");
339 meta_filename = strcat_extension(filename, ".meta");
340 if (meta_filename == NULL) {
341 fprintf(stderr, "Failed to construct kernel metadata filename.\n");
345 err = load_metadata_file(meta_filename, rstate);
347 fprintf(stderr, "Failed to load guest metadata file.\n");
/* Error path: undo whatever was loaded and free the derived filenames. */
354 destroy_restore_state(rstate);
355 if (kdata_filename != NULL)
356 free(kdata_filename);
357 if (meta_filename != NULL)
/*
 * Look up 'key' in UCL object 'obj' and convert its value to an integer into
 * *result_ptr; on a missing key or failed conversion, print an error and
 * return 'ret' from the enclosing function (return statements are on lines
 * elided from this view).
 */
362 #define JSON_GET_INT_OR_RETURN(key, obj, result_ptr, ret) \
364 const ucl_object_t *obj__; \
365 obj__ = ucl_object_lookup(obj, key); \
366 if (obj__ == NULL) { \
367 fprintf(stderr, "Missing key: '%s'", key); \
370 if (!ucl_object_toint_safe(obj__, result_ptr)) { \
371 fprintf(stderr, "Cannot convert '%s' value to int.", key); \
/* Same as above but converts the value to a string. */
376 #define JSON_GET_STRING_OR_RETURN(key, obj, result_ptr, ret) \
378 const ucl_object_t *obj__; \
379 obj__ = ucl_object_lookup(obj, key); \
380 if (obj__ == NULL) { \
381 fprintf(stderr, "Missing key: '%s'", key); \
384 if (!ucl_object_tostring_safe(obj__, result_ptr)) { \
385 fprintf(stderr, "Cannot convert '%s' value to string.", key); \
/*
 * If the metadata entry 'obj' describes device 'dev_name', validate its
 * recorded size/offset against the mapped kernel-data file and return a
 * pointer into kdata_map plus the size via *data_size; otherwise the
 * non-matching path (elided here) falls through so the caller can keep
 * iterating.
 */
391 lookup_check_dev(const char *dev_name, struct restore_state *rstate,
392 const ucl_object_t *obj, size_t *data_size)
394 const char *snapshot_req;
395 int64_t size, file_offset;
398 JSON_GET_STRING_OR_RETURN(JSON_SNAPSHOT_REQ_KEY, obj,
399 &snapshot_req, NULL);
400 assert(snapshot_req != NULL);
/* strcmp()==0 means the entry's "device" key matches dev_name. */
401 if (!strcmp(snapshot_req, dev_name)) {
402 JSON_GET_INT_OR_RETURN(JSON_SIZE_KEY, obj,
406 JSON_GET_INT_OR_RETURN(JSON_FILE_OFFSET_KEY, obj,
/* The recorded region must lie entirely within the mapped file. */
408 assert(file_offset >= 0);
409 assert((uint64_t)file_offset + size <= rstate->kdata_len);
411 *data_size = (size_t)size;
412 return ((uint8_t *)rstate->kdata_map + file_offset);
/*
 * Search the metadata array named 'key' (e.g. "devices" or "kern_structs")
 * for an entry matching 'dev_name', returning a pointer to its data in the
 * mapped kernel-data file (size via *data_size), or an error path (elided)
 * if the array is missing/malformed.
 */
419 lookup_dev(const char *dev_name, const char *key, struct restore_state *rstate,
422 const ucl_object_t *devs = NULL, *obj = NULL;
423 ucl_object_iter_t it = NULL;
426 devs = ucl_object_lookup(rstate->meta_root_obj, key);
428 fprintf(stderr, "Failed to find '%s' object.\n",
433 if (ucl_object_type(devs) != UCL_ARRAY) {
434 fprintf(stderr, "Object '%s' is not an array.\n",
/* Iterate the array; lookup_check_dev returns non-NULL on a match. */
439 while ((obj = ucl_object_iterate(devs, &it, true)) != NULL) {
440 ret = lookup_check_dev(dev_name, rstate, obj, data_size);
/*
 * Fetch the "basic metadata" JSON object from the restore metadata root,
 * verifying it exists and is a JSON object.  Returns NULL paths elided.
 */
448 static const ucl_object_t *
449 lookup_basic_metadata_object(struct restore_state *rstate)
451 const ucl_object_t *basic_meta_obj = NULL;
453 basic_meta_obj = ucl_object_lookup(rstate->meta_root_obj,
454 JSON_BASIC_METADATA_KEY);
455 if (basic_meta_obj == NULL) {
456 fprintf(stderr, "Failed to find '%s' object.\n",
457 JSON_BASIC_METADATA_KEY);
461 if (ucl_object_type(basic_meta_obj) != UCL_OBJECT) {
462 fprintf(stderr, "Object '%s' is not a JSON object.\n",
463 JSON_BASIC_METADATA_KEY);
467 return (basic_meta_obj);
/* Return the VM name recorded in the snapshot's basic metadata. */
471 lookup_vmname(struct restore_state *rstate)
474 const ucl_object_t *obj;
476 obj = lookup_basic_metadata_object(rstate);
480 JSON_GET_STRING_OR_RETURN(JSON_VMNAME_KEY, obj, &vmname, NULL);
/* Return the memory flags recorded in the snapshot's basic metadata. */
485 lookup_memflags(struct restore_state *rstate)
488 const ucl_object_t *obj;
490 obj = lookup_basic_metadata_object(rstate);
494 JSON_GET_INT_OR_RETURN(JSON_MEMFLAGS_KEY, obj, &memflags, 0);
496 return ((int)memflags);
/* Return the guest memory size recorded in the snapshot's basic metadata. */
500 lookup_memsize(struct restore_state *rstate)
503 const ucl_object_t *obj;
505 obj = lookup_basic_metadata_object(rstate);
509 JSON_GET_INT_OR_RETURN(JSON_MEMSIZE_KEY, obj, &memsize, 0);
513 return ((size_t)memsize);
/* Return the vCPU count recorded in the snapshot's basic metadata. */
518 lookup_guest_ncpus(struct restore_state *rstate)
521 const ucl_object_t *obj;
523 obj = lookup_basic_metadata_object(rstate);
527 JSON_GET_INT_OR_RETURN(JSON_NCPUS_KEY, obj, &ncpus, 0);
/* SIGWINCH handler: re-query the terminal size used by print_progress(). */
532 winch_handler(int signal __unused)
535 ioctl(STDOUT_FILENO, TIOCGWINSZ, &winsize);
536 #endif /* TIOCGWINSZ */
/*
 * Render a progress bar "[cur / max] |###/   |" sized to the terminal width,
 * choosing a B/MB/GB divisor from maxval and writing directly to stdout.
 * Static state (prog_buf, wip_idx) makes this non-reentrant; it is only
 * driven from the single spinner thread.
 */
540 print_progress(size_t crtval, const size_t maxval)
543 double crtval_gb, maxval_gb;
544 size_t i, win_width, prog_start, prog_done, prog_end;
547 static char prog_buf[PROG_BUF_SZ];
548 static const size_t len = sizeof(prog_buf);
551 static const char *div_str;
/* Spinner glyphs cycled at the head of the bar while work is in progress. */
553 static char wip_bar[] = { '/', '-', '\\', '|' };
554 static int wip_idx = 0;
/* Degenerate case: nothing to report. */
557 printf("[0B / 0B]\r\n");
/* Pick a human-friendly unit based on the total size. */
564 if (maxval > 10 * GB) {
567 } else if (maxval > 10 * MB) {
575 crtval_gb = (double) crtval / div;
576 maxval_gb = (double) maxval / div;
/* First format just maxval to learn its printed width (mval_len). */
578 rc = snprintf(prog_buf, len, "%.03lf", maxval_gb);
580 fprintf(stderr, "Maxval too big\n");
585 rc = snprintf(prog_buf, len, "\r[%*.03lf%s / %.03lf%s] |",
586 mval_len, crtval_gb, div_str, maxval_gb, div_str);
589 fprintf(stderr, "Buffer too small to print progress\n");
/* Clamp the bar to the terminal width (or the buffer, if narrower). */
593 win_width = min(winsize.ws_col, len);
596 if (prog_start < (win_width - 2)) {
597 prog_end = win_width - prog_start - 2;
598 prog_done = prog_end * (crtval_gb / maxval_gb);
/* Fill the completed portion of the bar. */
600 for (i = prog_start; i < prog_start + prog_done; i++)
/* Animate a spinner character at the progress frontier until done. */
603 if (crtval != maxval) {
604 prog_buf[i] = wip_bar[wip_idx];
605 wip_idx = (wip_idx + 1) % sizeof(wip_bar);
/* Pad the remainder and close the bar. */
611 for (; i < win_width - 2; i++)
614 prog_buf[win_width - 2] = '|';
617 prog_buf[win_width - 1] = '\0';
/* NOTE(review): write() return value is ignored here. */
618 write(STDOUT_FILENO, prog_buf, win_width);
/*
 * Spinner thread body: every 50 ms, read the shared progress counter from the
 * spinner_info and redraw the progress bar until the transfer completes.
 */
624 snapshot_spinner_cb(void *arg)
627 size_t crtval, maxval, total;
628 struct spinner_info *si;
636 ts.tv_nsec = 50 * 1000 * 1000; /* 50 ms sleep time */
/* NOTE(review): *si->crtval is written by the worker thread without
 * synchronization visible here — assumed benign for progress display. */
639 crtval = *si->crtval;
643 rc = print_progress(crtval, total);
645 fprintf(stderr, "Failed to parse progress\n");
649 nanosleep(&ts, NULL);
650 } while (crtval < maxval);
/*
 * Copy one region of guest memory to (op_wr) or from (!op_wr) the snapshot
 * file at offset 'foff', in SNAPSHOT_CHUNK pieces, optionally showing a
 * progress bar driven by a spinner thread when stdin is a tty.
 */
657 vm_snapshot_mem_part(const int snapfd, const size_t foff, void *src,
658 const size_t len, const size_t totalmem, const bool op_wr)
661 size_t part_done, todo, rem;
664 pthread_t spinner_th;
665 struct spinner_info *si;
667 if (lseek(snapfd, foff, SEEK_SET) < 0) {
668 perror("Failed to change file offset");
/* Only show progress on an interactive terminal with a known width. */
672 show_progress = false;
673 if (isatty(STDIN_FILENO) && (winsize.ws_col != 0))
674 show_progress = true;
/* Compound literal shared with the spinner thread; crtval tracks progress. */
680 si = &(struct spinner_info) {
681 .crtval = &part_done,
682 .maxval = foff + len,
686 rc = pthread_create(&spinner_th, 0, snapshot_spinner_cb, si);
688 perror("Unable to create spinner thread");
/* Spinner is cosmetic: on failure just disable progress and continue. */
689 show_progress = false;
/* Transfer loop: move up to SNAPSHOT_CHUNK bytes per iteration. */
695 todo = min(SNAPSHOT_CHUNK, rem);
700 done = write(snapfd, src, todo);
702 done = read(snapfd, src, todo);
704 perror("Failed to write in file");
708 src = (uint8_t *)src + done;
/* Wait for the spinner thread to observe completion and exit. */
714 rc = pthread_join(spinner_th, NULL);
716 perror("Unable to end spinner thread");
/*
 * Save (op_wr) or restore (!op_wr) the entire guest memory: the low segment
 * at baseaddr and, if present, the high segment at baseaddr + 4 GB.  On
 * restore, the snapshot's recorded size must match the VM's configured
 * memory.  Installs a temporary SIGWINCH handler so the progress bar tracks
 * terminal resizes; returns the total bytes handled (return paths elided).
 */
723 vm_snapshot_mem(struct vmctx *ctx, int snapfd, size_t memsz, const bool op_wr)
726 size_t lowmem, highmem, totalmem;
729 ret = vm_get_guestmem_from_ctx(ctx, &baseaddr, &lowmem, &highmem);
731 fprintf(stderr, "%s: unable to retrieve guest memory size\r\n",
735 totalmem = lowmem + highmem;
/* Restore sanity check: snapshot size must equal the VM's memory size. */
737 if ((op_wr == false) && (totalmem != memsz)) {
738 fprintf(stderr, "%s: mem size mismatch: %ld vs %ld\r\n",
739 __func__, totalmem, memsz);
745 ioctl(STDOUT_FILENO, TIOCGWINSZ, &winsize);
746 #endif /* TIOCGWINSZ */
747 old_winch_handler = signal(SIGWINCH, winch_handler);
/* Low segment occupies file offsets [0, lowmem). */
749 ret = vm_snapshot_mem_part(snapfd, 0, baseaddr, lowmem,
752 fprintf(stderr, "%s: Could not %s lowmem\r\n",
753 __func__, op_wr ? "write" : "read");
/* High segment lives above the 4 GB hole in guest-physical space. */
761 ret = vm_snapshot_mem_part(snapfd, lowmem, baseaddr + 4*GB,
762 highmem, totalmem, op_wr);
764 fprintf(stderr, "%s: Could not %s highmem\r\n",
765 __func__, op_wr ? "write" : "read");
772 signal(SIGWINCH, old_winch_handler);
/*
 * Restore guest memory from the loaded restore_state; succeeds only when
 * the number of bytes restored equals the recorded vmmem length.
 */
778 restore_vm_mem(struct vmctx *ctx, struct restore_state *rstate)
782 restored = vm_snapshot_mem(ctx, rstate->vmmem_fd, rstate->vmmem_len,
785 if (restored != rstate->vmmem_len)
/*
 * Restore every kernel structure listed in snapshot_kern_structs from the
 * mapped kernel-data file, handing each region to the kernel via
 * vm_snapshot_req() with op VM_SNAPSHOT_RESTORE.  Missing or empty entries
 * are fatal (errx).
 */
792 vm_restore_kern_structs(struct vmctx *ctx, struct restore_state *rstate)
794 for (unsigned i = 0; i < nitems(snapshot_kern_structs); i++) {
795 const struct vm_snapshot_kern_info *info;
796 struct vm_snapshot_meta *meta;
800 info = &snapshot_kern_structs[i];
801 data = lookup_dev(info->struct_name, JSON_KERNEL_ARR_KEY, rstate, &size);
803 errx(EX_DATAERR, "Cannot find kern struct %s",
807 errx(EX_DATAERR, "data with zero size for %s",
/* Per-struct metadata; buffer points straight into the mmap'ed file. */
810 meta = &(struct vm_snapshot_meta) {
811 .dev_name = info->struct_name,
812 .dev_req = info->req,
814 .buffer.buf_start = data,
815 .buffer.buf_size = size,
818 .buffer.buf_rem = size,
820 .op = VM_SNAPSHOT_RESTORE,
823 if (vm_snapshot_req(ctx, meta))
824 err(EX_DATAERR, "Failed to restore %s",
/*
 * Restore one emulated device's state: locate its region in the "devices"
 * metadata array and invoke the device's snapshot callback 'func' with a
 * VM_SNAPSHOT_RESTORE meta describing that region.
 */
831 vm_restore_device(struct restore_state *rstate, vm_snapshot_dev_cb func,
832 const char *name, void *data)
837 struct vm_snapshot_meta *meta;
839 dev_ptr = lookup_dev(name, JSON_DEV_ARR_KEY, rstate, &dev_size);
841 if (dev_ptr == NULL) {
842 EPRINTLN("Failed to lookup dev: %s", name);
847 EPRINTLN("Restore device size is 0: %s", name);
851 meta = &(struct vm_snapshot_meta) {
855 .buffer.buf_start = dev_ptr,
856 .buffer.buf_size = dev_size,
858 .buffer.buf = dev_ptr,
859 .buffer.buf_rem = dev_size,
861 .op = VM_SNAPSHOT_RESTORE,
866 EPRINTLN("Failed to restore dev: %s %d", name, ret);
/*
 * Restore all PCI devices (via pci_snapshot) and then the AT keyboard
 * controller.  Stops at the first failure (error checks partially elided).
 */
874 vm_restore_devices(struct restore_state *rstate)
877 struct pci_devinst *pdi = NULL;
879 while ((pdi = pci_next(pdi)) != NULL) {
880 ret = vm_restore_device(rstate, pci_snapshot, pdi->pi_name, pdi);
886 ret = vm_restore_device(rstate, atkbdc_snapshot, "atkbdc", NULL);
/*
 * Pause every PCI device before taking a checkpoint so their state is
 * quiescent while it is serialized.
 */
894 vm_pause_devices(void)
897 struct pci_devinst *pdi = NULL;
899 while ((pdi = pci_next(pdi)) != NULL) {
900 ret = pci_pause(pdi);
902 EPRINTLN("Cannot pause dev %s: %d", pdi->pi_name, ret);
/* Counterpart of vm_pause_devices(): resume every PCI device. */
911 vm_resume_devices(void)
914 struct pci_devinst *pdi = NULL;
916 while ((pdi = pci_next(pdi)) != NULL) {
917 ret = pci_resume(pdi);
919 EPRINTLN("Cannot resume '%s': %d", pdi->pi_name, ret);
/*
 * Snapshot one kernel structure: ask the kernel to serialize it into
 * meta->buffer, append the raw bytes to 'data_fd', and emit a JSON metadata
 * instance (device name, size, file offset) under 'array_key', advancing
 * *offset by the bytes written.
 */
928 vm_save_kern_struct(struct vmctx *ctx, int data_fd, xo_handle_t *xop,
929 const char *array_key, struct vm_snapshot_meta *meta, off_t *offset)
935 ret = vm_snapshot_req(ctx, meta);
937 fprintf(stderr, "%s: Failed to snapshot struct %s\r\n",
938 __func__, meta->dev_name);
943 data_size = vm_get_snapshot_size(meta);
945 /* XXX-MJ no handling for short writes. */
946 write_cnt = write(data_fd, meta->buffer.buf_start, data_size);
947 if (write_cnt < 0 || (size_t)write_cnt != data_size) {
948 perror("Failed to write all snapshotted data.");
953 /* Write metadata. */
954 xo_open_instance_h(xop, array_key);
955 xo_emit_h(xop, "{:" JSON_SNAPSHOT_REQ_KEY "/%s}\n",
957 xo_emit_h(xop, "{:" JSON_SIZE_KEY "/%lu}\n", data_size);
958 xo_emit_h(xop, "{:" JSON_FILE_OFFSET_KEY "/%lu}\n", *offset);
/* NOTE(review): instance is opened with 'array_key' but closed with the
 * fixed JSON_KERNEL_ARR_KEY, and the caller below passes JSON_DEV_ARR_KEY —
 * these three should agree; verify against upstream before changing. */
959 xo_close_instance_h(xop, JSON_KERNEL_ARR_KEY);
961 *offset += data_size;
/*
 * Snapshot all kernel structures into 'data_fd', emitting the
 * "kern_structs" JSON list into the metadata handle 'xop'.  A single
 * SNAPSHOT_BUFFER_SIZE scratch buffer is reused for every struct.
 */
968 vm_save_kern_structs(struct vmctx *ctx, int data_fd, xo_handle_t *xop)
971 size_t buf_size, i, offset;
973 struct vm_snapshot_meta *meta;
977 buf_size = SNAPSHOT_BUFFER_SIZE;
979 buffer = malloc(SNAPSHOT_BUFFER_SIZE * sizeof(char));
980 if (buffer == NULL) {
982 perror("Failed to allocate memory for snapshot buffer");
983 goto err_vm_snapshot_kern_data;
986 meta = &(struct vm_snapshot_meta) {
987 .buffer.buf_start = buffer,
988 .buffer.buf_size = buf_size,
990 .op = VM_SNAPSHOT_SAVE,
993 xo_open_list_h(xop, JSON_KERNEL_ARR_KEY);
994 for (i = 0; i < nitems(snapshot_kern_structs); i++) {
995 meta->dev_name = snapshot_kern_structs[i].struct_name;
996 meta->dev_req = snapshot_kern_structs[i].req;
/* Reset the scratch buffer before each struct is serialized. */
998 memset(meta->buffer.buf_start, 0, meta->buffer.buf_size);
999 meta->buffer.buf = meta->buffer.buf_start;
1000 meta->buffer.buf_rem = meta->buffer.buf_size;
1002 ret = vm_save_kern_struct(ctx, data_fd, xop,
1003 JSON_DEV_ARR_KEY, meta, &offset);
1006 goto err_vm_snapshot_kern_data;
1009 xo_close_list_h(xop, JSON_KERNEL_ARR_KEY);
/* Cleanup path (buffer free is on elided lines). */
1011 err_vm_snapshot_kern_data:
/*
 * Emit the "basic metadata" JSON container: vCPU count, VM name, memory
 * size and memory flags.  Read back at restore time by the lookup_* helpers.
 */
1018 vm_snapshot_basic_metadata(struct vmctx *ctx, xo_handle_t *xop, size_t memsz)
1021 xo_open_container_h(xop, JSON_BASIC_METADATA_KEY);
1022 xo_emit_h(xop, "{:" JSON_NCPUS_KEY "/%ld}\n", guest_ncpus);
1023 xo_emit_h(xop, "{:" JSON_VMNAME_KEY "/%s}\n", vm_get_name(ctx));
1024 xo_emit_h(xop, "{:" JSON_MEMSIZE_KEY "/%lu}\n", memsz);
1025 xo_emit_h(xop, "{:" JSON_MEMFLAGS_KEY "/%d}\n", vm_get_memflags(ctx));
1026 xo_close_container_h(xop, JSON_BASIC_METADATA_KEY);
/*
 * Append the serialized device state in meta->buffer to 'data_fd' and emit
 * its JSON metadata instance (name, size, file offset) under 'array_key',
 * advancing *offset past the written bytes.
 */
1032 vm_snapshot_dev_write_data(int data_fd, xo_handle_t *xop, const char *array_key,
1033 struct vm_snapshot_meta *meta, off_t *offset)
1038 data_size = vm_get_snapshot_size(meta);
1040 /* XXX-MJ no handling for short writes. */
1041 ret = write(data_fd, meta->buffer.buf_start, data_size);
1042 if (ret < 0 || (size_t)ret != data_size) {
1043 perror("Failed to write all snapshotted data.");
1047 /* Write metadata. */
1048 xo_open_instance_h(xop, array_key);
1049 xo_emit_h(xop, "{:" JSON_SNAPSHOT_REQ_KEY "/%s}\n", meta->dev_name);
1050 xo_emit_h(xop, "{:" JSON_SIZE_KEY "/%lu}\n", data_size);
1051 xo_emit_h(xop, "{:" JSON_FILE_OFFSET_KEY "/%lu}\n", *offset);
1052 xo_close_instance_h(xop, array_key);
1054 *offset += data_size;
/*
 * Snapshot one device: reset the shared scratch buffer, invoke the device's
 * snapshot callback 'func', then persist the result via
 * vm_snapshot_dev_write_data() under the "devices" JSON array.
 */
1060 vm_snapshot_device(vm_snapshot_dev_cb func, const char *dev_name,
1061 void *devdata, int data_fd, xo_handle_t *xop,
1062 struct vm_snapshot_meta *meta, off_t *offset)
1066 memset(meta->buffer.buf_start, 0, meta->buffer.buf_size);
1067 meta->buffer.buf = meta->buffer.buf_start;
1068 meta->buffer.buf_rem = meta->buffer.buf_size;
1069 meta->dev_name = dev_name;
1070 meta->dev_data = devdata;
1074 EPRINTLN("Failed to snapshot %s; ret=%d", dev_name, ret);
1078 ret = vm_snapshot_dev_write_data(data_fd, xop, JSON_DEV_ARR_KEY, meta,
/*
 * Snapshot every emulated device (all PCI devices, then the AT keyboard
 * controller) into 'data_fd', emitting the "devices" JSON list.  Device data
 * is appended at the file's current offset, after the kernel-struct data.
 */
1087 vm_snapshot_devices(int data_fd, xo_handle_t *xop)
1093 struct vm_snapshot_meta *meta;
1094 struct pci_devinst *pdi;
1096 buf_size = SNAPSHOT_BUFFER_SIZE;
/* Device regions are recorded relative to the current end of the file. */
1098 offset = lseek(data_fd, 0, SEEK_CUR);
1100 perror("Failed to get data file current offset.");
1104 buffer = malloc(buf_size);
1105 if (buffer == NULL) {
1106 perror("Failed to allocate memory for snapshot buffer");
/* Shared scratch meta, re-initialized per device by vm_snapshot_device(). */
1111 meta = &(struct vm_snapshot_meta) {
1112 .buffer.buf_start = buffer,
1113 .buffer.buf_size = buf_size,
1115 .op = VM_SNAPSHOT_SAVE,
1118 xo_open_list_h(xop, JSON_DEV_ARR_KEY);
1120 /* Save PCI devices */
1122 while ((pdi = pci_next(pdi)) != NULL) {
1123 ret = vm_snapshot_device(pci_snapshot, pdi->pi_name, pdi,
1124 data_fd, xop, meta, &offset);
1130 ret = vm_snapshot_device(atkbdc_snapshot, "atkbdc", NULL,
1131 data_fd, xop, meta, &offset);
1136 xo_close_list_h(xop, JSON_DEV_ARR_KEY);
/*
 * Register a newly started vCPU.  If a checkpoint is in progress the vCPU
 * immediately parks itself (counted in vcpus_suspended) until the checkpoint
 * completes, so it cannot run while state is being serialized.
 */
1145 checkpoint_cpu_add(int vcpu)
1148 pthread_mutex_lock(&vcpu_lock);
1149 CPU_SET(vcpu, &vcpus_active);
1151 if (checkpoint_active) {
1152 CPU_SET(vcpu, &vcpus_suspended);
1153 while (checkpoint_active)
1154 pthread_cond_wait(&vcpus_can_run, &vcpu_lock);
1155 CPU_CLR(vcpu, &vcpus_suspended);
1157 pthread_mutex_unlock(&vcpu_lock);
1161 * When a vCPU is suspended for any reason, it calls
1162 * checkpoint_cpu_suspend(). This records that the vCPU is idle.
1163 * Before returning from suspension, checkpoint_cpu_resume() is
1164 * called. In suspend we note that the vCPU is idle. In resume we
1165 * pause the vCPU thread until the checkpoint is complete. The reason
1166 * for the two-step process is that vCPUs might already be stopped in
1167 * the debug server when a checkpoint is requested. This approach
1168 * allows us to account for and handle those vCPUs.
/* Mark this vCPU idle; wake the checkpointer once all active vCPUs are. */
1171 checkpoint_cpu_suspend(int vcpu)
1174 pthread_mutex_lock(&vcpu_lock);
1175 CPU_SET(vcpu, &vcpus_suspended);
1176 if (checkpoint_active && CPU_CMP(&vcpus_active, &vcpus_suspended) == 0)
1177 pthread_cond_signal(&vcpus_idle);
1178 pthread_mutex_unlock(&vcpu_lock);
/* Block until any in-progress checkpoint finishes, then un-park this vCPU. */
1182 checkpoint_cpu_resume(int vcpu)
1185 pthread_mutex_lock(&vcpu_lock);
1186 while (checkpoint_active)
1187 pthread_cond_wait(&vcpus_can_run, &vcpu_lock);
1188 CPU_CLR(vcpu, &vcpus_suspended);
1189 pthread_mutex_unlock(&vcpu_lock);
/*
 * Begin a checkpoint: set checkpoint_active, ask the kernel to suspend all
 * vCPUs, and wait until every active vCPU has reported idle.
 */
1193 vm_vcpu_pause(struct vmctx *ctx)
1196 pthread_mutex_lock(&vcpu_lock);
1197 checkpoint_active = true;
1198 vm_suspend_all_cpus(ctx);
1199 while (CPU_CMP(&vcpus_active, &vcpus_suspended) != 0)
1200 pthread_cond_wait(&vcpus_idle, &vcpu_lock);
1201 pthread_mutex_unlock(&vcpu_lock);
/* End a checkpoint: clear the flag, resume vCPUs, and release waiters. */
1205 vm_vcpu_resume(struct vmctx *ctx)
1208 pthread_mutex_lock(&vcpu_lock);
1209 checkpoint_active = false;
1210 pthread_mutex_unlock(&vcpu_lock);
1211 vm_resume_all_cpus(ctx);
1212 pthread_cond_broadcast(&vcpus_can_run);
/*
 * Take a full checkpoint of the VM into 'checkpoint_file' (guest RAM),
 * '<file>.kern' (kernel + device state) and '<file>.meta' (JSON metadata),
 * all created relative to directory fd 'fddir'.  Devices and vCPUs are
 * paused around the serialization and resumed on the common exit path.
 * Several error checks and returns fall on lines elided from this view.
 */
1216 vm_checkpoint(struct vmctx *ctx, int fddir, const char *checkpoint_file,
1219 int fd_checkpoint = 0, kdata_fd = 0, fd_meta;
1223 xo_handle_t *xop = NULL;
1224 char *meta_filename = NULL;
1225 char *kdata_filename = NULL;
1226 FILE *meta_file = NULL;
1228 kdata_filename = strcat_extension(checkpoint_file, ".kern");
1229 if (kdata_filename == NULL) {
1230 fprintf(stderr, "Failed to construct kernel data filename.\n");
1234 kdata_fd = openat(fddir, kdata_filename, O_WRONLY | O_CREAT | O_TRUNC, 0700);
1236 perror("Failed to open kernel data snapshot file.");
1241 fd_checkpoint = openat(fddir, checkpoint_file, O_RDWR | O_CREAT | O_TRUNC, 0700);
1243 if (fd_checkpoint < 0) {
1244 perror("Failed to create checkpoint file");
1249 meta_filename = strcat_extension(checkpoint_file, ".meta");
1250 if (meta_filename == NULL) {
1251 fprintf(stderr, "Failed to construct vm metadata filename.\n");
1255 fd_meta = openat(fddir, meta_filename, O_WRONLY | O_CREAT | O_TRUNC, 0700);
/* fdopen() takes ownership of fd_meta; close via fclose(meta_file). */
1257 meta_file = fdopen(fd_meta, "w");
1258 if (meta_file == NULL) {
1259 perror("Failed to open vm metadata snapshot file.");
/* libxo handle renders the metadata stream as pretty-printed JSON. */
1264 xop = xo_create_to_file(meta_file, XO_STYLE_JSON, XOF_PRETTY);
1266 perror("Failed to get libxo handle on metadata file.");
1272 ret = vm_pause_devices();
1274 fprintf(stderr, "Could not pause devices\r\n");
/* memsz = 0, op_wr = true: write all guest memory, learn its size. */
1279 memsz = vm_snapshot_mem(ctx, fd_checkpoint, 0, true);
1281 perror("Could not write guest memory to file");
1286 ret = vm_snapshot_basic_metadata(ctx, xop, memsz);
1288 fprintf(stderr, "Failed to snapshot vm basic metadata.\n");
1293 ret = vm_save_kern_structs(ctx, kdata_fd, xop);
1295 fprintf(stderr, "Failed to snapshot vm kernel data.\n");
1300 ret = vm_snapshot_devices(kdata_fd, xop);
1302 fprintf(stderr, "Failed to snapshot device state.\n");
/* Common exit path: resume devices/vCPUs and release every resource. */
1315 ret = vm_resume_devices();
1317 fprintf(stderr, "Could not resume devices\r\n");
1318 vm_vcpu_resume(ctx);
1319 if (fd_checkpoint > 0)
1320 close(fd_checkpoint);
1321 if (meta_filename != NULL)
1322 free(meta_filename);
1323 if (kdata_filename != NULL)
1324 free(kdata_filename);
1327 if (meta_file != NULL)
/*
 * Dispatch one IPC request: read its "cmd" string and invoke the matching
 * handler from the ipc_cmd_set linker set; EOPNOTSUPP for unknown commands.
 */
1335 handle_message(struct vmctx *ctx, nvlist_t *nvl)
1338 struct ipc_command **ipc_cmd;
1340 if (!nvlist_exists_string(nvl, "cmd"))
1343 cmd = nvlist_get_string(nvl, "cmd");
1344 IPC_COMMAND_FOREACH(ipc_cmd, ipc_cmd_set) {
1345 if (strcmp(cmd, (*ipc_cmd)->name) == 0)
1346 return ((*ipc_cmd)->handler(ctx, nvl));
1349 return (EOPNOTSUPP);
1353 * Listen for commands from bhyvectl
/*
 * Checkpoint control thread: accept connections on the IPC socket, receive
 * one nvlist per connection and dispatch it via handle_message().
 */
1356 checkpoint_thread(void *param)
1359 struct checkpoint_thread_info *thread_info;
1362 pthread_set_name_np(pthread_self(), "checkpoint thread");
1363 thread_info = (struct checkpoint_thread_info *)param;
1365 while ((fd = accept(thread_info->socket_fd, NULL, NULL)) != -1) {
1366 nvl = nvlist_recv(fd, 0);
1368 handle_message(thread_info->ctx, nvl);
1370 EPRINTLN("nvlist_recv() failed: %s", strerror(errno));
1373 nvlist_destroy(nvl);
/*
 * IPC handler for the "checkpoint" command: validate the required nvlist
 * fields (filename, suspend flag, directory descriptor) and run
 * vm_checkpoint().
 */
1380 vm_do_checkpoint(struct vmctx *ctx, const nvlist_t *nvl)
1384 if (!nvlist_exists_string(nvl, "filename") ||
1385 !nvlist_exists_bool(nvl, "suspend") ||
1386 !nvlist_exists_descriptor(nvl, "fddir"))
1389 error = vm_checkpoint(ctx,
1390 nvlist_get_descriptor(nvl, "fddir"),
1391 nvlist_get_string(nvl, "filename"),
1392 nvlist_get_bool(nvl, "suspend"));
/* Register the handler in the ipc_cmd_set linker set. */
1396 IPC_COMMAND(ipc_cmd_set, checkpoint, vm_do_checkpoint);
1399 * Create the listening socket for IPC with bhyvectl
/*
 * Create the per-VM unix-domain IPC socket under BHYVE_RUN_DIR, restrict it
 * with Capsicum rights where available, and spawn checkpoint_thread() to
 * service it.  On failure the socket path is unlinked and resources freed.
 */
1402 init_checkpoint_thread(struct vmctx *ctx)
1404 struct checkpoint_thread_info *checkpoint_info = NULL;
1405 struct sockaddr_un addr;
1407 pthread_t checkpoint_pthread;
1409 #ifndef WITHOUT_CAPSICUM
1410 cap_rights_t rights;
1413 memset(&addr, 0, sizeof(addr));
1415 socket_fd = socket(PF_UNIX, SOCK_STREAM, 0);
1416 if (socket_fd < 0) {
1417 EPRINTLN("Socket creation failed: %s", strerror(errno));
1422 addr.sun_family = AF_UNIX;
/* Socket path is BHYVE_RUN_DIR + VM name; remove any stale socket first. */
1424 snprintf(addr.sun_path, sizeof(addr.sun_path), "%s%s",
1425 BHYVE_RUN_DIR, vm_get_name(ctx));
1426 addr.sun_len = SUN_LEN(&addr);
1427 unlink(addr.sun_path);
1429 if (bind(socket_fd, (struct sockaddr *)&addr, addr.sun_len) != 0) {
1430 EPRINTLN("Failed to bind socket \"%s\": %s\n",
1431 addr.sun_path, strerror(errno));
1436 if (listen(socket_fd, 10) < 0) {
1437 EPRINTLN("ipc socket listen: %s\n", strerror(errno));
1442 #ifndef WITHOUT_CAPSICUM
/* Limit the listening socket to accept/recv/send-style rights only. */
1443 cap_rights_init(&rights, CAP_ACCEPT, CAP_READ, CAP_RECV, CAP_WRITE,
1444 CAP_SEND, CAP_GETSOCKOPT);
1446 if (caph_rights_limit(socket_fd, &rights) == -1)
1447 errx(EX_OSERR, "Unable to apply rights for sandbox");
/* NOTE(review): calloc() result is used without a NULL check here. */
1449 checkpoint_info = calloc(1, sizeof(*checkpoint_info));
1450 checkpoint_info->ctx = ctx;
1451 checkpoint_info->socket_fd = socket_fd;
1453 err = pthread_create(&checkpoint_pthread, NULL, checkpoint_thread,
/* Error path: free the info block and remove the socket file. */
1460 free(checkpoint_info);
1463 unlink(addr.sun_path);
/*
 * Print a uniform error message for a failed buffer save/restore, naming the
 * buffer and the direction ("save"/"restore" strings assigned on elided
 * lines based on 'op').
 */
1469 vm_snapshot_buf_err(const char *bufname, const enum vm_snapshot_op op)
1473 if (op == VM_SNAPSHOT_SAVE)
1475 else if (op == VM_SNAPSHOT_RESTORE)
1480 fprintf(stderr, "%s: snapshot-%s failed for %s\r\n",
1481 __func__, __op, bufname);
/*
 * Copy 'data_size' bytes between caller memory and the snapshot buffer in
 * 'meta': into the buffer on save, out of it on restore.  Fails when fewer
 * than data_size bytes remain; on success the buffer cursor and remaining
 * count are advanced.
 */
1485 vm_snapshot_buf(void *data, size_t data_size, struct vm_snapshot_meta *meta)
1487 struct vm_snapshot_buffer *buffer;
1490 buffer = &meta->buffer;
1493 if (buffer->buf_rem < data_size) {
1494 fprintf(stderr, "%s: buffer too small\r\n", __func__);
1498 if (op == VM_SNAPSHOT_SAVE)
1499 memcpy(buffer->buf, data, data_size);
1500 else if (op == VM_SNAPSHOT_RESTORE)
1501 memcpy(data, buffer->buf, data_size);
/* Consume the copied region. */
1505 buffer->buf += data_size;
1506 buffer->buf_rem -= data_size;
/*
 * Return how many bytes have been consumed from the snapshot buffer
 * (buf_size - buf_rem), i.e. the size of the serialized data; a remaining
 * count larger than the buffer size indicates corruption and is rejected.
 */
1512 vm_get_snapshot_size(struct vm_snapshot_meta *meta)
1515 struct vm_snapshot_buffer *buffer;
1517 buffer = &meta->buffer;
1519 if (buffer->buf_size < buffer->buf_rem) {
1520 fprintf(stderr, "%s: Invalid buffer: size = %zu, rem = %zu\r\n",
1521 __func__, buffer->buf_size, buffer->buf_rem);
1524 length = buffer->buf_size - buffer->buf_rem;
/*
 * Snapshot/restore a host pointer by translating it to a guest-physical
 * address: on save, host -> guest-phys is serialized; on restore, the
 * guest-phys value is read back and translated to a host address.  A value
 * of -1 encodes NULL when 'restore_null' permits it.
 */
1531 vm_snapshot_guest2host_addr(struct vmctx *ctx, void **addrp, size_t len,
1532 bool restore_null, struct vm_snapshot_meta *meta)
1537 if (meta->op == VM_SNAPSHOT_SAVE) {
1538 gaddr = paddr_host2guest(ctx, *addrp);
1539 if (gaddr == (vm_paddr_t) -1) {
/* Untranslatable non-NULL pointers (or NULL when disallowed) are errors. */
1540 if (!restore_null ||
1541 (restore_null && (*addrp != NULL))) {
1547 SNAPSHOT_VAR_OR_LEAVE(gaddr, meta, ret, done);
1548 } else if (meta->op == VM_SNAPSHOT_RESTORE) {
1549 SNAPSHOT_VAR_OR_LEAVE(gaddr, meta, ret, done);
1550 if (gaddr == (vm_paddr_t) -1) {
1551 if (!restore_null) {
1557 *addrp = paddr_guest2host(ctx, gaddr, len);
1567 vm_snapshot_buf_cmp(void *data, size_t data_size, struct vm_snapshot_meta *meta)
1569 struct vm_snapshot_buffer *buffer;
1573 buffer = &meta->buffer;
1576 if (buffer->buf_rem < data_size) {
1577 fprintf(stderr, "%s: buffer too small\r\n", __func__);
1582 if (op == VM_SNAPSHOT_SAVE) {
1584 memcpy(buffer->buf, data, data_size);
1585 } else if (op == VM_SNAPSHOT_RESTORE) {
1586 ret = memcmp(data, buffer->buf, data_size);
1592 buffer->buf += data_size;
1593 buffer->buf_rem -= data_size;