/*
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017-2018 John H. Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/endian.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <machine/atomic.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>
#include <netinet/in.h>
#include <assert.h>
#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <netdb.h>
#include <pthread.h>
#include <pthread_np.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "config.h"
#include "gdb.h"
#include "mem.h"
#include "mevent.h"
/*
 * GDB_SIGNAL_* numbers are part of the GDB remote protocol.  Most stops
 * use SIGTRAP.
 */
#define	GDB_SIGNAL_TRAP		5
static void gdb_resume_vcpus(void);
static void check_command(int fd);

static struct mevent *read_event, *write_event;

static cpuset_t vcpus_active, vcpus_suspended, vcpus_waiting;
static pthread_mutex_t gdb_lock;
static pthread_cond_t idle_vcpus;
static bool first_stop, report_next_stop, swbreak_enabled;
/*
 * An I/O buffer contains 'capacity' bytes of room at 'data'.  For a
 * read buffer, 'start' is unused and 'len' contains the number of
 * valid bytes in the buffer.  For a write buffer, 'start' is set to
 * the index of the next byte in 'data' to send, and 'len' contains
 * the remaining number of valid bytes to send.
 */
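/*
 * A minimal sketch of the buffer structure implied by the comment
 * above; only the fields used by the io_buffer_*() helpers below are
 * shown.  For example, with capacity 8, start 2, and len 3, the head
 * is data + 2, the tail is data + 5, and 3 bytes of room remain.
 */
struct io_buffer {
	uint8_t *data;
	size_t capacity;
	size_t start;
	size_t len;
};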
struct breakpoint {
	uint64_t gpa;
	uint8_t shadow_inst;
	TAILQ_ENTRY(breakpoint) link;
};
/*
 * When a vCPU stops due to an event that should be reported to the
 * debugger, information about the event is stored in this structure.
 * The vCPU thread then sets 'stopped_vcpu' if it is not already set
 * and stops other vCPUs so the event can be reported.  The
 * report_stop() function reports the event for the 'stopped_vcpu'
 * vCPU.  When the debugger resumes execution via continue or step,
 * the event for 'stopped_vcpu' is cleared.  vCPUs will loop in their
 * event handlers until the associated event is reported or disabled.
 *
 * An idle vCPU will have all of the boolean fields set to false.
 *
 * When a vCPU is stepped, 'stepping' is set to true when the vCPU is
 * released to execute the stepped instruction.  When the vCPU reports
 * the stepping trap, 'stepped' is set.
 *
 * When a vCPU hits a breakpoint set by the debug server,
 * 'hit_swbreak' is set to true.
 */
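/*
 * A minimal sketch of the per-vCPU state described above; only the
 * three boolean fields referenced by the code below are shown.
 */
struct vcpu_state {
	bool stepping;
	bool stepped;
	bool hit_swbreak;
};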
static struct io_buffer cur_comm, cur_resp;
static uint8_t cur_csum;
static struct vmctx *ctx;
static int cur_fd = -1;
static TAILQ_HEAD(, breakpoint) breakpoints;
static struct vcpu_state *vcpu_state;
static int cur_vcpu, stopped_vcpu;
static bool gdb_active = false;
static const int gdb_regset[] = {
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15,
	VM_REG_GUEST_RIP,
	VM_REG_GUEST_RFLAGS,
	VM_REG_GUEST_CS,
	VM_REG_GUEST_SS,
	VM_REG_GUEST_DS,
	VM_REG_GUEST_ES,
	VM_REG_GUEST_FS,
	VM_REG_GUEST_GS
};

static const int gdb_regsize[] = {
	8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8,
	8, 4, 4, 4, 4, 4, 4, 4
};
static void __printflike(1, 2)
debug(const char *fmt, ...)
{
	static FILE *logfile;
	va_list ap;

	if (logfile == NULL) {
		logfile = fopen("/tmp/bhyve_gdb.log", "w");
		if (logfile == NULL)
			return;
#ifndef WITHOUT_CAPSICUM
		if (caph_limit_stream(fileno(logfile), CAPH_WRITE) == -1) {
			fclose(logfile);
			logfile = NULL;
			return;
		}
#endif
	}
	va_start(ap, fmt);
	vfprintf(logfile, fmt, ap);
	va_end(ap);
	fflush(logfile);
}
static void remove_all_sw_breakpoints(void);

static int
guest_paging_info(int vcpu, struct vm_guest_paging *paging)
{
	uint64_t regs[4];
	const int regset[4] = {
		VM_REG_GUEST_CR0,
		VM_REG_GUEST_CR3,
		VM_REG_GUEST_CR4,
		VM_REG_GUEST_EFER
	};

	if (vm_get_register_set(ctx, vcpu, nitems(regset), regset, regs) == -1)
		return (-1);

	/*
	 * For the debugger, always pretend to be the kernel (CPL 0),
	 * and if long-mode is enabled, always parse addresses as if
	 * in 64-bit mode.
	 */
	paging->cr3 = regs[1];
	paging->cpl = 0;
	if (regs[3] & EFER_LMA)
		paging->cpu_mode = CPU_MODE_64BIT;
	else if (regs[0] & CR0_PE)
		paging->cpu_mode = CPU_MODE_PROTECTED;
	else
		paging->cpu_mode = CPU_MODE_REAL;
	if (!(regs[0] & CR0_PG))
		paging->paging_mode = PAGING_MODE_FLAT;
	else if (!(regs[2] & CR4_PAE))
		paging->paging_mode = PAGING_MODE_32;
	else if (regs[3] & EFER_LME)
		paging->paging_mode = (regs[2] & CR4_LA57) ?
		    PAGING_MODE_64_LA57 : PAGING_MODE_64;
	else
		paging->paging_mode = PAGING_MODE_PAE;
	return (0);
}
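/*
 * Worked example (illustrative register values): CR0.PG=1 with
 * CR4.PAE=0 selects 2-level 32-bit paging; CR0.PG=1, CR4.PAE=1, and
 * EFER.LME=1 select 4-level long-mode paging, or 5-level when
 * CR4.LA57 is also set.
 */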
/*
 * Map a guest virtual address to a physical address (for a given vcpu).
 * If a guest virtual address is valid, return 1.  If the address is
 * not valid, return 0.  If an error occurs obtaining the mapping,
 * return -1.
 */
static int
guest_vaddr2paddr(int vcpu, uint64_t vaddr, uint64_t *paddr)
{
	struct vm_guest_paging paging;
	int fault;

	if (guest_paging_info(vcpu, &paging) == -1)
		return (-1);

	/*
	 * Always use PROT_READ.  We really care if the VA is
	 * accessible, not if the current vCPU can write.
	 */
	if (vm_gla2gpa_nofault(ctx, vcpu, &paging, vaddr, PROT_READ, paddr,
	    &fault) == -1)
		return (-1);
	if (fault)
		return (0);
	return (1);
}
static void
io_buffer_reset(struct io_buffer *io)
{

	io->start = 0;
	io->len = 0;
}

/* Available room for adding data. */
static size_t
io_buffer_avail(struct io_buffer *io)
{

	return (io->capacity - (io->start + io->len));
}

static uint8_t *
io_buffer_head(struct io_buffer *io)
{

	return (io->data + io->start);
}

static uint8_t *
io_buffer_tail(struct io_buffer *io)
{

	return (io->data + io->start + io->len);
}

static void
io_buffer_advance(struct io_buffer *io, size_t amount)
{

	assert(amount <= io->len);
	io->start += amount;
	io->len -= amount;
}

static void
io_buffer_consume(struct io_buffer *io, size_t amount)
{

	io_buffer_advance(io, amount);
	if (io->len == 0) {
		io->start = 0;
		return;
	}

	/*
	 * XXX: Consider making this move optional and compacting on a
	 * future read() before realloc().
	 */
	memmove(io->data, io_buffer_head(io), io->len);
	io->start = 0;
}
static void
io_buffer_grow(struct io_buffer *io, size_t newsize)
{
	uint8_t *new_data;
	size_t avail, new_cap;

	avail = io_buffer_avail(io);
	if (newsize <= avail)
		return;

	new_cap = io->capacity + (newsize - avail);
	new_data = realloc(io->data, new_cap);
	if (new_data == NULL)
		err(1, "Failed to grow GDB I/O buffer");
	io->data = new_data;
	io->capacity = new_cap;
}
static bool
response_pending(void)
{

	if (cur_resp.start == 0 && cur_resp.len == 0)
		return (false);
	if (cur_resp.start + cur_resp.len == 1 && cur_resp.data[0] == '+')
		return (false);
	return (true);
}
static void
close_connection(void)
{

	/*
	 * XXX: This triggers a warning because mevent does the close
	 * before the EV_DELETE.
	 */
	pthread_mutex_lock(&gdb_lock);
	mevent_delete(write_event);
	mevent_delete_close(read_event);
	io_buffer_reset(&cur_comm);
	io_buffer_reset(&cur_resp);

	remove_all_sw_breakpoints();

	/* Clear any pending events. */
	memset(vcpu_state, 0, guest_ncpus * sizeof(*vcpu_state));

	/* Resume any stopped vCPUs. */
	gdb_resume_vcpus();
	pthread_mutex_unlock(&gdb_lock);
}
static uint8_t
hex_digit(uint8_t nibble)
{

	if (nibble <= 9)
		return (nibble + '0');
	else
		return (nibble + 'a' - 10);
}

static uint8_t
parse_digit(uint8_t v)
{

	if (v >= '0' && v <= '9')
		return (v - '0');
	if (v >= 'a' && v <= 'f')
		return (v - 'a' + 10);
	if (v >= 'A' && v <= 'F')
		return (v - 'A' + 10);
	/* 'v' is not a valid hexadecimal digit. */
	return (0xF);
}
/* Parses big-endian hexadecimal. */
static uintmax_t
parse_integer(const uint8_t *p, size_t len)
{
	uintmax_t v;

	v = 0;
	while (len > 0) {
		v <<= 4;
		v |= parse_digit(*p);
		p++;
		len--;
	}
	return (v);
}

static uint8_t
parse_byte(const uint8_t *p)
{

	return (parse_digit(p[0]) << 4 | parse_digit(p[1]));
}
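/*
 * Worked examples (hypothetical input): parse_integer("dead", 4)
 * yields 0xdead, and parse_byte("7f") yields 0x7f.
 */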
static void
send_pending_data(int fd)
{
	ssize_t nwritten;

	if (cur_resp.len == 0) {
		mevent_disable(write_event);
		return;
	}
	nwritten = write(fd, io_buffer_head(&cur_resp), cur_resp.len);
	if (nwritten == -1) {
		warn("Write to GDB socket failed");
		close_connection();
	} else {
		io_buffer_advance(&cur_resp, nwritten);
		if (cur_resp.len == 0)
			mevent_disable(write_event);
		else
			mevent_enable(write_event);
	}
}
/* Append a single character to the output buffer. */
static void
send_char(uint8_t data)
{

	io_buffer_grow(&cur_resp, 1);
	*io_buffer_tail(&cur_resp) = data;
	cur_resp.len++;
}

/* Append an array of bytes to the output buffer. */
static void
send_data(const uint8_t *data, size_t len)
{

	io_buffer_grow(&cur_resp, len);
	memcpy(io_buffer_tail(&cur_resp), data, len);
	cur_resp.len += len;
}

static void
format_byte(uint8_t v, uint8_t *buf)
{

	buf[0] = hex_digit(v >> 4);
	buf[1] = hex_digit(v & 0xf);
}
/*
 * Append a single byte (formatted as two hex characters) to the
 * pending response.
 */
static void
send_byte(uint8_t v)
{
	uint8_t buf[2];

	format_byte(v, buf);
	send_data(buf, sizeof(buf));
}

static void
start_packet(void)
{

	send_char('$');
	cur_csum = 0;
}

static void
finish_packet(void)
{

	send_char('#');
	send_byte(cur_csum);
	debug("-> %.*s\n", (int)cur_resp.len, io_buffer_head(&cur_resp));
}
/*
 * Append a single character (for the packet payload) and update the
 * checksum.
 */
static void
append_char(uint8_t v)
{

	send_char(v);
	cur_csum += v;
}

/*
 * Append an array of bytes (for the packet payload) and update the
 * checksum.
 */
static void
append_packet_data(const uint8_t *data, size_t len)
{

	send_data(data, len);
	while (len > 0) {
		cur_csum += *data;
		data++;
		len--;
	}
}

static void
append_string(const char *str)
{

	append_packet_data((const uint8_t *)str, strlen(str));
}

static void
append_byte(uint8_t v)
{
	uint8_t buf[2];

	format_byte(v, buf);
	append_packet_data(buf, sizeof(buf));
}
static void
append_unsigned_native(uintmax_t value, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		append_byte(value);
		value >>= 8;
	}
}

static void
append_unsigned_be(uintmax_t value, size_t len)
{
	uint8_t buf[len * 2];
	size_t i;

	for (i = 0; i < len; i++) {
		format_byte(value, buf + (len - i - 1) * 2);
		value >>= 8;
	}
	append_packet_data(buf, sizeof(buf));
}

static void
append_integer(unsigned int value)
{

	if (value == 0)
		append_char('0');
	else
		append_unsigned_be(value, (fls(value) + 7) / 8);
}
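/*
 * Worked example: append_integer(0x1234) uses two bytes
 * (fls(0x1234) == 13) and appends the four characters "1234".
 */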
static void
append_asciihex(const char *str)
{

	while (*str != '\0') {
		append_byte(*str);
		str++;
	}
}
static void
send_empty_response(void)
{

	start_packet();
	finish_packet();
}

static void
send_ok(void)
{

	start_packet();
	append_string("OK");
	finish_packet();
}

static void
send_error(int error)
{

	start_packet();
	append_char('E');
	append_byte(error);
	finish_packet();
}
static int
parse_threadid(const uint8_t *data, size_t len)
{

	if (len == 1 && *data == '0')
		return (0);
	if (len == 2 && memcmp(data, "-1", 2) == 0)
		return (-1);
	if (len == 0)
		return (-2);
	return (parse_integer(data, len));
}
/*
 * Report the current stop event to the debugger.  If the stop is due
 * to an event triggered on a specific vCPU such as a breakpoint or
 * stepping trap, stopped_vcpu will be set to the vCPU triggering the
 * stop.  If 'set_cur_vcpu' is true, then cur_vcpu will be updated to
 * the reporting vCPU for vCPU events.
 */
static void
report_stop(bool set_cur_vcpu)
{
	struct vcpu_state *vs;

	start_packet();
	if (stopped_vcpu == -1) {
		append_char('S');
		append_byte(GDB_SIGNAL_TRAP);
	} else {
		vs = &vcpu_state[stopped_vcpu];
		if (set_cur_vcpu)
			cur_vcpu = stopped_vcpu;
		append_char('T');
		append_byte(GDB_SIGNAL_TRAP);
		append_string("thread:");
		append_integer(stopped_vcpu + 1);
		append_char(';');
		if (vs->hit_swbreak) {
			debug("$vCPU %d reporting swbreak\n", stopped_vcpu);
			if (swbreak_enabled)
				append_string("swbreak:;");
		} else if (vs->stepped)
			debug("$vCPU %d reporting step\n", stopped_vcpu);
		else
			debug("$vCPU %d reporting ???\n", stopped_vcpu);
	}
	finish_packet();
	report_next_stop = false;
}
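/*
 * Example stop replies as they appear on the wire (illustrative):
 * "S05" when no specific vCPU triggered the stop, or
 * "T05thread:01;swbreak:;" when vCPU 0 (reported as thread 1) hit a
 * software breakpoint.
 */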
/*
 * If this stop is due to a vCPU event, clear that event to mark it as
 * acknowledged.
 */
static void
discard_stop(void)
{
	struct vcpu_state *vs;

	if (stopped_vcpu != -1) {
		vs = &vcpu_state[stopped_vcpu];
		vs->hit_swbreak = false;
		vs->stepped = false;
		stopped_vcpu = -1;
	}
	report_next_stop = true;
}
static void
gdb_finish_suspend_vcpus(void)
{

	if (first_stop) {
		first_stop = false;
		stopped_vcpu = -1;
	} else if (report_next_stop) {
		assert(!response_pending());
		report_stop(true);
		send_pending_data(cur_fd);
	}
}
/*
 * vCPU threads invoke this function whenever the vCPU enters the
 * debug server to pause or report an event.  vCPU threads wait here
 * as long as the debug server keeps them suspended.
 */
static void
_gdb_cpu_suspend(int vcpu, bool report_stop)
{

	debug("$vCPU %d suspending\n", vcpu);
	CPU_SET(vcpu, &vcpus_waiting);
	if (report_stop && CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
		gdb_finish_suspend_vcpus();
	while (CPU_ISSET(vcpu, &vcpus_suspended))
		pthread_cond_wait(&idle_vcpus, &gdb_lock);
	CPU_CLR(vcpu, &vcpus_waiting);
	debug("$vCPU %d resuming\n", vcpu);
}
/*
 * Invoked at the start of a vCPU thread's execution to inform the
 * debug server about the new thread.
 */
void
gdb_cpu_add(int vcpu)
{

	if (!gdb_active)
		return;
	debug("$vCPU %d starting\n", vcpu);
	pthread_mutex_lock(&gdb_lock);
	assert(vcpu < guest_ncpus);
	CPU_SET(vcpu, &vcpus_active);
	if (!TAILQ_EMPTY(&breakpoints)) {
		vm_set_capability(ctx, vcpu, VM_CAP_BPT_EXIT, 1);
		debug("$vCPU %d enabled breakpoint exits\n", vcpu);
	}

	/*
	 * If a vcpu is added while vcpus are stopped, suspend the new
	 * vcpu so that it will pop back out with a debug exit before
	 * executing the first instruction.
	 */
	if (!CPU_EMPTY(&vcpus_suspended)) {
		CPU_SET(vcpu, &vcpus_suspended);
		_gdb_cpu_suspend(vcpu, false);
	}
	pthread_mutex_unlock(&gdb_lock);
}
/*
 * Invoked by a vCPU before resuming execution.  This enables stepping
 * if the vCPU is marked as stepping.
 */
static void
gdb_cpu_resume(int vcpu)
{
	struct vcpu_state *vs;
	int error;

	vs = &vcpu_state[vcpu];

	/*
	 * Any pending event should already be reported before
	 * resuming.
	 */
	assert(vs->hit_swbreak == false);
	assert(vs->stepped == false);
	if (vs->stepping) {
		error = vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 1);
		assert(error == 0);
	}
}
/*
 * Handler for VM_EXITCODE_DEBUG used to suspend a vCPU when the guest
 * has been suspended due to an event on a different vCPU or in response
 * to a guest-wide suspend such as Ctrl-C or the stop on attach.
 */
void
gdb_cpu_suspend(int vcpu)
{

	if (!gdb_active)
		return;
	pthread_mutex_lock(&gdb_lock);
	_gdb_cpu_suspend(vcpu, true);
	gdb_cpu_resume(vcpu);
	pthread_mutex_unlock(&gdb_lock);
}
static void
gdb_suspend_vcpus(void)
{

	assert(pthread_mutex_isowned_np(&gdb_lock));
	debug("suspending all CPUs\n");
	vcpus_suspended = vcpus_active;
	vm_suspend_cpu(ctx, -1);
	if (CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
		gdb_finish_suspend_vcpus();
}
/*
 * Handler for VM_EXITCODE_MTRAP reported when a vCPU single-steps via
 * the VT-x-specific MTRAP exit.
 */
void
gdb_cpu_mtrap(int vcpu)
{
	struct vcpu_state *vs;

	if (!gdb_active)
		return;
	debug("$vCPU %d MTRAP\n", vcpu);
	pthread_mutex_lock(&gdb_lock);
	vs = &vcpu_state[vcpu];
	if (vs->stepping) {
		vs->stepping = false;
		vs->stepped = true;
		vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 0);
		while (vs->stepped) {
			if (stopped_vcpu == -1) {
				debug("$vCPU %d reporting step\n", vcpu);
				stopped_vcpu = vcpu;
				gdb_suspend_vcpus();
			}
			_gdb_cpu_suspend(vcpu, true);
		}
		gdb_cpu_resume(vcpu);
	}
	pthread_mutex_unlock(&gdb_lock);
}
static struct breakpoint *
find_breakpoint(uint64_t gpa)
{
	struct breakpoint *bp;

	TAILQ_FOREACH(bp, &breakpoints, link) {
		if (bp->gpa == gpa)
			return (bp);
	}
	return (NULL);
}
void
gdb_cpu_breakpoint(int vcpu, struct vm_exit *vmexit)
{
	struct breakpoint *bp;
	struct vcpu_state *vs;
	uint64_t gpa;
	int error;

	if (!gdb_active) {
		fprintf(stderr, "vm_loop: unexpected VMEXIT_DEBUG\n");
		exit(4);
	}
	pthread_mutex_lock(&gdb_lock);
	error = guest_vaddr2paddr(vcpu, vmexit->rip, &gpa);
	assert(error == 1);
	bp = find_breakpoint(gpa);
	if (bp != NULL) {
		vs = &vcpu_state[vcpu];
		assert(vs->stepping == false);
		assert(vs->stepped == false);
		assert(vs->hit_swbreak == false);
		vs->hit_swbreak = true;
		vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, vmexit->rip);
		for (;;) {
			if (stopped_vcpu == -1) {
				debug("$vCPU %d reporting breakpoint at rip %#lx\n",
				    vcpu, vmexit->rip);
				stopped_vcpu = vcpu;
				gdb_suspend_vcpus();
			}
			_gdb_cpu_suspend(vcpu, true);
			if (!vs->hit_swbreak) {
				/* Breakpoint reported. */
				break;
			}
			bp = find_breakpoint(gpa);
			if (bp == NULL) {
				/* Breakpoint was removed. */
				vs->hit_swbreak = false;
				break;
			}
		}
		gdb_cpu_resume(vcpu);
	} else {
		debug("$vCPU %d injecting breakpoint at rip %#lx\n", vcpu,
		    vmexit->rip);
		error = vm_set_register(ctx, vcpu,
		    VM_REG_GUEST_ENTRY_INST_LENGTH, vmexit->u.bpt.inst_length);
		assert(error == 0);
		error = vm_inject_exception(ctx, vcpu, IDT_BP, 0, 0, 0);
		assert(error == 0);
	}
	pthread_mutex_unlock(&gdb_lock);
}
static bool
gdb_step_vcpu(int vcpu)
{
	int error, val;

	debug("$vCPU %d step\n", vcpu);
	error = vm_get_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, &val);
	if (error < 0)
		return (false);

	discard_stop();
	vcpu_state[vcpu].stepping = true;
	vm_resume_cpu(ctx, vcpu);
	CPU_CLR(vcpu, &vcpus_suspended);
	pthread_cond_broadcast(&idle_vcpus);
	return (true);
}
static void
gdb_resume_vcpus(void)
{

	assert(pthread_mutex_isowned_np(&gdb_lock));
	vm_resume_cpu(ctx, -1);
	debug("resuming all CPUs\n");
	CPU_ZERO(&vcpus_suspended);
	pthread_cond_broadcast(&idle_vcpus);
}
static void
gdb_read_regs(void)
{
	uint64_t regvals[nitems(gdb_regset)];

	if (vm_get_register_set(ctx, cur_vcpu, nitems(gdb_regset),
	    gdb_regset, regvals) == -1) {
		send_error(errno);
		return;
	}
	start_packet();
	for (size_t i = 0; i < nitems(regvals); i++)
		append_unsigned_native(regvals[i], gdb_regsize[i]);
	finish_packet();
}
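/*
 * Illustrative 'm' packet (hypothetical values): "m1000,10" asks for
 * 0x10 bytes starting at guest virtual address 0x1000, with both
 * fields encoded in hex.
 */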
static void
gdb_read_mem(const uint8_t *data, size_t len)
{
	uint64_t gpa, gva, val;
	uint8_t *cp;
	size_t resid, todo, bytes;
	bool started;
	int error;

	/* Skip 'm'. */
	data += 1;
	len -= 1;

	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse length. */
	resid = parse_integer(data, len);

	started = false;
	while (resid > 0) {
		error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
		if (error == -1) {
			if (started)
				finish_packet();
			else
				send_error(errno);
			return;
		}
		if (error == 0) {
			if (started)
				finish_packet();
			else
				send_error(EFAULT);
			return;
		}

		/* Read bytes from current page. */
		todo = getpagesize() - gpa % getpagesize();
		if (todo > resid)
			todo = resid;

		cp = paddr_guest2host(ctx, gpa, todo);
		if (cp != NULL) {
			/*
			 * If this page is guest RAM, read it a byte
			 * at a time.
			 */
			if (!started) {
				start_packet();
				started = true;
			}
			while (todo > 0) {
				append_byte(*cp);
				cp++;
				gpa++;
				gva++;
				resid--;
				todo--;
			}
		} else {
			/*
			 * If this page isn't guest RAM, try to handle
			 * it via MMIO.  For MMIO requests, use
			 * aligned reads of words when possible.
			 */
			while (todo > 0) {
				if (gpa & 1 || todo == 1)
					bytes = 1;
				else if (gpa & 2 || todo == 2)
					bytes = 2;
				else
					bytes = 4;
				error = read_mem(ctx, cur_vcpu, gpa, &val,
				    bytes);
				if (error != 0) {
					if (started)
						finish_packet();
					else
						send_error(EFAULT);
					return;
				}
				if (!started) {
					start_packet();
					started = true;
				}
				gpa += bytes;
				gva += bytes;
				resid -= bytes;
				todo -= bytes;
				while (bytes > 0) {
					append_byte(val);
					val >>= 8;
					bytes--;
				}
			}
		}
		assert(resid == 0 || gpa % getpagesize() == 0);
	}
	if (!started)
		start_packet();
	finish_packet();
}
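/*
 * Illustrative 'M' packet (hypothetical values): "M1000,2:90c3"
 * writes the two bytes 0x90 and 0xc3 at guest virtual address 0x1000;
 * the byte count and hex payload must agree.
 */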
static void
gdb_write_mem(const uint8_t *data, size_t len)
{
	uint64_t gpa, gva, val;
	uint8_t *cp;
	size_t resid, todo, bytes;
	int error;

	/* Skip 'M'. */
	data += 1;
	len -= 1;

	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume length. */
	cp = memchr(data, ':', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	resid = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Verify the available bytes match the length. */
	if (len != resid * 2) {
		send_error(EINVAL);
		return;
	}

	while (resid > 0) {
		error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
		if (error == -1) {
			send_error(errno);
			return;
		}
		if (error == 0) {
			send_error(EFAULT);
			return;
		}

		/* Write bytes to current page. */
		todo = getpagesize() - gpa % getpagesize();
		if (todo > resid)
			todo = resid;

		cp = paddr_guest2host(ctx, gpa, todo);
		if (cp != NULL) {
			/*
			 * If this page is guest RAM, write it a byte
			 * at a time.
			 */
			while (todo > 0) {
				*cp = parse_byte(data);
				data += 2;
				cp++;
				gpa++;
				gva++;
				resid--;
				todo--;
			}
		} else {
			/*
			 * If this page isn't guest RAM, try to handle
			 * it via MMIO.  For MMIO requests, use
			 * aligned writes of words when possible.
			 */
			while (todo > 0) {
				if (gpa & 1 || todo == 1) {
					bytes = 1;
					val = parse_byte(data);
				} else if (gpa & 2 || todo == 2) {
					bytes = 2;
					val = be16toh(parse_integer(data, 4));
				} else {
					bytes = 4;
					val = be32toh(parse_integer(data, 8));
				}
				error = write_mem(ctx, cur_vcpu, gpa, val,
				    bytes);
				if (error != 0) {
					send_error(EFAULT);
					return;
				}
				data += 2 * bytes;
				gpa += bytes;
				gva += bytes;
				resid -= bytes;
				todo -= bytes;
			}
		}
		assert(resid == 0 || gpa % getpagesize() == 0);
	}
	send_ok();
}
static bool
set_breakpoint_caps(bool enable)
{
	cpuset_t mask;
	int vcpu;

	mask = vcpus_active;
	while (!CPU_EMPTY(&mask)) {
		vcpu = CPU_FFS(&mask) - 1;
		CPU_CLR(vcpu, &mask);
		if (vm_set_capability(ctx, vcpu, VM_CAP_BPT_EXIT,
		    enable ? 1 : 0) < 0)
			return (false);
		debug("$vCPU %d %sabled breakpoint exits\n", vcpu,
		    enable ? "en" : "dis");
	}
	return (true);
}
static void
remove_all_sw_breakpoints(void)
{
	struct breakpoint *bp, *nbp;
	uint8_t *cp;

	if (TAILQ_EMPTY(&breakpoints))
		return;

	TAILQ_FOREACH_SAFE(bp, &breakpoints, link, nbp) {
		debug("remove breakpoint at %#lx\n", bp->gpa);
		cp = paddr_guest2host(ctx, bp->gpa, 1);
		*cp = bp->shadow_inst;
		TAILQ_REMOVE(&breakpoints, bp, link);
		free(bp);
	}
	TAILQ_INIT(&breakpoints);
	set_breakpoint_caps(false);
}
static void
update_sw_breakpoint(uint64_t gva, int kind, bool insert)
{
	struct breakpoint *bp;
	uint64_t gpa;
	uint8_t *cp;
	int error;

	if (kind != 1) {
		send_error(EINVAL);
		return;
	}

	error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
	if (error == -1) {
		send_error(errno);
		return;
	}
	if (error == 0) {
		send_error(EFAULT);
		return;
	}

	cp = paddr_guest2host(ctx, gpa, 1);

	/* Only permit breakpoints in guest RAM. */
	if (cp == NULL) {
		send_error(EFAULT);
		return;
	}

	/* Find any existing breakpoint. */
	bp = find_breakpoint(gpa);

	/*
	 * Silently ignore duplicate commands since the protocol
	 * requires these packets to be idempotent.
	 */
	if (insert) {
		if (bp == NULL) {
			if (TAILQ_EMPTY(&breakpoints) &&
			    !set_breakpoint_caps(true)) {
				send_empty_response();
				return;
			}

			bp = malloc(sizeof(*bp));
			bp->gpa = gpa;
			bp->shadow_inst = *cp;
			*cp = 0xcc;	/* INT 3 */
			TAILQ_INSERT_TAIL(&breakpoints, bp, link);
			debug("new breakpoint at %#lx\n", gpa);
		}
	} else {
		if (bp != NULL) {
			debug("remove breakpoint at %#lx\n", gpa);
			*cp = bp->shadow_inst;
			TAILQ_REMOVE(&breakpoints, bp, link);
			free(bp);
			if (TAILQ_EMPTY(&breakpoints))
				set_breakpoint_caps(false);
		}
	}
	send_ok();
}
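/*
 * Illustrative breakpoint packets (hypothetical address): the
 * debugger sends "Z0,ffffffff80123456,1" to insert a software
 * breakpoint and "z0,ffffffff80123456,1" to remove it.  Only type 0
 * (software breakpoint) is supported below.
 */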
static void
parse_breakpoint(const uint8_t *data, size_t len)
{
	uint64_t gva;
	uint8_t *cp;
	bool insert;
	int kind, type;

	insert = data[0] == 'Z';

	/* Skip 'Z/z'. */
	data += 1;
	len -= 1;

	/* Parse and consume type. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	type = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume kind. */
	cp = memchr(data, ';', len);
	if (cp == data) {
		send_error(EINVAL);
		return;
	}
	if (cp != NULL) {
		/*
		 * We do not advertise support for either the
		 * ConditionalBreakpoints or BreakpointCommands
		 * features, so we should not be getting conditions or
		 * commands from the remote end.
		 */
		send_empty_response();
		return;
	}
	kind = parse_integer(data, len);

	switch (type) {
	case 0:
		update_sw_breakpoint(gva, kind, insert);
		break;
	default:
		send_empty_response();
		break;
	}
}
static bool
command_equals(const uint8_t *data, size_t len, const char *cmd)
{

	if (strlen(cmd) > len)
		return (false);
	return (memcmp(data, cmd, strlen(cmd)) == 0);
}
static void
check_features(const uint8_t *data, size_t len)
{
	char *feature, *next_feature, *str, *value;
	bool supported;

	str = malloc(len + 1);
	memcpy(str, data, len);
	str[len] = '\0';
	next_feature = str;

	while ((feature = strsep(&next_feature, ";")) != NULL) {
		/*
		 * Null features shouldn't exist, but skip if they
		 * do.
		 */
		if (strcmp(feature, "") == 0)
			continue;

		/*
		 * Look for the value or supported / not supported
		 * flag.
		 */
		value = strchr(feature, '=');
		if (value != NULL) {
			*value = '\0';
			value++;
			supported = true;
		} else {
			value = feature + strlen(feature) - 1;
			switch (*value) {
			case '+':
				supported = true;
				break;
			case '-':
				supported = false;
				break;
			default:
				/*
				 * This is really a protocol error,
				 * but we just ignore malformed
				 * features for ease of
				 * implementation.
				 */
				continue;
			}

			/* Strip the '+/-' at the end of the feature. */
			*value = '\0';
		}

		if (strcmp(feature, "swbreak") == 0)
			swbreak_enabled = supported;
	}
	free(str);

	start_packet();

	/* This is an arbitrary limit. */
	append_string("PacketSize=4096");
	append_string(";swbreak+");
	finish_packet();
}
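/*
 * Illustrative exchange: the debugger sends a packet such as
 * "qSupported:multiprocess+;swbreak+" and this reply advertises
 * "PacketSize=4096;swbreak+", after which swbreak stop reasons may be
 * reported.
 */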
static void
gdb_query(const uint8_t *data, size_t len)
{

	if (command_equals(data, len, "qAttached")) {
		start_packet();
		append_char('1');
		finish_packet();
	} else if (command_equals(data, len, "qC")) {
		start_packet();
		append_string("QC");
		append_integer(cur_vcpu + 1);
		finish_packet();
	} else if (command_equals(data, len, "qfThreadInfo")) {
		cpuset_t mask;
		bool first;
		int vcpu;

		if (CPU_EMPTY(&vcpus_active)) {
			send_error(EINVAL);
			return;
		}
		mask = vcpus_active;
		start_packet();
		append_char('m');
		first = true;
		while (!CPU_EMPTY(&mask)) {
			vcpu = CPU_FFS(&mask) - 1;
			CPU_CLR(vcpu, &mask);
			if (first)
				first = false;
			else
				append_char(',');
			append_integer(vcpu + 1);
		}
		finish_packet();
	} else if (command_equals(data, len, "qsThreadInfo")) {
		start_packet();
		append_char('l');
		finish_packet();
	} else if (command_equals(data, len, "qSupported")) {
		data += strlen("qSupported");
		len -= strlen("qSupported");
		check_features(data, len);
	} else if (command_equals(data, len, "qThreadExtraInfo")) {
		char buf[16];
		int tid;

		data += strlen("qThreadExtraInfo");
		len -= strlen("qThreadExtraInfo");
		if (len == 0 || *data != ',') {
			send_error(EINVAL);
			return;
		}
		tid = parse_threadid(data + 1, len - 1);
		if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
			send_error(EINVAL);
			return;
		}

		snprintf(buf, sizeof(buf), "vCPU %d", tid - 1);
		start_packet();
		append_asciihex(buf);
		finish_packet();
	} else
		send_empty_response();
}
static void
handle_command(const uint8_t *data, size_t len)
{

	/* Reject packets with a sequence-id. */
	if (len >= 3 && data[0] >= '0' && data[0] <= '9' &&
	    data[1] >= '0' && data[1] <= '9' && data[2] == ':') {
		send_empty_response();
		return;
	}

	switch (*data) {
	case 'D':
		send_ok();

		/* TODO: Resume any stopped CPUs. */
		break;
	case 'g':
		gdb_read_regs();
		break;
	case 'H': {
		int tid;

		if (data[1] != 'g' && data[1] != 'c') {
			send_error(EINVAL);
			break;
		}
		tid = parse_threadid(data + 2, len - 2);
		if (tid == -2) {
			send_error(EINVAL);
			break;
		}

		if (CPU_EMPTY(&vcpus_active)) {
			send_error(EINVAL);
			break;
		}
		if (tid == -1 || tid == 0)
			cur_vcpu = CPU_FFS(&vcpus_active) - 1;
		else if (CPU_ISSET(tid - 1, &vcpus_active))
			cur_vcpu = tid - 1;
		else {
			send_error(EINVAL);
			break;
		}
		send_ok();
		break;
	}
	case 'm':
		gdb_read_mem(data, len);
		break;
	case 'M':
		gdb_write_mem(data, len);
		break;
	case 'T': {
		int tid;

		tid = parse_threadid(data + 1, len - 1);
		if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
			send_error(EINVAL);
			break;
		}
		send_ok();
		break;
	}
	case 'q':
		gdb_query(data, len);
		break;
	case 's':
		if (len != 1) {
			send_error(EINVAL);
			break;
		}

		/* Don't send a reply until a stop occurs. */
		if (!gdb_step_vcpu(cur_vcpu)) {
			send_error(EOPNOTSUPP);
			break;
		}
		break;
	case 'z':
	case 'Z':
		parse_breakpoint(data, len);
		break;
	case 'G': /* TODO */
	case 'v':
		/* Handle 'vCont' */
		/* FALLTHROUGH */
	case 'p': /* TODO */
	case 'P': /* TODO */
	case 'Q': /* TODO */
	case 't': /* TODO */
	case 'X': /* TODO */
	default:
		send_empty_response();
	}
}
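/*
 * Worked framing example: a complete packet looks like "$g#67"; the
 * payload sits between '$' and '#', and the two trailing hex digits
 * are the modulo-256 sum of the payload bytes ('g' is 0x67, so the
 * checksum here is "67").
 */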
/* Check for a valid packet in the command buffer. */
static void
check_command(int fd)
{
	uint8_t *head, *hash, *p, sum;
	size_t avail, plen;

	for (;;) {
		avail = cur_comm.len;
		if (avail == 0)
			return;
		head = io_buffer_head(&cur_comm);
		switch (*head) {
		case 0x03:
			debug("<- Ctrl-C\n");
			io_buffer_consume(&cur_comm, 1);

			gdb_suspend_vcpus();
			break;
		case '+':
			/* ACK of previous response. */
			debug("<- +\n");
			if (response_pending())
				io_buffer_reset(&cur_resp);
			io_buffer_consume(&cur_comm, 1);
			if (stopped_vcpu != -1 && report_next_stop) {
				report_stop(true);
				send_pending_data(fd);
			}
			break;
		case '-':
			/* NACK of previous response. */
			debug("<- -\n");
			if (response_pending()) {
				cur_resp.len += cur_resp.start;
				cur_resp.start = 0;
				if (cur_resp.data[0] == '+')
					io_buffer_advance(&cur_resp, 1);
				debug("-> %.*s\n", (int)cur_resp.len,
				    io_buffer_head(&cur_resp));
			}
			io_buffer_consume(&cur_comm, 1);
			send_pending_data(fd);
			break;
		case '$':
			/* Packet. */
			if (response_pending()) {
				warnx("New GDB command while response in "
				    "progress");
				io_buffer_reset(&cur_resp);
			}

			/* Is packet complete? */
			hash = memchr(head, '#', avail);
			if (hash == NULL)
				return;
			plen = (hash - head + 1) + 2;
			if (avail < plen)
				return;
			debug("<- %.*s\n", (int)plen, head);

			/* Verify checksum. */
			for (sum = 0, p = head + 1; p < hash; p++)
				sum += *p;
			if (sum != parse_byte(hash + 1)) {
				io_buffer_consume(&cur_comm, plen);
				debug("-> -\n");
				send_char('-');
				send_pending_data(fd);
				break;
			}
			send_char('+');

			handle_command(head + 1, hash - (head + 1));
			io_buffer_consume(&cur_comm, plen);
			if (!response_pending())
				debug("-> +\n");
			send_pending_data(fd);
			break;
		default:
			/* XXX: Possibly drop connection instead. */
			debug("<- %02x\n", *head);
			io_buffer_consume(&cur_comm, 1);
			break;
		}
	}
}
static void
gdb_readable(int fd, enum ev_type event __unused, void *arg __unused)
{
	size_t pending;
	ssize_t nread;
	int n;

	if (ioctl(fd, FIONREAD, &n) == -1) {
		warn("FIONREAD on GDB socket");
		return;
	}
	assert(n >= 0);
	pending = n;

	/*
	 * 'pending' might be zero due to EOF.  We need to call read
	 * with a non-zero length to detect EOF.
	 */
	if (pending == 0)
		pending = 1;

	/* Ensure there is room in the command buffer. */
	io_buffer_grow(&cur_comm, pending);
	assert(io_buffer_avail(&cur_comm) >= pending);

	nread = read(fd, io_buffer_tail(&cur_comm), io_buffer_avail(&cur_comm));
	if (nread == 0) {
		close_connection();
	} else if (nread == -1) {
		if (errno == EAGAIN)
			return;

		warn("Read from GDB socket");
		close_connection();
	} else {
		cur_comm.len += nread;
		pthread_mutex_lock(&gdb_lock);
		check_command(fd);
		pthread_mutex_unlock(&gdb_lock);
	}
}
static void
gdb_writable(int fd, enum ev_type event __unused, void *arg __unused)
{

	send_pending_data(fd);
}
static void
new_connection(int fd, enum ev_type event __unused, void *arg)
{
	int optval, s;

	s = accept4(fd, NULL, NULL, SOCK_NONBLOCK);
	if (s == -1) {
		if (arg != NULL)
			err(1, "Failed accepting initial GDB connection");

		/* Silently ignore errors post-startup. */
		return;
	}

	optval = 1;
	if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)) ==
	    -1) {
		warn("Failed to disable SIGPIPE for GDB connection");
		close(s);
		return;
	}

	pthread_mutex_lock(&gdb_lock);
	if (cur_fd != -1) {
		close(s);
		warnx("Ignoring additional GDB connection.");
		pthread_mutex_unlock(&gdb_lock);
		return;
	}

	read_event = mevent_add(s, EVF_READ, gdb_readable, NULL);
	if (read_event == NULL) {
		if (arg != NULL)
			err(1, "Failed to setup initial GDB connection");
		pthread_mutex_unlock(&gdb_lock);
		return;
	}
	write_event = mevent_add(s, EVF_WRITE, gdb_writable, NULL);
	if (write_event == NULL) {
		if (arg != NULL)
			err(1, "Failed to setup initial GDB connection");
		mevent_delete_close(read_event);
		read_event = NULL;
		pthread_mutex_unlock(&gdb_lock);
		return;
	}

	cur_fd = s;
	cur_vcpu = 0;
	stopped_vcpu = -1;

	/* Break on attach. */
	first_stop = true;
	report_next_stop = false;
	gdb_suspend_vcpus();
	pthread_mutex_unlock(&gdb_lock);
}
#ifndef WITHOUT_CAPSICUM
static void
limit_gdb_socket(int s)
{
	cap_rights_t rights;
	unsigned long ioctls[] = { FIONREAD };

	cap_rights_init(&rights, CAP_ACCEPT, CAP_EVENT, CAP_READ, CAP_WRITE,
	    CAP_SETSOCKOPT, CAP_IOCTL);
	if (caph_rights_limit(s, &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
	if (caph_ioctls_limit(s, ioctls, nitems(ioctls)) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
}
#endif
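/*
 * Illustrative configuration (values are examples): setting
 * gdb.port=1234 enables the stub, gdb.address defaults to
 * "localhost", and gdb.wait=true holds vCPU 0 suspended until a
 * debugger attaches.
 */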
void
init_gdb(struct vmctx *_ctx)
{
	int error, flags, optval, s;
	struct addrinfo hints;
	struct addrinfo *gdbaddr;
	const char *saddr, *value;
	char *sport;
	bool wait;

	value = get_config_value("gdb.port");
	if (value == NULL)
		return;
	sport = strdup(value);
	if (sport == NULL)
		errx(4, "Failed to allocate memory");

	wait = get_config_bool_default("gdb.wait", false);

	saddr = get_config_value("gdb.address");
	if (saddr == NULL)
		saddr = "localhost";

	debug("==> starting on %s:%s, %swaiting\n",
	    saddr, sport, wait ? "" : "not ");

	error = pthread_mutex_init(&gdb_lock, NULL);
	if (error != 0)
		errc(1, error, "gdb mutex init");
	error = pthread_cond_init(&idle_vcpus, NULL);
	if (error != 0)
		errc(1, error, "gdb cv init");

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_flags = AI_NUMERICSERV | AI_PASSIVE;

	error = getaddrinfo(saddr, sport, &hints, &gdbaddr);
	if (error != 0)
		errx(1, "gdb address resolution: %s", gai_strerror(error));

	ctx = _ctx;
	s = socket(gdbaddr->ai_family, gdbaddr->ai_socktype, 0);
	if (s < 0)
		err(1, "gdb socket create");

	optval = 1;
	(void)setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));

	if (bind(s, gdbaddr->ai_addr, gdbaddr->ai_addrlen) < 0)
		err(1, "gdb socket bind");

	if (listen(s, 1) < 0)
		err(1, "gdb socket listen");

	stopped_vcpu = -1;
	TAILQ_INIT(&breakpoints);
	vcpu_state = calloc(guest_ncpus, sizeof(*vcpu_state));
	if (wait) {
		/*
		 * Set vcpu 0 in vcpus_suspended.  This will trigger the
		 * logic in gdb_cpu_add() to suspend the first vcpu before
		 * it starts execution.  The vcpu will remain suspended
		 * until a debugger connects.
		 */
		CPU_SET(0, &vcpus_suspended);
		stopped_vcpu = 0;
	}

	flags = fcntl(s, F_GETFL);
	if (fcntl(s, F_SETFL, flags | O_NONBLOCK) == -1)
		err(1, "Failed to mark gdb socket non-blocking");

#ifndef WITHOUT_CAPSICUM
	limit_gdb_socket(s);
#endif
	mevent_add(s, EVF_READ, new_connection, NULL);
	gdb_active = true;
	freeaddrinfo(gdbaddr);
	free(sport);
}