/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017-2018 John H. Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/endian.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <machine/atomic.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>
#include <netinet/in.h>

#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <pthread_np.h>
/*
 * GDB_SIGNAL_* numbers are part of the GDB remote protocol.  Most stops
 * are reported as GDB_SIGNAL_TRAP (SIGTRAP).
 */
#define	GDB_SIGNAL_TRAP		5
static void gdb_resume_vcpus(void);
static void check_command(int fd);

static struct mevent *read_event, *write_event;

static cpuset_t vcpus_active, vcpus_suspended, vcpus_waiting;
static pthread_mutex_t gdb_lock;
static pthread_cond_t idle_vcpus;
static bool first_stop, report_next_stop, swbreak_enabled;
/*
 * An I/O buffer contains 'capacity' bytes of room at 'data'.  For a
 * read buffer, 'start' is unused and 'len' contains the number of
 * valid bytes in the buffer.  For a write buffer, 'start' is set to
 * the index of the next byte in 'data' to send, and 'len' contains
 * the remaining number of valid bytes to send.
 */
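/*
 * A minimal sketch of the buffer structure itself, whose definition is
 * not shown above: the field names and types below are reconstructed
 * from how the io_buffer_*() helpers use them.
 */
struct io_buffer {
	uint8_t *data;		/* backing storage */
	size_t capacity;	/* total bytes available at 'data' */
	size_t start;		/* index of next byte to send (writes) */
	size_t len;		/* number of valid bytes */
};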
/* A software breakpoint; fields besides 'link' are inferred from uses below. */
struct breakpoint {
	uint64_t gpa;		/* patched guest-physical address */
	uint8_t shadow_inst;	/* original byte displaced by INT3 */
	TAILQ_ENTRY(breakpoint) link;
};
/*
 * When a vCPU stops due to an event that should be reported to the
 * debugger, information about the event is stored in this structure.
 * The vCPU thread then sets 'stopped_vcpu' if it is not already set
 * and stops other vCPUs so the event can be reported.  The
 * report_stop() function reports the event for the 'stopped_vcpu'
 * vCPU.  When the debugger resumes execution via continue or step,
 * the event for 'stopped_vcpu' is cleared.  vCPUs will loop in their
 * event handlers until the associated event is reported or disabled.
 *
 * An idle vCPU will have all of the boolean fields set to false.
 *
 * When a vCPU is stepped, 'stepping' is set to true when the vCPU is
 * released to execute the stepped instruction.  When the vCPU reports
 * the stepping trap, 'stepped' is set.
 *
 * When a vCPU hits a breakpoint set by the debug server,
 * 'hit_swbreak' is set to true.
 */
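/*
 * A sketch of the per-vCPU state the comment above describes; the
 * definition itself is not shown, so the fields are reconstructed from
 * their uses elsewhere in this file.
 */
struct vcpu_state {
	bool stepping;		/* released to run one instruction */
	bool stepped;		/* the stepping trap has been taken */
	bool hit_swbreak;	/* stopped at a debug-server breakpoint */
};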
static struct io_buffer cur_comm, cur_resp;
static uint8_t cur_csum;
static struct vmctx *ctx;
static int cur_fd = -1;
static TAILQ_HEAD(, breakpoint) breakpoints;
static struct vcpu_state *vcpu_state;
static int cur_vcpu, stopped_vcpu;

const int gdb_regset[] = {

const int gdb_regsize[] = {
static void __printflike(1, 2)
debug(const char *fmt, ...)
	static FILE *logfile;

	if (logfile == NULL) {
		logfile = fopen("/tmp/bhyve_gdb.log", "w");
#ifndef WITHOUT_CAPSICUM
		if (caph_limit_stream(fileno(logfile), CAPH_WRITE) == -1) {

	vfprintf(logfile, fmt, ap);
static void remove_all_sw_breakpoints(void);

static int
guest_paging_info(int vcpu, struct vm_guest_paging *paging)
	uint64_t regs[4];
	const int regset[4] = {
		VM_REG_GUEST_CR0, VM_REG_GUEST_CR3, VM_REG_GUEST_CR4,
		VM_REG_GUEST_EFER
	};

	if (vm_get_register_set(ctx, vcpu, nitems(regset), regset, regs) == -1)
		return (-1);

	/*
	 * For the debugger, always pretend to be the kernel (CPL 0),
	 * and if long-mode is enabled, always parse addresses as if
	 * in 64-bit mode.
	 */
	paging->cr3 = regs[1];
	paging->cpl = 0;
	if (regs[3] & EFER_LMA)
		paging->cpu_mode = CPU_MODE_64BIT;
	else if (regs[0] & CR0_PE)
		paging->cpu_mode = CPU_MODE_PROTECTED;
	else
		paging->cpu_mode = CPU_MODE_REAL;
	if (!(regs[0] & CR0_PG))
		paging->paging_mode = PAGING_MODE_FLAT;
	else if (!(regs[2] & CR4_PAE))
		paging->paging_mode = PAGING_MODE_32;
	else if (regs[3] & EFER_LME)
		paging->paging_mode = (regs[2] & CR4_LA57) ?
		    PAGING_MODE_64_LA57 : PAGING_MODE_64;
	else
		paging->paging_mode = PAGING_MODE_PAE;
	return (0);
/*
 * Map a guest virtual address to a physical address (for a given vcpu).
 * If a guest virtual address is valid, return 1.  If the address is
 * not valid, return 0.  If an error occurs obtaining the mapping,
 * return -1.
 */
static int
guest_vaddr2paddr(int vcpu, uint64_t vaddr, uint64_t *paddr)
	struct vm_guest_paging paging;
	int fault;

	if (guest_paging_info(vcpu, &paging) == -1)
		return (-1);

	/*
	 * Always use PROT_READ.  We really care if the VA is
	 * accessible, not if the current vCPU can write.
	 */
	if (vm_gla2gpa_nofault(ctx, vcpu, &paging, vaddr, PROT_READ, paddr,
	    &fault) == -1)
		return (-1);
	if (fault)
		return (0);
	return (1);
io_buffer_reset(struct io_buffer *io)

/* Available room for adding data. */
io_buffer_avail(struct io_buffer *io)
	return (io->capacity - (io->start + io->len));

io_buffer_head(struct io_buffer *io)
	return (io->data + io->start);

io_buffer_tail(struct io_buffer *io)
	return (io->data + io->start + io->len);

io_buffer_advance(struct io_buffer *io, size_t amount)
	assert(amount <= io->len);

io_buffer_consume(struct io_buffer *io, size_t amount)
	io_buffer_advance(io, amount);

	/*
	 * XXX: Consider making this move optional and compacting on a
	 * future read() before realloc().
	 */
	memmove(io->data, io_buffer_head(io), io->len);

io_buffer_grow(struct io_buffer *io, size_t newsize)
	uint8_t *new_data;
	size_t avail, new_cap;

	avail = io_buffer_avail(io);
	if (newsize <= avail)
		return;

	new_cap = io->capacity + (newsize - avail);
	new_data = realloc(io->data, new_cap);
	if (new_data == NULL)
		err(1, "Failed to grow GDB I/O buffer");
	io->data = new_data;
	io->capacity = new_cap;

response_pending(void)
	if (cur_resp.start == 0 && cur_resp.len == 0)
		return (false);
	if (cur_resp.start + cur_resp.len == 1 && cur_resp.data[0] == '+')
		return (false);
	return (true);
close_connection(void)
	/*
	 * XXX: This triggers a warning because mevent does the close
	 * before the EV_DELETE.
	 */
	pthread_mutex_lock(&gdb_lock);
	mevent_delete(write_event);
	mevent_delete_close(read_event);

	io_buffer_reset(&cur_comm);
	io_buffer_reset(&cur_resp);

	remove_all_sw_breakpoints();

	/* Clear any pending events. */
	memset(vcpu_state, 0, guest_ncpus * sizeof(*vcpu_state));

	/* Resume any stopped vCPUs. */
	gdb_resume_vcpus();
	pthread_mutex_unlock(&gdb_lock);
hex_digit(uint8_t nibble)
	if (nibble <= 9)
		return (nibble + '0');
	else
		return (nibble + 'a' - 10);

parse_digit(uint8_t v)
	if (v >= '0' && v <= '9')
		return (v - '0');
	if (v >= 'a' && v <= 'f')
		return (v - 'a' + 10);
	if (v >= 'A' && v <= 'F')
		return (v - 'A' + 10);

/* Parses big-endian hexadecimal. */
parse_integer(const uint8_t *p, size_t len)
		v |= parse_digit(*p);

parse_byte(const uint8_t *p)
	return (parse_digit(p[0]) << 4 | parse_digit(p[1]));
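/*
 * Editor's illustration, not part of the original source: a quick
 * round-trip through the hex helpers above, assuming ASCII input.
 */
#if 0
static void
hex_helpers_example(void)
{
	assert(hex_digit(0x7) == '7');				/* binary -> hex char */
	assert(parse_byte((const uint8_t *)"7f") == 0x7f);	/* two chars -> byte */
	assert(parse_integer((const uint8_t *)"1a2b", 4) == 0x1a2b);
}
#endif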
send_pending_data(int fd)
	if (cur_resp.len == 0) {
		mevent_disable(write_event);

	nwritten = write(fd, io_buffer_head(&cur_resp), cur_resp.len);
	if (nwritten == -1) {
		warn("Write to GDB socket failed");
	io_buffer_advance(&cur_resp, nwritten);
	if (cur_resp.len == 0)
		mevent_disable(write_event);
	else
		mevent_enable(write_event);

/* Append a single character to the output buffer. */
send_char(uint8_t data)
	io_buffer_grow(&cur_resp, 1);
	*io_buffer_tail(&cur_resp) = data;

/* Append an array of bytes to the output buffer. */
send_data(const uint8_t *data, size_t len)
	io_buffer_grow(&cur_resp, len);
	memcpy(io_buffer_tail(&cur_resp), data, len);

format_byte(uint8_t v, uint8_t *buf)
	buf[0] = hex_digit(v >> 4);
	buf[1] = hex_digit(v & 0xf);
/*
 * Append a single byte (formatted as two hex characters) to the
 * output buffer.
 */
	send_data(buf, sizeof(buf));

	debug("-> %.*s\n", (int)cur_resp.len, io_buffer_head(&cur_resp));
/*
 * Append a single character (for the packet payload) and update the
 * packet checksum.
 */
append_char(uint8_t v)

/*
 * Append an array of bytes (for the packet payload) and update the
 * packet checksum.
 */
append_packet_data(const uint8_t *data, size_t len)
	send_data(data, len);

append_string(const char *str)
	append_packet_data(str, strlen(str));

append_byte(uint8_t v)
	append_packet_data(buf, sizeof(buf));

append_unsigned_native(uintmax_t value, size_t len)
	for (i = 0; i < len; i++) {

append_unsigned_be(uintmax_t value, size_t len)
	for (i = 0; i < len; i++) {
		format_byte(value, buf + (len - i - 1) * 2);
		value >>= 8;
	}
	append_packet_data(buf, sizeof(buf));

append_integer(unsigned int value)
	append_unsigned_be(value, (fls(value) + 7) / 8);
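/*
 * Editor's note: fls() returns the 1-based index of the most
 * significant set bit, so the expression above emits just enough bytes
 * to hold 'value'.  Worked example: for value 0x1234, fls() returns 13,
 * (13 + 7) / 8 == 2, and the payload becomes the four characters
 * "1234" with no leading zero pair.
 */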
append_asciihex(const char *str)
	while (*str != '\0') {

send_empty_response(void)

send_error(int error)

parse_threadid(const uint8_t *data, size_t len)
	if (len == 1 && *data == '0')
		return (0);
	if (len == 2 && memcmp(data, "-1", 2) == 0)
		return (-1);

	return (parse_integer(data, len));
/*
 * Report the current stop event to the debugger.  If the stop is due
 * to an event triggered on a specific vCPU such as a breakpoint or
 * stepping trap, stopped_vcpu will be set to the vCPU triggering the
 * stop.  If 'set_cur_vcpu' is true, then cur_vcpu will be updated to
 * the reporting vCPU for vCPU events.
 */
report_stop(bool set_cur_vcpu)
	struct vcpu_state *vs;

	if (stopped_vcpu == -1) {
		append_byte(GDB_SIGNAL_TRAP);
	} else {
		vs = &vcpu_state[stopped_vcpu];
		if (set_cur_vcpu)
			cur_vcpu = stopped_vcpu;
		append_byte(GDB_SIGNAL_TRAP);
		append_string("thread:");
		append_integer(stopped_vcpu + 1);
		if (vs->hit_swbreak) {
			debug("$vCPU %d reporting swbreak\n", stopped_vcpu);
			if (swbreak_enabled)
				append_string("swbreak:;");
		} else if (vs->stepped)
			debug("$vCPU %d reporting step\n", stopped_vcpu);
		else
			debug("$vCPU %d reporting ???\n", stopped_vcpu);
	}

	report_next_stop = false;
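/*
 * Editor's illustration, not part of the original source: for vCPU 0
 * (GDB thread 1) stopping at a software breakpoint, the logic above
 * builds a stop-reply payload along the lines of
 * "T05thread:1;swbreak:;" (signal 5 being GDB_SIGNAL_TRAP), which is
 * then framed and checksummed before going out on the wire.
 */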
/*
 * If this stop is due to a vCPU event, clear that event to mark it as
 * acknowledged.
 */
discard_stop(void)
	struct vcpu_state *vs;

	if (stopped_vcpu != -1) {
		vs = &vcpu_state[stopped_vcpu];
		vs->hit_swbreak = false;
		vs->stepped = false;
		stopped_vcpu = -1;
	}
	report_next_stop = true;

gdb_finish_suspend_vcpus(void)
	} else if (report_next_stop) {
		assert(!response_pending());
		report_stop(true);
		send_pending_data(cur_fd);
/*
 * vCPU threads invoke this function whenever the vCPU enters the
 * debug server to pause or report an event.  vCPU threads wait here
 * as long as the debug server keeps them suspended.
 */
_gdb_cpu_suspend(int vcpu, bool report_stop)
	debug("$vCPU %d suspending\n", vcpu);
	CPU_SET(vcpu, &vcpus_waiting);
	if (report_stop && CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
		gdb_finish_suspend_vcpus();
	while (CPU_ISSET(vcpu, &vcpus_suspended))
		pthread_cond_wait(&idle_vcpus, &gdb_lock);
	CPU_CLR(vcpu, &vcpus_waiting);
	debug("$vCPU %d resuming\n", vcpu);
/*
 * Invoked at the start of a vCPU thread's execution to inform the
 * debug server about the new thread.
 */
gdb_cpu_add(int vcpu)
	debug("$vCPU %d starting\n", vcpu);
	pthread_mutex_lock(&gdb_lock);
	assert(vcpu < guest_ncpus);
	CPU_SET(vcpu, &vcpus_active);
	if (!TAILQ_EMPTY(&breakpoints)) {
		vm_set_capability(ctx, vcpu, VM_CAP_BPT_EXIT, 1);
		debug("$vCPU %d enabled breakpoint exits\n", vcpu);
	}

	/*
	 * If a vcpu is added while vcpus are stopped, suspend the new
	 * vcpu so that it will pop back out with a debug exit before
	 * executing the first instruction.
	 */
	if (!CPU_EMPTY(&vcpus_suspended)) {
		CPU_SET(vcpu, &vcpus_suspended);
		_gdb_cpu_suspend(vcpu, false);
	}
	pthread_mutex_unlock(&gdb_lock);
/*
 * Invoked by a vCPU before resuming execution.  This enables stepping
 * if the vCPU is marked as stepping.
 */
gdb_cpu_resume(int vcpu)
	struct vcpu_state *vs;

	vs = &vcpu_state[vcpu];

	/*
	 * Any pending event should already be reported before
	 * resuming.
	 */
	assert(vs->hit_swbreak == false);
	assert(vs->stepped == false);
	error = vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 1);
/*
 * Handler for VM_EXITCODE_DEBUG used to suspend a vCPU when the guest
 * has been suspended due to an event on a different vCPU or in response
 * to a guest-wide suspend such as Ctrl-C or the stop on attach.
 */
gdb_cpu_suspend(int vcpu)
	pthread_mutex_lock(&gdb_lock);
	_gdb_cpu_suspend(vcpu, true);
	gdb_cpu_resume(vcpu);
	pthread_mutex_unlock(&gdb_lock);
gdb_suspend_vcpus(void)
	assert(pthread_mutex_isowned_np(&gdb_lock));
	debug("suspending all CPUs\n");
	vcpus_suspended = vcpus_active;
	vm_suspend_cpu(ctx, -1);
	if (CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
		gdb_finish_suspend_vcpus();
/*
 * Handler for VM_EXITCODE_MTRAP reported when a vCPU single-steps via
 * the VT-x-specific MTRAP exit.
 */
gdb_cpu_mtrap(int vcpu)
	struct vcpu_state *vs;

	debug("$vCPU %d MTRAP\n", vcpu);
	pthread_mutex_lock(&gdb_lock);
	vs = &vcpu_state[vcpu];
	if (vs->stepping) {
		vs->stepping = false;
		vs->stepped = true;
		vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 0);
		while (vs->stepped) {
			if (stopped_vcpu == -1) {
				debug("$vCPU %d reporting step\n", vcpu);
				stopped_vcpu = vcpu;
				gdb_suspend_vcpus();
			}
			_gdb_cpu_suspend(vcpu, true);
		}
		gdb_cpu_resume(vcpu);
	}
	pthread_mutex_unlock(&gdb_lock);
static struct breakpoint *
find_breakpoint(uint64_t gpa)
	struct breakpoint *bp;

	TAILQ_FOREACH(bp, &breakpoints, link) {
gdb_cpu_breakpoint(int vcpu, struct vm_exit *vmexit)
	struct breakpoint *bp;
	struct vcpu_state *vs;

	pthread_mutex_lock(&gdb_lock);
	error = guest_vaddr2paddr(vcpu, vmexit->rip, &gpa);
	bp = find_breakpoint(gpa);
	if (bp != NULL) {
		vs = &vcpu_state[vcpu];
		assert(vs->stepping == false);
		assert(vs->stepped == false);
		assert(vs->hit_swbreak == false);
		vs->hit_swbreak = true;
		vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, vmexit->rip);

		if (stopped_vcpu == -1) {
			debug("$vCPU %d reporting breakpoint at rip %#lx\n",
			    vcpu, vmexit->rip);
			stopped_vcpu = vcpu;
			gdb_suspend_vcpus();
		}
		_gdb_cpu_suspend(vcpu, true);
		if (!vs->hit_swbreak) {
			/* Breakpoint reported. */

		bp = find_breakpoint(gpa);
		if (bp == NULL) {
			/* Breakpoint was removed. */
			vs->hit_swbreak = false;

		gdb_cpu_resume(vcpu);
	} else {
		debug("$vCPU %d injecting breakpoint at rip %#lx\n",
		    vcpu, vmexit->rip);
		error = vm_set_register(ctx, vcpu,
		    VM_REG_GUEST_ENTRY_INST_LENGTH, vmexit->u.bpt.inst_length);
		error = vm_inject_exception(ctx, vcpu, IDT_BP, 0, 0, 0);
	}
	pthread_mutex_unlock(&gdb_lock);
gdb_step_vcpu(int vcpu)
	debug("$vCPU %d step\n", vcpu);
	error = vm_get_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, &val);
	vcpu_state[vcpu].stepping = true;
	vm_resume_cpu(ctx, vcpu);
	CPU_CLR(vcpu, &vcpus_suspended);
	pthread_cond_broadcast(&idle_vcpus);
static void
gdb_resume_vcpus(void)
	assert(pthread_mutex_isowned_np(&gdb_lock));
	vm_resume_cpu(ctx, -1);
	debug("resuming all CPUs\n");
	CPU_ZERO(&vcpus_suspended);
	pthread_cond_broadcast(&idle_vcpus);

gdb_read_regs(void)
	uint64_t regvals[nitems(gdb_regset)];

	if (vm_get_register_set(ctx, cur_vcpu, nitems(gdb_regset),
	    gdb_regset, regvals) == -1) {

	for (i = 0; i < nitems(regvals); i++)
		append_unsigned_native(regvals[i], gdb_regsize[i]);
gdb_read_mem(const uint8_t *data, size_t len)
	uint64_t gpa, gva, val;
	size_t resid, todo, bytes;
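	/*
	 * Editor's note: the GDB 'm' packet has the form "m<addr>,<len>"
	 * with both fields in hex; e.g. "m100000,10" requests 16 bytes
	 * at guest virtual address 0x100000.
	 */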
	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	resid = parse_integer(data, len);

	error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);

		/* Read bytes from current page. */
		todo = getpagesize() - gpa % getpagesize();

		cp = paddr_guest2host(ctx, gpa, todo);

			/*
			 * If this page is guest RAM, read it a byte
			 * at a time.
			 */

			/*
			 * If this page isn't guest RAM, try to handle
			 * it via MMIO.  For MMIO requests, use
			 * aligned reads of words when possible.
			 */
			if (gpa & 1 || todo == 1)
				bytes = 1;
			else if (gpa & 2 || todo == 2)
				bytes = 2;
			else
				bytes = 4;
			error = read_mem(ctx, cur_vcpu, gpa, &val,

	assert(resid == 0 || gpa % getpagesize() == 0);
gdb_write_mem(const uint8_t *data, size_t len)
	uint64_t gpa, gva, val;
	size_t resid, todo, bytes;
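	/*
	 * Editor's note: the GDB 'M' packet has the form
	 * "M<addr>,<len>:<bytes>", where <bytes> is 2 * <len> hex
	 * characters; the length check below enforces exactly that
	 * relationship.
	 */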
	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume length. */
	cp = memchr(data, ':', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	resid = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Verify the available bytes match the length. */
	if (len != resid * 2) {
		send_error(EINVAL);
		return;
	}

	error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);

		/* Write bytes to current page. */
		todo = getpagesize() - gpa % getpagesize();

		cp = paddr_guest2host(ctx, gpa, todo);

			/*
			 * If this page is guest RAM, write it a byte
			 * at a time.
			 */
			*cp = parse_byte(data);

			/*
			 * If this page isn't guest RAM, try to handle
			 * it via MMIO.  For MMIO requests, use
			 * aligned writes of words when possible.
			 */
			if (gpa & 1 || todo == 1) {
				bytes = 1;
				val = parse_byte(data);
			} else if (gpa & 2 || todo == 2) {
				bytes = 2;
				val = be16toh(parse_integer(data, 4));
			} else {
				bytes = 4;
				val = be32toh(parse_integer(data, 8));
			}
			error = write_mem(ctx, cur_vcpu, gpa, val,

	assert(resid == 0 || gpa % getpagesize() == 0);
set_breakpoint_caps(bool enable)
	mask = vcpus_active;
	while (!CPU_EMPTY(&mask)) {
		vcpu = CPU_FFS(&mask) - 1;
		CPU_CLR(vcpu, &mask);
		if (vm_set_capability(ctx, vcpu, VM_CAP_BPT_EXIT,
		    enable ? 1 : 0) < 0)
		debug("$vCPU %d %sabled breakpoint exits\n", vcpu,
		    enable ? "en" : "dis");
remove_all_sw_breakpoints(void)
	struct breakpoint *bp, *nbp;

	if (TAILQ_EMPTY(&breakpoints))
		return;

	TAILQ_FOREACH_SAFE(bp, &breakpoints, link, nbp) {
		debug("remove breakpoint at %#lx\n", bp->gpa);
		cp = paddr_guest2host(ctx, bp->gpa, 1);
		*cp = bp->shadow_inst;
		TAILQ_REMOVE(&breakpoints, bp, link);
		free(bp);
	}
	TAILQ_INIT(&breakpoints);
	set_breakpoint_caps(false);
update_sw_breakpoint(uint64_t gva, int kind, bool insert)
	struct breakpoint *bp;

	error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);

	cp = paddr_guest2host(ctx, gpa, 1);

	/* Only permit breakpoints in guest RAM. */

	/* Find any existing breakpoint. */
	bp = find_breakpoint(gpa);

		/*
		 * Silently ignore duplicate commands since the protocol
		 * requires these packets to be idempotent.
		 */

		if (TAILQ_EMPTY(&breakpoints) &&
		    !set_breakpoint_caps(true)) {
			send_empty_response();
			return;
		}
		bp = malloc(sizeof(*bp));
		bp->gpa = gpa;
		bp->shadow_inst = *cp;
		*cp = 0xcc;	/* INT 3 */
		TAILQ_INSERT_TAIL(&breakpoints, bp, link);
		debug("new breakpoint at %#lx\n", gpa);

		debug("remove breakpoint at %#lx\n", gpa);
		*cp = bp->shadow_inst;
		TAILQ_REMOVE(&breakpoints, bp, link);
		free(bp);
		if (TAILQ_EMPTY(&breakpoints))
			set_breakpoint_caps(false);
parse_breakpoint(const uint8_t *data, size_t len)
	insert = data[0] == 'Z';
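	/*
	 * Editor's note: breakpoint packets have the form
	 * "Z<type>,<addr>,<kind>" (insert) or "z<type>,<addr>,<kind>"
	 * (remove), all fields in hex.  Type 0 is a software breakpoint,
	 * and the kind encodes the breakpoint instruction length (1 for
	 * the x86 INT3).
	 */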
	/* Parse and consume type. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	type = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume kind. */
	cp = memchr(data, ';', len);
	if (cp != NULL) {
		/*
		 * We do not advertise support for either the
		 * ConditionalBreakpoints or BreakpointCommands
		 * features, so we should not be getting conditions or
		 * commands from the remote end.
		 */
		send_empty_response();
		return;
	}
	kind = parse_integer(data, len);

	update_sw_breakpoint(gva, kind, insert);

	send_empty_response();
command_equals(const uint8_t *data, size_t len, const char *cmd)
	if (strlen(cmd) > len)
		return (false);
	return (memcmp(data, cmd, strlen(cmd)) == 0);
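/*
 * Editor's illustration, not part of the original source: a typical
 * feature negotiation.  The debugger opens with something like
 * "qSupported:multiprocess+;swbreak+;hwbreak+", and check_features()
 * below answers "PacketSize=4096;swbreak+", recording in
 * swbreak_enabled whether stop replies may carry the "swbreak:;"
 * stop reason.
 */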
check_features(const uint8_t *data, size_t len)
	char *feature, *next_feature, *str, *value;

	str = malloc(len + 1);
	memcpy(str, data, len);

	while ((feature = strsep(&next_feature, ";")) != NULL) {
		/*
		 * Null features shouldn't exist, but skip if they
		 * somehow do.
		 */
		if (strcmp(feature, "") == 0)
			continue;

		/*
		 * Look for the value or supported / not supported
		 * flag.
		 */
		value = strchr(feature, '=');
		if (value != NULL) {

			value = feature + strlen(feature) - 1;

				/*
				 * This is really a protocol error,
				 * but we just ignore malformed
				 * features for ease of
				 * implementation.
				 */

		if (strcmp(feature, "swbreak") == 0)
			swbreak_enabled = supported;

	/* This is an arbitrary limit. */
	append_string("PacketSize=4096");
	append_string(";swbreak+");
gdb_query(const uint8_t *data, size_t len)

	if (command_equals(data, len, "qAttached")) {

	} else if (command_equals(data, len, "qC")) {
		append_string("QC");
		append_integer(cur_vcpu + 1);

	} else if (command_equals(data, len, "qfThreadInfo")) {
		if (CPU_EMPTY(&vcpus_active)) {

		mask = vcpus_active;

		while (!CPU_EMPTY(&mask)) {
			vcpu = CPU_FFS(&mask) - 1;
			CPU_CLR(vcpu, &mask);
			append_integer(vcpu + 1);

	} else if (command_equals(data, len, "qsThreadInfo")) {

	} else if (command_equals(data, len, "qSupported")) {
		data += strlen("qSupported");
		len -= strlen("qSupported");
		check_features(data, len);
	} else if (command_equals(data, len, "qThreadExtraInfo")) {
		data += strlen("qThreadExtraInfo");
		len -= strlen("qThreadExtraInfo");

		tid = parse_threadid(data + 1, len - 1);
		if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {

		snprintf(buf, sizeof(buf), "vCPU %d", tid - 1);
		append_asciihex(buf);
	} else
		send_empty_response();
handle_command(const uint8_t *data, size_t len)

	/* Reject packets with a sequence-id. */
	if (len >= 3 && data[0] >= '0' && data[0] <= '9' &&
	    data[1] >= '0' && data[1] <= '9' && data[2] == ':') {
		send_empty_response();
		/* TODO: Resume any stopped CPUs. */

	case 'H':
		if (data[1] != 'g' && data[1] != 'c') {

		tid = parse_threadid(data + 2, len - 2);

		if (CPU_EMPTY(&vcpus_active)) {

		if (tid == -1 || tid == 0)
			cur_vcpu = CPU_FFS(&vcpus_active) - 1;
		else if (CPU_ISSET(tid - 1, &vcpus_active))
			cur_vcpu = tid - 1;

	case 'm':
		gdb_read_mem(data, len);
		break;
	case 'M':
		gdb_write_mem(data, len);
		break;

	case 'T':
		tid = parse_threadid(data + 1, len - 1);
		if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {

	case 'q':
		gdb_query(data, len);
		break;

	case 's':
		/* Don't send a reply until a stop occurs. */
		if (!gdb_step_vcpu(cur_vcpu)) {
			send_error(EOPNOTSUPP);

	case 'z':
	case 'Z':
		parse_breakpoint(data, len);
		break;
	case 'G': /* TODO */
	case 'v':
		/* Handle 'vCont' */
		/* FALLTHROUGH */
	case 'p': /* TODO */
	case 'P': /* TODO */
	case 'Q': /* TODO */
	case 't': /* TODO */
	case 'X': /* TODO */
	default:
		send_empty_response();
/* Check for a valid packet in the command buffer. */
static void
check_command(int fd)
	uint8_t *head, *hash, *p, sum;
	size_t avail, plen;
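	/*
	 * Editor's note: remote-protocol packets are framed as
	 * "$<payload>#<nn>", where <nn> is two hex digits holding the
	 * modulo-256 sum of the payload bytes; that is the checksum
	 * recomputed over [head + 1, hash) below.
	 */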
	avail = cur_comm.len;
	if (avail == 0)
		return;
	head = io_buffer_head(&cur_comm);
	switch (*head) {
	case 0x03:
		debug("<- Ctrl-C\n");
		io_buffer_consume(&cur_comm, 1);
		gdb_suspend_vcpus();
		break;
	case '+':
		/* ACK of previous response. */
		if (response_pending())
			io_buffer_reset(&cur_resp);
		io_buffer_consume(&cur_comm, 1);
		if (stopped_vcpu != -1 && report_next_stop) {
			report_stop(true);
			send_pending_data(fd);
		}
		break;
	case '-':
		/* NACK of previous response. */
		if (response_pending()) {
			cur_resp.len += cur_resp.start;
			cur_resp.start = 0;
			if (cur_resp.data[0] == '+')
				io_buffer_advance(&cur_resp, 1);
			debug("-> %.*s\n", (int)cur_resp.len,
			    io_buffer_head(&cur_resp));
		}
		io_buffer_consume(&cur_comm, 1);
		send_pending_data(fd);
		break;
	case '$':
		if (response_pending()) {
			warnx("New GDB command while response in "
			    "progress");
			io_buffer_reset(&cur_resp);
		}

		/* Is packet complete? */
		hash = memchr(head, '#', avail);
		if (hash == NULL)
			return;
		plen = (hash - head + 1) + 2;
		if (avail < plen)
			return;
		debug("<- %.*s\n", (int)plen, head);

		/* Verify checksum. */
		for (sum = 0, p = head + 1; p < hash; p++)
			sum += *p;
		if (sum != parse_byte(hash + 1)) {
			io_buffer_consume(&cur_comm, plen);
			send_pending_data(fd);
			break;
		}

		/* ACK the packet. */
		send_char('+');

		handle_command(head + 1, hash - (head + 1));
		io_buffer_consume(&cur_comm, plen);
		if (!response_pending())
			debug("-> +\n");
		send_pending_data(fd);
		break;
	default:
		/* XXX: Possibly drop connection instead. */
		debug("-> %02x\n", *head);
		io_buffer_consume(&cur_comm, 1);
		break;
	}
static void
gdb_readable(int fd, enum ev_type event, void *arg)
	ssize_t nread;
	int pending;

	if (ioctl(fd, FIONREAD, &pending) == -1) {
		warn("FIONREAD on GDB socket");
		return;
	}

	/*
	 * 'pending' might be zero due to EOF.  We need to call read
	 * with a non-zero length to detect EOF.
	 */
	if (pending == 0)
		pending = 1;

	/* Ensure there is room in the command buffer. */
	io_buffer_grow(&cur_comm, pending);
	assert(io_buffer_avail(&cur_comm) >= pending);

	nread = read(fd, io_buffer_tail(&cur_comm), io_buffer_avail(&cur_comm));
	if (nread == 0) {
		close_connection();
	} else if (nread == -1) {
		if (errno == EAGAIN)
			return;

		warn("Read from GDB socket");
		close_connection();
	} else {
		cur_comm.len += nread;
		pthread_mutex_lock(&gdb_lock);
		check_command(fd);
		pthread_mutex_unlock(&gdb_lock);
	}

static void
gdb_writable(int fd, enum ev_type event, void *arg)
	send_pending_data(fd);
static void
new_connection(int fd, enum ev_type event, void *arg)
	int optval, s;

	s = accept4(fd, NULL, NULL, SOCK_NONBLOCK);

		err(1, "Failed accepting initial GDB connection");

	/* Silently ignore errors post-startup. */

	optval = 1;
	if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)) ==
	    -1) {
		warn("Failed to disable SIGPIPE for GDB connection");

	pthread_mutex_lock(&gdb_lock);

		warnx("Ignoring additional GDB connection.");

	read_event = mevent_add(s, EVF_READ, gdb_readable, NULL);
	if (read_event == NULL) {
		err(1, "Failed to setup initial GDB connection");
		pthread_mutex_unlock(&gdb_lock);

	write_event = mevent_add(s, EVF_WRITE, gdb_writable, NULL);
	if (write_event == NULL) {
		err(1, "Failed to setup initial GDB connection");
		mevent_delete_close(read_event);

	/* Break on attach. */
	report_next_stop = false;
	gdb_suspend_vcpus();
	pthread_mutex_unlock(&gdb_lock);
#ifndef WITHOUT_CAPSICUM
static void
limit_gdb_socket(int s)
	cap_rights_t rights;
	unsigned long ioctls[] = { FIONREAD };

	cap_rights_init(&rights, CAP_ACCEPT, CAP_EVENT, CAP_READ, CAP_WRITE,
	    CAP_SETSOCKOPT, CAP_IOCTL);
	if (caph_rights_limit(s, &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
	if (caph_ioctls_limit(s, ioctls, nitems(ioctls)) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
#endif
void
init_gdb(struct vmctx *_ctx, int sport, bool wait)
	struct sockaddr_in sin;
	int error, flags, s;

	debug("==> starting on %d, %swaiting\n", sport, wait ? "" : "not ");

	error = pthread_mutex_init(&gdb_lock, NULL);
	if (error != 0)
		errc(1, error, "gdb mutex init");
	error = pthread_cond_init(&idle_vcpus, NULL);
	if (error != 0)
		errc(1, error, "gdb cv init");

	ctx = _ctx;
	s = socket(PF_INET, SOCK_STREAM, 0);
	if (s < 0)
		err(1, "gdb socket create");

	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	sin.sin_port = htons(sport);

	if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		err(1, "gdb socket bind");

	if (listen(s, 1) < 0)
		err(1, "gdb socket listen");

	TAILQ_INIT(&breakpoints);
	vcpu_state = calloc(guest_ncpus, sizeof(*vcpu_state));
	if (wait) {
		/*
		 * Set vcpu 0 in vcpus_suspended.  This will trigger the
		 * logic in gdb_cpu_add() to suspend the first vcpu before
		 * it starts execution.  The vcpu will remain suspended
		 * until a debugger connects.
		 */
		CPU_SET(0, &vcpus_suspended);
	}

	flags = fcntl(s, F_GETFL);
	if (fcntl(s, F_SETFL, flags | O_NONBLOCK) == -1)
		err(1, "Failed to mark gdb socket non-blocking");

#ifndef WITHOUT_CAPSICUM
	limit_gdb_socket(s);
#endif
	mevent_add(s, EVF_READ, new_connection, NULL);