/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017-2018 John H. Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/endian.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <machine/atomic.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>
#include <netinet/in.h>
#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <pthread_np.h>
/*
 * GDB_SIGNAL_* numbers are part of the GDB remote protocol.  Most stops
 * are reported as SIGTRAP.
 */
#define	GDB_SIGNAL_TRAP		5
static void gdb_resume_vcpus(void);
static void check_command(int fd);

static struct mevent *read_event, *write_event;

static cpuset_t vcpus_active, vcpus_suspended, vcpus_waiting;
static pthread_mutex_t gdb_lock;
static pthread_cond_t idle_vcpus;
static bool first_stop, report_next_stop, swbreak_enabled;
/*
 * An I/O buffer contains 'capacity' bytes of room at 'data'.  For a
 * read buffer, 'start' is unused and 'len' contains the number of
 * valid bytes in the buffer.  For a write buffer, 'start' is set to
 * the index of the next byte in 'data' to send, and 'len' contains
 * the remaining number of valid bytes to send.
 */
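/*
 * The structure definition itself is not part of this excerpt; a
 * minimal sketch consistent with the fields described above (field
 * types are assumed, not confirmed by the excerpt) would be:
 */
struct io_buffer {
	uint8_t *data;		/* Backing storage. */
	size_t capacity;	/* Total room at 'data'. */
	size_t start;		/* Next byte to send (write buffers). */
	size_t len;		/* Valid bytes in the buffer. */
};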
struct breakpoint {
	uint64_t gpa;
	uint8_t shadow_inst;	/* Original byte replaced by INT 3. */
	TAILQ_ENTRY(breakpoint) link;
};
/*
 * When a vCPU stops due to an event that should be reported to the
 * debugger, information about the event is stored in this structure.
 * The vCPU thread then sets 'stopped_vcpu' if it is not already set
 * and stops other vCPUs so the event can be reported.  The
 * report_stop() function reports the event for the 'stopped_vcpu'
 * vCPU.  When the debugger resumes execution via continue or step,
 * the event for 'stopped_vcpu' is cleared.  vCPUs will loop in their
 * event handlers until the associated event is reported or disabled.
 *
 * An idle vCPU will have all of the boolean fields set to false.
 *
 * When a vCPU is stepped, 'stepping' is set to true when the vCPU is
 * released to execute the stepped instruction.  When the vCPU reports
 * the stepping trap, 'stepped' is set.
 *
 * When a vCPU hits a breakpoint set by the debug server,
 * 'hit_swbreak' is set to true.
 */
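/*
 * The structure definition is elided in this excerpt; a minimal sketch
 * with the boolean fields described above (each of which is referenced
 * later in this file) would be:
 */
struct vcpu_state {
	bool stepping;		/* Step requested by the debugger. */
	bool stepped;		/* Step trap has been taken. */
	bool hit_swbreak;	/* Hit a debug-server breakpoint. */
};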
static struct io_buffer cur_comm, cur_resp;
static uint8_t cur_csum;
static struct vmctx *ctx;
static int cur_fd = -1;
static TAILQ_HEAD(, breakpoint) breakpoints;
static struct vcpu_state *vcpu_state;
static int cur_vcpu, stopped_vcpu;
const int gdb_regset[] = {

const int gdb_regsize[] = {
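/*
 * Note: gdb_regset[] and gdb_regsize[] are walked in parallel when
 * registers are reported (see the 'g' packet handling below), so both
 * arrays must keep their entries in the same order, and that order has
 * to match the register layout gdb expects for the amd64 target.
 */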
static void __printflike(1, 2)
debug(const char *fmt, ...)
	static FILE *logfile;

	if (logfile == NULL) {
		logfile = fopen("/tmp/bhyve_gdb.log", "w");
#ifndef WITHOUT_CAPSICUM
		if (caph_limit_stream(fileno(logfile), CAPH_WRITE) == -1) {

	vfprintf(logfile, fmt, ap);

static void remove_all_sw_breakpoints(void);
guest_paging_info(int vcpu, struct vm_guest_paging *paging)
	const int regset[4] = {
		VM_REG_GUEST_CR0,
		VM_REG_GUEST_CR3,
		VM_REG_GUEST_CR4,
		VM_REG_GUEST_EFER
	};

	if (vm_get_register_set(ctx, vcpu, nitems(regset), regset, regs) == -1)

	/*
	 * For the debugger, always pretend to be the kernel (CPL 0),
	 * and if long-mode is enabled, always parse addresses as if
	 * in 64-bit mode.
	 */
	paging->cr3 = regs[1];
	if (regs[3] & EFER_LMA)
		paging->cpu_mode = CPU_MODE_64BIT;
	else if (regs[0] & CR0_PE)
		paging->cpu_mode = CPU_MODE_PROTECTED;
	else
		paging->cpu_mode = CPU_MODE_REAL;
	if (!(regs[0] & CR0_PG))
		paging->paging_mode = PAGING_MODE_FLAT;
	else if (!(regs[2] & CR4_PAE))
		paging->paging_mode = PAGING_MODE_32;
	else if (regs[3] & EFER_LME)
		paging->paging_mode = PAGING_MODE_64;
	else
		paging->paging_mode = PAGING_MODE_PAE;
/*
 * Map a guest virtual address to a physical address (for a given vcpu).
 * If a guest virtual address is valid, return 1.  If the address is
 * not valid, return 0.  If an error occurs obtaining the mapping,
 * return -1.
 */
guest_vaddr2paddr(int vcpu, uint64_t vaddr, uint64_t *paddr)
	struct vm_guest_paging paging;

	if (guest_paging_info(vcpu, &paging) == -1)

	/*
	 * Always use PROT_READ.  We really care if the VA is
	 * accessible, not if the current vCPU can write.
	 */
	if (vm_gla2gpa_nofault(ctx, vcpu, &paging, vaddr, PROT_READ, paddr,
io_buffer_reset(struct io_buffer *io)

/* Available room for adding data. */
io_buffer_avail(struct io_buffer *io)
	return (io->capacity - (io->start + io->len));

io_buffer_head(struct io_buffer *io)
	return (io->data + io->start);

io_buffer_tail(struct io_buffer *io)
	return (io->data + io->start + io->len);

io_buffer_advance(struct io_buffer *io, size_t amount)
	assert(amount <= io->len);

io_buffer_consume(struct io_buffer *io, size_t amount)
	io_buffer_advance(io, amount);

	/*
	 * XXX: Consider making this move optional and compacting on a
	 * future read() before realloc().
	 */
	memmove(io->data, io_buffer_head(io), io->len);

io_buffer_grow(struct io_buffer *io, size_t newsize)
	size_t avail, new_cap;

	avail = io_buffer_avail(io);
	if (newsize <= avail)

	new_cap = io->capacity + (newsize - avail);
	new_data = realloc(io->data, new_cap);
	if (new_data == NULL)
		err(1, "Failed to grow GDB I/O buffer");
	io->capacity = new_cap;
response_pending(void)
	if (cur_resp.start == 0 && cur_resp.len == 0)
	if (cur_resp.start + cur_resp.len == 1 && cur_resp.data[0] == '+')
close_connection(void)
	/*
	 * XXX: This triggers a warning because mevent does the close
	 * before the EV_DELETE.
	 */
	pthread_mutex_lock(&gdb_lock);
	mevent_delete(write_event);
	mevent_delete_close(read_event);

	io_buffer_reset(&cur_comm);
	io_buffer_reset(&cur_resp);

	remove_all_sw_breakpoints();

	/* Clear any pending events. */
	memset(vcpu_state, 0, guest_ncpus * sizeof(*vcpu_state));

	/* Resume any stopped vCPUs. */
	pthread_mutex_unlock(&gdb_lock);
hex_digit(uint8_t nibble)
	if (nibble <= 9)
		return (nibble + '0');
	else
		return (nibble + 'a' - 10);

parse_digit(uint8_t v)
	if (v >= '0' && v <= '9')
		return (v - '0');
	if (v >= 'a' && v <= 'f')
		return (v - 'a' + 10);
	if (v >= 'A' && v <= 'F')
		return (v - 'A' + 10);
/* Parses big-endian hexadecimal. */
parse_integer(const uint8_t *p, size_t len)
		v |= parse_digit(*p);

parse_byte(const uint8_t *p)
	return (parse_digit(p[0]) << 4 | parse_digit(p[1]));
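/*
 * For example, parse_byte("7f") yields 0x7f and parse_integer("1a2b", 4)
 * yields 0x1a2b.
 */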
send_pending_data(int fd)
	if (cur_resp.len == 0) {
		mevent_disable(write_event);

	nwritten = write(fd, io_buffer_head(&cur_resp), cur_resp.len);
	if (nwritten == -1) {
		warn("Write to GDB socket failed");
	io_buffer_advance(&cur_resp, nwritten);
	if (cur_resp.len == 0)
		mevent_disable(write_event);
	else
		mevent_enable(write_event);
/* Append a single character to the output buffer. */
send_char(uint8_t data)
	io_buffer_grow(&cur_resp, 1);
	*io_buffer_tail(&cur_resp) = data;

/* Append an array of bytes to the output buffer. */
send_data(const uint8_t *data, size_t len)
	io_buffer_grow(&cur_resp, len);
	memcpy(io_buffer_tail(&cur_resp), data, len);

format_byte(uint8_t v, uint8_t *buf)
	buf[0] = hex_digit(v >> 4);
	buf[1] = hex_digit(v & 0xf);

/*
 * Append a single byte (formatted as two hex characters) to the
 * output buffer.
 */
	send_data(buf, sizeof(buf));

	debug("-> %.*s\n", (int)cur_resp.len, io_buffer_head(&cur_resp));
/*
 * Append a single character (for the packet payload) and update the
 * running checksum.
 */
append_char(uint8_t v)

/*
 * Append an array of bytes (for the packet payload) and update the
 * running checksum.
 */
append_packet_data(const uint8_t *data, size_t len)
	send_data(data, len);

append_string(const char *str)
	append_packet_data(str, strlen(str));

append_byte(uint8_t v)
	append_packet_data(buf, sizeof(buf));

append_unsigned_native(uintmax_t value, size_t len)
	for (i = 0; i < len; i++) {

append_unsigned_be(uintmax_t value, size_t len)
	for (i = 0; i < len; i++) {
		format_byte(value, buf + (len - i - 1) * 2);
	append_packet_data(buf, sizeof(buf));

append_integer(unsigned int value)
	append_unsigned_be(value, (fls(value) + 7) / 8);

append_asciihex(const char *str)
	while (*str != '\0') {
send_empty_response(void)

send_error(int error)

parse_threadid(const uint8_t *data, size_t len)
	if (len == 1 && *data == '0')
	if (len == 2 && memcmp(data, "-1", 2) == 0)

	return (parse_integer(data, len));
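/*
 * In the remote protocol a thread-id of 0 means "any thread" and -1
 * means "all threads".  vCPUs are exposed to the debugger as threads
 * numbered from 1, so vCPU N is reported as thread-id N + 1.
 */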
/*
 * Report the current stop event to the debugger.  If the stop is due
 * to an event triggered on a specific vCPU such as a breakpoint or
 * stepping trap, stopped_vcpu will be set to the vCPU triggering the
 * stop.  If 'set_cur_vcpu' is true, then cur_vcpu will be updated to
 * the reporting vCPU for vCPU events.
 */
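/*
 * For example, a breakpoint stop on the first vCPU produces a stop
 * reply of the form "T05thread:1;swbreak:;" ('T' plus GDB_SIGNAL_TRAP
 * followed by stop-reason pairs), assuming the debugger negotiated the
 * swbreak feature.
 */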
report_stop(bool set_cur_vcpu)
	struct vcpu_state *vs;

	if (stopped_vcpu == -1) {
		append_byte(GDB_SIGNAL_TRAP);
	} else {
		vs = &vcpu_state[stopped_vcpu];
		if (set_cur_vcpu)
			cur_vcpu = stopped_vcpu;
		append_byte(GDB_SIGNAL_TRAP);
		append_string("thread:");
		append_integer(stopped_vcpu + 1);

		if (vs->hit_swbreak) {
			debug("$vCPU %d reporting swbreak\n", stopped_vcpu);
			if (swbreak_enabled)
				append_string("swbreak:;");
		} else if (vs->stepped)
			debug("$vCPU %d reporting step\n", stopped_vcpu);
		else
			debug("$vCPU %d reporting ???\n", stopped_vcpu);

	report_next_stop = false;
/*
 * If this stop is due to a vCPU event, clear that event to mark it as
 * acknowledged.
 */
	struct vcpu_state *vs;

	if (stopped_vcpu != -1) {
		vs = &vcpu_state[stopped_vcpu];
		vs->hit_swbreak = false;
	report_next_stop = true;
gdb_finish_suspend_vcpus(void)

	} else if (report_next_stop) {
		assert(!response_pending());
		send_pending_data(cur_fd);
/*
 * vCPU threads invoke this function whenever the vCPU enters the
 * debug server to pause or report an event.  vCPU threads wait here
 * as long as the debug server keeps them suspended.
 */
_gdb_cpu_suspend(int vcpu, bool report_stop)
	debug("$vCPU %d suspending\n", vcpu);
	CPU_SET(vcpu, &vcpus_waiting);
	if (report_stop && CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
		gdb_finish_suspend_vcpus();
	while (CPU_ISSET(vcpu, &vcpus_suspended))
		pthread_cond_wait(&idle_vcpus, &gdb_lock);
	CPU_CLR(vcpu, &vcpus_waiting);
	debug("$vCPU %d resuming\n", vcpu);
/*
 * Invoked at the start of a vCPU thread's execution to inform the
 * debug server about the new thread.
 */
gdb_cpu_add(int vcpu)
	debug("$vCPU %d starting\n", vcpu);
	pthread_mutex_lock(&gdb_lock);
	assert(vcpu < guest_ncpus);
	CPU_SET(vcpu, &vcpus_active);
	if (!TAILQ_EMPTY(&breakpoints)) {
		vm_set_capability(ctx, vcpu, VM_CAP_BPT_EXIT, 1);
		debug("$vCPU %d enabled breakpoint exits\n", vcpu);

	/*
	 * If a vcpu is added while vcpus are stopped, suspend the new
	 * vcpu so that it will pop back out with a debug exit before
	 * executing the first instruction.
	 */
	if (!CPU_EMPTY(&vcpus_suspended)) {
		CPU_SET(vcpu, &vcpus_suspended);
		_gdb_cpu_suspend(vcpu, false);
	pthread_mutex_unlock(&gdb_lock);
/*
 * Invoked by vCPU before resuming execution.  This enables stepping
 * if the vCPU is marked as stepping.
 */
gdb_cpu_resume(int vcpu)
	struct vcpu_state *vs;

	vs = &vcpu_state[vcpu];

	/*
	 * Any pending event should already be reported before
	 * resuming.
	 */
	assert(vs->hit_swbreak == false);
	assert(vs->stepped == false);
	error = vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 1);
/*
 * Handler for VM_EXITCODE_DEBUG used to suspend a vCPU when the guest
 * has been suspended due to an event on a different vCPU or in response
 * to a guest-wide suspend such as Ctrl-C or the stop on attach.
 */
gdb_cpu_suspend(int vcpu)
	pthread_mutex_lock(&gdb_lock);
	_gdb_cpu_suspend(vcpu, true);
	gdb_cpu_resume(vcpu);
	pthread_mutex_unlock(&gdb_lock);
gdb_suspend_vcpus(void)
	assert(pthread_mutex_isowned_np(&gdb_lock));
	debug("suspending all CPUs\n");
	vcpus_suspended = vcpus_active;
	vm_suspend_cpu(ctx, -1);
	if (CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
		gdb_finish_suspend_vcpus();
/*
 * Handler for VM_EXITCODE_MTRAP reported when a vCPU single-steps via
 * the VT-x-specific MTRAP exit.
 */
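/*
 * Single-stepping works by enabling VM_CAP_MTRAP_EXIT on the vCPU (see
 * gdb_cpu_resume()), which arms the VT-x monitor trap flag so the guest
 * exits back to bhyve after executing one instruction; this handler
 * then records the completed step and clears the capability again.
 */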
gdb_cpu_mtrap(int vcpu)
	struct vcpu_state *vs;

	debug("$vCPU %d MTRAP\n", vcpu);
	pthread_mutex_lock(&gdb_lock);
	vs = &vcpu_state[vcpu];
		vs->stepping = false;
		vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 0);
		while (vs->stepped) {
			if (stopped_vcpu == -1) {
				debug("$vCPU %d reporting step\n", vcpu);
			_gdb_cpu_suspend(vcpu, true);
		gdb_cpu_resume(vcpu);
	pthread_mutex_unlock(&gdb_lock);
static struct breakpoint *
find_breakpoint(uint64_t gpa)
	struct breakpoint *bp;

	TAILQ_FOREACH(bp, &breakpoints, link) {
gdb_cpu_breakpoint(int vcpu, struct vm_exit *vmexit)
	struct breakpoint *bp;
	struct vcpu_state *vs;

	pthread_mutex_lock(&gdb_lock);
	error = guest_vaddr2paddr(vcpu, vmexit->rip, &gpa);
	bp = find_breakpoint(gpa);
		vs = &vcpu_state[vcpu];
		assert(vs->stepping == false);
		assert(vs->stepped == false);
		assert(vs->hit_swbreak == false);
		vs->hit_swbreak = true;
		vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, vmexit->rip);
			if (stopped_vcpu == -1) {
				debug("$vCPU %d reporting breakpoint at rip %#lx\n", vcpu,
				    vmexit->rip);
			_gdb_cpu_suspend(vcpu, true);
			if (!vs->hit_swbreak) {
				/* Breakpoint reported. */
			bp = find_breakpoint(gpa);
				/* Breakpoint was removed. */
				vs->hit_swbreak = false;
		gdb_cpu_resume(vcpu);
		debug("$vCPU %d injecting breakpoint at rip %#lx\n", vcpu,
		    vmexit->rip);
		error = vm_set_register(ctx, vcpu,
		    VM_REG_GUEST_ENTRY_INST_LENGTH, vmexit->u.bpt.inst_length);
		error = vm_inject_exception(ctx, vcpu, IDT_BP, 0, 0, 0);
	pthread_mutex_unlock(&gdb_lock);
gdb_step_vcpu(int vcpu)
	debug("$vCPU %d step\n", vcpu);
	error = vm_get_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, &val);
	vcpu_state[vcpu].stepping = true;
	vm_resume_cpu(ctx, vcpu);
	CPU_CLR(vcpu, &vcpus_suspended);
	pthread_cond_broadcast(&idle_vcpus);
gdb_resume_vcpus(void)
	assert(pthread_mutex_isowned_np(&gdb_lock));
	vm_resume_cpu(ctx, -1);
	debug("resuming all CPUs\n");
	CPU_ZERO(&vcpus_suspended);
	pthread_cond_broadcast(&idle_vcpus);
	uint64_t regvals[nitems(gdb_regset)];

	if (vm_get_register_set(ctx, cur_vcpu, nitems(gdb_regset),
	    gdb_regset, regvals) == -1) {
	for (i = 0; i < nitems(regvals); i++)
		append_unsigned_native(regvals[i], gdb_regsize[i]);
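/*
 * Handler for the 'm' (read memory) packet.  The request body has the
 * form "<addr>,<length>" in hex; for example, a hypothetical
 * "m10000,20" asks for 0x20 bytes starting at guest virtual address
 * 0x10000.  Replies encode the data as two hex characters per byte.
 */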
gdb_read_mem(const uint8_t *data, size_t len)
	uint64_t gpa, gva, val;
	size_t resid, todo, bytes;

	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	resid = parse_integer(data, len);

	error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);

		/* Read bytes from current page. */
		todo = getpagesize() - gpa % getpagesize();

		cp = paddr_guest2host(ctx, gpa, todo);
			/*
			 * If this page is guest RAM, read it a byte
			 * at a time.
			 */

			/*
			 * If this page isn't guest RAM, try to handle
			 * it via MMIO.  For MMIO requests, use
			 * aligned reads of words when possible.
			 */
			if (gpa & 1 || todo == 1)
			else if (gpa & 2 || todo == 2)

			error = read_mem(ctx, cur_vcpu, gpa, &val,

	assert(resid == 0 || gpa % getpagesize() == 0);
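/*
 * Handler for the 'M' (write memory) packet.  The request body has the
 * form "<addr>,<length>:<bytes>", where <bytes> is the payload encoded
 * as two hex characters per byte; e.g. a hypothetical "M10000,4:deadbeef"
 * writes the four bytes de ad be ef at guest virtual address 0x10000.
 */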
gdb_write_mem(const uint8_t *data, size_t len)
	uint64_t gpa, gva, val;
	size_t resid, todo, bytes;

	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume length. */
	cp = memchr(data, ':', len);
	if (cp == NULL || cp == data) {
	resid = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Verify the available bytes match the length. */
	if (len != resid * 2) {

	error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);

		/* Write bytes to current page. */
		todo = getpagesize() - gpa % getpagesize();

		cp = paddr_guest2host(ctx, gpa, todo);
			/*
			 * If this page is guest RAM, write it a byte
			 * at a time.
			 */
			*cp = parse_byte(data);

			/*
			 * If this page isn't guest RAM, try to handle
			 * it via MMIO.  For MMIO requests, use
			 * aligned writes of words when possible.
			 */
			if (gpa & 1 || todo == 1) {
				val = parse_byte(data);
			} else if (gpa & 2 || todo == 2) {
				val = be16toh(parse_integer(data, 4));
				val = be32toh(parse_integer(data, 8));
			error = write_mem(ctx, cur_vcpu, gpa, val,

	assert(resid == 0 || gpa % getpagesize() == 0);
set_breakpoint_caps(bool enable)
	mask = vcpus_active;
	while (!CPU_EMPTY(&mask)) {
		vcpu = CPU_FFS(&mask) - 1;
		CPU_CLR(vcpu, &mask);
		if (vm_set_capability(ctx, vcpu, VM_CAP_BPT_EXIT,
		    enable ? 1 : 0) < 0)
		debug("$vCPU %d %sabled breakpoint exits\n", vcpu,
		    enable ? "en" : "dis");
remove_all_sw_breakpoints(void)
	struct breakpoint *bp, *nbp;

	if (TAILQ_EMPTY(&breakpoints))

	TAILQ_FOREACH_SAFE(bp, &breakpoints, link, nbp) {
		debug("remove breakpoint at %#lx\n", bp->gpa);
		cp = paddr_guest2host(ctx, bp->gpa, 1);
		*cp = bp->shadow_inst;
		TAILQ_REMOVE(&breakpoints, bp, link);
	TAILQ_INIT(&breakpoints);
	set_breakpoint_caps(false);
update_sw_breakpoint(uint64_t gva, int kind, bool insert)
	struct breakpoint *bp;

	error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);

	cp = paddr_guest2host(ctx, gpa, 1);

	/* Only permit breakpoints in guest RAM. */

	/* Find any existing breakpoint. */
	bp = find_breakpoint(gpa);

	/*
	 * Silently ignore duplicate commands since the protocol
	 * requires these packets to be idempotent.
	 */
		if (TAILQ_EMPTY(&breakpoints) &&
		    !set_breakpoint_caps(true)) {
			send_empty_response();
		bp = malloc(sizeof(*bp));
		bp->shadow_inst = *cp;
		*cp = 0xcc;	/* INT 3 */
		TAILQ_INSERT_TAIL(&breakpoints, bp, link);
		debug("new breakpoint at %#lx\n", gpa);

		debug("remove breakpoint at %#lx\n", gpa);
		*cp = bp->shadow_inst;
		TAILQ_REMOVE(&breakpoints, bp, link);
		if (TAILQ_EMPTY(&breakpoints))
			set_breakpoint_caps(false);
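/*
 * Handler for the 'Z'/'z' (insert/remove breakpoint) packets, which
 * have the form "[Zz]<type>,<addr>,<kind>"; e.g. a hypothetical
 * "Z0,ffffffff80321000,1" inserts a software (type 0) breakpoint.  A
 * trailing ";..." would carry breakpoint conditions or commands, which
 * this stub does not advertise support for and answers with an empty
 * response.
 */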
parse_breakpoint(const uint8_t *data, size_t len)
	insert = data[0] == 'Z';

	/* Parse and consume type. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
	type = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume kind. */
	cp = memchr(data, ';', len);
		/*
		 * We do not advertise support for either the
		 * ConditionalBreakpoints or BreakpointCommands
		 * features, so we should not be getting conditions or
		 * commands from the remote end.
		 */
		send_empty_response();
	kind = parse_integer(data, len);

	update_sw_breakpoint(gva, kind, insert);

	send_empty_response();
command_equals(const uint8_t *data, size_t len, const char *cmd)
	if (strlen(cmd) > len)
	return (memcmp(data, cmd, strlen(cmd)) == 0);
check_features(const uint8_t *data, size_t len)
	char *feature, *next_feature, *str, *value;

	str = malloc(len + 1);
	memcpy(str, data, len);

	while ((feature = strsep(&next_feature, ";")) != NULL) {
		/*
		 * Null features shouldn't exist, but skip if they
		 * do.
		 */
		if (strcmp(feature, "") == 0)

		/*
		 * Look for the value or supported / not supported
		 * flag.
		 */
		value = strchr(feature, '=');
		if (value != NULL) {

			value = feature + strlen(feature) - 1;

			/*
			 * This is really a protocol error,
			 * but we just ignore malformed
			 * features for ease of
			 * implementation.
			 */

		if (strcmp(feature, "swbreak") == 0)
			swbreak_enabled = supported;

	/* This is an arbitrary limit. */
	append_string("PacketSize=4096");
	append_string(";swbreak+");
gdb_query(const uint8_t *data, size_t len)

	if (command_equals(data, len, "qAttached")) {
	} else if (command_equals(data, len, "qC")) {
		append_string("QC");
		append_integer(cur_vcpu + 1);
	} else if (command_equals(data, len, "qfThreadInfo")) {
		if (CPU_EMPTY(&vcpus_active)) {
		mask = vcpus_active;
		while (!CPU_EMPTY(&mask)) {
			vcpu = CPU_FFS(&mask) - 1;
			CPU_CLR(vcpu, &mask);
			append_integer(vcpu + 1);
	} else if (command_equals(data, len, "qsThreadInfo")) {
	} else if (command_equals(data, len, "qSupported")) {
		data += strlen("qSupported");
		len -= strlen("qSupported");
		check_features(data, len);
	} else if (command_equals(data, len, "qThreadExtraInfo")) {
		data += strlen("qThreadExtraInfo");
		len -= strlen("qThreadExtraInfo");

		tid = parse_threadid(data + 1, len - 1);
		if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
		snprintf(buf, sizeof(buf), "vCPU %d", tid - 1);
		append_asciihex(buf);

	send_empty_response();
handle_command(const uint8_t *data, size_t len)

	/* Reject packets with a sequence-id. */
	if (len >= 3 && data[0] >= '0' && data[0] <= '9' &&
	    data[1] >= '0' && data[1] <= '9' && data[2] == ':') {
		send_empty_response();

		/* TODO: Resume any stopped CPUs. */

		if (data[1] != 'g' && data[1] != 'c') {
		tid = parse_threadid(data + 2, len - 2);

		if (CPU_EMPTY(&vcpus_active)) {
		if (tid == -1 || tid == 0)
			cur_vcpu = CPU_FFS(&vcpus_active) - 1;
		else if (CPU_ISSET(tid - 1, &vcpus_active))

		gdb_read_mem(data, len);

		gdb_write_mem(data, len);

		tid = parse_threadid(data + 1, len - 1);
		if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {

		gdb_query(data, len);

		/* Don't send a reply until a stop occurs. */
		if (!gdb_step_vcpu(cur_vcpu)) {
			send_error(EOPNOTSUPP);

		parse_breakpoint(data, len);

	case 'G': /* TODO */
		/* Handle 'vCont' */
	case 'p': /* TODO */
	case 'P': /* TODO */
	case 'Q': /* TODO */
	case 't': /* TODO */
	case 'X': /* TODO */
		send_empty_response();
/* Check for a valid packet in the command buffer. */
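/*
 * Remote protocol packets are framed as "$<payload>#<checksum>", where
 * <checksum> is the sum of the payload bytes modulo 256 encoded as two
 * hex digits; for example "$qC#b4" carries the payload "qC"
 * (0x71 + 0x43 == 0xb4).  '+' and '-' acknowledge or reject the
 * previous response, and a raw 0x03 byte is the Ctrl-C interrupt.
 */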
check_command(int fd)
	uint8_t *head, *hash, *p, sum;

	avail = cur_comm.len;

	head = io_buffer_head(&cur_comm);

		debug("<- Ctrl-C\n");
		io_buffer_consume(&cur_comm, 1);
		gdb_suspend_vcpus();

		/* ACK of previous response. */
		if (response_pending())
			io_buffer_reset(&cur_resp);
		io_buffer_consume(&cur_comm, 1);
		if (stopped_vcpu != -1 && report_next_stop) {
			send_pending_data(fd);

		/* NACK of previous response. */
		if (response_pending()) {
			cur_resp.len += cur_resp.start;
			if (cur_resp.data[0] == '+')
				io_buffer_advance(&cur_resp, 1);
			debug("-> %.*s\n", (int)cur_resp.len,
			    io_buffer_head(&cur_resp));
		io_buffer_consume(&cur_comm, 1);
		send_pending_data(fd);

		if (response_pending()) {
			warnx("New GDB command while response in "
			    "progress");
			io_buffer_reset(&cur_resp);

		/* Is packet complete? */
		hash = memchr(head, '#', avail);
		plen = (hash - head + 1) + 2;

		debug("<- %.*s\n", (int)plen, head);

		/* Verify checksum. */
		for (sum = 0, p = head + 1; p < hash; p++)
			sum += *p;
		if (sum != parse_byte(hash + 1)) {
			io_buffer_consume(&cur_comm, plen);
			send_pending_data(fd);

		handle_command(head + 1, hash - (head + 1));
		io_buffer_consume(&cur_comm, plen);
		if (!response_pending())
		send_pending_data(fd);

		/* XXX: Possibly drop connection instead. */
		debug("-> %02x\n", *head);
		io_buffer_consume(&cur_comm, 1);
gdb_readable(int fd, enum ev_type event, void *arg)

	if (ioctl(fd, FIONREAD, &pending) == -1) {
		warn("FIONREAD on GDB socket");

	/*
	 * 'pending' might be zero due to EOF.  We need to call read
	 * with a non-zero length to detect EOF.
	 */

	/* Ensure there is room in the command buffer. */
	io_buffer_grow(&cur_comm, pending);
	assert(io_buffer_avail(&cur_comm) >= pending);

	nread = read(fd, io_buffer_tail(&cur_comm), io_buffer_avail(&cur_comm));
	} else if (nread == -1) {
		if (errno == EAGAIN)
		warn("Read from GDB socket");
	cur_comm.len += nread;
	pthread_mutex_lock(&gdb_lock);
	pthread_mutex_unlock(&gdb_lock);

gdb_writable(int fd, enum ev_type event, void *arg)
	send_pending_data(fd);
new_connection(int fd, enum ev_type event, void *arg)

	s = accept4(fd, NULL, NULL, SOCK_NONBLOCK);
		err(1, "Failed accepting initial GDB connection");

	/* Silently ignore errors post-startup. */

	if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)) ==
	    -1)
		warn("Failed to disable SIGPIPE for GDB connection");

	pthread_mutex_lock(&gdb_lock);
		warnx("Ignoring additional GDB connection.");

	read_event = mevent_add(s, EVF_READ, gdb_readable, NULL);
	if (read_event == NULL) {
			err(1, "Failed to setup initial GDB connection");
		pthread_mutex_unlock(&gdb_lock);

	write_event = mevent_add(s, EVF_WRITE, gdb_writable, NULL);
	if (write_event == NULL) {
			err(1, "Failed to setup initial GDB connection");
		mevent_delete_close(read_event);

	/* Break on attach. */
	report_next_stop = false;
	gdb_suspend_vcpus();
	pthread_mutex_unlock(&gdb_lock);
#ifndef WITHOUT_CAPSICUM
limit_gdb_socket(int s)
	cap_rights_t rights;
	unsigned long ioctls[] = { FIONREAD };

	cap_rights_init(&rights, CAP_ACCEPT, CAP_EVENT, CAP_READ, CAP_WRITE,
	    CAP_SETSOCKOPT, CAP_IOCTL);
	if (caph_rights_limit(s, &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
	if (caph_ioctls_limit(s, ioctls, nitems(ioctls)) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
init_gdb(struct vmctx *_ctx, int sport, bool wait)
	struct sockaddr_in sin;
	int error, flags, optval, s;

	debug("==> starting on %d, %swaiting\n", sport, wait ? "" : "not ");

	error = pthread_mutex_init(&gdb_lock, NULL);
		errc(1, error, "gdb mutex init");
	error = pthread_cond_init(&idle_vcpus, NULL);
		errc(1, error, "gdb cv init");

	s = socket(PF_INET, SOCK_STREAM, 0);
		err(1, "gdb socket create");

	(void)setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));

	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	sin.sin_port = htons(sport);

	if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		err(1, "gdb socket bind");

	if (listen(s, 1) < 0)
		err(1, "gdb socket listen");

	TAILQ_INIT(&breakpoints);
	vcpu_state = calloc(guest_ncpus, sizeof(*vcpu_state));

	/*
	 * Set vcpu 0 in vcpus_suspended.  This will trigger the
	 * logic in gdb_cpu_add() to suspend the first vcpu before
	 * it starts execution.  The vcpu will remain suspended
	 * until a debugger connects.
	 */
	CPU_SET(0, &vcpus_suspended);

	flags = fcntl(s, F_GETFL);
	if (fcntl(s, F_SETFL, flags | O_NONBLOCK) == -1)
		err(1, "Failed to mark gdb socket non-blocking");

#ifndef WITHOUT_CAPSICUM
	limit_gdb_socket(s);
#endif
	mevent_add(s, EVF_READ, new_connection, NULL);