2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2017-2018 John H. Baldwin <jhb@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #ifndef WITHOUT_CAPSICUM
33 #include <sys/capsicum.h>
35 #include <sys/endian.h>
36 #include <sys/ioctl.h>
38 #include <sys/queue.h>
39 #include <sys/socket.h>
40 #include <machine/atomic.h>
41 #include <machine/specialreg.h>
42 #include <machine/vmm.h>
43 #include <netinet/in.h>
45 #ifndef WITHOUT_CAPSICUM
46 #include <capsicum_helpers.h>
52 #include <pthread_np.h>
67 * GDB_SIGNAL_* numbers are part of the GDB remote protocol. Most stops
70 #define GDB_SIGNAL_TRAP 5
72 static void gdb_resume_vcpus(void);
73 static void check_command(int fd);
75 static struct mevent *read_event, *write_event;
77 static cpuset_t vcpus_active, vcpus_suspended, vcpus_waiting;
78 static pthread_mutex_t gdb_lock;
79 static pthread_cond_t idle_vcpus;
80 static bool first_stop, report_next_stop, swbreak_enabled;
83 * An I/O buffer contains 'capacity' bytes of room at 'data'. For a
84 * read buffer, 'start' is unused and 'len' contains the number of
85 * valid bytes in the buffer. For a write buffer, 'start' is set to
86 * the index of the next byte in 'data' to send, and 'len' contains
87 * the remaining number of valid bytes to send.
99 TAILQ_ENTRY(breakpoint) link;
103 * When a vCPU stops to due to an event that should be reported to the
104 * debugger, information about the event is stored in this structure.
105 * The vCPU thread then sets 'stopped_vcpu' if it is not already set
106 * and stops other vCPUs so the event can be reported. The
107 * report_stop() function reports the event for the 'stopped_vcpu'
108 * vCPU. When the debugger resumes execution via continue or step,
109 * the event for 'stopped_vcpu' is cleared. vCPUs will loop in their
110 * event handlers until the associated event is reported or disabled.
112 * An idle vCPU will have all of the boolean fields set to false.
114 * When a vCPU is stepped, 'stepping' is set to true when the vCPU is
115 * released to execute the stepped instruction. When the vCPU reports
116 * the stepping trap, 'stepped' is set.
118 * When a vCPU hits a breakpoint set by the debug server,
119 * 'hit_swbreak' is set to true.
127 static struct io_buffer cur_comm, cur_resp;
128 static uint8_t cur_csum;
129 static struct vmctx *ctx;
130 static int cur_fd = -1;
131 static TAILQ_HEAD(, breakpoint) breakpoints;
132 static struct vcpu_state *vcpu_state;
133 static int cur_vcpu, stopped_vcpu;
134 static bool gdb_active = false;
136 const int gdb_regset[] = {
163 const int gdb_regsize[] = {
/*
 * printf-style logging to /tmp/bhyve_gdb.log, lazily opened on first call.
 * NOTE(review): interior lines are elided in this extract (va_start/va_end,
 * error handling) — verify against the full source.
 */
194 static void __printflike(1, 2)
195 debug(const char *fmt, ...)
197 static FILE *logfile;
200 if (logfile == NULL) {
201 logfile = fopen("/tmp/bhyve_gdb.log", "w");
/* Restrict the log stream to write-only under Capsicum sandboxing. */
204 #ifndef WITHOUT_CAPSICUM
205 if (caph_limit_stream(fileno(logfile), CAPH_WRITE) == -1) {
214 vfprintf(logfile, fmt, ap);
221 static void remove_all_sw_breakpoints(void);
/*
 * Fill *paging with the vCPU's current paging configuration, derived from
 * CR0/CR3/CR4/EFER (regs[0..3], judging by the EFER_LMA/CR0_PE/CR4_PAE
 * tests below).  Returns -1 on failure to read the register set
 * (some lines elided in this extract).
 */
224 guest_paging_info(int vcpu, struct vm_guest_paging *paging)
227 const int regset[4] = {
234 if (vm_get_register_set(ctx, vcpu, nitems(regset), regset, regs) == -1)
238 * For the debugger, always pretend to be the kernel (CPL 0),
239 * and if long-mode is enabled, always parse addresses as if
242 paging->cr3 = regs[1];
/* CPU mode: long mode if EFER.LMA, else protected if CR0.PE, else real. */
244 if (regs[3] & EFER_LMA)
245 paging->cpu_mode = CPU_MODE_64BIT;
246 else if (regs[0] & CR0_PE)
247 paging->cpu_mode = CPU_MODE_PROTECTED;
249 paging->cpu_mode = CPU_MODE_REAL;
/* Paging mode: flat (no CR0.PG), 32-bit, 4/5-level long mode, or PAE. */
250 if (!(regs[0] & CR0_PG))
251 paging->paging_mode = PAGING_MODE_FLAT;
252 else if (!(regs[2] & CR4_PAE))
253 paging->paging_mode = PAGING_MODE_32;
254 else if (regs[3] & EFER_LME)
255 paging->paging_mode = (regs[2] & CR4_LA57) ?
256 PAGING_MODE_64_LA57 : PAGING_MODE_64;
258 paging->paging_mode = PAGING_MODE_PAE;
263 * Map a guest virtual address to a physical address (for a given vcpu).
264 * If a guest virtual address is valid, return 1. If the address is
265 * not valid, return 0. If an error occurs obtaining the mapping,
/*
 * Translate a guest virtual address to a guest physical address for the
 * given vCPU using its current paging state.  Per the comment above this
 * function in the full source: returns 1 if valid, 0 if not mapped, -1
 * on error (tail of the function elided in this extract).
 */
269 guest_vaddr2paddr(int vcpu, uint64_t vaddr, uint64_t *paddr)
271 struct vm_guest_paging paging;
274 if (guest_paging_info(vcpu, &paging) == -1)
278 * Always use PROT_READ. We really care if the VA is
279 * accessible, not if the current vCPU can write.
281 if (vm_gla2gpa_nofault(ctx, vcpu, &paging, vaddr, PROT_READ, paddr,
/*
 * Small accessor/maintenance helpers for struct io_buffer (see the layout
 * comment near the top of the file): 'start' is the next byte to send,
 * 'len' the count of valid bytes.  Several bodies are elided in this
 * extract.
 */
290 io_buffer_reset(struct io_buffer *io)
297 /* Available room for adding data. */
299 io_buffer_avail(struct io_buffer *io)
302 return (io->capacity - (io->start + io->len));
/* First valid byte. */
306 io_buffer_head(struct io_buffer *io)
309 return (io->data + io->start);
/* One past the last valid byte (where new data is appended). */
313 io_buffer_tail(struct io_buffer *io)
316 return (io->data + io->start + io->len);
/* Drop 'amount' bytes from the front without compacting. */
320 io_buffer_advance(struct io_buffer *io, size_t amount)
323 assert(amount <= io->len);
/* Drop 'amount' bytes and compact remaining data to the buffer start. */
329 io_buffer_consume(struct io_buffer *io, size_t amount)
332 io_buffer_advance(io, amount);
339 * XXX: Consider making this move optional and compacting on a
340 * future read() before realloc().
342 memmove(io->data, io_buffer_head(io), io->len);
/*
 * Ensure at least 'newsize' bytes of free room, reallocating if needed.
 * Exits the process on allocation failure.
 */
347 io_buffer_grow(struct io_buffer *io, size_t newsize)
350 size_t avail, new_cap;
352 avail = io_buffer_avail(io);
353 if (newsize <= avail)
356 new_cap = io->capacity + (newsize - avail);
357 new_data = realloc(io->data, new_cap);
358 if (new_data == NULL)
359 err(1, "Failed to grow GDB I/O buffer");
361 io->capacity = new_cap;
/*
 * True if cur_resp still holds unsent data.  A lone '+' (a bare ACK) is
 * not treated as a pending response.
 */
365 response_pending(void)
368 if (cur_resp.start == 0 && cur_resp.len == 0)
370 if (cur_resp.start + cur_resp.len == 1 && cur_resp.data[0] == '+')
/*
 * Tear down the active GDB connection: remove the mevent handlers, reset
 * both I/O buffers, remove all software breakpoints, clear per-vCPU event
 * state, and resume any stopped vCPUs.  Caller-visible state guarded by
 * gdb_lock.  (Some lines elided in this extract.)
 */
376 close_connection(void)
380 * XXX: This triggers a warning because mevent does the close
381 * before the EV_DELETE.
383 pthread_mutex_lock(&gdb_lock);
384 mevent_delete(write_event);
385 mevent_delete_close(read_event);
388 io_buffer_reset(&cur_comm);
389 io_buffer_reset(&cur_resp);
392 remove_all_sw_breakpoints();
394 /* Clear any pending events. */
395 memset(vcpu_state, 0, guest_ncpus * sizeof(*vcpu_state));
397 /* Resume any stopped vCPUs. */
399 pthread_mutex_unlock(&gdb_lock);
/* Convert a nibble (0-15) to its lowercase ASCII hex digit. */
403 hex_digit(uint8_t nibble)
407 return (nibble + '0');
409 return (nibble + 'a' - 10);
/* Convert an ASCII hex digit (either case) back to its value. */
413 parse_digit(uint8_t v)
416 if (v >= '0' && v <= '9')
418 if (v >= 'a' && v <= 'f')
419 return (v - 'a' + 10);
420 if (v >= 'A' && v <= 'F')
421 return (v - 'A' + 10);
425 /* Parses big-endian hexadecimal. */
427 parse_integer(const uint8_t *p, size_t len)
434 v |= parse_digit(*p);
/* Parse two hex characters into one byte value. */
442 parse_byte(const uint8_t *p)
445 return (parse_digit(p[0]) << 4 | parse_digit(p[1]));
/*
 * Write as much of cur_resp to the socket as it will accept, enabling the
 * write event when data remains and disabling it when the buffer drains.
 * (Error-path lines elided in this extract.)
 */
449 send_pending_data(int fd)
453 if (cur_resp.len == 0) {
454 mevent_disable(write_event);
457 nwritten = write(fd, io_buffer_head(&cur_resp), cur_resp.len);
458 if (nwritten == -1) {
459 warn("Write to GDB socket failed");
462 io_buffer_advance(&cur_resp, nwritten);
463 if (cur_resp.len == 0)
464 mevent_disable(write_event);
466 mevent_enable(write_event);
470 /* Append a single character to the output buffer. */
472 send_char(uint8_t data)
474 io_buffer_grow(&cur_resp, 1);
475 *io_buffer_tail(&cur_resp) = data;
479 /* Append an array of bytes to the output buffer. */
481 send_data(const uint8_t *data, size_t len)
484 io_buffer_grow(&cur_resp, len);
485 memcpy(io_buffer_tail(&cur_resp), data, len);
/* Format one byte as two lowercase hex characters into buf[0..1]. */
490 format_byte(uint8_t v, uint8_t *buf)
493 buf[0] = hex_digit(v >> 4);
494 buf[1] = hex_digit(v & 0xf);
498 * Append a single byte (formatted as two hex characters) to the
507 send_data(buf, sizeof(buf));
/* Log the full outgoing response (part of packet finalization). */
524 debug("-> %.*s\n", (int)cur_resp.len, io_buffer_head(&cur_resp));
528 * Append a single character (for the packet payload) and update the
532 append_char(uint8_t v)
540 * Append an array of bytes (for the packet payload) and update the
544 append_packet_data(const uint8_t *data, size_t len)
547 send_data(data, len);
/* Append a NUL-terminated string to the packet payload. */
556 append_string(const char *str)
559 append_packet_data(str, strlen(str));
/* Append one byte as two hex characters to the packet payload. */
563 append_byte(uint8_t v)
568 append_packet_data(buf, sizeof(buf));
/*
 * Append 'len' bytes of 'value' in target (native, little-endian x86)
 * byte order, as GDB expects for register values.
 */
572 append_unsigned_native(uintmax_t value, size_t len)
576 for (i = 0; i < len; i++) {
/* Append 'len' bytes of 'value' in big-endian order. */
583 append_unsigned_be(uintmax_t value, size_t len)
588 for (i = 0; i < len; i++) {
589 format_byte(value, buf + (len - i - 1) * 2);
592 append_packet_data(buf, sizeof(buf));
/* Append an integer using the minimum number of big-endian bytes. */
596 append_integer(unsigned int value)
602 append_unsigned_be(value, (fls(value) + 7) / 8);
/* Append a string hex-encoded (each char as two hex digits). */
606 append_asciihex(const char *str)
609 while (*str != '\0') {
/* Send the empty packet "$#00" (i.e. "command not supported"). */
616 send_empty_response(void)
/* Send an "Exx" error response with the given errno value. */
624 send_error(int error)
/*
 * Parse a GDB thread-id: "0" -> any thread, "-1" -> all threads
 * (sentinel values elided in this extract), otherwise a hex vCPU id + 1.
 */
643 parse_threadid(const uint8_t *data, size_t len)
646 if (len == 1 && *data == '0')
648 if (len == 2 && memcmp(data, "-1", 2) == 0)
652 return (parse_integer(data, len));
656 * Report the current stop event to the debugger. If the stop is due
657 * to an event triggered on a specific vCPU such as a breakpoint or
658 * stepping trap, stopped_vcpu will be set to the vCPU triggering the
659 * stop. If 'set_cur_vcpu' is true, then cur_vcpu will be updated to
660 * the reporting vCPU for vCPU events.
663 report_stop(bool set_cur_vcpu)
665 struct vcpu_state *vs;
/* No specific vCPU: report a plain SIGTRAP stop. */
668 if (stopped_vcpu == -1) {
670 append_byte(GDB_SIGNAL_TRAP);
672 vs = &vcpu_state[stopped_vcpu];
674 cur_vcpu = stopped_vcpu;
/* T-stop packet: signal + "thread:<id>;" + reason. */
676 append_byte(GDB_SIGNAL_TRAP);
677 append_string("thread:");
678 append_integer(stopped_vcpu + 1);
680 if (vs->hit_swbreak) {
681 debug("$vCPU %d reporting swbreak\n", stopped_vcpu);
/* "swbreak:;" is only sent when the client negotiated it (swbreak_enabled). */
683 append_string("swbreak:;");
684 } else if (vs->stepped)
685 debug("$vCPU %d reporting step\n", stopped_vcpu);
687 debug("$vCPU %d reporting ???\n", stopped_vcpu);
690 report_next_stop = false;
694 * If this stop is due to a vCPU event, clear that event to mark it as
/* (discard_stop: clears the pending event and re-arms stop reporting.) */
700 struct vcpu_state *vs;
702 if (stopped_vcpu != -1) {
703 vs = &vcpu_state[stopped_vcpu];
704 vs->hit_swbreak = false;
708 report_next_stop = true;
/*
 * Called once all suspended vCPUs have checked in; reports the stop to
 * the debugger if one is expected (first stop or negotiated reporting).
 * (Some branches elided in this extract.)
 */
712 gdb_finish_suspend_vcpus(void)
718 } else if (report_next_stop) {
719 assert(!response_pending());
721 send_pending_data(cur_fd);
726 * vCPU threads invoke this function whenever the vCPU enters the
727 * debug server to pause or report an event. vCPU threads wait here
728 * as long as the debug server keeps them suspended.
/* Caller must hold gdb_lock (released/reacquired by the cond wait). */
731 _gdb_cpu_suspend(int vcpu, bool report_stop)
734 debug("$vCPU %d suspending\n", vcpu);
735 CPU_SET(vcpu, &vcpus_waiting);
/* Last vCPU to arrive triggers the stop report to the debugger. */
736 if (report_stop && CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
737 gdb_finish_suspend_vcpus();
738 while (CPU_ISSET(vcpu, &vcpus_suspended))
739 pthread_cond_wait(&idle_vcpus, &gdb_lock);
740 CPU_CLR(vcpu, &vcpus_waiting);
741 debug("$vCPU %d resuming\n", vcpu);
745 * Invoked at the start of a vCPU thread's execution to inform the
746 * debug server about the new thread.
749 gdb_cpu_add(int vcpu)
754 debug("$vCPU %d starting\n", vcpu);
755 pthread_mutex_lock(&gdb_lock);
756 assert(vcpu < guest_ncpus);
757 CPU_SET(vcpu, &vcpus_active);
/* Inherit breakpoint exiting if any breakpoints already exist. */
758 if (!TAILQ_EMPTY(&breakpoints)) {
759 vm_set_capability(ctx, vcpu, VM_CAP_BPT_EXIT, 1);
760 debug("$vCPU %d enabled breakpoint exits\n", vcpu);
764 * If a vcpu is added while vcpus are stopped, suspend the new
765 * vcpu so that it will pop back out with a debug exit before
766 * executing the first instruction.
768 if (!CPU_EMPTY(&vcpus_suspended)) {
769 CPU_SET(vcpu, &vcpus_suspended);
770 _gdb_cpu_suspend(vcpu, false);
772 pthread_mutex_unlock(&gdb_lock);
776 * Invoked by vCPU before resuming execution. This enables stepping
777 * if the vCPU is marked as stepping.
780 gdb_cpu_resume(int vcpu)
782 struct vcpu_state *vs;
785 vs = &vcpu_state[vcpu];
788 * Any pending event should already be reported before
791 assert(vs->hit_swbreak == false);
792 assert(vs->stepped == false);
/* Single-step is implemented via the VT-x MTRAP exit capability. */
794 error = vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 1);
800 * Handler for VM_EXITCODE_DEBUG used to suspend a vCPU when the guest
801 * has been suspended due to an event on different vCPU or in response
802 * to a guest-wide suspend such as Ctrl-C or the stop on attach.
805 gdb_cpu_suspend(int vcpu)
810 pthread_mutex_lock(&gdb_lock);
811 _gdb_cpu_suspend(vcpu, true);
812 gdb_cpu_resume(vcpu);
813 pthread_mutex_unlock(&gdb_lock);
/*
 * Suspend all active vCPUs (caller holds gdb_lock).  If every vCPU is
 * already waiting, finish the suspend immediately; otherwise
 * gdb_finish_suspend_vcpus() runs when the last vCPU checks in.
 */
817 gdb_suspend_vcpus(void)
820 assert(pthread_mutex_isowned_np(&gdb_lock));
821 debug("suspending all CPUs\n");
822 vcpus_suspended = vcpus_active;
823 vm_suspend_cpu(ctx, -1);
824 if (CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
825 gdb_finish_suspend_vcpus();
829 * Handler for VM_EXITCODE_MTRAP reported when a vCPU single-steps via
830 * the VT-x-specific MTRAP exit.
833 gdb_cpu_mtrap(int vcpu)
835 struct vcpu_state *vs;
839 debug("$vCPU %d MTRAP\n", vcpu);
840 pthread_mutex_lock(&gdb_lock);
841 vs = &vcpu_state[vcpu];
/* The step completed: disarm the trap and report it. */
843 vs->stepping = false;
845 vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 0);
/* Loop until this vCPU's step event has been reported to the debugger. */
846 while (vs->stepped) {
847 if (stopped_vcpu == -1) {
848 debug("$vCPU %d reporting step\n", vcpu);
852 _gdb_cpu_suspend(vcpu, true);
854 gdb_cpu_resume(vcpu);
856 pthread_mutex_unlock(&gdb_lock);
/* Look up a software breakpoint by guest physical address, or NULL. */
859 static struct breakpoint *
860 find_breakpoint(uint64_t gpa)
862 struct breakpoint *bp;
864 TAILQ_FOREACH(bp, &breakpoints, link) {
/*
 * Handler for a guest breakpoint (#BP) exit.  If the address matches one
 * of our software breakpoints, report the stop to the debugger; otherwise
 * re-inject the #BP into the guest.  (Some lines elided in this extract.)
 */
872 gdb_cpu_breakpoint(int vcpu, struct vm_exit *vmexit)
874 struct breakpoint *bp;
875 struct vcpu_state *vs;
/* A #BP with no debugger attached is unexpected. */
880 fprintf(stderr, "vm_loop: unexpected VMEXIT_DEBUG\n");
883 pthread_mutex_lock(&gdb_lock);
884 error = guest_vaddr2paddr(vcpu, vmexit->rip, &gpa);
886 bp = find_breakpoint(gpa);
888 vs = &vcpu_state[vcpu];
889 assert(vs->stepping == false);
890 assert(vs->stepped == false);
891 assert(vs->hit_swbreak == false);
892 vs->hit_swbreak = true;
/* Rewind RIP to the breakpoint address so GDB sees the right PC. */
893 vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, vmexit->rip);
895 if (stopped_vcpu == -1) {
896 debug("$vCPU %d reporting breakpoint at rip %#lx\n", vcpu,
/* Wait until the event is reported (hit_swbreak cleared) or dropped. */
901 _gdb_cpu_suspend(vcpu, true);
902 if (!vs->hit_swbreak) {
903 /* Breakpoint reported. */
906 bp = find_breakpoint(gpa);
908 /* Breakpoint was removed. */
909 vs->hit_swbreak = false;
913 gdb_cpu_resume(vcpu);
/* Not ours: forward the #BP to the guest. */
915 debug("$vCPU %d injecting breakpoint at rip %#lx\n", vcpu,
917 error = vm_set_register(ctx, vcpu,
918 VM_REG_GUEST_ENTRY_INST_LENGTH, vmexit->u.bpt.inst_length);
920 error = vm_inject_exception(ctx, vcpu, IDT_BP, 0, 0, 0);
923 pthread_mutex_unlock(&gdb_lock);
/*
 * Start a single-step of one vCPU: mark it stepping and release it.
 * NOTE(review): the MTRAP capability probe result ('val'/error checks)
 * is elided in this extract — presumably stepping is refused when the
 * capability is unsupported; verify against the full source.
 */
927 gdb_step_vcpu(int vcpu)
931 debug("$vCPU %d step\n", vcpu);
932 error = vm_get_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, &val);
937 vcpu_state[vcpu].stepping = true;
938 vm_resume_cpu(ctx, vcpu);
939 CPU_CLR(vcpu, &vcpus_suspended);
940 pthread_cond_broadcast(&idle_vcpus);
/* Resume all vCPUs and wake any threads parked in _gdb_cpu_suspend(). */
945 gdb_resume_vcpus(void)
948 assert(pthread_mutex_isowned_np(&gdb_lock));
949 vm_resume_cpu(ctx, -1);
950 debug("resuming all CPUs\n");
951 CPU_ZERO(&vcpus_suspended);
952 pthread_cond_broadcast(&idle_vcpus);
/*
 * (Fragment of gdb_read_regs: the 'g' packet.)  Read the full gdb_regset
 * from the current vCPU and append each register in native byte order
 * using the per-register size table.
 */
958 uint64_t regvals[nitems(gdb_regset)];
961 if (vm_get_register_set(ctx, cur_vcpu, nitems(gdb_regset),
962 gdb_regset, regvals) == -1) {
967 for (i = 0; i < nitems(regvals); i++)
968 append_unsigned_native(regvals[i], gdb_regsize[i]);
/*
 * Handle the 'm' packet: read guest memory at a virtual address.
 * Packet format is "m<addr>,<len>".  Reads are performed a page at a
 * time, via direct access for guest RAM or aligned MMIO reads otherwise.
 * (Many interior lines elided in this extract.)
 */
973 gdb_read_mem(const uint8_t *data, size_t len)
975 uint64_t gpa, gva, val;
977 size_t resid, todo, bytes;
985 /* Parse and consume address. */
986 cp = memchr(data, ',', len);
987 if (cp == NULL || cp == data) {
991 gva = parse_integer(data, cp - data);
992 len -= (cp - data) + 1;
993 data += (cp - data) + 1;
/* Parse length (remaining bytes of the packet). */
996 resid = parse_integer(data, len);
1000 error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
1016 /* Read bytes from current page. */
1017 todo = getpagesize() - gpa % getpagesize();
1021 cp = paddr_guest2host(ctx, gpa, todo);
1024 * If this page is guest RAM, read it a byte
1041 * If this page isn't guest RAM, try to handle
1042 * it via MMIO. For MMIO requests, use
1043 * aligned reads of words when possible.
/* Pick the widest aligned access (1/2/4 bytes) allowed by gpa/todo. */
1046 if (gpa & 1 || todo == 1)
1048 else if (gpa & 2 || todo == 2)
1052 error = read_mem(ctx, cur_vcpu, gpa, &val,
/* Loop invariant: after each page, gpa is page-aligned unless done. */
1077 assert(resid == 0 || gpa % getpagesize() == 0);
/*
 * Handle the 'M' packet: write guest memory at a virtual address.
 * Packet format is "M<addr>,<len>:<hex bytes>".  Mirrors gdb_read_mem:
 * page-at-a-time, direct stores for guest RAM, aligned MMIO writes
 * otherwise.  (Many interior lines elided in this extract.)
 */
1085 gdb_write_mem(const uint8_t *data, size_t len)
1087 uint64_t gpa, gva, val;
1089 size_t resid, todo, bytes;
1096 /* Parse and consume address. */
1097 cp = memchr(data, ',', len);
1098 if (cp == NULL || cp == data) {
1102 gva = parse_integer(data, cp - data);
1103 len -= (cp - data) + 1;
1104 data += (cp - data) + 1;
1106 /* Parse and consume length. */
1107 cp = memchr(data, ':', len);
1108 if (cp == NULL || cp == data) {
1112 resid = parse_integer(data, cp - data);
1113 len -= (cp - data) + 1;
1114 data += (cp - data) + 1;
1116 /* Verify the available bytes match the length. */
1117 if (len != resid * 2) {
1123 error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
1133 /* Write bytes to current page. */
1134 todo = getpagesize() - gpa % getpagesize();
1138 cp = paddr_guest2host(ctx, gpa, todo);
1141 * If this page is guest RAM, write it a byte
1146 *cp = parse_byte(data);
1157 * If this page isn't guest RAM, try to handle
1158 * it via MMIO. For MMIO requests, use
1159 * aligned writes of words when possible.
/* Pick access width; multi-byte hex is big-endian, hence the be*toh. */
1162 if (gpa & 1 || todo == 1) {
1164 val = parse_byte(data);
1165 } else if (gpa & 2 || todo == 2) {
1167 val = be16toh(parse_integer(data, 4));
1170 val = be32toh(parse_integer(data, 8));
1172 error = write_mem(ctx, cur_vcpu, gpa, val,
/* Loop invariant: after each page, gpa is page-aligned unless done. */
1187 assert(resid == 0 || gpa % getpagesize() == 0);
/*
 * Enable or disable VM_CAP_BPT_EXIT on every active vCPU.  Iterates a
 * copy of vcpus_active via CPU_FFS.  NOTE(review): the failure return
 * path is elided in this extract — the < 0 branch's body is missing.
 */
1194 set_breakpoint_caps(bool enable)
1199 mask = vcpus_active;
1200 while (!CPU_EMPTY(&mask)) {
1201 vcpu = CPU_FFS(&mask) - 1;
1202 CPU_CLR(vcpu, &mask);
1203 if (vm_set_capability(ctx, vcpu, VM_CAP_BPT_EXIT,
1204 enable ? 1 : 0) < 0)
1206 debug("$vCPU %d %sabled breakpoint exits\n", vcpu,
1207 enable ? "en" : "dis");
/*
 * Remove every software breakpoint: restore each shadowed original
 * instruction byte in guest RAM, free the list entries (free() elided
 * in this extract), and turn off breakpoint exits.
 */
1213 remove_all_sw_breakpoints(void)
1215 struct breakpoint *bp, *nbp;
1218 if (TAILQ_EMPTY(&breakpoints))
1221 TAILQ_FOREACH_SAFE(bp, &breakpoints, link, nbp) {
1222 debug("remove breakpoint at %#lx\n", bp->gpa);
1223 cp = paddr_guest2host(ctx, bp->gpa, 1);
1224 *cp = bp->shadow_inst;
1225 TAILQ_REMOVE(&breakpoints, bp, link);
1228 TAILQ_INIT(&breakpoints);
1229 set_breakpoint_caps(false);
/*
 * Insert or remove a software breakpoint at a guest virtual address by
 * patching an INT3 (0xcc) into guest RAM and saving the original byte.
 * Only guest RAM is patchable.  (Some lines elided in this extract.)
 */
1233 update_sw_breakpoint(uint64_t gva, int kind, bool insert)
1235 struct breakpoint *bp;
1245 error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
1255 cp = paddr_guest2host(ctx, gpa, 1);
1257 /* Only permit breakpoints in guest RAM. */
1263 /* Find any existing breakpoint. */
1264 bp = find_breakpoint(gpa);
1267 * Silently ignore duplicate commands since the protocol
1268 * requires these packets to be idempotent.
/* First breakpoint: enable BPT exits on all vCPUs before patching. */
1272 if (TAILQ_EMPTY(&breakpoints) &&
1273 !set_breakpoint_caps(true)) {
1274 send_empty_response();
1277 bp = malloc(sizeof(*bp));
1279 bp->shadow_inst = *cp;
1280 *cp = 0xcc; /* INT 3 */
1281 TAILQ_INSERT_TAIL(&breakpoints, bp, link);
1282 debug("new breakpoint at %#lx\n", gpa);
/* Removal: restore the shadowed byte and unlink. */
1286 debug("remove breakpoint at %#lx\n", gpa);
1287 *cp = bp->shadow_inst;
1288 TAILQ_REMOVE(&breakpoints, bp, link);
/* Last breakpoint gone: stop taking BPT exits. */
1290 if (TAILQ_EMPTY(&breakpoints))
1291 set_breakpoint_caps(false);
/*
 * Handle 'Z'/'z' packets: "[Zz]<type>,<addr>,<kind>[;...]".  Only the
 * software-breakpoint type is acted on (type check elided in this
 * extract); conditions/commands after ';' are rejected since the
 * corresponding features are not advertised.
 */
1298 parse_breakpoint(const uint8_t *data, size_t len)
/* 'Z' inserts, 'z' removes. */
1305 insert = data[0] == 'Z';
1311 /* Parse and consume type. */
1312 cp = memchr(data, ',', len);
1313 if (cp == NULL || cp == data) {
1317 type = parse_integer(data, cp - data);
1318 len -= (cp - data) + 1;
1319 data += (cp - data) + 1;
1321 /* Parse and consume address. */
1322 cp = memchr(data, ',', len);
1323 if (cp == NULL || cp == data) {
1327 gva = parse_integer(data, cp - data);
1328 len -= (cp - data) + 1;
1329 data += (cp - data) + 1;
1331 /* Parse and consume kind. */
1332 cp = memchr(data, ';', len);
1339 * We do not advertise support for either the
1340 * ConditionalBreakpoints or BreakpointCommands
1341 * features, so we should not be getting conditions or
1342 * commands from the remote end.
1344 send_empty_response();
1347 kind = parse_integer(data, len);
1353 update_sw_breakpoint(gva, kind, insert);
1356 send_empty_response();
/* True if the (unterminated) buffer begins with the command string. */
1362 command_equals(const uint8_t *data, size_t len, const char *cmd)
1365 if (strlen(cmd) > len)
1367 return (memcmp(data, cmd, strlen(cmd)) == 0);
/*
 * Parse the ';'-separated feature list of a qSupported packet, note
 * whether the client supports "swbreak", and reply with our own
 * feature set.  (Some lines elided in this extract.)
 */
1371 check_features(const uint8_t *data, size_t len)
1373 char *feature, *next_feature, *str, *value;
/* Copy into a NUL-terminated scratch string so strsep/strcmp work. */
1376 str = malloc(len + 1);
1377 memcpy(str, data, len);
1381 while ((feature = strsep(&next_feature, ";")) != NULL) {
1383 * Null features shouldn't exist, but skip if they
1386 if (strcmp(feature, "") == 0)
1390 * Look for the value or supported / not supported
1393 value = strchr(feature, '=');
1394 if (value != NULL) {
/* No '=': feature ends in '+' (supported) or '-' (not supported). */
1399 value = feature + strlen(feature) - 1;
1409 * This is really a protocol error,
1410 * but we just ignore malformed
1411 * features for ease of
1419 if (strcmp(feature, "swbreak") == 0)
1420 swbreak_enabled = supported;
1426 /* This is an arbitrary limit. */
1427 append_string("PacketSize=4096");
1428 append_string(";swbreak+");
/*
 * Dispatch 'q' (general query) packets: qAttached, qC (current thread),
 * qfThreadInfo/qsThreadInfo (thread list), qSupported (feature
 * negotiation), qThreadExtraInfo.  Unknown queries get the empty
 * response.  (Some lines elided in this extract.)
 */
1433 gdb_query(const uint8_t *data, size_t len)
1440 if (command_equals(data, len, "qAttached")) {
1444 } else if (command_equals(data, len, "qC")) {
/* Thread ids are vCPU number + 1 (0 and -1 are protocol sentinels). */
1446 append_string("QC");
1447 append_integer(cur_vcpu + 1);
1449 } else if (command_equals(data, len, "qfThreadInfo")) {
1454 if (CPU_EMPTY(&vcpus_active)) {
1458 mask = vcpus_active;
1462 while (!CPU_EMPTY(&mask)) {
1463 vcpu = CPU_FFS(&mask) - 1;
1464 CPU_CLR(vcpu, &mask);
1469 append_integer(vcpu + 1);
/* All threads fit in one reply, so qsThreadInfo always returns "l". */
1472 } else if (command_equals(data, len, "qsThreadInfo")) {
1476 } else if (command_equals(data, len, "qSupported")) {
1477 data += strlen("qSupported");
1478 len -= strlen("qSupported");
1479 check_features(data, len);
1480 } else if (command_equals(data, len, "qThreadExtraInfo")) {
1484 data += strlen("qThreadExtraInfo");
1485 len -= strlen("qThreadExtraInfo");
1490 tid = parse_threadid(data + 1, len - 1);
1491 if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
1496 snprintf(buf, sizeof(buf), "vCPU %d", tid - 1);
1498 append_asciihex(buf);
1501 send_empty_response();
/*
 * Top-level dispatch for one complete GDB packet payload.
 * NOTE(review): the sequence-id check below tests data[0] twice; the
 * upstream intent is presumably data[0] and data[1] — verify against
 * the full source before changing (this extract elides context).
 */
1505 handle_command(const uint8_t *data, size_t len)
1508 /* Reject packets with a sequence-id. */
1509 if (len >= 3 && data[0] >= '0' && data[0] <= '9' &&
1510 data[0] >= '0' && data[0] <= '9' && data[2] == ':') {
1511 send_empty_response();
1528 /* TODO: Resume any stopped CPUs. */
/* 'H' packet: set current thread for subsequent 'g'/'c' operations. */
1537 if (data[1] != 'g' && data[1] != 'c') {
1541 tid = parse_threadid(data + 2, len - 2);
1547 if (CPU_EMPTY(&vcpus_active)) {
/* tid 0/-1 mean "any": pick the lowest active vCPU. */
1551 if (tid == -1 || tid == 0)
1552 cur_vcpu = CPU_FFS(&vcpus_active) - 1;
1553 else if (CPU_ISSET(tid - 1, &vcpus_active))
1563 gdb_read_mem(data, len);
1566 gdb_write_mem(data, len);
/* 'T' packet: is the given thread alive? */
1571 tid = parse_threadid(data + 1, len - 1);
1572 if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
1580 gdb_query(data, len);
1588 /* Don't send a reply until a stop occurs. */
1589 if (!gdb_step_vcpu(cur_vcpu)) {
1590 send_error(EOPNOTSUPP);
1596 parse_breakpoint(data, len);
/* Unimplemented packets answered with the empty response. */
1601 case 'G': /* TODO */
1603 /* Handle 'vCont' */
1605 case 'p': /* TODO */
1606 case 'P': /* TODO */
1607 case 'Q': /* TODO */
1608 case 't': /* TODO */
1609 case 'X': /* TODO */
1611 send_empty_response();
1615 /* Check for a valid packet in the command buffer. */
1617 check_command(int fd)
1619 uint8_t *head, *hash, *p, sum;
1623 avail = cur_comm.len;
1626 head = io_buffer_head(&cur_comm);
/* 0x03 (Ctrl-C) is an out-of-band interrupt: suspend all vCPUs. */
1629 debug("<- Ctrl-C\n");
1630 io_buffer_consume(&cur_comm, 1);
1632 gdb_suspend_vcpus();
1635 /* ACK of previous response. */
1637 if (response_pending())
1638 io_buffer_reset(&cur_resp);
1639 io_buffer_consume(&cur_comm, 1);
/* An unreported stop may now be delivered after the ACK. */
1640 if (stopped_vcpu != -1 && report_next_stop) {
1642 send_pending_data(fd);
1646 /* NACK of previous response. */
1648 if (response_pending()) {
/* Rewind the response (undo the partial send) and retransmit. */
1649 cur_resp.len += cur_resp.start;
1651 if (cur_resp.data[0] == '+')
1652 io_buffer_advance(&cur_resp, 1);
1653 debug("-> %.*s\n", (int)cur_resp.len,
1654 io_buffer_head(&cur_resp));
1656 io_buffer_consume(&cur_comm, 1);
1657 send_pending_data(fd);
/* '$' starts a packet; drop any stale response first. */
1662 if (response_pending()) {
1663 warnx("New GDB command while response in "
1665 io_buffer_reset(&cur_resp);
1668 /* Is packet complete? */
1669 hash = memchr(head, '#', avail);
/* Total length: "$...#" plus the two checksum hex digits. */
1672 plen = (hash - head + 1) + 2;
1675 debug("<- %.*s\n", (int)plen, head);
1677 /* Verify checksum. */
1678 for (sum = 0, p = head + 1; p < hash; p++)
1680 if (sum != parse_byte(hash + 1)) {
1681 io_buffer_consume(&cur_comm, plen);
1684 send_pending_data(fd);
/* Payload is between '$' and '#'. */
1689 handle_command(head + 1, hash - (head + 1));
1690 io_buffer_consume(&cur_comm, plen);
1691 if (!response_pending())
1693 send_pending_data(fd);
1696 /* XXX: Possibly drop connection instead. */
1697 debug("-> %02x\n", *head);
1698 io_buffer_consume(&cur_comm, 1);
/*
 * mevent read callback: pull available bytes from the socket into
 * cur_comm, then parse any complete packets under gdb_lock.
 * (EOF/close handling elided in this extract.)
 */
1705 gdb_readable(int fd, enum ev_type event, void *arg)
1710 if (ioctl(fd, FIONREAD, &pending) == -1) {
1711 warn("FIONREAD on GDB socket");
1716 * 'pending' might be zero due to EOF. We need to call read
1717 * with a non-zero length to detect EOF.
1722 /* Ensure there is room in the command buffer. */
1723 io_buffer_grow(&cur_comm, pending);
1724 assert(io_buffer_avail(&cur_comm) >= pending);
1726 nread = read(fd, io_buffer_tail(&cur_comm), io_buffer_avail(&cur_comm));
1729 } else if (nread == -1) {
1730 if (errno == EAGAIN)
1733 warn("Read from GDB socket");
1736 cur_comm.len += nread;
1737 pthread_mutex_lock(&gdb_lock);
1739 pthread_mutex_unlock(&gdb_lock);
/* mevent write callback: flush any buffered response bytes. */
1744 gdb_writable(int fd, enum ev_type event, void *arg)
1747 send_pending_data(fd);
/*
 * mevent accept callback on the listen socket.  Only one debugger
 * connection is allowed at a time; extra connections are dropped.
 * On a successful accept, register read/write events and break into
 * the guest ("stop on attach").  Errors are fatal only before startup
 * completes.  (Some lines elided in this extract.)
 */
1751 new_connection(int fd, enum ev_type event, void *arg)
1755 s = accept4(fd, NULL, NULL, SOCK_NONBLOCK);
1758 err(1, "Failed accepting initial GDB connection");
1760 /* Silently ignore errors post-startup. */
/* Avoid SIGPIPE when the debugger disconnects mid-write. */
1765 if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)) ==
1767 warn("Failed to disable SIGPIPE for GDB connection");
1772 pthread_mutex_lock(&gdb_lock);
1775 warnx("Ignoring additional GDB connection.");
1778 read_event = mevent_add(s, EVF_READ, gdb_readable, NULL);
1779 if (read_event == NULL) {
1781 err(1, "Failed to setup initial GDB connection");
1782 pthread_mutex_unlock(&gdb_lock);
1785 write_event = mevent_add(s, EVF_WRITE, gdb_writable, NULL);
1786 if (write_event == NULL) {
1788 err(1, "Failed to setup initial GDB connection");
1789 mevent_delete_close(read_event);
1797 /* Break on attach. */
/* Suppress the stop report until the client ACKs/queries (first_stop). */
1799 report_next_stop = false;
1800 gdb_suspend_vcpus();
1801 pthread_mutex_unlock(&gdb_lock);
1804 #ifndef WITHOUT_CAPSICUM
/*
 * Capsicum-sandbox the listen socket: allow only accept/event/IO/
 * setsockopt plus the FIONREAD ioctl used by gdb_readable().
 */
1806 limit_gdb_socket(int s)
1808 cap_rights_t rights;
1809 unsigned long ioctls[] = { FIONREAD };
1811 cap_rights_init(&rights, CAP_ACCEPT, CAP_EVENT, CAP_READ, CAP_WRITE,
1812 CAP_SETSOCKOPT, CAP_IOCTL);
1813 if (caph_rights_limit(s, &rights) == -1)
1814 errx(EX_OSERR, "Unable to apply rights for sandbox");
1815 if (caph_ioctls_limit(s, ioctls, nitems(ioctls)) == -1)
1816 errx(EX_OSERR, "Unable to apply rights for sandbox");
/*
 * One-time setup of the debug server: create mutex/condvar, bind and
 * listen on TCP port 'sport' (INADDR_ANY), allocate per-vCPU state,
 * optionally pre-suspend vCPU 0 when 'wait' is requested, sandbox the
 * socket, and register the accept handler.  (The function continues
 * past the end of this extract.)
 */
1821 init_gdb(struct vmctx *_ctx, int sport, bool wait)
1823 struct sockaddr_in sin;
1824 int error, flags, optval, s;
1826 debug("==> starting on %d, %swaiting\n", sport, wait ? "" : "not ");
1828 error = pthread_mutex_init(&gdb_lock, NULL);
1830 errc(1, error, "gdb mutex init");
1831 error = pthread_cond_init(&idle_vcpus, NULL);
1833 errc(1, error, "gdb cv init");
1836 s = socket(PF_INET, SOCK_STREAM, 0);
1838 err(1, "gdb socket create");
/* SO_REUSEADDR so restarts don't fail while the port is in TIME_WAIT. */
1841 (void)setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));
1843 sin.sin_len = sizeof(sin);
1844 sin.sin_family = AF_INET;
1845 sin.sin_addr.s_addr = htonl(INADDR_ANY);
1846 sin.sin_port = htons(sport);
1848 if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) < 0)
1849 err(1, "gdb socket bind");
/* backlog 1: only a single debugger connection is supported. */
1851 if (listen(s, 1) < 0)
1852 err(1, "gdb socket listen");
1855 TAILQ_INIT(&breakpoints);
1856 vcpu_state = calloc(guest_ncpus, sizeof(*vcpu_state));
1859 * Set vcpu 0 in vcpus_suspended. This will trigger the
1860 * logic in gdb_cpu_add() to suspend the first vcpu before
1861 * it starts execution. The vcpu will remain suspended
1862 * until a debugger connects.
1864 CPU_SET(0, &vcpus_suspended);
/* Non-blocking listen socket for the mevent loop. */
1868 flags = fcntl(s, F_GETFL);
1869 if (fcntl(s, F_SETFL, flags | O_NONBLOCK) == -1)
1870 err(1, "Failed to mark gdb socket non-blocking");
1872 #ifndef WITHOUT_CAPSICUM
1873 limit_gdb_socket(s);
1875 mevent_add(s, EVF_READ, new_connection, NULL);