/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017-2018 John H. Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/endian.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <machine/atomic.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>
#include <netinet/in.h>

#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <pthread_np.h>
/*
 * GDB_SIGNAL_* numbers are part of the GDB remote protocol.  Most stops
 * are reported with SIGTRAP.
 */
#define GDB_SIGNAL_TRAP 5

static void gdb_resume_vcpus(void);
static void check_command(int fd);

static struct mevent *read_event, *write_event;

static cpuset_t vcpus_active, vcpus_suspended, vcpus_waiting;
static pthread_mutex_t gdb_lock;
static pthread_cond_t idle_vcpus;
static bool first_stop, report_next_stop, swbreak_enabled;

/*
 * An I/O buffer contains 'capacity' bytes of room at 'data'.  For a
 * read buffer, 'start' is unused and 'len' contains the number of
 * valid bytes in the buffer.  For a write buffer, 'start' is set to
 * the index of the next byte in 'data' to send, and 'len' contains
 * the remaining number of valid bytes to send.
 */
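/*
 * A minimal layout consistent with the comment above.  This is a sketch
 * added for readability: the actual declaration is elided from this
 * fragment and the field order shown here is an assumption.
 */
struct io_buffer {
        uint8_t *data;          /* backing storage */
        size_t capacity;        /* total bytes available at 'data' */
        size_t start;           /* next byte to send (write buffers) */
        size_t len;             /* valid bytes in the buffer */
};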
        TAILQ_ENTRY(breakpoint) link;

/*
 * When a vCPU stops due to an event that should be reported to the
 * debugger, information about the event is stored in this structure.
 * The vCPU thread then sets 'stopped_vcpu' if it is not already set
 * and stops other vCPUs so the event can be reported.  The
 * report_stop() function reports the event for the 'stopped_vcpu'
 * vCPU.  When the debugger resumes execution via continue or step,
 * the event for 'stopped_vcpu' is cleared.  vCPUs will loop in their
 * event handlers until the associated event is reported or disabled.
 *
 * An idle vCPU will have all of the boolean fields set to false.
 *
 * When a vCPU is stepped, 'stepping' is set to true when the vCPU is
 * released to execute the stepped instruction.  When the vCPU reports
 * the stepping trap, 'stepped' is set.
 *
 * When a vCPU hits a breakpoint set by the debug server,
 * 'hit_swbreak' is set to true.
 */
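/*
 * Sketch of the per-vCPU state implied by the comment above.  The flags
 * ('stepping', 'stepped', 'hit_swbreak') match the ones used by the
 * handlers below, but the exact declaration is assumed here rather than
 * copied from the file.
 */
struct vcpu_state {
        bool stepping;          /* vCPU released to single-step */
        bool stepped;           /* vCPU reported the stepping trap */
        bool hit_swbreak;       /* vCPU hit a debug-server breakpoint */
};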
static struct io_buffer cur_comm, cur_resp;
static uint8_t cur_csum;
static struct vmctx *ctx;
static int cur_fd = -1;
static TAILQ_HEAD(, breakpoint) breakpoints;
static struct vcpu_state *vcpu_state;
static struct vcpu **vcpus;
static int cur_vcpu, stopped_vcpu;
static bool gdb_active = false;

static const int gdb_regset[] = {
static const int gdb_regsize[] = {

static void __printflike(1, 2)
debug(const char *fmt, ...)
        static FILE *logfile;

        if (logfile == NULL) {
                logfile = fopen("/tmp/bhyve_gdb.log", "w");
#ifndef WITHOUT_CAPSICUM
                if (caph_limit_stream(fileno(logfile), CAPH_WRITE) == -1) {
        vfprintf(logfile, fmt, ap);

static void remove_all_sw_breakpoints(void);

guest_paging_info(struct vcpu *vcpu, struct vm_guest_paging *paging)
        const int regset[4] = {

        if (vm_get_register_set(vcpu, nitems(regset), regset, regs) == -1)

        /*
         * For the debugger, always pretend to be the kernel (CPL 0),
         * and if long-mode is enabled, always parse addresses as if
         * in 64-bit mode.
         */
        paging->cr3 = regs[1];
        if (regs[3] & EFER_LMA)
                paging->cpu_mode = CPU_MODE_64BIT;
        else if (regs[0] & CR0_PE)
                paging->cpu_mode = CPU_MODE_PROTECTED;
        else
                paging->cpu_mode = CPU_MODE_REAL;
        if (!(regs[0] & CR0_PG))
                paging->paging_mode = PAGING_MODE_FLAT;
        else if (!(regs[2] & CR4_PAE))
                paging->paging_mode = PAGING_MODE_32;
        else if (regs[3] & EFER_LME)
                paging->paging_mode = (regs[2] & CR4_LA57) ?
                    PAGING_MODE_64_LA57 : PAGING_MODE_64;
        else
                paging->paging_mode = PAGING_MODE_PAE;
/*
 * Map a guest virtual address to a physical address (for a given vcpu).
 * If a guest virtual address is valid, return 1.  If the address is
 * not valid, return 0.  If an error occurs obtaining the mapping,
 * return -1.
 */
guest_vaddr2paddr(struct vcpu *vcpu, uint64_t vaddr, uint64_t *paddr)
        struct vm_guest_paging paging;

        if (guest_paging_info(vcpu, &paging) == -1)

        /*
         * Always use PROT_READ.  We really care if the VA is
         * accessible, not if the current vCPU can write.
         */
        if (vm_gla2gpa_nofault(vcpu, &paging, vaddr, PROT_READ, paddr,

io_buffer_reset(struct io_buffer *io)

/* Available room for adding data. */
io_buffer_avail(struct io_buffer *io)
        return (io->capacity - (io->start + io->len));

io_buffer_head(struct io_buffer *io)
        return (io->data + io->start);

io_buffer_tail(struct io_buffer *io)
        return (io->data + io->start + io->len);

io_buffer_advance(struct io_buffer *io, size_t amount)
        assert(amount <= io->len);

io_buffer_consume(struct io_buffer *io, size_t amount)
        io_buffer_advance(io, amount);

        /*
         * XXX: Consider making this move optional and compacting on a
         * future read() before realloc().
         */
        memmove(io->data, io_buffer_head(io), io->len);

io_buffer_grow(struct io_buffer *io, size_t newsize)
        size_t avail, new_cap;

        avail = io_buffer_avail(io);
        if (newsize <= avail)

        new_cap = io->capacity + (newsize - avail);
        new_data = realloc(io->data, new_cap);
        if (new_data == NULL)
                err(1, "Failed to grow GDB I/O buffer");
        io->capacity = new_cap;

response_pending(void)
        if (cur_resp.start == 0 && cur_resp.len == 0)
        if (cur_resp.start + cur_resp.len == 1 && cur_resp.data[0] == '+')

close_connection(void)
        /*
         * XXX: This triggers a warning because mevent does the close
         * before the EV_DELETE.
         */
        pthread_mutex_lock(&gdb_lock);
        mevent_delete(write_event);
        mevent_delete_close(read_event);
        io_buffer_reset(&cur_comm);
        io_buffer_reset(&cur_resp);
        remove_all_sw_breakpoints();

        /* Clear any pending events. */
        memset(vcpu_state, 0, guest_ncpus * sizeof(*vcpu_state));

        /* Resume any stopped vCPUs. */
        pthread_mutex_unlock(&gdb_lock);

hex_digit(uint8_t nibble)
        return (nibble + '0');
        return (nibble + 'a' - 10);

parse_digit(uint8_t v)
        if (v >= '0' && v <= '9')
        if (v >= 'a' && v <= 'f')
                return (v - 'a' + 10);
        if (v >= 'A' && v <= 'F')
                return (v - 'A' + 10);

/* Parses big-endian hexadecimal. */
parse_integer(const uint8_t *p, size_t len)
                v |= parse_digit(*p);

parse_byte(const uint8_t *p)
        return (parse_digit(p[0]) << 4 | parse_digit(p[1]));
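/*
 * Illustrative example (not part of the original file): the packet text is
 * big-endian hex, so parse_integer((const uint8_t *)"1a2b", 4) evaluates to
 * 0x1a2b and parse_byte((const uint8_t *)"7f") evaluates to 0x7f.
 */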
send_pending_data(int fd)
        if (cur_resp.len == 0) {
                mevent_disable(write_event);

        nwritten = write(fd, io_buffer_head(&cur_resp), cur_resp.len);
        if (nwritten == -1) {
                warn("Write to GDB socket failed");
                io_buffer_advance(&cur_resp, nwritten);
                if (cur_resp.len == 0)
                        mevent_disable(write_event);
                        mevent_enable(write_event);

/* Append a single character to the output buffer. */
send_char(uint8_t data)
        io_buffer_grow(&cur_resp, 1);
        *io_buffer_tail(&cur_resp) = data;

/* Append an array of bytes to the output buffer. */
send_data(const uint8_t *data, size_t len)
        io_buffer_grow(&cur_resp, len);
        memcpy(io_buffer_tail(&cur_resp), data, len);

format_byte(uint8_t v, uint8_t *buf)
        buf[0] = hex_digit(v >> 4);
        buf[1] = hex_digit(v & 0xf);

/*
 * Append a single byte (formatted as two hex characters) to the
 * output buffer.
 */
        send_data(buf, sizeof(buf));

        debug("-> %.*s\n", (int)cur_resp.len, io_buffer_head(&cur_resp));

/*
 * Append a single character (for the packet payload) and update the
 * running checksum.
 */
append_char(uint8_t v)

/*
 * Append an array of bytes (for the packet payload) and update the
 * running checksum.
 */
append_packet_data(const uint8_t *data, size_t len)
        send_data(data, len);

append_string(const char *str)
        append_packet_data(str, strlen(str));

append_byte(uint8_t v)
        append_packet_data(buf, sizeof(buf));

append_unsigned_native(uintmax_t value, size_t len)
        for (i = 0; i < len; i++) {

append_unsigned_be(uintmax_t value, size_t len)
        for (i = 0; i < len; i++) {
                format_byte(value, buf + (len - i - 1) * 2);
        append_packet_data(buf, sizeof(buf));

append_integer(unsigned int value)
        append_unsigned_be(value, (fls(value) + 7) / 8);

append_asciihex(const char *str)
        while (*str != '\0') {

send_empty_response(void)

send_error(int error)

parse_threadid(const uint8_t *data, size_t len)
        if (len == 1 && *data == '0')
        if (len == 2 && memcmp(data, "-1", 2) == 0)
        return (parse_integer(data, len));

/*
 * Report the current stop event to the debugger.  If the stop is due
 * to an event triggered on a specific vCPU such as a breakpoint or
 * stepping trap, stopped_vcpu will be set to the vCPU triggering the
 * stop.  If 'set_cur_vcpu' is true, then cur_vcpu will be updated to
 * the reporting vCPU for vCPU events.
 */
report_stop(bool set_cur_vcpu)
        struct vcpu_state *vs;

        if (stopped_vcpu == -1) {
                append_byte(GDB_SIGNAL_TRAP);
                vs = &vcpu_state[stopped_vcpu];
                cur_vcpu = stopped_vcpu;
                append_byte(GDB_SIGNAL_TRAP);
                append_string("thread:");
                append_integer(stopped_vcpu + 1);
                if (vs->hit_swbreak) {
                        debug("$vCPU %d reporting swbreak\n", stopped_vcpu);
                        append_string("swbreak:;");
                } else if (vs->stepped)
                        debug("$vCPU %d reporting step\n", stopped_vcpu);
                else
                        debug("$vCPU %d reporting ???\n", stopped_vcpu);
        report_next_stop = false;
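/*
 * For illustration only (not from the original file): a stop on vCPU 0 due
 * to a software breakpoint is reported with the payload
 * "T05thread:1;swbreak:;" (signal 5 is GDB_SIGNAL_TRAP and thread ids are
 * vCPU id + 1), while a stop with no associated vCPU is reported as "S05".
 */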
/*
 * If this stop is due to a vCPU event, clear that event to mark it as
 * acknowledged.
 */
        struct vcpu_state *vs;

        if (stopped_vcpu != -1) {
                vs = &vcpu_state[stopped_vcpu];
                vs->hit_swbreak = false;
        report_next_stop = true;

gdb_finish_suspend_vcpus(void)
        } else if (report_next_stop) {
                assert(!response_pending());
                send_pending_data(cur_fd);

/*
 * vCPU threads invoke this function whenever the vCPU enters the
 * debug server to pause or report an event.  vCPU threads wait here
 * as long as the debug server keeps them suspended.
 */
_gdb_cpu_suspend(struct vcpu *vcpu, bool report_stop)
        int vcpuid = vcpu_id(vcpu);

        debug("$vCPU %d suspending\n", vcpuid);
        CPU_SET(vcpuid, &vcpus_waiting);
        if (report_stop && CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
                gdb_finish_suspend_vcpus();
        while (CPU_ISSET(vcpuid, &vcpus_suspended))
                pthread_cond_wait(&idle_vcpus, &gdb_lock);
        CPU_CLR(vcpuid, &vcpus_waiting);
        debug("$vCPU %d resuming\n", vcpuid);

/*
 * Invoked at the start of a vCPU thread's execution to inform the
 * debug server about the new thread.
 */
gdb_cpu_add(struct vcpu *vcpu)
        vcpuid = vcpu_id(vcpu);
        debug("$vCPU %d starting\n", vcpuid);
        pthread_mutex_lock(&gdb_lock);
        assert(vcpuid < guest_ncpus);
        assert(vcpus[vcpuid] == NULL);
        vcpus[vcpuid] = vcpu;
        CPU_SET(vcpuid, &vcpus_active);
        if (!TAILQ_EMPTY(&breakpoints)) {
                vm_set_capability(vcpu, VM_CAP_BPT_EXIT, 1);
                debug("$vCPU %d enabled breakpoint exits\n", vcpuid);

        /*
         * If a vcpu is added while vcpus are stopped, suspend the new
         * vcpu so that it will pop back out with a debug exit before
         * executing the first instruction.
         */
        if (!CPU_EMPTY(&vcpus_suspended)) {
                CPU_SET(vcpuid, &vcpus_suspended);
                _gdb_cpu_suspend(vcpu, false);
        pthread_mutex_unlock(&gdb_lock);

/*
 * Invoked by a vCPU before resuming execution.  This enables stepping
 * if the vCPU is marked as stepping.
 */
gdb_cpu_resume(struct vcpu *vcpu)
        struct vcpu_state *vs;

        vs = &vcpu_state[vcpu_id(vcpu)];

        /*
         * Any pending event should already be reported before
         * resuming.
         */
        assert(vs->hit_swbreak == false);
        assert(vs->stepped == false);
        error = vm_set_capability(vcpu, VM_CAP_MTRAP_EXIT, 1);
        error = vm_set_capability(vcpu, VM_CAP_MASK_HWINTR, 1);

/*
 * Handler for VM_EXITCODE_DEBUG used to suspend a vCPU when the guest
 * has been suspended due to an event on a different vCPU or in response
 * to a guest-wide suspend such as Ctrl-C or the stop on attach.
 */
gdb_cpu_suspend(struct vcpu *vcpu)
        pthread_mutex_lock(&gdb_lock);
        _gdb_cpu_suspend(vcpu, true);
        gdb_cpu_resume(vcpu);
        pthread_mutex_unlock(&gdb_lock);

gdb_suspend_vcpus(void)
        assert(pthread_mutex_isowned_np(&gdb_lock));
        debug("suspending all CPUs\n");
        vcpus_suspended = vcpus_active;
        vm_suspend_all_cpus(ctx);
        if (CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
                gdb_finish_suspend_vcpus();

/*
 * Handler for VM_EXITCODE_MTRAP reported when a vCPU single-steps via
 * the VT-x-specific MTRAP exit.
 */
gdb_cpu_mtrap(struct vcpu *vcpu)
        struct vcpu_state *vs;

        vcpuid = vcpu_id(vcpu);
        debug("$vCPU %d MTRAP\n", vcpuid);
        pthread_mutex_lock(&gdb_lock);
        vs = &vcpu_state[vcpuid];
        vs->stepping = false;
        vm_set_capability(vcpu, VM_CAP_MTRAP_EXIT, 0);
        vm_set_capability(vcpu, VM_CAP_MASK_HWINTR, 0);
        while (vs->stepped) {
                if (stopped_vcpu == -1) {
                        debug("$vCPU %d reporting step\n", vcpuid);
                        stopped_vcpu = vcpuid;
                _gdb_cpu_suspend(vcpu, true);
        gdb_cpu_resume(vcpu);
        pthread_mutex_unlock(&gdb_lock);
static struct breakpoint *
find_breakpoint(uint64_t gpa)
        struct breakpoint *bp;

        TAILQ_FOREACH(bp, &breakpoints, link) {

gdb_cpu_breakpoint(struct vcpu *vcpu, struct vm_exit *vmexit)
        struct breakpoint *bp;
        struct vcpu_state *vs;

                fprintf(stderr, "vm_loop: unexpected VMEXIT_DEBUG\n");

        vcpuid = vcpu_id(vcpu);
        pthread_mutex_lock(&gdb_lock);
        error = guest_vaddr2paddr(vcpu, vmexit->rip, &gpa);
        bp = find_breakpoint(gpa);
                vs = &vcpu_state[vcpuid];
                assert(vs->stepping == false);
                assert(vs->stepped == false);
                assert(vs->hit_swbreak == false);
                vs->hit_swbreak = true;
                vm_set_register(vcpu, VM_REG_GUEST_RIP, vmexit->rip);
                        if (stopped_vcpu == -1) {
                                debug("$vCPU %d reporting breakpoint at rip %#lx\n",
                                    vcpuid, vmexit->rip);
                                stopped_vcpu = vcpuid;
                        _gdb_cpu_suspend(vcpu, true);
                        if (!vs->hit_swbreak) {
                                /* Breakpoint reported. */
                        bp = find_breakpoint(gpa);
                                /* Breakpoint was removed. */
                                vs->hit_swbreak = false;
                gdb_cpu_resume(vcpu);
                debug("$vCPU %d injecting breakpoint at rip %#lx\n", vcpuid,
                error = vm_set_register(vcpu, VM_REG_GUEST_ENTRY_INST_LENGTH,
                    vmexit->u.bpt.inst_length);
                error = vm_inject_exception(vcpu, IDT_BP, 0, 0, 0);
        pthread_mutex_unlock(&gdb_lock);
gdb_step_vcpu(struct vcpu *vcpu)
        int error, val, vcpuid;

        vcpuid = vcpu_id(vcpu);
        debug("$vCPU %d step\n", vcpuid);
        error = vm_get_capability(vcpu, VM_CAP_MTRAP_EXIT, &val);
        vcpu_state[vcpuid].stepping = true;
        CPU_CLR(vcpuid, &vcpus_suspended);
        pthread_cond_broadcast(&idle_vcpus);

gdb_resume_vcpus(void)
        assert(pthread_mutex_isowned_np(&gdb_lock));
        vm_resume_all_cpus(ctx);
        debug("resuming all CPUs\n");
        CPU_ZERO(&vcpus_suspended);
        pthread_cond_broadcast(&idle_vcpus);

        uint64_t regvals[nitems(gdb_regset)];

        if (vm_get_register_set(vcpus[cur_vcpu], nitems(gdb_regset),
            gdb_regset, regvals) == -1) {

        for (size_t i = 0; i < nitems(regvals); i++)
                append_unsigned_native(regvals[i], gdb_regsize[i]);

gdb_read_mem(const uint8_t *data, size_t len)
        uint64_t gpa, gva, val;
        size_t resid, todo, bytes;

        /* Parse and consume address. */
        cp = memchr(data, ',', len);
        if (cp == NULL || cp == data) {
        gva = parse_integer(data, cp - data);
        len -= (cp - data) + 1;
        data += (cp - data) + 1;

        resid = parse_integer(data, len);

        error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);

                /* Read bytes from current page. */
                todo = getpagesize() - gpa % getpagesize();

                cp = paddr_guest2host(ctx, gpa, todo);
                        /*
                         * If this page is guest RAM, read it a byte
                         * at a time.
                         */
                        /*
                         * If this page isn't guest RAM, try to handle
                         * it via MMIO.  For MMIO requests, use
                         * aligned reads of words when possible.
                         */
                                if (gpa & 1 || todo == 1)
                                else if (gpa & 2 || todo == 2)
                                error = read_mem(vcpus[cur_vcpu], gpa, &val,

        assert(resid == 0 || gpa % getpagesize() == 0);
gdb_write_mem(const uint8_t *data, size_t len)
        uint64_t gpa, gva, val;
        size_t resid, todo, bytes;

        /* Parse and consume address. */
        cp = memchr(data, ',', len);
        if (cp == NULL || cp == data) {
        gva = parse_integer(data, cp - data);
        len -= (cp - data) + 1;
        data += (cp - data) + 1;

        /* Parse and consume length. */
        cp = memchr(data, ':', len);
        if (cp == NULL || cp == data) {
        resid = parse_integer(data, cp - data);
        len -= (cp - data) + 1;
        data += (cp - data) + 1;

        /* Verify the available bytes match the length. */
        if (len != resid * 2) {

        error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);

                /* Write bytes to current page. */
                todo = getpagesize() - gpa % getpagesize();

                cp = paddr_guest2host(ctx, gpa, todo);
                        /*
                         * If this page is guest RAM, write it a byte
                         * at a time.
                         */
                        *cp = parse_byte(data);
                        /*
                         * If this page isn't guest RAM, try to handle
                         * it via MMIO.  For MMIO requests, use
                         * aligned writes of words when possible.
                         */
                                if (gpa & 1 || todo == 1) {
                                        val = parse_byte(data);
                                } else if (gpa & 2 || todo == 2) {
                                        val = be16toh(parse_integer(data, 4));
                                        val = be32toh(parse_integer(data, 8));
                                error = write_mem(vcpus[cur_vcpu], gpa, val,

        assert(resid == 0 || gpa % getpagesize() == 0);
set_breakpoint_caps(bool enable)
        mask = vcpus_active;
        while (!CPU_EMPTY(&mask)) {
                vcpu = CPU_FFS(&mask) - 1;
                CPU_CLR(vcpu, &mask);
                if (vm_set_capability(vcpus[vcpu], VM_CAP_BPT_EXIT,
                    enable ? 1 : 0) < 0)
                debug("$vCPU %d %sabled breakpoint exits\n", vcpu,
                    enable ? "en" : "dis");

remove_all_sw_breakpoints(void)
        struct breakpoint *bp, *nbp;

        if (TAILQ_EMPTY(&breakpoints))

        TAILQ_FOREACH_SAFE(bp, &breakpoints, link, nbp) {
                debug("remove breakpoint at %#lx\n", bp->gpa);
                cp = paddr_guest2host(ctx, bp->gpa, 1);
                *cp = bp->shadow_inst;
                TAILQ_REMOVE(&breakpoints, bp, link);
        TAILQ_INIT(&breakpoints);
        set_breakpoint_caps(false);

update_sw_breakpoint(uint64_t gva, int kind, bool insert)
        struct breakpoint *bp;

        error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);

        cp = paddr_guest2host(ctx, gpa, 1);

        /* Only permit breakpoints in guest RAM. */

        /* Find any existing breakpoint. */
        bp = find_breakpoint(gpa);

        /*
         * Silently ignore duplicate commands since the protocol
         * requires these packets to be idempotent.
         */
                        if (TAILQ_EMPTY(&breakpoints) &&
                            !set_breakpoint_caps(true)) {
                                send_empty_response();

                        bp = malloc(sizeof(*bp));
                        bp->shadow_inst = *cp;
                        *cp = 0xcc;     /* INT 3 */
                        TAILQ_INSERT_TAIL(&breakpoints, bp, link);
                        debug("new breakpoint at %#lx\n", gpa);
                        debug("remove breakpoint at %#lx\n", gpa);
                        *cp = bp->shadow_inst;
                        TAILQ_REMOVE(&breakpoints, bp, link);
                        if (TAILQ_EMPTY(&breakpoints))
                                set_breakpoint_caps(false);
parse_breakpoint(const uint8_t *data, size_t len)
        insert = data[0] == 'Z';

        /* Parse and consume type. */
        cp = memchr(data, ',', len);
        if (cp == NULL || cp == data) {
        type = parse_integer(data, cp - data);
        len -= (cp - data) + 1;
        data += (cp - data) + 1;

        /* Parse and consume address. */
        cp = memchr(data, ',', len);
        if (cp == NULL || cp == data) {
        gva = parse_integer(data, cp - data);
        len -= (cp - data) + 1;
        data += (cp - data) + 1;

        /* Parse and consume kind. */
        cp = memchr(data, ';', len);
                /*
                 * We do not advertise support for either the
                 * ConditionalBreakpoints or BreakpointCommands
                 * features, so we should not be getting conditions or
                 * commands from the remote end.
                 */
                send_empty_response();
        kind = parse_integer(data, len);

                update_sw_breakpoint(gva, kind, insert);
                send_empty_response();

command_equals(const uint8_t *data, size_t len, const char *cmd)
        if (strlen(cmd) > len)
        return (memcmp(data, cmd, strlen(cmd)) == 0);

check_features(const uint8_t *data, size_t len)
        char *feature, *next_feature, *str, *value;

        str = malloc(len + 1);
        memcpy(str, data, len);

        while ((feature = strsep(&next_feature, ";")) != NULL) {
                /*
                 * Null features shouldn't exist, but skip if they
                 * do.
                 */
                if (strcmp(feature, "") == 0)

                /*
                 * Look for the value or supported / not supported
                 * flag.
                 */
                value = strchr(feature, '=');
                if (value != NULL) {
                        value = feature + strlen(feature) - 1;
                                /*
                                 * This is really a protocol error,
                                 * but we just ignore malformed
                                 * features for ease of
                                 * implementation.
                                 */
                if (strcmp(feature, "swbreak") == 0)
                        swbreak_enabled = supported;

        /* This is an arbitrary limit. */
        append_string("PacketSize=4096");
        append_string(";swbreak+");
gdb_query(const uint8_t *data, size_t len)
        if (command_equals(data, len, "qAttached")) {
        } else if (command_equals(data, len, "qC")) {
                append_string("QC");
                append_integer(cur_vcpu + 1);
        } else if (command_equals(data, len, "qfThreadInfo")) {
                if (CPU_EMPTY(&vcpus_active)) {
                mask = vcpus_active;
                while (!CPU_EMPTY(&mask)) {
                        vcpu = CPU_FFS(&mask) - 1;
                        CPU_CLR(vcpu, &mask);
                        append_integer(vcpu + 1);
        } else if (command_equals(data, len, "qsThreadInfo")) {
        } else if (command_equals(data, len, "qSupported")) {
                data += strlen("qSupported");
                len -= strlen("qSupported");
                check_features(data, len);
        } else if (command_equals(data, len, "qThreadExtraInfo")) {
                data += strlen("qThreadExtraInfo");
                len -= strlen("qThreadExtraInfo");

                tid = parse_threadid(data + 1, len - 1);
                if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {

                snprintf(buf, sizeof(buf), "vCPU %d", tid - 1);
                append_asciihex(buf);
        send_empty_response();
handle_command(const uint8_t *data, size_t len)
        /* Reject packets with a sequence-id. */
        if (len >= 3 && data[0] >= '0' && data[0] <= '9' &&
            data[1] >= '0' && data[1] <= '9' && data[2] == ':') {
                send_empty_response();

                /* TODO: Resume any stopped CPUs. */

                if (data[1] != 'g' && data[1] != 'c') {
                tid = parse_threadid(data + 2, len - 2);

                if (CPU_EMPTY(&vcpus_active)) {
                if (tid == -1 || tid == 0)
                        cur_vcpu = CPU_FFS(&vcpus_active) - 1;
                else if (CPU_ISSET(tid - 1, &vcpus_active))
                gdb_read_mem(data, len);
                gdb_write_mem(data, len);
                tid = parse_threadid(data + 1, len - 1);
                if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
                gdb_query(data, len);

                /* Don't send a reply until a stop occurs. */
                if (!gdb_step_vcpu(vcpus[cur_vcpu])) {
                        send_error(EOPNOTSUPP);
                parse_breakpoint(data, len);
        case 'G': /* TODO */
                /* Handle 'vCont' */
        case 'p': /* TODO */
        case 'P': /* TODO */
        case 'Q': /* TODO */
        case 't': /* TODO */
        case 'X': /* TODO */
                send_empty_response();

/* Check for a valid packet in the command buffer. */
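/*
 * An example exchange, for illustration only (not from the original file):
 * the debugger frames each command as '$' <payload> '#' <two hex digits>,
 * where the digits are the modulo-256 sum of the payload bytes.  For the
 * payload "qC" the sum is 0x71 + 0x43 = 0xb4, so the wire packet is
 * "$qC#b4"; this stub acknowledges it with '+' and then queues its own
 * framed response.
 */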
check_command(int fd)
        uint8_t *head, *hash, *p, sum;

                avail = cur_comm.len;
                head = io_buffer_head(&cur_comm);
                        debug("<- Ctrl-C\n");
                        io_buffer_consume(&cur_comm, 1);

                        gdb_suspend_vcpus();
                        /* ACK of previous response. */
                        if (response_pending())
                                io_buffer_reset(&cur_resp);
                        io_buffer_consume(&cur_comm, 1);
                        if (stopped_vcpu != -1 && report_next_stop) {
                                send_pending_data(fd);
                        /* NACK of previous response. */
                        if (response_pending()) {
                                cur_resp.len += cur_resp.start;
                                if (cur_resp.data[0] == '+')
                                        io_buffer_advance(&cur_resp, 1);
                                debug("-> %.*s\n", (int)cur_resp.len,
                                    io_buffer_head(&cur_resp));
                        io_buffer_consume(&cur_comm, 1);
                        send_pending_data(fd);
                        if (response_pending()) {
                                warnx("New GDB command while response in "
                                io_buffer_reset(&cur_resp);

                        /* Is packet complete? */
                        hash = memchr(head, '#', avail);
                        plen = (hash - head + 1) + 2;
                        debug("<- %.*s\n", (int)plen, head);

                        /* Verify checksum. */
                        for (sum = 0, p = head + 1; p < hash; p++)
                                sum += *p;
                        if (sum != parse_byte(hash + 1)) {
                                io_buffer_consume(&cur_comm, plen);
                                send_pending_data(fd);

                        handle_command(head + 1, hash - (head + 1));
                        io_buffer_consume(&cur_comm, plen);
                        if (!response_pending())
                        send_pending_data(fd);
                        /* XXX: Possibly drop connection instead. */
                        debug("-> %02x\n", *head);
                        io_buffer_consume(&cur_comm, 1);
gdb_readable(int fd, enum ev_type event __unused, void *arg __unused)
        if (ioctl(fd, FIONREAD, &n) == -1) {
                warn("FIONREAD on GDB socket");

        /*
         * 'pending' might be zero due to EOF.  We need to call read
         * with a non-zero length to detect EOF.
         */

        /* Ensure there is room in the command buffer. */
        io_buffer_grow(&cur_comm, pending);
        assert(io_buffer_avail(&cur_comm) >= pending);

        nread = read(fd, io_buffer_tail(&cur_comm), io_buffer_avail(&cur_comm));
        } else if (nread == -1) {
                if (errno == EAGAIN)

                warn("Read from GDB socket");
                cur_comm.len += nread;
                pthread_mutex_lock(&gdb_lock);
                pthread_mutex_unlock(&gdb_lock);

gdb_writable(int fd, enum ev_type event __unused, void *arg __unused)
        send_pending_data(fd);

new_connection(int fd, enum ev_type event __unused, void *arg)
        s = accept4(fd, NULL, NULL, SOCK_NONBLOCK);
                        err(1, "Failed accepting initial GDB connection");

                /* Silently ignore errors post-startup. */

        if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)) ==
                warn("Failed to disable SIGPIPE for GDB connection");

        pthread_mutex_lock(&gdb_lock);
                warnx("Ignoring additional GDB connection.");

        read_event = mevent_add(s, EVF_READ, gdb_readable, NULL);
        if (read_event == NULL) {
                        err(1, "Failed to setup initial GDB connection");
                pthread_mutex_unlock(&gdb_lock);

        write_event = mevent_add(s, EVF_WRITE, gdb_writable, NULL);
        if (write_event == NULL) {
                        err(1, "Failed to setup initial GDB connection");
                mevent_delete_close(read_event);

        /* Break on attach. */
        report_next_stop = false;
        gdb_suspend_vcpus();
        pthread_mutex_unlock(&gdb_lock);
#ifndef WITHOUT_CAPSICUM
limit_gdb_socket(int s)
        cap_rights_t rights;
        unsigned long ioctls[] = { FIONREAD };

        cap_rights_init(&rights, CAP_ACCEPT, CAP_EVENT, CAP_READ, CAP_WRITE,
            CAP_SETSOCKOPT, CAP_IOCTL);
        if (caph_rights_limit(s, &rights) == -1)
                errx(EX_OSERR, "Unable to apply rights for sandbox");
        if (caph_ioctls_limit(s, ioctls, nitems(ioctls)) == -1)
                errx(EX_OSERR, "Unable to apply rights for sandbox");
#endif

init_gdb(struct vmctx *_ctx)
        int error, flags, optval, s;
        struct addrinfo hints;
        struct addrinfo *gdbaddr;
        const char *saddr, *value;

        value = get_config_value("gdb.port");
        sport = strdup(value);
                errx(4, "Failed to allocate memory");

        wait = get_config_bool_default("gdb.wait", false);

        saddr = get_config_value("gdb.address");
        if (saddr == NULL) {
                saddr = "localhost";

        debug("==> starting on %s:%s, %swaiting\n",
            saddr, sport, wait ? "" : "not ");

        error = pthread_mutex_init(&gdb_lock, NULL);
                errc(1, error, "gdb mutex init");
        error = pthread_cond_init(&idle_vcpus, NULL);
                errc(1, error, "gdb cv init");

        memset(&hints, 0, sizeof(hints));
        hints.ai_family = AF_UNSPEC;
        hints.ai_socktype = SOCK_STREAM;
        hints.ai_flags = AI_NUMERICSERV | AI_PASSIVE;

        error = getaddrinfo(saddr, sport, &hints, &gdbaddr);
                errx(1, "gdb address resolution: %s", gai_strerror(error));

        s = socket(gdbaddr->ai_family, gdbaddr->ai_socktype, 0);
                err(1, "gdb socket create");

        (void)setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));

        if (bind(s, gdbaddr->ai_addr, gdbaddr->ai_addrlen) < 0)
                err(1, "gdb socket bind");

        if (listen(s, 1) < 0)
                err(1, "gdb socket listen");

        TAILQ_INIT(&breakpoints);
        vcpus = calloc(guest_ncpus, sizeof(*vcpus));
        vcpu_state = calloc(guest_ncpus, sizeof(*vcpu_state));
                /*
                 * Set vcpu 0 in vcpus_suspended.  This will trigger the
                 * logic in gdb_cpu_add() to suspend the first vcpu before
                 * it starts execution.  The vcpu will remain suspended
                 * until a debugger connects.
                 */
                CPU_SET(0, &vcpus_suspended);

        flags = fcntl(s, F_GETFL);
        if (fcntl(s, F_SETFL, flags | O_NONBLOCK) == -1)
                err(1, "Failed to mark gdb socket non-blocking");

#ifndef WITHOUT_CAPSICUM
        limit_gdb_socket(s);
#endif
        mevent_add(s, EVF_READ, new_connection, NULL);
        freeaddrinfo(gdbaddr);