2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2017-2018 John H. Baldwin <jhb@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #ifndef WITHOUT_CAPSICUM
33 #include <sys/capsicum.h>
35 #include <sys/ioctl.h>
37 #include <sys/socket.h>
38 #include <machine/atomic.h>
39 #include <machine/specialreg.h>
40 #include <machine/vmm.h>
41 #include <netinet/in.h>
43 #ifndef WITHOUT_CAPSICUM
44 #include <capsicum_helpers.h>
50 #include <pthread_np.h>
64 * GDB_SIGNAL_* numbers are part of the GDB remote protocol. Most stops
67 #define GDB_SIGNAL_TRAP 5
69 static void gdb_resume_vcpus(void);
70 static void check_command(int fd);
72 static struct mevent *read_event, *write_event;
74 static cpuset_t vcpus_active, vcpus_suspended, vcpus_waiting;
75 static pthread_mutex_t gdb_lock;
76 static pthread_cond_t idle_vcpus;
77 static bool stop_pending, first_stop;
78 static int stepping_vcpu, stopped_vcpu;
81 * An I/O buffer contains 'capacity' bytes of room at 'data'. For a
82 * read buffer, 'start' is unused and 'len' contains the number of
83 * valid bytes in the buffer. For a write buffer, 'start' is set to
84 * the index of the next byte in 'data' to send, and 'len' contains
85 * the remaining number of valid bytes to send.
94 static struct io_buffer cur_comm, cur_resp;
95 static uint8_t cur_csum;
97 static struct vmctx *ctx;
98 static int cur_fd = -1;
100 const int gdb_regset[] = {
127 const int gdb_regsize[] = {
/*
 * debug(): printf-style logging to a private file, /tmp/bhyve_gdb.log,
 * opened lazily on first call.  NOTE(review): this extract elides
 * several lines of the body (error handling, va_start/va_end); the
 * visible lines are not the complete function.
 */
158 static void __printflike(1, 2)
159 debug(const char *fmt, ...)
161 	static FILE *logfile;
/* Open the log file only on first use. */
164 	if (logfile == NULL) {
165 		logfile = fopen("/tmp/bhyve_gdb.log", "w");
168 #ifndef WITHOUT_CAPSICUM
/* In capability mode, restrict the log stream to write-only rights. */
169 		if (caph_limit_stream(fileno(logfile), CAPH_WRITE) == -1) {
178 	vfprintf(logfile, fmt, ap);
/*
 * guest_paging_info(): derive the vCPU's paging configuration (CPU mode
 * and page-table format) from its control registers.  From the masks
 * applied below: regs[0] holds CR0, regs[1] CR3, regs[2] CR4 and
 * regs[3] EFER.  (Several lines are elided from this extract.)
 */
186 guest_paging_info(int vcpu, struct vm_guest_paging *paging)
189 const int regset[4] = {
196 if (vm_get_register_set(ctx, vcpu, nitems(regset), regset, regs) == -1)
200 * For the debugger, always pretend to be the kernel (CPL 0),
201 * and if long-mode is enabled, always parse addresses as if
204 paging->cr3 = regs[1];
/* CPU mode: EFER.LMA => 64-bit; else CR0.PE => protected; else real. */
206 if (regs[3] & EFER_LMA)
207 paging->cpu_mode = CPU_MODE_64BIT;
208 else if (regs[0] & CR0_PE)
209 paging->cpu_mode = CPU_MODE_PROTECTED;
211 paging->cpu_mode = CPU_MODE_REAL;
/* Paging: !CR0.PG => flat; !CR4.PAE => 32-bit; EFER.LME => 64-bit; else PAE. */
212 if (!(regs[0] & CR0_PG))
213 paging->paging_mode = PAGING_MODE_FLAT;
214 else if (!(regs[2] & CR4_PAE))
215 paging->paging_mode = PAGING_MODE_32;
216 else if (regs[3] & EFER_LME)
217 paging->paging_mode = PAGING_MODE_64;
219 paging->paging_mode = PAGING_MODE_PAE;
224 * Map a guest virtual address to a physical address (for a given vcpu).
225 * If a guest virtual address is valid, return 1. If the address is
226 * not valid, return 0. If an error occurs obtaining the mapping,
230 guest_vaddr2paddr(int vcpu, uint64_t vaddr, uint64_t *paddr)
232 struct vm_guest_paging paging;
/* Resolve the paging state first; bail if the registers cannot be read. */
235 if (guest_paging_info(vcpu, &paging) == -1)
239 * Always use PROT_READ. We really care if the VA is
240 * accessible, not if the current vCPU can write.
242 if (vm_gla2gpa_nofault(ctx, vcpu, &paging, vaddr, PROT_READ, paddr,
/*
 * struct io_buffer accessors (see the description above): 'start' is
 * the index of the next byte to consume, 'len' the count of valid
 * bytes, 'capacity' the size of 'data'.  (Lines elided from extract.)
 */
251 io_buffer_reset(struct io_buffer *io)
258 /* Available room for adding data. */
260 io_buffer_avail(struct io_buffer *io)
263 return (io->capacity - (io->start + io->len));
/* Pointer to the first valid byte. */
267 io_buffer_head(struct io_buffer *io)
270 return (io->data + io->start);
/* Pointer just past the last valid byte, where new data is appended. */
274 io_buffer_tail(struct io_buffer *io)
277 return (io->data + io->start + io->len);
/* Drop 'amount' bytes from the front without compacting. */
281 io_buffer_advance(struct io_buffer *io, size_t amount)
284 assert(amount <= io->len);
/* Consume 'amount' bytes, then slide the remainder back to the front. */
290 io_buffer_consume(struct io_buffer *io, size_t amount)
293 io_buffer_advance(io, amount);
300 * XXX: Consider making this move optional and compacting on a
301 * future read() before realloc().
303 memmove(io->data, io_buffer_head(io), io->len);
/* Ensure at least 'newsize' bytes of free space, realloc'ing if needed. */
308 io_buffer_grow(struct io_buffer *io, size_t newsize)
311 size_t avail, new_cap;
313 avail = io_buffer_avail(io);
314 if (newsize <= avail)
317 new_cap = io->capacity + (newsize - avail);
318 new_data = realloc(io->data, new_cap);
/* Allocation failure is fatal, so the old 'io->data' is not leaked. */
319 if (new_data == NULL)
320 err(1, "Failed to grow GDB I/O buffer");
322 io->capacity = new_cap;
/*
 * response_pending(): true if unsent response bytes remain.  A buffer
 * holding only a single ACK character ('+') does not count as a
 * pending response.  (Lines elided from this extract.)
 */
326 response_pending(void)
329 if (cur_resp.start == 0 && cur_resp.len == 0)
331 if (cur_resp.start + cur_resp.len == 1 && cur_resp.data[0] == '+')
/* Tear down the current client connection and reset all I/O state. */
337 close_connection(void)
341 * XXX: This triggers a warning because mevent does the close
342 * before the EV_DELETE.
344 pthread_mutex_lock(&gdb_lock);
345 mevent_delete(write_event);
346 mevent_delete_close(read_event);
349 io_buffer_reset(&cur_comm);
350 io_buffer_reset(&cur_resp);
353 /* Resume any stopped vCPUs. */
355 pthread_mutex_unlock(&gdb_lock);
/*
 * Helpers for converting between raw values and the ASCII hexadecimal
 * encoding used by the GDB remote protocol.
 */

/* Convert a nibble in [0, 15] to its lowercase hex character. */
static uint8_t
hex_digit(uint8_t nibble)
{

	if (nibble <= 9)
		return (nibble + '0');
	return (nibble + 'a' - 10);
}

/* Convert a hex character to its value; returns 0xF for non-hex input. */
static uint8_t
parse_digit(uint8_t v)
{

	if (v >= '0' && v <= '9')
		return (v - '0');
	if (v >= 'a' && v <= 'f')
		return (v - 'a' + 10);
	if (v >= 'A' && v <= 'F')
		return (v - 'A' + 10);
	return (0xF);
}

/* Parses big-endian hexadecimal. */
static uintmax_t
parse_integer(const uint8_t *p, size_t len)
{
	uintmax_t v;

	v = 0;
	while (len > 0) {
		v <<= 4;
		v |= parse_digit(*p);
		p++;
		len--;
	}
	return (v);
}

/* Parse two hex characters as a single byte, high nibble first. */
static uint8_t
parse_byte(const uint8_t *p)
{

	return (parse_digit(p[0]) << 4 | parse_digit(p[1]));
}
/*
 * Response-buffer helpers.  The send_* routines append raw bytes to
 * cur_resp; the append_* routines additionally feed the running packet
 * checksum.  (Lines are elided from this extract.)
 */
405 send_pending_data(int fd)
/* Nothing left to send: stop polling for writability. */
409 if (cur_resp.len == 0) {
410 mevent_disable(write_event);
413 nwritten = write(fd, io_buffer_head(&cur_resp), cur_resp.len);
414 if (nwritten == -1) {
415 warn("Write to GDB socket failed");
418 io_buffer_advance(&cur_resp, nwritten);
419 if (cur_resp.len == 0)
420 mevent_disable(write_event);
422 mevent_enable(write_event);
426 /* Append a single character to the output buffer. */
428 send_char(uint8_t data)
430 io_buffer_grow(&cur_resp, 1);
431 *io_buffer_tail(&cur_resp) = data;
435 /* Append an array of bytes to the output buffer. */
437 send_data(const uint8_t *data, size_t len)
440 io_buffer_grow(&cur_resp, len);
441 memcpy(io_buffer_tail(&cur_resp), data, len);
/* Write 'v' as two lowercase hex characters into buf[0..1]. */
446 format_byte(uint8_t v, uint8_t *buf)
449 buf[0] = hex_digit(v >> 4);
450 buf[1] = hex_digit(v & 0xf);
454 * Append a single byte (formatted as two hex characters) to the
463 send_data(buf, sizeof(buf));
480 debug("-> %.*s\n", (int)cur_resp.len, io_buffer_head(&cur_resp));
484 * Append a single character (for the packet payload) and update the
488 append_char(uint8_t v)
496 * Append an array of bytes (for the packet payload) and update the
500 append_packet_data(const uint8_t *data, size_t len)
503 send_data(data, len);
512 append_string(const char *str)
515 append_packet_data(str, strlen(str));
519 append_byte(uint8_t v)
524 append_packet_data(buf, sizeof(buf));
/* Append 'len' bytes of 'value' — presumably in guest (little-endian)
 * byte order; the loop body is elided, so confirm against upstream. */
528 append_unsigned_native(uintmax_t value, size_t len)
532 for (i = 0; i < len; i++) {
/* Append 'len' bytes of 'value', most-significant byte first. */
539 append_unsigned_be(uintmax_t value, size_t len)
544 for (i = 0; i < len; i++) {
545 format_byte(value, buf + (len - i - 1) * 2);
548 append_packet_data(buf, sizeof(buf));
/*
 * Append a variable-length unsigned integer as big-endian hex with
 * leading zero bytes omitted (used for thread ids and similar fields).
 *
 * Bug fix: the byte count was written as "fls(value) + 7 / 8", which
 * parses as fls(value) + 0 and so passed the *bit* count to
 * append_unsigned_be().  The intended expression is (fls(value) + 7) / 8,
 * the number of significant bytes in 'value'.
 */
static void
append_integer(unsigned int value)
{

	if (value == 0)
		append_char('0');
	else
		append_unsigned_be(value, (fls(value) + 7) / 8);
}
/*
 * append_asciihex(): append a NUL-terminated string hex-encoded (used
 * for qThreadExtraInfo replies).  send_empty_response()/send_error()
 * emit the standard "" and "Exx" packets.  (Lines elided from extract.)
 */
562 append_asciihex(const char *str)
565 while (*str != '\0') {
572 send_empty_response(void)
580 send_error(int error)
/*
 * parse_threadid(): GDB thread-id syntax — "0" means any thread,
 * "-1" means all threads, otherwise a positive hex thread id.
 */
599 parse_threadid(const uint8_t *data, size_t len)
602 if (len == 1 && *data == '0')
604 if (len == 2 && memcmp(data, "-1", 2) == 0)
608 return (parse_integer(data, len));
/* Build a stop-reply packet, naming the stopped vCPU as thread:<id>. */
616 if (stopped_vcpu == -1)
620 append_byte(GDB_SIGNAL_TRAP);
621 if (stopped_vcpu != -1) {
622 append_string("thread:");
/* GDB thread ids are 1-based; vCPU ids are 0-based. */
623 append_integer(stopped_vcpu + 1);
/* Called once every suspended vCPU has parked in _gdb_cpu_suspend(). */
631 gdb_finish_suspend_vcpus(void)
637 } else if (response_pending())
641 send_pending_data(cur_fd);
/*
 * Park the calling vCPU until the debugger resumes it.  gdb_lock is
 * held (it is the mutex passed to pthread_cond_wait below).  When the
 * last vCPU arrives and 'report_stop' is set, the stop is reported.
 */
646 _gdb_cpu_suspend(int vcpu, bool report_stop)
649 debug("$vCPU %d suspending\n", vcpu);
650 CPU_SET(vcpu, &vcpus_waiting);
651 if (report_stop && CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
652 gdb_finish_suspend_vcpus();
/* Sleep until resumed, or singled out as the single-stepping vCPU. */
653 while (CPU_ISSET(vcpu, &vcpus_suspended) && vcpu != stepping_vcpu)
654 pthread_cond_wait(&idle_vcpus, &gdb_lock);
655 CPU_CLR(vcpu, &vcpus_waiting);
656 debug("$vCPU %d resuming\n", vcpu);
/*
 * vCPU lifecycle hooks called from the rest of bhyve, plus suspend /
 * resume / step helpers.  All manipulate the vcpus_* cpusets under
 * gdb_lock.  (Lines are elided from this extract.)
 */
660 gdb_cpu_add(int vcpu)
663 debug("$vCPU %d starting\n", vcpu);
664 pthread_mutex_lock(&gdb_lock);
665 CPU_SET(vcpu, &vcpus_active);
668 * If a vcpu is added while vcpus are stopped, suspend the new
669 * vcpu so that it will pop back out with a debug exit before
670 * executing the first instruction.
672 if (!CPU_EMPTY(&vcpus_suspended)) {
673 CPU_SET(vcpu, &vcpus_suspended);
/* report_stop == false: it joins a stop already reported to GDB. */
674 _gdb_cpu_suspend(vcpu, false);
676 pthread_mutex_unlock(&gdb_lock);
/* Invoked on a debug exit; blocks the vCPU while the debugger has it. */
680 gdb_cpu_suspend(int vcpu)
683 pthread_mutex_lock(&gdb_lock);
684 _gdb_cpu_suspend(vcpu, true);
685 pthread_mutex_unlock(&gdb_lock);
/* Handle a monitor-trap (single-step) VM exit. */
689 gdb_cpu_mtrap(int vcpu)
692 debug("$vCPU %d MTRAP\n", vcpu);
693 pthread_mutex_lock(&gdb_lock);
694 if (vcpu == stepping_vcpu) {
/* Step complete: disable MTRAP exits and suspend this vCPU again. */
696 vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 0);
697 vm_suspend_cpu(ctx, vcpu);
698 assert(stopped_vcpu == -1);
700 _gdb_cpu_suspend(vcpu, true);
702 pthread_mutex_unlock(&gdb_lock);
/* Force all active vCPUs to stop; caller holds gdb_lock. */
706 gdb_suspend_vcpus(void)
709 assert(pthread_mutex_isowned_np(&gdb_lock));
710 debug("suspending all CPUs\n");
711 vcpus_suspended = vcpus_active;
712 vm_suspend_cpu(ctx, -1);
/* If every vCPU is already parked, report the stop immediately. */
713 if (CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
714 gdb_finish_suspend_vcpus();
/* Arrange to single-step 'vcpu' via the MTRAP-exit capability. */
718 gdb_step_vcpu(int vcpu)
722 debug("$vCPU %d step\n", vcpu);
/* Probe for MTRAP support first; the failure path here is elided. */
723 error = vm_get_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, &val);
726 error = vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 1);
727 vm_resume_cpu(ctx, vcpu);
728 stepping_vcpu = vcpu;
/* Wake parked vCPUs so the stepping one can run. */
729 pthread_cond_broadcast(&idle_vcpus);
/* Resume every vCPU; caller holds gdb_lock. */
734 gdb_resume_vcpus(void)
737 assert(pthread_mutex_isowned_np(&gdb_lock));
738 vm_resume_cpu(ctx, -1);
739 debug("resuming all CPUs\n");
740 CPU_ZERO(&vcpus_suspended);
741 pthread_cond_broadcast(&idle_vcpus);
/* Reply to 'g': dump all registers in gdb_regset order. */
747 uint64_t regvals[nitems(gdb_regset)];
750 if (vm_get_register_set(ctx, cur_vcpu, nitems(gdb_regset),
751 gdb_regset, regvals) == -1) {
756 for (i = 0; i < nitems(regvals); i++)
757 append_unsigned_native(regvals[i], gdb_regsize[i]);
/*
 * Handle 'm addr,len': read guest memory.  Walks the request one guest
 * page at a time, translating each page's GVA to a GPA.  RAM pages are
 * copied directly; other addresses fall back to MMIO emulation using
 * naturally-aligned 1/2/4-byte reads.  (Lines elided from extract.)
 */
762 gdb_read_mem(const uint8_t *data, size_t len)
764 uint64_t gpa, gva, val;
766 size_t resid, todo, bytes;
774 /* Parse and consume address. */
775 cp = memchr(data, ',', len);
776 if (cp == NULL || cp == data) {
780 gva = parse_integer(data, cp - data);
781 len -= (cp - data) + 1;
782 data += (cp - data) + 1;
785 resid = parse_integer(data, len);
789 error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
805 /* Read bytes from current page. */
/* 'todo' is capped to the rest of the current guest page. */
806 todo = getpagesize() - gpa % getpagesize();
810 cp = paddr_guest2host(ctx, gpa, todo);
813 * If this page is guest RAM, read it a byte
830 * If this page isn't guest RAM, try to handle
831 * it via MMIO. For MMIO requests, use
832 * aligned reads of words when possible.
835 if (gpa & 1 || todo == 1)
837 else if (gpa & 2 || todo == 2)
841 error = read_mem(ctx, cur_vcpu, gpa, &val,
/* Invariant: each loop iteration finishes out the current page. */
866 assert(resid == 0 || gpa % getpagesize() == 0);
/*
 * Handle 'M addr,len:hexbytes': write guest memory.  Same page-walk
 * structure as gdb_read_mem above.  (Lines elided from extract.)
 */
874 gdb_write_mem(const uint8_t *data, size_t len)
876 uint64_t gpa, gva, val;
878 size_t resid, todo, bytes;
885 /* Parse and consume address. */
886 cp = memchr(data, ',', len);
887 if (cp == NULL || cp == data) {
891 gva = parse_integer(data, cp - data);
892 len -= (cp - data) + 1;
893 data += (cp - data) + 1;
895 /* Parse and consume length. */
896 cp = memchr(data, ':', len);
897 if (cp == NULL || cp == data) {
901 resid = parse_integer(data, cp - data);
902 len -= (cp - data) + 1;
903 data += (cp - data) + 1;
905 /* Verify the available bytes match the length. */
906 if (len != resid * 2) {
912 error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
922 /* Write bytes to current page. */
923 todo = getpagesize() - gpa % getpagesize();
927 cp = paddr_guest2host(ctx, gpa, todo);
930 * If this page is guest RAM, write it a byte
935 *cp = parse_byte(data);
946 * If this page isn't guest RAM, try to handle
947 * it via MMIO. For MMIO requests, use
948 * aligned writes of words when possible.
951 if (gpa & 1 || todo == 1) {
953 val = parse_byte(data);
954 } else if (gpa & 2 || todo == 2) {
/* Hex bytes are little-endian in memory order: low byte first. */
956 val = parse_byte(data) |
957 (parse_byte(data + 2) << 8);
960 val = parse_byte(data) |
961 (parse_byte(data + 2) << 8) |
962 (parse_byte(data + 4) << 16) |
963 (parse_byte(data + 6) << 24);
965 error = write_mem(ctx, cur_vcpu, gpa, val,
980 assert(resid == 0 || gpa % getpagesize() == 0);
/*
 * Return true when the packet payload in 'data' (length 'len') begins
 * with the literal command string 'cmd'.
 */
static bool
command_equals(const uint8_t *data, size_t len, const char *cmd)
{
	size_t cmdlen;

	cmdlen = strlen(cmd);
	if (cmdlen > len)
		return (false);
	return (memcmp(data, cmd, cmdlen) == 0);
}
/*
 * check_features(): parse the semicolon-separated feature list from a
 * qSupported packet and reply with our own feature set.
 * (Lines are elided from this extract.)
 */
996 check_features(const uint8_t *data, size_t len)
998 char *feature, *next_feature, *str, *value;
/* NOTE(review): malloc() result is used unchecked below — confirm the
 * elided lines handle allocation failure and NUL-terminate 'str'. */
1001 str = malloc(len + 1);
1002 memcpy(str, data, len);
1006 while ((feature = strsep(&next_feature, ";")) != NULL) {
1008 * Null features shouldn't exist, but skip if they
1011 if (strcmp(feature, "") == 0)
1015 * Look for the value or supported / not supported
1018 value = strchr(feature, '=');
1019 if (value != NULL) {
/* No '=': the last character is the '+'/'-' supported flag. */
1024 value = feature + strlen(feature) - 1;
1034 * This is really a protocol error,
1035 * but we just ignore malformed
1036 * features for ease of
1044 /* No currently supported features. */
1050 /* This is an arbitrary limit. */
1051 append_string("PacketSize=4096");
/* Dispatch the 'q' family of query packets. */
1056 gdb_query(const uint8_t *data, size_t len)
1063 if (command_equals(data, len, "qAttached")) {
1067 } else if (command_equals(data, len, "qC")) {
/* Report the current thread; GDB ids are 1-based. */
1069 append_string("QC");
1070 append_integer(cur_vcpu + 1);
1072 } else if (command_equals(data, len, "qfThreadInfo")) {
1077 if (CPU_EMPTY(&vcpus_active)) {
1081 mask = vcpus_active;
/* Enumerate every active vCPU as a thread id. */
1085 while (!CPU_EMPTY(&mask)) {
1086 vcpu = CPU_FFS(&mask) - 1;
1087 CPU_CLR(vcpu, &mask);
1092 append_integer(vcpu + 1);
1095 } else if (command_equals(data, len, "qsThreadInfo")) {
1099 } else if (command_equals(data, len, "qSupported")) {
1100 data += strlen("qSupported");
1101 len -= strlen("qSupported");
1102 check_features(data, len);
1103 } else if (command_equals(data, len, "qThreadExtraInfo")) {
1107 data += strlen("qThreadExtraInfo");
1108 len -= strlen("qThreadExtraInfo");
1113 tid = parse_threadid(data + 1, len - 1);
1114 if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
1119 snprintf(buf, sizeof(buf), "vCPU %d", tid - 1);
1121 append_asciihex(buf);
/* Unknown queries get the standard empty response. */
1124 send_empty_response();
/*
 * handle_command(): dispatch one complete, checksum-verified packet
 * payload.  (Lines are elided from this extract.)
 */
1128 handle_command(const uint8_t *data, size_t len)
1131 /* Reject packets with a sequence-id. */
/* NOTE(review): data[0] is range-tested twice in the condition below;
 * the second pair of comparisons should almost certainly test data[1]
 * so that both sequence-id digits are validated. */
1132 if (len >= 3 && data[0] >= '0' && data[0] <= '9' &&
1133 data[0] >= '0' && data[0] <= '9' && data[2] == ':') {
1134 send_empty_response();
1145 /* Don't send a reply until a stop occurs. */
1151 /* TODO: Resume any stopped CPUs. */
/* 'H' packet: select the thread for subsequent 'g'/'c' operations. */
1160 if (data[1] != 'g' && data[1] != 'c') {
1164 tid = parse_threadid(data + 2, len - 2);
1170 if (CPU_EMPTY(&vcpus_active)) {
/* tid 0 or -1: pick the lowest-numbered active vCPU. */
1174 if (tid == -1 || tid == 0)
1175 cur_vcpu = CPU_FFS(&vcpus_active) - 1;
1176 else if (CPU_ISSET(tid - 1, &vcpus_active))
1186 gdb_read_mem(data, len);
1189 gdb_write_mem(data, len);
/* 'T' packet: is the given thread alive? */
1194 tid = parse_threadid(data + 1, len - 1);
1195 if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
1203 gdb_query(data, len);
1211 /* Don't send a reply until a stop occurs. */
1212 if (!gdb_step_vcpu(cur_vcpu)) {
1213 send_error(EOPNOTSUPP);
1218 /* XXX: Only if stopped? */
1219 /* For now, just report that we are always stopped. */
1222 append_byte(GDB_SIGNAL_TRAP);
1225 case 'G': /* TODO */
1227 /* Handle 'vCont' */
1229 case 'p': /* TODO */
1230 case 'P': /* TODO */
1231 case 'Q': /* TODO */
1232 case 't': /* TODO */
1233 case 'X': /* TODO */
1234 case 'z': /* TODO */
1235 case 'Z': /* TODO */
/* Unimplemented commands get the protocol's empty response. */
1237 send_empty_response();
1241 /* Check for a valid packet in the command buffer. */
1243 check_command(int fd)
1245 uint8_t *head, *hash, *p, sum;
1249 avail = cur_comm.len;
1252 head = io_buffer_head(&cur_comm);
1255 debug("<- Ctrl-C\n");
1256 io_buffer_consume(&cur_comm, 1);
1258 gdb_suspend_vcpus();
1261 /* ACK of previous response. */
1263 if (response_pending())
1264 io_buffer_reset(&cur_resp);
1265 io_buffer_consume(&cur_comm, 1);
1267 stop_pending = false;
1269 send_pending_data(fd);
1273 /* NACK of previous response. */
1275 if (response_pending()) {
1276 cur_resp.len += cur_resp.start;
1278 if (cur_resp.data[0] == '+')
1279 io_buffer_advance(&cur_resp, 1);
1280 debug("-> %.*s\n", (int)cur_resp.len,
1281 io_buffer_head(&cur_resp));
1283 io_buffer_consume(&cur_comm, 1);
1284 send_pending_data(fd);
1289 if (response_pending()) {
1290 warnx("New GDB command while response in "
1292 io_buffer_reset(&cur_resp);
1295 /* Is packet complete? */
1296 hash = memchr(head, '#', avail);
1299 plen = (hash - head + 1) + 2;
1302 debug("<- %.*s\n", (int)plen, head);
1304 /* Verify checksum. */
1305 for (sum = 0, p = head + 1; p < hash; p++)
1307 if (sum != parse_byte(hash + 1)) {
1308 io_buffer_consume(&cur_comm, plen);
1311 send_pending_data(fd);
1316 handle_command(head + 1, hash - (head + 1));
1317 io_buffer_consume(&cur_comm, plen);
1318 if (!response_pending())
1320 send_pending_data(fd);
1323 /* XXX: Possibly drop connection instead. */
1324 debug("-> %02x\n", *head);
1325 io_buffer_consume(&cur_comm, 1);
/*
 * mevent callbacks for the GDB client socket.
 * (Lines are elided from this extract.)
 */
1332 gdb_readable(int fd, enum ev_type event, void *arg)
1337 if (ioctl(fd, FIONREAD, &pending) == -1) {
1338 warn("FIONREAD on GDB socket");
1343 * 'pending' might be zero due to EOF. We need to call read
1344 * with a non-zero length to detect EOF.
1349 /* Ensure there is room in the command buffer. */
1350 io_buffer_grow(&cur_comm, pending);
1351 assert(io_buffer_avail(&cur_comm) >= pending);
1353 nread = read(fd, io_buffer_tail(&cur_comm), io_buffer_avail(&cur_comm));
1356 } else if (nread == -1) {
1357 if (errno == EAGAIN)
1360 warn("Read from GDB socket");
1363 cur_comm.len += nread;
/* Parse any now-complete packets while holding the lock. */
1364 pthread_mutex_lock(&gdb_lock);
1366 pthread_mutex_unlock(&gdb_lock);
/* Socket became writable: flush any queued response bytes. */
1371 gdb_writable(int fd, enum ev_type event, void *arg)
1374 send_pending_data(fd);
/* Accept a debugger connection; only one client is served at a time. */
1378 new_connection(int fd, enum ev_type event, void *arg)
1382 s = accept4(fd, NULL, NULL, SOCK_NONBLOCK);
/* Failure is fatal only for the very first (startup) connection. */
1385 err(1, "Failed accepting initial GDB connection");
1387 /* Silently ignore errors post-startup. */
1392 if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)) ==
1394 warn("Failed to disable SIGPIPE for GDB connection");
1399 pthread_mutex_lock(&gdb_lock);
/* A client is already attached: drop the new connection. */
1402 warnx("Ignoring additional GDB connection.");
1405 read_event = mevent_add(s, EVF_READ, gdb_readable, NULL);
1406 if (read_event == NULL) {
1408 err(1, "Failed to setup initial GDB connection");
1409 pthread_mutex_unlock(&gdb_lock);
1412 write_event = mevent_add(s, EVF_WRITE, gdb_writable, NULL);
1413 if (write_event == NULL) {
1415 err(1, "Failed to setup initial GDB connection");
1416 mevent_delete_close(read_event);
1424 stop_pending = false;
1426 /* Break on attach. */
1428 gdb_suspend_vcpus();
1429 pthread_mutex_unlock(&gdb_lock);
1432 #ifndef WITHOUT_CAPSICUM
/* Restrict the listening socket to the rights and ioctls it needs. */
1434 limit_gdb_socket(int s)
1436 cap_rights_t rights;
1437 unsigned long ioctls[] = { FIONREAD };
1439 cap_rights_init(&rights, CAP_ACCEPT, CAP_EVENT, CAP_READ, CAP_WRITE,
1440 CAP_SETSOCKOPT, CAP_IOCTL);
1441 if (caph_rights_limit(s, &rights) == -1)
1442 errx(EX_OSERR, "Unable to apply rights for sandbox");
1443 if (caph_ioctls_limit(s, ioctls, nitems(ioctls)) == -1)
1444 errx(EX_OSERR, "Unable to apply rights for sandbox");
/*
 * init_gdb(): create the TCP listen socket on port 'sport' and register
 * it with mevent; if 'wait' is set, hold vcpu 0 suspended until a
 * debugger attaches.  (Lines are elided from this extract.)
 */
1449 init_gdb(struct vmctx *_ctx, int sport, bool wait)
1451 struct sockaddr_in sin;
1452 int error, flags, s;
1454 debug("==> starting on %d, %swaiting\n", sport, wait ? "" : "not ");
1456 error = pthread_mutex_init(&gdb_lock, NULL);
1458 errc(1, error, "gdb mutex init");
1459 error = pthread_cond_init(&idle_vcpus, NULL);
1461 errc(1, error, "gdb cv init");
1464 s = socket(PF_INET, SOCK_STREAM, 0);
1466 err(1, "gdb socket create");
/* Bind to INADDR_ANY:sport. */
1468 sin.sin_len = sizeof(sin);
1469 sin.sin_family = AF_INET;
1470 sin.sin_addr.s_addr = htonl(INADDR_ANY);
1471 sin.sin_port = htons(sport);
1473 if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) < 0)
1474 err(1, "gdb socket bind");
1476 if (listen(s, 1) < 0)
1477 err(1, "gdb socket listen");
1481 * Set vcpu 0 in vcpus_suspended. This will trigger the
1482 * logic in gdb_cpu_add() to suspend the first vcpu before
1483 * it starts execution. The vcpu will remain suspended
1484 * until a debugger connects.
1488 CPU_SET(0, &vcpus_suspended);
1491 flags = fcntl(s, F_GETFL);
1492 if (fcntl(s, F_SETFL, flags | O_NONBLOCK) == -1)
1493 err(1, "Failed to mark gdb socket non-blocking");
1495 #ifndef WITHOUT_CAPSICUM
1496 limit_gdb_socket(s);
1498 mevent_add(s, EVF_READ, new_connection, NULL);