2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2017-2018 John H. Baldwin <jhb@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #ifndef WITHOUT_CAPSICUM
33 #include <sys/capsicum.h>
35 #include <sys/endian.h>
36 #include <sys/ioctl.h>
38 #include <sys/socket.h>
39 #include <machine/atomic.h>
40 #include <machine/specialreg.h>
41 #include <machine/vmm.h>
42 #include <netinet/in.h>
44 #ifndef WITHOUT_CAPSICUM
45 #include <capsicum_helpers.h>
51 #include <pthread_np.h>
65 * GDB_SIGNAL_* numbers are part of the GDB remote protocol. Most stops
68 #define GDB_SIGNAL_TRAP 5
/* Forward declarations for routines used before their definitions. */
70 static void gdb_resume_vcpus(void);
71 static void check_command(int fd);
/* mevent handles for read/write readiness on the GDB connection socket. */
73 static struct mevent *read_event, *write_event;
/* vCPU sets: all existing vCPUs, those suspended for the debugger, and those parked waiting. */
75 static cpuset_t vcpus_active, vcpus_suspended, vcpus_waiting;
/* gdb_lock guards the debugger state above/below; idle_vcpus wakes parked vCPUs. */
76 static pthread_mutex_t gdb_lock;
77 static pthread_cond_t idle_vcpus;
78 static bool stop_pending, first_stop;
/* vCPU currently being single-stepped, and the vCPU whose stop is to be reported (-1 = none). */
79 static int stepping_vcpu, stopped_vcpu;
82 * An I/O buffer contains 'capacity' bytes of room at 'data'. For a
83 * read buffer, 'start' is unused and 'len' contains the number of
84 * valid bytes in the buffer. For a write buffer, 'start' is set to
85 * the index of the next byte in 'data' to send, and 'len' contains
86 * the remaining number of valid bytes to send.
/* cur_comm buffers incoming command bytes; cur_resp buffers the outgoing response. */
95 static struct io_buffer cur_comm, cur_resp;
96 static uint8_t cur_csum;
/* VM context and the fd of the current GDB connection (-1 when no debugger attached). */
98 static struct vmctx *ctx;
99 static int cur_fd = -1;
/* Parallel tables: VM register IDs for each GDB register index, and their sizes in bytes. */
101 const int gdb_regset[] = {
128 const int gdb_regsize[] = {
/*
 * printf-style debug logging to /tmp/bhyve_gdb.log. The log file is
 * opened lazily on first use.
 */
159 static void __printflike(1, 2)
160 debug(const char *fmt, ...)
162 static FILE *logfile;
165 if (logfile == NULL) {
166 logfile = fopen("/tmp/bhyve_gdb.log", "w");
169 #ifndef WITHOUT_CAPSICUM
/* Under the Capsicum sandbox, restrict the log stream to write-only. */
170 if (caph_limit_stream(fileno(logfile), CAPH_WRITE) == -1) {
179 vfprintf(logfile, fmt, ap);
/*
 * Fill in 'paging' with the current paging configuration of 'vcpu',
 * derived from its CR0/CR3/CR4/EFER registers.
 */
187 guest_paging_info(int vcpu, struct vm_guest_paging *paging)
190 const int regset[4] = {
/* regs[] indices 0..3 correspond to CR0, CR3, CR4, EFER per the uses below. */
197 if (vm_get_register_set(ctx, vcpu, nitems(regset), regset, regs) == -1)
201 * For the debugger, always pretend to be the kernel (CPL 0),
202 * and if long-mode is enabled, always parse addresses as if
205 paging->cr3 = regs[1];
/* CPU mode: 64-bit if EFER.LMA, else protected if CR0.PE, else real mode. */
207 if (regs[3] & EFER_LMA)
208 paging->cpu_mode = CPU_MODE_64BIT;
209 else if (regs[0] & CR0_PE)
210 paging->cpu_mode = CPU_MODE_PROTECTED;
212 paging->cpu_mode = CPU_MODE_REAL;
/* Paging mode: flat without CR0.PG; then 32-bit, 64-bit, or PAE from CR4.PAE/EFER.LME. */
213 if (!(regs[0] & CR0_PG))
214 paging->paging_mode = PAGING_MODE_FLAT;
215 else if (!(regs[2] & CR4_PAE))
216 paging->paging_mode = PAGING_MODE_32;
217 else if (regs[3] & EFER_LME)
218 paging->paging_mode = PAGING_MODE_64;
220 paging->paging_mode = PAGING_MODE_PAE;
225 * Map a guest virtual address to a physical address (for a given vcpu).
226 * If a guest virtual address is valid, return 1. If the address is
227 * not valid, return 0. If an error occurs obtaining the mapping,
231 guest_vaddr2paddr(int vcpu, uint64_t vaddr, uint64_t *paddr)
233 struct vm_guest_paging paging;
/* Paging setup must be known before the GLA->GPA walk can be done. */
236 if (guest_paging_info(vcpu, &paging) == -1)
240 * Always use PROT_READ. We really care if the VA is
241 * accessible, not if the current vCPU can write.
243 if (vm_gla2gpa_nofault(ctx, vcpu, &paging, vaddr, PROT_READ, paddr,
/* Reset an I/O buffer to empty (storage is retained). */
252 io_buffer_reset(struct io_buffer *io)
259 /* Available room for adding data. */
261 io_buffer_avail(struct io_buffer *io)
264 return (io->capacity - (io->start + io->len));
/* Pointer to the first valid byte in the buffer. */
268 io_buffer_head(struct io_buffer *io)
271 return (io->data + io->start);
/* Pointer just past the last valid byte, where new data is appended. */
275 io_buffer_tail(struct io_buffer *io)
278 return (io->data + io->start + io->len);
/* Drop 'amount' bytes from the front of the buffer. */
282 io_buffer_advance(struct io_buffer *io, size_t amount)
285 assert(amount <= io->len);
/* Consume 'amount' bytes, then compact remaining data to the start of the buffer. */
291 io_buffer_consume(struct io_buffer *io, size_t amount)
294 io_buffer_advance(io, amount);
301 * XXX: Consider making this move optional and compacting on a
302 * future read() before realloc().
304 memmove(io->data, io_buffer_head(io), io->len);
/*
 * Ensure at least 'newsize' bytes of free room are available in the
 * buffer, reallocating its storage if necessary. Exits on allocation
 * failure.
 */
309 io_buffer_grow(struct io_buffer *io, size_t newsize)
312 size_t avail, new_cap;
314 avail = io_buffer_avail(io);
315 if (newsize <= avail)
/* Grow capacity by exactly the shortfall. */
318 new_cap = io->capacity + (newsize - avail);
319 new_data = realloc(io->data, new_cap);
320 if (new_data == NULL)
/* err() terminates the process, so the old pointer is not leaked in practice. */
321 err(1, "Failed to grow GDB I/O buffer");
323 io->capacity = new_cap;
/*
 * True if response data remains to be sent. A response consisting of
 * only the '+' acknowledgement character does not count as pending.
 */
327 response_pending(void)
330 if (cur_resp.start == 0 && cur_resp.len == 0)
332 if (cur_resp.start + cur_resp.len == 1 && cur_resp.data[0] == '+')
/*
 * Tear down the current GDB connection: remove its event handlers,
 * reset both I/O buffers, and resume any vCPUs that were stopped for
 * the debugger.
 */
338 close_connection(void)
342 * XXX: This triggers a warning because mevent does the close
343 * before the EV_DELETE.
345 pthread_mutex_lock(&gdb_lock);
346 mevent_delete(write_event);
347 mevent_delete_close(read_event);
350 io_buffer_reset(&cur_comm);
351 io_buffer_reset(&cur_resp);
354 /* Resume any stopped vCPUs. */
356 pthread_mutex_unlock(&gdb_lock);
/*
 * Convert a nibble (0-15) to its lowercase ASCII hexadecimal
 * character. Behavior is undefined for values above 15.
 */
static uint8_t
hex_digit(uint8_t nibble)
{

	if (nibble <= 9)
		return (nibble + '0');
	else
		return (nibble + 'a' - 10);
}
/*
 * Convert an ASCII hexadecimal character to its numeric value, or
 * return -1 if the character is not a valid hex digit.
 */
static int
parse_digit(uint8_t v)
{

	if (v >= '0' && v <= '9')
		return (v - '0');
	if (v >= 'a' && v <= 'f')
		return (v - 'a' + 10);
	if (v >= 'A' && v <= 'F')
		return (v - 'A' + 10);
	return (-1);
}
/*
 * Parses big-endian hexadecimal: 'len' hex characters at 'p' are
 * folded into a single unsigned value, most significant digit first.
 * Invalid digits are not diagnosed; parse_digit()'s -1 would corrupt
 * the result, so callers must validate input.
 */
static uintmax_t
parse_integer(const uint8_t *p, size_t len)
{
	uintmax_t v;

	v = 0;
	while (len > 0) {
		v <<= 4;
		v |= parse_digit(*p);
		p++;
		len--;
	}
	return (v);
}
/*
 * Parse two hexadecimal characters at 'p' into one byte (high nibble
 * first). Input must already be validated as hex digits.
 */
static uint8_t
parse_byte(const uint8_t *p)
{

	return (parse_digit(p[0]) << 4 | parse_digit(p[1]));
}
/*
 * Write as much of the buffered response to the socket as it will
 * accept, enabling or disabling the write event depending on whether
 * data remains.
 */
406 send_pending_data(int fd)
410 if (cur_resp.len == 0) {
411 mevent_disable(write_event);
414 nwritten = write(fd, io_buffer_head(&cur_resp), cur_resp.len);
415 if (nwritten == -1) {
416 warn("Write to GDB socket failed");
419 io_buffer_advance(&cur_resp, nwritten);
420 if (cur_resp.len == 0)
421 mevent_disable(write_event);
/* Partial write: wait for the socket to become writable again. */
423 mevent_enable(write_event);
427 /* Append a single character to the output buffer. */
429 send_char(uint8_t data)
431 io_buffer_grow(&cur_resp, 1);
432 *io_buffer_tail(&cur_resp) = data;
436 /* Append an array of bytes to the output buffer. */
438 send_data(const uint8_t *data, size_t len)
/* Grow first so the memcpy into the tail cannot overrun. */
441 io_buffer_grow(&cur_resp, len);
442 memcpy(io_buffer_tail(&cur_resp), data, len);
/*
 * Format a byte as two lowercase hexadecimal characters, stored at
 * buf[0] (high nibble) and buf[1] (low nibble). No NUL terminator is
 * written.
 */
static void
format_byte(uint8_t v, uint8_t *buf)
{

	buf[0] = hex_digit(v >> 4);
	buf[1] = hex_digit(v & 0xf);
}
455 * Append a single byte (formatted as two hex characters) to the
464 send_data(buf, sizeof(buf));
/* Log the completed outgoing packet for debugging. */
481 debug("-> %.*s\n", (int)cur_resp.len, io_buffer_head(&cur_resp));
485 * Append a single character (for the packet payload) and update the
489 append_char(uint8_t v)
497 * Append an array of bytes (for the packet payload) and update the
501 append_packet_data(const uint8_t *data, size_t len)
504 send_data(data, len);
/* Append a NUL-terminated string to the packet payload. */
513 append_string(const char *str)
516 append_packet_data(str, strlen(str));
/* Append one byte as two hex characters to the packet payload. */
520 append_byte(uint8_t v)
525 append_packet_data(buf, sizeof(buf));
/* Append 'len' bytes of 'value' in the target's native byte order. */
529 append_unsigned_native(uintmax_t value, size_t len)
533 for (i = 0; i < len; i++) {
/* Append 'len' bytes of 'value' as big-endian hex (most significant byte first). */
540 append_unsigned_be(uintmax_t value, size_t len)
/* Each iteration formats the current low byte into its big-endian slot. */
545 for (i = 0; i < len; i++) {
546 format_byte(value, buf + (len - i - 1) * 2);
549 append_packet_data(buf, sizeof(buf));
/*
 * Append an unsigned integer to the packet payload as big-endian hex
 * using the minimum number of whole bytes needed to represent it.
 *
 * Fix: the byte count must be (fls(value) + 7) / 8; without the
 * parentheses, "fls(value) + 7 / 8" evaluates to fls(value) + 0,
 * which is the bit count, not the byte count.
 */
static void
append_integer(unsigned int value)
{

	if (value == 0)
		append_char('0');
	else
		append_unsigned_be(value, (fls(value) + 7) / 8);
}
/* Append a string encoded as ASCII hex (two hex characters per input byte). */
563 append_asciihex(const char *str)
566 while (*str != '\0') {
/* Send the empty response packet, which means "command not supported". */
573 send_empty_response(void)
/* Send an 'E<nn>' error response for the given errno-style value. */
581 send_error(int error)
/*
 * Parse a GDB thread ID: "0" means any thread, "-1" means all
 * threads, anything else is parsed as a hex integer.
 */
600 parse_threadid(const uint8_t *data, size_t len)
603 if (len == 1 && *data == '0')
605 if (len == 2 && memcmp(data, "-1", 2) == 0)
609 return (parse_integer(data, len));
/* Stop reporting: send GDB_SIGNAL_TRAP, tagging the stopped thread if known. */
617 if (stopped_vcpu == -1)
621 append_byte(GDB_SIGNAL_TRAP);
622 if (stopped_vcpu != -1) {
623 append_string("thread:");
/* GDB thread IDs are vCPU number + 1 (0 and -1 are reserved by the protocol). */
624 append_integer(stopped_vcpu + 1);
/*
 * Invoked once every vCPU has suspended: report the stop to the
 * debugger, or flush/queue it depending on the response buffer state.
 */
632 gdb_finish_suspend_vcpus(void)
638 } else if (response_pending())
642 send_pending_data(cur_fd);
/*
 * Park the calling vCPU until the debugger resumes it. Called with
 * gdb_lock held. If 'report_stop' is set and this was the last vCPU
 * to park, notify the debugger that the stop is complete.
 */
647 _gdb_cpu_suspend(int vcpu, bool report_stop)
650 debug("$vCPU %d suspending\n", vcpu);
651 CPU_SET(vcpu, &vcpus_waiting);
652 if (report_stop && CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
653 gdb_finish_suspend_vcpus();
/* Sleep until resumed; the vCPU being single-stepped skips the wait. */
654 while (CPU_ISSET(vcpu, &vcpus_suspended) && vcpu != stepping_vcpu)
655 pthread_cond_wait(&idle_vcpus, &gdb_lock);
656 CPU_CLR(vcpu, &vcpus_waiting);
657 debug("$vCPU %d resuming\n", vcpu);
/*
 * Register a newly started vCPU with the debugger. If vCPUs are
 * currently stopped, the new vCPU is suspended immediately so the
 * debugger sees it before it executes anything.
 */
661 gdb_cpu_add(int vcpu)
664 debug("$vCPU %d starting\n", vcpu);
665 pthread_mutex_lock(&gdb_lock);
666 CPU_SET(vcpu, &vcpus_active);
669 * If a vcpu is added while vcpus are stopped, suspend the new
670 * vcpu so that it will pop back out with a debug exit before
671 * executing the first instruction.
673 if (!CPU_EMPTY(&vcpus_suspended)) {
674 CPU_SET(vcpu, &vcpus_suspended);
/* report_stop=false: this vCPU's suspension is not a debugger-visible stop. */
675 _gdb_cpu_suspend(vcpu, false);
677 pthread_mutex_unlock(&gdb_lock);
/* Public wrapper: suspend the calling vCPU, taking gdb_lock around the work. */
681 gdb_cpu_suspend(int vcpu)
684 pthread_mutex_lock(&gdb_lock);
685 _gdb_cpu_suspend(vcpu, true);
686 pthread_mutex_unlock(&gdb_lock);
/*
 * Handle a monitor-trap (single-step) VM exit. If this vCPU was the
 * one being stepped, disable further MTRAP exits, suspend it, and
 * record it as the stopped vCPU before parking.
 */
690 gdb_cpu_mtrap(int vcpu)
693 debug("$vCPU %d MTRAP\n", vcpu);
694 pthread_mutex_lock(&gdb_lock);
695 if (vcpu == stepping_vcpu) {
/* The single step is done: turn the MTRAP capability back off. */
697 vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 0);
698 vm_suspend_cpu(ctx, vcpu);
/* No other stop should be outstanding when a step completes. */
699 assert(stopped_vcpu == -1);
701 _gdb_cpu_suspend(vcpu, true);
703 pthread_mutex_unlock(&gdb_lock);
/*
 * Request that all active vCPUs stop for the debugger. Caller must
 * hold gdb_lock. If every vCPU is already waiting, finish the stop
 * immediately.
 */
707 gdb_suspend_vcpus(void)
710 assert(pthread_mutex_isowned_np(&gdb_lock));
711 debug("suspending all CPUs\n");
712 vcpus_suspended = vcpus_active;
/* -1 means suspend every vCPU in the VM. */
713 vm_suspend_cpu(ctx, -1);
714 if (CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
715 gdb_finish_suspend_vcpus();
/*
 * Single-step 'vcpu' using the MTRAP-exit capability: enable the
 * capability, resume just that vCPU, and wake it from its parked
 * state. The capability is probed first in case it is unsupported.
 */
719 gdb_step_vcpu(int vcpu)
723 debug("$vCPU %d step\n", vcpu);
/* Probe for MTRAP-exit support before enabling it. */
724 error = vm_get_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, &val);
727 error = vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 1);
728 vm_resume_cpu(ctx, vcpu);
729 stepping_vcpu = vcpu;
/* Wake the parked vCPU; _gdb_cpu_suspend's loop lets the stepping vCPU through. */
730 pthread_cond_broadcast(&idle_vcpus);
/*
 * Resume all vCPUs and wake any threads parked in _gdb_cpu_suspend().
 * Caller must hold gdb_lock.
 */
735 gdb_resume_vcpus(void)
738 assert(pthread_mutex_isowned_np(&gdb_lock));
739 vm_resume_cpu(ctx, -1);
740 debug("resuming all CPUs\n");
741 CPU_ZERO(&vcpus_suspended);
742 pthread_cond_broadcast(&idle_vcpus);
/*
 * Read every register in gdb_regset from the current vCPU and append
 * them to the response in native byte order, using the per-register
 * sizes from gdb_regsize.
 */
748 uint64_t regvals[nitems(gdb_regset)];
751 if (vm_get_register_set(ctx, cur_vcpu, nitems(gdb_regset),
752 gdb_regset, regvals) == -1) {
757 for (i = 0; i < nitems(regvals); i++)
758 append_unsigned_native(regvals[i], gdb_regsize[i]);
/*
 * Handle the GDB 'm' packet: read guest memory. 'data' holds
 * "<addr>,<length>" in hex; bytes are translated page by page from
 * guest-virtual to guest-physical, read from RAM directly or via MMIO
 * for non-RAM pages.
 */
763 gdb_read_mem(const uint8_t *data, size_t len)
765 uint64_t gpa, gva, val;
767 size_t resid, todo, bytes;
775 /* Parse and consume address. */
776 cp = memchr(data, ',', len);
777 if (cp == NULL || cp == data) {
781 gva = parse_integer(data, cp - data);
782 len -= (cp - data) + 1;
783 data += (cp - data) + 1;
/* Remaining characters are the hex byte count to read. */
786 resid = parse_integer(data, len);
790 error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
806 /* Read bytes from current page. */
807 todo = getpagesize() - gpa % getpagesize();
811 cp = paddr_guest2host(ctx, gpa, todo);
814 * If this page is guest RAM, read it a byte
831 * If this page isn't guest RAM, try to handle
832 * it via MMIO. For MMIO requests, use
833 * aligned reads of words when possible.
/* Pick the widest naturally-aligned access that fits. */
836 if (gpa & 1 || todo == 1)
838 else if (gpa & 2 || todo == 2)
842 error = read_mem(ctx, cur_vcpu, gpa, &val,
/* After each page, the GPA must be page-aligned unless we are done. */
867 assert(resid == 0 || gpa % getpagesize() == 0);
/*
 * Handle the GDB 'M' packet: write guest memory. 'data' holds
 * "<addr>,<length>:<hex bytes>". Writes proceed page by page,
 * directly for guest RAM or via MMIO (using aligned word writes where
 * possible) for other pages.
 */
875 gdb_write_mem(const uint8_t *data, size_t len)
877 uint64_t gpa, gva, val;
879 size_t resid, todo, bytes;
886 /* Parse and consume address. */
887 cp = memchr(data, ',', len);
888 if (cp == NULL || cp == data) {
892 gva = parse_integer(data, cp - data);
893 len -= (cp - data) + 1;
894 data += (cp - data) + 1;
896 /* Parse and consume length. */
897 cp = memchr(data, ':', len);
898 if (cp == NULL || cp == data) {
902 resid = parse_integer(data, cp - data);
903 len -= (cp - data) + 1;
904 data += (cp - data) + 1;
906 /* Verify the available bytes match the length. */
907 if (len != resid * 2) {
913 error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
923 /* Write bytes to current page. */
924 todo = getpagesize() - gpa % getpagesize();
928 cp = paddr_guest2host(ctx, gpa, todo);
931 * If this page is guest RAM, write it a byte
936 *cp = parse_byte(data);
947 * If this page isn't guest RAM, try to handle
948 * it via MMIO. For MMIO requests, use
949 * aligned writes of words when possible.
/* Pick the widest naturally-aligned access that fits; hex input is big-endian. */
952 if (gpa & 1 || todo == 1) {
954 val = parse_byte(data);
955 } else if (gpa & 2 || todo == 2) {
957 val = be16toh(parse_integer(data, 4));
960 val = be32toh(parse_integer(data, 8));
962 error = write_mem(ctx, cur_vcpu, gpa, val,
/* After each page, the GPA must be page-aligned unless we are done. */
977 assert(resid == 0 || gpa % getpagesize() == 0);
/*
 * Return true if the buffer (data, len) begins with the
 * NUL-terminated string 'cmd'.
 */
static bool
command_equals(const uint8_t *data, size_t len, const char *cmd)
{

	if (strlen(cmd) > len)
		return (false);
	return (memcmp(data, cmd, strlen(cmd)) == 0);
}
/*
 * Parse the feature list from a qSupported packet ("name=value" or
 * "name" followed by +/-/? entries, ';'-separated) and reply with the
 * features this stub supports.
 */
993 check_features(const uint8_t *data, size_t len)
995 char *feature, *next_feature, *str, *value;
/* Copy into a NUL-terminated string so strsep()/strcmp() can be used. */
998 str = malloc(len + 1);
/* NOTE(review): malloc() return value is not checked before memcpy(). */
999 memcpy(str, data, len);
1003 while ((feature = strsep(&next_feature, ";")) != NULL) {
1005 * Null features shouldn't exist, but skip if they
1008 if (strcmp(feature, "") == 0)
1012 * Look for the value or supported / not supported
1015 value = strchr(feature, '=');
1016 if (value != NULL) {
/* No '=': the final character carries the +/-/? flag. */
1021 value = feature + strlen(feature) - 1;
1031 * This is really a protocol error,
1032 * but we just ignore malformed
1033 * features for ease of
1041 /* No currently supported features. */
1047 /* This is an arbitrary limit. */
1048 append_string("PacketSize=4096");
/*
 * Dispatch the GDB 'q' (general query) packets: qAttached, qC,
 * qfThreadInfo/qsThreadInfo, qSupported, and qThreadExtraInfo.
 * Unrecognized queries get the empty "not supported" response.
 */
1053 gdb_query(const uint8_t *data, size_t len)
1060 if (command_equals(data, len, "qAttached")) {
1064 } else if (command_equals(data, len, "qC")) {
/* Report the current thread; thread IDs are vCPU number + 1. */
1066 append_string("QC");
1067 append_integer(cur_vcpu + 1);
1069 } else if (command_equals(data, len, "qfThreadInfo")) {
1074 if (CPU_EMPTY(&vcpus_active)) {
1078 mask = vcpus_active;
/* Emit each active vCPU as a thread ID, lowest first. */
1082 while (!CPU_EMPTY(&mask)) {
1083 vcpu = CPU_FFS(&mask) - 1;
1084 CPU_CLR(vcpu, &mask);
1089 append_integer(vcpu + 1);
/* All threads were reported in the first reply, so qsThreadInfo ends the list. */
1092 } else if (command_equals(data, len, "qsThreadInfo")) {
1096 } else if (command_equals(data, len, "qSupported")) {
1097 data += strlen("qSupported");
1098 len -= strlen("qSupported");
1099 check_features(data, len);
1100 } else if (command_equals(data, len, "qThreadExtraInfo")) {
1104 data += strlen("qThreadExtraInfo");
1105 len -= strlen("qThreadExtraInfo");
/* Skip the ',' separator, then validate the thread ID. */
1110 tid = parse_threadid(data + 1, len - 1);
1111 if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
1116 snprintf(buf, sizeof(buf), "vCPU %d", tid - 1);
1118 append_asciihex(buf);
1121 send_empty_response();
/*
 * Dispatch one complete GDB command packet (payload between '$' and
 * '#'). Unimplemented commands get the empty response.
 */
1125 handle_command(const uint8_t *data, size_t len)
1128 /* Reject packets with a sequence-id. */
1129 if (len >= 3 && data[0] >= '0' && data[0] <= '9' &&
/* NOTE(review): this line re-tests data[0]; a two-digit sequence id
 * check should test data[1] here. */
1130 data[0] >= '0' && data[0] <= '9' && data[2] == ':') {
1131 send_empty_response();
1142 /* Don't send a reply until a stop occurs. */
1148 /* TODO: Resume any stopped CPUs. */
/* 'H' packet: only the 'g' and 'c' operations are recognized. */
1157 if (data[1] != 'g' && data[1] != 'c') {
1161 tid = parse_threadid(data + 2, len - 2);
1167 if (CPU_EMPTY(&vcpus_active)) {
/* tid 0 / -1 mean "any thread": pick the lowest active vCPU. */
1171 if (tid == -1 || tid == 0)
1172 cur_vcpu = CPU_FFS(&vcpus_active) - 1;
1173 else if (CPU_ISSET(tid - 1, &vcpus_active))
1183 gdb_read_mem(data, len);
1186 gdb_write_mem(data, len);
/* 'T' packet: is the given thread alive? */
1191 tid = parse_threadid(data + 1, len - 1);
1192 if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
1200 gdb_query(data, len);
1208 /* Don't send a reply until a stop occurs. */
1209 if (!gdb_step_vcpu(cur_vcpu)) {
1210 send_error(EOPNOTSUPP);
1215 /* XXX: Only if stopped? */
1216 /* For now, just report that we are always stopped. */
1219 append_byte(GDB_SIGNAL_TRAP);
1222 case 'G': /* TODO */
1224 /* Handle 'vCont' */
1226 case 'p': /* TODO */
1227 case 'P': /* TODO */
1228 case 'Q': /* TODO */
1229 case 't': /* TODO */
1230 case 'X': /* TODO */
1231 case 'z': /* TODO */
1232 case 'Z': /* TODO */
1234 send_empty_response();
1238 /* Check for a valid packet in the command buffer. */
/*
 * Scans the accumulated command buffer for Ctrl-C interrupts, '+'/'-'
 * acks, and complete '$...#xx' packets; verifies each packet's
 * checksum before dispatching it to handle_command().
 */
1240 check_command(int fd)
1242 uint8_t *head, *hash, *p, sum;
1246 avail = cur_comm.len;
1249 head = io_buffer_head(&cur_comm);
/* Ctrl-C (0x03) from the debugger interrupts the guest. */
1252 debug("<- Ctrl-C\n");
1253 io_buffer_consume(&cur_comm, 1);
1255 gdb_suspend_vcpus();
1258 /* ACK of previous response. */
1260 if (response_pending())
1261 io_buffer_reset(&cur_resp);
1262 io_buffer_consume(&cur_comm, 1);
1264 stop_pending = false;
1266 send_pending_data(fd);
1270 /* NACK of previous response. */
/* Rewind start/len so the same packet is retransmitted. */
1272 if (response_pending()) {
1273 cur_resp.len += cur_resp.start;
/* Don't resend a leading '+' ack character. */
1275 if (cur_resp.data[0] == '+')
1276 io_buffer_advance(&cur_resp, 1);
1277 debug("-> %.*s\n", (int)cur_resp.len,
1278 io_buffer_head(&cur_resp));
1280 io_buffer_consume(&cur_comm, 1);
1281 send_pending_data(fd);
1286 if (response_pending()) {
1287 warnx("New GDB command while response in "
1289 io_buffer_reset(&cur_resp);
1292 /* Is packet complete? */
1293 hash = memchr(head, '#', avail);
/* Packet length: '$'..'#' plus the two checksum characters. */
1296 plen = (hash - head + 1) + 2;
1299 debug("<- %.*s\n", (int)plen, head);
1301 /* Verify checksum. */
1302 for (sum = 0, p = head + 1; p < hash; p++)
1304 if (sum != parse_byte(hash + 1)) {
1305 io_buffer_consume(&cur_comm, plen);
1308 send_pending_data(fd);
/* Payload is everything between '$' and '#'. */
1313 handle_command(head + 1, hash - (head + 1));
1314 io_buffer_consume(&cur_comm, plen);
1315 if (!response_pending())
1317 send_pending_data(fd);
1320 /* XXX: Possibly drop connection instead. */
/* Unexpected byte: log and skip it. */
1321 debug("-> %02x\n", *head);
1322 io_buffer_consume(&cur_comm, 1);
/*
 * mevent callback: the GDB socket has data (or EOF). Grow the command
 * buffer to fit the pending bytes, read them in, and process any
 * complete commands under gdb_lock.
 */
1329 gdb_readable(int fd, enum ev_type event, void *arg)
1334 if (ioctl(fd, FIONREAD, &pending) == -1) {
1335 warn("FIONREAD on GDB socket");
1340 * 'pending' might be zero due to EOF. We need to call read
1341 * with a non-zero length to detect EOF.
1346 /* Ensure there is room in the command buffer. */
1347 io_buffer_grow(&cur_comm, pending);
1348 assert(io_buffer_avail(&cur_comm) >= pending);
1350 nread = read(fd, io_buffer_tail(&cur_comm), io_buffer_avail(&cur_comm));
1353 } else if (nread == -1) {
1354 if (errno == EAGAIN)
1357 warn("Read from GDB socket");
1360 cur_comm.len += nread;
1361 pthread_mutex_lock(&gdb_lock);
1363 pthread_mutex_unlock(&gdb_lock);
/* mevent callback: the GDB socket is writable; flush pending response data. */
1368 gdb_writable(int fd, enum ev_type event, void *arg)
1371 send_pending_data(fd);
/*
 * mevent callback: accept a debugger connection on the listening
 * socket, configure it (non-blocking, no SIGPIPE), install read/write
 * event handlers, and suspend the guest so the debugger attaches to a
 * stopped VM. Only one connection is served at a time.
 */
1375 new_connection(int fd, enum ev_type event, void *arg)
1379 s = accept4(fd, NULL, NULL, SOCK_NONBLOCK);
/* Startup failures are fatal only for the very first connection attempt. */
1382 err(1, "Failed accepting initial GDB connection");
1384 /* Silently ignore errors post-startup. */
1389 if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)) ==
1391 warn("Failed to disable SIGPIPE for GDB connection");
1396 pthread_mutex_lock(&gdb_lock);
/* A debugger is already attached; reject the new connection. */
1399 warnx("Ignoring additional GDB connection.");
1402 read_event = mevent_add(s, EVF_READ, gdb_readable, NULL);
1403 if (read_event == NULL) {
1405 err(1, "Failed to setup initial GDB connection");
1406 pthread_mutex_unlock(&gdb_lock);
1409 write_event = mevent_add(s, EVF_WRITE, gdb_writable, NULL);
1410 if (write_event == NULL) {
1412 err(1, "Failed to setup initial GDB connection");
1413 mevent_delete_close(read_event);
1421 stop_pending = false;
1423 /* Break on attach. */
1425 gdb_suspend_vcpus();
1426 pthread_mutex_unlock(&gdb_lock);
1429 #ifndef WITHOUT_CAPSICUM
/*
 * Apply Capsicum capability limits to the listening GDB socket:
 * accept/event/read/write/setsockopt plus only the FIONREAD ioctl.
 */
1431 limit_gdb_socket(int s)
1433 cap_rights_t rights;
1434 unsigned long ioctls[] = { FIONREAD };
1436 cap_rights_init(&rights, CAP_ACCEPT, CAP_EVENT, CAP_READ, CAP_WRITE,
1437 CAP_SETSOCKOPT, CAP_IOCTL);
1438 if (caph_rights_limit(s, &rights) == -1)
1439 errx(EX_OSERR, "Unable to apply rights for sandbox");
1440 if (caph_ioctls_limit(s, ioctls, nitems(ioctls)) == -1)
1441 errx(EX_OSERR, "Unable to apply rights for sandbox");
/*
 * Initialize the GDB stub: create the synchronization primitives,
 * open a non-blocking TCP listening socket on 'sport' (all
 * interfaces), and register it with the event loop. If 'wait' is
 * set, arrange for vCPU 0 to suspend before executing anything so a
 * debugger can attach first.
 */
1446 init_gdb(struct vmctx *_ctx, int sport, bool wait)
1448 struct sockaddr_in sin;
1449 int error, flags, s;
1451 debug("==> starting on %d, %swaiting\n", sport, wait ? "" : "not ");
1453 error = pthread_mutex_init(&gdb_lock, NULL);
1455 errc(1, error, "gdb mutex init");
1456 error = pthread_cond_init(&idle_vcpus, NULL);
1458 errc(1, error, "gdb cv init");
1461 s = socket(PF_INET, SOCK_STREAM, 0);
1463 err(1, "gdb socket create");
1465 sin.sin_len = sizeof(sin);
1466 sin.sin_family = AF_INET;
1467 sin.sin_addr.s_addr = htonl(INADDR_ANY);
1468 sin.sin_port = htons(sport);
1470 if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) < 0)
1471 err(1, "gdb socket bind");
1473 if (listen(s, 1) < 0)
1474 err(1, "gdb socket listen");
1478 * Set vcpu 0 in vcpus_suspended. This will trigger the
1479 * logic in gdb_cpu_add() to suspend the first vcpu before
1480 * it starts execution. The vcpu will remain suspended
1481 * until a debugger connects.
1485 CPU_SET(0, &vcpus_suspended);
1488 flags = fcntl(s, F_GETFL);
1489 if (fcntl(s, F_SETFL, flags | O_NONBLOCK) == -1)
1490 err(1, "Failed to mark gdb socket non-blocking");
1492 #ifndef WITHOUT_CAPSICUM
1493 limit_gdb_socket(s);
1495 mevent_add(s, EVF_READ, new_connection, NULL);