2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2006 IronPort Systems Inc. <ambrisko@ironport.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
35 #include <sys/condvar.h>
37 #include <sys/eventhandler.h>
38 #include <sys/kernel.h>
40 #include <sys/malloc.h>
41 #include <sys/module.h>
42 #include <sys/mutex.h>
44 #include <sys/reboot.h>
46 #include <sys/selinfo.h>
47 #include <sys/sysctl.h>
48 #include <sys/watchdog.h>
55 #include <dev/ipmi/ipmivars.h>
59 * Driver request structures are allocated on the stack via alloca() to
60 * avoid calling malloc(), especially for the watchdog handler.
61 * To avoid too much stack growth, a previously allocated structure can
62 * be reused via IPMI_INIT_DRIVER_REQUEST(), but the caller should ensure
63 * that there is adequate reply/request space in the original allocation.
/*
 * IPMI_INIT_DRIVER_REQUEST: re-initialize a request as an anonymous
 * driver request (no owning descriptor, msgid 0).
 * NOTE(review): expands to two statements without do/while(0) — confirm
 * no caller uses it as a single statement under an unbraced if.
 */
65 #define IPMI_INIT_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen) \
66 bzero((req), sizeof(struct ipmi_request)); \
67 ipmi_init_request((req), NULL, 0, (addr), (cmd), (reqlen), (replylen))
/*
 * IPMI_ALLOC_DRIVER_REQUEST: carve a request plus its request/reply
 * buffers out of the caller's stack frame via alloca(), then initialize
 * it (see the comment above about avoiding malloc() here).
 */
69 #define IPMI_ALLOC_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen) \
70 (req) = __builtin_alloca(sizeof(struct ipmi_request) + \
71 (reqlen) + (replylen)); \
72 IPMI_INIT_DRIVER_REQUEST((req), (addr), (cmd), (reqlen), \
/*
 * Fixed: the definition below takes a buffer pointer (u_char *data, int len);
 * the old prototype declared the first parameter as a plain u_char, which
 * did not match the definition.
 */
76 static int ipmi_ipmb_checksum(u_char *, int);
/* Forward declarations of local helpers and cdev entry points. */
77 static int ipmi_ipmb_send_message(device_t, u_char, u_char, u_char,
81 static d_ioctl_t ipmi_ioctl;
82 static d_poll_t ipmi_poll;
83 static d_open_t ipmi_open;
84 static void ipmi_dtor(void *arg);
/* Exported (non-static) flag; presumably read by other code to detect
 * that an IPMI interface attached — TODO confirm against consumers. */
86 int ipmi_attached = 0;
/* Watchdog policy/state knobs; wd_in_shutdown stops re-arming once the
 * system begins shutting down (see ipmi_shutdown_event()). */
89 static bool wd_in_shutdown = false;
90 static int wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
91 static int wd_shutdown_countdown = 0; /* sec */
92 static int wd_startup_countdown = 0; /* sec */
93 static int wd_pretimeout_countdown = 120; /* sec */
/* Seconds to wait for the BMC to act on a power-cycle request. */
94 static int cycle_wait = 10; /* sec */
/* hw.ipmi.* sysctl tree exposing the watchdog/power-cycle knobs above. */
96 static SYSCTL_NODE(_hw, OID_AUTO, ipmi, CTLFLAG_RD, 0,
97 "IPMI driver parameters");
98 SYSCTL_INT(_hw_ipmi, OID_AUTO, on, CTLFLAG_RWTUN,
100 SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_timer_actions, CTLFLAG_RW,
101 &wd_timer_actions, 0,
102 "IPMI watchdog timer actions (including pre-timeout interrupt)");
103 SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_shutdown_countdown, CTLFLAG_RW,
104 &wd_shutdown_countdown, 0,
105 "IPMI watchdog countdown for shutdown (seconds)");
106 SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_startup_countdown, CTLFLAG_RDTUN,
107 &wd_startup_countdown, 0,
108 "IPMI watchdog countdown initialized during startup (seconds)");
109 SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_pretimeout_countdown, CTLFLAG_RW,
110 &wd_pretimeout_countdown, 0,
111 "IPMI watchdog pre-timeout countdown (seconds)");
/* NOTE(review): "cyle_wait" looks like a typo for "cycle_wait" (the
 * variable it exposes), but renaming the sysctl would break existing
 * tunables — fix only with a compat alias. */
112 SYSCTL_INT(_hw_ipmi, OID_AUTO, cyle_wait, CTLFLAG_RWTUN,
114 "IPMI power cycle on reboot delay time (seconds)");
/* Character-device switch for /dev/ipmiN (only some methods visible here). */
116 static struct cdevsw ipmi_cdevsw = {
117 .d_version = D_VERSION,
119 .d_ioctl = ipmi_ioctl,
/* Malloc tag used for all of this driver's allocations. */
124 static MALLOC_DEFINE(M_IPMI, "ipmi", "ipmi");
/*
 * cdev open: allocate per-file-descriptor state and attach it to the
 * descriptor via devfs_set_cdevpriv() so ipmi_dtor() runs on last close.
 */
127 ipmi_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
129 struct ipmi_device *dev;
130 struct ipmi_softc *sc;
136 /* Initialize the per file descriptor data. */
137 dev = malloc(sizeof(struct ipmi_device), M_IPMI, M_WAITOK | M_ZERO);
138 error = devfs_set_cdevpriv(dev, ipmi_dtor);
/* Default addressing: the BMC slave address via the SMS LUN. */
145 TAILQ_INIT(&dev->ipmi_completed_requests);
146 dev->ipmi_address = IPMI_BMC_SLAVE_ADDR;
147 dev->ipmi_lun = IPMI_BMC_SMS_LUN;
148 dev->ipmi_softc = sc;
/*
 * cdev poll: readable when a completed request is queued for this
 * descriptor; otherwise record the thread for a later selwakeup().
 */
157 ipmi_poll(struct cdev *cdev, int poll_events, struct thread *td)
159 struct ipmi_device *dev;
160 struct ipmi_softc *sc;
163 if (devfs_get_cdevpriv((void **)&dev))
168 if (poll_events & (POLLIN | POLLRDNORM)) {
169 if (!TAILQ_EMPTY(&dev->ipmi_completed_requests))
170 revents |= poll_events & (POLLIN | POLLRDNORM);
171 if (dev->ipmi_requests == 0)
/* Nothing ready yet: register for notification on completion. */
176 if (poll_events & (POLLIN | POLLRDNORM))
177 selrecord(td, &dev->ipmi_select);
/* Drop and free every completed request still queued on this descriptor,
 * decrementing the outstanding-request count as we go. */
185 ipmi_purge_completed_requests(struct ipmi_device *dev)
187 struct ipmi_request *req;
189 while (!TAILQ_EMPTY(&dev->ipmi_completed_requests)) {
190 req = TAILQ_FIRST(&dev->ipmi_completed_requests);
191 TAILQ_REMOVE(&dev->ipmi_completed_requests, req, ir_link);
192 dev->ipmi_requests--;
193 ipmi_free_request(req);
/*
 * Destructor registered via devfs_set_cdevpriv() in ipmi_open() (the
 * function signature is not visible in this view): discard the
 * descriptor's pending and completed requests, then wait for any request
 * still held by an interface driver to drain before tearing down.
 */
200 struct ipmi_request *req, *nreq;
201 struct ipmi_device *dev;
202 struct ipmi_softc *sc;
205 sc = dev->ipmi_softc;
208 if (dev->ipmi_requests) {
209 /* Throw away any pending requests for this device. */
210 TAILQ_FOREACH_SAFE(req, &sc->ipmi_pending_requests, ir_link,
212 if (req->ir_owner == dev) {
213 TAILQ_REMOVE(&sc->ipmi_pending_requests, req,
215 dev->ipmi_requests--;
216 ipmi_free_request(req);
220 /* Throw away any pending completed requests for this device. */
221 ipmi_purge_completed_requests(dev);
224 * If we still have outstanding requests, they must be stuck
225 * in an interface driver, so wait for those to drain.
227 dev->ipmi_closing = 1;
228 while (dev->ipmi_requests > 0) {
/* Woken by ipmi_complete_request() when ipmi_closing is set. */
229 msleep(&dev->ipmi_requests, &sc->ipmi_requests_lock,
230 PWAIT, "ipmidrain", 0);
231 ipmi_purge_completed_requests(dev);
/* IPMB message checksum over len bytes of data (body not visible in
 * this view; presumably the IPMB two's-complement checksum — confirm). */
243 ipmi_ipmb_checksum(u_char *data, int len)
253 /* XXX: Needs work */
/*
 * Send a message to a node on an IPMB channel via the BMC's Send Message
 * command. The request body is assembled as: channel, responder slave
 * address, netFn/LUN, header checksum, requester address, seq/LUN,
 * command, payload, trailing checksum — hence the data_len + 8 sizing.
 */
255 ipmi_ipmb_send_message(device_t dev, u_char channel, u_char netfn,
256 u_char command, u_char seq, u_char *data, int data_len)
258 struct ipmi_softc *sc = device_get_softc(dev);
259 struct ipmi_request *req;
260 u_char slave_addr = 0x52;
263 IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
264 IPMI_SEND_MSG, data_len + 8, 0);
265 req->ir_request[0] = channel;
266 req->ir_request[1] = slave_addr;
267 req->ir_request[2] = IPMI_ADDR(netfn, 0);
268 req->ir_request[3] = ipmi_ipmb_checksum(&req->ir_request[1], 2);
269 req->ir_request[4] = sc->ipmi_address;
270 req->ir_request[5] = IPMI_ADDR(seq, sc->ipmi_lun);
271 req->ir_request[6] = command;
273 bcopy(data, &req->ir_request[7], data_len);
/* BUG FIX: the trailing checksum belongs in the request buffer being
 * built above; "temp" was not declared anywhere in this function. */
274 req->ir_request[data_len + 7] = ipmi_ipmb_checksum(&req->ir_request[4],
277 ipmi_submit_driver_request(sc, req);
278 error = req->ir_error;
/*
 * Service a BMC attention: read the message flags and report buffer-full,
 * watchdog pre-timeout, and message-available conditions; an available
 * message is fetched with IPMI_GET_MSG and discarded.
 */
284 ipmi_handle_attn(struct ipmi_softc *sc)
286 struct ipmi_request *req;
289 device_printf(sc->ipmi_dev, "BMC has a message\n");
290 IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
291 IPMI_GET_MSG_FLAGS, 0, 1);
293 ipmi_submit_driver_request(sc, req);
295 if (req->ir_error == 0 && req->ir_compcode == 0) {
296 if (req->ir_reply[0] & IPMI_MSG_BUFFER_FULL) {
297 device_printf(sc->ipmi_dev, "message buffer full");
299 if (req->ir_reply[0] & IPMI_WDT_PRE_TIMEOUT) {
300 device_printf(sc->ipmi_dev,
301 "watchdog about to go off");
303 if (req->ir_reply[0] & IPMI_MSG_AVAILABLE) {
/* Reuses "req" for the GET_MSG; the original request is stack storage. */
304 IPMI_ALLOC_DRIVER_REQUEST(req,
305 IPMI_ADDR(IPMI_APP_REQUEST, 0), IPMI_GET_MSG, 0,
308 device_printf(sc->ipmi_dev, "throw out message ");
312 error = req->ir_error;
318 #ifdef IPMICTL_SEND_COMMAND_32
/* 32-bit compat helpers: convert between user pointers and uintptr_t. */
319 #define PTRIN(p) ((void *)(uintptr_t)(p))
320 #define PTROUT(p) ((uintptr_t)(p))
/*
 * cdev ioctl: implements the OpenIPMI-style IPMICTL_* interface —
 * sending commands, receiving completed replies, and get/set of the
 * per-descriptor address and LUN. 32-bit requests are thunked to the
 * native structures up front and results copied back at the end.
 */
324 ipmi_ioctl(struct cdev *cdev, u_long cmd, caddr_t data,
325 int flags, struct thread *td)
327 struct ipmi_softc *sc;
328 struct ipmi_device *dev;
329 struct ipmi_request *kreq;
330 struct ipmi_req *req = (struct ipmi_req *)data;
331 struct ipmi_recv *recv = (struct ipmi_recv *)data;
332 struct ipmi_addr addr;
333 #ifdef IPMICTL_SEND_COMMAND_32
334 struct ipmi_req32 *req32 = (struct ipmi_req32 *)data;
335 struct ipmi_recv32 *recv32 = (struct ipmi_recv32 *)data;
338 struct ipmi_recv recv;
343 error = devfs_get_cdevpriv((void **)&dev);
349 #ifdef IPMICTL_SEND_COMMAND_32
350 /* Convert 32-bit structures to native. */
352 case IPMICTL_SEND_COMMAND_32:
354 req->addr = PTRIN(req32->addr);
355 req->addr_len = req32->addr_len;
356 req->msgid = req32->msgid;
357 req->msg.netfn = req32->msg.netfn;
358 req->msg.cmd = req32->msg.cmd;
359 req->msg.data_len = req32->msg.data_len;
360 req->msg.data = PTRIN(req32->msg.data);
362 case IPMICTL_RECEIVE_MSG_TRUNC_32:
363 case IPMICTL_RECEIVE_MSG_32:
364 recv = &thunk32.recv;
365 recv->addr = PTRIN(recv32->addr);
366 recv->addr_len = recv32->addr_len;
367 recv->msg.data_len = recv32->msg.data_len;
368 recv->msg.data = PTRIN(recv32->msg.data);
374 #ifdef IPMICTL_SEND_COMMAND_32
375 case IPMICTL_SEND_COMMAND_32:
377 case IPMICTL_SEND_COMMAND:
379 * XXX: Need to add proper handling of this.
381 error = copyin(req->addr, &addr, sizeof(addr));
386 /* clear out old stuff in queue of stuff done */
387 /* XXX: This seems odd. */
388 while ((kreq = TAILQ_FIRST(&dev->ipmi_completed_requests))) {
389 TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
391 dev->ipmi_requests--;
392 ipmi_free_request(kreq);
/* Build the kernel request and copy in the user payload. */
396 kreq = ipmi_alloc_request(dev, req->msgid,
397 IPMI_ADDR(req->msg.netfn, 0), req->msg.cmd,
398 req->msg.data_len, IPMI_MAX_RX);
399 error = copyin(req->msg.data, kreq->ir_request,
402 ipmi_free_request(kreq);
406 dev->ipmi_requests++;
407 error = sc->ipmi_enqueue_request(sc, kreq);
412 #ifdef IPMICTL_SEND_COMMAND_32
413 case IPMICTL_RECEIVE_MSG_TRUNC_32:
414 case IPMICTL_RECEIVE_MSG_32:
416 case IPMICTL_RECEIVE_MSG_TRUNC:
417 case IPMICTL_RECEIVE_MSG:
418 error = copyin(recv->addr, &addr, sizeof(addr));
/* Return the oldest completed request to the caller. */
423 kreq = TAILQ_FIRST(&dev->ipmi_completed_requests);
428 addr.channel = IPMI_BMC_CHANNEL;
430 recv->recv_type = IPMI_RESPONSE_RECV_TYPE;
431 recv->msgid = kreq->ir_msgid;
432 recv->msg.netfn = IPMI_REPLY_ADDR(kreq->ir_addr) >> 2;
433 recv->msg.cmd = kreq->ir_command;
434 error = kreq->ir_error;
436 TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
438 dev->ipmi_requests--;
440 ipmi_free_request(kreq);
/* Reply is the completion code byte plus the data; the non-TRUNC
 * variants refuse a reply that does not fit the user buffer. */
443 len = kreq->ir_replylen + 1;
444 if (recv->msg.data_len < len &&
445 (cmd == IPMICTL_RECEIVE_MSG
446 #ifdef IPMICTL_RECEIVE_MSG_32
447 || cmd == IPMICTL_RECEIVE_MSG_32
453 TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq, ir_link);
454 dev->ipmi_requests--;
456 len = min(recv->msg.data_len, len);
457 recv->msg.data_len = len;
458 error = copyout(&addr, recv->addr,sizeof(addr));
460 error = copyout(&kreq->ir_compcode, recv->msg.data, 1);
462 error = copyout(kreq->ir_reply, recv->msg.data + 1,
464 ipmi_free_request(kreq);
468 case IPMICTL_SET_MY_ADDRESS_CMD:
470 dev->ipmi_address = *(int*)data;
473 case IPMICTL_GET_MY_ADDRESS_CMD:
475 *(int*)data = dev->ipmi_address;
478 case IPMICTL_SET_MY_LUN_CMD:
/* LUN is only two bits wide. */
480 dev->ipmi_lun = *(int*)data & 0x3;
483 case IPMICTL_GET_MY_LUN_CMD:
485 *(int*)data = dev->ipmi_lun;
488 case IPMICTL_SET_GETS_EVENTS_CMD:
490 device_printf(sc->ipmi_dev,
491 "IPMICTL_SET_GETS_EVENTS_CMD NA\n");
494 case IPMICTL_REGISTER_FOR_CMD:
495 case IPMICTL_UNREGISTER_FOR_CMD:
498 device_printf(sc->ipmi_dev, "Unknown IOCTL %lX\n", cmd);
502 #ifdef IPMICTL_SEND_COMMAND_32
503 /* Update changed fields in 32-bit structures. */
505 case IPMICTL_RECEIVE_MSG_TRUNC_32:
506 case IPMICTL_RECEIVE_MSG_32:
507 recv32->recv_type = recv->recv_type;
508 recv32->msgid = recv->msgid;
509 recv32->msg.netfn = recv->msg.netfn;
510 recv32->msg.cmd = recv->msg.cmd;
511 recv32->msg.data_len = recv->msg.data_len;
519 * Request management.
/*
 * Fill in a zeroed request: owner (NULL for anonymous driver requests),
 * msgid, target address and command, and carve the request and reply
 * buffers out of the storage that directly follows the structure.
 */
523 ipmi_init_request(struct ipmi_request *req, struct ipmi_device *dev, long msgid,
524 uint8_t addr, uint8_t command, size_t requestlen, size_t replylen)
528 req->ir_msgid = msgid;
530 req->ir_command = command;
/* Request buffer sits immediately after the struct... */
532 req->ir_request = (char *)&req[1];
533 req->ir_requestlen = requestlen;
/* ...and the reply buffer immediately after the request buffer. */
536 req->ir_reply = (char *)&req[1] + requestlen;
537 req->ir_replybuflen = replylen;
541 /* Allocate a new request with request and reply buffers. */
/* Single malloc covers the struct plus both buffers; see
 * ipmi_init_request() for how the buffers are laid out. */
542 struct ipmi_request *
543 ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
544 uint8_t command, size_t requestlen, size_t replylen)
546 struct ipmi_request *req;
548 req = malloc(sizeof(struct ipmi_request) + requestlen + replylen,
549 M_IPMI, M_WAITOK | M_ZERO);
550 ipmi_init_request(req, dev, msgid, addr, command, requestlen, replylen);
554 /* Free a request no longer in use. */
/* Counterpart of ipmi_alloc_request(); body not visible in this view. */
556 ipmi_free_request(struct ipmi_request *req)
562 /* Store a processed request on the appropriate completion queue. */
/* Called with the IPMI lock held (asserted below). */
564 ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req)
566 struct ipmi_device *dev;
568 IPMI_LOCK_ASSERT(sc);
571 * Anonymous requests (from inside the driver) always have a
572 * waiter that we awaken.
574 if (req->ir_owner == NULL)
/* Owned request: queue for the descriptor, wake pollers, and wake a
 * closing ipmi_dtor() waiting for requests to drain. */
578 TAILQ_INSERT_TAIL(&dev->ipmi_completed_requests, req, ir_link);
579 selwakeup(&dev->ipmi_select);
580 if (dev->ipmi_closing)
581 wakeup(&dev->ipmi_requests);
585 /* Perform an internal driver request. */
/* Thin wrapper around the backend's ipmi_driver_request method. */
587 ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request *req,
591 return (sc->ipmi_driver_request(sc, req, timo));
595 * Helper routine for polled system interfaces that use
596 * ipmi_polled_enqueue_request() to queue requests. This request
597 * waits until there is a pending request and then returns the first
598 * request. If the driver is shutting down, it returns NULL.
600 struct ipmi_request *
601 ipmi_dequeue_request(struct ipmi_softc *sc)
603 struct ipmi_request *req;
605 IPMI_LOCK_ASSERT(sc);
/* Sleep until work arrives or detach begins (cv_broadcast in detach). */
607 while (!sc->ipmi_detaching && TAILQ_EMPTY(&sc->ipmi_pending_requests))
608 cv_wait(&sc->ipmi_request_added, &sc->ipmi_requests_lock);
609 if (sc->ipmi_detaching)
612 req = TAILQ_FIRST(&sc->ipmi_pending_requests);
613 TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
617 /* Default implementation of ipmi_enqueue_request() for polled interfaces. */
/* Appends to the pending queue and wakes one ipmi_dequeue_request() waiter. */
619 ipmi_polled_enqueue_request(struct ipmi_softc *sc, struct ipmi_request *req)
622 IPMI_LOCK_ASSERT(sc);
624 TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests, req, ir_link);
625 cv_signal(&sc->ipmi_request_added);
630 * Watchdog event handler.
/* Pat the BMC watchdog by issuing the Reset Watchdog Timer command. */
634 ipmi_reset_watchdog(struct ipmi_softc *sc)
636 struct ipmi_request *req;
639 IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
640 IPMI_RESET_WDOG, 0, 0);
641 error = ipmi_submit_driver_request(sc, req, 0);
643 device_printf(sc->ipmi_dev, "Failed to reset watchdog\n");
/*
 * Program the BMC watchdog (Set Watchdog Timer): sec > 0 arms it with
 * the configured actions and pre-timeout; sec == 0 disarms it. The
 * countdown field is in 100 ms units — hence the sec * 10 scaling and
 * the 0xffff / 10 upper bound on sec.
 */
648 ipmi_set_watchdog(struct ipmi_softc *sc, unsigned int sec)
650 struct ipmi_request *req;
653 if (sec > 0xffff / 10)
656 IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
657 IPMI_SET_WDOG, 6, 0);
/* Arm: SMS/OS timer use, keep running across Set, actions, countdown. */
659 req->ir_request[0] = IPMI_SET_WD_TIMER_DONT_STOP
660 | IPMI_SET_WD_TIMER_SMS_OS;
661 req->ir_request[1] = (wd_timer_actions & 0xff);
662 req->ir_request[2] = (wd_pretimeout_countdown & 0xff);
663 req->ir_request[3] = 0; /* Timer use */
664 req->ir_request[4] = (sec * 10) & 0xff;
665 req->ir_request[5] = (sec * 10) >> 8;
/* Disarm: zero actions, pre-timeout, and countdown. */
667 req->ir_request[0] = IPMI_SET_WD_TIMER_SMS_OS;
668 req->ir_request[1] = 0;
669 req->ir_request[2] = 0;
670 req->ir_request[3] = 0; /* Timer use */
671 req->ir_request[4] = 0;
672 req->ir_request[5] = 0;
674 error = ipmi_submit_driver_request(sc, req, 0);
676 device_printf(sc->ipmi_dev, "Failed to set watchdog\n");
/*
 * watchdog(9) event handler: cmd encodes the requested timeout as a
 * power-of-two number of nanoseconds (WD_TO_*); cmd == 0 or an
 * out-of-range value disables the timer.
 */
681 ipmi_wd_event(void *arg, unsigned int cmd, int *error)
683 struct ipmi_softc *sc = arg;
684 unsigned int timeout;
687 /* Ignore requests while disabled. */
692 * To prevent infinite hangs, we don't let anyone pat or change
693 * the watchdog when we're shutting down. (See ipmi_shutdown_event().)
694 * However, we do want to keep patting the watchdog while we are doing
697 if (wd_in_shutdown) {
698 if (dumping && sc->ipmi_watchdog_active)
699 ipmi_reset_watchdog(sc);
/* Convert the power-of-two nanosecond code to whole seconds. */
704 if (cmd > 0 && cmd <= 63) {
705 timeout = ((uint64_t)1 << cmd) / 1000000000;
/* Reprogram only when timeout or action configuration changed... */
708 if (timeout != sc->ipmi_watchdog_active ||
709 wd_timer_actions != sc->ipmi_watchdog_actions ||
710 wd_pretimeout_countdown != sc->ipmi_watchdog_pretimeout) {
711 e = ipmi_set_watchdog(sc, timeout);
713 sc->ipmi_watchdog_active = timeout;
714 sc->ipmi_watchdog_actions = wd_timer_actions;
715 sc->ipmi_watchdog_pretimeout = wd_pretimeout_countdown;
/* Programming failed: make sure the BMC timer is off. */
717 (void)ipmi_set_watchdog(sc, 0);
718 sc->ipmi_watchdog_active = 0;
719 sc->ipmi_watchdog_actions = 0;
720 sc->ipmi_watchdog_pretimeout = 0;
/* ...otherwise just pat the already-armed timer. */
723 if (sc->ipmi_watchdog_active != 0) {
724 e = ipmi_reset_watchdog(sc);
728 (void)ipmi_set_watchdog(sc, 0);
729 sc->ipmi_watchdog_active = 0;
730 sc->ipmi_watchdog_actions = 0;
731 sc->ipmi_watchdog_pretimeout = 0;
/* cmd out of range: disable the timer if it was armed. */
734 } else if (atomic_readandclear_int(&sc->ipmi_watchdog_active) != 0) {
735 sc->ipmi_watchdog_actions = 0;
736 sc->ipmi_watchdog_pretimeout = 0;
738 e = ipmi_set_watchdog(sc, 0);
739 if (e != 0 && cmd == 0)
/*
 * shutdown_pre_sync handler: move the watchdog into shutdown mode per
 * the wd_shutdown_countdown policy documented below; once wd_in_shutdown
 * is set, ipmi_wd_event() stops accepting changes.
 */
745 ipmi_shutdown_event(void *arg, unsigned int cmd, int *error)
747 struct ipmi_softc *sc = arg;
749 /* Ignore event if disabled. */
754 * Positive wd_shutdown_countdown value will re-arm watchdog;
755 * Zero value in wd_shutdown_countdown will disable watchdog;
756 * Negative value in wd_shutdown_countdown will keep existing state;
758 * Revert to using a power cycle to ensure that the watchdog will
759 * do something useful here. Having the watchdog send an NMI
760 * instead is useless during shutdown, and might be ignored if an
761 * NMI already triggered.
764 wd_in_shutdown = true;
765 if (wd_shutdown_countdown == 0) {
766 /* disable watchdog */
767 ipmi_set_watchdog(sc, 0);
768 sc->ipmi_watchdog_active = 0;
769 } else if (wd_shutdown_countdown > 0) {
770 /* set desired action and time, and, reset watchdog */
771 wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
772 ipmi_set_watchdog(sc, wd_shutdown_countdown);
773 sc->ipmi_watchdog_active = wd_shutdown_countdown;
774 ipmi_reset_watchdog(sc);
/*
 * shutdown_final handler: ask the BMC to power-cycle the chassis when
 * the reboot was requested with RB_POWERCYCLE.
 */
779 ipmi_power_cycle(void *arg, int howto)
781 struct ipmi_softc *sc = arg;
782 struct ipmi_request *req;
785 * Ignore everything except power cycling requests
787 if ((howto & RB_POWERCYCLE) == 0)
790 device_printf(sc->ipmi_dev, "Power cycling using IPMI\n");
793 * Send a CHASSIS_CONTROL command to the CHASSIS device, subcommand 2
794 * as described in IPMI v2.0 spec section 28.3.
796 IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_CHASSIS_REQUEST, 0),
797 IPMI_CHASSIS_CONTROL, 1, 0);
798 req->ir_request[0] = IPMI_CC_POWER_CYCLE;
800 ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
802 if (req->ir_error != 0 || req->ir_compcode != 0) {
803 device_printf(sc->ipmi_dev, "Power cycling via IPMI failed code %#x %#x\n",
804 req->ir_error, req->ir_compcode);
809 * BMCs are notoriously slow, give it cycle_wait seconds for the power
810 * down leg of the power cycle. If that fails, fallback to the next
811 * handler in the shutdown_final chain and/or the platform failsafe.
813 DELAY(cycle_wait * 1000 * 1000);
814 device_printf(sc->ipmi_dev, "Power cycling via IPMI timed out\n");
/*
 * config_intrhook callback run once interrupts are available: set up
 * locks and queues, start the backend, probe the BMC (device ID,
 * channel count, watchdog), create /dev/ipmiN, apply the initial
 * watchdog policy, and register a power-cycle handler when the BMC
 * advertises chassis-device support.
 */
818 ipmi_startup(void *arg)
820 struct ipmi_softc *sc = arg;
821 struct ipmi_request *req;
825 config_intrhook_disestablish(&sc->ipmi_ich);
828 /* Initialize interface-independent state. */
829 mtx_init(&sc->ipmi_requests_lock, "ipmi requests", NULL, MTX_DEF);
830 mtx_init(&sc->ipmi_io_lock, "ipmi io", NULL, MTX_DEF);
831 cv_init(&sc->ipmi_request_added, "ipmireq");
832 TAILQ_INIT(&sc->ipmi_pending_requests);
834 /* Initialize interface-dependent state. */
835 error = sc->ipmi_startup(sc);
837 device_printf(dev, "Failed to initialize interface: %d\n",
842 /* Send a GET_DEVICE_ID request. */
843 IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
844 IPMI_GET_DEVICE_ID, 0, 15);
846 error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
847 if (error == EWOULDBLOCK) {
848 device_printf(dev, "Timed out waiting for GET_DEVICE_ID\n");
851 device_printf(dev, "Failed GET_DEVICE_ID: %d\n", error);
853 } else if (req->ir_compcode != 0) {
855 "Bad completion code for GET_DEVICE_ID: %d\n",
858 } else if (req->ir_replylen < 5) {
859 device_printf(dev, "Short reply for GET_DEVICE_ID: %d\n",
/* Decode and announce the BMC identity from the GET_DEVICE_ID reply. */
864 device_printf(dev, "IPMI device rev. %d, firmware rev. %d.%d%d, "
865 "version %d.%d, device support mask %#x\n",
866 req->ir_reply[1] & 0x0f,
867 req->ir_reply[2] & 0x7f, req->ir_reply[3] >> 4, req->ir_reply[3] & 0x0f,
868 req->ir_reply[4] & 0x0f, req->ir_reply[4] >> 4, req->ir_reply[5]);
870 sc->ipmi_dev_support = req->ir_reply[5];
/* Clear any stale message flags left in the BMC. */
872 IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
873 IPMI_CLEAR_FLAGS, 1, 0);
875 ipmi_submit_driver_request(sc, req, 0);
877 /* XXX: Magic numbers */
878 if (req->ir_compcode == 0xc0) {
879 device_printf(dev, "Clear flags is busy\n");
881 if (req->ir_compcode == 0xc1) {
882 device_printf(dev, "Clear flags illegal\n");
/* Count channels by probing GET_CHANNEL_INFO until it fails (max 8). */
885 for (i = 0; i < 8; i++) {
886 IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
887 IPMI_GET_CHANNEL_INFO, 1, 0);
888 req->ir_request[0] = i;
890 ipmi_submit_driver_request(sc, req, 0);
892 if (req->ir_compcode != 0)
895 device_printf(dev, "Number of channels %d\n", i);
898 * Probe for watchdog, but only for backends which support
899 * polled driver requests.
901 if (sc->ipmi_driver_requests_polled) {
902 IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
903 IPMI_GET_WDOG, 0, 0);
905 ipmi_submit_driver_request(sc, req, 0);
907 if (req->ir_compcode == 0x00) {
908 device_printf(dev, "Attached watchdog\n");
909 /* register the watchdog event handler */
910 sc->ipmi_watchdog_tag = EVENTHANDLER_REGISTER(
911 watchdog_list, ipmi_wd_event, sc, 0);
912 sc->ipmi_shutdown_tag = EVENTHANDLER_REGISTER(
913 shutdown_pre_sync, ipmi_shutdown_event,
/* Create the userland device node, one per unit. */
918 sc->ipmi_cdev = make_dev(&ipmi_cdevsw, device_get_unit(dev),
919 UID_ROOT, GID_OPERATOR, 0660, "ipmi%d", device_get_unit(dev));
920 if (sc->ipmi_cdev == NULL) {
921 device_printf(dev, "Failed to create cdev\n");
924 sc->ipmi_cdev->si_drv1 = sc;
927 * Set initial watchdog state. If desired, set an initial
928 * watchdog on startup. Or, if the watchdog device is
929 * disabled, clear any existing watchdog.
931 if (on && wd_startup_countdown > 0) {
932 wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
933 if (ipmi_set_watchdog(sc, wd_startup_countdown) == 0 &&
934 ipmi_reset_watchdog(sc) == 0) {
935 sc->ipmi_watchdog_active = wd_startup_countdown;
936 sc->ipmi_watchdog_actions = wd_timer_actions;
937 sc->ipmi_watchdog_pretimeout = wd_pretimeout_countdown;
939 (void)ipmi_set_watchdog(sc, 0);
940 ipmi_reset_watchdog(sc);
942 (void)ipmi_set_watchdog(sc, 0);
944 * Power cycle the system off using IPMI. We use last - 2 since we don't
945 * handle all the other kinds of reboots. We'll let others handle them.
946 * We only try to do this if the BMC supports the Chassis device.
948 if (sc->ipmi_dev_support & IPMI_ADS_CHASSIS) {
949 device_printf(dev, "Establishing power cycle handler\n");
950 sc->ipmi_power_cycle_tag = EVENTHANDLER_REGISTER(shutdown_final,
951 ipmi_power_cycle, sc, SHUTDOWN_PRI_LAST - 2);
/*
 * Bus attach: hook up the backend interrupt handler (when the probe
 * supplied one) and defer the rest of initialization to ipmi_startup()
 * via a config intrhook.
 */
956 ipmi_attach(device_t dev)
958 struct ipmi_softc *sc = device_get_softc(dev);
961 if (sc->ipmi_irq_res != NULL && sc->ipmi_intr != NULL) {
962 error = bus_setup_intr(dev, sc->ipmi_irq_res, INTR_TYPE_MISC,
963 NULL, sc->ipmi_intr, sc, &sc->ipmi_irq);
965 device_printf(dev, "can't set up interrupt\n");
970 bzero(&sc->ipmi_ich, sizeof(struct intr_config_hook));
971 sc->ipmi_ich.ich_func = ipmi_startup;
972 sc->ipmi_ich.ich_arg = sc;
973 if (config_intrhook_establish(&sc->ipmi_ich) != 0) {
974 device_printf(dev, "can't establish configuration hook\n");
/*
 * Bus detach: refuse while /dev/ipmiN is open; otherwise tear down the
 * device node, deregister the watchdog/shutdown/power-cycle handlers,
 * stop the backend kthread, and release resources and locks.
 */
983 ipmi_detach(device_t dev)
985 struct ipmi_softc *sc;
987 sc = device_get_softc(dev);
989 /* Fail if there are any open handles. */
991 if (sc->ipmi_opened) {
997 destroy_dev(sc->ipmi_cdev);
999 /* Detach from watchdog handling and turn off watchdog. */
1000 if (sc->ipmi_shutdown_tag)
1001 EVENTHANDLER_DEREGISTER(shutdown_pre_sync,
1002 sc->ipmi_shutdown_tag);
1003 if (sc->ipmi_watchdog_tag) {
1004 EVENTHANDLER_DEREGISTER(watchdog_list, sc->ipmi_watchdog_tag);
1005 ipmi_set_watchdog(sc, 0);
1008 /* Detach from shutdown handling for power cycle reboot */
1009 if (sc->ipmi_power_cycle_tag)
1010 EVENTHANDLER_DEREGISTER(shutdown_final, sc->ipmi_power_cycle_tag);
1012 /* XXX: should use shutdown callout I think. */
1013 /* If the backend uses a kthread, shut it down. */
1015 sc->ipmi_detaching = 1;
1016 if (sc->ipmi_kthread) {
1017 cv_broadcast(&sc->ipmi_request_added);
1018 msleep(sc->ipmi_kthread, &sc->ipmi_requests_lock, 0,
/* NOTE(review): bus_teardown_intr() is also called from
 * ipmi_release_resources() below — tearing down the same handler twice
 * looks suspect; confirm which call is the intended owner. */
1023 bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
1025 ipmi_release_resources(dev);
1026 mtx_destroy(&sc->ipmi_io_lock);
1027 mtx_destroy(&sc->ipmi_requests_lock);
/* Tear down the IRQ and release the IRQ and I/O-window bus resources. */
1032 ipmi_release_resources(device_t dev)
1034 struct ipmi_softc *sc;
1037 sc = device_get_softc(dev);
1039 bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
1040 if (sc->ipmi_irq_res)
1041 bus_release_resource(dev, SYS_RES_IRQ, sc->ipmi_irq_rid,
1043 for (i = 0; i < MAX_RES; i++)
1044 if (sc->ipmi_io_res[i])
1045 bus_release_resource(dev, sc->ipmi_io_type,
1046 sc->ipmi_io_rid + i, sc->ipmi_io_res[i]);
1049 devclass_t ipmi_devclass;
/* Module unload helper (run via SYSUNINIT below): delete every device
 * instance attached under the ipmi devclass. */
1053 ipmi_unload(void *arg)
1059 if (ipmi_devclass == NULL)
1061 if (devclass_get_devices(ipmi_devclass, &devs, &count) != 0)
1063 for (i = 0; i < count; i++)
1064 device_delete_child(device_get_parent(devs[i]), devs[i]);
1067 SYSUNINIT(ipmi_unload, SI_SUB_DRIVERS, SI_ORDER_FIRST, ipmi_unload, NULL);
/* Debug helper: hex + ASCII dump of a buffer, 16 bytes per output line. */
1071 dump_buf(u_char *data, int len)
1079 printf("Address %p len %d\n", data, len);
1083 for (; len > 0; len--, data++) {
1084 sprintf(temp, "%02x ", *data);
1086 if (*data >= ' ' && *data <= '~')
/* NOTE(review): 'A'..'Z' is a subset of ' '..'~', so this branch looks
 * unreachable — lines are missing from this view; confirm intent. */
1088 else if (*data >= 'A' && *data <= 'Z')
1092 if (++count == 16) {
1093 buf[count] = '\000';
1095 printf(" %3x %s %s\n", i, line, buf);
/* Flush the final partial line, padding out to 16 columns. */
1100 buf[count] = '\000';
1102 for (; count != 16; count++) {
1105 printf(" %3x %s %s\n", i, line, buf);