/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, Intel Corporation.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
 */
/*
 * The ZFS retire agent is responsible for managing hot spares across all pools.
 * When we see a device fault or a device removal, we try to open the associated
 * pool and look for any hot spares. We iterate over any available hot spares
 * and attempt a 'zpool replace' for each one.
 *
 * For vdevs diagnosed as faulty, the agent is also responsible for proactively
 * marking the vdev FAULTY (for I/O errors) or DEGRADED (for checksum errors).
 */
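/*
 * In outline, zfs_retire_recv() below maps event classes to actions:
 *
 *   resource.fs.zfs.removed, or statechange to REMOVED
 *       - try to swap in a hot spare (l2arc vdevs are offlined instead)
 *   resource.fs.zfs.statechange to HEALTHY, sysevent.fs.zfs.vdev_remove
 *       - record the vdev as repaired
 *   fault.fs.zfs.vdev.io, fault.io.*
 *       - fault the vdev, then try a hot spare
 *   fault.fs.zfs.vdev.checksum
 *       - degrade the vdev, then try a hot spare
 *   fault.fs.zfs.device
 *       - no state change, hot spare only
 *   FM_LIST_REPAIRED_CLASS
 *       - clear the fault and resolve the originating case
 */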
#include <sys/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/fs/zfs.h>
#include <libzfs.h>
#include <string.h>
#include <libgen.h>

#include "zfs_agents.h"
#include "fmd_api.h"
typedef struct zfs_retire_repaired {
	struct zfs_retire_repaired	*zrr_next;
	uint64_t			zrr_pool;
	uint64_t			zrr_vdev;
} zfs_retire_repaired_t;
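/*
 * Per-module state stored in the fmd handle: the libzfs handle plus the
 * list of (pool, vdev) pairs we have already attempted to repair (see
 * zfs_vdev_repair() below).
 */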
typedef struct zfs_retire_data {
	libzfs_handle_t			*zrd_hdl;
	zfs_retire_repaired_t		*zrd_repaired;
} zfs_retire_data_t;
static void
zfs_retire_clear_data(fmd_hdl_t *hdl, zfs_retire_data_t *zdp)
{
	zfs_retire_repaired_t *zrp;

	while ((zrp = zdp->zrd_repaired) != NULL) {
		zdp->zrd_repaired = zrp->zrr_next;
		fmd_hdl_free(hdl, zrp, sizeof (zfs_retire_repaired_t));
	}
}
/*
 * Find a pool with a matching GUID.
 */
typedef struct find_cbdata {
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} find_cbdata_t;

static int
find_pool(zpool_handle_t *zhp, void *data)
{
	find_cbdata_t *cbp = data;

	if (cbp->cb_guid ==
	    zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL)) {
		cbp->cb_zhp = zhp;
		return (1);
	}
	zpool_close(zhp);
	return (0);
}
/*
 * Find a vdev within a tree with a matching GUID.
 */
static nvlist_t *
find_vdev(libzfs_handle_t *zhdl, nvlist_t *nv, uint64_t search_guid)
{
	uint64_t guid;
	nvlist_t **child;
	uint_t c, children;
	nvlist_t *ret;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
	    guid == search_guid) {
		fmd_hdl_debug(fmd_module_hdl("zfs-retire"),
		    "matched vdev %llu", guid);
		return (nv);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
			return (ret);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
			return (ret);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
			return (ret);
	}

	return (NULL);
}
/*
 * Given a (pool, vdev) GUID pair, find the matching pool and vdev.
 */
static zpool_handle_t *
find_by_guid(libzfs_handle_t *zhdl, uint64_t pool_guid, uint64_t vdev_guid,
    nvlist_t **vdevp)
{
	find_cbdata_t cb;
	zpool_handle_t *zhp;
	nvlist_t *config, *nvroot;

	/*
	 * Find the corresponding pool and make sure the vdev still exists.
	 */
	cb.cb_guid = pool_guid;
	if (zpool_iter(zhdl, find_pool, &cb) != 1)
		return (NULL);

	zhp = cb.cb_zhp;
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (vdev_guid != 0) {
		if ((*vdevp = find_vdev(zhdl, nvroot, vdev_guid)) == NULL) {
			zpool_close(zhp);
			return (NULL);
		}
	}

	return (zhp);
}
/*
 * Given a vdev, attempt to replace it with every known spare until one
 * succeeds or we run out of devices to try.
 * Return whether we were successful or not in replacing the device.
 */
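/*
 * The chosen spare is handed to zpool_vdev_attach() wrapped in a root-type
 * nvlist with a single child, mirroring the replacement layout that
 * 'zpool replace' constructs.
 */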
static boolean_t
replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
{
	nvlist_t *config, *nvroot, *replacement;
	nvlist_t **spares;
	uint_t s, nspares;
	char *dev_name;
	zprop_source_t source;
	uint64_t ashift;

	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0)
		return (B_FALSE);

	/*
	 * Find out if there are any hot spares available in the pool.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) != 0)
		return (B_FALSE);

	/*
	 * lookup "ashift" pool property, we may need it for the replacement
	 */
	ashift = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &source);

	replacement = fmd_nvl_alloc(hdl, FMD_SLEEP);

	(void) nvlist_add_string(replacement, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT);

	dev_name = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);

	/*
	 * Try to replace each spare, ending when we successfully
	 * replace it.
	 */
	for (s = 0; s < nspares; s++) {
		char *spare_name;

		if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
		    &spare_name) != 0)
			continue;

		/* if set, add the "ashift" pool property to the spare nvlist */
		if (source != ZPROP_SRC_DEFAULT)
			(void) nvlist_add_uint64(spares[s],
			    ZPOOL_CONFIG_ASHIFT, ashift);

		(void) nvlist_add_nvlist_array(replacement,
		    ZPOOL_CONFIG_CHILDREN, &spares[s], 1);

		fmd_hdl_debug(hdl, "zpool_vdev_replace '%s' with spare '%s'",
		    dev_name, basename(spare_name));

		if (zpool_vdev_attach(zhp, dev_name, spare_name,
		    replacement, B_TRUE, B_FALSE) == 0) {
			free(dev_name);
			nvlist_free(replacement);
			return (B_TRUE);
		}
	}

	free(dev_name);
	nvlist_free(replacement);
	return (B_FALSE);
}
/*
 * Repair this vdev if we had diagnosed a 'fault.fs.zfs.device' and
 * ASRU is now usable.  ZFS has found the device to be present and
 * functioning.
 */
static void
zfs_vdev_repair(fmd_hdl_t *hdl, nvlist_t *nvl)
{
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
	zfs_retire_repaired_t *zrp;
	uint64_t pool_guid, vdev_guid;

	if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
	    &pool_guid) != 0 || nvlist_lookup_uint64(nvl,
	    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
		return;

	/*
	 * Before checking the state of the ASRU, go through and see if we've
	 * already made an attempt to repair this ASRU.  This list is cleared
	 * whenever we receive any kind of list event, and is designed to
	 * prevent us from generating a feedback loop when we attempt repairs
	 * against a faulted pool.  The problem is that checking the unusable
	 * state of the ASRU can involve opening the pool, which can post
	 * statechange events but otherwise leave the pool in the faulted
	 * state.  This list allows us to detect when a statechange event is
	 * due to our own request.
	 */
	for (zrp = zdp->zrd_repaired; zrp != NULL; zrp = zrp->zrr_next) {
		if (zrp->zrr_pool == pool_guid &&
		    zrp->zrr_vdev == vdev_guid)
			return;
	}

	zrp = fmd_hdl_alloc(hdl, sizeof (zfs_retire_repaired_t), FMD_SLEEP);
	zrp->zrr_next = zdp->zrd_repaired;
	zrp->zrr_pool = pool_guid;
	zrp->zrr_vdev = vdev_guid;
	zdp->zrd_repaired = zrp;

	fmd_hdl_debug(hdl, "marking repaired vdev %llu on pool %llu",
	    vdev_guid, pool_guid);
}
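/*
 * Main event entry point.  Note that the repaired list built up by
 * zfs_vdev_repair() above is flushed via zfs_retire_clear_data() once a
 * list event makes it past the early-return checks below.
 */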
static void
zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
    const char *class)
{
	uint64_t pool_guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *resource, *fault;
	nvlist_t **faults;
	uint_t f, nfaults;
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
	libzfs_handle_t *zhdl = zdp->zrd_hdl;
	boolean_t fault_device, degrade_device;
	boolean_t is_repair;
	boolean_t retire;
	char *devtype, *devname, *scheme, *uuid;
	nvlist_t *vdev = NULL;
	uint64_t state = 0;
	vdev_aux_t aux;
	int repair_done = 0;

	fmd_hdl_debug(hdl, "zfs_retire_recv: '%s'", class);

	/* The state payload is optional; if absent, state stays 0 (UNKNOWN) */
	nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE, &state);
	/*
	 * If this is a resource notifying us of device removal then simply
	 * check for an available spare and continue unless the device is a
	 * l2arc vdev, in which case we just offline it.
	 */
	if (strcmp(class, "resource.fs.zfs.removed") == 0 ||
	    (strcmp(class, "resource.fs.zfs.statechange") == 0 &&
	    state == VDEV_STATE_REMOVED)) {
		if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
		    &pool_guid) != 0 ||
		    nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
		    &vdev_guid) != 0)
			return;

		if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
		    &vdev)) == NULL)
			return;

		devname = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);

		/* Can't replace l2arc with a spare: offline the device */
		if (nvlist_lookup_string(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
		    &devtype) == 0 && strcmp(devtype, VDEV_TYPE_L2CACHE) == 0) {
			fmd_hdl_debug(hdl, "zpool_vdev_offline '%s'", devname);
			zpool_vdev_offline(zhp, devname, B_TRUE);
		} else if (!fmd_prop_get_int32(hdl, "spare_on_remove") ||
		    replace_with_spare(hdl, zhp, vdev) == B_FALSE) {
			/* Could not handle with spare */
			fmd_hdl_debug(hdl, "no spare for '%s'", devname);
		}

		free(devname);
		zpool_close(zhp);
		return;
	}

	if (strcmp(class, FM_LIST_RESOLVED_CLASS) == 0)
		return;

	/*
	 * Note: on Linux statechange events are more than just
	 * healthy ones so we need to confirm the actual state value.
	 */
	if (strcmp(class, "resource.fs.zfs.statechange") == 0 &&
	    state == VDEV_STATE_HEALTHY) {
		zfs_vdev_repair(hdl, nvl);
		return;
	}

	if (strcmp(class, "sysevent.fs.zfs.vdev_remove") == 0) {
		zfs_vdev_repair(hdl, nvl);
		return;
	}

	zfs_retire_clear_data(hdl, zdp);

	if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0)
		is_repair = B_TRUE;
	else
		is_repair = B_FALSE;
	/*
	 * We subscribe to zfs faults as well as all repair events.
	 */
	if (nvlist_lookup_nvlist_array(nvl, FM_SUSPECT_FAULT_LIST,
	    &faults, &nfaults) != 0)
		return;

	for (f = 0; f < nfaults; f++) {
		fault = faults[f];

		fault_device = B_FALSE;
		degrade_device = B_FALSE;

		if (nvlist_lookup_boolean_value(fault, FM_SUSPECT_RETIRE,
		    &retire) == 0 && retire == 0)
			continue;

		/*
		 * While we subscribe to fault.fs.zfs.*, we only take action
		 * for faults targeting a specific vdev (open failure or SERD
		 * failure).  We also subscribe to fault.io.* events, so that
		 * faulty disks will be faulted in the ZFS configuration.
		 */
		if (fmd_nvl_class_match(hdl, fault, "fault.fs.zfs.vdev.io")) {
			fault_device = B_TRUE;
		} else if (fmd_nvl_class_match(hdl, fault,
		    "fault.fs.zfs.vdev.checksum")) {
			degrade_device = B_TRUE;
		} else if (fmd_nvl_class_match(hdl, fault,
		    "fault.fs.zfs.device")) {
			fault_device = B_FALSE;
		} else if (fmd_nvl_class_match(hdl, fault, "fault.io.*")) {
			fault_device = B_TRUE;
		} else {
			continue;
		}

		/*
		 * This is a ZFS fault.  Lookup the resource, and
		 * attempt to find the matching vdev.
		 */
		if (nvlist_lookup_nvlist(fault, FM_FAULT_RESOURCE,
		    &resource) != 0 ||
		    nvlist_lookup_string(resource, FM_FMRI_SCHEME,
		    &scheme) != 0)
			continue;

		if (strcmp(scheme, FM_FMRI_SCHEME_ZFS) != 0)
			continue;

		if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_POOL,
		    &pool_guid) != 0)
			continue;

		if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_VDEV,
		    &vdev_guid) != 0) {
			if (is_repair)
				vdev_guid = 0;
			else
				continue;
		}

		if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
		    &vdev)) == NULL)
			continue;

		aux = VDEV_AUX_ERR_EXCEEDED;

		if (vdev_guid == 0) {
			/*
			 * For pool-level repair events, clear the entire pool.
			 */
			fmd_hdl_debug(hdl, "zpool_clear of pool '%s'",
			    zpool_get_name(zhp));
			(void) zpool_clear(zhp, NULL, NULL);
			zpool_close(zhp);
			continue;
		}

		/*
		 * If this is a repair event, then mark the vdev as repaired and
		 * move on.
		 */
		if (is_repair) {
			repair_done = 1;
			fmd_hdl_debug(hdl, "zpool_clear of pool '%s' vdev %llu",
			    zpool_get_name(zhp), vdev_guid);
			(void) zpool_vdev_clear(zhp, vdev_guid);
			zpool_close(zhp);
			continue;
		}

		/*
		 * Actively fault the device if needed.
		 */
		if (fault_device)
			(void) zpool_vdev_fault(zhp, vdev_guid, aux);
		if (degrade_device)
			(void) zpool_vdev_degrade(zhp, vdev_guid, aux);

		if (fault_device || degrade_device)
			fmd_hdl_debug(hdl, "zpool_vdev_%s: vdev %llu on '%s'",
			    fault_device ? "fault" : "degrade", vdev_guid,
			    zpool_get_name(zhp));

		/*
		 * Attempt to substitute a hot spare.
		 */
		(void) replace_with_spare(hdl, zhp, vdev);

		zpool_close(zhp);
	}
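	/*
	 * For list.repaired events where a vdev was actually cleared above,
	 * resolve the originating case by the UUID carried in the event.
	 */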
	if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0 && repair_done &&
	    nvlist_lookup_string(nvl, FM_SUSPECT_UUID, &uuid) == 0)
		fmd_case_uuresolved(hdl, uuid);
}
static const fmd_hdl_ops_t fmd_ops = {
	zfs_retire_recv,	/* fmdo_recv */
	NULL,			/* fmdo_timeout */
	NULL,			/* fmdo_close */
	NULL,			/* fmdo_stats */
	NULL,			/* fmdo_gc */
};
static const fmd_prop_t fmd_props[] = {
	{ "spare_on_remove", FMD_TYPE_BOOL, "true" },
	{ NULL, 0, NULL }
};
static const fmd_hdl_info_t fmd_info = {
	"ZFS Retire Agent", "1.0", &fmd_ops, fmd_props
};
void
_zfs_retire_init(fmd_hdl_t *hdl)
{
	zfs_retire_data_t *zdp;
	libzfs_handle_t *zhdl;

	if ((zhdl = libzfs_init()) == NULL)
		return;

	if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
		libzfs_fini(zhdl);
		return;
	}

	zdp = fmd_hdl_zalloc(hdl, sizeof (zfs_retire_data_t), FMD_SLEEP);
	zdp->zrd_hdl = zhdl;

	fmd_hdl_setspecific(hdl, zdp);
}
void
_zfs_retire_fini(fmd_hdl_t *hdl)
{
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);

	if (zdp != NULL) {
		zfs_retire_clear_data(hdl, zdp);
		libzfs_fini(zdp->zrd_hdl);
		fmd_hdl_free(hdl, zdp, sizeof (zfs_retire_data_t));
	}
}