2 * Copyright (c) 2011, 2012, 2013, 2014, 2016 Spectra Logic Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * substantially similar to the "NO WARRANTY" disclaimer below
13 * ("Disclaimer") and any redistribution must be conditioned upon
14 * including a substantially similar Disclaimer requirement for further
15 * binary redistribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGES.
30 * Authors: Justin T. Gibbs (Spectra Logic Corporation)
36 #include <sys/cdefs.h>
37 #include <sys/byteorder.h>
39 #include <sys/fs/zfs.h>
40 #include <sys/vdev_impl.h>
47 * Undefine flush, defined by cpufunc.h on sparc64, because it conflicts with
57 #include <devdctl/guid.h>
58 #include <devdctl/event.h>
59 #include <devdctl/event_factory.h>
60 #include <devdctl/exception.h>
61 #include <devdctl/consumer.h>
64 #include "vdev_iterator.h"
65 #include "zfsd_event.h"
66 #include "case_file.h"
69 #include "zfsd_exception.h"
70 #include "zpool_list.h"
72 __FBSDID("$FreeBSD$");
73 /*============================ Namespace Control =============================*/
76 using DevdCtl::NVPairMap;
77 using std::stringstream;
79 /*=========================== Class Implementations ==========================*/
81 /*-------------------------------- GeomEvent --------------------------------*/
83 //- GeomEvent Static Public Methods -------------------------------------------
/*
 * Factory method registered with the event framework: construct a
 * GeomEvent from the parsed devd name/value pairs and the raw event
 * string.  NOTE(review): SOURCE is a sampled listing (embedded original
 * line numbers, missing lines); only the visible statements are documented.
 */
85 GeomEvent::Builder(Event::Type type,
87 const string &eventString)
	/* Heap-allocate; ownership passes to the caller. */
89 return (new GeomEvent(type, nvPairs, eventString));
92 //- GeomEvent Virtual Public Methods ------------------------------------------
/*
 * Virtual copy: return a heap-allocated duplicate of this event so the
 * daemon can retain it beyond the caller's lifetime.  Caller owns the
 * returned object.
 */
94 GeomEvent::DeepCopy() const
96 return (new GeomEvent(*this));
/*
 * Handle a GEOM event.  Visible flow: bail out early when no case files
 * are open or the event is neither a device arrival ("CREATE") nor a
 * physical-path change ("GEOM::physpath"); otherwise open the device,
 * read its ZFS label, and try to resolve an open case either by label
 * (OnlineByLabel) or by physical path (CaseFile::Find + ReEvaluate).
 * NOTE(review): several statements (declarations, closes, returns) are
 * missing from this sampled view — confirm against the full file.
 */
100 GeomEvent::Process() const
103 * We only use GEOM events to repair damaged pools.  So return early if
104 * there are no damaged pools
106 if (CaseFile::Empty())
110 * We are only concerned with arrivals and physical path changes,
111 * because those can be used to satisfy online and autoreplace
114 if (Value("type") != "GEOM::physpath" && Value("type") != "CREATE")
117 /* Log the event since it is of interest. */
	/* Resolve the /dev path for the device named in the event. */
121 if (!DevPath(devPath))
	/* Read-only open; the fd is consumed by ReadLabel below. */
124 int devFd(open(devPath.c_str(), O_RDONLY))
	/* ReadLabel fills inUse/degraded from the on-disk vdev label. */
130 nvlist_t *devLabel(ReadLabel(devFd, inUse, degraded));
133 bool havePhysPath(PhysicalPath(physPath));
139 if (inUse && devLabel != NULL) {
140 OnlineByLabel(devPath, physPath, devLabel);
141 } else if (degraded) {
	/* Degraded label: log and skip — not a replacement candidate. */
142 syslog(LOG_INFO, "%s is marked degraded.  Ignoring "
143 "as a replace by physical path candidate.\n",
145 } else if (havePhysPath) {
147 * TODO: attempt to resolve events using every casefile
148 * that matches this physpath
150 CaseFile *caseFile(CaseFile::Find(physPath));
151 if (caseFile != NULL) {
153 "Found CaseFile(%s:%s:%s) - ReEvaluating\n",
154 caseFile->PoolGUIDString().c_str(),
155 caseFile->VdevGUIDString().c_str(),
156 zpool_state_to_name(caseFile->VdevState(),
	/* No vdev config available here — re-evaluate by path only. */
158 caseFile->ReEvaluate(devPath, physPath, /*vdev*/NULL);
164 //- GeomEvent Protected Methods -----------------------------------------------
/*
 * Construct from parsed devd data; simply delegates to the DevdCtl
 * base-class constructor.
 */
165 GeomEvent::GeomEvent(Event::Type type, NVPairMap &nvpairs,
166 const string &eventString)
167 : DevdCtl::GeomEvent(type, nvpairs, eventString)
/* Copy constructor: delegates member-wise copying to the base class. */
171 GeomEvent::GeomEvent(const GeomEvent &src)
172 : DevdCtl::GeomEvent::GeomEvent(src)
/*
 * Read the ZFS vdev label from an open device.
 *
 * Out-parameters (visible from the code): inUse is set from
 * zpool_in_use(), degraded from the label's vdev state.  Returns the
 * label nvlist on success; the NULL/partial-label paths free the label.
 * A disk with fewer than VDEV_LABELS labels is deliberately ignored
 * (see comment below).  NOTE(review): lines are missing from this
 * sampled view (e.g. the try block opening, returns) — verify against
 * the full file.
 */
177 GeomEvent::ReadLabel(int devFd, bool &inUse, bool &degraded)
179 pool_state_t poolState;
187 if (zpool_in_use(g_zfsHandle, devFd, &poolState,
188 &poolName, &b_inuse) == 0) {
189 nvlist_t *devLabel = NULL;
	/* Normalize libzfs boolean_t to a C++ bool for the caller. */
191 inUse = b_inuse == B_TRUE;
192 if (poolName != NULL)
195 if (zpool_read_label(devFd, &devLabel, &nlabels) != 0)
198 * If we find a disk with fewer than the maximum number of
199 * labels, it might be the whole disk of a partitioned disk
200 * where ZFS resides on a partition.  In that case, we should do
201 * nothing and wait for the partition to appear.  Or, the disk
202 * might be damaged.  In that case, zfsd should do nothing and
203 * wait for the sysadmin to decide.
205 if (nlabels != VDEV_LABELS || devLabel == NULL) {
206 nvlist_free(devLabel);
	/* Anything short of VDEV_STATE_HEALTHY counts as degraded. */
212 degraded = vdev.State() != VDEV_STATE_HEALTHY;
214 } catch (ZfsdException &exp) {
	/* Recover a printable device path for the log context. */
215 string devName = fdevname(devFd);
216 string devPath = _PATH_DEV + devName;
217 string context("GeomEvent::ReadLabel: "
220 exp.GetString().insert(0, context);
222 nvlist_free(devLabel);
/*
 * A labeled device has appeared: build a Vdev from its label config,
 * look up a matching case file by pool/vdev GUID, and, when one exists,
 * let the case re-evaluate itself with the new device.  Exceptions are
 * logged with a call-site context prefix rather than propagated.
 * NOTE(review): sampled view — some lines (e.g. final returns) missing.
 */
229 GeomEvent::OnlineByLabel(const string &devPath, const string& physPath,
234 * A device with ZFS label information has been
235 * inserted.  If it matches a device for which we
236 * have a case, see if we can solve that case.
238 syslog(LOG_INFO, "Interrogating VDEV label for %s\n",
240 Vdev vdev(devConfig);
241 CaseFile *caseFile(CaseFile::Find(vdev.PoolGUID(),
243 if (caseFile != NULL)
	/* Full label data available, so pass the vdev along. */
244 return (caseFile->ReEvaluate(devPath, physPath, &vdev));
246 } catch (ZfsdException &exp) {
247 string context("GeomEvent::OnlineByLabel: " + devPath + ": ");
249 exp.GetString().insert(0, context);
256 /*--------------------------------- ZfsEvent ---------------------------------*/
257 //- ZfsEvent Static Public Methods ---------------------------------------------
/*
 * Factory method: construct a ZfsEvent from parsed devd data.
 * Heap-allocated; ownership passes to the caller.
 */
259 ZfsEvent::Builder(Event::Type type, NVPairMap &nvpairs,
260 const string &eventString)
262 return (new ZfsEvent(type, nvpairs, eventString));
265 //- ZfsEvent Virtual Public Methods --------------------------------------------
/* Virtual copy: heap-allocated duplicate owned by the caller. */
267 ZfsEvent::DeepCopy() const
269 return (new ZfsEvent(*this));
/*
 * Handle a ZFS kernel event.  Visible flow: validate that class/type
 * data is present; on config_sync replay queued events and re-evaluate
 * cases for the pool; dispatch "misc.fs.zfs." events elsewhere; for
 * vdev events, reuse an existing case file or create one from the live
 * pool/vdev config and let it evaluate the event, logging the reasons
 * when the event cannot be consumed.  NOTE(review): sampled view —
 * returns, braces and some declarations are missing; confirm control
 * flow against the full file.
 */
273 ZfsEvent::Process() const
	/* Events without class or type data cannot be interpreted. */
277 if (!Contains("class") && !Contains("type")) {
279 "ZfsEvent::Process: Missing class or type data.");
283 /* On config syncs, replay any queued events first. */
284 if (Value("type").find("misc.fs.zfs.config_sync") == 0) {
286 * Even if saved events are unconsumed the second time
287 * around, drop them.  Any events that still can't be
288 * consumed are probably referring to vdevs or pools that
291 ZfsDaemon::Get().ReplayUnconsumedEvents(/*discard*/true);
292 CaseFile::ReEvaluateByGuid(PoolGUID(), *this);
295 if (Value("type").find("misc.fs.zfs.") == 0) {
296 /* Configuration changes, resilver events, etc. */
	/* Vdev events must identify both the pool and the vdev. */
301 if (!Contains("pool_guid") || !Contains("vdev_guid")) {
302 /* Only currently interested in Vdev related events. */
	/* Prefer an already-open case for this pool/vdev pair. */
306 CaseFile *caseFile(CaseFile::Find(PoolGUID(), VdevGUID()));
307 if (caseFile != NULL) {
309 syslog(LOG_INFO, "Evaluating existing case file\n");
310 caseFile->ReEvaluate(*this);
314 /* Skip events that can't be handled. */
315 Guid poolGUID(PoolGUID());
316 /* If there are no replicas for a pool, then it's not manageable. */
317 if (Value("class").find("fs.zfs.vdev.no_replicas") == 0) {
319 msg << "No replicas available for pool " << poolGUID;
322 syslog(LOG_INFO, "%s", msg.str().c_str());
327 * Create a case file for this vdev, and have it
328 * evaluate the event.
	/* Look up the live pool handle by GUID. */
330 ZpoolList zpl(ZpoolList::ZpoolByGUID, &poolGUID);
333 int priority = LOG_INFO;
334 msg << "ZfsEvent::Process: Event for unknown pool ";
335 msg << poolGUID << " ";
338 syslog(priority, "%s", msg.str().c_str());
	/* Locate this vdev's config within the pool's vdev tree. */
342 nvlist_t *vdevConfig = VdevIterator(zpl.front()).Find(VdevGUID());
343 if (vdevConfig == NULL) {
345 int priority = LOG_INFO;
346 msg << "ZfsEvent::Process: Event for unknown vdev ";
347 msg << VdevGUID() << " ";
350 syslog(priority, "%s", msg.str().c_str());
354 Vdev vdev(zpl.front(), vdevConfig);
355 caseFile = &CaseFile::Create(vdev);
	/* Log when even a freshly created case can't consume the event. */
356 if (caseFile->ReEvaluate(*this) == false) {
358 int priority = LOG_INFO;
359 msg << "ZfsEvent::Process: Unconsumed event for vdev(";
360 msg << zpool_get_name(zpl.front()) << ",";
361 msg << vdev.GUID() << ") ";
364 syslog(priority, "%s", msg.str().c_str());
370 //- ZfsEvent Protected Methods -------------------------------------------------
/*
 * Construct from parsed devd data; delegates to the DevdCtl base-class
 * constructor.
 */
371 ZfsEvent::ZfsEvent(Event::Type type, NVPairMap &nvpairs,
372 const string &eventString)
373 : DevdCtl::ZfsEvent(type, nvpairs, eventString)
/* Copy constructor: delegates member-wise copying to the base class. */
377 ZfsEvent::ZfsEvent(const ZfsEvent &src)
378 : DevdCtl::ZfsEvent(src)
383 * Sometimes the kernel won't detach a spare when it is no longer needed. This
384 * can happen for example if a drive is removed, then either the pool is
385 * exported or the machine is powered off, then the drive is reinserted, then
386 * the machine is powered on or the pool is imported. ZFSD must detach these
/*
 * Walk every vdev of this event's pool (looked up by GUID) and apply
 * TryDetach, which detaches spares the kernel left attached (see the
 * preceding file comment).  NOTE(review): the acquisition of `hdl` from
 * `zpl` is not visible in this sampled view — confirm in the full file.
 */
390 ZfsEvent::CleanupSpares() const
392 Guid poolGUID(PoolGUID());
393 ZpoolList zpl(ZpoolList::ZpoolByGUID, &poolGUID);
398 VdevIterator(hdl).Each(TryDetach, (void*)hdl);
/*
 * Handle pool-scope events.  Visible flow: pool_destroy discards open
 * cases for the pool; an existing case file is re-evaluated (recording
 * whether its vdev was degraded); resilver_finish is tolerated without
 * a case file; and a vdev_remove of a non-degraded device triggers a
 * system-wide rescan so other cases may claim the device.
 * NOTE(review): sampled view — returns and some braces are missing.
 */
403 ZfsEvent::ProcessPoolEvent() const
405 bool degradedDevice(false);
407 /* The pool is destroyed.  Discard any open cases */
408 if (Value("type") == "misc.fs.zfs.pool_destroy") {
410 CaseFile::ReEvaluateByGuid(PoolGUID(), *this);
414 CaseFile *caseFile(CaseFile::Find(PoolGUID(), VdevGUID()));
415 if (caseFile != NULL) {
	/* Remember degradation before ReEvaluate may close the case. */
416 if (caseFile->VdevState() != VDEV_STATE_UNKNOWN
417 && caseFile->VdevState() < VDEV_STATE_HEALTHY)
418 degradedDevice = true;
421 caseFile->ReEvaluate(*this);
423 else if (Value("type") == "misc.fs.zfs.resilver_finish")
426 * It's possible to get a resilver_finish event with no
427 * corresponding casefile.  For example, if a damaged pool were
428 * exported, repaired, then reimported.
434 if (Value("type") == "misc.fs.zfs.vdev_remove"
435 && degradedDevice == false) {
437 /* See if any other cases can make use of this device. */
439 ZfsDaemon::RequestSystemRescan();
444 ZfsEvent::TryDetach(Vdev &vdev, void *cbArg)
448 * if this device is a spare, and its parent includes one healthy,
449 * non-spare child, then detach this device.
451 zpool_handle_t *hdl(static_cast<zpool_handle_t*>(cbArg));
453 if (vdev.IsSpare()) {
454 std::list<Vdev> siblings;
455 std::list<Vdev>::iterator siblings_it;
456 boolean_t cleanup = B_FALSE;
458 Vdev parent = vdev.Parent();
459 siblings = parent.Children();
461 /* Determine whether the parent should be cleaned up */
462 for (siblings_it = siblings.begin();
463 siblings_it != siblings.end();
465 Vdev sibling = *siblings_it;
467 if (!sibling.IsSpare() &&
468 sibling.State() == VDEV_STATE_HEALTHY) {
475 syslog(LOG_INFO, "Detaching spare vdev %s from pool %s",
476 vdev.Path().c_str(), zpool_get_name(hdl));
477 zpool_vdev_detach(hdl, vdev.Path().c_str());
482 /* Always return false, because there may be other spares to detach */