/*-
 * Copyright (c) 2011, 2012, 2013, 2014, 2016 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
 */

/**
 * \file zfsd_event.cc
 */
#include <sys/cdefs.h>
#include <sys/time.h>
#include <sys/fs/zfs.h>

#include <syslog.h>

#include <libzfs.h>
/*
 * Undefine flush, defined by cpufunc.h on sparc64, because it conflicts with
 * C++ flush methods
 */
#undef   flush

#include <list>
#include <map>
#include <sstream>
#include <string>

#include <devdctl/guid.h>
#include <devdctl/event.h>
#include <devdctl/event_factory.h>
#include <devdctl/exception.h>
#include <devdctl/consumer.h>

#include "callout.h"
#include "vdev_iterator.h"
#include "zfsd_event.h"
#include "case_file.h"
#include "vdev.h"
#include "zfsd.h"
#include "zfsd_exception.h"
#include "zpool_list.h"

__FBSDID("$FreeBSD$");
/*============================ Namespace Control =============================*/
using DevdCtl::Event;
using DevdCtl::Guid;
using DevdCtl::NVPairMap;
using std::stringstream;

/*=========================== Class Implementations ==========================*/

/*-------------------------------- DevfsEvent --------------------------------*/

//- DevfsEvent Static Public Methods -------------------------------------------
Event *
DevfsEvent::Builder(Event::Type type,
                    NVPairMap &nvPairs,
                    const string &eventString)
{
        return (new DevfsEvent(type, nvPairs, eventString));
}

//- DevfsEvent Static Protected Methods ----------------------------------------
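/**
 * Read the ZFS label, if any, from the given device file descriptor.
 *
 * \param devFd     File descriptor of the device to interrogate.
 * \param inUse     Set to true if the device belongs to an active pool.
 * \param degraded  Set to true if the label reports a non-healthy vdev state.
 *
 * \return  The label nvlist on success (the caller must release it with
 *          nvlist_free()), or NULL if the label cannot be read or parsed.
 */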
nvlist_t *
DevfsEvent::ReadLabel(int devFd, bool &inUse, bool &degraded)
{
        pool_state_t poolState;
        char        *poolName;
        boolean_t    b_inuse;

        inUse    = false;
        degraded = false;
        poolName = NULL;
        if (zpool_in_use(g_zfsHandle, devFd, &poolState,
                         &poolName, &b_inuse) == 0) {
                nvlist_t *devLabel;

                inUse = b_inuse == B_TRUE;
                if (poolName != NULL)
                        free(poolName);

                if (zpool_read_label(devFd, &devLabel) != 0
                 || devLabel == NULL)
                        return (NULL);

                try {
                        Vdev vdev(devLabel);
                        degraded = vdev.State() != VDEV_STATE_HEALTHY;
                        return (devLabel);
                } catch (ZfsdException &exp) {
                        string devName = fdevname(devFd);
                        string devPath = _PATH_DEV + devName;
                        string context("DevfsEvent::ReadLabel: "
                                     + devPath + ": ");

                        exp.GetString().insert(0, context);
                        exp.Log();
                }
        }
        return (NULL);
}

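/**
 * Given a newly arrived device with a readable ZFS label, look for an open
 * CaseFile matching the pool and vdev GUIDs found in that label and, if one
 * exists, ask it to re-evaluate itself using the new device.
 *
 * \param devPath    Path to the device node.
 * \param physPath   Physical path of the device; may be empty if unknown.
 * \param devConfig  The vdev configuration read from the device's label.
 *
 * \return  The result of CaseFile::ReEvaluate() if a matching case was
 *          found; false otherwise.
 */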
bool
DevfsEvent::OnlineByLabel(const string &devPath, const string& physPath,
                              nvlist_t *devConfig)
{
        try {
                /*
                 * A device with ZFS label information has been
                 * inserted.  If it matches a device for which we
                 * have a case, see if we can solve that case.
                 */
                syslog(LOG_INFO, "Interrogating VDEV label for %s\n",
                       devPath.c_str());
                Vdev vdev(devConfig);
                CaseFile *caseFile(CaseFile::Find(vdev.PoolGUID(),
                                                  vdev.GUID()));
                if (caseFile != NULL)
                        return (caseFile->ReEvaluate(devPath, physPath, &vdev));

        } catch (ZfsdException &exp) {
                string context("DevfsEvent::OnlineByLabel: " + devPath + ": ");

                exp.GetString().insert(0, context);
                exp.Log();
        }
        return (false);
}

//- DevfsEvent Virtual Public Methods ------------------------------------------
Event *
DevfsEvent::DeepCopy() const
{
        return (new DevfsEvent(*this));
}

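/**
 * Handle a devfs change event.  Only CREATE events for disk devices are of
 * interest: the new device's label, if any, is read and used to resolve any
 * open case for the matching vdev; whole-disk devices without a usable
 * label may instead be matched to a case by physical path.
 *
 * \return  Always false; devfs events are never requeued.
 */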
bool
DevfsEvent::Process() const
{
        /*
         * We are only concerned with newly discovered
         * devices that can be ZFS vdevs.
         */
        if (Value("type") != "CREATE" || !IsDiskDev())
                return (false);

        /* Log the event since it is of interest. */
        Log(LOG_INFO);

        string devPath;
        if (!DevPath(devPath))
                return (false);

        int devFd(open(devPath.c_str(), O_RDONLY));
        if (devFd == -1)
                return (false);

        bool inUse;
        bool degraded;
        nvlist_t *devLabel(ReadLabel(devFd, inUse, degraded));

        string physPath;
        bool havePhysPath(PhysicalPath(physPath));

        string devName;
        DevName(devName);
        close(devFd);

        if (inUse && devLabel != NULL) {
                OnlineByLabel(devPath, physPath, devLabel);
        } else if (degraded) {
                syslog(LOG_INFO, "%s is marked degraded.  Ignoring "
                       "as a replace by physical path candidate.\n",
                       devName.c_str());
        } else if (havePhysPath && IsWholeDev()) {
                /*
                 * TODO: attempt to resolve events using every casefile
                 * that matches this physpath
                 */
                CaseFile *caseFile(CaseFile::Find(physPath));
                if (caseFile != NULL) {
                        syslog(LOG_INFO,
                               "Found CaseFile(%s:%s:%s) - ReEvaluating\n",
                               caseFile->PoolGUIDString().c_str(),
                               caseFile->VdevGUIDString().c_str(),
                               zpool_state_to_name(caseFile->VdevState(),
                                                   VDEV_AUX_NONE));
                        caseFile->ReEvaluate(devPath, physPath, /*vdev*/NULL);
                }
        }
        if (devLabel != NULL)
                nvlist_free(devLabel);
        return (false);
}

//- DevfsEvent Protected Methods -----------------------------------------------
DevfsEvent::DevfsEvent(Event::Type type, NVPairMap &nvpairs,
                               const string &eventString)
 : DevdCtl::DevfsEvent(type, nvpairs, eventString)
{
}

DevfsEvent::DevfsEvent(const DevfsEvent &src)
 : DevdCtl::DevfsEvent::DevfsEvent(src)
{
}

/*-------------------------------- GeomEvent --------------------------------*/

//- GeomEvent Static Public Methods -------------------------------------------
Event *
GeomEvent::Builder(Event::Type type,
                   NVPairMap &nvPairs,
                   const string &eventString)
{
        return (new GeomEvent(type, nvPairs, eventString));
}

//- GeomEvent Virtual Public Methods ------------------------------------------
Event *
GeomEvent::DeepCopy() const
{
        return (new GeomEvent(*this));
}

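/**
 * Handle a GEOM event.  Only GEOM::physpath changes on disk devices are of
 * interest, because a physical path match is what satisfies an autoreplace
 * operation for an open case.
 *
 * \return  Always false; GEOM events are never requeued.
 */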
bool
GeomEvent::Process() const
{
        /*
         * We are only concerned with physical path changes, because those can
         * be used to satisfy autoreplace operations
         */
        if (Value("type") != "GEOM::physpath" || !IsDiskDev())
                return (false);

        /* Log the event since it is of interest. */
        Log(LOG_INFO);

        string devPath;
        if (!DevPath(devPath))
                return (false);

        string physPath;
        bool havePhysPath(PhysicalPath(physPath));

        string devName;
        DevName(devName);

        if (havePhysPath) {
                /*
                 * TODO: attempt to resolve events using every casefile
                 * that matches this physpath
                 */
                CaseFile *caseFile(CaseFile::Find(physPath));
                if (caseFile != NULL) {
                        syslog(LOG_INFO,
                               "Found CaseFile(%s:%s:%s) - ReEvaluating\n",
                               caseFile->PoolGUIDString().c_str(),
                               caseFile->VdevGUIDString().c_str(),
                               zpool_state_to_name(caseFile->VdevState(),
                                                   VDEV_AUX_NONE));
                        caseFile->ReEvaluate(devPath, physPath, /*vdev*/NULL);
                }
        }
        return (false);
}

//- GeomEvent Protected Methods -----------------------------------------------
GeomEvent::GeomEvent(Event::Type type, NVPairMap &nvpairs,
                               const string &eventString)
 : DevdCtl::GeomEvent(type, nvpairs, eventString)
{
}

GeomEvent::GeomEvent(const GeomEvent &src)
 : DevdCtl::GeomEvent::GeomEvent(src)
{
}


/*--------------------------------- ZfsEvent ---------------------------------*/
//- ZfsEvent Static Public Methods ---------------------------------------------
DevdCtl::Event *
ZfsEvent::Builder(Event::Type type, NVPairMap &nvpairs,
                  const string &eventString)
{
        return (new ZfsEvent(type, nvpairs, eventString));
}

//- ZfsEvent Virtual Public Methods --------------------------------------------
Event *
ZfsEvent::DeepCopy() const
{
        return (new ZfsEvent(*this));
}

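/**
 * Handle a ZFS kernel event.  Pool-level (misc.fs.zfs.*) events are
 * dispatched to ProcessPoolEvent(); vdev-level events are matched against
 * an existing case file, or cause a new case to be created and evaluated.
 *
 * \return  True if the event could not be handled yet and should be queued
 *          for later replay (e.g. the pool or vdev is not currently known);
 *          false once the event has been processed or is of no interest.
 */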
bool
ZfsEvent::Process() const
{
        string logstr("");

        if (!Contains("class") && !Contains("type")) {
                syslog(LOG_ERR,
                       "ZfsEvent::Process: Missing class or type data.");
                return (false);
        }

        /* On config syncs, replay any queued events first. */
        if (Value("type").find("misc.fs.zfs.config_sync") == 0) {
                /*
                 * Even if saved events are unconsumed the second time
                 * around, drop them.  Any events that still can't be
                 * consumed are probably referring to vdevs or pools that
                 * no longer exist.
                 */
                ZfsDaemon::Get().ReplayUnconsumedEvents(/*discard*/true);
                CaseFile::ReEvaluateByGuid(PoolGUID(), *this);
        }

        if (Value("type").find("misc.fs.zfs.") == 0) {
                /* Configuration changes, resilver events, etc. */
                ProcessPoolEvent();
                return (false);
        }

        if (!Contains("pool_guid") || !Contains("vdev_guid")) {
                /* Only currently interested in Vdev related events. */
                return (false);
        }

        CaseFile *caseFile(CaseFile::Find(PoolGUID(), VdevGUID()));
        if (caseFile != NULL) {
                Log(LOG_INFO);
                syslog(LOG_INFO, "Evaluating existing case file\n");
                caseFile->ReEvaluate(*this);
                return (false);
        }

        /* Skip events that can't be handled. */
        Guid poolGUID(PoolGUID());
        /* If there are no replicas for a pool, then it's not manageable. */
        if (Value("class").find("fs.zfs.vdev.no_replicas") == 0) {
                stringstream msg;
                msg << "No replicas available for pool "  << poolGUID;
                msg << ", ignoring";
                Log(LOG_INFO);
                syslog(LOG_INFO, "%s", msg.str().c_str());
                return (false);
        }

        /*
         * Create a case file for this vdev, and have it
         * evaluate the event.
         */
        ZpoolList zpl(ZpoolList::ZpoolByGUID, &poolGUID);
        if (zpl.empty()) {
                stringstream msg;
                int priority = LOG_INFO;
                msg << "ZfsEvent::Process: Event for unknown pool ";
                msg << poolGUID << " ";
                msg << "queued";
                Log(LOG_INFO);
                syslog(priority, "%s", msg.str().c_str());
                return (true);
        }

        nvlist_t *vdevConfig = VdevIterator(zpl.front()).Find(VdevGUID());
        if (vdevConfig == NULL) {
                stringstream msg;
                int priority = LOG_INFO;
                msg << "ZfsEvent::Process: Event for unknown vdev ";
                msg << VdevGUID() << " ";
                msg << "queued";
                Log(LOG_INFO);
                syslog(priority, "%s", msg.str().c_str());
                return (true);
        }

        Vdev vdev(zpl.front(), vdevConfig);
        caseFile = &CaseFile::Create(vdev);
        if (caseFile->ReEvaluate(*this) == false) {
                stringstream msg;
                int priority = LOG_INFO;
                msg << "ZfsEvent::Process: Unconsumed event for vdev(";
                msg << zpool_get_name(zpl.front()) << ",";
                msg << vdev.GUID() << ") ";
                msg << "queued";
                Log(LOG_INFO);
                syslog(priority, "%s", msg.str().c_str());
                return (true);
        }
        return (false);
}

//- ZfsEvent Protected Methods -------------------------------------------------
ZfsEvent::ZfsEvent(Event::Type type, NVPairMap &nvpairs,
                           const string &eventString)
 : DevdCtl::ZfsEvent(type, nvpairs, eventString)
{
}

ZfsEvent::ZfsEvent(const ZfsEvent &src)
 : DevdCtl::ZfsEvent(src)
{
}

/*
 * Sometimes the kernel won't detach a spare when it is no longer needed.  This
 * can happen for example if a drive is removed, then either the pool is
 * exported or the machine is powered off, then the drive is reinserted, then
 * the machine is powered on or the pool is imported.  ZFSD must detach these
 * spares itself.
 */
void
ZfsEvent::CleanupSpares() const
{
        Guid poolGUID(PoolGUID());
        ZpoolList zpl(ZpoolList::ZpoolByGUID, &poolGUID);
        if (!zpl.empty()) {
                zpool_handle_t* hdl;

                hdl = zpl.front();
                VdevIterator(hdl).Each(TryDetach, (void*)hdl);
        }
}

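/**
 * Handle a pool-level (misc.fs.zfs.*) event: discard open cases when the
 * pool is destroyed, re-evaluate any case matching the event's vdev, clean
 * up leftover spares once a resilver finishes, and request a system rescan
 * when a vdev is removed while no known device remains degraded.
 */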
void
ZfsEvent::ProcessPoolEvent() const
{
        bool degradedDevice(false);

        /* The pool is destroyed.  Discard any open cases */
        if (Value("type") == "misc.fs.zfs.pool_destroy") {
                Log(LOG_INFO);
                CaseFile::ReEvaluateByGuid(PoolGUID(), *this);
                return;
        }

        CaseFile *caseFile(CaseFile::Find(PoolGUID(), VdevGUID()));
        if (caseFile != NULL) {
                if (caseFile->VdevState() != VDEV_STATE_UNKNOWN
                 && caseFile->VdevState() < VDEV_STATE_HEALTHY)
                        degradedDevice = true;

                Log(LOG_INFO);
                caseFile->ReEvaluate(*this);
        }
        else if (Value("type") == "misc.fs.zfs.resilver_finish")
        {
                /*
                 * It's possible to get a resilver_finish event with no
                 * corresponding casefile.  For example, if a damaged pool were
                 * exported, repaired, then reimported.
                 */
                Log(LOG_INFO);
                CleanupSpares();
        }

        if (Value("type") == "misc.fs.zfs.vdev_remove"
         && degradedDevice == false) {

                /* See if any other cases can make use of this device. */
                Log(LOG_INFO);
                ZfsDaemon::RequestSystemRescan();
        }
}

bool
ZfsEvent::TryDetach(Vdev &vdev, void *cbArg)
{
        /*
         * Outline:
         * if this device is a spare, and its parent includes one healthy,
         * non-spare child, then detach this device.
         */
        zpool_handle_t *hdl(static_cast<zpool_handle_t*>(cbArg));

        if (vdev.IsSpare()) {
                std::list<Vdev> siblings;
                std::list<Vdev>::iterator siblings_it;
                boolean_t cleanup = B_FALSE;

                Vdev parent = vdev.Parent();
                siblings = parent.Children();

                /* Determine whether the parent should be cleaned up */
                for (siblings_it = siblings.begin();
                     siblings_it != siblings.end();
                     siblings_it++) {
                        Vdev sibling = *siblings_it;

                        if (!sibling.IsSpare() &&
                             sibling.State() == VDEV_STATE_HEALTHY) {
                                cleanup = B_TRUE;
                                break;
                        }
                }

                if (cleanup) {
                        syslog(LOG_INFO, "Detaching spare vdev %s from pool %s",
                               vdev.Path().c_str(), zpool_get_name(hdl));
                        zpool_vdev_detach(hdl, vdev.Path().c_str());
                }

        }

        /* Always return false, because there may be other spares to detach */
        return (false);
}