2 * Copyright (c) 2009-2010 The FreeBSD Foundation
5 * This software was developed by Pawel Jakub Dawidek under sponsorship from
6 * the FreeBSD Foundation.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
51 #include <activemap.h>
57 #include "hast_proto.h"
/* Queue linkage: a hio sits on exactly one of the lists declared below. */
72 TAILQ_ENTRY(hio) hio_next;
76 * Free list holds unused structures. When free list is empty, we have to wait
77 * until some in-progress requests are freed.
79 static TAILQ_HEAD(, hio) hio_free_list;
80 static pthread_mutex_t hio_free_list_lock;
81 static pthread_cond_t hio_free_list_cond;
83 * Disk thread (the one that does I/O requests) takes requests from this list.
85 static TAILQ_HEAD(, hio) hio_disk_list;
86 static pthread_mutex_t hio_disk_list_lock;
87 static pthread_cond_t hio_disk_list_cond;
89 * There is one recv list for every component, although local components don't
90 * use recv lists as local requests are done synchronously.
/*
 * NOTE(review): the comment above talks about "recv" lists, but the list
 * declared here is the send list consumed by send_thread() — looks like a
 * stale copy-paste; verify against the full file.
 */
92 static TAILQ_HEAD(, hio) hio_send_list;
93 static pthread_mutex_t hio_send_list_lock;
94 static pthread_cond_t hio_send_list_cond;
97 * Maximum number of outstanding I/O requests.
/* Size of the preallocated hio pool built in init_environment(). */
99 #define HAST_HIO_MAX 256
/* Worker threads started from hastd_secondary(). */
101 static void *recv_thread(void *arg);
102 static void *disk_thread(void *arg);
103 static void *send_thread(void *arg);
/*
 * Set up the process-global state used by the worker threads: the three
 * hio queues with their locks/condvars, and a fixed pool of HAST_HIO_MAX
 * preallocated request structures (each with a MAXPHYS data buffer)
 * parked on the free list.  Exits the process on allocation failure.
 */
106 init_environment(void)
112 * Initialize lists, their locks and their condition variables.
114 TAILQ_INIT(&hio_free_list);
115 mtx_init(&hio_free_list_lock);
116 cv_init(&hio_free_list_cond);
117 TAILQ_INIT(&hio_disk_list);
118 mtx_init(&hio_disk_list_lock);
119 cv_init(&hio_disk_list_cond);
120 TAILQ_INIT(&hio_send_list);
121 mtx_init(&hio_send_list_lock);
122 cv_init(&hio_send_list_cond);
125 * Allocate requests pool and initialize requests.
127 for (ii = 0; ii < HAST_HIO_MAX; ii++) {
128 hio = malloc(sizeof(*hio));
130 errx(EX_TEMPFAIL, "cannot allocate %zu bytes of memory "
131 "for hio request", sizeof(*hio));
/* Data buffer is sized for the largest possible request (MAXPHYS). */
134 hio->hio_data = malloc(MAXPHYS);
135 if (hio->hio_data == NULL) {
136 errx(EX_TEMPFAIL, "cannot allocate %zu bytes of memory "
137 "for gctl_data", (size_t)MAXPHYS);
/* Freshly built request goes straight onto the free list. */
139 TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_next);
/*
 * Prepare the local component of the resource: read the on-disk HAST
 * metadata (metadata_read() with 'openrw' true) before the resource can
 * serve requests.  Error handling for a failed read is outside this view.
 */
144 init_local(struct hast_resource *res)
147 if (metadata_read(res, true) < 0)
/*
 * Handshake with the primary node: exchange datasize/extentsize and
 * local/remote generation counters, decide the synchronization source
 * (primary or secondary, regular or full), send our on-disk activemap
 * to the primary, and detect split-brain.  Exits the worker on
 * unrecoverable errors (allocation failure, activemap read failure,
 * split-brain).
 */
152 init_remote(struct hast_resource *res, struct nv *nvin)
/* Advertise our geometry so the primary can sanity-check it. */
162 nv_add_int64(nvout, (int64_t)res->hr_datasize, "datasize");
163 nv_add_int32(nvout, (int32_t)res->hr_extentsize, "extentsize");
/* Pull the primary's resource UID and generation counters from nvin. */
164 resuid = nv_get_uint64(nvin, "resuid");
165 res->hr_primary_localcnt = nv_get_uint64(nvin, "localcnt");
166 res->hr_primary_remotecnt = nv_get_uint64(nvin, "remotecnt");
/* ... and report our own counters back. */
167 nv_add_uint64(nvout, res->hr_secondary_localcnt, "localcnt");
168 nv_add_uint64(nvout, res->hr_secondary_remotecnt, "remotecnt");
/* On-disk activemap size depends on media size, extent and sector size. */
169 mapsize = activemap_calc_ondisk_size(res->hr_local_mediasize -
170 METADATA_SIZE, res->hr_extentsize, res->hr_local_sectorsize);
171 map = malloc(mapsize);
173 pjdlog_exitx(EX_TEMPFAIL,
174 "Unable to allocate memory (%zu bytes) for activemap.",
177 nv_add_uint32(nvout, (uint32_t)mapsize, "mapsize");
179 * When we work as primary and secondary is missing we will increase
180 * localcnt in our metadata. When secondary is connected and synced
181 * we make localcnt be equal to remotecnt, which means nodes are more
183 * Split-brain condition is when both nodes are not able to communicate
184 * and are both configured as primary nodes. In turn, they can both
185 * make incompatible changes to the data and we have to detect that.
186 * Under split-brain condition we will increase our localcnt on first
187 * write and remote node will increase its localcnt on first write.
188 * When we connect we can see that primary's localcnt is greater than
189 * our remotecnt (primary was modified while we weren't watching) and
190 * our localcnt is greater than primary's remotecnt (we were modified
191 * while primary wasn't watching).
192 * There are many possible combinations which are all gathered below.
193 * Don't pay too much attention to exact numbers, the more important
194 * is to compare them. We compare secondary's local with primary's
195 * remote and secondary's remote with primary's local.
196 * Note that every case where primary's localcnt is smaller than
197 * secondary's remotecnt and where secondary's localcnt is smaller than
198 * primary's remotecnt should be impossible in practice. We will perform
199 * full synchronization then. Those cases are marked with an asterisk.
200 * Regular synchronization means that only extents marked as dirty are
201 * synchronized (regular synchronization).
203 * SECONDARY METADATA PRIMARY METADATA
204 * local=3 remote=3 local=2 remote=2* ?! Full sync from secondary.
205 * local=3 remote=3 local=2 remote=3* ?! Full sync from primary.
206 * local=3 remote=3 local=2 remote=4* ?! Full sync from primary.
207 * local=3 remote=3 local=3 remote=2 Primary is out-of-date,
208 * regular sync from secondary.
209 * local=3 remote=3 local=3 remote=3 Regular sync just in case.
210 * local=3 remote=3 local=3 remote=4* ?! Full sync from primary.
211 * local=3 remote=3 local=4 remote=2 Split-brain condition.
212 * local=3 remote=3 local=4 remote=3 Secondary out-of-date,
213 * regular sync from primary.
214 * local=3 remote=3 local=4 remote=4* ?! Full sync from primary.
/* resuid == 0 means our metadata was never used — adopt primary's UID. */
216 if (res->hr_resuid == 0) {
218 * Provider is used for the first time. Initialize everything.
220 assert(res->hr_secondary_localcnt == 0);
221 res->hr_resuid = resuid;
222 if (metadata_write(res) < 0)
/* All-ones map: every extent dirty, forcing a full initial sync. */
224 memset(map, 0xff, mapsize);
225 nv_add_uint8(nvout, HAST_SYNCSRC_PRIMARY, "syncsrc");
227 /* Is primary out-of-date? */
228 (res->hr_secondary_localcnt > res->hr_primary_remotecnt &&
229 res->hr_secondary_remotecnt == res->hr_primary_localcnt) ||
230 /* Nodes are more or less in sync? */
231 (res->hr_secondary_localcnt == res->hr_primary_remotecnt &&
232 res->hr_secondary_remotecnt == res->hr_primary_localcnt) ||
233 /* Is secondary out-of-date? */
234 (res->hr_secondary_localcnt == res->hr_primary_remotecnt &&
235 res->hr_secondary_remotecnt < res->hr_primary_localcnt)) {
237 * Nodes are more or less in sync or one of the nodes is
239 * It doesn't matter at this point which one, we just have to
240 * send out local bitmap to the remote node.
/* Read the current on-disk activemap; it follows the metadata block. */
242 if (pread(res->hr_localfd, map, mapsize, METADATA_SIZE) !=
244 pjdlog_exit(LOG_ERR, "Unable to read activemap");
246 if (res->hr_secondary_localcnt > res->hr_primary_remotecnt &&
247 res->hr_secondary_remotecnt == res->hr_primary_localcnt) {
248 /* Primary is out-of-date, sync from secondary. */
249 nv_add_uint8(nvout, HAST_SYNCSRC_SECONDARY, "syncsrc");
252 * Secondary is out-of-date or counts match.
255 nv_add_uint8(nvout, HAST_SYNCSRC_PRIMARY, "syncsrc");
257 } else if (res->hr_secondary_localcnt > res->hr_primary_remotecnt &&
258 res->hr_primary_localcnt > res->hr_secondary_remotecnt) {
260 * Not good, we have split-brain condition.
262 pjdlog_error("Split-brain detected, exiting.");
263 nv_add_string(nvout, "Split-brain condition!", "errmsg");
267 } else /* if (res->hr_secondary_localcnt < res->hr_primary_remotecnt ||
268 res->hr_primary_localcnt < res->hr_secondary_remotecnt) */ {
270 * This should never happen in practice, but we will perform
271 * full synchronization.
273 assert(res->hr_secondary_localcnt < res->hr_primary_remotecnt ||
274 res->hr_primary_localcnt < res->hr_secondary_remotecnt);
275 mapsize = activemap_calc_ondisk_size(res->hr_local_mediasize -
276 METADATA_SIZE, res->hr_extentsize,
277 res->hr_local_sectorsize);
/* Full sync: mark the entire map dirty. */
278 memset(map, 0xff, mapsize);
279 if (res->hr_secondary_localcnt > res->hr_primary_remotecnt) {
280 /* In this one of five cases sync from secondary. */
281 nv_add_uint8(nvout, HAST_SYNCSRC_SECONDARY, "syncsrc");
283 /* For the rest four cases sync from primary. */
284 nv_add_uint8(nvout, HAST_SYNCSRC_PRIMARY, "syncsrc");
286 pjdlog_warning("This should never happen, asking for full synchronization (primary(local=%ju, remote=%ju), secondary(local=%ju, remote=%ju)).",
287 (uintmax_t)res->hr_primary_localcnt,
288 (uintmax_t)res->hr_primary_remotecnt,
289 (uintmax_t)res->hr_secondary_localcnt,
290 (uintmax_t)res->hr_secondary_remotecnt);
/* Ship the handshake reply plus our activemap to the primary. */
292 if (hast_proto_send(res, res->hr_remotein, nvout, map, mapsize) < 0) {
293 pjdlog_errno(LOG_WARNING, "Unable to send activemap to %s",
/* Re-check split-brain after the reply was sent, and bail out. */
299 if (res->hr_secondary_localcnt > res->hr_primary_remotecnt &&
300 res->hr_primary_localcnt > res->hr_secondary_remotecnt) {
301 /* Exit on split-brain. */
/*
 * Entry point for serving a resource in the secondary role.  Forks a
 * worker child: the parent keeps the control channel and records the
 * worker PID; the child drops the remote connections it does not own,
 * performs the remote handshake (init_remote) and then runs the
 * recv/disk/send worker threads, with ctrl_thread() in the main thread.
 */
307 hastd_secondary(struct hast_resource *res, struct nv *nvin)
314 * Create communication channel between parent and child.
316 if (proto_client("socketpair://", &res->hr_ctrl) < 0) {
/* KEEP_ERRNO preserves errno across the pidfile cleanup. */
317 KEEP_ERRNO((void)pidfile_remove(pfh));
318 pjdlog_exit(EX_OSERR,
319 "Unable to create control sockets between parent and child");
324 KEEP_ERRNO((void)pidfile_remove(pfh));
325 pjdlog_exit(EX_OSERR, "Unable to fork");
329 /* This is parent. */
/* Parent does no remote I/O — close both directions and remember child. */
330 proto_close(res->hr_remotein);
331 res->hr_remotein = NULL;
332 proto_close(res->hr_remoteout);
333 res->hr_remoteout = NULL;
334 res->hr_workerpid = pid;
/* Child path from here: detach from the pidfile and retitle the process. */
337 (void)pidfile_close(pfh);
339 setproctitle("%s (secondary)", res->hr_name);
341 /* Error in setting timeout is not critical, but why should it fail? */
/* Incoming connection blocks indefinitely; outgoing uses resource timeout. */
342 if (proto_timeout(res->hr_remotein, 0) < 0)
343 pjdlog_errno(LOG_WARNING, "Unable to set connection timeout");
344 if (proto_timeout(res->hr_remoteout, res->hr_timeout) < 0)
345 pjdlog_errno(LOG_WARNING, "Unable to set connection timeout");
/* Handshake with the primary, then start the worker pipeline. */
348 init_remote(res, nvin);
351 error = pthread_create(&td, NULL, recv_thread, res);
353 error = pthread_create(&td, NULL, disk_thread, res);
355 error = pthread_create(&td, NULL, send_thread, res);
/* Main thread handles control requests for the lifetime of the worker. */
357 (void)ctrl_thread(res);
/*
 * Log a message about the given request: the caller-supplied printf-style
 * prefix is followed by a human-readable description of the request
 * (command, offset, length), then handed to pjdlog_common() with the
 * given level/debug-level/errno.
 */
361 reqlog(int loglevel, int debuglevel, int error, struct hio *hio, const char *fmt, ...)
368 len = vsnprintf(msg, sizeof(msg), fmt, ap);
/* Append the request description only if the prefix was not truncated. */
370 if ((size_t)len < sizeof(msg)) {
371 switch (hio->hio_cmd) {
373 (void)snprintf(msg + len, sizeof(msg) - len,
374 "READ(%ju, %ju).", (uintmax_t)hio->hio_offset,
375 (uintmax_t)hio->hio_length);
378 (void)snprintf(msg + len, sizeof(msg) - len,
379 "DELETE(%ju, %ju).", (uintmax_t)hio->hio_offset,
380 (uintmax_t)hio->hio_length);
383 (void)snprintf(msg + len, sizeof(msg) - len, "FLUSH.");
386 (void)snprintf(msg + len, sizeof(msg) - len,
387 "WRITE(%ju, %ju).", (uintmax_t)hio->hio_offset,
388 (uintmax_t)hio->hio_length);
391 (void)snprintf(msg + len, sizeof(msg) - len,
392 "UNKNOWN(%u).", (unsigned int)hio->hio_cmd);
396 pjdlog_common(loglevel, debuglevel, error, "%s", msg);
/*
 * Decode and validate a request header received from the primary.
 * Extracts 'cmd', and for data commands 'offset'/'length', from
 * hio->hio_nv.  Every validation failure sets hio->hio_error to EINVAL;
 * the function returns hio->hio_error (0 on success), so the caller can
 * bounce invalid requests back without touching the disk.
 */
400 requnpack(struct hast_resource *res, struct hio *hio)
403 hio->hio_cmd = nv_get_uint8(hio->hio_nv, "cmd");
/* 0 is not a valid command — the field was absent or malformed. */
404 if (hio->hio_cmd == 0) {
405 pjdlog_error("Header contains no 'cmd' field.");
406 hio->hio_error = EINVAL;
409 switch (hio->hio_cmd) {
/* Data-carrying commands need a validated offset/length pair. */
413 hio->hio_offset = nv_get_uint64(hio->hio_nv, "offset");
414 if (nv_error(hio->hio_nv) != 0) {
415 pjdlog_error("Header is missing 'offset' field.");
416 hio->hio_error = EINVAL;
419 hio->hio_length = nv_get_uint64(hio->hio_nv, "length");
420 if (nv_error(hio->hio_nv) != 0) {
421 pjdlog_error("Header is missing 'length' field.");
422 hio->hio_error = EINVAL;
425 if (hio->hio_length == 0) {
426 pjdlog_error("Data length is zero.");
427 hio->hio_error = EINVAL;
/* Our preallocated data buffers are MAXPHYS bytes — hard upper bound. */
430 if (hio->hio_length > MAXPHYS) {
431 pjdlog_error("Data length is too large (%ju > %ju).",
432 (uintmax_t)hio->hio_length, (uintmax_t)MAXPHYS);
433 hio->hio_error = EINVAL;
/* Both offset and length must be sector-aligned for the local provider. */
436 if ((hio->hio_offset % res->hr_local_sectorsize) != 0) {
437 pjdlog_error("Offset %ju is not multiple of sector size.",
438 (uintmax_t)hio->hio_offset);
439 hio->hio_error = EINVAL;
442 if ((hio->hio_length % res->hr_local_sectorsize) != 0) {
443 pjdlog_error("Length %ju is not multiple of sector size.",
444 (uintmax_t)hio->hio_length);
445 hio->hio_error = EINVAL;
/* The request must fit entirely inside the exported data area. */
448 if (hio->hio_offset + hio->hio_length >
449 (uint64_t)res->hr_datasize) {
450 pjdlog_error("Data offset is too large (%ju > %ju).",
451 (uintmax_t)(hio->hio_offset + hio->hio_length),
452 (uintmax_t)res->hr_datasize);
453 hio->hio_error = EINVAL;
458 pjdlog_error("Header contains invalid 'cmd' (%hhu).",
460 hio->hio_error = EINVAL;
465 return (hio->hio_error);
469 * Thread receives requests from the primary node.
/*
 * Pipeline stage 1: pull a hio off the free list, receive and unpack a
 * request header from the primary (plus payload data for writes), and
 * queue the request for the disk thread — or straight to the send
 * thread when unpacking failed, so the error is reported back.
 */
472 recv_thread(void *arg)
474 struct hast_resource *res = arg;
479 pjdlog_debug(2, "recv: Taking free request.");
/* Block until a free hio is available; producers signal the condvar. */
480 mtx_lock(&hio_free_list_lock);
481 while ((hio = TAILQ_FIRST(&hio_free_list)) == NULL) {
482 pjdlog_debug(2, "recv: No free requests, waiting.");
483 cv_wait(&hio_free_list_cond, &hio_free_list_lock);
485 TAILQ_REMOVE(&hio_free_list, hio, hio_next);
486 mtx_unlock(&hio_free_list_lock);
487 pjdlog_debug(2, "recv: (%p) Got request.", hio);
488 if (hast_proto_recv_hdr(res->hr_remotein, &hio->hio_nv) < 0) {
489 pjdlog_exit(EX_TEMPFAIL,
490 "Unable to receive request header");
/* On a bad header, skip disk I/O; the error goes to the send queue. */
492 if (requnpack(res, hio) != 0)
494 reqlog(LOG_DEBUG, 2, -1, hio,
495 "recv: (%p) Got request header: ", hio);
/* Writes carry payload data that must be received into hio_data. */
496 if (hio->hio_cmd == HIO_WRITE) {
497 if (hast_proto_recv_data(res, res->hr_remotein,
498 hio->hio_nv, hio->hio_data, MAXPHYS) < 0) {
/*
 * NOTE(review): this receives request data from the primary,
 * but the message says "reply data" — looks like a wrong
 * string; confirm before changing (it is runtime output).
 */
499 pjdlog_exit(EX_TEMPFAIL,
500 "Unable to receive reply data");
503 pjdlog_debug(2, "recv: (%p) Moving request to the disk queue.",
/* Signal the disk thread only on empty->non-empty transition. */
505 mtx_lock(&hio_disk_list_lock);
506 wakeup = TAILQ_EMPTY(&hio_disk_list);
507 TAILQ_INSERT_TAIL(&hio_disk_list, hio, hio_next);
508 mtx_unlock(&hio_disk_list_lock);
510 cv_signal(&hio_disk_list_cond);
/* Error path: hand the failed request directly to the send thread. */
513 pjdlog_debug(2, "recv: (%p) Moving request to the send queue.",
515 mtx_lock(&hio_send_list_lock);
516 wakeup = TAILQ_EMPTY(&hio_send_list);
517 TAILQ_INSERT_TAIL(&hio_send_list, hio, hio_next);
518 mtx_unlock(&hio_send_list_lock);
520 cv_signal(&hio_send_list_cond);
527 * Thread reads from or writes to local component and also handles DELETE and
/*
 * Pipeline stage 2: take requests from the disk queue, perform the
 * local I/O (READ/WRITE/DELETE/FLUSH) against res->hr_localfd, record
 * any error in hio->hio_error, and pass the request on to the send
 * queue.  On the very first request it also clears the on-disk
 * activemap (safe once the primary has merged our map).
 */
531 disk_thread(void *arg)
533 struct hast_resource *res = arg;
536 bool clear_activemap, wakeup;
/* One-shot flag: the activemap is cleared before serving any request. */
538 clear_activemap = true;
541 pjdlog_debug(2, "disk: Taking request.");
542 mtx_lock(&hio_disk_list_lock);
543 while ((hio = TAILQ_FIRST(&hio_disk_list)) == NULL) {
544 pjdlog_debug(2, "disk: No requests, waiting.");
545 cv_wait(&hio_disk_list_cond, &hio_disk_list_lock);
547 TAILQ_REMOVE(&hio_disk_list, hio, hio_next);
548 mtx_unlock(&hio_disk_list_lock);
/* 'while' used as a breakable one-shot block, not a real loop. */
549 while (clear_activemap) {
554 * When first request is received, it means that primary
555 * already received our activemap, merged it and stored
556 * locally. We can now safely clear our activemap.
559 activemap_calc_ondisk_size(res->hr_local_mediasize -
560 METADATA_SIZE, res->hr_extentsize,
561 res->hr_local_sectorsize);
/* calloc gives an all-zero (all-clean) map image. */
562 map = calloc(1, mapsize);
564 pjdlog_warning("Unable to allocate memory to clear local activemap.");
/* Cleared map is written where the activemap lives, after metadata. */
567 if (pwrite(res->hr_localfd, map, mapsize,
568 METADATA_SIZE) != (ssize_t)mapsize) {
569 pjdlog_errno(LOG_WARNING,
570 "Unable to store cleared activemap");
575 clear_activemap = false;
576 pjdlog_debug(1, "Local activemap cleared.");
578 reqlog(LOG_DEBUG, 2, -1, hio, "disk: (%p) Got request: ", hio);
579 /* Handle the actual request. */
580 switch (hio->hio_cmd) {
/* hr_localoff shifts the request into the provider's data area. */
582 ret = pread(res->hr_localfd, hio->hio_data,
584 hio->hio_offset + res->hr_localoff);
586 hio->hio_error = errno;
/* Short read without errno means unexpected EOF — report EIO. */
587 else if (ret != (int64_t)hio->hio_length)
588 hio->hio_error = EIO;
593 ret = pwrite(res->hr_localfd, hio->hio_data,
595 hio->hio_offset + res->hr_localoff);
597 hio->hio_error = errno;
598 else if (ret != (int64_t)hio->hio_length)
599 hio->hio_error = EIO;
604 ret = g_delete(res->hr_localfd,
605 hio->hio_offset + res->hr_localoff,
608 hio->hio_error = errno;
613 ret = g_flush(res->hr_localfd);
615 hio->hio_error = errno;
620 if (hio->hio_error != 0) {
621 reqlog(LOG_ERR, 0, hio->hio_error, hio,
624 pjdlog_debug(2, "disk: (%p) Moving request to the send queue.",
/* Signal the send thread only on empty->non-empty transition. */
626 mtx_lock(&hio_send_list_lock);
627 wakeup = TAILQ_EMPTY(&hio_send_list);
628 TAILQ_INSERT_TAIL(&hio_send_list, hio, hio_next);
629 mtx_unlock(&hio_send_list_lock);
631 cv_signal(&hio_send_list_cond);
638 * Thread sends requests back to primary node.
/*
 * Pipeline stage 3: take completed requests from the send queue, build
 * the reply (sequence number, optional error, READ payload on success),
 * send it to the primary over hr_remoteout, then recycle the hio onto
 * the free list.  Exits the worker if the reply cannot be sent.
 */
641 send_thread(void *arg)
643 struct hast_resource *res = arg;
651 pjdlog_debug(2, "send: Taking request.");
652 mtx_lock(&hio_send_list_lock);
653 while ((hio = TAILQ_FIRST(&hio_send_list)) == NULL) {
654 pjdlog_debug(2, "send: No requests, waiting.");
655 cv_wait(&hio_send_list_cond, &hio_send_list_lock);
657 TAILQ_REMOVE(&hio_send_list, hio, hio_next);
658 mtx_unlock(&hio_send_list_lock);
659 reqlog(LOG_DEBUG, 2, -1, hio, "send: (%p) Got request: ", hio);
661 /* Copy sequence number. */
/* The primary matches replies to requests by this "seq" value. */
662 nv_add_uint64(nvout, nv_get_uint64(hio->hio_nv, "seq"), "seq");
663 switch (hio->hio_cmd) {
/* Only successful READs carry payload back to the primary. */
665 if (hio->hio_error == 0) {
666 data = hio->hio_data;
667 length = hio->hio_length;
671 * We send no data in case of an error.
684 if (hio->hio_error != 0)
685 nv_add_int16(nvout, hio->hio_error, "error");
686 if (hast_proto_send(res, res->hr_remoteout, nvout, data,
688 pjdlog_exit(EX_TEMPFAIL, "Unable to send reply.");
691 pjdlog_debug(2, "send: (%p) Moving request to the free queue.",
/* Header nv is per-request; free it before recycling the hio. */
693 nv_free(hio->hio_nv);
/* Signal waiters (recv_thread) only on empty->non-empty transition. */
695 mtx_lock(&hio_free_list_lock);
696 wakeup = TAILQ_EMPTY(&hio_free_list);
697 TAILQ_INSERT_TAIL(&hio_free_list, hio, hio_next);
698 mtx_unlock(&hio_free_list_lock);
700 cv_signal(&hio_free_list_cond);