/*      $FreeBSD$ */
/*      $NetBSD: rf_parityloggingdags.c,v 1.4 2000/01/07 03:41:04 oster Exp $   */
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: William V. Courtright II
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <dev/raidframe/rf_archs.h>

#if RF_INCLUDE_PARITYLOGGING > 0

/*
 * DAGs specific to parity logging are created here.
 */

#include <dev/raidframe/rf_types.h>
#include <dev/raidframe/rf_raid.h>
#include <dev/raidframe/rf_dag.h>
#include <dev/raidframe/rf_dagutils.h>
#include <dev/raidframe/rf_dagfuncs.h>
#include <dev/raidframe/rf_debugMem.h>
#include <dev/raidframe/rf_paritylog.h>
#include <dev/raidframe/rf_memchunk.h>
#include <dev/raidframe/rf_general.h>

#include <dev/raidframe/rf_parityloggingdags.h>
/******************************************************************************
 *
 * creates a DAG to perform a large-write operation:
 *
 *         / Rod \     / Wnd \
 * H -- NIL- Rod - NIL - Wnd ------ NIL - T
 *         \ Rod /     \ Xor - Lpo /
 *
 * The writes are not started until all the reads complete because, if the
 * two were done in parallel, a failure on one of the reads could leave the
 * parity in an inconsistent state, and a retry with a new DAG would then
 * produce erroneous parity.
 *
 * Note:  this DAG has the nasty property that none of the buffers allocated
 *        for reading old data can be freed until the XOR node fires.  Need
 *        to fix this.
 *
 * The last two arguments are the number of faults tolerated and the function
 * used for the redundancy calculation.  The undo for the redundancy
 * calculation is assumed to be null.
 *
 *****************************************************************************/
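
/*
 * Example, derived from the node allocation below: a large write covering
 * three stripe units that needs two reads of old data from the unaccessed
 * portion of the stripe (nRodNodes == 2) builds 3 Wnd + 2 Rod nodes plus
 * the six fixed nodes (Xor, Lpo, block, sync, unblock, term), 11 in total.
 */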

void
rf_CommonCreateParityLoggingLargeWriteDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    int nfaults,
    int (*redFunc) (RF_DagNode_t *))
{
        RF_DagNode_t *nodes, *wndNodes, *rodNodes = NULL, *syncNode, *xorNode,
               *lpoNode, *blockNode, *unblockNode, *termNode;
        int     nWndNodes, nRodNodes, i;
        RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
        RF_AccessStripeMapHeader_t *new_asm_h[2];
        int     nodeNum, asmNum;
        RF_ReconUnitNum_t which_ru;
        char   *sosBuffer, *eosBuffer;
        RF_PhysDiskAddr_t *pda;
        RF_StripeNum_t parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout), asmap->raidAddress, &which_ru);

        if (rf_dagDebug)
                printf("[Creating parity-logging large-write DAG]\n");
        RF_ASSERT(nfaults == 1);        /* this architecture is only single-fault tolerant */
        dag_h->creator = "ParityLoggingLargeWriteDAG";

        /* alloc the Wnd nodes, the xor node, and the Lpo node */
        nWndNodes = asmap->numStripeUnitsAccessed;
        RF_CallocAndAdd(nodes, nWndNodes + 6, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);
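        /* the six extra nodes are the fixed ones carved out just below:
         * Xor, Lpo, block, sync, unblock, and term */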
        i = 0;
        wndNodes = &nodes[i];
        i += nWndNodes;
        xorNode = &nodes[i];
        i += 1;
        lpoNode = &nodes[i];
        i += 1;
        blockNode = &nodes[i];
        i += 1;
        syncNode = &nodes[i];
        i += 1;
        unblockNode = &nodes[i];
        i += 1;
        termNode = &nodes[i];
        i += 1;

        dag_h->numCommitNodes = nWndNodes + 1;
        dag_h->numCommits = 0;
        dag_h->numSuccedents = 1;

        rf_MapUnaccessedPortionOfStripe(raidPtr, layoutPtr, asmap, dag_h, new_asm_h, &nRodNodes, &sosBuffer, &eosBuffer, allocList);
        if (nRodNodes > 0)
                RF_CallocAndAdd(rodNodes, nRodNodes, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);

        /* begin node initialization */
        rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nRodNodes + 1, 0, 0, 0, dag_h, "Nil", allocList);
        rf_InitNode(unblockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, 1, nWndNodes + 1, 0, 0, dag_h, "Nil", allocList);
        rf_InitNode(syncNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nWndNodes + 1, nRodNodes + 1, 0, 0, dag_h, "Nil", allocList);
        rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc, NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);
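
        /*
         * For reference (inferred from these call sites and the asserts on
         * numSuccedents/numAntecedents below): the rf_InitNode arguments
         * following the undo and wakeup functions are the successor count,
         * antecedent count, parameter count, and result count, followed by
         * the DAG header, node name, and allocation list.
         */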

        /* initialize the Rod nodes */
        for (nodeNum = asmNum = 0; asmNum < 2; asmNum++) {
                if (new_asm_h[asmNum]) {
                        pda = new_asm_h[asmNum]->stripeMap->physInfo;
                        while (pda) {
                                rf_InitNode(&rodNodes[nodeNum], rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rod", allocList);
                                rodNodes[nodeNum].params[0].p = pda;
                                rodNodes[nodeNum].params[1].p = pda->bufPtr;
                                rodNodes[nodeNum].params[2].v = parityStripeID;
                                /* RF_CREATE_PARAM3 packs (priority, lock
                                 * flag, unlock flag, which_ru) into one
                                 * parameter word; cf. the lu_flag uses in
                                 * the small-write DAG below */
                                rodNodes[nodeNum].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
                                nodeNum++;
                                pda = pda->next;
                        }
                }
        }
        RF_ASSERT(nodeNum == nRodNodes);

        /* initialize the Wnd nodes */
        pda = asmap->physInfo;
        for (i = 0; i < nWndNodes; i++) {
                rf_InitNode(&wndNodes[i], rf_wait, RF_TRUE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Wnd", allocList);
                RF_ASSERT(pda != NULL);
                wndNodes[i].params[0].p = pda;
                wndNodes[i].params[1].p = pda->bufPtr;
                wndNodes[i].params[2].v = parityStripeID;
                wndNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
                pda = pda->next;
        }

        /* initialize the redundancy node */
        rf_InitNode(xorNode, rf_wait, RF_TRUE, redFunc, rf_NullNodeUndoFunc, NULL, 1, 1, 2 * (nWndNodes + nRodNodes) + 1, 1, dag_h, "Xr ", allocList);
        xorNode->flags |= RF_DAGNODE_FLAG_YIELD;
        for (i = 0; i < nWndNodes; i++) {
                xorNode->params[2 * i + 0] = wndNodes[i].params[0];     /* pda */
                xorNode->params[2 * i + 1] = wndNodes[i].params[1];     /* buf ptr */
        }
        for (i = 0; i < nRodNodes; i++) {
                xorNode->params[2 * (nWndNodes + i) + 0] = rodNodes[i].params[0];       /* pda */
                xorNode->params[2 * (nWndNodes + i) + 1] = rodNodes[i].params[1];       /* buf ptr */
        }
        xorNode->params[2 * (nWndNodes + nRodNodes)].p = raidPtr;       /* xor node needs to get at RAID information */
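        /*
         * Parameter layout, matching the count 2 * (nWndNodes + nRodNodes)
         * + 1 passed to rf_InitNode above: a {pda, buffer} pair per Wnd
         * node, then a {pda, buffer} pair per Rod node, then raidPtr.
         */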

        /* look for an Rod node that reads a complete SU.  If none, alloc a
         * buffer to receive the parity info.  Note that we can't use a new
         * data buffer because it will not have gotten written when the xor
         * occurs. */
        for (i = 0; i < nRodNodes; i++)
                if (((RF_PhysDiskAddr_t *) rodNodes[i].params[0].p)->numSector == raidPtr->Layout.sectorsPerStripeUnit)
                        break;
        if (i == nRodNodes) {
                RF_CallocAndAdd(xorNode->results[0], 1, rf_RaidAddressToByte(raidPtr, raidPtr->Layout.sectorsPerStripeUnit), (void *), allocList);
        } else {
                xorNode->results[0] = rodNodes[i].params[1].p;
        }

        /* initialize the Lpo node */
        rf_InitNode(lpoNode, rf_wait, RF_FALSE, rf_ParityLogOverwriteFunc, rf_ParityLogOverwriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 2, 0, dag_h, "Lpo", allocList);

        lpoNode->params[0].p = asmap->parityInfo;
        lpoNode->params[1].p = xorNode->results[0];
        RF_ASSERT(asmap->parityInfo->next == NULL);     /* parityInfo must describe entire parity unit */

        /* connect nodes to form graph */

        /* connect dag header to block node */
        RF_ASSERT(dag_h->numSuccedents == 1);
        RF_ASSERT(blockNode->numAntecedents == 0);
        dag_h->succedents[0] = blockNode;

        /* connect the block node to the Rod nodes */
        RF_ASSERT(blockNode->numSuccedents == nRodNodes + 1);
        for (i = 0; i < nRodNodes; i++) {
                RF_ASSERT(rodNodes[i].numAntecedents == 1);
                blockNode->succedents[i] = &rodNodes[i];
                rodNodes[i].antecedents[0] = blockNode;
                rodNodes[i].antType[0] = rf_control;
        }

        /* connect the block node to the sync node */
        /* necessary if nRodNodes == 0 */
        RF_ASSERT(syncNode->numAntecedents == nRodNodes + 1);
        blockNode->succedents[nRodNodes] = syncNode;
        syncNode->antecedents[0] = blockNode;
        syncNode->antType[0] = rf_control;

        /* connect the Rod nodes to the syncNode */
        for (i = 0; i < nRodNodes; i++) {
                rodNodes[i].succedents[0] = syncNode;
                syncNode->antecedents[1 + i] = &rodNodes[i];
                syncNode->antType[1 + i] = rf_control;
        }

        /* connect the sync node to the xor node */
        RF_ASSERT(syncNode->numSuccedents == nWndNodes + 1);
        RF_ASSERT(xorNode->numAntecedents == 1);
        syncNode->succedents[0] = xorNode;
        xorNode->antecedents[0] = syncNode;
        xorNode->antType[0] = rf_trueData;      /* carry forward from sync */
        /* connect the sync node to the Wnd nodes */
        for (i = 0; i < nWndNodes; i++) {
                RF_ASSERT(wndNodes[i].numAntecedents == 1);
                syncNode->succedents[1 + i] = &wndNodes[i];
                wndNodes[i].antecedents[0] = syncNode;
                wndNodes[i].antType[0] = rf_control;
        }

        /* connect the xor node to the Lpo node */
        RF_ASSERT(xorNode->numSuccedents == 1);
        RF_ASSERT(lpoNode->numAntecedents == 1);
        xorNode->succedents[0] = lpoNode;
        lpoNode->antecedents[0] = xorNode;
        lpoNode->antType[0] = rf_trueData;

        /* connect the Wnd nodes to the unblock node */
        RF_ASSERT(unblockNode->numAntecedents == nWndNodes + 1);
        for (i = 0; i < nWndNodes; i++) {
                RF_ASSERT(wndNodes[i].numSuccedents == 1);
                wndNodes[i].succedents[0] = unblockNode;
                unblockNode->antecedents[i] = &wndNodes[i];
                unblockNode->antType[i] = rf_control;
        }

        /* connect the Lpo node to the unblock node */
        RF_ASSERT(lpoNode->numSuccedents == 1);
        lpoNode->succedents[0] = unblockNode;
        unblockNode->antecedents[nWndNodes] = lpoNode;
        unblockNode->antType[nWndNodes] = rf_control;

        /* connect unblock node to terminator */
        RF_ASSERT(unblockNode->numSuccedents == 1);
        RF_ASSERT(termNode->numAntecedents == 1);
        RF_ASSERT(termNode->numSuccedents == 0);
        unblockNode->succedents[0] = termNode;
        termNode->antecedents[0] = unblockNode;
        termNode->antType[0] = rf_control;
}


/******************************************************************************
 *
 * creates a DAG to perform a small-write operation (either RAID 5 or PQ),
 * which is as follows:
 *
 *                                     Header
 *                                       |
 *                                     Block
 *                                 / |  ... \   \
 *                                /  |       \   \
 *                             Rod  Rod      Rod  Rop
 *                             | \ /| \    / |  \/ |
 *                             |    |        |  /\ |
 *                             Wnd  Wnd      Wnd   X
 *                              |    \       /     |
 *                              |     \     /      |
 *                               \     \   /      Lpu
 *                                \     \ /       /
 *                                 +-> Unblock <-+
 *                                       |
 *                                       T
 *
 *
 * R = Read, W = Write, X = Xor, o = old, n = new, d = data, p = parity.
 * When the access spans a stripe unit boundary and is less than one SU in
 * size, there will be two Rop -- X -- Lpu branches.  I call this the
 * "double-XOR" case.
 * The second output from each Rod node goes to the X node.  In the double-XOR
 * case, there are exactly 2 Rod nodes, and each sends one output to one X
 * node.  There is one Rod -- Wnd -- T branch for each stripe unit being
 * updated.
 *
 * The block and unblock nodes are unused.  See comment above
 * CreateFaultFreeReadDAG.
 *
 * Note:  this DAG ignores all the optimizations related to making the RMWs
 *        atomic.  It also has the nasty property that none of the buffers
 *        allocated for reading old data & parity can be freed until the XOR
 *        node fires.  Need to fix this.
 *
 * A null qfuncs argument indicates a single-fault-tolerant array.
 *****************************************************************************/
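
/*
 * Example, following the node counts computed below: a small write that
 * spans a stripe unit boundary has asmap->parityInfo->next != NULL, so
 * numParityNodes == 2; with two stripe units accessed the DAG gets two Rod,
 * two Rop, two Wnd, two X, and two Lpu nodes.
 */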

void
rf_CommonCreateParityLoggingSmallWriteDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    RF_RedFuncs_t * pfuncs,
    RF_RedFuncs_t * qfuncs)
{
        RF_DagNode_t *xorNodes, *blockNode, *unblockNode, *nodes;
        RF_DagNode_t *readDataNodes, *readParityNodes;
        RF_DagNode_t *writeDataNodes, *lpuNodes;
        RF_DagNode_t *unlockDataNodes = NULL, *termNode;
        RF_PhysDiskAddr_t *pda = asmap->physInfo;
        int     numDataNodes = asmap->numStripeUnitsAccessed;
        int     numParityNodes = (asmap->parityInfo->next) ? 2 : 1;
        int     i, j, nNodes, totalNumNodes;
        RF_ReconUnitNum_t which_ru;
        int     (*func) (RF_DagNode_t * node), (*undoFunc) (RF_DagNode_t * node);
        int     (*qfunc) (RF_DagNode_t * node);
        char   *name, *qname;
        RF_StripeNum_t parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout), asmap->raidAddress, &which_ru);
        long    nfaults = qfuncs ? 2 : 1;
        int     lu_flag = (rf_enableAtomicRMW) ? 1 : 0; /* lock/unlock flag */

        if (rf_dagDebug)
                printf("[Creating parity-logging small-write DAG]\n");
        RF_ASSERT(numDataNodes > 0);
        RF_ASSERT(nfaults == 1);
        dag_h->creator = "ParityLoggingSmallWriteDAG";
        /* DAG creation occurs in four steps: 1. count the number of nodes
         * in the DAG; 2. create the nodes; 3. initialize the nodes;
         * 4. connect the nodes. */

        /* Step 1. compute number of nodes in the graph */

        /* Number of nodes: a read and a write for each data unit, a
         * redundancy computation node for each parity node, a read and an
         * Lpu for each parity unit, a block and an unblock node (2), and a
         * terminator node.  If atomic RMW is enabled, add an unlock node
         * for each data unit. */
        totalNumNodes = (2 * numDataNodes) + numParityNodes + (2 * numParityNodes) + 3;
        if (lu_flag)
                totalNumNodes += numDataNodes;
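
        /*
         * Worked example of the count above: for a two-unit write within a
         * single parity stripe (numDataNodes == 2, numParityNodes == 1),
         * totalNumNodes = 4 + 1 + 2 + 3 = 10, plus 2 unlock nodes when
         * lu_flag is set.
         */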

        nNodes = numDataNodes + numParityNodes;

        dag_h->numCommitNodes = numDataNodes + numParityNodes;
        dag_h->numCommits = 0;
        dag_h->numSuccedents = 1;

        /* Step 2. create the nodes */
        RF_CallocAndAdd(nodes, totalNumNodes, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);
        i = 0;
        blockNode = &nodes[i];
        i += 1;
        unblockNode = &nodes[i];
        i += 1;
        readDataNodes = &nodes[i];
        i += numDataNodes;
        readParityNodes = &nodes[i];
        i += numParityNodes;
        writeDataNodes = &nodes[i];
        i += numDataNodes;
        lpuNodes = &nodes[i];
        i += numParityNodes;
        xorNodes = &nodes[i];
        i += numParityNodes;
        termNode = &nodes[i];
        i += 1;
        if (lu_flag) {
                unlockDataNodes = &nodes[i];
                i += numDataNodes;
        }
        RF_ASSERT(i == totalNumNodes);

        /* Step 3. initialize the nodes */
        /* initialize block node (Nil) */
        rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nNodes, 0, 0, 0, dag_h, "Nil", allocList);

        /* initialize unblock node (Nil) */
        rf_InitNode(unblockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, 1, nNodes, 0, 0, dag_h, "Nil", allocList);

        /* initialize terminator node (Trm) */
        rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc, NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);

        /* initialize nodes which read old data (Rod) */
        for (i = 0; i < numDataNodes; i++) {
                rf_InitNode(&readDataNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc, rf_GenericWakeupFunc, nNodes, 1, 4, 0, dag_h, "Rod", allocList);
                RF_ASSERT(pda != NULL);
                readDataNodes[i].params[0].p = pda;     /* physical disk addr desc */
                readDataNodes[i].params[1].p = rf_AllocBuffer(raidPtr, dag_h, pda, allocList);  /* buffer to hold old data */
                readDataNodes[i].params[2].v = parityStripeID;
                readDataNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, lu_flag, 0, which_ru);
                pda = pda->next;
                readDataNodes[i].propList[0] = NULL;
                readDataNodes[i].propList[1] = NULL;
        }

        /* initialize nodes which read old parity (Rop) */
        pda = asmap->parityInfo;
        for (i = 0; i < numParityNodes; i++) {
                RF_ASSERT(pda != NULL);
                rf_InitNode(&readParityNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc, rf_GenericWakeupFunc, nNodes, 1, 4, 0, dag_h, "Rop", allocList);
                readParityNodes[i].params[0].p = pda;
                readParityNodes[i].params[1].p = rf_AllocBuffer(raidPtr, dag_h, pda, allocList);        /* buffer to hold old parity */
                readParityNodes[i].params[2].v = parityStripeID;
                readParityNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
                readParityNodes[i].propList[0] = NULL;
                pda = pda->next;
        }

        /* initialize nodes which write new data (Wnd) */
        pda = asmap->physInfo;
        for (i = 0; i < numDataNodes; i++) {
                RF_ASSERT(pda != NULL);
                rf_InitNode(&writeDataNodes[i], rf_wait, RF_TRUE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, nNodes, 4, 0, dag_h, "Wnd", allocList);
                writeDataNodes[i].params[0].p = pda;    /* physical disk addr desc */
                writeDataNodes[i].params[1].p = pda->bufPtr;    /* buffer holding new data to be written */
                writeDataNodes[i].params[2].v = parityStripeID;
                writeDataNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);

                if (lu_flag) {
                        /* initialize node to unlock the disk queue */
                        rf_InitNode(&unlockDataNodes[i], rf_wait, RF_FALSE, rf_DiskUnlockFunc, rf_DiskUnlockUndoFunc, rf_GenericWakeupFunc, 1, 1, 2, 0, dag_h, "Und", allocList);
                        unlockDataNodes[i].params[0].p = pda;   /* physical disk addr desc */
                        unlockDataNodes[i].params[1].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, lu_flag, which_ru);
                }
                pda = pda->next;
        }

        /* initialize nodes which compute new parity */
        /* We use the simple XOR func in the double-XOR case, and when we're
         * accessing only a portion of one stripe unit.  The distinction
         * between the two funcs is that the regular XOR func assumes the
         * target buffer is a full SU in size, and examines the pda
         * associated with the buffer to decide where within the buffer to
         * XOR the data, whereas the simple XOR func just XORs the data into
         * the start of the buffer. */
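        /*
         * For example, per the condition below: a write of a few sectors
         * confined to one stripe unit (numDataNodes == 1,
         * totalSectorsAccessed < sectorsPerStripeUnit) takes the simple-XOR
         * path; a full-SU rewrite of a single unit takes the regular path.
         */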
        if ((numParityNodes == 2) || ((numDataNodes == 1) && (asmap->totalSectorsAccessed < raidPtr->Layout.sectorsPerStripeUnit))) {
                func = pfuncs->simple;
                undoFunc = rf_NullNodeUndoFunc;
                name = pfuncs->SimpleName;
                if (qfuncs) {
                        qfunc = qfuncs->simple;
                        qname = qfuncs->SimpleName;
                }
        } else {
                func = pfuncs->regular;
                undoFunc = rf_NullNodeUndoFunc;
                name = pfuncs->RegularName;
                if (qfuncs) {
                        qfunc = qfuncs->regular;
                        qname = qfuncs->RegularName;
                }
        }
        /* initialize the xor nodes: params are {pda,buf} from {Rod,Wnd,Rop}
         * nodes, and raidPtr */
        if (numParityNodes == 2) {      /* double-xor case */
                for (i = 0; i < numParityNodes; i++) {
                        /* no wakeup func for xor */
                        rf_InitNode(&xorNodes[i], rf_wait, RF_TRUE, func, undoFunc, NULL, 1, nNodes, 7, 1, dag_h, name, allocList);
                        xorNodes[i].flags |= RF_DAGNODE_FLAG_YIELD;
                        xorNodes[i].params[0] = readDataNodes[i].params[0];
                        xorNodes[i].params[1] = readDataNodes[i].params[1];
                        xorNodes[i].params[2] = readParityNodes[i].params[0];
                        xorNodes[i].params[3] = readParityNodes[i].params[1];
                        xorNodes[i].params[4] = writeDataNodes[i].params[0];
                        xorNodes[i].params[5] = writeDataNodes[i].params[1];
                        xorNodes[i].params[6].p = raidPtr;
                        xorNodes[i].results[0] = readParityNodes[i].params[1].p;        /* use old parity buf as target buf */
                }
        } else {
                /* there is only one xor node in this case */
                rf_InitNode(&xorNodes[0], rf_wait, RF_TRUE, func, undoFunc, NULL, 1, nNodes, (2 * (numDataNodes + numDataNodes + 1) + 1), 1, dag_h, name, allocList);
                xorNodes[0].flags |= RF_DAGNODE_FLAG_YIELD;
                for (i = 0; i < numDataNodes + 1; i++) {
                        /* set up params related to Rod and Rop nodes.  Note
                         * that the final iteration (i == numDataNodes)
                         * picks up the single Rop node: readParityNodes
                         * immediately follows readDataNodes in the
                         * contiguous node array carved out above. */
                        xorNodes[0].params[2 * i + 0] = readDataNodes[i].params[0];     /* pda */
                        xorNodes[0].params[2 * i + 1] = readDataNodes[i].params[1];     /* buffer pointer */
                }
                for (i = 0; i < numDataNodes; i++) {
                        /* set up params related to Wnd nodes */
                        xorNodes[0].params[2 * (numDataNodes + 1 + i) + 0] = writeDataNodes[i].params[0];       /* pda */
                        xorNodes[0].params[2 * (numDataNodes + 1 + i) + 1] = writeDataNodes[i].params[1];       /* buffer pointer */
                }
                /* xor node needs to get at RAID information */
                xorNodes[0].params[2 * (numDataNodes + numDataNodes + 1)].p = raidPtr;
                xorNodes[0].results[0] = readParityNodes[0].params[1].p;
        }

        /* initialize the log node(s) */
        pda = asmap->parityInfo;
        for (i = 0; i < numParityNodes; i++) {
                RF_ASSERT(pda);
                rf_InitNode(&lpuNodes[i], rf_wait, RF_FALSE, rf_ParityLogUpdateFunc, rf_ParityLogUpdateUndoFunc, rf_GenericWakeupFunc, 1, 1, 2, 0, dag_h, "Lpu", allocList);
                lpuNodes[i].params[0].p = pda;  /* PhysDiskAddr of parity */
                lpuNodes[i].params[1].p = xorNodes[i].results[0];       /* buffer pointer to parity */
                pda = pda->next;
        }

        /* Step 4. connect the nodes */

        /* connect header to block node */
        RF_ASSERT(dag_h->numSuccedents == 1);
        RF_ASSERT(blockNode->numAntecedents == 0);
        dag_h->succedents[0] = blockNode;

        /* connect block node to read old data nodes */
        RF_ASSERT(blockNode->numSuccedents == (numDataNodes + numParityNodes));
        for (i = 0; i < numDataNodes; i++) {
                blockNode->succedents[i] = &readDataNodes[i];
                RF_ASSERT(readDataNodes[i].numAntecedents == 1);
                readDataNodes[i].antecedents[0] = blockNode;
                readDataNodes[i].antType[0] = rf_control;
        }

        /* connect block node to read old parity nodes */
        for (i = 0; i < numParityNodes; i++) {
                blockNode->succedents[numDataNodes + i] = &readParityNodes[i];
                RF_ASSERT(readParityNodes[i].numAntecedents == 1);
                readParityNodes[i].antecedents[0] = blockNode;
                readParityNodes[i].antType[0] = rf_control;
        }

        /* connect read old data nodes to write new data nodes */
        for (i = 0; i < numDataNodes; i++) {
                RF_ASSERT(readDataNodes[i].numSuccedents == numDataNodes + numParityNodes);
                for (j = 0; j < numDataNodes; j++) {
                        RF_ASSERT(writeDataNodes[j].numAntecedents == numDataNodes + numParityNodes);
                        readDataNodes[i].succedents[j] = &writeDataNodes[j];
                        writeDataNodes[j].antecedents[i] = &readDataNodes[i];
                        if (i == j)
                                writeDataNodes[j].antType[i] = rf_antiData;
                        else
                                writeDataNodes[j].antType[i] = rf_control;
                }
        }

        /* connect read old data nodes to xor nodes */
        for (i = 0; i < numDataNodes; i++)
                for (j = 0; j < numParityNodes; j++) {
                        RF_ASSERT(xorNodes[j].numAntecedents == numDataNodes + numParityNodes);
                        readDataNodes[i].succedents[numDataNodes + j] = &xorNodes[j];
                        xorNodes[j].antecedents[i] = &readDataNodes[i];
                        xorNodes[j].antType[i] = rf_trueData;
                }

        /* connect read old parity nodes to write new data nodes */
        for (i = 0; i < numParityNodes; i++) {
                RF_ASSERT(readParityNodes[i].numSuccedents == numDataNodes + numParityNodes);
                for (j = 0; j < numDataNodes; j++) {
                        readParityNodes[i].succedents[j] = &writeDataNodes[j];
                        writeDataNodes[j].antecedents[numDataNodes + i] = &readParityNodes[i];
                        writeDataNodes[j].antType[numDataNodes + i] = rf_control;
                }
        }

        /* connect read old parity nodes to xor nodes */
        for (i = 0; i < numParityNodes; i++)
                for (j = 0; j < numParityNodes; j++) {
                        readParityNodes[i].succedents[numDataNodes + j] = &xorNodes[j];
                        xorNodes[j].antecedents[numDataNodes + i] = &readParityNodes[i];
                        xorNodes[j].antType[numDataNodes + i] = rf_trueData;
                }
        /* connect xor nodes to parity log update (Lpu) nodes */
        for (i = 0; i < numParityNodes; i++) {
                RF_ASSERT(xorNodes[i].numSuccedents == 1);
                RF_ASSERT(lpuNodes[i].numAntecedents == 1);
                xorNodes[i].succedents[0] = &lpuNodes[i];
                lpuNodes[i].antecedents[0] = &xorNodes[i];
                lpuNodes[i].antType[0] = rf_trueData;
        }

        for (i = 0; i < numDataNodes; i++) {
                if (lu_flag) {
                        /* connect write new data nodes to unlock nodes */
                        RF_ASSERT(writeDataNodes[i].numSuccedents == 1);
                        RF_ASSERT(unlockDataNodes[i].numAntecedents == 1);
                        writeDataNodes[i].succedents[0] = &unlockDataNodes[i];
                        unlockDataNodes[i].antecedents[0] = &writeDataNodes[i];
                        unlockDataNodes[i].antType[0] = rf_control;

                        /* connect unlock nodes to unblock node */
                        RF_ASSERT(unlockDataNodes[i].numSuccedents == 1);
                        RF_ASSERT(unblockNode->numAntecedents == (numDataNodes + (nfaults * numParityNodes)));
                        unlockDataNodes[i].succedents[0] = unblockNode;
                        unblockNode->antecedents[i] = &unlockDataNodes[i];
                        unblockNode->antType[i] = rf_control;
                } else {
                        /* connect write new data nodes to unblock node */
                        RF_ASSERT(writeDataNodes[i].numSuccedents == 1);
                        RF_ASSERT(unblockNode->numAntecedents == (numDataNodes + (nfaults * numParityNodes)));
                        writeDataNodes[i].succedents[0] = unblockNode;
                        unblockNode->antecedents[i] = &writeDataNodes[i];
                        unblockNode->antType[i] = rf_control;
                }
        }
        /* connect Lpu nodes to unblock node */
        for (i = 0; i < numParityNodes; i++) {
                RF_ASSERT(lpuNodes[i].numSuccedents == 1);
                lpuNodes[i].succedents[0] = unblockNode;
                unblockNode->antecedents[numDataNodes + i] = &lpuNodes[i];
                unblockNode->antType[numDataNodes + i] = rf_control;
        }

        /* connect unblock node to terminator */
        RF_ASSERT(unblockNode->numSuccedents == 1);
        RF_ASSERT(termNode->numAntecedents == 1);
        RF_ASSERT(termNode->numSuccedents == 0);
        unblockNode->succedents[0] = termNode;
        termNode->antecedents[0] = unblockNode;
        termNode->antType[0] = rf_control;
}

void
rf_CreateParityLoggingSmallWriteDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    RF_RedFuncs_t * pfuncs,
    RF_RedFuncs_t * qfuncs)
{
        dag_h->creator = "ParityLoggingSmallWriteDAG";
        /* pfuncs and qfuncs are ignored: parity logging is single-fault
         * tolerant, so the standard XOR functions are hardwired here */
        rf_CommonCreateParityLoggingSmallWriteDAG(raidPtr, asmap, dag_h, bp, flags, allocList, &rf_xorFuncs, NULL);
}


void
rf_CreateParityLoggingLargeWriteDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    int nfaults,
    int (*redFunc) (RF_DagNode_t *))
{
        dag_h->creator = "ParityLoggingLargeWriteDAG";
        /* nfaults and redFunc are ignored: the DAG is always built
         * single-fault tolerant with the regular XOR function */
        rf_CommonCreateParityLoggingLargeWriteDAG(raidPtr, asmap, dag_h, bp, flags, allocList, 1, rf_RegularXorFunc);
}
#endif                          /* RF_INCLUDE_PARITYLOGGING > 0 */