/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_rw.h"


STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *,
		xfs_daddr_t, int);
STATIC xfs_buf_t *xfs_trans_buf_item_match_all(xfs_trans_t *, xfs_buftarg_t *,
		xfs_daddr_t, int);

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * Use the fast path function xfs_trans_buf_item_match() or the buffer
 * cache routine incore_match() to find the buffer
 * if it is already owned by this transaction.
 *
 * If we don't already own the buffer, use get_buf() to get it.
 * If it doesn't yet have an associated xfs_buf_log_item structure,
 * then allocate one and add the item to this transaction.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
xfs_buf_t *
xfs_trans_get_buf(xfs_trans_t	*tp,
		  xfs_buftarg_t	*target_dev,
		  xfs_daddr_t	blkno,
		  int		len,
		  uint		flags)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;

	if (flags == 0)
		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;

	/*
	 * Default to a normal get_buf() call if the tp is NULL.
	 */
	if (tp == NULL)
		return xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	if (tp->t_items.lic_next == NULL) {
		bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
	} else {
		bp = xfs_trans_buf_item_match_all(tp, target_dev, blkno, len);
	}
	if (bp != NULL) {
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
		if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
			xfs_buftrace("TRANS GET RECUR SHUT", bp);
			XFS_BUF_SUPER_STALE(bp);
		}
		/*
		 * If the buffer is stale then it was binval'ed
		 * since last read.  This doesn't matter since the
		 * caller isn't allowed to use the data anyway.
		 */
		else if (XFS_BUF_ISSTALE(bp)) {
			xfs_buftrace("TRANS GET RECUR STALE", bp);
			ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
		}
		ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		xfs_buftrace("TRANS GET RECUR", bp);
		xfs_buf_item_trace("GET RECUR", bip);
		return (bp);
	}

	/*
	 * We always specify the BUF_BUSY flag within a transaction so
	 * that get_buf does not try to push out a delayed write buffer
	 * which might cause another transaction to take place (if the
	 * buffer was delayed alloc).  Such recursive transactions can
	 * easily deadlock with our current transaction as well as cause
	 * us to run out of stack space.
	 */
	bp = xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);
	if (bp == NULL) {
		return NULL;
	}

	ASSERT(!XFS_BUF_GETERROR(bp));

	/*
	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);

	/*
	 * Set the recursion count for the buffer within this transaction
	 * to 0.
	 */
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip);

	/*
	 * Initialize b_fsprivate2 so we can find it with incore_match()
	 * above.
	 */
	XFS_BUF_SET_FSPRIVATE2(bp, tp);

	xfs_buftrace("TRANS GET", bp);
	xfs_buf_item_trace("GET", bip);
	return (bp);
}
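
/*
 * Example (illustrative sketch only; mp, blkno and len are
 * hypothetical): a typical caller gets and locks a buffer through
 * this interface, modifies it, and logs the modified range so that
 * transaction commit will write it out.  With flags == 0 the
 * function defaults to XFS_BUF_LOCK | XFS_BUF_MAPPED as above.
 *
 *	xfs_buf_t	*bp;
 *
 *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, len, 0);
 *	if (bp == NULL)
 *		return XFS_ERROR(ENOMEM);
 *	memset(XFS_BUF_PTR(bp), 0, BBTOB(len));
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(len) - 1);
 */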

/*
 * Get and lock the superblock buffer of this file system for the
 * given transaction.
 *
 * We don't need to use incore_match() here, because the superblock
 * buffer is a private buffer which we keep a pointer to in the
 * mount structure.
 */
xfs_buf_t *
xfs_trans_getsb(xfs_trans_t	*tp,
		struct xfs_mount *mp,
		int		flags)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;

	/*
	 * Default to just trying to lock the superblock buffer
	 * if tp is NULL.
	 */
	if (tp == NULL) {
		return (xfs_getsb(mp, flags));
	}

	/*
	 * If the superblock buffer already has this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = mp->m_sb_bp;
	if (XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp) {
		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		xfs_buf_item_trace("GETSB RECUR", bip);
		return (bp);
	}

	bp = xfs_getsb(mp, flags);
	if (bp == NULL) {
		return NULL;
	}

	/*
	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, mp);

	/*
	 * Set the recursion count for the buffer within this transaction
	 * to 0.
	 */
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip);

	/*
	 * Initialize b_fsprivate2 so we can find it with incore_match()
	 * above.
	 */
	XFS_BUF_SET_FSPRIVATE2(bp, tp);

	xfs_buf_item_trace("GETSB", bip);
	return (bp);
}
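
/*
 * Example (illustrative only): within a single transaction, a second
 * xfs_trans_getsb() call returns the same locked superblock buffer
 * and only bumps the lock recursion count, so each call must be
 * balanced by a release.
 *
 *	bp = xfs_trans_getsb(tp, mp, 0);	(bli_recur == 0)
 *	bp = xfs_trans_getsb(tp, mp, 0);	(same bp, bli_recur == 1)
 *	xfs_trans_brelse(tp, bp);		(recursion count drops)
 */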

#ifdef DEBUG
xfs_buftarg_t *xfs_error_target;
int	xfs_do_error;
int	xfs_req_num;
int	xfs_error_mod = 33;
#endif

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it has not yet been
 * read in, read it from disk. If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * Use the fast path function xfs_trans_buf_item_match() or the buffer
 * cache routine incore_match() to find the buffer
 * if it is already owned by this transaction.
 *
 * If we don't already own the buffer, use read_buf() to get it.
 * If it doesn't yet have an associated xfs_buf_log_item structure,
 * then allocate one and add the item to this transaction.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
int
xfs_trans_read_buf(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_buftarg_t	*target,
	xfs_daddr_t	blkno,
	int		len,
	uint		flags,
	xfs_buf_t	**bpp)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;
	int			error;

	if (flags == 0)
		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;

	/*
	 * Default to a normal get_buf() call if the tp is NULL.
	 */
	if (tp == NULL) {
		bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
		if (!bp)
			return (flags & XFS_BUF_TRYLOCK) ?
					EAGAIN : XFS_ERROR(ENOMEM);

		if (XFS_BUF_GETERROR(bp) != 0) {
			xfs_ioerror_alert("xfs_trans_read_buf", mp,
					  bp, blkno);
			error = XFS_BUF_GETERROR(bp);
			xfs_buf_relse(bp);
			return error;
		}
#ifdef DEBUG
		if (xfs_do_error) {
			if (xfs_error_target == target) {
				if (((xfs_req_num++) % xfs_error_mod) == 0) {
					xfs_buf_relse(bp);
					cmn_err(CE_DEBUG, "Returning error!\n");
					return XFS_ERROR(EIO);
				}
			}
		}
#endif
		if (XFS_FORCED_SHUTDOWN(mp))
			goto shutdown_abort;
		*bpp = bp;
		return 0;
	}

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  If it is already read in we just increment
	 * the lock recursion count and return the buffer to the caller.
	 * If the buffer is not yet read in, then we read it in, increment
	 * the lock recursion count, and return it to the caller.
	 */
	if (tp->t_items.lic_next == NULL) {
		bp = xfs_trans_buf_item_match(tp, target, blkno, len);
	} else {
		bp = xfs_trans_buf_item_match_all(tp, target, blkno, len);
	}
	if (bp != NULL) {
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
		ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
		ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
		ASSERT((XFS_BUF_ISERROR(bp)) == 0);
		if (!(XFS_BUF_ISDONE(bp))) {
			xfs_buftrace("READ_BUF_INCORE !DONE", bp);
			ASSERT(!XFS_BUF_ISASYNC(bp));
			XFS_BUF_READ(bp);
			xfsbdstrat(tp->t_mountp, bp);
			error = xfs_iowait(bp);
			if (error) {
				xfs_ioerror_alert("xfs_trans_read_buf", mp,
						  bp, blkno);
				xfs_buf_relse(bp);
				/*
				 * We can gracefully recover from most read
				 * errors. Ones we can't are those that happen
				 * after the transaction's already dirty.
				 */
				if (tp->t_flags & XFS_TRANS_DIRTY)
					xfs_force_shutdown(tp->t_mountp,
							SHUTDOWN_META_IO_ERROR);
				return error;
			}
		}
		/*
		 * We never locked this buf ourselves, so we shouldn't
		 * brelse it either. Just get out.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			xfs_buftrace("READ_BUF_INCORE XFSSHUTDN", bp);
			*bpp = NULL;
			return XFS_ERROR(EIO);
		}


		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
		bip->bli_recur++;

		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		xfs_buf_item_trace("READ RECUR", bip);
		*bpp = bp;
		return 0;
	}

	/*
	 * We always specify the BUF_BUSY flag within a transaction so
	 * that get_buf does not try to push out a delayed write buffer
	 * which might cause another transaction to take place (if the
	 * buffer was delayed alloc).  Such recursive transactions can
	 * easily deadlock with our current transaction as well as cause
	 * us to run out of stack space.
	 */
	bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
	if (bp == NULL) {
		*bpp = NULL;
		return 0;
	}
	if (XFS_BUF_GETERROR(bp) != 0) {
		XFS_BUF_SUPER_STALE(bp);
		xfs_buftrace("READ ERROR", bp);
		error = XFS_BUF_GETERROR(bp);

		xfs_ioerror_alert("xfs_trans_read_buf", mp,
				  bp, blkno);
		if (tp->t_flags & XFS_TRANS_DIRTY)
			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
		xfs_buf_relse(bp);
		return error;
	}
#ifdef DEBUG
	if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) {
		if (xfs_error_target == target) {
			if (((xfs_req_num++) % xfs_error_mod) == 0) {
				xfs_force_shutdown(tp->t_mountp,
						   SHUTDOWN_META_IO_ERROR);
				xfs_buf_relse(bp);
				cmn_err(CE_DEBUG, "Returning trans error!\n");
				return XFS_ERROR(EIO);
			}
		}
	}
#endif
	if (XFS_FORCED_SHUTDOWN(mp))
		goto shutdown_abort;

	/*
	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);

	/*
	 * Set the recursion count for the buffer within this transaction
	 * to 0.
	 */
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip);

	/*
	 * Initialize b_fsprivate2 so we can find it with incore_match()
	 * above.
	 */
	XFS_BUF_SET_FSPRIVATE2(bp, tp);

	xfs_buftrace("TRANS READ", bp);
	xfs_buf_item_trace("READ", bip);
	*bpp = bp;
	return 0;

shutdown_abort:
	/*
	 * The theory here is that the buffer is good but we're
	 * bailing out because the filesystem is being forcibly
	 * shut down.  So we should leave the b_flags alone since
	 * the buffer's not staled and just get out.
	 */
#if defined(DEBUG)
	if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
		cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp);
#endif
	ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
				     (XFS_B_STALE|XFS_B_DELWRI));

	xfs_buftrace("READ_BUF XFSSHUTDN", bp);
	xfs_buf_relse(bp);
	*bpp = NULL;
	return XFS_ERROR(EIO);
}
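
/*
 * Example (illustrative sketch only; blkno and len are hypothetical):
 * callers must check both the return code and, on success, the
 * returned buffer pointer, since a trylock miss can legitimately
 * return success with *bpp == NULL.
 *
 *	xfs_buf_t	*bp;
 *	int		error;
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
 *				   blkno, len, 0, &bp);
 *	if (error)
 *		return error;
 *	if (bp == NULL)
 *		return 0;
 *	(use the contents, then xfs_trans_log_buf() or
 *	 xfs_trans_brelse() as appropriate)
 */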


/*
 * Release the buffer bp which was previously acquired with one of the
 * xfs_trans_... buffer allocation routines if the buffer has not
 * been modified within this transaction.  If the buffer has been
 * modified within this transaction, decrement the recursion count
 * but do not release the buffer even if the count goes to 0.  If the
 * buffer is not modified within the transaction, decrement the
 * recursion count and release the buffer if the recursion count
 * goes to 0.
 *
 * If the buffer is to be released and it was not modified before
 * this transaction began, then free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * brelse() call.
 */
void
xfs_trans_brelse(xfs_trans_t	*tp,
		 xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;
	xfs_log_item_t		*lip;
	xfs_log_item_desc_t	*lidp;

	/*
	 * Default to a normal brelse() call if the tp is NULL.
	 */
	if (tp == NULL) {
		ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);
		/*
		 * If there's a buf log item attached to the buffer,
		 * then let the AIL know that the buffer is being
		 * unlocked.
		 */
		if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
			lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
			if (lip->li_type == XFS_LI_BUF) {
				bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*);
				xfs_trans_unlocked_item(bip->bli_item.li_ailp,
							lip);
			}
		}
		xfs_buf_relse(bp);
		return;
	}

	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	/*
	 * Find the item descriptor pointing to this buffer's
	 * log item.  It must be there.
	 */
	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
	ASSERT(lidp != NULL);

	/*
	 * If the release is just for a recursive lock,
	 * then decrement the count and return.
	 */
	if (bip->bli_recur > 0) {
		bip->bli_recur--;
		xfs_buf_item_trace("RELSE RECUR", bip);
		return;
	}

	/*
	 * If the buffer is dirty within this transaction, we can't
	 * release it until we commit.
	 */
	if (lidp->lid_flags & XFS_LID_DIRTY) {
		xfs_buf_item_trace("RELSE DIRTY", bip);
		return;
	}

	/*
	 * If the buffer has been invalidated, then we can't release
	 * it until the transaction commits to disk unless it is re-dirtied
	 * as part of this transaction.  This prevents us from pulling
	 * the item from the AIL before we should.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		xfs_buf_item_trace("RELSE STALE", bip);
		return;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	xfs_buf_item_trace("RELSE", bip);

	/*
	 * Free up the log item descriptor tracking the released item.
	 */
	xfs_trans_free_item(tp, lidp);

	/*
	 * Clear the hold flag in the buf log item if it is set.
	 * We wouldn't want the next user of the buffer to
	 * get confused.
	 */
	if (bip->bli_flags & XFS_BLI_HOLD) {
		bip->bli_flags &= ~XFS_BLI_HOLD;
	}

	/*
	 * Drop our reference to the buf log item.
	 */
	atomic_dec(&bip->bli_refcount);

	/*
	 * If the buf item is not tracking data in the log, then
	 * we must free it before releasing the buffer back to the
	 * free pool.  Before releasing the buffer to the free pool,
	 * clear the transaction pointer in b_fsprivate2 to dissolve
	 * its relation to this transaction.
	 */
	if (!xfs_buf_item_dirty(bip)) {
/***
		ASSERT(bp->b_pincount == 0);
***/
		ASSERT(atomic_read(&bip->bli_refcount) == 0);
		ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
		ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
		xfs_buf_item_relse(bp);
		bip = NULL;
	}
	XFS_BUF_SET_FSPRIVATE2(bp, NULL);

	/*
	 * If we've still got a buf log item on the buffer, then
	 * tell the AIL that the buffer is being unlocked.
	 */
	if (bip != NULL) {
		xfs_trans_unlocked_item(bip->bli_item.li_ailp,
					(xfs_log_item_t*)bip);
	}

	xfs_buf_relse(bp);
	return;
}
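
/*
 * Example (illustrative only): a read-only lookup can hand the buffer
 * straight back with xfs_trans_brelse(); had the buffer been logged
 * first, the same call would simply return, leaving the buffer held
 * until the transaction commits.
 *
 *	(examine the buffer contents, decide not to modify it)
 *	xfs_trans_brelse(tp, bp);
 */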

/*
 * Add the locked buffer to the transaction.
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it.  Then add the buf item to the transaction.
 */
void
xfs_trans_bjoin(xfs_trans_t	*tp,
		xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);

	/*
	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip);

	/*
	 * Initialize b_fsprivate2 so we can find it with incore_match()
	 * in xfs_trans_get_buf() and friends above.
	 */
	XFS_BUF_SET_FSPRIVATE2(bp, tp);

	xfs_buf_item_trace("BJOIN", bip);
}
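
/*
 * Example (illustrative sketch only; blkno, len, first and last are
 * hypothetical): a buffer obtained and locked outside any transaction
 * can be joined to one later, after which it behaves as if it had
 * come from xfs_trans_get_buf().
 *
 *	bp = xfs_buf_get(mp->m_ddev_targp, blkno, len, 0);
 *	if (bp) {
 *		xfs_trans_bjoin(tp, bp);
 *		xfs_trans_log_buf(tp, bp, first, last);
 *	}
 */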

/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * IOP_UNLOCK() routine is called.  The buffer must already be locked
 * and associated with the given transaction.
 */
/* ARGSUSED */
void
xfs_trans_bhold(xfs_trans_t	*tp,
		xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	bip->bli_flags |= XFS_BLI_HOLD;
	xfs_buf_item_trace("BHOLD", bip);
}

/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(xfs_trans_t	*tp,
			xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT(bip->bli_flags & XFS_BLI_HOLD);
	bip->bli_flags &= ~XFS_BLI_HOLD;
	xfs_buf_item_trace("BHOLD RELEASE", bip);
}
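
/*
 * Example (illustrative sketch only; the commit-and-restart step in
 * the middle is paraphrased, not exact): holding a buffer keeps it
 * locked across a transaction commit so it can be rejoined to a
 * follow-up transaction, the usual "rolling transaction" pattern.
 *
 *	xfs_trans_bhold(tp, bp);	(keep bp locked across commit)
 *	(commit tp, then start a new transaction ntp)
 *	xfs_trans_bjoin(ntp, bp);	(rejoin the still-locked buffer)
 */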

/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are numbers relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the
 * value of b_blkno.
 */
void
xfs_trans_log_buf(xfs_trans_t	*tp,
		  xfs_buf_t	*bp,
		  uint		first,
		  uint		last)
{
	xfs_buf_log_item_t	*bip;
	xfs_log_item_desc_t	*lidp;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
	ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp)));
	ASSERT((XFS_BUF_IODONE_FUNC(bp) == NULL) ||
	       (XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks));

	/*
	 * Mark the buffer as needing to be written out eventually,
	 * and set its iodone function to remove the buffer's buf log
	 * item from the AIL and free it when the buffer is flushed
	 * to disk.  See xfs_buf_attach_iodone() for more details
	 * on li_cb and xfs_buf_iodone_callbacks().
	 * If we end up aborting this transaction, we trap this buffer
	 * inside the b_bdstrat callback so that this won't get written to
	 * disk.
	 */
	XFS_BUF_DELAYWRITE(bp);
	XFS_BUF_DONE(bp);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
	bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone;

	/*
	 * If we invalidated the buffer within this transaction, then
	 * cancel the invalidation now that we're dirtying the buffer
	 * again.  There are no races with the code in xfs_buf_item_unpin(),
	 * because we have a reference to the buffer this entire time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		xfs_buf_item_trace("BLOG UNSTALE", bip);
		bip->bli_flags &= ~XFS_BLI_STALE;
		ASSERT(XFS_BUF_ISSTALE(bp));
		XFS_BUF_UNSTALE(bp);
		bip->bli_format.blf_flags &= ~XFS_BLI_CANCEL;
	}

	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
	ASSERT(lidp != NULL);

	tp->t_flags |= XFS_TRANS_DIRTY;
	lidp->lid_flags |= XFS_LID_DIRTY;
	lidp->lid_flags &= ~XFS_LID_BUF_STALE;
	bip->bli_flags |= XFS_BLI_LOGGED;
	xfs_buf_item_log(bip, first, last);
	xfs_buf_item_trace("BLOG", bip);
}
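
/*
 * Example (illustrative only; the field choice and agfbp are
 * hypothetical): since first and last are byte offsets from the start
 * of the buffer, a caller that updates a single on-disk field logs
 * just that range to keep the logged region small.
 *
 *	uint	first = offsetof(xfs_agf_t, agf_freeblks);
 *	uint	last  = first + sizeof(__be32) - 1;
 *
 *	xfs_trans_log_buf(tp, agfbp, first, last);
 */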


/*
 * This is called to invalidate a buffer that is being used within
 * a transaction.  Typically this is because the blocks in the
 * buffer are being freed, so we need to prevent it from being
 * written out when we're done.  Allowing it to be written again
 * might overwrite data in the free blocks if they are reallocated
 * to a file.
 *
 * We prevent the buffer from being written out by clearing the
 * B_DELWRI flag.  We can't always get rid of the buf log item at
 * this point, though, because the buffer may still be pinned by
 * another transaction.  If that is the case, then we'll wait until
 * the buffer is committed to disk for the last time (we can tell
 * by the ref count) and free it in xfs_buf_item_unpin().  Until it
 * is cleaned up we will keep the buffer locked so that the buffer
 * and buf log item are not reused.
 */
void
xfs_trans_binval(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_log_item_desc_t	*lidp;
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
	ASSERT(lidp != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * If the buffer is already invalidated, then
		 * just return.
		 */
		ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
		ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_INODE_BUF));
		ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
		ASSERT(lidp->lid_flags & XFS_LID_DIRTY);
		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
		xfs_buftrace("XFS_BINVAL RECUR", bp);
		xfs_buf_item_trace("BINVAL RECUR", bip);
		return;
	}

	/*
	 * Clear the dirty bit in the buffer and set the STALE flag
	 * in the buf log item.  The STALE flag will be used in
	 * xfs_buf_item_unpin() to determine if it should clean up
	 * when the last reference to the buf item is given up.
	 * We set the XFS_BLI_CANCEL flag in the buf log format structure
	 * and log the buf item.  This will be used at recovery time
	 * to determine that copies of the buffer in the log before
	 * this should not be replayed.
	 * We mark the item descriptor and the transaction dirty so
	 * that we'll hold the buffer until after the commit.
	 *
	 * Since we're invalidating the buffer, we also clear the state
	 * about which parts of the buffer have been logged.  We also
	 * clear the flag indicating that this is an inode buffer since
	 * the data in the buffer will no longer be valid.
	 *
	 * We set the stale bit in the buffer as well since we're getting
	 * rid of it.
	 */
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_STALE(bp);
	bip->bli_flags |= XFS_BLI_STALE;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_DIRTY);
	bip->bli_format.blf_flags &= ~XFS_BLI_INODE_BUF;
	bip->bli_format.blf_flags |= XFS_BLI_CANCEL;
	memset((char *)(bip->bli_format.blf_data_map), 0,
	      (bip->bli_format.blf_map_size * sizeof(uint)));
	lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE;
	tp->t_flags |= XFS_TRANS_DIRTY;
	xfs_buftrace("XFS_BINVAL", bp);
	xfs_buf_item_trace("BINVAL", bip);
}
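
/*
 * Example (illustrative sketch only; blkno and len are hypothetical):
 * when a transaction frees the extent backing a metadata buffer, it
 * invalidates the buffer instead of logging it, so the stale contents
 * can never be written over the now-free blocks.
 *
 *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, len, 0);
 *	if (bp)
 *		xfs_trans_binval(tp, bp);
 *	(then free the extent within the same transaction)
 */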

/*
 * This call is used to indicate that the buffer contains on-disk
 * inodes which must be handled specially during recovery.  They
 * require special handling because only the di_next_unlinked from
 * the inodes in the buffer should be recovered.  The rest of the
 * data in the buffer is logged via the inodes themselves.
 *
 * All we do is set the XFS_BLI_INODE_BUF flag in the buffer's log
 * format structure so that we'll know what to do at recovery time.
 */
/* ARGSUSED */
void
xfs_trans_inode_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_format.blf_flags |= XFS_BLI_INODE_BUF;
}

/*
 * This call is used to indicate that the buffer is going to
 * be staled and was an inode buffer.  This means it gets
 * special processing during unpin - where any inodes
 * associated with the buffer should be removed from the AIL.
 * There is also special processing during recovery:
 * any replay of the inodes in the buffer needs to be
 * prevented, as the buffer may have been reused.
 */
void
xfs_trans_stale_inode_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_STALE_INODE;
	bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))
		xfs_buf_iodone;
}


/*
 * Mark the buffer as being one which contains newly allocated
 * inodes.  We need to make sure that even if this buffer is
 * relogged as an 'inode buf' we still recover all of the inode
 * images in the face of a crash.  This works in coordination with
 * xfs_buf_item_committed() to ensure that the buffer remains in the
 * AIL at its original location even after it has been relogged.
 */
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
}


/*
 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
 * dquots. However, unlike in inode buffer recovery, dquot buffers get
 * recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
 * The only thing that makes dquot buffers different from regular
 * buffers is that we must not replay dquot bufs when recovering
 * if a _corresponding_ quotaoff has happened. We also have to distinguish
 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
 * can be turned off independently.
 */
/* ARGSUSED */
void
xfs_trans_dquot_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp,
	uint		type)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
	ASSERT(type == XFS_BLI_UDQUOT_BUF ||
	       type == XFS_BLI_PDQUOT_BUF ||
	       type == XFS_BLI_GDQUOT_BUF);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_format.blf_flags |= type;
}
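
/*
 * Example (illustrative only): a buffer holding user dquots is tagged
 * with its quota type so that recovery can skip replaying it if the
 * corresponding quota type has since been turned off.
 *
 *	xfs_trans_dquot_buf(tp, bp, XFS_BLI_UDQUOT_BUF);
 */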

/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.  Only check the first, embedded
 * chunk, since we don't want to spend all day scanning large transactions.
 */
STATIC xfs_buf_t *
xfs_trans_buf_item_match(
	xfs_trans_t	*tp,
	xfs_buftarg_t	*target,
	xfs_daddr_t	blkno,
	int		len)
{
	xfs_log_item_chunk_t	*licp;
	xfs_log_item_desc_t	*lidp;
	xfs_buf_log_item_t	*blip;
	xfs_buf_t		*bp;
	int			i;

	bp = NULL;
	len = BBTOB(len);
	licp = &tp->t_items;
	if (!xfs_lic_are_all_free(licp)) {
		for (i = 0; i < licp->lic_unused; i++) {
			/*
			 * Skip unoccupied slots.
			 */
			if (xfs_lic_isfree(licp, i)) {
				continue;
			}

			lidp = xfs_lic_slot(licp, i);
			blip = (xfs_buf_log_item_t *)lidp->lid_item;
			if (blip->bli_item.li_type != XFS_LI_BUF) {
				continue;
			}

			bp = blip->bli_buf;
			if ((XFS_BUF_TARGET(bp) == target) &&
			    (XFS_BUF_ADDR(bp) == blkno) &&
			    (XFS_BUF_COUNT(bp) == len)) {
				/*
				 * We found it.  Break out and
				 * return the pointer to the buffer.
				 */
				break;
			} else {
				bp = NULL;
			}
		}
	}
	return bp;
}

/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.  Check all the chunks, we
 * want to be thorough.
 */
STATIC xfs_buf_t *
xfs_trans_buf_item_match_all(
	xfs_trans_t	*tp,
	xfs_buftarg_t	*target,
	xfs_daddr_t	blkno,
	int		len)
{
	xfs_log_item_chunk_t	*licp;
	xfs_log_item_desc_t	*lidp;
	xfs_buf_log_item_t	*blip;
	xfs_buf_t		*bp;
	int			i;

	bp = NULL;
	len = BBTOB(len);
	for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
		if (xfs_lic_are_all_free(licp)) {
			ASSERT(licp == &tp->t_items);
			ASSERT(licp->lic_next == NULL);
			return NULL;
		}
		for (i = 0; i < licp->lic_unused; i++) {
			/*
			 * Skip unoccupied slots.
			 */
			if (xfs_lic_isfree(licp, i)) {
				continue;
			}

			lidp = xfs_lic_slot(licp, i);
			blip = (xfs_buf_log_item_t *)lidp->lid_item;
			if (blip->bli_item.li_type != XFS_LI_BUF) {
				continue;
			}

			bp = blip->bli_buf;
			if ((XFS_BUF_TARGET(bp) == target) &&
			    (XFS_BUF_ADDR(bp) == blkno) &&
			    (XFS_BUF_COUNT(bp) == len)) {
				/*
				 * We found it.  Break out and
				 * return the pointer to the buffer.
				 */
				return bp;
			}
		}
	}
	return NULL;
}