]> bbs.cooldavid.org Git - net-next-2.6.git/blame - fs/xfs/xfs_bmap.c
[XFS] Use uninitialized_var macro to stop warning about rtx
[net-next-2.6.git] / fs / xfs / xfs_bmap.c
CommitLineData
1da177e4 1/*
3e57ecf6 2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
7b718769 3 * All Rights Reserved.
1da177e4 4 *
7b718769
NS
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
1da177e4
LT
7 * published by the Free Software Foundation.
8 *
7b718769
NS
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
1da177e4 13 *
7b718769
NS
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
1da177e4 17 */
1da177e4 18#include "xfs.h"
a844f451 19#include "xfs_fs.h"
1da177e4 20#include "xfs_types.h"
a844f451 21#include "xfs_bit.h"
1da177e4 22#include "xfs_log.h"
a844f451 23#include "xfs_inum.h"
1da177e4
LT
24#include "xfs_trans.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
1da177e4 27#include "xfs_dir2.h"
a844f451 28#include "xfs_da_btree.h"
1da177e4 29#include "xfs_bmap_btree.h"
a844f451 30#include "xfs_alloc_btree.h"
1da177e4 31#include "xfs_ialloc_btree.h"
1da177e4 32#include "xfs_dir2_sf.h"
a844f451 33#include "xfs_attr_sf.h"
1da177e4 34#include "xfs_dinode.h"
1da177e4 35#include "xfs_inode.h"
a844f451
NS
36#include "xfs_btree.h"
37#include "xfs_dmapi.h"
38#include "xfs_mount.h"
39#include "xfs_ialloc.h"
1da177e4 40#include "xfs_itable.h"
f6c2d1fa
NS
41#include "xfs_dir2_data.h"
42#include "xfs_dir2_leaf.h"
43#include "xfs_dir2_block.h"
a844f451 44#include "xfs_inode_item.h"
1da177e4
LT
45#include "xfs_extfree_item.h"
46#include "xfs_alloc.h"
47#include "xfs_bmap.h"
48#include "xfs_rtalloc.h"
49#include "xfs_error.h"
d8cc890d 50#include "xfs_attr_leaf.h"
1da177e4
LT
51#include "xfs_rw.h"
52#include "xfs_quota.h"
53#include "xfs_trans_space.h"
54#include "xfs_buf_item.h"
55
56
57#ifdef DEBUG
58STATIC void
59xfs_bmap_check_leaf_extents(xfs_btree_cur_t *cur, xfs_inode_t *ip, int whichfork);
60#endif
61
62kmem_zone_t *xfs_bmap_free_item_zone;
63
64/*
65 * Prototypes for internal bmap routines.
66 */
67
68
69/*
70 * Called from xfs_bmap_add_attrfork to handle extents format files.
71 */
72STATIC int /* error */
73xfs_bmap_add_attrfork_extents(
74 xfs_trans_t *tp, /* transaction pointer */
75 xfs_inode_t *ip, /* incore inode pointer */
76 xfs_fsblock_t *firstblock, /* first block allocated */
77 xfs_bmap_free_t *flist, /* blocks to free at commit */
78 int *flags); /* inode logging flags */
79
80/*
81 * Called from xfs_bmap_add_attrfork to handle local format files.
82 */
83STATIC int /* error */
84xfs_bmap_add_attrfork_local(
85 xfs_trans_t *tp, /* transaction pointer */
86 xfs_inode_t *ip, /* incore inode pointer */
87 xfs_fsblock_t *firstblock, /* first block allocated */
88 xfs_bmap_free_t *flist, /* blocks to free at commit */
89 int *flags); /* inode logging flags */
90
91/*
4eea22f0 92 * Called by xfs_bmapi to update file extent records and the btree
1da177e4
LT
93 * after allocating space (or doing a delayed allocation).
94 */
95STATIC int /* error */
96xfs_bmap_add_extent(
97 xfs_inode_t *ip, /* incore inode pointer */
98 xfs_extnum_t idx, /* extent number to update/insert */
99 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
4eea22f0 100 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1da177e4
LT
101 xfs_fsblock_t *first, /* pointer to firstblock variable */
102 xfs_bmap_free_t *flist, /* list of extents to be freed */
103 int *logflagsp, /* inode logging flags */
3e57ecf6 104 xfs_extdelta_t *delta, /* Change made to incore extents */
1da177e4
LT
105 int whichfork, /* data or attr fork */
106 int rsvd); /* OK to allocate reserved blocks */
107
108/*
109 * Called by xfs_bmap_add_extent to handle cases converting a delayed
110 * allocation to a real allocation.
111 */
112STATIC int /* error */
113xfs_bmap_add_extent_delay_real(
114 xfs_inode_t *ip, /* incore inode pointer */
115 xfs_extnum_t idx, /* extent number to update/insert */
116 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
4eea22f0 117 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1da177e4
LT
118 xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */
119 xfs_fsblock_t *first, /* pointer to firstblock variable */
120 xfs_bmap_free_t *flist, /* list of extents to be freed */
121 int *logflagsp, /* inode logging flags */
3e57ecf6 122 xfs_extdelta_t *delta, /* Change made to incore extents */
1da177e4
LT
123 int rsvd); /* OK to allocate reserved blocks */
124
125/*
126 * Called by xfs_bmap_add_extent to handle cases converting a hole
127 * to a delayed allocation.
128 */
129STATIC int /* error */
130xfs_bmap_add_extent_hole_delay(
131 xfs_inode_t *ip, /* incore inode pointer */
132 xfs_extnum_t idx, /* extent number to update/insert */
4eea22f0 133 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1da177e4 134 int *logflagsp,/* inode logging flags */
3e57ecf6 135 xfs_extdelta_t *delta, /* Change made to incore extents */
1da177e4
LT
136 int rsvd); /* OK to allocate reserved blocks */
137
138/*
139 * Called by xfs_bmap_add_extent to handle cases converting a hole
140 * to a real allocation.
141 */
142STATIC int /* error */
143xfs_bmap_add_extent_hole_real(
144 xfs_inode_t *ip, /* incore inode pointer */
145 xfs_extnum_t idx, /* extent number to update/insert */
146 xfs_btree_cur_t *cur, /* if null, not a btree */
4eea22f0 147 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1da177e4 148 int *logflagsp, /* inode logging flags */
3e57ecf6 149 xfs_extdelta_t *delta, /* Change made to incore extents */
1da177e4
LT
150 int whichfork); /* data or attr fork */
151
152/*
153 * Called by xfs_bmap_add_extent to handle cases converting an unwritten
154 * allocation to a real allocation or vice versa.
155 */
156STATIC int /* error */
157xfs_bmap_add_extent_unwritten_real(
158 xfs_inode_t *ip, /* incore inode pointer */
159 xfs_extnum_t idx, /* extent number to update/insert */
160 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
4eea22f0 161 xfs_bmbt_irec_t *new, /* new data to add to file extents */
3e57ecf6
OW
162 int *logflagsp, /* inode logging flags */
163 xfs_extdelta_t *delta); /* Change made to incore extents */
1da177e4
LT
164
165/*
166 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
167 * It figures out where to ask the underlying allocator to put the new extent.
168 */
169STATIC int /* error */
170xfs_bmap_alloc(
171 xfs_bmalloca_t *ap); /* bmap alloc argument struct */
172
173/*
174 * Transform a btree format file with only one leaf node, where the
175 * extents list will fit in the inode, into an extents format file.
4eea22f0 176 * Since the file extents are already in-core, all we have to do is
1da177e4
LT
177 * give up the space for the btree root and pitch the leaf block.
178 */
179STATIC int /* error */
180xfs_bmap_btree_to_extents(
181 xfs_trans_t *tp, /* transaction pointer */
182 xfs_inode_t *ip, /* incore inode pointer */
183 xfs_btree_cur_t *cur, /* btree cursor */
184 int *logflagsp, /* inode logging flags */
185 int whichfork); /* data or attr fork */
186
1da177e4 187/*
4eea22f0 188 * Called by xfs_bmapi to update file extent records and the btree
1da177e4
LT
189 * after removing space (or undoing a delayed allocation).
190 */
191STATIC int /* error */
192xfs_bmap_del_extent(
193 xfs_inode_t *ip, /* incore inode pointer */
194 xfs_trans_t *tp, /* current trans pointer */
195 xfs_extnum_t idx, /* extent number to update/insert */
196 xfs_bmap_free_t *flist, /* list of extents to be freed */
197 xfs_btree_cur_t *cur, /* if null, not a btree */
4eea22f0 198 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1da177e4 199 int *logflagsp,/* inode logging flags */
3e57ecf6 200 xfs_extdelta_t *delta, /* Change made to incore extents */
1da177e4
LT
201 int whichfork, /* data or attr fork */
202 int rsvd); /* OK to allocate reserved blocks */
203
204/*
205 * Remove the entry "free" from the free item list. Prev points to the
206 * previous entry, unless "free" is the head of the list.
207 */
208STATIC void
209xfs_bmap_del_free(
210 xfs_bmap_free_t *flist, /* free item list header */
211 xfs_bmap_free_item_t *prev, /* previous item on list, if any */
212 xfs_bmap_free_item_t *free); /* list item to be freed */
213
1da177e4
LT
214/*
215 * Convert an extents-format file into a btree-format file.
216 * The new file will have a root block (in the inode) and a single child block.
217 */
218STATIC int /* error */
219xfs_bmap_extents_to_btree(
220 xfs_trans_t *tp, /* transaction pointer */
221 xfs_inode_t *ip, /* incore inode pointer */
222 xfs_fsblock_t *firstblock, /* first-block-allocated */
223 xfs_bmap_free_t *flist, /* blocks freed in xaction */
224 xfs_btree_cur_t **curp, /* cursor returned to caller */
225 int wasdel, /* converting a delayed alloc */
226 int *logflagsp, /* inode logging flags */
227 int whichfork); /* data or attr fork */
228
1da177e4
LT
229/*
230 * Convert a local file to an extents file.
231 * This code is sort of bogus, since the file data needs to get
232 * logged so it won't be lost. The bmap-level manipulations are ok, though.
233 */
234STATIC int /* error */
235xfs_bmap_local_to_extents(
236 xfs_trans_t *tp, /* transaction pointer */
237 xfs_inode_t *ip, /* incore inode pointer */
238 xfs_fsblock_t *firstblock, /* first block allocated in xaction */
239 xfs_extlen_t total, /* total blocks needed by transaction */
240 int *logflagsp, /* inode logging flags */
241 int whichfork); /* data or attr fork */
242
243/*
244 * Search the extents list for the inode, for the extent containing bno.
245 * If bno lies in a hole, point to the next entry. If bno lies past eof,
246 * *eofp will be set, and *prevp will contain the last entry (null if none).
247 * Else, *lastxp will be set to the index of the found
248 * entry; *gotp will contain the entry.
249 */
250STATIC xfs_bmbt_rec_t * /* pointer to found extent entry */
251xfs_bmap_search_extents(
252 xfs_inode_t *ip, /* incore inode pointer */
253 xfs_fileoff_t bno, /* block number searched for */
254 int whichfork, /* data or attr fork */
255 int *eofp, /* out: end of file found */
256 xfs_extnum_t *lastxp, /* out: last extent index */
257 xfs_bmbt_irec_t *gotp, /* out: extent entry found */
258 xfs_bmbt_irec_t *prevp); /* out: previous extent entry found */
259
ba0f32d4
CH
260/*
261 * Check the last inode extent to determine whether this allocation will result
262 * in blocks being allocated at the end of the file. When we allocate new data
263 * blocks at the end of the file which do not start at the previous data block,
264 * we will try to align the new blocks at stripe unit boundaries.
265 */
266STATIC int /* error */
267xfs_bmap_isaeof(
268 xfs_inode_t *ip, /* incore inode pointer */
269 xfs_fileoff_t off, /* file offset in fsblocks */
270 int whichfork, /* data or attribute fork */
271 char *aeof); /* return value */
272
1da177e4
LT
273#ifdef XFS_BMAP_TRACE
274/*
275 * Add a bmap trace buffer entry. Base routine for the others.
276 */
277STATIC void
278xfs_bmap_trace_addentry(
279 int opcode, /* operation */
280 char *fname, /* function name */
281 char *desc, /* operation description */
282 xfs_inode_t *ip, /* incore inode pointer */
283 xfs_extnum_t idx, /* index of entry(ies) */
284 xfs_extnum_t cnt, /* count of entries, 1 or 2 */
285 xfs_bmbt_rec_t *r1, /* first record */
286 xfs_bmbt_rec_t *r2, /* second record or null */
287 int whichfork); /* data or attr fork */
288
289/*
4eea22f0 290 * Add bmap trace entry prior to a call to xfs_iext_remove.
1da177e4
LT
291 */
292STATIC void
293xfs_bmap_trace_delete(
294 char *fname, /* function name */
295 char *desc, /* operation description */
296 xfs_inode_t *ip, /* incore inode pointer */
297 xfs_extnum_t idx, /* index of entry(entries) deleted */
298 xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */
299 int whichfork); /* data or attr fork */
300
301/*
4eea22f0 302 * Add bmap trace entry prior to a call to xfs_iext_insert, or
1da177e4
LT
303 * reading in the extents list from the disk (in the btree).
304 */
305STATIC void
306xfs_bmap_trace_insert(
307 char *fname, /* function name */
308 char *desc, /* operation description */
309 xfs_inode_t *ip, /* incore inode pointer */
310 xfs_extnum_t idx, /* index of entry(entries) inserted */
311 xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */
312 xfs_bmbt_irec_t *r1, /* inserted record 1 */
313 xfs_bmbt_irec_t *r2, /* inserted record 2 or null */
314 int whichfork); /* data or attr fork */
315
316/*
4eea22f0 317 * Add bmap trace entry after updating an extent record in place.
1da177e4
LT
318 */
319STATIC void
320xfs_bmap_trace_post_update(
321 char *fname, /* function name */
322 char *desc, /* operation description */
323 xfs_inode_t *ip, /* incore inode pointer */
324 xfs_extnum_t idx, /* index of entry updated */
325 int whichfork); /* data or attr fork */
326
327/*
4eea22f0 328 * Add bmap trace entry prior to updating an extent record in place.
1da177e4
LT
329 */
330STATIC void
331xfs_bmap_trace_pre_update(
332 char *fname, /* function name */
333 char *desc, /* operation description */
334 xfs_inode_t *ip, /* incore inode pointer */
335 xfs_extnum_t idx, /* index of entry to be updated */
336 int whichfork); /* data or attr fork */
337
338#else
339#define xfs_bmap_trace_delete(f,d,ip,i,c,w)
340#define xfs_bmap_trace_insert(f,d,ip,i,c,r1,r2,w)
341#define xfs_bmap_trace_post_update(f,d,ip,i,w)
342#define xfs_bmap_trace_pre_update(f,d,ip,i,w)
343#endif /* XFS_BMAP_TRACE */
344
345/*
346 * Compute the worst-case number of indirect blocks that will be used
347 * for ip's delayed extent of length "len".
348 */
349STATIC xfs_filblks_t
350xfs_bmap_worst_indlen(
351 xfs_inode_t *ip, /* incore inode pointer */
352 xfs_filblks_t len); /* delayed extent length */
353
354#ifdef DEBUG
355/*
356 * Perform various validation checks on the values being returned
357 * from xfs_bmapi().
358 */
359STATIC void
360xfs_bmap_validate_ret(
361 xfs_fileoff_t bno,
362 xfs_filblks_t len,
363 int flags,
364 xfs_bmbt_irec_t *mval,
365 int nmap,
366 int ret_nmap);
367#else
368#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
369#endif /* DEBUG */
370
371#if defined(XFS_RW_TRACE)
372STATIC void
373xfs_bunmap_trace(
374 xfs_inode_t *ip,
375 xfs_fileoff_t bno,
376 xfs_filblks_t len,
377 int flags,
378 inst_t *ra);
379#else
380#define xfs_bunmap_trace(ip, bno, len, flags, ra)
381#endif /* XFS_RW_TRACE */
382
383STATIC int
384xfs_bmap_count_tree(
385 xfs_mount_t *mp,
386 xfs_trans_t *tp,
4eea22f0 387 xfs_ifork_t *ifp,
1da177e4
LT
388 xfs_fsblock_t blockno,
389 int levelin,
390 int *count);
391
392STATIC int
393xfs_bmap_count_leaves(
4eea22f0
MK
394 xfs_ifork_t *ifp,
395 xfs_extnum_t idx,
1da177e4
LT
396 int numrecs,
397 int *count);
398
91e11088
YL
399STATIC int
400xfs_bmap_disk_count_leaves(
4eea22f0
MK
401 xfs_extnum_t idx,
402 xfs_bmbt_block_t *block,
91e11088
YL
403 int numrecs,
404 int *count);
405
1da177e4
LT
406/*
407 * Bmap internal routines.
408 */
409
410/*
411 * Called from xfs_bmap_add_attrfork to handle btree format files.
412 */
413STATIC int /* error */
414xfs_bmap_add_attrfork_btree(
415 xfs_trans_t *tp, /* transaction pointer */
416 xfs_inode_t *ip, /* incore inode pointer */
417 xfs_fsblock_t *firstblock, /* first block allocated */
418 xfs_bmap_free_t *flist, /* blocks to free at commit */
419 int *flags) /* inode logging flags */
420{
421 xfs_btree_cur_t *cur; /* btree cursor */
422 int error; /* error return value */
423 xfs_mount_t *mp; /* file system mount struct */
424 int stat; /* newroot status */
425
426 mp = ip->i_mount;
427 if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
428 *flags |= XFS_ILOG_DBROOT;
429 else {
430 cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
431 XFS_DATA_FORK);
432 cur->bc_private.b.flist = flist;
433 cur->bc_private.b.firstblock = *firstblock;
434 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
435 goto error0;
436 ASSERT(stat == 1); /* must be at least one entry */
437 if ((error = xfs_bmbt_newroot(cur, flags, &stat)))
438 goto error0;
439 if (stat == 0) {
440 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
441 return XFS_ERROR(ENOSPC);
442 }
443 *firstblock = cur->bc_private.b.firstblock;
444 cur->bc_private.b.allocated = 0;
445 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
446 }
447 return 0;
448error0:
449 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
450 return error;
451}
452
453/*
454 * Called from xfs_bmap_add_attrfork to handle extents format files.
455 */
456STATIC int /* error */
457xfs_bmap_add_attrfork_extents(
458 xfs_trans_t *tp, /* transaction pointer */
459 xfs_inode_t *ip, /* incore inode pointer */
460 xfs_fsblock_t *firstblock, /* first block allocated */
461 xfs_bmap_free_t *flist, /* blocks to free at commit */
462 int *flags) /* inode logging flags */
463{
464 xfs_btree_cur_t *cur; /* bmap btree cursor */
465 int error; /* error return value */
466
467 if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
468 return 0;
469 cur = NULL;
470 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
471 flags, XFS_DATA_FORK);
472 if (cur) {
473 cur->bc_private.b.allocated = 0;
474 xfs_btree_del_cursor(cur,
475 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
476 }
477 return error;
478}
479
480/*
481 * Called from xfs_bmap_add_attrfork to handle local format files.
482 */
483STATIC int /* error */
484xfs_bmap_add_attrfork_local(
485 xfs_trans_t *tp, /* transaction pointer */
486 xfs_inode_t *ip, /* incore inode pointer */
487 xfs_fsblock_t *firstblock, /* first block allocated */
488 xfs_bmap_free_t *flist, /* blocks to free at commit */
489 int *flags) /* inode logging flags */
490{
491 xfs_da_args_t dargs; /* args for dir/attr code */
492 int error; /* error return value */
493 xfs_mount_t *mp; /* mount structure pointer */
494
495 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
496 return 0;
497 if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
498 mp = ip->i_mount;
499 memset(&dargs, 0, sizeof(dargs));
500 dargs.dp = ip;
501 dargs.firstblock = firstblock;
502 dargs.flist = flist;
503 dargs.total = mp->m_dirblkfsbs;
504 dargs.whichfork = XFS_DATA_FORK;
505 dargs.trans = tp;
f6c2d1fa 506 error = xfs_dir2_sf_to_block(&dargs);
1da177e4
LT
507 } else
508 error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
509 XFS_DATA_FORK);
510 return error;
511}
512
513/*
4eea22f0 514 * Called by xfs_bmapi to update file extent records and the btree
1da177e4
LT
515 * after allocating space (or doing a delayed allocation).
516 */
517STATIC int /* error */
518xfs_bmap_add_extent(
519 xfs_inode_t *ip, /* incore inode pointer */
520 xfs_extnum_t idx, /* extent number to update/insert */
521 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
4eea22f0 522 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1da177e4
LT
523 xfs_fsblock_t *first, /* pointer to firstblock variable */
524 xfs_bmap_free_t *flist, /* list of extents to be freed */
525 int *logflagsp, /* inode logging flags */
3e57ecf6 526 xfs_extdelta_t *delta, /* Change made to incore extents */
1da177e4
LT
527 int whichfork, /* data or attr fork */
528 int rsvd) /* OK to use reserved data blocks */
529{
530 xfs_btree_cur_t *cur; /* btree cursor or null */
531 xfs_filblks_t da_new; /* new count del alloc blocks used */
532 xfs_filblks_t da_old; /* old count del alloc blocks used */
533 int error; /* error return value */
534#ifdef XFS_BMAP_TRACE
535 static char fname[] = "xfs_bmap_add_extent";
536#endif
537 xfs_ifork_t *ifp; /* inode fork ptr */
538 int logflags; /* returned value */
539 xfs_extnum_t nextents; /* number of extents in file now */
540
541 XFS_STATS_INC(xs_add_exlist);
542 cur = *curp;
543 ifp = XFS_IFORK_PTR(ip, whichfork);
544 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
545 ASSERT(idx <= nextents);
546 da_old = da_new = 0;
547 error = 0;
548 /*
549 * This is the first extent added to a new/empty file.
550 * Special case this one, so other routines get to assume there are
551 * already extents in the list.
552 */
553 if (nextents == 0) {
554 xfs_bmap_trace_insert(fname, "insert empty", ip, 0, 1, new,
555 NULL, whichfork);
4eea22f0 556 xfs_iext_insert(ifp, 0, 1, new);
1da177e4
LT
557 ASSERT(cur == NULL);
558 ifp->if_lastex = 0;
559 if (!ISNULLSTARTBLOCK(new->br_startblock)) {
560 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
561 logflags = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
562 } else
563 logflags = 0;
3e57ecf6
OW
564 /* DELTA: single new extent */
565 if (delta) {
566 if (delta->xed_startoff > new->br_startoff)
567 delta->xed_startoff = new->br_startoff;
568 if (delta->xed_blockcount <
569 new->br_startoff + new->br_blockcount)
570 delta->xed_blockcount = new->br_startoff +
571 new->br_blockcount;
572 }
1da177e4
LT
573 }
574 /*
575 * Any kind of new delayed allocation goes here.
576 */
577 else if (ISNULLSTARTBLOCK(new->br_startblock)) {
578 if (cur)
579 ASSERT((cur->bc_private.b.flags &
580 XFS_BTCUR_BPRV_WASDEL) == 0);
e9ed9d22 581 if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, new,
3e57ecf6 582 &logflags, delta, rsvd)))
1da177e4
LT
583 goto done;
584 }
585 /*
586 * Real allocation off the end of the file.
587 */
588 else if (idx == nextents) {
589 if (cur)
590 ASSERT((cur->bc_private.b.flags &
591 XFS_BTCUR_BPRV_WASDEL) == 0);
592 if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new,
3e57ecf6 593 &logflags, delta, whichfork)))
1da177e4
LT
594 goto done;
595 } else {
596 xfs_bmbt_irec_t prev; /* old extent at offset idx */
597
598 /*
599 * Get the record referred to by idx.
600 */
4eea22f0 601 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &prev);
1da177e4
LT
602 /*
603 * If it's a real allocation record, and the new allocation ends
604 * after the start of the referred to record, then we're filling
605 * in a delayed or unwritten allocation with a real one, or
606 * converting real back to unwritten.
607 */
608 if (!ISNULLSTARTBLOCK(new->br_startblock) &&
609 new->br_startoff + new->br_blockcount > prev.br_startoff) {
610 if (prev.br_state != XFS_EXT_UNWRITTEN &&
611 ISNULLSTARTBLOCK(prev.br_startblock)) {
612 da_old = STARTBLOCKVAL(prev.br_startblock);
613 if (cur)
614 ASSERT(cur->bc_private.b.flags &
615 XFS_BTCUR_BPRV_WASDEL);
616 if ((error = xfs_bmap_add_extent_delay_real(ip,
617 idx, &cur, new, &da_new, first, flist,
3e57ecf6 618 &logflags, delta, rsvd)))
1da177e4
LT
619 goto done;
620 } else if (new->br_state == XFS_EXT_NORM) {
621 ASSERT(new->br_state == XFS_EXT_NORM);
622 if ((error = xfs_bmap_add_extent_unwritten_real(
3e57ecf6 623 ip, idx, &cur, new, &logflags, delta)))
1da177e4
LT
624 goto done;
625 } else {
626 ASSERT(new->br_state == XFS_EXT_UNWRITTEN);
627 if ((error = xfs_bmap_add_extent_unwritten_real(
3e57ecf6 628 ip, idx, &cur, new, &logflags, delta)))
1da177e4
LT
629 goto done;
630 }
631 ASSERT(*curp == cur || *curp == NULL);
632 }
633 /*
634 * Otherwise we're filling in a hole with an allocation.
635 */
636 else {
637 if (cur)
638 ASSERT((cur->bc_private.b.flags &
639 XFS_BTCUR_BPRV_WASDEL) == 0);
640 if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur,
3e57ecf6 641 new, &logflags, delta, whichfork)))
1da177e4
LT
642 goto done;
643 }
644 }
645
646 ASSERT(*curp == cur || *curp == NULL);
647 /*
648 * Convert to a btree if necessary.
649 */
650 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
651 XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
652 int tmp_logflags; /* partial log flag return val */
653
654 ASSERT(cur == NULL);
655 error = xfs_bmap_extents_to_btree(ip->i_transp, ip, first,
656 flist, &cur, da_old > 0, &tmp_logflags, whichfork);
657 logflags |= tmp_logflags;
658 if (error)
659 goto done;
660 }
661 /*
662 * Adjust for changes in reserved delayed indirect blocks.
663 * Nothing to do for disk quotas here.
664 */
665 if (da_old || da_new) {
666 xfs_filblks_t nblks;
667
668 nblks = da_new;
669 if (cur)
670 nblks += cur->bc_private.b.allocated;
671 ASSERT(nblks <= da_old);
672 if (nblks < da_old)
673 xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
20f4ebf2 674 (int64_t)(da_old - nblks), rsvd);
1da177e4
LT
675 }
676 /*
677 * Clear out the allocated field, done with it now in any case.
678 */
679 if (cur) {
680 cur->bc_private.b.allocated = 0;
681 *curp = cur;
682 }
683done:
684#ifdef DEBUG
685 if (!error)
686 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
687#endif
688 *logflagsp = logflags;
689 return error;
690}
691
692/*
693 * Called by xfs_bmap_add_extent to handle cases converting a delayed
694 * allocation to a real allocation.
695 */
696STATIC int /* error */
697xfs_bmap_add_extent_delay_real(
698 xfs_inode_t *ip, /* incore inode pointer */
699 xfs_extnum_t idx, /* extent number to update/insert */
700 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
4eea22f0 701 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1da177e4
LT
702 xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */
703 xfs_fsblock_t *first, /* pointer to firstblock variable */
704 xfs_bmap_free_t *flist, /* list of extents to be freed */
705 int *logflagsp, /* inode logging flags */
3e57ecf6 706 xfs_extdelta_t *delta, /* Change made to incore extents */
1da177e4
LT
707 int rsvd) /* OK to use reserved data block allocation */
708{
1da177e4
LT
709 xfs_btree_cur_t *cur; /* btree cursor */
710 int diff; /* temp value */
711 xfs_bmbt_rec_t *ep; /* extent entry for idx */
712 int error; /* error return value */
713#ifdef XFS_BMAP_TRACE
714 static char fname[] = "xfs_bmap_add_extent_delay_real";
715#endif
716 int i; /* temp state */
4eea22f0 717 xfs_ifork_t *ifp; /* inode fork pointer */
1da177e4
LT
718 xfs_fileoff_t new_endoff; /* end offset of new entry */
719 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
720 /* left is 0, right is 1, prev is 2 */
721 int rval=0; /* return value (logging flags) */
722 int state = 0;/* state bits, accessed thru macros */
3e57ecf6
OW
723 xfs_filblks_t temp=0; /* value for dnew calculations */
724 xfs_filblks_t temp2=0;/* value for dnew calculations */
1da177e4
LT
725 int tmp_rval; /* partial logging flags */
726 enum { /* bit number definitions for state */
727 LEFT_CONTIG, RIGHT_CONTIG,
728 LEFT_FILLING, RIGHT_FILLING,
729 LEFT_DELAY, RIGHT_DELAY,
730 LEFT_VALID, RIGHT_VALID
731 };
732
733#define LEFT r[0]
734#define RIGHT r[1]
735#define PREV r[2]
736#define MASK(b) (1 << (b))
737#define MASK2(a,b) (MASK(a) | MASK(b))
738#define MASK3(a,b,c) (MASK2(a,b) | MASK(c))
739#define MASK4(a,b,c,d) (MASK3(a,b,c) | MASK(d))
740#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
741#define STATE_TEST(b) (state & MASK(b))
742#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
743 ((state &= ~MASK(b)), 0))
744#define SWITCH_STATE \
745 (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
746
747 /*
748 * Set up a bunch of variables to make the tests simpler.
749 */
750 cur = *curp;
4eea22f0
MK
751 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
752 ep = xfs_iext_get_ext(ifp, idx);
1da177e4
LT
753 xfs_bmbt_get_all(ep, &PREV);
754 new_endoff = new->br_startoff + new->br_blockcount;
755 ASSERT(PREV.br_startoff <= new->br_startoff);
756 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
757 /*
758 * Set flags determining what part of the previous delayed allocation
759 * extent is being replaced by a real allocation.
760 */
761 STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff);
762 STATE_SET(RIGHT_FILLING,
763 PREV.br_startoff + PREV.br_blockcount == new_endoff);
764 /*
765 * Check and set flags if this segment has a left neighbor.
766 * Don't set contiguous if the combined extent would be too large.
767 */
768 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
4eea22f0 769 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
1da177e4
LT
770 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock));
771 }
772 STATE_SET(LEFT_CONTIG,
773 STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
774 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
775 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
776 LEFT.br_state == new->br_state &&
777 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN);
778 /*
779 * Check and set flags if this segment has a right neighbor.
780 * Don't set contiguous if the combined extent would be too large.
781 * Also check for all-three-contiguous being too large.
782 */
783 if (STATE_SET_TEST(RIGHT_VALID,
784 idx <
785 ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
4eea22f0 786 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
1da177e4
LT
787 STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock));
788 }
789 STATE_SET(RIGHT_CONTIG,
790 STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
791 new_endoff == RIGHT.br_startoff &&
792 new->br_startblock + new->br_blockcount ==
793 RIGHT.br_startblock &&
794 new->br_state == RIGHT.br_state &&
795 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
796 ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) !=
797 MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) ||
798 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
799 <= MAXEXTLEN));
800 error = 0;
801 /*
802 * Switch out based on the FILLING and CONTIG state bits.
803 */
804 switch (SWITCH_STATE) {
805
806 case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
807 /*
808 * Filling in all of a previously delayed allocation extent.
809 * The left and right neighbors are both contiguous with new.
810 */
811 xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1,
812 XFS_DATA_FORK);
4eea22f0 813 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1da177e4
LT
814 LEFT.br_blockcount + PREV.br_blockcount +
815 RIGHT.br_blockcount);
816 xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1,
817 XFS_DATA_FORK);
818 xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2,
819 XFS_DATA_FORK);
4eea22f0 820 xfs_iext_remove(ifp, idx, 2);
1da177e4
LT
821 ip->i_df.if_lastex = idx - 1;
822 ip->i_d.di_nextents--;
823 if (cur == NULL)
824 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
825 else {
826 rval = XFS_ILOG_CORE;
827 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
828 RIGHT.br_startblock,
829 RIGHT.br_blockcount, &i)))
830 goto done;
831 ASSERT(i == 1);
832 if ((error = xfs_bmbt_delete(cur, &i)))
833 goto done;
834 ASSERT(i == 1);
835 if ((error = xfs_bmbt_decrement(cur, 0, &i)))
836 goto done;
837 ASSERT(i == 1);
838 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
839 LEFT.br_startblock,
840 LEFT.br_blockcount +
841 PREV.br_blockcount +
842 RIGHT.br_blockcount, LEFT.br_state)))
843 goto done;
844 }
845 *dnew = 0;
3e57ecf6
OW
846 /* DELTA: Three in-core extents are replaced by one. */
847 temp = LEFT.br_startoff;
848 temp2 = LEFT.br_blockcount +
849 PREV.br_blockcount +
850 RIGHT.br_blockcount;
1da177e4
LT
851 break;
852
853 case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG):
854 /*
855 * Filling in all of a previously delayed allocation extent.
856 * The left neighbor is contiguous, the right is not.
857 */
858 xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1,
859 XFS_DATA_FORK);
4eea22f0 860 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1da177e4
LT
861 LEFT.br_blockcount + PREV.br_blockcount);
862 xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1,
863 XFS_DATA_FORK);
864 ip->i_df.if_lastex = idx - 1;
865 xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1,
866 XFS_DATA_FORK);
4eea22f0 867 xfs_iext_remove(ifp, idx, 1);
1da177e4
LT
868 if (cur == NULL)
869 rval = XFS_ILOG_DEXT;
870 else {
871 rval = 0;
872 if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
873 LEFT.br_startblock, LEFT.br_blockcount,
874 &i)))
875 goto done;
876 ASSERT(i == 1);
877 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
878 LEFT.br_startblock,
879 LEFT.br_blockcount +
880 PREV.br_blockcount, LEFT.br_state)))
881 goto done;
882 }
883 *dnew = 0;
3e57ecf6
OW
884 /* DELTA: Two in-core extents are replaced by one. */
885 temp = LEFT.br_startoff;
886 temp2 = LEFT.br_blockcount +
887 PREV.br_blockcount;
1da177e4
LT
888 break;
889
890 case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG):
891 /*
892 * Filling in all of a previously delayed allocation extent.
893 * The right neighbor is contiguous, the left is not.
894 */
895 xfs_bmap_trace_pre_update(fname, "LF|RF|RC", ip, idx,
896 XFS_DATA_FORK);
897 xfs_bmbt_set_startblock(ep, new->br_startblock);
898 xfs_bmbt_set_blockcount(ep,
899 PREV.br_blockcount + RIGHT.br_blockcount);
900 xfs_bmap_trace_post_update(fname, "LF|RF|RC", ip, idx,
901 XFS_DATA_FORK);
902 ip->i_df.if_lastex = idx;
903 xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1,
904 XFS_DATA_FORK);
4eea22f0 905 xfs_iext_remove(ifp, idx + 1, 1);
1da177e4
LT
906 if (cur == NULL)
907 rval = XFS_ILOG_DEXT;
908 else {
909 rval = 0;
910 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
911 RIGHT.br_startblock,
912 RIGHT.br_blockcount, &i)))
913 goto done;
914 ASSERT(i == 1);
915 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
916 new->br_startblock,
917 PREV.br_blockcount +
918 RIGHT.br_blockcount, PREV.br_state)))
919 goto done;
920 }
921 *dnew = 0;
3e57ecf6
OW
922 /* DELTA: Two in-core extents are replaced by one. */
923 temp = PREV.br_startoff;
924 temp2 = PREV.br_blockcount +
925 RIGHT.br_blockcount;
1da177e4
LT
926 break;
927
928 case MASK2(LEFT_FILLING, RIGHT_FILLING):
929 /*
930 * Filling in all of a previously delayed allocation extent.
931 * Neither the left nor right neighbors are contiguous with
932 * the new one.
933 */
934 xfs_bmap_trace_pre_update(fname, "LF|RF", ip, idx,
935 XFS_DATA_FORK);
936 xfs_bmbt_set_startblock(ep, new->br_startblock);
937 xfs_bmap_trace_post_update(fname, "LF|RF", ip, idx,
938 XFS_DATA_FORK);
939 ip->i_df.if_lastex = idx;
940 ip->i_d.di_nextents++;
941 if (cur == NULL)
942 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
943 else {
944 rval = XFS_ILOG_CORE;
945 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
946 new->br_startblock, new->br_blockcount,
947 &i)))
948 goto done;
949 ASSERT(i == 0);
950 cur->bc_rec.b.br_state = XFS_EXT_NORM;
951 if ((error = xfs_bmbt_insert(cur, &i)))
952 goto done;
953 ASSERT(i == 1);
954 }
955 *dnew = 0;
3e57ecf6
OW
956 /* DELTA: The in-core extent described by new changed type. */
957 temp = new->br_startoff;
958 temp2 = new->br_blockcount;
1da177e4
LT
959 break;
960
961 case MASK2(LEFT_FILLING, LEFT_CONTIG):
962 /*
963 * Filling in the first part of a previous delayed allocation.
964 * The left neighbor is contiguous.
965 */
966 xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1,
967 XFS_DATA_FORK);
4eea22f0 968 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1da177e4
LT
969 LEFT.br_blockcount + new->br_blockcount);
970 xfs_bmbt_set_startoff(ep,
971 PREV.br_startoff + new->br_blockcount);
972 xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx - 1,
973 XFS_DATA_FORK);
974 temp = PREV.br_blockcount - new->br_blockcount;
975 xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx,
976 XFS_DATA_FORK);
977 xfs_bmbt_set_blockcount(ep, temp);
978 ip->i_df.if_lastex = idx - 1;
979 if (cur == NULL)
980 rval = XFS_ILOG_DEXT;
981 else {
982 rval = 0;
983 if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
984 LEFT.br_startblock, LEFT.br_blockcount,
985 &i)))
986 goto done;
987 ASSERT(i == 1);
988 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
989 LEFT.br_startblock,
990 LEFT.br_blockcount +
991 new->br_blockcount,
992 LEFT.br_state)))
993 goto done;
994 }
995 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
996 STARTBLOCKVAL(PREV.br_startblock));
997 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
998 xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx,
999 XFS_DATA_FORK);
1000 *dnew = temp;
3e57ecf6
OW
1001 /* DELTA: The boundary between two in-core extents moved. */
1002 temp = LEFT.br_startoff;
1003 temp2 = LEFT.br_blockcount +
1004 PREV.br_blockcount;
1da177e4
LT
1005 break;
1006
1007 case MASK(LEFT_FILLING):
1008 /*
1009 * Filling in the first part of a previous delayed allocation.
1010 * The left neighbor is not contiguous.
1011 */
1012 xfs_bmap_trace_pre_update(fname, "LF", ip, idx, XFS_DATA_FORK);
1013 xfs_bmbt_set_startoff(ep, new_endoff);
1014 temp = PREV.br_blockcount - new->br_blockcount;
1015 xfs_bmbt_set_blockcount(ep, temp);
1016 xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL,
1017 XFS_DATA_FORK);
4eea22f0 1018 xfs_iext_insert(ifp, idx, 1, new);
1da177e4
LT
1019 ip->i_df.if_lastex = idx;
1020 ip->i_d.di_nextents++;
1021 if (cur == NULL)
1022 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1023 else {
1024 rval = XFS_ILOG_CORE;
1025 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1026 new->br_startblock, new->br_blockcount,
1027 &i)))
1028 goto done;
1029 ASSERT(i == 0);
1030 cur->bc_rec.b.br_state = XFS_EXT_NORM;
1031 if ((error = xfs_bmbt_insert(cur, &i)))
1032 goto done;
1033 ASSERT(i == 1);
1034 }
1035 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1036 ip->i_d.di_nextents > ip->i_df.if_ext_max) {
1037 error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
1038 first, flist, &cur, 1, &tmp_rval,
1039 XFS_DATA_FORK);
1040 rval |= tmp_rval;
1041 if (error)
1042 goto done;
1043 }
1044 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1045 STARTBLOCKVAL(PREV.br_startblock) -
1046 (cur ? cur->bc_private.b.allocated : 0));
4eea22f0 1047 ep = xfs_iext_get_ext(ifp, idx + 1);
1da177e4
LT
1048 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
1049 xfs_bmap_trace_post_update(fname, "LF", ip, idx + 1,
1050 XFS_DATA_FORK);
1051 *dnew = temp;
3e57ecf6
OW
1052 /* DELTA: One in-core extent is split in two. */
1053 temp = PREV.br_startoff;
1054 temp2 = PREV.br_blockcount;
1da177e4
LT
1055 break;
1056
1057 case MASK2(RIGHT_FILLING, RIGHT_CONTIG):
1058 /*
1059 * Filling in the last part of a previous delayed allocation.
1060 * The right neighbor is contiguous with the new allocation.
1061 */
1062 temp = PREV.br_blockcount - new->br_blockcount;
1063 xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx,
1064 XFS_DATA_FORK);
1065 xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1,
1066 XFS_DATA_FORK);
1067 xfs_bmbt_set_blockcount(ep, temp);
4eea22f0
MK
1068 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
1069 new->br_startoff, new->br_startblock,
1da177e4
LT
1070 new->br_blockcount + RIGHT.br_blockcount,
1071 RIGHT.br_state);
1072 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1,
1073 XFS_DATA_FORK);
1074 ip->i_df.if_lastex = idx + 1;
1075 if (cur == NULL)
1076 rval = XFS_ILOG_DEXT;
1077 else {
1078 rval = 0;
1079 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1080 RIGHT.br_startblock,
1081 RIGHT.br_blockcount, &i)))
1082 goto done;
1083 ASSERT(i == 1);
1084 if ((error = xfs_bmbt_update(cur, new->br_startoff,
1085 new->br_startblock,
1086 new->br_blockcount +
1087 RIGHT.br_blockcount,
1088 RIGHT.br_state)))
1089 goto done;
1090 }
1091 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1092 STARTBLOCKVAL(PREV.br_startblock));
1093 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
1094 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx,
1095 XFS_DATA_FORK);
1096 *dnew = temp;
3e57ecf6
OW
1097 /* DELTA: The boundary between two in-core extents moved. */
1098 temp = PREV.br_startoff;
1099 temp2 = PREV.br_blockcount +
1100 RIGHT.br_blockcount;
1da177e4
LT
1101 break;
1102
1103 case MASK(RIGHT_FILLING):
1104 /*
1105 * Filling in the last part of a previous delayed allocation.
1106 * The right neighbor is not contiguous.
1107 */
1108 temp = PREV.br_blockcount - new->br_blockcount;
1109 xfs_bmap_trace_pre_update(fname, "RF", ip, idx, XFS_DATA_FORK);
1110 xfs_bmbt_set_blockcount(ep, temp);
1111 xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1,
1112 new, NULL, XFS_DATA_FORK);
4eea22f0 1113 xfs_iext_insert(ifp, idx + 1, 1, new);
1da177e4
LT
1114 ip->i_df.if_lastex = idx + 1;
1115 ip->i_d.di_nextents++;
1116 if (cur == NULL)
1117 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1118 else {
1119 rval = XFS_ILOG_CORE;
1120 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1121 new->br_startblock, new->br_blockcount,
1122 &i)))
1123 goto done;
1124 ASSERT(i == 0);
1125 cur->bc_rec.b.br_state = XFS_EXT_NORM;
1126 if ((error = xfs_bmbt_insert(cur, &i)))
1127 goto done;
1128 ASSERT(i == 1);
1129 }
1130 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1131 ip->i_d.di_nextents > ip->i_df.if_ext_max) {
1132 error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
1133 first, flist, &cur, 1, &tmp_rval,
1134 XFS_DATA_FORK);
1135 rval |= tmp_rval;
1136 if (error)
1137 goto done;
1138 }
1139 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1140 STARTBLOCKVAL(PREV.br_startblock) -
1141 (cur ? cur->bc_private.b.allocated : 0));
4eea22f0 1142 ep = xfs_iext_get_ext(ifp, idx);
1da177e4
LT
1143 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
1144 xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK);
1145 *dnew = temp;
3e57ecf6
OW
1146 /* DELTA: One in-core extent is split in two. */
1147 temp = PREV.br_startoff;
1148 temp2 = PREV.br_blockcount;
1da177e4
LT
1149 break;
1150
1151 case 0:
1152 /*
1153 * Filling in the middle part of a previous delayed allocation.
1154 * Contiguity is impossible here.
1155 * This case is avoided almost all the time.
1156 */
1157 temp = new->br_startoff - PREV.br_startoff;
1158 xfs_bmap_trace_pre_update(fname, "0", ip, idx, XFS_DATA_FORK);
1159 xfs_bmbt_set_blockcount(ep, temp);
1160 r[0] = *new;
d2133717
LM
1161 r[1].br_state = PREV.br_state;
1162 r[1].br_startblock = 0;
1da177e4
LT
1163 r[1].br_startoff = new_endoff;
1164 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
1165 r[1].br_blockcount = temp2;
1166 xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1],
1167 XFS_DATA_FORK);
4eea22f0 1168 xfs_iext_insert(ifp, idx + 1, 2, &r[0]);
1da177e4
LT
1169 ip->i_df.if_lastex = idx + 1;
1170 ip->i_d.di_nextents++;
1171 if (cur == NULL)
1172 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1173 else {
1174 rval = XFS_ILOG_CORE;
1175 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1176 new->br_startblock, new->br_blockcount,
1177 &i)))
1178 goto done;
1179 ASSERT(i == 0);
1180 cur->bc_rec.b.br_state = XFS_EXT_NORM;
1181 if ((error = xfs_bmbt_insert(cur, &i)))
1182 goto done;
1183 ASSERT(i == 1);
1184 }
1185 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1186 ip->i_d.di_nextents > ip->i_df.if_ext_max) {
1187 error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
1188 first, flist, &cur, 1, &tmp_rval,
1189 XFS_DATA_FORK);
1190 rval |= tmp_rval;
1191 if (error)
1192 goto done;
1193 }
1194 temp = xfs_bmap_worst_indlen(ip, temp);
1195 temp2 = xfs_bmap_worst_indlen(ip, temp2);
1196 diff = (int)(temp + temp2 - STARTBLOCKVAL(PREV.br_startblock) -
1197 (cur ? cur->bc_private.b.allocated : 0));
1198 if (diff > 0 &&
20f4ebf2 1199 xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) {
1da177e4
LT
1200 /*
1201 * Ick gross gag me with a spoon.
1202 */
1203 ASSERT(0); /* want to see if this ever happens! */
1204 while (diff > 0) {
1205 if (temp) {
1206 temp--;
1207 diff--;
1208 if (!diff ||
1209 !xfs_mod_incore_sb(ip->i_mount,
20f4ebf2 1210 XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd))
1da177e4
LT
1211 break;
1212 }
1213 if (temp2) {
1214 temp2--;
1215 diff--;
1216 if (!diff ||
1217 !xfs_mod_incore_sb(ip->i_mount,
20f4ebf2 1218 XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd))
1da177e4
LT
1219 break;
1220 }
1221 }
1222 }
4eea22f0 1223 ep = xfs_iext_get_ext(ifp, idx);
1da177e4
LT
1224 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
1225 xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK);
1226 xfs_bmap_trace_pre_update(fname, "0", ip, idx + 2,
1227 XFS_DATA_FORK);
4eea22f0
MK
1228 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2),
1229 NULLSTARTBLOCK((int)temp2));
1da177e4
LT
1230 xfs_bmap_trace_post_update(fname, "0", ip, idx + 2,
1231 XFS_DATA_FORK);
1232 *dnew = temp + temp2;
3e57ecf6
OW
1233 /* DELTA: One in-core extent is split in three. */
1234 temp = PREV.br_startoff;
1235 temp2 = PREV.br_blockcount;
1da177e4
LT
1236 break;
1237
1238 case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
1239 case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
1240 case MASK2(LEFT_FILLING, RIGHT_CONTIG):
1241 case MASK2(RIGHT_FILLING, LEFT_CONTIG):
1242 case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
1243 case MASK(LEFT_CONTIG):
1244 case MASK(RIGHT_CONTIG):
1245 /*
1246 * These cases are all impossible.
1247 */
1248 ASSERT(0);
1249 }
1250 *curp = cur;
3e57ecf6
OW
1251 if (delta) {
1252 temp2 += temp;
1253 if (delta->xed_startoff > temp)
1254 delta->xed_startoff = temp;
1255 if (delta->xed_blockcount < temp2)
1256 delta->xed_blockcount = temp2;
1257 }
1da177e4
LT
1258done:
1259 *logflagsp = rval;
1260 return error;
1261#undef LEFT
1262#undef RIGHT
1263#undef PREV
1264#undef MASK
1265#undef MASK2
1266#undef MASK3
1267#undef MASK4
1268#undef STATE_SET
1269#undef STATE_TEST
1270#undef STATE_SET_TEST
1271#undef SWITCH_STATE
1272}
1273
1274/*
1275 * Called by xfs_bmap_add_extent to handle cases converting an unwritten
1276 * allocation to a real allocation or vice versa.
1277 */
1278STATIC int /* error */
1279xfs_bmap_add_extent_unwritten_real(
1280 xfs_inode_t *ip, /* incore inode pointer */
1281 xfs_extnum_t idx, /* extent number to update/insert */
1282 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
4eea22f0 1283 xfs_bmbt_irec_t *new, /* new data to add to file extents */
3e57ecf6
OW
1284 int *logflagsp, /* inode logging flags */
1285 xfs_extdelta_t *delta) /* Change made to incore extents */
1da177e4 1286{
1da177e4
LT
1287 xfs_btree_cur_t *cur; /* btree cursor */
1288 xfs_bmbt_rec_t *ep; /* extent entry for idx */
1289 int error; /* error return value */
1290#ifdef XFS_BMAP_TRACE
1291 static char fname[] = "xfs_bmap_add_extent_unwritten_real";
1292#endif
1293 int i; /* temp state */
4eea22f0 1294 xfs_ifork_t *ifp; /* inode fork pointer */
1da177e4
LT
1295 xfs_fileoff_t new_endoff; /* end offset of new entry */
1296 xfs_exntst_t newext; /* new extent state */
1297 xfs_exntst_t oldext; /* old extent state */
1298 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
1299 /* left is 0, right is 1, prev is 2 */
1300 int rval=0; /* return value (logging flags) */
1301 int state = 0;/* state bits, accessed thru macros */
3e57ecf6
OW
1302 xfs_filblks_t temp=0;
1303 xfs_filblks_t temp2=0;
1da177e4
LT
1304 enum { /* bit number definitions for state */
1305 LEFT_CONTIG, RIGHT_CONTIG,
1306 LEFT_FILLING, RIGHT_FILLING,
1307 LEFT_DELAY, RIGHT_DELAY,
1308 LEFT_VALID, RIGHT_VALID
1309 };
1310
1311#define LEFT r[0]
1312#define RIGHT r[1]
1313#define PREV r[2]
1314#define MASK(b) (1 << (b))
1315#define MASK2(a,b) (MASK(a) | MASK(b))
1316#define MASK3(a,b,c) (MASK2(a,b) | MASK(c))
1317#define MASK4(a,b,c,d) (MASK3(a,b,c) | MASK(d))
1318#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
1319#define STATE_TEST(b) (state & MASK(b))
1320#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
1321 ((state &= ~MASK(b)), 0))
1322#define SWITCH_STATE \
1323 (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
1324
1325 /*
1326 * Set up a bunch of variables to make the tests simpler.
1327 */
1328 error = 0;
1329 cur = *curp;
4eea22f0
MK
1330 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1331 ep = xfs_iext_get_ext(ifp, idx);
1da177e4
LT
1332 xfs_bmbt_get_all(ep, &PREV);
1333 newext = new->br_state;
1334 oldext = (newext == XFS_EXT_UNWRITTEN) ?
1335 XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
1336 ASSERT(PREV.br_state == oldext);
1337 new_endoff = new->br_startoff + new->br_blockcount;
1338 ASSERT(PREV.br_startoff <= new->br_startoff);
1339 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1340 /*
1341 * Set flags determining what part of the previous oldext allocation
1342 * extent is being replaced by a newext allocation.
1343 */
1344 STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff);
1345 STATE_SET(RIGHT_FILLING,
1346 PREV.br_startoff + PREV.br_blockcount == new_endoff);
1347 /*
1348 * Check and set flags if this segment has a left neighbor.
1349 * Don't set contiguous if the combined extent would be too large.
1350 */
1351 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
4eea22f0 1352 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
1da177e4
LT
1353 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock));
1354 }
1355 STATE_SET(LEFT_CONTIG,
1356 STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
1357 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1358 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1359 LEFT.br_state == newext &&
1360 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN);
1361 /*
1362 * Check and set flags if this segment has a right neighbor.
1363 * Don't set contiguous if the combined extent would be too large.
1364 * Also check for all-three-contiguous being too large.
1365 */
1366 if (STATE_SET_TEST(RIGHT_VALID,
1367 idx <
1368 ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
4eea22f0 1369 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
1da177e4
LT
1370 STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock));
1371 }
1372 STATE_SET(RIGHT_CONTIG,
1373 STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
1374 new_endoff == RIGHT.br_startoff &&
1375 new->br_startblock + new->br_blockcount ==
1376 RIGHT.br_startblock &&
1377 newext == RIGHT.br_state &&
1378 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1379 ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) !=
1380 MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) ||
1381 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1382 <= MAXEXTLEN));
1383 /*
1384 * Switch out based on the FILLING and CONTIG state bits.
1385 */
1386 switch (SWITCH_STATE) {
1387
1388 case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
1389 /*
1390 * Setting all of a previous oldext extent to newext.
1391 * The left and right neighbors are both contiguous with new.
1392 */
1393 xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1,
1394 XFS_DATA_FORK);
4eea22f0 1395 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1da177e4
LT
1396 LEFT.br_blockcount + PREV.br_blockcount +
1397 RIGHT.br_blockcount);
1398 xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1,
1399 XFS_DATA_FORK);
1400 xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2,
1401 XFS_DATA_FORK);
4eea22f0 1402 xfs_iext_remove(ifp, idx, 2);
1da177e4
LT
1403 ip->i_df.if_lastex = idx - 1;
1404 ip->i_d.di_nextents -= 2;
1405 if (cur == NULL)
1406 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1407 else {
1408 rval = XFS_ILOG_CORE;
1409 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1410 RIGHT.br_startblock,
1411 RIGHT.br_blockcount, &i)))
1412 goto done;
1413 ASSERT(i == 1);
1414 if ((error = xfs_bmbt_delete(cur, &i)))
1415 goto done;
1416 ASSERT(i == 1);
1417 if ((error = xfs_bmbt_decrement(cur, 0, &i)))
1418 goto done;
1419 ASSERT(i == 1);
1420 if ((error = xfs_bmbt_delete(cur, &i)))
1421 goto done;
1422 ASSERT(i == 1);
1423 if ((error = xfs_bmbt_decrement(cur, 0, &i)))
1424 goto done;
1425 ASSERT(i == 1);
1426 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1427 LEFT.br_startblock,
1428 LEFT.br_blockcount + PREV.br_blockcount +
1429 RIGHT.br_blockcount, LEFT.br_state)))
1430 goto done;
1431 }
3e57ecf6
OW
1432 /* DELTA: Three in-core extents are replaced by one. */
1433 temp = LEFT.br_startoff;
1434 temp2 = LEFT.br_blockcount +
1435 PREV.br_blockcount +
1436 RIGHT.br_blockcount;
1da177e4
LT
1437 break;
1438
1439 case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG):
1440 /*
1441 * Setting all of a previous oldext extent to newext.
1442 * The left neighbor is contiguous, the right is not.
1443 */
1444 xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1,
1445 XFS_DATA_FORK);
4eea22f0 1446 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1da177e4
LT
1447 LEFT.br_blockcount + PREV.br_blockcount);
1448 xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1,
1449 XFS_DATA_FORK);
1450 ip->i_df.if_lastex = idx - 1;
1451 xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1,
1452 XFS_DATA_FORK);
4eea22f0 1453 xfs_iext_remove(ifp, idx, 1);
1da177e4
LT
1454 ip->i_d.di_nextents--;
1455 if (cur == NULL)
1456 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1457 else {
1458 rval = XFS_ILOG_CORE;
1459 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1460 PREV.br_startblock, PREV.br_blockcount,
1461 &i)))
1462 goto done;
1463 ASSERT(i == 1);
1464 if ((error = xfs_bmbt_delete(cur, &i)))
1465 goto done;
1466 ASSERT(i == 1);
1467 if ((error = xfs_bmbt_decrement(cur, 0, &i)))
1468 goto done;
1469 ASSERT(i == 1);
1470 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1471 LEFT.br_startblock,
1472 LEFT.br_blockcount + PREV.br_blockcount,
1473 LEFT.br_state)))
1474 goto done;
1475 }
3e57ecf6
OW
1476 /* DELTA: Two in-core extents are replaced by one. */
1477 temp = LEFT.br_startoff;
1478 temp2 = LEFT.br_blockcount +
1479 PREV.br_blockcount;
1da177e4
LT
1480 break;
1481
1482 case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG):
1483 /*
1484 * Setting all of a previous oldext extent to newext.
1485 * The right neighbor is contiguous, the left is not.
1486 */
1487 xfs_bmap_trace_pre_update(fname, "LF|RF|RC", ip, idx,
1488 XFS_DATA_FORK);
1489 xfs_bmbt_set_blockcount(ep,
1490 PREV.br_blockcount + RIGHT.br_blockcount);
1491 xfs_bmbt_set_state(ep, newext);
1492 xfs_bmap_trace_post_update(fname, "LF|RF|RC", ip, idx,
1493 XFS_DATA_FORK);
1494 ip->i_df.if_lastex = idx;
1495 xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1,
1496 XFS_DATA_FORK);
4eea22f0 1497 xfs_iext_remove(ifp, idx + 1, 1);
1da177e4
LT
1498 ip->i_d.di_nextents--;
1499 if (cur == NULL)
1500 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1501 else {
1502 rval = XFS_ILOG_CORE;
1503 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1504 RIGHT.br_startblock,
1505 RIGHT.br_blockcount, &i)))
1506 goto done;
1507 ASSERT(i == 1);
1508 if ((error = xfs_bmbt_delete(cur, &i)))
1509 goto done;
1510 ASSERT(i == 1);
1511 if ((error = xfs_bmbt_decrement(cur, 0, &i)))
1512 goto done;
1513 ASSERT(i == 1);
1514 if ((error = xfs_bmbt_update(cur, new->br_startoff,
1515 new->br_startblock,
1516 new->br_blockcount + RIGHT.br_blockcount,
1517 newext)))
1518 goto done;
1519 }
3e57ecf6
OW
1520 /* DELTA: Two in-core extents are replaced by one. */
1521 temp = PREV.br_startoff;
1522 temp2 = PREV.br_blockcount +
1523 RIGHT.br_blockcount;
1da177e4
LT
1524 break;
1525
1526 case MASK2(LEFT_FILLING, RIGHT_FILLING):
1527 /*
1528 * Setting all of a previous oldext extent to newext.
1529 * Neither the left nor right neighbors are contiguous with
1530 * the new one.
1531 */
1532 xfs_bmap_trace_pre_update(fname, "LF|RF", ip, idx,
1533 XFS_DATA_FORK);
1534 xfs_bmbt_set_state(ep, newext);
1535 xfs_bmap_trace_post_update(fname, "LF|RF", ip, idx,
1536 XFS_DATA_FORK);
1537 ip->i_df.if_lastex = idx;
1538 if (cur == NULL)
1539 rval = XFS_ILOG_DEXT;
1540 else {
1541 rval = 0;
1542 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1543 new->br_startblock, new->br_blockcount,
1544 &i)))
1545 goto done;
1546 ASSERT(i == 1);
1547 if ((error = xfs_bmbt_update(cur, new->br_startoff,
1548 new->br_startblock, new->br_blockcount,
1549 newext)))
1550 goto done;
1551 }
3e57ecf6
OW
1552 /* DELTA: The in-core extent described by new changed type. */
1553 temp = new->br_startoff;
1554 temp2 = new->br_blockcount;
1da177e4
LT
1555 break;
1556
1557 case MASK2(LEFT_FILLING, LEFT_CONTIG):
1558 /*
1559 * Setting the first part of a previous oldext extent to newext.
1560 * The left neighbor is contiguous.
1561 */
1562 xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1,
1563 XFS_DATA_FORK);
4eea22f0 1564 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1da177e4
LT
1565 LEFT.br_blockcount + new->br_blockcount);
1566 xfs_bmbt_set_startoff(ep,
1567 PREV.br_startoff + new->br_blockcount);
1568 xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx - 1,
1569 XFS_DATA_FORK);
1570 xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx,
1571 XFS_DATA_FORK);
1572 xfs_bmbt_set_startblock(ep,
1573 new->br_startblock + new->br_blockcount);
1574 xfs_bmbt_set_blockcount(ep,
1575 PREV.br_blockcount - new->br_blockcount);
1576 xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx,
1577 XFS_DATA_FORK);
1578 ip->i_df.if_lastex = idx - 1;
1579 if (cur == NULL)
1580 rval = XFS_ILOG_DEXT;
1581 else {
1582 rval = 0;
1583 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1584 PREV.br_startblock, PREV.br_blockcount,
1585 &i)))
1586 goto done;
1587 ASSERT(i == 1);
1588 if ((error = xfs_bmbt_update(cur,
1589 PREV.br_startoff + new->br_blockcount,
1590 PREV.br_startblock + new->br_blockcount,
1591 PREV.br_blockcount - new->br_blockcount,
1592 oldext)))
1593 goto done;
1594 if ((error = xfs_bmbt_decrement(cur, 0, &i)))
1595 goto done;
1596 if (xfs_bmbt_update(cur, LEFT.br_startoff,
1597 LEFT.br_startblock,
1598 LEFT.br_blockcount + new->br_blockcount,
1599 LEFT.br_state))
1600 goto done;
1601 }
3e57ecf6
OW
1602 /* DELTA: The boundary between two in-core extents moved. */
1603 temp = LEFT.br_startoff;
1604 temp2 = LEFT.br_blockcount +
1605 PREV.br_blockcount;
1da177e4
LT
1606 break;
1607
1608 case MASK(LEFT_FILLING):
1609 /*
1610 * Setting the first part of a previous oldext extent to newext.
1611 * The left neighbor is not contiguous.
1612 */
1613 xfs_bmap_trace_pre_update(fname, "LF", ip, idx, XFS_DATA_FORK);
1614 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
1615 xfs_bmbt_set_startoff(ep, new_endoff);
1616 xfs_bmbt_set_blockcount(ep,
1617 PREV.br_blockcount - new->br_blockcount);
1618 xfs_bmbt_set_startblock(ep,
1619 new->br_startblock + new->br_blockcount);
1620 xfs_bmap_trace_post_update(fname, "LF", ip, idx, XFS_DATA_FORK);
1621 xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL,
1622 XFS_DATA_FORK);
4eea22f0 1623 xfs_iext_insert(ifp, idx, 1, new);
1da177e4
LT
1624 ip->i_df.if_lastex = idx;
1625 ip->i_d.di_nextents++;
1626 if (cur == NULL)
1627 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1628 else {
1629 rval = XFS_ILOG_CORE;
1630 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1631 PREV.br_startblock, PREV.br_blockcount,
1632 &i)))
1633 goto done;
1634 ASSERT(i == 1);
1635 if ((error = xfs_bmbt_update(cur,
1636 PREV.br_startoff + new->br_blockcount,
1637 PREV.br_startblock + new->br_blockcount,
1638 PREV.br_blockcount - new->br_blockcount,
1639 oldext)))
1640 goto done;
1641 cur->bc_rec.b = *new;
1642 if ((error = xfs_bmbt_insert(cur, &i)))
1643 goto done;
1644 ASSERT(i == 1);
1645 }
3e57ecf6
OW
1646 /* DELTA: One in-core extent is split in two. */
1647 temp = PREV.br_startoff;
1648 temp2 = PREV.br_blockcount;
1da177e4
LT
1649 break;
1650
1651 case MASK2(RIGHT_FILLING, RIGHT_CONTIG):
1652 /*
1653 * Setting the last part of a previous oldext extent to newext.
1654 * The right neighbor is contiguous with the new allocation.
1655 */
1656 xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx,
1657 XFS_DATA_FORK);
1658 xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1,
1659 XFS_DATA_FORK);
1660 xfs_bmbt_set_blockcount(ep,
1661 PREV.br_blockcount - new->br_blockcount);
1662 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx,
1663 XFS_DATA_FORK);
4eea22f0
MK
1664 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
1665 new->br_startoff, new->br_startblock,
1da177e4
LT
1666 new->br_blockcount + RIGHT.br_blockcount, newext);
1667 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1,
1668 XFS_DATA_FORK);
1669 ip->i_df.if_lastex = idx + 1;
1670 if (cur == NULL)
1671 rval = XFS_ILOG_DEXT;
1672 else {
1673 rval = 0;
1674 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1675 PREV.br_startblock,
1676 PREV.br_blockcount, &i)))
1677 goto done;
1678 ASSERT(i == 1);
1679 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1680 PREV.br_startblock,
1681 PREV.br_blockcount - new->br_blockcount,
1682 oldext)))
1683 goto done;
1684 if ((error = xfs_bmbt_increment(cur, 0, &i)))
1685 goto done;
1686 if ((error = xfs_bmbt_update(cur, new->br_startoff,
1687 new->br_startblock,
1688 new->br_blockcount + RIGHT.br_blockcount,
1689 newext)))
1690 goto done;
1691 }
3e57ecf6
OW
1692 /* DELTA: The boundary between two in-core extents moved. */
1693 temp = PREV.br_startoff;
1694 temp2 = PREV.br_blockcount +
1695 RIGHT.br_blockcount;
1da177e4
LT
1696 break;
1697
1698 case MASK(RIGHT_FILLING):
1699 /*
1700 * Setting the last part of a previous oldext extent to newext.
1701 * The right neighbor is not contiguous.
1702 */
1703 xfs_bmap_trace_pre_update(fname, "RF", ip, idx, XFS_DATA_FORK);
1704 xfs_bmbt_set_blockcount(ep,
1705 PREV.br_blockcount - new->br_blockcount);
1706 xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK);
1707 xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1,
1708 new, NULL, XFS_DATA_FORK);
4eea22f0 1709 xfs_iext_insert(ifp, idx + 1, 1, new);
1da177e4
LT
1710 ip->i_df.if_lastex = idx + 1;
1711 ip->i_d.di_nextents++;
1712 if (cur == NULL)
1713 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1714 else {
1715 rval = XFS_ILOG_CORE;
1716 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1717 PREV.br_startblock, PREV.br_blockcount,
1718 &i)))
1719 goto done;
1720 ASSERT(i == 1);
1721 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1722 PREV.br_startblock,
1723 PREV.br_blockcount - new->br_blockcount,
1724 oldext)))
1725 goto done;
1726 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1727 new->br_startblock, new->br_blockcount,
1728 &i)))
1729 goto done;
1730 ASSERT(i == 0);
1731 cur->bc_rec.b.br_state = XFS_EXT_NORM;
1732 if ((error = xfs_bmbt_insert(cur, &i)))
1733 goto done;
1734 ASSERT(i == 1);
1735 }
3e57ecf6
OW
1736 /* DELTA: One in-core extent is split in two. */
1737 temp = PREV.br_startoff;
1738 temp2 = PREV.br_blockcount;
1da177e4
LT
1739 break;
1740
1741 case 0:
1742 /*
1743 * Setting the middle part of a previous oldext extent to
1744 * newext. Contiguity is impossible here.
1745 * One extent becomes three extents.
1746 */
1747 xfs_bmap_trace_pre_update(fname, "0", ip, idx, XFS_DATA_FORK);
1748 xfs_bmbt_set_blockcount(ep,
1749 new->br_startoff - PREV.br_startoff);
1750 xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK);
1751 r[0] = *new;
1752 r[1].br_startoff = new_endoff;
1753 r[1].br_blockcount =
1754 PREV.br_startoff + PREV.br_blockcount - new_endoff;
1755 r[1].br_startblock = new->br_startblock + new->br_blockcount;
1756 r[1].br_state = oldext;
1757 xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1],
1758 XFS_DATA_FORK);
4eea22f0 1759 xfs_iext_insert(ifp, idx + 1, 2, &r[0]);
1da177e4
LT
1760 ip->i_df.if_lastex = idx + 1;
1761 ip->i_d.di_nextents += 2;
1762 if (cur == NULL)
1763 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1764 else {
1765 rval = XFS_ILOG_CORE;
1766 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1767 PREV.br_startblock, PREV.br_blockcount,
1768 &i)))
1769 goto done;
1770 ASSERT(i == 1);
1771 /* new right extent - oldext */
1772 if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
1773 r[1].br_startblock, r[1].br_blockcount,
1774 r[1].br_state)))
1775 goto done;
1776 /* new left extent - oldext */
1777 PREV.br_blockcount =
1778 new->br_startoff - PREV.br_startoff;
1779 cur->bc_rec.b = PREV;
1780 if ((error = xfs_bmbt_insert(cur, &i)))
1781 goto done;
1782 ASSERT(i == 1);
1783 if ((error = xfs_bmbt_increment(cur, 0, &i)))
1784 goto done;
1785 ASSERT(i == 1);
1786 /* new middle extent - newext */
1787 cur->bc_rec.b = *new;
1788 if ((error = xfs_bmbt_insert(cur, &i)))
1789 goto done;
1790 ASSERT(i == 1);
1791 }
3e57ecf6
OW
1792 /* DELTA: One in-core extent is split in three. */
1793 temp = PREV.br_startoff;
1794 temp2 = PREV.br_blockcount;
1da177e4
LT
1795 break;
1796
1797 case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
1798 case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
1799 case MASK2(LEFT_FILLING, RIGHT_CONTIG):
1800 case MASK2(RIGHT_FILLING, LEFT_CONTIG):
1801 case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
1802 case MASK(LEFT_CONTIG):
1803 case MASK(RIGHT_CONTIG):
1804 /*
1805 * These cases are all impossible.
1806 */
1807 ASSERT(0);
1808 }
1809 *curp = cur;
3e57ecf6
OW
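	/*
	 * temp/temp2 were set in each case above to the start offset and
	 * length of the range modified by this conversion.  The block below
	 * widens the caller's delta so it spans that range; xed_blockcount
	 * is carried as an end offset at this point.
	 */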
1810 if (delta) {
1811 temp2 += temp;
1812 if (delta->xed_startoff > temp)
1813 delta->xed_startoff = temp;
1814 if (delta->xed_blockcount < temp2)
1815 delta->xed_blockcount = temp2;
1816 }
1da177e4
LT
1817done:
1818 *logflagsp = rval;
1819 return error;
1820#undef LEFT
1821#undef RIGHT
1822#undef PREV
1823#undef MASK
1824#undef MASK2
1825#undef MASK3
1826#undef MASK4
1827#undef STATE_SET
1828#undef STATE_TEST
1829#undef STATE_SET_TEST
1830#undef SWITCH_STATE
1831}
1832
1833/*
1834 * Called by xfs_bmap_add_extent to handle cases converting a hole
1835 * to a delayed allocation.
1836 */
1837/*ARGSUSED*/
1838STATIC int /* error */
1839xfs_bmap_add_extent_hole_delay(
1840 xfs_inode_t *ip, /* incore inode pointer */
1841 xfs_extnum_t idx, /* extent number to update/insert */
4eea22f0 1842 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1da177e4 1843 int *logflagsp, /* inode logging flags */
3e57ecf6 1844 xfs_extdelta_t *delta, /* Change made to incore extents */
1da177e4
LT
1845 int rsvd) /* OK to allocate reserved blocks */
1846{
4eea22f0 1847 xfs_bmbt_rec_t *ep; /* extent record for idx */
1da177e4
LT
1848#ifdef XFS_BMAP_TRACE
1849 static char fname[] = "xfs_bmap_add_extent_hole_delay";
1850#endif
4eea22f0 1851 xfs_ifork_t *ifp; /* inode fork pointer */
1da177e4
LT
1852 xfs_bmbt_irec_t left; /* left neighbor extent entry */
1853 xfs_filblks_t newlen=0; /* new indirect size */
1854 xfs_filblks_t oldlen=0; /* old indirect size */
1855 xfs_bmbt_irec_t right; /* right neighbor extent entry */
1856 int state; /* state bits, accessed thru macros */
3e57ecf6
OW
1857 xfs_filblks_t temp=0; /* temp for indirect calculations */
1858 xfs_filblks_t temp2=0;
1da177e4
LT
1859 enum { /* bit number definitions for state */
1860 LEFT_CONTIG, RIGHT_CONTIG,
1861 LEFT_DELAY, RIGHT_DELAY,
1862 LEFT_VALID, RIGHT_VALID
1863 };
1864
1865#define MASK(b) (1 << (b))
1866#define MASK2(a,b) (MASK(a) | MASK(b))
1867#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
1868#define STATE_TEST(b) (state & MASK(b))
1869#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
1870 ((state &= ~MASK(b)), 0))
1871#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
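/*
 * The state word is a small bitmask managed through the macros above.
 * SWITCH_STATE keeps only the LEFT_CONTIG and RIGHT_CONTIG bits, so the
 * switch below has exactly four arms: merge with both neighbors, merge
 * left, merge right, or insert a new delayed extent record.
 */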
1872
4eea22f0
MK
1873 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1874 ep = xfs_iext_get_ext(ifp, idx);
1da177e4
LT
1875 state = 0;
1876 ASSERT(ISNULLSTARTBLOCK(new->br_startblock));
1877 /*
1878 * Check and set flags if this segment has a left neighbor
1879 */
1880 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
4eea22f0 1881 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
1da177e4
LT
1882 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock));
1883 }
1884 /*
1885 * Check and set flags if the current (right) segment exists.
1886 * If it doesn't exist, we're converting the hole at end-of-file.
1887 */
1888 if (STATE_SET_TEST(RIGHT_VALID,
1889 idx <
1890 ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
1891 xfs_bmbt_get_all(ep, &right);
1892 STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock));
1893 }
1894 /*
1895 * Set contiguity flags on the left and right neighbors.
1896 * Don't let extents get too large, even if the pieces are contiguous.
1897 */
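	/*
	 * In particular, when the left neighbor is already mergeable, the
	 * right one is only marked contiguous if all three pieces together
	 * would still fit within a single MAXEXTLEN extent.
	 */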
1898 STATE_SET(LEFT_CONTIG,
1899 STATE_TEST(LEFT_VALID) && STATE_TEST(LEFT_DELAY) &&
1900 left.br_startoff + left.br_blockcount == new->br_startoff &&
1901 left.br_blockcount + new->br_blockcount <= MAXEXTLEN);
1902 STATE_SET(RIGHT_CONTIG,
1903 STATE_TEST(RIGHT_VALID) && STATE_TEST(RIGHT_DELAY) &&
1904 new->br_startoff + new->br_blockcount == right.br_startoff &&
1905 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
1906 (!STATE_TEST(LEFT_CONTIG) ||
1907 (left.br_blockcount + new->br_blockcount +
1908 right.br_blockcount <= MAXEXTLEN)));
1909 /*
1910 * Switch out based on the contiguity flags.
1911 */
1912 switch (SWITCH_STATE) {
1913
1914 case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
1915 /*
1916 * New allocation is contiguous with delayed allocations
1917 * on the left and on the right.
4eea22f0 1918 * Merge all three into a single extent record.
1da177e4
LT
1919 */
1920 temp = left.br_blockcount + new->br_blockcount +
1921 right.br_blockcount;
1922 xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1,
1923 XFS_DATA_FORK);
4eea22f0 1924 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
1da177e4
LT
1925 oldlen = STARTBLOCKVAL(left.br_startblock) +
1926 STARTBLOCKVAL(new->br_startblock) +
1927 STARTBLOCKVAL(right.br_startblock);
1928 newlen = xfs_bmap_worst_indlen(ip, temp);
4eea22f0
MK
1929 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
1930 NULLSTARTBLOCK((int)newlen));
1da177e4
LT
1931 xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1,
1932 XFS_DATA_FORK);
1933 xfs_bmap_trace_delete(fname, "LC|RC", ip, idx, 1,
1934 XFS_DATA_FORK);
4eea22f0 1935 xfs_iext_remove(ifp, idx, 1);
1da177e4 1936 ip->i_df.if_lastex = idx - 1;
3e57ecf6
OW
1937 /* DELTA: Two in-core extents were replaced by one. */
1938 temp2 = temp;
1939 temp = left.br_startoff;
1da177e4
LT
1940 break;
1941
1942 case MASK(LEFT_CONTIG):
1943 /*
1944 * New allocation is contiguous with a delayed allocation
1945 * on the left.
1946 * Merge the new allocation with the left neighbor.
1947 */
1948 temp = left.br_blockcount + new->br_blockcount;
1949 xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1,
1950 XFS_DATA_FORK);
4eea22f0 1951 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
1da177e4
LT
1952 oldlen = STARTBLOCKVAL(left.br_startblock) +
1953 STARTBLOCKVAL(new->br_startblock);
1954 newlen = xfs_bmap_worst_indlen(ip, temp);
4eea22f0
MK
1955 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
1956 NULLSTARTBLOCK((int)newlen));
1da177e4
LT
1957 xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1,
1958 XFS_DATA_FORK);
1959 ip->i_df.if_lastex = idx - 1;
3e57ecf6
OW
1960 /* DELTA: One in-core extent grew into a hole. */
1961 temp2 = temp;
1962 temp = left.br_startoff;
1da177e4
LT
1963 break;
1964
1965 case MASK(RIGHT_CONTIG):
1966 /*
1967 * New allocation is contiguous with a delayed allocation
1968 * on the right.
1969 * Merge the new allocation with the right neighbor.
1970 */
1971 xfs_bmap_trace_pre_update(fname, "RC", ip, idx, XFS_DATA_FORK);
1972 temp = new->br_blockcount + right.br_blockcount;
1973 oldlen = STARTBLOCKVAL(new->br_startblock) +
1974 STARTBLOCKVAL(right.br_startblock);
1975 newlen = xfs_bmap_worst_indlen(ip, temp);
1976 xfs_bmbt_set_allf(ep, new->br_startoff,
1977 NULLSTARTBLOCK((int)newlen), temp, right.br_state);
1978 xfs_bmap_trace_post_update(fname, "RC", ip, idx, XFS_DATA_FORK);
1979 ip->i_df.if_lastex = idx;
3e57ecf6
OW
1980 /* DELTA: One in-core extent grew into a hole. */
1981 temp2 = temp;
1982 temp = new->br_startoff;
1da177e4
LT
1983 break;
1984
1985 case 0:
1986 /*
1987 * New allocation is not contiguous with another
1988 * delayed allocation.
1989 * Insert a new entry.
1990 */
1991 oldlen = newlen = 0;
1992 xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL,
1993 XFS_DATA_FORK);
4eea22f0 1994 xfs_iext_insert(ifp, idx, 1, new);
1da177e4 1995 ip->i_df.if_lastex = idx;
3e57ecf6
OW
1996 /* DELTA: A new in-core extent was added in a hole. */
1997 temp2 = new->br_blockcount;
1998 temp = new->br_startoff;
1da177e4
LT
1999 break;
2000 }
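	/*
	 * oldlen and newlen are the worst-case indirect block reservations
	 * before and after the change.  A merge never increases the worst
	 * case, so any surplus reservation is handed back to the in-core
	 * free block counter below.
	 */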
2001 if (oldlen != newlen) {
2002 ASSERT(oldlen > newlen);
2003 xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
20f4ebf2 2004 (int64_t)(oldlen - newlen), rsvd);
1da177e4
LT
2005 /*
2006 * Nothing to do for disk quota accounting here.
2007 */
2008 }
3e57ecf6
OW
2009 if (delta) {
2010 temp2 += temp;
2011 if (delta->xed_startoff > temp)
2012 delta->xed_startoff = temp;
2013 if (delta->xed_blockcount < temp2)
2014 delta->xed_blockcount = temp2;
2015 }
1da177e4
LT
2016 *logflagsp = 0;
2017 return 0;
2018#undef MASK
2019#undef MASK2
2020#undef STATE_SET
2021#undef STATE_TEST
2022#undef STATE_SET_TEST
2023#undef SWITCH_STATE
2024}
2025
2026/*
2027 * Called by xfs_bmap_add_extent to handle cases converting a hole
2028 * to a real allocation.
2029 */
2030STATIC int /* error */
2031xfs_bmap_add_extent_hole_real(
2032 xfs_inode_t *ip, /* incore inode pointer */
2033 xfs_extnum_t idx, /* extent number to update/insert */
2034 xfs_btree_cur_t *cur, /* if null, not a btree */
4eea22f0 2035 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1da177e4 2036 int *logflagsp, /* inode logging flags */
3e57ecf6 2037 xfs_extdelta_t *delta, /* Change made to incore extents */
1da177e4
LT
2038 int whichfork) /* data or attr fork */
2039{
2040 xfs_bmbt_rec_t *ep; /* pointer to extent entry ins. point */
2041 int error; /* error return value */
2042#ifdef XFS_BMAP_TRACE
2043 static char fname[] = "xfs_bmap_add_extent_hole_real";
2044#endif
2045 int i; /* temp state */
2046 xfs_ifork_t *ifp; /* inode fork pointer */
2047 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2048 xfs_bmbt_irec_t right; /* right neighbor extent entry */
3e57ecf6 2049 int rval=0; /* return value (logging flags) */
1da177e4 2050 int state; /* state bits, accessed thru macros */
3e57ecf6
OW
2051 xfs_filblks_t temp=0;
2052 xfs_filblks_t temp2=0;
1da177e4
LT
2053 enum { /* bit number definitions for state */
2054 LEFT_CONTIG, RIGHT_CONTIG,
2055 LEFT_DELAY, RIGHT_DELAY,
2056 LEFT_VALID, RIGHT_VALID
2057 };
2058
2059#define MASK(b) (1 << (b))
2060#define MASK2(a,b) (MASK(a) | MASK(b))
2061#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
2062#define STATE_TEST(b) (state & MASK(b))
2063#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
2064 ((state &= ~MASK(b)), 0))
2065#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
2066
2067 ifp = XFS_IFORK_PTR(ip, whichfork);
2068 ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
4eea22f0 2069 ep = xfs_iext_get_ext(ifp, idx);
1da177e4
LT
2070 state = 0;
2071 /*
2072 * Check and set flags if this segment has a left neighbor.
2073 */
2074 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
4eea22f0 2075 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
1da177e4
LT
2076 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock));
2077 }
2078 /*
2079 * Check and set flags if this segment has a current value.
2080 * Not true if we're inserting into the "hole" at eof.
2081 */
2082 if (STATE_SET_TEST(RIGHT_VALID,
2083 idx <
2084 ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
2085 xfs_bmbt_get_all(ep, &right);
2086 STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock));
2087 }
2088 /*
2089 * We're inserting a real allocation between "left" and "right".
2090 * Set the contiguity flags. Don't let extents get too large.
2091 */
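	/*
	 * Unlike the delayed allocation case, a real extent only counts as
	 * contiguous with a neighbor when the block numbers are physically
	 * adjacent and the extent states match as well.
	 */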
2092 STATE_SET(LEFT_CONTIG,
2093 STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
2094 left.br_startoff + left.br_blockcount == new->br_startoff &&
2095 left.br_startblock + left.br_blockcount == new->br_startblock &&
2096 left.br_state == new->br_state &&
2097 left.br_blockcount + new->br_blockcount <= MAXEXTLEN);
2098 STATE_SET(RIGHT_CONTIG,
2099 STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
2100 new->br_startoff + new->br_blockcount == right.br_startoff &&
2101 new->br_startblock + new->br_blockcount ==
2102 right.br_startblock &&
2103 new->br_state == right.br_state &&
2104 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2105 (!STATE_TEST(LEFT_CONTIG) ||
2106 left.br_blockcount + new->br_blockcount +
2107 right.br_blockcount <= MAXEXTLEN));
2108
3e57ecf6 2109 error = 0;
1da177e4
LT
2110 /*
2111 * Select which case we're in here, and implement it.
2112 */
2113 switch (SWITCH_STATE) {
2114
2115 case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
2116 /*
2117 * New allocation is contiguous with real allocations on the
2118 * left and on the right.
4eea22f0 2119 * Merge all three into a single extent record.
1da177e4
LT
2120 */
2121 xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1,
2122 whichfork);
4eea22f0 2123 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1da177e4
LT
2124 left.br_blockcount + new->br_blockcount +
2125 right.br_blockcount);
2126 xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1,
2127 whichfork);
2128 xfs_bmap_trace_delete(fname, "LC|RC", ip,
2129 idx, 1, whichfork);
4eea22f0 2130 xfs_iext_remove(ifp, idx, 1);
1da177e4
LT
2131 ifp->if_lastex = idx - 1;
2132 XFS_IFORK_NEXT_SET(ip, whichfork,
2133 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2134 if (cur == NULL) {
3e57ecf6
OW
2135 rval = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
2136 } else {
2137 rval = XFS_ILOG_CORE;
2138 if ((error = xfs_bmbt_lookup_eq(cur,
2139 right.br_startoff,
2140 right.br_startblock,
2141 right.br_blockcount, &i)))
2142 goto done;
2143 ASSERT(i == 1);
2144 if ((error = xfs_bmbt_delete(cur, &i)))
2145 goto done;
2146 ASSERT(i == 1);
2147 if ((error = xfs_bmbt_decrement(cur, 0, &i)))
2148 goto done;
2149 ASSERT(i == 1);
2150 if ((error = xfs_bmbt_update(cur, left.br_startoff,
2151 left.br_startblock,
2152 left.br_blockcount +
2153 new->br_blockcount +
2154 right.br_blockcount,
2155 left.br_state)))
2156 goto done;
1da177e4 2157 }
3e57ecf6
OW
2158 /* DELTA: Two in-core extents were replaced by one. */
2159 temp = left.br_startoff;
2160 temp2 = left.br_blockcount +
2161 new->br_blockcount +
2162 right.br_blockcount;
2163 break;
1da177e4
LT
2164
2165 case MASK(LEFT_CONTIG):
2166 /*
2167 * New allocation is contiguous with a real allocation
2168 * on the left.
2169 * Merge the new allocation with the left neighbor.
2170 */
2171 xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, whichfork);
4eea22f0 2172 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1da177e4
LT
2173 left.br_blockcount + new->br_blockcount);
2174 xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, whichfork);
2175 ifp->if_lastex = idx - 1;
2176 if (cur == NULL) {
3e57ecf6
OW
2177 rval = XFS_ILOG_FEXT(whichfork);
2178 } else {
2179 rval = 0;
2180 if ((error = xfs_bmbt_lookup_eq(cur,
2181 left.br_startoff,
2182 left.br_startblock,
2183 left.br_blockcount, &i)))
2184 goto done;
2185 ASSERT(i == 1);
2186 if ((error = xfs_bmbt_update(cur, left.br_startoff,
2187 left.br_startblock,
2188 left.br_blockcount +
2189 new->br_blockcount,
2190 left.br_state)))
2191 goto done;
1da177e4 2192 }
3e57ecf6
OW
2193 /* DELTA: One in-core extent grew. */
2194 temp = left.br_startoff;
2195 temp2 = left.br_blockcount +
2196 new->br_blockcount;
2197 break;
1da177e4
LT
2198
2199 case MASK(RIGHT_CONTIG):
2200 /*
2201 * New allocation is contiguous with a real allocation
2202 * on the right.
2203 * Merge the new allocation with the right neighbor.
2204 */
2205 xfs_bmap_trace_pre_update(fname, "RC", ip, idx, whichfork);
2206 xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock,
2207 new->br_blockcount + right.br_blockcount,
2208 right.br_state);
2209 xfs_bmap_trace_post_update(fname, "RC", ip, idx, whichfork);
2210 ifp->if_lastex = idx;
2211 if (cur == NULL) {
3e57ecf6
OW
2212 rval = XFS_ILOG_FEXT(whichfork);
2213 } else {
2214 rval = 0;
2215 if ((error = xfs_bmbt_lookup_eq(cur,
2216 right.br_startoff,
2217 right.br_startblock,
2218 right.br_blockcount, &i)))
2219 goto done;
2220 ASSERT(i == 1);
2221 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2222 new->br_startblock,
2223 new->br_blockcount +
2224 right.br_blockcount,
2225 right.br_state)))
2226 goto done;
1da177e4 2227 }
3e57ecf6
OW
2228 /* DELTA: One in-core extent grew. */
2229 temp = new->br_startoff;
2230 temp2 = new->br_blockcount +
2231 right.br_blockcount;
2232 break;
1da177e4
LT
2233
2234 case 0:
2235 /*
2236 * New allocation is not contiguous with another
2237 * real allocation.
2238 * Insert a new entry.
2239 */
2240 xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL,
2241 whichfork);
4eea22f0 2242 xfs_iext_insert(ifp, idx, 1, new);
1da177e4
LT
2243 ifp->if_lastex = idx;
2244 XFS_IFORK_NEXT_SET(ip, whichfork,
2245 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2246 if (cur == NULL) {
3e57ecf6
OW
2247 rval = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
2248 } else {
2249 rval = XFS_ILOG_CORE;
2250 if ((error = xfs_bmbt_lookup_eq(cur,
2251 new->br_startoff,
2252 new->br_startblock,
2253 new->br_blockcount, &i)))
2254 goto done;
2255 ASSERT(i == 0);
2256 cur->bc_rec.b.br_state = new->br_state;
2257 if ((error = xfs_bmbt_insert(cur, &i)))
2258 goto done;
2259 ASSERT(i == 1);
1da177e4 2260 }
3e57ecf6
OW
2261 /* DELTA: A new extent was added in a hole. */
2262 temp = new->br_startoff;
2263 temp2 = new->br_blockcount;
2264 break;
1da177e4 2265 }
3e57ecf6
OW
2266 if (delta) {
2267 temp2 += temp;
2268 if (delta->xed_startoff > temp)
2269 delta->xed_startoff = temp;
2270 if (delta->xed_blockcount < temp2)
2271 delta->xed_blockcount = temp2;
2272 }
2273done:
2274 *logflagsp = rval;
2275 return error;
1da177e4
LT
2276#undef MASK
2277#undef MASK2
2278#undef STATE_SET
2279#undef STATE_TEST
2280#undef STATE_SET_TEST
2281#undef SWITCH_STATE
1da177e4
LT
2282}
2283
dd9f438e
NS
2284/*
2285 * Adjust the size of the new extent based on di_extsize and rt extsize.
2286 */
2287STATIC int
2288xfs_bmap_extsize_align(
2289 xfs_mount_t *mp,
2290 xfs_bmbt_irec_t *gotp, /* next extent pointer */
2291 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
2292 xfs_extlen_t extsz, /* align to this extent size */
2293 int rt, /* is this a realtime inode? */
2294 int eof, /* is extent at end-of-file? */
2295 int delay, /* creating delalloc extent? */
2296 int convert, /* overwriting unwritten extent? */
2297 xfs_fileoff_t *offp, /* in/out: aligned offset */
2298 xfs_extlen_t *lenp) /* in/out: aligned length */
2299{
2300 xfs_fileoff_t orig_off; /* original offset */
2301 xfs_extlen_t orig_alen; /* original length */
2302 xfs_fileoff_t orig_end; /* original off+len */
2303 xfs_fileoff_t nexto; /* next file offset */
2304 xfs_fileoff_t prevo; /* previous file offset */
2305 xfs_fileoff_t align_off; /* temp for offset */
2306 xfs_extlen_t align_alen; /* temp for length */
2307 xfs_extlen_t temp; /* temp for calculations */
2308
2309 if (convert)
2310 return 0;
2311
2312 orig_off = align_off = *offp;
2313 orig_alen = align_alen = *lenp;
2314 orig_end = orig_off + orig_alen;
2315
2316 /*
2317 * If this request overlaps an existing extent, then don't
2318 * attempt to perform any additional alignment.
2319 */
2320 if (!delay && !eof &&
2321 (orig_off >= gotp->br_startoff) &&
2322 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2323 return 0;
2324 }
2325
2326 /*
2327 * If the file offset is unaligned vs. the extent size
2328 * we need to align it. This will be possible unless
2329 * the file was previously written with a kernel that didn't
2330 * perform this alignment, or if a truncate shot us in the
2331 * foot.
2332 */
2333 temp = do_mod(orig_off, extsz);
2334 if (temp) {
2335 align_alen += temp;
2336 align_off -= temp;
2337 }
2338 /*
2339 * Same adjustment for the end of the requested area.
2340 */
2341 if ((temp = (align_alen % extsz))) {
2342 align_alen += extsz - temp;
2343 }
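	/*
	 * For example, with extsz = 16 and a request of off = 10, len = 4:
	 * the first step gives align_off = 0 and align_alen = 14, and the
	 * end rounding then pads align_alen up to 16, i.e. one fully
	 * aligned extsize chunk covering the original request.
	 */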
2344 /*
2345 * If the previous block overlaps with this proposed allocation
2346 * then move the start forward without adjusting the length.
2347 */
2348 if (prevp->br_startoff != NULLFILEOFF) {
2349 if (prevp->br_startblock == HOLESTARTBLOCK)
2350 prevo = prevp->br_startoff;
2351 else
2352 prevo = prevp->br_startoff + prevp->br_blockcount;
2353 } else
2354 prevo = 0;
2355 if (align_off != orig_off && align_off < prevo)
2356 align_off = prevo;
2357 /*
2358 * If the next block overlaps with this proposed allocation
2359 * then move the start back without adjusting the length,
2360 * but not before offset 0.
2361 * This may of course make the start overlap previous block,
2362 * and if we hit the offset 0 limit then the next block
2363 * can still overlap too.
2364 */
2365 if (!eof && gotp->br_startoff != NULLFILEOFF) {
2366 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2367 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2368 nexto = gotp->br_startoff + gotp->br_blockcount;
2369 else
2370 nexto = gotp->br_startoff;
2371 } else
2372 nexto = NULLFILEOFF;
2373 if (!eof &&
2374 align_off + align_alen != orig_end &&
2375 align_off + align_alen > nexto)
2376 align_off = nexto > align_alen ? nexto - align_alen : 0;
2377 /*
 2378 * If we're now overlapping the next or previous extent, that
2379 * means we can't fit an extsz piece in this hole. Just move
2380 * the start forward to the first valid spot and set
2381 * the length so we hit the end.
2382 */
2383 if (align_off != orig_off && align_off < prevo)
2384 align_off = prevo;
2385 if (align_off + align_alen != orig_end &&
2386 align_off + align_alen > nexto &&
2387 nexto != NULLFILEOFF) {
2388 ASSERT(nexto > prevo);
2389 align_alen = nexto - align_off;
2390 }
2391
2392 /*
2393 * If realtime, and the result isn't a multiple of the realtime
2394 * extent size we need to remove blocks until it is.
2395 */
2396 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
2397 /*
2398 * We're not covering the original request, or
2399 * we won't be able to once we fix the length.
2400 */
2401 if (orig_off < align_off ||
2402 orig_end > align_off + align_alen ||
2403 align_alen - temp < orig_alen)
2404 return XFS_ERROR(EINVAL);
2405 /*
2406 * Try to fix it by moving the start up.
2407 */
2408 if (align_off + temp <= orig_off) {
2409 align_alen -= temp;
2410 align_off += temp;
2411 }
2412 /*
2413 * Try to fix it by moving the end in.
2414 */
2415 else if (align_off + align_alen - temp >= orig_end)
2416 align_alen -= temp;
2417 /*
2418 * Set the start to the minimum then trim the length.
2419 */
2420 else {
2421 align_alen -= orig_off - align_off;
2422 align_off = orig_off;
2423 align_alen -= align_alen % mp->m_sb.sb_rextsize;
2424 }
2425 /*
2426 * Result doesn't cover the request, fail it.
2427 */
2428 if (orig_off < align_off || orig_end > align_off + align_alen)
2429 return XFS_ERROR(EINVAL);
2430 } else {
2431 ASSERT(orig_off >= align_off);
2432 ASSERT(orig_end <= align_off + align_alen);
2433 }
2434
2435#ifdef DEBUG
2436 if (!eof && gotp->br_startoff != NULLFILEOFF)
2437 ASSERT(align_off + align_alen <= gotp->br_startoff);
2438 if (prevp->br_startoff != NULLFILEOFF)
2439 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
2440#endif
2441
2442 *lenp = align_alen;
2443 *offp = align_off;
2444 return 0;
2445}
2446
1da177e4
LT
2447#define XFS_ALLOC_GAP_UNITS 4
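/*
 * XFS_ALLOC_GAP_UNITS bounds the heuristic in xfs_bmap_adjacent() below:
 * a neighboring extent's block number is only projected across a file
 * offset gap when the gap is at most this many times the allocation
 * length; larger gaps just reuse the neighbor's end (or start) block.
 */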
2448
dd9f438e 2449STATIC int
a365bdd5 2450xfs_bmap_adjacent(
1da177e4
LT
2451 xfs_bmalloca_t *ap) /* bmap alloc argument struct */
2452{
2453 xfs_fsblock_t adjust; /* adjustment to block numbers */
1da177e4
LT
2454 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
2455 xfs_mount_t *mp; /* mount point structure */
2456 int nullfb; /* true if ap->firstblock isn't set */
2457 int rt; /* true if inode is realtime */
1da177e4
LT
2458
2459#define ISVALID(x,y) \
2460 (rt ? \
2461 (x) < mp->m_sb.sb_rblocks : \
2462 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
2463 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
2464 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
2465
1da177e4
LT
2466 mp = ap->ip->i_mount;
2467 nullfb = ap->firstblock == NULLFSBLOCK;
2468 rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
2469 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
1da177e4
LT
2470 /*
2471 * If allocating at eof, and there's a previous real block,
 2472 * try to use its last block as our starting point.
2473 */
2474 if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF &&
2475 !ISNULLSTARTBLOCK(ap->prevp->br_startblock) &&
2476 ISVALID(ap->prevp->br_startblock + ap->prevp->br_blockcount,
2477 ap->prevp->br_startblock)) {
2478 ap->rval = ap->prevp->br_startblock + ap->prevp->br_blockcount;
2479 /*
2480 * Adjust for the gap between prevp and us.
2481 */
2482 adjust = ap->off -
2483 (ap->prevp->br_startoff + ap->prevp->br_blockcount);
2484 if (adjust &&
2485 ISVALID(ap->rval + adjust, ap->prevp->br_startblock))
2486 ap->rval += adjust;
2487 }
2488 /*
2489 * If not at eof, then compare the two neighbor blocks.
2490 * Figure out whether either one gives us a good starting point,
2491 * and pick the better one.
2492 */
2493 else if (!ap->eof) {
2494 xfs_fsblock_t gotbno; /* right side block number */
2495 xfs_fsblock_t gotdiff=0; /* right side difference */
2496 xfs_fsblock_t prevbno; /* left side block number */
2497 xfs_fsblock_t prevdiff=0; /* left side difference */
2498
2499 /*
2500 * If there's a previous (left) block, select a requested
2501 * start block based on it.
2502 */
2503 if (ap->prevp->br_startoff != NULLFILEOFF &&
2504 !ISNULLSTARTBLOCK(ap->prevp->br_startblock) &&
2505 (prevbno = ap->prevp->br_startblock +
2506 ap->prevp->br_blockcount) &&
2507 ISVALID(prevbno, ap->prevp->br_startblock)) {
2508 /*
2509 * Calculate gap to end of previous block.
2510 */
2511 adjust = prevdiff = ap->off -
2512 (ap->prevp->br_startoff +
2513 ap->prevp->br_blockcount);
2514 /*
2515 * Figure the startblock based on the previous block's
2516 * end and the gap size.
2517 * Heuristic!
2518 * If the gap is large relative to the piece we're
2519 * allocating, or using it gives us an invalid block
2520 * number, then just use the end of the previous block.
2521 */
2522 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
2523 ISVALID(prevbno + prevdiff,
2524 ap->prevp->br_startblock))
2525 prevbno += adjust;
2526 else
2527 prevdiff += adjust;
2528 /*
2529 * If the firstblock forbids it, can't use it,
2530 * must use default.
2531 */
2532 if (!rt && !nullfb &&
2533 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
2534 prevbno = NULLFSBLOCK;
2535 }
2536 /*
2537 * No previous block or can't follow it, just default.
2538 */
2539 else
2540 prevbno = NULLFSBLOCK;
2541 /*
2542 * If there's a following (right) block, select a requested
2543 * start block based on it.
2544 */
2545 if (!ISNULLSTARTBLOCK(ap->gotp->br_startblock)) {
2546 /*
2547 * Calculate gap to start of next block.
2548 */
2549 adjust = gotdiff = ap->gotp->br_startoff - ap->off;
2550 /*
2551 * Figure the startblock based on the next block's
2552 * start and the gap size.
2553 */
2554 gotbno = ap->gotp->br_startblock;
2555 /*
2556 * Heuristic!
2557 * If the gap is large relative to the piece we're
2558 * allocating, or using it gives us an invalid block
2559 * number, then just use the start of the next block
2560 * offset by our length.
2561 */
2562 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
2563 ISVALID(gotbno - gotdiff, gotbno))
2564 gotbno -= adjust;
2565 else if (ISVALID(gotbno - ap->alen, gotbno)) {
2566 gotbno -= ap->alen;
2567 gotdiff += adjust - ap->alen;
2568 } else
2569 gotdiff += adjust;
2570 /*
2571 * If the firstblock forbids it, can't use it,
2572 * must use default.
2573 */
2574 if (!rt && !nullfb &&
2575 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
2576 gotbno = NULLFSBLOCK;
2577 }
2578 /*
2579 * No next block, just default.
2580 */
2581 else
2582 gotbno = NULLFSBLOCK;
2583 /*
2584 * If both valid, pick the better one, else the only good
2585 * one, else ap->rval is already set (to 0 or the inode block).
2586 */
2587 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
2588 ap->rval = prevdiff <= gotdiff ? prevbno : gotbno;
2589 else if (prevbno != NULLFSBLOCK)
2590 ap->rval = prevbno;
2591 else if (gotbno != NULLFSBLOCK)
2592 ap->rval = gotbno;
2593 }
a365bdd5
NS
2594#undef ISVALID
2595 return 0;
2596}
2597
2598STATIC int
2599xfs_bmap_rtalloc(
2600 xfs_bmalloca_t *ap) /* bmap alloc argument struct */
2601{
2602 xfs_alloctype_t atype = 0; /* type for allocation routines */
2603 int error; /* error return value */
2604 xfs_mount_t *mp; /* mount point structure */
2605 xfs_extlen_t prod = 0; /* product factor for allocators */
2606 xfs_extlen_t ralen = 0; /* realtime allocation length */
2607 xfs_extlen_t align; /* minimum allocation alignment */
a365bdd5
NS
2608 xfs_rtblock_t rtb;
2609
2610 mp = ap->ip->i_mount;
957d0ebe 2611 align = xfs_get_extsz_hint(ap->ip);
a365bdd5
NS
2612 prod = align / mp->m_sb.sb_rextsize;
2613 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2614 align, 1, ap->eof, 0,
2615 ap->conv, &ap->off, &ap->alen);
2616 if (error)
2617 return error;
2618 ASSERT(ap->alen);
2619 ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0);
2620
2621 /*
2622 * If the offset & length are not perfectly aligned
2623 * then kill prod, it will just get us in trouble.
2624 */
2625 if (do_mod(ap->off, align) || ap->alen % align)
2626 prod = 1;
2627 /*
2628 * Set ralen to be the actual requested length in rtextents.
2629 */
2630 ralen = ap->alen / mp->m_sb.sb_rextsize;
2631 /*
2632 * If the old value was close enough to MAXEXTLEN that
2633 * we rounded up to it, cut it back so it's valid again.
2634 * Note that if it's a really large request (bigger than
2635 * MAXEXTLEN), we don't hear about that number, and can't
2636 * adjust the starting point to match it.
2637 */
2638 if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
2639 ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
2640 /*
2641 * If it's an allocation to an empty file at offset 0,
2642 * pick an extent that will space things out in the rt area.
2643 */
2644 if (ap->eof && ap->off == 0) {
0892ccd6
AM
2645 xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
2646
a365bdd5
NS
2647 error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
2648 if (error)
2649 return error;
2650 ap->rval = rtx * mp->m_sb.sb_rextsize;
2651 } else {
2652 ap->rval = 0;
2653 }
2654
2655 xfs_bmap_adjacent(ap);
2656
2657 /*
2658 * Realtime allocation, done through xfs_rtallocate_extent.
2659 */
2660 atype = ap->rval == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
2661 do_div(ap->rval, mp->m_sb.sb_rextsize);
2662 rtb = ap->rval;
2663 ap->alen = ralen;
2664 if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen,
2665 &ralen, atype, ap->wasdel, prod, &rtb)))
2666 return error;
2667 if (rtb == NULLFSBLOCK && prod > 1 &&
2668 (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1,
2669 ap->alen, &ralen, atype,
2670 ap->wasdel, 1, &rtb)))
2671 return error;
2672 ap->rval = rtb;
2673 if (ap->rval != NULLFSBLOCK) {
2674 ap->rval *= mp->m_sb.sb_rextsize;
2675 ralen *= mp->m_sb.sb_rextsize;
2676 ap->alen = ralen;
2677 ap->ip->i_d.di_nblocks += ralen;
2678 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2679 if (ap->wasdel)
2680 ap->ip->i_delayed_blks -= ralen;
2681 /*
2682 * Adjust the disk quota also. This was reserved
2683 * earlier.
2684 */
2685 XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
2686 ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
2687 XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
2688 } else {
2689 ap->alen = 0;
2690 }
2691 return 0;
2692}
2693
2694STATIC int
2695xfs_bmap_btalloc(
2696 xfs_bmalloca_t *ap) /* bmap alloc argument struct */
2697{
2698 xfs_mount_t *mp; /* mount point structure */
2699 xfs_alloctype_t atype = 0; /* type for allocation routines */
2700 xfs_extlen_t align; /* minimum allocation alignment */
2701 xfs_agnumber_t ag;
2702 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
2703 xfs_agnumber_t startag;
2704 xfs_alloc_arg_t args;
2705 xfs_extlen_t blen;
2706 xfs_extlen_t delta;
2707 xfs_extlen_t longest;
2708 xfs_extlen_t need;
2709 xfs_extlen_t nextminlen = 0;
2710 xfs_perag_t *pag;
2711 int nullfb; /* true if ap->firstblock isn't set */
2712 int isaligned;
2713 int notinit;
2714 int tryagain;
2715 int error;
2716
2717 mp = ap->ip->i_mount;
957d0ebe 2718 align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
a365bdd5
NS
2719 if (unlikely(align)) {
2720 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2721 align, 0, ap->eof, 0, ap->conv,
2722 &ap->off, &ap->alen);
2723 ASSERT(!error);
2724 ASSERT(ap->alen);
2725 }
2726 nullfb = ap->firstblock == NULLFSBLOCK;
2727 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
2728 if (nullfb)
2729 ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
2730 else
2731 ap->rval = ap->firstblock;
2732
2733 xfs_bmap_adjacent(ap);
2734
1da177e4
LT
2735 /*
2736 * If allowed, use ap->rval; otherwise must use firstblock since
2737 * it's in the right allocation group.
2738 */
a365bdd5 2739 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno)
1da177e4
LT
2740 ;
2741 else
2742 ap->rval = ap->firstblock;
2743 /*
a365bdd5 2744 * Normal allocation, done through xfs_alloc_vextent.
1da177e4 2745 */
a365bdd5
NS
2746 tryagain = isaligned = 0;
2747 args.tp = ap->tp;
2748 args.mp = mp;
2749 args.fsbno = ap->rval;
2750 args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks);
d210a28c 2751 args.firstblock = ap->firstblock;
a365bdd5
NS
2752 blen = 0;
2753 if (nullfb) {
2754 args.type = XFS_ALLOCTYPE_START_BNO;
2755 args.total = ap->total;
2756 /*
2757 * Find the longest available space.
2758 * We're going to try for the whole allocation at once.
2759 */
2760 startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno);
2761 notinit = 0;
2762 down_read(&mp->m_peraglock);
2763 while (blen < ap->alen) {
2764 pag = &mp->m_perag[ag];
2765 if (!pag->pagf_init &&
2766 (error = xfs_alloc_pagf_init(mp, args.tp,
2767 ag, XFS_ALLOC_FLAG_TRYLOCK))) {
2768 up_read(&mp->m_peraglock);
2769 return error;
2770 }
1da177e4 2771 /*
a365bdd5 2772 * See xfs_alloc_fix_freelist...
1da177e4 2773 */
a365bdd5
NS
2774 if (pag->pagf_init) {
2775 need = XFS_MIN_FREELIST_PAG(pag, mp);
2776 delta = need > pag->pagf_flcount ?
2777 need - pag->pagf_flcount : 0;
2778 longest = (pag->pagf_longest > delta) ?
2779 (pag->pagf_longest - delta) :
2780 (pag->pagf_flcount > 0 ||
2781 pag->pagf_longest > 0);
2782 if (blen < longest)
2783 blen = longest;
2784 } else
2785 notinit = 1;
2786 if (++ag == mp->m_sb.sb_agcount)
2787 ag = 0;
2788 if (ag == startag)
2789 break;
2790 }
2791 up_read(&mp->m_peraglock);
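		/*
		 * blen now holds the longest free extent seen in the AGs we
		 * were able to examine, after allowing for blocks each AG
		 * needs to refill its free list; notinit is set if any AG's
		 * free space data could not be read in.
		 */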
2792 /*
 2793 * Since the above loop did a BUF_TRYLOCK, some AGs may have
 2794 * been skipped, so there may still be space for this request.
2795 */
2796 if (notinit || blen < ap->minlen)
2797 args.minlen = ap->minlen;
2798 /*
2799 * If the best seen length is less than the request
2800 * length, use the best as the minimum.
2801 */
2802 else if (blen < ap->alen)
2803 args.minlen = blen;
2804 /*
2805 * Otherwise we've seen an extent as big as alen,
2806 * use that as the minimum.
2807 */
2808 else
2809 args.minlen = ap->alen;
2810 } else if (ap->low) {
d210a28c 2811 args.type = XFS_ALLOCTYPE_START_BNO;
a365bdd5
NS
2812 args.total = args.minlen = ap->minlen;
2813 } else {
2814 args.type = XFS_ALLOCTYPE_NEAR_BNO;
2815 args.total = ap->total;
2816 args.minlen = ap->minlen;
2817 }
957d0ebe
DC
2818 /* apply extent size hints if obtained earlier */
2819 if (unlikely(align)) {
2820 args.prod = align;
a365bdd5
NS
2821 if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
2822 args.mod = (xfs_extlen_t)(args.prod - args.mod);
6fe90e6d 2823 } else if (mp->m_sb.sb_blocksize >= NBPP) {
a365bdd5
NS
2824 args.prod = 1;
2825 args.mod = 0;
2826 } else {
2827 args.prod = NBPP >> mp->m_sb.sb_blocklog;
2828 if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod))))
2829 args.mod = (xfs_extlen_t)(args.prod - args.mod);
1da177e4
LT
2830 }
2831 /*
a365bdd5
NS
2832 * If we are not low on available data blocks, and the
2833 * underlying logical volume manager is a stripe, and
2834 * the file offset is zero then try to allocate data
2835 * blocks on stripe unit boundary.
2836 * NOTE: ap->aeof is only set if the allocation length
2837 * is >= the stripe unit and the allocation offset is
2838 * at the end of file.
1da177e4 2839 */
a365bdd5
NS
2840 if (!ap->low && ap->aeof) {
2841 if (!ap->off) {
2842 args.alignment = mp->m_dalign;
2843 atype = args.type;
2844 isaligned = 1;
1da177e4 2845 /*
a365bdd5 2846 * Adjust for alignment
1da177e4 2847 */
a365bdd5
NS
2848 if (blen > args.alignment && blen <= ap->alen)
2849 args.minlen = blen - args.alignment;
2850 args.minalignslop = 0;
2851 } else {
1da177e4 2852 /*
a365bdd5
NS
2853 * First try an exact bno allocation.
2854 * If it fails then do a near or start bno
2855 * allocation with alignment turned on.
1da177e4 2856 */
a365bdd5
NS
2857 atype = args.type;
2858 tryagain = 1;
2859 args.type = XFS_ALLOCTYPE_THIS_BNO;
2860 args.alignment = 1;
1da177e4 2861 /*
a365bdd5
NS
2862 * Compute the minlen+alignment for the
2863 * next case. Set slop so that the value
2864 * of minlen+alignment+slop doesn't go up
2865 * between the calls.
1da177e4 2866 */
a365bdd5
NS
2867 if (blen > mp->m_dalign && blen <= ap->alen)
2868 nextminlen = blen - mp->m_dalign;
1da177e4 2869 else
a365bdd5
NS
2870 nextminlen = args.minlen;
2871 if (nextminlen + mp->m_dalign > args.minlen + 1)
2872 args.minalignslop =
2873 nextminlen + mp->m_dalign -
2874 args.minlen - 1;
2875 else
2876 args.minalignslop = 0;
1da177e4 2877 }
a365bdd5
NS
2878 } else {
2879 args.alignment = 1;
2880 args.minalignslop = 0;
2881 }
2882 args.minleft = ap->minleft;
2883 args.wasdel = ap->wasdel;
2884 args.isfl = 0;
2885 args.userdata = ap->userdata;
2886 if ((error = xfs_alloc_vextent(&args)))
2887 return error;
2888 if (tryagain && args.fsbno == NULLFSBLOCK) {
1da177e4 2889 /*
a365bdd5
NS
2890 * Exact allocation failed. Now try with alignment
2891 * turned on.
1da177e4 2892 */
a365bdd5
NS
2893 args.type = atype;
2894 args.fsbno = ap->rval;
2895 args.alignment = mp->m_dalign;
2896 args.minlen = nextminlen;
2897 args.minalignslop = 0;
2898 isaligned = 1;
1da177e4
LT
2899 if ((error = xfs_alloc_vextent(&args)))
2900 return error;
a365bdd5
NS
2901 }
2902 if (isaligned && args.fsbno == NULLFSBLOCK) {
2903 /*
2904 * allocation failed, so turn off alignment and
2905 * try again.
2906 */
2907 args.type = atype;
2908 args.fsbno = ap->rval;
2909 args.alignment = 0;
2910 if ((error = xfs_alloc_vextent(&args)))
2911 return error;
2912 }
2913 if (args.fsbno == NULLFSBLOCK && nullfb &&
2914 args.minlen > ap->minlen) {
2915 args.minlen = ap->minlen;
2916 args.type = XFS_ALLOCTYPE_START_BNO;
2917 args.fsbno = ap->rval;
2918 if ((error = xfs_alloc_vextent(&args)))
2919 return error;
2920 }
2921 if (args.fsbno == NULLFSBLOCK && nullfb) {
2922 args.fsbno = 0;
2923 args.type = XFS_ALLOCTYPE_FIRST_AG;
2924 args.total = ap->minlen;
2925 args.minleft = 0;
2926 if ((error = xfs_alloc_vextent(&args)))
2927 return error;
2928 ap->low = 1;
2929 }
2930 if (args.fsbno != NULLFSBLOCK) {
2931 ap->firstblock = ap->rval = args.fsbno;
2932 ASSERT(nullfb || fb_agno == args.agno ||
2933 (ap->low && fb_agno < args.agno));
2934 ap->alen = args.len;
2935 ap->ip->i_d.di_nblocks += args.len;
2936 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2937 if (ap->wasdel)
2938 ap->ip->i_delayed_blks -= args.len;
2939 /*
2940 * Adjust the disk quota also. This was reserved
2941 * earlier.
2942 */
2943 XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
2944 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
2945 XFS_TRANS_DQ_BCOUNT,
2946 (long) args.len);
2947 } else {
2948 ap->rval = NULLFSBLOCK;
2949 ap->alen = 0;
1da177e4
LT
2950 }
2951 return 0;
a365bdd5
NS
2952}
2953
2954/*
2955 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
2956 * It figures out where to ask the underlying allocator to put the new extent.
2957 */
2958STATIC int
2959xfs_bmap_alloc(
2960 xfs_bmalloca_t *ap) /* bmap alloc argument struct */
2961{
2962 if ((ap->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) && ap->userdata)
2963 return xfs_bmap_rtalloc(ap);
2964 return xfs_bmap_btalloc(ap);
1da177e4
LT
2965}
2966
2967/*
2968 * Transform a btree format file with only one leaf node, where the
2969 * extents list will fit in the inode, into an extents format file.
4eea22f0 2970 * Since the file extents are already in-core, all we have to do is
1da177e4
LT
2971 * give up the space for the btree root and pitch the leaf block.
2972 */
2973STATIC int /* error */
2974xfs_bmap_btree_to_extents(
2975 xfs_trans_t *tp, /* transaction pointer */
2976 xfs_inode_t *ip, /* incore inode pointer */
2977 xfs_btree_cur_t *cur, /* btree cursor */
2978 int *logflagsp, /* inode logging flags */
2979 int whichfork) /* data or attr fork */
2980{
2981 /* REFERENCED */
2982 xfs_bmbt_block_t *cblock;/* child btree block */
2983 xfs_fsblock_t cbno; /* child block number */
2984 xfs_buf_t *cbp; /* child block's buffer */
2985 int error; /* error return value */
2986 xfs_ifork_t *ifp; /* inode fork data */
2987 xfs_mount_t *mp; /* mount point structure */
576039cf 2988 __be64 *pp; /* ptr to block address */
1da177e4
LT
2989 xfs_bmbt_block_t *rblock;/* root btree block */
2990
2991 ifp = XFS_IFORK_PTR(ip, whichfork);
2992 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
2993 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
2994 rblock = ifp->if_broot;
16259e7d
CH
2995 ASSERT(be16_to_cpu(rblock->bb_level) == 1);
2996 ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
1da177e4
LT
2997 ASSERT(XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes) == 1);
2998 mp = ip->i_mount;
2999 pp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, ifp->if_broot_bytes);
576039cf 3000 cbno = be64_to_cpu(*pp);
1da177e4
LT
3001 *logflagsp = 0;
3002#ifdef DEBUG
576039cf 3003 if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
1da177e4
LT
3004 return error;
3005#endif
1da177e4
LT
3006 if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp,
3007 XFS_BMAP_BTREE_REF)))
3008 return error;
3009 cblock = XFS_BUF_TO_BMBT_BLOCK(cbp);
3010 if ((error = xfs_btree_check_lblock(cur, cblock, 0, cbp)))
3011 return error;
3012 xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
3013 ip->i_d.di_nblocks--;
3014 XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
3015 xfs_trans_binval(tp, cbp);
3016 if (cur->bc_bufs[0] == cbp)
3017 cur->bc_bufs[0] = NULL;
3018 xfs_iroot_realloc(ip, -1, whichfork);
3019 ASSERT(ifp->if_broot == NULL);
3020 ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
3021 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
3022 *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
3023 return 0;
3024}
3025
3026/*
4eea22f0 3027 * Called by xfs_bmapi to update file extent records and the btree
1da177e4
LT
3028 * after removing space (or undoing a delayed allocation).
3029 */
3030STATIC int /* error */
3031xfs_bmap_del_extent(
3032 xfs_inode_t *ip, /* incore inode pointer */
3033 xfs_trans_t *tp, /* current transaction pointer */
3034 xfs_extnum_t idx, /* extent number to update/delete */
3035 xfs_bmap_free_t *flist, /* list of extents to be freed */
3036 xfs_btree_cur_t *cur, /* if null, not a btree */
4eea22f0 3037 xfs_bmbt_irec_t *del, /* data to remove from extents */
1da177e4 3038 int *logflagsp, /* inode logging flags */
3e57ecf6 3039 xfs_extdelta_t *delta, /* Change made to incore extents */
1da177e4
LT
3040 int whichfork, /* data or attr fork */
3041 int rsvd) /* OK to allocate reserved blocks */
3042{
3043 xfs_filblks_t da_new; /* new delay-alloc indirect blocks */
3044 xfs_filblks_t da_old; /* old delay-alloc indirect blocks */
3045 xfs_fsblock_t del_endblock=0; /* first block past del */
3046 xfs_fileoff_t del_endoff; /* first offset past del */
3047 int delay; /* current block is delayed allocated */
3048 int do_fx; /* free extent at end of routine */
3049 xfs_bmbt_rec_t *ep; /* current extent entry pointer */
3050 int error; /* error return value */
3051 int flags; /* inode logging flags */
3052#ifdef XFS_BMAP_TRACE
3053 static char fname[] = "xfs_bmap_del_extent";
3054#endif
3055 xfs_bmbt_irec_t got; /* current extent entry */
3056 xfs_fileoff_t got_endoff; /* first offset past got */
3057 int i; /* temp state */
3058 xfs_ifork_t *ifp; /* inode fork pointer */
3059 xfs_mount_t *mp; /* mount structure */
3060 xfs_filblks_t nblks; /* quota/sb block count */
3061 xfs_bmbt_irec_t new; /* new record to be inserted */
3062 /* REFERENCED */
1da177e4
LT
3063 uint qfield; /* quota field to update */
3064 xfs_filblks_t temp; /* for indirect length calculations */
3065 xfs_filblks_t temp2; /* for indirect length calculations */
3066
3067 XFS_STATS_INC(xs_del_exlist);
3068 mp = ip->i_mount;
3069 ifp = XFS_IFORK_PTR(ip, whichfork);
4eea22f0
MK
3070 ASSERT((idx >= 0) && (idx < ifp->if_bytes /
3071 (uint)sizeof(xfs_bmbt_rec_t)));
1da177e4 3072 ASSERT(del->br_blockcount > 0);
4eea22f0 3073 ep = xfs_iext_get_ext(ifp, idx);
1da177e4
LT
3074 xfs_bmbt_get_all(ep, &got);
3075 ASSERT(got.br_startoff <= del->br_startoff);
3076 del_endoff = del->br_startoff + del->br_blockcount;
3077 got_endoff = got.br_startoff + got.br_blockcount;
3078 ASSERT(got_endoff >= del_endoff);
3079 delay = ISNULLSTARTBLOCK(got.br_startblock);
3080 ASSERT(ISNULLSTARTBLOCK(del->br_startblock) == delay);
3081 flags = 0;
3082 qfield = 0;
3083 error = 0;
3084 /*
3085 * If deleting a real allocation, must free up the disk space.
3086 */
3087 if (!delay) {
3088 flags = XFS_ILOG_CORE;
3089 /*
3090 * Realtime allocation. Free it and record di_nblocks update.
3091 */
3092 if (whichfork == XFS_DATA_FORK &&
3093 (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {
3094 xfs_fsblock_t bno;
3095 xfs_filblks_t len;
3096
3097 ASSERT(do_mod(del->br_blockcount,
3098 mp->m_sb.sb_rextsize) == 0);
3099 ASSERT(do_mod(del->br_startblock,
3100 mp->m_sb.sb_rextsize) == 0);
3101 bno = del->br_startblock;
3102 len = del->br_blockcount;
3103 do_div(bno, mp->m_sb.sb_rextsize);
3104 do_div(len, mp->m_sb.sb_rextsize);
3105 if ((error = xfs_rtfree_extent(ip->i_transp, bno,
3106 (xfs_extlen_t)len)))
3107 goto done;
3108 do_fx = 0;
3109 nblks = len * mp->m_sb.sb_rextsize;
3110 qfield = XFS_TRANS_DQ_RTBCOUNT;
3111 }
3112 /*
3113 * Ordinary allocation.
3114 */
3115 else {
3116 do_fx = 1;
3117 nblks = del->br_blockcount;
3118 qfield = XFS_TRANS_DQ_BCOUNT;
3119 }
3120 /*
3121 * Set up del_endblock and cur for later.
3122 */
3123 del_endblock = del->br_startblock + del->br_blockcount;
3124 if (cur) {
3125 if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
3126 got.br_startblock, got.br_blockcount,
3127 &i)))
3128 goto done;
3129 ASSERT(i == 1);
3130 }
3131 da_old = da_new = 0;
3132 } else {
3133 da_old = STARTBLOCKVAL(got.br_startblock);
3134 da_new = 0;
3135 nblks = 0;
3136 do_fx = 0;
3137 }
3138 /*
3139 * Set flag value to use in switch statement.
3140 * Left-contig is 2, right-contig is 1.
3141 */
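	/*
	 * The resulting value selects one of the four cases below: 3
	 * removes the whole extent, 2 trims its front, 1 trims its back,
	 * and 0 punches a hole in the middle, splitting it in two.
	 */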
3142 switch (((got.br_startoff == del->br_startoff) << 1) |
3143 (got_endoff == del_endoff)) {
3144 case 3:
3145 /*
3146 * Matches the whole extent. Delete the entry.
3147 */
3148 xfs_bmap_trace_delete(fname, "3", ip, idx, 1, whichfork);
4eea22f0 3149 xfs_iext_remove(ifp, idx, 1);
1da177e4
LT
3150 ifp->if_lastex = idx;
3151 if (delay)
3152 break;
3153 XFS_IFORK_NEXT_SET(ip, whichfork,
3154 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
3155 flags |= XFS_ILOG_CORE;
3156 if (!cur) {
3157 flags |= XFS_ILOG_FEXT(whichfork);
3158 break;
3159 }
3160 if ((error = xfs_bmbt_delete(cur, &i)))
3161 goto done;
3162 ASSERT(i == 1);
3163 break;
3164
3165 case 2:
3166 /*
3167 * Deleting the first part of the extent.
3168 */
3169 xfs_bmap_trace_pre_update(fname, "2", ip, idx, whichfork);
3170 xfs_bmbt_set_startoff(ep, del_endoff);
3171 temp = got.br_blockcount - del->br_blockcount;
3172 xfs_bmbt_set_blockcount(ep, temp);
3173 ifp->if_lastex = idx;
3174 if (delay) {
3175 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
3176 da_old);
3177 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
3178 xfs_bmap_trace_post_update(fname, "2", ip, idx,
3179 whichfork);
3180 da_new = temp;
3181 break;
3182 }
3183 xfs_bmbt_set_startblock(ep, del_endblock);
3184 xfs_bmap_trace_post_update(fname, "2", ip, idx, whichfork);
3185 if (!cur) {
3186 flags |= XFS_ILOG_FEXT(whichfork);
3187 break;
3188 }
3189 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
3190 got.br_blockcount - del->br_blockcount,
3191 got.br_state)))
3192 goto done;
3193 break;
3194
3195 case 1:
3196 /*
3197 * Deleting the last part of the extent.
3198 */
3199 temp = got.br_blockcount - del->br_blockcount;
3200 xfs_bmap_trace_pre_update(fname, "1", ip, idx, whichfork);
3201 xfs_bmbt_set_blockcount(ep, temp);
3202 ifp->if_lastex = idx;
3203 if (delay) {
3204 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
3205 da_old);
3206 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
3207 xfs_bmap_trace_post_update(fname, "1", ip, idx,
3208 whichfork);
3209 da_new = temp;
3210 break;
3211 }
3212 xfs_bmap_trace_post_update(fname, "1", ip, idx, whichfork);
3213 if (!cur) {
3214 flags |= XFS_ILOG_FEXT(whichfork);
3215 break;
3216 }
3217 if ((error = xfs_bmbt_update(cur, got.br_startoff,
3218 got.br_startblock,
3219 got.br_blockcount - del->br_blockcount,
3220 got.br_state)))
3221 goto done;
3222 break;
3223
3224 case 0:
3225 /*
3226 * Deleting the middle of the extent.
3227 */
3228 temp = del->br_startoff - got.br_startoff;
3229 xfs_bmap_trace_pre_update(fname, "0", ip, idx, whichfork);
3230 xfs_bmbt_set_blockcount(ep, temp);
3231 new.br_startoff = del_endoff;
3232 temp2 = got_endoff - del_endoff;
3233 new.br_blockcount = temp2;
3234 new.br_state = got.br_state;
3235 if (!delay) {
3236 new.br_startblock = del_endblock;
3237 flags |= XFS_ILOG_CORE;
3238 if (cur) {
3239 if ((error = xfs_bmbt_update(cur,
3240 got.br_startoff,
3241 got.br_startblock, temp,
3242 got.br_state)))
3243 goto done;
3244 if ((error = xfs_bmbt_increment(cur, 0, &i)))
3245 goto done;
3246 cur->bc_rec.b = new;
3247 error = xfs_bmbt_insert(cur, &i);
3248 if (error && error != ENOSPC)
3249 goto done;
3250 /*
 3251 * If we get no-space back from the btree insert,
3252 * it tried a split, and we have a zero
3253 * block reservation.
3254 * Fix up our state and return the error.
3255 */
3256 if (error == ENOSPC) {
3257 /*
3258 * Reset the cursor, don't trust
3259 * it after any insert operation.
3260 */
3261 if ((error = xfs_bmbt_lookup_eq(cur,
3262 got.br_startoff,
3263 got.br_startblock,
3264 temp, &i)))
3265 goto done;
3266 ASSERT(i == 1);
3267 /*
3268 * Update the btree record back
3269 * to the original value.
3270 */
3271 if ((error = xfs_bmbt_update(cur,
3272 got.br_startoff,
3273 got.br_startblock,
3274 got.br_blockcount,
3275 got.br_state)))
3276 goto done;
3277 /*
3278 * Reset the extent record back
3279 * to the original value.
3280 */
3281 xfs_bmbt_set_blockcount(ep,
3282 got.br_blockcount);
3283 flags = 0;
3284 error = XFS_ERROR(ENOSPC);
3285 goto done;
3286 }
3287 ASSERT(i == 1);
3288 } else
3289 flags |= XFS_ILOG_FEXT(whichfork);
3290 XFS_IFORK_NEXT_SET(ip, whichfork,
3291 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
3292 } else {
3293 ASSERT(whichfork == XFS_DATA_FORK);
3294 temp = xfs_bmap_worst_indlen(ip, temp);
3295 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
3296 temp2 = xfs_bmap_worst_indlen(ip, temp2);
3297 new.br_startblock = NULLSTARTBLOCK((int)temp2);
3298 da_new = temp + temp2;
3299 while (da_new > da_old) {
3300 if (temp) {
3301 temp--;
3302 da_new--;
3303 xfs_bmbt_set_startblock(ep,
3304 NULLSTARTBLOCK((int)temp));
3305 }
3306 if (da_new == da_old)
3307 break;
3308 if (temp2) {
3309 temp2--;
3310 da_new--;
3311 new.br_startblock =
3312 NULLSTARTBLOCK((int)temp2);
3313 }
3314 }
3315 }
3316 xfs_bmap_trace_post_update(fname, "0", ip, idx, whichfork);
3317 xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 1, &new, NULL,
3318 whichfork);
4eea22f0 3319 xfs_iext_insert(ifp, idx + 1, 1, &new);
1da177e4
LT
3320 ifp->if_lastex = idx + 1;
3321 break;
3322 }
3323 /*
3324 * If we need to, add to list of extents to delete.
3325 */
3326 if (do_fx)
3327 xfs_bmap_add_free(del->br_startblock, del->br_blockcount, flist,
3328 mp);
3329 /*
3330 * Adjust inode # blocks in the file.
3331 */
3332 if (nblks)
3333 ip->i_d.di_nblocks -= nblks;
3334 /*
3335 * Adjust quota data.
3336 */
3337 if (qfield)
3338 XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, qfield, (long)-nblks);
3339
3340 /*
3341 * Account for change in delayed indirect blocks.
3342 * Nothing to do for disk quota accounting here.
3343 */
3344 ASSERT(da_old >= da_new);
3345 if (da_old > da_new)
20f4ebf2 3346 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new),
1da177e4 3347 rsvd);
3e57ecf6
OW
3348 if (delta) {
3349 /* DELTA: report the original extent. */
3350 if (delta->xed_startoff > got.br_startoff)
3351 delta->xed_startoff = got.br_startoff;
3352 if (delta->xed_blockcount < got.br_startoff+got.br_blockcount)
3353 delta->xed_blockcount = got.br_startoff +
3354 got.br_blockcount;
3355 }
1da177e4
LT
3356done:
3357 *logflagsp = flags;
3358 return error;
3359}
3360
3361/*
3362 * Remove the entry "free" from the free item list. Prev points to the
3363 * previous entry, unless "free" is the head of the list.
3364 */
3365STATIC void
3366xfs_bmap_del_free(
3367 xfs_bmap_free_t *flist, /* free item list header */
3368 xfs_bmap_free_item_t *prev, /* previous item on list, if any */
3369 xfs_bmap_free_item_t *free) /* list item to be freed */
3370{
3371 if (prev)
3372 prev->xbfi_next = free->xbfi_next;
3373 else
3374 flist->xbf_first = free->xbfi_next;
3375 flist->xbf_count--;
3376 kmem_zone_free(xfs_bmap_free_item_zone, free);
3377}
3378
1da177e4
LT
3379/*
3380 * Convert an extents-format file into a btree-format file.
3381 * The new file will have a root block (in the inode) and a single child block.
3382 */
3383STATIC int /* error */
3384xfs_bmap_extents_to_btree(
3385 xfs_trans_t *tp, /* transaction pointer */
3386 xfs_inode_t *ip, /* incore inode pointer */
3387 xfs_fsblock_t *firstblock, /* first-block-allocated */
3388 xfs_bmap_free_t *flist, /* blocks freed in xaction */
3389 xfs_btree_cur_t **curp, /* cursor returned to caller */
3390 int wasdel, /* converting a delayed alloc */
3391 int *logflagsp, /* inode logging flags */
3392 int whichfork) /* data or attr fork */
3393{
3394 xfs_bmbt_block_t *ablock; /* allocated (child) bt block */
3395 xfs_buf_t *abp; /* buffer for ablock */
3396 xfs_alloc_arg_t args; /* allocation arguments */
3397 xfs_bmbt_rec_t *arp; /* child record pointer */
3398 xfs_bmbt_block_t *block; /* btree root block */
3399 xfs_btree_cur_t *cur; /* bmap btree cursor */
4eea22f0 3400 xfs_bmbt_rec_t *ep; /* extent record pointer */
1da177e4 3401 int error; /* error return value */
4eea22f0 3402 xfs_extnum_t i, cnt; /* extent record index */
1da177e4
LT
3403 xfs_ifork_t *ifp; /* inode fork pointer */
3404 xfs_bmbt_key_t *kp; /* root block key pointer */
3405 xfs_mount_t *mp; /* mount structure */
4eea22f0 3406 xfs_extnum_t nextents; /* number of file extents */
1da177e4
LT
3407 xfs_bmbt_ptr_t *pp; /* root block address pointer */
3408
3409 ifp = XFS_IFORK_PTR(ip, whichfork);
3410 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
3411 ASSERT(ifp->if_ext_max ==
3412 XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
3413 /*
3414 * Make space in the inode incore.
3415 */
3416 xfs_iroot_realloc(ip, 1, whichfork);
3417 ifp->if_flags |= XFS_IFBROOT;
3418 /*
3419 * Fill in the root.
3420 */
3421 block = ifp->if_broot;
16259e7d
CH
3422 block->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
3423 block->bb_level = cpu_to_be16(1);
3424 block->bb_numrecs = cpu_to_be16(1);
3425 block->bb_leftsib = cpu_to_be64(NULLDFSBNO);
3426 block->bb_rightsib = cpu_to_be64(NULLDFSBNO);
1da177e4
LT
3427 /*
3428 * Need a cursor. Can't allocate until bb_level is filled in.
3429 */
3430 mp = ip->i_mount;
3431 cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
3432 whichfork);
3433 cur->bc_private.b.firstblock = *firstblock;
3434 cur->bc_private.b.flist = flist;
3435 cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
3436 /*
3437 * Convert to a btree with two levels, one record in root.
3438 */
3439 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
3440 args.tp = tp;
3441 args.mp = mp;
d210a28c 3442 args.firstblock = *firstblock;
1da177e4
LT
3443 if (*firstblock == NULLFSBLOCK) {
3444 args.type = XFS_ALLOCTYPE_START_BNO;
3445 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
3446 } else if (flist->xbf_low) {
3447 args.type = XFS_ALLOCTYPE_START_BNO;
3448 args.fsbno = *firstblock;
3449 } else {
3450 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3451 args.fsbno = *firstblock;
3452 }
3453 args.minlen = args.maxlen = args.prod = 1;
3454 args.total = args.minleft = args.alignment = args.mod = args.isfl =
3455 args.minalignslop = 0;
3456 args.wasdel = wasdel;
3457 *logflagsp = 0;
3458 if ((error = xfs_alloc_vextent(&args))) {
3459 xfs_iroot_realloc(ip, -1, whichfork);
3460 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
3461 return error;
3462 }
3463 /*
3464 * Allocation can't fail, the space was reserved.
3465 */
3466 ASSERT(args.fsbno != NULLFSBLOCK);
3467 ASSERT(*firstblock == NULLFSBLOCK ||
3468 args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
3469 (flist->xbf_low &&
3470 args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
3471 *firstblock = cur->bc_private.b.firstblock = args.fsbno;
3472 cur->bc_private.b.allocated++;
3473 ip->i_d.di_nblocks++;
3474 XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
3475 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
3476 /*
3477 * Fill in the child block.
3478 */
3479 ablock = XFS_BUF_TO_BMBT_BLOCK(abp);
16259e7d 3480 ablock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
1da177e4 3481 ablock->bb_level = 0;
16259e7d
CH
3482 ablock->bb_leftsib = cpu_to_be64(NULLDFSBNO);
3483 ablock->bb_rightsib = cpu_to_be64(NULLDFSBNO);
1da177e4
LT
3484 arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
3485 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4eea22f0
MK
3486 for (cnt = i = 0; i < nextents; i++) {
3487 ep = xfs_iext_get_ext(ifp, i);
1da177e4
LT
3488 if (!ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep))) {
3489 arp->l0 = INT_GET(ep->l0, ARCH_CONVERT);
3490 arp->l1 = INT_GET(ep->l1, ARCH_CONVERT);
3491 arp++; cnt++;
3492 }
3493 }
16259e7d
CH
3494 ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
3495 ablock->bb_numrecs = cpu_to_be16(cnt);
1da177e4
LT
3496 /*
3497 * Fill in the root key and pointer.
3498 */
3499 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
3500 arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
8801bb99 3501 kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
1da177e4 3502 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
576039cf 3503 *pp = cpu_to_be64(args.fsbno);
1da177e4
LT
3504 /*
3505 * Do all this logging at the end so that
3506 * the root is at the right level.
3507 */
3508 xfs_bmbt_log_block(cur, abp, XFS_BB_ALL_BITS);
16259e7d 3509 xfs_bmbt_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
1da177e4
LT
3510 ASSERT(*curp == NULL);
3511 *curp = cur;
3512 *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FBROOT(whichfork);
3513 return 0;
3514}
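/*
 * Illustrative sketch (not part of the build): the shape of the tree this
 * conversion leaves behind, as implied by the code above.  The incore root
 * in the inode fork has bb_level == 1 with a single key/pointer, and the
 * one allocated child block is a level-0 leaf holding every incore extent
 * record that is not a delayed allocation:
 *
 *	if_broot (in the inode)              leaf block at args.fsbno
 *	+---------------------------+        +----------------------------+
 *	| level 1, numrecs 1        | -----> | level 0, numrecs = cnt     |
 *	| key = startoff of 1st rec |        | bmbt records 1 .. cnt      |
 *	+---------------------------+        +----------------------------+
 */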
3515
d8cc890d
NS
3516/*
3517 * Helper routine to reset inode di_forkoff field when switching
3518 * attribute fork from local to extent format - we reset it where
3519 * possible to make space available for inline data fork extents.
3520 */
3521STATIC void
3522xfs_bmap_forkoff_reset(
3523 xfs_mount_t *mp,
3524 xfs_inode_t *ip,
3525 int whichfork)
3526{
3527 if (whichfork == XFS_ATTR_FORK &&
3528 (ip->i_d.di_format != XFS_DINODE_FMT_DEV) &&
3529 (ip->i_d.di_format != XFS_DINODE_FMT_UUID) &&
e5889e90 3530 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
d8cc890d
NS
3531 ((mp->m_attroffset >> 3) > ip->i_d.di_forkoff)) {
3532 ip->i_d.di_forkoff = mp->m_attroffset >> 3;
3533 ip->i_df.if_ext_max = XFS_IFORK_DSIZE(ip) /
3534 (uint)sizeof(xfs_bmbt_rec_t);
3535 ip->i_afp->if_ext_max = XFS_IFORK_ASIZE(ip) /
3536 (uint)sizeof(xfs_bmbt_rec_t);
3537 }
3538}
3539
1da177e4
LT
3540/*
3541 * Convert a local file to an extents file.
3542 * This code is out of bounds for data forks of regular files,
3543 * since the file data needs to get logged so things will stay consistent.
3544 * (The bmap-level manipulations are ok, though).
3545 */
3546STATIC int /* error */
3547xfs_bmap_local_to_extents(
3548 xfs_trans_t *tp, /* transaction pointer */
3549 xfs_inode_t *ip, /* incore inode pointer */
3550 xfs_fsblock_t *firstblock, /* first block allocated in xaction */
3551 xfs_extlen_t total, /* total blocks needed by transaction */
3552 int *logflagsp, /* inode logging flags */
3553 int whichfork) /* data or attr fork */
3554{
3555 int error; /* error return value */
3556 int flags; /* logging flags returned */
3557#ifdef XFS_BMAP_TRACE
3558 static char fname[] = "xfs_bmap_local_to_extents";
3559#endif
3560 xfs_ifork_t *ifp; /* inode fork pointer */
3561
3562 /*
3563 * We don't want to deal with the case of keeping inode data inline yet.
3564 * So passing in the data fork of a regular inode is invalid.
3565 */
3566 ASSERT(!((ip->i_d.di_mode & S_IFMT) == S_IFREG &&
3567 whichfork == XFS_DATA_FORK));
3568 ifp = XFS_IFORK_PTR(ip, whichfork);
3569 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
3570 flags = 0;
3571 error = 0;
3572 if (ifp->if_bytes) {
3573 xfs_alloc_arg_t args; /* allocation arguments */
4eea22f0
MK
3574 xfs_buf_t *bp; /* buffer for extent block */
3575 xfs_bmbt_rec_t *ep; /* extent record pointer */
1da177e4
LT
3576
3577 args.tp = tp;
3578 args.mp = ip->i_mount;
d210a28c 3579 args.firstblock = *firstblock;
f020b67f
MK
3580 ASSERT((ifp->if_flags &
3581 (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
1da177e4
LT
3582 /*
3583 * Allocate a block. We know we need only one, since the
3584 * file currently fits in an inode.
3585 */
3586 if (*firstblock == NULLFSBLOCK) {
3587 args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
3588 args.type = XFS_ALLOCTYPE_START_BNO;
3589 } else {
3590 args.fsbno = *firstblock;
3591 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3592 }
3593 args.total = total;
3594 args.mod = args.minleft = args.alignment = args.wasdel =
3595 args.isfl = args.minalignslop = 0;
3596 args.minlen = args.maxlen = args.prod = 1;
3597 if ((error = xfs_alloc_vextent(&args)))
3598 goto done;
3599 /*
3600 * Can't fail, the space was reserved.
3601 */
3602 ASSERT(args.fsbno != NULLFSBLOCK);
3603 ASSERT(args.len == 1);
3604 *firstblock = args.fsbno;
3605 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
3606 memcpy((char *)XFS_BUF_PTR(bp), ifp->if_u1.if_data,
3607 ifp->if_bytes);
3608 xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
d8cc890d 3609 xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
1da177e4 3610 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
4eea22f0
MK
3611 xfs_iext_add(ifp, 0, 1);
3612 ep = xfs_iext_get_ext(ifp, 0);
1da177e4
LT
3613 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
3614 xfs_bmap_trace_post_update(fname, "new", ip, 0, whichfork);
3615 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
3616 ip->i_d.di_nblocks = 1;
3617 XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip,
3618 XFS_TRANS_DQ_BCOUNT, 1L);
3619 flags |= XFS_ILOG_FEXT(whichfork);
d8cc890d 3620 } else {
1da177e4 3621 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
d8cc890d
NS
3622 xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork);
3623 }
1da177e4
LT
3624 ifp->if_flags &= ~XFS_IFINLINE;
3625 ifp->if_flags |= XFS_IFEXTENTS;
3626 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
3627 flags |= XFS_ILOG_CORE;
3628done:
3629 *logflagsp = flags;
3630 return error;
3631}
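/*
 * Worked example (hypothetical sizes): converting an attr fork that holds
 * 40 bytes of inline data.  Before: XFS_DINODE_FMT_LOCAL, if_bytes == 40,
 * data in if_u1.if_data, no extent records.  After the code above runs:
 * one block is allocated at args.fsbno, the 40 bytes are copied into it and
 * logged, and the fork becomes XFS_DINODE_FMT_EXTENTS with a single record
 * [startoff 0, startblock args.fsbno, blockcount 1, XFS_EXT_NORM].
 */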
3632
0293ce3a 3633/*
8867bc9b
MK
3634 * Search the extent records for the entry containing block bno.
3635 * If bno lies in a hole, point to the next entry. If bno lies
3636 * past eof, *eofp will be set, and *prevp will contain the last
3637 * entry (null if none). Else, *lastxp will be set to the index
3638 * of the found entry; *gotp will contain the entry.
0293ce3a
MK
3639 */
3640xfs_bmbt_rec_t * /* pointer to found extent entry */
3641xfs_bmap_search_multi_extents(
3642 xfs_ifork_t *ifp, /* inode fork pointer */
3643 xfs_fileoff_t bno, /* block number searched for */
3644 int *eofp, /* out: end of file found */
3645 xfs_extnum_t *lastxp, /* out: last extent index */
3646 xfs_bmbt_irec_t *gotp, /* out: extent entry found */
3647 xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
3648{
0293ce3a 3649 xfs_bmbt_rec_t *ep; /* extent record pointer */
0293ce3a 3650 xfs_extnum_t lastx; /* last extent index */
0293ce3a
MK
3651
3652 /*
8867bc9b
MK
3653 * Initialize the extent entry structure to catch access to
3654 * uninitialized br_startblock field.
0293ce3a 3655 */
8867bc9b
MK
3656 gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
3657 gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
3658 gotp->br_state = XFS_EXT_INVALID;
3659#if XFS_BIG_BLKNOS
3660 gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
3661#else
3662 gotp->br_startblock = 0xffffa5a5;
3663#endif
3664 prevp->br_startoff = NULLFILEOFF;
3665
3666 ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
3667 if (lastx > 0) {
3668 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
0293ce3a 3669 }
8867bc9b
MK
3670 if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
3671 xfs_bmbt_get_all(ep, gotp);
3672 *eofp = 0;
3673 } else {
3674 if (lastx > 0) {
3675 *gotp = *prevp;
3676 }
3677 *eofp = 1;
3678 ep = NULL;
0293ce3a 3679 }
8867bc9b 3680 *lastxp = lastx;
0293ce3a
MK
3681 return ep;
3682}
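/*
 * Illustrative caller sketch (not part of the build; the locals are made
 * up): how the outputs of xfs_bmap_search_multi_extents() are meant to be
 * read, based on the code above.
 */
#if 0
	xfs_bmbt_irec_t	got, prev;
	xfs_extnum_t	lastx;
	int		eof;

	ep = xfs_bmap_search_multi_extents(ifp, bno, &eof, &lastx, &got, &prev);
	if (eof) {
		/* bno is beyond the last extent; prev holds the last record,
		 * or prev.br_startoff == NULLFILEOFF if the fork is empty */
	} else if (got.br_startoff > bno) {
		/* bno falls in a hole; got is the next extent after the hole */
	} else {
		/* got contains bno; lastx is its index in the fork */
	}
#endif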
3683
1da177e4
LT
3684/*
3685 * Search the extents list for the inode, for the extent containing bno.
3686 * If bno lies in a hole, point to the next entry. If bno lies past eof,
3687 * *eofp will be set, and *prevp will contain the last entry (null if none).
3688 * Else, *lastxp will be set to the index of the found
3689 * entry; *gotp will contain the entry.
3690 */
3691STATIC xfs_bmbt_rec_t * /* pointer to found extent entry */
3692xfs_bmap_search_extents(
3693 xfs_inode_t *ip, /* incore inode pointer */
3694 xfs_fileoff_t bno, /* block number searched for */
572d95f4 3695 int fork, /* data or attr fork */
1da177e4
LT
3696 int *eofp, /* out: end of file found */
3697 xfs_extnum_t *lastxp, /* out: last extent index */
3698 xfs_bmbt_irec_t *gotp, /* out: extent entry found */
3699 xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
3700{
3701 xfs_ifork_t *ifp; /* inode fork pointer */
4eea22f0 3702 xfs_bmbt_rec_t *ep; /* extent record pointer */
1da177e4
LT
3703
3704 XFS_STATS_INC(xs_look_exlist);
572d95f4 3705 ifp = XFS_IFORK_PTR(ip, fork);
1da177e4 3706
0293ce3a
MK
3707 ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);
3708
572d95f4
NS
3709 if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
3710 !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
3711 xfs_cmn_err(XFS_PTAG_FSBLOCK_ZERO, CE_ALERT, ip->i_mount,
3712 "Access to block zero in inode %llu "
3713 "start_block: %llx start_off: %llx "
3714 "blkcnt: %llx extent-state: %x lastx: %x\n",
3715 (unsigned long long)ip->i_ino,
3ddb8fa9
NS
3716 (unsigned long long)gotp->br_startblock,
3717 (unsigned long long)gotp->br_startoff,
3718 (unsigned long long)gotp->br_blockcount,
572d95f4
NS
3719 gotp->br_state, *lastxp);
3720 *lastxp = NULLEXTNUM;
3721 *eofp = 1;
3722 return NULL;
3723 }
3724 return ep;
1da177e4
LT
3725}
3726
3727
3728#ifdef XFS_BMAP_TRACE
3729ktrace_t *xfs_bmap_trace_buf;
3730
3731/*
3732 * Add a bmap trace buffer entry. Base routine for the others.
3733 */
3734STATIC void
3735xfs_bmap_trace_addentry(
3736 int opcode, /* operation */
3737 char *fname, /* function name */
3738 char *desc, /* operation description */
3739 xfs_inode_t *ip, /* incore inode pointer */
3740 xfs_extnum_t idx, /* index of entry(ies) */
3741 xfs_extnum_t cnt, /* count of entries, 1 or 2 */
3742 xfs_bmbt_rec_t *r1, /* first record */
3743 xfs_bmbt_rec_t *r2, /* second record or null */
3744 int whichfork) /* data or attr fork */
3745{
3746 xfs_bmbt_rec_t tr2;
3747
3748 ASSERT(cnt == 1 || cnt == 2);
3749 ASSERT(r1 != NULL);
3750 if (cnt == 1) {
3751 ASSERT(r2 == NULL);
3752 r2 = &tr2;
3753 memset(&tr2, 0, sizeof(tr2));
3754 } else
3755 ASSERT(r2 != NULL);
3756 ktrace_enter(xfs_bmap_trace_buf,
3757 (void *)(__psint_t)(opcode | (whichfork << 16)),
3758 (void *)fname, (void *)desc, (void *)ip,
3759 (void *)(__psint_t)idx,
3760 (void *)(__psint_t)cnt,
3761 (void *)(__psunsigned_t)(ip->i_ino >> 32),
3762 (void *)(__psunsigned_t)(unsigned)ip->i_ino,
3763 (void *)(__psunsigned_t)(r1->l0 >> 32),
3764 (void *)(__psunsigned_t)(unsigned)(r1->l0),
3765 (void *)(__psunsigned_t)(r1->l1 >> 32),
3766 (void *)(__psunsigned_t)(unsigned)(r1->l1),
3767 (void *)(__psunsigned_t)(r2->l0 >> 32),
3768 (void *)(__psunsigned_t)(unsigned)(r2->l0),
3769 (void *)(__psunsigned_t)(r2->l1 >> 32),
3770 (void *)(__psunsigned_t)(unsigned)(r2->l1)
3771 );
3772 ASSERT(ip->i_xtrace);
3773 ktrace_enter(ip->i_xtrace,
3774 (void *)(__psint_t)(opcode | (whichfork << 16)),
3775 (void *)fname, (void *)desc, (void *)ip,
3776 (void *)(__psint_t)idx,
3777 (void *)(__psint_t)cnt,
3778 (void *)(__psunsigned_t)(ip->i_ino >> 32),
3779 (void *)(__psunsigned_t)(unsigned)ip->i_ino,
3780 (void *)(__psunsigned_t)(r1->l0 >> 32),
3781 (void *)(__psunsigned_t)(unsigned)(r1->l0),
3782 (void *)(__psunsigned_t)(r1->l1 >> 32),
3783 (void *)(__psunsigned_t)(unsigned)(r1->l1),
3784 (void *)(__psunsigned_t)(r2->l0 >> 32),
3785 (void *)(__psunsigned_t)(unsigned)(r2->l0),
3786 (void *)(__psunsigned_t)(r2->l1 >> 32),
3787 (void *)(__psunsigned_t)(unsigned)(r2->l1)
3788 );
3789}
3790
3791/*
4eea22f0 3792 * Add bmap trace entry prior to a call to xfs_iext_remove.
1da177e4
LT
3793 */
3794STATIC void
3795xfs_bmap_trace_delete(
3796 char *fname, /* function name */
3797 char *desc, /* operation description */
3798 xfs_inode_t *ip, /* incore inode pointer */
3799 xfs_extnum_t idx, /* index of entry(entries) deleted */
3800 xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */
3801 int whichfork) /* data or attr fork */
3802{
3803 xfs_ifork_t *ifp; /* inode fork pointer */
3804
3805 ifp = XFS_IFORK_PTR(ip, whichfork);
3806 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx,
4eea22f0
MK
3807 cnt, xfs_iext_get_ext(ifp, idx),
3808 cnt == 2 ? xfs_iext_get_ext(ifp, idx + 1) : NULL,
1da177e4
LT
3809 whichfork);
3810}
3811
3812/*
4eea22f0 3813 * Add bmap trace entry prior to a call to xfs_iext_insert, or
1da177e4
LT
3814 * reading in the extents list from the disk (in the btree).
3815 */
3816STATIC void
3817xfs_bmap_trace_insert(
3818 char *fname, /* function name */
3819 char *desc, /* operation description */
3820 xfs_inode_t *ip, /* incore inode pointer */
3821 xfs_extnum_t idx, /* index of entry(entries) inserted */
3822 xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */
3823 xfs_bmbt_irec_t *r1, /* inserted record 1 */
3824 xfs_bmbt_irec_t *r2, /* inserted record 2 or null */
3825 int whichfork) /* data or attr fork */
3826{
3827 xfs_bmbt_rec_t tr1; /* compressed record 1 */
3828 xfs_bmbt_rec_t tr2; /* compressed record 2 if needed */
3829
3830 xfs_bmbt_set_all(&tr1, r1);
3831 if (cnt == 2) {
3832 ASSERT(r2 != NULL);
3833 xfs_bmbt_set_all(&tr2, r2);
3834 } else {
3835 ASSERT(cnt == 1);
3836 ASSERT(r2 == NULL);
3837 }
3838 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_INSERT, fname, desc, ip, idx,
3839 cnt, &tr1, cnt == 2 ? &tr2 : NULL, whichfork);
3840}
3841
3842/*
4eea22f0 3843 * Add bmap trace entry after updating an extent record in place.
1da177e4
LT
3844 */
3845STATIC void
3846xfs_bmap_trace_post_update(
3847 char *fname, /* function name */
3848 char *desc, /* operation description */
3849 xfs_inode_t *ip, /* incore inode pointer */
3850 xfs_extnum_t idx, /* index of entry updated */
3851 int whichfork) /* data or attr fork */
3852{
3853 xfs_ifork_t *ifp; /* inode fork pointer */
3854
3855 ifp = XFS_IFORK_PTR(ip, whichfork);
3856 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx,
4eea22f0 3857 1, xfs_iext_get_ext(ifp, idx), NULL, whichfork);
1da177e4
LT
3858}
3859
3860/*
4eea22f0 3861 * Add bmap trace entry prior to updating an extent record in place.
1da177e4
LT
3862 */
3863STATIC void
3864xfs_bmap_trace_pre_update(
3865 char *fname, /* function name */
3866 char *desc, /* operation description */
3867 xfs_inode_t *ip, /* incore inode pointer */
3868 xfs_extnum_t idx, /* index of entry to be updated */
3869 int whichfork) /* data or attr fork */
3870{
3871 xfs_ifork_t *ifp; /* inode fork pointer */
3872
3873 ifp = XFS_IFORK_PTR(ip, whichfork);
3874 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1,
4eea22f0 3875 xfs_iext_get_ext(ifp, idx), NULL, whichfork);
1da177e4
LT
3876}
3877#endif /* XFS_BMAP_TRACE */
3878
3879/*
3880 * Compute the worst-case number of indirect blocks that will be used
3881 * for ip's delayed extent of length "len".
3882 */
3883STATIC xfs_filblks_t
3884xfs_bmap_worst_indlen(
3885 xfs_inode_t *ip, /* incore inode pointer */
3886 xfs_filblks_t len) /* delayed extent length */
3887{
3888 int level; /* btree level number */
3889 int maxrecs; /* maximum record count at this level */
3890 xfs_mount_t *mp; /* mount structure */
3891 xfs_filblks_t rval; /* return value */
3892
3893 mp = ip->i_mount;
3894 maxrecs = mp->m_bmap_dmxr[0];
3895 for (level = 0, rval = 0;
3896 level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
3897 level++) {
3898 len += maxrecs - 1;
3899 do_div(len, maxrecs);
3900 rval += len;
3901 if (len == 1)
3902 return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
3903 level - 1;
3904 if (level == 0)
3905 maxrecs = mp->m_bmap_dmxr[1];
3906 }
3907 return rval;
3908}
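/*
 * Worked example with made-up (but plausible) geometry: say
 * mp->m_bmap_dmxr[0] == 63 records per leaf block,
 * mp->m_bmap_dmxr[1] == 127 records per node block, and the delayed extent
 * is len == 1000 blocks.  Level 0: ceil(1000/63) = 16, so rval = 16.
 * Level 1: ceil(16/127) = 1, so len hits 1 and the routine returns
 * 16 + 1 + (XFS_BM_MAXLEVELS() - 2): one extra block per remaining level
 * of the tree, since in the worst case every level may need a fresh block
 * to accommodate this extent.
 */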
3909
3910#if defined(XFS_RW_TRACE)
3911STATIC void
3912xfs_bunmap_trace(
3913 xfs_inode_t *ip,
3914 xfs_fileoff_t bno,
3915 xfs_filblks_t len,
3916 int flags,
3917 inst_t *ra)
3918{
3919 if (ip->i_rwtrace == NULL)
3920 return;
3921 ktrace_enter(ip->i_rwtrace,
3e57ecf6 3922 (void *)(__psint_t)XFS_BUNMAP,
1da177e4
LT
3923 (void *)ip,
3924 (void *)(__psint_t)((ip->i_d.di_size >> 32) & 0xffffffff),
3925 (void *)(__psint_t)(ip->i_d.di_size & 0xffffffff),
3926 (void *)(__psint_t)(((xfs_dfiloff_t)bno >> 32) & 0xffffffff),
3927 (void *)(__psint_t)((xfs_dfiloff_t)bno & 0xffffffff),
3928 (void *)(__psint_t)len,
3929 (void *)(__psint_t)flags,
3930 (void *)(unsigned long)current_cpu(),
3931 (void *)ra,
3932 (void *)0,
3933 (void *)0,
3934 (void *)0,
3935 (void *)0,
3936 (void *)0,
3937 (void *)0);
3938}
3939#endif
3940
3941/*
3942 * Convert inode from non-attributed to attributed.
3943 * Must not be in a transaction, ip must not be locked.
3944 */
3945int /* error code */
3946xfs_bmap_add_attrfork(
3947 xfs_inode_t *ip, /* incore inode pointer */
d8cc890d
NS
3948 int size, /* space new attribute needs */
3949 int rsvd) /* xact may use reserved blks */
1da177e4 3950{
1da177e4 3951 xfs_fsblock_t firstblock; /* 1st block/ag allocated */
4eea22f0 3952 xfs_bmap_free_t flist; /* freed extent records */
1da177e4 3953 xfs_mount_t *mp; /* mount structure */
1da177e4 3954 xfs_trans_t *tp; /* transaction pointer */
d8cc890d
NS
3955 unsigned long s; /* spinlock spl value */
3956 int blks; /* space reservation */
3957 int version = 1; /* superblock attr version */
3958 int committed; /* xaction was committed */
3959 int logflags; /* logging flags */
3960 int error; /* error return value */
1da177e4 3961
d8cc890d 3962 ASSERT(XFS_IFORK_Q(ip) == 0);
1da177e4
LT
3963 ASSERT(ip->i_df.if_ext_max ==
3964 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
d8cc890d 3965
1da177e4
LT
3966 mp = ip->i_mount;
3967 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
3968 tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK);
3969 blks = XFS_ADDAFORK_SPACE_RES(mp);
3970 if (rsvd)
3971 tp->t_flags |= XFS_TRANS_RESERVE;
3972 if ((error = xfs_trans_reserve(tp, blks, XFS_ADDAFORK_LOG_RES(mp), 0,
3973 XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT)))
3974 goto error0;
3975 xfs_ilock(ip, XFS_ILOCK_EXCL);
3976 error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, blks, 0, rsvd ?
3977 XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
3978 XFS_QMOPT_RES_REGBLKS);
3979 if (error) {
3980 xfs_iunlock(ip, XFS_ILOCK_EXCL);
3981 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
3982 return error;
3983 }
3984 if (XFS_IFORK_Q(ip))
3985 goto error1;
3986 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
3987 /*
3988 * For inodes coming from pre-6.2 filesystems.
3989 */
3990 ASSERT(ip->i_d.di_aformat == 0);
3991 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
3992 }
3993 ASSERT(ip->i_d.di_anextents == 0);
3994 VN_HOLD(XFS_ITOV(ip));
3995 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
3996 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
3997 switch (ip->i_d.di_format) {
3998 case XFS_DINODE_FMT_DEV:
3999 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
4000 break;
4001 case XFS_DINODE_FMT_UUID:
4002 ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
4003 break;
4004 case XFS_DINODE_FMT_LOCAL:
4005 case XFS_DINODE_FMT_EXTENTS:
4006 case XFS_DINODE_FMT_BTREE:
d8cc890d
NS
4007 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
4008 if (!ip->i_d.di_forkoff)
4009 ip->i_d.di_forkoff = mp->m_attroffset >> 3;
13059ff0 4010 else if (mp->m_flags & XFS_MOUNT_ATTR2)
d8cc890d 4011 version = 2;
1da177e4
LT
4012 break;
4013 default:
4014 ASSERT(0);
4015 error = XFS_ERROR(EINVAL);
4016 goto error1;
4017 }
4018 ip->i_df.if_ext_max =
4019 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
4020 ASSERT(ip->i_afp == NULL);
4021 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
4022 ip->i_afp->if_ext_max =
4023 XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
4024 ip->i_afp->if_flags = XFS_IFEXTENTS;
4025 logflags = 0;
4026 XFS_BMAP_INIT(&flist, &firstblock);
4027 switch (ip->i_d.di_format) {
4028 case XFS_DINODE_FMT_LOCAL:
4029 error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
4030 &logflags);
4031 break;
4032 case XFS_DINODE_FMT_EXTENTS:
4033 error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
4034 &flist, &logflags);
4035 break;
4036 case XFS_DINODE_FMT_BTREE:
4037 error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
4038 &logflags);
4039 break;
4040 default:
4041 error = 0;
4042 break;
4043 }
4044 if (logflags)
4045 xfs_trans_log_inode(tp, ip, logflags);
4046 if (error)
4047 goto error2;
d8cc890d
NS
4048 if (!XFS_SB_VERSION_HASATTR(&mp->m_sb) ||
4049 (!XFS_SB_VERSION_HASATTR2(&mp->m_sb) && version == 2)) {
da087bad
NS
4050 __int64_t sbfields = 0;
4051
1da177e4
LT
4052 s = XFS_SB_LOCK(mp);
4053 if (!XFS_SB_VERSION_HASATTR(&mp->m_sb)) {
4054 XFS_SB_VERSION_ADDATTR(&mp->m_sb);
da087bad 4055 sbfields |= XFS_SB_VERSIONNUM;
d8cc890d
NS
4056 }
4057 if (!XFS_SB_VERSION_HASATTR2(&mp->m_sb) && version == 2) {
4058 XFS_SB_VERSION_ADDATTR2(&mp->m_sb);
da087bad 4059 sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
d8cc890d 4060 }
da087bad 4061 if (sbfields) {
1da177e4 4062 XFS_SB_UNLOCK(mp, s);
da087bad 4063 xfs_mod_sb(tp, sbfields);
1da177e4
LT
4064 } else
4065 XFS_SB_UNLOCK(mp, s);
4066 }
f7c99b6f 4067 if ((error = xfs_bmap_finish(&tp, &flist, &committed)))
1da177e4 4068 goto error2;
1c72bf90 4069 error = xfs_trans_commit(tp, XFS_TRANS_PERM_LOG_RES);
1da177e4
LT
4070 ASSERT(ip->i_df.if_ext_max ==
4071 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
4072 return error;
4073error2:
4074 xfs_bmap_cancel(&flist);
4075error1:
4076 ASSERT(ismrlocked(&ip->i_lock,MR_UPDATE));
4077 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4078error0:
4079 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
4080 ASSERT(ip->i_df.if_ext_max ==
4081 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
4082 return error;
4083}
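/*
 * Illustrative caller sketch (not part of the build; locals are assumed):
 * the attr code is expected to add the fork only when one is not already
 * there, without holding the inode lock and outside any transaction, per
 * the comment and asserts above.  "size" is the space the first attribute
 * will need and "rsvd" allows dipping into the reserved block pool.
 */
#if 0
	if (!XFS_IFORK_Q(ip)) {
		error = xfs_bmap_add_attrfork(ip, size, rsvd);
		if (error)
			return error;
	}
#endif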
4084
4085/*
4086 * Add the extent to the list of extents to be freed at transaction end.
4087 * The list is maintained sorted (by block number).
4088 */
4089/* ARGSUSED */
4090void
4091xfs_bmap_add_free(
4092 xfs_fsblock_t bno, /* fs block number of extent */
4093 xfs_filblks_t len, /* length of extent */
4094 xfs_bmap_free_t *flist, /* list of extents */
4095 xfs_mount_t *mp) /* mount point structure */
4096{
4097 xfs_bmap_free_item_t *cur; /* current (next) element */
4098 xfs_bmap_free_item_t *new; /* new element */
4099 xfs_bmap_free_item_t *prev; /* previous element */
4100#ifdef DEBUG
4101 xfs_agnumber_t agno;
4102 xfs_agblock_t agbno;
4103
4104 ASSERT(bno != NULLFSBLOCK);
4105 ASSERT(len > 0);
4106 ASSERT(len <= MAXEXTLEN);
4107 ASSERT(!ISNULLSTARTBLOCK(bno));
4108 agno = XFS_FSB_TO_AGNO(mp, bno);
4109 agbno = XFS_FSB_TO_AGBNO(mp, bno);
4110 ASSERT(agno < mp->m_sb.sb_agcount);
4111 ASSERT(agbno < mp->m_sb.sb_agblocks);
4112 ASSERT(len < mp->m_sb.sb_agblocks);
4113 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
4114#endif
4115 ASSERT(xfs_bmap_free_item_zone != NULL);
4116 new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
4117 new->xbfi_startblock = bno;
4118 new->xbfi_blockcount = (xfs_extlen_t)len;
4119 for (prev = NULL, cur = flist->xbf_first;
4120 cur != NULL;
4121 prev = cur, cur = cur->xbfi_next) {
4122 if (cur->xbfi_startblock >= bno)
4123 break;
4124 }
4125 if (prev)
4126 prev->xbfi_next = new;
4127 else
4128 flist->xbf_first = new;
4129 new->xbfi_next = cur;
4130 flist->xbf_count++;
4131}
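/*
 * Worked example: because the insertion loop above walks the list until it
 * finds the first entry with xbfi_startblock >= bno, queueing extents that
 * start at blocks 500, 120 and 300 (in that call order) leaves the list as
 * 120 -> 300 -> 500, ready to be processed in block order by
 * xfs_bmap_finish().
 */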
4132
4133/*
4134 * Compute and fill in the value of the maximum depth of a bmap btree
4135 * in this filesystem. Done once, during mount.
4136 */
4137void
4138xfs_bmap_compute_maxlevels(
4139 xfs_mount_t *mp, /* file system mount structure */
4140 int whichfork) /* data or attr fork */
4141{
4142 int level; /* btree level */
4143 uint maxblocks; /* max blocks at this level */
4144 uint maxleafents; /* max leaf entries possible */
4145 int maxrootrecs; /* max records in root block */
4146 int minleafrecs; /* min records in leaf block */
4147 int minnoderecs; /* min records in node block */
4148 int sz; /* root block size */
4149
4150 /*
4151 * The maximum number of extents in a file, hence the maximum
4152 * number of leaf entries, is controlled by the type of di_nextents
4153 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
4154 * (a signed 16-bit number, xfs_aextnum_t).
4155 */
d8cc890d
NS
4156 if (whichfork == XFS_DATA_FORK) {
4157 maxleafents = MAXEXTNUM;
13059ff0
NS
4158 sz = (mp->m_flags & XFS_MOUNT_ATTR2) ?
4159 XFS_BMDR_SPACE_CALC(MINDBTPTRS) : mp->m_attroffset;
d8cc890d
NS
4160 } else {
4161 maxleafents = MAXAEXTNUM;
13059ff0
NS
4162 sz = (mp->m_flags & XFS_MOUNT_ATTR2) ?
4163 XFS_BMDR_SPACE_CALC(MINABTPTRS) :
4164 mp->m_sb.sb_inodesize - mp->m_attroffset;
d8cc890d
NS
4165 }
4166 maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0);
1da177e4
LT
4167 minleafrecs = mp->m_bmap_dmnr[0];
4168 minnoderecs = mp->m_bmap_dmnr[1];
1da177e4
LT
4169 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
4170 for (level = 1; maxblocks > 1; level++) {
4171 if (maxblocks <= maxrootrecs)
4172 maxblocks = 1;
4173 else
4174 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
4175 }
4176 mp->m_bm_maxlevels[whichfork] = level;
4177}
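/*
 * Worked example with made-up geometry: for the data fork,
 * maxleafents = MAXEXTNUM (2^31 - 1).  Assume minleafrecs = 31,
 * minnoderecs = 63 and a maxrootrecs somewhere between 5 and 277.  The
 * per-level block counts are then ~69.3M leaves, ~1.10M nodes, ~17.5K
 * nodes, 278 nodes, 5 nodes, and finally the single root held in the
 * inode, so m_bm_maxlevels comes out as 6 for that configuration.
 */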
4178
4179/*
4180 * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
4181 * caller. Frees all the extents that need freeing, which must be done
4182 * last due to locking considerations. We never free any extents in
4183 * the first transaction. This is to allow the caller to make the first
4184 * transaction a synchronous one so that the pointers to the data being
4185 * broken in this transaction will be permanent before the data is actually
4186 * freed. This is necessary to prevent blocks from being reallocated
4187 * and written to before the free and reallocation are actually permanent.
4188 * We do not just make the first transaction synchronous here, because
4189 * there are more efficient ways to gain the same protection in some cases
4190 * (see the file truncation code).
4191 *
4192 * Return 1 in the committed parameter if the given transaction was
4193 * committed and a new one started, and 0 otherwise.
4194 */
4195/*ARGSUSED*/
4196int /* error */
4197xfs_bmap_finish(
4198 xfs_trans_t **tp, /* transaction pointer addr */
4199 xfs_bmap_free_t *flist, /* i/o: list extents to free */
1da177e4
LT
4200 int *committed) /* xact committed or not */
4201{
4202 xfs_efd_log_item_t *efd; /* extent free data */
4203 xfs_efi_log_item_t *efi; /* extent free intention */
4204 int error; /* error return value */
4eea22f0 4205 xfs_bmap_free_item_t *free; /* free extent item */
1da177e4
LT
4206 unsigned int logres; /* new log reservation */
4207 unsigned int logcount; /* new log count */
4208 xfs_mount_t *mp; /* filesystem mount structure */
4209 xfs_bmap_free_item_t *next; /* next item on free list */
4210 xfs_trans_t *ntp; /* new transaction pointer */
4211
4212 ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
4213 if (flist->xbf_count == 0) {
4214 *committed = 0;
4215 return 0;
4216 }
4217 ntp = *tp;
4218 efi = xfs_trans_get_efi(ntp, flist->xbf_count);
4219 for (free = flist->xbf_first; free; free = free->xbfi_next)
4220 xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
4221 free->xbfi_blockcount);
4222 logres = ntp->t_log_res;
4223 logcount = ntp->t_log_count;
4224 ntp = xfs_trans_dup(*tp);
1c72bf90 4225 error = xfs_trans_commit(*tp, 0);
1da177e4
LT
4226 *tp = ntp;
4227 *committed = 1;
4228 /*
4229 * We have a new transaction, so we should return committed=1,
4230 * even though we're returning an error.
4231 */
4232 if (error) {
4233 return error;
4234 }
4235 if ((error = xfs_trans_reserve(ntp, 0, logres, 0, XFS_TRANS_PERM_LOG_RES,
4236 logcount)))
4237 return error;
4238 efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
4239 for (free = flist->xbf_first; free != NULL; free = next) {
4240 next = free->xbfi_next;
4241 if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
4242 free->xbfi_blockcount))) {
4243 /*
4244 * The bmap free list will be cleaned up at a
4245 * higher level. The EFI will be canceled when
4246 * this transaction is aborted.
4247 * Need to force shutdown here to make sure it
4248 * happens, since this transaction may not be
4249 * dirty yet.
4250 */
4251 mp = ntp->t_mountp;
4252 if (!XFS_FORCED_SHUTDOWN(mp))
4253 xfs_force_shutdown(mp,
4254 (error == EFSCORRUPTED) ?
7d04a335
NS
4255 SHUTDOWN_CORRUPT_INCORE :
4256 SHUTDOWN_META_IO_ERROR);
1da177e4
LT
4257 return error;
4258 }
4259 xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
4260 free->xbfi_blockcount);
4261 xfs_bmap_del_free(flist, NULL, free);
4262 }
4263 return 0;
4264}
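/*
 * Illustrative caller sketch (not part of the build; locals are assumed):
 * the pattern this file expects around xfs_bmap_finish(), mirroring the
 * xfs_bmap_add_attrfork() code above.  The free list is built up during
 * the bmap work and only drained here, at the end of the operation; on
 * error the list must be cancelled and the transaction aborted.
 */
#if 0
	XFS_BMAP_INIT(&flist, &firstblock);
	/* ... xfs_bmapi()/xfs_bunmapi() work that queues extents ... */
	error = xfs_bmap_finish(&tp, &flist, &committed);
	if (error) {
		xfs_bmap_cancel(&flist);
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	} else {
		error = xfs_trans_commit(tp, XFS_TRANS_PERM_LOG_RES);
	}
#endif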
4265
4266/*
4267 * Free up any items left in the list.
4268 */
4269void
4270xfs_bmap_cancel(
4271 xfs_bmap_free_t *flist) /* list of bmap_free_items */
4272{
4273 xfs_bmap_free_item_t *free; /* free list item */
4274 xfs_bmap_free_item_t *next;
4275
4276 if (flist->xbf_count == 0)
4277 return;
4278 ASSERT(flist->xbf_first != NULL);
4279 for (free = flist->xbf_first; free; free = next) {
4280 next = free->xbfi_next;
4281 xfs_bmap_del_free(flist, NULL, free);
4282 }
4283 ASSERT(flist->xbf_count == 0);
4284}
4285
4286/*
4287 * Returns the file-relative block number of the first unused block(s)
4288 * in the file with at least "len" logically contiguous blocks free.
4289 * This is the lowest-address hole if the file has holes, else the first block
4290 * past the end of file.
4291 * Return 0 if the file is currently local (in-inode).
4292 */
4293int /* error */
4294xfs_bmap_first_unused(
4295 xfs_trans_t *tp, /* transaction pointer */
4296 xfs_inode_t *ip, /* incore inode */
4297 xfs_extlen_t len, /* size of hole to find */
4298 xfs_fileoff_t *first_unused, /* unused block */
4299 int whichfork) /* data or attr fork */
4300{
1da177e4
LT
4301 xfs_bmbt_rec_t *ep; /* pointer to an extent entry */
4302 int error; /* error return value */
4eea22f0 4303 int idx; /* extent record index */
1da177e4
LT
4304 xfs_ifork_t *ifp; /* inode fork pointer */
4305 xfs_fileoff_t lastaddr; /* last block number seen */
4306 xfs_fileoff_t lowest; /* lowest useful block */
4307 xfs_fileoff_t max; /* starting useful block */
4308 xfs_fileoff_t off; /* offset for this block */
4309 xfs_extnum_t nextents; /* number of extent entries */
4310
4311 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
4312 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
4313 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
4314 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4315 *first_unused = 0;
4316 return 0;
4317 }
4318 ifp = XFS_IFORK_PTR(ip, whichfork);
4319 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4320 (error = xfs_iread_extents(tp, ip, whichfork)))
4321 return error;
4322 lowest = *first_unused;
4323 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4eea22f0
MK
4324 for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
4325 ep = xfs_iext_get_ext(ifp, idx);
1da177e4
LT
4326 off = xfs_bmbt_get_startoff(ep);
4327 /*
4328 * See if the hole before this extent will work.
4329 */
4330 if (off >= lowest + len && off - max >= len) {
4331 *first_unused = max;
4332 return 0;
4333 }
4334 lastaddr = off + xfs_bmbt_get_blockcount(ep);
4335 max = XFS_FILEOFF_MAX(lastaddr, lowest);
4336 }
4337 *first_unused = max;
4338 return 0;
4339}
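/*
 * Worked example: with extent records covering file offsets [0,10) and
 * [14,20), lowest = 0 and len = 3, the loop sees off = 0 for the first
 * record (no hole before it) and sets max = 10, then sees off = 14 for the
 * second; 14 >= lowest + 3 and 14 - 10 >= 3, so it returns 10, the start
 * of the first hole that is big enough.  If no interior hole fits, the
 * offset just past the last extent is returned instead.
 */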
4340
4341/*
4342 * Returns the file-relative block number of the last block + 1 before
4343 * last_block (input value) in the file.
4eea22f0
MK
4344 * This is not based on i_size, it is based on the extent records.
4345 * Returns 0 for local files, as they do not have extent records.
1da177e4
LT
4346 */
4347int /* error */
4348xfs_bmap_last_before(
4349 xfs_trans_t *tp, /* transaction pointer */
4350 xfs_inode_t *ip, /* incore inode */
4351 xfs_fileoff_t *last_block, /* last block */
4352 int whichfork) /* data or attr fork */
4353{
4354 xfs_fileoff_t bno; /* input file offset */
4355 int eof; /* hit end of file */
4356 xfs_bmbt_rec_t *ep; /* pointer to last extent */
4357 int error; /* error return value */
4358 xfs_bmbt_irec_t got; /* current extent value */
4359 xfs_ifork_t *ifp; /* inode fork pointer */
4360 xfs_extnum_t lastx; /* last extent used */
4361 xfs_bmbt_irec_t prev; /* previous extent value */
4362
4363 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4364 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4365 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
4366 return XFS_ERROR(EIO);
4367 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4368 *last_block = 0;
4369 return 0;
4370 }
4371 ifp = XFS_IFORK_PTR(ip, whichfork);
4372 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4373 (error = xfs_iread_extents(tp, ip, whichfork)))
4374 return error;
4375 bno = *last_block - 1;
4376 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
4377 &prev);
4378 if (eof || xfs_bmbt_get_startoff(ep) > bno) {
4379 if (prev.br_startoff == NULLFILEOFF)
4380 *last_block = 0;
4381 else
4382 *last_block = prev.br_startoff + prev.br_blockcount;
4383 }
4384 /*
4385 * Otherwise *last_block is already the right answer.
4386 */
4387 return 0;
4388}
4389
4390/*
4391 * Returns the file-relative block number of the first block past eof in
4eea22f0
MK
4392 * the file. This is not based on i_size, it is based on the extent records.
4393 * Returns 0 for local files, as they do not have extent records.
1da177e4
LT
4394 */
4395int /* error */
4396xfs_bmap_last_offset(
4397 xfs_trans_t *tp, /* transaction pointer */
4398 xfs_inode_t *ip, /* incore inode */
4399 xfs_fileoff_t *last_block, /* last block */
4400 int whichfork) /* data or attr fork */
4401{
1da177e4
LT
4402 xfs_bmbt_rec_t *ep; /* pointer to last extent */
4403 int error; /* error return value */
4404 xfs_ifork_t *ifp; /* inode fork pointer */
4405 xfs_extnum_t nextents; /* number of extent entries */
4406
4407 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4408 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4409 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
4410 return XFS_ERROR(EIO);
4411 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4412 *last_block = 0;
4413 return 0;
4414 }
4415 ifp = XFS_IFORK_PTR(ip, whichfork);
4416 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4417 (error = xfs_iread_extents(tp, ip, whichfork)))
4418 return error;
4419 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4420 if (!nextents) {
4421 *last_block = 0;
4422 return 0;
4423 }
4eea22f0 4424 ep = xfs_iext_get_ext(ifp, nextents - 1);
1da177e4
LT
4425 *last_block = xfs_bmbt_get_startoff(ep) + xfs_bmbt_get_blockcount(ep);
4426 return 0;
4427}
4428
4429/*
4430 * Returns whether the selected fork of the inode has exactly one
4431 * block or not. For the data fork we check this matches di_size,
4432 * implying the file's range is 0..bsize-1.
4433 */
4434int /* 1=>1 block, 0=>otherwise */
4435xfs_bmap_one_block(
4436 xfs_inode_t *ip, /* incore inode */
4437 int whichfork) /* data or attr fork */
4438{
4439 xfs_bmbt_rec_t *ep; /* ptr to fork's extent */
4440 xfs_ifork_t *ifp; /* inode fork pointer */
4441 int rval; /* return value */
4442 xfs_bmbt_irec_t s; /* internal version of extent */
4443
4444#ifndef DEBUG
ba87ea69
LM
4445 if (whichfork == XFS_DATA_FORK) {
4446 return ((ip->i_d.di_mode & S_IFMT) == S_IFREG) ?
4447 (ip->i_size == ip->i_mount->m_sb.sb_blocksize) :
4448 (ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize);
4449 }
1da177e4
LT
4450#endif /* !DEBUG */
4451 if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
4452 return 0;
4453 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4454 return 0;
4455 ifp = XFS_IFORK_PTR(ip, whichfork);
4456 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
4eea22f0 4457 ep = xfs_iext_get_ext(ifp, 0);
1da177e4
LT
4458 xfs_bmbt_get_all(ep, &s);
4459 rval = s.br_startoff == 0 && s.br_blockcount == 1;
4460 if (rval && whichfork == XFS_DATA_FORK)
ba87ea69 4461 ASSERT(ip->i_size == ip->i_mount->m_sb.sb_blocksize);
1da177e4
LT
4462 return rval;
4463}
4464
4465/*
4466 * Read in the extents to if_extents.
4467 * All inode fields are set up by caller, we just traverse the btree
4468 * and copy the records in. If the file system cannot contain unwritten
4469 * extents, the records are checked to verify that no "state" flags are set.
4470 */
4471int /* error */
4472xfs_bmap_read_extents(
4473 xfs_trans_t *tp, /* transaction pointer */
4474 xfs_inode_t *ip, /* incore inode */
4475 int whichfork) /* data or attr fork */
4476{
4477 xfs_bmbt_block_t *block; /* current btree block */
4478 xfs_fsblock_t bno; /* block # of "block" */
4479 xfs_buf_t *bp; /* buffer for "block" */
4480 int error; /* error return value */
4481 xfs_exntfmt_t exntf; /* XFS_EXTFMT_NOSTATE, if checking */
4482#ifdef XFS_BMAP_TRACE
4483 static char fname[] = "xfs_bmap_read_extents";
4484#endif
4485 xfs_extnum_t i, j; /* index into the extents list */
4486 xfs_ifork_t *ifp; /* fork structure */
4487 int level; /* btree level, for checking */
4488 xfs_mount_t *mp; /* file system mount structure */
576039cf 4489 __be64 *pp; /* pointer to block address */
1da177e4
LT
4490 /* REFERENCED */
4491 xfs_extnum_t room; /* number of entries there's room for */
1da177e4
LT
4492
4493 bno = NULLFSBLOCK;
4494 mp = ip->i_mount;
4495 ifp = XFS_IFORK_PTR(ip, whichfork);
4496 exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
4497 XFS_EXTFMT_INODE(ip);
4498 block = ifp->if_broot;
4499 /*
4500 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
4501 */
16259e7d
CH
4502 level = be16_to_cpu(block->bb_level);
4503 ASSERT(level > 0);
1da177e4 4504 pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
576039cf
CH
4505 bno = be64_to_cpu(*pp);
4506 ASSERT(bno != NULLDFSBNO);
4507 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
4508 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
1da177e4
LT
4509 /*
4510 * Go down the tree until leaf level is reached, following the first
4511 * pointer (leftmost) at each level.
4512 */
4513 while (level-- > 0) {
4514 if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
4515 XFS_BMAP_BTREE_REF)))
4516 return error;
4517 block = XFS_BUF_TO_BMBT_BLOCK(bp);
4518 XFS_WANT_CORRUPTED_GOTO(
4519 XFS_BMAP_SANITY_CHECK(mp, block, level),
4520 error0);
4521 if (level == 0)
4522 break;
2c36dded 4523 pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
576039cf
CH
4524 bno = be64_to_cpu(*pp);
4525 XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
1da177e4
LT
4526 xfs_trans_brelse(tp, bp);
4527 }
4528 /*
4529 * Here with bp and block set to the leftmost leaf node in the tree.
4530 */
4eea22f0 4531 room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
1da177e4
LT
4532 i = 0;
4533 /*
4eea22f0 4534 * Loop over all leaf nodes. Copy information to the extent records.
1da177e4
LT
4535 */
4536 for (;;) {
4eea22f0 4537 xfs_bmbt_rec_t *frp, *trp;
1da177e4
LT
4538 xfs_fsblock_t nextbno;
4539 xfs_extnum_t num_recs;
4eea22f0 4540 xfs_extnum_t start;
1da177e4
LT
4541
4542
16259e7d 4543 num_recs = be16_to_cpu(block->bb_numrecs);
1da177e4
LT
4544 if (unlikely(i + num_recs > room)) {
4545 ASSERT(i + num_recs <= room);
3762ec6b
NS
4546 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
4547 "corrupt dinode %Lu, (btree extents).",
1da177e4
LT
4548 (unsigned long long) ip->i_ino);
4549 XFS_ERROR_REPORT("xfs_bmap_read_extents(1)",
4550 XFS_ERRLEVEL_LOW,
4551 ip->i_mount);
4552 goto error0;
4553 }
4554 XFS_WANT_CORRUPTED_GOTO(
4555 XFS_BMAP_SANITY_CHECK(mp, block, 0),
4556 error0);
4557 /*
4558 * Read-ahead the next leaf block, if any.
4559 */
16259e7d 4560 nextbno = be64_to_cpu(block->bb_rightsib);
1da177e4
LT
4561 if (nextbno != NULLFSBLOCK)
4562 xfs_btree_reada_bufl(mp, nextbno, 1);
4563 /*
4eea22f0 4564 * Copy records into the extent records.
1da177e4 4565 */
2c36dded 4566 frp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1);
4eea22f0
MK
4567 start = i;
4568 for (j = 0; j < num_recs; j++, i++, frp++) {
4569 trp = xfs_iext_get_ext(ifp, i);
1da177e4
LT
4570 trp->l0 = INT_GET(frp->l0, ARCH_CONVERT);
4571 trp->l1 = INT_GET(frp->l1, ARCH_CONVERT);
4572 }
4573 if (exntf == XFS_EXTFMT_NOSTATE) {
4574 /*
4575 * Check all attribute bmap btree records and
4576 * any "older" data bmap btree records for a
4577 * set bit in the "extent flag" position.
4578 */
4eea22f0
MK
4579 if (unlikely(xfs_check_nostate_extents(ifp,
4580 start, num_recs))) {
1da177e4
LT
4581 XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
4582 XFS_ERRLEVEL_LOW,
4583 ip->i_mount);
4584 goto error0;
4585 }
4586 }
1da177e4
LT
4587 xfs_trans_brelse(tp, bp);
4588 bno = nextbno;
4589 /*
4590 * If we've reached the end, stop.
4591 */
4592 if (bno == NULLFSBLOCK)
4593 break;
4594 if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
4595 XFS_BMAP_BTREE_REF)))
4596 return error;
4597 block = XFS_BUF_TO_BMBT_BLOCK(bp);
4598 }
4eea22f0 4599 ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
1da177e4
LT
4600 ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
4601 xfs_bmap_trace_exlist(fname, ip, i, whichfork);
4602 return 0;
4603error0:
4604 xfs_trans_brelse(tp, bp);
4605 return XFS_ERROR(EFSCORRUPTED);
4606}
4607
4608#ifdef XFS_BMAP_TRACE
4609/*
4eea22f0 4610 * Add bmap trace insert entries for all the contents of the extent records.
1da177e4
LT
4611 */
4612void
4613xfs_bmap_trace_exlist(
4614 char *fname, /* function name */
4615 xfs_inode_t *ip, /* incore inode pointer */
4616 xfs_extnum_t cnt, /* count of entries in the list */
4617 int whichfork) /* data or attr fork */
4618{
4eea22f0
MK
4619 xfs_bmbt_rec_t *ep; /* current extent record */
4620 xfs_extnum_t idx; /* extent record index */
1da177e4 4621 xfs_ifork_t *ifp; /* inode fork pointer */
4eea22f0 4622 xfs_bmbt_irec_t s; /* file extent record */
1da177e4
LT
4623
4624 ifp = XFS_IFORK_PTR(ip, whichfork);
4eea22f0
MK
4625 ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
4626 for (idx = 0; idx < cnt; idx++) {
4627 ep = xfs_iext_get_ext(ifp, idx);
1da177e4
LT
4628 xfs_bmbt_get_all(ep, &s);
4629 xfs_bmap_trace_insert(fname, "exlist", ip, idx, 1, &s, NULL,
4630 whichfork);
4631 }
4632}
4633#endif
4634
4635#ifdef DEBUG
4636/*
4637 * Validate that the bmbt_irecs being returned from bmapi are valid
4638 * given the caller's original parameters. Specifically check the
4639 * ranges of the returned irecs to ensure that they only extend beyond
4640 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
4641 */
4642STATIC void
4643xfs_bmap_validate_ret(
4644 xfs_fileoff_t bno,
4645 xfs_filblks_t len,
4646 int flags,
4647 xfs_bmbt_irec_t *mval,
4648 int nmap,
4649 int ret_nmap)
4650{
4651 int i; /* index to map values */
4652
4653 ASSERT(ret_nmap <= nmap);
4654
4655 for (i = 0; i < ret_nmap; i++) {
4656 ASSERT(mval[i].br_blockcount > 0);
4657 if (!(flags & XFS_BMAPI_ENTIRE)) {
4658 ASSERT(mval[i].br_startoff >= bno);
4659 ASSERT(mval[i].br_blockcount <= len);
4660 ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
4661 bno + len);
4662 } else {
4663 ASSERT(mval[i].br_startoff < bno + len);
4664 ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
4665 bno);
4666 }
4667 ASSERT(i == 0 ||
4668 mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
4669 mval[i].br_startoff);
4670 if ((flags & XFS_BMAPI_WRITE) && !(flags & XFS_BMAPI_DELAY))
4671 ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
4672 mval[i].br_startblock != HOLESTARTBLOCK);
4673 ASSERT(mval[i].br_state == XFS_EXT_NORM ||
4674 mval[i].br_state == XFS_EXT_UNWRITTEN);
4675 }
4676}
4677#endif /* DEBUG */
4678
4679
4680/*
4681 * Map file blocks to filesystem blocks.
4682 * File range is given by the bno/len pair.
4683 * Adds blocks to file if a write ("flags & XFS_BMAPI_WRITE" set)
4684 * into a hole or past eof.
4685 * Only allocates blocks from a single allocation group,
4686 * to avoid locking problems.
4687 * The returned value in "firstblock" from the first call in a transaction
4688 * must be remembered and presented to subsequent calls in "firstblock".
4689 * An upper bound for the number of blocks to be allocated is supplied to
4690 * the first call in "total"; if no allocation group has that many free
4691 * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
4692 */
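/*
 * Illustrative read-side call (not part of the build; offset_fsb, count_fsb
 * and the mapping array are made-up locals): for a pure lookup no
 * transaction, free list or delta tracking is needed, so those arguments
 * may be NULL and "total" may be 0.  On return *nmap says how many mval
 * entries were filled in; holes are reported with br_startblock set to
 * HOLESTARTBLOCK.
 */
#if 0
	xfs_bmbt_irec_t	imap[2];
	int		nimaps = 2;

	error = xfs_bmapi(NULL, ip, offset_fsb, count_fsb, 0, NULL, 0,
			  imap, &nimaps, NULL, NULL);
	if (!error) {
		/* imap[0 .. nimaps-1] now describe the range */
	}
#endif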
4693int /* error */
4694xfs_bmapi(
4695 xfs_trans_t *tp, /* transaction pointer */
4696 xfs_inode_t *ip, /* incore inode */
4697 xfs_fileoff_t bno, /* starting file offs. mapped */
4698 xfs_filblks_t len, /* length to map in file */
4699 int flags, /* XFS_BMAPI_... */
4700 xfs_fsblock_t *firstblock, /* first allocated block
4701 controls a.g. for allocs */
4702 xfs_extlen_t total, /* total blocks needed */
4703 xfs_bmbt_irec_t *mval, /* output: map values */
4704 int *nmap, /* i/o: mval size/count */
3e57ecf6
OW
4705 xfs_bmap_free_t *flist, /* i/o: list extents to free */
4706 xfs_extdelta_t *delta) /* o: change made to incore extents */
1da177e4
LT
4707{
4708 xfs_fsblock_t abno; /* allocated block number */
4709 xfs_extlen_t alen; /* allocated extent length */
4710 xfs_fileoff_t aoff; /* allocated file offset */
4711 xfs_bmalloca_t bma; /* args for xfs_bmap_alloc */
1da177e4 4712 xfs_btree_cur_t *cur; /* bmap btree cursor */
1da177e4 4713 xfs_fileoff_t end; /* end of mapped file region */
4eea22f0 4714 int eof; /* we've hit the end of extents */
4eea22f0 4715 xfs_bmbt_rec_t *ep; /* extent record pointer */
1da177e4 4716 int error; /* error return */
4eea22f0 4717 xfs_bmbt_irec_t got; /* current file extent record */
1da177e4
LT
4718 xfs_ifork_t *ifp; /* inode fork pointer */
4719 xfs_extlen_t indlen; /* indirect blocks length */
1da177e4
LT
4720 xfs_extnum_t lastx; /* last useful extent number */
4721 int logflags; /* flags for transaction logging */
4722 xfs_extlen_t minleft; /* min blocks left after allocation */
4723 xfs_extlen_t minlen; /* min allocation size */
4724 xfs_mount_t *mp; /* xfs mount structure */
4725 int n; /* current extent index */
4726 int nallocs; /* number of extents alloc'd */
4727 xfs_extnum_t nextents; /* number of extents in file */
4728 xfs_fileoff_t obno; /* old block number (offset) */
4eea22f0 4729 xfs_bmbt_irec_t prev; /* previous file extent record */
1da177e4 4730 int tmp_logflags; /* temp flags holder */
06d10dd9
NS
4731 int whichfork; /* data or attr fork */
4732 char inhole; /* current location is hole in file */
1da177e4 4733 char wasdelay; /* old extent was delayed */
1da177e4 4734 char wr; /* this is a write request */
06d10dd9 4735 char rt; /* this is a realtime file */
1da177e4
LT
4736#ifdef DEBUG
4737 xfs_fileoff_t orig_bno; /* original block number value */
4738 int orig_flags; /* original flags arg value */
4739 xfs_filblks_t orig_len; /* original value of len arg */
4740 xfs_bmbt_irec_t *orig_mval; /* original value of mval */
4741 int orig_nmap; /* original value of *nmap */
4742
4743 orig_bno = bno;
4744 orig_len = len;
4745 orig_flags = flags;
4746 orig_mval = mval;
4747 orig_nmap = *nmap;
4748#endif
4749 ASSERT(*nmap >= 1);
4750 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP || !(flags & XFS_BMAPI_WRITE));
4751 whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4752 XFS_ATTR_FORK : XFS_DATA_FORK;
4753 mp = ip->i_mount;
4754 if (unlikely(XFS_TEST_ERROR(
4755 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4756 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4757 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL),
4758 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4759 XFS_ERROR_REPORT("xfs_bmapi", XFS_ERRLEVEL_LOW, mp);
4760 return XFS_ERROR(EFSCORRUPTED);
4761 }
4762 if (XFS_FORCED_SHUTDOWN(mp))
4763 return XFS_ERROR(EIO);
dd9f438e 4764 rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
1da177e4
LT
4765 ifp = XFS_IFORK_PTR(ip, whichfork);
4766 ASSERT(ifp->if_ext_max ==
4767 XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
4768 if ((wr = (flags & XFS_BMAPI_WRITE)) != 0)
4769 XFS_STATS_INC(xs_blk_mapw);
4770 else
4771 XFS_STATS_INC(xs_blk_mapr);
1da177e4 4772 /*
39269e29 4773 * IGSTATE flag is used to combine extents which
1da177e4
LT
4774 * differ only due to the state of the extents.
4775 * This technique is used from xfs_getbmap()
4776 * when the caller does not wish to see the
4777 * separation (which is the default).
4778 *
4779 * This technique is also used when writing a
4780 * buffer which has been partially written,
4781 * (usually by being flushed during a chunkread),
4782 * to ensure one write takes place. This also
4783 * prevents a change in the xfs inode extents at
4784 * this time, intentionally. This change occurs
4785 * on completion of the write operation, in
4786 * xfs_strat_comp(), where the xfs_bmapi() call
4787 * is transactioned, and the extents combined.
4788 */
39269e29
NS
4789 if ((flags & XFS_BMAPI_IGSTATE) && wr) /* if writing unwritten space */
4790 wr = 0; /* no allocations are allowed */
4791 ASSERT(wr || !(flags & XFS_BMAPI_DELAY));
1da177e4
LT
4792 logflags = 0;
4793 nallocs = 0;
4794 cur = NULL;
4795 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4796 ASSERT(wr && tp);
4797 if ((error = xfs_bmap_local_to_extents(tp, ip,
4798 firstblock, total, &logflags, whichfork)))
4799 goto error0;
4800 }
4801 if (wr && *firstblock == NULLFSBLOCK) {
4802 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
16259e7d 4803 minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
1da177e4
LT
4804 else
4805 minleft = 1;
4806 } else
4807 minleft = 0;
4808 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4809 (error = xfs_iread_extents(tp, ip, whichfork)))
4810 goto error0;
4811 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
4812 &prev);
4813 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4814 n = 0;
4815 end = bno + len;
4816 obno = bno;
4817 bma.ip = NULL;
3e57ecf6
OW
4818 if (delta) {
4819 delta->xed_startoff = NULLFILEOFF;
4820 delta->xed_blockcount = 0;
4821 }
1da177e4
LT
4822 while (bno < end && n < *nmap) {
4823 /*
4824 * Reading past eof, act as though there's a hole
4825 * up to end.
4826 */
4827 if (eof && !wr)
4828 got.br_startoff = end;
4829 inhole = eof || got.br_startoff > bno;
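/*
 * wasdelay: bno falls inside an existing delayed-allocation extent,
 * so this write converts that reservation into a real allocation
 * rather than filling a hole.
 */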
39269e29 4830 wasdelay = wr && !inhole && !(flags & XFS_BMAPI_DELAY) &&
1da177e4
LT
4831 ISNULLSTARTBLOCK(got.br_startblock);
4832 /*
4833 * First, deal with the hole before the allocated space
4834 * that we found, if any.
4835 */
4836 if (wr && (inhole || wasdelay)) {
4837 /*
4838 * For the wasdelay case, we could also just
4839 * allocate the stuff asked for in this bmap call
4840 * but that wouldn't be as good.
4841 */
39269e29 4842 if (wasdelay && !(flags & XFS_BMAPI_EXACT)) {
1da177e4
LT
4843 alen = (xfs_extlen_t)got.br_blockcount;
4844 aoff = got.br_startoff;
4845 if (lastx != NULLEXTNUM && lastx) {
4eea22f0 4846 ep = xfs_iext_get_ext(ifp, lastx - 1);
1da177e4
LT
4847 xfs_bmbt_get_all(ep, &prev);
4848 }
4849 } else if (wasdelay) {
4850 alen = (xfs_extlen_t)
4851 XFS_FILBLKS_MIN(len,
4852 (got.br_startoff +
4853 got.br_blockcount) - bno);
4854 aoff = bno;
4855 } else {
4856 alen = (xfs_extlen_t)
4857 XFS_FILBLKS_MIN(len, MAXEXTLEN);
4858 if (!eof)
4859 alen = (xfs_extlen_t)
4860 XFS_FILBLKS_MIN(alen,
4861 got.br_startoff - bno);
4862 aoff = bno;
4863 }
39269e29
NS
4864 minlen = (flags & XFS_BMAPI_CONTIG) ? alen : 1;
4865 if (flags & XFS_BMAPI_DELAY) {
dd9f438e 4866 xfs_extlen_t extsz;
06d10dd9
NS
4867
4868 /* Figure out the extent size, adjust alen */
957d0ebe 4869 extsz = xfs_get_extsz_hint(ip);
dd9f438e
NS
4870 if (extsz) {
4871 error = xfs_bmap_extsize_align(mp,
4872 &got, &prev, extsz,
39269e29
NS
4873 rt, eof,
4874 flags&XFS_BMAPI_DELAY,
4875 flags&XFS_BMAPI_CONVERT,
dd9f438e
NS
4876 &aoff, &alen);
4877 ASSERT(!error);
06d10dd9
NS
4878 }
4879
dd9f438e
NS
4880 if (rt)
4881 extsz = alen / mp->m_sb.sb_rextsize;
4882
1da177e4
LT
4883 /*
4884 * Make a transaction-less quota reservation for
4885 * delayed allocation blocks. This number gets
9a2a7de2
NS
4886 * adjusted later. If the reservation fails before we have
4887 * mapped anything in this loop, return the error to the caller.
1da177e4 4888 */
9a2a7de2 4889 if ((error = XFS_TRANS_RESERVE_QUOTA_NBLKS(
06d10dd9
NS
4890 mp, NULL, ip, (long)alen, 0,
4891 rt ? XFS_QMOPT_RES_RTBLKS :
9a2a7de2 4892 XFS_QMOPT_RES_REGBLKS))) {
1da177e4
LT
4893 if (n == 0) {
4894 *nmap = 0;
4895 ASSERT(cur == NULL);
9a2a7de2 4896 return error;
1da177e4
LT
4897 }
4898 break;
4899 }
4900
4901 /*
4902 * Split changing sb for alen and indlen since
4903 * they could be coming from different places.
4904 */
06d10dd9
NS
4905 indlen = (xfs_extlen_t)
4906 xfs_bmap_worst_indlen(ip, alen);
4907 ASSERT(indlen > 0);
1da177e4 4908
dd9f438e 4909 if (rt) {
06d10dd9
NS
4910 error = xfs_mod_incore_sb(mp,
4911 XFS_SBS_FREXTENTS,
20f4ebf2 4912 -((int64_t)extsz), (flags &
39269e29 4913 XFS_BMAPI_RSVBLOCKS));
dd9f438e 4914 } else {
06d10dd9
NS
4915 error = xfs_mod_incore_sb(mp,
4916 XFS_SBS_FDBLOCKS,
20f4ebf2 4917 -((int64_t)alen), (flags &
39269e29 4918 XFS_BMAPI_RSVBLOCKS));
dd9f438e 4919 }
3bdbfb10 4920 if (!error) {
06d10dd9
NS
4921 error = xfs_mod_incore_sb(mp,
4922 XFS_SBS_FDBLOCKS,
20f4ebf2 4923 -((int64_t)indlen), (flags &
39269e29 4924 XFS_BMAPI_RSVBLOCKS));
3ddb8fa9
NS
4925 if (error && rt)
4926 xfs_mod_incore_sb(mp,
3bdbfb10 4927 XFS_SBS_FREXTENTS,
20f4ebf2 4928 (int64_t)extsz, (flags &
39269e29 4929 XFS_BMAPI_RSVBLOCKS));
3ddb8fa9
NS
4930 else if (error)
4931 xfs_mod_incore_sb(mp,
3bdbfb10 4932 XFS_SBS_FDBLOCKS,
20f4ebf2 4933 (int64_t)alen, (flags &
39269e29 4934 XFS_BMAPI_RSVBLOCKS));
3bdbfb10 4935 }
06d10dd9
NS
4936
4937 if (error) {
3ddb8fa9 4938 if (XFS_IS_QUOTA_ON(mp))
06d10dd9 4939 /* unreserve the blocks now */
dd9f438e 4940 (void)
06d10dd9
NS
4941 XFS_TRANS_UNRESERVE_QUOTA_NBLKS(
4942 mp, NULL, ip,
4943 (long)alen, 0, rt ?
4944 XFS_QMOPT_RES_RTBLKS :
4945 XFS_QMOPT_RES_REGBLKS);
1da177e4
LT
4946 break;
4947 }
06d10dd9 4948
1da177e4
LT
4949 ip->i_delayed_blks += alen;
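/*
 * There is no real disk block yet for a delayed allocation; encode
 * the worst-case indirect (bmap btree) block reservation into the
 * startblock field with NULLSTARTBLOCK so it can be read back later
 * with STARTBLOCKVAL when the extent is finally allocated.
 */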
4950 abno = NULLSTARTBLOCK(indlen);
4951 } else {
4952 /*
4953 * If first time, allocate and fill in
4954 * once-only bma fields.
4955 */
4956 if (bma.ip == NULL) {
4957 bma.tp = tp;
4958 bma.ip = ip;
4959 bma.prevp = &prev;
4960 bma.gotp = &got;
4961 bma.total = total;
4962 bma.userdata = 0;
4963 }
4964 /* Indicate if this is the first user data
4965 * in the file, or just any user data.
4966 */
39269e29 4967 if (!(flags & XFS_BMAPI_METADATA)) {
1da177e4
LT
4968 bma.userdata = (aoff == 0) ?
4969 XFS_ALLOC_INITIAL_USER_DATA :
4970 XFS_ALLOC_USERDATA;
4971 }
4972 /*
4973 * Fill in changeable bma fields.
4974 */
4975 bma.eof = eof;
4976 bma.firstblock = *firstblock;
4977 bma.alen = alen;
4978 bma.off = aoff;
7288026b 4979 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
1da177e4
LT
4980 bma.wasdel = wasdelay;
4981 bma.minlen = minlen;
4982 bma.low = flist->xbf_low;
4983 bma.minleft = minleft;
4984 /*
4985 * Only want to do the alignment at the
4986 * eof if it is userdata and allocation length
4987 * is larger than a stripe unit.
4988 */
4989 if (mp->m_dalign && alen >= mp->m_dalign &&
39269e29
NS
4990 (!(flags & XFS_BMAPI_METADATA)) &&
4991 (whichfork == XFS_DATA_FORK)) {
1da177e4
LT
4992 if ((error = xfs_bmap_isaeof(ip, aoff,
4993 whichfork, &bma.aeof)))
4994 goto error0;
4995 } else
4996 bma.aeof = 0;
4997 /*
4998 * Call allocator.
4999 */
5000 if ((error = xfs_bmap_alloc(&bma)))
5001 goto error0;
5002 /*
5003 * Copy out result fields.
5004 */
5005 abno = bma.rval;
5006 if ((flist->xbf_low = bma.low))
5007 minleft = 0;
5008 alen = bma.alen;
5009 aoff = bma.off;
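/*
 * Allocations made within one transaction must stay in the AG of
 * *firstblock, or move to a higher-numbered AG once we have fallen
 * back to low-space mode (xbf_low), so that AG buffers are always
 * locked in ascending order; the ASSERT below checks exactly that.
 */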
5010 ASSERT(*firstblock == NULLFSBLOCK ||
5011 XFS_FSB_TO_AGNO(mp, *firstblock) ==
5012 XFS_FSB_TO_AGNO(mp, bma.firstblock) ||
5013 (flist->xbf_low &&
5014 XFS_FSB_TO_AGNO(mp, *firstblock) <
5015 XFS_FSB_TO_AGNO(mp, bma.firstblock)));
5016 *firstblock = bma.firstblock;
5017 if (cur)
5018 cur->bc_private.b.firstblock =
5019 *firstblock;
5020 if (abno == NULLFSBLOCK)
5021 break;
5022 if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
5023 cur = xfs_btree_init_cursor(mp,
5024 tp, NULL, 0, XFS_BTNUM_BMAP,
5025 ip, whichfork);
5026 cur->bc_private.b.firstblock =
5027 *firstblock;
5028 cur->bc_private.b.flist = flist;
5029 }
5030 /*
5031 * Bump the number of extents we've allocated
5032 * in this call.
5033 */
5034 nallocs++;
5035 }
5036 if (cur)
5037 cur->bc_private.b.flags =
5038 wasdelay ? XFS_BTCUR_BPRV_WASDEL : 0;
5039 got.br_startoff = aoff;
5040 got.br_startblock = abno;
5041 got.br_blockcount = alen;
5042 got.br_state = XFS_EXT_NORM; /* assume normal */
5043 /*
5044 * Determine state of extent, and the filesystem.
5045 * A wasdelay extent has been initialized, so
5046 * shouldn't be flagged as unwritten.
5047 */
5048 if (wr && XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) {
5049 if (!wasdelay && (flags & XFS_BMAPI_PREALLOC))
5050 got.br_state = XFS_EXT_UNWRITTEN;
5051 }
5052 error = xfs_bmap_add_extent(ip, lastx, &cur, &got,
3e57ecf6
OW
5053 firstblock, flist, &tmp_logflags, delta,
5054 whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
1da177e4
LT
5055 logflags |= tmp_logflags;
5056 if (error)
5057 goto error0;
5058 lastx = ifp->if_lastex;
4eea22f0 5059 ep = xfs_iext_get_ext(ifp, lastx);
1da177e4
LT
5060 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5061 xfs_bmbt_get_all(ep, &got);
5062 ASSERT(got.br_startoff <= aoff);
5063 ASSERT(got.br_startoff + got.br_blockcount >=
5064 aoff + alen);
5065#ifdef DEBUG
39269e29 5066 if (flags & XFS_BMAPI_DELAY) {
1da177e4
LT
5067 ASSERT(ISNULLSTARTBLOCK(got.br_startblock));
5068 ASSERT(STARTBLOCKVAL(got.br_startblock) > 0);
5069 }
5070 ASSERT(got.br_state == XFS_EXT_NORM ||
5071 got.br_state == XFS_EXT_UNWRITTEN);
5072#endif
5073 /*
5074 * Fall down into the found allocated space case.
5075 */
5076 } else if (inhole) {
5077 /*
5078 * Reading in a hole.
5079 */
5080 mval->br_startoff = bno;
5081 mval->br_startblock = HOLESTARTBLOCK;
5082 mval->br_blockcount =
5083 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
5084 mval->br_state = XFS_EXT_NORM;
5085 bno += mval->br_blockcount;
5086 len -= mval->br_blockcount;
5087 mval++;
5088 n++;
5089 continue;
5090 }
5091 /*
5092 * Then deal with the allocated space we found.
5093 */
5094 ASSERT(ep != NULL);
39269e29
NS
5095 if (!(flags & XFS_BMAPI_ENTIRE) &&
5096 (got.br_startoff + got.br_blockcount > obno)) {
1da177e4
LT
5097 if (obno > bno)
5098 bno = obno;
5099 ASSERT((bno >= obno) || (n == 0));
5100 ASSERT(bno < end);
5101 mval->br_startoff = bno;
5102 if (ISNULLSTARTBLOCK(got.br_startblock)) {
39269e29 5103 ASSERT(!wr || (flags & XFS_BMAPI_DELAY));
1da177e4
LT
5104 mval->br_startblock = DELAYSTARTBLOCK;
5105 } else
5106 mval->br_startblock =
5107 got.br_startblock +
5108 (bno - got.br_startoff);
5109 /*
5110 * Return the minimum of what we got and what we
5111 * asked for as the length. We can use the len
5112 * variable here because it is modified below and
5113 * we could have passed through here before if the
5114 * first part of the allocation didn't overlap what
5115 * was asked for.
5116 */
5117 mval->br_blockcount =
5118 XFS_FILBLKS_MIN(end - bno, got.br_blockcount -
5119 (bno - got.br_startoff));
5120 mval->br_state = got.br_state;
5121 ASSERT(mval->br_blockcount <= len);
5122 } else {
5123 *mval = got;
5124 if (ISNULLSTARTBLOCK(mval->br_startblock)) {
39269e29 5125 ASSERT(!wr || (flags & XFS_BMAPI_DELAY));
1da177e4
LT
5126 mval->br_startblock = DELAYSTARTBLOCK;
5127 }
5128 }
5129
5130 /*
5131 * Check if writing previously allocated but
5132 * unwritten extents.
5133 */
5134 if (wr && mval->br_state == XFS_EXT_UNWRITTEN &&
5135 ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_DELAY)) == 0)) {
5136 /*
5137 * Modify (by adding) the state flag, if writing.
5138 */
5139 ASSERT(mval->br_blockcount <= len);
5140 if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
5141 cur = xfs_btree_init_cursor(mp,
5142 tp, NULL, 0, XFS_BTNUM_BMAP,
5143 ip, whichfork);
5144 cur->bc_private.b.firstblock =
5145 *firstblock;
5146 cur->bc_private.b.flist = flist;
5147 }
5148 mval->br_state = XFS_EXT_NORM;
5149 error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
3e57ecf6
OW
5150 firstblock, flist, &tmp_logflags, delta,
5151 whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
1da177e4
LT
5152 logflags |= tmp_logflags;
5153 if (error)
5154 goto error0;
5155 lastx = ifp->if_lastex;
4eea22f0 5156 ep = xfs_iext_get_ext(ifp, lastx);
1da177e4
LT
5157 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5158 xfs_bmbt_get_all(ep, &got);
5159 /*
5160 * We may have combined previously unwritten
5161 * space with written space, so generate
5162 * another request.
5163 */
5164 if (mval->br_blockcount < len)
5165 continue;
5166 }
5167
39269e29 5168 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
1da177e4 5169 ((mval->br_startoff + mval->br_blockcount) <= end));
39269e29
NS
5170 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
5171 (mval->br_blockcount <= len) ||
1da177e4
LT
5172 (mval->br_startoff < obno));
5173 bno = mval->br_startoff + mval->br_blockcount;
5174 len = end - bno;
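/*
 * Try to fold this mapping into the previous entry of the caller's
 * array: the same starting offset (a mapping that simply grew),
 * physically contiguous real extents with compatible state, or two
 * adjacent delayed extents. Otherwise advance to the next map entry.
 */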
5175 if (n > 0 && mval->br_startoff == mval[-1].br_startoff) {
5176 ASSERT(mval->br_startblock == mval[-1].br_startblock);
5177 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
5178 ASSERT(mval->br_state == mval[-1].br_state);
5179 mval[-1].br_blockcount = mval->br_blockcount;
5180 mval[-1].br_state = mval->br_state;
5181 } else if (n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
5182 mval[-1].br_startblock != DELAYSTARTBLOCK &&
5183 mval[-1].br_startblock != HOLESTARTBLOCK &&
5184 mval->br_startblock ==
5185 mval[-1].br_startblock + mval[-1].br_blockcount &&
39269e29
NS
5186 ((flags & XFS_BMAPI_IGSTATE) ||
5187 mval[-1].br_state == mval->br_state)) {
1da177e4
LT
5188 ASSERT(mval->br_startoff ==
5189 mval[-1].br_startoff + mval[-1].br_blockcount);
5190 mval[-1].br_blockcount += mval->br_blockcount;
5191 } else if (n > 0 &&
5192 mval->br_startblock == DELAYSTARTBLOCK &&
5193 mval[-1].br_startblock == DELAYSTARTBLOCK &&
5194 mval->br_startoff ==
5195 mval[-1].br_startoff + mval[-1].br_blockcount) {
5196 mval[-1].br_blockcount += mval->br_blockcount;
5197 mval[-1].br_state = mval->br_state;
5198 } else if (!((n == 0) &&
5199 ((mval->br_startoff + mval->br_blockcount) <=
5200 obno))) {
5201 mval++;
5202 n++;
5203 }
5204 /*
5205 * If we're done, stop now. Also stop once we've filled the
5206 * caller's map array or allocated that many extents, no matter
5207 * what, otherwise the transaction may get too big.
5208 */
5209 if (bno >= end || n >= *nmap || nallocs >= *nmap)
5210 break;
5211 /*
5212 * Else go on to the next record.
5213 */
4eea22f0 5214 ep = xfs_iext_get_ext(ifp, ++lastx);
4e5ae838
DC
5215 prev = got;
5216 if (lastx >= nextents)
1da177e4 5217 eof = 1;
4e5ae838 5218 else
1da177e4
LT
5219 xfs_bmbt_get_all(ep, &got);
5220 }
5221 ifp->if_lastex = lastx;
5222 *nmap = n;
5223 /*
5224 * Transform from btree to extents, give it cur.
5225 */
5226 if (tp && XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
5227 XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
5228 ASSERT(wr && cur);
5229 error = xfs_bmap_btree_to_extents(tp, ip, cur,
5230 &tmp_logflags, whichfork);
5231 logflags |= tmp_logflags;
5232 if (error)
5233 goto error0;
5234 }
5235 ASSERT(ifp->if_ext_max ==
5236 XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5237 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
5238 XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max);
5239 error = 0;
3e57ecf6
OW
5240 if (delta && delta->xed_startoff != NULLFILEOFF) {
5241 /* A change was actually made.
5242 * Note that delta->xed_blockcount is an offset at this
5243 * point and needs to be converted to a block count.
5244 */
5245 ASSERT(delta->xed_blockcount > delta->xed_startoff);
5246 delta->xed_blockcount -= delta->xed_startoff;
5247 }
1da177e4
LT
5248error0:
5249 /*
5250 * Log everything. Do this after conversion, there's no point in
4eea22f0 5251 * logging the extent records if we've converted to btree format.
1da177e4
LT
5252 */
5253 if ((logflags & XFS_ILOG_FEXT(whichfork)) &&
5254 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5255 logflags &= ~XFS_ILOG_FEXT(whichfork);
5256 else if ((logflags & XFS_ILOG_FBROOT(whichfork)) &&
5257 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5258 logflags &= ~XFS_ILOG_FBROOT(whichfork);
5259 /*
5260 * Log whatever the flags say, even if error. Otherwise we might miss
5261 * detecting a case where the data is changed, there's an error,
5262 * and it's not logged so we don't shutdown when we should.
5263 */
5264 if (logflags) {
5265 ASSERT(tp && wr);
5266 xfs_trans_log_inode(tp, ip, logflags);
5267 }
5268 if (cur) {
5269 if (!error) {
5270 ASSERT(*firstblock == NULLFSBLOCK ||
5271 XFS_FSB_TO_AGNO(mp, *firstblock) ==
5272 XFS_FSB_TO_AGNO(mp,
5273 cur->bc_private.b.firstblock) ||
5274 (flist->xbf_low &&
5275 XFS_FSB_TO_AGNO(mp, *firstblock) <
5276 XFS_FSB_TO_AGNO(mp,
5277 cur->bc_private.b.firstblock)));
5278 *firstblock = cur->bc_private.b.firstblock;
5279 }
5280 xfs_btree_del_cursor(cur,
5281 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5282 }
5283 if (!error)
5284 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
5285 orig_nmap, *nmap);
5286 return error;
5287}
5288
5289/*
5290 * Map file blocks to filesystem blocks, simple version.
5291 * One block (extent) only, read-only.
5292 * For flags, only the XFS_BMAPI_ATTRFORK flag is examined.
5293 * For the other flag values, the effect is as if XFS_BMAPI_METADATA
5294 * was set and all the others were clear.
5295 */
5296int /* error */
5297xfs_bmapi_single(
5298 xfs_trans_t *tp, /* transaction pointer */
5299 xfs_inode_t *ip, /* incore inode */
5300 int whichfork, /* data or attr fork */
5301 xfs_fsblock_t *fsb, /* output: mapped block */
5302 xfs_fileoff_t bno) /* starting file offs. mapped */
5303{
4eea22f0 5304 int eof; /* we've hit the end of extents */
1da177e4 5305 int error; /* error return */
4eea22f0 5306 xfs_bmbt_irec_t got; /* current file extent record */
1da177e4
LT
5307 xfs_ifork_t *ifp; /* inode fork pointer */
5308 xfs_extnum_t lastx; /* last useful extent number */
4eea22f0 5309 xfs_bmbt_irec_t prev; /* previous file extent record */
1da177e4
LT
5310
5311 ifp = XFS_IFORK_PTR(ip, whichfork);
5312 if (unlikely(
5313 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
5314 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)) {
5315 XFS_ERROR_REPORT("xfs_bmapi_single", XFS_ERRLEVEL_LOW,
5316 ip->i_mount);
5317 return XFS_ERROR(EFSCORRUPTED);
5318 }
5319 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
5320 return XFS_ERROR(EIO);
5321 XFS_STATS_INC(xs_blk_mapr);
5322 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5323 (error = xfs_iread_extents(tp, ip, whichfork)))
5324 return error;
5325 (void)xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5326 &prev);
5327 /*
5328 * Reading past eof, act as though there's a hole
5329 * up to end.
5330 */
5331 if (eof || got.br_startoff > bno) {
5332 *fsb = NULLFSBLOCK;
5333 return 0;
5334 }
5335 ASSERT(!ISNULLSTARTBLOCK(got.br_startblock));
5336 ASSERT(bno < got.br_startoff + got.br_blockcount);
5337 *fsb = got.br_startblock + (bno - got.br_startoff);
5338 ifp->if_lastex = lastx;
5339 return 0;
5340}
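/*
 * Illustrative only, not part of this file: a minimal sketch of how a
 * read-side caller might use xfs_bmapi_single() to turn one file block
 * into a disk address. "ip", "offset_fsb" and "daddr" are assumed
 * caller-side names; the inode is expected to be appropriately locked.
 *
 *	xfs_fsblock_t	fsb;
 *	int		error;
 *
 *	error = xfs_bmapi_single(NULL, ip, XFS_DATA_FORK, &fsb, offset_fsb);
 *	if (!error && fsb != NULLFSBLOCK)
 *		daddr = XFS_FSB_TO_DADDR(ip->i_mount, fsb);
 */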
5341
5342/*
5343 * Unmap (remove) blocks from a file.
5344 * If nexts is nonzero then the number of extents to remove is limited to
5345 * that value. *done is set when the entire block range has been
5346 * unmapped, and left clear if extents remain to be removed.
5347 */
5348int /* error */
5349xfs_bunmapi(
5350 xfs_trans_t *tp, /* transaction pointer */
5351 struct xfs_inode *ip, /* incore inode */
5352 xfs_fileoff_t bno, /* starting offset to unmap */
5353 xfs_filblks_t len, /* length to unmap in file */
5354 int flags, /* misc flags */
5355 xfs_extnum_t nexts, /* number of extents max */
5356 xfs_fsblock_t *firstblock, /* first allocated block
5357 controls a.g. for allocs */
5358 xfs_bmap_free_t *flist, /* i/o: list extents to free */
3e57ecf6
OW
5359 xfs_extdelta_t *delta, /* o: change made to incore
5360 extents */
1da177e4
LT
5361 int *done) /* out: set when fully unmapped */
5362{
5363 xfs_btree_cur_t *cur; /* bmap btree cursor */
5364 xfs_bmbt_irec_t del; /* extent being deleted */
5365 int eof; /* is deleting at eof */
4eea22f0 5366 xfs_bmbt_rec_t *ep; /* extent record pointer */
1da177e4
LT
5367 int error; /* error return value */
5368 xfs_extnum_t extno; /* extent number in list */
4eea22f0 5369 xfs_bmbt_irec_t got; /* current extent record */
1da177e4
LT
5370 xfs_ifork_t *ifp; /* inode fork pointer */
5371 int isrt; /* freeing in rt area */
5372 xfs_extnum_t lastx; /* last extent index used */
5373 int logflags; /* transaction logging flags */
5374 xfs_extlen_t mod; /* rt extent offset */
5375 xfs_mount_t *mp; /* mount structure */
4eea22f0
MK
5376 xfs_extnum_t nextents; /* number of file extents */
5377 xfs_bmbt_irec_t prev; /* previous extent record */
1da177e4
LT
5378 xfs_fileoff_t start; /* first file offset deleted */
5379 int tmp_logflags; /* partial logging flags */
5380 int wasdel; /* was a delayed alloc extent */
5381 int whichfork; /* data or attribute fork */
5382 int rsvd; /* OK to allocate reserved blocks */
5383 xfs_fsblock_t sum;
5384
5385 xfs_bunmap_trace(ip, bno, len, flags, (inst_t *)__return_address);
5386 whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
5387 XFS_ATTR_FORK : XFS_DATA_FORK;
5388 ifp = XFS_IFORK_PTR(ip, whichfork);
5389 if (unlikely(
5390 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5391 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5392 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5393 ip->i_mount);
5394 return XFS_ERROR(EFSCORRUPTED);
5395 }
5396 mp = ip->i_mount;
5397 if (XFS_FORCED_SHUTDOWN(mp))
5398 return XFS_ERROR(EIO);
5399 rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
5400 ASSERT(len > 0);
5401 ASSERT(nexts >= 0);
5402 ASSERT(ifp->if_ext_max ==
5403 XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5404 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5405 (error = xfs_iread_extents(tp, ip, whichfork)))
5406 return error;
5407 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5408 if (nextents == 0) {
5409 *done = 1;
5410 return 0;
5411 }
5412 XFS_STATS_INC(xs_blk_unmap);
dd9f438e 5413 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
1da177e4
LT
5414 start = bno;
5415 bno = start + len - 1;
5416 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5417 &prev);
3e57ecf6
OW
5418 if (delta) {
5419 delta->xed_startoff = NULLFILEOFF;
5420 delta->xed_blockcount = 0;
5421 }
1da177e4
LT
5422 /*
5423 * Check to see if the given block number is past the end of the
5424 * file, back up to the last block if so...
5425 */
5426 if (eof) {
4eea22f0 5427 ep = xfs_iext_get_ext(ifp, --lastx);
1da177e4
LT
5428 xfs_bmbt_get_all(ep, &got);
5429 bno = got.br_startoff + got.br_blockcount - 1;
5430 }
5431 logflags = 0;
5432 if (ifp->if_flags & XFS_IFBROOT) {
5433 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5434 cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
5435 whichfork);
5436 cur->bc_private.b.firstblock = *firstblock;
5437 cur->bc_private.b.flist = flist;
5438 cur->bc_private.b.flags = 0;
5439 } else
5440 cur = NULL;
5441 extno = 0;
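/*
 * Walk the extent list backwards from the end of the requested range
 * towards 'start', trimming, converting or deleting one extent per
 * iteration, until the range is fully unmapped or the caller's
 * extent limit (nexts) is reached.
 */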
5442 while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5443 (nexts == 0 || extno < nexts)) {
5444 /*
5445 * Is the found extent after a hole in which bno lives?
5446 * Just back up to the previous extent, if so.
5447 */
5448 if (got.br_startoff > bno) {
5449 if (--lastx < 0)
5450 break;
4eea22f0 5451 ep = xfs_iext_get_ext(ifp, lastx);
1da177e4
LT
5452 xfs_bmbt_get_all(ep, &got);
5453 }
5454 /*
5455 * Is the last block of this extent before the range
5456 * we're supposed to delete? If so, we're done.
5457 */
5458 bno = XFS_FILEOFF_MIN(bno,
5459 got.br_startoff + got.br_blockcount - 1);
5460 if (bno < start)
5461 break;
5462 /*
5463 * Then deal with the (possibly delayed) allocated space
5464 * we found.
5465 */
5466 ASSERT(ep != NULL);
5467 del = got;
5468 wasdel = ISNULLSTARTBLOCK(del.br_startblock);
5469 if (got.br_startoff < start) {
5470 del.br_startoff = start;
5471 del.br_blockcount -= start - got.br_startoff;
5472 if (!wasdel)
5473 del.br_startblock += start - got.br_startoff;
5474 }
5475 if (del.br_startoff + del.br_blockcount > bno + 1)
5476 del.br_blockcount = bno + 1 - del.br_startoff;
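/*
 * 'del' now describes only the part of this extent that lies inside
 * [start, bno], i.e. the piece we are about to convert or remove.
 */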
5477 sum = del.br_startblock + del.br_blockcount;
5478 if (isrt &&
5479 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5480 /*
5481 * Realtime extent not lined up at the end.
5482 * The extent could have been split into written
5483 * and unwritten pieces, or we could just be
5484 * unmapping part of it. But we can't really
5485 * get rid of part of a realtime extent.
5486 */
5487 if (del.br_state == XFS_EXT_UNWRITTEN ||
5488 !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) {
5489 /*
5490 * This piece is unwritten, or we're not
5491 * using unwritten extents. Skip over it.
5492 */
5493 ASSERT(bno >= mod);
5494 bno -= mod > del.br_blockcount ?
5495 del.br_blockcount : mod;
5496 if (bno < got.br_startoff) {
5497 if (--lastx >= 0)
4eea22f0
MK
5498 xfs_bmbt_get_all(xfs_iext_get_ext(
5499 ifp, lastx), &got);
1da177e4
LT
5500 }
5501 continue;
5502 }
5503 /*
5504 * It's written, turn it unwritten.
5505 * This is better than zeroing it.
5506 */
5507 ASSERT(del.br_state == XFS_EXT_NORM);
5508 ASSERT(xfs_trans_get_block_res(tp) > 0);
5509 /*
5510 * If this spans a realtime extent boundary,
5511 * chop it back to the start of the one we end at.
5512 */
5513 if (del.br_blockcount > mod) {
5514 del.br_startoff += del.br_blockcount - mod;
5515 del.br_startblock += del.br_blockcount - mod;
5516 del.br_blockcount = mod;
5517 }
5518 del.br_state = XFS_EXT_UNWRITTEN;
5519 error = xfs_bmap_add_extent(ip, lastx, &cur, &del,
3e57ecf6
OW
5520 firstblock, flist, &logflags, delta,
5521 XFS_DATA_FORK, 0);
1da177e4
LT
5522 if (error)
5523 goto error0;
5524 goto nodelete;
5525 }
5526 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5527 /*
5528 * Realtime extent is lined up at the end but not
5529 * at the front. We'll get rid of full extents if
5530 * we can.
5531 */
5532 mod = mp->m_sb.sb_rextsize - mod;
5533 if (del.br_blockcount > mod) {
5534 del.br_blockcount -= mod;
5535 del.br_startoff += mod;
5536 del.br_startblock += mod;
5537 } else if ((del.br_startoff == start &&
5538 (del.br_state == XFS_EXT_UNWRITTEN ||
5539 xfs_trans_get_block_res(tp) == 0)) ||
5540 !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) {
5541 /*
5542 * Can't make it unwritten. There isn't
5543 * a full extent here so just skip it.
5544 */
5545 ASSERT(bno >= del.br_blockcount);
5546 bno -= del.br_blockcount;
5547 if (bno < got.br_startoff) {
5548 if (--lastx >= 0)
5549 xfs_bmbt_get_all(--ep, &got);
5550 }
5551 continue;
5552 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5553 /*
5554 * This one is already unwritten.
5555 * It must have a written left neighbor.
5556 * Unwrite the killed part of that one and
5557 * try again.
5558 */
5559 ASSERT(lastx > 0);
4eea22f0
MK
5560 xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
5561 lastx - 1), &prev);
1da177e4
LT
5562 ASSERT(prev.br_state == XFS_EXT_NORM);
5563 ASSERT(!ISNULLSTARTBLOCK(prev.br_startblock));
5564 ASSERT(del.br_startblock ==
5565 prev.br_startblock + prev.br_blockcount);
5566 if (prev.br_startoff < start) {
5567 mod = start - prev.br_startoff;
5568 prev.br_blockcount -= mod;
5569 prev.br_startblock += mod;
5570 prev.br_startoff = start;
5571 }
5572 prev.br_state = XFS_EXT_UNWRITTEN;
5573 error = xfs_bmap_add_extent(ip, lastx - 1, &cur,
5574 &prev, firstblock, flist, &logflags,
3e57ecf6 5575 delta, XFS_DATA_FORK, 0);
1da177e4
LT
5576 if (error)
5577 goto error0;
5578 goto nodelete;
5579 } else {
5580 ASSERT(del.br_state == XFS_EXT_NORM);
5581 del.br_state = XFS_EXT_UNWRITTEN;
5582 error = xfs_bmap_add_extent(ip, lastx, &cur,
5583 &del, firstblock, flist, &logflags,
3e57ecf6 5584 delta, XFS_DATA_FORK, 0);
1da177e4
LT
5585 if (error)
5586 goto error0;
5587 goto nodelete;
5588 }
5589 }
5590 if (wasdel) {
5591 ASSERT(STARTBLOCKVAL(del.br_startblock) > 0);
dd9f438e 5592 /* Update realtime/data freespace, unreserve quota */
06d10dd9
NS
5593 if (isrt) {
5594 xfs_filblks_t rtexts;
5595
5596 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5597 do_div(rtexts, mp->m_sb.sb_rextsize);
5598 xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
20f4ebf2 5599 (int64_t)rtexts, rsvd);
dd9f438e
NS
5600 (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
5601 NULL, ip, -((long)del.br_blockcount), 0,
06d10dd9
NS
5602 XFS_QMOPT_RES_RTBLKS);
5603 } else {
5604 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
20f4ebf2 5605 (int64_t)del.br_blockcount, rsvd);
dd9f438e
NS
5606 (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
5607 NULL, ip, -((long)del.br_blockcount), 0,
1da177e4 5608 XFS_QMOPT_RES_REGBLKS);
06d10dd9 5609 }
1da177e4
LT
5610 ip->i_delayed_blks -= del.br_blockcount;
5611 if (cur)
5612 cur->bc_private.b.flags |=
5613 XFS_BTCUR_BPRV_WASDEL;
5614 } else if (cur)
5615 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5616 /*
5617 * If it's the case where the directory code is running
5618 * with no block reservation, and the deleted block is in
5619 * the middle of its extent, and the resulting insert
5620 * of an extent would cause transformation to btree format,
5621 * then reject it. The calling code will then swap
5622 * blocks around instead.
5623 * We have to do this now, rather than waiting for the
5624 * conversion to btree format, since the transaction
5625 * will be dirty.
5626 */
5627 if (!wasdel && xfs_trans_get_block_res(tp) == 0 &&
5628 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5629 XFS_IFORK_NEXTENTS(ip, whichfork) >= ifp->if_ext_max &&
5630 del.br_startoff > got.br_startoff &&
5631 del.br_startoff + del.br_blockcount <
5632 got.br_startoff + got.br_blockcount) {
5633 error = XFS_ERROR(ENOSPC);
5634 goto error0;
5635 }
5636 error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del,
3e57ecf6 5637 &tmp_logflags, delta, whichfork, rsvd);
1da177e4
LT
5638 logflags |= tmp_logflags;
5639 if (error)
5640 goto error0;
5641 bno = del.br_startoff - 1;
5642nodelete:
5643 lastx = ifp->if_lastex;
5644 /*
5645 * If not done, go on to the next (previous) record.
5646 * Reset ep in case the extents array was reallocated.
5647 */
4eea22f0 5648 ep = xfs_iext_get_ext(ifp, lastx);
1da177e4
LT
5649 if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5650 if (lastx >= XFS_IFORK_NEXTENTS(ip, whichfork) ||
5651 xfs_bmbt_get_startoff(ep) > bno) {
4eea22f0
MK
5652 if (--lastx >= 0)
5653 ep = xfs_iext_get_ext(ifp, lastx);
1da177e4
LT
5654 }
5655 if (lastx >= 0)
5656 xfs_bmbt_get_all(ep, &got);
5657 extno++;
5658 }
5659 }
5660 ifp->if_lastex = lastx;
5661 *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
5662 ASSERT(ifp->if_ext_max ==
5663 XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5664 /*
5665 * Convert to a btree if necessary.
5666 */
5667 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5668 XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
5669 ASSERT(cur == NULL);
5670 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
5671 &cur, 0, &tmp_logflags, whichfork);
5672 logflags |= tmp_logflags;
5673 if (error)
5674 goto error0;
5675 }
5676 /*
5677 * transform from btree to extents, give it cur
5678 */
5679 else if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
5680 XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
5681 ASSERT(cur != NULL);
5682 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5683 whichfork);
5684 logflags |= tmp_logflags;
5685 if (error)
5686 goto error0;
5687 }
5688 /*
5689 * transform from extents to local?
5690 */
5691 ASSERT(ifp->if_ext_max ==
5692 XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5693 error = 0;
3e57ecf6
OW
5694 if (delta && delta->xed_startoff != NULLFILEOFF) {
5695 /* A change was actually made.
5696 * Note that delta->xed_blockcount is an offset at this
5697 * point and needs to be converted to a block count.
5698 */
5699 ASSERT(delta->xed_blockcount > delta->xed_startoff);
5700 delta->xed_blockcount -= delta->xed_startoff;
5701 }
1da177e4
LT
5702error0:
5703 /*
5704 * Log everything. Do this after conversion, there's no point in
4eea22f0 5705 * logging the extent records if we've converted to btree format.
1da177e4
LT
5706 */
5707 if ((logflags & XFS_ILOG_FEXT(whichfork)) &&
5708 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5709 logflags &= ~XFS_ILOG_FEXT(whichfork);
5710 else if ((logflags & XFS_ILOG_FBROOT(whichfork)) &&
5711 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5712 logflags &= ~XFS_ILOG_FBROOT(whichfork);
5713 /*
5714 * Log inode even in the error case, if the transaction
5715 * is dirty we'll need to shut down the filesystem.
5716 */
5717 if (logflags)
5718 xfs_trans_log_inode(tp, ip, logflags);
5719 if (cur) {
5720 if (!error) {
5721 *firstblock = cur->bc_private.b.firstblock;
5722 cur->bc_private.b.allocated = 0;
5723 }
5724 xfs_btree_del_cursor(cur,
5725 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5726 }
5727 return error;
5728}
5729
5730/*
5731 * Fcntl interface to xfs_bmapi.
5732 */
5733int /* error code */
5734xfs_getbmap(
5735 bhv_desc_t *bdp, /* XFS behavior descriptor*/
5736 struct getbmap *bmv, /* user bmap structure */
5737 void __user *ap, /* pointer to user's array */
5738 int interface) /* interface flags */
5739{
5740 __int64_t bmvend; /* last block requested */
5741 int error; /* return value */
5742 __int64_t fixlen; /* length for -1 case */
5743 int i; /* extent number */
5744 xfs_inode_t *ip; /* xfs incore inode pointer */
67fcaa73 5745 bhv_vnode_t *vp; /* corresponding vnode */
1da177e4
LT
5746 int lock; /* lock state */
5747 xfs_bmbt_irec_t *map; /* buffer for user's data */
5748 xfs_mount_t *mp; /* file system mount point */
5749 int nex; /* # of user extents can do */
5750 int nexleft; /* # of user extents left */
5751 int subnex; /* # of bmapi's can do */
5752 int nmap; /* number of map entries */
5753 struct getbmap out; /* output structure */
5754 int whichfork; /* data or attr fork */
5755 int prealloced; /* this is a file with
5756 * preallocated data space */
5757 int sh_unwritten; /* true, if unwritten */
5758 /* extents listed separately */
5759 int bmapi_flags; /* flags for xfs_bmapi */
5760 __int32_t oflags; /* getbmapx bmv_oflags field */
5761
5762 vp = BHV_TO_VNODE(bdp);
5763 ip = XFS_BHVTOI(bdp);
5764 mp = ip->i_mount;
5765
5766 whichfork = interface & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
5767 sh_unwritten = (interface & BMV_IF_PREALLOC) != 0;
5768
5769 /* If the BMV_IF_NO_DMAPI_READ interface bit is specified, do not
5770 * generate a DMAPI read event. Otherwise, if the DM_EVENT_READ
5771 * bit is set for the file, generate a read event in order
5772 * that the DMAPI application may do its thing before we return
5773 * the extents. Usually this means restoring user file data to
5774 * regions of the file that look like holes.
5775 *
5776 * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify
5777 * BMV_IF_NO_DMAPI_READ so that read events are generated.
5778 * If this were not true, callers of ioctl( XFS_IOC_GETBMAP )
5779 * could misinterpret holes in a DMAPI file as true holes,
5780 * when in fact they may represent offline user data.
5781 */
5782 if ( (interface & BMV_IF_NO_DMAPI_READ) == 0
5783 && DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)
5784 && whichfork == XFS_DATA_FORK) {
5785
5786 error = XFS_SEND_DATA(mp, DM_EVENT_READ, vp, 0, 0, 0, NULL);
5787 if (error)
5788 return XFS_ERROR(error);
5789 }
5790
5791 if (whichfork == XFS_ATTR_FORK) {
5792 if (XFS_IFORK_Q(ip)) {
5793 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
5794 ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
5795 ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
5796 return XFS_ERROR(EINVAL);
5797 } else if (unlikely(
5798 ip->i_d.di_aformat != 0 &&
5799 ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
5800 XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
5801 ip->i_mount);
5802 return XFS_ERROR(EFSCORRUPTED);
5803 }
5804 } else if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
5805 ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
5806 ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
5807 return XFS_ERROR(EINVAL);
5808 if (whichfork == XFS_DATA_FORK) {
957d0ebe 5809 if (xfs_get_extsz_hint(ip) ||
dd9f438e 5810 ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
1da177e4
LT
5811 prealloced = 1;
5812 fixlen = XFS_MAXIOFFSET(mp);
5813 } else {
5814 prealloced = 0;
ba87ea69 5815 fixlen = ip->i_size;
1da177e4
LT
5816 }
5817 } else {
5818 prealloced = 0;
5819 fixlen = 1LL << 32;
5820 }
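/*
 * fixlen is the "rest of the file" bound used when the caller passes
 * bmv_length == -1: the maximum possible file size when speculative
 * preallocation may extend past EOF, the current i_size otherwise,
 * and a fixed 2^32-byte limit for the attribute fork.
 */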
5821
5822 if (bmv->bmv_length == -1) {
5823 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
5824 bmv->bmv_length = MAX( (__int64_t)(fixlen - bmv->bmv_offset),
5825 (__int64_t)0);
5826 } else if (bmv->bmv_length < 0)
5827 return XFS_ERROR(EINVAL);
5828 if (bmv->bmv_length == 0) {
5829 bmv->bmv_entries = 0;
5830 return 0;
5831 }
5832 nex = bmv->bmv_count - 1;
5833 if (nex <= 0)
5834 return XFS_ERROR(EINVAL);
5835 bmvend = bmv->bmv_offset + bmv->bmv_length;
5836
5837 xfs_ilock(ip, XFS_IOLOCK_SHARED);
5838
ba87ea69
LM
5839 if (whichfork == XFS_DATA_FORK &&
5840 (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size)) {
1da177e4 5841 /* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */
67fcaa73 5842 error = bhv_vop_flush_pages(vp, (xfs_off_t)0, -1, 0, FI_REMAPF);
1da177e4
LT
5843 }
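/*
 * The flush above pushes dirty pages to disk so that delayed
 * allocations in the data fork are converted to real extents before
 * we walk the extent list (hence the ASSERT below); the mappings we
 * report then reflect what is actually allocated on disk.
 */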
5844
5845 ASSERT(whichfork == XFS_ATTR_FORK || ip->i_delayed_blks == 0);
5846
5847 lock = xfs_ilock_map_shared(ip);
5848
5849 /*
5850 * Don't let nex be bigger than the number of extents
5851 * we can have assuming alternating holes and real extents.
5852 */
5853 if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
5854 nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
5855
5856 bmapi_flags = XFS_BMAPI_AFLAG(whichfork) |
5857 ((sh_unwritten) ? 0 : XFS_BMAPI_IGSTATE);
5858
5859 /*
5860 * Allocate enough space to handle "subnex" maps at a time.
5861 */
5862 subnex = 16;
5863 map = kmem_alloc(subnex * sizeof(*map), KM_SLEEP);
5864
5865 bmv->bmv_entries = 0;
5866
5867 if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0) {
5868 error = 0;
5869 goto unlock_and_return;
5870 }
5871
5872 nexleft = nex;
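/*
 * nexleft tracks how many entries remain in the user's array; each
 * pass of the loop below maps at most subnex extents and copies them
 * out one at a time, advancing bmv_offset and bmv_length as it goes.
 */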
5873
5874 do {
5875 nmap = (nexleft > subnex) ? subnex : nexleft;
5876 error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
5877 XFS_BB_TO_FSB(mp, bmv->bmv_length),
3e57ecf6
OW
5878 bmapi_flags, NULL, 0, map, &nmap,
5879 NULL, NULL);
1da177e4
LT
5880 if (error)
5881 goto unlock_and_return;
5882 ASSERT(nmap <= subnex);
5883
5884 for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
5885 nexleft--;
5886 oflags = (map[i].br_state == XFS_EXT_UNWRITTEN) ?
5887 BMV_OF_PREALLOC : 0;
5888 out.bmv_offset = XFS_FSB_TO_BB(mp, map[i].br_startoff);
5889 out.bmv_length = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
5890 ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
9af0a70c
YL
5891 if (map[i].br_startblock == HOLESTARTBLOCK &&
5892 ((prealloced && out.bmv_offset + out.bmv_length == bmvend) ||
5893 whichfork == XFS_ATTR_FORK )) {
5894 /*
5895 * came to hole at end of file or the end of
5896 * attribute fork
5897 */
1da177e4
LT
5898 goto unlock_and_return;
5899 } else {
5900 out.bmv_block =
5901 (map[i].br_startblock == HOLESTARTBLOCK) ?
5902 -1 :
5903 XFS_FSB_TO_DB(ip, map[i].br_startblock);
5904
5905 /* return either getbmap/getbmapx structure. */
5906 if (interface & BMV_IF_EXTENDED) {
5907 struct getbmapx outx;
5908
5909 GETBMAP_CONVERT(out,outx);
5910 outx.bmv_oflags = oflags;
5911 outx.bmv_unused1 = outx.bmv_unused2 = 0;
5912 if (copy_to_user(ap, &outx,
5913 sizeof(outx))) {
5914 error = XFS_ERROR(EFAULT);
5915 goto unlock_and_return;
5916 }
5917 } else {
5918 if (copy_to_user(ap, &out,
5919 sizeof(out))) {
5920 error = XFS_ERROR(EFAULT);
5921 goto unlock_and_return;
5922 }
5923 }
5924 bmv->bmv_offset =
5925 out.bmv_offset + out.bmv_length;
5926 bmv->bmv_length = MAX((__int64_t)0,
5927 (__int64_t)(bmvend - bmv->bmv_offset));
5928 bmv->bmv_entries++;
5929 ap = (interface & BMV_IF_EXTENDED) ?
5930 (void __user *)
5931 ((struct getbmapx __user *)ap + 1) :
5932 (void __user *)
5933 ((struct getbmap __user *)ap + 1);
5934 }
5935 }
5936 } while (nmap && nexleft && bmv->bmv_length);
5937
5938unlock_and_return:
5939 xfs_iunlock_map_shared(ip, lock);
5940 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
5941
5942 kmem_free(map, subnex * sizeof(*map));
5943
5944 return error;
5945}
5946
5947/*
5948 * Check the last inode extent to determine whether this allocation will result
5949 * in blocks being allocated at the end of the file. When we allocate new data
5950 * blocks at the end of the file which do not start at the previous data block,
5951 * we will try to align the new blocks at stripe unit boundaries.
5952 */
ba0f32d4 5953STATIC int /* error */
1da177e4
LT
5954xfs_bmap_isaeof(
5955 xfs_inode_t *ip, /* incore inode pointer */
5956 xfs_fileoff_t off, /* file offset in fsblocks */
5957 int whichfork, /* data or attribute fork */
5958 char *aeof) /* return value */
5959{
5960 int error; /* error return value */
5961 xfs_ifork_t *ifp; /* inode fork pointer */
4eea22f0
MK
5962 xfs_bmbt_rec_t *lastrec; /* extent record pointer */
5963 xfs_extnum_t nextents; /* number of file extents */
5964 xfs_bmbt_irec_t s; /* expanded extent record */
1da177e4
LT
5965
5966 ASSERT(whichfork == XFS_DATA_FORK);
5967 ifp = XFS_IFORK_PTR(ip, whichfork);
5968 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5969 (error = xfs_iread_extents(NULL, ip, whichfork)))
5970 return error;
5971 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5972 if (nextents == 0) {
5973 *aeof = 1;
5974 return 0;
5975 }
5976 /*
5977 * Go to the last extent
5978 */
4eea22f0 5979 lastrec = xfs_iext_get_ext(ifp, nextents - 1);
1da177e4
LT
5980 xfs_bmbt_get_all(lastrec, &s);
5981 /*
5982 * Check we are allocating in the last extent (for delayed allocations)
5983 * or past the last extent for non-delayed allocations.
5984 */
5985 *aeof = (off >= s.br_startoff &&
5986 off < s.br_startoff + s.br_blockcount &&
5987 ISNULLSTARTBLOCK(s.br_startblock)) ||
5988 off >= s.br_startoff + s.br_blockcount;
5989 return 0;
5990}
5991
5992/*
5993 * Check if the endoff is outside the last extent. If so the caller will grow
5994 * the allocation to a stripe unit boundary.
5995 */
5996int /* error */
5997xfs_bmap_eof(
5998 xfs_inode_t *ip, /* incore inode pointer */
5999 xfs_fileoff_t endoff, /* file offset in fsblocks */
6000 int whichfork, /* data or attribute fork */
6001 int *eof) /* result value */
6002{
6003 xfs_fsblock_t blockcount; /* extent block count */
6004 int error; /* error return value */
6005 xfs_ifork_t *ifp; /* inode fork pointer */
4eea22f0
MK
6006 xfs_bmbt_rec_t *lastrec; /* extent record pointer */
6007 xfs_extnum_t nextents; /* number of file extents */
1da177e4
LT
6008 xfs_fileoff_t startoff; /* extent starting file offset */
6009
6010 ASSERT(whichfork == XFS_DATA_FORK);
6011 ifp = XFS_IFORK_PTR(ip, whichfork);
6012 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
6013 (error = xfs_iread_extents(NULL, ip, whichfork)))
6014 return error;
6015 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
6016 if (nextents == 0) {
6017 *eof = 1;
6018 return 0;
6019 }
6020 /*
6021 * Go to the last extent
6022 */
4eea22f0 6023 lastrec = xfs_iext_get_ext(ifp, nextents - 1);
1da177e4
LT
6024 startoff = xfs_bmbt_get_startoff(lastrec);
6025 blockcount = xfs_bmbt_get_blockcount(lastrec);
6026 *eof = endoff >= startoff + blockcount;
6027 return 0;
6028}
6029
6030#ifdef DEBUG
1da177e4
LT
6031STATIC
6032xfs_buf_t *
6033xfs_bmap_get_bp(
6034 xfs_btree_cur_t *cur,
6035 xfs_fsblock_t bno)
6036{
6037 int i;
6038 xfs_buf_t *bp;
6039
6040 if (!cur)
6041 return(NULL);
6042
6043 bp = NULL;
6044 for(i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
6045 bp = cur->bc_bufs[i];
6046 if (!bp) break;
6047 if (XFS_BUF_ADDR(bp) == bno)
6048 break; /* Found it */
6049 }
6050 if (i == XFS_BTREE_MAXLEVELS)
6051 bp = NULL;
6052
6053 if (!bp) { /* Chase down all the log items to see if the bp is there */
6054 xfs_log_item_chunk_t *licp;
6055 xfs_trans_t *tp;
6056
6057 tp = cur->bc_tp;
6058 licp = &tp->t_items;
6059 while (!bp && licp != NULL) {
6060 if (XFS_LIC_ARE_ALL_FREE(licp)) {
6061 licp = licp->lic_next;
6062 continue;
6063 }
6064 for (i = 0; i < licp->lic_unused; i++) {
6065 xfs_log_item_desc_t *lidp;
6066 xfs_log_item_t *lip;
6067 xfs_buf_log_item_t *bip;
6068 xfs_buf_t *lbp;
6069
6070 if (XFS_LIC_ISFREE(licp, i)) {
6071 continue;
6072 }
6073
6074 lidp = XFS_LIC_SLOT(licp, i);
6075 lip = lidp->lid_item;
6076 if (lip->li_type != XFS_LI_BUF)
6077 continue;
6078
6079 bip = (xfs_buf_log_item_t *)lip;
6080 lbp = bip->bli_buf;
6081
6082 if (XFS_BUF_ADDR(lbp) == bno) {
6083 bp = lbp;
6084 break; /* Found it */
6085 }
6086 }
6087 licp = licp->lic_next;
6088 }
6089 }
6090 return(bp);
6091}
6092
6093void
6094xfs_check_block(
6095 xfs_bmbt_block_t *block,
6096 xfs_mount_t *mp,
6097 int root,
6098 short sz)
6099{
6100 int i, j, dmxr;
576039cf 6101 __be64 *pp, *thispa; /* pointer to block address */
1da177e4
LT
6102 xfs_bmbt_key_t *prevp, *keyp;
6103
16259e7d 6104 ASSERT(be16_to_cpu(block->bb_level) > 0);
1da177e4
LT
6105
6106 prevp = NULL;
16259e7d 6107 for( i = 1; i <= be16_to_cpu(block->bb_numrecs); i++) {
1da177e4
LT
6108 dmxr = mp->m_bmap_dmxr[0];
6109
6110 if (root) {
6111 keyp = XFS_BMAP_BROOT_KEY_ADDR(block, i, sz);
6112 } else {
2c36dded 6113 keyp = XFS_BTREE_KEY_ADDR(xfs_bmbt, block, i);
1da177e4
LT
6114 }
6115
6116 if (prevp) {
6117 xfs_btree_check_key(XFS_BTNUM_BMAP, prevp, keyp);
6118 }
6119 prevp = keyp;
6120
6121 /*
6122 * Compare the block numbers to see if there are dups.
6123 */
6124
6125 if (root) {
6126 pp = XFS_BMAP_BROOT_PTR_ADDR(block, i, sz);
6127 } else {
2c36dded 6128 pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, i, dmxr);
1da177e4 6129 }
16259e7d 6130 for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
1da177e4
LT
6131 if (root) {
6132 thispa = XFS_BMAP_BROOT_PTR_ADDR(block, j, sz);
6133 } else {
2c36dded
ES
6134 thispa = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, j,
6135 dmxr);
1da177e4 6136 }
576039cf 6137 if (*thispa == *pp) {
1da177e4
LT
6138 cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld",
6139 __FUNCTION__, j, i,
576039cf 6140 (unsigned long long)be64_to_cpu(*thispa));
1da177e4
LT
6141 panic("%s: ptrs are equal in node\n",
6142 __FUNCTION__);
6143 }
6144 }
6145 }
6146}
6147
6148/*
6149 * Check that the extents for the inode ip are in the right order in all
6150 * btree leaves.
6151 */
6152
6153STATIC void
6154xfs_bmap_check_leaf_extents(
6155 xfs_btree_cur_t *cur, /* btree cursor or null */
6156 xfs_inode_t *ip, /* incore inode pointer */
6157 int whichfork) /* data or attr fork */
6158{
6159 xfs_bmbt_block_t *block; /* current btree block */
6160 xfs_fsblock_t bno; /* block # of "block" */
6161 xfs_buf_t *bp; /* buffer for "block" */
6162 int error; /* error return value */
4eea22f0 6163 xfs_extnum_t i=0, j; /* index into the extents list */
1da177e4
LT
6164 xfs_ifork_t *ifp; /* fork structure */
6165 int level; /* btree level, for checking */
6166 xfs_mount_t *mp; /* file system mount structure */
576039cf 6167 __be64 *pp; /* pointer to block address */
4eea22f0
MK
6168 xfs_bmbt_rec_t *ep; /* pointer to current extent */
6169 xfs_bmbt_rec_t *lastp; /* pointer to previous extent */
6170 xfs_bmbt_rec_t *nextp; /* pointer to next extent */
1da177e4
LT
6171 int bp_release = 0;
6172
6173 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
6174 return;
6175 }
6176
6177 bno = NULLFSBLOCK;
6178 mp = ip->i_mount;
6179 ifp = XFS_IFORK_PTR(ip, whichfork);
6180 block = ifp->if_broot;
6181 /*
6182 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
6183 */
16259e7d
CH
6184 level = be16_to_cpu(block->bb_level);
6185 ASSERT(level > 0);
1da177e4
LT
6186 xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
6187 pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
576039cf
CH
6188 bno = be64_to_cpu(*pp);
6189
6190 ASSERT(bno != NULLDFSBNO);
6191 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
6192 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
6193
1da177e4
LT
6194 /*
6195 * Go down the tree until leaf level is reached, following the first
6196 * pointer (leftmost) at each level.
6197 */
6198 while (level-- > 0) {
6199 /* See if buf is in cur first */
6200 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
6201 if (bp) {
6202 bp_release = 0;
6203 } else {
6204 bp_release = 1;
6205 }
6206 if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
6207 XFS_BMAP_BTREE_REF)))
6208 goto error_norelse;
6209 block = XFS_BUF_TO_BMBT_BLOCK(bp);
6210 XFS_WANT_CORRUPTED_GOTO(
6211 XFS_BMAP_SANITY_CHECK(mp, block, level),
6212 error0);
6213 if (level == 0)
6214 break;
6215
6216 /*
6217 * Check this block for basic sanity (increasing keys and
6218 * no duplicate blocks).
6219 */
6220
6221 xfs_check_block(block, mp, 0, 0);
2c36dded 6222 pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
576039cf
CH
6223 bno = be64_to_cpu(*pp);
6224 XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
1da177e4
LT
6225 if (bp_release) {
6226 bp_release = 0;
6227 xfs_trans_brelse(NULL, bp);
6228 }
6229 }
6230
6231 /*
6232 * Here with bp and block set to the leftmost leaf node in the tree.
6233 */
6234 i = 0;
6235
6236 /*
6237 * Loop over all leaf nodes checking that all extents are in the right order.
6238 */
6239 lastp = NULL;
6240 for (;;) {
1da177e4
LT
6241 xfs_fsblock_t nextbno;
6242 xfs_extnum_t num_recs;
6243
6244
16259e7d 6245 num_recs = be16_to_cpu(block->bb_numrecs);
1da177e4
LT
6246
6247 /*
6248 * Read-ahead the next leaf block, if any.
6249 */
6250
16259e7d 6251 nextbno = be64_to_cpu(block->bb_rightsib);
1da177e4
LT
6252
6253 /*
6254 * Check all the extents to make sure they are OK.
6255 * If we had a previous block, the last entry should
6256 * conform with the first entry in this one.
6257 */
6258
2c36dded 6259 ep = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1);
4eea22f0 6260 for (j = 1; j < num_recs; j++) {
2c36dded 6261 nextp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, j + 1);
1da177e4
LT
6262 if (lastp) {
6263 xfs_btree_check_rec(XFS_BTNUM_BMAP,
6264 (void *)lastp, (void *)ep);
6265 }
6266 xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep,
4eea22f0
MK
6267 (void *)(nextp));
6268 lastp = ep;
6269 ep = nextp;
1da177e4 6270 }
1da177e4
LT
6271
6272 i += num_recs;
6273 if (bp_release) {
6274 bp_release = 0;
6275 xfs_trans_brelse(NULL, bp);
6276 }
6277 bno = nextbno;
6278 /*
6279 * If we've reached the end, stop.
6280 */
6281 if (bno == NULLFSBLOCK)
6282 break;
6283
6284 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
6285 if (bp) {
6286 bp_release = 0;
6287 } else {
6288 bp_release = 1;
6289 }
6290 if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
6291 XFS_BMAP_BTREE_REF)))
6292 goto error_norelse;
6293 block = XFS_BUF_TO_BMBT_BLOCK(bp);
6294 }
6295 if (bp_release) {
6296 bp_release = 0;
6297 xfs_trans_brelse(NULL, bp);
6298 }
6299 return;
6300
6301error0:
6302 cmn_err(CE_WARN, "%s: at error0", __FUNCTION__);
6303 if (bp_release)
6304 xfs_trans_brelse(NULL, bp);
6305error_norelse:
6306 cmn_err(CE_WARN, "%s: BAD after btree leaves for %d extents",
da1650a5 6307 __FUNCTION__, i);
1da177e4
LT
6308 panic("%s: CORRUPTED BTREE OR SOMETHING", __FUNCTION__);
6309 return;
6310}
6311#endif
6312
6313/*
6314 * Count fsblocks of the given fork.
6315 */
6316int /* error */
6317xfs_bmap_count_blocks(
6318 xfs_trans_t *tp, /* transaction pointer */
6319 xfs_inode_t *ip, /* incore inode */
6320 int whichfork, /* data or attr fork */
6321 int *count) /* out: count of blocks */
6322{
6323 xfs_bmbt_block_t *block; /* current btree block */
6324 xfs_fsblock_t bno; /* block # of "block" */
6325 xfs_ifork_t *ifp; /* fork structure */
6326 int level; /* btree level, for checking */
6327 xfs_mount_t *mp; /* file system mount structure */
576039cf 6328 __be64 *pp; /* pointer to block address */
1da177e4
LT
6329
6330 bno = NULLFSBLOCK;
6331 mp = ip->i_mount;
6332 ifp = XFS_IFORK_PTR(ip, whichfork);
6333 if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
4eea22f0 6334 if (unlikely(xfs_bmap_count_leaves(ifp, 0,
1da177e4
LT
6335 ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
6336 count) < 0)) {
6337 XFS_ERROR_REPORT("xfs_bmap_count_blocks(1)",
6338 XFS_ERRLEVEL_LOW, mp);
6339 return XFS_ERROR(EFSCORRUPTED);
6340 }
6341 return 0;
6342 }
6343
6344 /*
6345 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
6346 */
6347 block = ifp->if_broot;
16259e7d
CH
6348 level = be16_to_cpu(block->bb_level);
6349 ASSERT(level > 0);
1da177e4 6350 pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
576039cf
CH
6351 bno = be64_to_cpu(*pp);
6352 ASSERT(bno != NULLDFSBNO);
6353 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
6354 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
1da177e4 6355
4eea22f0 6356 if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
1da177e4
LT
6357 XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
6358 mp);
6359 return XFS_ERROR(EFSCORRUPTED);
6360 }
6361
6362 return 0;
6363}
6364
6365/*
6366 * Recursively walks each level of a btree
6367 * to count total fsblocks in use.
6368 */
6369int /* error */
6370xfs_bmap_count_tree(
6371 xfs_mount_t *mp, /* file system mount point */
6372 xfs_trans_t *tp, /* transaction pointer */
4eea22f0 6373 xfs_ifork_t *ifp, /* inode fork pointer */
1da177e4
LT
6374 xfs_fsblock_t blockno, /* file system block number */
6375 int levelin, /* level in btree */
6376 int *count) /* Count of blocks */
6377{
6378 int error;
6379 xfs_buf_t *bp, *nbp;
6380 int level = levelin;
576039cf 6381 __be64 *pp;
1da177e4
LT
6382 xfs_fsblock_t bno = blockno;
6383 xfs_fsblock_t nextbno;
6384 xfs_bmbt_block_t *block, *nextblock;
6385 int numrecs;
1da177e4
LT
6386
6387 if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF)))
6388 return error;
6389 *count += 1;
6390 block = XFS_BUF_TO_BMBT_BLOCK(bp);
6391
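/*
 * Two cases: while the block just read is still an interior node,
 * count it and all of its right siblings, then recurse down its
 * leftmost child pointer; once the leaf level is reached, walk right
 * along the sibling chain adding up the blocks mapped by every
 * extent record.
 */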
6392 if (--level) {
6393 /* Still above the leaf level, count this level of nodes */
16259e7d 6394 nextbno = be64_to_cpu(block->bb_rightsib);
1da177e4
LT
6395 while (nextbno != NULLFSBLOCK) {
6396 if ((error = xfs_btree_read_bufl(mp, tp, nextbno,
6397 0, &nbp, XFS_BMAP_BTREE_REF)))
6398 return error;
6399 *count += 1;
6400 nextblock = XFS_BUF_TO_BMBT_BLOCK(nbp);
16259e7d 6401 nextbno = be64_to_cpu(nextblock->bb_rightsib);
1da177e4
LT
6402 xfs_trans_brelse(tp, nbp);
6403 }
6404
6405 /* Dive to the next level */
2c36dded 6406 pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
576039cf 6407 bno = be64_to_cpu(*pp);
1da177e4 6408 if (unlikely((error =
4eea22f0 6409 xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
1da177e4
LT
6410 xfs_trans_brelse(tp, bp);
6411 XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
6412 XFS_ERRLEVEL_LOW, mp);
6413 return XFS_ERROR(EFSCORRUPTED);
6414 }
6415 xfs_trans_brelse(tp, bp);
6416 } else {
6417 /* At the leaf level: count each leaf block and the blocks mapped by its records */
6418 for (;;) {
16259e7d
CH
6419 nextbno = be64_to_cpu(block->bb_rightsib);
6420 numrecs = be16_to_cpu(block->bb_numrecs);
e9ed9d22
ES
6421 if (unlikely(xfs_bmap_disk_count_leaves(0,
6422 block, numrecs, count) < 0)) {
1da177e4
LT
6423 xfs_trans_brelse(tp, bp);
6424 XFS_ERROR_REPORT("xfs_bmap_count_tree(2)",
6425 XFS_ERRLEVEL_LOW, mp);
6426 return XFS_ERROR(EFSCORRUPTED);
6427 }
6428 xfs_trans_brelse(tp, bp);
6429 if (nextbno == NULLFSBLOCK)
6430 break;
6431 bno = nextbno;
6432 if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
6433 XFS_BMAP_BTREE_REF)))
6434 return error;
6435 *count += 1;
6436 block = XFS_BUF_TO_BMBT_BLOCK(bp);
6437 }
6438 }
6439 return 0;
6440}
6441
6442/*
4eea22f0 6443 * Count leaf blocks given a range of extent records.
1da177e4
LT
6444 */
6445int
6446xfs_bmap_count_leaves(
4eea22f0
MK
6447 xfs_ifork_t *ifp,
6448 xfs_extnum_t idx,
1da177e4
LT
6449 int numrecs,
6450 int *count)
6451{
6452 int b;
4eea22f0 6453 xfs_bmbt_rec_t *frp;
1da177e4 6454
4eea22f0
MK
6455 for (b = 0; b < numrecs; b++) {
6456 frp = xfs_iext_get_ext(ifp, idx + b);
91e11088 6457 *count += xfs_bmbt_get_blockcount(frp);
4eea22f0 6458 }
91e11088
YL
6459 return 0;
6460}
6461
6462/*
4eea22f0
MK
6463 * Count leaf blocks given a range of extent records originally
6464 * in btree format.
91e11088
YL
6465 */
6466int
6467xfs_bmap_disk_count_leaves(
4eea22f0
MK
6468 xfs_extnum_t idx,
6469 xfs_bmbt_block_t *block,
91e11088
YL
6470 int numrecs,
6471 int *count)
6472{
6473 int b;
4eea22f0 6474 xfs_bmbt_rec_t *frp;
91e11088 6475
4eea22f0 6476 for (b = 1; b <= numrecs; b++) {
2c36dded 6477 frp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, idx + b);
1da177e4 6478 *count += xfs_bmbt_disk_get_blockcount(frp);
4eea22f0 6479 }
1da177e4
LT
6480 return 0;
6481}