2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "xfs_types.h"
24 #include "xfs_trans.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dir2_sf.h"
34 #include "xfs_attr_sf.h"
35 #include "xfs_dinode.h"
36 #include "xfs_inode.h"
37 #include "xfs_inode_item.h"
38 #include "xfs_alloc.h"
39 #include "xfs_btree.h"
40 #include "xfs_btree_trace.h"
41 #include "xfs_ialloc.h"
42 #include "xfs_itable.h"
44 #include "xfs_error.h"
45 #include "xfs_quota.h"
48 * Prototypes for internal btree functions.
52 STATIC int xfs_bmbt_killroot(xfs_btree_cur_t *);
53 STATIC void xfs_bmbt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int);
54 STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
55 STATIC int xfs_bmbt_lshift(xfs_btree_cur_t *, int, int *);
56 STATIC int xfs_bmbt_rshift(xfs_btree_cur_t *, int, int *);
57 STATIC int xfs_bmbt_split(xfs_btree_cur_t *, int, xfs_fsblock_t *,
58 __uint64_t *, xfs_btree_cur_t **, int *);
59 STATIC int xfs_bmbt_updkey(xfs_btree_cur_t *, xfs_bmbt_key_t *, int);
63 #define ENTRY XBT_ENTRY
64 #define ERROR XBT_ERROR
68 * Keep the XFS_BMBT_TRACE_ names around for now until all code using them
69 * is converted to be generic and thus switches to the XFS_BTREE_TRACE_ names.
71 #define XFS_BMBT_TRACE_ARGBI(c,b,i) \
72 XFS_BTREE_TRACE_ARGBI(c,b,i)
73 #define XFS_BMBT_TRACE_ARGBII(c,b,i,j) \
74 XFS_BTREE_TRACE_ARGBII(c,b,i,j)
75 #define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) \
76 XFS_BTREE_TRACE_ARGFFFI(c,o,b,i,j)
77 #define XFS_BMBT_TRACE_ARGI(c,i) \
78 XFS_BTREE_TRACE_ARGI(c,i)
79 #define XFS_BMBT_TRACE_ARGIFK(c,i,f,s) \
80 XFS_BTREE_TRACE_ARGIPK(c,i,(union xfs_btree_ptr)f,s)
81 #define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \
82 XFS_BTREE_TRACE_ARGIPR(c,i, \
83 (union xfs_btree_ptr)f, (union xfs_btree_rec *)r)
84 #define XFS_BMBT_TRACE_ARGIK(c,i,k) \
85 XFS_BTREE_TRACE_ARGIK(c,i,(union xfs_btree_key *)k)
86 #define XFS_BMBT_TRACE_CURSOR(c,s) \
87 XFS_BTREE_TRACE_CURSOR(c,s)
95 * Delete record pointed to by cur/level.
/*
 * xfs_bmbt_delrec -- delete the record that cur/level points at from a
 * bmap btree block, then rebalance.  Visible flow: shift keys/ptrs (or
 * recs at a leaf) down over the deleted slot, log the changed region,
 * decrement bb_numrecs; if the block is still >= IMINRECS we are done,
 * otherwise borrow one record from a sibling (xfs_bmbt_lshift/rshift
 * via a duplicated cursor) or merge with a sibling and free the empty
 * right block.  A root whose only child can be absorbed is collapsed
 * through xfs_bmbt_killroot().
 *
 * NOTE(review): this excerpt has elided lines (the function-name line
 * of the signature among them) and each surviving line carries its
 * original line number as a stray leading token -- restore from the
 * upstream file before compiling.
 */
97 STATIC int /* error */
101 int *stat) /* success/failure */
103 xfs_bmbt_block_t *block; /* bmap btree block */
104 xfs_fsblock_t bno; /* fs-relative block number */
105 xfs_buf_t *bp; /* buffer for block */
106 int error; /* error return value */
107 int i; /* loop counter */
108 int j; /* temp state */
109 xfs_bmbt_key_t key; /* bmap btree key */
110 xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */
111 xfs_fsblock_t lbno; /* left sibling block number */
112 xfs_buf_t *lbp; /* left buffer pointer */
113 xfs_bmbt_block_t *left; /* left btree block */
114 xfs_bmbt_key_t *lkp; /* left btree key */
115 xfs_bmbt_ptr_t *lpp; /* left address pointer */
116 int lrecs=0; /* left record count */
117 xfs_bmbt_rec_t *lrp; /* left record pointer */
118 xfs_mount_t *mp; /* file system mount point */
119 xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */
120 int ptr; /* key/record index */
121 xfs_fsblock_t rbno; /* right sibling block number */
122 xfs_buf_t *rbp; /* right buffer pointer */
123 xfs_bmbt_block_t *right; /* right btree block */
124 xfs_bmbt_key_t *rkp; /* right btree key */
125 xfs_bmbt_rec_t *rp; /* pointer to bmap btree rec */
126 xfs_bmbt_ptr_t *rpp; /* right address pointer */
127 xfs_bmbt_block_t *rrblock; /* right-right btree block */
128 xfs_buf_t *rrbp; /* right-right buffer pointer */
129 int rrecs=0; /* right record count */
130 xfs_bmbt_rec_t *rrp; /* right record pointer */
131 xfs_btree_cur_t *tcur; /* temporary btree cursor */
132 int numrecs; /* temporary numrec count */
133 int numlrecs, numrrecs;
135 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
136 XFS_BMBT_TRACE_ARGI(cur, level);
137 ptr = cur->bc_ptrs[level];
140 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
144 block = xfs_bmbt_get_block(cur, level, &bp);
145 numrecs = be16_to_cpu(block->bb_numrecs);
147 if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
148 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
153 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
157 XFS_STATS_INC(xs_bmbt_delrec);
/*
 * Interior node: close the gap over the deleted key/ptr pair after
 * validating every pointer that will be moved.
 */
159 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
160 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
162 for (i = ptr; i < numrecs; i++) {
163 if ((error = xfs_btree_check_lptr_disk(cur, pp[i], level))) {
164 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
170 memmove(&kp[ptr - 1], &kp[ptr],
171 (numrecs - ptr) * sizeof(*kp));
172 memmove(&pp[ptr - 1], &pp[ptr],
173 (numrecs - ptr) * sizeof(*pp));
174 xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs - 1);
175 xfs_bmbt_log_keys(cur, bp, ptr, numrecs - 1);
/* Leaf: close the gap over the deleted record instead. */
178 rp = XFS_BMAP_REC_IADDR(block, 1, cur);
180 memmove(&rp[ptr - 1], &rp[ptr],
181 (numrecs - ptr) * sizeof(*rp));
182 xfs_bmbt_log_recs(cur, bp, ptr, numrecs - 1);
186 cpu_to_be64(xfs_bmbt_disk_get_startoff(rp));
191 block->bb_numrecs = cpu_to_be16(numrecs);
192 xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS);
194 * We're at the root level.
195 * First, shrink the root block in-memory.
196 * Try to get rid of the next level down.
197 * If we can't then there's nothing left to do.
199 if (level == cur->bc_nlevels - 1) {
200 xfs_iroot_realloc(cur->bc_private.b.ip, -1,
201 cur->bc_private.b.whichfork);
202 if ((error = xfs_bmbt_killroot(cur))) {
203 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
206 if (level > 0 && (error = xfs_btree_decrement(cur, level, &j))) {
207 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
210 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* Deleting slot 1 changes this block's low key; fix the parent. */
214 if (ptr == 1 && (error = xfs_bmbt_updkey(cur, kp, level + 1))) {
215 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/* Still at or above the minimum record count: no rebalance needed. */
218 if (numrecs >= XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
219 if (level > 0 && (error = xfs_btree_decrement(cur, level, &j))) {
220 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
223 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
227 rbno = be64_to_cpu(block->bb_rightsib);
228 lbno = be64_to_cpu(block->bb_leftsib);
230 * One child of root, need to get a chance to copy its contents
231 * into the root and delete it. Can't go up to next level,
232 * there's nothing to delete there.
234 if (lbno == NULLFSBLOCK && rbno == NULLFSBLOCK &&
235 level == cur->bc_nlevels - 2) {
236 if ((error = xfs_bmbt_killroot(cur))) {
237 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
240 if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) {
241 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
244 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
248 ASSERT(rbno != NULLFSBLOCK || lbno != NULLFSBLOCK);
/*
 * Duplicate the cursor so we can walk to a sibling without
 * disturbing the caller's position.
 */
249 if ((error = xfs_btree_dup_cursor(cur, &tcur))) {
250 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/* Try borrowing a record from the right sibling first. */
254 if (rbno != NULLFSBLOCK) {
255 i = xfs_btree_lastrec(tcur, level);
256 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
257 if ((error = xfs_btree_increment(tcur, level, &i))) {
258 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
261 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
262 i = xfs_btree_lastrec(tcur, level);
263 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
264 rbp = tcur->bc_bufs[level];
265 right = XFS_BUF_TO_BMBT_BLOCK(rbp);
267 if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
268 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
272 bno = be64_to_cpu(right->bb_leftsib);
273 if (be16_to_cpu(right->bb_numrecs) - 1 >=
274 XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
275 if ((error = xfs_bmbt_lshift(tcur, level, &i))) {
276 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
280 ASSERT(be16_to_cpu(block->bb_numrecs) >=
281 XFS_BMAP_BLOCK_IMINRECS(level, tcur));
282 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
285 if ((error = xfs_btree_decrement(cur,
287 XFS_BMBT_TRACE_CURSOR(cur,
292 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
297 rrecs = be16_to_cpu(right->bb_numrecs);
298 if (lbno != NULLFSBLOCK) {
299 i = xfs_btree_firstrec(tcur, level);
300 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
301 if ((error = xfs_btree_decrement(tcur, level, &i))) {
302 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
305 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/* Right sibling could not give one up; try the left sibling. */
308 if (lbno != NULLFSBLOCK) {
309 i = xfs_btree_firstrec(tcur, level);
310 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
312 * decrement to last in block
314 if ((error = xfs_btree_decrement(tcur, level, &i))) {
315 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
318 i = xfs_btree_firstrec(tcur, level);
319 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
320 lbp = tcur->bc_bufs[level];
321 left = XFS_BUF_TO_BMBT_BLOCK(lbp);
323 if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
324 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
328 bno = be64_to_cpu(left->bb_rightsib);
329 if (be16_to_cpu(left->bb_numrecs) - 1 >=
330 XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
331 if ((error = xfs_bmbt_rshift(tcur, level, &i))) {
332 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
336 ASSERT(be16_to_cpu(block->bb_numrecs) >=
337 XFS_BMAP_BLOCK_IMINRECS(level, tcur));
338 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
342 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
347 lrecs = be16_to_cpu(left->bb_numrecs);
349 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
/*
 * Neither sibling can lend a record: merge this block with
 * whichever sibling the combined contents will fit into.
 */
352 ASSERT(bno != NULLFSBLOCK);
353 if (lbno != NULLFSBLOCK &&
354 lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
358 if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, lbno, 0, &lbp,
359 XFS_BMAP_BTREE_REF))) {
360 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
363 left = XFS_BUF_TO_BMBT_BLOCK(lbp);
364 if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
365 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
368 } else if (rbno != NULLFSBLOCK &&
369 rrecs + be16_to_cpu(block->bb_numrecs) <=
370 XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
374 if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, rbno, 0, &rbp,
375 XFS_BMAP_BTREE_REF))) {
376 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
379 right = XFS_BUF_TO_BMBT_BLOCK(rbp);
380 if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
381 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
384 lrecs = be16_to_cpu(left->bb_numrecs);
386 if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) {
387 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
390 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* Copy everything from the right block onto the end of the left. */
394 numlrecs = be16_to_cpu(left->bb_numrecs);
395 numrrecs = be16_to_cpu(right->bb_numrecs);
397 lkp = XFS_BMAP_KEY_IADDR(left, numlrecs + 1, cur);
398 lpp = XFS_BMAP_PTR_IADDR(left, numlrecs + 1, cur);
399 rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
400 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
402 for (i = 0; i < numrrecs; i++) {
403 if ((error = xfs_btree_check_lptr_disk(cur, rpp[i], level))) {
404 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
409 memcpy(lkp, rkp, numrrecs * sizeof(*lkp));
410 memcpy(lpp, rpp, numrrecs * sizeof(*lpp));
411 xfs_bmbt_log_keys(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
412 xfs_bmbt_log_ptrs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
414 lrp = XFS_BMAP_REC_IADDR(left, numlrecs + 1, cur);
415 rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
416 memcpy(lrp, rrp, numrrecs * sizeof(*lrp));
417 xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
/* Unlink the right block from the sibling chain. */
419 be16_add_cpu(&left->bb_numrecs, numrrecs);
420 left->bb_rightsib = right->bb_rightsib;
421 xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS);
422 if (be64_to_cpu(left->bb_rightsib) != NULLDFSBNO) {
423 if ((error = xfs_btree_read_bufl(mp, cur->bc_tp,
424 be64_to_cpu(left->bb_rightsib),
425 0, &rrbp, XFS_BMAP_BTREE_REF))) {
426 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
429 rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp);
430 if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) {
431 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
434 rrblock->bb_leftsib = cpu_to_be64(lbno);
435 xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB);
/* Free the now-empty right block; account the block in inode/quota. */
437 xfs_bmap_add_free(XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(rbp)), 1,
438 cur->bc_private.b.flist, mp);
439 cur->bc_private.b.ip->i_d.di_nblocks--;
440 xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
441 XFS_TRANS_MOD_DQUOT_BYINO(mp, cur->bc_tp, cur->bc_private.b.ip,
442 XFS_TRANS_DQ_BCOUNT, -1L);
443 xfs_trans_binval(cur->bc_tp, rbp);
445 cur->bc_bufs[level] = lbp;
446 cur->bc_ptrs[level] += lrecs;
447 cur->bc_ra[level] = 0;
448 } else if ((error = xfs_btree_increment(cur, level + 1, &i))) {
449 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
453 cur->bc_ptrs[level]--;
454 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* error0: common bail-out -- tear down the duplicated cursor. */
460 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
465 * Insert one record/level. Return information to the caller
466 * allowing the next level up to proceed if necessary.
/*
 * xfs_bmbt_insrec -- insert one record (*recp) at cur/level.  If the
 * target block is full it first tries to grow an inode-root block,
 * grow a new root (xfs_bmbt_newroot), shed a record to a sibling
 * (rshift/lshift), or split the block (xfs_bmbt_split); the split case
 * hands back a new block number/cursor through *bnop/*curp so the
 * caller can continue the insert one level up.  *stat reports
 * no-go/done/continue.
 *
 * NOTE(review): elided excerpt -- the signature's name line and several
 * body lines are missing; stray leading numbers are extraction
 * artifacts.
 */
468 STATIC int /* error */
470 xfs_btree_cur_t *cur,
473 xfs_bmbt_rec_t *recp,
474 xfs_btree_cur_t **curp,
475 int *stat) /* no-go/done/continue */
477 xfs_bmbt_block_t *block; /* bmap btree block */
478 xfs_buf_t *bp; /* buffer for block */
479 int error; /* error return value */
480 int i; /* loop index */
481 xfs_bmbt_key_t key; /* bmap btree key */
482 xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */
483 int logflags; /* inode logging flags */
484 xfs_fsblock_t nbno; /* new block number */
485 struct xfs_btree_cur *ncur; /* new btree cursor */
486 __uint64_t startoff; /* new btree key value */
487 xfs_bmbt_rec_t nrec; /* new record count */
488 int optr; /* old key/record index */
489 xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */
490 int ptr; /* key/record index */
491 xfs_bmbt_rec_t *rp=NULL; /* pointer to bmap btree rec */
494 ASSERT(level < cur->bc_nlevels);
495 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
496 XFS_BMBT_TRACE_ARGIFR(cur, level, *bnop, recp);
/* The insertion key is the new record's start offset. */
498 key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(recp));
499 optr = ptr = cur->bc_ptrs[level];
501 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
505 XFS_STATS_INC(xs_bmbt_insrec);
506 block = xfs_bmbt_get_block(cur, level, &bp);
507 numrecs = be16_to_cpu(block->bb_numrecs);
509 if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
510 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/* Debug ordering checks against the record/key at the target slot. */
513 if (ptr <= numrecs) {
515 rp = XFS_BMAP_REC_IADDR(block, ptr, cur);
516 xfs_btree_check_rec(XFS_BTNUM_BMAP, recp, rp);
518 kp = XFS_BMAP_KEY_IADDR(block, ptr, cur);
519 xfs_btree_check_key(XFS_BTNUM_BMAP, &key, kp);
/* Block is full: make room before inserting. */
524 if (numrecs == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
525 if (numrecs < XFS_BMAP_BLOCK_DMAXRECS(level, cur)) {
527 * A root block, that can be made bigger.
529 xfs_iroot_realloc(cur->bc_private.b.ip, 1,
530 cur->bc_private.b.whichfork);
531 block = xfs_bmbt_get_block(cur, level, &bp);
532 } else if (level == cur->bc_nlevels - 1) {
533 if ((error = xfs_bmbt_newroot(cur, &logflags, stat)) ||
535 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
538 xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
540 block = xfs_bmbt_get_block(cur, level, &bp);
542 if ((error = xfs_bmbt_rshift(cur, level, &i))) {
543 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
549 if ((error = xfs_bmbt_lshift(cur, level, &i))) {
550 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
554 optr = ptr = cur->bc_ptrs[level];
/* Last resort: split the block; caller continues via *curp/*bnop. */
556 if ((error = xfs_bmbt_split(cur, level,
557 &nbno, &startoff, &ncur,
559 XFS_BMBT_TRACE_CURSOR(cur,
564 block = xfs_bmbt_get_block(
568 xfs_btree_check_lblock(cur,
569 block, level, bp))) {
570 XFS_BMBT_TRACE_CURSOR(
575 ptr = cur->bc_ptrs[level];
576 xfs_bmbt_disk_set_allf(&nrec,
580 XFS_BMBT_TRACE_CURSOR(cur,
589 numrecs = be16_to_cpu(block->bb_numrecs);
/* Interior node: open a gap at ptr and slide keys/ptrs up. */
591 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
592 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
594 for (i = numrecs; i >= ptr; i--) {
595 if ((error = xfs_btree_check_lptr_disk(cur, pp[i - 1],
597 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
602 memmove(&kp[ptr], &kp[ptr - 1],
603 (numrecs - ptr + 1) * sizeof(*kp));
604 memmove(&pp[ptr], &pp[ptr - 1],
605 (numrecs - ptr + 1) * sizeof(*pp));
607 if ((error = xfs_btree_check_lptr(cur, *bnop, level))) {
608 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
613 pp[ptr - 1] = cpu_to_be64(*bnop);
615 block->bb_numrecs = cpu_to_be16(numrecs);
616 xfs_bmbt_log_keys(cur, bp, ptr, numrecs);
617 xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs);
/* Leaf: open a gap in the record array instead. */
619 rp = XFS_BMAP_REC_IADDR(block, 1, cur);
620 memmove(&rp[ptr], &rp[ptr - 1],
621 (numrecs - ptr + 1) * sizeof(*rp));
624 block->bb_numrecs = cpu_to_be16(numrecs);
625 xfs_bmbt_log_recs(cur, bp, ptr, numrecs);
627 xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS);
631 xfs_btree_check_rec(XFS_BTNUM_BMAP, rp + ptr - 1,
634 xfs_btree_check_key(XFS_BTNUM_BMAP, kp + ptr - 1,
/* Inserting at slot 1 changes this block's low key; fix the parent. */
638 if (optr == 1 && (error = xfs_bmbt_updkey(cur, &key, level + 1))) {
639 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
643 if (nbno != NULLFSBLOCK) {
647 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * xfs_bmbt_killroot -- collapse a root that has exactly one child by
 * copying that child's keys and pointers up into the inode-fork root
 * (ifp->if_broot), freeing the child block, and reducing the tree
 * height (bb_level) by one.  Bails out (EXIT trace, presumably
 * returning success without changes) when the root has more than one
 * child or the child will not fit in the inode root.
 *
 * NOTE(review): elided excerpt -- the name line of the signature and
 * several declarations/bodies are missing; stray leading numbers are
 * extraction artifacts.
 */
654 xfs_btree_cur_t *cur)
656 xfs_bmbt_block_t *block;
657 xfs_bmbt_block_t *cblock;
671 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
672 level = cur->bc_nlevels - 1;
675 * Don't deal with the root block needs to be a leaf case.
676 * We're just going to turn the thing back into extents anyway.
679 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
682 block = xfs_bmbt_get_block(cur, level, &cbp);
684 * Give up if the root has multiple children.
686 if (be16_to_cpu(block->bb_numrecs) != 1) {
687 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
691 * Only do this if the next level will fit.
692 * Then the data must be copied up to the inode,
693 * instead of freeing the root you free the next level.
695 cbp = cur->bc_bufs[level - 1];
696 cblock = XFS_BUF_TO_BMBT_BLOCK(cbp);
697 if (be16_to_cpu(cblock->bb_numrecs) > XFS_BMAP_BLOCK_DMAXRECS(level, cur)) {
698 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* The child must have no siblings or it cannot become the root. */
701 ASSERT(be64_to_cpu(cblock->bb_leftsib) == NULLDFSBNO);
702 ASSERT(be64_to_cpu(cblock->bb_rightsib) == NULLDFSBNO);
703 ip = cur->bc_private.b.ip;
704 ifp = XFS_IFORK_PTR(ip, cur->bc_private.b.whichfork);
705 ASSERT(XFS_BMAP_BLOCK_IMAXRECS(level, cur) ==
706 XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes));
/* Resize the inode root to hold the child's record count. */
707 i = (int)(be16_to_cpu(cblock->bb_numrecs) - XFS_BMAP_BLOCK_IMAXRECS(level, cur));
709 xfs_iroot_realloc(ip, i, cur->bc_private.b.whichfork);
710 block = ifp->if_broot;
712 be16_add_cpu(&block->bb_numrecs, i);
713 ASSERT(block->bb_numrecs == cblock->bb_numrecs);
/* Copy the child's keys, then its (validated) pointers, into the root. */
714 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
715 ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur);
716 memcpy(kp, ckp, be16_to_cpu(block->bb_numrecs) * sizeof(*kp));
717 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
718 cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur);
720 for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
721 if ((error = xfs_btree_check_lptr_disk(cur, cpp[i], level - 1))) {
722 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
727 memcpy(pp, cpp, be16_to_cpu(block->bb_numrecs) * sizeof(*pp));
/* Free the old child block and account it against inode and quota. */
728 xfs_bmap_add_free(XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(cbp)), 1,
729 cur->bc_private.b.flist, cur->bc_mp);
730 ip->i_d.di_nblocks--;
731 XFS_TRANS_MOD_DQUOT_BYINO(cur->bc_mp, cur->bc_tp, ip,
732 XFS_TRANS_DQ_BCOUNT, -1L);
733 xfs_trans_binval(cur->bc_tp, cbp);
734 cur->bc_bufs[level - 1] = NULL;
735 be16_add_cpu(&block->bb_level, -1);
736 xfs_trans_log_inode(cur->bc_tp, ip,
737 XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
739 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
744 * Log key values from the btree block.
/*
 * xfs_bmbt_log_keys -- log the key range [kfirst, klast] of a bmap
 * btree block to the transaction.  For a disk block (bp != NULL,
 * presumably -- the branch condition is elided) the byte range of the
 * keys is logged against the buffer; otherwise the block lives in the
 * inode fork root and the inode's broot is logged instead.
 *
 * NOTE(review): elided excerpt; stray leading numbers are extraction
 * artifacts.
 */
748 xfs_btree_cur_t *cur,
755 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
756 XFS_BMBT_TRACE_ARGBII(cur, bp, kfirst, klast);
759 xfs_bmbt_block_t *block;
/* Buffer case: compute the byte offsets of the affected keys. */
764 block = XFS_BUF_TO_BMBT_BLOCK(bp);
765 kp = XFS_BMAP_KEY_DADDR(block, 1, cur);
766 first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block);
767 last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block);
768 xfs_trans_log_buf(tp, bp, first, last);
/* Inode-root case: log the fork's broot via the inode. */
772 ip = cur->bc_private.b.ip;
773 xfs_trans_log_inode(tp, ip,
774 XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
776 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
780 * Log pointer values from the btree block.
/*
 * xfs_bmbt_log_ptrs -- log the pointer range [pfirst, plast] of a bmap
 * btree block; structurally identical to xfs_bmbt_log_keys but for the
 * block-pointer array.  Buffer-backed blocks log a byte range of the
 * buffer, inode-root blocks log the fork's broot through the inode.
 *
 * NOTE(review): elided excerpt; stray leading numbers are extraction
 * artifacts.
 */
784 xfs_btree_cur_t *cur,
791 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
792 XFS_BMBT_TRACE_ARGBII(cur, bp, pfirst, plast);
795 xfs_bmbt_block_t *block;
/* Buffer case: byte offsets of the affected pointers. */
800 block = XFS_BUF_TO_BMBT_BLOCK(bp);
801 pp = XFS_BMAP_PTR_DADDR(block, 1, cur);
802 first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block);
803 last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block);
804 xfs_trans_log_buf(tp, bp, first, last);
/* Inode-root case. */
808 ip = cur->bc_private.b.ip;
809 xfs_trans_log_inode(tp, ip,
810 XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
812 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
816 * Lookup the record. The cursor is made to point to it, based on dir.
/*
 * xfs_bmbt_lookup -- position the cursor at the record matching the
 * search key according to dir (LE/EQ/GE).  Walks the tree from the
 * root, reading/validating each child block, then binary-searches each
 * block on br_startoff (interior blocks search keys, leaves search
 * records).  On return cur->bc_ptrs[] describes the path; *stat is 1
 * on success (for EQ, only if an exact match was found).
 *
 * NOTE(review): elided excerpt -- the name line, several declarations
 * and a number of branch lines are missing; stray leading numbers are
 * extraction artifacts.
 */
818 STATIC int /* error */
820 xfs_btree_cur_t *cur,
822 int *stat) /* success/failure */
824 xfs_bmbt_block_t *block=NULL;
828 int error; /* error return value */
829 xfs_fsblock_t fsbno=0;
833 xfs_bmbt_key_t *kkbase=NULL;
835 xfs_bmbt_rec_t *krbase=NULL;
842 xfs_fileoff_t startoff;
845 XFS_STATS_INC(xs_bmbt_lookup);
846 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
847 XFS_BMBT_TRACE_ARGI(cur, (int)dir);
/* Descend from the root; non-root levels read their block from disk,
 * reusing the cursor's cached buffer when it already holds the block. */
851 for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
852 if (level < cur->bc_nlevels - 1) {
853 d = XFS_FSB_TO_DADDR(mp, fsbno);
854 bp = cur->bc_bufs[level];
855 if (bp && XFS_BUF_ADDR(bp) != d)
858 if ((error = xfs_btree_read_bufl(mp, tp, fsbno,
859 0, &bp, XFS_BMAP_BTREE_REF))) {
860 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
863 xfs_btree_setbuf(cur, level, bp);
864 block = XFS_BUF_TO_BMBT_BLOCK(bp);
865 if ((error = xfs_btree_check_lblock(cur, block,
867 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
871 block = XFS_BUF_TO_BMBT_BLOCK(bp);
873 block = xfs_bmbt_get_block(cur, level, &bp);
878 kkbase = XFS_BMAP_KEY_IADDR(block, 1, cur);
880 krbase = XFS_BMAP_REC_IADDR(block, 1, cur);
/* Empty block: only possible at the leaf; position depends on dir. */
882 if (!(high = be16_to_cpu(block->bb_numrecs))) {
884 cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
885 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* Binary search this block on br_startoff. */
889 while (low <= high) {
890 XFS_STATS_INC(xs_bmbt_compare);
891 keyno = (low + high) >> 1;
893 kkp = kkbase + keyno - 1;
894 startoff = be64_to_cpu(kkp->br_startoff);
896 krp = krbase + keyno - 1;
897 startoff = xfs_bmbt_disk_get_startoff(krp);
899 diff = (xfs_sfiloff_t)
900 (startoff - rp->br_startoff);
/* Interior level: pick the child pointer and keep descending. */
910 if (diff > 0 && --keyno < 1)
912 pp = XFS_BMAP_PTR_IADDR(block, keyno, cur);
913 fsbno = be64_to_cpu(*pp);
915 if ((error = xfs_btree_check_lptr(cur, fsbno, level))) {
916 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
920 cur->bc_ptrs[level] = keyno;
/* Leaf reached: adjust keyno per the search direction. */
923 if (dir != XFS_LOOKUP_LE && diff < 0) {
926 * If ge search and we went off the end of the block, but it's
927 * not the last block, we're in the wrong block.
929 if (dir == XFS_LOOKUP_GE && keyno > be16_to_cpu(block->bb_numrecs) &&
930 be64_to_cpu(block->bb_rightsib) != NULLDFSBNO) {
931 cur->bc_ptrs[0] = keyno;
932 if ((error = xfs_btree_increment(cur, 0, &i))) {
933 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
936 XFS_WANT_CORRUPTED_RETURN(i == 1);
937 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
942 else if (dir == XFS_LOOKUP_LE && diff > 0)
944 cur->bc_ptrs[0] = keyno;
945 if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs)) {
946 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
949 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* EQ lookups succeed only on an exact key match (diff == 0). */
950 *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0));
956 * Move 1 record left from cur/level if possible.
957 * Update cur to reflect the new path.
/*
 * xfs_bmbt_lshift -- move one record from the block at cur/level into
 * its left sibling (if one exists and has room), then update the
 * parent key for the right block and step the cursor left one slot.
 * Early EXIT paths: at the root, no left sibling, cursor already at
 * slot 1, or the left sibling is full.
 *
 * NOTE(review): elided excerpt -- the name line and various body lines
 * are missing; stray leading numbers are extraction artifacts.
 */
959 STATIC int /* error */
961 xfs_btree_cur_t *cur,
963 int *stat) /* success/failure */
965 int error; /* error return value */
967 int i; /* loop counter */
969 xfs_bmbt_key_t key; /* bmap btree key */
970 xfs_buf_t *lbp; /* left buffer pointer */
971 xfs_bmbt_block_t *left; /* left btree block */
972 xfs_bmbt_key_t *lkp=NULL; /* left btree key */
973 xfs_bmbt_ptr_t *lpp; /* left address pointer */
974 int lrecs; /* left record count */
975 xfs_bmbt_rec_t *lrp=NULL; /* left record pointer */
976 xfs_mount_t *mp; /* file system mount point */
977 xfs_buf_t *rbp; /* right buffer pointer */
978 xfs_bmbt_block_t *right; /* right btree block */
979 xfs_bmbt_key_t *rkp=NULL; /* right btree key */
980 xfs_bmbt_ptr_t *rpp=NULL; /* right address pointer */
981 xfs_bmbt_rec_t *rrp=NULL; /* right record pointer */
982 int rrecs; /* right record count */
984 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
985 XFS_BMBT_TRACE_ARGI(cur, level);
986 if (level == cur->bc_nlevels - 1) {
987 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
991 rbp = cur->bc_bufs[level];
992 right = XFS_BUF_TO_BMBT_BLOCK(rbp);
994 if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
995 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
999 if (be64_to_cpu(right->bb_leftsib) == NULLDFSBNO) {
1000 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1004 if (cur->bc_ptrs[level] <= 1) {
1005 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* Read and validate the left sibling. */
1010 if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, be64_to_cpu(right->bb_leftsib), 0,
1011 &lbp, XFS_BMAP_BTREE_REF))) {
1012 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1015 left = XFS_BUF_TO_BMBT_BLOCK(lbp);
1016 if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
1017 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1020 if (be16_to_cpu(left->bb_numrecs) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
1021 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* Append the right block's first key/ptr (or record) to the left. */
1025 lrecs = be16_to_cpu(left->bb_numrecs) + 1;
1027 lkp = XFS_BMAP_KEY_IADDR(left, lrecs, cur);
1028 rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
1030 xfs_bmbt_log_keys(cur, lbp, lrecs, lrecs);
1031 lpp = XFS_BMAP_PTR_IADDR(left, lrecs, cur);
1032 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
1034 if ((error = xfs_btree_check_lptr_disk(cur, *rpp, level))) {
1035 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1040 xfs_bmbt_log_ptrs(cur, lbp, lrecs, lrecs);
1042 lrp = XFS_BMAP_REC_IADDR(left, lrecs, cur);
1043 rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
1045 xfs_bmbt_log_recs(cur, lbp, lrecs, lrecs);
1047 left->bb_numrecs = cpu_to_be16(lrecs);
1048 xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS);
1051 xfs_btree_check_key(XFS_BTNUM_BMAP, lkp - 1, lkp);
1053 xfs_btree_check_rec(XFS_BTNUM_BMAP, lrp - 1, lrp);
/* Close the gap at the front of the right block. */
1055 rrecs = be16_to_cpu(right->bb_numrecs) - 1;
1056 right->bb_numrecs = cpu_to_be16(rrecs);
1057 xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS);
1060 for (i = 0; i < rrecs; i++) {
1061 if ((error = xfs_btree_check_lptr_disk(cur, rpp[i + 1],
1063 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1068 memmove(rkp, rkp + 1, rrecs * sizeof(*rkp));
1069 memmove(rpp, rpp + 1, rrecs * sizeof(*rpp));
1070 xfs_bmbt_log_keys(cur, rbp, 1, rrecs);
1071 xfs_bmbt_log_ptrs(cur, rbp, 1, rrecs);
1073 memmove(rrp, rrp + 1, rrecs * sizeof(*rrp));
1074 xfs_bmbt_log_recs(cur, rbp, 1, rrecs);
1075 key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(rrp));
/* Right block's low key changed -- propagate it to the parent. */
1078 if ((error = xfs_bmbt_updkey(cur, rkp, level + 1))) {
1079 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1082 cur->bc_ptrs[level]--;
1083 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1089 * Move 1 record right from cur/level if possible.
1090 * Update cur to reflect the new path.
/*
 * xfs_bmbt_rshift -- move one record from the block at cur/level into
 * its right sibling (if one exists and has room).  The right block's
 * contents are slid up one slot, the left block's last entry is moved
 * in, and a duplicated cursor walks to the right block so its parent
 * key can be updated via xfs_bmbt_updkey().
 *
 * NOTE(review): elided excerpt -- the name line and various body lines
 * are missing; stray leading numbers are extraction artifacts.
 */
1092 STATIC int /* error */
1094 xfs_btree_cur_t *cur,
1096 int *stat) /* success/failure */
1098 int error; /* error return value */
1099 int i; /* loop counter */
1100 xfs_bmbt_key_t key; /* bmap btree key */
1101 xfs_buf_t *lbp; /* left buffer pointer */
1102 xfs_bmbt_block_t *left; /* left btree block */
1103 xfs_bmbt_key_t *lkp; /* left btree key */
1104 xfs_bmbt_ptr_t *lpp; /* left address pointer */
1105 xfs_bmbt_rec_t *lrp; /* left record pointer */
1106 xfs_mount_t *mp; /* file system mount point */
1107 xfs_buf_t *rbp; /* right buffer pointer */
1108 xfs_bmbt_block_t *right; /* right btree block */
1109 xfs_bmbt_key_t *rkp; /* right btree key */
1110 xfs_bmbt_ptr_t *rpp; /* right address pointer */
1111 xfs_bmbt_rec_t *rrp=NULL; /* right record pointer */
1112 struct xfs_btree_cur *tcur; /* temporary btree cursor */
1114 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1115 XFS_BMBT_TRACE_ARGI(cur, level);
1116 if (level == cur->bc_nlevels - 1) {
1117 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1121 lbp = cur->bc_bufs[level];
1122 left = XFS_BUF_TO_BMBT_BLOCK(lbp);
1124 if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
1125 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1129 if (be64_to_cpu(left->bb_rightsib) == NULLDFSBNO) {
1130 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1134 if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) {
1135 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* Read and validate the right sibling. */
1140 if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, be64_to_cpu(left->bb_rightsib), 0,
1141 &rbp, XFS_BMAP_BTREE_REF))) {
1142 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1145 right = XFS_BUF_TO_BMBT_BLOCK(rbp);
1146 if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
1147 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1150 if (be16_to_cpu(right->bb_numrecs) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
1151 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* Slide the right block up one slot and move the left block's
 * last key/ptr (or record) into the freed first slot. */
1156 lkp = XFS_BMAP_KEY_IADDR(left, be16_to_cpu(left->bb_numrecs), cur);
1157 lpp = XFS_BMAP_PTR_IADDR(left, be16_to_cpu(left->bb_numrecs), cur);
1158 rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
1159 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
1161 for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) {
1162 if ((error = xfs_btree_check_lptr_disk(cur, rpp[i], level))) {
1163 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1168 memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
1169 memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
1171 if ((error = xfs_btree_check_lptr_disk(cur, *lpp, level))) {
1172 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1178 xfs_bmbt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
1179 xfs_bmbt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
1181 lrp = XFS_BMAP_REC_IADDR(left, be16_to_cpu(left->bb_numrecs), cur);
1182 rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
1183 memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
1185 xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
1186 key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(rrp));
1189 be16_add_cpu(&left->bb_numrecs, -1);
1190 xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS);
1191 be16_add_cpu(&right->bb_numrecs, 1);
1194 xfs_btree_check_key(XFS_BTNUM_BMAP, rkp, rkp + 1);
1196 xfs_btree_check_rec(XFS_BTNUM_BMAP, rrp, rrp + 1);
1198 xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS);
/* Walk a duplicated cursor to the right block and fix its parent key. */
1199 if ((error = xfs_btree_dup_cursor(cur, &tcur))) {
1200 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1203 i = xfs_btree_lastrec(tcur, level);
1204 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1205 if ((error = xfs_btree_increment(tcur, level, &i))) {
1206 XFS_BMBT_TRACE_CURSOR(tcur, ERROR);
1209 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1210 if ((error = xfs_bmbt_updkey(tcur, rkp, level + 1))) {
1211 XFS_BMBT_TRACE_CURSOR(tcur, ERROR);
1214 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1215 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* error paths: discard the duplicated cursor. */
1219 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1221 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
1226 * Determine the extent state.
1235 ASSERT(blks != 0); /* saved for DMIG */
1236 return XFS_EXT_UNWRITTEN;
1238 return XFS_EXT_NORM;
/*
 * NOTE(review): this listing is elided (the xfs_bmbt_split() name line,
 * braces, goto labels and several statements are missing); code below is
 * kept verbatim.  Splits the block at [level] of the cursor into a left
 * half (the existing block) and a freshly allocated right half, returning
 * the new block number in *bnop and the right block's first key offset in
 * *startoff, plus a duplicated cursor in *curp for the parent insert.
 */
1243 * Split cur/level block in half.
1244 * Return new block number and its first record (to be inserted into parent).
1246 STATIC int /* error */
1248 xfs_btree_cur_t *cur,
1250 xfs_fsblock_t *bnop,
1251 __uint64_t *startoff,
1252 xfs_btree_cur_t **curp,
1253 int *stat) /* success/failure */
1255 xfs_alloc_arg_t args; /* block allocation args */
1256 int error; /* error return value */
1257 int i; /* loop counter */
1258 xfs_fsblock_t lbno; /* left sibling block number */
1259 xfs_buf_t *lbp; /* left buffer pointer */
1260 xfs_bmbt_block_t *left; /* left btree block */
1261 xfs_bmbt_key_t *lkp; /* left btree key */
1262 xfs_bmbt_ptr_t *lpp; /* left address pointer */
1263 xfs_bmbt_rec_t *lrp; /* left record pointer */
1264 xfs_buf_t *rbp; /* right buffer pointer */
1265 xfs_bmbt_block_t *right; /* right btree block */
1266 xfs_bmbt_key_t *rkp; /* right btree key */
1267 xfs_bmbt_ptr_t *rpp; /* right address pointer */
1268 xfs_bmbt_block_t *rrblock; /* right-right btree block */
1269 xfs_buf_t *rrbp; /* right-right buffer pointer */
1270 xfs_bmbt_rec_t *rrp; /* right record pointer */
1272 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1273 // disable until merged into common code
1274 // XFS_BMBT_TRACE_ARGIFK(cur, level, *bnop, *startoff);
/* Set up the allocation request for the new (right) block. */
1275 args.tp = cur->bc_tp;
1276 args.mp = cur->bc_mp;
1277 lbp = cur->bc_bufs[level];
1278 lbno = XFS_DADDR_TO_FSB(args.mp, XFS_BUF_ADDR(lbp));
1279 left = XFS_BUF_TO_BMBT_BLOCK(lbp);
1280 args.fsbno = cur->bc_private.b.firstblock;
1281 args.firstblock = args.fsbno;
1283 if (args.fsbno == NULLFSBLOCK) {
1285 args.type = XFS_ALLOCTYPE_START_BNO;
1287 * Make sure there is sufficient room left in the AG to
1288 * complete a full tree split for an extent insert. If
1289 * we are converting the middle part of an extent then
1290 * we may need space for two tree splits.
1292 * We are relying on the caller to make the correct block
1293 * reservation for this operation to succeed. If the
1294 * reservation amount is insufficient then we may fail a
1295 * block allocation here and corrupt the filesystem.
1297 args.minleft = xfs_trans_get_block_res(args.tp);
1298 } else if (cur->bc_private.b.flist->xbf_low)
1299 args.type = XFS_ALLOCTYPE_START_BNO;
1301 args.type = XFS_ALLOCTYPE_NEAR_BNO;
1302 args.mod = args.alignment = args.total = args.isfl =
1303 args.userdata = args.minalignslop = 0;
1304 args.minlen = args.maxlen = args.prod = 1;
1305 args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
/* No delayed-alloc credit and no transaction reservation: refuse early. */
1306 if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) {
1307 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1308 return XFS_ERROR(ENOSPC);
1310 if ((error = xfs_alloc_vextent(&args))) {
1311 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1314 if (args.fsbno == NULLFSBLOCK && args.minleft) {
1316 * Could not find an AG with enough free space to satisfy
1317 * a full btree split. Try again without minleft and if
1318 * successful activate the lowspace algorithm.
1321 args.type = XFS_ALLOCTYPE_FIRST_AG;
1323 if ((error = xfs_alloc_vextent(&args))) {
1324 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1327 cur->bc_private.b.flist->xbf_low = 1;
1329 if (args.fsbno == NULLFSBLOCK) {
1330 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1334 ASSERT(args.len == 1);
/* Account the new block against the inode and its quota. */
1335 cur->bc_private.b.firstblock = args.fsbno;
1336 cur->bc_private.b.allocated++;
1337 cur->bc_private.b.ip->i_d.di_nblocks++;
1338 xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
1339 XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
1340 XFS_TRANS_DQ_BCOUNT, 1L);
1341 rbp = xfs_btree_get_bufl(args.mp, args.tp, args.fsbno, 0);
1342 right = XFS_BUF_TO_BMBT_BLOCK(rbp);
1344 if ((error = xfs_btree_check_lblock(cur, left, level, rbp))) {
1345 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/*
 * Right block gets the upper half of the left block's entries; if the
 * count is odd, the extra entry goes wherever the cursor will end up.
 */
1349 right->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
1350 right->bb_level = left->bb_level;
1351 right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2);
1352 if ((be16_to_cpu(left->bb_numrecs) & 1) &&
1353 cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
1354 be16_add_cpu(&right->bb_numrecs, 1);
1355 i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
/*
 * NOTE(review): the "if (level > 0)" / "else" framing around the two copy
 * paths below (keys+ptrs for interior nodes, records for leaves) is elided
 * from this listing.
 */
1357 lkp = XFS_BMAP_KEY_IADDR(left, i, cur);
1358 lpp = XFS_BMAP_PTR_IADDR(left, i, cur);
1359 rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
1360 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
1362 for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
1363 if ((error = xfs_btree_check_lptr_disk(cur, lpp[i], level))) {
1364 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1369 memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
1370 memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
1371 xfs_bmbt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1372 xfs_bmbt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1373 *startoff = be64_to_cpu(rkp->br_startoff);
1375 lrp = XFS_BMAP_REC_IADDR(left, i, cur);
1376 rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
1377 memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
1378 xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1379 *startoff = xfs_bmbt_disk_get_startoff(rrp);
/* Stitch the new block into the sibling chain: left <-> right <-> old right. */
1381 be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
1382 right->bb_rightsib = left->bb_rightsib;
1383 left->bb_rightsib = cpu_to_be64(args.fsbno);
1384 right->bb_leftsib = cpu_to_be64(lbno);
1385 xfs_bmbt_log_block(cur, rbp, XFS_BB_ALL_BITS);
1386 xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
1387 if (be64_to_cpu(right->bb_rightsib) != NULLDFSBNO) {
1388 if ((error = xfs_btree_read_bufl(args.mp, args.tp,
1389 be64_to_cpu(right->bb_rightsib), 0, &rrbp,
1390 XFS_BMAP_BTREE_REF))) {
1391 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1394 rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp);
1395 if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) {
1396 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1399 rrblock->bb_leftsib = cpu_to_be64(args.fsbno);
1400 xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB);
/* If the cursor pointed into the moved half, retarget it at the new block. */
1402 if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) {
1403 xfs_btree_setbuf(cur, level, rbp);
1404 cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs);
/* Hand the caller a cursor positioned for the parent-level insert. */
1406 if (level + 1 < cur->bc_nlevels) {
1407 if ((error = xfs_btree_dup_cursor(cur, curp))) {
1408 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1411 (*curp)->bc_ptrs[level + 1]++;
1414 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * NOTE(review): elided listing -- the xfs_bmbt_updkey name line, the key
 * store into *kp, braces and return statements are missing; code kept
 * verbatim.  Walks from [level] toward the root, rewriting the key for the
 * cursor's current entry at each level, stopping once the entry is no
 * longer the first (ptr != 1) in its block.
 */
1421 * Update keys for the record.
1425 xfs_btree_cur_t *cur,
1426 xfs_bmbt_key_t *keyp, /* on-disk format */
1429 xfs_bmbt_block_t *block;
1438 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1439 XFS_BMBT_TRACE_ARGIK(cur, level, keyp);
1440 for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
1441 block = xfs_bmbt_get_block(cur, level, &bp);
1443 if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
1444 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1448 ptr = cur->bc_ptrs[level];
1449 kp = XFS_BMAP_KEY_IADDR(block, ptr, cur);
1451 xfs_bmbt_log_keys(cur, bp, ptr, ptr);
1453 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * NOTE(review): elided listing (function name line and some declarations
 * missing).  Copies a packed on-disk bmap btree root (xfs_bmdr_block_t)
 * into the full in-core root format: header fields first, then keys and
 * child pointers, with sibling pointers forced to NULLDFSBNO since a root
 * has no siblings.
 */
1458 * Convert on-disk form of btree root to in-memory form.
1462 xfs_bmdr_block_t *dblock,
1464 xfs_bmbt_block_t *rblock,
1468 xfs_bmbt_key_t *fkp;
1470 xfs_bmbt_key_t *tkp;
1473 rblock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
1474 rblock->bb_level = dblock->bb_level;
1475 ASSERT(be16_to_cpu(rblock->bb_level) > 0);
1476 rblock->bb_numrecs = dblock->bb_numrecs;
1477 rblock->bb_leftsib = cpu_to_be64(NULLDFSBNO);
1478 rblock->bb_rightsib = cpu_to_be64(NULLDFSBNO);
/* dmxr is first the source's max record count, then its actual count. */
1479 dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
1480 fkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
1481 tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
1482 fpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
1483 tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
1484 dmxr = be16_to_cpu(dblock->bb_numrecs);
1485 memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
1486 memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
/*
 * NOTE(review): elided listing (function name line, loop bodies' tails and
 * returns missing).  Deletes the current record by calling xfs_bmbt_delrec
 * level by level while it reports "keep going" (i == 2, i.e. a join/merge
 * bubbled the delete upward), then walks the upper levels decrementing any
 * cursor position that was left at 0.
 */
1490 * Delete the record pointed to by cur.
1494 xfs_btree_cur_t *cur,
1495 int *stat) /* success/failure */
1497 int error; /* error return value */
1501 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1502 for (level = 0, i = 2; i == 2; level++) {
1503 if ((error = xfs_bmbt_delrec(cur, level, &i))) {
1504 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1509 for (level = 1; level < cur->bc_nlevels; level++) {
1510 if (cur->bc_ptrs[level] == 0) {
1511 if ((error = xfs_btree_decrement(cur, level,
1513 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1520 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * NOTE(review): elided listing (signature, #if XFS_BIG_BLKNOS framing and
 * the trailing state assignment are partly missing).  Unpacks the two
 * 64-bit words of a bmap extent record: bit 63 of l0 is the extent flag,
 * next 54 bits the start offset, the low 9 bits of l0 plus the high 43 of
 * l1 the start block, and the low 21 bits of l1 the block count.
 */
1526 * Convert a compressed bmap extent record to an uncompressed form.
1527 * This code must be in sync with the routines xfs_bmbt_get_startoff,
1528 * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
1540 ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
1541 s->br_startoff = ((xfs_fileoff_t)l0 &
1542 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
1544 s->br_startblock = (((xfs_fsblock_t)l0 & XFS_MASK64LO(9)) << 43) |
1545 (((xfs_fsblock_t)l1) >> 21);
/* Small-blkno build: verify the value fits in 32 bits (or is a NULL sentinel). */
1551 b = (((xfs_dfsbno_t)l0 & XFS_MASK64LO(9)) << 43) |
1552 (((xfs_dfsbno_t)l1) >> 21);
1553 ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
1554 s->br_startblock = (xfs_fsblock_t)b;
1557 s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21);
1559 #endif /* XFS_BIG_BLKNOS */
1560 s->br_blockcount = (xfs_filblks_t)(l1 & XFS_MASK64LO(21));
1561 /* This is xfs_extent_state() in-line */
1563 ASSERT(s->br_blockcount != 0); /* saved for DMIG */
1564 st = XFS_EXT_UNWRITTEN;
/* Unpack a host-endian extent record into *s via the common helper. */
1572 xfs_bmbt_rec_host_t *r,
1575 __xfs_bmbt_get_all(r->l0, r->l1, s);
/*
 * NOTE(review): elided listing (signature and return missing).  For any
 * level below the topmost, the block lives in a buffer held by the cursor;
 * the topmost level is the root embedded in the inode fork, for which no
 * buffer exists (the elided else-branch presumably clears *bpp -- confirm).
 */
1579 * Get the block pointer for the given level of the cursor.
1580 * Fill in the buffer pointer, if applicable.
1584 xfs_btree_cur_t *cur,
1589 xfs_bmbt_block_t *rval;
1591 if (level < cur->bc_nlevels - 1) {
1592 *bpp = cur->bc_bufs[level];
1593 rval = XFS_BUF_TO_BMBT_BLOCK(*bpp);
1596 ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
1597 cur->bc_private.b.whichfork);
1598 rval = ifp->if_broot;
/* Block count = low 21 bits of the record's second word. */
1604 * Extract the blockcount field from an in memory bmap extent record.
1607 xfs_bmbt_get_blockcount(
1608 xfs_bmbt_rec_host_t *r)
1610 return (xfs_filblks_t)(r->l1 & XFS_MASK64LO(21));
/*
 * NOTE(review): the #if XFS_BIG_BLKNOS / #else framing is partly elided.
 * Start block = low 9 bits of l0 (high part) concatenated with the top 43
 * bits of l1; the small-blkno variant asserts it fits in 32 bits.
 */
1614 * Extract the startblock field from an in memory bmap extent record.
1617 xfs_bmbt_get_startblock(
1618 xfs_bmbt_rec_host_t *r)
1621 return (((xfs_fsblock_t)r->l0 & XFS_MASK64LO(9)) << 43) |
1622 (((xfs_fsblock_t)r->l1) >> 21);
1627 b = (((xfs_dfsbno_t)r->l0 & XFS_MASK64LO(9)) << 43) |
1628 (((xfs_dfsbno_t)r->l1) >> 21);
1629 ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
1630 return (xfs_fsblock_t)b;
1632 return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21);
1634 #endif /* XFS_BIG_BLKNOS */
/* Start offset = l0 with the extent flag masked off, shifted past the 9 startblock-high bits. */
1638 * Extract the startoff field from an in memory bmap extent record.
1641 xfs_bmbt_get_startoff(
1642 xfs_bmbt_rec_host_t *r)
1644 return ((xfs_fileoff_t)r->l0 &
1645 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
/* Extent state from the flag in bit 63 plus the record's block count. */
1650 xfs_bmbt_rec_host_t *r)
1654 ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN));
1655 return xfs_extent_state(xfs_bmbt_get_blockcount(r),
/* Big-endian on-disk record: byte-swap both words, then unpack as usual. */
1659 /* Endian flipping versions of the bmbt extraction functions */
1661 xfs_bmbt_disk_get_all(
1665 __xfs_bmbt_get_all(be64_to_cpu(r->l0), be64_to_cpu(r->l1), s);
/* On-disk variant of xfs_bmbt_get_blockcount: swap l1 first. */
1669 * Extract the blockcount field from an on disk bmap extent record.
1672 xfs_bmbt_disk_get_blockcount(
1675 return (xfs_filblks_t)(be64_to_cpu(r->l1) & XFS_MASK64LO(21));
/* On-disk variant of xfs_bmbt_get_startoff: swap l0 first. */
1679 * Extract the startoff field from a disk format bmap extent record.
1682 xfs_bmbt_disk_get_startoff(
1685 return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
1686 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
/*
 * NOTE(review): elided listing (function name line, do-loop head and some
 * cleanup missing).  Inserts cur->bc_rec.b, looping while insrec reports a
 * split (nbno != NULLFSBLOCK): each split hands back a new cursor (ncur)
 * for the parent-level insert; when a chain finishes, allocation counts and
 * firstblock are folded back into the caller's cursor.
 */
1690 * Insert the current record at the point referenced by cur.
1692 * A multi-level split of the tree on insert will invalidate the original
1693 * cursor. All callers of this function should assume that the cursor is
1694 * no longer valid and revalidate it.
1698 xfs_btree_cur_t *cur,
1699 int *stat) /* success/failure */
1701 int error; /* error return value */
1705 xfs_btree_cur_t *ncur;
1706 xfs_bmbt_rec_t nrec;
1707 xfs_btree_cur_t *pcur;
1709 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1712 xfs_bmbt_disk_set_all(&nrec, &cur->bc_rec.b);
1716 if ((error = xfs_bmbt_insrec(pcur, level++, &nbno, &nrec, &ncur,
1719 xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
1720 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1723 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/* A temporary cursor finished its chain: absorb its state and free it. */
1724 if (pcur != cur && (ncur || nbno == NULLFSBLOCK)) {
1725 cur->bc_nlevels = pcur->bc_nlevels;
1726 cur->bc_private.b.allocated +=
1727 pcur->bc_private.b.allocated;
1728 pcur->bc_private.b.allocated = 0;
1729 ASSERT((cur->bc_private.b.firstblock != NULLFSBLOCK) ||
1730 XFS_IS_REALTIME_INODE(cur->bc_private.b.ip));
1731 cur->bc_private.b.firstblock =
1732 pcur->bc_private.b.firstblock;
1733 ASSERT(cur->bc_private.b.flist ==
1734 pcur->bc_private.b.flist);
1735 xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
1741 } while (nbno != NULLFSBLOCK);
1742 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1746 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/*
 * NOTE(review): elided listing (signature tail and the buffer/inode-root
 * branching are partly missing).  Logs the selected header fields: via the
 * transaction buffer log for a real block, or by logging the inode fork
 * broot when the root lives in the inode (no buffer).
 */
1751 * Log fields from the btree block header.
1755 xfs_btree_cur_t *cur,
/* Byte offsets of each loggable header field, terminated by the struct size. */
1762 static const short offsets[] = {
1763 offsetof(xfs_bmbt_block_t, bb_magic),
1764 offsetof(xfs_bmbt_block_t, bb_level),
1765 offsetof(xfs_bmbt_block_t, bb_numrecs),
1766 offsetof(xfs_bmbt_block_t, bb_leftsib),
1767 offsetof(xfs_bmbt_block_t, bb_rightsib),
1768 sizeof(xfs_bmbt_block_t)
1771 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1772 XFS_BMBT_TRACE_ARGBI(cur, bp, fields);
1775 xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first,
1777 xfs_trans_log_buf(tp, bp, first, last);
1779 xfs_trans_log_inode(tp, cur->bc_private.b.ip,
1780 XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
1781 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * NOTE(review): elided listing (signature tail missing).  Computes the byte
 * range covering records [rfirst, rlast] within the block and logs exactly
 * that span of the buffer.
 */
1785 * Log record values from the btree block.
1789 xfs_btree_cur_t *cur,
1794 xfs_bmbt_block_t *block;
1800 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1801 XFS_BMBT_TRACE_ARGBII(cur, bp, rfirst, rlast);
1804 block = XFS_BUF_TO_BMBT_BLOCK(bp);
1805 rp = XFS_BMAP_REC_DADDR(block, 1, cur);
1806 first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block);
1807 last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block);
1808 xfs_trans_log_buf(tp, bp, first, last);
1809 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * Exact-match lookup: stash off/bno/len in the cursor's scratch record and
 * search with XFS_LOOKUP_EQ.  (Name line elided from this listing.)
 */
1814 xfs_btree_cur_t *cur,
1818 int *stat) /* success/failure */
1820 cur->bc_rec.b.br_startoff = off;
1821 cur->bc_rec.b.br_startblock = bno;
1822 cur->bc_rec.b.br_blockcount = len;
1823 return xfs_bmbt_lookup(cur, XFS_LOOKUP_EQ, stat);
/*
 * Greater-or-equal lookup: same setup as the EQ variant but searches with
 * XFS_LOOKUP_GE.  (Name line elided from this listing.)
 */
1828 xfs_btree_cur_t *cur,
1832 int *stat) /* success/failure */
1834 cur->bc_rec.b.br_startoff = off;
1835 cur->bc_rec.b.br_startblock = bno;
1836 cur->bc_rec.b.br_blockcount = len;
1837 return xfs_bmbt_lookup(cur, XFS_LOOKUP_GE, stat);
/*
 * NOTE(review): elided listing (function name line, goto labels, returns
 * and some statements missing); code kept verbatim.  Grows the tree by one
 * level: allocates a real block, copies the inode-rooted broot's keys and
 * pointers into it, shrinks the broot to a single key/pointer referencing
 * the new child, and logs everything.
 */
1841 * Give the bmap btree a new root block. Copy the old broot contents
1842 * down into a real block and make the broot point to it.
1846 xfs_btree_cur_t *cur, /* btree cursor */
1847 int *logflags, /* logging flags for inode */
1848 int *stat) /* return status - 0 fail */
1850 xfs_alloc_arg_t args; /* allocation arguments */
1851 xfs_bmbt_block_t *block; /* bmap btree block */
1852 xfs_buf_t *bp; /* buffer for block */
1853 xfs_bmbt_block_t *cblock; /* child btree block */
1854 xfs_bmbt_key_t *ckp; /* child key pointer */
1855 xfs_bmbt_ptr_t *cpp; /* child ptr pointer */
1856 int error; /* error return code */
1858 int i; /* loop counter */
1860 xfs_bmbt_key_t *kp; /* pointer to bmap btree key */
1861 int level; /* btree level */
1862 xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */
1864 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1865 level = cur->bc_nlevels - 1;
1866 block = xfs_bmbt_get_block(cur, level, &bp);
1868 * Copy the root into a real block.
1870 args.mp = cur->bc_mp;
1871 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
1872 args.tp = cur->bc_tp;
1873 args.fsbno = cur->bc_private.b.firstblock;
1874 args.mod = args.minleft = args.alignment = args.total = args.isfl =
1875 args.userdata = args.minalignslop = 0;
1876 args.minlen = args.maxlen = args.prod = 1;
1877 args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
1878 args.firstblock = args.fsbno;
/* No firstblock yet: allocate near the root's first child block. */
1879 if (args.fsbno == NULLFSBLOCK) {
1881 if ((error = xfs_btree_check_lptr_disk(cur, *pp, level))) {
1882 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1886 args.fsbno = be64_to_cpu(*pp);
1887 args.type = XFS_ALLOCTYPE_START_BNO;
1888 } else if (cur->bc_private.b.flist->xbf_low)
1889 args.type = XFS_ALLOCTYPE_START_BNO;
1891 args.type = XFS_ALLOCTYPE_NEAR_BNO;
1892 if ((error = xfs_alloc_vextent(&args))) {
1893 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1896 if (args.fsbno == NULLFSBLOCK) {
1897 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1901 ASSERT(args.len == 1);
/* Account the new block against the inode and its quota. */
1902 cur->bc_private.b.firstblock = args.fsbno;
1903 cur->bc_private.b.allocated++;
1904 cur->bc_private.b.ip->i_d.di_nblocks++;
1905 XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
1906 XFS_TRANS_DQ_BCOUNT, 1L);
1907 bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0);
1908 cblock = XFS_BUF_TO_BMBT_BLOCK(bp);
/* Root grows one level; it now holds exactly one entry (the new child). */
1910 be16_add_cpu(&block->bb_level, 1);
1911 block->bb_numrecs = cpu_to_be16(1);
1913 cur->bc_ptrs[level + 1] = 1;
1914 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
1915 ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur);
1916 memcpy(ckp, kp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*kp));
1917 cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur);
1919 for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
1920 if ((error = xfs_btree_check_lptr_disk(cur, pp[i], level))) {
1921 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1926 memcpy(cpp, pp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*pp));
1928 if ((error = xfs_btree_check_lptr(cur, args.fsbno, level))) {
1929 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1933 *pp = cpu_to_be64(args.fsbno);
/* Shrink the inode root down to its single remaining entry. */
1934 xfs_iroot_realloc(cur->bc_private.b.ip, 1 - be16_to_cpu(cblock->bb_numrecs),
1935 cur->bc_private.b.whichfork);
1936 xfs_btree_setbuf(cur, level, bp);
1938 * Do all this logging at the end so that
1939 * the root is at the right level.
1941 xfs_bmbt_log_block(cur, bp, XFS_BB_ALL_BITS);
1942 xfs_bmbt_log_keys(cur, bp, 1, be16_to_cpu(cblock->bb_numrecs));
1943 xfs_bmbt_log_ptrs(cur, bp, 1, be16_to_cpu(cblock->bb_numrecs));
1944 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1946 XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork);
/*
 * NOTE(review): elided listing (name line and #if XFS_BIG_BLKNOS head
 * missing).  Packs startoff/startblock/blockcount/state into the two-word
 * host-endian record: flag in bit 63 of l0, offset in the next 54 bits,
 * startblock split 9/43 across l0/l1, count in the low 21 bits of l1.  The
 * non-BIG_BLKNOS path encodes NULLSTARTBLOCK with a sentinel bit pattern.
 */
1952 * Set all the fields in a bmap extent record from the arguments.
1956 xfs_bmbt_rec_host_t *r,
1957 xfs_fileoff_t startoff,
1958 xfs_fsblock_t startblock,
1959 xfs_filblks_t blockcount,
1962 int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
1964 ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
1965 ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
1966 ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
1969 ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
1971 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
1972 ((xfs_bmbt_rec_base_t)startoff << 9) |
1973 ((xfs_bmbt_rec_base_t)startblock >> 43);
1974 r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
1975 ((xfs_bmbt_rec_base_t)blockcount &
1976 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
1977 #else /* !XFS_BIG_BLKNOS */
1978 if (ISNULLSTARTBLOCK(startblock)) {
1979 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
1980 ((xfs_bmbt_rec_base_t)startoff << 9) |
1981 (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
1982 r->l1 = XFS_MASK64HI(11) |
1983 ((xfs_bmbt_rec_base_t)startblock << 21) |
1984 ((xfs_bmbt_rec_base_t)blockcount &
1985 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
1987 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
1988 ((xfs_bmbt_rec_base_t)startoff << 9);
1989 r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
1990 ((xfs_bmbt_rec_base_t)blockcount &
1991 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
1993 #endif /* XFS_BIG_BLKNOS */
/* Pack an uncompressed irec into a host-endian record via set_allf. */
1997 * Set all the fields in a bmap extent record from the uncompressed form.
2001 xfs_bmbt_rec_host_t *r,
2004 xfs_bmbt_set_allf(r, s->br_startoff, s->br_startblock,
2005 s->br_blockcount, s->br_state);
/*
 * On-disk (big-endian) twin of xfs_bmbt_set_allf: identical bit layout,
 * but each packed word is stored via cpu_to_be64.  NOTE(review): the
 * #if XFS_BIG_BLKNOS head is elided from this listing.
 */
2010 * Set all the fields in a disk format bmap extent record from the arguments.
2013 xfs_bmbt_disk_set_allf(
2015 xfs_fileoff_t startoff,
2016 xfs_fsblock_t startblock,
2017 xfs_filblks_t blockcount,
2020 int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
2022 ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
2023 ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
2024 ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
2027 ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
2029 r->l0 = cpu_to_be64(
2030 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
2031 ((xfs_bmbt_rec_base_t)startoff << 9) |
2032 ((xfs_bmbt_rec_base_t)startblock >> 43));
2033 r->l1 = cpu_to_be64(
2034 ((xfs_bmbt_rec_base_t)startblock << 21) |
2035 ((xfs_bmbt_rec_base_t)blockcount &
2036 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
2037 #else /* !XFS_BIG_BLKNOS */
2038 if (ISNULLSTARTBLOCK(startblock)) {
2039 r->l0 = cpu_to_be64(
2040 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
2041 ((xfs_bmbt_rec_base_t)startoff << 9) |
2042 (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
2043 r->l1 = cpu_to_be64(XFS_MASK64HI(11) |
2044 ((xfs_bmbt_rec_base_t)startblock << 21) |
2045 ((xfs_bmbt_rec_base_t)blockcount &
2046 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
2048 r->l0 = cpu_to_be64(
2049 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
2050 ((xfs_bmbt_rec_base_t)startoff << 9));
2051 r->l1 = cpu_to_be64(
2052 ((xfs_bmbt_rec_base_t)startblock << 21) |
2053 ((xfs_bmbt_rec_base_t)blockcount &
2054 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
2056 #endif /* XFS_BIG_BLKNOS */
/* Pack an uncompressed irec into an on-disk record via disk_set_allf. */
2060 * Set all the fields in a bmap extent record from the uncompressed form.
2063 xfs_bmbt_disk_set_all(
2067 xfs_bmbt_disk_set_allf(r, s->br_startoff, s->br_startblock,
2068 s->br_blockcount, s->br_state);
/* Replace only the low 21 bits of l1 (the count), keeping the upper 43. */
2072 * Set the blockcount field in a bmap extent record.
2075 xfs_bmbt_set_blockcount(
2076 xfs_bmbt_rec_host_t *r,
2079 ASSERT((v & XFS_MASK64HI(43)) == 0);
2080 r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(43)) |
2081 (xfs_bmbt_rec_base_t)(v & XFS_MASK64LO(21));
/*
 * Rewrite the startblock bits in place: the top 9 of its 52 bits land in
 * the low bits of l0, the remaining 43 in the high bits of l1; the count
 * in l1's low 21 bits is preserved.  The non-BIG_BLKNOS path encodes the
 * NULLSTARTBLOCK sentinel.  NOTE(review): the #if head is elided here.
 */
2085 * Set the startblock field in a bmap extent record.
2088 xfs_bmbt_set_startblock(
2089 xfs_bmbt_rec_host_t *r,
2093 ASSERT((v & XFS_MASK64HI(12)) == 0);
2094 r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(55)) |
2095 (xfs_bmbt_rec_base_t)(v >> 43);
2096 r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)) |
2097 (xfs_bmbt_rec_base_t)(v << 21);
2098 #else /* !XFS_BIG_BLKNOS */
2099 if (ISNULLSTARTBLOCK(v)) {
2100 r->l0 |= (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
2101 r->l1 = (xfs_bmbt_rec_base_t)XFS_MASK64HI(11) |
2102 ((xfs_bmbt_rec_base_t)v << 21) |
2103 (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
2105 r->l0 &= ~(xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
2106 r->l1 = ((xfs_bmbt_rec_base_t)v << 21) |
2107 (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
2109 #endif /* XFS_BIG_BLKNOS */
/* Replace bits 62..9 of l0 (the offset), preserving the extent flag (bit 63) and the 9 startblock-high bits. */
2113 * Set the startoff field in a bmap extent record.
2116 xfs_bmbt_set_startoff(
2117 xfs_bmbt_rec_host_t *r,
2120 ASSERT((v & XFS_MASK64HI(9)) == 0);
2121 r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) XFS_MASK64HI(1)) |
2122 ((xfs_bmbt_rec_base_t)v << 9) |
2123 (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
/* NORM clears the extent-flag bit (bit 63 of l0); any other state sets it. */
2127 * Set the extent state field in a bmap extent record.
2131 xfs_bmbt_rec_host_t *r,
2134 ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
2135 if (v == XFS_EXT_NORM)
2136 r->l0 &= XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN);
2138 r->l0 |= XFS_MASK64HI(BMBT_EXNTFLAG_BITLEN);
/*
 * Inverse of xfs_bmdr_to_bmbt: pack the in-core root block into the
 * on-disk (inode fork) format, after sanity-checking magic, sibling and
 * level fields.  NOTE(review): signature lines are partly elided.
 */
2142 * Convert in-memory form of btree root to on-disk form.
2146 xfs_bmbt_block_t *rblock,
2148 xfs_bmdr_block_t *dblock,
2152 xfs_bmbt_key_t *fkp;
2154 xfs_bmbt_key_t *tkp;
2157 ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC);
2158 ASSERT(be64_to_cpu(rblock->bb_leftsib) == NULLDFSBNO);
2159 ASSERT(be64_to_cpu(rblock->bb_rightsib) == NULLDFSBNO);
2160 ASSERT(be16_to_cpu(rblock->bb_level) > 0);
2161 dblock->bb_level = rblock->bb_level;
2162 dblock->bb_numrecs = rblock->bb_numrecs;
/* dmxr is first the destination's max record count, then its actual count. */
2163 dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
2164 fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
2165 tkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
2166 fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
2167 tpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
2168 dmxr = be16_to_cpu(dblock->bb_numrecs);
2169 memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
2170 memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
/*
 * NOTE(review): elided listing (name line, returns and the ptr==1 test are
 * missing).  Overwrites the leaf record at the cursor's position with the
 * given off/bno/len/state, logs it, and -- when the record is first in its
 * block (the elided guard) -- propagates the new start offset upward via
 * xfs_bmbt_updkey.
 */
2174 * Update the record to the passed values.
2178 xfs_btree_cur_t *cur,
2184 xfs_bmbt_block_t *block;
2191 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
2192 XFS_BMBT_TRACE_ARGFFFI(cur, (xfs_dfiloff_t)off, (xfs_dfsbno_t)bno,
2193 (xfs_dfilblks_t)len, (int)state);
2194 block = xfs_bmbt_get_block(cur, 0, &bp);
2196 if ((error = xfs_btree_check_lblock(cur, block, 0, bp))) {
2197 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
2201 ptr = cur->bc_ptrs[0];
2202 rp = XFS_BMAP_REC_IADDR(block, ptr, cur);
2203 xfs_bmbt_disk_set_allf(rp, off, bno, len, state);
2204 xfs_bmbt_log_recs(cur, bp, ptr, ptr);
2206 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
2209 key.br_startoff = cpu_to_be64(off);
2210 if ((error = xfs_bmbt_updkey(cur, &key, 1))) {
2211 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
2214 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * NOTE(review): elided listing (signature and return lines missing).
 * Scans num extent records starting at idx; any record with the extent
 * flag bit set trips an ASSERT on debug kernels and makes the function
 * report failure.
 */
2219 * Check extent records, which have just been read, for
2220 * any bit in the extent flag field. ASSERT on debug
2221 * kernels, as this condition should not occur.
2222 * Return an error condition (1) if any flags found,
2223 * otherwise return 0.
2227 xfs_check_nostate_extents(
2232 for (; num > 0; num--, idx++) {
2233 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
2235 (64 - BMBT_EXNTFLAG_BITLEN)) != 0) {
/*
 * Duplicate a bmap btree cursor: build a fresh cursor for the same
 * inode/fork, then copy the private fields that init doesn't set.
 * NOTE(review): the trailing "return new;" is elided from this listing.
 */
2244 STATIC struct xfs_btree_cur *
2245 xfs_bmbt_dup_cursor(
2246 struct xfs_btree_cur *cur)
2248 struct xfs_btree_cur *new;
2250 new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
2251 cur->bc_private.b.ip, cur->bc_private.b.whichfork);
2254 * Copy the firstblock, flist, and flags values,
2255 * since init cursor doesn't get them.
2257 new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
2258 new->bc_private.b.flist = cur->bc_private.b.flist;
2259 new->bc_private.b.flags = cur->bc_private.b.flags;
/* btree_ops callback: maximum records per block at the given level. */
2265 xfs_bmbt_get_maxrecs(
2266 struct xfs_btree_cur *cur,
2269 return XFS_BMAP_BLOCK_IMAXRECS(level, cur);
/*
 * Debug-only tracing hooks (compiled under XFS_BTREE_TRACE).  Each event is
 * recorded twice: in the global xfs_bmbt_trace_buf ktrace and in the
 * per-inode i_btrace.  NOTE(review): several signature and body lines are
 * elided from this listing.
 */
2272 #ifdef XFS_BTREE_TRACE
2273 ktrace_t *xfs_bmbt_trace_buf;
2276 xfs_bmbt_trace_enter(
2277 struct xfs_btree_cur *cur,
2294 struct xfs_inode *ip = cur->bc_private.b.ip;
2295 int whichfork = cur->bc_private.b.whichfork;
/* First slot packs event type, fork and source line into one word. */
2297 ktrace_enter(xfs_bmbt_trace_buf,
2298 (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
2299 (void *)func, (void *)s, (void *)ip, (void *)cur,
2300 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
2301 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
2302 (void *)a8, (void *)a9, (void *)a10);
2303 ktrace_enter(ip->i_btrace,
2304 (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
2305 (void *)func, (void *)s, (void *)ip, (void *)cur,
2306 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
2307 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
2308 (void *)a8, (void *)a9, (void *)a10);
/* Snapshot cursor state (record, level/flags/allocated word) for trace output. */
2312 xfs_bmbt_trace_cursor(
2313 struct xfs_btree_cur *cur,
2318 struct xfs_bmbt_rec_host r;
2320 xfs_bmbt_set_all(&r, &cur->bc_rec.b);
2322 *s0 = (cur->bc_nlevels << 24) |
2323 (cur->bc_private.b.flags << 16) |
2324 cur->bc_private.b.allocated;
/* Key tracer: only the start offset is meaningful for bmbt keys. */
2331 struct xfs_btree_cur *cur,
2332 union xfs_btree_key *key,
2336 *l0 = be64_to_cpu(key->bmbt.br_startoff);
/* Record tracer: expand the on-disk record into its three fields. */
2341 xfs_bmbt_trace_record(
2342 struct xfs_btree_cur *cur,
2343 union xfs_btree_rec *rec,
2348 struct xfs_bmbt_irec irec;
2350 xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
2351 *l0 = irec.br_startoff;
2352 *l1 = irec.br_startblock;
2353 *l2 = irec.br_blockcount;
2355 #endif /* XFS_BTREE_TRACE */
/*
 * Generic-btree operations vector for the bmap btree; wired into cursors
 * by xfs_bmbt_init_cursor.  NOTE(review): the closing "};" is elided here.
 */
2357 static const struct xfs_btree_ops xfs_bmbt_ops = {
2358 .rec_len = sizeof(xfs_bmbt_rec_t),
2359 .key_len = sizeof(xfs_bmbt_key_t),
2361 .dup_cursor = xfs_bmbt_dup_cursor,
2362 .get_maxrecs = xfs_bmbt_get_maxrecs,
2364 #ifdef XFS_BTREE_TRACE
2365 .trace_enter = xfs_bmbt_trace_enter,
2366 .trace_cursor = xfs_bmbt_trace_cursor,
2367 .trace_key = xfs_bmbt_trace_key,
2368 .trace_record = xfs_bmbt_trace_record,
2373 * Allocate a new bmap btree cursor.
2375 struct xfs_btree_cur * /* new bmap btree cursor */
2376 xfs_bmbt_init_cursor(
2377 struct xfs_mount *mp, /* file system mount point */
2378 struct xfs_trans *tp, /* transaction pointer */
2379 struct xfs_inode *ip, /* inode owning the btree */
2380 int whichfork) /* data or attr fork */
2382 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
2383 struct xfs_btree_cur *cur;
2385 cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
2389 cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
2390 cur->bc_btnum = XFS_BTNUM_BMAP;
2391 cur->bc_blocklog = mp->m_sb.sb_blocklog;
2393 cur->bc_ops = &xfs_bmbt_ops;
2394 cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
2396 cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
2397 cur->bc_private.b.ip = ip;
2398 cur->bc_private.b.firstblock = NULLFSBLOCK;
2399 cur->bc_private.b.flist = NULL;
2400 cur->bc_private.b.allocated = 0;
2401 cur->bc_private.b.flags = 0;
2402 cur->bc_private.b.whichfork = whichfork;