2 * linux/fs/ufs/balloc.c
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
10 #include <linux/ufs_fs.h>
11 #include <linux/stat.h>
12 #include <linux/time.h>
13 #include <linux/string.h>
14 #include <linux/quotaops.h>
15 #include <linux/buffer_head.h>
16 #include <linux/capability.h>
17 #include <linux/sched.h>
18 #include <linux/bitops.h>
19 #include <asm/byteorder.h>
/*
 * Debug tracing macro: UFSD((fmt, args...)) prints file/line/function
 * followed by the formatted message when UFS_BALLOC_DEBUG is defined.
 * NOTE(review): the #else branch (presumably an empty definition) is not
 * visible in this elided listing -- confirm against the full file.
 */
24 #undef UFS_BALLOC_DEBUG
26 #ifdef UFS_BALLOC_DEBUG
27 #define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
32 static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
33 static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
34 static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
35 static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
36 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
37 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
40 * Free 'count' fragments from fragment number 'fragment'
/*
 * ufs_free_fragments -- return 'count' fragments starting at fragment
 * number 'fragment' to the cylinder-group free bitmap and update the
 * per-cg and filesystem-wide free-fragment counters.
 * NOTE(review): this listing is elided -- braces, locking and error-path
 * returns between the visible lines are not shown; comments below only
 * describe the lines that are visible here.
 */
42 void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
44 struct super_block * sb;
45 struct ufs_sb_private_info * uspi;
46 struct ufs_super_block_first * usb1;
47 struct ufs_cg_private_info * ucpi;
48 struct ufs_cylinder_group * ucg;
49 unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
52 uspi = UFS_SB(sb)->s_uspi;
53 usb1 = ubh_get_usb_first(uspi);
55 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
/* sanity check: the freed run must stay inside one cylinder group */
57 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
58 ufs_error (sb, "ufs_free_fragments", "internal error");
/* map the fragment number to its cylinder group and bit offset */
62 cgno = ufs_dtog(fragment);
63 bit = ufs_dtogd(fragment);
64 if (cgno >= uspi->s_ncg) {
65 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
69 ucpi = ufs_load_cylinder (sb, cgno);
72 ucg = ubh_get_ucg (UCPI_UBH);
73 if (!ufs_cg_chkmagic(sb, ucg)) {
74 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
/* withdraw the old fragment-size accounting for this block's bitmap
 * byte before setting the freed bits */
78 end_bit = bit + count;
79 bbase = ufs_blknum (bit);
80 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
81 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
82 for (i = bit; i < end_bit; i++) {
/* a clear bit means "in use"; set it to mark the fragment free */
83 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
84 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
86 ufs_error (sb, "ufs_free_fragments",
87 "bit already cleared for fragment %u", i);
90 DQUOT_FREE_BLOCK (inode, count);
/* bump free-fragment counters (cg, superblock total, in-core summary),
 * then re-account the new fragment map */
93 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
94 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
95 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
96 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
97 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
100 * Trying to reassemble free fragments into block
102 blkno = ufs_fragstoblks (bbase);
/* if all fragments of the block are now free, convert nffree
 * accounting into one whole free block (nbfree) */
103 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
104 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
105 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
106 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
107 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
108 ufs_clusteracct (sb, ucpi, blkno, 1);
109 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
110 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
111 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
112 cylno = ufs_cbtocylno (bbase);
113 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
114 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* mark superblock and cg buffers dirty; on a synchronous mount the cg
 * buffer is written out and waited on immediately */
117 ubh_mark_buffer_dirty (USPI_UBH);
118 ubh_mark_buffer_dirty (UCPI_UBH);
119 if (sb->s_flags & MS_SYNCHRONOUS) {
120 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
121 ubh_wait_on_buffer (UCPI_UBH);
131 UFSD(("EXIT (FAILED)\n"))
136 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
/*
 * ufs_free_blocks -- free 'count' fragments starting at 'fragment',
 * where both are block-aligned, i.e. this frees whole blocks.
 * NOTE(review): elided listing -- locking, the overflow continuation
 * loop and error-path returns between the visible lines are not shown.
 */
138 void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count)
140 struct super_block * sb;
141 struct ufs_sb_private_info * uspi;
142 struct ufs_super_block_first * usb1;
143 struct ufs_cg_private_info * ucpi;
144 struct ufs_cylinder_group * ucg;
145 unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
148 uspi = UFS_SB(sb)->s_uspi;
149 usb1 = ubh_get_usb_first(uspi);
151 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
/* both start and count must be multiples of fragments-per-block */
153 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
154 ufs_error (sb, "ufs_free_blocks", "internal error, "
155 "fragment %u, count %u\n", fragment, count);
163 cgno = ufs_dtog (fragment);
164 bit = ufs_dtogd (fragment);
165 if (cgno >= uspi->s_ncg) {
166 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
/* a run that crosses into the next cylinder group is split; the
 * remainder ('overflow') is presumably handled after this cg --
 * the continuation code is not visible here */
169 end_bit = bit + count;
170 if (end_bit > uspi->s_fpg) {
171 overflow = bit + count - uspi->s_fpg;
176 ucpi = ufs_load_cylinder (sb, cgno);
179 ucg = ubh_get_ucg (UCPI_UBH);
180 if (!ufs_cg_chkmagic(sb, ucg)) {
181 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
/* walk the run one block (s_fpb fragments) at a time */
185 for (i = bit; i < end_bit; i += uspi->s_fpb) {
186 blkno = ufs_fragstoblks(i);
187 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
188 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
190 ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
191 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
192 ufs_clusteracct (sb, ucpi, blkno, 1);
193 DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
/* per-block free counters: cg, superblock total, in-core summary */
195 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
196 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
197 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
198 cylno = ufs_cbtocylno(i);
199 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
200 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* flush metadata; synchronous mounts write the cg buffer immediately */
203 ubh_mark_buffer_dirty (USPI_UBH);
204 ubh_mark_buffer_dirty (UCPI_UBH);
205 if (sb->s_flags & MS_SYNCHRONOUS) {
206 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
207 ubh_wait_on_buffer (UCPI_UBH);
223 UFSD(("EXIT (FAILED)\n"))
/*
 * ufs_get_locked_page -- return the page cache page at 'index', locked,
 * reading it in via the address_space's ->readpage when not cached.
 * Returns ERR_PTR(-EIO) when the page cannot be read.
 * NOTE(review): the try_again label, the index parameter line and the
 * final return are in the elided portion of this listing.
 */
227 static struct page *ufs_get_locked_page(struct address_space *mapping,
233 page = find_lock_page(mapping, index);
/* not in the cache: read it through the filesystem's readpage */
235 page = read_cache_page(mapping, index,
236 (filler_t*)mapping->a_ops->readpage,
239 printk(KERN_ERR "ufs_change_blocknr: "
240 "read_cache_page error: ino %lu, index: %lu\n",
241 mapping->host->i_ino, index);
/* the read completed but the page is bad -- give up with -EIO */
247 if (!PageUptodate(page) || PageError(page)) {
249 page_cache_release(page);
251 printk(KERN_ERR "ufs_change_blocknr: "
252 "can not read page: ino %lu, index: %lu\n",
253 mapping->host->i_ino, index);
255 page = ERR_PTR(-EIO);
/* page was truncated or its buffers were stripped while we slept;
 * retry, since the caller requires attached buffer_heads */
260 if (unlikely(!page->mapping || !page_has_buffers(page))) {
262 page_cache_release(page);
263 goto try_again;/*we really need these buffers*/
270 * Modify inode page cache in such way:
271 * have - blocks with b_blocknr equal to oldb...oldb+count-1
272 * get - blocks with b_blocknr equal to newb...newb+count-1
273 * also we suppose that oldb...oldb+count-1 blocks
274 * situated at the end of file.
276 * We can come here from ufs_writepage or ufs_prepare_write,
277 * locked_page is argument of these functions, so we already lock it.
/*
 * ufs_change_blocknr -- retarget the buffer_heads of the inode's last
 * 'count' blocks from device blocks oldb..oldb+count-1 to
 * newb..newb+count-1, marking them dirty (see the comment block above).
 * 'locked_page' is already locked by the caller (ufs_writepage /
 * ufs_prepare_write) and must not be re-locked here.
 * NOTE(review): elided listing -- declarations of i, j, baseblk, page
 * and the do-loop opening are not visible.
 */
279 static void ufs_change_blocknr(struct inode *inode, unsigned int count,
280 unsigned int oldb, unsigned int newb,
281 struct page *locked_page)
283 unsigned int blk_per_page = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
285 struct address_space *mapping = inode->i_mapping;
286 pgoff_t index, cur_index = locked_page->index;
289 struct buffer_head *head, *bh;
/* first moved block: the code assumes the moved run ends at EOF */
291 baseblk = ((i_size_read(inode) - 1) >> inode->i_blkbits) + 1 - count;
293 UFSD(("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
294 inode->i_ino, count, oldb, newb));
296 BUG_ON(!PageLocked(locked_page));
/* walk the affected pages, one page (blk_per_page blocks) per pass */
298 for (i = 0; i < count; i += blk_per_page) {
299 index = (baseblk+i) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
/* lock each page except the one the caller already holds */
301 if (likely(cur_index != index)) {
302 page = ufs_get_locked_page(mapping, index);
309 head = page_buffers(page);
/* rewrite matching buffers' block numbers in order; j presumably
 * counts blocks already moved -- initialization not visible here */
312 if (likely(bh->b_blocknr == j + oldb && j < count)) {
313 unmap_underlying_metadata(bh->b_bdev,
315 bh->b_blocknr = newb + j++;
316 mark_buffer_dirty(bh);
319 bh = bh->b_this_page;
320 } while (bh != head);
322 set_page_dirty(page);
324 if (likely(cur_index != index)) {
326 page_cache_release(page);
/*
 * ufs_new_fragments -- allocate 'count' fragments for 'fragment' of the
 * inode; *p holds the current on-disk fragment pointer (0 if none).
 * Three strategies, in order: brand-new allocation, in-place extension
 * of the existing partial block, and relocation to a new block (data is
 * moved via ufs_change_blocknr and the old fragments are freed).
 * Returns the new fragment number, or 0 on failure with *err set.
 * NOTE(review): elided listing -- locking, returns and several branch
 * closings are not visible; comments describe only visible lines.
 */
332 unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
333 unsigned goal, unsigned count, int * err, struct page *locked_page)
335 struct super_block * sb;
336 struct ufs_sb_private_info * uspi;
337 struct ufs_super_block_first * usb1;
338 unsigned cgno, oldcount, newcount, tmp, request, result;
340 UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count))
343 uspi = UFS_SB(sb)->s_uspi;
344 usb1 = ubh_get_usb_first(uspi);
349 tmp = fs32_to_cpu(sb, *p);
/* clamp the request so it cannot cross a block boundary */
350 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
351 ufs_warning (sb, "ufs_new_fragments", "internal warning"
352 " fragment %u, count %u", fragment, count);
353 count = uspi->s_fpb - ufs_fragnum(fragment);
355 oldcount = ufs_fragnum (fragment);
356 newcount = oldcount + count;
359 * Somebody else has just allocated our fragments
363 ufs_error (sb, "ufs_new_fragments", "internal error, "
364 "fragment %u, tmp %u\n", fragment, tmp);
368 if (fragment < UFS_I(inode)->i_lastfrag) {
369 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
376 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
383 * There is not enough space for user on the device
/* non-root callers must leave UFS_MINFREE percent free */
385 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(usb1, UFS_MINFREE) <= 0) {
387 UFSD(("EXIT (FAILED)\n"))
/* choose a starting cylinder group: by inode when there is no valid
 * goal, otherwise by the goal fragment */
391 if (goal >= uspi->s_size)
394 cgno = ufs_inotocg (inode->i_ino);
396 cgno = ufs_dtog (goal);
399 * allocate new fragment
402 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
404 *p = cpu_to_fs32(sb, result);
406 inode->i_blocks += count << uspi->s_nspfshift;
407 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
410 UFSD(("EXIT, result %u\n", result))
/* try to extend the existing partial block in place */
417 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
420 inode->i_blocks += count << uspi->s_nspfshift;
421 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
423 UFSD(("EXIT, result %u\n", result))
428 * allocate new block and move data
/* fs_optim policy: trade fragmentation (space) against time; switch
 * to time optimization when free fragments run low */
430 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
433 if (uspi->s_minfree < 5 || fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree)
434 > uspi->s_dsize * uspi->s_minfree / (2 * 100) )
436 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
439 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
442 request = uspi->s_fpb;
443 if (fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) < uspi->s_dsize *
444 (uspi->s_minfree - 2) / 100)
446 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
449 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
/* relocate: rewrite buffer block numbers, then free the surplus of
 * the new allocation and the old fragments */
451 ufs_change_blocknr(inode, oldcount, tmp, result, locked_page);
453 *p = cpu_to_fs32(sb, result);
455 inode->i_blocks += count << uspi->s_nspfshift;
456 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
458 if (newcount < request)
459 ufs_free_fragments (inode, result + newcount, request - newcount);
460 ufs_free_fragments (inode, tmp, oldcount);
461 UFSD(("EXIT, result %u\n", result))
466 UFSD(("EXIT (FAILED)\n"))
/*
 * ufs_add_fragments -- try to extend an existing allocation in place
 * from 'oldcount' to 'newcount' fragments at 'fragment'. Succeeds only
 * when the immediately following fragments of the same block are free.
 * NOTE(review): elided listing -- the return type line, failure returns
 * and brace closings are not visible here.
 */
471 ufs_add_fragments (struct inode * inode, unsigned fragment,
472 unsigned oldcount, unsigned newcount, int * err)
474 struct super_block * sb;
475 struct ufs_sb_private_info * uspi;
476 struct ufs_super_block_first * usb1;
477 struct ufs_cg_private_info * ucpi;
478 struct ufs_cylinder_group * ucg;
479 unsigned cgno, fragno, fragoff, count, fragsize, i;
481 UFSD(("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount))
484 uspi = UFS_SB(sb)->s_uspi;
485 usb1 = ubh_get_usb_first (uspi);
486 count = newcount - oldcount;
/* quick rejections: not enough free fragments in this cg, or the
 * extension would cross the block boundary */
488 cgno = ufs_dtog(fragment);
489 if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
491 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
493 ucpi = ufs_load_cylinder (sb, cgno);
496 ucg = ubh_get_ucg (UCPI_UBH);
497 if (!ufs_cg_chkmagic(sb, ucg)) {
498 ufs_panic (sb, "ufs_add_fragments",
499 "internal error, bad magic number on cg %u", cgno);
503 fragno = ufs_dtogd (fragment);
504 fragoff = ufs_fragnum (fragno);
/* every fragment we want to grow into must currently be free
 * (clear bit = in use, so a clear bit here means failure) */
505 for (i = oldcount; i < newcount; i++)
506 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
509 * Block can be extended
511 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* measure the free run following the allocation to find which
 * frsum bucket the remaining free piece belonged to */
512 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
513 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
515 fragsize = i - oldcount;
516 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
517 ufs_panic (sb, "ufs_add_fragments",
518 "internal error or corrupted bitmap on cg %u", cgno);
/* move the free run from bucket 'fragsize' to 'fragsize - count' */
519 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
520 if (fragsize != count)
521 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
/* claim the fragments (clear = allocated) and charge quota */
522 for (i = oldcount; i < newcount; i++)
523 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, fragno + i);
524 if(DQUOT_ALLOC_BLOCK(inode, count)) {
529 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
530 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
531 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
533 ubh_mark_buffer_dirty (USPI_UBH);
534 ubh_mark_buffer_dirty (UCPI_UBH);
535 if (sb->s_flags & MS_SYNCHRONOUS) {
536 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
537 ubh_wait_on_buffer (UCPI_UBH);
541 UFSD(("EXIT, fragment %u\n", fragment))
/*
 * UFS_TEST_FREE_SPACE_CG -- expands inside ufs_alloc_fragments() only.
 * Tests whether cylinder group 'cgno' has a whole free block or a free
 * fragment run of at least 'count' fragments; relies on sb, uspi, ucg,
 * cgno, count and k from the enclosing scope. The goto targets taken on
 * success are in the elided part of this listing.
 */
546 #define UFS_TEST_FREE_SPACE_CG \
547 ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
548 if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
550 for (k = count; k < uspi->s_fpb; k++) \
551 if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
/*
 * ufs_alloc_fragments -- allocate 'count' fragments near 'goal',
 * starting the cylinder-group search at 'cgno'. Search order follows
 * classic FFS: preferred cg, quadratic rehash, then brute force.
 * Returns the absolute fragment number, or (per the visible failure
 * paths) a failure value with *err set.
 * NOTE(review): elided listing -- goto labels, returns and brace
 * closings between the visible lines are not shown.
 */
554 static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
555 unsigned goal, unsigned count, int * err)
557 struct super_block * sb;
558 struct ufs_sb_private_info * uspi;
559 struct ufs_super_block_first * usb1;
560 struct ufs_cg_private_info * ucpi;
561 struct ufs_cylinder_group * ucg;
562 unsigned oldcg, i, j, k, result, allocsize;
564 UFSD(("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count))
567 uspi = UFS_SB(sb)->s_uspi;
568 usb1 = ubh_get_usb_first(uspi);
572 * 1. searching on preferred cylinder group
574 UFS_TEST_FREE_SPACE_CG
577 * 2. quadratic rehash
579 for (j = 1; j < uspi->s_ncg; j *= 2) {
581 if (cgno >= uspi->s_ncg)
583 UFS_TEST_FREE_SPACE_CG
587 * 3. brute force search
588 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
590 cgno = (oldcg + 1) % uspi->s_ncg;
591 for (j = 2; j < uspi->s_ncg; j++) {
593 if (cgno >= uspi->s_ncg)
595 UFS_TEST_FREE_SPACE_CG
598 UFSD(("EXIT (FAILED)\n"))
/* a cylinder group with enough space was found -- load it */
602 ucpi = ufs_load_cylinder (sb, cgno);
605 ucg = ubh_get_ucg (UCPI_UBH);
606 if (!ufs_cg_chkmagic(sb, ucg))
607 ufs_panic (sb, "ufs_alloc_fragments",
608 "internal error, bad magic number on cg %u", cgno);
609 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* whole-block request: allocate a block directly */
611 if (count == uspi->s_fpb) {
612 result = ufs_alloccg_block (inode, ucpi, goal, err);
613 if (result == (unsigned)-1)
/* find the smallest free-run size >= count that exists in this cg */
618 for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
619 if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
/* no suitable run: take a whole block and give back the unused tail
 * fragments (bits set back to free, counters and quota adjusted) */
622 if (allocsize == uspi->s_fpb) {
623 result = ufs_alloccg_block (inode, ucpi, goal, err);
624 if (result == (unsigned)-1)
626 goal = ufs_dtogd (result);
627 for (i = count; i < uspi->s_fpb; i++)
628 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, goal + i);
629 i = uspi->s_fpb - count;
630 DQUOT_FREE_BLOCK(inode, i);
632 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
633 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i);
634 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
635 fs32_add(sb, &ucg->cg_frsum[i], 1);
/* found a run of 'allocsize' free fragments; locate and claim it */
639 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
640 if (result == (unsigned)-1)
642 if(DQUOT_ALLOC_BLOCK(inode, count)) {
646 for (i = 0; i < count; i++)
647 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, result + i);
649 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
650 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
651 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
/* the run shrinks from bucket 'allocsize' to 'allocsize - count' */
652 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
654 if (count != allocsize)
655 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
658 ubh_mark_buffer_dirty (USPI_UBH);
659 ubh_mark_buffer_dirty (UCPI_UBH);
660 if (sb->s_flags & MS_SYNCHRONOUS) {
661 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
662 ubh_wait_on_buffer (UCPI_UBH);
/* convert the cg-relative result to an absolute fragment number */
666 result += cgno * uspi->s_fpg;
667 UFSD(("EXIT3, result %u\n", result))
/*
 * ufs_alloccg_block -- allocate one whole block in the cylinder group
 * 'ucpi', preferably at 'goal' (cg-relative fragment number), falling
 * back to the cg rotor and then to a bitmap search.
 * NOTE(review): elided listing -- the goal==0 test, gotfrag/return
 * paths and brace closings are not visible here.
 */
671 static unsigned ufs_alloccg_block (struct inode * inode,
672 struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
674 struct super_block * sb;
675 struct ufs_sb_private_info * uspi;
676 struct ufs_super_block_first * usb1;
677 struct ufs_cylinder_group * ucg;
678 unsigned result, cylno, blkno;
680 UFSD(("ENTER, goal %u\n", goal))
683 uspi = UFS_SB(sb)->s_uspi;
684 usb1 = ubh_get_usb_first(uspi);
685 ucg = ubh_get_ucg(UCPI_UBH);
/* no usable goal: resume from this cg's rotor position */
688 goal = ucpi->c_rotor;
/* round the goal down to a block boundary, cg-relative */
691 goal = ufs_blknum (goal);
692 goal = ufs_dtogd (goal);
695 * If the requested block is available, use it.
697 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, ufs_fragstoblks(goal))) {
/* goal block busy: scan the bitmap for a full block of fragments */
703 result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
704 if (result == (unsigned)-1)
706 ucpi->c_rotor = result;
/* claim the block and update cluster, quota and free counters */
708 blkno = ufs_fragstoblks(result);
709 ubh_clrblock (UCPI_UBH, ucpi->c_freeoff, blkno);
710 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
711 ufs_clusteracct (sb, ucpi, blkno, -1);
712 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
717 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
718 fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1);
719 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
720 cylno = ufs_cbtocylno(result);
721 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
722 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
724 UFSD(("EXIT, result %u\n", result))
/*
 * ufs_bitmap_search -- scan the cg free-fragment bitmap for a run of
 * 'count' free fragments, starting near 'goal' (or the cg frotor).
 * Uses the fragtable lookup tables to test one bitmap byte at a time,
 * then pinpoints the run inside the matching byte.
 * NOTE(review): elided listing -- the second scan is the wrap-around
 * retry from the start of the bitmap; failure returns and the
 * fragsize-tracking lines inside the byte loop are not visible.
 */
729 static unsigned ufs_bitmap_search (struct super_block * sb,
730 struct ufs_cg_private_info * ucpi, unsigned goal, unsigned count)
732 struct ufs_sb_private_info * uspi;
733 struct ufs_super_block_first * usb1;
734 struct ufs_cylinder_group * ucg;
735 unsigned start, length, location, result;
736 unsigned possition, fragsize, blockmap, mask;
738 UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count))
740 uspi = UFS_SB(sb)->s_uspi;
741 usb1 = ubh_get_usb_first (uspi);
742 ucg = ubh_get_ucg(UCPI_UBH);
/* start scanning at the byte holding the goal, else at the rotor */
745 start = ufs_dtogd(goal) >> 3;
747 start = ucpi->c_frotor >> 3;
749 length = ((uspi->s_fpg + 7) >> 3) - start;
750 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff + start, length,
751 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
752 1 << (count - 1 + (uspi->s_fpb & 7)));
/* nothing after 'start': rescan from the beginning of the bitmap */
755 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff, length,
756 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
757 1 << (count - 1 + (uspi->s_fpb & 7)));
759 ufs_error (sb, "ufs_bitmap_search",
760 "bitmap corrupted on cg %u, start %u, length %u, count %u, freeoff %u\n",
761 ucpi->c_cgx, start, length, count, ucpi->c_freeoff);
/* ubh_scanc reports bytes remaining; convert to a bit position */
766 result = (start + length - location) << 3;
767 ucpi->c_frotor = result;
770 * found the byte in the map
772 blockmap = ubh_blkmap(UCPI_UBH, ucpi->c_freeoff, result);
/* walk the byte bit by bit, measuring free runs ('fragsize' updates
 * are in the elided lines) until one of length 'count' is found */
774 for (possition = 0, mask = 1; possition < 8; possition++, mask <<= 1) {
775 if (blockmap & mask) {
776 if (!(possition & uspi->s_fpbmask))
782 if (fragsize == count) {
783 result += possition - count;
784 UFSD(("EXIT, result %u\n", result))
790 if (fragsize == count) {
791 result += possition - count;
792 UFSD(("EXIT, result %u\n", result))
/* the lookup table promised a run but the byte has none: corrupt map */
795 ufs_error (sb, "ufs_bitmap_search", "block not in map on cg %u\n", ucpi->c_cgx);
796 UFSD(("EXIT (FAILED)\n"))
/*
 * ufs_clusteracct -- maintain the 44BSD contiguous-cluster summary for
 * cylinder group 'ucpi' after block 'blkno' is freed (cnt > 0) or
 * allocated (cnt < 0): flip the cluster bit, measure the adjoining free
 * clusters forward and backward, and adjust the cluster-sum counters.
 * NOTE(review): elided listing -- initialization of start/forw/back and
 * the branch structure around the visible lines are not shown.
 */
800 static void ufs_clusteracct(struct super_block * sb,
801 struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
803 struct ufs_sb_private_info * uspi;
804 int i, start, end, forw, back;
806 uspi = UFS_SB(sb)->s_uspi;
/* cluster accounting disabled on this filesystem */
807 if (uspi->s_contigsumsize <= 0)
/* set bit = free in the cluster map (mirrors the fragment bitmap) */
811 ubh_setbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
813 ubh_clrbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
816 * Find the size of the cluster going forward.
819 end = start + uspi->s_contigsumsize;
820 if ( end >= ucpi->c_nclusterblks)
821 end = ucpi->c_nclusterblks;
822 i = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_clusteroff, end, start);
828 * Find the size of the cluster going backward.
831 end = start - uspi->s_contigsumsize;
834 i = ubh_find_last_zero_bit (UCPI_UBH, ucpi->c_clusteroff, start, end);
840 * Account for old cluster and the possibly new forward and
/* cluster sizes are capped at s_contigsumsize for the summary table */
844 if (i > uspi->s_contigsumsize)
845 i = uspi->s_contigsumsize;
/* c_clustersumoff is a table of 32-bit counters indexed by cluster
 * size; add the merged cluster, remove the old back/forward pieces */
846 fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (i << 2)), cnt);
848 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (back << 2)), cnt);
850 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (forw << 2)), cnt);
/*
 * Fragment-run lookup table for filesystems with 8 fragments per block:
 * indexed by a free-fragment bitmap byte, each entry is a bitmask whose
 * bit (n-1) is set when the byte contains a free run of exactly n
 * fragments (used by ufs_bitmap_search via ubh_scanc).
 * NOTE(review): the closing brace of this array is in the elided part
 * of the listing.
 */
854 static unsigned char ufs_fragtable_8fpb[] = {
855 0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
856 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
857 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
858 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
859 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
860 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
861 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
862 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
863 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
864 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
865 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
866 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
867 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
868 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
869 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
870 0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
/*
 * Fragment-run lookup table for filesystems with other than 8 fragments
 * per block; companion to ufs_fragtable_8fpb, selected in
 * ufs_bitmap_search by the (uspi->s_fpb == 8) test.
 * NOTE(review): the closing brace of this array lies beyond the visible
 * portion of this elided listing.
 */
873 static unsigned char ufs_fragtable_other[] = {
874 0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
875 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
876 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
877 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
878 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
879 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
880 0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
881 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
882 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
883 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
884 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
885 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
886 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
887 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
888 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
889 0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,