5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
13 * (C) 1998 Dave Boynton
14 * (C) 1998-2004 Ben Fennema
15 * (C) 1999-2000 Stelias Computing Inc
19 * 10/04/98 dgb Added rudimentary directory functions
20 * 10/07/98 Fully working udf_block_map! It works!
21 * 11/25/98 bmap altered to better support extents
22 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
23 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
24 * block boundaries (which is not actually allowed)
25 * 12/20/98 added support for strategy 4096
26 * 03/07/99 rewrote udf_block_map (again)
27 * New funcs, inode_bmap, udf_next_aext
28 * 04/19/99 Support for writing device EA's for major/minor #
33 #include <linux/smp_lock.h>
34 #include <linux/module.h>
35 #include <linux/pagemap.h>
36 #include <linux/buffer_head.h>
37 #include <linux/writeback.h>
38 #include <linux/slab.h>
/* Module identification. */
43 MODULE_AUTHOR("Ben Fennema");
44 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
45 MODULE_LICENSE("GPL");
/* Maximum number of extents the split/prealloc/merge helpers juggle at once. */
47 #define EXTENT_MERGE_SIZE 5
/* Forward declarations for the static helpers defined later in this file. */
49 static mode_t udf_convert_permissions(struct fileEntry *);
50 static int udf_update_inode(struct inode *, int);
51 static void udf_fill_inode(struct inode *, struct buffer_head *);
52 static int udf_alloc_i_data(struct inode *inode, size_t size);
53 static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
55 static int8_t udf_insert_aext(struct inode *, struct extent_position,
56 kernel_lb_addr, uint32_t);
57 static void udf_split_extents(struct inode *, int *, int, int,
58 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
59 static void udf_prealloc_extents(struct inode *, int, int,
60 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
61 static void udf_merge_extents(struct inode *,
62 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
63 static void udf_update_extents(struct inode *,
64 kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
65 struct extent_position *);
66 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
72 * Clean-up before the specified inode is destroyed.
75 * This routine is called when the kernel destroys an inode structure
76 * ie. when iput() finds i_count == 0.
79 * July 1, 1997 - Andrew E. Mileski
80 * Written, tested, and released.
82 * Called at the last iput() if i_nlink is zero.
84 void udf_delete_inode(struct inode * inode)
86 truncate_inode_pages(&inode->i_data, 0);
88 if (is_bad_inode(inode))
95 udf_update_inode(inode, IS_SYNC(inode));
96 udf_free_inode(inode);
105 * If we are going to release inode from memory, we discard preallocation and
106 * truncate last inode extent to proper length. We could use drop_inode() but
107 * it's called under inode_lock and thus we cannot mark inode dirty there. We
108 * use clear_inode() but we have to make sure to write inode as it's not written
111 void udf_clear_inode(struct inode *inode)
113 if (!(inode->i_sb->s_flags & MS_RDONLY)) {
115 /* Discard preallocation for directories, symlinks, etc. */
116 udf_discard_prealloc(inode);
117 udf_truncate_tail_extent(inode);
119 write_inode_now(inode, 1);
121 kfree(UDF_I_DATA(inode));
122 UDF_I_DATA(inode) = NULL;
125 static int udf_writepage(struct page *page, struct writeback_control *wbc)
127 return block_write_full_page(page, udf_get_block, wbc);
130 static int udf_readpage(struct file *file, struct page *page)
132 return block_read_full_page(page, udf_get_block);
135 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
137 return block_prepare_write(page, from, to, udf_get_block);
140 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
142 return generic_block_bmap(mapping,block,udf_get_block);
145 const struct address_space_operations udf_aops = {
146 .readpage = udf_readpage,
147 .writepage = udf_writepage,
148 .sync_page = block_sync_page,
149 .prepare_write = udf_prepare_write,
150 .commit_write = generic_commit_write,
154 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
158 struct writeback_control udf_wbc = {
159 .sync_mode = WB_SYNC_NONE,
163 /* from now on we have normal address_space methods */
164 inode->i_data.a_ops = &udf_aops;
166 if (!UDF_I_LENALLOC(inode))
168 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
169 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
171 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
172 mark_inode_dirty(inode);
176 page = grab_cache_page(inode->i_mapping, 0);
177 BUG_ON(!PageLocked(page));
179 if (!PageUptodate(page))
182 memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
183 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
184 memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
185 UDF_I_LENALLOC(inode));
186 flush_dcache_page(page);
187 SetPageUptodate(page);
190 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
191 UDF_I_LENALLOC(inode));
192 UDF_I_LENALLOC(inode) = 0;
193 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
194 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
196 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
198 inode->i_data.a_ops->writepage(page, &udf_wbc);
199 page_cache_release(page);
201 mark_inode_dirty(inode);
/*
 * udf_expand_dir_adinicb - convert a directory from in-ICB data to a block.
 *
 * Allocates a new data block, re-reads every file identifier from the
 * inline (in-ICB) area and rewrites it into the new block, then records
 * the block as the directory's single extent.  Returns the buffer_head of
 * the new block; *block receives the logical block number, *err an error
 * code on failure.
 */
204 struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
207 struct buffer_head *dbh = NULL;
211 struct extent_position epos;
213 struct udf_fileident_bh sfibh, dfibh;
214 loff_t f_pos = udf_ext0_offset(inode) >> 2;
215 int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
216 struct fileIdentDesc cfi, *sfi, *dfi;
/* Pick the allocation-descriptor type the expanded directory will use. */
218 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
219 alloctype = ICBTAG_FLAG_AD_SHORT;
221 alloctype = ICBTAG_FLAG_AD_LONG;
225 UDF_I_ALLOCTYPE(inode) = alloctype;
226 mark_inode_dirty(inode);
230 /* alloc block, and copy data to it */
231 *block = udf_new_block(inode->i_sb, inode,
232 UDF_I_LOCATION(inode).partitionReferenceNum,
233 UDF_I_LOCATION(inode).logicalBlockNum, err);
237 newblock = udf_get_pblock(inode->i_sb, *block,
238 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
241 dbh = udf_tgetblk(inode->i_sb, newblock);
245 memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
246 set_buffer_uptodate(dbh);
248 mark_buffer_dirty_inode(dbh, inode);
/* Walk the inline directory (source) and re-emit each entry into the
 * new block (destination).  The alloc type is temporarily flipped to
 * AD_IN_ICB so udf_fileident_read() reads from the inline area. */
250 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
251 sfibh.sbh = sfibh.ebh = NULL;
252 dfibh.soffset = dfibh.eoffset = 0;
253 dfibh.sbh = dfibh.ebh = dbh;
254 while ( (f_pos < size) )
256 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
257 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL);
263 UDF_I_ALLOCTYPE(inode) = alloctype;
264 sfi->descTag.tagLocation = cpu_to_le32(*block);
265 dfibh.soffset = dfibh.eoffset;
266 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
267 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
268 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
269 sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
/* Write failed: restore in-ICB state (error path). */
271 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
276 mark_buffer_dirty_inode(dbh, inode);
/* Clear the old inline area and record the new block as the only extent. */
278 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
279 UDF_I_LENALLOC(inode) = 0;
280 eloc.logicalBlockNum = *block;
281 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
282 elen = inode->i_size;
283 UDF_I_LENEXTENTS(inode) = elen;
285 epos.block = UDF_I_LOCATION(inode);
286 epos.offset = udf_file_entry_alloc_offset(inode);
287 udf_add_aext(inode, &epos, eloc, elen, 0);
291 mark_inode_dirty(inode);
/*
 * udf_get_block - VFS get_block callback: map a file block to a disc block.
 *
 * For reads (!create) it simply consults udf_block_map().  For writes it
 * maintains a sequential-allocation hint (NEXT_ALLOC_BLOCK/GOAL) and lets
 * inode_getblk() find or allocate the physical block, marking the
 * buffer new when a block was freshly allocated.
 */
295 static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
298 struct buffer_head *bh;
/* Lookup-only path: map the existing block, never allocate. */
303 phys = udf_block_map(inode, block);
305 map_bh(bh_result, inode->i_sb, phys);
/* Sequential write detected: advance the allocation goal hint. */
318 if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
320 UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
321 UDF_I_NEXT_ALLOC_GOAL(inode) ++;
326 bh = inode_getblk(inode, block, &err, &phys, &new);
333 set_buffer_new(bh_result);
334 map_bh(bh_result, inode->i_sb, phys);
/* Negative block numbers are invalid — warn and bail out. */
340 udf_warning(inode->i_sb, "udf_get_block", "block < 0");
/*
 * udf_getblk - map (and optionally allocate) a block, returning its
 * buffer_head.  A dummy buffer_head is used to capture the mapping from
 * udf_get_block(); freshly allocated blocks are zero-filled and marked
 * up to date/dirty before being returned.
 */
344 static struct buffer_head *
345 udf_getblk(struct inode *inode, long block, int create, int *err)
347 struct buffer_head dummy;
/* Sentinel so an unmapped result is recognisable. */
350 dummy.b_blocknr = -1000;
351 *err = udf_get_block(inode, block, &dummy, create);
352 if (!*err && buffer_mapped(&dummy))
354 struct buffer_head *bh;
355 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
/* Newly allocated block: clear its contents before handing it out. */
356 if (buffer_new(&dummy))
359 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
360 set_buffer_uptodate(bh);
362 mark_buffer_dirty_inode(bh, inode);
369 /* Extend the file by 'blocks' blocks, return the number of extents added */
370 int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
371 kernel_long_ad *last_ext, sector_t blocks)
/* 'fake' means the previous extent has zero length (placeholder only). */
374 int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
375 struct super_block *sb = inode->i_sb;
376 kernel_lb_addr prealloc_loc = {0, 0};
377 int prealloc_len = 0;
379 /* The previous extent is fake and we should not extend by anything
380 * - there's nothing to do... */
383 /* Round the last extent up to a multiple of block size */
384 if (last_ext->extLength & (sb->s_blocksize - 1)) {
385 last_ext->extLength =
386 (last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
387 (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
388 sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
389 UDF_I_LENEXTENTS(inode) =
390 (UDF_I_LENEXTENTS(inode) + sb->s_blocksize - 1) &
391 ~(sb->s_blocksize - 1);
393 /* Last extent are just preallocated blocks? */
394 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_ALLOCATED) {
395 /* Save the extent so that we can reattach it to the end */
396 prealloc_loc = last_ext->extLocation;
397 prealloc_len = last_ext->extLength;
398 /* Mark the extent as a hole */
399 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
400 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
401 last_ext->extLocation.logicalBlockNum = 0;
402 last_ext->extLocation.partitionReferenceNum = 0;
404 /* Can we merge with the previous extent? */
405 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) {
/* 'add' is how many blocks still fit in this extent before the 2^30-1
 * extent-length limit is reached. */
406 add = ((1<<30) - sb->s_blocksize - (last_ext->extLength &
407 UDF_EXTENT_LENGTH_MASK)) >> sb->s_blocksize_bits;
411 last_ext->extLength += add << sb->s_blocksize_bits;
/* A fake extent must be added as a fresh one; a real extent is
 * rewritten in place. */
415 udf_add_aext(inode, last_pos, last_ext->extLocation,
416 last_ext->extLength, 1);
420 udf_write_aext(inode, last_pos, last_ext->extLocation, last_ext->extLength, 1);
421 /* Managed to do everything necessary? */
425 /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
426 last_ext->extLocation.logicalBlockNum = 0;
427 last_ext->extLocation.partitionReferenceNum = 0;
428 add = (1 << (30-sb->s_blocksize_bits)) - 1;
429 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits);
430 /* Create enough extents to cover the whole hole */
431 while (blocks > add) {
433 if (udf_add_aext(inode, last_pos, last_ext->extLocation,
434 last_ext->extLength, 1) == -1)
/* Final, possibly partial, extent covering the remainder. */
439 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
440 (blocks << sb->s_blocksize_bits);
441 if (udf_add_aext(inode, last_pos, last_ext->extLocation,
442 last_ext->extLength, 1) == -1)
447 /* Do we have some preallocated blocks saved? */
449 if (udf_add_aext(inode, last_pos, prealloc_loc, prealloc_len, 1) == -1)
451 last_ext->extLocation = prealloc_loc;
452 last_ext->extLength = prealloc_len;
455 /* last_pos should point to the last written extent... */
456 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
457 last_pos->offset -= sizeof(short_ad);
458 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
459 last_pos->offset -= sizeof(long_ad);
/*
 * inode_getblk - core block mapping/allocation for udf_get_block().
 *
 * Walks the inode's extent list looking for the extent containing the
 * requested file block.  If found recorded+allocated, returns its
 * physical block; otherwise extends the file / allocates a new block,
 * splitting, preallocating and merging extents as needed, and writes the
 * updated extent list back.  *phys receives the physical block, *new is
 * set when a block was freshly allocated, *err an error code.
 */
465 static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
466 int *err, long *phys, int *new)
468 static sector_t last_block;
469 struct buffer_head *result = NULL;
470 kernel_long_ad laarr[EXTENT_MERGE_SIZE];
/* Three sliding positions in the extent list: previous, current, next. */
471 struct extent_position prev_epos, cur_epos, next_epos;
472 int count = 0, startnum = 0, endnum = 0;
473 uint32_t elen = 0, tmpelen;
474 kernel_lb_addr eloc, tmpeloc;
476 loff_t lbcount = 0, b_off = 0;
477 uint32_t newblocknum, newblock;
480 int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
483 prev_epos.offset = udf_file_entry_alloc_offset(inode);
484 prev_epos.block = UDF_I_LOCATION(inode);
486 cur_epos = next_epos = prev_epos;
487 b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
489 /* find the extent which contains the block we are looking for.
490 alternate between laarr[0] and laarr[1] for locations of the
491 current extent, and the previous extent */
494 if (prev_epos.bh != cur_epos.bh)
496 brelse(prev_epos.bh);
498 prev_epos.bh = cur_epos.bh;
500 if (cur_epos.bh != next_epos.bh)
503 get_bh(next_epos.bh);
504 cur_epos.bh = next_epos.bh;
509 prev_epos.block = cur_epos.block;
510 cur_epos.block = next_epos.block;
512 prev_epos.offset = cur_epos.offset;
513 cur_epos.offset = next_epos.offset;
515 if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1)) == -1)
520 laarr[c].extLength = (etype << 30) | elen;
521 laarr[c].extLocation = eloc;
/* Track a plausible allocation goal just past the last real extent. */
523 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
524 pgoal = eloc.logicalBlockNum +
525 ((elen + inode->i_sb->s_blocksize - 1) >>
526 inode->i_sb->s_blocksize_bits);
529 } while (lbcount + elen <= b_off);
/* Offset of the requested block within the found extent. */
532 offset = b_off >> inode->i_sb->s_blocksize_bits;
534 * Move prev_epos and cur_epos into indirect extent if we are at
537 udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
538 udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);
540 /* if the extent is allocated and recorded, return the block
541 if the extent is not a multiple of the blocksize, round up */
543 if (etype == (EXT_RECORDED_ALLOCATED >> 30))
545 if (elen & (inode->i_sb->s_blocksize - 1))
547 elen = EXT_RECORDED_ALLOCATED |
548 ((elen + inode->i_sb->s_blocksize - 1) &
549 ~(inode->i_sb->s_blocksize - 1));
550 etype = udf_write_aext(inode, &cur_epos, eloc, elen, 1);
552 brelse(prev_epos.bh);
554 brelse(next_epos.bh);
555 newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
561 /* Are we beyond EOF? */
572 /* Create a fake extent when there's not one */
573 memset(&laarr[0].extLocation, 0x00, sizeof(kernel_lb_addr));
574 laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
575 /* Will udf_extend_file() create real extent from a fake one? */
576 startnum = (offset > 0);
578 /* Create extents for the hole between EOF and offset */
579 ret = udf_extend_file(inode, &prev_epos, laarr, offset);
581 brelse(prev_epos.bh);
583 brelse(next_epos.bh);
584 /* We don't really know the error here so we just make
592 /* We are not covered by a preallocated extent? */
593 if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) != EXT_NOT_RECORDED_ALLOCATED) {
594 /* Is there any real extent? - otherwise we overwrite
598 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
599 inode->i_sb->s_blocksize;
600 memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
608 endnum = startnum = ((count > 2) ? 2 : count);
610 /* if the current extent is in position 0, swap it with the previous */
611 if (!c && count != 1)
619 /* if the current block is located in an extent, read the next extent */
620 if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0)) != -1)
622 laarr[c+1].extLength = (etype << 30) | elen;
623 laarr[c+1].extLocation = eloc;
633 /* if the current extent is not recorded but allocated, get the
634 block in the extent corresponding to the requested block */
635 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
636 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
637 else /* otherwise, allocate a new block */
639 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
640 goal = UDF_I_NEXT_ALLOC_GOAL(inode);
645 goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
648 if (!(newblocknum = udf_new_block(inode->i_sb, inode,
649 UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
651 brelse(prev_epos.bh);
655 UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
658 /* if the extent the requsted block is located in contains multiple blocks,
659 split the extent into at most three extents. blocks prior to requested
660 block, requested block, and blocks after requested block */
661 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
663 #ifdef UDF_PREALLOCATE
664 /* preallocate blocks */
665 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
668 /* merge any continuous blocks in laarr */
669 udf_merge_extents(inode, laarr, &endnum);
671 /* write back the new extents, inserting new extents if the new number
672 of extents is greater than the old number, and deleting extents if
673 the new number of extents is less than the old number */
674 udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
676 brelse(prev_epos.bh);
678 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
679 UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
/* Remember this allocation to recognise the next sequential write. */
686 UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
687 UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
688 inode->i_ctime = current_fs_time(inode->i_sb);
691 udf_sync_inode(inode);
693 mark_inode_dirty(inode);
/*
 * udf_split_extents - split the extent containing the requested block.
 *
 * Splits laarr[*c] into up to three pieces: blocks before the requested
 * block, the requested block itself (recorded+allocated at newblocknum),
 * and blocks after it.  *c and *endnum are adjusted accordingly.
 */
697 static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
698 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
/* Only unrecorded extents (allocated or not) can be split. */
700 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
701 (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
704 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
705 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
706 int8_t etype = (laarr[curr].extLength >> 30);
/* Requested block at the start or end: two pieces; otherwise three. */
710 else if (!offset || blen == offset + 1)
712 laarr[curr+2] = laarr[curr+1];
713 laarr[curr+1] = laarr[curr];
717 laarr[curr+3] = laarr[curr+1];
718 laarr[curr+2] = laarr[curr+1] = laarr[curr];
/* Head piece: blocks before the requested block. */
723 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
/* Allocated-but-unrecorded head is released back to free space and
 * turned into a hole. */
725 udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
726 laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
727 (offset << inode->i_sb->s_blocksize_bits);
728 laarr[curr].extLocation.logicalBlockNum = 0;
729 laarr[curr].extLocation.partitionReferenceNum = 0;
732 laarr[curr].extLength = (etype << 30) |
733 (offset << inode->i_sb->s_blocksize_bits);
/* Middle piece: the requested block becomes recorded+allocated. */
739 laarr[curr].extLocation.logicalBlockNum = newblocknum;
740 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
741 laarr[curr].extLocation.partitionReferenceNum =
742 UDF_I_LOCATION(inode).partitionReferenceNum;
743 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
744 inode->i_sb->s_blocksize;
/* Tail piece: blocks after the requested block, if any. */
747 if (blen != offset + 1)
749 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
750 laarr[curr].extLocation.logicalBlockNum += (offset + 1);
751 laarr[curr].extLength = (etype << 30) |
752 ((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
/*
 * udf_prealloc_extents - preallocate blocks after the just-allocated one.
 *
 * Tries to reserve up to UDF_DEFAULT_PREALLOC_BLOCKS following the
 * current extent so future sequential writes find allocated blocks,
 * recording them as a NOT_RECORDED_ALLOCATED extent and trimming any
 * overlapping hole extents that follow.
 */
759 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
760 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
762 int start, length = 0, currlength = 0, i;
764 if (*endnum >= (c+1))
/* If the next extent is already preallocated, count its blocks. */
773 if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
776 length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
777 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
/* Count how many blocks of hole follow, to bound the preallocation. */
783 for (i=start+1; i<=*endnum; i++)
788 length += UDF_DEFAULT_PREALLOC_BLOCKS;
790 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
791 length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
792 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
/* Ask the allocator for contiguous blocks right after 'next'. */
799 int next = laarr[start].extLocation.logicalBlockNum +
800 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
801 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
802 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
803 laarr[start].extLocation.partitionReferenceNum,
804 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
805 UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
/* Either grow the existing preallocated extent or insert a new one. */
810 laarr[start].extLength +=
811 (numalloc << inode->i_sb->s_blocksize_bits);
814 memmove(&laarr[c+2], &laarr[c+1],
815 sizeof(long_ad) * (*endnum - (c+1)));
817 laarr[c+1].extLocation.logicalBlockNum = next;
818 laarr[c+1].extLocation.partitionReferenceNum =
819 laarr[c].extLocation.partitionReferenceNum;
820 laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
821 (numalloc << inode->i_sb->s_blocksize_bits);
/* Shrink or drop the hole extents the preallocation now covers. */
825 for (i=start+1; numalloc && i<*endnum; i++)
827 int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
828 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
832 laarr[i].extLength -=
833 (numalloc << inode->i_sb->s_blocksize_bits);
840 memmove(&laarr[i], &laarr[i+1],
841 sizeof(long_ad) * (*endnum - (i+1)));
846 UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
/*
 * udf_merge_extents - coalesce adjacent compatible extents in laarr.
 *
 * Merges neighbouring extents of the same type (and, for allocated
 * extents, physically contiguous ones), respecting the maximum extent
 * length (UDF_EXTENT_LENGTH_MASK).  Also converts preallocated extents
 * that precede holes back to holes, freeing their blocks.
 */
851 static void udf_merge_extents(struct inode *inode,
852 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
856 for (i=0; i<(*endnum-1); i++)
858 if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
/* Same type: mergeable if both are holes or physically contiguous. */
860 if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
861 ((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
862 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
863 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
/* Combined length overflows one extent: fill extent i to the
 * maximum and push the remainder into extent i+1. */
865 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
866 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
867 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
869 laarr[i+1].extLength = (laarr[i+1].extLength -
870 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
871 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
872 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
873 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
874 laarr[i+1].extLocation.logicalBlockNum =
875 laarr[i].extLocation.logicalBlockNum +
876 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
877 inode->i_sb->s_blocksize_bits);
/* Fits in one extent: absorb i+1 into i and close the gap. */
881 laarr[i].extLength = laarr[i+1].extLength +
882 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
883 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
885 memmove(&laarr[i+1], &laarr[i+2],
886 sizeof(long_ad) * (*endnum - (i+2)));
/* Preallocated extent followed by a hole: free the preallocated
 * blocks and merge the two as one hole. */
892 else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
893 ((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
895 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
896 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
897 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
898 laarr[i].extLocation.logicalBlockNum = 0;
899 laarr[i].extLocation.partitionReferenceNum = 0;
901 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
902 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
903 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
905 laarr[i+1].extLength = (laarr[i+1].extLength -
906 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
907 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
908 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
909 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
913 laarr[i].extLength = laarr[i+1].extLength +
914 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
915 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
917 memmove(&laarr[i+1], &laarr[i+2],
918 sizeof(long_ad) * (*endnum - (i+2)));
/* Lone trailing preallocated extent: free it and turn it into a hole. */
923 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
925 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
926 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
927 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
928 laarr[i].extLocation.logicalBlockNum = 0;
929 laarr[i].extLocation.partitionReferenceNum = 0;
930 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
931 EXT_NOT_RECORDED_NOT_ALLOCATED;
/*
 * udf_update_extents - write the modified laarr extents back to disc.
 *
 * Deletes surplus on-disc extents when the new count (endnum) is below
 * the old count (startnum), inserts fresh ones when it grew, then
 * rewrites each extent in place starting at *epos.
 */
936 static void udf_update_extents(struct inode *inode,
937 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
938 struct extent_position *epos)
941 kernel_lb_addr tmploc;
944 if (startnum > endnum)
/* Fewer extents than before: remove the extras from disc. */
946 for (i=0; i<(startnum-endnum); i++)
947 udf_delete_aext(inode, *epos, laarr[i].extLocation,
950 else if (startnum < endnum)
/* More extents than before: insert placeholders to make room. */
952 for (i=0; i<(endnum-startnum); i++)
954 udf_insert_aext(inode, *epos, laarr[i].extLocation,
956 udf_next_aext(inode, epos, &laarr[i].extLocation,
957 &laarr[i].extLength, 1);
/* Overwrite each slot with the final extent contents. */
962 for (i=start; i<endnum; i++)
964 udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
965 udf_write_aext(inode, epos, laarr[i].extLocation,
966 laarr[i].extLength, 1);
970 struct buffer_head * udf_bread(struct inode * inode, int block,
971 int create, int * err)
973 struct buffer_head * bh = NULL;
975 bh = udf_getblk(inode, block, create, err);
979 if (buffer_uptodate(bh))
981 ll_rw_block(READ, 1, &bh);
983 if (buffer_uptodate(bh))
/*
 * udf_truncate - truncate an inode to its current i_size.
 *
 * Only regular files, directories and symlinks are truncated; append-only
 * and immutable inodes are refused.  In-ICB files either stay inline
 * (with the tail of the inline area zeroed) or are first expanded to
 * extent form; extent-based files go through the block-level truncation
 * helpers.
 */
990 void udf_truncate(struct inode * inode)
995 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
996 S_ISLNK(inode->i_mode)))
998 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1002 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
/* New size no longer fits inline: expand to extent form first. */
1004 if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
1007 udf_expand_file_adinicb(inode, inode->i_size, &err);
1008 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
/* Expansion failed and we are still inline: clamp i_size. */
1010 inode->i_size = UDF_I_LENALLOC(inode);
1015 udf_truncate_extents(inode);
/* Still inline: zero the bytes past the new size. */
1019 offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
1020 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
1021 UDF_I_LENALLOC(inode) = inode->i_size;
/* Extent-based file: zero the partial tail page and drop extents. */
1026 block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
1027 udf_truncate_extents(inode);
1030 inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
1032 udf_sync_inode (inode);
1034 mark_inode_dirty(inode);
/*
 * __udf_read_inode - read the on-disc (extended) file entry for an inode.
 *
 * Reads the tagged descriptor at the inode's ICB location, follows
 * strategy-4096 indirect ICBs to the newest file entry, rejects unknown
 * strategy types, and hands the buffer to udf_fill_inode().  On any
 * failure the inode is marked bad.
 */
1039 __udf_read_inode(struct inode *inode)
1041 struct buffer_head *bh = NULL;
1042 struct fileEntry *fe;
1046 * Set defaults, but the inode is still incomplete!
1047 * Note: get_new_inode() sets the following on a new inode:
1050 * i_flags = sb->s_flags
1052 * clean_inode(): zero fills and sets
1057 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
1061 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
1063 make_bad_inode(inode);
/* Only (extended) file entries and unallocated-space entries are valid. */
1067 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
1068 ident != TAG_IDENT_USE)
1070 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
1071 inode->i_ino, ident);
1073 make_bad_inode(inode);
1077 fe = (struct fileEntry *)bh->b_data;
/* Strategy 4096: an indirect entry points at the live file entry. */
1079 if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
1081 struct buffer_head *ibh = NULL, *nbh = NULL;
1082 struct indirectEntry *ie;
1084 ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
1085 if (ident == TAG_IDENT_IE)
1090 ie = (struct indirectEntry *)ibh->b_data;
1092 loc = lelb_to_cpu(ie->indirectICB.extLocation);
1094 if (ie->indirectICB.extLength &&
1095 (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
1097 if (ident == TAG_IDENT_FE ||
1098 ident == TAG_IDENT_EFE)
/* Re-point the inode at the indirect target and re-read. */
1100 memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
1104 __udf_read_inode(inode);
1120 else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
1122 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
1123 le16_to_cpu(fe->icbTag.strategyType));
1125 make_bad_inode(inode);
1128 udf_fill_inode(inode, bh);
/*
 * udf_fill_inode - populate the in-core inode from an on-disc descriptor.
 *
 * Decodes a fileEntry, extendedFileEntry or unallocSpaceEntry from bh:
 * copies the allocation-descriptor area into UDF_I_DATA, converts
 * ownership, permissions, link count, size, timestamps and unique ID
 * from little-endian disc format, then wires up the inode/file
 * operations according to the ICB file type.  Any unrecognised layout
 * marks the inode bad.
 */
1133 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1135 struct fileEntry *fe;
1136 struct extendedFileEntry *efe;
1141 fe = (struct fileEntry *)bh->b_data;
1142 efe = (struct extendedFileEntry *)bh->b_data;
1144 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1145 UDF_I_STRAT4096(inode) = 0;
1146 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1147 UDF_I_STRAT4096(inode) = 1;
1149 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1150 UDF_I_UNIQUE(inode) = 0;
1151 UDF_I_LENEATTR(inode) = 0;
1152 UDF_I_LENEXTENTS(inode) = 0;
1153 UDF_I_LENALLOC(inode) = 0;
1154 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1155 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
/* Copy the descriptor's trailing data area; its start depends on which
 * of the three descriptor layouts we were handed. */
1156 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1158 UDF_I_EFE(inode) = 1;
1159 UDF_I_USE(inode) = 0;
1160 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)))
1162 make_bad_inode(inode);
1165 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1167 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1169 UDF_I_EFE(inode) = 0;
1170 UDF_I_USE(inode) = 0;
1171 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct fileEntry)))
1173 make_bad_inode(inode);
1176 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1178 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1180 UDF_I_EFE(inode) = 0;
1181 UDF_I_USE(inode) = 1;
1182 UDF_I_LENALLOC(inode) =
1184 ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1185 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)))
1187 make_bad_inode(inode);
1190 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
/* -1 means "unset" on disc; fall back to mount-option defaults. */
1194 inode->i_uid = le32_to_cpu(fe->uid);
1195 if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1196 UDF_FLAG_UID_IGNORE))
1197 inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1199 inode->i_gid = le32_to_cpu(fe->gid);
1200 if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1201 UDF_FLAG_GID_IGNORE))
1202 inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1204 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1205 if (!inode->i_nlink)
1208 inode->i_size = le64_to_cpu(fe->informationLength);
1209 UDF_I_LENEXTENTS(inode) = inode->i_size;
1211 inode->i_mode = udf_convert_permissions(fe);
1212 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
/* Plain fileEntry layout: timestamps live in the fe fields. */
1214 if (UDF_I_EFE(inode) == 0)
1216 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1217 (inode->i_sb->s_blocksize_bits - 9);
1219 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1220 lets_to_cpu(fe->accessTime)) )
1222 inode->i_atime.tv_sec = convtime;
1223 inode->i_atime.tv_nsec = convtime_usec * 1000;
/* Unconvertible stamp: fall back to the volume's recording time. */
1227 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1230 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1231 lets_to_cpu(fe->modificationTime)) )
1233 inode->i_mtime.tv_sec = convtime;
1234 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1238 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1241 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1242 lets_to_cpu(fe->attrTime)) )
1244 inode->i_ctime.tv_sec = convtime;
1245 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1249 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1252 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1253 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1254 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1255 offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
/* extendedFileEntry layout: same conversions from the efe fields. */
1259 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1260 (inode->i_sb->s_blocksize_bits - 9);
1262 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1263 lets_to_cpu(efe->accessTime)) )
1265 inode->i_atime.tv_sec = convtime;
1266 inode->i_atime.tv_nsec = convtime_usec * 1000;
1270 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1273 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1274 lets_to_cpu(efe->modificationTime)) )
1276 inode->i_mtime.tv_sec = convtime;
1277 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1281 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1284 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1285 lets_to_cpu(efe->createTime)) )
1287 UDF_I_CRTIME(inode).tv_sec = convtime;
1288 UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
1292 UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1295 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1296 lets_to_cpu(efe->attrTime)) )
1298 inode->i_ctime.tv_sec = convtime;
1299 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1303 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1306 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1307 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1308 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1309 offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
/* Wire up operations based on the ICB file type. */
1312 switch (fe->icbTag.fileType)
1314 case ICBTAG_FILE_TYPE_DIRECTORY:
1316 inode->i_op = &udf_dir_inode_operations;
1317 inode->i_fop = &udf_dir_operations;
1318 inode->i_mode |= S_IFDIR;
1322 case ICBTAG_FILE_TYPE_REALTIME:
1323 case ICBTAG_FILE_TYPE_REGULAR:
1324 case ICBTAG_FILE_TYPE_UNDEF:
/* In-ICB files use the inline address-space ops, others the
 * block-based ones. */
1326 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1327 inode->i_data.a_ops = &udf_adinicb_aops;
1329 inode->i_data.a_ops = &udf_aops;
1330 inode->i_op = &udf_file_inode_operations;
1331 inode->i_fop = &udf_file_operations;
1332 inode->i_mode |= S_IFREG;
1335 case ICBTAG_FILE_TYPE_BLOCK:
1337 inode->i_mode |= S_IFBLK;
1340 case ICBTAG_FILE_TYPE_CHAR:
1342 inode->i_mode |= S_IFCHR;
1345 case ICBTAG_FILE_TYPE_FIFO:
1347 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1350 case ICBTAG_FILE_TYPE_SOCKET:
1352 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1355 case ICBTAG_FILE_TYPE_SYMLINK:
1357 inode->i_data.a_ops = &udf_symlink_aops;
1358 inode->i_op = &page_symlink_inode_operations;
1359 inode->i_mode = S_IFLNK|S_IRWXUGO;
1364 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1365 inode->i_ino, fe->icbTag.fileType);
1366 make_bad_inode(inode);
/* Device nodes: fetch major/minor from the Device Spec extended
 * attribute (EA type 12). */
1370 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1372 struct deviceSpec *dsea =
1373 (struct deviceSpec *)
1374 udf_get_extendedattr(inode, 12, 1);
1378 init_special_inode(inode, inode->i_mode, MKDEV(
1379 le32_to_cpu(dsea->majorDeviceIdent),
1380 le32_to_cpu(dsea->minorDeviceIdent)));
1381 /* Developer ID ??? */
1385 make_bad_inode(inode);
/*
 * udf_alloc_i_data - allocate the inode's in-core buffer (UDF_I_DATA) that
 * mirrors the on-disk allocation descriptors / extended-attribute area.
 * Returns 0 on success; on allocation failure it logs the inode number.
 * NOTE(review): this listing is elided — the error-return statement
 * (presumably returning -ENOMEM) falls in the missing lines.
 */
1390 static int udf_alloc_i_data(struct inode *inode, size_t size)
1392 UDF_I_DATA(inode) = kmalloc(size, GFP_KERNEL);
/* OOM path: report which inode could not be set up */
1394 if (!UDF_I_DATA(inode))
1396 printk(KERN_ERR "udf:udf_alloc_i_data (ino %ld) no free memory\n",
/*
 * udf_convert_permissions - translate the on-disk UDF permission word and
 * ICB tag flags into a POSIX mode_t.
 *
 * UDF stores five permission bits per class (other/group/owner) — rwx plus
 * delete/chattr (see the FE_PERM_* masks used by udf_update_inode).  The
 * >> 2 and >> 4 shifts line each class's rwx bits up with S_IRWXG/S_IRWXU;
 * the extra per-class bits are simply masked away here.
 */
1405 udf_convert_permissions(struct fileEntry *fe)
1408 uint32_t permissions;
1411 permissions = le32_to_cpu(fe->permissions);
1412 flags = le16_to_cpu(fe->icbTag.flags);
/* rwx triads for other/group/owner ... */
1414 mode = (( permissions ) & S_IRWXO) |
1415 (( permissions >> 2 ) & S_IRWXG) |
1416 (( permissions >> 4 ) & S_IRWXU) |
/* ... plus setuid/setgid/sticky, which UDF keeps in the ICB tag flags */
1417 (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1418 (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1419 (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1428 * Write out the specified inode.
1431 * This routine is called whenever an inode is synced.
1432 * It forwards the actual on-disk update to udf_update_inode().
1435 * July 1, 1997 - Andrew E. Mileski
1436 * Written, tested, and released.
/*
 * udf_write_inode - VFS ->write_inode hook.
 * Delegates the on-disk file-entry update to udf_update_inode(); @sync is
 * passed through to request a synchronous write.
 * NOTE(review): elided listing — the declaration of `ret`, any locking, and
 * `return ret;` fall in the missing lines.
 */
1439 int udf_write_inode(struct inode * inode, int sync)
1443 ret = udf_update_inode(inode, sync);
/* udf_sync_inode - force a synchronous on-disk update of @inode. */
1448 int udf_sync_inode(struct inode * inode)
1450 return udf_update_inode(inode, 1);
/*
 * udf_update_inode - serialise the in-core inode back to its on-disk
 * descriptor: a File Entry (FE), an Extended File Entry (EFE), or — as a
 * special case — an Unallocated Space Entry (USE).
 * @do_sync: when set, wait for the buffer write and report I/O errors.
 * NOTE(review): elided listing — error returns, `else`/brace lines and the
 * final brelse/return fall in the missing lines.
 */
1454 udf_update_inode(struct inode *inode, int do_sync)
1456 struct buffer_head *bh = NULL;
1457 struct fileEntry *fe;
1458 struct extendedFileEntry *efe;
1463 kernel_timestamp cpu_time;
/* Read the block holding this inode's (extended) file entry */
1466 bh = udf_tread(inode->i_sb,
1467 udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1471 udf_debug("bread failure\n");
/* Rebuild the descriptor from scratch in the buffer */
1475 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
1477 fe = (struct fileEntry *)bh->b_data;
1478 efe = (struct extendedFileEntry *)bh->b_data;
/*
 * Special case: an Unallocated Space Entry.  Only the allocation
 * descriptors, CRC and tag checksum need regenerating; done, we dirty the
 * buffer and (in elided lines) return early.
 */
1480 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1482 struct unallocSpaceEntry *use =
1483 (struct unallocSpaceEntry *)bh->b_data;
1485 use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1486 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1487 crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1489 use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1490 use->descTag.descCRCLength = cpu_to_le16(crclen);
1491 use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
/* Tag checksum = byte sum of the 16-byte tag; the checksum byte is zeroed
 * first so including it in the sum is harmless. */
1493 use->descTag.tagChecksum = 0;
1494 for (i=0; i<16; i++)
1496 use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1498 mark_buffer_dirty(bh);
/* uid/gid: the "forget" mount options persist -1 (invalid) instead */
1503 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1504 fe->uid = cpu_to_le32(-1);
1505 else fe->uid = cpu_to_le32(inode->i_uid);
1507 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1508 fe->gid = cpu_to_le32(-1);
1509 else fe->gid = cpu_to_le32(inode->i_gid);
/* Inverse of udf_convert_permissions(): spread POSIX rwx triads out to
 * UDF's 5-bit-per-class layout ... */
1511 udfperms = ((inode->i_mode & S_IRWXO) ) |
1512 ((inode->i_mode & S_IRWXG) << 2) |
1513 ((inode->i_mode & S_IRWXU) << 4);
/* ... while preserving the UDF-only delete/chattr bits already on disk */
1515 udfperms |= (le32_to_cpu(fe->permissions) &
1516 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1517 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1518 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1519 fe->permissions = cpu_to_le32(udfperms);
/* Directories store one link fewer than the in-core count — presumably the
 * self-reference is not counted on disk; verify against udf_fill_inode. */
1521 if (S_ISDIR(inode->i_mode))
1522 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1524 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1526 fe->informationLength = cpu_to_le64(inode->i_size);
/*
 * Device nodes: persist major/minor in a Device Specification extended
 * attribute (attribute type 12), creating it if absent, with a Linux
 * developer regid in the implementation-use area.
 */
1528 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1531 struct deviceSpec *dsea =
1532 (struct deviceSpec *)
1533 udf_get_extendedattr(inode, 12, 1);
1537 dsea = (struct deviceSpec *)
1538 udf_add_extendedattr(inode,
1539 sizeof(struct deviceSpec) +
1540 sizeof(regid), 12, 0x3);
1541 dsea->attrType = cpu_to_le32(12);
1542 dsea->attrSubtype = 1;
1543 dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
1545 dsea->impUseLength = cpu_to_le32(sizeof(regid));
1547 eid = (regid *)dsea->impUse;
1548 memset(eid, 0, sizeof(regid));
1549 strcpy(eid->ident, UDF_ID_DEVELOPER);
1550 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1551 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1552 dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1553 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
/* Plain File Entry branch (EFE flag clear) */
1556 if (UDF_I_EFE(inode) == 0)
1558 memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
/* i_blocks is in 512-byte units; convert to filesystem blocks, rounding up */
1559 fe->logicalBlocksRecorded = cpu_to_le64(
1560 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1561 (inode->i_sb->s_blocksize_bits - 9));
1563 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1564 fe->accessTime = cpu_to_lets(cpu_time);
1565 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1566 fe->modificationTime = cpu_to_lets(cpu_time);
1567 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1568 fe->attrTime = cpu_to_lets(cpu_time);
1569 memset(&(fe->impIdent), 0, sizeof(regid));
1570 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1571 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1572 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1573 fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1574 fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1575 fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1576 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1577 crclen = sizeof(struct fileEntry);
/* Extended File Entry branch (elided `else` line precedes this) */
1581 memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1582 efe->objectSize = cpu_to_le64(inode->i_size);
1583 efe->logicalBlocksRecorded = cpu_to_le64(
1584 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1585 (inode->i_sb->s_blocksize_bits - 9));
/* Clamp the stored creation time so it is never later than the access,
 * modification or attribute times */
1587 if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
1588 (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
1589 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
1591 UDF_I_CRTIME(inode) = inode->i_atime;
1593 if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
1594 (UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
1595 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
1597 UDF_I_CRTIME(inode) = inode->i_mtime;
1599 if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
1600 (UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
1601 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
1603 UDF_I_CRTIME(inode) = inode->i_ctime;
1606 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1607 efe->accessTime = cpu_to_lets(cpu_time);
1608 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1609 efe->modificationTime = cpu_to_lets(cpu_time);
1610 if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
1611 efe->createTime = cpu_to_lets(cpu_time);
1612 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1613 efe->attrTime = cpu_to_lets(cpu_time);
1615 memset(&(efe->impIdent), 0, sizeof(regid));
1616 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1617 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1618 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1619 efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1620 efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1621 efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1622 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1623 crclen = sizeof(struct extendedFileEntry);
/* ICB strategy: 4096 uses an indirect entry (2 entries), else plain 4 */
1625 if (UDF_I_STRAT4096(inode))
1627 fe->icbTag.strategyType = cpu_to_le16(4096);
1628 fe->icbTag.strategyParameter = cpu_to_le16(1);
1629 fe->icbTag.numEntries = cpu_to_le16(2);
1633 fe->icbTag.strategyType = cpu_to_le16(4);
1634 fe->icbTag.numEntries = cpu_to_le16(1);
/* Map the POSIX file type onto the ICB tag file type */
1637 if (S_ISDIR(inode->i_mode))
1638 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1639 else if (S_ISREG(inode->i_mode))
1640 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1641 else if (S_ISLNK(inode->i_mode))
1642 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1643 else if (S_ISBLK(inode->i_mode))
1644 fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1645 else if (S_ISCHR(inode->i_mode))
1646 fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1647 else if (S_ISFIFO(inode->i_mode))
1648 fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1649 else if (S_ISSOCK(inode->i_mode))
1650 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
/* Rebuild the ICB flags: allocation type + suid/sgid/sticky, keeping any
 * other on-disk flag bits untouched */
1652 icbflags = UDF_I_ALLOCTYPE(inode) |
1653 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1654 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1655 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1656 (le16_to_cpu(fe->icbTag.flags) &
1657 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1658 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1660 fe->icbTag.flags = cpu_to_le16(icbflags);
/* Descriptor version: 3 for UDF revision >= 2.00 (NSR03), else 2 */
1661 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1662 fe->descTag.descVersion = cpu_to_le16(3);
1664 fe->descTag.descVersion = cpu_to_le16(2);
1665 fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1666 fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
/* CRC covers the descriptor body (header + EAs + alloc descs) sans tag */
1667 crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1668 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1669 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
/* Tag checksum over the 16 tag bytes, checksum byte zeroed beforehand */
1671 fe->descTag.tagChecksum = 0;
1672 for (i=0; i<16; i++)
1674 fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1676 /* write the data blocks */
1677 mark_buffer_dirty(bh);
/* Synchronous path — guarded by `if (do_sync)` in an elided line */
1680 sync_dirty_buffer(bh);
1681 if (buffer_req(bh) && !buffer_uptodate(bh))
1683 printk("IO error syncing udf inode [%s:%08lx]\n",
1684 inode->i_sb->s_id, inode->i_ino);
/*
 * udf_iget - obtain the in-core inode for a UDF logical-block address.
 * The inode-cache key is the physical block of the (extended) file entry;
 * on a cache miss the on-disk entry is read in via __udf_read_inode().
 * NOTE(review): elided listing — the NULL/error returns (failed lookup,
 * bad inode, out-of-range iput path) fall in the missing lines.
 */
1693 udf_iget(struct super_block *sb, kernel_lb_addr ino)
1695 unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1696 struct inode *inode = iget_locked(sb, block);
/* Freshly allocated cache entry: record the UDF location, then read it */
1701 if (inode->i_state & I_NEW) {
1702 memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
1703 __udf_read_inode(inode);
1704 unlock_new_inode(inode);
1707 if (is_bad_inode(inode))
/* Sanity check: the logical block must lie within its partition */
1710 if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
1711 udf_debug("block=%d, partition=%d out of range\n",
1712 ino.logicalBlockNum, ino.partitionReferenceNum);
1713 make_bad_inode(inode);
/*
 * udf_add_aext - append an extent descriptor (@eloc, @elen) at @epos.
 * If the current descriptor area cannot hold two more descriptors (the new
 * one plus a chain pointer), a fresh Allocation Extent Descriptor block is
 * allocated and linked in via an EXT_NEXT_EXTENT_ALLOCDECS descriptor.
 * Returns the extent type from udf_write_aext(); -1 on failure (the error
 * returns fall in elided lines).
 */
1724 int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
1725 kernel_lb_addr eloc, uint32_t elen, int inc)
1728 short_ad *sad = NULL;
1729 long_ad *lad = NULL;
1730 struct allocExtDesc *aed;
/* Descriptors live either inside the file entry's in-core image ... */
1735 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
/* ... or in an external allocation-extent block (elided `else` above) */
1737 ptr = epos->bh->b_data + epos->offset;
1739 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1740 adsize = sizeof(short_ad);
1741 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1742 adsize = sizeof(long_ad);
/* Not enough room for the new descriptor plus a chain pointer: grow */
1746 if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize)
1749 struct buffer_head *nbh;
1751 kernel_lb_addr obloc = epos->block;
/* Allocate the new AED block near the old one */
1753 if (!(epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1754 obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
1758 if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1764 memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1765 set_buffer_uptodate(nbh);
1767 mark_buffer_dirty_inode(nbh, inode);
1769 aed = (struct allocExtDesc *)(nbh->b_data);
/* Back-link is non-standard, so only written in non-strict mode */
1770 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1771 aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
/* If even one descriptor no longer fits in the old area, move the last
 * descriptor into the new block so the chain pointer can take its slot */
1772 if (epos->offset + adsize > inode->i_sb->s_blocksize)
1774 loffset = epos->offset;
1775 aed->lengthAllocDescs = cpu_to_le32(adsize);
1776 sptr = ptr - adsize;
1777 dptr = nbh->b_data + sizeof(struct allocExtDesc);
1778 memcpy(dptr, sptr, adsize);
1779 epos->offset = sizeof(struct allocExtDesc) + adsize;
/* Otherwise the chain pointer goes after the existing descriptors
 * (elided `else` above) */
1783 loffset = epos->offset + adsize;
1784 aed->lengthAllocDescs = cpu_to_le32(0);
1786 epos->offset = sizeof(struct allocExtDesc);
/* Account the chain-pointer descriptor in the old area's length field */
1790 aed = (struct allocExtDesc *)epos->bh->b_data;
1791 aed->lengthAllocDescs =
1792 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1796 UDF_I_LENALLOC(inode) += adsize;
1797 mark_inode_dirty(inode);
/* Stamp the new block with an AED tag (version per UDF revision) */
1800 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1801 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1802 epos->block.logicalBlockNum, sizeof(tag));
1804 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1805 epos->block.logicalBlockNum, sizeof(tag));
/* Write the chain pointer (type EXT_NEXT_EXTENT_ALLOCDECS) at sptr */
1806 switch (UDF_I_ALLOCTYPE(inode))
1808 case ICBTAG_FLAG_AD_SHORT:
1810 sad = (short_ad *)sptr;
1811 sad->extLength = cpu_to_le32(
1812 EXT_NEXT_EXTENT_ALLOCDECS |
1813 inode->i_sb->s_blocksize);
1814 sad->extPosition = cpu_to_le32(epos->block.logicalBlockNum);
1817 case ICBTAG_FLAG_AD_LONG:
1819 lad = (long_ad *)sptr;
1820 lad->extLength = cpu_to_le32(
1821 EXT_NEXT_EXTENT_ALLOCDECS |
1822 inode->i_sb->s_blocksize);
1823 lad->extLocation = cpu_to_lelb(epos->block);
1824 memset(lad->impUse, 0x00, sizeof(lad->impUse));
/* Refresh the old area's tag CRC/checksum and dirty it */
1830 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1831 udf_update_tag(epos->bh->b_data, loffset);
1833 udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
1834 mark_buffer_dirty_inode(epos->bh, inode);
1838 mark_inode_dirty(inode);
/* Finally write the caller's extent at the (possibly relocated) position */
1842 etype = udf_write_aext(inode, epos, eloc, elen, inc);
/* Bookkeeping: in-ICB descriptors grow UDF_I_LENALLOC ... */
1846 UDF_I_LENALLOC(inode) += adsize;
1847 mark_inode_dirty(inode);
/* ... external AED blocks grow their own lengthAllocDescs and re-tag */
1851 aed = (struct allocExtDesc *)epos->bh->b_data;
1852 aed->lengthAllocDescs =
1853 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1854 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1855 udf_update_tag(epos->bh->b_data, epos->offset + (inc ? 0 : adsize));
1857 udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
1858 mark_buffer_dirty_inode(epos->bh, inode);
/*
 * udf_write_aext - overwrite the extent descriptor at @epos with
 * (@eloc, @elen) and, when @inc, advance @epos past it.
 * Returns the extent type, i.e. the top two bits of @elen.
 */
1864 int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
1865 kernel_lb_addr eloc, uint32_t elen, int inc)
/* Target is either the in-core file-entry image or an external AED block
 * (the guarding if/else lines are elided from this listing) */
1871 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1873 ptr = epos->bh->b_data + epos->offset;
1875 switch (UDF_I_ALLOCTYPE(inode))
1877 case ICBTAG_FLAG_AD_SHORT:
1879 short_ad *sad = (short_ad *)ptr;
1880 sad->extLength = cpu_to_le32(elen);
1881 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1882 adsize = sizeof(short_ad);
1885 case ICBTAG_FLAG_AD_LONG:
1887 long_ad *lad = (long_ad *)ptr;
1888 lad->extLength = cpu_to_le32(elen);
1889 lad->extLocation = cpu_to_lelb(eloc);
1890 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1891 adsize = sizeof(long_ad);
/* External block: refresh the AED tag CRC when the format allows it,
 * then dirty the buffer; in-ICB case just dirties the inode */
1900 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1902 struct allocExtDesc *aed = (struct allocExtDesc *)epos->bh->b_data;
1903 udf_update_tag(epos->bh->b_data,
1904 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1906 mark_buffer_dirty_inode(epos->bh, inode);
1909 mark_inode_dirty(inode);
/* Advance past the descriptor just written when requested */
1912 epos->offset += adsize;
1913 return (elen >> 30);
/*
 * udf_next_aext - fetch the next *data* extent at @epos, transparently
 * following EXT_NEXT_EXTENT_ALLOCDECS chain pointers into further
 * allocation-extent blocks.  Returns the extent type, or -1 at end/error.
 * NOTE(review): releasing the previous epos->bh presumably happens in the
 * elided lines before udf_tread — confirm against the full source.
 */
1916 int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
1917 kernel_lb_addr *eloc, uint32_t *elen, int inc)
1921 while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
1922 (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
/* Chain pointer: jump into the referenced allocation-extent block */
1924 epos->block = *eloc;
1925 epos->offset = sizeof(struct allocExtDesc);
1927 if (!(epos->bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, epos->block, 0))))
1929 udf_debug("reading block %d failed!\n",
1930 udf_get_lb_pblock(inode->i_sb, epos->block, 0));
/*
 * udf_current_aext - decode the single extent descriptor at @epos into
 * (*eloc, *elen) and return its type; -1 at the end of the descriptor
 * area or on an unsupported allocation type.
 * NOTE(review): the if/else guards selecting the in-ICB vs. external
 * branches are elided from this listing.
 */
1938 int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
1939 kernel_lb_addr *eloc, uint32_t *elen, int inc)
/* In-ICB case: descriptors follow the EAs inside UDF_I_DATA */
1948 epos->offset = udf_file_entry_alloc_offset(inode);
1949 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1950 alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
/* External AED block: length comes from the block's own header */
1955 epos->offset = sizeof(struct allocExtDesc);
1956 ptr = epos->bh->b_data + epos->offset;
1957 alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->lengthAllocDescs);
1960 switch (UDF_I_ALLOCTYPE(inode))
1962 case ICBTAG_FLAG_AD_SHORT:
1966 if (!(sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc)))
/* short_ad has no partition field — it is implicitly the inode's own */
1969 etype = le32_to_cpu(sad->extLength) >> 30;
1970 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1971 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1972 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1975 case ICBTAG_FLAG_AD_LONG:
1979 if (!(lad = udf_get_filelongad(ptr, alen, &epos->offset, inc)))
1982 etype = le32_to_cpu(lad->extLength) >> 30;
1983 *eloc = lelb_to_cpu(lad->extLocation);
1984 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1989 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
/*
 * udf_insert_aext - insert extent (@neloc, @nelen) at @epos by rippling:
 * each existing descriptor is overwritten with the pending one and itself
 * becomes the next pending value; the last one is appended via
 * udf_add_aext.  Returns the type bits of the last value written.
 */
1998 udf_insert_aext(struct inode *inode, struct extent_position epos,
1999 kernel_lb_addr neloc, uint32_t nelen)
2001 kernel_lb_addr oeloc;
2008 while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1)
2010 udf_write_aext(inode, &epos, neloc, nelen, 1);
/* The displaced descriptor (type re-packed into bits 30-31) is carried
 * forward as the next value to insert */
2013 nelen = (etype << 30) | oelen;
2015 udf_add_aext(inode, &epos, neloc, nelen, 1);
2017 return (nelen >> 30);
/*
 * udf_delete_aext - remove the extent at @epos by shifting every following
 * descriptor back one slot, then terminating (and possibly freeing an
 * emptied allocation-extent block).  Returns the type bits of @elen.
 * NOTE(review): elided listing — oepos initialisation, brelse calls and
 * several if/else/brace lines fall in the missing lines.
 */
2020 int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
2021 kernel_lb_addr eloc, uint32_t elen)
2023 struct extent_position oepos;
2026 struct allocExtDesc *aed;
2034 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
2035 adsize = sizeof(short_ad);
2036 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
2037 adsize = sizeof(long_ad);
/* Step past the extent being deleted; -1 means it was the last one */
2042 if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
/* Copy each subsequent extent back into the slot one position earlier */
2045 while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1)
2047 udf_write_aext(inode, &oepos, eloc, (etype << 30) | elen, 1);
2048 if (oepos.bh != epos.bh)
2050 oepos.block = epos.block;
2054 oepos.offset = epos.offset - adsize;
/* Zeroed lb_addr + elen acts as the list terminator written below */
2057 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
/* Case 1: the shift emptied the last allocation-extent block entirely —
 * free it.  The two terminator writes are intentional, not a duplicate:
 * they clear both the vacated descriptor slot and the chain-pointer slot
 * that referenced the now-freed block (verify against full source). */
2060 if (epos.bh != oepos.bh)
2062 udf_free_blocks(inode->i_sb, inode, epos.block, 0, 1);
2063 udf_write_aext(inode, &oepos, eloc, elen, 1);
2064 udf_write_aext(inode, &oepos, eloc, elen, 1);
/* Two descriptors disappeared: the deleted one and the chain pointer */
2067 UDF_I_LENALLOC(inode) -= (adsize * 2);
2068 mark_inode_dirty(inode);
2072 aed = (struct allocExtDesc *)oepos.bh->b_data;
2073 aed->lengthAllocDescs =
2074 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
2075 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
2076 udf_update_tag(oepos.bh->b_data, oepos.offset - (2*adsize));
2078 udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
2079 mark_buffer_dirty_inode(oepos.bh, inode);
/* Case 2: same block — write one terminator, shrink by one descriptor */
2084 udf_write_aext(inode, &oepos, eloc, elen, 1);
2087 UDF_I_LENALLOC(inode) -= adsize;
2088 mark_inode_dirty(inode);
2092 aed = (struct allocExtDesc *)oepos.bh->b_data;
2093 aed->lengthAllocDescs =
2094 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
2095 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
2096 udf_update_tag(oepos.bh->b_data, epos.offset - adsize);
2098 udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
2099 mark_buffer_dirty_inode(oepos.bh, inode);
2105 return (elen >> 30);
/*
 * inode_bmap - walk the inode's extent list until the extent covering file
 * block @block is found.  On success *eloc/*elen describe that extent and
 * *offset is the block offset within it; returns the extent type.  Walking
 * off the end returns -1 (the `return -1;` lines are elided here).
 */
2108 int8_t inode_bmap(struct inode *inode, sector_t block, struct extent_position *pos,
2109 kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset)
/* Byte position of the requested block within the file */
2111 loff_t lbcount = 0, bcount = (loff_t)block << inode->i_sb->s_blocksize_bits;
/* NOTE(review): sector_t is unsigned, so a `block < 0` guard (the elided
 * condition above this printk) can never fire — likely dead code. */
2116 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
2121 pos->block = UDF_I_LOCATION(inode);
/* Accumulate extent lengths until lbcount passes the target byte offset */
2127 if ((etype = udf_next_aext(inode, pos, eloc, elen, 1)) == -1)
/* Ran off the end: report how far past EOF the request was ... */
2129 *offset = (bcount - lbcount) >> inode->i_sb->s_blocksize_bits;
/* ... and remember the total mapped length discovered during the walk */
2130 UDF_I_LENEXTENTS(inode) = lbcount;
2134 } while (lbcount <= bcount);
/* Found: offset of @block inside the extent just consumed */
2136 *offset = (bcount + *elen - lbcount) >> inode->i_sb->s_blocksize_bits;
2141 long udf_block_map(struct inode *inode, sector_t block)
2143 kernel_lb_addr eloc;
2146 struct extent_position epos = { NULL, 0, { 0, 0}};
2151 if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30))
2152 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
2159 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2160 return udf_fixed_to_variable(ret);