1 /*
2  * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it would be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11  *
12  * Further, this software is distributed without any warranty that it is
13  * free of the rightful claim of any third person regarding infringement
14  * or the like.  Any license provided herein, whether implied or
15  * otherwise, applies only to this software file.  Patent licenses, if
16  * any, provided herein do not apply to combinations of this program with
17  * other software, or any other product whatsoever.
18  *
19  * You should have received a copy of the GNU General Public License along
20  * with this program; if not, write the Free Software Foundation, Inc., 59
21  * Temple Place - Suite 330, Boston MA 02111-1307, USA.
22  *
23  * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24  * Mountain View, CA  94043, or:
25  *
26  * http://www.sgi.com
27  *
28  * For further information regarding this notice, see:
29  *
30  * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31  */
32 /*
33  *  fs/xfs/linux/xfs_lrw.c (Linux Read Write stuff)
34  *
35  */
36
37 #include "xfs.h"
38
39 #include "xfs_fs.h"
40 #include "xfs_inum.h"
41 #include "xfs_log.h"
42 #include "xfs_trans.h"
43 #include "xfs_sb.h"
44 #include "xfs_ag.h"
45 #include "xfs_dir.h"
46 #include "xfs_dir2.h"
47 #include "xfs_alloc.h"
48 #include "xfs_dmapi.h"
49 #include "xfs_quota.h"
50 #include "xfs_mount.h"
51 #include "xfs_alloc_btree.h"
52 #include "xfs_bmap_btree.h"
53 #include "xfs_ialloc_btree.h"
54 #include "xfs_btree.h"
55 #include "xfs_ialloc.h"
56 #include "xfs_attr_sf.h"
57 #include "xfs_dir_sf.h"
58 #include "xfs_dir2_sf.h"
59 #include "xfs_dinode.h"
60 #include "xfs_inode.h"
61 #include "xfs_bmap.h"
62 #include "xfs_bit.h"
63 #include "xfs_rtalloc.h"
64 #include "xfs_error.h"
65 #include "xfs_itable.h"
66 #include "xfs_rw.h"
67 #include "xfs_acl.h"
68 #include "xfs_cap.h"
69 #include "xfs_mac.h"
70 #include "xfs_attr.h"
71 #include "xfs_inode_item.h"
72 #include "xfs_buf_item.h"
73 #include "xfs_utils.h"
74 #include "xfs_iomap.h"
75
76 #include <linux/capability.h>
77 #include <linux/writeback.h>
78
79
80 #if defined(XFS_RW_TRACE)
81 void
82 xfs_rw_enter_trace(
83         int                     tag,
84         xfs_iocore_t            *io,
85         void                    *data,
86         size_t                  segs,
87         loff_t                  offset,
88         int                     ioflags)
89 {
90         xfs_inode_t     *ip = XFS_IO_INODE(io);
91
92         if (ip->i_rwtrace == NULL)
93                 return;
94         ktrace_enter(ip->i_rwtrace,
95                 (void *)(unsigned long)tag,
96                 (void *)ip,
97                 (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
98                 (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
99                 (void *)data,
100                 (void *)((unsigned long)segs),
101                 (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
102                 (void *)((unsigned long)(offset & 0xffffffff)),
103                 (void *)((unsigned long)ioflags),
104                 (void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
105                 (void *)((unsigned long)(io->io_new_size & 0xffffffff)),
106                 (void *)NULL,
107                 (void *)NULL,
108                 (void *)NULL,
109                 (void *)NULL,
110                 (void *)NULL);
111 }
112
113 void
114 xfs_inval_cached_trace(
115         xfs_iocore_t    *io,
116         xfs_off_t       offset,
117         xfs_off_t       len,
118         xfs_off_t       first,
119         xfs_off_t       last)
120 {
121         xfs_inode_t     *ip = XFS_IO_INODE(io);
122
123         if (ip->i_rwtrace == NULL)
124                 return;
125         ktrace_enter(ip->i_rwtrace,
126                 (void *)(__psint_t)XFS_INVAL_CACHED,
127                 (void *)ip,
128                 (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
129                 (void *)((unsigned long)(offset & 0xffffffff)),
130                 (void *)((unsigned long)((len >> 32) & 0xffffffff)),
131                 (void *)((unsigned long)(len & 0xffffffff)),
132                 (void *)((unsigned long)((first >> 32) & 0xffffffff)),
133                 (void *)((unsigned long)(first & 0xffffffff)),
134                 (void *)((unsigned long)((last >> 32) & 0xffffffff)),
135                 (void *)((unsigned long)(last & 0xffffffff)),
136                 (void *)NULL,
137                 (void *)NULL,
138                 (void *)NULL,
139                 (void *)NULL,
140                 (void *)NULL,
141                 (void *)NULL);
142 }
143 #endif
144
145 /*
146  *      xfs_iozero
147  *
148  *      xfs_iozero clears the specified range of buffer supplied,
149  *      and marks all the affected blocks as valid and modified.  If
150  *      an affected block is not allocated, it will be allocated.  If
151  *      an affected block is not completely overwritten, and is not
152  *      valid before the operation, it will be read from disk before
153  *      being partially zeroed.
154  */
155 STATIC int
156 xfs_iozero(
157         struct inode            *ip,    /* inode                        */
158         loff_t                  pos,    /* offset in file               */
159         size_t                  count,  /* size of data to zero         */
160         loff_t                  end_size)       /* max file size to set */
161 {
162         unsigned                bytes;
163         struct page             *page;
164         struct address_space    *mapping;
165         char                    *kaddr;
166         int                     status;
167
168         mapping = ip->i_mapping;
169         do {
170                 unsigned long index, offset;
171
172                 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
173                 index = pos >> PAGE_CACHE_SHIFT;
174                 bytes = PAGE_CACHE_SIZE - offset;
175                 if (bytes > count)
176                         bytes = count;
177
178                 status = -ENOMEM;
179                 page = grab_cache_page(mapping, index);
180                 if (!page)
181                         break;
182
183                 kaddr = kmap(page);
184                 status = mapping->a_ops->prepare_write(NULL, page, offset,
185                                                         offset + bytes);
186                 if (status) {
187                         goto unlock;
188                 }
189
190                 memset((void *) (kaddr + offset), 0, bytes);
191                 flush_dcache_page(page);
192                 status = mapping->a_ops->commit_write(NULL, page, offset,
193                                                         offset + bytes);
194                 if (!status) {
195                         pos += bytes;
196                         count -= bytes;
197                         if (pos > i_size_read(ip))
198                                 i_size_write(ip, pos < end_size ? pos : end_size);
199                 }
200
201 unlock:
202                 kunmap(page);
203                 unlock_page(page);
204                 page_cache_release(page);
205                 if (status)
206                         break;
207         } while (count);
208
209         return (-status);
210 }
211
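/*
 * xfs_read: behavior-layer read entry point.  Validates the iovec and any
 * direct I/O alignment against the target device, takes i_sem (direct
 * reads only) and the shared iolock, sends a DMAPI read event when
 * enabled, then hands the request to __generic_file_aio_read().
 */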
212 ssize_t                 /* bytes read, or (-)  error */
213 xfs_read(
214         bhv_desc_t              *bdp,
215         struct kiocb            *iocb,
216         const struct iovec      *iovp,
217         unsigned int            segs,
218         loff_t                  *offset,
219         int                     ioflags,
220         cred_t                  *credp)
221 {
222         struct file             *file = iocb->ki_filp;
223         struct inode            *inode = file->f_mapping->host;
224         size_t                  size = 0;
225         ssize_t                 ret;
226         xfs_fsize_t             n;
227         xfs_inode_t             *ip;
228         xfs_mount_t             *mp;
229         vnode_t                 *vp;
230         unsigned long           seg;
231
232         ip = XFS_BHVTOI(bdp);
233         vp = BHV_TO_VNODE(bdp);
234         mp = ip->i_mount;
235
236         XFS_STATS_INC(xs_read_calls);
237
238         /* START copy & waste from filemap.c */
239         for (seg = 0; seg < segs; seg++) {
240                 const struct iovec *iv = &iovp[seg];
241
242                 /*
243                  * If any segment has a negative length, or the cumulative
244                  * length ever wraps negative then return -EINVAL.
245                  */
246                 size += iv->iov_len;
247                 if (unlikely((ssize_t)(size|iv->iov_len) < 0))
248                         return XFS_ERROR(-EINVAL);
249         }
250         /* END copy & waste from filemap.c */
251
252         if (unlikely(ioflags & IO_ISDIRECT)) {
253                 xfs_buftarg_t   *target =
254                         (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
255                                 mp->m_rtdev_targp : mp->m_ddev_targp;
256                 if ((*offset & target->pbr_smask) ||
257                     (size & target->pbr_smask)) {
258                         if (*offset == ip->i_d.di_size) {
259                                 return (0);
260                         }
261                         return -XFS_ERROR(EINVAL);
262                 }
263         }
264
265         n = XFS_MAXIOFFSET(mp) - *offset;
266         if ((n <= 0) || (size == 0))
267                 return 0;
268
269         if (n < size)
270                 size = n;
271
272         if (XFS_FORCED_SHUTDOWN(mp)) {
273                 return -EIO;
274         }
275
276         if (unlikely(ioflags & IO_ISDIRECT))
277                 down(&inode->i_sem);
278         xfs_ilock(ip, XFS_IOLOCK_SHARED);
279
280         if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
281             !(ioflags & IO_INVIS)) {
282                 vrwlock_t locktype = VRWLOCK_READ;
283                 int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
284
285                 ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
286                                         BHV_TO_VNODE(bdp), *offset, size,
287                                         dmflags, &locktype);
288                 if (ret) {
289                         xfs_iunlock(ip, XFS_IOLOCK_SHARED);
290                         goto unlock_isem;
291                 }
292         }
293
294         xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
295                                 (void *)iovp, segs, *offset, ioflags);
296         ret = __generic_file_aio_read(iocb, iovp, segs, offset);
297         if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
298                 ret = wait_on_sync_kiocb(iocb);
299         if (ret > 0)
300                 XFS_STATS_ADD(xs_read_bytes, ret);
301
302         xfs_iunlock(ip, XFS_IOLOCK_SHARED);
303
304         if (likely(!(ioflags & IO_INVIS)))
305                 xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
306
307 unlock_isem:
308         if (unlikely(ioflags & IO_ISDIRECT))
309                 up(&inode->i_sem);
310         return ret;
311 }
312
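/*
 * xfs_sendfile: behavior-layer sendfile entry point.  Clamps the count so
 * it does not extend past the maximum file offset, takes the shared
 * iolock, sends a DMAPI read event when enabled, and pushes the data to
 * the target via generic_file_sendfile().
 */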
313 ssize_t
314 xfs_sendfile(
315         bhv_desc_t              *bdp,
316         struct file             *filp,
317         loff_t                  *offset,
318         int                     ioflags,
319         size_t                  count,
320         read_actor_t            actor,
321         void                    *target,
322         cred_t                  *credp)
323 {
324         ssize_t                 ret;
325         xfs_fsize_t             n;
326         xfs_inode_t             *ip;
327         xfs_mount_t             *mp;
328         vnode_t                 *vp;
329
330         ip = XFS_BHVTOI(bdp);
331         vp = BHV_TO_VNODE(bdp);
332         mp = ip->i_mount;
333
334         XFS_STATS_INC(xs_read_calls);
335
336         n = XFS_MAXIOFFSET(mp) - *offset;
337         if ((n <= 0) || (count == 0))
338                 return 0;
339
340         if (n < count)
341                 count = n;
342
343         if (XFS_FORCED_SHUTDOWN(ip->i_mount))
344                 return -EIO;
345
346         xfs_ilock(ip, XFS_IOLOCK_SHARED);
347
348         if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
349             (!(ioflags & IO_INVIS))) {
350                 vrwlock_t locktype = VRWLOCK_READ;
351                 int error;
352
353                 error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp), *offset, count,
354                                       FILP_DELAY_FLAG(filp), &locktype);
355                 if (error) {
356                         xfs_iunlock(ip, XFS_IOLOCK_SHARED);
357                         return -error;
358                 }
359         }
360         xfs_rw_enter_trace(XFS_SENDFILE_ENTER, &ip->i_iocore,
361                    (void *)(unsigned long)target, count, *offset, ioflags);
362         ret = generic_file_sendfile(filp, offset, count, actor, target);
363
364         xfs_iunlock(ip, XFS_IOLOCK_SHARED);
365
366         if (ret > 0)
367                 XFS_STATS_ADD(xs_read_bytes, ret);
368
369         if (likely(!(ioflags & IO_INVIS)))
370                 xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
371
372         return ret;
373 }
374
375 /*
376  * This routine is called to handle zeroing any space in the last
377  * block of the file that is beyond the EOF.  We do this since the
378  * size is being increased without writing anything to that block
379  * and we don't want anyone to read the garbage on the disk.
380  */
381 STATIC int                              /* error (positive) */
382 xfs_zero_last_block(
383         struct inode    *ip,
384         xfs_iocore_t    *io,
385         xfs_off_t       offset,
386         xfs_fsize_t     isize,
387         xfs_fsize_t     end_size)
388 {
389         xfs_fileoff_t   last_fsb;
390         xfs_mount_t     *mp;
391         int             nimaps;
392         int             zero_offset;
393         int             zero_len;
394         int             isize_fsb_offset;
395         int             error = 0;
396         xfs_bmbt_irec_t imap;
397         loff_t          loff;
398         size_t          lsize;
399
400         ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
401         ASSERT(offset > isize);
402
403         mp = io->io_mount;
404
405         isize_fsb_offset = XFS_B_FSB_OFFSET(mp, isize);
406         if (isize_fsb_offset == 0) {
407                 /*
408                  * There are no extra bytes in the last block on disk to
409                  * zero, so return.
410                  */
411                 return 0;
412         }
413
414         last_fsb = XFS_B_TO_FSBT(mp, isize);
415         nimaps = 1;
416         error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
417                           &nimaps, NULL);
418         if (error) {
419                 return error;
420         }
421         ASSERT(nimaps > 0);
422         /*
423          * If the block underlying isize is just a hole, then there
424          * is nothing to zero.
425          */
426         if (imap.br_startblock == HOLESTARTBLOCK) {
427                 return 0;
428         }
429         /*
430          * Zero the part of the last block beyond the EOF, and write it
431          * out sync.  We need to drop the ilock while we do this so we
432          * don't deadlock when the buffer cache calls back to us.
433          */
434         XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);
435         loff = XFS_FSB_TO_B(mp, last_fsb);
436         lsize = XFS_FSB_TO_B(mp, 1);
437
438         zero_offset = isize_fsb_offset;
439         zero_len = mp->m_sb.sb_blocksize - isize_fsb_offset;
440
441         error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);
442
443         XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
444         ASSERT(error >= 0);
445         return error;
446 }
447
448 /*
449  * Zero any on disk space between the current EOF and the new,
450  * larger EOF.  This handles the normal case of zeroing the remainder
451  * of the last block in the file and the unusual case of zeroing blocks
452  * out beyond the size of the file.  This second case only happens
453  * with fixed size extents and when the system crashes before the inode
454  * size was updated but after blocks were allocated.  If fill is set,
455  * then any holes in the range are filled and zeroed.  If not, the holes
456  * are left alone as holes.
457  */
458
459 int                                     /* error (positive) */
460 xfs_zero_eof(
461         vnode_t         *vp,
462         xfs_iocore_t    *io,
463         xfs_off_t       offset,         /* starting I/O offset */
464         xfs_fsize_t     isize,          /* current inode size */
465         xfs_fsize_t     end_size)       /* terminal inode size */
466 {
467         struct inode    *ip = LINVFS_GET_IP(vp);
468         xfs_fileoff_t   start_zero_fsb;
469         xfs_fileoff_t   end_zero_fsb;
470         xfs_fileoff_t   prev_zero_fsb;
471         xfs_fileoff_t   zero_count_fsb;
472         xfs_fileoff_t   last_fsb;
473         xfs_extlen_t    buf_len_fsb;
474         xfs_extlen_t    prev_zero_count;
475         xfs_mount_t     *mp;
476         int             nimaps;
477         int             error = 0;
478         xfs_bmbt_irec_t imap;
479         loff_t          loff;
480         size_t          lsize;
481
482         ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
483         ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
484
485         mp = io->io_mount;
486
487         /*
488          * First handle zeroing the block on which isize resides.
489          * We only zero a part of that block so it is handled specially.
490          */
491         error = xfs_zero_last_block(ip, io, offset, isize, end_size);
492         if (error) {
493                 ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
494                 ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
495                 return error;
496         }
497
498         /*
499          * Calculate the range between the new size and the old
500          * where blocks needing to be zeroed may exist.  To get the
501          * block where the last byte in the file currently resides,
502          * we need to subtract one from the size and truncate back
503          * to a block boundary.  We subtract 1 in case the size is
504          * exactly on a block boundary.
505          */
506         last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
507         start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
508         end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
509         ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
510         if (last_fsb == end_zero_fsb) {
511                 /*
512                  * The size was only incremented on its last block.
513                  * We took care of that above, so just return.
514                  */
515                 return 0;
516         }
517
518         ASSERT(start_zero_fsb <= end_zero_fsb);
519         prev_zero_fsb = NULLFILEOFF;
520         prev_zero_count = 0;
521         while (start_zero_fsb <= end_zero_fsb) {
522                 nimaps = 1;
523                 zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
524                 error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
525                                   0, NULL, 0, &imap, &nimaps, NULL);
526                 if (error) {
527                         ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
528                         ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
529                         return error;
530                 }
531                 ASSERT(nimaps > 0);
532
533                 if (imap.br_state == XFS_EXT_UNWRITTEN ||
534                     imap.br_startblock == HOLESTARTBLOCK) {
535                         /*
536                          * This loop handles initializing pages that were
537                          * partially initialized by the code below this
538                          * loop. It basically zeroes the part of the page
539                          * that sits on a hole and sets the page as P_HOLE
540                          * and calls remapf if it is a mapped file.
541                          */
542                         prev_zero_fsb = NULLFILEOFF;
543                         prev_zero_count = 0;
544                         start_zero_fsb = imap.br_startoff +
545                                          imap.br_blockcount;
546                         ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
547                         continue;
548                 }
549
550                 /*
551                  * There are blocks in the range requested.
552                  * Zero them a single write at a time.  We actually
553                  * don't zero the entire range returned if it is
554                  * too big and simply loop around to get the rest.
555                  * That is not the most efficient thing to do, but it
556                  * is simple and this path should not be exercised often.
557                  */
558                 buf_len_fsb = XFS_FILBLKS_MIN(imap.br_blockcount,
559                                               mp->m_writeio_blocks << 8);
560                 /*
561                  * Drop the inode lock while we're doing the I/O.
562                  * We'll still have the iolock to protect us.
563                  */
564                 XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
565
566                 loff = XFS_FSB_TO_B(mp, start_zero_fsb);
567                 lsize = XFS_FSB_TO_B(mp, buf_len_fsb);
568
569                 error = xfs_iozero(ip, loff, lsize, end_size);
570
571                 if (error) {
572                         goto out_lock;
573                 }
574
575                 prev_zero_fsb = start_zero_fsb;
576                 prev_zero_count = buf_len_fsb;
577                 start_zero_fsb = imap.br_startoff + buf_len_fsb;
578                 ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
579
580                 XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
581         }
582
583         return 0;
584
585 out_lock:
586
587         XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
588         ASSERT(error >= 0);
589         return error;
590 }
591
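/*
 * xfs_write: behavior-layer write entry point.  Validates the iovec,
 * chooses between i_sem plus the exclusive iolock (buffered and cached
 * direct writes) and the shared iolock alone (uncached direct writes),
 * sends DMAPI write/nospace events when enabled, zeroes any gap between
 * the old EOF and the new write offset, and dispatches to the generic
 * direct or buffered write paths, falling back to buffered I/O when a
 * direct write hits a hole.
 */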
592 ssize_t                         /* bytes written, or (-) error */
593 xfs_write(
594         bhv_desc_t              *bdp,
595         struct kiocb            *iocb,
596         const struct iovec      *iovp,
597         unsigned int            nsegs,
598         loff_t                  *offset,
599         int                     ioflags,
600         cred_t                  *credp)
601 {
602         struct file             *file = iocb->ki_filp;
603         struct address_space    *mapping = file->f_mapping;
604         struct inode            *inode = mapping->host;
605         unsigned long           segs = nsegs;
606         xfs_inode_t             *xip;
607         xfs_mount_t             *mp;
608         ssize_t                 ret = 0, error = 0;
609         xfs_fsize_t             isize, new_size;
610         xfs_iocore_t            *io;
611         vnode_t                 *vp;
612         unsigned long           seg;
613         int                     iolock;
614         int                     eventsent = 0;
615         vrwlock_t               locktype;
616         size_t                  ocount = 0, count;
617         loff_t                  pos;
618         int                     need_isem = 1, need_flush = 0;
619
620         XFS_STATS_INC(xs_write_calls);
621
622         vp = BHV_TO_VNODE(bdp);
623         xip = XFS_BHVTOI(bdp);
624
625         for (seg = 0; seg < segs; seg++) {
626                 const struct iovec *iv = &iovp[seg];
627
628                 /*
629                  * If any segment has a negative length, or the cumulative
630                  * length ever wraps negative then return -EINVAL.
631                  */
632                 ocount += iv->iov_len;
633                 if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
634                         return -EINVAL;
635                 if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
636                         continue;
637                 if (seg == 0)
638                         return -EFAULT;
639                 segs = seg;
640                 ocount -= iv->iov_len;  /* This segment is no good */
641                 break;
642         }
643
644         count = ocount;
645         pos = *offset;
646
647         if (count == 0)
648                 return 0;
649
650         io = &xip->i_iocore;
651         mp = io->io_mount;
652
653         if (XFS_FORCED_SHUTDOWN(mp))
654                 return -EIO;
655
656         fs_check_frozen(vp->v_vfsp, SB_FREEZE_WRITE);
657
658         if (ioflags & IO_ISDIRECT) {
659                 xfs_buftarg_t   *target =
660                         (xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
661                                 mp->m_rtdev_targp : mp->m_ddev_targp;
662
663                 if (ioflags & IO_ISAIO)
664                         return XFS_ERROR(-ENOSYS);
665
666                 if ((pos & target->pbr_smask) || (count & target->pbr_smask))
667                         return XFS_ERROR(-EINVAL);
668
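                /*
                 * An uncached direct write below EOF can run with just the
                 * shared iolock; cached pages must be flushed and
                 * invalidated before the direct I/O is issued.
                 */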
669                 if (!VN_CACHED(vp) && pos < i_size_read(inode))
670                         need_isem = 0;
671
672                 if (VN_CACHED(vp))
673                         need_flush = 1;
674         }
675
676 relock:
677         if (need_isem) {
678                 iolock = XFS_IOLOCK_EXCL;
679                 locktype = VRWLOCK_WRITE;
680
681                 down(&inode->i_sem);
682         } else {
683                 iolock = XFS_IOLOCK_SHARED;
684                 locktype = VRWLOCK_WRITE_DIRECT;
685         }
686
687         xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
688
689         isize = i_size_read(inode);
690
691         if (file->f_flags & O_APPEND)
692                 *offset = isize;
693
694 start:
695         error = -generic_write_checks(file, &pos, &count,
696                                         S_ISBLK(inode->i_mode));
697         if (error) {
698                 xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
699                 goto out_unlock_isem;
700         }
701
702         new_size = pos + count;
703         if (new_size > isize)
704                 io->io_new_size = new_size;
705
706         if ((DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
707             !(ioflags & IO_INVIS) && !eventsent)) {
708                 loff_t          savedsize = pos;
709                 int             dmflags = FILP_DELAY_FLAG(file);
710
711                 if (need_isem)
712                         dmflags |= DM_FLAGS_ISEM;
713
714                 xfs_iunlock(xip, XFS_ILOCK_EXCL);
715                 error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
716                                       pos, count,
717                                       dmflags, &locktype);
718                 if (error) {
719                         xfs_iunlock(xip, iolock);
720                         goto out_unlock_isem;
721                 }
722                 xfs_ilock(xip, XFS_ILOCK_EXCL);
723                 eventsent = 1;
724
725                 /*
726                  * The iolock was dropped and reacquired in XFS_SEND_DATA
727                  * so we have to recheck the size when appending.
728                  * We will only "goto start;" once, since having sent the
729                  * event prevents another call to XFS_SEND_DATA, which is
730                  * what allows the size to change in the first place.
731                  */
732                 if ((file->f_flags & O_APPEND) && savedsize != isize) {
733                         pos = isize = xip->i_d.di_size;
734                         goto start;
735                 }
736         }
737
738         /*
739          * On Linux, generic_file_write updates the times even if
740          * no data is copied in so long as the write had a size.
741          *
742          * We must update xfs' times since revalidate will overcopy xfs.
743          */
744         if (!(ioflags & IO_INVIS)) {
745                 xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
746                 inode_update_time(inode, 1);
747         }
748
749         /*
750          * If the offset is beyond the size of the file, we have a couple
751          * of things to do. First, if there is already space allocated
752          * we need to either create holes or zero the disk or ...
753          *
754          * If there is a page where the previous size lands, we need
755          * to zero it out up to the new size.
756          */
757
758         if (pos > isize) {
759                 error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, pos,
760                                         isize, pos + count);
761                 if (error) {
762                         xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
763                         goto out_unlock_isem;
764                 }
765         }
766         xfs_iunlock(xip, XFS_ILOCK_EXCL);
767
768         /*
769          * If we're writing the file then make sure to clear the
770          * setuid and setgid bits if the process is not being run
771          * by root.  This keeps people from modifying setuid and
772          * setgid binaries.
773          */
774
775         if (((xip->i_d.di_mode & S_ISUID) ||
776             ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
777                 (S_ISGID | S_IXGRP))) &&
778              !capable(CAP_FSETID)) {
779                 error = xfs_write_clear_setuid(xip);
780                 if (likely(!error))
781                         error = -remove_suid(file->f_dentry);
782                 if (unlikely(error)) {
783                         xfs_iunlock(xip, iolock);
784                         goto out_unlock_isem;
785                 }
786         }
787
788 retry:
789         /* We can write back this queue in page reclaim */
790         current->backing_dev_info = mapping->backing_dev_info;
791
792         if ((ioflags & IO_ISDIRECT)) {
793                 if (need_flush) {
794                         xfs_inval_cached_trace(io, pos, -1,
795                                         ctooff(offtoct(pos)), -1);
796                         VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(pos)),
797                                         -1, FI_REMAPF_LOCKED);
798                 }
799
800                 if (need_isem) {
801                         /* demote the lock now the cached pages are gone */
802                         XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
803                         up(&inode->i_sem);
804
805                         iolock = XFS_IOLOCK_SHARED;
806                         locktype = VRWLOCK_WRITE_DIRECT;
807                         need_isem = 0;
808                 }
809
810                 xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs,
811                                 *offset, ioflags);
812                 ret = generic_file_direct_write(iocb, iovp,
813                                 &segs, pos, offset, count, ocount);
814
815                 /*
816                  * direct-io write to a hole: fall through to buffered I/O
817                  * for completing the rest of the request.
818                  */
819                 if (ret >= 0 && ret != count) {
820                         XFS_STATS_ADD(xs_write_bytes, ret);
821
822                         pos += ret;
823                         count -= ret;
824
825                         need_isem = 1;
826                         ioflags &= ~IO_ISDIRECT;
827                         xfs_iunlock(xip, iolock);
828                         goto relock;
829                 }
830         } else {
831                 xfs_rw_enter_trace(XFS_WRITE_ENTER, io, (void *)iovp, segs,
832                                 *offset, ioflags);
833                 ret = generic_file_buffered_write(iocb, iovp, segs,
834                                 pos, offset, count, ret);
835         }
836
837         current->backing_dev_info = NULL;
838
839         if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
840                 ret = wait_on_sync_kiocb(iocb);
841
842         if ((ret == -ENOSPC) &&
843             DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) &&
844             !(ioflags & IO_INVIS)) {
845
846                 xfs_rwunlock(bdp, locktype);
847                 if (need_isem)
848                         up(&inode->i_sem);
849                 error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
850                                 DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
851                                 0, 0, 0); /* Delay flag intentionally  unused */
852                 if (error)
853                         goto out_nounlocks;
854                 if (need_isem)
855                         down(&inode->i_sem);
856                 xfs_rwlock(bdp, locktype);
857                 pos = xip->i_d.di_size;
858                 ret = 0;
859                 goto retry;
860         }
861
862         if (*offset > xip->i_d.di_size) {
863                 xfs_ilock(xip, XFS_ILOCK_EXCL);
864                 if (*offset > xip->i_d.di_size) {
865                         xip->i_d.di_size = *offset;
866                         i_size_write(inode, *offset);
867                         xip->i_update_core = 1;
868                         xip->i_update_size = 1;
869                 }
870                 xfs_iunlock(xip, XFS_ILOCK_EXCL);
871         }
872
873         error = -ret;
874         if (ret <= 0)
875                 goto out_unlock_internal;
876
877         XFS_STATS_ADD(xs_write_bytes, ret);
878
879         /* Handle various SYNC-type writes */
880         if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
881                 /*
882                  * If we're treating this as O_DSYNC and we have not updated the
883                  * size, force the log.
884                  */
885                 if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) &&
886                     !(xip->i_update_size)) {
887                         xfs_inode_log_item_t    *iip = xip->i_itemp;
888
889                         /*
890                          * If an allocation transaction occurred
891                          * without extending the size, then we have to force
892                          * the log up to the proper point to ensure that the
893                          * allocation is permanent.  We can't count on
894                          * the fact that buffered writes lock out direct I/O
895                          * writes - the direct I/O write could have extended
896                          * the size nontransactionally, then finished before
897                          * we started.  xfs_write_file will think that the file
898                          * didn't grow but the update isn't safe unless the
899                          * size change is logged.
900                          *
901                          * Force the log if we've committed a transaction
902                          * against the inode or if someone else has and
903                          * the commit record hasn't gone to disk (e.g.
904                          * the inode is pinned).  This guarantees that
905                          * all changes affecting the inode are permanent
906                          * when we return.
907                          */
908                         if (iip && iip->ili_last_lsn) {
909                                 xfs_log_force(mp, iip->ili_last_lsn,
910                                                 XFS_LOG_FORCE | XFS_LOG_SYNC);
911                         } else if (xfs_ipincount(xip) > 0) {
912                                 xfs_log_force(mp, (xfs_lsn_t)0,
913                                                 XFS_LOG_FORCE | XFS_LOG_SYNC);
914                         }
915
916                 } else {
917                         xfs_trans_t     *tp;
918
919                         /*
920                          * O_SYNC or O_DSYNC _with_ a size update are handled
921                          * the same way.
922                          *
923                          * If the write was synchronous then we need to make
924                          * sure that the inode modification time is permanent.
925                          * We'll have updated the timestamp above, so here
926                          * we use a synchronous transaction to log the inode.
927                          * It's not fast, but it's necessary.
928                          *
929                          * If this is a dsync write and the size got changed
930                          * non-transactionally, then we need to ensure that
931                          * the size change gets logged in a synchronous
932                          * transaction.
933                          */
934
935                         tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
936                         if ((error = xfs_trans_reserve(tp, 0,
937                                                       XFS_SWRITE_LOG_RES(mp),
938                                                       0, 0, 0))) {
939                                 /* Transaction reserve failed */
940                                 xfs_trans_cancel(tp, 0);
941                         } else {
942                                 /* Transaction reserve successful */
943                                 xfs_ilock(xip, XFS_ILOCK_EXCL);
944                                 xfs_trans_ijoin(tp, xip, XFS_ILOCK_EXCL);
945                                 xfs_trans_ihold(tp, xip);
946                                 xfs_trans_log_inode(tp, xip, XFS_ILOG_CORE);
947                                 xfs_trans_set_sync(tp);
948                                 error = xfs_trans_commit(tp, 0, NULL);
949                                 xfs_iunlock(xip, XFS_ILOCK_EXCL);
950                         }
951                         if (error)
952                                 goto out_unlock_internal;
953                 }
954         
955                 xfs_rwunlock(bdp, locktype);
956                 if (need_isem)
957                         up(&inode->i_sem);
958
959                 error = sync_page_range(inode, mapping, pos, ret);
960                 if (!error)
961                         error = ret;
962                 return error;
963         }
964
965  out_unlock_internal:
966         xfs_rwunlock(bdp, locktype);
967  out_unlock_isem:
968         if (need_isem)
969                 up(&inode->i_sem);
970  out_nounlocks:
971         return -error;
972 }
973
974 /*
975  * All xfs metadata buffers except log state machine buffers
976  * get this attached as their b_bdstrat callback function.
977  * This is so that we can catch a buffer
978  * after prematurely unpinning it to forcibly shut down the filesystem.
979  */
980 int
981 xfs_bdstrat_cb(struct xfs_buf *bp)
982 {
983         xfs_mount_t     *mp;
984
985         mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
986         if (!XFS_FORCED_SHUTDOWN(mp)) {
987                 pagebuf_iorequest(bp);
988                 return 0;
989         } else {
990                 xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
991                 /*
992                  * Metadata write that didn't get logged but
993                  * written delayed anyway. These aren't associated
994                  * with a transaction, and can be ignored.
995                  */
996                 if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
997                     (XFS_BUF_ISREAD(bp)) == 0)
998                         return (xfs_bioerror_relse(bp));
999                 else
1000                         return (xfs_bioerror(bp));
1001         }
1002 }
1003
1004
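/*
 * xfs_bmap: map a byte range of a regular file to filesystem blocks for
 * the I/O path, by way of xfs_iomap().
 */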
1005 int
1006 xfs_bmap(bhv_desc_t     *bdp,
1007         xfs_off_t       offset,
1008         ssize_t         count,
1009         int             flags,
1010         xfs_iomap_t     *iomapp,
1011         int             *niomaps)
1012 {
1013         xfs_inode_t     *ip = XFS_BHVTOI(bdp);
1014         xfs_iocore_t    *io = &ip->i_iocore;
1015
1016         ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
1017         ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
1018                ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));
1019
1020         return xfs_iomap(io, offset, count, flags, iomapp, niomaps);
1021 }
1022
1023 /*
1024  * Wrapper around bdstrat so that we can stop data
1025  * from going to disk in case we are shutting down the filesystem.
1026  * Typically user data goes through this path; one of the exceptions
1027  * is the superblock.
1028  */
1029 int
1030 xfsbdstrat(
1031         struct xfs_mount        *mp,
1032         struct xfs_buf          *bp)
1033 {
1034         ASSERT(mp);
1035         if (!XFS_FORCED_SHUTDOWN(mp)) {
1036                 /* Grio redirection would go here
1037                  * if (XFS_BUF_IS_GRIO(bp)) {
1038                  */
1039
1040                 pagebuf_iorequest(bp);
1041                 return 0;
1042         }
1043
1044         xfs_buftrace("XFSBDSTRAT IOERROR", bp);
1045         return (xfs_bioerror_relse(bp));
1046 }
1047
1048 /*
1049  * If the underlying (data/log/rt) device is readonly, there are some
1050  * operations that cannot proceed.
1051  */
1052 int
1053 xfs_dev_is_read_only(
1054         xfs_mount_t             *mp,
1055         char                    *message)
1056 {
1057         if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
1058             xfs_readonly_buftarg(mp->m_logdev_targp) ||
1059             (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
1060                 cmn_err(CE_NOTE,
1061                         "XFS: %s required on read-only device.", message);
1062                 cmn_err(CE_NOTE,
1063                         "XFS: write access unavailable, cannot proceed.");
1064                 return EROFS;
1065         }
1066         return 0;
1067 }