4 * vfs operations that deal with files
6 * Copyright (C) International Business Machines Corp., 2002,2003
7 * Author(s): Steve French (sfrench@us.ibm.com)
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <linux/backing-dev.h>
25 #include <linux/stat.h>
26 #include <linux/fcntl.h>
27 #include <linux/mpage.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/smp_lock.h>
31 #include <linux/writeback.h>
32 #include <asm/div64.h>
36 #include "cifsproto.h"
37 #include "cifs_unicode.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
41 static inline struct cifsFileInfo *cifs_init_private(
42 struct cifsFileInfo *private_data, struct inode *inode,
43 struct file *file, __u16 netfid)
45 memset(private_data, 0, sizeof(struct cifsFileInfo));
46 private_data->netfid = netfid;
47 private_data->pid = current->tgid;
48 init_MUTEX(&private_data->fh_sem);
49 private_data->pfile = file; /* needed for writepage */
50 private_data->pInode = inode;
51 private_data->invalidHandle = FALSE;
52 private_data->closePend = FALSE;
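/* Map the POSIX access mode bits in f_flags to the SMB desired access
   (GENERIC_READ and/or GENERIC_WRITE) requested in the open call */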
57 static inline int cifs_convert_flags(unsigned int flags)
59 if ((flags & O_ACCMODE) == O_RDONLY)
61 else if ((flags & O_ACCMODE) == O_WRONLY)
63 else if ((flags & O_ACCMODE) == O_RDWR) {
64 /* GENERIC_ALL is too much permission to request
65 can cause unnecessary access denied on create */
66 /* return GENERIC_ALL; */
67 return (GENERIC_READ | GENERIC_WRITE);
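/* Map the POSIX create/truncate flags to the SMB create disposition
   used on open; see the open flag mapping table in cifs_open below */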
73 static inline int cifs_get_disposition(unsigned int flags)
75 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
77 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
78 return FILE_OVERWRITE_IF;
79 else if ((flags & O_CREAT) == O_CREAT)
85 /* all arguments to this function must be checked for validity in caller */
86 static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
87 struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
88 struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
89 char *full_path, int xid)
94 /* want handles we can use to read with first
95 in the list so we do not have to walk the
96 list to search for one in prepare_write */
97 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
98 list_add_tail(&pCifsFile->flist,
99 &pCifsInode->openFileList);
101 list_add(&pCifsFile->flist,
102 &pCifsInode->openFileList);
104 write_unlock(&GlobalSMBSeslock);
105 write_unlock(&file->f_owner.lock);
106 if (pCifsInode->clientCanCacheRead) {
107 /* we have the inode open somewhere else
108 no need to discard cache data */
109 goto client_can_cache;
112 /* BB need same check in cifs_create too? */
113 /* if not oplocked, invalidate inode pages if mtime or file size changed */
115 temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
116 if (timespec_equal(&file->f_dentry->d_inode->i_mtime, &temp) &&
117 (file->f_dentry->d_inode->i_size ==
118 (loff_t)le64_to_cpu(buf->EndOfFile))) {
119 cFYI(1, ("inode unchanged on server"));
121 if (file->f_dentry->d_inode->i_mapping) {
122 /* BB no need to lock inode until after invalidate
123 since namei code should already have it locked? */
124 filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
125 filemap_fdatawait(file->f_dentry->d_inode->i_mapping);
127 cFYI(1, ("invalidating remote inode since open detected it "
129 invalidate_remote_inode(file->f_dentry->d_inode);
133 if (pTcon->ses->capabilities & CAP_UNIX)
134 rc = cifs_get_inode_info_unix(&file->f_dentry->d_inode,
135 full_path, inode->i_sb, xid);
137 rc = cifs_get_inode_info(&file->f_dentry->d_inode,
138 full_path, buf, inode->i_sb, xid);
140 if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
141 pCifsInode->clientCanCacheAll = TRUE;
142 pCifsInode->clientCanCacheRead = TRUE;
143 cFYI(1, ("Exclusive Oplock granted on inode %p",
144 file->f_dentry->d_inode));
145 } else if ((*oplock & 0xF) == OPLOCK_READ)
146 pCifsInode->clientCanCacheRead = TRUE;
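/* cifs_open - VFS open entry point: build the full path, send the SMB
   open request (or legacy OpenX for old servers), and stash the
   returned netfid and oplock state in file->private_data */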
151 int cifs_open(struct inode *inode, struct file *file)
155 struct cifs_sb_info *cifs_sb;
156 struct cifsTconInfo *pTcon;
157 struct cifsFileInfo *pCifsFile;
158 struct cifsInodeInfo *pCifsInode;
159 struct list_head *tmp;
160 char *full_path = NULL;
164 FILE_ALL_INFO *buf = NULL;
168 cifs_sb = CIFS_SB(inode->i_sb);
169 pTcon = cifs_sb->tcon;
171 if (file->f_flags & O_CREAT) {
172 /* search inode for this file and fill in file->private_data */
173 pCifsInode = CIFS_I(file->f_dentry->d_inode);
174 read_lock(&GlobalSMBSeslock);
175 list_for_each(tmp, &pCifsInode->openFileList) {
176 pCifsFile = list_entry(tmp, struct cifsFileInfo,
178 if ((pCifsFile->pfile == NULL) &&
179 (pCifsFile->pid == current->tgid)) {
180 /* mode set in cifs_create */
182 /* needed for writepage */
183 pCifsFile->pfile = file;
185 file->private_data = pCifsFile;
189 read_unlock(&GlobalSMBSeslock);
190 if (file->private_data != NULL) {
195 if (file->f_flags & O_EXCL)
196 cERROR(1, ("could not find file instance for "
197 "new file %p ", file));
201 down(&inode->i_sb->s_vfs_rename_sem);
202 full_path = build_path_from_dentry(file->f_dentry);
203 up(&inode->i_sb->s_vfs_rename_sem);
204 if (full_path == NULL) {
209 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
210 inode, file->f_flags, full_path));
211 desiredAccess = cifs_convert_flags(file->f_flags);
213 /*********************************************************************
214 * open flag mapping table:
216 * POSIX Flag CIFS Disposition
217 * ---------- ----------------
218 * O_CREAT FILE_OPEN_IF
219 * O_CREAT | O_EXCL FILE_CREATE
220 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
221 * O_TRUNC FILE_OVERWRITE
222 * none of the above FILE_OPEN
224 * Note that there is no direct match for the disposition
225 * FILE_SUPERSEDE (ie create whether or not the file exists);
226 * O_CREAT | O_TRUNC is similar but truncates the existing
227 * file rather than creating a new file as FILE_SUPERSEDE does
228 * (which uses the attributes / metadata passed in on the open call)
230 *? O_SYNC is a reasonable match to CIFS writethrough flag
231 *? and the read write flags match reasonably. O_LARGEFILE
232 *? is irrelevant because largefile support is always used
233 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
234 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
235 *********************************************************************/
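/* Example of the mapping above: open(2) with O_CREAT | O_TRUNC maps to
   FILE_OVERWRITE_IF, O_CREAT | O_EXCL to FILE_CREATE, and a plain open
   of an existing file to FILE_OPEN */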
237 disposition = cifs_get_disposition(file->f_flags);
244 /* BB pass O_SYNC flag through on file attributes .. BB */
246 /* Also refresh inode by passing in file_info buf returned by SMBOpen
247 and calling get_inode_info with returned buf (at least helps
248 non-Unix server case) */
250 /* BB we can not do this if this is the second open of a file
251 and the first handle has writebehind data, we might be
252 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
253 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
258 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
259 CREATE_NOT_DIR, &netfid, &oplock, buf,
260 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
261 & CIFS_MOUNT_MAP_SPECIAL_CHR);
263 /* Old server, try legacy style OpenX */
264 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
265 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
266 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
267 & CIFS_MOUNT_MAP_SPECIAL_CHR);
270 cFYI(1, ("cifs_open returned 0x%x ", rc));
274 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
275 if (file->private_data == NULL) {
279 pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
280 write_lock(&file->f_owner.lock);
281 write_lock(&GlobalSMBSeslock);
282 list_add(&pCifsFile->tlist, &pTcon->openFileList);
284 pCifsInode = CIFS_I(file->f_dentry->d_inode);
286 rc = cifs_open_inode_helper(inode, file, pCifsInode,
288 &oplock, buf, full_path, xid);
290 write_unlock(&GlobalSMBSeslock);
291 write_unlock(&file->f_owner.lock);
294 if (oplock & CIFS_CREATE_ACTION) {
295 /* time to set mode which we can not set earlier due to
296 problems creating new read-only files */
297 if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
298 CIFSSMBUnixSetPerms(xid, pTcon, full_path,
300 (__u64)-1, (__u64)-1, 0 /* dev */,
302 cifs_sb->mnt_cifs_flags &
303 CIFS_MOUNT_MAP_SPECIAL_CHR);
305 /* BB implement via Windows security descriptors eg
306 CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
308 in the meantime could set r/o dos attribute when
309 perms are eg: mode & 0222 == 0 */
320 /* Try to reacquire byte range locks that were released when session */
321 /* to server was lost */
322 static int cifs_relock_file(struct cifsFileInfo *cifsFile)
326 /* BB list all locks open on this file and relock */
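/* cifs_reopen_file - reestablish a server file handle after the session
   to the server was lost; on success store the new netfid, optionally
   refresh the inode info, and reacquire byte range locks via
   cifs_relock_file */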
331 static int cifs_reopen_file(struct inode *inode, struct file *file,
336 struct cifs_sb_info *cifs_sb;
337 struct cifsTconInfo *pTcon;
338 struct cifsFileInfo *pCifsFile;
339 struct cifsInodeInfo *pCifsInode;
340 char *full_path = NULL;
342 int disposition = FILE_OPEN;
347 if (file->private_data) {
348 pCifsFile = (struct cifsFileInfo *)file->private_data;
353 down(&pCifsFile->fh_sem);
354 if (pCifsFile->invalidHandle == FALSE) {
355 up(&pCifsFile->fh_sem);
360 if (file->f_dentry == NULL) {
361 up(&pCifsFile->fh_sem);
362 cFYI(1, ("failed file reopen, no valid name if dentry freed"));
366 cifs_sb = CIFS_SB(inode->i_sb);
367 pTcon = cifs_sb->tcon;
368 /* can not grab the rename sem here because various ops, including
369 those that already hold the rename sem, can end up causing writepage
370 to get called, and if the server was down that means we end up here;
371 we can never tell if the caller already holds the rename_sem */
372 full_path = build_path_from_dentry(file->f_dentry);
373 if (full_path == NULL) {
374 up(&pCifsFile->fh_sem);
379 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
380 inode, file->f_flags, full_path));
381 desiredAccess = cifs_convert_flags(file->f_flags);
388 /* Can not refresh inode by passing in file_info buf to be returned
389 by SMBOpen and then calling get_inode_info with returned buf
390 since file might have write behind data that needs to be flushed
391 and server version of file size can be stale. If we knew for sure
392 that inode was not dirty locally we could do this */
394 /* buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
396 up(&pCifsFile->fh_sem);
401 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
402 CREATE_NOT_DIR, &netfid, &oplock, NULL,
403 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
404 CIFS_MOUNT_MAP_SPECIAL_CHR);
406 up(&pCifsFile->fh_sem);
407 cFYI(1, ("cifs_open returned 0x%x ", rc));
408 cFYI(1, ("oplock: %d ", oplock));
410 pCifsFile->netfid = netfid;
411 pCifsFile->invalidHandle = FALSE;
412 up(&pCifsFile->fh_sem);
413 pCifsInode = CIFS_I(inode);
416 filemap_fdatawrite(inode->i_mapping);
417 filemap_fdatawait(inode->i_mapping);
418 /* temporarily disable caching while we
419 go to server to get inode info */
420 pCifsInode->clientCanCacheAll = FALSE;
421 pCifsInode->clientCanCacheRead = FALSE;
422 if (pTcon->ses->capabilities & CAP_UNIX)
423 rc = cifs_get_inode_info_unix(&inode,
424 full_path, inode->i_sb, xid);
426 rc = cifs_get_inode_info(&inode,
427 full_path, NULL, inode->i_sb,
429 } /* else we are writing out data to server already
430 and could deadlock if we tried to flush data, and
431 since we do not know if we have data that would
432 invalidate the current end of file on the server
433 we can not go to the server to get the new inode info */
435 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
436 pCifsInode->clientCanCacheAll = TRUE;
437 pCifsInode->clientCanCacheRead = TRUE;
438 cFYI(1, ("Exclusive Oplock granted on inode %p",
439 file->f_dentry->d_inode));
440 } else if ((oplock & 0xF) == OPLOCK_READ) {
441 pCifsInode->clientCanCacheRead = TRUE;
442 pCifsInode->clientCanCacheAll = FALSE;
444 pCifsInode->clientCanCacheRead = FALSE;
445 pCifsInode->clientCanCacheAll = FALSE;
447 cifs_relock_file(pCifsFile);
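/* cifs_close - release handler for regular files: mark the handle as
   close pending, send SMBClose (unless the tcon needs reconnect),
   unlink it from the open file lists and free the private data */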
456 int cifs_close(struct inode *inode, struct file *file)
460 struct cifs_sb_info *cifs_sb;
461 struct cifsTconInfo *pTcon;
462 struct cifsFileInfo *pSMBFile =
463 (struct cifsFileInfo *)file->private_data;
467 cifs_sb = CIFS_SB(inode->i_sb);
468 pTcon = cifs_sb->tcon;
470 pSMBFile->closePend = TRUE;
471 write_lock(&file->f_owner.lock);
473 /* no sense reconnecting to close a file that is
475 if (pTcon->tidStatus != CifsNeedReconnect) {
476 write_unlock(&file->f_owner.lock);
477 rc = CIFSSMBClose(xid, pTcon,
479 write_lock(&file->f_owner.lock);
482 write_lock(&GlobalSMBSeslock);
483 list_del(&pSMBFile->flist);
484 list_del(&pSMBFile->tlist);
485 write_unlock(&GlobalSMBSeslock);
486 write_unlock(&file->f_owner.lock);
487 kfree(pSMBFile->search_resume_name);
488 kfree(file->private_data);
489 file->private_data = NULL;
493 if (list_empty(&(CIFS_I(inode)->openFileList))) {
494 cFYI(1, ("closing last open instance for inode %p", inode));
495 /* if the file is not open we do not know if we can cache info
496 on this inode, much less write behind and read ahead */
497 CIFS_I(inode)->clientCanCacheRead = FALSE;
498 CIFS_I(inode)->clientCanCacheAll = FALSE;
500 if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
501 rc = CIFS_I(inode)->write_behind_rc;
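/* cifs_closedir - release handler for directories: close any unfinished
   FindFirst/FindNext search on the server and free the search buffers
   and resume name */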
506 int cifs_closedir(struct inode *inode, struct file *file)
510 struct cifsFileInfo *pCFileStruct =
511 (struct cifsFileInfo *)file->private_data;
514 cFYI(1, ("Closedir inode = 0x%p with ", inode));
519 struct cifsTconInfo *pTcon;
520 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_dentry->d_sb);
522 pTcon = cifs_sb->tcon;
524 cFYI(1, ("Freeing private data in close dir"));
525 if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
526 (pCFileStruct->invalidHandle == FALSE)) {
527 pCFileStruct->invalidHandle = TRUE;
528 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
529 cFYI(1, ("Closing uncompleted readdir with rc %d",
531 /* not much we can do if it fails anyway, ignore rc */
534 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
536 /* BB removeme BB */ cFYI(1, ("freeing smb buf in srch struct in closedir"));
537 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
538 cifs_buf_release(ptmp);
540 ptmp = pCFileStruct->search_resume_name;
542 /* BB removeme BB */ cFYI(1, ("freeing resume name in closedir"));
543 pCFileStruct->search_resume_name = NULL;
546 kfree(file->private_data);
547 file->private_data = NULL;
549 /* BB can we lock the filestruct while this is going on? */
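/* cifs_lock - byte range locking: translate the VFS file_lock into SMB
   LockingAndX requests (shared lock type for read locks, unlock for
   F_UNLCK) and mirror POSIX locks locally via posix_lock_file_wait */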
554 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
557 __u32 lockType = LOCKING_ANDX_LARGE_FILES;
561 int wait_flag = FALSE;
562 struct cifs_sb_info *cifs_sb;
563 struct cifsTconInfo *pTcon;
565 length = 1 + pfLock->fl_end - pfLock->fl_start;
569 cFYI(1, ("Lock parm: 0x%x flockflags: "
570 "0x%x flocktype: 0x%x start: %lld end: %lld",
571 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
574 if (pfLock->fl_flags & FL_POSIX)
576 if (pfLock->fl_flags & FL_FLOCK)
578 if (pfLock->fl_flags & FL_SLEEP) {
579 cFYI(1, ("Blocking lock "));
582 if (pfLock->fl_flags & FL_ACCESS)
583 cFYI(1, ("Process suspended by mandatory locking - "
584 "not implemented yet "));
585 if (pfLock->fl_flags & FL_LEASE)
586 cFYI(1, ("Lease on file - not implemented yet"));
587 if (pfLock->fl_flags &
588 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
589 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
591 if (pfLock->fl_type == F_WRLCK) {
592 cFYI(1, ("F_WRLCK "));
594 } else if (pfLock->fl_type == F_UNLCK) {
595 cFYI(1, ("F_UNLCK "));
597 } else if (pfLock->fl_type == F_RDLCK) {
598 cFYI(1, ("F_RDLCK "));
599 lockType |= LOCKING_ANDX_SHARED_LOCK;
601 } else if (pfLock->fl_type == F_EXLCK) {
602 cFYI(1, ("F_EXLCK "));
604 } else if (pfLock->fl_type == F_SHLCK) {
605 cFYI(1, ("F_SHLCK "));
606 lockType |= LOCKING_ANDX_SHARED_LOCK;
609 cFYI(1, ("Unknown type of lock "));
611 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
612 pTcon = cifs_sb->tcon;
614 if (file->private_data == NULL) {
620 rc = CIFSSMBLock(xid, pTcon,
621 ((struct cifsFileInfo *)file->
622 private_data)->netfid,
624 pfLock->fl_start, 0, 1, lockType,
627 rc = CIFSSMBLock(xid, pTcon,
628 ((struct cifsFileInfo *) file->
629 private_data)->netfid,
631 pfLock->fl_start, 1 /* numUnlock */ ,
632 0 /* numLock */ , lockType,
634 pfLock->fl_type = F_UNLCK;
636 cERROR(1, ("Error unlocking previously locked "
637 "range %d during test of lock ",
642 /* if rc == ERR_SHARING_VIOLATION ? */
643 rc = 0; /* do not change lock type to unlock
644 since range in use */
651 rc = CIFSSMBLock(xid, pTcon,
652 ((struct cifsFileInfo *) file->private_data)->
654 pfLock->fl_start, numUnlock, numLock, lockType,
656 if (pfLock->fl_flags & FL_POSIX)
657 posix_lock_file_wait(file, pfLock);
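/* cifs_user_write - write from a user space buffer to the server in
   chunks of at most wsize bytes, reopening the handle if it was
   invalidated by a reconnect; returns the number of bytes written */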
662 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
663 size_t write_size, loff_t *poffset)
666 unsigned int bytes_written = 0;
667 unsigned int total_written;
668 struct cifs_sb_info *cifs_sb;
669 struct cifsTconInfo *pTcon;
671 struct cifsFileInfo *open_file;
673 if (file->f_dentry == NULL)
676 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
680 pTcon = cifs_sb->tcon;
683 (" write %d bytes to offset %lld of %s", write_size,
684 *poffset, file->f_dentry->d_name.name)); */
686 if (file->private_data == NULL)
689 open_file = (struct cifsFileInfo *) file->private_data;
692 if (file->f_dentry->d_inode == NULL) {
697 if (*poffset > file->f_dentry->d_inode->i_size)
698 long_op = 2; /* writes past end of file can take a long time */
702 for (total_written = 0; write_size > total_written;
703 total_written += bytes_written) {
705 while (rc == -EAGAIN) {
706 if (file->private_data == NULL) {
707 /* file has been closed on us */
709 /* if we have gotten here we have written some data
710 and blocked, and the file has been freed on us while
711 we blocked so return what we managed to write */
712 return total_written;
714 if (open_file->closePend) {
717 return total_written;
721 if (open_file->invalidHandle) {
722 if ((file->f_dentry == NULL) ||
723 (file->f_dentry->d_inode == NULL)) {
725 return total_written;
727 /* we could deadlock if we called
728 filemap_fdatawait from here so tell
729 reopen_file not to flush data to server now */
731 rc = cifs_reopen_file(file->f_dentry->d_inode,
737 rc = CIFSSMBWrite(xid, pTcon,
739 min_t(const int, cifs_sb->wsize,
740 write_size - total_written),
741 *poffset, &bytes_written,
742 NULL, write_data + total_written, long_op);
744 if (rc || (bytes_written == 0)) {
752 *poffset += bytes_written;
753 long_op = FALSE; /* subsequent writes fast -
754 15 seconds is plenty */
757 cifs_stats_bytes_written(pTcon, total_written);
759 /* since the write may have blocked check these pointers again */
760 if (file->f_dentry) {
761 if (file->f_dentry->d_inode) {
762 struct inode *inode = file->f_dentry->d_inode;
763 inode->i_ctime = inode->i_mtime =
764 current_fs_time(inode->i_sb);
765 if (total_written > 0) {
766 if (*poffset > file->f_dentry->d_inode->i_size)
767 i_size_write(file->f_dentry->d_inode,
770 mark_inode_dirty_sync(file->f_dentry->d_inode);
774 return total_written;
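/* cifs_write - same as cifs_user_write but for a kernel buffer; used by
   the page cache writeback paths (partialpagewrite and commit_write) */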
777 static ssize_t cifs_write(struct file *file, const char *write_data,
778 size_t write_size, loff_t *poffset)
781 unsigned int bytes_written = 0;
782 unsigned int total_written;
783 struct cifs_sb_info *cifs_sb;
784 struct cifsTconInfo *pTcon;
786 struct cifsFileInfo *open_file;
788 if (file->f_dentry == NULL)
791 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
795 pTcon = cifs_sb->tcon;
797 cFYI(1, ("write %zd bytes to offset %lld of %s", write_size,
798 *poffset, file->f_dentry->d_name.name));
800 if (file->private_data == NULL)
803 open_file = (struct cifsFileInfo *)file->private_data;
806 if (file->f_dentry->d_inode == NULL) {
811 if (*poffset > file->f_dentry->d_inode->i_size)
812 long_op = 2; /* writes past end of file can take a long time */
816 for (total_written = 0; write_size > total_written;
817 total_written += bytes_written) {
819 while (rc == -EAGAIN) {
820 if (file->private_data == NULL) {
821 /* file has been closed on us */
823 /* if we have gotten here we have written some data
824 and blocked, and the file has been freed on us
825 while we blocked so return what we managed to write */
827 return total_written;
829 if (open_file->closePend) {
832 return total_written;
836 if (open_file->invalidHandle) {
837 if ((file->f_dentry == NULL) ||
838 (file->f_dentry->d_inode == NULL)) {
840 return total_written;
842 /* we could deadlock if we called
843 filemap_fdatawait from here so tell
844 reopen_file not to flush data to server now */
846 rc = cifs_reopen_file(file->f_dentry->d_inode,
851 #ifdef CONFIG_CIFS_EXPERIMENTAL
852 /* BB FIXME We can not sign across two buffers yet */
853 if ((experimEnabled) && ((pTcon->ses->server->secMode &
854 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) == 0)) {
858 len = min(cifs_sb->wsize,
859 write_size - total_written);
860 /* iov[0] is reserved for smb header */
861 iov[1].iov_base = (char *)write_data +
863 iov[1].iov_len = len;
864 rc = CIFSSMBWrite2(xid, pTcon,
865 open_file->netfid, len,
866 *poffset, &bytes_written,
869 /* BB FIXME fixup indentation of line below */
871 rc = CIFSSMBWrite(xid, pTcon,
873 min_t(const int, cifs_sb->wsize,
874 write_size - total_written),
875 *poffset, &bytes_written,
876 write_data + total_written, NULL, long_op);
878 if (rc || (bytes_written == 0)) {
886 *poffset += bytes_written;
887 long_op = FALSE; /* subsequent writes fast -
888 15 seconds is plenty */
891 cifs_stats_bytes_written(pTcon, total_written);
893 /* since the write may have blocked check these pointers again */
894 if (file->f_dentry) {
895 if (file->f_dentry->d_inode) {
896 file->f_dentry->d_inode->i_ctime =
897 file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
898 if (total_written > 0) {
899 if (*poffset > file->f_dentry->d_inode->i_size)
900 i_size_write(file->f_dentry->d_inode,
903 mark_inode_dirty_sync(file->f_dentry->d_inode);
907 return total_written;
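/* find_writable_file - walk the inode's open file list for a handle
   opened for write that is not pending close, reopening an invalidated
   handle if needed; writepage has no struct file of its own so it
   relies on this */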
910 static struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
912 struct cifsFileInfo *open_file;
914 read_lock(&GlobalSMBSeslock);
915 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
916 if (open_file->closePend)
918 if (open_file->pfile &&
919 ((open_file->pfile->f_flags & O_RDWR) ||
920 (open_file->pfile->f_flags & O_WRONLY))) {
921 read_unlock(&GlobalSMBSeslock);
922 if (open_file->invalidHandle) {
923 rc = cifs_reopen_file(cifs_inode->vfs_inode,
924 open_file->pfile, FALSE);
925 /* if it fails, try another handle - might be */
926 /* dangerous to hold up writepages with retry */
928 read_lock(&GlobalSMBSeslock);
935 read_unlock(&GlobalSMBSeslock);
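/* cifs_partialpagewrite - write the byte range [from, to) of a single
   page cache page back to the server using a handle found by
   find_writable_file */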
939 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
941 struct address_space *mapping = page->mapping;
942 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
945 int bytes_written = 0;
946 struct cifs_sb_info *cifs_sb;
947 struct cifsTconInfo *pTcon;
949 struct cifsFileInfo *open_file;
951 if (!mapping || !mapping->host)
954 inode = page->mapping->host;
955 cifs_sb = CIFS_SB(inode->i_sb);
956 pTcon = cifs_sb->tcon;
958 offset += (loff_t)from;
959 write_data = kmap(page);
962 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
967 /* racing with truncate? */
968 if (offset > mapping->host->i_size) {
970 return 0; /* don't care */
973 /* check to make sure that we are not extending the file */
974 if (mapping->host->i_size - offset < (loff_t)to)
975 to = (unsigned)(mapping->host->i_size - offset);
977 open_file = find_writable_file(CIFS_I(mapping->host));
979 bytes_written = cifs_write(open_file->pfile, write_data,
981 /* Does mm or vfs already set times? */
982 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
983 if ((bytes_written > 0) && (offset)) {
985 } else if (bytes_written < 0) {
990 cFYI(1, ("No writeable filehandles for inode"));
998 #ifdef CONFIG_CIFS_EXPERIMENTAL
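/* cifs_writepages - experimental multipage writeback: gather up to
   wsize bytes of contiguous dirty pages into an iovec and send them in
   one SMBWrite2 call; falls back to generic_writepages (one page at a
   time) when wsize is smaller than a page */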
999 static int cifs_writepages(struct address_space *mapping,
1000 struct writeback_control *wbc)
1002 struct backing_dev_info *bdi = mapping->backing_dev_info;
1003 unsigned int bytes_to_write;
1004 unsigned int bytes_written;
1005 struct cifs_sb_info *cifs_sb;
1010 struct kvec iov[32];
1015 struct cifsFileInfo *open_file = NULL;
1017 struct pagevec pvec;
1022 cifs_sb = CIFS_SB(mapping->host->i_sb);
1025 * If wsize is smaller than the page cache size, default to writing
1026 * one page at a time via cifs_writepage
1028 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1029 return generic_writepages(mapping, wbc);
1032 * BB: Is this meaningful for a non-block-device file system?
1033 * If it is, we should test it again after we do I/O
1035 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1036 wbc->encountered_congestion = 1;
1042 pagevec_init(&pvec, 0);
1043 if (wbc->sync_mode == WB_SYNC_NONE)
1044 index = mapping->writeback_index; /* Start from prev offset */
1049 if (wbc->start || wbc->end) {
1050 index = wbc->start >> PAGE_CACHE_SHIFT;
1051 end = wbc->end >> PAGE_CACHE_SHIFT;
1056 while (!done && (index <= end) &&
1057 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1058 PAGECACHE_TAG_DIRTY,
1059 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1064 open_file = find_writable_file(CIFS_I(mapping->host));
1066 pagevec_release(&pvec);
1067 cERROR(1, ("No writable handles for inode"));
1077 for (i = 0; i < nr_pages; i++) {
1078 page = pvec.pages[i];
1080 * At this point we hold neither mapping->tree_lock nor
1081 * lock on the page itself: the page may be truncated or
1082 * invalidated (changing page->mapping to NULL), or even
1083 * swizzled back from swapper_space to tmpfs file
1089 else if (TestSetPageLocked(page))
1092 if (unlikely(page->mapping != mapping)) {
1097 if (unlikely(is_range) && (page->index > end)) {
1103 if (next && (page->index != next)) {
1104 /* Not next consecutive page */
1109 if (wbc->sync_mode != WB_SYNC_NONE)
1110 wait_on_page_writeback(page);
1112 if (PageWriteback(page) ||
1113 !test_clear_page_dirty(page)) {
1118 * BB can we get rid of this? pages are held by pvec
1120 page_cache_get(page);
1122 /* reserve iov[0] for the smb header */
1124 iov[n_iov].iov_base = kmap(page);
1125 iov[n_iov].iov_len = PAGE_CACHE_SIZE;
1126 bytes_to_write += PAGE_CACHE_SIZE;
1130 offset = page_offset(page);
1132 next = page->index + 1;
1133 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1137 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1138 open_file->netfid, bytes_to_write,
1139 offset, &bytes_written, iov, n_iov,
1141 if (rc || bytes_written < bytes_to_write) {
1142 cERROR(1, ("CIFSSMBWrite2 returned %d, written = %x",
1143 rc, bytes_written));
1144 set_bit(AS_EIO, &mapping->flags);
1147 for (i = 0; i < n_iov; i++) {
1148 page = pvec.pages[first + i];
1151 page_cache_release(page);
1153 if ((wbc->nr_to_write -= n_iov) <= 0)
1157 pagevec_release(&pvec);
1159 if (!scanned && !done) {
1161 * We hit the last page and there is more work to be done: wrap
1162 * back to the start of the file
1169 mapping->writeback_index = index;
1177 static int cifs_writepage(struct page* page, struct writeback_control *wbc)
1183 /* BB add check for wbc flags */
1184 page_cache_get(page);
1185 if (!PageUptodate(page)) {
1186 cFYI(1, ("ppw - page not up to date"));
1189 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1190 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1192 page_cache_release(page);
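/* cifs_commit_write - called after prepare_write once the caller copied
   data into the page: extend the file size on the server if the write
   went past EOF, and push data for not-uptodate pages synchronously
   via cifs_write */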
1197 static int cifs_commit_write(struct file *file, struct page *page,
1198 unsigned offset, unsigned to)
1202 struct inode *inode = page->mapping->host;
1203 loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1207 cFYI(1, ("commit write for page %p up to position %lld for %d",
1208 page, position, to));
1209 if (position > inode->i_size) {
1210 i_size_write(inode, position);
1211 /* if (file->private_data == NULL) {
1214 open_file = (struct cifsFileInfo *)file->private_data;
1215 cifs_sb = CIFS_SB(inode->i_sb);
1217 while (rc == -EAGAIN) {
1218 if ((open_file->invalidHandle) &&
1219 (!open_file->closePend)) {
1220 rc = cifs_reopen_file(
1221 file->f_dentry->d_inode, file);
1225 if (!open_file->closePend) {
1226 rc = CIFSSMBSetFileSize(xid,
1227 cifs_sb->tcon, position,
1229 open_file->pid, FALSE);
1235 cFYI(1, (" SetEOF (commit write) rc = %d", rc));
1238 if (!PageUptodate(page)) {
1239 position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1240 /* can not rely on (or let) writepage write this data */
1242 cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1247 /* this is probably better than directly calling
1248 partialpage_write since in this function the file handle is
1249 known which we might as well leverage */
1250 /* BB check if anything else missing out of ppw
1251 such as updating last write time */
1252 page_data = kmap(page);
1253 rc = cifs_write(file, page_data + offset, to-offset,
1257 /* else if (rc < 0) should we set writebehind rc? */
1260 set_page_dirty(page);
1267 int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1271 struct inode *inode = file->f_dentry->d_inode;
1275 cFYI(1, ("Sync file - name: %s datasync: 0x%x ",
1276 dentry->d_name.name, datasync));
1278 rc = filemap_fdatawrite(inode->i_mapping);
1280 CIFS_I(inode)->write_behind_rc = 0;
1285 /* static int cifs_sync_page(struct page *page)
1287 struct address_space *mapping;
1288 struct inode *inode;
1289 unsigned long index = page->index;
1290 unsigned int rpages = 0;
1293 cFYI(1, ("sync page %p",page));
1294 mapping = page->mapping;
1297 inode = mapping->host;
1301 /* fill in rpages then
1302 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1304 /* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index));
1312 * As file closes, flush all cached write data for this inode checking
1313 * for write behind errors.
1315 int cifs_flush(struct file *file)
1317 struct inode *inode = file->f_dentry->d_inode;
1320 /* Rather than do the steps manually:
1321 lock the inode for writing
1322 loop through pages looking for write behind data (dirty pages)
1323 coalesce into contiguous 16K (or smaller) chunks to write to server
1324 send to server (prefer in parallel)
1325 deal with writebehind errors
1326 unlock inode for writing
1327 filemapfdatawrite appears easier for the time being */
1329 rc = filemap_fdatawrite(inode->i_mapping);
1330 if (!rc) /* reset wb rc if we were able to write out dirty pages */
1331 CIFS_I(inode)->write_behind_rc = 0;
1333 cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));
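/* cifs_user_read - read into a user space buffer: issue SMBRead
   requests, copy the returned data out of the SMB response buffer to
   user space, and reopen an invalidated handle as needed */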
1338 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1339 size_t read_size, loff_t *poffset)
1342 unsigned int bytes_read = 0;
1343 unsigned int total_read = 0;
1344 unsigned int current_read_size;
1345 struct cifs_sb_info *cifs_sb;
1346 struct cifsTconInfo *pTcon;
1348 struct cifsFileInfo *open_file;
1349 char *smb_read_data;
1350 char __user *current_offset;
1351 struct smb_com_read_rsp *pSMBr;
1354 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1355 pTcon = cifs_sb->tcon;
1357 if (file->private_data == NULL) {
1361 open_file = (struct cifsFileInfo *)file->private_data;
1363 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1364 cFYI(1, ("attempting read on write only file instance"));
1366 for (total_read = 0, current_offset = read_data;
1367 read_size > total_read;
1368 total_read += bytes_read, current_offset += bytes_read) {
1369 current_read_size = min_t(const int, read_size - total_read,
1372 smb_read_data = NULL;
1373 while (rc == -EAGAIN) {
1374 if ((open_file->invalidHandle) &&
1375 (!open_file->closePend)) {
1376 rc = cifs_reopen_file(file->f_dentry->d_inode,
1381 rc = CIFSSMBRead(xid, pTcon,
1383 current_read_size, *poffset,
1384 &bytes_read, &smb_read_data);
1385 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1386 if (copy_to_user(current_offset,
1387 smb_read_data + 4 /* RFC1001 hdr */
1388 + le16_to_cpu(pSMBr->DataOffset),
1394 if (smb_read_data) {
1395 cifs_buf_release(smb_read_data);
1396 smb_read_data = NULL;
1399 if (rc || (bytes_read == 0)) {
1407 cifs_stats_bytes_read(pTcon, bytes_read);
1408 *poffset += bytes_read;
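/* cifs_read - kernel buffer read used by the readpage paths; the server
   response is read directly into the supplied buffer, with the request
   size capped for old (Windows ME/9x) servers lacking CAP_LARGE_FILES */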
1416 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1420 unsigned int bytes_read = 0;
1421 unsigned int total_read;
1422 unsigned int current_read_size;
1423 struct cifs_sb_info *cifs_sb;
1424 struct cifsTconInfo *pTcon;
1426 char *current_offset;
1427 struct cifsFileInfo *open_file;
1430 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1431 pTcon = cifs_sb->tcon;
1433 if (file->private_data == NULL) {
1437 open_file = (struct cifsFileInfo *)file->private_data;
1439 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1440 cFYI(1, ("attempting read on write only file instance"));
1442 for (total_read = 0, current_offset = read_data;
1443 read_size > total_read;
1444 total_read += bytes_read, current_offset += bytes_read) {
1445 current_read_size = min_t(const int, read_size - total_read,
1447 /* For Windows ME and 9x servers we do not want to request more
1448 than was negotiated since the server will refuse the read then */
1450 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1451 current_read_size = min_t(const int, current_read_size,
1452 pTcon->ses->server->maxBuf - 128);
1455 while (rc == -EAGAIN) {
1456 if ((open_file->invalidHandle) &&
1457 (!open_file->closePend)) {
1458 rc = cifs_reopen_file(file->f_dentry->d_inode,
1463 rc = CIFSSMBRead(xid, pTcon,
1465 current_read_size, *poffset,
1466 &bytes_read, &current_offset);
1468 if (rc || (bytes_read == 0)) {
1476 cifs_stats_bytes_read(pTcon, total_read);
1477 *poffset += bytes_read;
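/* cifs_file_mmap - revalidate cached inode data before handing the file
   to generic_file_mmap */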
1484 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1486 struct dentry *dentry = file->f_dentry;
1490 rc = cifs_revalidate(dentry);
1492 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1496 rc = generic_file_mmap(file, vma);
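/* cifs_copy_cache_pages - copy bytes_read bytes of data returned by the
   server into the pages on the readahead list, adding each filled page
   to the page cache and the LRU pagevec */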
1502 static void cifs_copy_cache_pages(struct address_space *mapping,
1503 struct list_head *pages, int bytes_read, char *data,
1504 struct pagevec *plru_pvec)
1509 while (bytes_read > 0) {
1510 if (list_empty(pages))
1513 page = list_entry(pages->prev, struct page, lru);
1514 list_del(&page->lru);
1516 if (add_to_page_cache(page, mapping, page->index,
1518 page_cache_release(page);
1519 cFYI(1, ("Add page cache failed"));
1520 data += PAGE_CACHE_SIZE;
1521 bytes_read -= PAGE_CACHE_SIZE;
1525 target = kmap_atomic(page,KM_USER0);
1527 if (PAGE_CACHE_SIZE > bytes_read) {
1528 memcpy(target, data, bytes_read);
1529 /* zero the tail end of this partial page */
1530 memset(target + bytes_read, 0,
1531 PAGE_CACHE_SIZE - bytes_read);
1534 memcpy(target, data, PAGE_CACHE_SIZE);
1535 bytes_read -= PAGE_CACHE_SIZE;
1537 kunmap_atomic(target, KM_USER0);
1539 flush_dcache_page(page);
1540 SetPageUptodate(page);
1542 if (!pagevec_add(plru_pvec, page))
1543 __pagevec_lru_add(plru_pvec);
1544 data += PAGE_CACHE_SIZE;
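/* cifs_readpages - readahead entry point: read runs of contiguous pages
   from the server in chunks of up to rsize bytes, releasing any pages
   left on the list that could not be read */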
1549 static int cifs_readpages(struct file *file, struct address_space *mapping,
1550 struct list_head *page_list, unsigned num_pages)
1556 struct cifs_sb_info *cifs_sb;
1557 struct cifsTconInfo *pTcon;
1559 unsigned int read_size,i;
1560 char *smb_read_data = NULL;
1561 struct smb_com_read_rsp *pSMBr;
1562 struct pagevec lru_pvec;
1563 struct cifsFileInfo *open_file;
1566 if (file->private_data == NULL) {
1570 open_file = (struct cifsFileInfo *)file->private_data;
1571 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1572 pTcon = cifs_sb->tcon;
1574 pagevec_init(&lru_pvec, 0);
1576 for (i = 0; i < num_pages; ) {
1577 unsigned contig_pages;
1578 struct page *tmp_page;
1579 unsigned long expected_index;
1581 if (list_empty(page_list))
1584 page = list_entry(page_list->prev, struct page, lru);
1585 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1587 /* count adjacent pages that we will read into */
1590 list_entry(page_list->prev, struct page, lru)->index;
1591 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1592 if (tmp_page->index == expected_index) {
1598 if (contig_pages + i > num_pages)
1599 contig_pages = num_pages - i;
1601 /* for reads over a certain size could initiate async read ahead */
1604 read_size = contig_pages * PAGE_CACHE_SIZE;
1605 /* Read size needs to be in multiples of one page */
1606 read_size = min_t(const unsigned int, read_size,
1607 cifs_sb->rsize & PAGE_CACHE_MASK);
1610 while (rc == -EAGAIN) {
1611 if ((open_file->invalidHandle) &&
1612 (!open_file->closePend)) {
1613 rc = cifs_reopen_file(file->f_dentry->d_inode,
1619 rc = CIFSSMBRead(xid, pTcon,
1622 &bytes_read, &smb_read_data);
1624 /* BB more RC checks ? */
1626 if (smb_read_data) {
1627 cifs_buf_release(smb_read_data);
1628 smb_read_data = NULL;
1632 if ((rc < 0) || (smb_read_data == NULL)) {
1633 cFYI(1, ("Read error in readpages: %d", rc));
1634 /* clean up remaining pages off list */
1635 while (!list_empty(page_list) && (i < num_pages)) {
1636 page = list_entry(page_list->prev, struct page,
1638 list_del(&page->lru);
1639 page_cache_release(page);
1642 } else if (bytes_read > 0) {
1643 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1644 cifs_copy_cache_pages(mapping, page_list, bytes_read,
1645 smb_read_data + 4 /* RFC1001 hdr */ +
1646 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1648 i += bytes_read >> PAGE_CACHE_SHIFT;
1649 cifs_stats_bytes_read(pTcon, bytes_read);
1650 if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1651 i++; /* account for partial page */
1653 /* server copy of file can have smaller size than client */
1655 /* BB do we need to verify this common case ?
1656 this case is ok - if we are at server EOF
1657 we will hit it on next read */
1659 /* while (!list_empty(page_list) && (i < num_pages)) {
1660 page = list_entry(page_list->prev,
1662 list_del(&page->list);
1663 page_cache_release(page);
1668 cFYI(1, ("No bytes read (%d) at offset %lld . "
1669 "Cleaning remaining pages from readahead list",
1670 bytes_read, offset));
1671 /* BB turn off caching and do new lookup on
1672 file size at server? */
1673 while (!list_empty(page_list) && (i < num_pages)) {
1674 page = list_entry(page_list->prev, struct page,
1676 list_del(&page->lru);
1678 /* BB removeme - replace with zero of page? */
1679 page_cache_release(page);
1683 if (smb_read_data) {
1684 cifs_buf_release(smb_read_data);
1685 smb_read_data = NULL;
1690 pagevec_lru_add(&lru_pvec);
1692 /* need to free smb_read_data buf before exit */
1693 if (smb_read_data) {
1694 cifs_buf_release(smb_read_data);
1695 smb_read_data = NULL;
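/* cifs_readpage_worker - read one page of data from the server into the
   given page, zero the tail of a short read, and mark the page uptodate
   on success */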
1702 static int cifs_readpage_worker(struct file *file, struct page *page,
1708 page_cache_get(page);
1709 read_data = kmap(page);
1710 /* for reads over a certain size could initiate async read ahead */
1712 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1717 cFYI(1, ("Bytes read %d ",rc));
1719 file->f_dentry->d_inode->i_atime =
1720 current_fs_time(file->f_dentry->d_inode->i_sb);
1722 if (PAGE_CACHE_SIZE > rc)
1723 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1725 flush_dcache_page(page);
1726 SetPageUptodate(page);
1731 page_cache_release(page);
1735 static int cifs_readpage(struct file *file, struct page *page)
1737 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1743 if (file->private_data == NULL) {
1748 cFYI(1, ("readpage %p at offset %d 0x%x\n",
1749 page, (int)offset, (int)offset));
1751 rc = cifs_readpage_worker(file, page, &offset);
1759 /* We do not want to update the file size from server for inodes
1760 open for write - to avoid races with writepage extending
1761 the file - in the future we could consider allowing
1762 refreshing the inode only on increases in the file size
1763 but this is tricky to do without racing with writebehind
1764 page caching in the current Linux kernel design */
1765 int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
1767 if (cifsInode && find_writable_file(cifsInode))
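/* cifs_prepare_write - make sure the page is uptodate before the caller
   copies new data into it; a full page write needs no read, otherwise
   read the page in via cifs_readpage_worker when the file is open for
   reading */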
1773 static int cifs_prepare_write(struct file *file, struct page *page,
1774 unsigned from, unsigned to)
1777 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1778 cFYI(1, ("prepare write for page %p from %d to %d", page, from, to));
1779 if (!PageUptodate(page)) {
1780 /* if (to - from != PAGE_CACHE_SIZE) {
1781 void *kaddr = kmap_atomic(page, KM_USER0);
1782 memset(kaddr, 0, from);
1783 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1784 flush_dcache_page(page);
1785 kunmap_atomic(kaddr, KM_USER0);
1787 /* If we are writing a full page it will be up to date,
1788 no need to read from the server */
1789 if ((to == PAGE_CACHE_SIZE) && (from == 0))
1790 SetPageUptodate(page);
1792 /* might as well read a page, it is fast enough */
1793 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1794 rc = cifs_readpage_worker(file, page, &offset);
1796 /* should we try using another file handle if there is one -
1797 how would we lock it to prevent close of that handle
1798 racing with this read?
1799 In any case this will be written out by commit_write */
1803 /* BB should we pass any errors back?
1804 e.g. if we do not have read access to the file */
1808 struct address_space_operations cifs_addr_ops = {
1809 .readpage = cifs_readpage,
1810 .readpages = cifs_readpages,
1811 .writepage = cifs_writepage,
1812 #ifdef CONFIG_CIFS_EXPERIMENTAL
1813 .writepages = cifs_writepages,
1815 .prepare_write = cifs_prepare_write,
1816 .commit_write = cifs_commit_write,
1817 .set_page_dirty = __set_page_dirty_nobuffers,
1818 /* .sync_page = cifs_sync_page, */