        if (vp) {
                vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
-               if (sync)
+               if (sync) {
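+                       /* Wait for any outstanding data I/O before flushing the inode */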
+                       filemap_fdatawait(inode->i_mapping);
                        flags |= FLUSH_SYNC;
+               }
                error = bhv_vop_iflush(vp, flags);
                if (error == EAGAIN)
                        error = sync? bhv_vop_iflush(vp, flags | FLUSH_LOG) : 0;
 
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return XFS_ERROR(EIO);
 
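+       /* fdatasync: wait for all outstanding data I/O to complete first */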
+       if (flag & FSYNC_DATA)
+               filemap_fdatawait(vn_to_inode(XFS_ITOV(ip))->i_mapping);
+
        /*
         * We always need to make sure that the required inode state
         * is safe on disk.  The vnode might be clean but because
                        sync_lsn = log->l_last_sync_lsn;
                        GRANT_UNLOCK(log, s);
 
-                       if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) <= 0))
-                               return 0;
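+                       /* The inode's last change has not reached disk; push the log up to its LSN */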
+                       if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) > 0)) {
+                               if (flags & FLUSH_SYNC)
+                                       log_flags |= XFS_LOG_SYNC;
+                               error = xfs_log_force(mp, iip->ili_last_lsn, log_flags);
+                               if (error)
+                                       return error;
+                       }
 
-                       if (flags & FLUSH_SYNC)
-                               log_flags |= XFS_LOG_SYNC;
-                       return xfs_log_force(mp, iip->ili_last_lsn, log_flags);
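+                       /* If the inode core has no unlogged changes, there is nothing more to do */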
+                       if (ip->i_update_core == 0)
+                               return 0;
                }
        }
 
        if (flags & FLUSH_INODE) {
                int     flush_flags;
 
-               if (xfs_ipincount(ip))
-                       return EAGAIN;
-
                if (flags & FLUSH_SYNC) {
                        xfs_ilock(ip, XFS_ILOCK_SHARED);
                        xfs_iflock(ip);