        serv->sv_program   = prog;
        serv->sv_nrthreads = 1;
        serv->sv_stats     = prog->pg_stats;
-       serv->sv_bufsz     = bufsize? bufsize : 4096;
+       if (bufsize > RPCSVC_MAXPAYLOAD)
+               bufsize = RPCSVC_MAXPAYLOAD;
+       serv->sv_max_payload = bufsize? bufsize : 4096;
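+       /* sv_max_mesg: the largest message we will handle on the wire;
+        * the payload plus an extra page of headroom, rounded up to a
+        * whole number of pages */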
+       serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
        serv->sv_shutdown  = shutdown;
        xdrsize = 0;
        while (prog) {
        int pages;
        int arghi;
        
-       if (size > RPCSVC_MAXPAYLOAD)
-               size = RPCSVC_MAXPAYLOAD;
-       pages = 2 + (size+ PAGE_SIZE -1) / PAGE_SIZE;
+       pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
+                                      * We assume one of them is at most one page
+                                      */
        arghi = 0;
        BUG_ON(pages > RPCSVC_MAXPAGES);
        while (pages) {
 
        if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
         || !(rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
-        || !svc_init_buffer(rqstp, serv->sv_bufsz))
+        || !svc_init_buffer(rqstp, serv->sv_max_mesg))
                goto out_thread;
 
        serv->sv_nrthreads++;
 
        if (rqstp->rq_sock->sk_sock->type == SOCK_DGRAM)
                max = RPCSVC_MAXPAYLOAD_UDP;
-       if (rqstp->rq_server->sv_bufsz < max)
-               max = rqstp->rq_server->sv_bufsz;
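+       /* the server may have been created with a smaller buffer than
+        * the transport maximum; honour the smaller of the two */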
+       if (rqstp->rq_server->sv_max_payload < max)
+               max = rqstp->rq_server->sv_max_payload;
        return max;
 }
 EXPORT_SYMBOL_GPL(svc_max_payload);
 
        svsk->sk_pool = pool;
 
        set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
-       if (((atomic_read(&svsk->sk_reserved) + serv->sv_bufsz)*2
+       if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2
             > svc_sock_wspace(svsk))
            && !test_bit(SK_CLOSE, &svsk->sk_flags)
            && !test_bit(SK_CONN, &svsk->sk_flags)) {
                /* Don't enqueue while not enough space for reply */
                dprintk("svc: socket %p  no space, %d*2 > %ld, not enqueued\n",
-                       svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_bufsz,
+                       svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg,
                        svc_sock_wspace(svsk));
                svsk->sk_pool = NULL;
                clear_bit(SK_BUSY, &svsk->sk_flags);
                                rqstp, rqstp->rq_sock);
                rqstp->rq_sock = svsk;
                atomic_inc(&svsk->sk_inuse);
-               rqstp->rq_reserved = serv->sv_bufsz;
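+               /* reserve enough send-buffer space for a maximum-size reply */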
+               rqstp->rq_reserved = serv->sv_max_mesg;
                atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
                BUG_ON(svsk->sk_pool != pool);
                wake_up(&rqstp->rq_wait);
             * which will access the socket.
             */
            svc_sock_setbufsize(svsk->sk_sock,
-                               (serv->sv_nrthreads+3) * serv->sv_bufsz,
-                               (serv->sv_nrthreads+3) * serv->sv_bufsz);
+                               (serv->sv_nrthreads+3) * serv->sv_max_mesg,
+                               (serv->sv_nrthreads+3) * serv->sv_max_mesg);
 
        if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
                svc_sock_received(svsk);
         * svc_udp_recvfrom will re-adjust if necessary
         */
        svc_sock_setbufsize(svsk->sk_sock,
-                           3 * svsk->sk_server->sv_bufsz,
-                           3 * svsk->sk_server->sv_bufsz);
+                           3 * svsk->sk_server->sv_max_mesg,
+                           3 * svsk->sk_server->sv_max_mesg);
 
        set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
        set_bit(SK_CHNGBUF, &svsk->sk_flags);
                 * as soon as a complete request arrives.
                 */
                svc_sock_setbufsize(svsk->sk_sock,
-                                   (serv->sv_nrthreads+3) * serv->sv_bufsz,
-                                   3 * serv->sv_bufsz);
+                                   (serv->sv_nrthreads+3) * serv->sv_max_mesg,
+                                   3 * serv->sv_max_mesg);
 
        clear_bit(SK_DATA, &svsk->sk_flags);
 
                }
                svsk->sk_reclen &= 0x7fffffff;
                dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
-               if (svsk->sk_reclen > serv->sv_bufsz) {
+               if (svsk->sk_reclen > serv->sv_max_mesg) {
                        printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n",
                               (unsigned long) svsk->sk_reclen);
                        goto err_delete;
                 * svc_tcp_recvfrom will re-adjust if necessary
                 */
                svc_sock_setbufsize(svsk->sk_sock,
-                                   3 * svsk->sk_server->sv_bufsz,
-                                   3 * svsk->sk_server->sv_bufsz);
+                                   3 * svsk->sk_server->sv_max_mesg,
+                                   3 * svsk->sk_server->sv_max_mesg);
 
                set_bit(SK_CHNGBUF, &svsk->sk_flags);
                set_bit(SK_DATA, &svsk->sk_flags);
 
 
        /* now allocate needed pages.  If we get a failure, sleep briefly */
-       pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE;
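+       /* sv_max_mesg is a multiple of PAGE_SIZE, so this is enough pages
+        * for the largest message plus one spare page */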
+       pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
        for (i=0; i < pages ; i++)
                while (rqstp->rq_pages[i] == NULL) {
                        struct page *p = alloc_page(GFP_KERNEL);
        if ((svsk = svc_sock_dequeue(pool)) != NULL) {
                rqstp->rq_sock = svsk;
                atomic_inc(&svsk->sk_inuse);
-               rqstp->rq_reserved = serv->sv_bufsz;    
+               rqstp->rq_reserved = serv->sv_max_mesg;
                atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
        } else {
                /* No data pending. Go to sleep */