Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
author    Linus Torvalds <torvalds@woody.osdl.org>
Wed, 10 Jan 2007 16:30:22 +0000 (08:30 -0800)
committer Linus Torvalds <torvalds@woody.osdl.org>
Wed, 10 Jan 2007 16:30:22 +0000 (08:30 -0800)
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [IPV4] devinet: inetdev_init out label moved after RCU assignment
  [INET]: style updates for the inet_sock->is_icsk assignment fix
  [SCTP]: Fix err_hdr assignment in sctp_init_cause.
  [NETFILTER]: tcp conntrack: fix IP_CT_TCP_FLAG_CLOSE_INIT value
  [NETFILTER]: nf_nat: fix hanging connections when loading the NAT module
  [NETFILTER]: arp_tables: fix userspace compilation
  [NETFILTER]: nf_conntrack_ipv6: fix crash when handling fragments

14 files changed:
MAINTAINERS
drivers/infiniband/core/cma.c
drivers/infiniband/core/ucma.c
drivers/infiniband/hw/ehca/ehca_hca.c
drivers/infiniband/hw/ehca/ehca_irq.c
drivers/infiniband/hw/ehca/ehca_iverbs.h
drivers/infiniband/hw/ehca/ehca_main.c
drivers/infiniband/hw/ehca/ehca_mrmw.c
drivers/infiniband/hw/ehca/ehca_qp.c
drivers/infiniband/hw/mthca/mthca_cq.c
drivers/infiniband/hw/mthca/mthca_memfree.c
drivers/infiniband/hw/mthca/mthca_qp.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iser_initiator.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 4ccc5fa06d092cf83d8034293050bd3c1973bead..49e4f554d2117d21e3ce96e50bb6b1a5a78c0de7 100644
@@ -2636,6 +2636,12 @@ M:       promise@pnd-pc.demon.co.uk
 W:     http://www.pnd-pc.demon.co.uk/promise/
 S:     Maintained
 
+PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER
+P:     Mikael Pettersson
+M:     mikpe@it.uu.se
+L:     linux-ide@vger.kernel.org
+S:     Maintained
+
 PS3 PLATFORM SUPPORT
 P:     Geoff Levand
 M:     geoffrey.levand@am.sony.com
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 533193d4e5dffa177f7747f69d33456c2cb7e764..9e0ab048c878eba5b07d57501762f83c3f1f2356 100644
@@ -1088,10 +1088,21 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
                *sin = iw_event->local_addr;
                sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
                *sin = iw_event->remote_addr;
-               if (iw_event->status)
-                       event.event = RDMA_CM_EVENT_REJECTED;
-               else
+               switch (iw_event->status) {
+               case 0:
                        event.event = RDMA_CM_EVENT_ESTABLISHED;
+                       break;
+               case -ECONNRESET:
+               case -ECONNREFUSED:
+                       event.event = RDMA_CM_EVENT_REJECTED;
+                       break;
+               case -ETIMEDOUT:
+                       event.event = RDMA_CM_EVENT_UNREACHABLE;
+                       break;
+               default:
+                       event.event = RDMA_CM_EVENT_CONNECT_ERROR;
+                       break;
+               }
                break;
        case IW_CM_EVENT_ESTABLISHED:
                event.event = RDMA_CM_EVENT_ESTABLISHED;
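Before this hunk, cma_iw_handler() collapsed every nonzero iw_event->status into RDMA_CM_EVENT_REJECTED. The switch now reserves REJECTED for peer rejection (-ECONNRESET, -ECONNREFUSED), reports -ETIMEDOUT as UNREACHABLE, and maps anything else to CONNECT_ERROR. A compilable sketch of the same mapping, with stand-in enum values rather than the kernel's definitions:

    #include <errno.h>

    /* Stand-in for the kernel's enum rdma_cm_event_type. */
    enum cm_event {
            CM_ESTABLISHED,
            CM_REJECTED,
            CM_UNREACHABLE,
            CM_CONNECT_ERROR,
    };

    /* Map an iWARP connect status (0 on success, else a negative errno)
     * to the event reported to the RDMA CM consumer, as in the hunk above. */
    static enum cm_event iw_status_to_event(int status)
    {
            switch (status) {
            case 0:
                    return CM_ESTABLISHED;
            case -ECONNRESET:
            case -ECONNREFUSED:
                    return CM_REJECTED;
            case -ETIMEDOUT:
                    return CM_UNREACHABLE;
            default:
                    return CM_CONNECT_ERROR;
            }
    }
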
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 81a5cdc5733aa4f5372d2fbaa173489f132cd1c9..e2e8d329b44389cd04230c55853db3f55f75a07b 100644
@@ -209,10 +209,21 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
        if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                if (!ctx->backlog) {
                        ret = -EDQUOT;
+                       kfree(uevent);
                        goto out;
                }
                ctx->backlog--;
+       } else if (!ctx->uid) {
+               /*
+                * We ignore events for new connections until userspace has set
+                * their context.  This can only happen if an error occurs on a
+                * new connection before the user accepts it.  This is okay,
+                * since the accept will just fail later.
+                */
+               kfree(uevent);
+               goto out;
        }
+
        list_add_tail(&uevent->list, &ctx->file->event_list);
        wake_up_interruptible(&ctx->file->poll_wait);
 out:
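Both early exits added here release the uevent allocated earlier in ucma_event_handler(): the pre-existing -EDQUOT path simply leaked it before this change, and the new !ctx->uid path drops events for connections userspace has not yet claimed, as the in-line comment explains. A minimal userspace sketch of the corrected pattern (handle_event() and its arguments are hypothetical stand-ins):

    #include <errno.h>
    #include <stdlib.h>

    struct uevent { int data; };

    /* Sketch: every exit taken before the event is queued must free it. */
    static int handle_event(int backlog_left, int have_uid)
    {
            struct uevent *uevent = calloc(1, sizeof(*uevent));
            if (!uevent)
                    return -ENOMEM;

            if (!backlog_left) {
                    free(uevent);           /* was missing before this fix */
                    return -EDQUOT;
            }
            if (!have_uid) {
                    /* Drop events for connections userspace hasn't claimed
                     * yet; the later accept simply fails. */
                    free(uevent);
                    return 0;
            }

            /* ... append uevent to the file's event list, wake pollers ... */
            return 0;
    }
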
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index e1b618c5f685079cd1c9e8245225ee3993372902..b7be950ab47c3a700737feae370d8bd09a2d2b69 100644
@@ -50,7 +50,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
                                              ib_device);
        struct hipz_query_hca *rblock;
 
-       rblock = ehca_alloc_fw_ctrlblock();
+       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
@@ -110,7 +110,7 @@ int ehca_query_port(struct ib_device *ibdev,
                                              ib_device);
        struct hipz_query_port *rblock;
 
-       rblock = ehca_alloc_fw_ctrlblock();
+       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
@@ -179,7 +179,7 @@ int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
                return -EINVAL;
        }
 
-       rblock = ehca_alloc_fw_ctrlblock();
+       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!rblock) {
                ehca_err(&shca->ib_device,  "Can't allocate rblock memory.");
                return -ENOMEM;
@@ -212,7 +212,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
                return -EINVAL;
        }
 
-       rblock = ehca_alloc_fw_ctrlblock();
+       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index c3ea746e9045669852266ecb17baa2b7208c8c1c..e7209afb4250c1bc7261513b897e606de53e4a12 100644
@@ -138,7 +138,7 @@ int ehca_error_data(struct ehca_shca *shca, void *data,
        u64 *rblock;
        unsigned long block_count;
 
-       rblock = ehca_alloc_fw_ctrlblock();
+       rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
                ret = -ENOMEM;
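This caller is what motivates the new gfp_t parameter threaded through all the ehca hunks: ehca_error_data() can run in interrupt context, where a GFP_KERNEL allocation is illegal because it may sleep, so it passes GFP_ATOMIC while the sleepable query and setup paths keep GFP_KERNEL. A compilable illustration of the convention, using userspace stand-ins for the kernel types and flags:

    #include <stdlib.h>

    typedef unsigned int gfp_t;
    #define GFP_KERNEL 0x1  /* allocation may sleep: process context only */
    #define GFP_ATOMIC 0x2  /* never sleeps: required in interrupt context */

    /* Allocation helpers take gfp_t instead of hard-coding GFP_KERNEL so
     * that atomic-context callers remain legal. */
    static void *alloc_ctrlblock(gfp_t flags)
    {
            (void)flags;            /* a real allocator branches on this */
            return calloc(1, 4096);
    }

    static void query_port(void)        /* process context, may sleep */
    {
            free(alloc_ctrlblock(GFP_KERNEL));
    }

    static void irq_error_data(void)    /* interrupt context, must not */
    {
            free(alloc_ctrlblock(GFP_ATOMIC));
    }
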
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 3720e3032cceed84e22a514280e36cbbcc030e62..cd7789f0d08ecbb99a1a9da168b58d4d78182fab 100644
@@ -180,10 +180,10 @@ int ehca_mmap_register(u64 physical,void **mapped,
 int ehca_munmap(unsigned long addr, size_t len);
 
 #ifdef CONFIG_PPC_64K_PAGES
-void *ehca_alloc_fw_ctrlblock(void);
+void *ehca_alloc_fw_ctrlblock(gfp_t flags);
 void ehca_free_fw_ctrlblock(void *ptr);
 #else
-#define ehca_alloc_fw_ctrlblock() ((void *) get_zeroed_page(GFP_KERNEL))
+#define ehca_alloc_fw_ctrlblock(flags) ((void *) get_zeroed_page(flags))
 #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
 #endif
 
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index cc47e4c13a180897fa3aee03a589d197b48367db..6574fbbaead5e8faf0f65d7e98ca3c75c4bede05 100644
@@ -106,9 +106,9 @@ static struct timer_list poll_eqs_timer;
 #ifdef CONFIG_PPC_64K_PAGES
 static struct kmem_cache *ctblk_cache = NULL;
 
-void *ehca_alloc_fw_ctrlblock(void)
+void *ehca_alloc_fw_ctrlblock(gfp_t flags)
 {
-       void *ret = kmem_cache_zalloc(ctblk_cache, GFP_KERNEL);
+       void *ret = kmem_cache_zalloc(ctblk_cache, flags);
        if (!ret)
                ehca_gen_err("Out of memory for ctblk");
        return ret;
@@ -206,7 +206,7 @@ int ehca_sense_attributes(struct ehca_shca *shca)
        u64 h_ret;
        struct hipz_query_hca *rblock;
 
-       rblock = ehca_alloc_fw_ctrlblock();
+       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!rblock) {
                ehca_gen_err("Cannot allocate rblock memory.");
                return -ENOMEM;
@@ -258,7 +258,7 @@ static int init_node_guid(struct ehca_shca *shca)
        int ret = 0;
        struct hipz_query_hca *rblock;
 
-       rblock = ehca_alloc_fw_ctrlblock();
+       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
@@ -469,7 +469,7 @@ static ssize_t  ehca_show_##name(struct device *dev,                       \
                                                                           \
        shca = dev->driver_data;                                           \
                                                                           \
-       rblock = ehca_alloc_fw_ctrlblock();                                \
+       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);                      \
        if (!rblock) {                                                     \
                dev_err(dev, "Can't allocate rblock memory.");             \
                return 0;                                                  \
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 0a5e2214cc5f72b30ed36d0e0e95384fea4d6b0f..cfb362a1029caa979394fed9bc8fc54be60812ca 100644
@@ -1013,7 +1013,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
        u32 i;
        u64 *kpage;
 
-       kpage = ehca_alloc_fw_ctrlblock();
+       kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!kpage) {
                ehca_err(&shca->ib_device, "kpage alloc failed");
                ret = -ENOMEM;
@@ -1124,7 +1124,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
        ehca_mrmw_map_acl(acl, &hipz_acl);
        ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
 
-       kpage = ehca_alloc_fw_ctrlblock();
+       kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!kpage) {
                ehca_err(&shca->ib_device, "kpage alloc failed");
                ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index c6c9cef203e3361fb2868222cb4a405487b84087..34b85556d01e0d55b8524f62ac1f65aef2be63e5 100644
@@ -807,7 +807,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
        unsigned long spl_flags = 0;
 
        /* do query_qp to obtain current attr values */
-       mqpcb = ehca_alloc_fw_ctrlblock();
+       mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!mqpcb) {
                ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
                         "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
@@ -1273,7 +1273,7 @@ int ehca_query_qp(struct ib_qp *qp,
                return -EINVAL;
        }
 
-       qpcb = ehca_alloc_fw_ctrlblock();
+       qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!qpcb) {
                ehca_err(qp->device,"Out of memory for qpcb "
                         "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 283d50b76c3ddceb274d8445381d5ba569b28884..1159c8a0f2c5211d3b9b6f9f2a2b59ea8de6872b 100644
@@ -54,6 +54,10 @@ enum {
        MTHCA_CQ_ENTRY_SIZE = 0x20
 };
 
+enum {
+       MTHCA_ATOMIC_BYTE_LEN = 8
+};
+
 /*
  * Must be packed because start is 64 bits but only aligned to 32 bits.
  */
@@ -599,11 +603,11 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
                        break;
                case MTHCA_OPCODE_ATOMIC_CS:
                        entry->opcode    = IB_WC_COMP_SWAP;
-                       entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
+                       entry->byte_len  = MTHCA_ATOMIC_BYTE_LEN;
                        break;
                case MTHCA_OPCODE_ATOMIC_FA:
                        entry->opcode    = IB_WC_FETCH_ADD;
-                       entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
+                       entry->byte_len  = MTHCA_ATOMIC_BYTE_LEN;
                        break;
                case MTHCA_OPCODE_BIND_MW:
                        entry->opcode    = IB_WC_BIND_MW;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 15cc2f6eb4754fa83a497fec55f381371c466000..6b19645d946c31af877c28d5c8d8c9ddfa4ec94f 100644
@@ -232,7 +232,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj)
 
        list_for_each_entry(chunk, &icm->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
-                       if (chunk->mem[i].length >= offset) {
+                       if (chunk->mem[i].length > offset) {
                                page = chunk->mem[i].page;
                                goto out;
                        }
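The scan in mthca_table_find() subtracts each scatterlist entry's length from offset until it reaches the entry containing the target byte. With >=, an offset exactly equal to an entry's length matched that entry and yielded a pointer one past the end of its page; strict > selects the following entry instead. A self-contained model of the corrected walk:

    #include <stdio.h>

    struct seg { int length; const char *name; };

    /* Mirrors the mthca_table_find() walk: 'offset' shrinks as segments
     * are passed; strict '>' picks the segment that contains the byte. */
    static const char *find_seg(const struct seg *s, int n, int offset)
    {
            for (int i = 0; i < n; ++i) {
                    if (s[i].length > offset)   /* '>=' mis-hit the boundary */
                            return s[i].name;
                    offset -= s[i].length;
            }
            return NULL;
    }

    int main(void)
    {
            const struct seg segs[] = { { 4096, "page0" }, { 4096, "page1" } };

            /* Byte 4096 is the first byte of page1; the old '>=' test
             * returned page0 with an in-page offset of 4096. */
            printf("%s\n", find_seg(segs, 2, 4096));    /* prints page1 */
            return 0;
    }
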
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index d844a2569b471f1dcfc5b63ddacbc31b45db22df..5f5214c0337d18383ad6f2272b99fbd47f68a69b 100644
@@ -429,13 +429,18 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
 {
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
-       int err;
-       struct mthca_mailbox *mailbox;
+       int err = 0;
+       struct mthca_mailbox *mailbox = NULL;
        struct mthca_qp_param *qp_param;
        struct mthca_qp_context *context;
        int mthca_state;
        u8 status;
 
+       if (qp->state == IB_QPS_RESET) {
+               qp_attr->qp_state = IB_QPS_RESET;
+               goto done;
+       }
+
        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
@@ -454,7 +459,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
        mthca_state = be32_to_cpu(context->flags) >> 28;
 
        qp_attr->qp_state            = to_ib_qp_state(mthca_state);
-       qp_attr->cur_qp_state        = qp_attr->qp_state;
        qp_attr->path_mtu            = context->mtu_msgmax >> 5;
        qp_attr->path_mig_state      =
                to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
@@ -464,11 +468,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
        qp_attr->dest_qp_num         = be32_to_cpu(context->remote_qpn) & 0xffffff;
        qp_attr->qp_access_flags     =
                to_ib_qp_access_flags(be32_to_cpu(context->params2));
-       qp_attr->cap.max_send_wr     = qp->sq.max;
-       qp_attr->cap.max_recv_wr     = qp->rq.max;
-       qp_attr->cap.max_send_sge    = qp->sq.max_gs;
-       qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
-       qp_attr->cap.max_inline_data = qp->max_inline_data;
 
        if (qp->transport == RC || qp->transport == UC) {
                to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
@@ -495,7 +494,16 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
        qp_attr->retry_cnt          = (be32_to_cpu(context->params1) >> 16) & 0x7;
        qp_attr->rnr_retry          = context->pri_path.rnr_retry >> 5;
        qp_attr->alt_timeout        = context->alt_path.ackto >> 3;
-       qp_init_attr->cap           = qp_attr->cap;
+
+done:
+       qp_attr->cur_qp_state        = qp_attr->qp_state;
+       qp_attr->cap.max_send_wr     = qp->sq.max;
+       qp_attr->cap.max_recv_wr     = qp->rq.max;
+       qp_attr->cap.max_send_sge    = qp->sq.max_gs;
+       qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
+       qp_attr->cap.max_inline_data = qp->max_inline_data;
+
+       qp_init_attr->cap            = qp_attr->cap;
 
 out:
        mthca_free_mailbox(dev, mailbox);
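A QP in the RESET state has no context in the HCA firmware, so the mailbox query is now skipped for it; the attributes the driver tracks in software (the cur_qp_state copy and the sq/rq capabilities) move to a shared done: label so that both paths report them. A simplified model of the reworked control flow, with hypothetical field names:

    enum qp_state { QPS_RESET, QPS_RTS };

    struct qp      { enum qp_state state; int max_send_wr, max_recv_wr; };
    struct qp_attr { enum qp_state state; int max_send_wr, max_recv_wr; };

    /* Mirrors the reworked mthca_query_qp(): RESET QPs skip the firmware
     * query, while software-tracked attributes are filled in either way. */
    static int query_qp(const struct qp *qp, struct qp_attr *attr)
    {
            if (qp->state == QPS_RESET) {
                    attr->state = QPS_RESET;
                    goto done;
            }

            /* ... a firmware mailbox query would populate the
             * hardware-held attributes here, including attr->state ... */
            attr->state = qp->state;

    done:
            attr->max_send_wr = qp->max_send_wr;    /* tracked in software */
            attr->max_recv_wr = qp->max_recv_wr;
            return 0;
    }
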
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9b2041e25d593978c183cedd04999368dfeb140b..dd221eda3ea63c9a648146d38d5a9269172f9485 100644
@@ -177,7 +177,7 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
         * - if yes, the mtask is recycled at iscsi_complete_pdu
         * - if no,  the mtask is recycled at iser_snd_completion
         */
-       if (error && error != -EAGAIN)
+       if (error && error != -ENOBUFS)
                iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 
        return error;
@@ -241,7 +241,7 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
                error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
 
  iscsi_iser_ctask_xmit_exit:
-       if (error && error != -EAGAIN)
+       if (error && error != -ENOBUFS)
                iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
        return error;
 }
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index e73c87b9be43cad3b1c0985c30040da433012912..0a7d1ab60e6d81832e70e037255dd0cbb27efa1b 100644
@@ -304,18 +304,14 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
 static int
 iser_check_xmit(struct iscsi_conn *conn, void *task)
 {
-       int rc = 0;
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
 
-       write_lock_bh(conn->recv_lock);
        if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
            ISER_QP_MAX_REQ_DTOS) {
-               iser_dbg("%ld can't xmit task %p, suspending tx\n",jiffies,task);
-               set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-               rc = -EAGAIN;
+               iser_dbg("%ld can't xmit task %p\n",jiffies,task);
+               return -ENOBUFS;
        }
-       write_unlock_bh(conn->recv_lock);
-       return rc;
+       return 0;
 }
 
 
@@ -340,7 +336,7 @@ int iser_send_command(struct iscsi_conn     *conn,
                return -EPERM;
        }
        if (iser_check_xmit(conn, ctask))
-               return -EAGAIN;
+               return -ENOBUFS;
 
        edtl = ntohl(hdr->data_length);
 
@@ -426,7 +422,7 @@ int iser_send_data_out(struct iscsi_conn     *conn,
        }
 
        if (iser_check_xmit(conn, ctask))
-               return -EAGAIN;
+               return -ENOBUFS;
 
        itt = ntohl(hdr->itt);
        data_seg_len = ntoh24(hdr->dlength);
@@ -498,7 +494,7 @@ int iser_send_control(struct iscsi_conn *conn,
        }
 
        if (iser_check_xmit(conn,mtask))
-               return -EAGAIN;
+               return -ENOBUFS;
 
        /* build the tx desc regd header and add it to the tx desc dto */
        mdesc->type = ISCSI_TX_CONTROL;
@@ -605,6 +601,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
        struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
        struct iscsi_conn      *conn = iser_conn->iscsi_conn;
        struct iscsi_mgmt_task *mtask;
+       int resume_tx = 0;
 
        iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
 
@@ -613,15 +610,16 @@ void iser_snd_completion(struct iser_desc *tx_desc)
        if (tx_desc->type == ISCSI_TX_DATAOUT)
                kmem_cache_free(ig.desc_cache, tx_desc);
 
+       if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
+           ISER_QP_MAX_REQ_DTOS)
+               resume_tx = 1;
+
        atomic_dec(&ib_conn->post_send_buf_count);
 
-       write_lock(conn->recv_lock);
-       if (conn->suspend_tx) {
+       if (resume_tx) {
                iser_dbg("%ld resuming tx\n",jiffies);
-               clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
                scsi_queue_work(conn->session->host, &conn->xmitwork);
        }
-       write_unlock(conn->recv_lock);
 
        if (tx_desc->type == ISCSI_TX_CONTROL) {
                /* this arithmetic is legal by libiscsi dd_data allocation */
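
Taken together, the iser hunks drop the recv_lock-protected suspend_tx bit in favor of a lock-free scheme: the send paths return -ENOBUFS when all ISER_QP_MAX_REQ_DTOS buffers are posted (and the iscsi_iser.c hunks keep -ENOBUFS from escalating to iscsi_conn_failure(), as -EAGAIN was special-cased before), while the completion handler kicks the xmit worker exactly when it observes the counter at its maximum before decrementing, i.e. when it frees the first slot of a full queue. A compilable sketch of that hand-off using C11 atomics (names are stand-ins for the kernel's):

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define MAX_REQ_DTOS 8              /* stand-in for ISER_QP_MAX_REQ_DTOS */

    static atomic_int post_send_buf_count;

    static void queue_xmit_work(void)   /* stand-in for scsi_queue_work() */
    {
            puts("resuming tx");
    }

    /* Xmit side: back off with -ENOBUFS when every send buffer is posted. */
    static int try_post(void)
    {
            if (atomic_load(&post_send_buf_count) == MAX_REQ_DTOS)
                    return -ENOBUFS;
            atomic_fetch_add(&post_send_buf_count, 1);
            return 0;
    }

    /* Completion side: only the completion that frees the first slot of a
     * full queue needs to kick the worker, mirroring the hunk above. */
    static void snd_completion(void)
    {
            int resume_tx = (atomic_load(&post_send_buf_count) == MAX_REQ_DTOS);

            atomic_fetch_sub(&post_send_buf_count, 1);
            if (resume_tx)
                    queue_xmit_work();
    }

    int main(void)
    {
            while (try_post() == 0)
                    ;                   /* fill the queue */
            snd_completion();           /* prints "resuming tx" once */
            return 0;
    }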