diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index c0c8d5b24a7d9cb5dd2c866e5db92b9f71ecefb7..6ccb54f104a3fff2ddc6e08746949a29f1cb5a1e 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
 #include <linux/interrupt.h>
 #include <linux/kref.h>
 #include <rdma/ib_pack.h>
+#include <rdma/ib_user_verbs.h>
 
-#include "ipath_layer.h"
+#include "ipath_kernel.h"
+
+#define IPATH_MAX_RDMA_ATOMIC  4
 
 #define QPN_MAX                 (1 << 24)
 #define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
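For reference, the bitmap sizing above works out as follows (a worked example assuming the common 4 KiB PAGE_SIZE, with each map entry tracking one page's worth of allocation bits):

    /*
     * bits per map entry = PAGE_SIZE * BITS_PER_BYTE = 4096 * 8 = 32768
     * QPNMAP_ENTRIES     = QPN_MAX / 32768 = (1 << 24) / 32768 = 512
     * i.e. 512 page-sized bitmaps cover the whole 24-bit QPN space.
     */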
@@ -58,6 +61,7 @@
  */
 #define IB_CQ_NONE     (IB_CQ_NEXT_COMP + 1)
 
+/* AETH NAK opcode values */
 #define IB_RNR_NAK                     0x20
 #define IB_NAK_PSN_ERROR               0x60
 #define IB_NAK_INVALID_REQUEST         0x61
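These opcodes live in the top byte (the syndrome) of the AETH; the low 24 bits carry the MSN. A minimal sketch, not taken from the driver, of extracting that byte for comparison against them:

    /* Illustrative helper: pull the 8-bit syndrome out of a received AETH. */
    static inline u8 aeth_syndrome(__be32 aeth)
    {
            /* AETH layout: bits 31:24 syndrome, bits 23:0 MSN */
            return be32_to_cpu(aeth) >> 24;
    }

    /* e.g. (aeth_syndrome(a) & 0xE0) == IB_RNR_NAK tests for an RNR NAK,
     * since the RNR timer value occupies the low 5 bits of the syndrome. */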
@@ -65,6 +69,7 @@
 #define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
 #define IB_NAK_INVALID_RD_REQUEST      0x64
 
+/* Flags for checking QP state (see ib_ipath_state_ops[]) */
 #define IPATH_POST_SEND_OK             0x01
 #define IPATH_POST_RECV_OK             0x02
 #define IPATH_PROCESS_RECV_OK          0x04
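Per the new comment, these flags are meant to be tested against a per-state mask table. A minimal sketch of the intended check, assuming ib_ipath_state_ops[] is indexed by the QP's current state (a qp->state field not shown in this hunk):

    /* Illustrative only: nonzero if posting a send WR is allowed right now. */
    static int example_can_post_send(const struct ipath_qp *qp)
    {
            return ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK;
    }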
@@ -89,7 +94,7 @@ struct ib_reth {
 } __attribute__ ((packed));
 
 struct ib_atomic_eth {
-       __be64 vaddr;
+       __be32 vaddr[2];        /* unaligned so access as 2 32-bit words */
        __be32 rkey;
        __be64 swap_data;
        __be64 compare_data;
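Since vaddr is now declared as two 32-bit words (the field is not 8-byte aligned in the packet), a consumer has to reassemble the 64-bit remote address explicitly. A minimal sketch; the helper name is illustrative:

    static inline u64 ib_ateth_vaddr(const struct ib_atomic_eth *ateth)
    {
            /* word 0 holds the most significant half of the big-endian value */
            return ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
                    be32_to_cpu(ateth->vaddr[1]);
    }

The atomic_ack_eth[2] change in the following hunk is read back the same way.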
@@ -108,7 +113,7 @@ struct ipath_other_headers {
                } rc;
                struct {
                        __be32 aeth;
-                       __be64 atomic_ack_eth;
+                       __be32 atomic_ack_eth[2];
                } at;
                __be32 imm_data;
                __be32 aeth;
@@ -170,12 +175,12 @@ struct ipath_ah {
  * this as its vm_private_data.
  */
 struct ipath_mmap_info {
-       struct ipath_mmap_info *next;
+       struct list_head pending_mmaps;
        struct ib_ucontext *context;
        void *obj;
+       __u64 offset;
        struct kref ref;
        unsigned size;
-       unsigned mmap_cnt;
 };
 
 /*
@@ -186,7 +191,11 @@ struct ipath_mmap_info {
 struct ipath_cq_wc {
        u32 head;               /* index of next entry to fill */
        u32 tail;               /* index of next ib_poll_cq() entry */
-       struct ib_wc queue[1];  /* this is actually size ibcq.cqe + 1 */
+       union {
+               /* these are actually size ibcq.cqe + 1 */
+               struct ib_uverbs_wc uqueue[0];
+               struct ib_wc kqueue[0];
+       };
 };
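With the queue now a union of user-visible (ib_uverbs_wc) and kernel (ib_wc) entry types, the allocation size depends on who will consume the CQ, presumably decided by whether userspace udata was supplied at create time. A minimal sketch of the sizing; user_mapped and entries are illustrative names:

    static u32 example_cq_wc_size(int entries, int user_mapped)
    {
            u32 sz = sizeof(struct ipath_cq_wc);

            if (user_mapped)        /* ring will be mmap()ed by userspace */
                    sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
            else                    /* ring is polled from the kernel */
                    sz += sizeof(struct ib_wc) * (entries + 1);
            return sz;
    }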
 
 /*
@@ -238,7 +247,7 @@ struct ipath_mregion {
  */
 struct ipath_sge {
        struct ipath_mregion *mr;
-       void *vaddr;            /* current pointer into the segment */
+       void *vaddr;            /* kernel virtual address of segment */
        u32 sge_length;         /* length of the SGE */
        u32 length;             /* remaining length of the segment */
        u16 m;                  /* current index: mr->map[m] */
@@ -248,6 +257,7 @@ struct ipath_sge {
 /* Memory region */
 struct ipath_mr {
        struct ib_mr ibmr;
+       struct ib_umem *umem;
        struct ipath_mregion mr;        /* must be last */
 };
 
@@ -311,6 +321,20 @@ struct ipath_sge_state {
        u8 num_sge;
 };
 
+/*
+ * This structure holds the information that the send tasklet needs
+ * to send a RDMA read response or atomic operation.
+ */
+struct ipath_ack_entry {
+       u8 opcode;
+       u8 sent;
+       u32 psn;
+       union {
+               struct ipath_sge_state rdma_sge;
+               u64 atomic_data;
+       };
+};
+
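s_ack_queue[] in struct ipath_qp below has IPATH_MAX_RDMA_ATOMIC + 1 slots, so the indices that walk it (r_head_ack_queue and s_tail_ack_queue) wrap modulo that size. A minimal sketch, not the driver's exact code, of the index advance:

    /* Illustrative only: next slot in the responder's ack queue ring. */
    static inline u8 next_ack_slot(u8 n)
    {
            return (u8) ((n + 1) % (IPATH_MAX_RDMA_ATOMIC + 1));
    }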
 /*
  * Variables prefixed with s_ are for the requester (sender).
  * Variables prefixed with r_ are for the responder (receiver).
@@ -333,24 +357,24 @@ struct ipath_qp {
        struct ipath_mmap_info *ip;
        struct ipath_sge_state *s_cur_sge;
        struct ipath_sge_state s_sge;   /* current send request data */
-       /* current RDMA read send data */
-       struct ipath_sge_state s_rdma_sge;
+       struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
+       struct ipath_sge_state s_ack_rdma_sge;
+       struct ipath_sge_state s_rdma_read_sge;
        struct ipath_sge_state r_sge;   /* current receive data */
        spinlock_t s_lock;
-       unsigned long s_flags;
+       unsigned long s_busy;
        u32 s_hdrwords;         /* size of s_hdr in 32 bit words */
        u32 s_cur_size;         /* size of send packet in bytes */
        u32 s_len;              /* total length of s_sge */
-       u32 s_rdma_len;         /* total length of s_rdma_sge */
+       u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
        u32 s_next_psn;         /* PSN for next request */
        u32 s_last_psn;         /* last response PSN processed */
        u32 s_psn;              /* current packet sequence number */
-       u32 s_ack_psn;          /* PSN for RDMA_READ */
+       u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
+       u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
        u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */
        u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
        u64 r_wr_id;            /* ID for current receive WQE */
-       u64 r_atomic_data;      /* data for last atomic op */
-       u32 r_atomic_psn;       /* PSN of last atomic op */
        u32 r_len;              /* total length of r_sge */
        u32 r_rcv_len;          /* receive data len processed */
        u32 r_psn;              /* expected rcv packet sequence number */
@@ -360,12 +384,13 @@ struct ipath_qp {
        u8 s_ack_state;         /* opcode of packet to ACK */
        u8 s_nak_state;         /* non-zero if NAK is pending */
        u8 r_state;             /* opcode of last packet received */
-       u8 r_ack_state;         /* opcode of packet to ACK */
        u8 r_nak_state;         /* non-zero if NAK is pending */
        u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
        u8 r_reuse_sge;         /* for UC receive errors */
        u8 r_sge_inx;           /* current index into sg_list */
        u8 r_wrid_valid;        /* r_wrid set but CQ entry not yet made */
+       u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
+       u8 r_head_ack_queue;    /* index into s_ack_queue[] */
        u8 qp_access_flags;
        u8 s_max_sge;           /* size of s_wq->sg_list */
        u8 s_retry_cnt;         /* number of times to retry */
@@ -374,6 +399,10 @@ struct ipath_qp {
        u8 s_rnr_retry;         /* requester RNR retry counter */
        u8 s_wait_credit;       /* limit number of unacked packets sent */
        u8 s_pkey_index;        /* PKEY index to use */
+       u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
+       u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
+       u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
+       u8 s_flags;
        u8 timeout;             /* Timeout for this QP */
        enum ib_mtu path_mtu;
        u32 remote_qpn;
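A minimal sketch, not the driver's exact logic, of how the requester-side counters above would bound outstanding RDMA READ/atomic operations (IPATH_S_RDMAR_PENDING is defined further down in this header):

    /* Illustrative only: nonzero if a new RDMA READ/atomic may start now. */
    static int example_may_start_rd_atomic(struct ipath_qp *qp)
    {
            if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
                    qp->s_flags |= IPATH_S_RDMAR_PENDING;   /* resume later */
                    return 0;
            }
            qp->s_num_rd_atomic++;
            return 1;
    }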
@@ -386,17 +415,23 @@ struct ipath_qp {
        u32 s_ssn;              /* SSN of tail entry */
        u32 s_lsn;              /* limit sequence number (credit) */
        struct ipath_swqe *s_wq;        /* send work queue */
+       struct ipath_swqe *s_wqe;
        struct ipath_rq r_rq;           /* receive work queue */
        struct ipath_sge r_sg_list[0];  /* verified SGEs */
 };
 
+/* Bit definition for s_busy. */
+#define IPATH_S_BUSY           0
+
 /*
  * Bit definitions for s_flags.
  */
-#define IPATH_S_BUSY           0
-#define IPATH_S_SIGNAL_REQ_WR  1
+#define IPATH_S_SIGNAL_REQ_WR  0x01
+#define IPATH_S_FENCE_PENDING  0x02
+#define IPATH_S_RDMAR_PENDING  0x04
+#define IPATH_S_ACK_PENDING    0x08
 
-#define IPATH_PSN_CREDIT       2048
+#define IPATH_PSN_CREDIT       512
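A minimal sketch of the intended split: s_busy (in struct ipath_qp above) carries a single bit claimed with atomic bitops by the send tasklet, while s_flags is an ordinary mask of the pending-work conditions defined here. The function name is illustrative:

    static void example_do_send(struct ipath_qp *qp)
    {
            if (test_and_set_bit(IPATH_S_BUSY, &qp->s_busy))
                    return;         /* the send engine is already running */

            /* ... build and post packets for this QP ... */

            clear_bit(IPATH_S_BUSY, &qp->s_busy);
    }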
 
 /*
  * Since struct ipath_swqe is not a fixed size, we can't simply index into
@@ -459,13 +494,14 @@ struct ipath_opcode_stats {
 
 struct ipath_ibdev {
        struct ib_device ibdev;
-       struct list_head dev_list;
        struct ipath_devdata *dd;
-       struct ipath_mmap_info *pending_mmaps;
+       struct list_head pending_mmaps;
+       spinlock_t mmap_offset_lock;
+       u32 mmap_offset;
        int ib_unit;            /* This is the device number */
        u16 sm_lid;             /* in host order */
        u8 sm_sl;
-       u8 mkeyprot_resv_lmc;
+       u8 mkeyprot;
        /* non-zero when timer is set */
        unsigned long mkey_lease_timeout;
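A minimal sketch, assuming offsets for mmap()able objects are handed out page-aligned under the new mmap_offset_lock so each object on pending_mmaps gets a unique cookie (ip is a struct ipath_mmap_info, size the object size in bytes):

    static void example_assign_mmap_offset(struct ipath_ibdev *dev,
                                           struct ipath_mmap_info *ip,
                                           u32 size)
    {
            spin_lock_irq(&dev->mmap_offset_lock);
            ip->offset = dev->mmap_offset;
            dev->mmap_offset += PAGE_ALIGN(size);
            spin_unlock_irq(&dev->mmap_offset_lock);
    }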
 
@@ -640,7 +676,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 
 int ipath_destroy_qp(struct ib_qp *ibqp);
 
-void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);
+int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);
 
 int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                    int attr_mask, struct ib_udata *udata);
@@ -656,8 +692,8 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
 
 void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
 
-int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
-                    u32 *hdr, u32 len, struct ipath_sge_state *ss);
+int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
+                    u32 hdrwords, struct ipath_sge_state *ss, u32 len);
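A sketch of a caller of the reworked prototype above; qp->s_hdr is assumed here to be the QP's staged send header, while the other fields appear in struct ipath_qp earlier in this diff:

    int ret = ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
                               qp->s_cur_sge, qp->s_cur_size);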
 
 void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
 
@@ -665,8 +701,6 @@ void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);
 
 void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);
 
-int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr);
-
 void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
 
@@ -710,13 +744,13 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
 
 int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
 
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
+struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
                              struct ib_ucontext *context,
                              struct ib_udata *udata);
 
 int ipath_destroy_cq(struct ib_cq *ibcq);
 
-int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify);
+int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
 
 int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
 
@@ -726,8 +760,8 @@ struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
                                struct ib_phys_buf *buffer_list,
                                int num_phys_buf, int acc, u64 *iova_start);
 
-struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
-                               int mr_access_flags,
+struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+                               u64 virt_addr, int mr_access_flags,
                                struct ib_udata *udata);
 
 int ipath_dereg_mr(struct ib_mr *ibmr);
@@ -744,27 +778,41 @@ int ipath_dealloc_fmr(struct ib_fmr *ibfmr);
 
 void ipath_release_mmap_info(struct kref *ref);
 
-int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
+                                              u32 size,
+                                              struct ib_ucontext *context,
+                                              void *obj);
 
-void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);
+void ipath_update_mmap_info(struct ipath_ibdev *dev,
+                           struct ipath_mmap_info *ip,
+                           u32 size, void *obj);
+
+int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
 void ipath_insert_rnr_queue(struct ipath_qp *qp);
 
+int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
+                  u32 *lengthp, struct ipath_sge_state *ss);
+
 int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);
 
 u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
                   struct ib_global_route *grh, u32 hwords, u32 nwords);
 
-void ipath_do_ruc_send(unsigned long data);
+void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
+                          struct ipath_other_headers *ohdr,
+                          u32 bth0, u32 bth2);
+
+void ipath_do_send(unsigned long data);
 
-u32 ipath_make_rc_ack(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
-                     u32 pmtu);
+void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
+                        enum ib_wc_status status);
 
-int ipath_make_rc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
-                     u32 pmtu, u32 *bth0p, u32 *bth2p);
+int ipath_make_rc_req(struct ipath_qp *qp);
 
-int ipath_make_uc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
-                     u32 pmtu, u32 *bth0p, u32 *bth2p);
+int ipath_make_uc_req(struct ipath_qp *qp);
+
+int ipath_make_ud_req(struct ipath_qp *qp);
 
 int ipath_register_ib_device(struct ipath_devdata *);
 
@@ -774,8 +822,6 @@ void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);
 
 int ipath_ib_piobufavail(struct ipath_ibdev *);
 
-void ipath_ib_timer(struct ipath_ibdev *);
-
 unsigned ipath_get_npkeys(struct ipath_devdata *);
 
 u32 ipath_get_cr_errpkey(struct ipath_devdata *);