device create: infiniband: convert device_create_drvdata to device_create

diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 1c89850127430dc777c19fba500e4537126bf449..9d12ae8a778eba131e5936e9e37e22a3390bfe10 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -74,6 +74,11 @@
 #define IPATH_POST_RECV_OK             0x02
 #define IPATH_PROCESS_RECV_OK          0x04
 #define IPATH_PROCESS_SEND_OK          0x08
+#define IPATH_PROCESS_NEXT_SEND_OK     0x10
+#define IPATH_FLUSH_SEND               0x20
+#define IPATH_FLUSH_RECV               0x40
+#define IPATH_PROCESS_OR_FLUSH_SEND \
+       (IPATH_PROCESS_SEND_OK | IPATH_FLUSH_SEND)
 
 /* IB Performance Manager status values */
 #define IB_PMA_SAMPLE_STATUS_DONE      0x00
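The flush bits are tested through ib_ipath_state_ops[] (declared at the
bottom of this header), indexed by the current QP state. A minimal sketch of
the intended test, not taken verbatim from the .c files:

	/* Sketch: complete a send as flushed when the QP state allows
	 * flushing but no longer allows normal send processing. */
	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
		if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
			goto bail;
		wc.status = IB_WC_WR_FLUSH_ERR;
	}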
@@ -138,6 +143,11 @@ struct ipath_ib_header {
        } u;
 } __attribute__ ((packed));
 
+struct ipath_pio_header {
+       __le32 pbc[2];
+       struct ipath_ib_header hdr;
+} __attribute__ ((packed));
+
 /*
  * There is one struct ipath_mcast for each multicast GID.
  * All attached QPs are then stored as a list of
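struct ipath_pio_header prepends the two little-endian PBC (PIO buffer
control) words the hardware consumes ahead of the IB header. A hedged sketch
of filling one, assuming tx points at the struct ipath_verbs_txreq added
further below and plen is the packet length in 32-bit words:

	tx->hdr.pbc[0] = cpu_to_le32(plen);	/* length in dwords (assumed) */
	tx->hdr.pbc[1] = 0;			/* control bits, chip-specific */
	memcpy(&tx->hdr.hdr, &qp->s_hdr, qp->s_hdrwords << 2);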
@@ -319,6 +329,7 @@ struct ipath_sge_state {
        struct ipath_sge *sg_list;      /* next SGE to be used if any */
        struct ipath_sge sge;   /* progress state for the current SGE */
        u8 num_sge;
+       u8 static_rate;
 };
 
 /*
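The new static_rate byte travels with the SGE state so the send path can
throttle back-to-back packets; ipath_ib_rate_to_mult(), declared later in
this header, supplies the multiplier. A hypothetical pairing of the two (the
field assignments are assumptions, not lifted from the .c files):

	qp->s_dmult = ipath_ib_rate_to_mult(ah_attr->static_rate);
	qp->s_sge.static_rate = qp->s_dmult;	/* hypothetical usage */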
@@ -347,23 +358,27 @@ struct ipath_qp {
        struct ib_qp ibqp;
        struct ipath_qp *next;          /* link list for QPN hash table */
        struct ipath_qp *timer_next;    /* link list for ipath_ib_timer() */
+       struct ipath_qp *pio_next;      /* link for ipath_ib_piobufavail() */
        struct list_head piowait;       /* link for wait PIO buf */
        struct list_head timerwait;     /* link for waiting for timeouts */
        struct ib_ah_attr remote_ah_attr;
        struct ipath_ib_header s_hdr;   /* next packet header to send */
        atomic_t refcount;
        wait_queue_head_t wait;
+       wait_queue_head_t wait_dma;
        struct tasklet_struct s_task;
        struct ipath_mmap_info *ip;
        struct ipath_sge_state *s_cur_sge;
+       struct ipath_verbs_txreq *s_tx;
        struct ipath_sge_state s_sge;   /* current send request data */
        struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
        struct ipath_sge_state s_ack_rdma_sge;
        struct ipath_sge_state s_rdma_read_sge;
        struct ipath_sge_state r_sge;   /* current receive data */
        spinlock_t s_lock;
-       unsigned long s_busy;
-       u32 s_hdrwords;         /* size of s_hdr in 32 bit words */
+       atomic_t s_dma_busy;
+       u16 s_pkt_delay;
+       u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
        u32 s_cur_size;         /* size of send packet in bytes */
        u32 s_len;              /* total length of s_sge */
        u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
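s_dma_busy counts send DMA descriptors still referencing the QP, and
wait_dma pairs with it so teardown paths can sleep until the engine lets go.
A minimal sketch of the two halves of that handshake:

	/* completion side: drop a DMA reference, waking any waiter */
	if (atomic_dec_and_test(&qp->s_dma_busy))
		wake_up(&qp->wait_dma);

	/* teardown side: block until all send DMA work has drained */
	wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));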
@@ -375,6 +390,7 @@ struct ipath_qp {
        u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */
        u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
        u64 r_wr_id;            /* ID for current receive WQE */
+       unsigned long r_aflags;
        u32 r_len;              /* total length of r_sge */
        u32 r_rcv_len;          /* receive data len processed */
        u32 r_psn;              /* expected rcv packet sequence number */
@@ -386,9 +402,7 @@ struct ipath_qp {
        u8 r_state;             /* opcode of last packet received */
        u8 r_nak_state;         /* non-zero if NAK is pending */
        u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
-       u8 r_reuse_sge;         /* for UC receive errors */
-       u8 r_sge_inx;           /* current index into sg_list */
-       u8 r_wrid_valid;        /* r_wrid set but CQ entry not yet made */
+       u8 r_flags;
        u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
        u8 r_head_ack_queue;    /* index into s_ack_queue[] */
        u8 qp_access_flags;
@@ -397,12 +411,13 @@ struct ipath_qp {
        u8 s_rnr_retry_cnt;
        u8 s_retry;             /* requester retry counter */
        u8 s_rnr_retry;         /* requester RNR retry counter */
-       u8 s_wait_credit;       /* limit number of unacked packets sent */
        u8 s_pkey_index;        /* PKEY index to use */
        u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
        u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
        u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
        u8 s_flags;
+       u8 s_dmult;
+       u8 s_draining;
        u8 timeout;             /* Timeout for this QP */
        enum ib_mtu path_mtu;
        u32 remote_qpn;
@@ -420,16 +435,40 @@ struct ipath_qp {
        struct ipath_sge r_sg_list[0];  /* verified SGEs */
 };
 
-/* Bit definition for s_busy. */
-#define IPATH_S_BUSY           0
+/*
+ * Atomic bit definitions for r_aflags.
+ */
+#define IPATH_R_WRID_VALID     0
+
+/*
+ * Bit definitions for r_flags.
+ */
+#define IPATH_R_REUSE_SGE      0x01
+#define IPATH_R_RDMAR_SEQ      0x02
 
 /*
  * Bit definitions for s_flags.
+ *
+ * IPATH_S_FENCE_PENDING - waiting for all prior RDMA read or atomic SWQEs
+ *                        to complete before processing the next SWQE
+ * IPATH_S_RDMAR_PENDING - waiting for any RDMA read or atomic SWQEs to
+ *                        complete before processing the next SWQE
+ * IPATH_S_WAITING - waiting for an RNR timeout or a send buffer to free up.
+ * IPATH_S_WAIT_SSN_CREDIT - waiting for RC credits to process the next SWQE
+ * IPATH_S_WAIT_DMA - waiting for the send DMA queue to drain before
+ *                   generating a completion for a packet not sent via DMA.
  */
 #define IPATH_S_SIGNAL_REQ_WR  0x01
 #define IPATH_S_FENCE_PENDING  0x02
 #define IPATH_S_RDMAR_PENDING  0x04
 #define IPATH_S_ACK_PENDING    0x08
+#define IPATH_S_BUSY           0x10
+#define IPATH_S_WAITING                0x20
+#define IPATH_S_WAIT_SSN_CREDIT        0x40
+#define IPATH_S_WAIT_DMA       0x80
+
+#define IPATH_S_ANY_WAIT (IPATH_S_FENCE_PENDING | IPATH_S_RDMAR_PENDING | \
+       IPATH_S_WAITING | IPATH_S_WAIT_SSN_CREDIT | IPATH_S_WAIT_DMA)
 
 #define IPATH_PSN_CREDIT       512
 
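The split between r_aflags and the plain flag bytes reflects locking:
r_aflags is manipulated with atomic bitops and no lock held, while r_flags
and s_flags are only touched under the QP's locks. A hedged illustration
(generate_recv_completion() is a hypothetical helper):

	/* atomic: claim the receive WQE completion exactly once */
	if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
		generate_recv_completion(qp);

	/* locked: park the sender until the send DMA queue drains */
	spin_lock_irqsave(&qp->s_lock, flags);
	qp->s_flags |= IPATH_S_WAIT_DMA;
	spin_unlock_irqrestore(&qp->s_lock, flags);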
@@ -510,6 +549,8 @@ struct ipath_ibdev {
        struct ipath_lkey_table lk_table;
        struct list_head pending[3];    /* FIFO of QPs waiting for ACKs */
        struct list_head piowait;       /* list for wait PIO buf */
+       struct list_head txreq_free;
+       void *txreq_bufs;
        /* list of QPs waiting for RNR timer */
        struct list_head rnrwait;
        spinlock_t pending_lock;
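txreq_bufs is presumably a single allocation carved into the
struct ipath_verbs_txreq entries (defined further below) that populate
txreq_free. A sketch of the init-time carve, with the entry count (ntxreqs)
and the use of the txreq.list member being assumptions:

	struct ipath_verbs_txreq *tx = idev->txreq_bufs;
	unsigned i;

	for (i = 0; i < ntxreqs; i++, tx++)
		list_add(&tx->txreq.list, &idev->txreq_free);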
@@ -563,13 +604,12 @@ struct ipath_ibdev {
        u32 n_rnr_naks;
        u32 n_other_naks;
        u32 n_timeouts;
-       u32 n_rc_stalls;
        u32 n_pkt_drops;
        u32 n_vl15_dropped;
        u32 n_wqe_errs;
        u32 n_rdma_dup_busy;
        u32 n_piowait;
-       u32 n_no_piobuf;
+       u32 n_unaligned;
        u32 port_cap_flags;
        u32 pma_sample_start;
        u32 pma_sample_interval;
@@ -581,7 +621,6 @@ struct ipath_ibdev {
        u16 pending_index;      /* which pending queue is active */
        u8 pma_sample_status;
        u8 subnet_timeout;
-       u8 link_width_enabled;
        u8 vl_high_limit;
        struct ipath_opcode_stats opstats[128];
 };
@@ -602,6 +641,16 @@ struct ipath_verbs_counters {
        u32 vl15_dropped;
 };
 
+struct ipath_verbs_txreq {
+       struct ipath_qp         *qp;
+       struct ipath_swqe       *wqe;
+       u32                      map_len;
+       u32                      len;
+       struct ipath_sge_state  *ss;
+       struct ipath_pio_header  hdr;
+       struct ipath_sdma_txreq  txreq;
+};
+
 static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
 {
        return container_of(ibmr, struct ipath_mr, ibmr);
@@ -637,6 +686,17 @@ static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
        return container_of(ibdev, struct ipath_ibdev, ibdev);
 }
 
+/*
+ * This must be called with s_lock held.
+ */
+static inline void ipath_schedule_send(struct ipath_qp *qp)
+{
+       if (qp->s_flags & IPATH_S_ANY_WAIT)
+               qp->s_flags &= ~IPATH_S_ANY_WAIT;
+       if (!(qp->s_flags & IPATH_S_BUSY))
+               tasklet_hi_schedule(&qp->s_task);
+}
+
 int ipath_process_mad(struct ib_device *ibdev,
                      int mad_flags,
                      u8 port_num,
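A hedged sketch of a caller honoring the s_lock requirement; the state test
shown is illustrative:

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)
		ipath_schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);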
@@ -686,19 +746,17 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                   int attr_mask, struct ib_qp_init_attr *init_attr);
 
-void ipath_free_all_qps(struct ipath_qp_table *qpt);
+unsigned ipath_free_all_qps(struct ipath_qp_table *qpt);
 
 int ipath_init_qp_table(struct ipath_ibdev *idev, int size);
 
-void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
-
 void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
 
+unsigned ipath_ib_rate_to_mult(enum ib_rate rate);
+
 int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
                     u32 hdrwords, struct ipath_sge_state *ss, u32 len);
 
-void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
-
 void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);
 
 void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);
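ipath_verbs_send() still takes the prebuilt header plus the SGE state for
the payload; a sketch of the expected call from the send path, using the QP
fields shown above:

	ret = ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
			       qp->s_cur_sge, qp->s_cur_size);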
@@ -709,7 +767,9 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
 
-void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc);
+void ipath_restart_rc(struct ipath_qp *qp, u32 psn);
+
+void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);
 
 int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);
 
@@ -832,7 +892,17 @@ unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);
 
 extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
 
+/*
+ * Below converts HCA-specific LinkTrainingState to IB PhysPortState
+ * values.
+ */
 extern const u8 ipath_cvt_physportstate[];
+#define IB_PHYSPORTSTATE_SLEEP 1
+#define IB_PHYSPORTSTATE_POLL 2
+#define IB_PHYSPORTSTATE_DISABLED 3
+#define IB_PHYSPORTSTATE_CFG_TRAIN 4
+#define IB_PHYSPORTSTATE_LINKUP 5
+#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
 
 extern const int ib_ipath_state_ops[];
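The conversion table is indexed by the chip's link-training state; a hedged
example, assuming the state occupies the low nibble of the IBCStatus value:

	u8 phys_state = ipath_cvt_physportstate[ibcstatus & 0xf];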