2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/dma-mapping.h>
43 #include "firmware_exports.h"
47 #define SGE_RX_SM_BUF_SIZE 1536
50 * If USE_RX_PAGE is defined, the small freelist is populated with (partial)
51 * pages instead of skbs. Pages are carved up into RX_PAGE_SIZE chunks (the
52 * host page size must be a multiple of RX_PAGE_SIZE).
55 #define RX_PAGE_SIZE 2048
58 * skb freelist packets are copied into a new skb (and the freelist one is
59 * reused) if their len is <= SGE_RX_COPY_THRES.
61 #define SGE_RX_COPY_THRES 256
64 * Minimum number of freelist entries before we start dropping TUNNEL frames.
66 #define SGE_RX_DROP_THRES 16
69 * Period of the Tx buffer reclaim timer. This timer does not need to run
70 * frequently as Tx buffers are usually reclaimed by new Tx packets.
72 #define TX_RECLAIM_PERIOD (HZ / 4)
74 /* WR size in bytes */
75 #define WR_LEN (WR_FLITS * 8)
78 * Types of Tx queues in each queue set. Order here matters, do not change.
80 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
82 /* Values for sge_txq.flags */
84 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
85 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
89 u64 flit[TX_DESC_FLITS];
99 struct tx_sw_desc { /* SW state per Tx descriptor */
103 struct rx_sw_desc { /* SW state per Rx descriptor */
106 struct sge_fl_page page;
108 DECLARE_PCI_UNMAP_ADDR(dma_addr);
111 struct rsp_desc { /* response queue descriptor */
112 struct rss_header rss_hdr;
119 struct unmap_info { /* packet unmapping info, overlays skb->cb */
120 int sflit; /* start flit of first SGL entry in Tx descriptor */
121 u16 fragidx; /* first page fragment in current Tx descriptor */
122 u16 addr_idx; /* buffer index of first SGL entry in descriptor */
123 u32 len; /* mapped length of skb main body */
127 * Holds unmapping information for Tx packets that need deferred unmapping.
128 * This structure lives at skb->head and must be allocated by callers.
130 struct deferred_unmap_info {
131 struct pci_dev *pdev;
132 dma_addr_t addr[MAX_SKB_FRAGS + 1];
136 * Maps a number of flits to the number of Tx descriptors that can hold them.
139 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
141 * HW allows up to 4 descriptors to be combined into a WR.
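 *
 * For example, taking WR_FLITS == 15 purely for illustration (the actual
 * value depends on SGE_NUM_GENBITS), an SGL of 20 flits needs
 * 1 + (20 - 2) / (15 - 1) = 2 descriptors, which is what the table below
 * encodes at index 20.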
143 static u8 flit_desc_map[] = {
145 #if SGE_NUM_GENBITS == 1
146 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
147 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
148 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
149 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
150 #elif SGE_NUM_GENBITS == 2
151 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
152 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
153 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
154 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
156 # error "SGE_NUM_GENBITS must be 1 or 2"
160 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
162 return container_of(q, struct sge_qset, fl[qidx]);
165 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
167 return container_of(q, struct sge_qset, rspq);
170 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
172 return container_of(q, struct sge_qset, txq[qidx]);
176 * refill_rspq - replenish an SGE response queue
177 * @adapter: the adapter
178 * @q: the response queue to replenish
179 * @credits: how many new responses to make available
181 * Replenishes a response queue by making the supplied number of responses available to the HW.
184 static inline void refill_rspq(struct adapter *adapter,
185 const struct sge_rspq *q, unsigned int credits)
187 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
188 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
192 * need_skb_unmap - does the platform need unmapping of sk_buffs?
194 * Returns true if the platform needs sk_buff unmapping. Because the result
195 * is a compile-time constant, the compiler optimizes away the unmapping code when it is not needed.
197 static inline int need_skb_unmap(void)
200 * This structure is used to tell if the platform needs buffer
201 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
204 DECLARE_PCI_UNMAP_ADDR(addr);
207 return sizeof(struct dummy) != 0;
211 * unmap_skb - unmap a packet main body and its page fragments
213 * @q: the Tx queue containing Tx descriptors for the packet
214 * @cidx: index of Tx descriptor
215 * @pdev: the PCI device
217 * Unmap the main body of an sk_buff and its page fragments, if any.
218 * Because of the fairly complicated structure of our SGLs and the desire
219 * to conserve space for metadata, we keep the information necessary to
220 * unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
221 * in the Tx descriptors (the physical addresses of the various data
222 * buffers). The send functions initialize the state in skb->cb so we
223 * can unmap the buffers held in the first Tx descriptor here, and we
224 * have enough information at this point to update the state for the next Tx descriptor.
227 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
228 unsigned int cidx, struct pci_dev *pdev)
230 const struct sg_ent *sgp;
231 struct unmap_info *ui = (struct unmap_info *)skb->cb;
232 int nfrags, frag_idx, curflit, j = ui->addr_idx;
234 sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];
237 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
239 ui->len = 0; /* so we know it's already unmapped when we reach this skb's next descriptor */
243 frag_idx = ui->fragidx;
244 curflit = ui->sflit + 1 + j;
245 nfrags = skb_shinfo(skb)->nr_frags;
247 while (frag_idx < nfrags && curflit < WR_FLITS) {
248 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
249 skb_shinfo(skb)->frags[frag_idx].size,
260 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
261 ui->fragidx = frag_idx;
263 ui->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
268 * free_tx_desc - reclaims Tx descriptors and their buffers
269 * @adapter: the adapter
270 * @q: the Tx queue to reclaim descriptors from
271 * @n: the number of descriptors to reclaim
273 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
274 * Tx buffers. Called with the Tx queue lock held.
276 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
279 struct tx_sw_desc *d;
280 struct pci_dev *pdev = adapter->pdev;
281 unsigned int cidx = q->cidx;
283 const int need_unmap = need_skb_unmap() &&
284 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
288 if (d->skb) { /* an SGL is present */
290 unmap_skb(d->skb, q, cidx, pdev);
291 if (d->skb->priority == cidx)
295 if (++cidx == q->size) {
304 * reclaim_completed_tx - reclaims completed Tx descriptors
305 * @adapter: the adapter
306 * @q: the Tx queue to reclaim completed descriptors from
308 * Reclaims Tx descriptors that the SGE has indicated it has processed,
309 * and frees the associated buffers if possible. Called with the Tx queue lock held.
312 static inline void reclaim_completed_tx(struct adapter *adapter,
315 unsigned int reclaim = q->processed - q->cleaned;
318 free_tx_desc(adapter, q, reclaim);
319 q->cleaned += reclaim;
320 q->in_use -= reclaim;
325 * should_restart_tx - are there enough resources to restart a Tx queue?
328 * Checks if there are enough descriptors to restart a suspended Tx queue.
330 static inline int should_restart_tx(const struct sge_txq *q)
332 unsigned int r = q->processed - q->cleaned;
334 return q->in_use - r < (q->size >> 1);
338 * free_rx_bufs - free the Rx buffers on an SGE free list
339 * @pdev: the PCI device associated with the adapter
340 * @rxq: the SGE free list to clean up
342 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
343 * this queue should be stopped before calling this function.
345 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
347 unsigned int cidx = q->cidx;
349 while (q->credits--) {
350 struct rx_sw_desc *d = &q->sdesc[cidx];
352 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
353 q->buf_size, PCI_DMA_FROMDEVICE);
355 if (q->buf_size != RX_PAGE_SIZE) {
359 if (d->t.page.frag.page)
360 put_page(d->t.page.frag.page);
361 d->t.page.frag.page = NULL;
363 if (++cidx == q->size)
367 if (q->page.frag.page)
368 put_page(q->page.frag.page);
369 q->page.frag.page = NULL;
373 * add_one_rx_buf - add a packet buffer to a free-buffer list
374 * @va: va of the buffer to add
375 * @len: the buffer length
376 * @d: the HW Rx descriptor to write
377 * @sd: the SW Rx descriptor to write
378 * @gen: the generation bit value
379 * @pdev: the PCI device associated with the adapter
381 * Add a buffer of the given length to the supplied HW and SW Rx descriptors.
384 static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
385 struct rx_desc *d, struct rx_sw_desc *sd,
386 unsigned int gen, struct pci_dev *pdev)
390 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
391 pci_unmap_addr_set(sd, dma_addr, mapping);
393 d->addr_lo = cpu_to_be32(mapping);
394 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
396 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
397 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
401 * refill_fl - refill an SGE free-buffer list
402 * @adapter: the adapter
403 * @q: the free-list to refill
404 * @n: the number of new buffers to allocate
405 * @gfp: the gfp flags for allocating new buffers
407 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
408 * allocated with the supplied gfp flags. The caller must ensure that
409 * @n does not exceed the queue's capacity.
411 static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
413 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
414 struct rx_desc *d = &q->desc[q->pidx];
415 struct sge_fl_page *p = &q->page;
420 if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
421 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
431 p->frag.page = alloc_pages(gfp, 0);
432 if (unlikely(!p->frag.page)) {
436 p->frag.size = RX_PAGE_SIZE;
437 p->frag.page_offset = 0;
438 p->va = page_address(p->frag.page);
442 memcpy(&sd->t, p, sizeof(*p));
445 p->frag.page_offset += RX_PAGE_SIZE;
446 BUG_ON(p->frag.page_offset > PAGE_SIZE);
447 p->va += RX_PAGE_SIZE;
448 if (p->frag.page_offset == PAGE_SIZE)
451 get_page(p->frag.page);
454 add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);
458 if (++q->pidx == q->size) {
467 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
470 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
472 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
476 * recycle_rx_buf - recycle a receive buffer
477 * @adapter: the adapter
478 * @q: the SGE free list
479 * @idx: index of buffer to recycle
481 * Recycles the specified buffer on the given free list by adding it at
482 * the next available slot on the list.
484 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
487 struct rx_desc *from = &q->desc[idx];
488 struct rx_desc *to = &q->desc[q->pidx];
490 memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
491 to->addr_lo = from->addr_lo; /* already big endian */
492 to->addr_hi = from->addr_hi; /* likewise */
494 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
495 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
498 if (++q->pidx == q->size) {
502 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
506 * alloc_ring - allocate resources for an SGE descriptor ring
507 * @pdev: the PCI device
508 * @nelem: the number of descriptors
509 * @elem_size: the size of each descriptor
510 * @sw_size: the size of the SW state associated with each ring element
511 * @phys: the physical address of the allocated ring
512 * @metadata: address of the array holding the SW state for the ring
514 * Allocates resources for an SGE descriptor ring, such as Tx queues,
515 * free buffer lists, or response queues. Each SGE ring requires
516 * space for its HW descriptors plus, optionally, space for the SW state
517 * associated with each HW entry (the metadata). The function returns
518 * three values: the virtual address for the HW ring (the return value
519 * of the function), the physical address of the HW ring, and the address
522 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
523 size_t sw_size, dma_addr_t * phys, void *metadata)
525 size_t len = nelem * elem_size;
527 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
532 s = kcalloc(nelem, sw_size, GFP_KERNEL);
535 dma_free_coherent(&pdev->dev, len, p, *phys);
540 *(void **)metadata = s;
546 * free_qset - free the resources of an SGE queue set
547 * @adapter: the adapter owning the queue set
550 * Release the HW and SW resources associated with an SGE queue set, such
551 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
552 * queue set must be quiesced prior to calling this.
554 void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
557 struct pci_dev *pdev = adapter->pdev;
559 if (q->tx_reclaim_timer.function)
560 del_timer_sync(&q->tx_reclaim_timer);
562 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
564 spin_lock(&adapter->sge.reg_lock);
565 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
566 spin_unlock(&adapter->sge.reg_lock);
567 free_rx_bufs(pdev, &q->fl[i]);
568 kfree(q->fl[i].sdesc);
569 dma_free_coherent(&pdev->dev,
571 sizeof(struct rx_desc), q->fl[i].desc,
575 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
576 if (q->txq[i].desc) {
577 spin_lock(&adapter->sge.reg_lock);
578 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
579 spin_unlock(&adapter->sge.reg_lock);
580 if (q->txq[i].sdesc) {
581 free_tx_desc(adapter, &q->txq[i],
583 kfree(q->txq[i].sdesc);
585 dma_free_coherent(&pdev->dev,
587 sizeof(struct tx_desc),
588 q->txq[i].desc, q->txq[i].phys_addr);
589 __skb_queue_purge(&q->txq[i].sendq);
593 spin_lock(&adapter->sge.reg_lock);
594 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
595 spin_unlock(&adapter->sge.reg_lock);
596 dma_free_coherent(&pdev->dev,
597 q->rspq.size * sizeof(struct rsp_desc),
598 q->rspq.desc, q->rspq.phys_addr);
602 q->netdev->atalk_ptr = NULL;
604 memset(q, 0, sizeof(*q));
608 * init_qset_cntxt - initialize an SGE queue set context info
610 * @id: the queue set id
612 * Initializes the TIDs and context ids for the queues of a queue set.
614 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
616 qs->rspq.cntxt_id = id;
617 qs->fl[0].cntxt_id = 2 * id;
618 qs->fl[1].cntxt_id = 2 * id + 1;
619 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
620 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
621 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
622 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
623 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
627 * sgl_len - calculates the size of an SGL of the given capacity
628 * @n: the number of SGL entries
630 * Calculates the number of flits needed for a scatter/gather list that
631 * can hold the given number of entries.
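 *
 * Each struct sg_ent packs two address/length pairs into 3 flits, so as a
 * quick check, sgl_len(4) = 6 and sgl_len(3) = (3 * 3) / 2 + 1 = 5.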
633 static inline unsigned int sgl_len(unsigned int n)
635 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
636 return (3 * n) / 2 + (n & 1);
640 * flits_to_desc - returns the num of Tx descriptors for the given flits
641 * @n: the number of flits
643 * Calculates the number of Tx descriptors needed for the supplied number
646 static inline unsigned int flits_to_desc(unsigned int n)
648 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
649 return flit_desc_map[n];
653 * get_imm_packet - return the next ingress packet buffer from a response
654 * @resp: the response descriptor containing the packet data
656 * Return a packet containing the immediate data of the given response.
658 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
660 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
663 __skb_put(skb, IMMED_PKT_SIZE);
664 memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
670 * calc_tx_descs - calculate the number of Tx descriptors for a packet
673 * Returns the number of Tx descriptors needed for the given Ethernet
674 * packet. Ethernet packets require the addition of WR and CPL headers.
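 *
 * As a rough illustration, a non-GSO packet with a linear area and two page
 * fragments needs sgl_len(2 + 1) + 2 = 7 flits, i.e. a single descriptor;
 * a GSO packet needs one extra flit for the LSO information.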
676 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
680 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
683 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
684 if (skb_shinfo(skb)->gso_size)
686 return flits_to_desc(flits);
690 * make_sgl - populate a scatter/gather list for a packet
692 * @sgp: the SGL to populate
693 * @start: start address of skb main body data to include in the SGL
694 * @len: length of skb main body data to include in the SGL
695 * @pdev: the PCI device
697 * Generates a scatter/gather list for the buffers that make up a packet
698 * and returns the SGL size in 8-byte words. The caller must size the SGL appropriately.
701 static inline unsigned int make_sgl(const struct sk_buff *skb,
702 struct sg_ent *sgp, unsigned char *start,
703 unsigned int len, struct pci_dev *pdev)
706 unsigned int i, j = 0, nfrags;
709 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
710 sgp->len[0] = cpu_to_be32(len);
711 sgp->addr[0] = cpu_to_be64(mapping);
715 nfrags = skb_shinfo(skb)->nr_frags;
716 for (i = 0; i < nfrags; i++) {
717 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
719 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
720 frag->size, PCI_DMA_TODEVICE);
721 sgp->len[j] = cpu_to_be32(frag->size);
722 sgp->addr[j] = cpu_to_be64(mapping);
729 return ((nfrags + (len != 0)) * 3) / 2 + j;
733 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
737 * Ring the doorbell if a Tx queue is asleep. There is a natural race,
738 * where the HW could go to sleep just after we checked; in that case
739 * the interrupt handler will detect the outstanding TX packet
740 * and ring the doorbell for us.
742 * When GTS is disabled we unconditionally ring the doorbell.
744 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
747 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
748 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
749 set_bit(TXQ_LAST_PKT_DB, &q->flags);
750 t3_write_reg(adap, A_SG_KDOORBELL,
751 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
754 wmb(); /* write descriptors before telling HW */
755 t3_write_reg(adap, A_SG_KDOORBELL,
756 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
760 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
762 #if SGE_NUM_GENBITS == 2
763 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
768 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
769 * @ndesc: number of Tx descriptors spanned by the SGL
770 * @skb: the packet corresponding to the WR
771 * @d: first Tx descriptor to be written
772 * @pidx: index of above descriptors
773 * @q: the SGE Tx queue
775 * @flits: number of flits to the start of the SGL in the first descriptor
776 * @sgl_flits: the SGL size in flits
777 * @gen: the Tx descriptor generation
778 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
779 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
781 * Write a work request header and an associated SGL. If the SGL is
782 * small enough to fit into one Tx descriptor it has already been written
783 * and we just need to write the WR header. Otherwise we distribute the
784 * SGL across the number of descriptors it spans.
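 *
 * For example, taking WR_FLITS == 15 purely for illustration: with 3 header
 * flits the first descriptor has room for 12 SGL flits, and each following
 * descriptor carries a 1-flit WR header plus up to 14 more SGL flits.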
786 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
787 struct tx_desc *d, unsigned int pidx,
788 const struct sge_txq *q,
789 const struct sg_ent *sgl,
790 unsigned int flits, unsigned int sgl_flits,
791 unsigned int gen, unsigned int wr_hi,
794 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
795 struct tx_sw_desc *sd = &q->sdesc[pidx];
798 if (need_skb_unmap()) {
799 struct unmap_info *ui = (struct unmap_info *)skb->cb;
806 if (likely(ndesc == 1)) {
807 skb->priority = pidx;
808 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
809 V_WR_SGLSFLT(flits)) | wr_hi;
811 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
812 V_WR_GEN(gen)) | wr_lo;
815 unsigned int ogen = gen;
816 const u64 *fp = (const u64 *)sgl;
817 struct work_request_hdr *wp = wrp;
819 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
820 V_WR_SGLSFLT(flits)) | wr_hi;
823 unsigned int avail = WR_FLITS - flits;
825 if (avail > sgl_flits)
827 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
836 if (++pidx == q->size) {
844 wrp = (struct work_request_hdr *)d;
845 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
846 V_WR_SGLSFLT(1)) | wr_hi;
847 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
849 V_WR_GEN(gen)) | wr_lo;
853 skb->priority = pidx;
854 wrp->wr_hi |= htonl(F_WR_EOP);
856 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
857 wr_gen2((struct tx_desc *)wp, ogen);
863 * write_tx_pkt_wr - write a TX_PKT work request
865 * @skb: the packet to send
866 * @pi: the egress interface
867 * @pidx: index of the first Tx descriptor to write
868 * @gen: the generation value to use
870 * @ndesc: number of descriptors the packet will occupy
871 * @compl: the value of the COMPL bit to use
873 * Generate a TX_PKT work request to send the supplied packet.
875 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
876 const struct port_info *pi,
877 unsigned int pidx, unsigned int gen,
878 struct sge_txq *q, unsigned int ndesc,
881 unsigned int flits, sgl_flits, cntrl, tso_info;
882 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
883 struct tx_desc *d = &q->desc[pidx];
884 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
886 cpl->len = htonl(skb->len | 0x80000000);
887 cntrl = V_TXPKT_INTF(pi->port_id);
889 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
890 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
892 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
895 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
898 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
899 hdr->cntrl = htonl(cntrl);
900 eth_type = skb_network_offset(skb) == ETH_HLEN ?
901 CPL_ETH_II : CPL_ETH_II_VLAN;
902 tso_info |= V_LSO_ETH_TYPE(eth_type) |
903 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
904 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
905 hdr->lso_info = htonl(tso_info);
908 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
909 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
910 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
911 cpl->cntrl = htonl(cntrl);
913 if (skb->len <= WR_LEN - sizeof(*cpl)) {
914 q->sdesc[pidx].skb = NULL;
916 memcpy(&d->flit[2], skb->data, skb->len);
918 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
920 flits = (skb->len + 7) / 8 + 2;
921 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
922 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
923 | F_WR_SOP | F_WR_EOP | compl);
925 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
935 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
936 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
937 if (need_skb_unmap())
938 ((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
940 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
941 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
942 htonl(V_WR_TID(q->token)));
946 * eth_xmit - add a packet to the Ethernet Tx queue
948 * @dev: the egress net device
950 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
952 int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
954 unsigned int ndesc, pidx, credits, gen, compl;
955 const struct port_info *pi = netdev_priv(dev);
956 struct adapter *adap = dev->priv;
957 struct sge_qset *qs = dev2qset(dev);
958 struct sge_txq *q = &qs->txq[TXQ_ETH];
961 * The chip's minimum packet length is 9 octets, but play it safe and reject
962 * anything shorter than an Ethernet header.
964 if (unlikely(skb->len < ETH_HLEN)) {
970 reclaim_completed_tx(adap, q);
972 credits = q->size - q->in_use;
973 ndesc = calc_tx_descs(skb);
975 if (unlikely(credits < ndesc)) {
976 if (!netif_queue_stopped(dev)) {
977 netif_stop_queue(dev);
978 set_bit(TXQ_ETH, &qs->txq_stopped);
980 dev_err(&adap->pdev->dev,
981 "%s: Tx ring %u full while queue awake!\n",
982 dev->name, q->cntxt_id & 7);
984 spin_unlock(&q->lock);
985 return NETDEV_TX_BUSY;
989 if (unlikely(credits - ndesc < q->stop_thres)) {
991 netif_stop_queue(dev);
992 set_bit(TXQ_ETH, &qs->txq_stopped);
994 if (should_restart_tx(q) &&
995 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
997 netif_wake_queue(dev);
1003 q->unacked += ndesc;
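/*
 * Request a Tx completion roughly once every 8 descriptors: bit 3 of the
 * running unacked count is shifted up into the WR COMPL bit position.
 */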
1004 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1008 if (q->pidx >= q->size) {
1013 /* update port statistics */
1014 if (skb->ip_summed == CHECKSUM_COMPLETE)
1015 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1016 if (skb_shinfo(skb)->gso_size)
1017 qs->port_stats[SGE_PSTAT_TSO]++;
1018 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1019 qs->port_stats[SGE_PSTAT_VLANINS]++;
1021 dev->trans_start = jiffies;
1022 spin_unlock(&q->lock);
1025 * We do not use Tx completion interrupts to free DMAd Tx packets.
1026 * This is good for performance but means that we rely on new Tx
1027 * packets arriving to run the destructors of completed packets,
1028 * which open up space in their sockets' send queues. Sometimes
1029 * we do not get such new packets causing Tx to stall. A single
1030 * UDP transmitter is a good example of this situation. We have
1031 * a clean up timer that periodically reclaims completed packets
1032 * but it doesn't run often enough (nor do we want it to) to prevent
1033 * lengthy stalls. A solution to this problem is to run the
1034 * destructor early, after the packet is queued but before it's DMAd.
1035 * A drawback is that we lie to socket memory accounting, but the amount
1036 * of extra memory is reasonable (limited by the number of Tx
1037 * descriptors), the packets do actually get freed quickly by new
1038 * packets almost always, and for protocols like TCP that wait for
1039 * acks to really free up the data the extra memory is even less.
1040 * On the positive side we run the destructors on the sending CPU
1041 * rather than on a potentially different completing CPU, usually a
1042 * good thing. We also run them without holding our Tx queue lock,
1043 * unlike what reclaim_completed_tx() would otherwise do.
1045 * Run the destructor before telling the DMA engine about the packet
1046 * to make sure it doesn't complete and get freed prematurely.
1048 if (likely(!skb_shared(skb)))
1051 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1052 check_ring_tx_db(adap, q);
1053 return NETDEV_TX_OK;
1057 * write_imm - write a packet into a Tx descriptor as immediate data
1058 * @d: the Tx descriptor to write
1060 * @len: the length of packet data to write as immediate data
1061 * @gen: the generation bit value to write
1063 * Writes a packet as immediate data into a Tx descriptor. The packet
1064 * contains a work request at its beginning. We must write the packet
1065 * carefully so the SGE doesn't read it accidentally before it is completely written.
1068 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1069 unsigned int len, unsigned int gen)
1071 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1072 struct work_request_hdr *to = (struct work_request_hdr *)d;
1074 memcpy(&to[1], &from[1], len - sizeof(*from));
1075 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1076 V_WR_BCNTLFLT(len & 7));
1078 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1079 V_WR_LEN((len + 7) / 8));
1085 * check_desc_avail - check descriptor availability on a send queue
1086 * @adap: the adapter
1087 * @q: the send queue
1088 * @skb: the packet needing the descriptors
1089 * @ndesc: the number of Tx descriptors needed
1090 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1092 * Checks if the requested number of Tx descriptors is available on an
1093 * SGE send queue. If the queue is already suspended or not enough
1094 * descriptors are available the packet is queued for later transmission.
1095 * Must be called with the Tx queue locked.
1097 * Returns 0 if enough descriptors are available, 1 if there aren't
1098 * enough descriptors and the packet has been queued, and 2 if the caller
1099 * needs to retry because there weren't enough descriptors at the
1100 * beginning of the call but some freed up in the meantime.
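 *
 * Callers (see ctrl_xmit() and ofld_xmit() below) typically retry with a
 * 'goto again' when 2 is returned, and unlock and return when 1 is
 * returned (the packet has already been queued).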
1102 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1103 struct sk_buff *skb, unsigned int ndesc,
1106 if (unlikely(!skb_queue_empty(&q->sendq))) {
1107 addq_exit:__skb_queue_tail(&q->sendq, skb);
1110 if (unlikely(q->size - q->in_use < ndesc)) {
1111 struct sge_qset *qs = txq_to_qset(q, qid);
1113 set_bit(qid, &qs->txq_stopped);
1114 smp_mb__after_clear_bit();
1116 if (should_restart_tx(q) &&
1117 test_and_clear_bit(qid, &qs->txq_stopped))
1127 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1128 * @q: the SGE control Tx queue
1130 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1131 * that send only immediate data (presently just the control queues) and
1132 * thus do not have any sk_buffs to release.
1134 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1136 unsigned int reclaim = q->processed - q->cleaned;
1138 q->in_use -= reclaim;
1139 q->cleaned += reclaim;
1142 static inline int immediate(const struct sk_buff *skb)
1144 return skb->len <= WR_LEN && !skb->data_len;
1148 * ctrl_xmit - send a packet through an SGE control Tx queue
1149 * @adap: the adapter
1150 * @q: the control queue
1153 * Send a packet through an SGE control Tx queue. Packets sent through
1154 * a control queue must fit entirely as immediate data in a single Tx
1155 * descriptor and have no page fragments.
1157 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1158 struct sk_buff *skb)
1161 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1163 if (unlikely(!immediate(skb))) {
1166 return NET_XMIT_SUCCESS;
1169 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1170 wrp->wr_lo = htonl(V_WR_TID(q->token));
1172 spin_lock(&q->lock);
1173 again:reclaim_completed_tx_imm(q);
1175 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1176 if (unlikely(ret)) {
1178 spin_unlock(&q->lock);
1184 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1187 if (++q->pidx >= q->size) {
1191 spin_unlock(&q->lock);
1193 t3_write_reg(adap, A_SG_KDOORBELL,
1194 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1195 return NET_XMIT_SUCCESS;
1199 * restart_ctrlq - restart a suspended control queue
1200 * @qs: the queue set containing the control queue
1202 * Resumes transmission on a suspended Tx control queue.
1204 static void restart_ctrlq(unsigned long data)
1206 struct sk_buff *skb;
1207 struct sge_qset *qs = (struct sge_qset *)data;
1208 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1209 struct adapter *adap = qs->netdev->priv;
1211 spin_lock(&q->lock);
1212 again:reclaim_completed_tx_imm(q);
1214 while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
1216 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1218 if (++q->pidx >= q->size) {
1225 if (!skb_queue_empty(&q->sendq)) {
1226 set_bit(TXQ_CTRL, &qs->txq_stopped);
1227 smp_mb__after_clear_bit();
1229 if (should_restart_tx(q) &&
1230 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1235 spin_unlock(&q->lock);
1236 t3_write_reg(adap, A_SG_KDOORBELL,
1237 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1241 * Send a management message through control queue 0
1243 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1245 return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1249 * deferred_unmap_destructor - unmap a packet when it is freed
1252 * This is the packet destructor used for Tx packets that need to remain
1253 * mapped until they are freed rather than until their Tx descriptors are
1256 static void deferred_unmap_destructor(struct sk_buff *skb)
1259 const dma_addr_t *p;
1260 const struct skb_shared_info *si;
1261 const struct deferred_unmap_info *dui;
1262 const struct unmap_info *ui = (struct unmap_info *)skb->cb;
1264 dui = (struct deferred_unmap_info *)skb->head;
1268 pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);
1270 si = skb_shinfo(skb);
1271 for (i = 0; i < si->nr_frags; i++)
1272 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1276 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1277 const struct sg_ent *sgl, int sgl_flits)
1280 struct deferred_unmap_info *dui;
1282 dui = (struct deferred_unmap_info *)skb->head;
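/* Each full 3-flit sg_ent carries two DMA addresses; a trailing partial
 * entry carries just one.
 */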
1284 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1285 *p++ = be64_to_cpu(sgl->addr[0]);
1286 *p++ = be64_to_cpu(sgl->addr[1]);
1289 *p = be64_to_cpu(sgl->addr[0]);
1293 * write_ofld_wr - write an offload work request
1294 * @adap: the adapter
1295 * @skb: the packet to send
1297 * @pidx: index of the first Tx descriptor to write
1298 * @gen: the generation value to use
1299 * @ndesc: number of descriptors the packet will occupy
1301 * Write an offload work request to send the supplied packet. The packet
1302 * data already carry the work request with most fields populated.
1304 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1305 struct sge_txq *q, unsigned int pidx,
1306 unsigned int gen, unsigned int ndesc)
1308 unsigned int sgl_flits, flits;
1309 struct work_request_hdr *from;
1310 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1311 struct tx_desc *d = &q->desc[pidx];
1313 if (immediate(skb)) {
1314 q->sdesc[pidx].skb = NULL;
1315 write_imm(d, skb, skb->len, gen);
1319 /* Only TX_DATA builds SGLs */
1321 from = (struct work_request_hdr *)skb->data;
1322 memcpy(&d->flit[1], &from[1],
1323 skb_transport_offset(skb) - sizeof(*from));
1325 flits = skb_transport_offset(skb) / 8;
1326 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1327 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
1328 skb->tail - skb->transport_header,
1330 if (need_skb_unmap()) {
1331 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1332 skb->destructor = deferred_unmap_destructor;
1333 ((struct unmap_info *)skb->cb)->len = (skb->tail -
1334 skb->transport_header);
1337 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1338 gen, from->wr_hi, from->wr_lo);
1342 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1345 * Returns the number of Tx descriptors needed for the given offload
1346 * packet. These packets are already fully constructed.
1348 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1350 unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;
1352 if (skb->len <= WR_LEN && cnt == 0)
1353 return 1; /* packet fits as immediate data */
1355 flits = skb_transport_offset(skb) / 8; /* headers */
1356 if (skb->tail != skb->transport_header)
1358 return flits_to_desc(flits + sgl_len(cnt));
1362 * ofld_xmit - send a packet through an offload queue
1363 * @adap: the adapter
1364 * @q: the Tx offload queue
1367 * Send an offload packet through an SGE offload queue.
1369 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1370 struct sk_buff *skb)
1373 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1375 spin_lock(&q->lock);
1376 again:reclaim_completed_tx(adap, q);
1378 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1379 if (unlikely(ret)) {
1381 skb->priority = ndesc; /* save for restart */
1382 spin_unlock(&q->lock);
1392 if (q->pidx >= q->size) {
1396 spin_unlock(&q->lock);
1398 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1399 check_ring_tx_db(adap, q);
1400 return NET_XMIT_SUCCESS;
1404 * restart_offloadq - restart a suspended offload queue
1405 * @qs: the queue set containing the offload queue
1407 * Resumes transmission on a suspended Tx offload queue.
1409 static void restart_offloadq(unsigned long data)
1411 struct sk_buff *skb;
1412 struct sge_qset *qs = (struct sge_qset *)data;
1413 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1414 struct adapter *adap = qs->netdev->priv;
1416 spin_lock(&q->lock);
1417 again:reclaim_completed_tx(adap, q);
1419 while ((skb = skb_peek(&q->sendq)) != NULL) {
1420 unsigned int gen, pidx;
1421 unsigned int ndesc = skb->priority;
1423 if (unlikely(q->size - q->in_use < ndesc)) {
1424 set_bit(TXQ_OFLD, &qs->txq_stopped);
1425 smp_mb__after_clear_bit();
1427 if (should_restart_tx(q) &&
1428 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1438 if (q->pidx >= q->size) {
1442 __skb_unlink(skb, &q->sendq);
1443 spin_unlock(&q->lock);
1445 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1446 spin_lock(&q->lock);
1448 spin_unlock(&q->lock);
1451 set_bit(TXQ_RUNNING, &q->flags);
1452 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1454 t3_write_reg(adap, A_SG_KDOORBELL,
1455 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1459 * queue_set - return the queue set a packet should use
1462 * Maps a packet to the SGE queue set it should use. The desired queue
1463 * set is carried in bits 1-3 in the packet's priority.
1465 static inline int queue_set(const struct sk_buff *skb)
1467 return skb->priority >> 1;
1471 * is_ctrl_pkt - return whether an offload packet is a control packet
1474 * Determines whether an offload packet should use an OFLD or a CTRL
1475 * Tx queue. This is indicated by bit 0 in the packet's priority.
1477 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1479 return skb->priority & 1;
1483 * t3_offload_tx - send an offload packet
1484 * @tdev: the offload device to send to
1487 * Sends an offload packet. We use the packet priority to select the
1488 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1489 * should be sent as regular or control, bits 1-3 select the queue set.
1491 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1493 struct adapter *adap = tdev2adap(tdev);
1494 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1496 if (unlikely(is_ctrl_pkt(skb)))
1497 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1499 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1503 * offload_enqueue - add an offload packet to an SGE offload receive queue
1504 * @q: the SGE response queue
1507 * Add a new offload packet to an SGE response queue's offload packet
1508 * queue. If the packet is the first on the queue it schedules the RX
1509 * softirq to process the queue.
1511 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1513 skb->next = skb->prev = NULL;
1515 q->rx_tail->next = skb;
1517 struct sge_qset *qs = rspq_to_qset(q);
1519 if (__netif_rx_schedule_prep(qs->netdev))
1520 __netif_rx_schedule(qs->netdev);
1527 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1528 * @tdev: the offload device that will be receiving the packets
1529 * @q: the SGE response queue that assembled the bundle
1530 * @skbs: the partial bundle
1531 * @n: the number of packets in the bundle
1533 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1535 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1537 struct sk_buff *skbs[], int n)
1540 q->offload_bundles++;
1541 tdev->recv(tdev, skbs, n);
1546 * ofld_poll - NAPI handler for offload packets in interrupt mode
1547 * @dev: the network device doing the polling
1548 * @budget: polling budget
1550 * The NAPI handler for offload packets when a response queue is serviced
1551 * by the hard interrupt handler, i.e., when it's operating in non-polling
1552 * mode. Creates small packet batches and sends them through the offload
1553 * receive handler. Batches need to be of modest size as we do prefetches
1554 * on the packets in each.
1556 static int ofld_poll(struct net_device *dev, int *budget)
1558 struct adapter *adapter = dev->priv;
1559 struct sge_qset *qs = dev2qset(dev);
1560 struct sge_rspq *q = &qs->rspq;
1561 int work_done, limit = min(*budget, dev->quota), avail = limit;
1564 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1567 spin_lock_irq(&q->lock);
1570 work_done = limit - avail;
1571 *budget -= work_done;
1572 dev->quota -= work_done;
1573 __netif_rx_complete(dev);
1574 spin_unlock_irq(&q->lock);
1579 q->rx_head = q->rx_tail = NULL;
1580 spin_unlock_irq(&q->lock);
1582 for (ngathered = 0; avail && head; avail--) {
1583 prefetch(head->data);
1584 skbs[ngathered] = head;
1586 skbs[ngathered]->next = NULL;
1587 if (++ngathered == RX_BUNDLE_SIZE) {
1588 q->offload_bundles++;
1589 adapter->tdev.recv(&adapter->tdev, skbs,
1594 if (head) { /* splice remaining packets back onto Rx queue */
1595 spin_lock_irq(&q->lock);
1596 tail->next = q->rx_head;
1600 spin_unlock_irq(&q->lock);
1602 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1604 work_done = limit - avail;
1605 *budget -= work_done;
1606 dev->quota -= work_done;
1611 * rx_offload - process a received offload packet
1612 * @tdev: the offload device receiving the packet
1613 * @rq: the response queue that received the packet
1615 * @rx_gather: a gather list of packets if we are building a bundle
1616 * @gather_idx: index of the next available slot in the bundle
1618 * Process an ingress offload packet and add it to the offload ingress
1619 * queue. Returns the index of the next available slot in the bundle.
1621 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1622 struct sk_buff *skb, struct sk_buff *rx_gather[],
1623 unsigned int gather_idx)
1626 skb_reset_mac_header(skb);
1627 skb_reset_network_header(skb);
1628 skb_reset_transport_header(skb);
1631 rx_gather[gather_idx++] = skb;
1632 if (gather_idx == RX_BUNDLE_SIZE) {
1633 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1635 rq->offload_bundles++;
1638 offload_enqueue(rq, skb);
1644 * restart_tx - check whether to restart suspended Tx queues
1645 * @qs: the queue set to resume
1647 * Restarts suspended Tx queues of an SGE queue set if they have enough
1648 * free resources to resume operation.
1650 static void restart_tx(struct sge_qset *qs)
1652 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1653 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1654 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1655 qs->txq[TXQ_ETH].restarts++;
1656 if (netif_running(qs->netdev))
1657 netif_wake_queue(qs->netdev);
1660 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1661 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1662 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1663 qs->txq[TXQ_OFLD].restarts++;
1664 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1666 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1667 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1668 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1669 qs->txq[TXQ_CTRL].restarts++;
1670 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1675 * rx_eth - process an ingress ethernet packet
1676 * @adap: the adapter
1677 * @rq: the response queue that received the packet
1679 * @pad: amount of padding at the start of the buffer
1681 * Process an ingress ethernet packet and deliver it to the stack.
1682 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1683 * if it was immediate data in a response.
1685 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1686 struct sk_buff *skb, int pad)
1688 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1689 struct port_info *pi;
1691 skb_pull(skb, sizeof(*p) + pad);
1692 skb->dev->last_rx = jiffies;
1693 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
1694 pi = netdev_priv(skb->dev);
1695 if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
1697 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1698 skb->ip_summed = CHECKSUM_UNNECESSARY;
1700 skb->ip_summed = CHECKSUM_NONE;
1702 if (unlikely(p->vlan_valid)) {
1703 struct vlan_group *grp = pi->vlan_grp;
1705 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1707 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1710 dev_kfree_skb_any(skb);
1711 } else if (rq->polling)
1712 netif_receive_skb(skb);
1717 #define SKB_DATA_SIZE 128
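/*
 * When an Rx packet arrives in a page chunk, the first SKB_DATA_SIZE bytes
 * are copied into the skb head so the protocol headers are linear; any
 * remaining data is attached as a page fragment.
 */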
1719 static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
1723 if (len <= SKB_DATA_SIZE) {
1724 memcpy(skb->data, p->va, len);
1726 put_page(p->frag.page);
1728 memcpy(skb->data, p->va, SKB_DATA_SIZE);
1729 skb_shinfo(skb)->frags[0].page = p->frag.page;
1730 skb_shinfo(skb)->frags[0].page_offset =
1731 p->frag.page_offset + SKB_DATA_SIZE;
1732 skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
1733 skb_shinfo(skb)->nr_frags = 1;
1734 skb->data_len = len - SKB_DATA_SIZE;
1735 skb->tail += SKB_DATA_SIZE;
1736 skb->truesize += skb->data_len;
1741 * get_packet - return the next ingress packet buffer from a free list
1742 * @adap: the adapter that received the packet
1743 * @fl: the SGE free list holding the packet
1744 * @len: the packet length including any SGE padding
1745 * @drop_thres: # of remaining buffers before we start dropping packets
1747 * Get the next packet from a free list and complete setup of the
1748 * sk_buff. If the packet is small we make a copy and recycle the
1749 * original buffer, otherwise we use the original buffer itself. If a
1750 * positive drop threshold is supplied packets are dropped and their
1751 * buffers recycled if (a) the number of remaining buffers is under the
1752 * threshold and the packet is too big to copy, or (b) the packet should
1753 * be copied but there is no memory for the copy.
1755 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
1756 unsigned int len, unsigned int drop_thres)
1758 struct sk_buff *skb = NULL;
1759 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1761 prefetch(sd->t.skb->data);
1763 if (len <= SGE_RX_COPY_THRES) {
1764 skb = alloc_skb(len, GFP_ATOMIC);
1765 if (likely(skb != NULL)) {
1766 struct rx_desc *d = &fl->desc[fl->cidx];
1767 dma_addr_t mapping =
1768 (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
1769 be32_to_cpu(d->addr_lo));
1771 __skb_put(skb, len);
1772 pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
1773 PCI_DMA_FROMDEVICE);
1774 memcpy(skb->data, sd->t.skb->data, len);
1775 pci_dma_sync_single_for_device(adap->pdev, mapping, len,
1776 PCI_DMA_FROMDEVICE);
1777 } else if (!drop_thres)
1780 recycle_rx_buf(adap, fl, fl->cidx);
1784 if (unlikely(fl->credits < drop_thres))
1788 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
1789 fl->buf_size, PCI_DMA_FROMDEVICE);
1792 __refill_fl(adap, fl);
1797 * handle_rsp_cntrl_info - handles control information in a response
1798 * @qs: the queue set corresponding to the response
1799 * @flags: the response control flags
1801 * Handles the control information of an SGE response, such as GTS
1802 * indications and completion credits for the queue set's Tx queues.
1803 * HW coalesces credits, we don't do any extra SW coalescing.
1805 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
1807 unsigned int credits;
1810 if (flags & F_RSPD_TXQ0_GTS)
1811 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1814 credits = G_RSPD_TXQ0_CR(flags);
1816 qs->txq[TXQ_ETH].processed += credits;
1818 credits = G_RSPD_TXQ2_CR(flags);
1820 qs->txq[TXQ_CTRL].processed += credits;
1823 if (flags & F_RSPD_TXQ1_GTS)
1824 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1826 credits = G_RSPD_TXQ1_CR(flags);
1828 qs->txq[TXQ_OFLD].processed += credits;
1832 * check_ring_db - check if we need to ring any doorbells
1833 * @adapter: the adapter
1834 * @qs: the queue set whose Tx queues are to be examined
1835 * @sleeping: indicates which Tx queue sent GTS
1837 * Checks if some of a queue set's Tx queues need to ring their doorbells
1838 * to resume transmission after idling while they still have unprocessed
1841 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1842 unsigned int sleeping)
1844 if (sleeping & F_RSPD_TXQ0_GTS) {
1845 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1847 if (txq->cleaned + txq->in_use != txq->processed &&
1848 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1849 set_bit(TXQ_RUNNING, &txq->flags);
1850 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1851 V_EGRCNTX(txq->cntxt_id));
1855 if (sleeping & F_RSPD_TXQ1_GTS) {
1856 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1858 if (txq->cleaned + txq->in_use != txq->processed &&
1859 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1860 set_bit(TXQ_RUNNING, &txq->flags);
1861 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1862 V_EGRCNTX(txq->cntxt_id));
1868 * is_new_response - check if a response is newly written
1869 * @r: the response descriptor
1870 * @q: the response queue
1872 * Returns true if a response descriptor contains a yet unprocessed response.
1875 static inline int is_new_response(const struct rsp_desc *r,
1876 const struct sge_rspq *q)
1878 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1881 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1882 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1883 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1884 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1885 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1887 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
1888 #define NOMEM_INTR_DELAY 2500
1891 * process_responses - process responses from an SGE response queue
1892 * @adap: the adapter
1893 * @qs: the queue set to which the response queue belongs
1894 * @budget: how many responses can be processed in this round
1896 * Process responses from an SGE response queue up to the supplied budget.
1897 * Responses include received packets as well as credits and other events
1898 * for the queues that belong to the response queue's queue set.
1899 * A negative budget is effectively unlimited.
1901 * Additionally choose the interrupt holdoff time for the next interrupt
1902 * on this queue. If the system is under memory shortage use a fairly
1903 * long delay to help recovery.
1905 static int process_responses(struct adapter *adap, struct sge_qset *qs,
1908 struct sge_rspq *q = &qs->rspq;
1909 struct rsp_desc *r = &q->desc[q->cidx];
1910 int budget_left = budget;
1911 unsigned int sleeping = 0;
1912 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1915 q->next_holdoff = q->holdoff_tmr;
1917 while (likely(budget_left && is_new_response(r, q))) {
1918 int eth, ethpad = 2;
1919 struct sk_buff *skb = NULL;
1920 u32 len, flags = ntohl(r->flags);
1921 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
1923 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1925 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1926 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1930 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
1931 skb->data[0] = CPL_ASYNC_NOTIF;
1932 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
1934 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1935 skb = get_imm_packet(r);
1936 if (unlikely(!skb)) {
1938 q->next_holdoff = NOMEM_INTR_DELAY;
1940 /* consume one credit since we tried */
1946 } else if ((len = ntohl(r->len_cq)) != 0) {
1948 (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1950 if (fl->buf_size == RX_PAGE_SIZE) {
1951 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1952 struct sge_fl_page *p = &sd->t.page;
1955 prefetch(p->va + L1_CACHE_BYTES);
1957 __refill_fl(adap, fl);
1959 pci_unmap_single(adap->pdev,
1960 pci_unmap_addr(sd, dma_addr),
1962 PCI_DMA_FROMDEVICE);
1965 if (unlikely(fl->credits <
1969 skb = alloc_skb(SKB_DATA_SIZE,
1971 if (unlikely(!skb)) {
1974 recycle_rx_buf(adap, fl,
1979 skb = alloc_skb(SKB_DATA_SIZE,
1985 skb_data_init(skb, p, G_RSPD_LEN(len));
1991 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1992 eth ? SGE_RX_DROP_THRES : 0);
1995 if (++fl->cidx == fl->size)
2000 if (flags & RSPD_CTRL_MASK) {
2001 sleeping |= flags & RSPD_GTS_MASK;
2002 handle_rsp_cntrl_info(qs, flags);
2006 if (unlikely(++q->cidx == q->size)) {
2013 if (++q->credits >= (q->size / 4)) {
2014 refill_rspq(adap, q, q->credits);
2019 /* Preserve the RSS info in csum & priority */
2021 skb->priority = rss_lo;
2024 rx_eth(adap, q, skb, ethpad);
2026 if (unlikely(r->rss_hdr.opcode ==
2028 __skb_pull(skb, ethpad);
2030 ngathered = rx_offload(&adap->tdev, q,
2038 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2040 check_ring_db(adap, qs, sleeping);
2042 smp_mb(); /* commit Tx queue .processed updates */
2043 if (unlikely(qs->txq_stopped != 0))
2046 budget -= budget_left;
2050 static inline int is_pure_response(const struct rsp_desc *r)
2052 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
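/* "pure" == no async notification, no immediate data and no buffer
 * length/CQ information, i.e. the response carries no packet
 */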
2054 return (n | r->len_cq) == 0;
2058 * napi_rx_handler - the NAPI handler for Rx processing
2059 * @dev: the net device
2060 * @budget: how many packets we can process in this round
2062 * Handler for new data events when using NAPI.
2064 static int napi_rx_handler(struct net_device *dev, int *budget)
2066 struct adapter *adap = dev->priv;
2067 struct sge_qset *qs = dev2qset(dev);
2068 int effective_budget = min(*budget, dev->quota);
2070 int work_done = process_responses(adap, qs, effective_budget);
2071 *budget -= work_done;
2072 dev->quota -= work_done;
2074 if (work_done >= effective_budget)
2077 netif_rx_complete(dev);
2080 * Because we don't atomically flush the following write it is
2081 * possible that in very rare cases it can reach the device in a way
2082 * that races with a new response being written plus an error interrupt
2083 * causing the NAPI interrupt handler below to return unhandled status
2084 * to the OS. To protect against this would require flushing the write
2085 * and doing both the write and the flush with interrupts off. Way too
2086 * expensive and unjustifiable given the rarity of the race.
2088 * The race cannot happen at all with MSI-X.
2090 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2091 V_NEWTIMER(qs->rspq.next_holdoff) |
2092 V_NEWINDEX(qs->rspq.cidx));
2097 * Returns true if the device is already scheduled for polling.
2099 static inline int napi_is_scheduled(struct net_device *dev)
2101 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
2105 * process_pure_responses - process pure responses from a response queue
2106 * @adap: the adapter
2107 * @qs: the queue set owning the response queue
2108 * @r: the first pure response to process
2110 * A simpler version of process_responses() that handles only pure (i.e.,
2111 * non data-carrying) responses. Such responses are too lightweight to
2112 * justify calling a softirq under NAPI, so we handle them specially in
2113 * the interrupt handler. The function is called with a pointer to a
2114 * response, which the caller must ensure is a valid pure response.
2116 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2118 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2121 struct sge_rspq *q = &qs->rspq;
2122 unsigned int sleeping = 0;
2125 u32 flags = ntohl(r->flags);
2128 if (unlikely(++q->cidx == q->size)) {
2135 if (flags & RSPD_CTRL_MASK) {
2136 sleeping |= flags & RSPD_GTS_MASK;
2137 handle_rsp_cntrl_info(qs, flags);
2141 if (++q->credits >= (q->size / 4)) {
2142 refill_rspq(adap, q, q->credits);
2145 } while (is_new_response(r, q) && is_pure_response(r));
2148 check_ring_db(adap, qs, sleeping);
2150 smp_mb(); /* commit Tx queue .processed updates */
2151 if (unlikely(qs->txq_stopped != 0))
2154 return is_new_response(r, q);
2158 * handle_responses - decide what to do with new responses in NAPI mode
2159 * @adap: the adapter
2160 * @q: the response queue
2162 * This is used by the NAPI interrupt handlers to decide what to do with
2163 * new SGE responses. If there are no new responses it returns -1. If
2164 * there are new responses and they are pure (i.e., non-data carrying)
2165 * it handles them straight in hard interrupt context as they are very
2166 * cheap and don't deliver any packets. Finally, if there are any data
2167 * signaling responses it schedules the NAPI handler. Returns 1 if it
2168 * schedules NAPI, 0 if all new responses were pure.
2170 * The caller must ascertain NAPI is not already running.
2172 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2174 struct sge_qset *qs = rspq_to_qset(q);
2175 struct rsp_desc *r = &q->desc[q->cidx];
2177 if (!is_new_response(r, q))
2179 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2180 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2181 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
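/* all new responses were pure and were consumed above, so the GTS
 * write simply re-arms the queue; there is nothing for NAPI to do */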
2184 if (likely(__netif_rx_schedule_prep(qs->netdev)))
2185 __netif_rx_schedule(qs->netdev);
2190 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2191 * (i.e., response queue serviced in hard interrupt).
2193 irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2195 struct sge_qset *qs = cookie;
2196 struct adapter *adap = qs->netdev->priv;
2197 struct sge_rspq *q = &qs->rspq;
2199 spin_lock(&q->lock);
2200 if (process_responses(adap, qs, -1) == 0)
2201 q->unhandled_irqs++;
2202 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2203 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2204 spin_unlock(&q->lock);
2209 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2210 * (i.e., response queue serviced by NAPI polling).
2212 irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2214 struct sge_qset *qs = cookie;
2215 struct adapter *adap = qs->netdev->priv;
2216 struct sge_rspq *q = &qs->rspq;
2218 spin_lock(&q->lock);
2219 BUG_ON(napi_is_scheduled(qs->netdev));
2221 if (handle_responses(adap, q) < 0)
2222 q->unhandled_irqs++;
2223 spin_unlock(&q->lock);
2228 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2229 * SGE response queues as well as error and other async events as they all use
2230 * the same MSI vector. We use one SGE response queue per port in this mode
2231 * and protect all response queues with queue 0's lock.
2233 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2235 int new_packets = 0;
2236 struct adapter *adap = cookie;
2237 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2239 spin_lock(&q->lock);
2241 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2242 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2243 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2247 if (adap->params.nports == 2 &&
2248 process_responses(adap, &adap->sge.qs[1], -1)) {
2249 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2251 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2252 V_NEWTIMER(q1->next_holdoff) |
2253 V_NEWINDEX(q1->cidx));
2257 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2258 q->unhandled_irqs++;
2260 spin_unlock(&q->lock);
2264 static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
2266 if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
2267 if (likely(__netif_rx_schedule_prep(dev)))
2268 __netif_rx_schedule(dev);
2275 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2276 * by NAPI polling). Handles data events from SGE response queues as well as
2277 * error and other async events as they all use the same MSI vector. We use
2278 * one SGE response queue per port in this mode and protect all response
2279 * queues with queue 0's lock.
2281 irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2284 struct adapter *adap = cookie;
2285 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2287 spin_lock(&q->lock);
2289 new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
2290 if (adap->params.nports == 2)
2291 new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
2292 &adap->sge.qs[1].rspq);
2293 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2294 q->unhandled_irqs++;
2296 spin_unlock(&q->lock);
2301 * A helper function that processes responses and issues GTS.
2303 static inline int process_responses_gts(struct adapter *adap,
2304 struct sge_rspq *rq)
2308 work = process_responses(adap, rspq_to_qset(rq), -1);
2309 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2310 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2315 * The legacy INTx interrupt handler. This needs to handle data events from
2316 * SGE response queues as well as error and other async events as they all use
2317 * the same interrupt pin. We use one SGE response queue per port in this mode
2318 * and protect all response queues with queue 0's lock.
2320 static irqreturn_t t3_intr(int irq, void *cookie)
2322 int work_done, w0, w1;
2323 struct adapter *adap = cookie;
2324 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2325 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2327 spin_lock(&q0->lock);
2329 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2330 w1 = adap->params.nports == 2 &&
2331 is_new_response(&q1->desc[q1->cidx], q1);
2333 if (likely(w0 | w1)) {
2334 t3_write_reg(adap, A_PL_CLI, 0);
2335 t3_read_reg(adap, A_PL_CLI); /* flush */
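/* the PL_CLI write clears the legacy INTx interrupt and the read-back
 * flushes it to the device before the queues are serviced */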
2338 process_responses_gts(adap, q0);
2341 process_responses_gts(adap, q1);
2343 work_done = w0 | w1;
2345 work_done = t3_slow_intr_handler(adap);
2347 spin_unlock(&q0->lock);
2348 return IRQ_RETVAL(work_done != 0);
2352 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2353 * Handles data events from SGE response queues as well as error and other
2354 * async events as they all use the same interrupt pin. We use one SGE
2355 * response queue per port in this mode and protect all response queues with queue 0's lock.
2358 static irqreturn_t t3b_intr(int irq, void *cookie)
2361 struct adapter *adap = cookie;
2362 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2364 t3_write_reg(adap, A_PL_CLI, 0);
2365 map = t3_read_reg(adap, A_SG_DATA_INTR);
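/* each bit of the map corresponds to a response queue with new
 * entries; F_ERRINTR flags asynchronous (error) events */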
2367 if (unlikely(!map)) /* shared interrupt, most likely */
2370 spin_lock(&q0->lock);
2372 if (unlikely(map & F_ERRINTR))
2373 t3_slow_intr_handler(adap);
2375 if (likely(map & 1))
2376 process_responses_gts(adap, q0);
2379 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2381 spin_unlock(&q0->lock);
2386 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2387 * Handles data events from SGE response queues as well as error and other
2388 * async events as they all use the same interrupt pin. We use one SGE
2389 * response queue per port in this mode and protect all response queues with queue 0's lock.
2392 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2395 struct net_device *dev;
2396 struct adapter *adap = cookie;
2397 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2399 t3_write_reg(adap, A_PL_CLI, 0);
2400 map = t3_read_reg(adap, A_SG_DATA_INTR);
2402 if (unlikely(!map)) /* shared interrupt, most likely */
2405 spin_lock(&q0->lock);
2407 if (unlikely(map & F_ERRINTR))
2408 t3_slow_intr_handler(adap);
2410 if (likely(map & 1)) {
2411 dev = adap->sge.qs[0].netdev;
2413 if (likely(__netif_rx_schedule_prep(dev)))
2414 __netif_rx_schedule(dev);
2417 dev = adap->sge.qs[1].netdev;
2419 if (likely(__netif_rx_schedule_prep(dev)))
2420 __netif_rx_schedule(dev);
2423 spin_unlock(&q0->lock);
2428 * t3_intr_handler - select the top-level interrupt handler
2429 * @adap: the adapter
2430 * @polling: whether using NAPI to service response queues
2432 * Selects the top-level interrupt handler based on the type of interrupts
2433 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the response queues.
2436 intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
2438 if (adap->flags & USING_MSIX)
2439 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2440 if (adap->flags & USING_MSI)
2441 return polling ? t3_intr_msi_napi : t3_intr_msi;
2442 if (adap->params.rev > 0)
2443 return polling ? t3b_intr_napi : t3b_intr;
2448 * t3_sge_err_intr_handler - SGE async event interrupt handler
2449 * @adapter: the adapter
2451 * Interrupt handler for SGE asynchronous (non-data) events.
2453 void t3_sge_err_intr_handler(struct adapter *adapter)
2455 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2457 if (status & F_RSPQCREDITOVERFOW)
2458 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2460 if (status & F_RSPQDISABLED) {
2461 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2464 "packet delivered to disabled response queue "
2465 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2468 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
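/* either of these conditions leaves the SGE in an unusable state,
 * so treat them as fatal and take the adapter down */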
2469 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
2470 t3_fatal_err(adapter);
2474 * sge_timer_cb - perform periodic maintenance of an SGE qset
2475 * @data: the SGE queue set to maintain
2477 * Runs periodically from a timer to perform maintenance of an SGE queue
2478 * set. It performs two tasks:
2480 * a) Cleans up any completed Tx descriptors that may still be pending.
2481 * Normal descriptor cleanup happens when new packets are added to a Tx
2482 * queue, so this timer is relatively infrequent and does any cleanup only
2483 * if the Tx queue has not seen any new packets in a while. We make a
2484 * best-effort attempt to reclaim descriptors, in that we don't wait
2485 * around if we cannot get a queue's lock (which most likely is because
2486 * someone else is queueing new packets and so will also handle the clean
2487 * up). Since control queues use immediate data exclusively we don't
2488 * bother cleaning them up here.
2490 * b) Replenishes Rx queues that have run out due to memory shortage.
2491 * Normally new Rx buffers are added when existing ones are consumed but
2492 * when out of memory a queue can become empty. We try to add only a few
2493 * buffers here, the queue will be replenished fully as these new buffers
2494 * are used up if the memory shortage has subsided.
2496 static void sge_timer_cb(unsigned long data)
2499 struct sge_qset *qs = (struct sge_qset *)data;
2500 struct adapter *adap = qs->netdev->priv;
2502 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2503 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2504 spin_unlock(&qs->txq[TXQ_ETH].lock);
2506 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2507 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2508 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2510 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2511 &adap->sge.qs[0].rspq.lock;
2512 if (spin_trylock_irq(lock)) {
2513 if (!napi_is_scheduled(qs->netdev)) {
2514 u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2516 if (qs->fl[0].credits < qs->fl[0].size)
2517 __refill_fl(adap, &qs->fl[0]);
2518 if (qs->fl[1].credits < qs->fl[1].size)
2519 __refill_fl(adap, &qs->fl[1]);
2521 if (status & (1 << qs->rspq.cntxt_id)) {
2523 if (qs->rspq.credits) {
2524 refill_rspq(adap, &qs->rspq, 1);
2526 qs->rspq.restarted++;
2527 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2528 1 << qs->rspq.cntxt_id);
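/* the response queue had stalled for lack of credits; returning a
 * single credit and clearing its status bit restarts it */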
2532 spin_unlock_irq(lock);
2534 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2538 * t3_update_qset_coalesce - update coalescing settings for a queue set
2539 * @qs: the SGE queue set
2540 * @p: new queue set parameters
2542 * Update the coalescing settings for an SGE queue set. Nothing is done
2543 * if the queue set is not initialized yet.
2545 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
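/*
 * The holdoff value is expressed in SGE timer ticks; t3_sge_init()
 * programs the tick to 1/10 of a microsecond, hence the conversion from
 * microseconds by a factor of 10 (e.g. coalesce_usecs = 5 becomes a
 * holdoff of 50 ticks).
 */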
2550 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
2551 qs->rspq.polling = p->polling;
2552 qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
2556 * t3_sge_alloc_qset - initialize an SGE queue set
2557 * @adapter: the adapter
2558 * @id: the queue set id
2559 * @nports: how many Ethernet ports will be using this queue set
2560 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2561 * @p: configuration parameters for this queue set
2562 * @ntxq: number of Tx queues for the queue set
2563 * @netdev: net device associated with this queue set
2565 * Allocate resources and initialize an SGE queue set. A queue set
2566 * comprises a response queue, two Rx free-buffer queues, and up to 3
2567 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2568 * queue, offload queue, and control queue.
2570 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2571 int irq_vec_idx, const struct qset_params *p,
2572 int ntxq, struct net_device *netdev)
2574 int i, ret = -ENOMEM;
2575 struct sge_qset *q = &adapter->sge.qs[id];
2577 init_qset_cntxt(q, id);
2578 init_timer(&q->tx_reclaim_timer);
2579 q->tx_reclaim_timer.data = (unsigned long)q;
2580 q->tx_reclaim_timer.function = sge_timer_cb;
2582 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2583 sizeof(struct rx_desc),
2584 sizeof(struct rx_sw_desc),
2585 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2589 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2590 sizeof(struct rx_desc),
2591 sizeof(struct rx_sw_desc),
2592 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2596 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2597 sizeof(struct rsp_desc), 0,
2598 &q->rspq.phys_addr, NULL);
2602 for (i = 0; i < ntxq; ++i) {
2604 * The control queue always uses immediate data so does not
2605 * need to keep track of any sk_buffs.
2607 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2609 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2610 sizeof(struct tx_desc), sz,
2611 &q->txq[i].phys_addr,
2613 if (!q->txq[i].desc)
2617 q->txq[i].size = p->txq_size[i];
2618 spin_lock_init(&q->txq[i].lock);
2619 skb_queue_head_init(&q->txq[i].sendq);
2622 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2624 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2627 q->fl[0].gen = q->fl[1].gen = 1;
2628 q->fl[0].size = p->fl_size;
2629 q->fl[1].size = p->jumbo_size;
2632 q->rspq.size = p->rspq_size;
2633 spin_lock_init(&q->rspq.lock);
2635 q->txq[TXQ_ETH].stop_thres = nports *
2636 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
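/*
 * Stop the Ethernet Tx queue while there is still room for one
 * worst-case packet per port: an SGL covering the linear part plus
 * MAX_SKB_FRAGS page fragments, plus a few flits of WR/CPL headers.
 */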
2638 if (!is_offload(adapter)) {
2640 q->fl[0].buf_size = RX_PAGE_SIZE;
2642 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
2643 sizeof(struct cpl_rx_pkt);
2645 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
2646 sizeof(struct cpl_rx_pkt);
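/* the NIC-mode buffer sizes leave room for the CPL header that
 * precedes each received packet and for the 2-byte packet shift
 * (V_PKTSHIFT(2) in t3_sge_init()) that aligns the IP header */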
2649 q->fl[0].buf_size = RX_PAGE_SIZE;
2651 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
2652 sizeof(struct cpl_rx_data);
2654 q->fl[1].buf_size = (16 * 1024) -
2655 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
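/* for offload, the jumbo free list uses the largest buffer that still
 * fits in a 16KB allocation once the skb_shared_info at the end of the
 * skb data area is accounted for */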
2658 spin_lock(&adapter->sge.reg_lock);
2660 /* FL threshold comparison uses < */
2661 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2662 q->rspq.phys_addr, q->rspq.size,
2663 q->fl[0].buf_size, 1, 0);
2667 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2668 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2669 q->fl[i].phys_addr, q->fl[i].size,
2670 q->fl[i].buf_size, p->cong_thres, 1,
2676 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2677 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2678 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2684 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2685 USE_GTS, SGE_CNTXT_OFLD, id,
2686 q->txq[TXQ_OFLD].phys_addr,
2687 q->txq[TXQ_OFLD].size, 0, 1, 0);
2693 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2695 q->txq[TXQ_CTRL].phys_addr,
2696 q->txq[TXQ_CTRL].size,
2697 q->txq[TXQ_CTRL].token, 1, 0);
2702 spin_unlock(&adapter->sge.reg_lock);
2704 t3_update_qset_coalesce(q, p);
2707 * We use atalk_ptr as a backpointer to a qset. In case a device is
2708 * associated with multiple queue sets, only the first one sets atalk_ptr.
2711 if (netdev->atalk_ptr == NULL)
2712 netdev->atalk_ptr = q;
2714 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2715 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2716 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2718 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2719 V_NEWTIMER(q->rspq.holdoff_tmr));
2721 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2725 spin_unlock(&adapter->sge.reg_lock);
2727 t3_free_qset(adapter, q);
2732 * t3_free_sge_resources - free SGE resources
2733 * @adap: the adapter
2735 * Frees resources used by the SGE queue sets.
2737 void t3_free_sge_resources(struct adapter *adap)
2741 for (i = 0; i < SGE_QSETS; ++i)
2742 t3_free_qset(adap, &adap->sge.qs[i]);
2746 * t3_sge_start - enable SGE
2747 * @adap: the adapter
2749 * Enables the SGE for DMAs. This is the last step in starting packet transfers.
2752 void t3_sge_start(struct adapter *adap)
2754 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2758 * t3_sge_stop - disable SGE operation
2759 * @adap: the adapter
2761 * Disables the DMA engine. This can be called in emergencies (e.g.,
2762 * from error interrupts) or from normal process context. In the latter
2763 * case it also disables any pending queue restart tasklets. Note that
2764 * if it is called in interrupt context it cannot disable the restart
2765 * tasklets as it cannot wait; however, the tasklets will have no effect
2766 * since the doorbells are disabled and the driver will call this again
2767 * later from process context, at which time the tasklets will be stopped
2768 * if they are still running.
2770 void t3_sge_stop(struct adapter *adap)
2772 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2773 if (!in_interrupt()) {
2776 for (i = 0; i < SGE_QSETS; ++i) {
2777 struct sge_qset *qs = &adap->sge.qs[i];
2779 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2780 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2786 * t3_sge_init - initialize SGE
2787 * @adap: the adapter
2788 * @p: the SGE parameters
2790 * Performs SGE initialization needed every time after a chip reset.
2791 * We do not initialize any of the queue sets here, instead the driver
2792 * top-level must request those individually. We also do not enable DMA
2793 * here, that should be done after the queues have been set up.
2795 void t3_sge_init(struct adapter *adap, struct sge_params *p)
2797 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
2799 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2801 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2802 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2803 #if SGE_NUM_GENBITS == 1
2804 ctrl |= F_EGRGENCTRL;
2806 if (adap->params.rev > 0) {
2807 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2808 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2809 ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
2811 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2812 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2813 V_LORCQDRBTHRSH(512));
2814 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2815 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
2816 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
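/* command-queue credit updates: threshold of 32 credits or a 200 usec
 * timeout, the latter expressed in core clock ticks */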
2817 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
2818 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2819 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2820 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2821 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2822 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2826 * t3_sge_prep - one-time SGE initialization
2827 * @adap: the associated adapter
2828 * @p: SGE parameters
2830 * Performs one-time initialization of SGE SW state, including determining
2831 * defaults for the assorted SGE parameters, which admins can change until
2832 * they are used to initialize the SGE.
2834 void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2838 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2839 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
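/*
 * The default maximum payload is chosen so that the payload, the
 * cpl_rx_data header in front of it, and the skb_shared_info at the end
 * of the skb data area all fit in a 16KB buffer.
 */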
2841 for (i = 0; i < SGE_QSETS; ++i) {
2842 struct qset_params *q = p->qset + i;
2844 q->polling = adap->params.rev > 0;
2845 q->coalesce_usecs = 5;
2846 q->rspq_size = 1024;
2848 q->jumbo_size = 512;
2849 q->txq_size[TXQ_ETH] = 1024;
2850 q->txq_size[TXQ_OFLD] = 1024;
2851 q->txq_size[TXQ_CTRL] = 256;
2855 spin_lock_init(&adap->sge.reg_lock);
2859 * t3_get_desc - dump an SGE descriptor for debugging purposes
2860 * @qs: the queue set
2861 * @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
2862 * @idx: the descriptor index in the queue
2863 * @data: where to dump the descriptor contents
2865 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2866 * size of the descriptor.
2868 int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2869 unsigned char *data)
2875 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2877 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2878 return sizeof(struct tx_desc);
2882 if (!qs->rspq.desc || idx >= qs->rspq.size)
2884 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2885 return sizeof(struct rsp_desc);
2889 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2891 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2892 return sizeof(struct rx_desc);