2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/dma-mapping.h>
43 #include "firmware_exports.h"
47 #define SGE_RX_SM_BUF_SIZE 1536
50 * If USE_RX_PAGE is defined, the small freelist is populated with (partial)
51 * pages instead of skbs. Pages are carved up into RX_PAGE_SIZE chunks (must
52 * be a multiple of the host page size).
55 #define RX_PAGE_SIZE 2048
58 * skb freelist packets are copied into a new skb (and the freelist one is
59 * reused) if their len is <=
61 #define SGE_RX_COPY_THRES 256
64 * Minimum number of freelist entries before we start dropping TUNNEL frames.
66 #define SGE_RX_DROP_THRES 16
69 * Period of the Tx buffer reclaim timer. This timer does not need to run
70 * frequently as Tx buffers are usually reclaimed by new Tx packets.
72 #define TX_RECLAIM_PERIOD (HZ / 4)
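/* HZ / 4 jiffies, i.e. the reclaim timer fires about four times per second. */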
74 /* WR size in bytes */
75 #define WR_LEN (WR_FLITS * 8)
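/* A flit is one 64-bit (8-byte) word; descriptor, WR and SGL sizes in this
 * file are all counted in flits. */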
78 * Types of Tx queues in each queue set. Order here matters, do not change.
80 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
82 /* Values for sge_txq.flags */
84 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
85 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
89 u64 flit[TX_DESC_FLITS];
99 struct tx_sw_desc { /* SW state per Tx descriptor */
103 struct rx_sw_desc { /* SW state per Rx descriptor */
106 struct sge_fl_page page;
108 DECLARE_PCI_UNMAP_ADDR(dma_addr);
111 struct rsp_desc { /* response queue descriptor */
112 struct rss_header rss_hdr;
119 struct unmap_info { /* packet unmapping info, overlays skb->cb */
120 int sflit; /* start flit of first SGL entry in Tx descriptor */
121 u16 fragidx; /* first page fragment in current Tx descriptor */
122 u16 addr_idx; /* buffer index of first SGL entry in descriptor */
123 u32 len; /* mapped length of skb main body */
127 * Holds unmapping information for Tx packets that need deferred unmapping.
128 * This structure lives at skb->head and must be allocated by callers.
130 struct deferred_unmap_info {
131 struct pci_dev *pdev;
132 dma_addr_t addr[MAX_SKB_FRAGS + 1];
136 * Maps a number of flits to the number of Tx descriptors that can hold them.
139 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
141 * HW allows up to 4 descriptors to be combined into a WR.
143 static u8 flit_desc_map[] = {
145 #if SGE_NUM_GENBITS == 1
146 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
147 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
148 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
149 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
150 #elif SGE_NUM_GENBITS == 2
151 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
152 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
153 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
154 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
156 # error "SGE_NUM_GENBITS must be 1 or 2"
160 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
162 return container_of(q, struct sge_qset, fl[qidx]);
165 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
167 return container_of(q, struct sge_qset, rspq);
170 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
172 return container_of(q, struct sge_qset, txq[qidx]);
176 * refill_rspq - replenish an SGE response queue
177 * @adapter: the adapter
178 * @q: the response queue to replenish
179 * @credits: how many new responses to make available
181 * Replenishes a response queue by making the supplied number of responses
184 static inline void refill_rspq(struct adapter *adapter,
185 const struct sge_rspq *q, unsigned int credits)
187 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
188 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
192 * need_skb_unmap - does the platform need unmapping of sk_buffs?
194 * Returns true if the platform needs sk_buff unmapping. The compiler
195 * optimizes away the unmapping code in callers when this returns false.
197 static inline int need_skb_unmap(void)
200 * This structure is used to tell if the platform needs buffer
201 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
204 DECLARE_PCI_UNMAP_ADDR(addr);
207 return sizeof(struct dummy) != 0;
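	/* sizeof() of an empty struct is 0 under gcc, so when
	 * DECLARE_PCI_UNMAP_ADDR expands to nothing the expression above is a
	 * compile-time 0 and callers' unmapping code is optimized out. */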
211 * unmap_skb - unmap a packet main body and its page fragments
213 * @q: the Tx queue containing Tx descriptors for the packet
214 * @cidx: index of Tx descriptor
215 * @pdev: the PCI device
217 * Unmap the main body of an sk_buff and its page fragments, if any.
218 * Because of the fairly complicated structure of our SGLs and the desire
219 * to conserve space for metadata, we keep the information necessary to
220 * unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
221 * in the Tx descriptors (the physical addresses of the various data
222 * buffers). The send functions initialize the state in skb->cb so we
223 * can unmap the buffers held in the first Tx descriptor here, and we
224 * have enough information at this point to update the state for the next
227 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
228 unsigned int cidx, struct pci_dev *pdev)
230 const struct sg_ent *sgp;
231 struct unmap_info *ui = (struct unmap_info *)skb->cb;
232 int nfrags, frag_idx, curflit, j = ui->addr_idx;
234 sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];
237 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
239 ui->len = 0; /* so we know for next descriptor for this skb */
243 frag_idx = ui->fragidx;
244 curflit = ui->sflit + 1 + j;
245 nfrags = skb_shinfo(skb)->nr_frags;
247 while (frag_idx < nfrags && curflit < WR_FLITS) {
248 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
249 skb_shinfo(skb)->frags[frag_idx].size,
260 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
261 ui->fragidx = frag_idx;
263 ui->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
268 * free_tx_desc - reclaims Tx descriptors and their buffers
269 * @adapter: the adapter
270 * @q: the Tx queue to reclaim descriptors from
271 * @n: the number of descriptors to reclaim
273 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
274 * Tx buffers. Called with the Tx queue lock held.
276 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
279 struct tx_sw_desc *d;
280 struct pci_dev *pdev = adapter->pdev;
281 unsigned int cidx = q->cidx;
283 const int need_unmap = need_skb_unmap() &&
284 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
288 if (d->skb) { /* an SGL is present */
290 unmap_skb(d->skb, q, cidx, pdev);
291 if (d->skb->priority == cidx)
295 if (++cidx == q->size) {
304 * reclaim_completed_tx - reclaims completed Tx descriptors
305 * @adapter: the adapter
306 * @q: the Tx queue to reclaim completed descriptors from
308 * Reclaims Tx descriptors that the SGE has indicated it has processed,
309 * and frees the associated buffers if possible. Called with the Tx
312 static inline void reclaim_completed_tx(struct adapter *adapter,
315 unsigned int reclaim = q->processed - q->cleaned;
318 free_tx_desc(adapter, q, reclaim);
319 q->cleaned += reclaim;
320 q->in_use -= reclaim;
325 * should_restart_tx - are there enough resources to restart a Tx queue?
328 * Checks if there are enough descriptors to restart a suspended Tx queue.
330 static inline int should_restart_tx(const struct sge_txq *q)
332 unsigned int r = q->processed - q->cleaned;
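	/* Restart only if, once the r completed descriptors are reclaimed,
	 * fewer than half of the queue's entries would still be in use. */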
334 return q->in_use - r < (q->size >> 1);
338 * free_rx_bufs - free the Rx buffers on an SGE free list
339 * @pdev: the PCI device associated with the adapter
340 * @rxq: the SGE free list to clean up
342 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
343 * this queue should be stopped before calling this function.
345 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
347 unsigned int cidx = q->cidx;
349 while (q->credits--) {
350 struct rx_sw_desc *d = &q->sdesc[cidx];
352 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
353 q->buf_size, PCI_DMA_FROMDEVICE);
355 if (q->buf_size != RX_PAGE_SIZE) {
359 if (d->t.page.frag.page)
360 put_page(d->t.page.frag.page);
361 d->t.page.frag.page = NULL;
363 if (++cidx == q->size)
367 if (q->page.frag.page)
368 put_page(q->page.frag.page);
369 q->page.frag.page = NULL;
373 * add_one_rx_buf - add a packet buffer to a free-buffer list
374 * @va: va of the buffer to add
375 * @len: the buffer length
376 * @d: the HW Rx descriptor to write
377 * @sd: the SW Rx descriptor to write
378 * @gen: the generation bit value
379 * @pdev: the PCI device associated with the adapter
381 * Add a buffer of the given length to the supplied HW and SW Rx
384 static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
385 struct rx_desc *d, struct rx_sw_desc *sd,
386 unsigned int gen, struct pci_dev *pdev)
390 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
391 pci_unmap_addr_set(sd, dma_addr, mapping);
393 d->addr_lo = cpu_to_be32(mapping);
394 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
396 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
397 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
401 * refill_fl - refill an SGE free-buffer list
402 * @adapter: the adapter
403 * @q: the free-list to refill
404 * @n: the number of new buffers to allocate
405 * @gfp: the gfp flags for allocating new buffers
407 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
408 * allocated with the supplied gfp flags. The caller must ensure that
409 * @n does not exceed the queue's capacity.
411 static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
413 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
414 struct rx_desc *d = &q->desc[q->pidx];
415 struct sge_fl_page *p = &q->page;
420 if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
421 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
431 p->frag.page = alloc_pages(gfp, 0);
432 if (unlikely(!p->frag.page)) {
436 p->frag.size = RX_PAGE_SIZE;
437 p->frag.page_offset = 0;
438 p->va = page_address(p->frag.page);
442 memcpy(&sd->t, p, sizeof(*p));
445 p->frag.page_offset += RX_PAGE_SIZE;
446 BUG_ON(p->frag.page_offset > PAGE_SIZE);
447 p->va += RX_PAGE_SIZE;
448 if (p->frag.page_offset == PAGE_SIZE)
451 get_page(p->frag.page);
454 add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);
458 if (++q->pidx == q->size) {
467 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
470 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
472 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
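	/* Opportunistic top-up from the Rx path: at most 16 buffers at a time,
	 * allocated with GFP_ATOMIC since we may be running in atomic context. */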
476 * recycle_rx_buf - recycle a receive buffer
477 * @adapter: the adapter
478 * @q: the SGE free list
479 * @idx: index of buffer to recycle
481 * Recycles the specified buffer on the given free list by adding it at
482 * the next available slot on the list.
484 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
487 struct rx_desc *from = &q->desc[idx];
488 struct rx_desc *to = &q->desc[q->pidx];
490 memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
491 to->addr_lo = from->addr_lo; /* already big endian */
492 to->addr_hi = from->addr_hi; /* likewise */
494 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
495 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
498 if (++q->pidx == q->size) {
502 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
506 * alloc_ring - allocate resources for an SGE descriptor ring
507 * @pdev: the PCI device
508 * @nelem: the number of descriptors
509 * @elem_size: the size of each descriptor
510 * @sw_size: the size of the SW state associated with each ring element
511 * @phys: the physical address of the allocated ring
512 * @metadata: address of the array holding the SW state for the ring
514 * Allocates resources for an SGE descriptor ring, such as Tx queues,
515 * free buffer lists, or response queues. Each SGE ring requires
516 * space for its HW descriptors plus, optionally, space for the SW state
517 * associated with each HW entry (the metadata). The function returns
518 * three values: the virtual address for the HW ring (the return value
519 * of the function), the physical address of the HW ring, and the address
522 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
523 size_t sw_size, dma_addr_t * phys, void *metadata)
525 size_t len = nelem * elem_size;
527 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
532 s = kcalloc(nelem, sw_size, GFP_KERNEL);
535 dma_free_coherent(&pdev->dev, len, p, *phys);
540 *(void **)metadata = s;
546 * free_qset - free the resources of an SGE queue set
547 * @adapter: the adapter owning the queue set
550 * Release the HW and SW resources associated with an SGE queue set, such
551 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
552 * queue set must be quiesced prior to calling this.
554 void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
557 struct pci_dev *pdev = adapter->pdev;
559 if (q->tx_reclaim_timer.function)
560 del_timer_sync(&q->tx_reclaim_timer);
562 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
564 spin_lock(&adapter->sge.reg_lock);
565 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
566 spin_unlock(&adapter->sge.reg_lock);
567 free_rx_bufs(pdev, &q->fl[i]);
568 kfree(q->fl[i].sdesc);
569 dma_free_coherent(&pdev->dev,
571 sizeof(struct rx_desc), q->fl[i].desc,
575 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
576 if (q->txq[i].desc) {
577 spin_lock(&adapter->sge.reg_lock);
578 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
579 spin_unlock(&adapter->sge.reg_lock);
580 if (q->txq[i].sdesc) {
581 free_tx_desc(adapter, &q->txq[i],
583 kfree(q->txq[i].sdesc);
585 dma_free_coherent(&pdev->dev,
587 sizeof(struct tx_desc),
588 q->txq[i].desc, q->txq[i].phys_addr);
589 __skb_queue_purge(&q->txq[i].sendq);
593 spin_lock(&adapter->sge.reg_lock);
594 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
595 spin_unlock(&adapter->sge.reg_lock);
596 dma_free_coherent(&pdev->dev,
597 q->rspq.size * sizeof(struct rsp_desc),
598 q->rspq.desc, q->rspq.phys_addr);
602 q->netdev->atalk_ptr = NULL;
604 memset(q, 0, sizeof(*q));
608 * init_qset_cntxt - initialize an SGE queue set's context info
610 * @id: the queue set id
612 * Initializes the TIDs and context ids for the queues of a queue set.
614 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
616 qs->rspq.cntxt_id = id;
617 qs->fl[0].cntxt_id = 2 * id;
618 qs->fl[1].cntxt_id = 2 * id + 1;
619 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
620 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
621 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
622 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
623 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
627 * sgl_len - calculates the size of an SGL of the given capacity
628 * @n: the number of SGL entries
630 * Calculates the number of flits needed for a scatter/gather list that
631 * can hold the given number of entries.
633 static inline unsigned int sgl_len(unsigned int n)
635 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
636 return (3 * n) / 2 + (n & 1);
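	/* Two SGL entries (address/length pairs) pack into 3 flits, hence e.g.
	 * sgl_len(2) == 3, sgl_len(3) == 5, sgl_len(4) == 6. */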
640 * flits_to_desc - returns the num of Tx descriptors for the given flits
641 * @n: the number of flits
643 * Calculates the number of Tx descriptors needed for the supplied number
646 static inline unsigned int flits_to_desc(unsigned int n)
648 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
649 return flit_desc_map[n];
653 * get_imm_packet - return the next ingress packet buffer from a response
654 * @resp: the response descriptor containing the packet data
656 * Return a packet containing the immediate data of the given response.
658 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
660 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
663 __skb_put(skb, IMMED_PKT_SIZE);
664 memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
670 * calc_tx_descs - calculate the number of Tx descriptors for a packet
673 * Returns the number of Tx descriptors needed for the given Ethernet
674 * packet. Ethernet packets require addition of WR and CPL headers.
676 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
680 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
683 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
684 if (skb_shinfo(skb)->gso_size)
686 return flits_to_desc(flits);
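	/* Example: a non-GSO packet with a linear area and two page fragments
	 * needs sgl_len(2 + 1) + 2 = 7 flits, which flit_desc_map resolves to
	 * a single Tx descriptor. */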
690 * make_sgl - populate a scatter/gather list for a packet
692 * @sgp: the SGL to populate
693 * @start: start address of skb main body data to include in the SGL
694 * @len: length of skb main body data to include in the SGL
695 * @pdev: the PCI device
697 * Generates a scatter/gather list for the buffers that make up a packet
698 * and returns the SGL size in 8-byte words. The caller must size the SGL
701 static inline unsigned int make_sgl(const struct sk_buff *skb,
702 struct sg_ent *sgp, unsigned char *start,
703 unsigned int len, struct pci_dev *pdev)
706 unsigned int i, j = 0, nfrags;
709 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
710 sgp->len[0] = cpu_to_be32(len);
711 sgp->addr[0] = cpu_to_be64(mapping);
715 nfrags = skb_shinfo(skb)->nr_frags;
716 for (i = 0; i < nfrags; i++) {
717 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
719 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
720 frag->size, PCI_DMA_TODEVICE);
721 sgp->len[j] = cpu_to_be32(frag->size);
722 sgp->addr[j] = cpu_to_be64(mapping);
729 return ((nfrags + (len != 0)) * 3) / 2 + j;
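	/* Equivalent to sgl_len(nfrags + (len != 0)): j is 1 exactly when the
	 * last 3-flit entry pair is only half populated. */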
733 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
737 * Ring the doorbell if a Tx queue is asleep. There is a natural race
738 * where the HW goes to sleep just after we check; in that case the
739 * interrupt handler will detect the outstanding TX packet
740 * and ring the doorbell for us.
742 * When GTS is disabled we unconditionally ring the doorbell.
744 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
747 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
748 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
749 set_bit(TXQ_LAST_PKT_DB, &q->flags);
750 t3_write_reg(adap, A_SG_KDOORBELL,
751 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
754 wmb(); /* write descriptors before telling HW */
755 t3_write_reg(adap, A_SG_KDOORBELL,
756 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
760 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
762 #if SGE_NUM_GENBITS == 2
763 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
768 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
769 * @ndesc: number of Tx descriptors spanned by the SGL
770 * @skb: the packet corresponding to the WR
771 * @d: first Tx descriptor to be written
772 * @pidx: index of above descriptors
773 * @q: the SGE Tx queue
775 * @flits: number of flits to the start of the SGL in the first descriptor
776 * @sgl_flits: the SGL size in flits
777 * @gen: the Tx descriptor generation
778 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
779 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
781 * Write a work request header and an associated SGL. If the SGL is
782 * small enough to fit into one Tx descriptor it has already been written
783 * and we just need to write the WR header. Otherwise we distribute the
784 * SGL across the number of descriptors it spans.
786 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
787 struct tx_desc *d, unsigned int pidx,
788 const struct sge_txq *q,
789 const struct sg_ent *sgl,
790 unsigned int flits, unsigned int sgl_flits,
791 unsigned int gen, unsigned int wr_hi,
794 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
795 struct tx_sw_desc *sd = &q->sdesc[pidx];
798 if (need_skb_unmap()) {
799 struct unmap_info *ui = (struct unmap_info *)skb->cb;
806 if (likely(ndesc == 1)) {
807 skb->priority = pidx;
808 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
809 V_WR_SGLSFLT(flits)) | wr_hi;
811 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
812 V_WR_GEN(gen)) | wr_lo;
815 unsigned int ogen = gen;
816 const u64 *fp = (const u64 *)sgl;
817 struct work_request_hdr *wp = wrp;
819 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
820 V_WR_SGLSFLT(flits)) | wr_hi;
823 unsigned int avail = WR_FLITS - flits;
825 if (avail > sgl_flits)
827 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
836 if (++pidx == q->size) {
844 wrp = (struct work_request_hdr *)d;
845 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
846 V_WR_SGLSFLT(1)) | wr_hi;
847 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
849 V_WR_GEN(gen)) | wr_lo;
853 skb->priority = pidx;
854 wrp->wr_hi |= htonl(F_WR_EOP);
856 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
857 wr_gen2((struct tx_desc *)wp, ogen);
863 * write_tx_pkt_wr - write a TX_PKT work request
865 * @skb: the packet to send
866 * @pi: the egress interface
867 * @pidx: index of the first Tx descriptor to write
868 * @gen: the generation value to use
870 * @ndesc: number of descriptors the packet will occupy
871 * @compl: the value of the COMPL bit to use
873 * Generate a TX_PKT work request to send the supplied packet.
875 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
876 const struct port_info *pi,
877 unsigned int pidx, unsigned int gen,
878 struct sge_txq *q, unsigned int ndesc,
881 unsigned int flits, sgl_flits, cntrl, tso_info;
882 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
883 struct tx_desc *d = &q->desc[pidx];
884 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
886 cpl->len = htonl(skb->len | 0x80000000);
887 cntrl = V_TXPKT_INTF(pi->port_id);
889 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
890 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
892 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
895 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
898 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
899 hdr->cntrl = htonl(cntrl);
900 eth_type = skb_network_offset(skb) == ETH_HLEN ?
901 CPL_ETH_II : CPL_ETH_II_VLAN;
902 tso_info |= V_LSO_ETH_TYPE(eth_type) |
903 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
904 V_LSO_TCPHDR_WORDS(skb->h.th->doff);
905 hdr->lso_info = htonl(tso_info);
908 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
909 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
910 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
911 cpl->cntrl = htonl(cntrl);
913 if (skb->len <= WR_LEN - sizeof(*cpl)) {
914 q->sdesc[pidx].skb = NULL;
916 memcpy(&d->flit[2], skb->data, skb->len);
918 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
920 flits = (skb->len + 7) / 8 + 2;
921 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
922 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
923 | F_WR_SOP | F_WR_EOP | compl);
925 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
935 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
936 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
937 if (need_skb_unmap())
938 ((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
940 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
941 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
942 htonl(V_WR_TID(q->token)));
946 * t3_eth_xmit - add a packet to the Ethernet Tx queue
948 * @dev: the egress net device
950 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
952 int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
954 unsigned int ndesc, pidx, credits, gen, compl;
955 const struct port_info *pi = netdev_priv(dev);
956 struct adapter *adap = dev->priv;
957 struct sge_qset *qs = dev2qset(dev);
958 struct sge_txq *q = &qs->txq[TXQ_ETH];
961 * The chip min packet length is 9 octets but play safe and reject
962 * anything shorter than an Ethernet header.
964 if (unlikely(skb->len < ETH_HLEN)) {
970 reclaim_completed_tx(adap, q);
972 credits = q->size - q->in_use;
973 ndesc = calc_tx_descs(skb);
975 if (unlikely(credits < ndesc)) {
976 if (!netif_queue_stopped(dev)) {
977 netif_stop_queue(dev);
978 set_bit(TXQ_ETH, &qs->txq_stopped);
980 dev_err(&adap->pdev->dev,
981 "%s: Tx ring %u full while queue awake!\n",
982 dev->name, q->cntxt_id & 7);
984 spin_unlock(&q->lock);
985 return NETDEV_TX_BUSY;
989 if (unlikely(credits - ndesc < q->stop_thres)) {
991 netif_stop_queue(dev);
992 set_bit(TXQ_ETH, &qs->txq_stopped);
994 if (should_restart_tx(q) &&
995 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
997 netif_wake_queue(dev);
1003 q->unacked += ndesc;
1004 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
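	/* Request a WR completion based on bit 3 of the running unacked count,
	 * so the SGE returns Tx credit updates for roughly every other window
	 * of 8 descriptors rather than for every packet. */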
1008 if (q->pidx >= q->size) {
1013 /* update port statistics */
1014 if (skb->ip_summed == CHECKSUM_COMPLETE)
1015 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1016 if (skb_shinfo(skb)->gso_size)
1017 qs->port_stats[SGE_PSTAT_TSO]++;
1018 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1019 qs->port_stats[SGE_PSTAT_VLANINS]++;
1021 dev->trans_start = jiffies;
1022 spin_unlock(&q->lock);
1025 * We do not use Tx completion interrupts to free DMAd Tx packets.
1026 * This is good for performance but means that we rely on new Tx
1027 * packets arriving to run the destructors of completed packets,
1028 * which open up space in their sockets' send queues. Sometimes
1029 * we do not get such new packets, causing Tx to stall. A single
1030 * UDP transmitter is a good example of this situation. We have
1031 * a clean up timer that periodically reclaims completed packets
1032 * but it doesn't run often enough (nor do we want it to) to prevent
1033 * lengthy stalls. A solution to this problem is to run the
1034 * destructor early, after the packet is queued but before it's DMAd.
1035 * A downside is that we lie to socket memory accounting, but the amount
1036 * of extra memory is reasonable (limited by the number of Tx
1037 * descriptors), the packets do actually get freed quickly by new
1038 * packets almost always, and for protocols like TCP that wait for
1039 * acks to really free up the data the extra memory is even less.
1040 * On the positive side we run the destructors on the sending CPU
1041 * rather than on a potentially different completing CPU, usually a
1042 * good thing. We also run them without holding our Tx queue lock,
1043 * unlike what reclaim_completed_tx() would otherwise do.
1045 * Run the destructor before telling the DMA engine about the packet
1046 * to make sure it doesn't complete and get freed prematurely.
1048 if (likely(!skb_shared(skb)))
1051 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1052 check_ring_tx_db(adap, q);
1053 return NETDEV_TX_OK;
1057 * write_imm - write a packet into a Tx descriptor as immediate data
1058 * @d: the Tx descriptor to write
1060 * @len: the length of packet data to write as immediate data
1061 * @gen: the generation bit value to write
1063 * Writes a packet as immediate data into a Tx descriptor. The packet
1064 * contains a work request at its beginning. We must write the packet
1065 * carefully so the SGE doesn't read accidentally before it's written in
1068 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1069 unsigned int len, unsigned int gen)
1071 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1072 struct work_request_hdr *to = (struct work_request_hdr *)d;
1074 memcpy(&to[1], &from[1], len - sizeof(*from));
1075 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1076 V_WR_BCNTLFLT(len & 7));
1078 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1079 V_WR_LEN((len + 7) / 8));
1085 * check_desc_avail - check descriptor availability on a send queue
1086 * @adap: the adapter
1087 * @q: the send queue
1088 * @skb: the packet needing the descriptors
1089 * @ndesc: the number of Tx descriptors needed
1090 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1092 * Checks if the requested number of Tx descriptors is available on an
1093 * SGE send queue. If the queue is already suspended or not enough
1094 * descriptors are available the packet is queued for later transmission.
1095 * Must be called with the Tx queue locked.
1097 * Returns 0 if enough descriptors are available, 1 if there aren't
1098 * enough descriptors and the packet has been queued, and 2 if the caller
1099 * needs to retry because there weren't enough descriptors at the
1100 * beginning of the call but some freed up in the meantime.
1102 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1103 struct sk_buff *skb, unsigned int ndesc,
1106 if (unlikely(!skb_queue_empty(&q->sendq))) {
1107 addq_exit:__skb_queue_tail(&q->sendq, skb);
1110 if (unlikely(q->size - q->in_use < ndesc)) {
1111 struct sge_qset *qs = txq_to_qset(q, qid);
1113 set_bit(qid, &qs->txq_stopped);
1114 smp_mb__after_clear_bit();
1116 if (should_restart_tx(q) &&
1117 test_and_clear_bit(qid, &qs->txq_stopped))
1127 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1128 * @q: the SGE control Tx queue
1130 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1131 * that send only immediate data (presently just the control queues) and
1132 * thus do not have any sk_buffs to release.
1134 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1136 unsigned int reclaim = q->processed - q->cleaned;
1138 q->in_use -= reclaim;
1139 q->cleaned += reclaim;
1142 static inline int immediate(const struct sk_buff *skb)
1144 return skb->len <= WR_LEN && !skb->data_len;
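	/* True when the whole packet fits inside a single work request and it
	 * has no page fragments (data_len == 0). */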
1148 * ctrl_xmit - send a packet through an SGE control Tx queue
1149 * @adap: the adapter
1150 * @q: the control queue
1153 * Send a packet through an SGE control Tx queue. Packets sent through
1154 * a control queue must fit entirely as immediate data in a single Tx
1155 * descriptor and have no page fragments.
1157 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1158 struct sk_buff *skb)
1161 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1163 if (unlikely(!immediate(skb))) {
1166 return NET_XMIT_SUCCESS;
1169 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1170 wrp->wr_lo = htonl(V_WR_TID(q->token));
1172 spin_lock(&q->lock);
1173 again:reclaim_completed_tx_imm(q);
1175 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1176 if (unlikely(ret)) {
1178 spin_unlock(&q->lock);
1184 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1187 if (++q->pidx >= q->size) {
1191 spin_unlock(&q->lock);
1193 t3_write_reg(adap, A_SG_KDOORBELL,
1194 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1195 return NET_XMIT_SUCCESS;
1199 * restart_ctrlq - restart a suspended control queue
1200 * @qs: the queue set containing the control queue
1202 * Resumes transmission on a suspended Tx control queue.
1204 static void restart_ctrlq(unsigned long data)
1206 struct sk_buff *skb;
1207 struct sge_qset *qs = (struct sge_qset *)data;
1208 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1209 struct adapter *adap = qs->netdev->priv;
1211 spin_lock(&q->lock);
1212 again:reclaim_completed_tx_imm(q);
1214 while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
1216 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1218 if (++q->pidx >= q->size) {
1225 if (!skb_queue_empty(&q->sendq)) {
1226 set_bit(TXQ_CTRL, &qs->txq_stopped);
1227 smp_mb__after_clear_bit();
1229 if (should_restart_tx(q) &&
1230 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1235 spin_unlock(&q->lock);
1236 t3_write_reg(adap, A_SG_KDOORBELL,
1237 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1241 * Send a management message through control queue 0
1243 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1245 return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1249 * deferred_unmap_destructor - unmap a packet when it is freed
1252 * This is the packet destructor used for Tx packets that need to remain
1253 * mapped until they are freed rather than until their Tx descriptors are
1256 static void deferred_unmap_destructor(struct sk_buff *skb)
1259 const dma_addr_t *p;
1260 const struct skb_shared_info *si;
1261 const struct deferred_unmap_info *dui;
1262 const struct unmap_info *ui = (struct unmap_info *)skb->cb;
1264 dui = (struct deferred_unmap_info *)skb->head;
1268 pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);
1270 si = skb_shinfo(skb);
1271 for (i = 0; i < si->nr_frags; i++)
1272 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1276 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1277 const struct sg_ent *sgl, int sgl_flits)
1280 struct deferred_unmap_info *dui;
1282 dui = (struct deferred_unmap_info *)skb->head;
1284 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1285 *p++ = be64_to_cpu(sgl->addr[0]);
1286 *p++ = be64_to_cpu(sgl->addr[1]);
1289 *p = be64_to_cpu(sgl->addr[0]);
1293 * write_ofld_wr - write an offload work request
1294 * @adap: the adapter
1295 * @skb: the packet to send
1297 * @pidx: index of the first Tx descriptor to write
1298 * @gen: the generation value to use
1299 * @ndesc: number of descriptors the packet will occupy
1301 * Write an offload work request to send the supplied packet. The packet
1302 * data already carry the work request with most fields populated.
1304 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1305 struct sge_txq *q, unsigned int pidx,
1306 unsigned int gen, unsigned int ndesc)
1308 unsigned int sgl_flits, flits;
1309 struct work_request_hdr *from;
1310 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1311 struct tx_desc *d = &q->desc[pidx];
1313 if (immediate(skb)) {
1314 q->sdesc[pidx].skb = NULL;
1315 write_imm(d, skb, skb->len, gen);
1319 /* Only TX_DATA builds SGLs */
1321 from = (struct work_request_hdr *)skb->data;
1322 memcpy(&d->flit[1], &from[1],
1323 skb_transport_offset(skb) - sizeof(*from));
1325 flits = skb_transport_offset(skb) / 8;
1326 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1327 sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
1329 if (need_skb_unmap()) {
1330 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1331 skb->destructor = deferred_unmap_destructor;
1332 ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
1335 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1336 gen, from->wr_hi, from->wr_lo);
1340 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1343 * Returns the number of Tx descriptors needed for the given offload
1344 * packet. These packets are already fully constructed.
1346 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1348 unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;
1350 if (skb->len <= WR_LEN && cnt == 0)
1351 return 1; /* packet fits as immediate data */
1353 flits = skb_transport_offset(skb) / 8; /* headers */
1354 if (skb->tail != skb->h.raw)
1356 return flits_to_desc(flits + sgl_len(cnt));
1360 * ofld_xmit - send a packet through an offload queue
1361 * @adap: the adapter
1362 * @q: the Tx offload queue
1365 * Send an offload packet through an SGE offload queue.
1367 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1368 struct sk_buff *skb)
1371 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1373 spin_lock(&q->lock);
1374 again:reclaim_completed_tx(adap, q);
1376 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1377 if (unlikely(ret)) {
1379 skb->priority = ndesc; /* save for restart */
1380 spin_unlock(&q->lock);
1390 if (q->pidx >= q->size) {
1394 spin_unlock(&q->lock);
1396 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1397 check_ring_tx_db(adap, q);
1398 return NET_XMIT_SUCCESS;
1402 * restart_offloadq - restart a suspended offload queue
1403 * @qs: the queue set containing the offload queue
1405 * Resumes transmission on a suspended Tx offload queue.
1407 static void restart_offloadq(unsigned long data)
1409 struct sk_buff *skb;
1410 struct sge_qset *qs = (struct sge_qset *)data;
1411 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1412 struct adapter *adap = qs->netdev->priv;
1414 spin_lock(&q->lock);
1415 again:reclaim_completed_tx(adap, q);
1417 while ((skb = skb_peek(&q->sendq)) != NULL) {
1418 unsigned int gen, pidx;
1419 unsigned int ndesc = skb->priority;
1421 if (unlikely(q->size - q->in_use < ndesc)) {
1422 set_bit(TXQ_OFLD, &qs->txq_stopped);
1423 smp_mb__after_clear_bit();
1425 if (should_restart_tx(q) &&
1426 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1436 if (q->pidx >= q->size) {
1440 __skb_unlink(skb, &q->sendq);
1441 spin_unlock(&q->lock);
1443 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1444 spin_lock(&q->lock);
1446 spin_unlock(&q->lock);
1449 set_bit(TXQ_RUNNING, &q->flags);
1450 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1452 t3_write_reg(adap, A_SG_KDOORBELL,
1453 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1457 * queue_set - return the queue set a packet should use
1460 * Maps a packet to the SGE queue set it should use. The desired queue
1461 * set is carried in bits 1-3 of the packet's priority.
1463 static inline int queue_set(const struct sk_buff *skb)
1465 return skb->priority >> 1;
1469 * is_ctrl_pkt - return whether an offload packet is a control packet
1472 * Determines whether an offload packet should use an OFLD or a CTRL
1473 * Tx queue. This is indicated by bit 0 in the packet's priority.
1475 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1477 return skb->priority & 1;
1481 * t3_offload_tx - send an offload packet
1482 * @tdev: the offload device to send to
1485 * Sends an offload packet. We use the packet priority to select the
1486 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1487 * should be sent as regular or control, bits 1-3 select the queue set.
1489 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1491 struct adapter *adap = tdev2adap(tdev);
1492 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1494 if (unlikely(is_ctrl_pkt(skb)))
1495 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1497 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
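/*
 * Usage sketch (hypothetical caller): to steer an offload packet to queue
 * set 2 via its OFLD queue, encode the routing in skb->priority first:
 *
 *	skb->priority = (2 << 1) | 0;	(bits 1-3 = qset, bit 0 = 0 for OFLD)
 *	t3_offload_tx(tdev, skb);
 */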
1501 * offload_enqueue - add an offload packet to an SGE offload receive queue
1502 * @q: the SGE response queue
1505 * Add a new offload packet to an SGE response queue's offload packet
1506 * queue. If the packet is the first on the queue it schedules the RX
1507 * softirq to process the queue.
1509 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1511 skb->next = skb->prev = NULL;
1513 q->rx_tail->next = skb;
1515 struct sge_qset *qs = rspq_to_qset(q);
1517 if (__netif_rx_schedule_prep(qs->netdev))
1518 __netif_rx_schedule(qs->netdev);
1525 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1526 * @tdev: the offload device that will be receiving the packets
1527 * @q: the SGE response queue that assembled the bundle
1528 * @skbs: the partial bundle
1529 * @n: the number of packets in the bundle
1531 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1533 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1535 struct sk_buff *skbs[], int n)
1538 q->offload_bundles++;
1539 tdev->recv(tdev, skbs, n);
1544 * ofld_poll - NAPI handler for offload packets in interrupt mode
1545 * @dev: the network device doing the polling
1546 * @budget: polling budget
1548 * The NAPI handler for offload packets when a response queue is serviced
1549 * by the hard interrupt handler, i.e., when it's operating in non-polling
1550 * mode. Creates small packet batches and sends them through the offload
1551 * receive handler. Batches need to be of modest size as we do prefetches
1552 * on the packets in each.
1554 static int ofld_poll(struct net_device *dev, int *budget)
1556 struct adapter *adapter = dev->priv;
1557 struct sge_qset *qs = dev2qset(dev);
1558 struct sge_rspq *q = &qs->rspq;
1559 int work_done, limit = min(*budget, dev->quota), avail = limit;
1562 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1565 spin_lock_irq(&q->lock);
1568 work_done = limit - avail;
1569 *budget -= work_done;
1570 dev->quota -= work_done;
1571 __netif_rx_complete(dev);
1572 spin_unlock_irq(&q->lock);
1577 q->rx_head = q->rx_tail = NULL;
1578 spin_unlock_irq(&q->lock);
1580 for (ngathered = 0; avail && head; avail--) {
1581 prefetch(head->data);
1582 skbs[ngathered] = head;
1584 skbs[ngathered]->next = NULL;
1585 if (++ngathered == RX_BUNDLE_SIZE) {
1586 q->offload_bundles++;
1587 adapter->tdev.recv(&adapter->tdev, skbs,
1592 if (head) { /* splice remaining packets back onto Rx queue */
1593 spin_lock_irq(&q->lock);
1594 tail->next = q->rx_head;
1598 spin_unlock_irq(&q->lock);
1600 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1602 work_done = limit - avail;
1603 *budget -= work_done;
1604 dev->quota -= work_done;
1609 * rx_offload - process a received offload packet
1610 * @tdev: the offload device receiving the packet
1611 * @rq: the response queue that received the packet
1613 * @rx_gather: a gather list of packets if we are building a bundle
1614 * @gather_idx: index of the next available slot in the bundle
1616 * Process an ingress offload packet and add it to the offload ingress
1617 * queue. Returns the index of the next available slot in the bundle.
1619 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1620 struct sk_buff *skb, struct sk_buff *rx_gather[],
1621 unsigned int gather_idx)
1624 skb_reset_mac_header(skb);
1625 skb_reset_network_header(skb);
1626 skb_reset_transport_header(skb);
1629 rx_gather[gather_idx++] = skb;
1630 if (gather_idx == RX_BUNDLE_SIZE) {
1631 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1633 rq->offload_bundles++;
1636 offload_enqueue(rq, skb);
1642 * restart_tx - check whether to restart suspended Tx queues
1643 * @qs: the queue set to resume
1645 * Restarts suspended Tx queues of an SGE queue set if they have enough
1646 * free resources to resume operation.
1648 static void restart_tx(struct sge_qset *qs)
1650 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1651 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1652 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1653 qs->txq[TXQ_ETH].restarts++;
1654 if (netif_running(qs->netdev))
1655 netif_wake_queue(qs->netdev);
1658 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1659 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1660 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1661 qs->txq[TXQ_OFLD].restarts++;
1662 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1664 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1665 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1666 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1667 qs->txq[TXQ_CTRL].restarts++;
1668 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1673 * rx_eth - process an ingress ethernet packet
1674 * @adap: the adapter
1675 * @rq: the response queue that received the packet
1677 * @pad: amount of padding at the start of the buffer
1679 * Process an ingress Ethernet packet and deliver it to the stack.
1680 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1681 * if it was immediate data in a response.
1683 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1684 struct sk_buff *skb, int pad)
1686 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1687 struct port_info *pi;
1689 skb_pull(skb, sizeof(*p) + pad);
1690 skb->dev->last_rx = jiffies;
1691 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
1692 pi = netdev_priv(skb->dev);
1693 if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
1695 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1696 skb->ip_summed = CHECKSUM_UNNECESSARY;
1698 skb->ip_summed = CHECKSUM_NONE;
1700 if (unlikely(p->vlan_valid)) {
1701 struct vlan_group *grp = pi->vlan_grp;
1703 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1705 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1708 dev_kfree_skb_any(skb);
1709 } else if (rq->polling)
1710 netif_receive_skb(skb);
1715 #define SKB_DATA_SIZE 128
1717 static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
1721 if (len <= SKB_DATA_SIZE) {
1722 memcpy(skb->data, p->va, len);
1724 put_page(p->frag.page);
1726 memcpy(skb->data, p->va, SKB_DATA_SIZE);
1727 skb_shinfo(skb)->frags[0].page = p->frag.page;
1728 skb_shinfo(skb)->frags[0].page_offset =
1729 p->frag.page_offset + SKB_DATA_SIZE;
1730 skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
1731 skb_shinfo(skb)->nr_frags = 1;
1732 skb->data_len = len - SKB_DATA_SIZE;
1733 skb->tail += SKB_DATA_SIZE;
1734 skb->truesize += skb->data_len;
1739 * get_packet - return the next ingress packet buffer from a free list
1740 * @adap: the adapter that received the packet
1741 * @fl: the SGE free list holding the packet
1742 * @len: the packet length including any SGE padding
1743 * @drop_thres: # of remaining buffers before we start dropping packets
1745 * Get the next packet from a free list and complete setup of the
1746 * sk_buff. If the packet is small we make a copy and recycle the
1747 * original buffer, otherwise we use the original buffer itself. If a
1748 * positive drop threshold is supplied packets are dropped and their
1749 * buffers recycled if (a) the number of remaining buffers is under the
1750 * threshold and the packet is too big to copy, or (b) the packet should
1751 * be copied but there is no memory for the copy.
1753 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
1754 unsigned int len, unsigned int drop_thres)
1756 struct sk_buff *skb = NULL;
1757 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1759 prefetch(sd->t.skb->data);
1761 if (len <= SGE_RX_COPY_THRES) {
1762 skb = alloc_skb(len, GFP_ATOMIC);
1763 if (likely(skb != NULL)) {
1764 struct rx_desc *d = &fl->desc[fl->cidx];
1765 dma_addr_t mapping =
1766 (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
1767 be32_to_cpu(d->addr_lo));
1769 __skb_put(skb, len);
1770 pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
1771 PCI_DMA_FROMDEVICE);
1772 memcpy(skb->data, sd->t.skb->data, len);
1773 pci_dma_sync_single_for_device(adap->pdev, mapping, len,
1774 PCI_DMA_FROMDEVICE);
1775 } else if (!drop_thres)
1778 recycle_rx_buf(adap, fl, fl->cidx);
1782 if (unlikely(fl->credits < drop_thres))
1786 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
1787 fl->buf_size, PCI_DMA_FROMDEVICE);
1790 __refill_fl(adap, fl);
1795 * handle_rsp_cntrl_info - handles control information in a response
1796 * @qs: the queue set corresponding to the response
1797 * @flags: the response control flags
1799 * Handles the control information of an SGE response, such as GTS
1800 * indications and completion credits for the queue set's Tx queues.
1801 * HW coalesces credits, we don't do any extra SW coalescing.
1803 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
1805 unsigned int credits;
1808 if (flags & F_RSPD_TXQ0_GTS)
1809 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1812 credits = G_RSPD_TXQ0_CR(flags);
1814 qs->txq[TXQ_ETH].processed += credits;
1816 credits = G_RSPD_TXQ2_CR(flags);
1818 qs->txq[TXQ_CTRL].processed += credits;
1821 if (flags & F_RSPD_TXQ1_GTS)
1822 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1824 credits = G_RSPD_TXQ1_CR(flags);
1826 qs->txq[TXQ_OFLD].processed += credits;
1830 * check_ring_db - check if we need to ring any doorbells
1831 * @adapter: the adapter
1832 * @qs: the queue set whose Tx queues are to be examined
1833 * @sleeping: indicates which Tx queue sent GTS
1835 * Checks if some of a queue set's Tx queues need to ring their doorbells
1836 * to resume transmission after idling while they still have unprocessed
1839 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1840 unsigned int sleeping)
1842 if (sleeping & F_RSPD_TXQ0_GTS) {
1843 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1845 if (txq->cleaned + txq->in_use != txq->processed &&
1846 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1847 set_bit(TXQ_RUNNING, &txq->flags);
1848 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1849 V_EGRCNTX(txq->cntxt_id));
1853 if (sleeping & F_RSPD_TXQ1_GTS) {
1854 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1856 if (txq->cleaned + txq->in_use != txq->processed &&
1857 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1858 set_bit(TXQ_RUNNING, &txq->flags);
1859 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1860 V_EGRCNTX(txq->cntxt_id));
1866 * is_new_response - check if a response is newly written
1867 * @r: the response descriptor
1868 * @q: the response queue
1870 * Returns true if a response descriptor contains a yet unprocessed
1873 static inline int is_new_response(const struct rsp_desc *r,
1874 const struct sge_rspq *q)
1876 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
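	/* The SGE toggles the generation bit it writes each time the response
	 * ring wraps, so a matching generation means the descriptor has been
	 * written since this index was last consumed. */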
1879 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1880 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1881 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1882 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1883 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1885 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
1886 #define NOMEM_INTR_DELAY 2500
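/* 2500 * 0.1us = 250us of interrupt holdoff while allocations are failing. */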
1889 * process_responses - process responses from an SGE response queue
1890 * @adap: the adapter
1891 * @qs: the queue set to which the response queue belongs
1892 * @budget: how many responses can be processed in this round
1894 * Process responses from an SGE response queue up to the supplied budget.
1895 * Responses include received packets as well as credits and other events
1896 * for the queues that belong to the response queue's queue set.
1897 * A negative budget is effectively unlimited.
1899 * Additionally choose the interrupt holdoff time for the next interrupt
1900 * on this queue. If the system is under memory shortage use a fairly
1901 * long delay to help recovery.
1903 static int process_responses(struct adapter *adap, struct sge_qset *qs,
1906 struct sge_rspq *q = &qs->rspq;
1907 struct rsp_desc *r = &q->desc[q->cidx];
1908 int budget_left = budget;
1909 unsigned int sleeping = 0;
1910 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1913 q->next_holdoff = q->holdoff_tmr;
1915 while (likely(budget_left && is_new_response(r, q))) {
1916 int eth, ethpad = 2;
1917 struct sk_buff *skb = NULL;
1918 u32 len, flags = ntohl(r->flags);
1919 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
1921 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1923 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1924 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1928 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
1929 skb->data[0] = CPL_ASYNC_NOTIF;
1930 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
1932 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1933 skb = get_imm_packet(r);
1934 if (unlikely(!skb)) {
1936 q->next_holdoff = NOMEM_INTR_DELAY;
1938 /* consume one credit since we tried */
1944 } else if ((len = ntohl(r->len_cq)) != 0) {
1946 (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1948 if (fl->buf_size == RX_PAGE_SIZE) {
1949 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1950 struct sge_fl_page *p = &sd->t.page;
1953 prefetch(p->va + L1_CACHE_BYTES);
1955 __refill_fl(adap, fl);
1957 pci_unmap_single(adap->pdev,
1958 pci_unmap_addr(sd, dma_addr),
1960 PCI_DMA_FROMDEVICE);
1963 if (unlikely(fl->credits <
1967 skb = alloc_skb(SKB_DATA_SIZE,
1969 if (unlikely(!skb)) {
1972 recycle_rx_buf(adap, fl,
1977 skb = alloc_skb(SKB_DATA_SIZE,
1983 skb_data_init(skb, p, G_RSPD_LEN(len));
1989 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1990 eth ? SGE_RX_DROP_THRES : 0);
1993 if (++fl->cidx == fl->size)
1998 if (flags & RSPD_CTRL_MASK) {
1999 sleeping |= flags & RSPD_GTS_MASK;
2000 handle_rsp_cntrl_info(qs, flags);
2004 if (unlikely(++q->cidx == q->size)) {
2011 if (++q->credits >= (q->size / 4)) {
2012 refill_rspq(adap, q, q->credits);
2017 /* Preserve the RSS info in csum & priority */
2019 skb->priority = rss_lo;
2022 rx_eth(adap, q, skb, ethpad);
2024 if (unlikely(r->rss_hdr.opcode ==
2026 __skb_pull(skb, ethpad);
2028 ngathered = rx_offload(&adap->tdev, q,
2036 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2038 check_ring_db(adap, qs, sleeping);
2040 smp_mb(); /* commit Tx queue .processed updates */
2041 if (unlikely(qs->txq_stopped != 0))
2044 budget -= budget_left;
2048 static inline int is_pure_response(const struct rsp_desc *r)
2050 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2052 return (n | r->len_cq) == 0;
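	/* A pure response has no immediate data, no async notification and no
	 * free-list buffer (len_cq == 0); it only carries credits and other
	 * flow-control state. */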
2056 * napi_rx_handler - the NAPI handler for Rx processing
2057 * @dev: the net device
2058 * @budget: how many packets we can process in this round
2060 * Handler for new data events when using NAPI.
2062 static int napi_rx_handler(struct net_device *dev, int *budget)
2064 struct adapter *adap = dev->priv;
2065 struct sge_qset *qs = dev2qset(dev);
2066 int effective_budget = min(*budget, dev->quota);
2068 int work_done = process_responses(adap, qs, effective_budget);
2069 *budget -= work_done;
2070 dev->quota -= work_done;
2072 if (work_done >= effective_budget)
2075 netif_rx_complete(dev);
2078 * Because we don't atomically flush the following write it is
2079 * possible that in very rare cases it can reach the device in a way
2080 * that races with a new response being written plus an error interrupt
2081 * causing the NAPI interrupt handler below to return unhandled status
2082 * to the OS. To protect against this would require flushing the write
2083 * and doing both the write and the flush with interrupts off. Way too
2084 * expensive and unjustifiable given the rarity of the race.
2086 * The race cannot happen at all with MSI-X.
2088 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2089 V_NEWTIMER(qs->rspq.next_holdoff) |
2090 V_NEWINDEX(qs->rspq.cidx));
2095 * Returns true if the device is already scheduled for polling.
2097 static inline int napi_is_scheduled(struct net_device *dev)
2099 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
2103 * process_pure_responses - process pure responses from a response queue
2104 * @adap: the adapter
2105 * @qs: the queue set owning the response queue
2106 * @r: the first pure response to process
2108 * A simpler version of process_responses() that handles only pure (i.e.,
2109 * non-data-carrying) responses. Such responses are too lightweight to
2110 * justify calling a softirq under NAPI, so we handle them specially in
2111 * the interrupt handler. The function is called with a pointer to a
2112 * response, which the caller must ensure is a valid pure response.
2114 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2116 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2119 struct sge_rspq *q = &qs->rspq;
2120 unsigned int sleeping = 0;
2123 u32 flags = ntohl(r->flags);
2126 if (unlikely(++q->cidx == q->size)) {
2133 if (flags & RSPD_CTRL_MASK) {
2134 sleeping |= flags & RSPD_GTS_MASK;
2135 handle_rsp_cntrl_info(qs, flags);
2139 if (++q->credits >= (q->size / 4)) {
2140 refill_rspq(adap, q, q->credits);
2143 } while (is_new_response(r, q) && is_pure_response(r));
2146 check_ring_db(adap, qs, sleeping);
2148 smp_mb(); /* commit Tx queue .processed updates */
2149 if (unlikely(qs->txq_stopped != 0))
2152 return is_new_response(r, q);
2156 * handle_responses - decide what to do with new responses in NAPI mode
2157 * @adap: the adapter
2158 * @q: the response queue
2160 * This is used by the NAPI interrupt handlers to decide what to do with
2161 * new SGE responses. If there are no new responses it returns -1. If
2162 * there are new responses and they are pure (i.e., non-data carrying)
2163 * it handles them straight in hard interrupt context as they are very
2164 * cheap and don't deliver any packets. Finally, if there are any data
2165 * signaling responses it schedules the NAPI handler. Returns 1 if it
2166 * schedules NAPI, 0 if all new responses were pure.
2168 * The caller must ascertain NAPI is not already running.
2170 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2172 struct sge_qset *qs = rspq_to_qset(q);
2173 struct rsp_desc *r = &q->desc[q->cidx];
2175 if (!is_new_response(r, q))
2177 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2178 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2179 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2182 if (likely(__netif_rx_schedule_prep(qs->netdev)))
2183 __netif_rx_schedule(qs->netdev);
2188 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2189 * (i.e., response queue serviced in hard interrupt).
2191 irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2193 struct sge_qset *qs = cookie;
2194 struct adapter *adap = qs->netdev->priv;
2195 struct sge_rspq *q = &qs->rspq;
2197 spin_lock(&q->lock);
2198 if (process_responses(adap, qs, -1) == 0)
2199 q->unhandled_irqs++;
2200 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2201 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2202 spin_unlock(&q->lock);
2207 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2208 * (i.e., response queue serviced by NAPI polling).
2210 irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2212 struct sge_qset *qs = cookie;
2213 struct adapter *adap = qs->netdev->priv;
2214 struct sge_rspq *q = &qs->rspq;
2216 spin_lock(&q->lock);
2217 BUG_ON(napi_is_scheduled(qs->netdev));
2219 if (handle_responses(adap, q) < 0)
2220 q->unhandled_irqs++;
2221 spin_unlock(&q->lock);
2226 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2227 * SGE response queues as well as error and other async events as they all use
2228 * the same MSI vector. We use one SGE response queue per port in this mode
2229 * and protect all response queues with queue 0's lock.
2231 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2233 int new_packets = 0;
2234 struct adapter *adap = cookie;
2235 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2237 spin_lock(&q->lock);
2239 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2240 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2241 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2245 if (adap->params.nports == 2 &&
2246 process_responses(adap, &adap->sge.qs[1], -1)) {
2247 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2249 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2250 V_NEWTIMER(q1->next_holdoff) |
2251 V_NEWINDEX(q1->cidx));
2255 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2256 q->unhandled_irqs++;
2258 spin_unlock(&q->lock);
2262 static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
2264 if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
2265 if (likely(__netif_rx_schedule_prep(dev)))
2266 __netif_rx_schedule(dev);
2273 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2274 * by NAPI polling). Handles data events from SGE response queues as well as
2275 * error and other async events as they all use the same MSI vector. We use
2276 * one SGE response queue per port in this mode and protect all response
2277 * queues with queue 0's lock.
2279 irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2282 struct adapter *adap = cookie;
2283 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2285 spin_lock(&q->lock);
2287 new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
2288 if (adap->params.nports == 2)
2289 new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
2290 &adap->sge.qs[1].rspq);
2291 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2292 q->unhandled_irqs++;
2294 spin_unlock(&q->lock);
2299 * A helper function that processes responses and issues GTS.
2301 static inline int process_responses_gts(struct adapter *adap,
2302 struct sge_rspq *rq)
2306 work = process_responses(adap, rspq_to_qset(rq), -1);
2307 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2308 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2313 * The legacy INTx interrupt handler. This needs to handle data events from
2314 * SGE response queues as well as error and other async events as they all use
2315 * the same interrupt pin. We use one SGE response queue per port in this mode
2316 * and protect all response queues with queue 0's lock.
2318 static irqreturn_t t3_intr(int irq, void *cookie)
2320 int work_done, w0, w1;
2321 struct adapter *adap = cookie;
2322 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2323 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2325 spin_lock(&q0->lock);
2327 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2328 w1 = adap->params.nports == 2 &&
2329 is_new_response(&q1->desc[q1->cidx], q1);
2331 if (likely(w0 | w1)) {
2332 t3_write_reg(adap, A_PL_CLI, 0);
2333 t3_read_reg(adap, A_PL_CLI); /* flush */
2336 process_responses_gts(adap, q0);
2339 process_responses_gts(adap, q1);
2341 work_done = w0 | w1;
2343 work_done = t3_slow_intr_handler(adap);
2345 spin_unlock(&q0->lock);
2346 return IRQ_RETVAL(work_done != 0);
2350 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2351 * Handles data events from SGE response queues as well as error and other
2352 * async events as they all use the same interrupt pin. We use one SGE
2353 * response queue per port in this mode and protect all response queues with queue 0's lock.
2356 static irqreturn_t t3b_intr(int irq, void *cookie)
2359 struct adapter *adap = cookie;
2360 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2362 t3_write_reg(adap, A_PL_CLI, 0);
2363 map = t3_read_reg(adap, A_SG_DATA_INTR);
2365 if (unlikely(!map)) /* shared interrupt, most likely */
2368 spin_lock(&q0->lock);
2370 if (unlikely(map & F_ERRINTR))
2371 t3_slow_intr_handler(adap);
2373 if (likely(map & 1))
2374 process_responses_gts(adap, q0);
2377 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2379 spin_unlock(&q0->lock);
2384 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2385 * Handles data events from SGE response queues as well as error and other
2386 * async events as they all use the same interrupt pin. We use one SGE
2387 * response queue per port in this mode and protect all response queues with queue 0's lock.
2390 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2393 struct net_device *dev;
2394 struct adapter *adap = cookie;
2395 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2397 t3_write_reg(adap, A_PL_CLI, 0);
2398 map = t3_read_reg(adap, A_SG_DATA_INTR);
2400 if (unlikely(!map)) /* shared interrupt, most likely */
2403 spin_lock(&q0->lock);
2405 if (unlikely(map & F_ERRINTR))
2406 t3_slow_intr_handler(adap);
2408 if (likely(map & 1)) {
2409 dev = adap->sge.qs[0].netdev;
2411 if (likely(__netif_rx_schedule_prep(dev)))
2412 __netif_rx_schedule(dev);
2415 dev = adap->sge.qs[1].netdev;
2417 if (likely(__netif_rx_schedule_prep(dev)))
2418 __netif_rx_schedule(dev);
2421 spin_unlock(&q0->lock);
2426 * t3_intr_handler - select the top-level interrupt handler
2427 * @adap: the adapter
2428 * @polling: whether using NAPI to service response queues
2430 * Selects the top-level interrupt handler based on the type of interrupts
2431 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the response queues.
2434 intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
2436 if (adap->flags & USING_MSIX)
2437 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2438 if (adap->flags & USING_MSI)
2439 return polling ? t3_intr_msi_napi : t3_intr_msi;
2440 if (adap->params.rev > 0)
2441 return polling ? t3b_intr_napi : t3b_intr;
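/*
 * Typical use (sketch; the exact call site is up to the driver's setup
 * code): the chosen handler is passed straight to request_irq(), e.g.
 *
 *	err = request_irq(adap->pdev->irq,
 *			  t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
 *			  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
 *			  "cxgb3", adap);
 *
 * MSI-X setups instead request one vector per queue set and pass the
 * queue set as the cookie, matching the per-queue-set handlers above.
 */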
2446 * t3_sge_err_intr_handler - SGE async event interrupt handler
2447 * @adapter: the adapter
2449 * Interrupt handler for SGE asynchronous (non-data) events.
2451 void t3_sge_err_intr_handler(struct adapter *adapter)
2453 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2455 if (status & F_RSPQCREDITOVERFOW)
2456 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2458 if (status & F_RSPQDISABLED) {
2459 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2462 "packet delivered to disabled response queue "
2463 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2466 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2467 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
2468 t3_fatal_err(adapter);
2472 * sge_timer_cb - perform periodic maintenance of an SGE qset
2473 * @data: the SGE queue set to maintain
2475 * Runs periodically from a timer to perform maintenance of an SGE queue
2476 * set. It performs two tasks:
2478 * a) Cleans up any completed Tx descriptors that may still be pending.
2479 * Normal descriptor cleanup happens when new packets are added to a Tx
2480 * queue so this timer is relatively infrequent and does any cleanup only
2481 * if the Tx queue has not seen any new packets in a while. We make a
2482 * best effort attempt to reclaim descriptors, in that we don't wait
2483 * around if we cannot get a queue's lock (which most likely is because
2484 * someone else is queueing new packets and so will also handle the clean
2485 * up). Since control queues use immediate data exclusively we don't
2486 * bother cleaning them up here.
2488 * b) Replenishes Rx queues that have run out due to memory shortage.
2489 * Normally new Rx buffers are added when existing ones are consumed but
2490 * when out of memory a queue can become empty. We try to add only a few
2491 * buffers here; the queue will be replenished fully as these new buffers
2492 * are used up, provided the memory shortage has subsided.
2494 static void sge_timer_cb(unsigned long data)
2497 struct sge_qset *qs = (struct sge_qset *)data;
2498 struct adapter *adap = qs->netdev->priv;
2500 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2501 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2502 spin_unlock(&qs->txq[TXQ_ETH].lock);
2504 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2505 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2506 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2508 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2509 &adap->sge.qs[0].rspq.lock;
2510 if (spin_trylock_irq(lock)) {
2511 if (!napi_is_scheduled(qs->netdev)) {
2512 u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2514 if (qs->fl[0].credits < qs->fl[0].size)
2515 __refill_fl(adap, &qs->fl[0]);
2516 if (qs->fl[1].credits < qs->fl[1].size)
2517 __refill_fl(adap, &qs->fl[1]);
2519 if (status & (1 << qs->rspq.cntxt_id)) {
2521 if (qs->rspq.credits) {
2522 refill_rspq(adap, &qs->rspq, 1);
2524 qs->rspq.restarted++;
2525 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2526 1 << qs->rspq.cntxt_id);
2530 spin_unlock_irq(lock);
2532 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2536 * t3_update_qset_coalesce - update coalescing settings for a queue set
2537 * @qs: the SGE queue set
2538 * @p: new queue set parameters
2540 * Update the coalescing settings for an SGE queue set. Nothing is done
2541 * if the queue set is not initialized yet.
2543 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2548 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
2549 qs->rspq.polling = p->polling;
2550 qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
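/*
 * The holdoff value is expressed in SGE timer ticks, and t3_sge_init()
 * below programs the timer tick to a tenth of a microsecond, so
 * multiplying coalesce_usecs by 10 converts microseconds to ticks.  For
 * example, the default coalesce_usecs of 5 yields holdoff_tmr = 50,
 * i.e. a 5 us interrupt holdoff; the max() keeps the value at least 1
 * because the hardware does not accept a holdoff of 0.
 */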
2554 * t3_sge_alloc_qset - initialize an SGE queue set
2555 * @adapter: the adapter
2556 * @id: the queue set id
2557 * @nports: how many Ethernet ports will be using this queue set
2558 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2559 * @p: configuration parameters for this queue set
2560 * @ntxq: number of Tx queues for the queue set
2561 * @netdev: net device associated with this queue set
2563 * Allocate resources and initialize an SGE queue set. A queue set
2564 * comprises a response queue, two Rx free-buffer queues, and up to 3
2565 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2566 * queue, offload queue, and control queue.
2568 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2569 int irq_vec_idx, const struct qset_params *p,
2570 int ntxq, struct net_device *netdev)
2572 int i, ret = -ENOMEM;
2573 struct sge_qset *q = &adapter->sge.qs[id];
2575 init_qset_cntxt(q, id);
2576 init_timer(&q->tx_reclaim_timer);
2577 q->tx_reclaim_timer.data = (unsigned long)q;
2578 q->tx_reclaim_timer.function = sge_timer_cb;
2580 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2581 sizeof(struct rx_desc),
2582 sizeof(struct rx_sw_desc),
2583 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2587 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2588 sizeof(struct rx_desc),
2589 sizeof(struct rx_sw_desc),
2590 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2594 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2595 sizeof(struct rsp_desc), 0,
2596 &q->rspq.phys_addr, NULL);
2600 for (i = 0; i < ntxq; ++i) {
2602 * The control queue always uses immediate data so does not
2603 * need to keep track of any sk_buffs.
2605 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2607 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2608 sizeof(struct tx_desc), sz,
2609 &q->txq[i].phys_addr,
2611 if (!q->txq[i].desc)
2615 q->txq[i].size = p->txq_size[i];
2616 spin_lock_init(&q->txq[i].lock);
2617 skb_queue_head_init(&q->txq[i].sendq);
2620 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2622 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2625 q->fl[0].gen = q->fl[1].gen = 1;
2626 q->fl[0].size = p->fl_size;
2627 q->fl[1].size = p->jumbo_size;
2630 q->rspq.size = p->rspq_size;
2631 spin_lock_init(&q->rspq.lock);
2633 q->txq[TXQ_ETH].stop_thres = nports *
2634 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
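/*
 * Worked example of the threshold above (assuming 4 KB pages, so
 * MAX_SKB_FRAGS is 18): a maximally fragmented packet needs an SGL
 * covering MAX_SKB_FRAGS + 1 = 19 buffers, sgl_len(19) = 29 flits, and
 * adding the 3 extra flits gives 32 flits, which flits_to_desc() turns
 * into 3 descriptors.  stop_thres therefore reserves room for one
 * worst-case packet per port.
 */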
2636 if (!is_offload(adapter)) {
2638 q->fl[0].buf_size = RX_PAGE_SIZE;
2640 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
2641 sizeof(struct cpl_rx_pkt);
2643 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
2644 sizeof(struct cpl_rx_pkt);
2647 q->fl[0].buf_size = RX_PAGE_SIZE;
2649 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
2650 sizeof(struct cpl_rx_data);
2652 q->fl[1].buf_size = (16 * 1024) -
2653 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2656 spin_lock(&adapter->sge.reg_lock);
2658 /* FL threshold comparison uses < */
2659 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2660 q->rspq.phys_addr, q->rspq.size,
2661 q->fl[0].buf_size, 1, 0);
2665 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2666 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2667 q->fl[i].phys_addr, q->fl[i].size,
2668 q->fl[i].buf_size, p->cong_thres, 1,
2674 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2675 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2676 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2682 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2683 USE_GTS, SGE_CNTXT_OFLD, id,
2684 q->txq[TXQ_OFLD].phys_addr,
2685 q->txq[TXQ_OFLD].size, 0, 1, 0);
2691 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2693 q->txq[TXQ_CTRL].phys_addr,
2694 q->txq[TXQ_CTRL].size,
2695 q->txq[TXQ_CTRL].token, 1, 0);
2700 spin_unlock(&adapter->sge.reg_lock);
2702 t3_update_qset_coalesce(q, p);
2705 * We use atalk_ptr as a backpointer to a qset. In case a device is
2706 * associated with multiple queue sets, only the first one sets the backpointer.
2709 if (netdev->atalk_ptr == NULL)
2710 netdev->atalk_ptr = q;
2712 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2713 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2714 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2716 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2717 V_NEWTIMER(q->rspq.holdoff_tmr));
2719 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2723 spin_unlock(&adapter->sge.reg_lock);
2725 t3_free_qset(adapter, q);
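/*
 * Typical use (sketch; parameter values are illustrative): the driver's
 * setup path allocates one queue set per port, e.g.
 *
 *	ret = t3_sge_alloc_qset(adap, qidx, 1, irq_vec_idx,
 *				&adap->params.sge.qset[qidx],
 *				is_offload(adap) ? SGE_TXQ_PER_SET : 1,
 *				adap->port[qidx]);
 *
 * so only offload-capable configurations allocate the offload and
 * control Tx queues.
 */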
2730 * t3_free_sge_resources - free SGE resources
2731 * @adap: the adapter
2733 * Frees resources used by the SGE queue sets.
2735 void t3_free_sge_resources(struct adapter *adap)
2739 for (i = 0; i < SGE_QSETS; ++i)
2740 t3_free_qset(adap, &adap->sge.qs[i]);
2744 * t3_sge_start - enable SGE
2745 * @adap: the adapter
2747 * Enables the SGE for DMAs. This is the last step in starting packet transfers.
2750 void t3_sge_start(struct adapter *adap)
2752 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2756 * t3_sge_stop - disable SGE operation
2757 * @adap: the adapter
2759 * Disables the DMA engine. This can be called in emergencies (e.g.,
2760 * from error interrupts) or from normal process context. In the latter
2761 * case it also disables any pending queue restart tasklets. Note that
2762 * if it is called in interrupt context it cannot disable the restart
2763 * tasklets as it cannot wait; however, the tasklets will have no effect
2764 * since the doorbells are disabled and the driver will call this again
2765 * later from process context, at which time the tasklets will be stopped
2766 * if they are still running.
2768 void t3_sge_stop(struct adapter *adap)
2770 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2771 if (!in_interrupt()) {
2774 for (i = 0; i < SGE_QSETS; ++i) {
2775 struct sge_qset *qs = &adap->sge.qs[i];
2777 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2778 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2784 * t3_sge_init - initialize SGE
2785 * @adap: the adapter
2786 * @p: the SGE parameters
2788 * Performs SGE initialization needed every time after a chip reset.
2789 * We do not initialize any of the queue sets here; instead, the driver
2790 * top-level must request those individually. We also do not enable DMA
2791 * here; that should be done after the queues have been set up.
2793 void t3_sge_init(struct adapter *adap, struct sge_params *p)
2795 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
2797 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2799 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2800 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2801 #if SGE_NUM_GENBITS == 1
2802 ctrl |= F_EGRGENCTRL;
2804 if (adap->params.rev > 0) {
2805 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2806 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2807 ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
2809 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2810 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2811 V_LORCQDRBTHRSH(512));
2812 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2813 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
2814 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
2815 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
2816 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2817 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2818 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2819 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2820 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
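/*
 * A couple of the values above, worked through for a typical host with
 * 4 KB pages: PAGE_SHIFT is 12, so the HOSTPAGESIZE field is written
 * with 1, and "ups" is derived from the length of PCI BAR 2 (ffs of its
 * size in 4 KB units), so V_USERSPACESIZE is programmed with ups - 1
 * whenever that BAR is present.
 */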
2824 * t3_sge_prep - one-time SGE initialization
2825 * @adap: the associated adapter
2826 * @p: SGE parameters
2828 * Performs one-time initialization of SGE SW state. Includes determining
2829 * defaults for the assorted SGE parameters, which admins can change until
2830 * they are used to initialize the SGE.
2832 void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2836 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2837 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2839 for (i = 0; i < SGE_QSETS; ++i) {
2840 struct qset_params *q = p->qset + i;
2842 q->polling = adap->params.rev > 0;
2843 q->coalesce_usecs = 5;
2844 q->rspq_size = 1024;
2846 q->jumbo_size = 512;
2847 q->txq_size[TXQ_ETH] = 1024;
2848 q->txq_size[TXQ_OFLD] = 1024;
2849 q->txq_size[TXQ_CTRL] = 256;
2853 spin_lock_init(&adap->sge.reg_lock);
2857 * t3_get_desc - dump an SGE descriptor for debugging purposes
2858 * @qs: the queue set
2859 * @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
2860 * @idx: the descriptor index in the queue
2861 * @data: where to dump the descriptor contents
2863 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2864 * size of the descriptor.
2866 int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2867 unsigned char *data)
2873 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2875 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2876 return sizeof(struct tx_desc);
2880 if (!qs->rspq.desc || idx >= qs->rspq.size)
2882 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2883 return sizeof(struct rsp_desc);
2887 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2889 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2890 return sizeof(struct rx_desc);
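/*
 * Example (sketch): dumping the first Ethernet Tx descriptor of queue
 * set 0, e.g. from a debug or ethtool-style hook:
 *
 *	unsigned char buf[sizeof(struct tx_desc)];
 *	int len = t3_get_desc(&adap->sge.qs[0], TXQ_ETH, 0, buf);
 *
 * On success, len is the size of the descriptor copied into buf.
 */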