/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/list.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <asm/kexec.h>
#include <linux/mutex.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);
static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs;
static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(use_lro, int, 0);
module_param(lro_max_aggr, int, 0);
module_param(num_tx_qps, int, 0);
MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "0:NAPI, 1:Multiple receive queues. Default = 0");
MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated. Default = "
		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
MODULE_PARM_DESC(use_lro, "Large Receive Offload, 1: enable, 0: disable. "
		 "Default = 0");
static int port_name_cnt;
static LIST_HEAD(adapter_list);
u64 ehea_driver_flags;
struct work_struct ehea_rereg_mr_task;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;
static int __devinit ehea_probe_adapter(struct of_device *dev,
					const struct of_device_id *id);

static int __devexit ehea_remove(struct of_device *dev);

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};

static struct of_platform_driver ehea_driver = {
	.name = "ehea",
	.match_table = ehea_device_table,
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};
void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;

	for (x = 0; x < len; x += 16) {
		printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
		       deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}
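
/*
 * Rebuild the global array of firmware handles (the QPs, CQs, EQs and
 * memory regions of every adapter and every active port). The handles
 * are counted in a first pass, then copied into a freshly allocated
 * array which replaces the old one, so a failed allocation leaves the
 * previous snapshot intact.
 */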
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps + port->num_add_tx_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
		if (!arr)
			return;	/* Keep the existing array */
	}

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			for (l = 0;
			     l < port->num_def_qps + port->num_add_tx_qps;
			     l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
	}

	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
}
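
/*
 * Rebuild the global array of broadcast/multicast registrations in the
 * same two-pass style as ehea_update_firmware_handles(): count first,
 * then fill a new array. Each active port contributes two broadcast
 * entries (untagged and VLANID_ALL) plus two entries per registered
 * multicast address.
 */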
static void ehea_update_bcmc_registrations(void)
{
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list,
					    list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kzalloc(num_registrations * sizeof(*arr), GFP_KERNEL);
		if (!arr)
			return;	/* Keep the existing array */
	}

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
			}
		}
	}

	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
}
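
/*
 * Gather net_device statistics: byte and error counters are queried
 * from the firmware via H_PORT_CB2, while packet counters are summed
 * over the per-queue software counters.
 */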
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret, rx_packets, tx_packets;
	int i;

	memset(stats, 0, sizeof(*stats));

	cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb2) {
		ehea_error("no mem for cb2");
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	rx_packets = 0;
	for (i = 0; i < port->num_def_qps; i++)
		rx_packets += port->port_res[i].rx_packets;

	tx_packets = 0;
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		tx_packets += port->port_res[i].tx_packets;

	stats->tx_packets = tx_packets;
	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;
	stats->rx_bytes = cb2->rxo;
	stats->tx_bytes = cb2->txo;
	stats->rx_packets = rx_packets;

out_herr:
	kfree(cb2);
out:
	return stats;
}
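
/*
 * Refill receive queue 1 with EHEA_L_PKT_SIZE skbs. os_skbs tracks
 * WQEs that could not be filled earlier (allocation failure or a
 * stopped transfer) so they are retried on the next refill; the
 * doorbell (ehea_update_rq1a) is only rung for WQEs actually posted.
 */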
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				pr->rq1_skba.os_skbs = fill_wqes - i;
				ehea_error("%s: no mem for skb/%d wqes filled",
					   dev->name, i);
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}
static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	int ret = 0;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	for (i = 0; i < pr->rq1_skba.len; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i]) {
			ehea_error("%s: no mem for skb/%d wqes filled",
				   dev->name, i);
			ret = -ENOMEM;
			goto out;
		}
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, nr_rq1a);
out:
	return ret;
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);

		if (!skb) {
			ehea_error("%s: no mem for skb/%d wqes filled",
				   pr->port->netdev->name, i);
			q_skba->os_skbs = fill_wqes - i;
			ret = -ENOMEM;
			break;
		}
		skb_reserve(skb, NET_IP_ALIGN);

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_kfree_skb(skb);
			q_skba->os_skbs = fill_wqes - i;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}
static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
}
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}
static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->protocol = eth_type_trans(skb, dev);
}
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	/* Prefetch the next array slot and its skb data to hide latency */
	x = skb_index + 1;
	x &= (arr_len - 1);
	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetch(pref);
	prefetch(pref + EHEA_CACHE_LINE);
	prefetch(pref + EHEA_CACHE_LINE * 2);
	prefetch(pref + EHEA_CACHE_LINE * 3);

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);
	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			ehea_error("Critical receive error for QP %d. "
				   "Resetting port.", pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		schedule_work(&pr->port->reset_task);
		return 1;
	}

	return 0;
}
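
/*
 * Header parse callback for the inet_lro engine: locate the IPv4 and
 * TCP headers of a received skb and reject anything that is not a
 * complete TCP/IPv4 packet, since only those may be aggregated.
 */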
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
{
	struct ehea_cqe *cqe = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	/* non tcp/udp packets */
	if (!cqe->header_length)
		return -1;

	/* non tcp packet */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (iph->tot_len < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}

static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
			  struct sk_buff *skb)
{
	int vlan_extracted = (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
			     && pr->port->vgrp;

	if (use_lro) {
		if (vlan_extracted)
			lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
						     pr->port->vgrp,
						     cqe->vlan_tag, cqe);
		else
			lro_receive_skb(&pr->lro_mgr, skb, cqe);
	} else {
		if (vlan_extracted)
			vlan_hwaccel_receive_skb(skb, pr->port->vgrp,
						 cqe->vlan_tag);
		else
			netif_receive_skb(skb);
	}
}
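
/*
 * Main receive processing: drain the RQ1 completion queue up to
 * "budget" CQEs. RQ1 ("low latency") packets are copied out of the
 * CQE itself, RQ2/RQ3 packets are taken from the preposted skb
 * arrays; afterwards all three receive queues are refilled.
 */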
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("LL rq1: skb=NULL");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
						 cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq2: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq3: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq3++;
			}

			ehea_proc_skb(pr, cqe, skb);
			dev->last_rx = jiffies;
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}
	if (use_lro)
		lro_flush_all(&pr->lro_mgr);

	pr->rx_packets += processed;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}
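
/*
 * Drain the send completion queue: free skbs of completed SWQE2
 * descriptors, credit the freed send WQEs back to swqe_avail and wake
 * the netif queue once enough send slots are available again.
 */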
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	unsigned long flags;

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;
		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			ehea_error("Send Completion Error: Resetting port");
			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
			schedule_work(&pr->port->reset_task);
			break;
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {
			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	spin_lock_irqsave(&pr->netif_queue, flags);

	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
				  >= pr->swqe_refill_th)) {
		netif_wake_queue(pr->port->netdev);
		pr->queue_stopped = 0;
	}
	spin_unlock_irqrestore(&pr->netif_queue, flags);

	return cqe;
}
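
/*
 * NAPI poll handler. RX and TX completions are processed from the
 * same poll routine; after EHEA_NAPI_POLL_NUM_BEFORE_IRQ consecutive
 * polls (force_irq) the routine completes NAPI and rearms the CQ
 * interrupts, rechecking both CQs afterwards to close the race
 * between "poll found nothing" and "interrupt rearmed".
 */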
#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
#define EHEA_POLL_MAX_CQES 65535

static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int force_irq, wqe_index;
	int rx = 0;

	force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);

	if (!force_irq)
		rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while ((rx != budget) || force_irq) {
		pr->poll_counter = 0;
		force_irq = 0;
		netif_rx_complete(dev, napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!netif_rx_reschedule(dev, napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	pr->poll_counter++;
	return rx;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		netif_rx_schedule(dev, &port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	netif_rx_schedule(pr->port->netdev, &pr->napi);

	return IRQ_HANDLED;
}
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	struct ehea_qp *qp;
	u32 qp_token;

	eqe = ehea_poll_eq(port->qp_eq);
	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		ehea_error("QP aff_err: entry=0x%lx, token=0x%x",
			   eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;
		ehea_error_data(port->adapter, qp->fw_handle);
		eqe = ehea_poll_eq(port->qp_eq);
	}

	schedule_work(&port->reset_task);

	return IRQ_HANDLED;
}
static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}
int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;
	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	port->num_tx_qps = num_tx_qps;

	if (port->num_def_qps >= port->num_tx_qps)
		port->num_add_tx_qps = 0;
	else
		port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	kfree(cb0);
out:
	return ret;
}
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			ehea_error("Failed sensing port speed");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			ehea_info("Hypervisor denied setting port speed");
			ret = -EPERM;
		} else {
			ret = -EIO;
			ehea_error("Failed setting port speed");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	kfree(cb4);
out:
	return ret;
}
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (!port) {
			ehea_error("unknown portnum %x", portnum);
			break;
		}

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(port->netdev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					ehea_error("failed resensing port "
						   "attributes");
					break;
				}

				if (netif_msg_link(port))
					ehea_info("%s: Logical port up: %dMbps "
						  "%s Duplex",
						  port->netdev->name,
						  port->port_speed,
						  port->full_duplex ==
						  1 ? "Full" : "Half");

				netif_carrier_on(port->netdev);
				netif_wake_queue(port->netdev);
			}
		} else {
			if (netif_carrier_ok(port->netdev)) {
				if (netif_msg_link(port))
					ehea_info("%s: Logical port down",
						  port->netdev->name);
				netif_carrier_off(port->netdev);
				netif_stop_queue(port->netdev);
			}
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port up",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_on(port->netdev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port down",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_off(port->netdev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			ehea_info("External switch port is primary port");
		else
			ehea_info("External switch port is backup port");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		ehea_error("Adapter malfunction");
		break;
	case EHEA_EC_PORT_MALFUNC:
		ehea_info("Port malfunction: Device: %s", port->netdev->name);
		netif_carrier_off(port->netdev);
		netif_stop_queue(port->netdev);
		break;
	default:
		ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
		break;
	}
}
static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	ehea_debug("eqe=%p", eqe);

	while (eqe) {
		ehea_debug("*eqe=%lx", eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		ehea_debug("next eqe=%p", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;

	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}
static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
				     - init_attr->act_nr_rwqes_rq2
				     - init_attr->act_nr_rwqes_rq3 - 1);

	ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}
static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
	if (ret) {
		ehea_error("failed registering irq for qp_aff_irq_handler:"
			   "ist=%X", port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	if (netif_msg_ifup(port))
		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
			  "registered", port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
		if (ret) {
			ehea_error("failed registering irq for ehea_queue "
				   "port_res_nr:%d, ist=%X", i,
				   pr->eq->attr.ist1);
			goto out_free_req;
		}
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for function ehea_queue_int "
				  "%d registered", pr->eq->attr.ist1, i);
	}

	return 0;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	return ret;
}
static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free send irq for res %d with handle 0x%X",
				  i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	if (netif_msg_intr(port))
		ehea_info("associated event interrupt for handle 0x%X freed",
			  port->qp_eq->attr.ist1);
}
static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	kfree(cb0);
out:
	return ret;
}
int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	ehea_error("Generating SMRS failed");
	return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr))
	    || (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;

	return 0;
}
static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vmalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	memset(q_skba->arr, 0, arr_size);

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->port = port;
	spin_lock_init(&pr->xmit_lock);
	spin_lock_init(&pr->netif_queue);

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		ehea_error("create_eq failed (eq)");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		ehea_error("create_cq failed (cq_recv)");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		ehea_error("create_cq failed (cq_send)");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
			  pr->send_cq->attr.act_nr_of_cqes,
			  pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		ehea_error("no mem for ehea_qp_init_attr");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		ehea_error("create_qp failed");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
			  init_attr->act_nr_send_wqes,
			  init_attr->act_nr_rwqes_rq1,
			  init_attr->act_nr_rwqes_rq2,
			  init_attr->act_nr_rwqes_rq3);

	ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
	pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	pr->lro_mgr.lro_arr = pr->lro_desc;
	pr->lro_mgr.get_skb_header = get_skb_hdr;
	pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	pr->lro_mgr.dev = port->netdev;
	pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	return 0;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);

	return ret;
}
static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}
/*
 * The write_* functions store information in the swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
				      const struct sk_buff *skb)
{
	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}
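
/*
 * Worked example (untagged IPv4/TCP frame with a 20-byte IP header):
 * ip_start = 14 (ETH_HLEN), ip_end = 14 + 20 - 1 = 33 and
 * tcp_offset = 34 + offsetof(struct tcphdr, check) = 34 + 16 = 50,
 * i.e. the byte offsets at which the hardware reads the headers and
 * inserts the computed checksums.
 */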
static void write_swqe2_TSO(struct sk_buff *skb,
			    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	int skb_data_size = skb->len - skb->data_len;
	int headersize;

	/* Packet is TCP with TSO enabled */
	swqe->tx_control |= EHEA_SWQE_TSO;
	swqe->mss = skb_shinfo(skb)->gso_size;
	/* copy only eth/ip/tcp headers to immediate data and
	 * the rest of skb->data to sg1entry
	 */
	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

	skb_data_size = skb->len - skb->data_len;

	if (skb_data_size >= headersize) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, headersize);
		swqe->immediate_data_length = headersize;

		if (skb_data_size > headersize) {
			/* set sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - headersize;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + headersize);
			swqe->descriptors++;
		}
	} else
		ehea_error("cannot handle fragmented headers");
}
static void write_swqe2_nonTSO(struct sk_buff *skb,
			       struct ehea_swqe *swqe, u32 lkey)
{
	int skb_data_size = skb->len - skb->data_len;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;

	/* Packet is any nonTSO type
	 *
	 * Copy as much as possible skb->data to immediate data and
	 * the rest to sg1entry
	 */
	if (skb_data_size >= SWQE2_MAX_IMM) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);

		swqe->immediate_data_length = SWQE2_MAX_IMM;

		if (skb_data_size > SWQE2_MAX_IMM) {
			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}
static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	swqe->descriptors = 0;
	sg1entry_contains_frag_data = 0;

	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
		write_swqe2_TSO(skb, swqe, lkey);
	else
		write_swqe2_nonTSO(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
			sg1entry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;
			sgentry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
		}
	}
}
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (untagged)",
			   hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (vlan)",
			   hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}
static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	down(&ehea_bcmc_regs.lock);

	/* Deregister old MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
	if (ret)
		goto out_upregs;

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret)
		goto out_upregs;

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
	up(&ehea_bcmc_regs.lock);
out_free:
	kfree(cb0);
out:
	return ret;
}
static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		ehea_info("Hypervisor denied %sabling promiscuous mode",
			  enable == 1 ? "en" : "dis");
	else
		ehea_error("failed %sabling promiscuous mode",
			   enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if ((enable && port->promisc) || (!enable && !port->promisc))
		return;

	cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!cb7) {
		ehea_error("no mem for cb7");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	kfree(cb7);
	return;
}
static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}
static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			ehea_error("failed deregistering mcast MAC");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}
static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				ehea_error("failed enabling IFF_ALLMULTI");
		}
	} else
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				ehea_error("failed disabling IFF_ALLMULTI");
		}
}
static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		ehea_error("no mem for mcl_entry");
		return;
	}

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		ehea_error("failed registering mcast MAC");
		kfree(ehea_mcl_entry);
	}
}
static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct dev_mc_list *k_mcl_entry;
	int ret, i;

	if (dev->flags & IFF_PROMISC) {
		ehea_promiscuous(dev, 1);
		return;
	}
	ehea_promiscuous(dev, 0);

	down(&ehea_bcmc_regs.lock);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (dev->mc_count) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (dev->mc_count > port->adapter->max_mc_mac) {
			ehea_info("Mcast registration limit reached (0x%lx). "
				  "Use ALLMULTI!",
				  port->adapter->max_mc_mac);
			goto out;
		}

		for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++,
			     k_mcl_entry = k_mcl_entry->next)
			ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
	}
out:
	ehea_update_bcmc_registrations();
	up(&ehea_bcmc_regs.lock);
	return;
}
static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IP_CHECKSUM
				 | EHEA_SWQE_TCP_CHECKSUM
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;

		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF)
			    || (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
			else
				write_udp_offset_end(swqe, skb);
		} else if (iph->protocol == IPPROTO_TCP) {
			write_tcp_offset_end(swqe, skb);
		}

		/* icmp (big data) and ip segmentation packets (all other ip
		   packets) do not require any special handling */

	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
	}

	write_swqe2_data(skb, dev, swqe, lkey);
}
static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	int nfrags = skb_shinfo(skb)->nr_frags;
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
	skb_frag_t *frag;
	int i;

	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_TCP) {
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_TCP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;

			write_tcp_offset_end(swqe, skb);

		} else if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF)
			    || (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IMM_DATA_PRESENT;
			else {
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IP_CHECKSUM
						 | EHEA_SWQE_TCP_CHECKSUM
						 | EHEA_SWQE_IMM_DATA_PRESENT;

				write_udp_offset_end(swqe, skb);
			}
		} else {
			/* icmp (big data) and
			   ip segmentation packets (all other ip packets) */
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;
		}
	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
	}

	/* copy (immediate) data */
	if (nfrags == 0) {
		/* data is in a single piece */
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	} else {
		/* first copy data from the skb->data buffer ... */
		skb_copy_from_linear_data(skb, imm_data,
					  skb->len - skb->data_len);
		imm_data += skb->len - skb->data_len;

		/* ... then copy data from the fragments */
		for (i = 0; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			memcpy(imm_data,
			       page_address(frag->page) + frag->page_offset,
			       frag->size);
			imm_data += frag->size;
		}
	}
	swqe->immediate_data_length = skb->len;
	dev_kfree_skb(skb);
}
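
/*
 * Select a send queue for a packet. TCP flows are hashed over the
 * available QPs using source/destination port and destination address
 * so a flow always maps to the same queue; everything else goes to
 * queue 0.
 */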
static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
{
	struct tcphdr *tcp;
	u32 tmp;

	if ((skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
		tcp = (struct tcphdr *)(skb_network_header(skb) +
					(ip_hdr(skb)->ihl * 4));
		tmp = (tcp->source + (tcp->dest << 16)) % 31;
		tmp += ip_hdr(skb)->daddr % 31;
		return tmp % num_qps;
	} else
		return 0;
}
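
/*
 * Transmit path: packets up to SWQE3_MAX_IMM bytes are sent entirely
 * as immediate data (SWQE3, with a signalled completion only every
 * sig_comp_iv packets), larger packets use SWQE2 with scatter/gather
 * descriptors and a signalled completion so the skb can be freed in
 * ehea_proc_cqes().
 */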
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	unsigned long flags;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;

	pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];

	if (!spin_trylock(&pr->xmit_lock))
		return NETDEV_TX_BUSY;

	if (pr->queue_stopped) {
		spin_unlock(&pr->xmit_lock);
		return NETDEV_TX_BUSY;
	}

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	if (port->vgrp && vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);
	}

	if (netif_msg_tx_queued(port)) {
		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(swqe, 512, "swqe");
	}

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_stop_queue(dev);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);
	pr->tx_packets++;

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		spin_lock_irqsave(&pr->netif_queue, flags);
		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
			pr->p_stats.queue_stopped++;
			netif_stop_queue(dev);
			pr->queue_stopped = 1;
		}
		spin_unlock_irqrestore(&pr->netif_queue, flags);
	}
	dev->trans_start = jiffies;
	spin_unlock(&pr->xmit_lock);

	return NETDEV_TX_OK;
}
static void ehea_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	u64 hret;

	port->vgrp = grp;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");

	kfree(cb1);
out:
	return;
}
static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	kfree(cb1);
	return;
}
static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	vlan_group_set_device(port->vgrp, vid, NULL);

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	kfree(cb1);
	return;
}
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (1)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (1)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (2)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (2)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (3)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (3)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (4)");
		goto out;
	}

	ret = 0;
out:
	kfree(cb0);
	return ret;
}
static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
			       int add_tx_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		ehea_error("ehea_create_eq failed (qp_eq)");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}
static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}
static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	down(&ehea_fw_handles.lock);

	ret = ehea_port_res_setup(port, port->num_def_qps,
				  port->num_add_tx_qps);
	if (ret) {
		ehea_error("port_res_failed");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		ehea_error("ehea_configure_port failed. ret:%d", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		ehea_error("reg_interrupts failed. ret:%d", ret);
		goto out_clean_pr;
	}

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			ehea_error("activate_qp failed");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			ehea_error("out_free_irqs");
			goto out_free_irqs;
		}
	}

	down(&ehea_bcmc_regs.lock);

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;

	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		ehea_info("Failed starting %s. ret=%i", dev->name, ret);

	ehea_update_bcmc_registrations();
	up(&ehea_bcmc_regs.lock);

	ehea_update_firmware_handles();
	up(&ehea_fw_handles.lock);

	return ret;
}
static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_enable(&port->port_res[i].napi);
}
static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	down(&port->port_lock);

	if (netif_msg_ifup(port))
		ehea_info("enabling port %s", dev->name);

	ret = ehea_up(dev);
	if (!ret) {
		port_napi_enable(port);
		netif_start_queue(dev);
	}

	up(&port->port_lock);

	return ret;
}
static int ehea_down(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	down(&ehea_bcmc_regs.lock);
	ehea_drop_multicast_list(dev);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ehea_free_interrupts(dev);

	down(&ehea_fw_handles.lock);

	port->state = EHEA_PORT_DOWN;

	ehea_update_bcmc_registrations();
	up(&ehea_bcmc_regs.lock);

	ret = ehea_clean_all_portres(port);
	if (ret)
		ehea_info("Failed freeing resources for %s. ret=%i",
			  dev->name, ret);

	ehea_update_firmware_handles();
	up(&ehea_fw_handles.lock);

	return ret;
}
static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (netif_msg_ifdown(port))
		ehea_info("disabling port %s", dev->name);

	flush_scheduled_work();
	down(&port->port_lock);
	netif_stop_queue(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	up(&port->port_lock);
	return ret;
}
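
/*
 * ehea_purge_sq - walk the entire send queue and set the PURGE flag in
 * every send WQE, presumably so that pending transmit work is dropped
 * rather than completed normally once the QP is re-enabled.  The walk
 * operates on a stack copy of the QP, so the queue pointers of the
 * live QP are left untouched.
 */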
void ehea_purge_sq(struct ehea_qp *orig_qp)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;
	int wqe_index;
	int i;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}
}
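
/*
 * ehea_stop_qps - quiesce all QPs of a port before an LPAR memory
 * operation: purge the send queue, query the QP control block, clear
 * the ENABLED bit and write it back via modify_qp, re-query the
 * control block (apparently to confirm the state transition), and
 * finally deregister the shared memory regions that must be
 * re-registered after the memory change.
 */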
int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;
	int ret = -EIO;
	int dret;
	int i;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (1)");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			ehea_error("modify_ehea_qp failed (1)");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (2)");
			goto out;
		}

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
		if (dret) {
			ehea_error("unreg shared memory region failed");
			goto out;
		}
	}

	ret = 0;
out:
	kfree(cb0);

	return ret;
}
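
/*
 * ehea_update_rqs - after the receive memory region has been
 * re-registered, rewrite every RQ2/RQ3 receive WQE with the new lkey
 * and re-map the virtual address of the attached skb data buffer.
 * Like ehea_purge_sq, this iterates over a stack copy of the QP.
 */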
void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_rwqe *rwqe;
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	struct sk_buff *skb;
	u32 lkey = pr->recv_mr.lkey;

	int i;
	int index;

	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}
}
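
/*
 * ehea_restart_qps - counterpart of ehea_stop_qps: re-create the shared
 * memory regions, patch the receive WQEs with the new lkey, set the
 * ENABLED bit in the QP control register again, and refill all three
 * receive queues.
 */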
int ehea_restart_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	int ret = 0;
	int i;

	struct hcp_modify_qp_cb0 *cb0;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		ret = ehea_gen_smrs(pr);
		if (ret) {
			ehea_error("creation of shared memory regions failed");
			goto out;
		}

		ehea_update_rqs(qp, pr);

		/* Enable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (1)");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			ehea_error("modify_ehea_qp failed (1)");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (2)");
			goto out;
		}

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
	}
out:
	kfree(cb0);

	return ret;
}
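
/*
 * ehea_reset_port - reset_task worker, scheduled from the TX watchdog.
 * Takes the port down and back up under the port lock while queueing
 * and NAPI processing are stopped.
 */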
static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	port->resets++;
	down(&port->port_lock);
	netif_stop_queue(dev);

	port_napi_disable(port);

	ehea_down(dev);

	ret = ehea_up(dev);
	if (ret)
		goto out;

	ehea_set_multicast_list(dev);

	if (netif_msg_timer(port))
		ehea_info("Device %s reset successfully", dev->name);

	port_napi_enable(port);

	netif_wake_queue(dev);
out:
	up(&port->port_lock);
	return;
}
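
/*
 * ehea_rereg_mrs - worker for DLPAR memory add.  Since the kernel
 * memory region registered with the firmware must match the (now
 * larger) LPAR memory layout, every active port is stopped, the old MR
 * is deregistered, the busmap is rebuilt, a new MR is registered, and
 * all ports are restarted.
 */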
static void ehea_rereg_mrs(struct work_struct *work)
{
	int ret, i;
	struct ehea_adapter *adapter;

	mutex_lock(&dlpar_mem_lock);
	ehea_info("LPAR memory enlarged - re-initializing driver");

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Shutdown all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (!port)
					continue;

				struct net_device *dev = port->netdev;

				if (dev->flags & IFF_UP) {
					down(&port->port_lock);
					netif_stop_queue(dev);
					ret = ehea_stop_qps(dev);
					if (ret) {
						up(&port->port_lock);
						goto out;
					}
					port_napi_disable(port);
					up(&port->port_lock);
				}
			}

			/* Unregister old memory region */
			ret = ehea_rem_mr(&adapter->mr);
			if (ret) {
				ehea_error("unregister MR failed - driver"
					   " inoperable!");
				goto out;
			}
		}

	ehea_destroy_busmap();
	ret = ehea_create_busmap();
	if (ret) {
		ehea_error("creating ehea busmap failed");
		goto out;
	}

	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Register new memory region */
			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
			if (ret) {
				ehea_error("register MR failed - driver"
					   " inoperable!");
				goto out;
			}

			/* Restart all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (!port)
					continue;

				struct net_device *dev = port->netdev;

				if (dev->flags & IFF_UP) {
					down(&port->port_lock);
					port_napi_enable(port);
					ret = ehea_restart_qps(dev);
					if (!ret)
						netif_wake_queue(dev);
					up(&port->port_lock);
				}
			}
		}
	ehea_info("re-initializing driver complete");
out:
	mutex_unlock(&dlpar_mem_lock);
	return;
}
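
/*
 * ehea_tx_watchdog - TX timeout hook; schedules the reset task unless
 * the carrier is already down or a DLPAR transfer stop is in progress.
 */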
static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
		schedule_work(&port->reset_task);
}
int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);

	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	kfree(cb);
out:
	return ret;
}
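
/*
 * ehea_get_jumboframe_status - query port control block 4 and, if
 * jumbo frames are not yet enabled, try to enable them.  *jumbo is set
 * to 1 when jumbo frames end up enabled, 0 otherwise.
 */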
int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	int ret = 0;
	u64 hret;
	struct hcp_ehea_port_cb4 *cb4;

	*jumbo = 0;

	/* Try to enable jumbo frames */
	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB4,
				      H_PORT_CB4_JUMBO, cb4);
	if (hret == H_SUCCESS) {
		if (cb4->jumbo_frame)
			*jumbo = 1;
		else {
			cb4->jumbo_frame = 1;
			hret = ehea_h_modify_ehea_port(port->adapter->handle,
						       port->logical_port_id,
						       H_PORT_CB4,
						       H_PORT_CB4_JUMBO, cb4);
			if (hret == H_SUCCESS)
				*jumbo = 1;
		}
	} else
		ret = -EINVAL;

	kfree(cb4);
out:
	return ret;
}
static ssize_t ehea_show_port_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
		   NULL);

static void __devinit logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	of_node_put(port->ofdev.node);
}
static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	sprintf(port->ofdev.dev.bus_id, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		ehea_error("failed to register device. ret=%d", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		ehea_error("failed to register attributes, ret=%d", ret);
		goto out_unreg_of_dev;
	}

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}
static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}
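
/*
 * ehea_setup_single_port - allocate the net_device for one logical
 * port, initialize the private ehea_port structure, register the port
 * in sysfs, wire up the net_device operations and register the device
 * with the network stack.
 */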
struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
					 u32 logical_port_id,
					 struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev(sizeof(struct ehea_port));

	if (!dev) {
		ehea_error("no mem for net_device");
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	sema_init(&port->port_lock, 1);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->open = ehea_open;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = ehea_netpoll;
#endif
	dev->stop = ehea_stop;
	dev->hard_start_xmit = ehea_start_xmit;
	dev->get_stats = ehea_get_stats;
	dev->set_multicast_list = ehea_set_multicast_list;
	dev->set_mac_address = ehea_set_mac_addr;
	dev->change_mtu = ehea_change_mtu;
	dev->vlan_rx_register = ehea_vlan_rx_register;
	dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid;
	dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid;
	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
		      | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
		      | NETIF_F_LLTX;
	dev->tx_timeout = &ehea_tx_watchdog;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	INIT_WORK(&port->reset_task, ehea_reset_port);
	ehea_set_ethtool_ops(dev);

	ret = register_netdev(dev);
	if (ret) {
		ehea_error("register_netdev failed. ret=%d", ret);
		goto out_unreg_port;
	}

	port->lro_max_aggr = lro_max_aggr;

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		ehea_error("failed determining jumbo frame status for %s",
			   port->netdev->name);

	ehea_info("%s: Jumbo frames are %sabled", dev->name,
		  jumbo == 1 ? "en" : "dis");

	adapter->active_ports++;

	return port;

out_unreg_port:
	ehea_unregister_port(port);

out_free_mc_list:
	kfree(port->mc_list);

out_free_ethdev:
	free_netdev(dev);

out_err:
	ehea_error("setting up logical port with id=%d failed, ret=%d",
		   logical_port_id, ret);
	return NULL;
}
static void ehea_shutdown_single_port(struct ehea_port *port)
{
	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	kfree(port->mc_list);
	free_netdev(port->netdev);
	port->adapter->active_ports--;
}
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;

	const u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ofdev->node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (!dn_log_port_id) {
			ehea_error("bad device node: eth_dn name=%s",
				   eth_dn->full_name);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			ehea_error("creating MR failed");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			ehea_info("%s -> logical port id #%d",
				  adapter->port[i]->netdev->name,
				  *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	};

	return 0;
}
static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	};

	return NULL;
}
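
/*
 * ehea_probe_port / ehea_remove_port - sysfs hooks to add or remove a
 * logical port at runtime by its logical port id, e.g. (the device
 * path below is illustrative; the exact name depends on the system):
 *
 *   echo 1 > /sys/bus/ibmebus/devices/<lhea>/probe_port
 *   echo 1 > /sys/bus/ibmebus/devices/<lhea>/remove_port
 */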
static ssize_t ehea_probe_port(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev->driver_data;
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;

	u32 logical_port_id;

	sscanf(buf, "%u", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		ehea_info("adding port with logical port id=%d failed. port "
			  "already configured as %s.", logical_port_id,
			  port->netdev->name);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);

	if (!eth_dn) {
		ehea_info("no logical port with id %d found", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		ehea_error("creating MR failed");
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		ehea_info("added %s (logical port id=%d)", port->netdev->name,
			  logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}
static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev->driver_data;
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%u", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		ehea_info("removed %s (logical port id=%d)",
			  port->netdev->name, logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		ehea_error("removing port with logical port id=%d failed. port "
			   "not configured.", logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}
static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);

int ehea_create_device_sysfs(struct of_device *dev)
{
	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
	if (ret)
		goto out;

	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
	return ret;
}

void ehea_remove_device_sysfs(struct of_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_probe_port);
	device_remove_file(&dev->dev, &dev_attr_remove_port);
}
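
/*
 * ehea_probe_adapter - ibmebus probe: read the adapter handle from the
 * device tree, sense the adapter attributes, create the notification
 * event queue (NEQ) with its tasklet and interrupt, create the
 * per-adapter sysfs files and set up all ports found below the lhea
 * device node.
 */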
static int __devinit ehea_probe_adapter(struct of_device *dev,
					const struct of_device_id *id)
{
	struct ehea_adapter *adapter;
	const u64 *adapter_handle;
	int ret;

	if (!dev || !dev->node) {
		ehea_error("Invalid ibmebus device probed");
		return -EINVAL;
	}

	down(&ehea_fw_handles.lock);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->dev, "no mem for ehea_adapter\n");
		goto out;
	}

	list_add(&adapter->list, &adapter_list);

	adapter->ofdev = dev;

	adapter_handle = of_get_property(dev->node, "ibm,hea-handle",
					 NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->dev, "failed getting handle for adapter"
			" '%s'\n", dev->node->full_name);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	dev->dev.driver_data = adapter;


	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->dev, "NEQ creation failed\n");
		goto out_free_ad;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
				  ehea_interrupt_neq, IRQF_DISABLED,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_kill_eq;
	}

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_free_irq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->dev, "setup_ports failed\n");
		goto out_rem_dev_sysfs;
	}

	ret = 0;
	goto out;

out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);

out_free_irq:
	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_ad:
	kfree(adapter);

out:
	ehea_update_firmware_handles();
	up(&ehea_fw_handles.lock);
	return ret;
}
static int __devexit ehea_remove(struct of_device *dev)
{
	struct ehea_adapter *adapter = dev->dev.driver_data;
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	flush_scheduled_work();

	down(&ehea_fw_handles.lock);

	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	list_del(&adapter->list);
	kfree(adapter);

	ehea_update_firmware_handles();
	up(&ehea_fw_handles.lock);

	return 0;
}
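
/*
 * ehea_crash_handler - kexec crash shutdown hook: hand back all
 * firmware resources and deregister all BCMC entries recorded in the
 * global bookkeeping arrays, presumably so that a subsequent kdump
 * kernel can reinitialize the adapter cleanly.
 */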
void ehea_crash_handler(void)
{
	int i;

	if (ehea_fw_handles.arr)
		for (i = 0; i < ehea_fw_handles.num_entries; i++)
			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
					     ehea_fw_handles.arr[i].fwh,
					     FORCE_FREE);

	if (ehea_bcmc_regs.arr)
		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
					      ehea_bcmc_regs.arr[i].port_id,
					      ehea_bcmc_regs.arr[i].reg_type,
					      ehea_bcmc_regs.arr[i].macaddr,
					      0, H_DEREG_BCMC);
}
static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		ehea_info("Reboot: freeing all eHEA resources");
		ibmebus_unregister_driver(&ehea_driver);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};
static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		ehea_info("Bad parameter: rq1_entries");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		ehea_info("Bad parameter: rq2_entries");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		ehea_info("Bad parameter: rq3_entries");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		ehea_info("Bad parameter: sq_entries");
		ret = -EINVAL;
	}

	return ret;
}
static ssize_t ehea_show_capabilities(struct device_driver *drv,
				      char *buf)
{
	return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
		   ehea_show_capabilities, NULL);
int __init ehea_module_init(void)
{
	int ret;

	printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
	       DRV_VERSION);


	INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

	sema_init(&ehea_fw_handles.lock, 1);
	sema_init(&ehea_bcmc_regs.lock, 1);

	ret = check_module_parm();
	if (ret)
		goto out;

	ret = ehea_create_busmap();
	if (ret)
		goto out;

	ret = register_reboot_notifier(&ehea_reboot_nb);
	if (ret)
		ehea_info("failed registering reboot notifier");

	ret = crash_shutdown_register(&ehea_crash_handler);
	if (ret)
		ehea_info("failed registering crash handler");

	ret = ibmebus_register_driver(&ehea_driver);
	if (ret) {
		ehea_error("failed registering eHEA device driver on ebus");
		goto out2;
	}

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		ehea_error("failed to register capabilities attribute, ret=%d",
			   ret);
		goto out3;
	}

	return ret;

out3:
	ibmebus_unregister_driver(&ehea_driver);
out2:
	unregister_reboot_notifier(&ehea_reboot_nb);
	crash_shutdown_unregister(&ehea_crash_handler);
out:
	return ret;
}
static void __exit ehea_module_exit(void)
{
	int ret;

	flush_scheduled_work();
	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
	ibmebus_unregister_driver(&ehea_driver);
	unregister_reboot_notifier(&ehea_reboot_nb);
	ret = crash_shutdown_unregister(&ehea_crash_handler);
	if (ret)
		ehea_info("failed unregistering crash handler");
	kfree(ehea_fw_handles.arr);
	kfree(ehea_bcmc_regs.arr);
	ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);