/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	Christoph Raisch <raisch@de.ibm.com>
 *	Jan-Bernd Themann <themann@de.ibm.com>
 *	Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);

static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs;
static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(use_lro, int, 0);
module_param(lro_max_aggr, int, 0);
module_param(num_tx_qps, int, 0);

MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "0:NAPI, 1:Multiple receive queues, Default = 0");
MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated. Default = "
		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
MODULE_PARM_DESC(use_lro, "Large Receive Offload, 1: enable, 0: disable, "
		 "Default = 0");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
u64 ehea_driver_flags;
struct work_struct ehea_rereg_mr_task;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;

static int __devinit ehea_probe_adapter(struct of_device *dev,
					const struct of_device_id *id);

static int __devexit ehea_remove(struct of_device *dev);

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
	.name = "ehea",
	.match_table = ehea_device_table,
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};

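/* Hex-dump a buffer to the kernel log, 16 bytes per printk line. */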
void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;
	for (x = 0; x < len; x += 16) {
		printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
			  deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}

void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}

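/*
 * Snapshot all firmware handles (QPs, CQs, EQs, MRs) of every active
 * adapter/port into a freshly allocated array and swap it in under
 * ehea_fw_handles.lock.  On allocation failure the existing array is
 * kept unchanged.
 */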
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps + port->num_add_tx_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
		if (!arr)
			return;	/* Keep the existing array */
	}

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			for (l = 0;
			     l < port->num_def_qps + port->num_add_tx_qps;
			     l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
	}

	mutex_lock(&ehea_fw_handles.lock);
	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
	mutex_unlock(&ehea_fw_handles.lock);
}

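/*
 * Rebuild the shadow array of broadcast/multicast registrations of all
 * active ports (two entries per address: untagged and VLANID_ALL) and
 * swap it in under ehea_bcmc_regs.lock.
 */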
static void ehea_update_bcmc_registrations(void)
{
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list, list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			return;	/* Keep the existing array */
	}

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
			}
		}
	}

	spin_lock(&ehea_bcmc_regs.lock);
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
	spin_unlock(&ehea_bcmc_regs.lock);
}

static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret, rx_packets, tx_packets;
	int i;

	memset(stats, 0, sizeof(*stats));

	cb2 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!cb2) {
		ehea_error("no mem for cb2");
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	rx_packets = 0;
	for (i = 0; i < port->num_def_qps; i++)
		rx_packets += port->port_res[i].rx_packets;

	tx_packets = 0;
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		tx_packets += port->port_res[i].tx_packets;

	stats->tx_packets = tx_packets;
	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;
	stats->rx_bytes = cb2->rxo;
	stats->tx_bytes = cb2->txo;
	stats->rx_packets = rx_packets;

out_herr:
	kfree(cb2);
out:
	return stats;
}

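/*
 * Top up RQ1 with freshly allocated skbs.  On allocation failure, or
 * while a transfer stop (__EHEA_STOP_XFER) is pending, the shortfall is
 * remembered in os_skbs so a later refill can catch up.
 */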
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				pr->rq1_skba.os_skbs = fill_wqes - i;
				ehea_error("%s: no mem for skb/%d wqes filled",
					   dev->name, i);
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}

static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	int ret = 0;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	for (i = 0; i < pr->rq1_skba.len; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i]) {
			ehea_error("%s: no mem for skb/%d wqes filled",
				   dev->name, i);
			ret = -ENOMEM;
			break;
		}
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, nr_rq1a);

	return ret;
}

static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
		if (!skb) {
			ehea_error("%s: no mem for skb/%d wqes filled",
				   pr->port->netdev->name, i);
			q_skba->os_skbs = fill_wqes - i;
			ret = -ENOMEM;
			break;
		}
		skb_reserve(skb, NET_IP_ALIGN);

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_kfree_skb(skb);
			q_skba->os_skbs = fill_wqes - i;
			ret = 0;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}

static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
}

static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->protocol = eth_type_trans(skb, dev);
}

static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	/* prefetch the next array slot and its skb data */
	x = (skb_index + 1) & (arr_len - 1);
	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetch(pref);
	prefetch(pref + EHEA_CACHE_LINE);
	prefetch(pref + EHEA_CACHE_LINE * 2);
	prefetch(pref + EHEA_CACHE_LINE * 3);

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = (wqe_index + 1) & (arr_len - 1);
	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}

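/*
 * Account a receive error CQE, free the affected RQ2/RQ3 skb, and
 * request a port reset on fatal errors.  Returns nonzero if the port
 * is being reset and polling should stop.
 */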
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			ehea_error("Critical receive error for QP %d. "
				   "Resetting port.", pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}

static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
{
	struct ehea_cqe *cqe = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	/* non tcp/udp packets */
	if (!cqe->header_length)
		return -1;

	/* non tcp packets */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}

static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
			  struct sk_buff *skb)
{
	int vlan_extracted = (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
			     && pr->port->vgrp;

	if (use_lro) {
		if (vlan_extracted)
			lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
						     pr->port->vgrp,
						     cqe->vlan_tag, cqe);
		else
			lro_receive_skb(&pr->lro_mgr, skb, cqe);
	} else {
		if (vlan_extracted)
			vlan_hwaccel_receive_skb(skb, pr->port->vgrp,
						 cqe->vlan_tag);
		else
			netif_receive_skb(skb);
	}
}

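/*
 * Receive path: drain up to @budget completions from RQ1, build skbs
 * (low-latency RQ1 data arrives inside the CQE itself; RQ2/RQ3 use
 * posted buffers), pass them to LRO/NAPI, then refill all three RQs.
 */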
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("LL rq1: skb=NULL");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
						cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq2: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq3: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq3++;
			}

			ehea_proc_skb(pr, cqe, skb);
			dev->last_rx = jiffies;
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}
	if (use_lro)
		lro_flush_all(&pr->lro_mgr);

	pr->rx_packets += processed;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}

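/*
 * TX completion path: reap up to @my_quota send CQEs, free transmitted
 * skbs, credit swqe_avail, and wake the queue once enough send WQEs
 * are available again.
 */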
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	unsigned long flags;

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;
		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			ehea_error("Send Completion Error: Resetting port");
			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
			ehea_schedule_port_reset(pr->port);
			break;
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {
			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	spin_lock_irqsave(&pr->netif_queue, flags);

	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
				  >= pr->swqe_refill_th)) {
		netif_wake_queue(pr->port->netdev);
		pr->queue_stopped = 0;
	}
	spin_unlock_irqrestore(&pr->netif_queue, flags);

	return cqe;
}

#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
#define EHEA_POLL_MAX_CQES 65535

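/*
 * NAPI poll: process TX completions and RX work; when the budget is not
 * exhausted (or an interrupt is forced after
 * EHEA_NAPI_POLL_NUM_BEFORE_IRQ consecutive polls), re-arm the CQ
 * notifications and re-check for late completions before finishing.
 */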
static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int force_irq, wqe_index;
	int rx = 0;

	force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);

	if (!force_irq)
		rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while ((rx != budget) || force_irq) {
		pr->poll_counter = 0;
		force_irq = 0;
		netif_rx_complete(dev, napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!netif_rx_reschedule(dev, napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	pr->poll_counter++;
	return rx;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		netif_rx_schedule(dev, &port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	netif_rx_schedule(pr->port->netdev, &pr->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_qp *qp;
	struct ehea_eqe *eqe;
	u32 qp_token;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		ehea_error("QP aff_err: entry=0x%lx, token=0x%x",
			   eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;
		ehea_error_data(port->adapter, qp->fw_handle);
		eqe = ehea_poll_eq(port->qp_eq);
	}

	ehea_schedule_port_reset(port);

	return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}

int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;
	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	port->num_tx_qps = num_tx_qps;

	if (port->num_def_qps >= port->num_tx_qps)
		port->num_add_tx_qps = 0;
	else
		port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	kfree(cb0);
out:
	return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	int ret = 0;
	u64 hret;
	struct hcp_ehea_port_cb4 *cb4;

	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			ehea_error("Failed sensing port speed");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			ehea_info("Hypervisor denied setting port speed");
			ret = -EPERM;
		} else {
			ret = -EIO;
			ehea_error("Failed setting port speed");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	kfree(cb4);
out:
	return ret;
}

static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (!port) {
			ehea_error("unknown portnum %x", portnum);
			break;
		}

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(port->netdev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					ehea_error("failed resensing port "
						   "attributes");
					break;
				}

				if (netif_msg_link(port))
					ehea_info("%s: Logical port up: %dMbps "
						  "%s Duplex",
						  port->netdev->name,
						  port->port_speed,
						  port->full_duplex ==
						  1 ? "Full" : "Half");

				netif_carrier_on(port->netdev);
				netif_wake_queue(port->netdev);
			}
		} else
			if (netif_carrier_ok(port->netdev)) {
				if (netif_msg_link(port))
					ehea_info("%s: Logical port down",
						  port->netdev->name);
				netif_carrier_off(port->netdev);
				netif_stop_queue(port->netdev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port up",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_on(port->netdev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port down",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_off(port->netdev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			ehea_info("External switch port is primary port");
		else
			ehea_info("External switch port is backup port");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		ehea_error("Adapter malfunction");
		break;
	case EHEA_EC_PORT_MALFUNC:
		ehea_info("Port malfunction: Device: %s", port->netdev->name);
		netif_carrier_off(port->netdev);
		netif_stop_queue(port->netdev);
		break;
	default:
		ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
		break;
	}
}

static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	ehea_debug("eqe=%p", eqe);

	while (eqe) {
		ehea_debug("*eqe=%lx", eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		ehea_debug("next eqe=%p", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}

static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
				     - init_attr->act_nr_rwqes_rq2
				     - init_attr->act_nr_rwqes_rq3 - 1);

	ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}

static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
	if (ret) {
		ehea_error("failed registering irq for qp_aff_irq_handler:"
			   "ist=%X", port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	if (netif_msg_ifup(port))
		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
			  "registered", port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
		if (ret) {
			ehea_error("failed registering irq for ehea_queue "
				   "port_res_nr:%d, ist=%X", i,
				   pr->eq->attr.ist1);
			goto out_free_req;
		}
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for function ehea_queue_int "
				  "%d registered", pr->eq->attr.ist1, i);
	}
out:
	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;
}

static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free send irq for res %d with handle 0x%X",
				  i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	if (netif_msg_intr(port))
		ehea_info("associated event interrupt for handle 0x%X freed",
			  port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	kfree(cb0);
out:
	return ret;
}

int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	ehea_error("Generating SMRS failed");
	return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr))
	    || (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	else
		return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vmalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	memset(q_skba->arr, 0, arr_size);

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}

static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->port = port;
	spin_lock_init(&pr->xmit_lock);
	spin_lock_init(&pr->netif_queue);

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		ehea_error("create_eq failed (eq)");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		ehea_error("create_cq failed (cq_recv)");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		ehea_error("create_cq failed (cq_send)");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
			  pr->send_cq->attr.act_nr_of_cqes,
			  pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		ehea_error("no mem for ehea_qp_init_attr");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		ehea_error("create_qp failed");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
			  init_attr->act_nr_send_wqes,
			  init_attr->act_nr_rwqes_rq1,
			  init_attr->act_nr_rwqes_rq2,
			  init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
	pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	pr->lro_mgr.lro_arr = pr->lro_desc;
	pr->lro_mgr.get_skb_header = get_skb_hdr;
	pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	pr->lro_mgr.dev = port->netdev;
	pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}

/*
 * The write_* functions store information in the swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum.
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
				      const struct sk_buff *skb)
{
	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

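/*
 * For TSO frames only the eth/ip/tcp headers go into the WQE's
 * immediate data area; the remaining linear data is referenced via
 * sg1entry so the hardware can segment the payload.
 */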
static void write_swqe2_TSO(struct sk_buff *skb,
			    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	int skb_data_size = skb->len - skb->data_len;
	int headersize;

	/* Packet is TCP with TSO enabled */
	swqe->tx_control |= EHEA_SWQE_TSO;
	swqe->mss = skb_shinfo(skb)->gso_size;
	/* copy only eth/ip/tcp headers to immediate data and
	 * the rest of skb->data to sg1entry
	 */
	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

	skb_data_size = skb->len - skb->data_len;

	if (skb_data_size >= headersize) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, headersize);
		swqe->immediate_data_length = headersize;

		if (skb_data_size > headersize) {
			/* set sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - headersize;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + headersize);
			swqe->descriptors++;
		}
	} else
		ehea_error("cannot handle fragmented headers");
}

static void write_swqe2_nonTSO(struct sk_buff *skb,
			       struct ehea_swqe *swqe, u32 lkey)
{
	int skb_data_size = skb->len - skb->data_len;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;

	/* Packet is any nonTSO type
	 *
	 * Copy as much as possible skb->data to immediate data and
	 * the rest to sg1entry
	 */
	if (skb_data_size >= SWQE2_MAX_IMM) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);

		swqe->immediate_data_length = SWQE2_MAX_IMM;

		if (skb_data_size > SWQE2_MAX_IMM) {
			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}

static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	swqe->descriptors = 0;
	sg1entry_contains_frag_data = 0;

	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
		write_swqe2_TSO(skb, swqe, lkey);
	else
		write_swqe2_nonTSO(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
			sg1entry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;
			sgentry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
		}
	}
}

static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (tagged)",
			   hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (vlan)",
			   hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}

static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	kfree(cb0);
out:
	return ret;
}

static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		ehea_info("Hypervisor denied %sabling promiscuous mode",
			  enable == 1 ? "en" : "dis");
	else
		ehea_error("failed %sabling promiscuous mode",
			   enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if ((enable && port->promisc) || (!enable && !port->promisc))
		return;

	cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!cb7) {
		ehea_error("no mem for cb7");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	kfree(cb7);
}

static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			ehea_error("failed deregistering mcast MAC");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}

static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				ehea_error("failed enabling IFF_ALLMULTI");
		}
	} else
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				ehea_error("failed disabling IFF_ALLMULTI");
		}
}

static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		ehea_error("no mem for mcl_entry");
		return;
	}

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		ehea_error("failed registering mcast MAC");
		kfree(ehea_mcl_entry);
	}
}

static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct dev_mc_list *k_mcl_entry;
	int ret, i;

	if (dev->flags & IFF_PROMISC) {
		ehea_promiscuous(dev, 1);
		return;
	}
	ehea_promiscuous(dev, 0);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (dev->mc_count) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (dev->mc_count > port->adapter->max_mc_mac) {
			ehea_info("Mcast registration limit reached (0x%lx). "
				  "Use ALLMULTI!",
				  port->adapter->max_mc_mac);
			goto out;
		}

		for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++,
		     k_mcl_entry = k_mcl_entry->next)
			ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);

	}
out:
	ehea_update_bcmc_registrations();
	return;
}

static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

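/*
 * Build a type-2 send WQE: enable CRC/IP/TCP checksum offload as the
 * protocol allows and hand the payload to write_swqe2_data().
 */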
static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IP_CHECKSUM
				 | EHEA_SWQE_TCP_CHECKSUM
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;

		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF)
			    || (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
			else
				write_udp_offset_end(swqe, skb);
		} else if (iph->protocol == IPPROTO_TCP) {
			write_tcp_offset_end(swqe, skb);
		}

		/* icmp (big data) and ip segmentation packets (all other ip
		   packets) do not require any special handling */

	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
	}

	write_swqe2_data(skb, dev, swqe, lkey);
}

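/*
 * Build a type-3 send WQE: the whole frame (linear data plus fragments)
 * is copied into the WQE's immediate data area and the skb is freed
 * right away.
 */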
static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	int nfrags = skb_shinfo(skb)->nr_frags;
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
	skb_frag_t *frag;
	int i;

	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_TCP) {
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_TCP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;

			write_tcp_offset_end(swqe, skb);

		} else if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF)
			    || (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IMM_DATA_PRESENT;
			else {
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IP_CHECKSUM
						 | EHEA_SWQE_TCP_CHECKSUM
						 | EHEA_SWQE_IMM_DATA_PRESENT;

				write_udp_offset_end(swqe, skb);
			}
		} else {
			/* icmp (big data) and
			   ip segmentation packets (all other ip packets) */
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;
		}
	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
	}

	/* copy (immediate) data */
	if (nfrags == 0) {
		/* data is in a single piece */
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	} else {
		/* first copy data from the skb->data buffer ... */
		skb_copy_from_linear_data(skb, imm_data,
					  skb->len - skb->data_len);
		imm_data += skb->len - skb->data_len;

		/* ... then copy data from the fragments */
		for (i = 0; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			memcpy(imm_data,
			       page_address(frag->page) + frag->page_offset,
			       frag->size);
			imm_data += frag->size;
		}
	}
	swqe->immediate_data_length = skb->len;
	dev_kfree_skb(skb);
}

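/*
 * Pick a TX queue for an skb: TCP flows are spread over the available
 * QPs by a simple hash of the ports and destination address; all other
 * traffic goes to queue 0.
 */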
static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
{
	struct tcphdr *tcp;
	u32 tmp;

	if ((skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
		tcp = (struct tcphdr *)(skb_network_header(skb) +
					(ip_hdr(skb)->ihl * 4));
		tmp = (tcp->source + (tcp->dest << 16)) % 31;
		tmp += ip_hdr(skb)->daddr % 31;
		return tmp % num_qps;
	} else
		return 0;
}

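/*
 * Main transmit path: small frames (<= SWQE3_MAX_IMM) are sent entirely
 * as immediate data in a type-3 WQE, requesting a signalled completion
 * only every sig_comp_iv frames; larger frames use a type-2 WQE with
 * descriptors and always request a signalled completion.
 */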
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	unsigned long flags;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;

	pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];

	if (!spin_trylock(&pr->xmit_lock))
		return NETDEV_TX_BUSY;

	if (pr->queue_stopped) {
		spin_unlock(&pr->xmit_lock);
		return NETDEV_TX_BUSY;
	}

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			    | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	if (port->vgrp && vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);
	}

	if (netif_msg_tx_queued(port)) {
		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(swqe, 512, "swqe");
	}

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_stop_queue(dev);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);
	pr->tx_packets++;

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		spin_lock_irqsave(&pr->netif_queue, flags);
		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
			pr->p_stats.queue_stopped++;
			netif_stop_queue(dev);
			pr->queue_stopped = 1;
		}
		spin_unlock_irqrestore(&pr->netif_queue, flags);
	}
	dev->trans_start = jiffies;
	spin_unlock(&pr->xmit_lock);

	return NETDEV_TX_OK;
}

static void ehea_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	u64 hret;

	port->vgrp = grp;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");

	kfree(cb1);
out:
	return;
}

static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	kfree(cb1);
	return;
}

static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	vlan_group_set_device(port->vgrp, vid, NULL);

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	kfree(cb1);
	return;
}

int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (1)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (1)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (2)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (2)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (3)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (3)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (4)");
		goto out;
	}

	ret = 0;
out:
	kfree(cb0);
	return ret;
}

static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
			       int add_tx_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		ehea_error("ehea_create_eq failed (qp_eq)");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}

static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}

static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps,
				  port->num_add_tx_qps);
	if (ret) {
		ehea_error("port_res_failed");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		ehea_error("ehea_configure_port failed. ret:%d", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		ehea_error("reg_interrupts failed. ret:%d", ret);
		goto out_clean_pr;
	}

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			ehea_error("activate_qp failed");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			ehea_error("out_free_irqs");
			goto out_free_irqs;
		}
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;

	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		ehea_info("Failed starting %s. ret=%i", dev->name, ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();

	return ret;
}

static void port_napi_disable(struct ehea_port *port)
{
        int i;

        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
                napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
        int i;

        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
                napi_enable(&port->port_res[i].napi);
}

static int ehea_open(struct net_device *dev)
{
        int ret;
        struct ehea_port *port = netdev_priv(dev);

        mutex_lock(&port->port_lock);

        if (netif_msg_ifup(port))
                ehea_info("enabling port %s", dev->name);

        ret = ehea_up(dev);
        if (!ret) {
                port_napi_enable(port);
                netif_start_queue(dev);
        }

        mutex_unlock(&port->port_lock);

        return ret;
}

static int ehea_down(struct net_device *dev)
{
        int ret;
        struct ehea_port *port = netdev_priv(dev);

        if (port->state == EHEA_PORT_DOWN)
                return 0;

        ehea_drop_multicast_list(dev);
        ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

        ehea_free_interrupts(dev);

        port->state = EHEA_PORT_DOWN;

        ehea_update_bcmc_registrations();

        ret = ehea_clean_all_portres(port);
        if (ret)
                ehea_info("Failed freeing resources for %s. ret=%i",
                          dev->name, ret);

        ehea_update_firmware_handles();

        return ret;
}

static int ehea_stop(struct net_device *dev)
{
        int ret;
        struct ehea_port *port = netdev_priv(dev);

        if (netif_msg_ifdown(port))
                ehea_info("disabling port %s", dev->name);

        set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
        cancel_work_sync(&port->reset_task);
        mutex_lock(&port->port_lock);
        netif_stop_queue(dev);
        port_napi_disable(port);
        ret = ehea_down(dev);
        mutex_unlock(&port->port_lock);
        clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
        return ret;
}

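/*
 * Mark every send WQE with EHEA_SWQE_PURGE so the hardware discards
 * pending transmit work instead of sending it; used while quiescing
 * the queues for memory re-registration.
 */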
static void ehea_purge_sq(struct ehea_qp *orig_qp)
{
        struct ehea_qp qp = *orig_qp;
        struct ehea_qp_init_attr *init_attr = &qp.init_attr;
        struct ehea_swqe *swqe;
        int wqe_index;
        int i;

        for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
                swqe = ehea_get_swqe(&qp, &wqe_index);
                swqe->tx_control |= EHEA_SWQE_PURGE;
        }
}

static void ehea_flush_sq(struct ehea_port *port)
{
        int i;

        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
                int k = 0;

                while (atomic_read(&pr->swqe_avail) < swqe_max) {
                        msleep(5);
                        if (++k == 20)
                                break;
                }
        }
}

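/*
 * Quiesce all QPs of a port: purge the send queue, then use the
 * firmware query/modify/query sequence to clear H_QP_CR_ENABLED, and
 * deregister the shared memory regions. The counterpart is
 * ehea_restart_qps() below.
 */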
int ehea_stop_qps(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_adapter *adapter = port->adapter;
        struct hcp_modify_qp_cb0 *cb0;
        int ret = -EIO;
        int dret;
        int i;
        u64 hret;
        u64 dummy64 = 0;
        u16 dummy16 = 0;

        cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!cb0) {
                ret = -ENOMEM;
                goto out;
        }

        for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                struct ehea_qp *qp = pr->qp;

                /* Purge send queue */
                ehea_purge_sq(qp);

                /* Disable queue pair */
                hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
                                            EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
                                            cb0);
                if (hret != H_SUCCESS) {
                        ehea_error("query_ehea_qp failed (1)");
                        goto out;
                }

                cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
                cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

                hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
                                             EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
                                                            1), cb0, &dummy64,
                                             &dummy64, &dummy16, &dummy16);
                if (hret != H_SUCCESS) {
                        ehea_error("modify_ehea_qp failed (1)");
                        goto out;
                }

                hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
                                            EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
                                            cb0);
                if (hret != H_SUCCESS) {
                        ehea_error("query_ehea_qp failed (2)");
                        goto out;
                }

                /* deregister shared memory regions */
                dret = ehea_rem_smrs(pr);
                if (dret) {
                        ehea_error("unreg shared memory region failed");
                        goto out;
                }
        }

        ret = 0;
out:
        kfree(cb0);

        return ret;
}

void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
        struct ehea_qp qp = *orig_qp;
        struct ehea_qp_init_attr *init_attr = &qp.init_attr;
        struct ehea_rwqe *rwqe;
        struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
        struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
        struct sk_buff *skb;
        u32 lkey = pr->recv_mr.lkey;

        int i;
        int index;

        for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
                rwqe = ehea_get_next_rwqe(&qp, 2);
                rwqe->sg_list[0].l_key = lkey;
                index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
                skb = skba_rq2[index];
                if (skb)
                        rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
        }

        for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
                rwqe = ehea_get_next_rwqe(&qp, 3);
                rwqe->sg_list[0].l_key = lkey;
                index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
                skb = skba_rq3[index];
                if (skb)
                        rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
        }
}

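/*
 * Inverse of ehea_stop_qps(): regenerate the shared memory regions,
 * patch the posted receive WQEs via ehea_update_rqs(), set
 * H_QP_CR_ENABLED again and refill all three receive queues.
 */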
int ehea_restart_qps(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_adapter *adapter = port->adapter;
        int ret = 0;
        int i;

        struct hcp_modify_qp_cb0 *cb0;
        u64 hret;
        u64 dummy64 = 0;
        u16 dummy16 = 0;

        cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!cb0) {
                ret = -ENOMEM;
                goto out;
        }

        for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                struct ehea_qp *qp = pr->qp;

                ret = ehea_gen_smrs(pr);
                if (ret) {
                        ehea_error("creation of shared memory regions failed");
                        goto out;
                }

                ehea_update_rqs(qp, pr);

                /* Enable queue pair */
                hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
                                            EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
                                            cb0);
                if (hret != H_SUCCESS) {
                        ehea_error("query_ehea_qp failed (1)");
                        goto out;
                }

                cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
                cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

                hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
                                             EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
                                                            1), cb0, &dummy64,
                                             &dummy64, &dummy16, &dummy16);
                if (hret != H_SUCCESS) {
                        ehea_error("modify_ehea_qp failed (1)");
                        goto out;
                }

                hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
                                            EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
                                            cb0);
                if (hret != H_SUCCESS) {
                        ehea_error("query_ehea_qp failed (2)");
                        goto out;
                }

                /* refill entire queue */
                ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
                ehea_refill_rq2(pr, 0);
                ehea_refill_rq3(pr, 0);
        }
out:
        kfree(cb0);

        return ret;
}

static void ehea_reset_port(struct work_struct *work)
{
        int ret;
        struct ehea_port *port =
                container_of(work, struct ehea_port, reset_task);
        struct net_device *dev = port->netdev;

        mutex_lock(&port->port_lock);
        netif_stop_queue(dev);

        port_napi_disable(port);

        ehea_down(dev);

        ret = ehea_up(dev);
        if (ret)
                goto out;

        ehea_set_multicast_list(dev);

        if (netif_msg_timer(port))
                ehea_info("Device %s reset successfully", dev->name);

        port_napi_enable(port);

        netif_wake_queue(dev);

out:
        mutex_unlock(&port->port_lock);
        return;
}

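/*
 * DLPAR memory change handler. All active ports are stopped and the
 * per-adapter memory regions are unregistered, the busmap is rebuilt
 * to cover the new memory layout, and then the MRs are re-registered
 * and the ports restarted. dlpar_mem_lock serializes concurrent
 * memory operations.
 */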
static void ehea_rereg_mrs(struct work_struct *work)
{
        int ret, i;
        struct ehea_adapter *adapter;

        mutex_lock(&dlpar_mem_lock);
        ehea_info("LPAR memory enlarged - re-initializing driver");

        list_for_each_entry(adapter, &adapter_list, list)
                if (adapter->active_ports) {
                        /* Shutdown all ports */
                        for (i = 0; i < EHEA_MAX_PORTS; i++) {
                                struct ehea_port *port = adapter->port[i];
                                struct net_device *dev;

                                if (!port)
                                        continue;

                                dev = port->netdev;

                                if (dev->flags & IFF_UP) {
                                        mutex_lock(&port->port_lock);
                                        netif_stop_queue(dev);
                                        ehea_flush_sq(port);
                                        ret = ehea_stop_qps(dev);
                                        if (ret) {
                                                mutex_unlock(&port->port_lock);
                                                goto out;
                                        }
                                        port_napi_disable(port);
                                        mutex_unlock(&port->port_lock);
                                }
                        }

                        /* Unregister old memory region */
                        ret = ehea_rem_mr(&adapter->mr);
                        if (ret) {
                                ehea_error("unregister MR failed - driver"
                                           " inoperable!");
                                goto out;
                        }
                }

        ehea_destroy_busmap();
        ret = ehea_create_busmap();
        if (ret) {
                ehea_error("creating ehea busmap failed");
                goto out;
        }

        clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);

        list_for_each_entry(adapter, &adapter_list, list)
                if (adapter->active_ports) {
                        /* Register new memory region */
                        ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
                        if (ret) {
                                ehea_error("register MR failed - driver"
                                           " inoperable!");
                                goto out;
                        }

                        /* Restart all ports */
                        for (i = 0; i < EHEA_MAX_PORTS; i++) {
                                struct ehea_port *port = adapter->port[i];

                                if (port) {
                                        struct net_device *dev = port->netdev;

                                        if (dev->flags & IFF_UP) {
                                                mutex_lock(&port->port_lock);
                                                port_napi_enable(port);
                                                ret = ehea_restart_qps(dev);
                                                if (!ret)
                                                        netif_wake_queue(dev);
                                                mutex_unlock(&port->port_lock);
                                        }
                                }
                        }
                }
        ehea_info("re-initializing driver complete");
out:
        mutex_unlock(&dlpar_mem_lock);
        return;
}

static void ehea_tx_watchdog(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);

        if (netif_carrier_ok(dev) &&
            !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
                ehea_schedule_port_reset(port);
}

int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
        struct hcp_query_ehea *cb;
        u64 hret;
        int ret;

        cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!cb) {
                ret = -ENOMEM;
                goto out;
        }

        hret = ehea_h_query_ehea(adapter->handle, cb);

        if (hret != H_SUCCESS) {
                ret = -EIO;
                goto out_herr;
        }

        adapter->max_mc_mac = cb->max_mc_mac - 1;
        ret = 0;

out_herr:
        kfree(cb);
out:
        return ret;
}

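/*
 * Query whether jumbo frames are enabled on the physical port via
 * control block 4; if they are not, try to switch them on with a
 * modify call. *jumbo is set to 1 on success, 0 otherwise.
 */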
int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
        struct hcp_ehea_port_cb4 *cb4;
        u64 hret;
        int ret = 0;

        *jumbo = 0;

        /* (Try to) enable jumbo frames */
        cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!cb4) {
                ehea_error("no mem for cb4");
                ret = -ENOMEM;
                goto out;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id,
                                      H_PORT_CB4,
                                      H_PORT_CB4_JUMBO, cb4);
        if (hret == H_SUCCESS) {
                if (cb4->jumbo_frame)
                        *jumbo = 1;
                else {
                        cb4->jumbo_frame = 1;
                        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                                       port->logical_port_id,
                                                       H_PORT_CB4,
                                                       H_PORT_CB4_JUMBO, cb4);
                        if (hret == H_SUCCESS)
                                *jumbo = 1;
                }
        } else
                ret = -EINVAL;

        kfree(cb4);
out:
        return ret;
}

static ssize_t ehea_show_port_id(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct ehea_port *port = container_of(dev, struct ehea_port,
                                              ofdev.dev);
        return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
                   NULL);

static void __devinit logical_port_release(struct device *dev)
{
        struct ehea_port *port = container_of(dev, struct ehea_port,
                                              ofdev.dev);
        of_node_put(port->ofdev.node);
}

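/*
 * Expose each logical port as a device on the ibmebus so that its
 * log_port_id attribute becomes visible in sysfs, e.g. (the exact
 * path depends on the system) /sys/bus/ibmebus/devices/port0/log_port_id.
 */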
static struct device *ehea_register_port(struct ehea_port *port,
                                         struct device_node *dn)
{
        int ret;

        port->ofdev.node = of_node_get(dn);
        port->ofdev.dev.parent = &port->adapter->ofdev->dev;
        port->ofdev.dev.bus = &ibmebus_bus_type;

        sprintf(port->ofdev.dev.bus_id, "port%d", port_name_cnt++);
        port->ofdev.dev.release = logical_port_release;

        ret = of_device_register(&port->ofdev);
        if (ret) {
                ehea_error("failed to register device. ret=%d", ret);
                goto out;
        }

        ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
        if (ret) {
                ehea_error("failed to register attributes, ret=%d", ret);
                goto out_unreg_of_dev;
        }

        return &port->ofdev.dev;

out_unreg_of_dev:
        of_device_unregister(&port->ofdev);
out:
        return NULL;
}

static void ehea_unregister_port(struct ehea_port *port)
{
        device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
        of_device_unregister(&port->ofdev);
}

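/*
 * Allocate and register the net_device for one logical port. Note the
 * MR accounting: ehea_add_adapter_mr()/ehea_remove_adapter_mr() in the
 * callers only create or destroy the adapter-wide memory region while
 * active_ports is zero, so the MR lives as long as any port is up.
 */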
struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
                                         u32 logical_port_id,
                                         struct device_node *dn)
{
        int ret;
        struct net_device *dev;
        struct ehea_port *port;
        struct device *port_dev;
        int jumbo;

        /* allocate memory for the port structures */
        dev = alloc_etherdev(sizeof(struct ehea_port));

        if (!dev) {
                ehea_error("no mem for net_device");
                ret = -ENOMEM;
                goto out_err;
        }

        port = netdev_priv(dev);

        mutex_init(&port->port_lock);
        port->state = EHEA_PORT_DOWN;
        port->sig_comp_iv = sq_entries / 10;

        port->adapter = adapter;
        port->netdev = dev;
        port->logical_port_id = logical_port_id;

        port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

        port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
        if (!port->mc_list) {
                ret = -ENOMEM;
                goto out_free_ethdev;
        }

        INIT_LIST_HEAD(&port->mc_list->list);

        ret = ehea_sense_port_attr(port);
        if (ret)
                goto out_free_mc_list;

        port_dev = ehea_register_port(port, dn);
        if (!port_dev)
                goto out_free_mc_list;

        SET_NETDEV_DEV(dev, port_dev);

        /* initialize net_device structure */
        memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

        dev->open = ehea_open;
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = ehea_netpoll;
#endif
        dev->stop = ehea_stop;
        dev->hard_start_xmit = ehea_start_xmit;
        dev->get_stats = ehea_get_stats;
        dev->set_multicast_list = ehea_set_multicast_list;
        dev->set_mac_address = ehea_set_mac_addr;
        dev->change_mtu = ehea_change_mtu;
        dev->vlan_rx_register = ehea_vlan_rx_register;
        dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid;
        dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid;
        dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
                      | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
                      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
                      | NETIF_F_LLTX;
        dev->tx_timeout = &ehea_tx_watchdog;
        dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

        INIT_WORK(&port->reset_task, ehea_reset_port);
        ehea_set_ethtool_ops(dev);

        ret = register_netdev(dev);
        if (ret) {
                ehea_error("register_netdev failed. ret=%d", ret);
                goto out_unreg_port;
        }

        port->lro_max_aggr = lro_max_aggr;

        ret = ehea_get_jumboframe_status(port, &jumbo);
        if (ret)
                ehea_error("failed determining jumbo frame status for %s",
                           port->netdev->name);

        ehea_info("%s: Jumbo frames are %sabled", dev->name,
                  jumbo == 1 ? "en" : "dis");

        adapter->active_ports++;

        return port;

out_unreg_port:
        ehea_unregister_port(port);

out_free_mc_list:
        kfree(port->mc_list);

out_free_ethdev:
        free_netdev(dev);

out_err:
        ehea_error("setting up logical port with id=%d failed, ret=%d",
                   logical_port_id, ret);
        return NULL;
}

static void ehea_shutdown_single_port(struct ehea_port *port)
{
        struct ehea_adapter *adapter = port->adapter;

        unregister_netdev(port->netdev);
        ehea_unregister_port(port);
        kfree(port->mc_list);
        free_netdev(port->netdev);
        adapter->active_ports--;
}

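/*
 * Walk the children of the adapter's device-tree node; every child
 * carrying an "ibm,hea-port-no" property describes one logical port
 * to be set up.
 */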
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
        struct device_node *lhea_dn;
        struct device_node *eth_dn = NULL;

        const u32 *dn_log_port_id;
        int i = 0;

        lhea_dn = adapter->ofdev->node;
        while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

                dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
                                                 NULL);
                if (!dn_log_port_id) {
                        ehea_error("bad device node: eth_dn name=%s",
                                   eth_dn->full_name);
                        continue;
                }

                if (ehea_add_adapter_mr(adapter)) {
                        ehea_error("creating MR failed");
                        of_node_put(eth_dn);
                        return -EIO;
                }

                adapter->port[i] = ehea_setup_single_port(adapter,
                                                          *dn_log_port_id,
                                                          eth_dn);
                if (adapter->port[i])
                        ehea_info("%s -> logical port id #%d",
                                  adapter->port[i]->netdev->name,
                                  *dn_log_port_id);
                else
                        ehea_remove_adapter_mr(adapter);

                i++;
        }

        return 0;
}

static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
                                           u32 logical_port_id)
{
        struct device_node *lhea_dn;
        struct device_node *eth_dn = NULL;
        const u32 *dn_log_port_id;

        lhea_dn = adapter->ofdev->node;
        while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

                dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
                                                 NULL);
                if (dn_log_port_id)
                        if (*dn_log_port_id == logical_port_id)
                                return eth_dn;
        }

        return NULL;
}

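/*
 * sysfs hook for adding a port at runtime. Writing a logical port id
 * to the adapter's probe_port attribute instantiates that port, e.g.
 * (the adapter path is system dependent):
 *	echo 2 > /sys/bus/ibmebus/devices/<adapter>/probe_port
 */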
static ssize_t ehea_probe_port(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
{
        struct ehea_adapter *adapter = dev->driver_data;
        struct ehea_port *port;
        struct device_node *eth_dn = NULL;
        int i;

        u32 logical_port_id;

        sscanf(buf, "%d", &logical_port_id);

        port = ehea_get_port(adapter, logical_port_id);

        if (port) {
                ehea_info("adding port with logical port id=%d failed. port "
                          "already configured as %s.", logical_port_id,
                          port->netdev->name);
                return -EINVAL;
        }

        eth_dn = ehea_get_eth_dn(adapter, logical_port_id);

        if (!eth_dn) {
                ehea_info("no logical port with id %d found", logical_port_id);
                return -EINVAL;
        }

        if (ehea_add_adapter_mr(adapter)) {
                ehea_error("creating MR failed");
                return -EIO;
        }

        port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

        of_node_put(eth_dn);

        if (port) {
                for (i = 0; i < EHEA_MAX_PORTS; i++)
                        if (!adapter->port[i]) {
                                adapter->port[i] = port;
                                break;
                        }

                ehea_info("added %s (logical port id=%d)", port->netdev->name,
                          logical_port_id);
        } else {
                ehea_remove_adapter_mr(adapter);
                return -EIO;
        }

        return (ssize_t) count;
}

static ssize_t ehea_remove_port(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        struct ehea_adapter *adapter = dev->driver_data;
        struct ehea_port *port;
        int i;
        u32 logical_port_id;

        sscanf(buf, "%d", &logical_port_id);

        port = ehea_get_port(adapter, logical_port_id);

        if (port) {
                ehea_info("removed %s (logical port id=%d)",
                          port->netdev->name, logical_port_id);

                ehea_shutdown_single_port(port);

                for (i = 0; i < EHEA_MAX_PORTS; i++)
                        if (adapter->port[i] == port) {
                                adapter->port[i] = NULL;
                                break;
                        }
        } else {
                ehea_error("removing port with logical port id=%d failed. port "
                           "not configured.", logical_port_id);
                return -EINVAL;
        }

        ehea_remove_adapter_mr(adapter);

        return (ssize_t) count;
}

static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);

int ehea_create_device_sysfs(struct of_device *dev)
{
        int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
        if (ret)
                goto out;

        ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
        return ret;
}

void ehea_remove_device_sysfs(struct of_device *dev)
{
        device_remove_file(&dev->dev, &dev_attr_probe_port);
        device_remove_file(&dev->dev, &dev_attr_remove_port);
}

static int __devinit ehea_probe_adapter(struct of_device *dev,
                                        const struct of_device_id *id)
{
        struct ehea_adapter *adapter;
        const u64 *adapter_handle;
        int ret;

        if (!dev || !dev->node) {
                ehea_error("Invalid ibmebus device probed");
                return -EINVAL;
        }

        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                ret = -ENOMEM;
                dev_err(&dev->dev, "no mem for ehea_adapter\n");
                goto out;
        }

        list_add(&adapter->list, &adapter_list);

        adapter->ofdev = dev;

        adapter_handle = of_get_property(dev->node, "ibm,hea-handle",
                                         NULL);
        if (adapter_handle)
                adapter->handle = *adapter_handle;

        if (!adapter->handle) {
                dev_err(&dev->dev, "failed getting handle for adapter"
                        " '%s'\n", dev->node->full_name);
                ret = -ENODEV;
                goto out_free_ad;
        }

        adapter->pd = EHEA_PD_ID;

        dev->dev.driver_data = adapter;


        /* initialize adapter and ports */
        /* get adapter properties */
        ret = ehea_sense_adapter_attr(adapter);
        if (ret) {
                dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
                goto out_free_ad;
        }

        adapter->neq = ehea_create_eq(adapter,
                                      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
        if (!adapter->neq) {
                ret = -EIO;
                dev_err(&dev->dev, "NEQ creation failed\n");
                goto out_free_ad;
        }

        tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
                     (unsigned long)adapter);

        ret = ibmebus_request_irq(adapter->neq->attr.ist1,
                                  ehea_interrupt_neq, IRQF_DISABLED,
                                  "ehea_neq", adapter);
        if (ret) {
                dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
                goto out_kill_eq;
        }

        ret = ehea_create_device_sysfs(dev);
        if (ret)
                goto out_free_irq;

        ret = ehea_setup_ports(adapter);
        if (ret) {
                dev_err(&dev->dev, "setup_ports failed\n");
                goto out_rem_dev_sysfs;
        }

        ret = 0;
        goto out;

out_rem_dev_sysfs:
        ehea_remove_device_sysfs(dev);

out_free_irq:
        ibmebus_free_irq(adapter->neq->attr.ist1, adapter);

out_kill_eq:
        ehea_destroy_eq(adapter->neq);

out_free_ad:
        kfree(adapter);

out:
        ehea_update_firmware_handles();
        return ret;
}

static int __devexit ehea_remove(struct of_device *dev)
{
        struct ehea_adapter *adapter = dev->dev.driver_data;
        int i;

        for (i = 0; i < EHEA_MAX_PORTS; i++)
                if (adapter->port[i]) {
                        ehea_shutdown_single_port(adapter->port[i]);
                        adapter->port[i] = NULL;
                }

        ehea_remove_device_sysfs(dev);

        flush_scheduled_work();

        ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
        tasklet_kill(&adapter->neq_tasklet);

        ehea_destroy_eq(adapter->neq);
        ehea_remove_adapter_mr(adapter);
        list_del(&adapter->list);
        kfree(adapter);

        ehea_update_firmware_handles();

        return 0;
}

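/*
 * kexec/crash shutdown hook: the normal teardown path will not run in
 * the crashed kernel, so force-free all tracked firmware handles and
 * deregister all broadcast/multicast registrations directly via
 * hcalls.
 */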
void ehea_crash_handler(void)
{
        int i;

        if (ehea_fw_handles.arr)
                for (i = 0; i < ehea_fw_handles.num_entries; i++)
                        ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
                                             ehea_fw_handles.arr[i].fwh,
                                             FORCE_FREE);

        if (ehea_bcmc_regs.arr)
                for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
                        ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
                                              ehea_bcmc_regs.arr[i].port_id,
                                              ehea_bcmc_regs.arr[i].reg_type,
                                              ehea_bcmc_regs.arr[i].macaddr,
                                              0, H_DEREG_BCMC);
}

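/*
 * Memory hotplug notifier: a MEM_OFFLINE event invalidates the
 * registered memory regions, so trigger the same re-registration
 * path that handles DLPAR memory changes.
 */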
static int ehea_mem_notifier(struct notifier_block *nb,
                             unsigned long action, void *data)
{
        switch (action) {
        case MEM_OFFLINE:
                ehea_info("memory has been removed");
                ehea_rereg_mrs(NULL);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block ehea_mem_nb = {
        .notifier_call = ehea_mem_notifier,
};

static int ehea_reboot_notifier(struct notifier_block *nb,
                                unsigned long action, void *unused)
{
        if (action == SYS_RESTART) {
                ehea_info("Reboot: freeing all eHEA resources");
                ibmebus_unregister_driver(&ehea_driver);
        }
        return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
        .notifier_call = ehea_reboot_notifier,
};

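/*
 * Queue sizes must be of the form 2^x - 1 (see the module parameter
 * descriptions above), bounded by the EHEA_MIN/MAX limits checked
 * here. For example, loading the module with
 *	modprobe ehea rq1_entries=4095
 * selects a valid RQ1 size of 2^12 - 1.
 */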
static int check_module_parm(void)
{
        int ret = 0;

        if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
            (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
                ehea_info("Bad parameter: rq1_entries");
                ret = -EINVAL;
        }
        if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
            (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
                ehea_info("Bad parameter: rq2_entries");
                ret = -EINVAL;
        }
        if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
            (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
                ehea_info("Bad parameter: rq3_entries");
                ret = -EINVAL;
        }
        if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
            (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
                ehea_info("Bad parameter: sq_entries");
                ret = -EINVAL;
        }

        return ret;
}

static ssize_t ehea_show_capabilities(struct device_driver *drv,
                                      char *buf)
{
        return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
                   ehea_show_capabilities, NULL);

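/*
 * Module init: the reboot, memory and crash notifiers are best-effort
 * (failures are only logged), whereas a failing parameter check,
 * busmap creation or ebus driver registration aborts the load.
 */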
int __init ehea_module_init(void)
{
        int ret;

        printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
               DRV_VERSION);


        INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
        memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
        memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

        mutex_init(&ehea_fw_handles.lock);
        spin_lock_init(&ehea_bcmc_regs.lock);

        ret = check_module_parm();
        if (ret)
                goto out;

        ret = ehea_create_busmap();
        if (ret)
                goto out;

        ret = register_reboot_notifier(&ehea_reboot_nb);
        if (ret)
                ehea_info("failed registering reboot notifier");

        ret = register_memory_notifier(&ehea_mem_nb);
        if (ret)
                ehea_info("failed registering memory remove notifier");

        ret = crash_shutdown_register(&ehea_crash_handler);
        if (ret)
                ehea_info("failed registering crash handler");

        ret = ibmebus_register_driver(&ehea_driver);
        if (ret) {
                ehea_error("failed registering eHEA device driver on ebus");
                goto out2;
        }

        ret = driver_create_file(&ehea_driver.driver,
                                 &driver_attr_capabilities);
        if (ret) {
                ehea_error("failed to register capabilities attribute, ret=%d",
                           ret);
                goto out3;
        }

        return ret;

out3:
        ibmebus_unregister_driver(&ehea_driver);
out2:
        unregister_memory_notifier(&ehea_mem_nb);
        unregister_reboot_notifier(&ehea_reboot_nb);
        crash_shutdown_unregister(&ehea_crash_handler);
out:
        return ret;
}

static void __exit ehea_module_exit(void)
{
        int ret;

        flush_scheduled_work();
        driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
        ibmebus_unregister_driver(&ehea_driver);
        unregister_reboot_notifier(&ehea_reboot_nb);
        ret = crash_shutdown_unregister(&ehea_crash_handler);
        if (ret)
                ehea_info("failed unregistering crash handler");
        unregister_memory_notifier(&ehea_mem_nb);
        kfree(ehea_fw_handles.arr);
        kfree(ehea_bcmc_regs.arr);
        ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);