/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_main.c 1377 2004-12-23 19:57:12Z roland $
 */

#include "ipoib.h"

#include <linux/version.h>
#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>       /* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

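/*
 * 20-byte IPoIB hardware address of the IPv4 broadcast group: the
 * first 4 bytes are the flags/QPN field (0x00ffffff, the broadcast
 * QPN), and the remaining 16 bytes are the broadcast MGID
 * ff12:401b:<pkey>::ffff:ffff.  The P_Key bytes (offsets 8 and 9)
 * are left zero here and filled in per port in ipoib_add_port().
 */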
static const u8 ipv4_bcast_addr[] = {
        0x00, 0xff, 0xff, 0xff,
        0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
        .name   = "ipoib",
        .add    = ipoib_add_one,
        .remove = ipoib_remove_one
};

int ipoib_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "bringing up interface\n");

        set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        if (ipoib_pkey_dev_delay_open(dev))
                return 0;

        if (ipoib_ib_dev_open(dev))
                return -EINVAL;

        if (ipoib_ib_dev_up(dev))
                return -EINVAL;

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring up any child interfaces too */
                down(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (flags & IFF_UP)
                                continue;

                        dev_change_flags(cpriv->dev, flags | IFF_UP);
                }
                up(&priv->vlan_mutex);
        }

        netif_start_queue(dev);

        return 0;
}

static int ipoib_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "stopping interface\n");

        clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        netif_stop_queue(dev);

        ipoib_ib_dev_down(dev);
        ipoib_ib_dev_stop(dev);

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring down any child interfaces too */
                down(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (!(flags & IFF_UP))
                                continue;

                        dev_change_flags(cpriv->dev, flags & ~IFF_UP);
                }
                up(&priv->vlan_mutex);
        }

        return 0;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
                return -EINVAL;

        priv->admin_mtu = new_mtu;

        dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

        return 0;
}

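/*
 * Path records learned from the SA are cached in an rb-tree
 * (priv->path_tree) keyed by destination GID, compared with
 * memcmp(); priv->path_list mirrors the tree so that
 * ipoib_flush_paths() can walk all entries easily.
 */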
static struct ipoib_path *__path_find(struct net_device *dev,
                                      union ib_gid *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->path_tree.rb_node;
        struct ipoib_path *path;
        int ret;

        while (n) {
                path = rb_entry(n, struct ipoib_path, rb_node);

                ret = memcmp(gid->raw, path->pathrec.dgid.raw,
                             sizeof (union ib_gid));

                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        return path;
        }

        return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node **n = &priv->path_tree.rb_node;
        struct rb_node *pn = NULL;
        struct ipoib_path *tpath;
        int ret;

        while (*n) {
                pn = *n;
                tpath = rb_entry(pn, struct ipoib_path, rb_node);

                ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
                             sizeof (union ib_gid));
                if (ret < 0)
                        n = &pn->rb_left;
                else if (ret > 0)
                        n = &pn->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(&path->rb_node, pn, n);
        rb_insert_color(&path->rb_node, &priv->path_tree);

        list_add_tail(&path->list, &priv->path_list);

        return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh, *tn;
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = __skb_dequeue(&path->queue)))
                dev_kfree_skb_irq(skb);

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
                /*
                 * It's safe to call ipoib_put_ah() inside priv->lock
                 * here, because we know that path->ah will always
                 * hold one more reference, so ipoib_put_ah() will
                 * never do more than decrement the ref count.
                 */
                if (neigh->ah)
                        ipoib_put_ah(neigh->ah);
                *to_ipoib_neigh(neigh->neighbour) = NULL;
                neigh->neighbour->ops->destructor = NULL;
                kfree(neigh);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (path->ah)
                ipoib_put_ah(path->ah);

        kfree(path);
}

void ipoib_flush_paths(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path, *tp;
        LIST_HEAD(remove_list);
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        list_splice(&priv->path_list, &remove_list);
        INIT_LIST_HEAD(&priv->path_list);

        list_for_each_entry(path, &remove_list, list)
                rb_erase(&path->rb_node, &priv->path_tree);

        spin_unlock_irqrestore(&priv->lock, flags);

        list_for_each_entry_safe(path, tp, &remove_list, list) {
                if (path->query)
                        ib_sa_cancel_query(path->query_id, path->query);
                wait_for_completion(&path->done);
                path_free(dev, path);
        }
}

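/*
 * Completion callback for the ib_sa_path_rec_get() query started in
 * path_rec_start(): on success, build an address handle from the
 * returned path record, attach it to the path and to any waiting
 * neighbours, then (after dropping the lock) retransmit every skb
 * that was queued while the lookup was in flight.
 */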
static void path_rec_completion(int status,
                                struct ib_sa_path_rec *pathrec,
                                void *path_ptr)
{
        struct ipoib_path *path = path_ptr;
        struct net_device *dev = path->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah = NULL;
        struct ipoib_neigh *neigh;
        struct sk_buff_head skqueue;
        struct sk_buff *skb;
        unsigned long flags;

        if (pathrec)
                ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
                          be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
        else
                ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n",
                          status, IPOIB_GID_ARG(path->pathrec.dgid));

        skb_queue_head_init(&skqueue);

        if (!status) {
                struct ib_ah_attr av = {
                        .dlid          = be16_to_cpu(pathrec->dlid),
                        .sl            = pathrec->sl,
                        .port_num      = priv->port
                };
                int path_rate = ib_sa_rate_enum_to_int(pathrec->rate);

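                /*
                 * static_rate is presumably an inter-packet delay
                 * (IPD) value here: throttle so that
                 * local_rate / (static_rate + 1) does not exceed the
                 * path rate -- e.g. a 4X local link sending to a 1X
                 * path gives an IPD of 3.
                 */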
                if (path_rate > 0 && priv->local_rate > path_rate)
                        av.static_rate = (priv->local_rate - 1) / path_rate;

                ipoib_dbg(priv, "static_rate %d for local port %dX, path %dX\n",
                          av.static_rate, priv->local_rate,
                          ib_sa_rate_enum_to_int(pathrec->rate));

                ah = ipoib_create_ah(dev, priv->pd, &av);
        }

        spin_lock_irqsave(&priv->lock, flags);

        path->ah = ah;

        if (ah) {
                path->pathrec = *pathrec;

                ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
                          ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

                while ((skb = __skb_dequeue(&path->queue)))
                        __skb_queue_tail(&skqueue, skb);

                list_for_each_entry(neigh, &path->neigh_list, list) {
                        kref_get(&path->ah->ref);
                        neigh->ah = path->ah;

                        while ((skb = __skb_dequeue(&neigh->queue)))
                                __skb_queue_tail(&skqueue, skb);
                }
        } else
                path->query = NULL;

        complete(&path->done);

        spin_unlock_irqrestore(&priv->lock, flags);

        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = dev;
                if (dev_queue_xmit(skb))
                        ipoib_warn(priv, "dev_queue_xmit failed "
                                   "to requeue packet\n");
        }
}

static struct ipoib_path *path_rec_create(struct net_device *dev,
                                          union ib_gid *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;

        path = kmalloc(sizeof *path, GFP_ATOMIC);
        if (!path)
                return NULL;

        path->dev          = dev;
        path->pathrec.dlid = 0;
        path->ah           = NULL;

        skb_queue_head_init(&path->queue);

        INIT_LIST_HEAD(&path->neigh_list);
        path->query = NULL;
        init_completion(&path->done);

        memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid));
        path->pathrec.sgid      = priv->local_gid;
        path->pathrec.pkey      = cpu_to_be16(priv->pkey);
        path->pathrec.numb_path = 1;

        return path;
}

static int path_rec_start(struct net_device *dev,
                          struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
                  IPOIB_GID_ARG(path->pathrec.dgid));

        path->query_id =
                ib_sa_path_rec_get(priv->ca, priv->port,
                                   &path->pathrec,
                                   IB_SA_PATH_REC_DGID          |
                                   IB_SA_PATH_REC_SGID          |
                                   IB_SA_PATH_REC_NUMB_PATH     |
                                   IB_SA_PATH_REC_PKEY,
                                   1000, GFP_ATOMIC,
                                   path_rec_completion,
                                   path, &path->query);
        if (path->query_id < 0) {
                ipoib_warn(priv, "ib_sa_path_rec_get failed\n");
                path->query = NULL;
                return path->query_id;
        }

        return 0;
}

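/*
 * An IPoIB hardware address is 20 bytes: a 4-byte field carrying the
 * remote QPN (top byte reserved for flags) followed by the 16-byte
 * port GID, which is why the code below uses neighbour->ha + 4 as a
 * GID and be32_to_cpup() on the front of ha as the destination QPN.
 */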
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;
        struct ipoib_neigh *neigh;

        neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
        if (!neigh) {
                ++priv->stats.tx_dropped;
                dev_kfree_skb_any(skb);
                return;
        }

        skb_queue_head_init(&neigh->queue);
        neigh->neighbour = skb->dst->neighbour;
        *to_ipoib_neigh(skb->dst->neighbour) = neigh;

        /*
         * We can only be called from ipoib_start_xmit, so we're
         * inside tx_lock -- no need to save/restore flags.
         */
        spin_lock(&priv->lock);

        path = __path_find(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4));
        if (!path) {
                path = path_rec_create(dev,
                                       (union ib_gid *) (skb->dst->neighbour->ha + 4));
                if (!path)
                        goto err;

                __path_add(dev, path);
        }

        list_add_tail(&neigh->list, &path->neigh_list);

        if (path->pathrec.dlid) {
                kref_get(&path->ah->ref);
                neigh->ah = path->ah;

                ipoib_send(dev, skb, path->ah,
                           be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
        } else {
                neigh->ah  = NULL;
                if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                        __skb_queue_tail(&neigh->queue, skb);
                } else {
                        ++priv->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }

                if (!path->query && path_rec_start(dev, path))
                        goto err;
        }

        spin_unlock(&priv->lock);
        return;

err:
        *to_ipoib_neigh(skb->dst->neighbour) = NULL;
        list_del(&neigh->list);
        neigh->neighbour->ops->destructor = NULL;
        kfree(neigh);

        ++priv->stats.tx_dropped;
        dev_kfree_skb_any(skb);

        spin_unlock(&priv->lock);
}

static void path_lookup(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

        /* Look up path record for unicasts */
        if (skb->dst->neighbour->ha[4] != 0xff) {
                neigh_add_path(skb, dev);
                return;
        }

        /* Add in the P_Key for multicasts */
        skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
        skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
        ipoib_mcast_send(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4), skb);
}

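/*
 * Transmit path for unicast packets that have no neighbour entry:
 * ipoib_hard_header() stashed the full destination address in a
 * pseudoheader, which ipoib_start_xmit() has already pulled off the
 * skb.  In practice these should only be ARP or RARP replies (see
 * the check in ipoib_start_xmit()).
 */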
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
                             struct ipoib_pseudoheader *phdr)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;

        /*
         * We can only be called from ipoib_start_xmit, so we're
         * inside tx_lock -- no need to save/restore flags.
         */
        spin_lock(&priv->lock);

        path = __path_find(dev, (union ib_gid *) (phdr->hwaddr + 4));
        if (!path) {
                path = path_rec_create(dev,
                                       (union ib_gid *) (phdr->hwaddr + 4));
                if (path) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof *phdr);
                        __skb_queue_tail(&path->queue, skb);

                        if (path_rec_start(dev, path)) {
                                spin_unlock(&priv->lock);
                                path_free(dev, path);
                                return;
                        } else
                                __path_add(dev, path);
                } else {
                        ++priv->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }

                spin_unlock(&priv->lock);
                return;
        }

        if (path->pathrec.dlid) {
                ipoib_dbg(priv, "Send unicast ARP to %04x\n",
                          be16_to_cpu(path->pathrec.dlid));

                ipoib_send(dev, skb, path->ah,
                           be32_to_cpup((__be32 *) phdr->hwaddr));
        } else if ((path->query || !path_rec_start(dev, path)) &&
                   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                /* put pseudoheader back on for next time */
                skb_push(skb, sizeof *phdr);
                __skb_queue_tail(&path->queue, skb);
        } else {
                ++priv->stats.tx_dropped;
                dev_kfree_skb_any(skb);
        }

        spin_unlock(&priv->lock);
}

static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh;
        unsigned long flags;

        local_irq_save(flags);
        if (!spin_trylock(&priv->tx_lock)) {
                local_irq_restore(flags);
                return NETDEV_TX_LOCKED;
        }

        /*
         * Check if our queue is stopped.  Since we have the LLTX bit
         * set, we can't rely on netif_stop_queue() preventing our
         * xmit function from being called with a full queue.
         */
        if (unlikely(netif_queue_stopped(dev))) {
                spin_unlock_irqrestore(&priv->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }

        if (skb->dst && skb->dst->neighbour) {
                if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
                        path_lookup(skb, dev);
                        goto out;
                }

                neigh = *to_ipoib_neigh(skb->dst->neighbour);

                if (likely(neigh->ah)) {
                        ipoib_send(dev, skb, neigh->ah,
                                   be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
                        goto out;
                }

                if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                        spin_lock(&priv->lock);
                        __skb_queue_tail(&neigh->queue, skb);
                        spin_unlock(&priv->lock);
                } else {
                        ++priv->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }
        } else {
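                /*
                 * No dst/neighbour: ipoib_hard_header() pushed a
                 * pseudoheader carrying the 20-byte destination
                 * address onto the front of the skb, so pull it back
                 * off and route on it directly.
                 */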
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb->data;
                skb_pull(skb, sizeof *phdr);

                if (phdr->hwaddr[4] == 0xff) {
                        /* Add in the P_Key for multicast */
                        phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
                        phdr->hwaddr[9] = priv->pkey & 0xff;

                        ipoib_mcast_send(dev, (union ib_gid *) (phdr->hwaddr + 4), skb);
                } else {
                        /* unicast GID -- should be ARP or RARP reply */

                        if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
                            (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
                                ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
                                           IPOIB_GID_FMT "\n",
                                           skb->dst ? "neigh" : "dst",
                                           be16_to_cpup((__be16 *) skb->data),
                                           be32_to_cpup((__be32 *) phdr->hwaddr),
                                           IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4)));
                                dev_kfree_skb_any(skb);
                                ++priv->stats.tx_dropped;
                                goto out;
                        }

                        unicast_arp_send(skb, dev, phdr);
                }
        }

out:
        spin_unlock_irqrestore(&priv->tx_lock, flags);

        return NETDEV_TX_OK;
}

static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        return &priv->stats;
}

static void ipoib_timeout(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_warn(priv, "transmit timeout: latency %ld\n",
                   jiffies - dev->trans_start);
        /* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
                             struct net_device *dev,
                             unsigned short type,
                             void *daddr, void *saddr, unsigned len)
{
        struct ipoib_header *header;

        header = (struct ipoib_header *) skb_push(skb, sizeof *header);

        header->proto = htons(type);
        header->reserved = 0;

        /*
         * If we don't have a neighbour structure, stuff the
         * destination address onto the front of the skb so we can
         * figure out where to send the packet later.
         */
        if (!skb->dst || !skb->dst->neighbour) {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
                memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
        }

        return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        schedule_work(&priv->restart_task);
}

static void ipoib_neigh_destructor(struct neighbour *n)
{
        struct ipoib_neigh *neigh;
        struct ipoib_dev_priv *priv = netdev_priv(n->dev);
        unsigned long flags;
        struct ipoib_ah *ah = NULL;

        ipoib_dbg(priv,
                  "neigh_destructor for %06x " IPOIB_GID_FMT "\n",
                  be32_to_cpup((__be32 *) n->ha),
                  IPOIB_GID_ARG(*((union ib_gid *) (n->ha + 4))));

        spin_lock_irqsave(&priv->lock, flags);

        neigh = *to_ipoib_neigh(n);
        if (neigh) {
                if (neigh->ah)
                        ah = neigh->ah;
                list_del(&neigh->list);
                *to_ipoib_neigh(n) = NULL;
                kfree(neigh);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

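        /*
         * Drop the AH reference outside priv->lock: unlike in
         * path_free(), nothing guarantees another reference is still
         * held, so this ipoib_put_ah() may be the one that actually
         * frees the AH.
         */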
        if (ah)
                ipoib_put_ah(ah);
}

static int ipoib_neigh_setup(struct neighbour *neigh)
{
        /*
         * Is this kosher?  I can't find anybody in the kernel that
         * sets neigh->destructor, so we should be able to set it here
         * without trouble.
         */
        neigh->ops->destructor = ipoib_neigh_destructor;

        return 0;
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
        parms->neigh_setup = ipoib_neigh_setup;

        return 0;
}

int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* Allocate RX/TX "rings" to hold queued skbs */

        priv->rx_ring = kmalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_buf),
                                GFP_KERNEL);
        if (!priv->rx_ring) {
                printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
                       ca->name, IPOIB_RX_RING_SIZE);
                goto out;
        }
        memset(priv->rx_ring, 0,
               IPOIB_RX_RING_SIZE * sizeof (struct ipoib_buf));

        priv->tx_ring = kmalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_buf),
                                GFP_KERNEL);
        if (!priv->tx_ring) {
                printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
                       ca->name, IPOIB_TX_RING_SIZE);
                goto out_rx_ring_cleanup;
        }
        memset(priv->tx_ring, 0,
               IPOIB_TX_RING_SIZE * sizeof (struct ipoib_buf));

        /* priv->tx_head & tx_tail are already 0 */

        if (ipoib_ib_dev_init(dev, ca, port))
                goto out_tx_ring_cleanup;

        return 0;

out_tx_ring_cleanup:
        kfree(priv->tx_ring);

out_rx_ring_cleanup:
        kfree(priv->rx_ring);

out:
        return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

        ipoib_delete_debug_file(dev);

        /* Delete any child interfaces first */
        list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
                unregister_netdev(cpriv->dev);
                ipoib_dev_cleanup(cpriv->dev);
                free_netdev(cpriv->dev);
        }

        ipoib_ib_dev_cleanup(dev);

        kfree(priv->rx_ring);
        kfree(priv->tx_ring);

        priv->rx_ring = NULL;
        priv->tx_ring = NULL;
}

static void ipoib_setup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        dev->open                = ipoib_open;
        dev->stop                = ipoib_stop;
        dev->change_mtu          = ipoib_change_mtu;
        dev->hard_start_xmit     = ipoib_start_xmit;
        dev->get_stats           = ipoib_get_stats;
        dev->tx_timeout          = ipoib_timeout;
        dev->hard_header         = ipoib_hard_header;
        dev->set_multicast_list  = ipoib_set_mcast_list;
        dev->neigh_setup         = ipoib_neigh_setup_dev;

        dev->watchdog_timeo      = HZ;

        dev->rebuild_header      = NULL;
        dev->set_mac_address     = NULL;
        dev->header_cache_update = NULL;

        dev->flags              |= IFF_BROADCAST | IFF_MULTICAST;

        /*
         * We add in INFINIBAND_ALEN to allow for the destination
         * address "pseudoheader" for skbs without neighbour struct.
         */
        dev->hard_header_len     = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
        dev->addr_len            = INFINIBAND_ALEN;
        dev->type                = ARPHRD_INFINIBAND;
        dev->tx_queue_len        = IPOIB_TX_RING_SIZE * 2;
        dev->features            = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;

        /* MTU will be reset when mcast join happens */
        dev->mtu                 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
        priv->mcast_mtu          = priv->admin_mtu = dev->mtu;

        memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

        netif_carrier_off(dev);

        SET_MODULE_OWNER(dev);

        priv->dev = dev;

        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->tx_lock);

        init_MUTEX(&priv->mcast_mutex);
        init_MUTEX(&priv->vlan_mutex);

        INIT_LIST_HEAD(&priv->path_list);
        INIT_LIST_HEAD(&priv->child_intfs);
        INIT_LIST_HEAD(&priv->dead_ahs);
        INIT_LIST_HEAD(&priv->multicast_list);

        INIT_WORK(&priv->pkey_task,    ipoib_pkey_poll,          priv->dev);
        INIT_WORK(&priv->mcast_task,   ipoib_mcast_join_task,    priv->dev);
        INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush,       priv->dev);
        INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
        INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah,            priv->dev);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
        struct net_device *dev;

        dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
                           ipoib_setup);
        if (!dev)
                return NULL;

        return netdev_priv(dev);
}

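/*
 * sysfs attributes, hung off the net_device's class_device (so they
 * should appear under /sys/class/net/<ibX>/ in this kernel era):
 * "pkey" is read-only, while "create_child" and "delete_child" let
 * the administrator create and destroy P_Key child interfaces.
 */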
static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
        struct ipoib_dev_priv *priv =
                netdev_priv(container_of(cdev, struct net_device, class_dev));

        return sprintf(buf, "0x%04x\n", priv->pkey);
}
static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t create_child(struct class_device *cdev,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev),
                             pkey);

        return ret ? ret : count;
}
static CLASS_DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);

static ssize_t delete_child(struct class_device *cdev,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        ret = ipoib_vlan_delete(container_of(cdev, struct net_device, class_dev),
                                pkey);

        return ret ? ret : count;
}
static CLASS_DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
        return class_device_create_file(&dev->class_dev,
                                        &class_device_attr_pkey);
}

static struct net_device *ipoib_add_port(const char *format,
                                         struct ib_device *hca, u8 port)
{
        struct ipoib_dev_priv *priv;
        int result = -ENOMEM;

        priv = ipoib_intf_alloc(format);
        if (!priv)
                goto alloc_mem_failed;

        SET_NETDEV_DEV(priv->dev, hca->dma_device);

        result = ib_query_pkey(hca, port, 0, &priv->pkey);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto alloc_mem_failed;
        }

        priv->dev->broadcast[8] = priv->pkey >> 8;
        priv->dev->broadcast[9] = priv->pkey & 0xff;

        result = ib_query_gid(hca, port, 0, &priv->local_gid);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto alloc_mem_failed;
        } else
                memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

        result = ipoib_dev_init(priv->dev, hca, port);
        if (result < 0) {
                printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
        }

        INIT_IB_EVENT_HANDLER(&priv->event_handler,
                              priv->ca, ipoib_event);
        result = ib_register_event_handler(&priv->event_handler);
        if (result < 0) {
                printk(KERN_WARNING "%s: ib_register_event_handler failed for "
                       "port %d (ret = %d)\n",
                       hca->name, port, result);
                goto event_failed;
        }

        result = register_netdev(priv->dev);
        if (result) {
                printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
                       hca->name, port, result);
                goto register_failed;
        }

        if (ipoib_create_debug_file(priv->dev))
                goto debug_failed;

        if (ipoib_add_pkey_attr(priv->dev))
                goto sysfs_failed;
        if (class_device_create_file(&priv->dev->class_dev,
                                     &class_device_attr_create_child))
                goto sysfs_failed;
        if (class_device_create_file(&priv->dev->class_dev,
                                     &class_device_attr_delete_child))
                goto sysfs_failed;

        return priv->dev;

sysfs_failed:
        ipoib_delete_debug_file(priv->dev);

debug_failed:
        unregister_netdev(priv->dev);

register_failed:
        ib_unregister_event_handler(&priv->event_handler);

event_failed:
        ipoib_dev_cleanup(priv->dev);

device_init_failed:
        free_netdev(priv->dev);

alloc_mem_failed:
        return ERR_PTR(result);
}

static void ipoib_add_one(struct ib_device *device)
{
        struct list_head *dev_list;
        struct net_device *dev;
        struct ipoib_dev_priv *priv;
        int s, e, p;

        dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
        if (!dev_list)
                return;

        INIT_LIST_HEAD(dev_list);

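        /*
         * Port numbering: a switch runs IPoIB only on its management
         * port (port 0), while CAs and routers use ports
         * 1..phys_port_cnt.
         */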
        if (device->node_type == IB_NODE_SWITCH) {
                s = 0;
                e = 0;
        } else {
                s = 1;
                e = device->phys_port_cnt;
        }

        for (p = s; p <= e; ++p) {
                dev = ipoib_add_port("ib%d", device, p);
                if (!IS_ERR(dev)) {
                        priv = netdev_priv(dev);
                        list_add_tail(&priv->list, dev_list);
                }
        }

        ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
        struct ipoib_dev_priv *priv, *tmp;
        struct list_head *dev_list;

        dev_list = ib_get_client_data(device, &ipoib_client);

        list_for_each_entry_safe(priv, tmp, dev_list, list) {
                ib_unregister_event_handler(&priv->event_handler);

                unregister_netdev(priv->dev);
                ipoib_dev_cleanup(priv->dev);
                free_netdev(priv->dev);
        }
}

static int __init ipoib_init_module(void)
{
        int ret;

        ret = ipoib_register_debugfs();
        if (ret)
                return ret;

        /*
         * We create our own workqueue mainly because we want to be
         * able to flush it when devices are being removed.  We can't
         * use schedule_work()/flush_scheduled_work() because both
         * unregister_netdev() and linkwatch_event take the rtnl lock,
         * so flush_scheduled_work() can deadlock during device
         * removal.
         */
        ipoib_workqueue = create_singlethread_workqueue("ipoib");
        if (!ipoib_workqueue) {
                ret = -ENOMEM;
                goto err_fs;
        }

        ret = ib_register_client(&ipoib_client);
        if (ret)
                goto err_wq;

        return 0;

err_wq:
        destroy_workqueue(ipoib_workqueue);

err_fs:
        ipoib_unregister_debugfs();

        return ret;
}

static void __exit ipoib_cleanup_module(void)
{
        ib_unregister_client(&ipoib_client);
        ipoib_unregister_debugfs();
        destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);