]> pilppa.org Git - familiar-h63xx-build.git/blob - org.handhelds.familiar/packages/linux/linux-wrt-2.4.20/110-sch_htb.patch
OE tree imported from monotone branch org.openembedded.oz354fam083 at revision 8b12e3...
[familiar-h63xx-build.git] / org.handhelds.familiar / packages / linux / linux-wrt-2.4.20 / 110-sch_htb.patch
1
2 #
3 # Patch managed by http://www.mn-logistik.de/unsupported/pxa250/patcher
4 #
5
6 --- linux-2.4.20/net/sched/sch_htb.c~110-schhtb 2005-01-07 02:53:54.527710000 -0500
7 +++ linux-2.4.20/net/sched/sch_htb.c    2005-01-07 02:54:26.350872392 -0500
8 @@ -9,6 +9,8 @@
9   * Authors:    Martin Devera, <devik@cdi.cz>
10   *
11   * Credits (in time order) for older HTB versions:
12 + *              Stef Coene <stef.coene@docum.org>
13 + *                     HTB support at LARTC mailing list
14   *             Ondrej Kraus, <krauso@barr.cz> 
15   *                     found missing INIT_QDISC(htb)
16   *             Vladimir Smelhaus, Aamer Akhter, Bert Hubert
17 @@ -17,9 +19,13 @@
18   *                     code review and helpful comments on shaping
19   *             Tomasz Wrona, <tw@eter.tym.pl>
20   *                     created test case so that I was able to fix nasty bug
21 + *             Wilfried Weissmann
22 + *                     spotted bug in dequeue code and helped with fix
23 + *             Jiri Fojtasek
24 + *                     fixed requeue routine
25   *             and many others. thanks.
26   *
27 - * $Id: sch_htb.c,v 1.14 2002/09/28 12:55:22 devik Exp devik $
28 + * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
29   */
30  #include <linux/config.h>
31  #include <linux/module.h>
32 @@ -71,16 +77,12 @@
33  #define HTB_HYSTERESIS 1/* whether to use mode hysteresis for speedup */
34  #define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
35  #define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
36 -#define HTB_VER 0x30007        /* major must be matched with number suplied by TC as version */
37 +#define HTB_VER 0x30010        /* major must be matched with number supplied by TC as version */
38  
39  #if HTB_VER >> 16 != TC_HTB_PROTOVER
40  #error "Mismatched sch_htb.c and pkt_sch.h"
41  #endif
42  
43 -/* temporary debug defines to be removed after beta stage */
44 -#define DEVIK_MEND(N)
45 -#define DEVIK_MSTART(N)
46 -
47  /* debugging support; S is subsystem, these are defined:
48    0 - netlink messages
49    1 - enqueue
50 @@ -100,13 +102,16 @@
51   from LSB
52   */
53  #ifdef HTB_DEBUG
54 -#define HTB_DBG(S,L,FMT,ARG...) if (((q->debug>>(2*S))&3) >= L) \
55 +#define HTB_DBG_COND(S,L) (((q->debug>>(2*S))&3) >= L)
56 +#define HTB_DBG(S,L,FMT,ARG...) if (HTB_DBG_COND(S,L)) \
57         printk(KERN_DEBUG FMT,##ARG)
58  #define HTB_CHCL(cl) BUG_TRAP((cl)->magic == HTB_CMAGIC)
59  #define HTB_PASSQ q,
60  #define HTB_ARGQ struct htb_sched *q,
61  #define static
62 +#undef __inline__
63  #define __inline__
64 +#undef inline
65  #define inline
66  #define HTB_CMAGIC 0xFEFAFEF1
67  #define htb_safe_rb_erase(N,R) do { BUG_TRAP((N)->rb_color != -1); \
68 @@ -114,6 +119,7 @@
69                 rb_erase(N,R); \
70                 (N)->rb_color = -1; } while (0)
71  #else
72 +#define HTB_DBG_COND(S,L) (0)
73  #define HTB_DBG(S,L,FMT,ARG...)
74  #define HTB_PASSQ
75  #define HTB_ARGQ
76 @@ -219,6 +225,9 @@
77      /* time of nearest event per level (row) */
78      unsigned long near_ev_cache[TC_HTB_MAXDEPTH];
79  
80 +    /* cached value of jiffies in dequeue */
81 +    unsigned long jiffies;
82 +
83      /* whether we hit non-work conserving class during this dequeue; we use */
84      int nwc_hit;       /* this to disable mindelay complaint in dequeue */
85  
86 @@ -297,7 +306,7 @@
87            rules in it */
88         if (skb->priority == sch->handle)
89                 return HTB_DIRECT;  /* X:0 (direct flow) selected */
90 -       if ((cl = htb_find(skb->priority,sch)) != NULL) 
91 +       if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0) 
92                 return cl;
93  
94         tcf = q->filter_list;
95 @@ -338,7 +347,7 @@
96  static void htb_debug_dump (struct htb_sched *q)
97  {
98         int i,p;
99 -       printk(KERN_DEBUG "htb*g j=%lu\n",jiffies);
100 +       printk(KERN_DEBUG "htb*g j=%lu lj=%lu\n",jiffies,q->jiffies);
101         /* rows */
102         for (i=TC_HTB_MAXDEPTH-1;i>=0;i--) {
103                 printk(KERN_DEBUG "htb*r%d m=%x",i,q->row_mask[i]);
104 @@ -421,26 +430,24 @@
105         if ((delay <= 0 || delay > cl->mbuffer) && net_ratelimit())
106                 printk(KERN_ERR "HTB: suspicious delay in wait_tree d=%ld cl=%X h=%d\n",delay,cl->classid,debug_hint);
107  #endif
108 -       DEVIK_MSTART(9);
109 -       cl->pq_key = jiffies + PSCHED_US2JIFFIE(delay);
110 -       if (cl->pq_key == jiffies)
111 +       cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
112 +       if (cl->pq_key == q->jiffies)
113                 cl->pq_key++;
114  
115         /* update the nearest event cache */
116 -       if (q->near_ev_cache[cl->level] - cl->pq_key < 0x80000000)
117 +       if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
118                 q->near_ev_cache[cl->level] = cl->pq_key;
119         
120         while (*p) {
121                 struct htb_class *c; parent = *p;
122                 c = rb_entry(parent, struct htb_class, pq_node);
123 -               if (cl->pq_key - c->pq_key < 0x80000000)
124 +               if (time_after_eq(cl->pq_key, c->pq_key))
125                         p = &parent->rb_right;
126                 else 
127                         p = &parent->rb_left;
128         }
129         rb_link_node(&cl->pq_node, parent, p);
130         rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
131 -       DEVIK_MEND(9);
132  }
133  
134  /**
135 @@ -453,12 +460,14 @@
136  {
137         rb_node_t *p;
138         if ((*n)->rb_right) {
139 +               /* child at right. use it or its leftmost descendant */
140                 *n = (*n)->rb_right;
141                 while ((*n)->rb_left) 
142                         *n = (*n)->rb_left;
143                 return;
144         }
145         while ((p = (*n)->rb_parent) != NULL) {
146 +               /* if we've arrived from left child then we have next node */
147                 if (p->rb_left == *n) break;
148                 *n = p;
149         }
150 @@ -602,7 +611,7 @@
151      long toks;
152  
153      if ((toks = (cl->ctokens + *diff)) < (
154 -#ifdef HTB_HYSTERESIS
155 +#if HTB_HYSTERESIS
156             cl->cmode != HTB_CANT_SEND ? -cl->cbuffer :
157  #endif
158                     0)) {
159 @@ -610,7 +619,7 @@
160             return HTB_CANT_SEND;
161      }
162      if ((toks = (cl->tokens + *diff)) >= (
163 -#ifdef HTB_HYSTERESIS
164 +#if HTB_HYSTERESIS
165             cl->cmode == HTB_CAN_SEND ? -cl->buffer :
166  #endif
167             0))
168 @@ -689,7 +698,6 @@
169      struct htb_sched *q = (struct htb_sched *)sch->data;
170      struct htb_class *cl = htb_classify(skb,sch);
171  
172 -    DEVIK_MSTART(0);
173      if (cl == HTB_DIRECT || !cl) {
174         /* enqueue to helper queue */
175         if (q->direct_queue.qlen < q->direct_qlen && cl) {
176 @@ -698,25 +706,20 @@
177         } else {
178             kfree_skb (skb);
179             sch->stats.drops++;
180 -           DEVIK_MEND(0);
181             return NET_XMIT_DROP;
182         }
183      } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
184         sch->stats.drops++;
185         cl->stats.drops++;
186 -       DEVIK_MEND(0);
187         return NET_XMIT_DROP;
188      } else {
189         cl->stats.packets++; cl->stats.bytes += skb->len;
190 -       DEVIK_MSTART(1);
191         htb_activate (q,cl);
192 -       DEVIK_MEND(1);
193      }
194  
195      sch->q.qlen++;
196      sch->stats.packets++; sch->stats.bytes += skb->len;
197 -    HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
198 -    DEVIK_MEND(0);
199 +    HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
200      return NET_XMIT_SUCCESS;
201  }
202  
203 @@ -725,16 +728,18 @@
204  {
205      struct htb_sched *q = (struct htb_sched *)sch->data;
206      struct htb_class *cl = htb_classify(skb,sch);
207 +    struct sk_buff *tskb;
208  
209      if (cl == HTB_DIRECT || !cl) {
210         /* enqueue to helper queue */
211         if (q->direct_queue.qlen < q->direct_qlen && cl) {
212 -           __skb_queue_tail(&q->direct_queue, skb);
213 -           q->direct_pkts++;
214 +           __skb_queue_head(&q->direct_queue, skb);
215         } else {
216 -           kfree_skb (skb);
217 -           sch->stats.drops++;
218 -           return NET_XMIT_DROP;
219 +            __skb_queue_head(&q->direct_queue, skb);
220 +            tskb = __skb_dequeue_tail(&q->direct_queue);
221 +            kfree_skb (tskb);
222 +            sch->stats.drops++;
223 +            return NET_XMIT_CN;        
224         }
225      } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
226         sch->stats.drops++;
227 @@ -744,7 +749,7 @@
228             htb_activate (q,cl);
229  
230      sch->q.qlen++;
231 -    HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
232 +    HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
233      return NET_XMIT_SUCCESS;
234  }
235  
236 @@ -819,7 +824,7 @@
237                                        cl->classid, diff,
238                                        (unsigned long long) q->now,
239                                        (unsigned long long) cl->t_c,
240 -                                      jiffies);
241 +                                      q->jiffies);
242                         diff = 1000;
243                 }
244  #endif
245 @@ -862,6 +867,7 @@
246   *
247   * Scans event queue for pending events and applies them. Returns jiffies to
248   * next pending event (0 for no event in pq).
249 + * Note: Applied are events which have cl->pq_key <= jiffies.
250   */
251  static long htb_do_events(struct htb_sched *q,int level)
252  {
253 @@ -876,9 +882,9 @@
254                 while (p->rb_left) p = p->rb_left;
255  
256                 cl = rb_entry(p, struct htb_class, pq_node);
257 -               if (cl->pq_key - (jiffies+1) < 0x80000000) {
258 -                       HTB_DBG(8,3,"htb_do_ev_ret delay=%ld\n",cl->pq_key - jiffies);
259 -                       return cl->pq_key - jiffies;
260 +               if (time_after(cl->pq_key, q->jiffies)) {
261 +                       HTB_DBG(8,3,"htb_do_ev_ret delay=%ld\n",cl->pq_key - q->jiffies);
262 +                       return cl->pq_key - q->jiffies;
263                 }
264                 htb_safe_rb_erase(p,q->wait_pq+level);
265                 diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
266 @@ -889,7 +895,7 @@
267                                        cl->classid, diff,
268                                        (unsigned long long) q->now,
269                                        (unsigned long long) cl->t_c,
270 -                                      jiffies);
271 +                                      q->jiffies);
272                         diff = 1000;
273                 }
274  #endif
275 @@ -916,6 +922,7 @@
276                 rb_node_t **pptr;
277         } stk[TC_HTB_MAXDEPTH],*sp = stk;
278         
279 +       BUG_TRAP(tree->rb_node);
280         sp->root = tree->rb_node;
281         sp->pptr = pptr;
282  
283 @@ -949,16 +956,36 @@
284  htb_dequeue_tree(struct htb_sched *q,int prio,int level)
285  {
286         struct sk_buff *skb = NULL;
287 -       //struct htb_sched *q = (struct htb_sched *)sch->data;
288         struct htb_class *cl,*start;
289         /* look initial class up in the row */
290 -       DEVIK_MSTART(6);
291         start = cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
292         
293         do {
294 -               BUG_TRAP(cl && cl->un.leaf.q->q.qlen); if (!cl) return NULL;
295 +next:
296 +               BUG_TRAP(cl); 
297 +               if (!cl) return NULL;
298                 HTB_DBG(4,1,"htb_deq_tr prio=%d lev=%d cl=%X defic=%d\n",
299                                 prio,level,cl->classid,cl->un.leaf.deficit[level]);
300 +
301 +               /* class can be empty - it is unlikely but can be true if leaf
302 +                  qdisc drops packets in enqueue routine or if someone used
303 +                  graft operation on the leaf since last dequeue; 
304 +                  simply deactivate and skip such class */
305 +               if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
306 +                       struct htb_class *next;
307 +                       htb_deactivate(q,cl);
308 +
309 +                       /* row/level might become empty */
310 +                       if ((q->row_mask[level] & (1 << prio)) == 0)
311 +                               return NULL; 
312 +                       
313 +                       next = htb_lookup_leaf (q->row[level]+prio,
314 +                                       prio,q->ptr[level]+prio);
315 +                       if (cl == start) /* fix start if we just deleted it */
316 +                               start = next;
317 +                       cl = next;
318 +                       goto next;
319 +               }
320         
321                 if (likely((skb = cl->un.leaf.q->dequeue(cl->un.leaf.q)) != NULL)) 
322                         break;
323 @@ -971,8 +998,6 @@
324                 cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
325         } while (cl != start);
326  
327 -       DEVIK_MEND(6);
328 -       DEVIK_MSTART(7);
329         if (likely(skb != NULL)) {
330                 if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
331                         HTB_DBG(4,2,"htb_next_cl oldptr=%p quant_add=%d\n",
332 @@ -984,11 +1009,8 @@
333                    gives us slightly better performance */
334                 if (!cl->un.leaf.q->q.qlen)
335                         htb_deactivate (q,cl);
336 -       DEVIK_MSTART(8);
337                 htb_charge_class (q,cl,level,skb->len);
338 -       DEVIK_MEND(8);
339         }
340 -       DEVIK_MEND(7);
341         return skb;
342  }
343  
344 @@ -1002,9 +1024,8 @@
345                         printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
346                 delay = 5*HZ;
347         }
348 -       del_timer(&q->timer);
349 -       q->timer.expires = jiffies + delay;
350 -       add_timer(&q->timer);
351 +       /* why don't we use jiffies here? because expires can be in the past */
352 +       mod_timer(&q->timer, q->jiffies + delay);
353         sch->flags |= TCQ_F_THROTTLED;
354         sch->stats.overlimits++;
355         HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay);
356 @@ -1016,7 +1037,11 @@
357         struct htb_sched *q = (struct htb_sched *)sch->data;
358         int level;
359         long min_delay;
360 +#ifdef HTB_DEBUG
361 +       int evs_used = 0;
362 +#endif
363  
364 +       q->jiffies = jiffies;
365         HTB_DBG(3,1,"htb_deq dircnt=%d qlen=%d\n",skb_queue_len(&q->direct_queue),
366                         sch->q.qlen);
367  
368 @@ -1027,27 +1052,26 @@
369                 return skb;
370         }
371  
372 -       DEVIK_MSTART(2);
373         if (!sch->q.qlen) goto fin;
374         PSCHED_GET_TIME(q->now);
375  
376 -       min_delay = HZ*5;
377 +       min_delay = LONG_MAX;
378         q->nwc_hit = 0;
379         for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
380                 /* common case optimization - skip event handler quickly */
381                 int m;
382                 long delay;
383 -       DEVIK_MSTART(3);
384 -               if (jiffies - q->near_ev_cache[level] < 0x80000000 || 0) {
385 +               if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
386                         delay = htb_do_events(q,level);
387 -                       q->near_ev_cache[level] += delay ? delay : HZ;
388 +                       q->near_ev_cache[level] = q->jiffies + (delay ? delay : HZ);
389 +#ifdef HTB_DEBUG
390 +                       evs_used++;
391 +#endif
392                 } else
393 -                       delay = q->near_ev_cache[level] - jiffies;      
394 +                       delay = q->near_ev_cache[level] - q->jiffies;   
395                 
396                 if (delay && min_delay > delay) 
397                         min_delay = delay;
398 -       DEVIK_MEND(3);
399 -       DEVIK_MSTART(5);
400                 m = ~q->row_mask[level];
401                 while (m != (int)(-1)) {
402                         int prio = ffz (m);
403 @@ -1056,29 +1080,29 @@
404                         if (likely(skb != NULL)) {
405                                 sch->q.qlen--;
406                                 sch->flags &= ~TCQ_F_THROTTLED;
407 -       DEVIK_MEND(5);
408                                 goto fin;
409                         }
410                 }
411 -       DEVIK_MEND(5);
412         }
413 -       DEVIK_MSTART(4);
414  #ifdef HTB_DEBUG
415 -       if (!q->nwc_hit && min_delay >= 5*HZ && net_ratelimit()) { 
416 -               printk(KERN_ERR "HTB: mindelay=%ld, report it please !\n",min_delay);
417 -               htb_debug_dump(q);
418 +       if (!q->nwc_hit && min_delay >= 10*HZ && net_ratelimit()) {
419 +               if (min_delay == LONG_MAX) {
420 +                       printk(KERN_ERR "HTB: dequeue bug (%d,%lu,%lu), report it please !\n",
421 +                                       evs_used,q->jiffies,jiffies);
422 +                       htb_debug_dump(q);
423 +               } else 
424 +                       printk(KERN_WARNING "HTB: mindelay=%ld, some class has "
425 +                                       "too small rate\n",min_delay);
426         }
427  #endif
428 -       htb_delay_by (sch,min_delay);
429 -       DEVIK_MEND(4);
430 +       htb_delay_by (sch,min_delay > 5*HZ ? 5*HZ : min_delay);
431  fin:
432 -       HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p\n",sch->dev->name,jiffies,skb);
433 -       DEVIK_MEND(2);
434 +       HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p\n",sch->dev->name,q->jiffies,skb);
435         return skb;
436  }
437  
438  /* try to drop from each class (by prio) until one succeed */
439 -static int htb_drop(struct Qdisc* sch)
440 +static unsigned int htb_drop(struct Qdisc* sch)
441  {
442         struct htb_sched *q = (struct htb_sched *)sch->data;
443         int prio;
444 @@ -1086,14 +1110,15 @@
445         for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
446                 struct list_head *p;
447                 list_for_each (p,q->drops+prio) {
448 -                       struct htb_class *cl = list_entry(p,struct htb_class,
449 -                                       un.leaf.drop_list);
450 +                       struct htb_class *cl = list_entry(p, struct htb_class,
451 +                                                         un.leaf.drop_list);
452 +                       unsigned int len;
453                         if (cl->un.leaf.q->ops->drop && 
454 -                               cl->un.leaf.q->ops->drop(cl->un.leaf.q)) {
455 +                               (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
456                                 sch->q.qlen--;
457                                 if (!cl->un.leaf.q->q.qlen)
458                                         htb_deactivate (q,cl);
459 -                               return 1;
460 +                               return len;
461                         }
462                 }
463         }
464 @@ -1208,7 +1233,8 @@
465         gopt.direct_pkts = q->direct_pkts;
466  
467  #ifdef HTB_DEBUG
468 -       htb_debug_dump(q);
469 +       if (HTB_DBG_COND(0,2))
470 +               htb_debug_dump(q);
471  #endif
472         gopt.version = HTB_VER;
473         gopt.rate2quantum = q->rate2quantum;
474 @@ -1289,6 +1315,9 @@
475                                         return -ENOBUFS;
476                 sch_tree_lock(sch);
477                 if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
478 +                       if (cl->prio_activity)
479 +                               htb_deactivate ((struct htb_sched*)sch->data,cl);
480 +
481                         /* TODO: is it correct ? Why CBQ doesn't do it ? */
482                         sch->q.qlen -= (*old)->q.qlen;  
483                         qdisc_reset(*old);
484 @@ -1323,7 +1352,7 @@
485  
486         while ((tp = *fl) != NULL) {
487                 *fl = tp->next;
488 -               tp->ops->destroy(tp);
489 +               tcf_destroy(tp);
490         }
491  }
492  
493 @@ -1371,11 +1400,16 @@
494  #ifdef HTB_RATECM
495         del_timer_sync (&q->rttim);
496  #endif
497 +       /* This line used to be after the htb_destroy_class call below
498 +          and surprisingly it worked in 2.4. But it must precede it 
499 +          because filters need their target class alive to be able to call
500 +          unbind_filter on it (without an Oops). */
501 +       htb_destroy_filters(&q->filter_list);
502 +       
503         while (!list_empty(&q->root)) 
504                 htb_destroy_class (sch,list_entry(q->root.next,
505                                         struct htb_class,sibling));
506  
507 -       htb_destroy_filters(&q->filter_list);
508         __skb_queue_purge(&q->direct_queue);
509         MOD_DEC_USE_COUNT;
510  }
511 @@ -1438,12 +1472,13 @@
512         parent = parentid == TC_H_ROOT ? NULL : htb_find (parentid,sch);
513  
514         hopt = RTA_DATA(tb[TCA_HTB_PARMS-1]);
515 -       HTB_DBG(0,1,"htb_chg cl=%p, clid=%X, opt/prio=%d, rate=%u, buff=%d, quant=%d\n", cl,cl?cl->classid:0,(int)hopt->prio,hopt->rate.rate,hopt->buffer,hopt->quantum);
516 +       HTB_DBG(0,1,"htb_chg cl=%p(%X), clid=%X, parid=%X, opt/prio=%d, rate=%u, buff=%d, quant=%d\n", cl,cl?cl->classid:0,classid,parentid,(int)hopt->prio,hopt->rate.rate,hopt->buffer,hopt->quantum);
517         rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB-1]);
518         ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB-1]);
519         if (!rtab || !ctab) goto failure;
520  
521         if (!cl) { /* new class */
522 +               struct Qdisc *new_q;
523                 /* check for valid classid */
524                 if (!classid || TC_H_MAJ(classid^sch->handle) || htb_find(classid,sch))
525                         goto failure;
526 @@ -1467,6 +1502,10 @@
527                 cl->magic = HTB_CMAGIC;
528  #endif
529  
530 +               /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
531 +                  so that can't be used inside of sch_tree_lock
532 +                  -- thanks to Karlis Peisenieks */
533 +               new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
534                 sch_tree_lock(sch);
535                 if (parent && !parent->level) {
536                         /* turn parent into inner node */
537 @@ -1485,8 +1524,7 @@
538                         memset (&parent->un.inner,0,sizeof(parent->un.inner));
539                 }
540                 /* leaf (we) needs elementary qdisc */
541 -               if (!(cl->un.leaf.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
542 -                       cl->un.leaf.q = &noop_qdisc;
543 +               cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
544  
545                 cl->classid = classid; cl->parent = parent;
546  
547 @@ -1514,11 +1552,11 @@
548         if (!cl->level) {
549                 cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
550                 if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
551 -                       printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.", cl->classid);
552 +                       printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->classid);
553                         cl->un.leaf.quantum = 1000;
554                 }
555                 if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
556 -                       printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.", cl->classid);
557 +                       printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.\n", cl->classid);
558                         cl->un.leaf.quantum = 200000;
559                 }
560                 if (hopt->quantum)
561 --- linux-2.4.20/include/net/pkt_cls.h~110-schhtb       2005-01-07 02:53:54.529709000 -0500
562 +++ linux-2.4.20/include/net/pkt_cls.h  2005-01-07 02:53:55.918498816 -0500
563 @@ -77,7 +77,11 @@
564         return -1;
565  }
566  
567 -
568 +static inline void tcf_destroy(struct tcf_proto *tp)
569 +{
570 +       tp->ops->destroy(tp);
571 +       kfree(tp);
572 +}
573  
574  extern int register_tcf_proto_ops(struct tcf_proto_ops *ops);
575  extern int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);