familiar-h63xx-build.git: org.handhelds.familiar/packages/linux/openzaurus-pxa27x-2.4.20-rmk2-embedix20050602/P14-lowlatency_041221.patch
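Summary: this patch brings low-latency scheduling to the 2.4.20-rmk2-embedix kernel used for the pxa27x Zaurus. It adds a CONFIG_LOLAT option ("Low latency scheduling") plus CONFIG_LOLAT_SYSCTL ("Control low latency with sysctl", exposed as the KERN_LOWLATENCY sysctl) to the ARM and i386 config menus, introduces include/linux/low-latency.h (which carries Andrew Morton's attribution) with conditional_schedule(), conditional_schedule_needed(), unconditional_schedule(), the RESCHED_COUNT helpers and the chunked ll_copy_to_user()/ll_copy_from_user() copies, and then sprinkles reschedule points through long-running kernel paths: the block layer, buffer/dcache/inode code, ext2/ext3/JBD/reiserfs, /proc, the page cache, and memory.c, where zap_page_range() gains an "actions" argument so ZPR_COND_RESCHED callers unmap in MAX_ZAP_BYTES chunks.

The core idiom, repeated throughout the hunks below, is to drop a spinlock when a reschedule is pending, yield, and restart the scan. The following is an illustrative sketch only, not part of the patch; example_lock, example_list and example_scan() are made-up names, while the low-latency primitives are the ones defined in include/linux/low-latency.h further down:

	static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;
	static LIST_HEAD(example_list);

	static void example_scan(void)
	{
		struct list_head *p;

	restart:
		spin_lock(&example_lock);
		list_for_each(p, &example_list) {
			if (conditional_schedule_needed()) {	/* current->need_resched set */
				spin_unlock(&example_lock);	/* never sleep with a spinlock held */
				unconditional_schedule();	/* set TASK_RUNNING and schedule() */
				goto restart;			/* list may have changed; rescan */
			}
			/* ... per-entry work ... */
		}
		spin_unlock(&example_lock);
	}

Where polling need_resched on every iteration would be too costly, the patch instead uses DEFINE_RESCHED_COUNT / TEST_RESCHED_COUNT(n) / RESET_RESCHED_COUNT() so the check only fires every n iterations.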
1 diff -Nur c3000_pre/linux/arch/arm/config.in c3000_test/linux/arch/arm/config.in
2 --- c3000_pre/linux/arch/arm/config.in  2004-12-16 22:55:34.000000000 +0900
3 +++ c3000_test/linux/arch/arm/config.in 2004-12-20 23:23:28.000000000 +0900
4 @@ -574,6 +574,8 @@
5     fi
6  fi
7  dep_bool 'Preemptible Kernel support' CONFIG_PREEMPT $CONFIG_CPU_32
8 +bool 'Low latency scheduling' CONFIG_LOLAT
9 +dep_bool 'Control low latency with sysctl' CONFIG_LOLAT_SYSCTL $CONFIG_LOLAT
10  
11  endmenu
12  
13 diff -Nur c3000_pre/linux/arch/i386/config.in c3000_test/linux/arch/i386/config.in
14 --- c3000_pre/linux/arch/i386/config.in 2004-08-21 09:48:09.000000000 +0900
15 +++ c3000_test/linux/arch/i386/config.in        2004-12-20 22:56:21.000000000 +0900
16 @@ -25,6 +25,9 @@
17  
18  mainmenu_option next_comment
19  comment 'Processor type and features'
20 +bool 'Low latency scheduling' CONFIG_LOLAT
21 +dep_bool 'Control low latency with sysctl' CONFIG_LOLAT_SYSCTL $CONFIG_LOLAT
22 +
23  choice 'Processor family' \
24         "386                                    CONFIG_M386 \
25          486                                    CONFIG_M486 \
26 diff -Nur c3000_pre/linux/drivers/block/ll_rw_blk.c c3000_test/linux/drivers/block/ll_rw_blk.c
27 --- c3000_pre/linux/drivers/block/ll_rw_blk.c   2004-08-21 09:48:24.000000000 +0900
28 +++ c3000_test/linux/drivers/block/ll_rw_blk.c  2004-12-20 22:56:21.000000000 +0900
29 @@ -1211,6 +1211,7 @@
30                         kstat.pgpgin += count;
31                         break;
32         }
33 +       conditional_schedule();
34  }
35  
36  /**
37 diff -Nur c3000_pre/linux/drivers/char/mem.c c3000_test/linux/drivers/char/mem.c
38 --- c3000_pre/linux/drivers/char/mem.c  2004-08-21 09:48:25.000000000 +0900
39 +++ c3000_test/linux/drivers/char/mem.c 2004-12-20 22:56:21.000000000 +0900
40 @@ -422,7 +422,7 @@
41                 if (count > size)
42                         count = size;
43  
44 -               zap_page_range(mm, addr, count);
45 +               zap_page_range(mm, addr, count, 0);
46                 zeromap_page_range(addr, count, PAGE_COPY);
47  
48                 size -= count;
49 diff -Nur c3000_pre/linux/drivers/char/random.c c3000_test/linux/drivers/char/random.c
50 --- c3000_pre/linux/drivers/char/random.c       2004-08-21 09:48:25.000000000 +0900
51 +++ c3000_test/linux/drivers/char/random.c      2004-12-20 22:56:21.000000000 +0900
52 @@ -1374,6 +1374,11 @@
53                 buf += i;
54                 ret += i;
55                 add_timer_randomness(&extract_timer_state, nbytes);
56 +#if LOWLATENCY_NEEDED
57 +               /* This can happen in softirq's, but that's what we want */
58 +               if (conditional_schedule_needed())
59 +                       break;
60 +#endif
61         }
62  
63         /* Wipe data just returned from memory */
64 diff -Nur c3000_pre/linux/drivers/i2c/i2c-core.c c3000_test/linux/drivers/i2c/i2c-core.c
65 --- c3000_pre/linux/drivers/i2c/i2c-core.c      2004-08-21 09:48:34.000000000 +0900
66 +++ c3000_test/linux/drivers/i2c/i2c-core.c     2004-12-20 22:56:21.000000000 +0900
67 @@ -761,6 +761,8 @@
68  {
69         int ret;
70  
71 +       conditional_schedule();
72 +
73         if (adap->algo->master_xfer) {
74                 DEB2(printk("i2c-core.o: master_xfer: %s with %d msgs.\n",
75                             adap->name,num));
76 @@ -783,6 +785,8 @@
77         struct i2c_adapter *adap=client->adapter;
78         struct i2c_msg msg;
79  
80 +       conditional_schedule();
81 +
82         if (client->adapter->algo->master_xfer) {
83                 msg.addr   = client->addr;
84                 msg.flags = client->flags & I2C_M_TEN;
85 @@ -812,6 +816,9 @@
86         struct i2c_adapter *adap=client->adapter;
87         struct i2c_msg msg;
88         int ret;
89 +       
90 +       conditional_schedule();  
91 +        
92         if (client->adapter->algo->master_xfer) {
93                 msg.addr   = client->addr;
94                 msg.flags = client->flags & I2C_M_TEN;
95 diff -Nur c3000_pre/linux/fs/buffer.c c3000_test/linux/fs/buffer.c
96 --- c3000_pre/linux/fs/buffer.c 2004-08-21 09:48:58.000000000 +0900
97 +++ c3000_test/linux/fs/buffer.c        2004-12-20 22:56:21.000000000 +0900
98 @@ -216,8 +216,10 @@
99  
100                 if (dev != NODEV && bh->b_dev != dev)
101                         continue;
102 -               if (test_and_set_bit(BH_Lock, &bh->b_state))
103 +               if (test_and_set_bit(BH_Lock, &bh->b_state)) {
104 +                       __refile_buffer(bh);
105                         continue;
106 +               }
107                 if (atomic_set_buffer_clean(bh)) {
108                         __refile_buffer(bh);
109                         get_bh(bh);
110 @@ -227,6 +229,7 @@
111  
112                         spin_unlock(&lru_list_lock);
113                         write_locked_buffers(array, count);
114 +                       conditional_schedule();
115                         return -EAGAIN;
116                 }
117                 unlock_buffer(bh);
118 @@ -260,12 +263,19 @@
119         struct buffer_head * next;
120         int nr;
121  
122 -       next = lru_list[index];
123         nr = nr_buffers_type[index];
124 +repeat:
125 +       next = lru_list[index];
126         while (next && --nr >= 0) {
127                 struct buffer_head *bh = next;
128                 next = bh->b_next_free;
129  
130 +               if (conditional_schedule_needed()) {
131 +                       spin_unlock(&lru_list_lock);
132 +                       unconditional_schedule();
133 +                       spin_lock(&lru_list_lock);
134 +                       goto repeat;
135 +               }
136                 if (!buffer_locked(bh)) {
137                         if (refile)
138                                 __refile_buffer(bh);
139 @@ -273,7 +283,6 @@
140                 }
141                 if (dev != NODEV && bh->b_dev != dev)
142                         continue;
143 -
144                 get_bh(bh);
145                 spin_unlock(&lru_list_lock);
146                 wait_on_buffer (bh);
147 @@ -306,6 +315,15 @@
148  {
149         int err = 0;
150  
151 +#if LOWLATENCY_NEEDED
152 +       /*
153 +        * syncing devA when there are lots of buffers dirty against
154 +        * devB is expensive.
155 +        */
156 +       if (enable_lowlatency)
157 +               dev = NODEV;
158 +#endif
159 +
160         /* One pass for no-wait, three for wait:
161          * 0) write out all dirty, unlocked buffers;
162          * 1) wait for all dirty locked buffers;
163 @@ -697,6 +715,16 @@
164                         /* Not hashed? */
165                         if (!bh->b_pprev)
166                                 continue;
167 +
168 +                       if (conditional_schedule_needed()) {
169 +                               get_bh(bh);
170 +                               spin_unlock(&lru_list_lock);
171 +                               unconditional_schedule();
172 +                               spin_lock(&lru_list_lock);
173 +                               put_bh(bh);
174 +                               slept = 1;
175 +                       }
176 +
177                         if (buffer_locked(bh)) {
178                                 get_bh(bh);
179                                 spin_unlock(&lru_list_lock);
180 @@ -848,12 +876,19 @@
181         struct buffer_head *bh;
182         struct inode tmp;
183         int err = 0, err2;
184 -       
185 +       DEFINE_RESCHED_COUNT;
186 +
187         INIT_LIST_HEAD(&tmp.i_dirty_buffers);
188 -       
189 +
190 +repeat:
191         spin_lock(&lru_list_lock);
192  
193         while (!list_empty(list)) {
194 +               if (conditional_schedule_needed()) {
195 +                       spin_unlock(&lru_list_lock);
196 +                       unconditional_schedule();
197 +                       goto repeat;
198 +               }
199                 bh = BH_ENTRY(list->next);
200                 list_del(&bh->b_inode_buffers);
201                 if (!buffer_dirty(bh) && !buffer_locked(bh))
202 @@ -878,8 +913,18 @@
203                                 spin_lock(&lru_list_lock);
204                         }
205                 }
206 +               if (TEST_RESCHED_COUNT(32)) {
207 +                       RESET_RESCHED_COUNT();
208 +                       if (conditional_schedule_needed()) {
209 +                               spin_unlock(&lru_list_lock);
210 +                               unconditional_schedule();       /* Syncing many dirty buffers */
211 +                               spin_lock(&lru_list_lock);
212 +                       }
213 +               }
214         }
215  
216 +       RESET_RESCHED_COUNT();
217 +
218         while (!list_empty(&tmp.i_dirty_buffers)) {
219                 bh = BH_ENTRY(tmp.i_dirty_buffers.prev);
220                 remove_inode_queue(bh);
221 @@ -889,6 +934,7 @@
222                 if (!buffer_uptodate(bh))
223                         err = -EIO;
224                 brelse(bh);
225 +               conditional_schedule();
226                 spin_lock(&lru_list_lock);
227         }
228         
229 @@ -916,11 +962,20 @@
230         struct buffer_head *bh;
231         struct list_head *p;
232         int err = 0;
233 +       DEFINE_RESCHED_COUNT;
234  
235 +repeat:
236 +       conditional_schedule();
237         spin_lock(&lru_list_lock);
238         
239 - repeat:
240         list_for_each_prev(p, list) {
241 +               if (TEST_RESCHED_COUNT(32)) {
242 +                       RESET_RESCHED_COUNT();
243 +                       if (conditional_schedule_needed()) {
244 +                               spin_unlock(&lru_list_lock);
245 +                               goto repeat;
246 +                       }
247 +               }
248                 bh = BH_ENTRY(p);
249                 if (buffer_locked(bh)) {
250                         get_bh(bh);
251 @@ -929,7 +984,6 @@
252                         if (!buffer_uptodate(bh))
253                                 err = -EIO;
254                         brelse(bh);
255 -                       spin_lock(&lru_list_lock);
256                         goto repeat;
257                 }
258         }
259 @@ -946,12 +1000,24 @@
260  void invalidate_inode_buffers(struct inode *inode)
261  {
262         struct list_head * entry;
263 -       
264 +
265 +repeat:
266 +       conditional_schedule(); 
267         spin_lock(&lru_list_lock);
268 -       while ((entry = inode->i_dirty_buffers.next) != &inode->i_dirty_buffers)
269 +       while ((entry = inode->i_dirty_buffers.next) != &inode->i_dirty_buffers) {
270 +               if (conditional_schedule_needed()) {
271 +                       spin_unlock(&lru_list_lock);
272 +                       goto repeat;
273 +               }
274                 remove_inode_queue(BH_ENTRY(entry));
275 -       while ((entry = inode->i_dirty_data_buffers.next) != &inode->i_dirty_data_buffers)
276 +       }
277 +       while ((entry = inode->i_dirty_data_buffers.next) != &inode->i_dirty_data_buffers) {
278 +               if (conditional_schedule_needed()) {
279 +                       spin_unlock(&lru_list_lock);
280 +                       goto repeat;
281 +               }
282                 remove_inode_queue(BH_ENTRY(entry));
283 +       }
284         spin_unlock(&lru_list_lock);
285  }
286  
287 @@ -974,6 +1040,7 @@
288                 bh = get_hash_table(dev, block, size);
289                 if (bh) {
290                         touch_buffer(bh);
291 +                       conditional_schedule();
292                         return bh;
293                 }
294  
295 @@ -2831,7 +2898,7 @@
296  
297  DECLARE_WAIT_QUEUE_HEAD(bdflush_wait);
298  
299 -void wakeup_bdflush(void)
300 +void wakeup_bdflush(void) 
301  {
302         wake_up_interruptible(&bdflush_wait);
303  }
304 diff -Nur c3000_pre/linux/fs/dcache.c c3000_test/linux/fs/dcache.c
305 --- c3000_pre/linux/fs/dcache.c 2004-08-21 09:48:58.000000000 +0900
306 +++ c3000_test/linux/fs/dcache.c        2004-12-20 22:56:21.000000000 +0900
307 @@ -320,11 +320,23 @@
308   
309  void prune_dcache(int count)
310  {
311 +       DEFINE_RESCHED_COUNT;
312 +
313 +redo:
314         spin_lock(&dcache_lock);
315         for (;;) {
316                 struct dentry *dentry;
317                 struct list_head *tmp;
318  
319 +               if (TEST_RESCHED_COUNT(100)) {
320 +                       RESET_RESCHED_COUNT();
321 +                       if (conditional_schedule_needed()) {
322 +                               spin_unlock(&dcache_lock);
323 +                               unconditional_schedule();
324 +                               goto redo;
325 +                       }
326 +               }
327 +
328                 tmp = dentry_unused.prev;
329  
330                 if (tmp == &dentry_unused)
331 @@ -479,6 +491,7 @@
332         struct dentry *this_parent = parent;
333         struct list_head *next;
334         int found = 0;
335 +       DEFINE_RESCHED_COUNT;
336  
337         spin_lock(&dcache_lock);
338  repeat:
339 @@ -493,6 +506,13 @@
340                         list_add(&dentry->d_lru, dentry_unused.prev);
341                         found++;
342                 }
343 +
344 +               if (TEST_RESCHED_COUNT(500) && found > 10) {
345 +                       if (conditional_schedule_needed())      /* Typically sys_rmdir() */
346 +                               goto out;
347 +                       RESET_RESCHED_COUNT();
348 +               }
349 +
350                 /*
351                  * Descend a level if the d_subdirs list is non-empty.
352                  */
353 @@ -517,6 +537,7 @@
354  #endif
355                 goto resume;
356         }
357 +out:
358         spin_unlock(&dcache_lock);
359         return found;
360  }
361 @@ -532,8 +553,10 @@
362  {
363         int found;
364  
365 -       while ((found = select_parent(parent)) != 0)
366 +       while ((found = select_parent(parent)) != 0) {
367                 prune_dcache(found);
368 +               conditional_schedule();         /* Typically sys_rmdir() */
369 +       }
370  }
371  
372  /*
373 diff -Nur c3000_pre/linux/fs/exec.c c3000_test/linux/fs/exec.c
374 --- c3000_pre/linux/fs/exec.c   2004-08-21 09:48:58.000000000 +0900
375 +++ c3000_test/linux/fs/exec.c  2004-12-20 22:56:21.000000000 +0900
376 @@ -249,7 +249,7 @@
377                                         memset(kaddr+offset+len, 0,
378                                                 PAGE_SIZE-offset-len);
379                         }
380 -                       err = copy_from_user(kaddr+offset, str, bytes_to_copy);
381 +                       err = ll_copy_from_user(kaddr+offset, str, bytes_to_copy);
382                         if (err) {
383                                 ret = -EFAULT;
384                                 goto out;
385 diff -Nur c3000_pre/linux/fs/ext2/dir.c c3000_test/linux/fs/ext2/dir.c
386 --- c3000_pre/linux/fs/ext2/dir.c       2004-08-21 09:48:59.000000000 +0900
387 +++ c3000_test/linux/fs/ext2/dir.c      2004-12-20 22:56:21.000000000 +0900
388 @@ -153,6 +153,7 @@
389         struct address_space *mapping = dir->i_mapping;
390         struct page *page = read_cache_page(mapping, n,
391                                 (filler_t*)mapping->a_ops->readpage, NULL);
392 +       conditional_schedule();         /* Scanning large directories */
393         if (!IS_ERR(page)) {
394                 wait_on_page(page);
395                 kmap(page);
396 diff -Nur c3000_pre/linux/fs/ext2/inode.c c3000_test/linux/fs/ext2/inode.c
397 --- c3000_pre/linux/fs/ext2/inode.c     2004-08-21 09:48:59.000000000 +0900
398 +++ c3000_test/linux/fs/ext2/inode.c    2004-12-20 22:56:21.000000000 +0900
399 @@ -715,8 +715,13 @@
400  {
401         unsigned long block_to_free = 0, count = 0;
402         unsigned long nr;
403 +       DEFINE_RESCHED_COUNT;
404  
405         for ( ; p < q ; p++) {
406 +               if (TEST_RESCHED_COUNT(32)) {
407 +                       RESET_RESCHED_COUNT();
408 +                       conditional_schedule();
409 +               }
410                 nr = le32_to_cpu(*p);
411                 if (nr) {
412                         *p = 0;
413 @@ -759,6 +764,7 @@
414         if (depth--) {
415                 int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
416                 for ( ; p < q ; p++) {
417 +                       conditional_schedule();         /* Deleting large files */
418                         nr = le32_to_cpu(*p);
419                         if (!nr)
420                                 continue;
421 diff -Nur c3000_pre/linux/fs/ext3/balloc.c c3000_test/linux/fs/ext3/balloc.c
422 --- c3000_pre/linux/fs/ext3/balloc.c    2004-08-21 09:48:59.000000000 +0900
423 +++ c3000_test/linux/fs/ext3/balloc.c   2004-12-20 22:56:21.000000000 +0900
424 @@ -363,6 +363,9 @@
425                         }
426                 }
427  #endif
428 +               /* superblock lock is held, so this is safe */
429 +               conditional_schedule();
430 +
431                 BUFFER_TRACE(bitmap_bh, "clear bit");
432                 if (!ext3_clear_bit (bit + i, bitmap_bh->b_data)) {
433                         ext3_error (sb, __FUNCTION__,
434 diff -Nur c3000_pre/linux/fs/ext3/inode.c c3000_test/linux/fs/ext3/inode.c
435 --- c3000_pre/linux/fs/ext3/inode.c     2004-08-21 09:48:59.000000000 +0900
436 +++ c3000_test/linux/fs/ext3/inode.c    2004-12-20 22:56:21.000000000 +0900
437 @@ -902,6 +902,8 @@
438  
439         prev_blocks = inode->i_blocks;
440  
441 +       conditional_schedule();         /* Reading large directories */
442 +
443         bh = ext3_getblk (handle, inode, block, create, err);
444         if (!bh)
445                 return bh;
446 @@ -1605,6 +1607,7 @@
447          */
448         for (p = first; p < last; p++) {
449                 u32 nr = le32_to_cpu(*p);
450 +               conditional_schedule();
451                 if (nr) {
452                         struct buffer_head *bh;
453  
454 @@ -1659,6 +1662,7 @@
455         }
456  
457         for (p = first; p < last; p++) {
458 +               conditional_schedule();
459                 nr = le32_to_cpu(*p);
460                 if (nr) {
461                         /* accumulate blocks to free if they're contiguous */
462 diff -Nur c3000_pre/linux/fs/ext3/namei.c c3000_test/linux/fs/ext3/namei.c
463 --- c3000_pre/linux/fs/ext3/namei.c     2004-08-21 09:48:59.000000000 +0900
464 +++ c3000_test/linux/fs/ext3/namei.c    2004-12-20 22:56:21.000000000 +0900
465 @@ -157,6 +157,7 @@
466                 if ((bh = bh_use[ra_ptr++]) == NULL)
467                         goto next;
468                 wait_on_buffer(bh);
469 +               conditional_schedule();
470                 if (!buffer_uptodate(bh)) {
471                         /* read error, skip block & hope for the best */
472                         brelse(bh);
473 diff -Nur c3000_pre/linux/fs/inode.c c3000_test/linux/fs/inode.c
474 --- c3000_pre/linux/fs/inode.c  2004-08-21 09:48:58.000000000 +0900
475 +++ c3000_test/linux/fs/inode.c 2004-12-20 23:00:06.000000000 +0900
476 @@ -251,6 +251,8 @@
477  
478         filemap_fdatawait(inode->i_mapping);
479  
480 +       conditional_schedule();
481 +
482         spin_lock(&inode_lock);
483         inode->i_state &= ~I_LOCK;
484         if (!(inode->i_state & I_FREEING)) {
485 @@ -561,6 +563,7 @@
486  
487         while ((inode_entry = head->next) != head)
488         {
489 +               conditional_schedule();
490                 list_del(inode_entry);
491  
492                 inode = list_entry(inode_entry, struct inode, i_list);
493 @@ -589,9 +592,22 @@
494                 if (tmp == head)
495                         break;
496                 inode = list_entry(tmp, struct inode, i_list);
497 +
498 +               if (conditional_schedule_needed()) {
499 +                       atomic_inc(&inode->i_count);
500 +                       spin_unlock(&inode_lock);
501 +                       unconditional_schedule();
502 +                       spin_lock(&inode_lock);
503 +                       atomic_dec(&inode->i_count);
504 +               }
505 +                       
506                 if (inode->i_sb != sb)
507                         continue;
508 +               atomic_inc(&inode->i_count);
509 +               spin_unlock(&inode_lock);
510                 invalidate_inode_buffers(inode);
511 +               spin_lock(&inode_lock);
512 +               atomic_dec(&inode->i_count);
513                 if (!atomic_read(&inode->i_count)) {
514                         list_del_init(&inode->i_hash);
515                         list_del(&inode->i_list);
516 @@ -896,6 +912,8 @@
517         if (inode) {
518                 struct inode * old;
519  
520 +               conditional_schedule();                 /* sync_old_buffers */
521 +
522                 spin_lock(&inode_lock);
523                 /* We released the lock, so.. */
524                 old = find_inode(sb, ino, head, find_actor, opaque);
525 @@ -1313,18 +1331,32 @@
526         int request=goal;
527         int loop=0;
528  #endif
529 +       int nr_to_scan = inodes_stat.nr_unused;
530  
531 +resume:
532         spin_lock(&inode_lock);
533  
534         count = 0;
535         entry = inode_unused.prev;
536 -       while (entry != &inode_unused)
537 -       {
538 +       while (entry != &inode_unused && nr_to_scan--) {
539                 struct list_head *tmp = entry;
540  
541  #ifdef JFFS2_INODE_DEBUG
542                 loop++;
543  #endif
544 +               if (conditional_schedule_needed()) {
545 +                       /*
546 +                        * Need to drop the lock.  Reposition
547 +                        * the list head so we start here next time.
548 +                        * This can corrupt the LRU nature of the
549 +                        * unused list, but this isn't very important.
550 +                        */
551 +                       list_del(&inode_unused);
552 +                       list_add(&inode_unused, entry);
553 +                       spin_unlock(&inode_lock);
554 +                       unconditional_schedule();
555 +                       goto resume;
556 +               }
557                 entry = entry->prev;
558                 inode = INODE(tmp);
559                 if (inode->i_state & (I_FREEING|I_CLEAR|I_LOCK))
560 diff -Nur c3000_pre/linux/fs/jbd/checkpoint.c c3000_test/linux/fs/jbd/checkpoint.c
561 --- c3000_pre/linux/fs/jbd/checkpoint.c 2004-08-21 09:48:59.000000000 +0900
562 +++ c3000_test/linux/fs/jbd/checkpoint.c        2004-12-20 22:56:21.000000000 +0900
563 @@ -431,7 +431,11 @@
564  {
565         transaction_t *transaction, *last_transaction, *next_transaction;
566         int ret = 0;
567 +       int ll_retries = 4;             /* lowlatency addition */
568  
569 +restart:
570 +       if (ll_retries-- == 0)
571 +               goto out;
572         transaction = journal->j_checkpoint_transactions;
573         if (transaction == 0)
574                 goto out;
575 @@ -451,6 +455,12 @@
576                                 jh = next_jh;
577                                 next_jh = jh->b_cpnext;
578                                 ret += __try_to_free_cp_buf(jh);
579 +                               if (conditional_schedule_needed()) {
580 +                                       spin_unlock(&journal_datalist_lock);
581 +                                       unconditional_schedule();
582 +                                       spin_lock(&journal_datalist_lock);
583 +                                       goto restart;
584 +                               }
585                         } while (jh != last_jh);
586                 }
587         } while (transaction != last_transaction);
588 diff -Nur c3000_pre/linux/fs/jbd/commit.c c3000_test/linux/fs/jbd/commit.c
589 --- c3000_pre/linux/fs/jbd/commit.c     2004-08-21 09:48:59.000000000 +0900
590 +++ c3000_test/linux/fs/jbd/commit.c    2004-12-20 22:56:21.000000000 +0900
591 @@ -212,6 +212,16 @@
592                                 __journal_remove_journal_head(bh);
593                                 refile_buffer(bh);
594                                 __brelse(bh);
595 +                               if (conditional_schedule_needed()) {
596 +                                       if (commit_transaction->t_sync_datalist)
597 +                                               commit_transaction->t_sync_datalist =
598 +                                                       next_jh;
599 +                                       if (bufs)
600 +                                               break;
601 +                                       spin_unlock(&journal_datalist_lock);
602 +                                       unconditional_schedule();
603 +                                       goto write_out_data;
604 +                               }
605                         }
606                 }
607                 if (bufs == ARRAY_SIZE(wbuf)) {
608 @@ -235,8 +245,7 @@
609                 journal_brelse_array(wbuf, bufs);
610                 lock_journal(journal);
611                 spin_lock(&journal_datalist_lock);
612 -               if (bufs)
613 -                       goto write_out_data_locked;
614 +               goto write_out_data_locked;
615         }
616  
617         /*
618 @@ -272,6 +281,14 @@
619          */
620         while ((jh = commit_transaction->t_async_datalist)) {
621                 struct buffer_head *bh = jh2bh(jh);
622 +               if (conditional_schedule_needed()) {
623 +                       spin_unlock(&journal_datalist_lock);
624 +                       unlock_journal(journal);
625 +                       unconditional_schedule();
626 +                       lock_journal(journal);
627 +                       spin_lock(&journal_datalist_lock);
628 +                       continue;       /* List may have changed */
629 +               }
630                 if (buffer_locked(bh)) {
631                         spin_unlock(&journal_datalist_lock);
632                         unlock_journal(journal);
633 @@ -486,6 +503,8 @@
634   wait_for_iobuf:
635         while (commit_transaction->t_iobuf_list != NULL) {
636                 struct buffer_head *bh;
637 +
638 +               conditional_schedule();
639                 jh = commit_transaction->t_iobuf_list->b_tprev;
640                 bh = jh2bh(jh);
641                 if (buffer_locked(bh)) {
642 @@ -644,6 +663,8 @@
643                 transaction_t *cp_transaction;
644                 struct buffer_head *bh;
645  
646 +               conditional_schedule();         /* journal is locked */
647 +
648                 jh = commit_transaction->t_forget;
649                 J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
650                         jh->b_transaction == journal->j_running_transaction);
651 diff -Nur c3000_pre/linux/fs/proc/array.c c3000_test/linux/fs/proc/array.c
652 --- c3000_pre/linux/fs/proc/array.c     2004-08-21 09:49:01.000000000 +0900
653 +++ c3000_test/linux/fs/proc/array.c    2004-12-20 22:56:21.000000000 +0900
654 @@ -498,9 +498,11 @@
655         if (end > PMD_SIZE)
656                 end = PMD_SIZE;
657         do {
658 -               pte_t page = *pte;
659 +               pte_t page;
660                 struct page *ptpage;
661  
662 +               conditional_schedule();         /* For `top' and `ps' */
663 +               page = *pte;
664                 address += PAGE_SIZE;
665                 pte++;
666                 if (pte_none(page))
667 diff -Nur c3000_pre/linux/fs/proc/generic.c c3000_test/linux/fs/proc/generic.c
668 --- c3000_pre/linux/fs/proc/generic.c   2004-08-21 09:49:01.000000000 +0900
669 +++ c3000_test/linux/fs/proc/generic.c  2004-12-20 22:56:21.000000000 +0900
670 @@ -98,6 +98,8 @@
671                                 retval = n;
672                         break;
673                 }
674 +
675 +               conditional_schedule();         /* Some /proc files are large */
676                 
677                 /* This is a hack to allow mangling of file pos independent
678                  * of actual bytes read.  Simply place the data at page,
679 diff -Nur c3000_pre/linux/fs/reiserfs/buffer2.c c3000_test/linux/fs/reiserfs/buffer2.c
680 --- c3000_pre/linux/fs/reiserfs/buffer2.c       2004-08-21 09:49:01.000000000 +0900
681 +++ c3000_test/linux/fs/reiserfs/buffer2.c      2004-12-20 22:56:21.000000000 +0900
682 @@ -54,6 +54,7 @@
683      PROC_EXP( unsigned int ctx_switches = kstat.context_swtch );
684  
685      result = bread (super -> s_dev, n_block, n_size);
686 +    conditional_schedule();
687      PROC_INFO_INC( super, breads );
688      PROC_EXP( if( kstat.context_swtch != ctx_switches ) 
689               PROC_INFO_INC( super, bread_miss ) );
690 diff -Nur c3000_pre/linux/fs/reiserfs/journal.c c3000_test/linux/fs/reiserfs/journal.c
691 --- c3000_pre/linux/fs/reiserfs/journal.c       2004-08-21 09:49:01.000000000 +0900
692 +++ c3000_test/linux/fs/reiserfs/journal.c      2004-12-20 22:56:21.000000000 +0900
693 @@ -573,6 +573,7 @@
694  /* lock the current transaction */
695  inline static void lock_journal(struct super_block *p_s_sb) {
696    PROC_INFO_INC( p_s_sb, journal.lock_journal );
697 +  conditional_schedule();
698    while(atomic_read(&(SB_JOURNAL(p_s_sb)->j_wlock)) > 0) {
699      PROC_INFO_INC( p_s_sb, journal.lock_journal_wait );
700      sleep_on(&(SB_JOURNAL(p_s_sb)->j_wait)) ;
701 @@ -703,6 +704,7 @@
702         mark_buffer_dirty(tbh) ;
703        }
704        ll_rw_block(WRITE, 1, &tbh) ;
705 +      conditional_schedule();
706        count++ ;
707        put_bh(tbh) ; /* once for our get_hash */
708      } 
709 @@ -832,6 +834,7 @@
710      set_bit(BH_Dirty, &(SB_JOURNAL(p_s_sb)->j_header_bh->b_state)) ;
711      ll_rw_block(WRITE, 1, &(SB_JOURNAL(p_s_sb)->j_header_bh)) ;
712      wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh)) ; 
713 +    conditional_schedule();
714      if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
715        printk( "reiserfs: journal-837: IO error during journal replay\n" );
716        return -EIO ;
717 @@ -2125,6 +2128,7 @@
718  }
719  
720  int journal_begin(struct reiserfs_transaction_handle *th, struct super_block  * p_s_sb, unsigned long nblocks) {
721 +  conditional_schedule();
722    return do_journal_begin_r(th, p_s_sb, nblocks, 0) ;
723  }
724  
725 @@ -2265,6 +2269,7 @@
726  }
727  
728  int journal_end(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
729 +  conditional_schedule();
730    return do_journal_end(th, p_s_sb, nblocks, 0) ;
731  }
732  
733 @@ -2716,6 +2721,7 @@
734        RFALSE( buffer_locked(bh) && cur_tb != NULL,
735               "waiting while do_balance was running\n") ;
736        wait_on_buffer(bh) ;
737 +      conditional_schedule();
738      }
739      PROC_INFO_INC( p_s_sb, journal.prepare_retry );
740      retry_count++ ;
741 @@ -2888,6 +2894,7 @@
742      /* copy all the real blocks into log area.  dirty log blocks */
743      if (test_bit(BH_JDirty, &cn->bh->b_state)) {
744        struct buffer_head *tmp_bh ;
745 +      conditional_schedule();
746        tmp_bh = sb_getblk(p_s_sb, reiserfs_get_journal_block(p_s_sb) + 
747                      ((cur_write_start + jindex) % JOURNAL_BLOCK_COUNT)) ;
748        mark_buffer_uptodate(tmp_bh, 1) ;
749 diff -Nur c3000_pre/linux/fs/reiserfs/stree.c c3000_test/linux/fs/reiserfs/stree.c
750 --- c3000_pre/linux/fs/reiserfs/stree.c 2004-08-21 09:49:01.000000000 +0900
751 +++ c3000_test/linux/fs/reiserfs/stree.c        2004-12-20 22:56:21.000000000 +0900
752 @@ -652,9 +652,8 @@
753                                         stop at leaf level - set to
754                                         DISK_LEAF_NODE_LEVEL */
755      ) {
756 -    int  n_block_number = SB_ROOT_BLOCK (p_s_sb),
757 -      expected_level = SB_TREE_HEIGHT (p_s_sb),
758 -      n_block_size    = p_s_sb->s_blocksize;
759 +    int n_block_number, expected_level;
760 +    int n_block_size    = p_s_sb->s_blocksize;
761      struct buffer_head  *       p_s_bh;
762      struct path_element *       p_s_last_element;
763      int                                n_node_level, n_retval;
764 @@ -666,7 +665,8 @@
765  #endif
766      
767      PROC_INFO_INC( p_s_sb, search_by_key );
768 -    
769 +    conditional_schedule();
770 +
771      /* As we add each node to a path we increase its count.  This means that
772         we must be careful to release all nodes in a path before we either
773         discard the path struct or re-use the path struct, as we do here. */
774 @@ -678,6 +678,8 @@
775      /* With each iteration of this loop we search through the items in the
776         current node, and calculate the next current node(next path element)
777         for the next iteration of this loop.. */
778 +    n_block_number = SB_ROOT_BLOCK (p_s_sb);
779 +    expected_level = SB_TREE_HEIGHT (p_s_sb);
780      while ( 1 ) {
781  
782  #ifdef CONFIG_REISERFS_CHECK
783 @@ -1104,6 +1106,8 @@
784             for (n_counter = *p_n_removed;
785                  n_counter < n_unfm_number; n_counter++, p_n_unfm_pointer-- ) {
786  
787 +               conditional_schedule();
788 +
789                 if (item_moved (&s_ih, p_s_path)) {
790                     need_research = 1 ;
791                     break;
792 diff -Nur c3000_pre/linux/include/linux/low-latency.h c3000_test/linux/include/linux/low-latency.h
793 --- c3000_pre/linux/include/linux/low-latency.h 1970-01-01 09:00:00.000000000 +0900
794 +++ c3000_test/linux/include/linux/low-latency.h        2004-12-20 22:56:21.000000000 +0900
795 @@ -0,0 +1,109 @@
796 +/*
797 + * include/linux/low-latency.h
798 + *
799 + * Andrew Morton <akpm@zip.com.au>
800 + */
801 +
802 +#ifndef LOW_LATENCY_H_INCLUDED
803 +#define LOW_LATENCY_H_INCLUDED
804 +
805 +#if defined(CONFIG_LOLAT)
806 +#define LOWLATENCY_NEEDED      1
807 +#else
808 +#define LOWLATENCY_NEEDED      0
809 +#endif
810 +
811 +#if LOWLATENCY_NEEDED
812 +
813 +#include <linux/cache.h>               /* For ____cacheline_aligned */
814 +
815 +#ifdef CONFIG_LOLAT_SYSCTL
816 +extern struct low_latency_enable_struct {
817 +       int yep;
818 +} ____cacheline_aligned __enable_lowlatency;
819 +#define enable_lowlatency __enable_lowlatency.yep
820 +
821 +#else
822 +#define enable_lowlatency 1
823 +#endif
824 +
825 +/*
826 + * Set this non-zero to generate low-latency instrumentation
827 + */
828 +#define LOWLATENCY_DEBUG               0
829 +
830 +/*
831 + * Set this non-zero for robustness testing
832 + */
833 +#define LOWLATENCY_ALWAYS_SCHEDULE     0
834 +
835 +#if LOWLATENCY_DEBUG
836 +
837 +#if LOWLATENCY_ALWAYS_SCHEDULE
838 +#define conditional_schedule_needed() ((enable_lowlatency == 2) || (enable_lowlatency && current->need_resched))
839 +#else
840 +#define conditional_schedule_needed() (enable_lowlatency && current->need_resched)
841 +#endif
842 +
843 +struct lolat_stats_t {
844 +       unsigned long count;
845 +       int visited;
846 +       const char *file;
847 +       int line;
848 +       struct lolat_stats_t *next;
849 +};
850 +
851 +void set_running_and_schedule(struct lolat_stats_t *stats);
852 +
853 +#define unconditional_schedule()                                       \
854 +       do {                                                            \
855 +               static struct lolat_stats_t stats = {                   \
856 +                       file: __FILE__,                                 \
857 +                       line: __LINE__,                                 \
858 +               };                                                      \
859 +               set_running_and_schedule(&stats);                       \
860 +       } while (0)
861 +
862 +extern void show_lolat_stats(void);
863 +
864 +#else  /* LOWLATENCY_DEBUG */
865 +
866 +#if LOWLATENCY_ALWAYS_SCHEDULE
867 +#define conditional_schedule_needed() 1
868 +#else
869 +#define conditional_schedule_needed() (current->need_resched)
870 +#endif
871 +
872 +void set_running_and_schedule(void);
873 +#define unconditional_schedule() set_running_and_schedule()
874 +
875 +#endif /* LOWLATENCY_DEBUG */
876 +
877 +#define conditional_schedule()                                         \
878 +       do {                                                            \
879 +               if (conditional_schedule_needed())                      \
880 +                       unconditional_schedule();                       \
881 +       } while (0)
882 +
883 +#define DEFINE_RESCHED_COUNT   int resched_count = 0
884 +#define TEST_RESCHED_COUNT(n)  (enable_lowlatency && (++resched_count > (n)))
885 +#define RESET_RESCHED_COUNT()  resched_count = 0
886 +extern int ll_copy_to_user(void *to_user, const void *from, unsigned long len);
887 +extern int ll_copy_from_user(void *to, const void *from_user, unsigned long len);
888 +
889 +#else  /* LOWLATENCY_NEEDED */
890 +
891 +#define conditional_schedule_needed() 0
892 +#define conditional_schedule()
893 +#define unconditional_schedule()
894 +
895 +#define DEFINE_RESCHED_COUNT
896 +#define TEST_RESCHED_COUNT(n)  0
897 +#define RESET_RESCHED_COUNT()
898 +#define ll_copy_to_user(to_user, from, len) copy_to_user((to_user), (from), (len))
899 +#define ll_copy_from_user(to, from_user, len) copy_from_user((to), (from_user), (len))
900 +
901 +#endif /* LOWLATENCY_NEEDED */
902 +
903 +#endif /* LOW_LATENCY_H_INCLUDED */
904 +
905 diff -Nur c3000_pre/linux/include/linux/mm.h c3000_test/linux/include/linux/mm.h
906 --- c3000_pre/linux/include/linux/mm.h  2004-08-21 09:49:13.000000000 +0900
907 +++ c3000_test/linux/include/linux/mm.h 2004-12-20 22:56:21.000000000 +0900
908 @@ -149,6 +149,8 @@
909   */
910  extern pgprot_t protection_map[16];
911  
912 +/* Actions for zap_page_range() */
913 +#define ZPR_COND_RESCHED       1       /* Do a conditional_schedule() occasionally */
914  
915  /*
916   * These are the virtual MM functions - opening of an area, closing and
917 @@ -500,7 +502,7 @@
918  extern void shmem_lock(struct file * file, int lock);
919  extern int shmem_zero_setup(struct vm_area_struct *);
920  
921 -extern void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
922 +extern void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, int actions);
923  extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
924  extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
925  extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
926 diff -Nur c3000_pre/linux/include/linux/reiserfs_fs.h c3000_test/linux/include/linux/reiserfs_fs.h
927 --- c3000_pre/linux/include/linux/reiserfs_fs.h 2004-08-21 09:49:13.000000000 +0900
928 +++ c3000_test/linux/include/linux/reiserfs_fs.h        2004-12-20 22:56:21.000000000 +0900
929 @@ -1197,8 +1197,8 @@
930  #define fs_generation(s) ((s)->u.reiserfs_sb.s_generation_counter)
931  #define get_generation(s) atomic_read (&fs_generation(s))
932  #define FILESYSTEM_CHANGED_TB(tb)  (get_generation((tb)->tb_sb) != (tb)->fs_gen)
933 -#define fs_changed(gen,s) (gen != get_generation (s))
934 -
935 +#define __fs_changed(gen,s) (gen != get_generation (s))
936 +#define fs_changed(gen,s) ({conditional_schedule(); __fs_changed(gen,s);})
937  
938  /***************************************************************************/
939  /*                  FIXATE NODES                                           */
940 diff -Nur c3000_pre/linux/include/linux/sched.h c3000_test/linux/include/linux/sched.h
941 --- c3000_pre/linux/include/linux/sched.h       2004-08-21 09:49:13.000000000 +0900
942 +++ c3000_test/linux/include/linux/sched.h      2004-12-20 22:56:21.000000000 +0900
943 @@ -1092,6 +1092,7 @@
944  #include <linux/dcache.h>
945  #include <linux/tqueue.h>
946  #include <linux/fs_struct.h>
947 +#include <linux/low-latency.h>
948  
949  #endif /* __KERNEL__ */
950  #endif
951 diff -Nur c3000_pre/linux/include/linux/sysctl.h c3000_test/linux/include/linux/sysctl.h
952 --- c3000_pre/linux/include/linux/sysctl.h      2004-08-21 09:49:13.000000000 +0900
953 +++ c3000_test/linux/include/linux/sysctl.h     2004-12-20 22:56:21.000000000 +0900
954 @@ -131,6 +131,7 @@
955         KERN_CORE_USES_PID=52,          /* int: use core or core.%pid */
956         KERN_TAINTED=53,        /* int: various kernel tainted flags */
957         KERN_CADPID=54,         /* int: PID of the process to notify on CAD */
958 +       KERN_LOWLATENCY=55,     /* int: enable low latency scheduling */
959  };
960  
961  
962 diff -Nur c3000_pre/linux/kernel/exit.c c3000_test/linux/kernel/exit.c
963 --- c3000_pre/linux/kernel/exit.c       2004-08-21 09:49:14.000000000 +0900
964 +++ c3000_test/linux/kernel/exit.c      2004-12-20 22:56:21.000000000 +0900
965 @@ -196,6 +196,7 @@
966                         }
967                         i++;
968                         set >>= 1;
969 +                       conditional_schedule();         /* sys_exit, many files open */
970                 }
971         }
972  }
973 diff -Nur c3000_pre/linux/kernel/ksyms.c c3000_test/linux/kernel/ksyms.c
974 --- c3000_pre/linux/kernel/ksyms.c      2004-12-19 00:35:59.000000000 +0900
975 +++ c3000_test/linux/kernel/ksyms.c     2004-12-20 23:07:26.000000000 +0900
976 @@ -481,6 +481,13 @@
977  EXPORT_SYMBOL(do_gettimeofday);
978  EXPORT_SYMBOL(do_settimeofday);
979  
980 +#if LOWLATENCY_NEEDED
981 +EXPORT_SYMBOL(set_running_and_schedule);
982 +#ifdef CONFIG_LOLAT_SYSCTL
983 +EXPORT_SYMBOL(__enable_lowlatency);
984 +#endif
985 +#endif
986 +
987  #if !defined(__ia64__)
988  EXPORT_SYMBOL(loops_per_jiffy);
989  #endif
990 diff -Nur c3000_pre/linux/kernel/module.c c3000_test/linux/kernel/module.c
991 --- c3000_pre/linux/kernel/module.c     2004-08-21 09:49:14.000000000 +0900
992 +++ c3000_test/linux/kernel/module.c    2004-12-20 22:56:21.000000000 +0900
993 @@ -1174,6 +1174,11 @@
994                 return ERR_PTR(-ENOMEM);
995         lock_kernel();
996         for (v = module_list, n = *pos; v; n -= v->nsyms, v = v->next) {
997 +#if 0
998 +               /* We can't actually do this, because we'd create a
999 +                * race against module unload.  Need a semaphore. */
1000 +               conditional_schedule();
1001 +#endif
1002                 if (n < v->nsyms) {
1003                         p->mod = v;
1004                         p->index = n;
1005 diff -Nur c3000_pre/linux/kernel/sched.c c3000_test/linux/kernel/sched.c
1006 --- c3000_pre/linux/kernel/sched.c      2004-08-21 09:49:14.000000000 +0900
1007 +++ c3000_test/linux/kernel/sched.c     2004-12-20 22:56:21.000000000 +0900
1008 @@ -302,6 +302,17 @@
1009                 if (tsk->processor != this_cpu)
1010                         smp_send_reschedule(tsk->processor);
1011         }
1012 +#if LOWLATENCY_NEEDED
1013 +       if (enable_lowlatency && (p->policy != SCHED_OTHER)) {
1014 +               struct task_struct *t;
1015 +               for (i = 0; i < smp_num_cpus; i++) {
1016 +                       cpu = cpu_logical_map(i);
1017 +                       t = cpu_curr(cpu);
1018 +                       if (t != tsk)
1019 +                               t->need_resched = 1;
1020 +               }
1021 +       }
1022 +#endif
1023         return;
1024                 
1025  
1026 @@ -1429,3 +1440,93 @@
1027         atomic_inc(&init_mm.mm_count);
1028         enter_lazy_tlb(&init_mm, current, cpu);
1029  }
1030 +
1031 +#if LOWLATENCY_NEEDED
1032 +#if LOWLATENCY_DEBUG
1033 +
1034 +static struct lolat_stats_t *lolat_stats_head;
1035 +static spinlock_t lolat_stats_lock = SPIN_LOCK_UNLOCKED;
1036 +
1037 +void set_running_and_schedule(struct lolat_stats_t *stats)
1038 +{
1039 +       spin_lock(&lolat_stats_lock);
1040 +       if (stats->visited == 0) {
1041 +               stats->visited = 1;
1042 +               stats->next = lolat_stats_head;
1043 +               lolat_stats_head = stats;
1044 +       }
1045 +       stats->count++;
1046 +       spin_unlock(&lolat_stats_lock);
1047 +
1048 +       if (current->state != TASK_RUNNING)
1049 +               set_current_state(TASK_RUNNING);
1050 +       schedule();
1051 +}
1052 +
1053 +void show_lolat_stats(void)
1054 +{
1055 +       struct lolat_stats_t *stats = lolat_stats_head;
1056 +
1057 +       printk("Low latency scheduling stats:\n");
1058 +       while (stats) {
1059 +               printk("%s:%d: %lu\n", stats->file, stats->line, stats->count);
1060 +               stats->count = 0;
1061 +               stats = stats->next;
1062 +       }
1063 +}
1064 +
1065 +#else  /* LOWLATENCY_DEBUG */
1066 +
1067 +void set_running_and_schedule()
1068 +{
1069 +       if (current->state != TASK_RUNNING)
1070 +               __set_current_state(TASK_RUNNING);
1071 +       schedule();
1072 +}
1073 +
1074 +#endif /* LOWLATENCY_DEBUG */
1075 +
1076 +int ll_copy_to_user(void *to_user, const void *from, unsigned long len)
1077 +{
1078 +       while (len) {
1079 +               unsigned long n_to_copy = len;
1080 +               unsigned long remainder;
1081 +
1082 +               if (n_to_copy > 4096)
1083 +                       n_to_copy = 4096;
1084 +               remainder = copy_to_user(to_user, from, n_to_copy);
1085 +               if (remainder)
1086 +                       return remainder + len;
1087 +               to_user = ((char *)to_user) + n_to_copy;
1088 +               from = ((char *)from) + n_to_copy;
1089 +               len -= n_to_copy;
1090 +               conditional_schedule();
1091 +       }
1092 +       return 0;
1093 +}
1094 +
1095 +int ll_copy_from_user(void *to, const void *from_user, unsigned long len)
1096 +{
1097 +       while (len) {
1098 +               unsigned long n_to_copy = len;
1099 +               unsigned long remainder;
1100 +
1101 +               if (n_to_copy > 4096)
1102 +                       n_to_copy = 4096;
1103 +               remainder = copy_from_user(to, from_user, n_to_copy);
1104 +               if (remainder)
1105 +                       return remainder + len;
1106 +               to = ((char *)to) + n_to_copy;
1107 +               from_user = ((char *)from_user) + n_to_copy;
1108 +               len -= n_to_copy;
1109 +               conditional_schedule();
1110 +       }
1111 +       return 0;
1112 +}
1113 +
1114 +#ifdef CONFIG_LOLAT_SYSCTL
1115 +struct low_latency_enable_struct __enable_lowlatency = { 0, };
1116 +#endif
1117 +
1118 +#endif /* LOWLATENCY_NEEDED */
1119 +
1120 diff -Nur c3000_pre/linux/kernel/sysctl.c c3000_test/linux/kernel/sysctl.c
1121 --- c3000_pre/linux/kernel/sysctl.c     2004-08-21 09:49:14.000000000 +0900
1122 +++ c3000_test/linux/kernel/sysctl.c    2004-12-20 22:56:21.000000000 +0900
1123 @@ -271,6 +271,10 @@
1124         {KERN_S390_USER_DEBUG_LOGGING,"userprocess_debug",
1125          &sysctl_userprocess_debug,sizeof(int),0644,NULL,&proc_dointvec},
1126  #endif
1127 +#ifdef CONFIG_LOLAT_SYSCTL
1128 +       {KERN_LOWLATENCY, "lowlatency", &enable_lowlatency, sizeof (int),
1129 +        0644, NULL, &proc_dointvec},
1130 +#endif
1131         {0}
1132  };
1133  
1134 diff -Nur c3000_pre/linux/mm/filemap.c c3000_test/linux/mm/filemap.c
1135 --- c3000_pre/linux/mm/filemap.c        2004-08-21 09:49:15.000000000 +0900
1136 +++ c3000_test/linux/mm/filemap.c       2004-12-20 22:56:21.000000000 +0900
1137 @@ -179,7 +179,9 @@
1138  {
1139         struct list_head *head, *curr;
1140         struct page * page;
1141 +       int ll_count = 100;
1142  
1143 +restart:
1144         head = &inode->i_mapping->clean_pages;
1145  
1146         spin_lock(&pagemap_lru_lock);
1147 @@ -190,6 +192,14 @@
1148                 page = list_entry(curr, struct page, list);
1149                 curr = curr->next;
1150  
1151 +               if (conditional_schedule_needed() && ll_count) {
1152 +                       spin_unlock(&pagecache_lock);
1153 +                       spin_unlock(&pagemap_lru_lock);
1154 +                       unconditional_schedule();
1155 +                       ll_count--;
1156 +                       goto restart;
1157 +               }
1158 +
1159                 /* We cannot invalidate something in dirty.. */
1160                 if (PageDirty(page))
1161                         continue;
1162 @@ -253,8 +263,7 @@
1163         page_cache_release(page);
1164  }
1165  
1166 -static int FASTCALL(truncate_list_pages(struct list_head *, unsigned long, unsigned *));
1167 -static int truncate_list_pages(struct list_head *head, unsigned long start, unsigned *partial)
1168 +static int truncate_list_pages(struct list_head *head, unsigned long start, unsigned *partial, int *restart_count)
1169  {
1170         struct list_head *curr;
1171         struct page * page;
1172 @@ -265,6 +274,17 @@
1173         while (curr != head) {
1174                 unsigned long offset;
1175  
1176 +               if (conditional_schedule_needed() && *restart_count) {
1177 +                       (*restart_count)--;
1178 +                       list_del(head);
1179 +                       list_add(head, curr);           /* Restart on this page */
1180 +                       spin_unlock(&pagecache_lock);
1181 +                       unconditional_schedule();
1182 +                       spin_lock(&pagecache_lock);
1183 +                       unlocked = 1;
1184 +                       goto restart;
1185 +               }
1186 +
1187                 page = list_entry(curr, struct page, list);
1188                 offset = page->index;
1189  
1190 @@ -297,13 +317,11 @@
1191                         } else
1192                                 wait_on_page(page);
1193  
1194 -                       page_cache_release(page);
1195 -
1196 -                       if (current->need_resched) {
1197 -                               __set_current_state(TASK_RUNNING);
1198 -                               schedule();
1199 +                       if (LOWLATENCY_NEEDED) {
1200 +                               *restart_count = 4;     /* We made progress */
1201                         }
1202  
1203 +                       page_cache_release(page);
1204                         spin_lock(&pagecache_lock);
1205                         goto restart;
1206                 }
1207 @@ -326,13 +344,14 @@
1208  {
1209         unsigned long start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1210         unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
1211 +       int restart_count = 4;
1212         int unlocked;
1213  
1214         spin_lock(&pagecache_lock);
1215         do {
1216 -               unlocked = truncate_list_pages(&mapping->clean_pages, start, &partial);
1217 -               unlocked |= truncate_list_pages(&mapping->dirty_pages, start, &partial);
1218 -               unlocked |= truncate_list_pages(&mapping->locked_pages, start, &partial);
1219 +               unlocked = truncate_list_pages(&mapping->clean_pages, start, &partial, &restart_count);
1220 +               unlocked |= truncate_list_pages(&mapping->dirty_pages, start, &partial, &restart_count);
1221 +               unlocked |= truncate_list_pages(&mapping->locked_pages, start, &partial, &restart_count);
1222         } while (unlocked);
1223         /* Traversed all three lists without dropping the lock */
1224         spin_unlock(&pagecache_lock);
1225 @@ -477,6 +496,7 @@
1226  
1227                 page_cache_get(page);
1228                 spin_unlock(&pagecache_lock);
1229 +               conditional_schedule();         /* sys_msync() (only used by minixfs, udf) */
1230                 lock_page(page);
1231  
1232                 /* The buffers could have been free'd while we waited for the page lock */
1233 @@ -563,12 +583,14 @@
1234                 list_del(&page->list);
1235                 list_add(&page->list, &mapping->locked_pages);
1236  
1237 -               if (!PageDirty(page))
1238 -                       continue;
1239 -
1240                 page_cache_get(page);
1241                 spin_unlock(&pagecache_lock);
1242  
1243 +               conditional_schedule();         /* sys_msync() */
1244 +
1245 +               if (!PageDirty(page))
1246 +                       goto clean;
1247 +
1248                 lock_page(page);
1249  
1250                 if (PageDirty(page)) {
1251 @@ -579,7 +601,7 @@
1252                                 ret = err;
1253                 } else
1254                         UnlockPage(page);
1255 -
1256 +clean:
1257                 page_cache_release(page);
1258                 spin_lock(&pagecache_lock);
1259         }
1260 @@ -597,7 +619,8 @@
1261  int filemap_fdatawait(struct address_space * mapping)
1262  {
1263         int ret = 0;
1264 -
1265 +       DEFINE_RESCHED_COUNT;
1266 +restart:
1267         spin_lock(&pagecache_lock);
1268  
1269          while (!list_empty(&mapping->locked_pages)) {
1270 @@ -606,6 +629,17 @@
1271                 list_del(&page->list);
1272                 list_add(&page->list, &mapping->clean_pages);
1273  
1274 +               if (TEST_RESCHED_COUNT(32)) {
1275 +                       RESET_RESCHED_COUNT();
1276 +                       if (conditional_schedule_needed()) {
1277 +                               page_cache_get(page);
1278 +                               spin_unlock(&pagecache_lock);
1279 +                               unconditional_schedule();
1280 +                               page_cache_release(page);
1281 +                               goto restart;
1282 +                       }
1283 +               }
1284 +
1285                 if (!PageLocked(page))
1286                         continue;
1287  
1288 @@ -706,8 +740,10 @@
1289         spin_lock(&pagecache_lock);
1290         page = __find_page_nolock(mapping, offset, *hash);
1291         spin_unlock(&pagecache_lock);
1292 -       if (page)
1293 +       if (page) {
1294 +               conditional_schedule();
1295                 return 0;
1296 +       }
1297  
1298         page = page_cache_alloc(mapping);
1299         if (!page)
1300 @@ -963,6 +999,11 @@
1301          * the hash-list needs a held write-lock.
1302          */
1303  repeat:
1304 +       if (conditional_schedule_needed()) {
1305 +               spin_unlock(&pagecache_lock);
1306 +               unconditional_schedule();
1307 +               spin_lock(&pagecache_lock);
1308 +       }
1309         page = __find_page_nolock(mapping, offset, hash);
1310         if (page) {
1311                 page_cache_get(page);
1312 @@ -1413,6 +1454,8 @@
1313                 page_cache_get(page);
1314                 spin_unlock(&pagecache_lock);
1315  
1316 +               conditional_schedule();         /* sys_read() */
1317 +
1318                 if (!Page_Uptodate(page))
1319                         goto page_not_up_to_date;
1320                 generic_file_readahead(reada_ok, filp, inode, page);
1321 @@ -2114,6 +2157,12 @@
1322                 address += PAGE_SIZE;
1323                 pte++;
1324         } while (address && (address < end));
1325 +
1326 +       if (conditional_schedule_needed()) {
1327 +               spin_unlock(&vma->vm_mm->page_table_lock);
1328 +               unconditional_schedule();               /* syncing large mapped files */
1329 +               spin_lock(&vma->vm_mm->page_table_lock);
1330 +       }
1331         return error;
1332  }
1333  
1334 @@ -2530,7 +2579,9 @@
1335         if (vma->vm_flags & VM_LOCKED)
1336                 return -EINVAL;
1337  
1338 -       zap_page_range(vma->vm_mm, start, end - start);
1339 +        zap_page_range(vma->vm_mm, start, end - start,
1340 +               ZPR_COND_RESCHED);        /* sys_madvise(MADV_DONTNEED) */
1341 +
1342         return 0;
1343  }
1344  
1345 @@ -3095,6 +3146,9 @@
1346                         goto sync_failure;
1347                 page_fault = __copy_from_user(kaddr+offset, buf, bytes);
1348                 flush_dcache_page(page);
1349 +
1350 +                conditional_schedule();
1351 +
1352                 status = mapping->a_ops->commit_write(file, page, offset, offset+bytes);
1353                 if (page_fault)
1354                         goto fail_write;
1355 diff -Nur c3000_pre/linux/mm/memory.c c3000_test/linux/mm/memory.c
1356 --- c3000_pre/linux/mm/memory.c 2004-08-21 09:49:15.000000000 +0900
1357 +++ c3000_test/linux/mm/memory.c        2004-12-20 22:56:21.000000000 +0900
1358 @@ -370,7 +370,7 @@
1359  /*
1360   * remove user pages in a given range.
1361   */
1362 -void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
1363 +static void do_zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
1364  {
1365         mmu_gather_t *tlb;
1366         pgd_t * dir;
1367 @@ -494,6 +494,10 @@
1368                         struct page *map;
1369                         while (!(map = follow_page(mm, start, write))) {
1370                                 spin_unlock(&mm->page_table_lock);
1371 +
1372 +                               /* Pinning down many physical pages (kiobufs, mlockall) */
1373 +                               conditional_schedule();
1374 +
1375                                 switch (handle_mm_fault(mm, vma, start, write)) {
1376                                 case 1:
1377                                         tsk->min_flt++;
1378 @@ -655,6 +659,21 @@
1379         iobuf->locked = 0;
1380  }
1381  
1382 +#define MAX_ZAP_BYTES 256*PAGE_SIZE
1383 +
1384 +void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, int actions)
1385 +{
1386 +       while (size) {
1387 +               unsigned long chunk = size;
1388 +               if (actions & ZPR_COND_RESCHED && chunk > MAX_ZAP_BYTES)
1389 +                       chunk = MAX_ZAP_BYTES;
1390 +               do_zap_page_range(mm, address, chunk);
1391 +               if (actions & ZPR_COND_RESCHED)
1392 +                       conditional_schedule();
1393 +               address += chunk;
1394 +               size -= chunk;
1395 +       }
1396 +}
1397  
1398  /*
1399   * Lock down all of the pages of a kiovec for IO.
1400 @@ -764,11 +783,18 @@
1401         return 0;
1402  }
1403  
1404 -static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
1405 -                                     unsigned long size, pgprot_t prot)
1406 +static inline void zeromap_pte_range(struct mm_struct *mm, pte_t * pte,
1407 +                               unsigned long address, unsigned long size,
1408 +                               pgprot_t prot)
1409  {
1410         unsigned long end;
1411  
1412 +       if (conditional_schedule_needed()) {
1413 +               spin_unlock(&mm->page_table_lock);
1414 +               unconditional_schedule();               /* mmap(/dev/zero) */
1415 +               spin_lock(&mm->page_table_lock);
1416 +       }
1417 +
1418         address &= ~PMD_MASK;
1419         end = address + size;
1420         if (end > PMD_SIZE)
1421 @@ -796,7 +822,7 @@
1422                 pte_t * pte = pte_alloc(mm, pmd, address);
1423                 if (!pte)
1424                         return -ENOMEM;
1425 -               zeromap_pte_range(pte, address, end - address, prot);
1426 +               zeromap_pte_range(mm, pte, address, end - address, prot);
1427                 address = (address + PMD_SIZE) & PMD_MASK;
1428                 pmd++;
1429         } while (address && (address < end));
1430 @@ -1044,7 +1070,7 @@
1431  
1432                 /* mapping wholly truncated? */
1433                 if (mpnt->vm_pgoff >= pgoff) {
1434 -                       zap_page_range(mm, start, len);
1435 +                        zap_page_range(mm, start, len, 0);
1436                         continue;
1437                 }
1438  
1439 @@ -1057,7 +1083,7 @@
1440                 /* Ok, partially affected.. */
1441                 start += diff << PAGE_SHIFT;
1442                 len = (len - diff) << PAGE_SHIFT;
1443 -               zap_page_range(mm, start, len);
1444 +                zap_page_range(mm, start, len, 0);
1445         } while ((mpnt = mpnt->vm_next_share) != NULL);
1446  }
1447  
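The mm/memory.c change above turns zap_page_range() into a wrapper that works in bounded chunks: at most MAX_ZAP_BYTES (256 pages) are torn down per call to do_zap_page_range(), with a conditional_schedule() between chunks when the caller passes ZPR_COND_RESCHED; callers that cannot tolerate rescheduling, such as the truncation paths in the hunks above, pass 0 and keep the old single-shot behaviour. Below is a self-contained user-space sketch of the same chunking structure, not the kernel code itself; process_range(), do_process_chunk(), MAX_CHUNK and PR_COND_RESCHED are invented stand-ins.

#include <sched.h>

#define MAX_CHUNK       (256 * 4096)    /* analogue of MAX_ZAP_BYTES      */
#define PR_COND_RESCHED 1               /* analogue of ZPR_COND_RESCHED   */

/* The expensive, non-preemptible work on [address, address + size). */
static void do_process_chunk(unsigned long address, unsigned long size)
{
        (void)address;
        (void)size;
}

static void process_range(unsigned long address, unsigned long size, int actions)
{
        while (size) {
                unsigned long chunk = size;

                /* Bound the amount of work done in one non-preemptible step
                 * if the caller allows rescheduling between steps. */
                if ((actions & PR_COND_RESCHED) && chunk > MAX_CHUNK)
                        chunk = MAX_CHUNK;

                do_process_chunk(address, chunk);

                if (actions & PR_COND_RESCHED)
                        sched_yield();  /* stand-in for conditional_schedule() */

                address += chunk;
                size -= chunk;
        }
}

int main(void)
{
        process_range(0, 10 * MAX_CHUNK + 123, PR_COND_RESCHED);
        return 0;
}

The mm/mmap.c and mm/mremap.c hunks that follow show how each call site opts in or out: munmap and exit teardown pass ZPR_COND_RESCHED, while the error-unwind and mremap paths pass 0.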
1448 diff -Nur c3000_pre/linux/mm/mmap.c c3000_test/linux/mm/mmap.c
1449 --- c3000_pre/linux/mm/mmap.c   2004-12-16 22:55:54.000000000 +0900
1450 +++ c3000_test/linux/mm/mmap.c  2004-12-20 23:07:25.000000000 +0900
1451 @@ -598,7 +598,7 @@
1452         fput(file);
1453  
1454         /* Undo any partial mapping done by a device driver. */
1455 -       zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1456 +        zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start, 0);
1457  free_vma:
1458         kmem_cache_free(vm_area_cachep, vma);
1459         return error;
1460 @@ -998,7 +998,7 @@
1461                 remove_shared_vm_struct(mpnt);
1462                 mm->map_count--;
1463  
1464 -               zap_page_range(mm, st, size);
1465 +                zap_page_range(mm, st, size, ZPR_COND_RESCHED);   /* sys_munmap() */
1466  
1467                 /*
1468                  * Fix the mapping, and free the old area if it wasn't reused.
1469 @@ -1158,7 +1158,7 @@
1470                 }
1471                 mm->map_count--;
1472                 remove_shared_vm_struct(mpnt);
1473 -               zap_page_range(mm, start, size);
1474 +               zap_page_range(mm, start, size, ZPR_COND_RESCHED);      /* sys_exit() */
1475                 if (mpnt->vm_file)
1476                         fput(mpnt->vm_file);
1477                 kmem_cache_free(vm_area_cachep, mpnt);
1478 diff -Nur c3000_pre/linux/mm/mremap.c c3000_test/linux/mm/mremap.c
1479 --- c3000_pre/linux/mm/mremap.c 2004-12-16 22:55:54.000000000 +0900
1480 +++ c3000_test/linux/mm/mremap.c        2004-12-20 23:07:25.000000000 +0900
1481 @@ -121,7 +121,7 @@
1482         flush_cache_range(mm, new_addr, new_addr + len);
1483         while ((offset += PAGE_SIZE) < len)
1484                 move_one_page(mm, new_addr + offset, old_addr + offset);
1485 -       zap_page_range(mm, new_addr, len);
1486 +       zap_page_range(mm, new_addr, len, 0);
1487  #ifdef __arm__
1488         memc_update_mm(mm);
1489  #endif
1490 diff -Nur c3000_pre/linux/mm/slab.c c3000_test/linux/mm/slab.c
1491 --- c3000_pre/linux/mm/slab.c   2004-11-16 15:31:09.000000000 +0900
1492 +++ c3000_test/linux/mm/slab.c  2004-12-20 22:56:21.000000000 +0900
1493 @@ -940,6 +940,7 @@
1494                 list_del(&slabp->list);
1495  
1496                 spin_unlock_irq(&cachep->spinlock);
1497 +               conditional_schedule();
1498                 kmem_slab_destroy(cachep, slabp);
1499                 ret++;
1500                 spin_lock_irq(&cachep->spinlock);
1501 @@ -1853,6 +1854,7 @@
1502                  */
1503                 spin_unlock_irq(&best_cachep->spinlock);
1504                 kmem_slab_destroy(best_cachep, slabp);
1505 +               conditional_schedule();         /* try_to_free_pages() */
1506                 spin_lock_irq(&best_cachep->spinlock);
1507         }
1508         spin_unlock_irq(&best_cachep->spinlock);
1509 diff -Nur c3000_pre/linux/mm/swapfile.c c3000_test/linux/mm/swapfile.c
1510 --- c3000_pre/linux/mm/swapfile.c       2004-08-21 09:49:16.000000000 +0900
1511 +++ c3000_test/linux/mm/swapfile.c      2004-12-20 22:56:21.000000000 +0900
1512 @@ -819,7 +819,7 @@
1513                                 len += sprintf(buf + len, "partition\t");
1514  
1515                         usedswap = 0;
1516 -                       for (j = 0; j < ptr->max; ++j)
1517 +                       for (j = 0; j < ptr->max; ++j) {
1518                                 switch (ptr->swap_map[j]) {
1519                                         case SWAP_MAP_BAD:
1520                                         case 0:
1521 @@ -827,6 +827,8 @@
1522                                         default:
1523                                                 usedswap++;
1524                                 }
1525 +                               conditional_schedule();
1526 +                       }
1527                         len += sprintf(buf + len, "%d\t%d\t%d\n", ptr->pages << (PAGE_SHIFT - 10), 
1528                                 usedswap << (PAGE_SHIFT - 10), ptr->prio);
1529                 }
1530 @@ -1120,6 +1122,11 @@
1531                 if (swap_info[i].flags != SWP_USED)
1532                         continue;
1533                 for (j = 0; j < swap_info[i].max; ++j) {
1534 +                       if (conditional_schedule_needed()) {
1535 +                               swap_list_unlock();
1536 +                               conditional_schedule();
1537 +                               swap_list_lock();
1538 +                       }
1539                         switch (swap_info[i].swap_map[j]) {
1540                                 case 0:
1541                                 case SWAP_MAP_BAD:
1542 diff -Nur c3000_pre/linux/mm/vmscan.c c3000_test/linux/mm/vmscan.c
1543 --- c3000_pre/linux/mm/vmscan.c 2004-08-21 09:49:16.000000000 +0900
1544 +++ c3000_test/linux/mm/vmscan.c        2004-12-20 22:56:21.000000000 +0900
1545 @@ -173,6 +173,7 @@
1546  {
1547         pte_t * pte;
1548         unsigned long pmd_end;
1549 +       DEFINE_RESCHED_COUNT;
1550  
1551         if (pmd_none(*dir))
1552                 return count;
1553 @@ -198,11 +199,17 @@
1554                                         address += PAGE_SIZE;
1555                                         break;
1556                                 }
1557 +                                if (TEST_RESCHED_COUNT(4)) {
1558 +                                        if (conditional_schedule_needed())
1559 +                                               goto out;
1560 +                                        RESET_RESCHED_COUNT();
1561 +                                }
1562                         }
1563                 }
1564                 address += PAGE_SIZE;
1565                 pte++;
1566         } while (address && (address < end));
1567 +out:
1568         mm->swap_address = address;
1569         return count;
1570  }
1571 @@ -231,6 +238,8 @@
1572                 count = swap_out_pmd(mm, vma, pmd, address, end, count, classzone);
1573                 if (!count)
1574                         break;
1575 +               if (conditional_schedule_needed())
1576 +                       return count;
1577                 address = (address + PMD_SIZE) & PMD_MASK;
1578                 pmd++;
1579         } while (address && (address < end));
1580 @@ -255,6 +264,8 @@
1581                 count = swap_out_pgd(mm, vma, pgdir, address, end, count, classzone);
1582                 if (!count)
1583                         break;
1584 +               if (conditional_schedule_needed())
1585 +                       return count;
1586                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
1587                 pgdir++;
1588         } while (address && (address < end));
1589 @@ -276,6 +287,7 @@
1590          * Find the proper vm-area after freezing the vma chain 
1591          * and ptes.
1592          */
1593 +continue_scan:
1594         spin_lock(&mm->page_table_lock);
1595         address = mm->swap_address;
1596         if (address == TASK_SIZE || swap_mm != mm) {
1597 @@ -293,6 +305,12 @@
1598                         vma = vma->vm_next;
1599                         if (!vma)
1600                                 break;
1601 +                        if (conditional_schedule_needed()) {    /* Scanning a large vma */
1602 +                                spin_unlock(&mm->page_table_lock);
1603 +                                unconditional_schedule();
1604 +                                /* Continue from where we left off */
1605 +                                goto continue_scan;
1606 +                        }
1607                         if (!count)
1608                                 goto out_unlock;
1609                         address = vma->vm_start;
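The mm/vmscan.c hunks above amortise the polling cost with the RESCHED_COUNT helpers: conditional_schedule_needed() is only consulted when TEST_RESCHED_COUNT(4) fires (presumably once every few pages examined), and when a reschedule is pending the pte walk bails out via "goto out", recording how far it got in mm->swap_address. The early returns added to swap_out_pgd() and swap_out_vma() propagate that up to swap_out_mm(), which drops page_table_lock, calls unconditional_schedule() and resumes the scan. A user-space analogue of that "check every Nth iteration, bail out early" pattern is sketched below; scan(), expensive_check() and nr_items are invented names.

#include <stdio.h>

/* Stand-in for conditional_schedule_needed(); always false here. */
static int expensive_check(void)
{
        return 0;
}

/* Scan nr_items, polling the expensive condition only every 4th item,
 * mirroring DEFINE_RESCHED_COUNT / TEST_RESCHED_COUNT(4) /
 * RESET_RESCHED_COUNT() in the swap_out_pmd() hunk above. */
static int scan(int nr_items)
{
        int resched_count = 0;
        int i;

        for (i = 0; i < nr_items; i++) {
                /* ... per-item work ... */

                if (++resched_count >= 4) {
                        resched_count = 0;
                        if (expensive_check()) {
                                /* Bail out early; the caller records how far
                                 * we got and resumes after rescheduling. */
                                return i;
                        }
                }
        }
        return nr_items;
}

int main(void)
{
        printf("scanned %d items\n", scan(1000));
        return 0;
}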
1610 diff -Nur c3000_pre/linux/net/core/iovec.c c3000_test/linux/net/core/iovec.c
1611 --- c3000_pre/linux/net/core/iovec.c    2004-08-21 11:23:13.000000000 +0900
1612 +++ c3000_test/linux/net/core/iovec.c   2004-12-20 22:56:21.000000000 +0900
1613 @@ -88,7 +88,7 @@
1614                 if(iov->iov_len)
1615                 {
1616                         int copy = min_t(unsigned int, iov->iov_len, len);
1617 -                       if (copy_to_user(iov->iov_base, kdata, copy))
1618 +                        if (ll_copy_to_user(iov->iov_base, kdata, copy))
1619                                 goto out;
1620                         kdata+=copy;
1621                         len-=copy;
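memcpy_toiovec() above switches from copy_to_user() to ll_copy_to_user(). Its definition is not in this part of the diff; presumably, going by the name and the rest of the patch, it behaves like copy_to_user() but splits a large copy into bounded pieces with a conditional_schedule() in between, so that a big socket read cannot monopolise the CPU. A user-space sketch of that chunked-copy idea, under that assumption, with invented names chunked_copy() and COPY_CHUNK:

#include <sched.h>
#include <stddef.h>
#include <string.h>

#define COPY_CHUNK 4096         /* bytes copied per non-preemptible step */

static void chunked_copy(void *dst, const void *src, size_t len)
{
        while (len) {
                size_t n = len > COPY_CHUNK ? COPY_CHUNK : len;

                memcpy(dst, src, n);
                dst = (char *)dst + n;
                src = (const char *)src + n;
                len -= n;

                if (len)
                        sched_yield();  /* stand-in for conditional_schedule() */
        }
}

int main(void)
{
        static char src[64 * 1024], dst[64 * 1024];

        chunked_copy(dst, src, sizeof(dst));
        return 0;
}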
1622 diff -Nur c3000_pre/linux/net/ipv4/tcp_minisocks.c c3000_test/linux/net/ipv4/tcp_minisocks.c
1623 --- c3000_pre/linux/net/ipv4/tcp_minisocks.c    2004-08-21 09:49:22.000000000 +0900
1624 +++ c3000_test/linux/net/ipv4/tcp_minisocks.c   2004-12-20 22:56:21.000000000 +0900
1625 @@ -440,6 +440,9 @@
1626  {
1627         struct tcp_tw_bucket *tw;
1628         int killed = 0;
1629 +#if LOWLATENCY_NEEDED
1630 +       int max_killed = 0;
1631 +#endif
1632  
1633         /* NOTE: compare this to previous version where lock
1634          * was released after detaching chain. It was racy,
1635 @@ -453,6 +456,13 @@
1636                 goto out;
1637  
1638         while((tw = tcp_tw_death_row[tcp_tw_death_row_slot]) != NULL) {
1639 +#if LOWLATENCY_NEEDED
1640 +               /* This loop takes ~6 usecs per iteration. */
1641 +               if (killed > 100) {
1642 +                       max_killed = 1;
1643 +                       break;
1644 +               }
1645 +#endif
1646                 tcp_tw_death_row[tcp_tw_death_row_slot] = tw->next_death;
1647                 tw->pprev_death = NULL;
1648                 spin_unlock(&tw_death_lock);
1649 @@ -463,12 +473,24 @@
1650                 killed++;
1651  
1652                 spin_lock(&tw_death_lock);
1653 +
1654 +       }
1655 +
1656 +#if LOWLATENCY_NEEDED
1657 +       if (max_killed) {       /* More to do: do it soon */
1658 +               mod_timer(&tcp_tw_timer, jiffies+2);
1659 +               tcp_tw_count -= killed;
1660 +       }
1661 +       else
1662 +#endif
1663 +       {
1664 +               tcp_tw_death_row_slot =
1665 +                       ((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
1666 +
1667 +               if ((tcp_tw_count -= killed) != 0)
1668 +                       mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
1669         }
1670 -       tcp_tw_death_row_slot =
1671 -               ((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
1672  
1673 -       if ((tcp_tw_count -= killed) != 0)
1674 -               mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
1675         net_statistics[smp_processor_id()*2].TimeWaited += killed;
1676  out:
1677         spin_unlock(&tw_death_lock);
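Going by the patch's own estimate of ~6 usecs per iteration, the cap of 100 buckets bounds one run of the time-wait reaper to roughly 100 x 6 usecs, about 0.6 ms. When the cap is hit, tcp_tw_count is still reduced by the buckets actually reaped, the death-row slot is left unadvanced, and the timer is re-armed at jiffies + 2 (about 20 ms with the usual HZ=100 on this kernel) instead of waiting a full TCP_TWKILL_PERIOD, so the remaining work is deferred rather than dropped.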