pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge branches 'timers/new-apis', 'timers/ntp' and 'timers/urgent' into timers/core
author Ingo Molnar <mingo@elte.hu>
Thu, 26 Mar 2009 14:45:52 +0000 (15:45 +0100)
committer Ingo Molnar <mingo@elte.hu>
Thu, 26 Mar 2009 14:45:52 +0000 (15:45 +0100)
arch/powerpc/platforms/cell/spufs/sched.c
drivers/infiniband/hw/ipath/ipath_driver.c
include/linux/timer.h
include/linux/timex.h
kernel/posix-cpu-timers.c
kernel/relay.c
kernel/time/ntp.c
kernel/timer.c

index 6a0ad196aeb348243e904aedba3804c0081e431e..f085369301b13ced53dce0b48bd46f8f9ae9eb19 100644 (file)
@@ -508,7 +508,7 @@ static void __spu_add_to_rq(struct spu_context *ctx)
                list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
                set_bit(ctx->prio, spu_prio->bitmap);
                if (!spu_prio->nr_waiting++)
-                       __mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+                       mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
        }
 }
 
index 69c0ce321b4e7cecf4b475bd230cc5bda2614245..cb9daa6ac029d44a6cadd88057c9d3ddef57e020 100644 (file)
@@ -2715,7 +2715,7 @@ static void ipath_hol_signal_up(struct ipath_devdata *dd)
  * to prevent HoL blocking, then start the HoL timer that
  * periodically continues, then stop procs, so they can detect
  * link down if they want, and do something about it.
- * Timer may already be running, so use __mod_timer, not add_timer.
+ * Timer may already be running, so use mod_timer, not add_timer.
  */
 void ipath_hol_down(struct ipath_devdata *dd)
 {
@@ -2724,7 +2724,7 @@ void ipath_hol_down(struct ipath_devdata *dd)
        dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
        dd->ipath_hol_timer.expires = jiffies +
                msecs_to_jiffies(ipath_hol_timeout_ms);
-       __mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
+       mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
 }
 
 /*
@@ -2763,7 +2763,7 @@ void ipath_hol_event(unsigned long opaque)
        else {
                dd->ipath_hol_timer.expires = jiffies +
                        msecs_to_jiffies(ipath_hol_timeout_ms);
-               __mod_timer(&dd->ipath_hol_timer,
+               mod_timer(&dd->ipath_hol_timer,
                        dd->ipath_hol_timer.expires);
        }
 }
index daf9685b861c05143b17c529b1b72e77aa49c4ee..e2d662e3416e38026bbc7385f744481de1b7d0a0 100644 (file)
@@ -86,8 +86,8 @@ static inline int timer_pending(const struct timer_list * timer)
 
 extern void add_timer_on(struct timer_list *timer, int cpu);
 extern int del_timer(struct timer_list * timer);
-extern int __mod_timer(struct timer_list *timer, unsigned long expires);
 extern int mod_timer(struct timer_list *timer, unsigned long expires);
+extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
 
 /*
  * The jiffies value which is added to now, when there is no timer
@@ -146,25 +146,7 @@ static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
 }
 #endif
 
-/**
- * add_timer - start a timer
- * @timer: the timer to be added
- *
- * The kernel will do a ->function(->data) callback from the
- * timer interrupt at the ->expires point in the future. The
- * current time is 'jiffies'.
- *
- * The timer's ->expires, ->function (and if the handler uses it, ->data)
- * fields must be set prior calling this function.
- *
- * Timers with an ->expires field in the past will be executed in the next
- * timer tick.
- */
-static inline void add_timer(struct timer_list *timer)
-{
-       BUG_ON(timer_pending(timer));
-       __mod_timer(timer, timer->expires);
-}
+extern void add_timer(struct timer_list *timer);
 
 #ifdef CONFIG_SMP
   extern int try_to_del_timer_sync(struct timer_list *timer);
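
The include/linux/timer.h hunk above drops __mod_timer() from the public API, adds mod_timer_pending(), and turns add_timer() into an out-of-line function; the semantics of the new helper are spelled out in the kernel/timer.c hunk near the end of this page. As orientation, here is a minimal hypothetical module sketch (not part of this commit; demo_timer and demo_timeout are made-up names, and the 2.6.29-era unsigned-long callback signature is assumed) showing how the three activation helpers differ:

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;

static void demo_timeout(unsigned long data)
{
        /* Runs in softirq context once the timer expires. */
        printk(KERN_INFO "demo timer fired\n");
}

static int __init demo_init(void)
{
        setup_timer(&demo_timer, demo_timeout, 0);

        /* add_timer(): the timer must not already be pending. */
        demo_timer.expires = jiffies + HZ;
        add_timer(&demo_timer);

        /* mod_timer(): (re)activates the timer whether or not it is pending. */
        mod_timer(&demo_timer, jiffies + 2 * HZ);

        /*
         * mod_timer_pending(): only adjusts the expiry while the timer is
         * still pending; a timer that already fired or was deleted stays
         * inactive, so a concurrent del_timer() is not undone.
         */
        mod_timer_pending(&demo_timer, jiffies + 3 * HZ);

        return 0;
}

static void __exit demo_exit(void)
{
        del_timer_sync(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
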
index 998a55d80acf1b1c62bd8d91755448f9a0edebd6..aa3475fcff643a3d2b32fddff911b66e88eea48e 100644 (file)
@@ -190,7 +190,7 @@ struct timex {
  * offset and maximum frequency tolerance.
  */
 #define SHIFT_USEC 16          /* frequency offset scale (shift) */
-#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
+#define PPM_SCALE ((s64)NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
 #define PPM_SCALE_INV_SHIFT 19
 #define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
                       PPM_SCALE + 1)
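
The include/linux/timex.h change above widens PPM_SCALE to a 64-bit constant, which lets call sites such as time_freq = txc->freq * PPM_SCALE (seen later in the ntp.c hunk) drop their explicit (s64) casts without risking a 32-bit overflow of the product where long is 32 bits. A hypothetical userspace demonstration of the difference, with the relevant kernel constants mirrored locally:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC   1000
#define NTP_SCALE_SHIFT 32
#define SHIFT_USEC      16

/* Old definition: plain integer arithmetic (32-bit on 32-bit kernels). */
#define PPM_SCALE_OLD (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
/* New definition: the cast promotes every later multiplication to 64-bit. */
#define PPM_SCALE_NEW ((int64_t)NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))

int main(void)
{
        int32_t freq = 500 << SHIFT_USEC;  /* +500 ppm, scaled as adjtimex expects */

        /* What a 32-bit multiply would keep: only the low 32 bits (here 0). */
        int32_t truncated = (int32_t)((int64_t)freq * PPM_SCALE_OLD);
        /* What the kernel actually needs: the full 64-bit product. */
        int64_t full = freq * PPM_SCALE_NEW;

        printf("32-bit view of freq * PPM_SCALE: %d\n", truncated);
        printf("64-bit view of freq * PPM_SCALE: %lld\n", (long long)full);
        return 0;
}
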
index e976e505648d2cf9d8e94085bcce6f1ec16afd8b..8e5d9a68b0222f0c028c42c059a39a736c636dba 100644 (file)
@@ -1370,7 +1370,8 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
        }
-       return 0;
+
+       return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
 }
 
 /*
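
The kernel/posix-cpu-timers.c hunk above changes the fast path so that it no longer claims "nothing expired" unconditionally: fastpath_timer_check() now also returns nonzero whenever the process has a finite RLIMIT_CPU, presumably so the slower checking path still runs and the limit keeps being enforced even when no CPU timer expiry is cached. For reference, the user-visible behaviour that depends on this limit can be exercised with a hypothetical userspace test that burns CPU until the soft limit delivers SIGXCPU:

#include <stdio.h>
#include <signal.h>
#include <sys/resource.h>

static volatile sig_atomic_t got_xcpu;

static void on_xcpu(int sig)
{
        (void)sig;
        got_xcpu = 1;
}

int main(void)
{
        struct rlimit rl = { .rlim_cur = 1, .rlim_max = 2 };    /* 1s soft, 2s hard */

        signal(SIGXCPU, on_xcpu);
        if (setrlimit(RLIMIT_CPU, &rl) != 0) {
                perror("setrlimit");
                return 1;
        }

        while (!got_xcpu)
                ;       /* consume CPU time until the soft limit is hit */

        printf("SIGXCPU after roughly one second of CPU time\n");
        return 0;
}
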
index 9d79b7854fa6f98ee3595615798188ccf2f482fc..8f2179c8056ff9f9de0b2679588d86589393b508 100644 (file)
@@ -750,7 +750,7 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
                         * from the scheduler (trying to re-grab
                         * rq->lock), so defer it.
                         */
-                       __mod_timer(&buf->timer, jiffies + 1);
+                       mod_timer(&buf->timer, jiffies + 1);
        }
 
        old = buf->data;
index f5f793d924151736d92034ea0accd37fb354eef3..7fc64375ff43350ce59cb3a1d0db220c57d79a46 100644 (file)
 /*
- * linux/kernel/time/ntp.c
- *
  * NTP state machine interfaces and logic.
  *
  * This code was mainly moved from kernel/timer.c and kernel/time.c
  * Please see those files for relevant copyright info and historical
  * changelogs.
  */
-
-#include <linux/mm.h>
-#include <linux/time.h>
-#include <linux/timex.h>
-#include <linux/jiffies.h>
-#include <linux/hrtimer.h>
 #include <linux/capability.h>
-#include <linux/math64.h>
 #include <linux/clocksource.h>
 #include <linux/workqueue.h>
-#include <asm/timex.h>
+#include <linux/hrtimer.h>
+#include <linux/jiffies.h>
+#include <linux/math64.h>
+#include <linux/timex.h>
+#include <linux/time.h>
+#include <linux/mm.h>
 
 /*
- * Timekeeping variables
+ * NTP timekeeping variables:
  */
-unsigned long tick_usec = TICK_USEC;           /* USER_HZ period (usec) */
-unsigned long tick_nsec;                       /* ACTHZ period (nsec) */
-u64 tick_length;
-static u64 tick_length_base;
 
-static struct hrtimer leap_timer;
+/* USER_HZ period (usecs): */
+unsigned long                  tick_usec = TICK_USEC;
 
-#define MAX_TICKADJ            500             /* microsecs */
-#define MAX_TICKADJ_SCALED     (((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \
-                                 NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
+/* ACTHZ period (nsecs): */
+unsigned long                  tick_nsec;
+
+u64                            tick_length;
+static u64                     tick_length_base;
+
+static struct hrtimer          leap_timer;
+
+#define MAX_TICKADJ            500LL           /* usecs */
+#define MAX_TICKADJ_SCALED \
+       (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
 
 /*
  * phase-lock loop variables
  */
-/* TIME_ERROR prevents overwriting the CMOS clock */
-static int time_state = TIME_OK;       /* clock synchronization status */
-int time_status = STA_UNSYNC;          /* clock status bits            */
-static long time_tai;                  /* TAI offset (s)               */
-static s64 time_offset;                        /* time adjustment (ns)         */
-static long time_constant = 2;         /* pll time constant            */
-long time_maxerror = NTP_PHASE_LIMIT;  /* maximum error (us)           */
-long time_esterror = NTP_PHASE_LIMIT;  /* estimated error (us)         */
-static s64 time_freq;                  /* frequency offset (scaled ns/s)*/
-static long time_reftime;              /* time at last adjustment (s)  */
-long time_adjust;
-static long ntp_tick_adj;
 
+/*
+ * clock synchronization status
+ *
+ * (TIME_ERROR prevents overwriting the CMOS clock)
+ */
+static int                     time_state = TIME_OK;
+
+/* clock status bits:                                                  */
+int                            time_status = STA_UNSYNC;
+
+/* TAI offset (secs):                                                  */
+static long                    time_tai;
+
+/* time adjustment (nsecs):                                            */
+static s64                     time_offset;
+
+/* pll time constant:                                                  */
+static long                    time_constant = 2;
+
+/* maximum error (usecs):                                              */
+long                           time_maxerror = NTP_PHASE_LIMIT;
+
+/* estimated error (usecs):                                            */
+long                           time_esterror = NTP_PHASE_LIMIT;
+
+/* frequency offset (scaled nsecs/secs):                               */
+static s64                     time_freq;
+
+/* time at last adjustment (secs):                                     */
+static long                    time_reftime;
+
+long                           time_adjust;
+
+/* constant (boot-param configurable) NTP tick adjustment (upscaled)   */
+static s64                     ntp_tick_adj;
+
+/*
+ * NTP methods:
+ */
+
+/*
+ * Update (tick_length, tick_length_base, tick_nsec), based
+ * on (tick_usec, ntp_tick_adj, time_freq):
+ */
 static void ntp_update_frequency(void)
 {
-       u64 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
-                               << NTP_SCALE_SHIFT;
-       second_length += (s64)ntp_tick_adj << NTP_SCALE_SHIFT;
-       second_length += time_freq;
+       u64 second_length;
+       u64 new_base;
+
+       second_length            = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
+                                               << NTP_SCALE_SHIFT;
+
+       second_length           += ntp_tick_adj;
+       second_length           += time_freq;
 
-       tick_length_base = second_length;
+       tick_nsec                = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
+       new_base                 = div_u64(second_length, NTP_INTERVAL_FREQ);
 
-       tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
-       tick_length_base = div_u64(tick_length_base, NTP_INTERVAL_FREQ);
+       /*
+        * Don't wait for the next second_overflow, apply
+        * the change to the tick length immediately:
+        */
+       tick_length             += new_base - tick_length_base;
+       tick_length_base         = new_base;
+}
+
+static inline s64 ntp_update_offset_fll(s64 offset64, long secs)
+{
+       time_status &= ~STA_MODE;
+
+       if (secs < MINSEC)
+               return 0;
+
+       if (!(time_status & STA_FLL) && (secs <= MAXSEC))
+               return 0;
+
+       time_status |= STA_MODE;
+
+       return div_s64(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
 }
 
 static void ntp_update_offset(long offset)
 {
-       long mtemp;
        s64 freq_adj;
+       s64 offset64;
+       long secs;
 
        if (!(time_status & STA_PLL))
                return;
@@ -84,24 +142,23 @@ static void ntp_update_offset(long offset)
         * Select how the frequency is to be controlled
         * and in which mode (PLL or FLL).
         */
-       if (time_status & STA_FREQHOLD || time_reftime == 0)
-               time_reftime = xtime.tv_sec;
-       mtemp = xtime.tv_sec - time_reftime;
+       secs = xtime.tv_sec - time_reftime;
+       if (unlikely(time_status & STA_FREQHOLD))
+               secs = 0;
+
        time_reftime = xtime.tv_sec;
 
-       freq_adj = (s64)offset * mtemp;
-       freq_adj <<= NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant);
-       time_status &= ~STA_MODE;
-       if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
-               freq_adj += div_s64((s64)offset << (NTP_SCALE_SHIFT - SHIFT_FLL),
-                                   mtemp);
-               time_status |= STA_MODE;
-       }
-       freq_adj += time_freq;
-       freq_adj = min(freq_adj, MAXFREQ_SCALED);
-       time_freq = max(freq_adj, -MAXFREQ_SCALED);
+       offset64    = offset;
+       freq_adj    = (offset64 * secs) <<
+                       (NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant));
 
-       time_offset = div_s64((s64)offset << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
+       freq_adj    += ntp_update_offset_fll(offset64, secs);
+
+       freq_adj    = min(freq_adj + time_freq, MAXFREQ_SCALED);
+
+       time_freq   = max(freq_adj, -MAXFREQ_SCALED);
+
+       time_offset = div_s64(offset64 << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
 }
 
 /**
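
The rewritten ntp_update_frequency() above builds one NTP-scaled second from tick_usec, adds the (now pre-scaled) ntp_tick_adj and the NTP frequency correction time_freq, derives tick_nsec and the per-interval base, and folds the difference into tick_length immediately instead of waiting for the next second_overflow(). A hypothetical userspace re-computation of that arithmetic, assuming USER_HZ=100, HZ=250 and NTP_INTERVAL_FREQ equal to HZ, with the kernel constants mirrored locally:

#include <stdio.h>
#include <stdint.h>

#define USER_HZ           100
#define HZ                250
#define NSEC_PER_USEC     1000ULL
#define NTP_SCALE_SHIFT   32
#define NTP_INTERVAL_FREQ HZ                    /* assumed: one interval per tick */
#define TICK_USEC         ((1000000UL + USER_HZ / 2) / USER_HZ)

int main(void)
{
        uint64_t tick_usec    = TICK_USEC;      /* 10000 usecs */
        int64_t  ntp_tick_adj = 0;              /* boot default, already upscaled */
        int64_t  time_freq    = 0;              /* no NTP correction yet */

        /* One second, upscaled by 2^NTP_SCALE_SHIFT for sub-nanosecond resolution. */
        uint64_t second_length = (tick_usec * NSEC_PER_USEC * USER_HZ)
                                                << NTP_SCALE_SHIFT;
        second_length += ntp_tick_adj;
        second_length += time_freq;

        uint64_t tick_nsec        = (second_length / HZ) >> NTP_SCALE_SHIFT;
        uint64_t tick_length_base = second_length / NTP_INTERVAL_FREQ;

        printf("tick_nsec        = %llu ns\n", (unsigned long long)tick_nsec);
        printf("tick_length_base = %llu (ns << %d)\n",
               (unsigned long long)tick_length_base, NTP_SCALE_SHIFT);
        return 0;
}
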
@@ -111,15 +168,15 @@ static void ntp_update_offset(long offset)
  */
 void ntp_clear(void)
 {
-       time_adjust = 0;                /* stop active adjtime() */
-       time_status |= STA_UNSYNC;
-       time_maxerror = NTP_PHASE_LIMIT;
-       time_esterror = NTP_PHASE_LIMIT;
+       time_adjust     = 0;            /* stop active adjtime() */
+       time_status     |= STA_UNSYNC;
+       time_maxerror   = NTP_PHASE_LIMIT;
+       time_esterror   = NTP_PHASE_LIMIT;
 
        ntp_update_frequency();
 
-       tick_length = tick_length_base;
-       time_offset = 0;
+       tick_length     = tick_length_base;
+       time_offset     = 0;
 }
 
 /*
@@ -140,8 +197,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
                xtime.tv_sec--;
                wall_to_monotonic.tv_sec++;
                time_state = TIME_OOP;
-               printk(KERN_NOTICE "Clock: "
-                      "inserting leap second 23:59:60 UTC\n");
+               printk(KERN_NOTICE
+                       "Clock: inserting leap second 23:59:60 UTC\n");
                hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
                res = HRTIMER_RESTART;
                break;
@@ -150,8 +207,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
                time_tai--;
                wall_to_monotonic.tv_sec--;
                time_state = TIME_WAIT;
-               printk(KERN_NOTICE "Clock: "
-                      "deleting leap second 23:59:59 UTC\n");
+               printk(KERN_NOTICE
+                       "Clock: deleting leap second 23:59:59 UTC\n");
                break;
        case TIME_OOP:
                time_tai++;
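
The two hunks above only reflow the printk() calls in ntp_leap_second(), but they sit next to the logic that matters: the leap timer is armed for the UTC day boundary by ntp_start_leap_timer(), which appears later in this diff and rounds "now" up with 86400-second modular arithmetic. A hypothetical userspace check of that rounding, using the 2008/2009 leap-second date:

#include <stdio.h>
#include <time.h>

int main(void)
{
        time_t now = 1230724800;        /* 2008-12-31 12:00:00 UTC */
        char buf[64];

        /* STA_INS: fire at the next midnight, where 23:59:60 gets inserted. */
        time_t ins = now + (86400 - now % 86400);
        /* STA_DEL: fire at 23:59:59, the second that gets skipped. */
        time_t del = now + (86400 - (now + 1) % 86400);

        strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", gmtime(&ins));
        printf("TIME_INS timer: %ld (%s UTC)\n", (long)ins, buf);
        strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", gmtime(&del));
        printf("TIME_DEL timer: %ld (%s UTC)\n", (long)del, buf);
        return 0;
}
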
@@ -179,7 +236,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
  */
 void second_overflow(void)
 {
-       s64 time_adj;
+       s64 delta;
 
        /* Bump the maxerror field */
        time_maxerror += MAXFREQ / NSEC_PER_USEC;
@@ -192,24 +249,30 @@ void second_overflow(void)
         * Compute the phase adjustment for the next second. The offset is
         * reduced by a fixed factor times the time constant.
         */
-       tick_length = tick_length_base;
-       time_adj = shift_right(time_offset, SHIFT_PLL + time_constant);
-       time_offset -= time_adj;
-       tick_length += time_adj;
-
-       if (unlikely(time_adjust)) {
-               if (time_adjust > MAX_TICKADJ) {
-                       time_adjust -= MAX_TICKADJ;
-                       tick_length += MAX_TICKADJ_SCALED;
-               } else if (time_adjust < -MAX_TICKADJ) {
-                       time_adjust += MAX_TICKADJ;
-                       tick_length -= MAX_TICKADJ_SCALED;
-               } else {
-                       tick_length += (s64)(time_adjust * NSEC_PER_USEC /
-                                       NTP_INTERVAL_FREQ) << NTP_SCALE_SHIFT;
-                       time_adjust = 0;
-               }
+       tick_length      = tick_length_base;
+
+       delta            = shift_right(time_offset, SHIFT_PLL + time_constant);
+       time_offset     -= delta;
+       tick_length     += delta;
+
+       if (!time_adjust)
+               return;
+
+       if (time_adjust > MAX_TICKADJ) {
+               time_adjust -= MAX_TICKADJ;
+               tick_length += MAX_TICKADJ_SCALED;
+               return;
        }
+
+       if (time_adjust < -MAX_TICKADJ) {
+               time_adjust += MAX_TICKADJ;
+               tick_length -= MAX_TICKADJ_SCALED;
+               return;
+       }
+
+       tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
+                                                        << NTP_SCALE_SHIFT;
+       time_adjust = 0;
 }
 
 #ifdef CONFIG_GENERIC_CMOS_UPDATE
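
The second_overflow() rework above trades the nested if/else for early returns without changing behaviour: an outstanding adjtime() offset held in time_adjust is drained at most MAX_TICKADJ (500) microseconds per second by adjusting tick_length. A small hypothetical simulation of that draining for a +2.3 ms request:

#include <stdio.h>

#define MAX_TICKADJ 500L        /* usecs per second, as in the hunk above */

int main(void)
{
        long time_adjust = 2300;        /* e.g. adjtime() asked for +2.3 ms */
        int second = 0;

        while (time_adjust) {
                long step;

                if (time_adjust > MAX_TICKADJ)
                        step = MAX_TICKADJ;
                else if (time_adjust < -MAX_TICKADJ)
                        step = -MAX_TICKADJ;
                else
                        step = time_adjust;

                time_adjust -= step;
                printf("second %d: slew %+ld us, %ld us left\n",
                       ++second, step, time_adjust);
        }
        return 0;
}
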
@@ -233,12 +296,13 @@ static void sync_cmos_clock(struct work_struct *work)
         * This code is run on a timer.  If the clock is set, that timer
         * may not expire at the correct time.  Thus, we adjust...
         */
-       if (!ntp_synced())
+       if (!ntp_synced()) {
                /*
                 * Not synced, exit, do not restart a timer (if one is
                 * running, let it run out).
                 */
                return;
+       }
 
        getnstimeofday(&now);
        if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
@@ -270,7 +334,116 @@ static void notify_cmos_timer(void)
 static inline void notify_cmos_timer(void) { }
 #endif
 
-/* adjtimex mainly allows reading (and writing, if superuser) of
+/*
+ * Start the leap seconds timer:
+ */
+static inline void ntp_start_leap_timer(struct timespec *ts)
+{
+       long now = ts->tv_sec;
+
+       if (time_status & STA_INS) {
+               time_state = TIME_INS;
+               now += 86400 - now % 86400;
+               hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
+
+               return;
+       }
+
+       if (time_status & STA_DEL) {
+               time_state = TIME_DEL;
+               now += 86400 - (now + 1) % 86400;
+               hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
+       }
+}
+
+/*
+ * Propagate a new txc->status value into the NTP state:
+ */
+static inline void process_adj_status(struct timex *txc, struct timespec *ts)
+{
+       if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
+               time_state = TIME_OK;
+               time_status = STA_UNSYNC;
+       }
+
+       /*
+        * If we turn on PLL adjustments then reset the
+        * reference time to current time.
+        */
+       if (!(time_status & STA_PLL) && (txc->status & STA_PLL))
+               time_reftime = xtime.tv_sec;
+
+       /* only set allowed bits */
+       time_status &= STA_RONLY;
+       time_status |= txc->status & ~STA_RONLY;
+
+       switch (time_state) {
+       case TIME_OK:
+               ntp_start_leap_timer(ts);
+               break;
+       case TIME_INS:
+       case TIME_DEL:
+               time_state = TIME_OK;
+               ntp_start_leap_timer(ts);
+       case TIME_WAIT:
+               if (!(time_status & (STA_INS | STA_DEL)))
+                       time_state = TIME_OK;
+               break;
+       case TIME_OOP:
+               hrtimer_restart(&leap_timer);
+               break;
+       }
+}
+/*
+ * Called with the xtime lock held, so we can access and modify
+ * all the global NTP state:
+ */
+static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts)
+{
+       if (txc->modes & ADJ_STATUS)
+               process_adj_status(txc, ts);
+
+       if (txc->modes & ADJ_NANO)
+               time_status |= STA_NANO;
+
+       if (txc->modes & ADJ_MICRO)
+               time_status &= ~STA_NANO;
+
+       if (txc->modes & ADJ_FREQUENCY) {
+               time_freq = txc->freq * PPM_SCALE;
+               time_freq = min(time_freq, MAXFREQ_SCALED);
+               time_freq = max(time_freq, -MAXFREQ_SCALED);
+       }
+
+       if (txc->modes & ADJ_MAXERROR)
+               time_maxerror = txc->maxerror;
+
+       if (txc->modes & ADJ_ESTERROR)
+               time_esterror = txc->esterror;
+
+       if (txc->modes & ADJ_TIMECONST) {
+               time_constant = txc->constant;
+               if (!(time_status & STA_NANO))
+                       time_constant += 4;
+               time_constant = min(time_constant, (long)MAXTC);
+               time_constant = max(time_constant, 0l);
+       }
+
+       if (txc->modes & ADJ_TAI && txc->constant > 0)
+               time_tai = txc->constant;
+
+       if (txc->modes & ADJ_OFFSET)
+               ntp_update_offset(txc->offset);
+
+       if (txc->modes & ADJ_TICK)
+               tick_usec = txc->tick;
+
+       if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
+               ntp_update_frequency();
+}
+
+/*
+ * adjtimex mainly allows reading (and writing, if superuser) of
  * kernel time-keeping variables. used by xntpd.
  */
 int do_adjtimex(struct timex *txc)
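
The large hunk above factors the mode handling out of do_adjtimex() into ntp_start_leap_timer(), process_adj_status() and process_adjtimex_modes(); the mode bits themselves still come from userspace through adjtimex(2). A hypothetical caller exercising a read-only query plus one of the handled modes (the write path needs CAP_SYS_TIME, as the capable() check in do_adjtimex() shows):

#include <stdio.h>
#include <string.h>
#include <sys/timex.h>

int main(void)
{
        struct timex txc;
        int state;

        /* Read-only query: modes == 0 never needs privileges. */
        memset(&txc, 0, sizeof(txc));
        state = adjtimex(&txc);
        printf("state=%d freq=%ld offset=%ld status=0x%x\n",
               state, txc.freq, txc.offset, txc.status);

        /* Ask for a +10 ppm frequency correction (freq is ppm scaled by 2^16). */
        memset(&txc, 0, sizeof(txc));
        txc.modes = ADJ_FREQUENCY;
        txc.freq  = 10 << 16;
        if (adjtimex(&txc) < 0)
                perror("adjtimex(ADJ_FREQUENCY)");

        return 0;
}
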
@@ -291,11 +464,14 @@ int do_adjtimex(struct timex *txc)
                 if (txc->modes && !capable(CAP_SYS_TIME))
                        return -EPERM;
 
-               /* if the quartz is off by more than 10% something is VERY wrong! */
+               /*
+                * if the quartz is off by more than 10% then
+                * something is VERY wrong!
+                */
                if (txc->modes & ADJ_TICK &&
                    (txc->tick <  900000/USER_HZ ||
                     txc->tick > 1100000/USER_HZ))
-                               return -EINVAL;
+                       return -EINVAL;
 
                if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
                        hrtimer_cancel(&leap_timer);
@@ -305,7 +481,6 @@ int do_adjtimex(struct timex *txc)
 
        write_seqlock_irq(&xtime_lock);
 
-       /* If there are input parameters, then process them */
        if (txc->modes & ADJ_ADJTIME) {
                long save_adjust = time_adjust;
 
@@ -315,98 +490,24 @@ int do_adjtimex(struct timex *txc)
                        ntp_update_frequency();
                }
                txc->offset = save_adjust;
-               goto adj_done;
-       }
-       if (txc->modes) {
-               long sec;
-
-               if (txc->modes & ADJ_STATUS) {
-                       if ((time_status & STA_PLL) &&
-                           !(txc->status & STA_PLL)) {
-                               time_state = TIME_OK;
-                               time_status = STA_UNSYNC;
-                       }
-                       /* only set allowed bits */
-                       time_status &= STA_RONLY;
-                       time_status |= txc->status & ~STA_RONLY;
-
-                       switch (time_state) {
-                       case TIME_OK:
-                       start_timer:
-                               sec = ts.tv_sec;
-                               if (time_status & STA_INS) {
-                                       time_state = TIME_INS;
-                                       sec += 86400 - sec % 86400;
-                                       hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
-                               } else if (time_status & STA_DEL) {
-                                       time_state = TIME_DEL;
-                                       sec += 86400 - (sec + 1) % 86400;
-                                       hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
-                               }
-                               break;
-                       case TIME_INS:
-                       case TIME_DEL:
-                               time_state = TIME_OK;
-                               goto start_timer;
-                               break;
-                       case TIME_WAIT:
-                               if (!(time_status & (STA_INS | STA_DEL)))
-                                       time_state = TIME_OK;
-                               break;
-                       case TIME_OOP:
-                               hrtimer_restart(&leap_timer);
-                               break;
-                       }
-               }
-
-               if (txc->modes & ADJ_NANO)
-                       time_status |= STA_NANO;
-               if (txc->modes & ADJ_MICRO)
-                       time_status &= ~STA_NANO;
-
-               if (txc->modes & ADJ_FREQUENCY) {
-                       time_freq = (s64)txc->freq * PPM_SCALE;
-                       time_freq = min(time_freq, MAXFREQ_SCALED);
-                       time_freq = max(time_freq, -MAXFREQ_SCALED);
-               }
-
-               if (txc->modes & ADJ_MAXERROR)
-                       time_maxerror = txc->maxerror;
-               if (txc->modes & ADJ_ESTERROR)
-                       time_esterror = txc->esterror;
-
-               if (txc->modes & ADJ_TIMECONST) {
-                       time_constant = txc->constant;
-                       if (!(time_status & STA_NANO))
-                               time_constant += 4;
-                       time_constant = min(time_constant, (long)MAXTC);
-                       time_constant = max(time_constant, 0l);
-               }
-
-               if (txc->modes & ADJ_TAI && txc->constant > 0)
-                       time_tai = txc->constant;
-
-               if (txc->modes & ADJ_OFFSET)
-                       ntp_update_offset(txc->offset);
-               if (txc->modes & ADJ_TICK)
-                       tick_usec = txc->tick;
+       } else {
 
-               if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
-                       ntp_update_frequency();
-       }
+               /* If there are input parameters, then process them: */
+               if (txc->modes)
+                       process_adjtimex_modes(txc, &ts);
 
-       txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
+               txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
                                  NTP_SCALE_SHIFT);
-       if (!(time_status & STA_NANO))
-               txc->offset /= NSEC_PER_USEC;
+               if (!(time_status & STA_NANO))
+                       txc->offset /= NSEC_PER_USEC;
+       }
 
-adj_done:
        result = time_state;    /* mostly `TIME_OK' */
        if (time_status & (STA_UNSYNC|STA_CLOCKERR))
                result = TIME_ERROR;
 
        txc->freq          = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
-                                        (s64)PPM_SCALE_INV, NTP_SCALE_SHIFT);
+                                        PPM_SCALE_INV, NTP_SCALE_SHIFT);
        txc->maxerror      = time_maxerror;
        txc->esterror      = time_esterror;
        txc->status        = time_status;
@@ -425,6 +526,7 @@ adj_done:
        txc->calcnt        = 0;
        txc->errcnt        = 0;
        txc->stbcnt        = 0;
+
        write_sequnlock_irq(&xtime_lock);
 
        txc->time.tv_sec = ts.tv_sec;
@@ -440,6 +542,8 @@ adj_done:
 static int __init ntp_tick_adj_setup(char *str)
 {
        ntp_tick_adj = simple_strtol(str, NULL, 0);
+       ntp_tick_adj <<= NTP_SCALE_SHIFT;
+
        return 1;
 }
 
index 13dd64fe143db216a31e750d66bb29d07c932ba3..9b77fc9a9ac8286074b1784771dac7c0eb2a08e2 100644 (file)
@@ -589,11 +589,14 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
        }
 }
 
-int __mod_timer(struct timer_list *timer, unsigned long expires)
+static inline int
+__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
        struct tvec_base *base, *new_base;
        unsigned long flags;
-       int ret = 0;
+       int ret;
+
+       ret = 0;
 
        timer_stats_timer_set_start_info(timer);
        BUG_ON(!timer->function);
@@ -603,6 +606,9 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
        if (timer_pending(timer)) {
                detach_timer(timer, 0);
                ret = 1;
+       } else {
+               if (pending_only)
+                       goto out_unlock;
        }
 
        debug_timer_activate(timer);
@@ -629,42 +635,28 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 
        timer->expires = expires;
        internal_add_timer(base, timer);
+
+out_unlock:
        spin_unlock_irqrestore(&base->lock, flags);
 
        return ret;
 }
 
-EXPORT_SYMBOL(__mod_timer);
-
 /**
- * add_timer_on - start a timer on a particular CPU
- * @timer: the timer to be added
- * @cpu: the CPU to start it on
+ * mod_timer_pending - modify a pending timer's timeout
+ * @timer: the pending timer to be modified
+ * @expires: new timeout in jiffies
  *
- * This is not very scalable on SMP. Double adds are not possible.
+ * mod_timer_pending() is the same for pending timers as mod_timer(),
+ * but will not re-activate and modify already deleted timers.
+ *
+ * It is useful for unserialized use of timers.
  */
-void add_timer_on(struct timer_list *timer, int cpu)
+int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 {
-       struct tvec_base *base = per_cpu(tvec_bases, cpu);
-       unsigned long flags;
-
-       timer_stats_timer_set_start_info(timer);
-       BUG_ON(timer_pending(timer) || !timer->function);
-       spin_lock_irqsave(&base->lock, flags);
-       timer_set_base(timer, base);
-       debug_timer_activate(timer);
-       internal_add_timer(base, timer);
-       /*
-        * Check whether the other CPU is idle and needs to be
-        * triggered to reevaluate the timer wheel when nohz is
-        * active. We are protected against the other CPU fiddling
-        * with the timer by holding the timer base lock. This also
-        * makes sure that a CPU on the way to idle can not evaluate
-        * the timer wheel.
-        */
-       wake_up_idle_cpu(cpu);
-       spin_unlock_irqrestore(&base->lock, flags);
+       return __mod_timer(timer, expires, true);
 }
+EXPORT_SYMBOL(mod_timer_pending);
 
 /**
  * mod_timer - modify a timer's timeout
@@ -688,9 +680,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
  */
 int mod_timer(struct timer_list *timer, unsigned long expires)
 {
-       BUG_ON(!timer->function);
-
-       timer_stats_timer_set_start_info(timer);
        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
@@ -699,11 +688,61 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
        if (timer->expires == expires && timer_pending(timer))
                return 1;
 
-       return __mod_timer(timer, expires);
+       return __mod_timer(timer, expires, false);
 }
-
 EXPORT_SYMBOL(mod_timer);
 
+/**
+ * add_timer - start a timer
+ * @timer: the timer to be added
+ *
+ * The kernel will do a ->function(->data) callback from the
+ * timer interrupt at the ->expires point in the future. The
+ * current time is 'jiffies'.
+ *
+ * The timer's ->expires, ->function (and if the handler uses it, ->data)
+ * fields must be set prior calling this function.
+ *
+ * Timers with an ->expires field in the past will be executed in the next
+ * timer tick.
+ */
+void add_timer(struct timer_list *timer)
+{
+       BUG_ON(timer_pending(timer));
+       mod_timer(timer, timer->expires);
+}
+EXPORT_SYMBOL(add_timer);
+
+/**
+ * add_timer_on - start a timer on a particular CPU
+ * @timer: the timer to be added
+ * @cpu: the CPU to start it on
+ *
+ * This is not very scalable on SMP. Double adds are not possible.
+ */
+void add_timer_on(struct timer_list *timer, int cpu)
+{
+       struct tvec_base *base = per_cpu(tvec_bases, cpu);
+       unsigned long flags;
+
+       timer_stats_timer_set_start_info(timer);
+       BUG_ON(timer_pending(timer) || !timer->function);
+       spin_lock_irqsave(&base->lock, flags);
+       timer_set_base(timer, base);
+       debug_timer_activate(timer);
+       internal_add_timer(base, timer);
+       /*
+        * Check whether the other CPU is idle and needs to be
+        * triggered to reevaluate the timer wheel when nohz is
+        * active. We are protected against the other CPU fiddling
+        * with the timer by holding the timer base lock. This also
+        * makes sure that a CPU on the way to idle can not evaluate
+        * the timer wheel.
+        */
+       wake_up_idle_cpu(cpu);
+       spin_unlock_irqrestore(&base->lock, flags);
+}
+
 /**
  * del_timer - deactive a timer.
  * @timer: the timer to be deactivated
@@ -733,7 +772,6 @@ int del_timer(struct timer_list *timer)
 
        return ret;
 }
-
 EXPORT_SYMBOL(del_timer);
 
 #ifdef CONFIG_SMP
@@ -767,7 +805,6 @@ out:
 
        return ret;
 }
-
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
 /**
@@ -796,7 +833,6 @@ int del_timer_sync(struct timer_list *timer)
                cpu_relax();
        }
 }
-
 EXPORT_SYMBOL(del_timer_sync);
 #endif
 
@@ -1268,7 +1304,7 @@ signed long __sched schedule_timeout(signed long timeout)
        expire = timeout + jiffies;
 
        setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
-       __mod_timer(&timer, expire);
+       __mod_timer(&timer, expire, false);
        schedule();
        del_singleshot_timer_sync(&timer);