Rename all 'cnt' fields and variables to the less yucky 'count' name.
Yuckage noticed by Andrew Morton.
No change in generated code, other than the /proc/sched_debug "bkl_count"
string getting a bit larger:
   text    data     bss     dec     hex filename
  38236    3506      24   41766    a326 sched.o.before
  38240    3506      24   41770    a32a sched.o.after
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
        return sprintf(buffer, "%llu %llu %lu\n",
                        task->sched_info.cpu_time,
                        task->sched_info.run_delay,
-                       task->sched_info.pcnt);
+                       task->sched_info.pcount);
 }
 #endif
 
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 struct sched_info {
        /* cumulative counters */
-       unsigned long pcnt;           /* # of times run on this cpu */
+       unsigned long pcount;         /* # of times run on this cpu */
        unsigned long long cpu_time,  /* time spent on the cpu */
                           run_delay; /* time spent waiting on a runqueue */
 
                           last_queued; /* when we were last queued to run */
 #ifdef CONFIG_SCHEDSTATS
        /* BKL stats */
-       unsigned long bkl_cnt;
+       unsigned long bkl_count;
 #endif
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
 #ifdef CONFIG_SCHEDSTATS
        /* load_balance() stats */
-       unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];
+       unsigned long lb_count[CPU_MAX_IDLE_TYPES];
        unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
        unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
        unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
        unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
 
        /* Active load balancing */
-       unsigned long alb_cnt;
+       unsigned long alb_count;
        unsigned long alb_failed;
        unsigned long alb_pushed;
 
        /* SD_BALANCE_EXEC stats */
-       unsigned long sbe_cnt;
+       unsigned long sbe_count;
        unsigned long sbe_balanced;
        unsigned long sbe_pushed;
 
        /* SD_BALANCE_FORK stats */
-       unsigned long sbf_cnt;
+       unsigned long sbf_count;
        unsigned long sbf_balanced;
        unsigned long sbf_pushed;
 
 
         * No locking available for sched_info (and too expensive to add one)
         * Mitigate by taking snapshot of values
         */
-       t1 = tsk->sched_info.pcnt;
+       t1 = tsk->sched_info.pcount;
        t2 = tsk->sched_info.run_delay;
        t3 = tsk->sched_info.cpu_time;
 
 
        unsigned long yld_exp_empty;
        unsigned long yld_act_empty;
        unsigned long yld_both_empty;
-       unsigned long yld_cnt;
+       unsigned long yld_count;
 
        /* schedule() stats */
        unsigned long sched_switch;
-       unsigned long sched_cnt;
+       unsigned long sched_count;
        unsigned long sched_goidle;
 
        /* try_to_wake_up() stats */
-       unsigned long ttwu_cnt;
+       unsigned long ttwu_count;
        unsigned long ttwu_local;
 
        /* BKL stats */
-       unsigned long bkl_cnt;
+       unsigned long bkl_count;
 #endif
        struct lock_class_key rq_lock_key;
 };
 
        new_cpu = cpu;
 
-       schedstat_inc(rq, ttwu_cnt);
+       schedstat_inc(rq, ttwu_count);
        if (cpu == this_cpu) {
                schedstat_inc(rq, ttwu_local);
                goto out_set_cpu;
            !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
                sd_idle = 1;
 
-       schedstat_inc(sd, lb_cnt[idle]);
+       schedstat_inc(sd, lb_count[idle]);
 
 redo:
        group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
            !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
                sd_idle = 1;
 
-       schedstat_inc(sd, lb_cnt[CPU_NEWLY_IDLE]);
+       schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
 redo:
        group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
                                   &sd_idle, &cpus, NULL);
        }
 
        if (likely(sd)) {
-               schedstat_inc(sd, alb_cnt);
+               schedstat_inc(sd, alb_count);
 
                if (move_one_task(target_rq, target_cpu, busiest_rq,
                                  sd, CPU_IDLE))
 
        profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 
-       schedstat_inc(this_rq(), sched_cnt);
+       schedstat_inc(this_rq(), sched_count);
 #ifdef CONFIG_SCHEDSTATS
        if (unlikely(prev->lock_depth >= 0)) {
-               schedstat_inc(this_rq(), bkl_cnt);
-               schedstat_inc(prev, sched_info.bkl_cnt);
+               schedstat_inc(this_rq(), bkl_count);
+               schedstat_inc(prev, sched_info.bkl_count);
        }
 #endif
 }
 {
        struct rq *rq = this_rq_lock();
 
-       schedstat_inc(rq, yld_cnt);
+       schedstat_inc(rq, yld_count);
        current->sched_class->yield_task(rq);
 
        /*
 
        SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SCHEDSTATS
-       SEQ_printf(m, "  .%-30s: %ld\n", "bkl_cnt",
-                       rq->bkl_cnt);
+       SEQ_printf(m, "  .%-30s: %ld\n", "bkl_count",
+                       rq->bkl_count);
 #endif
        SEQ_printf(m, "  .%-30s: %ld\n", "nr_spread_over",
                        cfs_rq->nr_spread_over);
        PN(se.exec_max);
        PN(se.slice_max);
        PN(se.wait_max);
-       P(sched_info.bkl_cnt);
+       P(sched_info.bkl_count);
 #endif
        SEQ_printf(m, "%-25s:%20Ld\n",
                   "nr_switches", (long long)(p->nvcsw + p->nivcsw));
        p->se.exec_max                  = 0;
        p->se.slice_max                 = 0;
        p->se.wait_max                  = 0;
-       p->sched_info.bkl_cnt           = 0;
+       p->sched_info.bkl_count         = 0;
 #endif
        p->se.sum_exec_runtime          = 0;
        p->se.prev_sum_exec_runtime     = 0;
 
                struct rq *rq = cpu_rq(cpu);
 #ifdef CONFIG_SMP
                struct sched_domain *sd;
-               int dcnt = 0;
+               int dcount = 0;
 #endif
 
                /* runqueue-specific stats */
                seq_printf(seq,
                    "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %llu %llu %lu",
                    cpu, rq->yld_both_empty,
-                   rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
-                   rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
-                   rq->ttwu_cnt, rq->ttwu_local,
+                   rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
+                   rq->sched_switch, rq->sched_count, rq->sched_goidle,
+                   rq->ttwu_count, rq->ttwu_local,
                    rq->rq_sched_info.cpu_time,
-                   rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
+                   rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
 
                seq_printf(seq, "\n");
 
                        char mask_str[NR_CPUS];
 
                        cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
-                       seq_printf(seq, "domain%d %s", dcnt++, mask_str);
+                       seq_printf(seq, "domain%d %s", dcount++, mask_str);
                        for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
                                        itype++) {
                                seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
                                                "%lu",
-                                   sd->lb_cnt[itype],
+                                   sd->lb_count[itype],
                                    sd->lb_balanced[itype],
                                    sd->lb_failed[itype],
                                    sd->lb_imbalance[itype],
                        }
                        seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
                            " %lu %lu %lu\n",
-                           sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
-                           sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
-                           sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
+                           sd->alb_count, sd->alb_failed, sd->alb_pushed,
+                           sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
+                           sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
                            sd->ttwu_wake_remote, sd->ttwu_move_affine,
                            sd->ttwu_move_balance);
                }
 {
        if (rq) {
                rq->rq_sched_info.run_delay += delta;
-               rq->rq_sched_info.pcnt++;
+               rq->rq_sched_info.pcount++;
        }
 }
 
        sched_info_dequeued(t);
        t->sched_info.run_delay += delta;
        t->sched_info.last_arrival = now;
-       t->sched_info.pcnt++;
+       t->sched_info.pcount++;
 
        rq_sched_info_arrive(task_rq(t), delta);
 }