atomic_t count;
struct k_sigaction action[_NSIG];
spinlock_t siglock;
+ struct list_head signalfd_list;
};
struct pacct_struct {
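The new signalfd_list head chains signalfd file descriptors onto the shared signal-handling state, so signal delivery can wake any attached signalfd readers. A minimal sketch of that pattern, assuming a hypothetical per-descriptor node (the real structure lives in fs/signalfd.c); the list is only touched under siglock:

/* Hedged sketch: signalfd_watch and notify_signalfds are hypothetical
 * names, not the actual fs/signalfd.c implementation. */
struct signalfd_watch {
	struct list_head list;          /* chained on sighand->signalfd_list */
	wait_queue_head_t wqh;          /* readers blocked on this signalfd */
};

static void notify_signalfds(struct sighand_struct *sighand)
{
	struct signalfd_watch *w;

	/* caller holds sighand->siglock, which protects signalfd_list */
	list_for_each_entry(w, &sighand->signalfd_list, list)
		wake_up(&w->wqh);
}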
cputime_t utime, stime, cutime, cstime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
+ unsigned long inblock, oublock, cinblock, coublock;
/*
* Cumulative ns of scheduled CPU time for dead threads in the
* group, not including a zombie group leader.
*/
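The inblock/oublock pair counts block I/O charged to live threads in the group; the c-prefixed pair accumulates totals inherited from reaped children, just as cutime/cstime do for CPU time. A hedged sketch of the fold a reaper would perform, with task_inblock()/task_oublock() as illustrative stand-ins for the real per-task accessors:

/* Illustrative only: fold a reaped child's block-I/O counts into the
 * parent's cumulative counters, mirroring the cutime/cstime handling. */
static void fold_child_io(struct signal_struct *psig, struct task_struct *p)
{
	struct signal_struct *sig = p->signal;

	psig->cinblock += task_inblock(p) + sig->inblock + sig->cinblock;
	psig->coublock += task_oublock(p) + sig->oublock + sig->coublock;
}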
/*
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
* single CPU. This is read only (except for setup, hotplug CPU).
+ * Note: never change cpu_power without recomputing its reciprocal
*/
- unsigned long cpu_power;
+ unsigned int __cpu_power;
+ /*
+ * reciprocal value of cpu_power to avoid expensive divides
+ * (see include/linux/reciprocal_div.h)
+ */
+ u32 reciprocal_cpu_power;
};
struct sched_domain {
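Caching a reciprocal next to __cpu_power turns the scheduler's hot-path divides into a multiply plus a shift via reciprocal_divide() from include/linux/reciprocal_div.h. A sketch of the usage pattern this implies (2.6.22's kernel/sched.c carries similar helpers; treat these names as illustrative):

#include <linux/reciprocal_div.h>

/* Writers must update both fields together ... */
static inline void sg_set_cpu_power(struct sched_group *sg, u32 power)
{
	sg->__cpu_power = power;
	sg->reciprocal_cpu_power = reciprocal_value(power);
}

/* ... so readers can divide by cpu_power without a div instruction. */
static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
{
	return reciprocal_divide(load, sg->reciprocal_cpu_power);
}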
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
- struct thread_info *thread_info;
+ void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
unsigned int ptrace;
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
+extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
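ignore_signals() complements flush_signals() for kernel threads (kthreadd being the motivating user) that must never act on signals: every disposition is set to SIG_IGN and anything already queued is discarded. It is plausibly implemented along these lines in kernel/signal.c:

void ignore_signals(struct task_struct *t)
{
	int i;

	/* force every signal's disposition to "ignore" ... */
	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	/* ... then drop whatever was already pending */
	flush_signals(t);
}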
#ifndef __HAVE_THREAD_FUNCTIONS
-#define task_thread_info(task) (task)->thread_info
-#define task_stack_page(task) ((void*)((task)->thread_info))
+#define task_thread_info(task) ((struct thread_info *)(task)->stack)
+#define task_stack_page(task) ((task)->stack)
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}
static inline unsigned long *end_of_stack(struct task_struct *p)
{
- return (unsigned long *)(p->thread_info + 1);
+ return (unsigned long *)(task_thread_info(p) + 1);
}
#endif
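With ->stack as the single source of truth, both accessors stay cheap casts and stack-bounds logic remains generic. A small illustrative helper, assuming the usual layout where thread_info sits at the bottom of the stack and the stack grows down toward it:

/* Illustrative: bytes of headroom before sp would overrun thread_info. */
static inline unsigned long stack_headroom(struct task_struct *p,
					   unsigned long sp)
{
	return sp - (unsigned long)end_of_stack(p);
}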