X-Git-Url: http://pilppa.org/gitweb/?a=blobdiff_plain;f=include%2Flinux%2Fsched.h;h=670b89a20070c9d5a2283faa1a6f0346ce7b2f4a;hb=1651e14e28a2d9f446018ef522882e0709a2ce4f;hp=34ed0d99b1bd1ccde1dee28a164bfded8fb1fc37;hpb=115b384cf87249d76adb0b21aca11ee22128927d;p=linux-2.6-omap-h63xx.git

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 34ed0d99b1b..670b89a2007 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -118,7 +118,6 @@ extern unsigned long avenrun[];		/* Load averages */
 
 extern unsigned long total_forks;
 extern int nr_threads;
-extern int last_pid;
 DECLARE_PER_CPU(unsigned long, process_counts);
 extern int nr_processes(void);
 extern unsigned long nr_running(void);
@@ -148,6 +147,7 @@ extern unsigned long weighted_cpuload(const int cpu);
 #define EXIT_DEAD		32
 /* in tsk->state again */
 #define TASK_NONINTERACTIVE	64
+#define TASK_DEAD		128
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
@@ -238,7 +238,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
 
-struct namespace;
+struct nsproxy;
 
 /* Maximum number of active map areas.. This is a random (large) number */
 #define DEFAULT_MAX_MAP_COUNT	65536
@@ -504,8 +504,8 @@ struct signal_struct {
 #define rt_prio(prio)		unlikely((prio) < MAX_RT_PRIO)
 #define rt_task(p)		rt_prio((p)->prio)
 #define batch_task(p)		(unlikely((p)->policy == SCHED_BATCH))
-#define has_rt_policy(p) \
-	unlikely((p)->policy != SCHED_NORMAL && (p)->policy != SCHED_BATCH)
+#define is_rt_policy(p)		((p) != SCHED_NORMAL && (p) != SCHED_BATCH)
+#define has_rt_policy(p)	unlikely(is_rt_policy((p)->policy))
 /*
  * Some day this will be a full-fledged user tracking system..
  */
@@ -709,7 +709,6 @@ extern unsigned int max_cache_size;
 
 struct io_context;			/* See blkdev.h */
-void exit_io_context(void);
 struct cpuset;
 
 #define NGROUPS_SMALL		32
@@ -784,8 +783,9 @@ struct task_struct {
 	struct prio_array *array;
 
 	unsigned short ioprio;
+#ifdef CONFIG_BLK_DEV_IO_TRACE
 	unsigned int btrace_seq;
-
+#endif
 	unsigned long sleep_avg;
 	unsigned long long timestamp, last_ran;
 	unsigned long long sched_time;	/* sched_clock time spent running */
@@ -819,6 +819,11 @@ struct task_struct {
 	unsigned did_exec:1;
 	pid_t pid;
 	pid_t tgid;
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+	/* Canary value for the -fstack-protector gcc feature */
+	unsigned long stack_canary;
+#endif
 /*
  * pointers to (original) parent process, youngest child, younger sibling,
  * older sibling, respectively.  (p->father can be replaced with
@@ -865,6 +870,15 @@ struct task_struct {
 	struct key *thread_keyring;	/* keyring private to this thread */
 	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 #endif
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
 	int oomkilladj;		/* OOM kill score adjustment (bit shift). */
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
@@ -872,16 +886,18 @@ struct task_struct {
				     - initialized normally by flush_old_exec */
 /* file system info */
 	int link_count, total_link_count;
+#ifdef CONFIG_SYSVIPC
 /* ipc stuff */
 	struct sysv_sem sysvsem;
+#endif
 /* CPU-specific state of this task */
 	struct thread_struct thread;
 /* filesystem information */
 	struct fs_struct *fs;
 /* open file information */
 	struct files_struct *files;
-/* namespace */
-	struct namespace *namespace;
+/* namespaces */
+	struct nsproxy *nsproxy;
 /* signal handlers */
 	struct signal_struct *signal;
 	struct sighand_struct *sighand;
@@ -964,10 +980,10 @@ struct task_struct {
 	wait_queue_t *io_wait;
 /* i/o counters(bytes read/written, #syscalls */
 	u64 rchar, wchar, syscr, syscw;
-#if defined(CONFIG_BSD_PROCESS_ACCT)
+#if defined(CONFIG_TASK_XACCT)
 	u64 acct_rss_mem1;	/* accumulated rss usage */
 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
-	clock_t acct_stimexpd;	/* clock_t-converted stime since last update */
+	cputime_t acct_stimexpd;/* stime since last update */
 #endif
 #ifdef CONFIG_NUMA
 	struct mempolicy *mempolicy;
@@ -1003,6 +1019,26 @@ static inline pid_t process_group(struct task_struct *tsk)
 	return tsk->signal->pgrp;
 }
 
+static inline struct pid *task_pid(struct task_struct *task)
+{
+	return task->pids[PIDTYPE_PID].pid;
+}
+
+static inline struct pid *task_tgid(struct task_struct *task)
+{
+	return task->group_leader->pids[PIDTYPE_PID].pid;
+}
+
+static inline struct pid *task_pgrp(struct task_struct *task)
+{
+	return task->group_leader->pids[PIDTYPE_PGID].pid;
+}
+
+static inline struct pid *task_session(struct task_struct *task)
+{
+	return task->group_leader->pids[PIDTYPE_SID].pid;
+}
+
 /**
  * pid_alive - check that a task structure is not stale
  * @p: Task structure to be checked.
@@ -1016,6 +1052,16 @@ static inline int pid_alive(struct task_struct *p)
 	return p->pids[PIDTYPE_PID].pid != NULL;
 }
 
+/**
+ * is_init - check if a task structure is the first user space
+ *	     task the kernel created.
+ * @p: Task structure to be checked.
+ */
+static inline int is_init(struct task_struct *tsk)
+{
+	return tsk->pid == 1;
+}
+
 extern void free_task(struct task_struct *tsk);
 
 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
@@ -1034,7 +1080,6 @@ static inline void put_task_struct(struct task_struct *t)
 					/* Not implemented yet, only for 486*/
 #define PF_STARTING	0x00000002	/* being created */
 #define PF_EXITING	0x00000004	/* getting shut down */
-#define PF_DEAD		0x00000008	/* Dead */
 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
 #define PF_DUMPCORE	0x00000200	/* dumped core */
@@ -1179,7 +1224,7 @@ extern void switch_uid(struct user_struct *);
 
 #include <asm/current.h>
 
-extern void do_timer(struct pt_regs *);
+extern void do_timer(unsigned long ticks);
 
 extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
 extern int FASTCALL(wake_up_process(struct task_struct * tsk));
@@ -1221,10 +1266,15 @@ extern int send_sig_info(int, struct siginfo *, struct task_struct *);
 extern int send_group_sig_info(int, struct siginfo *, struct task_struct *);
 extern int force_sigsegv(int, struct task_struct *);
 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
+extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
+extern int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
+extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
+extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
+extern int kill_pgrp(struct pid *pid, int sig, int priv);
+extern int kill_pid(struct pid *pid, int sig, int priv);
 extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
 extern int kill_pg_info(int, struct siginfo *, pid_t);
 extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t, u32);
 extern void do_notify_parent(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
@@ -1332,6 +1382,17 @@ extern void wait_task_inactive(struct task_struct * p);
 /* de_thread depends on thread_group_leader not being a pid based check */
 #define thread_group_leader(p)	(p == p->group_leader)
 
+/* Due to the insanities of de_thread it is possible for a process
+ * to have the pid of the thread group leader without actually being
+ * the thread group leader.  For iteration through the pids in proc
+ * all we care about is that we have a task with the appropriate
+ * pid, we don't actually care if we have the right task.
+ */
+static inline int has_group_leader_pid(struct task_struct *p)
+{
+	return p->pid == p->tgid;
+}
+
 static inline struct task_struct *next_thread(const struct task_struct *p)
 {
 	return list_entry(rcu_dereference(p->thread_group.next),
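
The fpu_counter hunk above relies on unsigned char wraparound: the count of consecutive FPU-using context switches wraps after 256 increments, so the FPU save path periodically drops back to lazy mode, which is how bursty FPU users are handled. A minimal userspace sketch of that wraparound (illustrative only; FPU_EAGER_THRESHOLD is a hypothetical stand-in for the arch-specific threshold, which is not part of this patch):

#include <stdio.h>

#define FPU_EAGER_THRESHOLD 5	/* hypothetical; the real threshold lives in arch code */

int main(void)
{
	unsigned char fpu_counter = 0;	/* same width as the new task_struct field */
	unsigned int eager = 0, i;

	for (i = 0; i < 300; i++) {
		fpu_counter++;		/* one more consecutive FPU-using switch */
		if (fpu_counter > FPU_EAGER_THRESHOLD)
			eager++;	/* lazy FPU saving would turn eager here */
	}
	/* after 256 increments the counter wrapped back to 0, so some of the
	 * later "switches" were handled lazily again, exactly the behavior
	 * the comment in the hunk describes */
	printf("eager: %u of 300, counter now %u\n", eager, (unsigned)fpu_counter);
	return 0;
}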
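
The struct pid hunks (the task_pid()/task_tgid()/task_pgrp()/task_session() accessors plus the kill_pgrp()/kill_pid() family) let callers pass the refcounted struct pid instead of a raw pid_t, whose numeric value the kernel can recycle for an unrelated process. A hedged sketch of how a caller might migrate, assuming kernel context and only the declarations visible in this diff (hangup_peers is a hypothetical example, not part of the patch):

#include <linux/sched.h>
#include <linux/signal.h>

/* Hypothetical caller: send SIGHUP to every task in tsk's process group.
 * The old interface took a numeric pid_t; the new one takes the struct pid
 * from task_pgrp(), so a recycled pid number cannot be signalled by
 * mistake.  As with the pid_t variants, any locking needed to keep tsk
 * stable (tasklist_lock or RCU) remains the caller's responsibility. */
static int hangup_peers(struct task_struct *tsk)
{
	/* old style: return kill_pg_info(SIGHUP, SEND_SIG_PRIV, process_group(tsk)); */
	return kill_pgrp(task_pgrp(tsk), SIGHUP, 1);
}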
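
On the has_group_leader_pid() hunk: thread_group_leader() tests task identity (p == p->group_leader), while has_group_leader_pid() only compares pid numbers, which is all /proc iteration needs, since de_thread() can briefly leave a non-leader task holding the leader's pid. An illustrative sketch of the distinction (leader_kind and its return codes are hypothetical, built only on the two predicates from this header):

#include <linux/sched.h>

/* Hypothetical classifier showing how the two checks differ. */
static int leader_kind(struct task_struct *p)
{
	if (thread_group_leader(p))
		return 2;	/* really is p->group_leader */
	if (has_group_leader_pid(p))
		return 1;	/* holds the leader's pid (de_thread window) */
	return 0;		/* ordinary thread in the group */
}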