Added ck2 patchset, updating BFS from v0.357 to v0.363
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ae682f7d123ce0d4cdfffeac82ae34534b847833..25688fd1585667512a560e7153df3376d8698504 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -739,14 +739,6 @@ struct user_struct {
uid_t uid;
struct user_namespace *user_ns;
-#ifdef CONFIG_USER_SCHED
- struct task_group *tg;
-#ifdef CONFIG_SYSFS
- struct kobject kobj;
- struct delayed_work work;
-#endif
-#endif
-
#ifdef CONFIG_PERF_EVENTS
atomic_long_t locked_vm;
#endif
@@ -913,6 +905,7 @@ struct sched_group {
* single CPU.
*/
unsigned int cpu_power;
+ unsigned int group_weight;
/*
* The CPUs this group covers.
@@ -1132,7 +1125,7 @@ struct sched_class {
struct task_struct *task);
#ifdef CONFIG_FAIR_GROUP_SCHED
- void (*moved_group) (struct task_struct *p, int on_rq);
+ void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};
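
Editor's note: the moved_group hook is renamed to task_move_group here, reflecting that the callback fires when a task is moved between task groups. As a minimal sketch (not code from this patch; BFS itself does not drive sched_class this way, and set_task_rq is internal to kernel/sched.c), a caller moving a task to another group can dispatch the hook roughly like this:

/*
 * Illustrative dispatch of the renamed hook when a task changes task group.
 * Classes that must fix up per-group accounting implement task_move_group;
 * otherwise the caller just switches the task's runqueue association.
 */
static void example_move_task_to_group(struct task_struct *tsk, int on_rq)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
        if (tsk->sched_class->task_move_group)
                tsk->sched_class->task_move_group(tsk, on_rq);
        else
#endif
                set_task_rq(tsk, task_cpu(tsk));        /* fallback path */
}
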
@@ -1598,7 +1591,7 @@ static inline void tsk_cpus_current(stru
static inline void print_scheduler_version(void)
{
- printk(KERN_INFO"BFS CPU scheduler v0.357 by Con Kolivas.\n");
+ printk(KERN_INFO"BFS CPU scheduler v0.363 by Con Kolivas.\n");
}
static inline int iso_task(struct task_struct *p)
@@ -1836,8 +1829,7 @@ extern int task_free_unregister(struct n
/*
* Per process flags
*/
-#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
- /* Not implemented yet, only for 486*/
+#define PF_KSOFTIRQD 0x00000001 /* I am ksoftirqd */
#define PF_STARTING 0x00000002 /* being created */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
@@ -1974,6 +1966,19 @@ extern void sched_clock_idle_wakeup_even
*/
extern unsigned long long cpu_clock(int cpu);
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
+ * The reason for this explicit opt-in is not to have perf penalty with
+ * slow sched_clocks.
+ */
+extern void enable_sched_clock_irqtime(void);
+extern void disable_sched_clock_irqtime(void);
+#else
+static inline void enable_sched_clock_irqtime(void) {}
+static inline void disable_sched_clock_irqtime(void) {}
+#endif
+
extern unsigned long long
task_sched_runtime(struct task_struct *task);
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
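
Editor's note: the new CONFIG_IRQ_TIME_ACCOUNTING interface is deliberately opt-in at runtime so platforms with slow sched_clock() implementations pay no overhead. A minimal sketch of how an architecture's clock setup path might opt in once it knows its sched_clock() is cheap (the function and flag names below are illustrative, not from this patch; only enable_sched_clock_irqtime()/disable_sched_clock_irqtime() come from the declarations above):

/* Hypothetical arch-side opt-in, modeled on the pattern of enabling the
 * feature from clocksource setup code once the clock is known to be fast. */
static int example_fast_sched_clock = 1;        /* hypothetical platform flag */

static void __init example_clock_init(void)
{
        /* ... set up the platform sched_clock() source ... */

        /*
         * Account IRQ time via sched_clock() only when reading the clock
         * is cheap; otherwise leave the feature disabled (the default).
         */
        if (example_fast_sched_clock)
                enable_sched_clock_irqtime();
        else
                disable_sched_clock_irqtime();
}
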
@@ -2525,9 +2530,9 @@ extern int __cond_resched_lock(spinlock_
extern int __cond_resched_softirq(void);
-#define cond_resched_softirq() ({ \
- __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \
- __cond_resched_softirq(); \
+#define cond_resched_softirq() ({ \
+ __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
+ __cond_resched_softirq(); \
})
/*
@@ -2616,13 +2621,9 @@ extern long sched_getaffinity(pid_t pid,
extern void normalize_rt_tasks(void);
-#ifdef CONFIG_GROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
extern struct task_group init_task_group;
-#ifdef CONFIG_USER_SCHED
-extern struct task_group root_task_group;
-extern void set_tg_uid(struct user_struct *user);
-#endif
extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
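
Editor's note: with the user-based grouping (CONFIG_USER_SCHED) removed, only cgroup-based grouping remains, managed through the sched_create_group()/sched_destroy_group() pair declared above. A rough sketch of the lifecycle as a cgroup-style controller would drive it (error handling trimmed; the wrapper names are illustrative, and init_task_group is assumed to be the root parent):

/* Illustrative task-group lifecycle built on the externs above. */
static struct task_group *example_make_group(struct task_group *parent)
{
        struct task_group *tg;

        /* allocate a new group below 'parent' (init_task_group at the root) */
        tg = sched_create_group(parent);
        if (IS_ERR(tg))
                return NULL;
        return tg;
}

static void example_drop_group(struct task_group *tg)
{
        /* tear the group down once no tasks reference it */
        sched_destroy_group(tg);
}
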