ONDEMAND: add early suspend drivers to adjust cpu sampling rates

file:a27a9c62175646dfc6419cee121db6bb9ec8a9dc -> file:9303079afba2a054a5ea0a7826cc81e597ce8537
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -25,6 +25,9 @@
#include <linux/input.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
+#ifdef CONFIG_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
/*
* dbs is used in this file as a shortform for demandbased switching
@@ -56,6 +59,10 @@
#define MIN_SAMPLING_RATE_RATIO (2)
static unsigned int min_sampling_rate;
+#ifdef CONFIG_EARLYSUSPEND
+bool screen_is_on = true;
+static unsigned long stored_sampling_rate;
+#endif
#define LATENCY_MULTIPLIER (1000)
#define MIN_LATENCY_MULTIPLIER (100)
@@ -308,6 +315,62 @@ static ssize_t show_powersave_bias
return snprintf(buf, PAGE_SIZE, "%d\n", dbs_tuners_ins.powersave_bias);
}
+/**
+ * update_sampling_rate - update sampling rate effective immediately if needed.
+ * @new_rate: new sampling rate
+ *
+ * If new rate is smaller than the old, simply updating
+ * dbs_tuners_ins.sampling_rate might not be appropriate. For example,
+ * if the original sampling_rate was 1 second and the requested new sampling
+ * rate is 10 ms because the user needs immediate reaction from ondemand
+ * governor, but not sure if higher frequency will be required or not,
+ * then, the governor may change the sampling rate too late; up to 1 second
+ * later. Thus, if we are reducing the sampling rate, we need to make the
+ * new value effective immediately.
+ */
+static void update_sampling_rate(unsigned int new_rate)
+{
+ int cpu;
+
+ dbs_tuners_ins.sampling_rate = new_rate
+ = max(new_rate, min_sampling_rate);
+
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy;
+ struct cpu_dbs_info_s *dbs_info;
+ unsigned long next_sampling, appointed_at;
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ continue;
+ dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
+ cpufreq_cpu_put(policy);
+
+ mutex_lock(&dbs_info->timer_mutex);
+
+ if (!delayed_work_pending(&dbs_info->work)) {
+ mutex_unlock(&dbs_info->timer_mutex);
+ continue;
+ }
+
+ next_sampling = jiffies + usecs_to_jiffies(new_rate);
+ appointed_at = dbs_info->work.timer.expires;
+
+
+ if (time_before(next_sampling, appointed_at)) {
+
+ mutex_unlock(&dbs_info->timer_mutex);
+ cancel_delayed_work_sync(&dbs_info->work);
+ mutex_lock(&dbs_info->timer_mutex);
+
+ schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work,
+ usecs_to_jiffies(new_rate));
+
+ }
+ mutex_unlock(&dbs_info->timer_mutex);
+ }
+}
+
static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -316,7 +379,7 @@ static ssize_t store_sampling_rate(struc
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
- dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
+ update_sampling_rate(input);
return count;
}
@@ -423,9 +486,12 @@ static ssize_t store_powersave_bias(stru
{
int input = 0;
int bypass = 0;
- int ret, cpu, reenable_timer;
+ int ret, cpu, reenable_timer, j;
struct cpu_dbs_info_s *dbs_info;
+ struct cpumask cpus_timer_done;
+ cpumask_clear(&cpus_timer_done);
+
ret = sscanf(buf, "%d", &input);
if (ret != 1)
@@ -458,10 +524,25 @@ static ssize_t store_powersave_bias(stru
continue;
dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
+
+ for_each_cpu(j, &cpus_timer_done) {
+ if (!dbs_info->cur_policy) {
+ printk(KERN_ERR
+ "%s Dbs policy is NULL\n",
+ __func__);
+ goto skip_this_cpu;
+ }
+ if (cpumask_test_cpu(j, dbs_info->
+ cur_policy->cpus))
+ goto skip_this_cpu;
+ }
+
+ cpumask_set_cpu(cpu, &cpus_timer_done);
if (dbs_info->cur_policy) {
/* restart dbs timer */
dbs_timer_init(dbs_info);
}
+skip_this_cpu:
unlock_policy_rwsem_write(cpu);
}
}
@@ -474,6 +555,21 @@ static ssize_t store_powersave_bias(stru
continue;
dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
+
+ for_each_cpu(j, &cpus_timer_done) {
+ if (!dbs_info->cur_policy) {
+ printk(KERN_ERR
+ "%s Dbs policy is NULL\n",
+ __func__);
+ goto skip_this_cpu_bypass;
+ }
+ if (cpumask_test_cpu(j, dbs_info->
+ cur_policy->cpus))
+ goto skip_this_cpu_bypass;
+ }
+
+ cpumask_set_cpu(cpu, &cpus_timer_done);
+
if (dbs_info->cur_policy) {
/* cpu using ondemand, cancel dbs timer */
mutex_lock(&dbs_info->timer_mutex);
@@ -486,6 +582,7 @@ static ssize_t store_powersave_bias(stru
mutex_unlock(&dbs_info->timer_mutex);
}
+skip_this_cpu_bypass:
unlock_policy_rwsem_write(cpu);
}
}
@@ -966,6 +1063,31 @@ static int cpufreq_governor_dbs(struct c
return 0;
}
+#ifdef CONFIG_EARLYSUSPEND
+static void cpufreq_ondemand_early_suspend(struct early_suspend *h)
+{
+ mutex_lock(&dbs_mutex);
+ screen_is_on = false;
+ stored_sampling_rate = min_sampling_rate;
+ min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE * 6;
+ mutex_unlock(&dbs_mutex);
+}
+
+static void cpufreq_ondemand_late_resume(struct early_suspend *h)
+{
+ mutex_lock(&dbs_mutex);
+ min_sampling_rate = stored_sampling_rate;
+ screen_is_on = true;
+ mutex_unlock(&dbs_mutex);
+}
+
+static struct early_suspend cpufreq_ondemand_early_suspend_info = {
+ .suspend = cpufreq_ondemand_early_suspend,
+ .resume = cpufreq_ondemand_late_resume,
+ .level = EARLY_SUSPEND_LEVEL_DISABLE_FB,
+};
+#endif
+
static int __init cpufreq_gov_dbs_init(void)
{
cputime64_t wall;
@@ -1001,12 +1123,18 @@ static int __init cpufreq_gov_dbs_init(v
INIT_WORK(&per_cpu(dbs_refresh_work, i), dbs_refresh_callback);
}
+#ifdef CONFIG_EARLYSUSPEND
+ register_early_suspend(&cpufreq_ondemand_early_suspend_info);
+#endif
return cpufreq_register_governor(&cpufreq_gov_ondemand);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_ondemand);
+#ifdef CONFIG_EARLYSUSPEND
+ unregister_early_suspend(&cpufreq_ondemand_early_suspend_info);
+#endif
destroy_workqueue(input_wq);
}