Multiple ondemand commits merged into one
    [CPUFREQ] ondemand: Refactor frequency increase code
    [CPUFREQ] ondemand: Fix ondemand to not request targets outside policy limits
    [CPUFREQ] ondemand: add sampling_down_factor tunable to improve ondemand performance
    [CPUFREQ] ondemand: Add filter for input events
    [CPUFREQ] ondemand: Featurize the input event handler
    [CPUFREQ] ondemand: Set sampling_down_factor to 50
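
    Of these, the sampling_down_factor change is the one that alters steady-state
    behaviour: while the CPU sits at its maximum frequency the governor multiplies
    its polling interval by the factor (via rate_mult), so it reconsiders lowering
    the frequency less often; as soon as a sample picks a lower target, rate_mult
    is reset to 1. A minimal sketch of that arithmetic (hypothetical helper, not
    code from this patch):

        /* Effective polling interval while fully busy, in microseconds.
         * rate_mult equals sampling_down_factor at policy->max, else 1. */
        static unsigned int effective_sampling_us(unsigned int sampling_rate_us,
                                                  unsigned int rate_mult)
        {
                return sampling_rate_us * rate_mult;
        }

    For example, with a 20000 us sampling_rate and the default factor of 50, the
    governor only re-evaluates (and can ramp down) about once per second while the
    CPU stays fully loaded.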

file:ee65bf6b0a91a403daa3c6f0fd0f0c62c64697f7 -> file:9294cfc3bfc630753ec93fab419940303e227fc2
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -279,6 +279,14 @@ config CPU_FREQ_GOV_ONDEMAND
If in doubt, say N.
+config CPU_FREQ_GOV_ONDEMAND_INPUT
+ bool "Ramp up CPU frequency on input events"
+ default y
+ depends on CPU_FREQ_GOV_ONDEMAND
+ help
+ Enable installation of an input event handler which will ramp up the
+ CPU to max frequency when an input event is received.
+
config CPU_FREQ_GOV_INTERACTIVE
tristate "'interactive' cpufreq policy governor"
help
file:4b34ade2332baaa50bb1ca1af9c45e1a9893d321 -> file:a2590e37f2c8bae7b49c89f045832f32acee269f
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -22,6 +22,8 @@
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
/*
* dbs is used in this file as a shortform for demandbased switching
@@ -29,12 +31,14 @@
*/
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
-#define DEF_FREQUENCY_UP_THRESHOLD (80)
+#define DEF_FREQUENCY_UP_THRESHOLD (80)
+#define DEF_SAMPLING_DOWN_FACTOR (50)
+#define MAX_SAMPLING_DOWN_FACTOR (100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3)
#define MICRO_FREQUENCY_UP_THRESHOLD (95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
-#define MIN_FREQUENCY_UP_THRESHOLD (11)
-#define MAX_FREQUENCY_UP_THRESHOLD (100)
+#define MIN_FREQUENCY_UP_THRESHOLD (11)
+#define MAX_FREQUENCY_UP_THRESHOLD (100)
/*
* The polling frequency of this governor depends on the capability of
@@ -50,7 +54,7 @@
static unsigned int min_sampling_rate;
-#define LATENCY_MULTIPLIER (1000)
+#define LATENCY_MULTIPLIER (1000)
#define MIN_LATENCY_MULTIPLIER (100)
#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
@@ -81,6 +85,7 @@ struct cpu_dbs_info_s {
unsigned int freq_lo;
unsigned int freq_lo_jiffies;
unsigned int freq_hi_jiffies;
+ unsigned int rate_mult;
int cpu;
unsigned int sample_type:1;
/*
@@ -107,9 +112,11 @@ static struct dbs_tuners {
unsigned int up_threshold;
unsigned int down_differential;
unsigned int ignore_nice;
+ unsigned int sampling_down_factor;
unsigned int powersave_bias;
} dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+ .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
.ignore_nice = 0,
.powersave_bias = 0,
@@ -250,6 +257,7 @@ static ssize_t show_##file_name \
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
+show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
@@ -318,6 +326,29 @@ static ssize_t store_up_threshold(struct
return count;
}
+static ssize_t store_sampling_down_factor(struct kobject *a,
+ struct attribute *b, const char *buf, size_t count)
+{
+ unsigned int input, j;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+ return -EINVAL;
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.sampling_down_factor = input;
+
+ /* Reset down sampling multiplier in case it was active */
+ for_each_online_cpu(j) {
+ struct cpu_dbs_info_s *dbs_info;
+ dbs_info = &per_cpu(od_cpu_dbs_info, j);
+ dbs_info->rate_mult = 1;
+ }
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -384,12 +415,14 @@ define_one_rw(sampling_rate);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(powersave_bias);
+define_one_rw(sampling_down_factor);
static struct attribute *dbs_attributes[] = {
&sampling_rate_max.attr,
&sampling_rate_min.attr,
&sampling_rate.attr,
&up_threshold.attr,
+ &sampling_down_factor.attr,
&ignore_nice_load.attr,
&powersave_bias.attr,
NULL
@@ -443,6 +476,17 @@ static struct attribute_group dbs_attr_g
/************************** sysfs end ************************/
+static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
+{
+ if (dbs_tuners_ins.powersave_bias)
+ freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
+ else if (p->cur == p->max)
+ return;
+
+ __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
+ CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
+}
+
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
unsigned int max_load_freq;
@@ -520,19 +564,11 @@ static void dbs_check_cpu(struct cpu_dbs
/* Check for frequency increase */
if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
- /* if we are already at full speed then break out early */
- if (!dbs_tuners_ins.powersave_bias) {
- if (policy->cur == policy->max)
- return;
-
- __cpufreq_driver_target(policy, policy->max,
- CPUFREQ_RELATION_H);
- } else {
- int freq = powersave_bias_target(policy, policy->max,
- CPUFREQ_RELATION_H);
- __cpufreq_driver_target(policy, freq,
- CPUFREQ_RELATION_L);
- }
+ /* If switching to max speed, apply sampling_down_factor */
+ if (policy->cur < policy->max)
+ this_dbs_info->rate_mult =
+ dbs_tuners_ins.sampling_down_factor;
+ dbs_freq_increase(policy, policy->max);
return;
}
@@ -554,6 +590,12 @@ static void dbs_check_cpu(struct cpu_dbs
(dbs_tuners_ins.up_threshold -
dbs_tuners_ins.down_differential);
+ /* No longer fully busy, reset rate_mult */
+ this_dbs_info->rate_mult = 1;
+
+ if (freq_next < policy->min)
+ freq_next = policy->min;
+
if (!dbs_tuners_ins.powersave_bias) {
__cpufreq_driver_target(policy, freq_next,
CPUFREQ_RELATION_L);
@@ -574,7 +616,7 @@ static void do_dbs_timer(struct work_str
int sample_type = dbs_info->sample_type;
/* We want all CPUs to do sampling nearly on same jiffy */
- int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+ int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate * dbs_info->rate_mult);
delay -= jiffies % delay;
mutex_lock(&dbs_info->timer_mutex);
@@ -614,6 +656,104 @@ static inline void dbs_timer_exit(struct
cancel_delayed_work_sync(&dbs_info->work);
}
+#ifdef CONFIG_CPU_FREQ_GOV_ONDEMAND_INPUT
+static void dbs_refresh_callback(struct work_struct *unused)
+{
+ struct cpufreq_policy *policy;
+ struct cpu_dbs_info_s *this_dbs_info;
+
+ if (lock_policy_rwsem_write(0) < 0)
+ return;
+
+ this_dbs_info = &per_cpu(od_cpu_dbs_info, 0);
+ policy = this_dbs_info->cur_policy;
+
+ if (policy->cur < policy->max) {
+ __cpufreq_driver_target(policy, policy->max,
+ CPUFREQ_RELATION_L);
+ this_dbs_info->prev_cpu_idle = get_cpu_idle_time(0,
+ &this_dbs_info->prev_cpu_wall);
+ }
+ unlock_policy_rwsem_write(0);
+}
+
+static DECLARE_WORK(dbs_refresh_work, dbs_refresh_callback);
+
+static void dbs_input_event(struct input_handle *handle, unsigned int type,
+ unsigned int code, int value)
+{
+ schedule_work(&dbs_refresh_work);
+}
+
+static int input_dev_filter(const char *input_dev_name)
+{
+ /* Boost only for touchscreen, keypad, nav and optical-joystick devices */
+ if (strstr(input_dev_name, "touchscreen") ||
+ strstr(input_dev_name, "-keypad") ||
+ strstr(input_dev_name, "-nav") ||
+ strstr(input_dev_name, "-oj"))
+ return 0;
+
+ return 1;
+}
+
+static int dbs_input_connect(struct input_handler *handler,
+ struct input_dev *dev, const struct input_device_id *id)
+{
+ struct input_handle *handle;
+ int error;
+
+ /* filter out input devices we don't care about */
+ if (input_dev_filter(dev->name))
+ return 0;
+
+ handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = "cpufreq";
+
+ error = input_register_handle(handle);
+ if (error)
+ goto err2;
+
+ error = input_open_device(handle);
+ if (error)
+ goto err1;
+
+ return 0;
+err1:
+ input_unregister_handle(handle);
+err2:
+ kfree(handle);
+ return error;
+}
+
+static void dbs_input_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+}
+
+static const struct input_device_id dbs_ids[] = {
+ { .driver_info = 1 },
+ { },
+};
+
+static struct input_handler dbs_input_handler = {
+ .event = dbs_input_event,
+ .connect = dbs_input_connect,
+ .disconnect = dbs_input_disconnect,
+ .name = "cpufreq_ond",
+ .id_table = dbs_ids,
+};
+#endif
+
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int event)
{
@@ -651,6 +791,7 @@ static int cpufreq_governor_dbs(struct c
}
}
this_dbs_info->cpu = cpu;
+ this_dbs_info->rate_mult = 1;
ondemand_powersave_bias_init_cpu(cpu);
/*
* Start the timerschedule work, when this governor
@@ -677,6 +818,9 @@ static int cpufreq_governor_dbs(struct c
max(min_sampling_rate,
latency * LATENCY_MULTIPLIER);
}
+#ifdef CONFIG_CPU_FREQ_GOV_ONDEMAND_INPUT
+ rc = input_register_handler(&dbs_input_handler);
+#endif
mutex_unlock(&dbs_mutex);
mutex_init(&this_dbs_info->timer_mutex);
@@ -690,6 +834,9 @@ static int cpufreq_governor_dbs(struct c
sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
mutex_destroy(&this_dbs_info->timer_mutex);
dbs_enable--;
+#ifdef CONFIG_CPU_FREQ_GOV_ONDEMAND_INPUT
+ input_unregister_handler(&dbs_input_handler);
+#endif
mutex_unlock(&dbs_mutex);
if (!dbs_enable)
sysfs_remove_group(cpufreq_global_kobject,
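
A quick way to exercise the new tunable from userspace (sketch only; the sysfs
path is an assumption about where ondemand exposes its global tunables on
kernels of this vintage, and the program below is not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            const char *path =
                    "/sys/devices/system/cpu/cpufreq/ondemand/sampling_down_factor";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror("fopen sampling_down_factor");
                    return 1;
            }
            /* store_sampling_down_factor() rejects values outside 1..100000 */
            fprintf(f, "50\n");
            return fclose(f) ? 1 : 0;
    }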