Merge branch 'pm-cpufreq'
* pm-cpufreq: (94 commits)
  intel_pstate: Do not skip samples partially
  intel_pstate: Remove freq calculation from intel_pstate_calc_busy()
  intel_pstate: Move intel_pstate_calc_busy() into get_target_pstate_use_performance()
  intel_pstate: Optimize calculation for max/min_perf_adj
  intel_pstate: Remove extra conversions in pid calculation
  cpufreq: Move scheduler-related code to the sched directory
  Revert "cpufreq: postfix policy directory with the first CPU in related_cpus"
  cpufreq: Reduce cpufreq_update_util() overhead a bit
  cpufreq: Select IRQ_WORK if CPU_FREQ_GOV_COMMON is set
  cpufreq: Remove 'policy->governor_enabled'
  cpufreq: Rename __cpufreq_governor() to cpufreq_governor()
  cpufreq: Relocate handle_update() to kill its declaration
  cpufreq: governor: Drop unnecessary checks from show() and store()
  cpufreq: governor: Fix race in dbs_update_util_handler()
  cpufreq: governor: Make gov_set_update_util() static
  cpufreq: governor: Narrow down the dbs_data_mutex coverage
  cpufreq: governor: Make dbs_data_mutex static
  cpufreq: governor: Relocate definitions of tuners structures
  cpufreq: governor: Move per-CPU data to the common code
  cpufreq: governor: Make governor private data per-policy
  ...
rafaeljw committed Mar 14, 2016
2 parents b5d5fad + 4fec7ad commit 4ed3900
Showing 26 changed files with 1,503 additions and 1,718 deletions.
2 changes: 1 addition & 1 deletion Documentation/cpu-freq/intel-pstate.txt
@@ -25,7 +25,7 @@ callback, so cpufreq core can't request a transition to a specific frequency.
The driver provides minimum and maximum frequency limits and callbacks to set a
policy. The policy in cpufreq sysfs is referred to as the "scaling governor".
The cpufreq core can request the driver to operate in any of the two policies:
"performance: and "powersave". The driver decides which frequency to use based
"performance" and "powersave". The driver decides which frequency to use based
on the above policy selection considering minimum and maximum frequency limits.

The Intel P-State driver falls under the latter category, which implements the
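The documentation fix above sits in the passage distinguishing drivers that let the core request exact frequencies (via a ->target() style callback) from setpolicy drivers such as intel_pstate, which receive only a policy and frequency limits. As a rough sketch of the setpolicy side, written against the real struct cpufreq_driver fields but with hypothetical example_* names:

	#include <linux/cpufreq.h>

	/*
	 * Sketch of a setpolicy-style driver: the core never asks for a
	 * specific frequency; it only hands over policy->policy
	 * (performance or powersave) together with policy->min and
	 * policy->max, and the driver picks the operating frequency.
	 */
	static int example_set_policy(struct cpufreq_policy *policy)
	{
		if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
			; /* run at the high end of [policy->min, policy->max] */
		else
			; /* scale within the limits based on utilization */
		return 0;
	}

	static struct cpufreq_driver example_driver = {
		.name		= "example",
		.setpolicy	= example_set_policy,
		/* no .target()/.target_index(): exact transitions cannot be requested */
	};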
1 change: 1 addition & 0 deletions drivers/cpufreq/Kconfig
@@ -19,6 +19,7 @@ config CPU_FREQ
if CPU_FREQ

config CPU_FREQ_GOV_COMMON
select IRQ_WORK
bool

config CPU_FREQ_BOOST_SW
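The new select line exists because, after this series, the common governor code is invoked from the scheduler through cpufreq_update_util() and must defer its heavier processing via irq_work rather than doing it inline. A rough sketch of that shape, using the real init_irq_work()/irq_work_queue() API but with made-up gov_* names:

	#include <linux/irq_work.h>

	static struct irq_work gov_irq_work;

	/* Runs shortly after being queued, off the scheduler's hot path,
	 * where the heavier governor work can be done or punted further
	 * to process context. */
	static void gov_irq_work_handler(struct irq_work *w)
	{
	}

	static void gov_start(void)
	{
		init_irq_work(&gov_irq_work, gov_irq_work_handler);
	}

	/* Called from the scheduler's utilization-update hook, a context
	 * that must not sleep. */
	static void gov_update_util(void)
	{
		irq_work_queue(&gov_irq_work);
	}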
212 changes: 96 additions & 116 deletions drivers/cpufreq/acpi-cpufreq.c
@@ -70,6 +70,8 @@ struct acpi_cpufreq_data {
unsigned int cpu_feature;
unsigned int acpi_perf_cpu;
cpumask_var_t freqdomain_cpus;
void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
};

/* acpi_perf_data is a pointer to percpu data. */
@@ -243,125 +245,119 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
}
}

struct msr_addr {
u32 reg;
};
u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
{
u32 val, dummy;

struct io_addr {
u16 port;
u8 bit_width;
};
rdmsr(MSR_IA32_PERF_CTL, val, dummy);
return val;
}

void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
{
u32 lo, hi;

rdmsr(MSR_IA32_PERF_CTL, lo, hi);
lo = (lo & ~INTEL_MSR_RANGE) | (val & INTEL_MSR_RANGE);
wrmsr(MSR_IA32_PERF_CTL, lo, hi);
}

u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
{
u32 val, dummy;

rdmsr(MSR_AMD_PERF_CTL, val, dummy);
return val;
}

void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
{
wrmsr(MSR_AMD_PERF_CTL, val, 0);
}

u32 cpu_freq_read_io(struct acpi_pct_register *reg)
{
u32 val;

acpi_os_read_port(reg->address, &val, reg->bit_width);
return val;
}

void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
{
acpi_os_write_port(reg->address, val, reg->bit_width);
}

struct drv_cmd {
unsigned int type;
const struct cpumask *mask;
union {
struct msr_addr msr;
struct io_addr io;
} addr;
struct acpi_pct_register *reg;
u32 val;
union {
void (*write)(struct acpi_pct_register *reg, u32 val);
u32 (*read)(struct acpi_pct_register *reg);
} func;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
struct drv_cmd *cmd = _cmd;
u32 h;

switch (cmd->type) {
case SYSTEM_INTEL_MSR_CAPABLE:
case SYSTEM_AMD_MSR_CAPABLE:
rdmsr(cmd->addr.msr.reg, cmd->val, h);
break;
case SYSTEM_IO_CAPABLE:
acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
&cmd->val,
(u32)cmd->addr.io.bit_width);
break;
default:
break;
}
cmd->val = cmd->func.read(cmd->reg);
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
{
struct drv_cmd *cmd = _cmd;
u32 lo, hi;
struct acpi_processor_performance *perf = to_perf_data(data);
struct drv_cmd cmd = {
.reg = &perf->control_register,
.func.read = data->cpu_freq_read,
};
int err;

switch (cmd->type) {
case SYSTEM_INTEL_MSR_CAPABLE:
rdmsr(cmd->addr.msr.reg, lo, hi);
lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
wrmsr(cmd->addr.msr.reg, lo, hi);
break;
case SYSTEM_AMD_MSR_CAPABLE:
wrmsr(cmd->addr.msr.reg, cmd->val, 0);
break;
case SYSTEM_IO_CAPABLE:
acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
cmd->val,
(u32)cmd->addr.io.bit_width);
break;
default:
break;
}
err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */
return cmd.val;
}

static void drv_read(struct drv_cmd *cmd)
/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
int err;
cmd->val = 0;
struct drv_cmd *cmd = _cmd;

err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */
cmd->func.write(cmd->reg, cmd->val);
}

static void drv_write(struct drv_cmd *cmd)
static void drv_write(struct acpi_cpufreq_data *data,
const struct cpumask *mask, u32 val)
{
struct acpi_processor_performance *perf = to_perf_data(data);
struct drv_cmd cmd = {
.reg = &perf->control_register,
.val = val,
.func.write = data->cpu_freq_write,
};
int this_cpu;

this_cpu = get_cpu();
if (cpumask_test_cpu(this_cpu, cmd->mask))
do_drv_write(cmd);
smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
if (cpumask_test_cpu(this_cpu, mask))
do_drv_write(&cmd);

smp_call_function_many(mask, do_drv_write, &cmd, 1);
put_cpu();
}

static u32
get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
struct acpi_processor_performance *perf;
struct drv_cmd cmd;
u32 val;

if (unlikely(cpumask_empty(mask)))
return 0;

switch (data->cpu_feature) {
case SYSTEM_INTEL_MSR_CAPABLE:
cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
break;
case SYSTEM_AMD_MSR_CAPABLE:
cmd.type = SYSTEM_AMD_MSR_CAPABLE;
cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
break;
case SYSTEM_IO_CAPABLE:
cmd.type = SYSTEM_IO_CAPABLE;
perf = to_perf_data(data);
cmd.addr.io.port = perf->control_register.address;
cmd.addr.io.bit_width = perf->control_register.bit_width;
break;
default:
return 0;
}

cmd.mask = mask;
drv_read(&cmd);
val = drv_read(data, mask);

pr_debug("get_cur_val = %u\n", cmd.val);
pr_debug("get_cur_val = %u\n", val);

return cmd.val;
return val;
}

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
@@ -416,7 +412,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
{
struct acpi_cpufreq_data *data = policy->driver_data;
struct acpi_processor_performance *perf;
struct drv_cmd cmd;
const struct cpumask *mask;
unsigned int next_perf_state = 0; /* Index into perf table */
int result = 0;

@@ -434,42 +430,21 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
} else {
pr_debug("Already at target state (P%d)\n",
next_perf_state);
goto out;
return 0;
}
}

switch (data->cpu_feature) {
case SYSTEM_INTEL_MSR_CAPABLE:
cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
cmd.val = (u32) perf->states[next_perf_state].control;
break;
case SYSTEM_AMD_MSR_CAPABLE:
cmd.type = SYSTEM_AMD_MSR_CAPABLE;
cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
cmd.val = (u32) perf->states[next_perf_state].control;
break;
case SYSTEM_IO_CAPABLE:
cmd.type = SYSTEM_IO_CAPABLE;
cmd.addr.io.port = perf->control_register.address;
cmd.addr.io.bit_width = perf->control_register.bit_width;
cmd.val = (u32) perf->states[next_perf_state].control;
break;
default:
result = -ENODEV;
goto out;
}

/* cpufreq holds the hotplug lock, so we are safe from here on */
if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
cmd.mask = policy->cpus;
else
cmd.mask = cpumask_of(policy->cpu);
/*
* The core won't allow CPUs to go away until the governor has been
* stopped, so we can rely on the stability of policy->cpus.
*/
mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
cpumask_of(policy->cpu) : policy->cpus;

drv_write(&cmd);
drv_write(data, mask, perf->states[next_perf_state].control);

if (acpi_pstate_strict) {
if (!check_freqs(cmd.mask, data->freq_table[index].frequency,
if (!check_freqs(mask, data->freq_table[index].frequency,
data)) {
pr_debug("acpi_cpufreq_target failed (%d)\n",
policy->cpu);
@@ -480,7 +455,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
if (!result)
perf->state = next_perf_state;

out:
return result;
}

@@ -740,15 +714,21 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
pr_debug("SYSTEM IO addr space\n");
data->cpu_feature = SYSTEM_IO_CAPABLE;
data->cpu_freq_read = cpu_freq_read_io;
data->cpu_freq_write = cpu_freq_write_io;
break;
case ACPI_ADR_SPACE_FIXED_HARDWARE:
pr_debug("HARDWARE addr space\n");
if (check_est_cpu(cpu)) {
data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
data->cpu_freq_read = cpu_freq_read_intel;
data->cpu_freq_write = cpu_freq_write_intel;
break;
}
if (check_amd_hwpstate_cpu(cpu)) {
data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
data->cpu_freq_read = cpu_freq_read_amd;
data->cpu_freq_write = cpu_freq_write_amd;
break;
}
result = -ENODEV;
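Taken together, the acpi-cpufreq hunks above replace the per-call switch on data->cpu_feature with read/write callbacks selected once during acpi_cpufreq_cpu_init(). Outside of kernel context, the dispatch pattern looks roughly like this stand-alone sketch (all names hypothetical; only the shape mirrors the patch):

	#include <stdint.h>
	#include <stdio.h>

	struct pct_register {
		uint16_t port;
		uint8_t bit_width;
	};

	/* Callbacks stored in the per-policy data, mirroring the new
	 * cpu_freq_read/cpu_freq_write members of struct acpi_cpufreq_data. */
	struct freq_ops {
		uint32_t (*read)(struct pct_register *reg);
		void (*write)(struct pct_register *reg, uint32_t val);
	};

	static uint32_t freq_read_io(struct pct_register *reg)
	{
		printf("read %u-bit port 0x%x\n", reg->bit_width, reg->port);
		return 0;
	}

	static void freq_write_io(struct pct_register *reg, uint32_t val)
	{
		printf("write 0x%x to %u-bit port 0x%x\n", val, reg->bit_width, reg->port);
	}

	int main(void)
	{
		struct pct_register reg = { .port = 0xb2, .bit_width = 16 };
		/* selected once at init time instead of switched on per call */
		struct freq_ops ops = { .read = freq_read_io, .write = freq_write_io };

		ops.write(&reg, 0x4);
		(void)ops.read(&reg);
		return 0;
	}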
8 changes: 4 additions & 4 deletions drivers/cpufreq/amd_freq_sensitivity.c
@@ -21,7 +21,7 @@
#include <asm/msr.h>
#include <asm/cpufeature.h>

#include "cpufreq_governor.h"
#include "cpufreq_ondemand.h"

#define MSR_AMD64_FREQ_SENSITIVITY_ACTUAL 0xc0010080
#define MSR_AMD64_FREQ_SENSITIVITY_REFERENCE 0xc0010081
@@ -45,10 +45,10 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
long d_actual, d_reference;
struct msr actual, reference;
struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
struct dbs_data *od_data = policy->governor_data;
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *od_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = od_data->tuners;
struct od_cpu_dbs_info_s *od_info =
od_data->cdata->get_cpu_dbs_info_s(policy->cpu);
struct od_policy_dbs_info *od_info = to_dbs_info(policy_dbs);

if (!od_info->freq_table)
return freq_next;
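The amd_freq_sensitivity.c hunk follows the governor rework elsewhere in the series: policy->governor_data now holds a per-policy struct policy_dbs_info, the shared tunables hang off its dbs_data pointer, and the governor-private state is recovered with to_dbs_info(). A stand-alone model of that layering (the struct bodies are simplified assumptions; only the container_of-style recovery mirrors the kernel helper):

	#include <stddef.h>
	#include <stdio.h>

	struct dbs_data {
		unsigned int sampling_rate;	/* shared tunables */
	};

	struct policy_dbs_info {
		struct dbs_data *dbs_data;	/* common per-policy data */
	};

	struct od_policy_dbs_info {
		struct policy_dbs_info policy_dbs;	/* embedded common part */
		unsigned int freq_lo;			/* governor-private state */
	};

	/* Recover the governor-private wrapper from the common pointer,
	 * the way to_dbs_info() does in the kernel. */
	#define to_dbs_info(pdbs) \
		((struct od_policy_dbs_info *)((char *)(pdbs) - \
			offsetof(struct od_policy_dbs_info, policy_dbs)))

	int main(void)
	{
		struct dbs_data shared = { .sampling_rate = 10000 };
		struct od_policy_dbs_info od = {
			.policy_dbs = { .dbs_data = &shared },
			.freq_lo = 800000,
		};
		/* what policy->governor_data would hold after the rework */
		struct policy_dbs_info *policy_dbs = &od.policy_dbs;

		printf("sampling_rate=%u freq_lo=%u\n",
		       policy_dbs->dbs_data->sampling_rate,
		       to_dbs_info(policy_dbs)->freq_lo);
		return 0;
	}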