x86/resctrl: Prepare to split rdt_domain structure
The rdt_domain structure is used for both control and monitor features.
It is about to be split into separate structures for these two usages
because future resources will have different scopes for their control
and monitoring features.

To allow common code to scan a list of domains looking for a specific
domain id, move the fields shared by both usages ("list", "id",
"cpu_mask") into their own structure ("hdr") embedded in rdt_domain.

Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
Tested-by: Babu Moger <babu.moger@amd.com>
Link: https://lore.kernel.org/r/20240628215619.76401-3-tony.luck@intel.com
aegl authored and bp3tk0v committed Jul 2, 2024
1 parent f436cb6 commit c103d4d
Showing 6 changed files with 81 additions and 73 deletions.
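
For orientation, here is a sketch of the layout the commit message describes. The header-file hunk defining the new structure is not among the diffs shown below, so this is inferred from the commit message and the d->hdr.* accesses in the diffs; treat field order and comments as approximate.

struct rdt_domain_hdr {
        struct list_head        list;           /* linkage on rdt_resource::domains */
        int                     id;             /* unique domain id within the resource */
        struct cpumask          cpu_mask;       /* CPUs that share this domain */
};

struct rdt_domain {
        struct rdt_domain_hdr   hdr;            /* fields common to control and monitor */
        /* control- and monitor-specific fields (staged_config, mbm/cqm state, plr, ...) */
};

With all list-walking and lookup code going through hdr, a later patch can split the control- and monitor-specific remainders into separate structures without touching the traversal logic.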
26 changes: 13 additions & 13 deletions arch/x86/kernel/cpu/resctrl/core.c
@@ -355,9 +355,9 @@ struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
 
        lockdep_assert_cpus_held();
 
-       list_for_each_entry(d, &r->domains, list) {
+       list_for_each_entry(d, &r->domains, hdr.list) {
                /* Find the domain that contains this CPU */
-               if (cpumask_test_cpu(cpu, &d->cpu_mask))
+               if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
                        return d;
        }
 
@@ -393,12 +393,12 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
        struct list_head *l;
 
        list_for_each(l, &r->domains) {
-               d = list_entry(l, struct rdt_domain, list);
+               d = list_entry(l, struct rdt_domain, hdr.list);
                /* When id is found, return its domain. */
-               if (id == d->id)
+               if (id == d->hdr.id)
                        return d;
                /* Stop searching when finding id's position in sorted list. */
-               if (id < d->id)
+               if (id < d->hdr.id)
                        break;
        }
 
@@ -526,7 +526,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
        d = rdt_find_domain(r, id, &add_pos);
 
        if (d) {
-               cpumask_set_cpu(cpu, &d->cpu_mask);
+               cpumask_set_cpu(cpu, &d->hdr.cpu_mask);
                if (r->cache.arch_has_per_cpu_cfg)
                        rdt_domain_reconfigure_cdp(r);
                return;
@@ -537,8 +537,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
                return;
 
        d = &hw_dom->d_resctrl;
-       d->id = id;
-       cpumask_set_cpu(cpu, &d->cpu_mask);
+       d->hdr.id = id;
+       cpumask_set_cpu(cpu, &d->hdr.cpu_mask);
 
        rdt_domain_reconfigure_cdp(r);
 
@@ -552,11 +552,11 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
                return;
        }
 
-       list_add_tail_rcu(&d->list, add_pos);
+       list_add_tail_rcu(&d->hdr.list, add_pos);
 
        err = resctrl_online_domain(r, d);
        if (err) {
-               list_del_rcu(&d->list);
+               list_del_rcu(&d->hdr.list);
                synchronize_rcu();
                domain_free(hw_dom);
        }
@@ -583,10 +583,10 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
        }
        hw_dom = resctrl_to_arch_dom(d);
 
-       cpumask_clear_cpu(cpu, &d->cpu_mask);
-       if (cpumask_empty(&d->cpu_mask)) {
+       cpumask_clear_cpu(cpu, &d->hdr.cpu_mask);
+       if (cpumask_empty(&d->hdr.cpu_mask)) {
                resctrl_offline_domain(r, d);
-               list_del_rcu(&d->list);
+               list_del_rcu(&d->hdr.list);
                synchronize_rcu();
 
                /*
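
A note on why the conversions above are purely mechanical: list_for_each_entry() and list_entry() take a member *designator*, which they resolve at compile time via offsetof()/container_of(), so the nested path "hdr.list" behaves exactly like the old top-level "list" member. Below is a minimal userspace sketch of that mechanism, with toy re-implementations of the kernel macros and a hypothetical two-domain list (not kernel code):

#include <stddef.h>
#include <stdio.h>

/* Toy versions of the kernel's list machinery, just enough to show that
 * a nested member designator such as "hdr.list" resolves correctly. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)                          \
        for (pos = container_of((head)->next, typeof(*pos), member);    \
             &pos->member != (head);                                    \
             pos = container_of(pos->member.next, typeof(*pos), member))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

/* Trimmed-down stand-ins for the resctrl structures. */
struct rdt_domain_hdr {
        struct list_head list;
        int id;
};

struct rdt_domain {
        struct rdt_domain_hdr hdr;
        /* control/monitor-specific fields would follow */
};

int main(void)
{
        struct list_head domains = { &domains, &domains };
        struct rdt_domain d0 = { .hdr.id = 0 }, d1 = { .hdr.id = 1 }, *d;

        list_add_tail(&d0.hdr.list, &domains);
        list_add_tail(&d1.hdr.list, &domains);

        /* offsetof(struct rdt_domain, hdr.list) is computed at compile
         * time, exactly as it was for the old top-level "list" field. */
        list_for_each_entry(d, &domains, hdr.list)
                printf("domain id %d\n", d->hdr.id);

        return 0;
}

Compiled with gcc, this prints "domain id 0" and "domain id 1", walking the list through the embedded header exactly as the converted kernel code now does.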
24 changes: 12 additions & 12 deletions arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -69,7 +69,7 @@ int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
 
        cfg = &d->staged_config[s->conf_type];
        if (cfg->have_new_ctrl) {
-               rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
+               rdt_last_cmd_printf("Duplicate domain %d\n", d->hdr.id);
                return -EINVAL;
        }
 
@@ -148,7 +148,7 @@ int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
 
        cfg = &d->staged_config[s->conf_type];
        if (cfg->have_new_ctrl) {
-               rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
+               rdt_last_cmd_printf("Duplicate domain %d\n", d->hdr.id);
                return -EINVAL;
        }
 
@@ -231,8 +231,8 @@ static int parse_line(char *line, struct resctrl_schema *s,
                return -EINVAL;
        }
        dom = strim(dom);
-       list_for_each_entry(d, &r->domains, list) {
-               if (d->id == dom_id) {
+       list_for_each_entry(d, &r->domains, hdr.list) {
+               if (d->hdr.id == dom_id) {
                        data.buf = dom;
                        data.rdtgrp = rdtgrp;
                        if (r->parse_ctrlval(&data, s, d))
@@ -280,7 +280,7 @@ int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
        u32 idx = get_config_index(closid, t);
        struct msr_param msr_param;
 
-       if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
+       if (!cpumask_test_cpu(smp_processor_id(), &d->hdr.cpu_mask))
                return -EINVAL;
 
        hw_dom->ctrl_val[idx] = cfg_val;
@@ -306,7 +306,7 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
        /* Walking r->domains, ensure it can't race with cpuhp */
        lockdep_assert_cpus_held();
 
-       list_for_each_entry(d, &r->domains, list) {
+       list_for_each_entry(d, &r->domains, hdr.list) {
                hw_dom = resctrl_to_arch_dom(d);
                msr_param.res = NULL;
                for (t = 0; t < CDP_NUM_TYPES; t++) {
@@ -330,7 +330,7 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
                        }
                }
                if (msr_param.res)
-                       smp_call_function_any(&d->cpu_mask, rdt_ctrl_update, &msr_param, 1);
+                       smp_call_function_any(&d->hdr.cpu_mask, rdt_ctrl_update, &msr_param, 1);
        }
 
        return 0;
@@ -450,7 +450,7 @@ static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
        lockdep_assert_cpus_held();
 
        seq_printf(s, "%*s:", max_name_width, schema->name);
-       list_for_each_entry(dom, &r->domains, list) {
+       list_for_each_entry(dom, &r->domains, hdr.list) {
                if (sep)
                        seq_puts(s, ";");
 
@@ -460,7 +460,7 @@ static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
                ctrl_val = resctrl_arch_get_config(r, dom, closid,
                                                   schema->conf_type);
 
-               seq_printf(s, r->format_str, dom->id, max_data_width,
+               seq_printf(s, r->format_str, dom->hdr.id, max_data_width,
                           ctrl_val);
                sep = true;
        }
@@ -489,7 +489,7 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
                } else {
                        seq_printf(s, "%s:%d=%x\n",
                                   rdtgrp->plr->s->res->name,
-                                  rdtgrp->plr->d->id,
+                                  rdtgrp->plr->d->hdr.id,
                                   rdtgrp->plr->cbm);
                }
        } else {
@@ -537,7 +537,7 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
                return;
        }
 
-       cpu = cpumask_any_housekeeping(&d->cpu_mask, RESCTRL_PICK_ANY_CPU);
+       cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask, RESCTRL_PICK_ANY_CPU);
 
        /*
         * cpumask_any_housekeeping() prefers housekeeping CPUs, but
@@ -546,7 +546,7 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
         * counters on some platforms if its called in IRQ context.
         */
        if (tick_nohz_full_cpu(cpu))
-               smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
+               smp_call_function_any(&d->hdr.cpu_mask, mon_event_count, rr, 1);
        else
                smp_call_on_cpu(cpu, smp_mon_event_count, rr, false);
 
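
For context, show_doms() above renders each line of a resource group's schemata file: one line per resource, with one "<domain id>=<value>" pair per domain, separated by ";". An illustrative schemata excerpt (hypothetical cache-bitmask and bandwidth values):

L3:0=7fff;1=3f
MB:0=100;1=50

The domain ids printed here are what becomes dom->hdr.id after this change.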
14 changes: 7 additions & 7 deletions arch/x86/kernel/cpu/resctrl/monitor.c
@@ -281,7 +281,7 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
 
        resctrl_arch_rmid_read_context_check();
 
-       if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
+       if (!cpumask_test_cpu(smp_processor_id(), &d->hdr.cpu_mask))
                return -EINVAL;
 
        ret = __rmid_read(rmid, eventid, &msr_val);
@@ -364,7 +364,7 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
                 * CLOSID and RMID because there may be dependencies between them
                 * on some architectures.
                 */
-               trace_mon_llc_occupancy_limbo(entry->closid, entry->rmid, d->id, val);
+               trace_mon_llc_occupancy_limbo(entry->closid, entry->rmid, d->hdr.id, val);
        }
 
        if (force_free || !rmid_dirty) {
@@ -490,7 +490,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
        idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid);
 
        entry->busy = 0;
-       list_for_each_entry(d, &r->domains, list) {
+       list_for_each_entry(d, &r->domains, hdr.list) {
                /*
                 * For the first limbo RMID in the domain,
                 * setup up the limbo worker.
@@ -801,7 +801,7 @@ void cqm_handle_limbo(struct work_struct *work)
        __check_limbo(d, false);
 
        if (has_busy_rmid(d)) {
-               d->cqm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask,
+               d->cqm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask,
                                                           RESCTRL_PICK_ANY_CPU);
                schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo,
                                         delay);
@@ -825,7 +825,7 @@ void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms,
        unsigned long delay = msecs_to_jiffies(delay_ms);
        int cpu;
 
-       cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu);
+       cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
        dom->cqm_work_cpu = cpu;
 
        if (cpu < nr_cpu_ids)
@@ -868,7 +868,7 @@ void mbm_handle_overflow(struct work_struct *work)
         * Re-check for housekeeping CPUs. This allows the overflow handler to
         * move off a nohz_full CPU quickly.
         */
-       d->mbm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask,
+       d->mbm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask,
                                                   RESCTRL_PICK_ANY_CPU);
        schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay);
 
@@ -897,7 +897,7 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms,
         */
        if (!resctrl_mounted || !resctrl_arch_mon_capable())
                return;
-       cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu);
+       cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
        dom->mbm_work_cpu = cpu;
 
        if (cpu < nr_cpu_ids)
14 changes: 7 additions & 7 deletions arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -221,7 +221,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
        int cpu;
        int ret;
 
-       for_each_cpu(cpu, &plr->d->cpu_mask) {
+       for_each_cpu(cpu, &plr->d->hdr.cpu_mask) {
                pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
                if (!pm_req) {
                        rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n");
@@ -300,7 +300,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
                return -ENODEV;
 
        /* Pick the first cpu we find that is associated with the cache. */
-       plr->cpu = cpumask_first(&plr->d->cpu_mask);
+       plr->cpu = cpumask_first(&plr->d->hdr.cpu_mask);
 
        if (!cpu_online(plr->cpu)) {
                rdt_last_cmd_printf("CPU %u associated with cache not online\n",
@@ -854,18 +854,18 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
         * associated with them.
         */
        for_each_alloc_capable_rdt_resource(r) {
-               list_for_each_entry(d_i, &r->domains, list) {
+               list_for_each_entry(d_i, &r->domains, hdr.list) {
                        if (d_i->plr)
                                cpumask_or(cpu_with_psl, cpu_with_psl,
-                                          &d_i->cpu_mask);
+                                          &d_i->hdr.cpu_mask);
                }
        }
 
        /*
         * Next test if new pseudo-locked region would intersect with
         * existing region.
         */
-       if (cpumask_intersects(&d->cpu_mask, cpu_with_psl))
+       if (cpumask_intersects(&d->hdr.cpu_mask, cpu_with_psl))
                ret = true;
 
        free_cpumask_var(cpu_with_psl);
@@ -1197,7 +1197,7 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
        }
 
        plr->thread_done = 0;
-       cpu = cpumask_first(&plr->d->cpu_mask);
+       cpu = cpumask_first(&plr->d->hdr.cpu_mask);
        if (!cpu_online(cpu)) {
                ret = -ENODEV;
                goto out;
@@ -1527,7 +1527,7 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
         * may be scheduled elsewhere and invalidate entries in the
         * pseudo-locked region.
         */
-       if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) {
+       if (!cpumask_subset(current->cpus_ptr, &plr->d->hdr.cpu_mask)) {
                mutex_unlock(&rdtgroup_mutex);
                return -EINVAL;
        }
(The diffs for the remaining two changed files are not shown.)
