Skip to content

Commit

Permalink
perf: Add group scheduling transactional APIs
Browse files Browse the repository at this point in the history
Add group scheduling transactional APIs to struct pmu.
These APIs will be implemented in arch code, based on Peter's idea as
below.

> the idea behind hw_perf_group_sched_in() is to not perform
> schedulability tests on each event in the group, but to add the group
> as a whole and then perform one test.
>
> Of course, when that test fails, you'll have to roll-back the whole
> group again.
>
> So start_txn (or a better name) would simply toggle a flag in the pmu
> implementation that will make pmu::enable() not perform the
> schedulability test.
>
> Then commit_txn() will perform the schedulability test (so note the
> method has to have a !void return value).
>
> This will allow us to use the regular
> kernel/perf_event.c::group_sched_in() and all the rollback code.
> Currently each hw_perf_group_sched_in() implementation duplicates all
> the rollback code (with various bugs).

->start_txn:
Start group events scheduling transaction, set a flag to make
pmu::enable() not perform the schedulability test, it will be performed
at commit time.

->commit_txn:
Commit group events scheduling transaction, perform the group
schedulability test as a whole

->cancel_txn:
Stop group events scheduling transaction, clear the flag so
pmu::enable() will perform the schedulability test.

Reviewed-by: Stephane Eranian <eranian@google.com>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Lin Ming <ming.m.lin@intel.com>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1272002160.5707.60.camel@minggr.sh.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
  • Loading branch information
Lin Ming authored and Ingo Molnar committed May 7, 2010
1 parent ab60834 commit 6bde9b6
Show file tree
Hide file tree
Showing 2 changed files with 32 additions and 16 deletions.
15 changes: 12 additions & 3 deletions include/linux/perf_event.h
Original file line number Diff line number Diff line change
Expand Up @@ -547,6 +547,8 @@ struct hw_perf_event {

struct perf_event;

#define PERF_EVENT_TXN_STARTED 1

/**
* struct pmu - generic performance monitoring unit
*/
Expand All @@ -557,6 +559,16 @@ struct pmu {
void (*stop) (struct perf_event *event);
void (*read) (struct perf_event *event);
void (*unthrottle) (struct perf_event *event);

/*
* group events scheduling is treated as a transaction,
* add group events as a whole and perform one schedulability test.
* If test fails, roll back the whole group
*/

void (*start_txn) (const struct pmu *pmu);
void (*cancel_txn) (const struct pmu *pmu);
int (*commit_txn) (const struct pmu *pmu);
};

/**
Expand Down Expand Up @@ -823,9 +835,6 @@ extern void perf_disable(void);
extern void perf_enable(void);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_event *group_leader,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
Expand Down
33 changes: 20 additions & 13 deletions kernel/perf_event.c
Original file line number Diff line number Diff line change
Expand Up @@ -83,14 +83,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
void __weak hw_perf_disable(void) { barrier(); }
void __weak hw_perf_enable(void) { barrier(); }

int __weak
hw_perf_group_sched_in(struct perf_event *group_leader,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
return 0;
}

void __weak perf_event_print_debug(void) { }

static DEFINE_PER_CPU(int, perf_disable_count);
Expand Down Expand Up @@ -644,15 +636,20 @@ group_sched_in(struct perf_event *group_event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
struct perf_event *event, *partial_group;
struct perf_event *event, *partial_group = NULL;
const struct pmu *pmu = group_event->pmu;
bool txn = false;
int ret;

if (group_event->state == PERF_EVENT_STATE_OFF)
return 0;

ret = hw_perf_group_sched_in(group_event, cpuctx, ctx);
if (ret)
return ret < 0 ? ret : 0;
/* Check if group transaction available */
if (pmu->start_txn)
txn = true;

if (txn)
pmu->start_txn(pmu);

if (event_sched_in(group_event, cpuctx, ctx))
return -EAGAIN;
Expand All @@ -667,9 +664,19 @@ group_sched_in(struct perf_event *group_event,
}
}

return 0;
if (txn) {
ret = pmu->commit_txn(pmu);
if (!ret) {
pmu->cancel_txn(pmu);

return 0;
}
}

group_error:
if (txn)
pmu->cancel_txn(pmu);

/*
* Groups can be scheduled in as one unit only, so undo any
* partial group before returning:
Expand Down

0 comments on commit 6bde9b6

Please sign in to comment.