 #define FTRACE_HASH_DEFAULT_BITS 10
 #define FTRACE_HASH_MAX_BITS 12
 
-#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
+#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define INIT_REGEX_LOCK(opsname)	\
@@ -103,7 +103,6 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_MUTEX(ftrace_lock);
 
-static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
@@ -171,23 +170,6 @@ int ftrace_nr_registered_ops(void)
 	return cnt;
 }
 
-static void
-ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
-			struct ftrace_ops *op, struct pt_regs *regs)
-{
-	int bit;
-
-	bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
-	if (bit < 0)
-		return;
-
-	do_for_each_ftrace_op(op, ftrace_global_list) {
-		op->func(ip, parent_ip, op, regs);
-	} while_for_each_ftrace_op(op);
-
-	trace_clear_recursion(bit);
-}
-
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 			    struct ftrace_ops *op, struct pt_regs *regs)
 {
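
The deleted ftrace_global_list_func() combined the recursion guard with the do_for_each_ftrace_op()/while_for_each_ftrace_op() walk, the same pattern __ftrace_ops_list_func() keeps further down. A minimal sketch of what that walk amounts to, assuming a singly linked list terminated by &ftrace_list_end (walk_ops is an illustrative name; the real macros dereference through rcu_dereference_raw_notrace()):

	static void walk_ops(struct ftrace_ops *list, unsigned long ip,
			     unsigned long parent_ip, struct pt_regs *regs)
	{
		struct ftrace_ops *op;

		/* invoke every callback currently on the list */
		for (op = list; op != &ftrace_list_end; op = op->next)
			op->func(ip, parent_ip, op, regs);
	}

With the global list gone, former FTRACE_OPS_FL_GLOBAL users sit on ftrace_ops_list and are reached by the generic list function instead.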
@@ -237,43 +219,6 @@ static int control_ops_alloc(struct ftrace_ops *ops)
 	return 0;
 }
 
-static void update_global_ops(void)
-{
-	ftrace_func_t func = ftrace_global_list_func;
-	void *private = NULL;
-
-	/* The list has its own recursion protection. */
-	global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
-
-	/*
-	 * If there's only one function registered, then call that
-	 * function directly. Otherwise, we need to iterate over the
-	 * registered callers.
-	 */
-	if (ftrace_global_list == &ftrace_list_end ||
-	    ftrace_global_list->next == &ftrace_list_end) {
-		func = ftrace_global_list->func;
-		private = ftrace_global_list->private;
-		/*
-		 * As we are calling the function directly.
-		 * If it does not have recursion protection,
-		 * the function_trace_op needs to be updated
-		 * accordingly.
-		 */
-		if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE))
-			global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
-	}
-
-	/* If we filter on pids, update to use the pid function */
-	if (!list_empty(&ftrace_pids)) {
-		set_ftrace_pid_function(func);
-		func = ftrace_pid_func;
-	}
-
-	global_ops.func = func;
-	global_ops.private = private;
-}
-
 static void ftrace_sync(struct work_struct *work)
 {
 	/*
@@ -301,8 +246,6 @@ static void update_ftrace_function(void)
 {
 	ftrace_func_t func;
 
-	update_global_ops();
-
 	/*
 	 * If we are at the end of the list and this ops is
 	 * recursion safe and not dynamic and the arch supports passing ops,
@@ -314,10 +257,7 @@ static void update_ftrace_function(void)
 	    (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
 	     !FTRACE_FORCE_LIST_FUNC)) {
 		/* Set the ftrace_ops that the arch callback uses */
-		if (ftrace_ops_list == &global_ops)
-			set_function_trace_op = ftrace_global_list;
-		else
-			set_function_trace_op = ftrace_ops_list;
+		set_function_trace_op = ftrace_ops_list;
 		func = ftrace_ops_list->func;
 	} else {
 		/* Just use the default ftrace_ops */
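
With the global special case gone, the fast-path test is uniform: when exactly one ops is registered and it is recursion safe, non-dynamic, and the arch can pass the ops pointer, the arch trampoline calls its func directly rather than going through the list function. The "exactly one" part of the condition boils down to the following check (an illustrative helper, not part of the patch):

	static bool only_one_ops_registered(void)
	{
		/* non-empty list whose first entry is also its last */
		return ftrace_ops_list != &ftrace_list_end &&
		       ftrace_ops_list->next == &ftrace_list_end;
	}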
@@ -434,16 +374,9 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if (ops->flags & FTRACE_OPS_FL_DELETED)
 		return -EINVAL;
 
-	if (FTRACE_WARN_ON(ops == &global_ops))
-		return -EINVAL;
-
 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
 		return -EBUSY;
 
-	/* We don't support both control and global flags set. */
-	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
-		return -EINVAL;
-
 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 	/*
 	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
@@ -461,10 +394,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if (!core_kernel_data((unsigned long)ops))
 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 
-	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
-		ops->flags |= FTRACE_OPS_FL_ENABLED;
-	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
 		if (control_ops_alloc(ops))
 			return -ENOMEM;
 		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
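
Every ops without FTRACE_OPS_FL_CONTROL now takes the ordinary ftrace_ops_list path, so registration from a caller's side is unchanged. A hedged sketch of that usage, with my_callback and my_ops as illustrative names (the callback signature matches this kernel's ftrace_func_t):

	static void my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* runs for every traced function this ops' hash allows */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_callback,
		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
	};

	/* register_ftrace_function(&my_ops) links the ops onto
	   ftrace_ops_list; unregister_ftrace_function(&my_ops) takes
	   it back off. */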
@@ -484,15 +414,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
 		return -EBUSY;
 
-	if (FTRACE_WARN_ON(ops == &global_ops))
-		return -EINVAL;
-
-	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		ret = remove_ftrace_list_ops(&ftrace_global_list,
-					     &global_ops, ops);
-		if (!ret)
-			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
-	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
 		ret = remove_ftrace_list_ops(&ftrace_control_list,
 					     &control_ops, ops);
 	} else
@@ -2128,15 +2050,6 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
 	ftrace_start_up++;
 	command |= FTRACE_UPDATE_CALLS;
 
-	/* ops marked global share the filter hashes */
-	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		ops = &global_ops;
-		/* Don't update hash if global is already set */
-		if (global_start_up)
-			hash_enable = false;
-		global_start_up++;
-	}
-
 	ops->flags |= FTRACE_OPS_FL_ENABLED;
 	if (hash_enable)
 		ftrace_hash_rec_enable(ops, 1);
@@ -2166,21 +2079,10 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	 */
 	WARN_ON_ONCE(ftrace_start_up < 0);
 
-	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		ops = &global_ops;
-		global_start_up--;
-		WARN_ON_ONCE(global_start_up < 0);
-		/* Don't update hash if global still has users */
-		if (global_start_up) {
-			WARN_ON_ONCE(!ftrace_start_up);
-			hash_disable = false;
-		}
-	}
-
 	if (hash_disable)
 		ftrace_hash_rec_disable(ops, 1);
 
-	if (ops != &global_ops || !global_start_up)
+	if (!global_start_up)
 		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
 
 	command |= FTRACE_UPDATE_CALLS;
@@ -3524,10 +3426,6 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 	struct ftrace_hash *hash;
 	int ret;
 
-	/* All global ops uses the global ops filters */
-	if (ops->flags & FTRACE_OPS_FL_GLOBAL)
-		ops = &global_ops;
-
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
@@ -4462,6 +4360,34 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+__init void ftrace_init_global_array_ops(struct trace_array *tr)
+{
+	tr->ops = &global_ops;
+	tr->ops->private = tr;
+}
+
+void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
+{
+	/* If we filter on pids, update to use the pid function */
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
+		if (WARN_ON(tr->ops->func != ftrace_stub))
+			printk("ftrace ops had %pS for function\n",
+			       tr->ops->func);
+		/* Only the top level instance does pid tracing */
+		if (!list_empty(&ftrace_pids)) {
+			set_ftrace_pid_function(func);
+			func = ftrace_pid_func;
+		}
+	}
+	tr->ops->func = func;
+	tr->ops->private = tr;
+}
+
+void ftrace_reset_array_ops(struct trace_array *tr)
+{
+	tr->ops->func = ftrace_stub;
+}
+
 static void
 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 			struct ftrace_ops *op, struct pt_regs *regs)
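
These three helpers replace the implicit FTRACE_OPS_FL_GLOBAL sharing: each trace_array owns an ops, the top-level array keeps global_ops, and pid filtering applies only there. A sketch of the intended call pattern from a tracer's init/reset, reduced to the two calls that matter here (names follow the function tracer; the bodies are illustrative, not the full tracer code):

	static int function_trace_init(struct trace_array *tr)
	{
		/* point this instance's ops at the tracer's callback */
		ftrace_init_array_ops(tr, function_trace_call);
		return 0;
	}

	static void function_trace_reset(struct trace_array *tr)
	{
		/* put the stub back so no stale callback can fire */
		ftrace_reset_array_ops(tr);
	}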
@@ -4520,9 +4446,16 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 	 */
 	preempt_disable_notrace();
 	do_for_each_ftrace_op(op, ftrace_ops_list) {
-		if (ftrace_ops_test(op, ip, regs))
+		if (ftrace_ops_test(op, ip, regs)) {
+			if (WARN_ON(!op->func)) {
+				function_trace_stop = 1;
+				printk("op=%p %pS\n", op, op);
+				goto out;
+			}
 			op->func(ip, parent_ip, op, regs);
+		}
 	} while_for_each_ftrace_op(op);
+out:
 	preempt_enable_notrace();
 	trace_clear_recursion(bit);
 }
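
The new WARN_ON(!op->func) is defensive: now that per-array ops swap their func between a real callback and ftrace_stub, an ops that somehow reached the list with a NULL func would be called from this hot path and crash. The guard instead stops tracing (function_trace_stop = 1), prints the offending op, and bails out through the new out: label.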
@@ -5076,8 +5009,7 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
 /* Just a place holder for function graph */
 static struct ftrace_ops fgraph_ops __read_mostly = {
 	.func = ftrace_stub,
-	.flags = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
-		 FTRACE_OPS_FL_RECURSION_SAFE,
+	.flags = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)