@@ -327,7 +327,7 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
327327 struct kprobe * kp ;
328328
329329 list_for_each_entry_rcu (kp , & p -> list , list ) {
330- if (kp -> pre_handler ) {
330+ if (kp -> pre_handler && ! kprobe_gone ( kp ) ) {
331331 set_kprobe_instance (kp );
332332 if (kp -> pre_handler (kp , regs ))
333333 return 1 ;
@@ -343,7 +343,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
343343 struct kprobe * kp ;
344344
345345 list_for_each_entry_rcu (kp , & p -> list , list ) {
346- if (kp -> post_handler ) {
346+ if (kp -> post_handler && ! kprobe_gone ( kp ) ) {
347347 set_kprobe_instance (kp );
348348 kp -> post_handler (kp , regs , flags );
349349 reset_kprobe_instance ();
@@ -545,9 +545,10 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
545545 ap -> addr = p -> addr ;
546546 ap -> pre_handler = aggr_pre_handler ;
547547 ap -> fault_handler = aggr_fault_handler ;
548- if (p -> post_handler )
548+ /* We don't care about a kprobe which has already gone. */
549+ if (p -> post_handler && !kprobe_gone (p ))
549550 ap -> post_handler = aggr_post_handler ;
550- if (p -> break_handler )
551+ if (p -> break_handler && ! kprobe_gone ( p ) )
551552 ap -> break_handler = aggr_break_handler ;
552553
553554 INIT_LIST_HEAD (& ap -> list );
@@ -566,17 +567,41 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
566567 int ret = 0 ;
567568 struct kprobe * ap ;
568569
570+ if (kprobe_gone (old_p )) {
571+ /*
572+ * Attempting to insert a new probe at the same location that
573+ * had a probe in the module vaddr area which has already been
574+ * freed. So, the instruction slot has already been
575+ * released. We need a new slot for the new probe.
576+ */
577+ ret = arch_prepare_kprobe (old_p );
578+ if (ret )
579+ return ret ;
580+ }
569581 if (old_p -> pre_handler == aggr_pre_handler ) {
570582 copy_kprobe (old_p , p );
571583 ret = add_new_kprobe (old_p , p );
584+ ap = old_p ;
572585 } else {
573586 ap = kzalloc (sizeof (struct kprobe ), GFP_KERNEL );
574- if (!ap )
587+ if (!ap ) {
588+ if (kprobe_gone (old_p ))
589+ arch_remove_kprobe (old_p );
575590 return - ENOMEM ;
591+ }
576592 add_aggr_kprobe (ap , old_p );
577593 copy_kprobe (ap , p );
578594 ret = add_new_kprobe (ap , p );
579595 }
596+ if (kprobe_gone (old_p )) {
597+ /*
598+ * If the old_p has gone, its breakpoint has been disarmed.
599+ * We have to arm it again after preparing real kprobes.
600+ */
601+ ap -> flags &= ~KPROBE_FLAG_GONE ;
602+ if (kprobe_enabled )
603+ arch_arm_kprobe (ap );
604+ }
580605 return ret ;
581606}
582607
@@ -639,8 +664,7 @@ static int __kprobes __register_kprobe(struct kprobe *p,
639664 return - EINVAL ;
640665 }
641666
642- p -> mod_refcounted = 0 ;
643-
667+ p -> flags = 0 ;
644668 /*
645669 * Check if are we probing a module.
646670 */
@@ -649,16 +673,14 @@ static int __kprobes __register_kprobe(struct kprobe *p,
649673 struct module * calling_mod ;
650674 calling_mod = __module_text_address (called_from );
651675 /*
652- * We must allow modules to probe themself and in this case
653- * avoid incrementing the module refcount, so as to allow
654- * unloading of self probing modules.
676+ * We must hold a refcount of the probed module while updating
677+ * its code to prohibit unexpected unloading.
655678 */
656679 if (calling_mod != probed_mod ) {
657680 if (unlikely (!try_module_get (probed_mod ))) {
658681 preempt_enable ();
659682 return - EINVAL ;
660683 }
661- p -> mod_refcounted = 1 ;
662684 } else
663685 probed_mod = NULL ;
664686 }
@@ -687,8 +709,9 @@ static int __kprobes __register_kprobe(struct kprobe *p,
687709out :
688710 mutex_unlock (& kprobe_mutex );
689711
690- if (ret && probed_mod )
712+ if (probed_mod )
691713 module_put (probed_mod );
714+
692715 return ret ;
693716}
694717
@@ -716,16 +739,16 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
716739 list_is_singular (& old_p -> list ))) {
717740 /*
718741 * Only probe on the hash list. Disarm only if kprobes are
719- * enabled - otherwise, the breakpoint would already have
720- * been removed. We save on flushing icache.
742+ * enabled and not gone - otherwise, the breakpoint would
743+ * already have been removed. We save on flushing icache.
721744 */
722- if (kprobe_enabled )
745+ if (kprobe_enabled && ! kprobe_gone ( old_p ) )
723746 arch_disarm_kprobe (p );
724747 hlist_del_rcu (& old_p -> hlist );
725748 } else {
726- if (p -> break_handler )
749+ if (p -> break_handler && ! kprobe_gone ( p ) )
727750 old_p -> break_handler = NULL ;
728- if (p -> post_handler ) {
751+ if (p -> post_handler && ! kprobe_gone ( p ) ) {
729752 list_for_each_entry_rcu (list_p , & old_p -> list , list ) {
730753 if ((list_p != p ) && (list_p -> post_handler ))
731754 goto noclean ;
@@ -740,27 +763,16 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
740763
741764static void __kprobes __unregister_kprobe_bottom (struct kprobe * p )
742765{
743- struct module * mod ;
744766 struct kprobe * old_p ;
745767
746- if (p -> mod_refcounted ) {
747- /*
748- * Since we've already incremented refcount,
749- * we don't need to disable preemption.
750- */
751- mod = module_text_address ((unsigned long )p -> addr );
752- if (mod )
753- module_put (mod );
754- }
755-
756- if (list_empty (& p -> list ) || list_is_singular (& p -> list )) {
757- if (!list_empty (& p -> list )) {
758- /* "p" is the last child of an aggr_kprobe */
759- old_p = list_entry (p -> list .next , struct kprobe , list );
760- list_del (& p -> list );
761- kfree (old_p );
762- }
768+ if (list_empty (& p -> list ))
763769 arch_remove_kprobe (p );
770+ else if (list_is_singular (& p -> list )) {
771+ /* "p" is the last child of an aggr_kprobe */
772+ old_p = list_entry (p -> list .next , struct kprobe , list );
773+ list_del (& p -> list );
774+ arch_remove_kprobe (old_p );
775+ kfree (old_p );
764776 }
765777}
766778
@@ -1074,6 +1086,67 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
10741086
10751087#endif /* CONFIG_KRETPROBES */
10761088
1089+ /* Set the kprobe gone and remove its instruction buffer. */
1090+ static void __kprobes kill_kprobe (struct kprobe * p )
1091+ {
1092+ struct kprobe * kp ;
1093+ p -> flags |= KPROBE_FLAG_GONE ;
1094+ if (p -> pre_handler == aggr_pre_handler ) {
1095+ /*
1096+ * If this is an aggr_kprobe, we have to list all the
1097+ * chained probes and mark them GONE.
1098+ */
1099+ list_for_each_entry_rcu (kp , & p -> list , list )
1100+ kp -> flags |= KPROBE_FLAG_GONE ;
1101+ p -> post_handler = NULL ;
1102+ p -> break_handler = NULL ;
1103+ }
1104+ /*
1105+ * Here, we can remove insn_slot safely, because no thread calls
1106+ * the original probed function (which will be freed soon) any more.
1107+ */
1108+ arch_remove_kprobe (p );
1109+ }
1110+
1111+ /* Module notifier call back, checking kprobes on the module */
1112+ static int __kprobes kprobes_module_callback (struct notifier_block * nb ,
1113+ unsigned long val , void * data )
1114+ {
1115+ struct module * mod = data ;
1116+ struct hlist_head * head ;
1117+ struct hlist_node * node ;
1118+ struct kprobe * p ;
1119+ unsigned int i ;
1120+
1121+ if (val != MODULE_STATE_GOING )
1122+ return NOTIFY_DONE ;
1123+
1124+ /*
1125+ * module .text section will be freed. We need to
1126+ * disable kprobes which have been inserted in the section.
1127+ */
1128+ mutex_lock (& kprobe_mutex );
1129+ for (i = 0 ; i < KPROBE_TABLE_SIZE ; i ++ ) {
1130+ head = & kprobe_table [i ];
1131+ hlist_for_each_entry_rcu (p , node , head , hlist )
1132+ if (within_module_core ((unsigned long )p -> addr , mod )) {
1133+ /*
1134+ * The vaddr this probe is installed at will soon
1135+ * be vfreed but not synced to disk. Hence,
1136+ * disarming the breakpoint isn't needed.
1137+ */
1138+ kill_kprobe (p );
1139+ }
1140+ }
1141+ mutex_unlock (& kprobe_mutex );
1142+ return NOTIFY_DONE ;
1143+ }
1144+
1145+ static struct notifier_block kprobe_module_nb = {
1146+ .notifier_call = kprobes_module_callback ,
1147+ .priority = 0
1148+ };
1149+
10771150static int __init init_kprobes (void )
10781151{
10791152 int i , err = 0 ;
@@ -1130,6 +1203,9 @@ static int __init init_kprobes(void)
11301203 err = arch_init_kprobes ();
11311204 if (!err )
11321205 err = register_die_notifier (& kprobe_exceptions_nb );
1206+ if (!err )
1207+ err = register_module_notifier (& kprobe_module_nb );
1208+
11331209 kprobes_initialized = (err == 0 );
11341210
11351211 if (!err )
@@ -1150,10 +1226,12 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
11501226 else
11511227 kprobe_type = "k" ;
11521228 if (sym )
1153- seq_printf (pi , "%p %s %s+0x%x %s\n" , p -> addr , kprobe_type ,
1154- sym , offset , (modname ? modname : " " ));
1229+ seq_printf (pi , "%p %s %s+0x%x %s %s\n" , p -> addr , kprobe_type ,
1230+ sym , offset , (modname ? modname : " " ),
1231+ (kprobe_gone (p ) ? "[GONE]" : "" ));
11551232 else
1156- seq_printf (pi , "%p %s %p\n" , p -> addr , kprobe_type , p -> addr );
1233+ seq_printf (pi , "%p %s %p %s\n" , p -> addr , kprobe_type , p -> addr ,
1234+ (kprobe_gone (p ) ? "[GONE]" : "" ));
11571235}
11581236
11591237static void __kprobes * kprobe_seq_start (struct seq_file * f , loff_t * pos )
@@ -1234,7 +1312,8 @@ static void __kprobes enable_all_kprobes(void)
12341312 for (i = 0 ; i < KPROBE_TABLE_SIZE ; i ++ ) {
12351313 head = & kprobe_table [i ];
12361314 hlist_for_each_entry_rcu (p , node , head , hlist )
1237- arch_arm_kprobe (p );
1315+ if (!kprobe_gone (p ))
1316+ arch_arm_kprobe (p );
12381317 }
12391318
12401319 kprobe_enabled = true;
@@ -1263,7 +1342,7 @@ static void __kprobes disable_all_kprobes(void)
12631342 for (i = 0 ; i < KPROBE_TABLE_SIZE ; i ++ ) {
12641343 head = & kprobe_table [i ];
12651344 hlist_for_each_entry_rcu (p , node , head , hlist ) {
1266- if (!arch_trampoline_kprobe (p ))
1345+ if (!arch_trampoline_kprobe (p ) && ! kprobe_gone ( p ) )
12671346 arch_disarm_kprobe (p );
12681347 }
12691348 }
0 commit comments