3333#include <linux/workqueue.h>
3434#include <linux/kthread.h>
3535#include <linux/slab.h>
36+ #include <linux/irq_work.h>
37+ #include <asm/topology.h>
3638
3739#define CREATE_TRACE_POINTS
3840#include <trace/events/cpufreq_interactive.h>
@@ -56,6 +58,10 @@ struct cpufreq_interactive_cpuinfo {
5658 u64 loc_hispeed_val_time ; /* per-cpu hispeed_validate_time */
5759 struct rw_semaphore enable_sem ;
5860 int governor_enabled ;
61+ int cpu ;
62+ unsigned int task_boost_freq ;
63+ u64 task_boos_endtime ;
64+ struct irq_work irq_work ;
5965};
6066
6167static DEFINE_PER_CPU (struct cpufreq_interactive_cpuinfo , cpuinfo ) ;
@@ -402,6 +408,9 @@ static void cpufreq_interactive_timer(unsigned long data)
402408 (new_freq < tunables -> touchboost_freq )) {
403409 new_freq = tunables -> touchboost_freq ;
404410 }
411+ if ((now < pcpu -> task_boos_endtime ) && (new_freq < pcpu -> task_boost_freq )) {
412+ new_freq = pcpu -> task_boost_freq ;
413+ }
405414#endif
406415 if (pcpu -> policy -> cur >= tunables -> hispeed_freq &&
407416 new_freq > pcpu -> policy -> cur &&
@@ -1331,6 +1340,93 @@ static void rockchip_cpufreq_policy_init(struct cpufreq_policy *policy)
13311340 else
13321341 * tunables = backup_tunables [index ];
13331342}
1343+
/*
 * task_boost_irq_work - irq_work callback queued by cpufreq_task_boost().
 *
 * Raises this CPU's target frequency to the pending task_boost_freq and
 * wakes the speedchange task to actually program the hardware.  Running
 * from irq_work context lets the boost request originate from contexts
 * where taking these locks directly would be unsafe.
 */
static void task_boost_irq_work(struct irq_work *irq_work)
{
	struct cpufreq_interactive_cpuinfo *pcpu;
	unsigned long flags[2];

	pcpu = container_of(irq_work, struct cpufreq_interactive_cpuinfo, irq_work);
	/* Never block here: bail out if the governor is being torn down. */
	if (!down_read_trylock(&pcpu->enable_sem))
		return;

	if (!pcpu->governor_enabled || !pcpu->policy)
		goto out;

	/*
	 * Lock order: speedchange_cpumask_lock first, then the per-cpu
	 * target_freq_lock.  Keep this order consistent with the other
	 * speedchange users to avoid deadlock.
	 */
	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
	spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
	/* Boost only raises the target frequency, never lowers it. */
	if (pcpu->target_freq < pcpu->task_boost_freq) {
		pcpu->target_freq = pcpu->task_boost_freq;
		cpumask_set_cpu(pcpu->cpu, &speedchange_cpumask);
		wake_up_process(speedchange_task);
	}
	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

out:
	up_read(&pcpu->enable_sem);
}
1369+
1370+ static unsigned int get_freq_for_util (struct cpufreq_policy * policy , unsigned long util )
1371+ {
1372+ struct cpufreq_frequency_table * pos ;
1373+ unsigned long max_cap , cur_cap ;
1374+ unsigned int freq = 0 ;
1375+
1376+ max_cap = arch_scale_cpu_capacity (NULL , policy -> cpu );
1377+ cpufreq_for_each_valid_entry (pos , policy -> freq_table ) {
1378+ freq = pos -> frequency ;
1379+
1380+ cur_cap = max_cap * freq / policy -> max ;
1381+ if (cur_cap > util )
1382+ break ;
1383+ }
1384+
1385+ return freq ;
1386+ }
1387+
/*
 * cpufreq_task_boost - request a temporary frequency boost for @cpu when
 * a task's projected utilization (@util plus 25% headroom) exceeds the
 * CPU's current capacity.
 *
 * NOTE(review): presumably called from scheduler hot paths — the actual
 * frequency change is deferred to irq_work rather than done inline;
 * confirm against the caller.
 */
void cpufreq_task_boost(int cpu, unsigned long util)
{
	struct cpufreq_interactive_tunables *tunables;
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_policy *policy = pcpu->policy;
	unsigned long cap, min_util;

	/* Never block: skip the boost if the governor is starting/stopping. */
	if (!down_read_trylock(&pcpu->enable_sem))
		return;

	if (!pcpu->governor_enabled || !policy)
		goto out;

	/* Already running at the policy maximum — nothing to boost. */
	if (policy->cur == policy->max)
		goto out;

	if (have_governor_per_policy())
		tunables = pcpu->policy->governor_data;
	else
		tunables = common_tunables;
	if (!tunables)
		goto out;

	/* Add 25% headroom on top of the requested utilization. */
	min_util = util + (util >> 2);
	cap = capacity_curr_of(cpu);
	if (min_util > cap) {
		u64 now = ktime_to_us(ktime_get());
		u64 prev_boos_endtime = pcpu->task_boos_endtime;
		unsigned int boost_freq;

		/*
		 * Extend the boost window by one timer_rate period
		 * (timer_rate appears to be in usecs, matching ktime_to_us —
		 * TODO confirm).  The endtime is written before the
		 * comparison below on purpose: even a rejected request
		 * keeps an existing higher boost alive.
		 * NOTE(review): these per-cpu fields are updated without
		 * target_freq_lock; the timer path reads them — verify the
		 * race is benign.
		 */
		pcpu->task_boos_endtime = now + tunables->timer_rate;
		boost_freq = get_freq_for_util(policy, min_util);
		/* A still-active boost at >= this frequency wins; keep it. */
		if ((now < prev_boos_endtime) && (boost_freq <= pcpu->task_boost_freq))
			goto out;
		pcpu->task_boost_freq = boost_freq;

		/* Apply asynchronously via irq_work (see task_boost_irq_work). */
		irq_work_queue(&pcpu->irq_work);
	}

out:
	up_read(&pcpu->enable_sem);
}
13341430#endif
13351431
13361432static int cpufreq_governor_interactive (struct cpufreq_policy * policy ,
@@ -1479,6 +1575,9 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
14791575 pcpu = & per_cpu (cpuinfo , j );
14801576 down_write (& pcpu -> enable_sem );
14811577 pcpu -> governor_enabled = 0 ;
1578+ #ifdef CONFIG_ARCH_ROCKCHIP
1579+ irq_work_sync (& pcpu -> irq_work );
1580+ #endif
14821581 del_timer_sync (& pcpu -> cpu_timer );
14831582 del_timer_sync (& pcpu -> cpu_slack_timer );
14841583 up_write (& pcpu -> enable_sem );
@@ -1541,6 +1640,10 @@ static int __init cpufreq_interactive_init(void)
15411640 /* Initalize per-cpu timers */
15421641 for_each_possible_cpu (i ) {
15431642 pcpu = & per_cpu (cpuinfo , i );
1643+ #ifdef CONFIG_ARCH_ROCKCHIP
1644+ pcpu -> cpu = i ;
1645+ init_irq_work (& pcpu -> irq_work , task_boost_irq_work );
1646+ #endif
15441647 init_timer_deferrable (& pcpu -> cpu_timer );
15451648 pcpu -> cpu_timer .function = cpufreq_interactive_timer ;
15461649 pcpu -> cpu_timer .data = i ;
0 commit comments