
Commit c1aa687

paulusmack authored and ozbenh committed
powerpc: Clean up obsolete code relating to decrementer and timebase
Since the decrementer and timekeeping code was moved over to using the generic clockevents and timekeeping infrastructure, several variables and functions have been obsolete and effectively unused. This deletes them.

In particular, wakeup_decrementer() is no longer needed since the generic code reprograms the decrementer as part of the process of resuming the timekeeping code, which happens during sysdev resume. Thus the wakeup_decrementer calls in the suspend_enter methods for 52xx platforms have been removed. The call in the powermac cpu frequency change code has been replaced by set_dec(1), which will cause a timer interrupt as soon as interrupts are enabled, and the generic code will then reprogram the decrementer with the correct value.

This also simplifies the generic_suspend_en/disable_irqs functions and makes them static since they are not referenced outside time.c. The preempt_enable/disable calls are removed because the generic code has disabled all but the boot cpu at the point where these functions are called, so we can't be moved to another cpu.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
1 parent 8fd63a9 commit c1aa687
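
For orientation, the suspend helpers in arch/powerpc/kernel/time.c end up with roughly the following shape after this change. This is a simplified sketch pieced together from the hunks below, not the verbatim resulting file; set_dec() comes from <asm/time.h>, and the local_irq_disable() call is implied by the function's role rather than shown in the diff.

static void generic_suspend_disable_irqs(void)
{
        /* Disable the decrementer, so that it doesn't interfere
         * with suspending.
         */
        set_dec(0x7fffffff);
        local_irq_disable();
}

static void generic_suspend_enable_irqs(void)
{
        /*
         * No wakeup_decrementer() call any more: the generic timekeeping
         * resume reprograms the decrementer once the tick code restarts,
         * so re-enabling interrupts is all that is left to do.
         */
        local_irq_enable();
}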

File tree: 7 files changed (+9, -153 lines)


arch/powerpc/include/asm/machdep.h

Lines changed: 0 additions & 3 deletions
@@ -366,8 +366,5 @@ static inline void log_error(char *buf, unsigned int err_type, int fatal)
 #define machine_late_initcall(mach,fn) __define_machine_initcall(mach,"7",fn,7)
 #define machine_late_initcall_sync(mach,fn) __define_machine_initcall(mach,"7s",fn,7s)

-void generic_suspend_disable_irqs(void);
-void generic_suspend_enable_irqs(void);
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MACHDEP_H */

arch/powerpc/include/asm/time.h

Lines changed: 0 additions & 7 deletions
@@ -28,16 +28,12 @@
 extern unsigned long tb_ticks_per_jiffy;
 extern unsigned long tb_ticks_per_usec;
 extern unsigned long tb_ticks_per_sec;
-extern u64 tb_to_xs;
-extern unsigned tb_to_us;

 struct rtc_time;
 extern void to_tm(int tim, struct rtc_time * tm);
 extern void GregorianDay(struct rtc_time *tm);
-extern time_t last_rtc_update;

 extern void generic_calibrate_decr(void);
-extern void wakeup_decrementer(void);
 extern void snapshot_timebase(void);

 extern void set_dec_cpu6(unsigned int val);
@@ -204,9 +200,6 @@ static inline unsigned long tb_ticks_since(unsigned long tstamp)
 extern u64 mulhdu(u64, u64);
 #endif

-extern void smp_space_timers(unsigned int);
-
-extern unsigned mulhwu_scale_factor(unsigned, unsigned);
 extern void div128_by_32(u64 dividend_high, u64 dividend_low,
                          unsigned divisor, struct div_result *dr);


arch/powerpc/kernel/smp.c

Lines changed: 0 additions & 2 deletions
@@ -288,8 +288,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                 max_cpus = NR_CPUS;
         else
                 max_cpus = 1;
-
-        smp_space_timers(max_cpus);

         for_each_possible_cpu(cpu)
                 if (cpu != boot_cpuid)

arch/powerpc/kernel/time.c

Lines changed: 3 additions & 133 deletions
@@ -149,16 +149,6 @@ unsigned long tb_ticks_per_usec = 100; /* sane default */
 EXPORT_SYMBOL(tb_ticks_per_usec);
 unsigned long tb_ticks_per_sec;
 EXPORT_SYMBOL(tb_ticks_per_sec);        /* for cputime_t conversions */
-u64 tb_to_xs;
-unsigned tb_to_us;
-
-#define TICKLEN_SCALE   NTP_SCALE_SHIFT
-static u64 last_tick_len;       /* units are ns / 2^TICKLEN_SCALE */
-static u64 ticklen_to_xs;       /* 0.64 fraction */
-
-/* If last_tick_len corresponds to about 1/HZ seconds, then
-   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
-#define TICKLEN_SHIFT   (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)

 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL_GPL(rtc_lock);
@@ -174,7 +164,6 @@ unsigned long ppc_proc_freq;
 EXPORT_SYMBOL(ppc_proc_freq);
 unsigned long ppc_tb_freq;

-static u64 tb_last_jiffy __cacheline_aligned_in_smp;
 static DEFINE_PER_CPU(u64, last_jiffy);

 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -446,7 +435,6 @@ EXPORT_SYMBOL(profile_pc);

 static int __init iSeries_tb_recal(void)
 {
-        struct div_result divres;
         unsigned long titan, tb;

         /* Make sure we only run on iSeries */
@@ -477,10 +465,7 @@ static int __init iSeries_tb_recal(void)
                         tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
                         tb_ticks_per_sec = new_tb_ticks_per_sec;
                         calc_cputime_factors();
-                        div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
-                        tb_to_xs = divres.result_low;
                         vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-                        vdso_data->tb_to_xs = tb_to_xs;
                         setup_cputime_one_jiffy();
                 }
                 else {
@@ -643,27 +628,9 @@ void timer_interrupt(struct pt_regs * regs)
         trace_timer_interrupt_exit(regs);
 }

-void wakeup_decrementer(void)
-{
-        unsigned long ticks;
-
-        /*
-         * The timebase gets saved on sleep and restored on wakeup,
-         * so all we need to do is to reset the decrementer.
-         */
-        ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
-        if (ticks < tb_ticks_per_jiffy)
-                ticks = tb_ticks_per_jiffy - ticks;
-        else
-                ticks = 1;
-        set_dec(ticks);
-}
-
 #ifdef CONFIG_SUSPEND
-void generic_suspend_disable_irqs(void)
+static void generic_suspend_disable_irqs(void)
 {
-        preempt_disable();
-
         /* Disable the decrementer, so that it doesn't interfere
          * with suspending.
          */
@@ -673,12 +640,9 @@ void generic_suspend_disable_irqs(void)
         set_dec(0x7fffffff);
 }

-void generic_suspend_enable_irqs(void)
+static void generic_suspend_enable_irqs(void)
 {
-        wakeup_decrementer();
-
         local_irq_enable();
-        preempt_enable();
 }

 /* Overrides the weak version in kernel/power/main.c */
@@ -698,23 +662,6 @@ void arch_suspend_enable_irqs(void)
 }
 #endif

-#ifdef CONFIG_SMP
-void __init smp_space_timers(unsigned int max_cpus)
-{
-        int i;
-        u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
-
-        /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
-        previous_tb -= tb_ticks_per_jiffy;
-
-        for_each_possible_cpu(i) {
-                if (i == boot_cpuid)
-                        continue;
-                per_cpu(last_jiffy, i) = previous_tb;
-        }
-}
-#endif
-
 /*
  * Scheduler clock - returns current time in nanosec units.
  *
@@ -1014,65 +961,28 @@ void secondary_cpu_time_init(void)
 /* This function is only called on the boot processor */
 void __init time_init(void)
 {
-        unsigned long flags;
         struct div_result res;
-        u64 scale, x;
+        u64 scale;
         unsigned shift;

         if (__USE_RTC()) {
                 /* 601 processor: dec counts down by 128 every 128ns */
                 ppc_tb_freq = 1000000000;
-                tb_last_jiffy = get_rtcl();
         } else {
                 /* Normal PowerPC with timebase register */
                 ppc_md.calibrate_decr();
                 printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
                        ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
                 printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
                        ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
-                tb_last_jiffy = get_tb();
         }

         tb_ticks_per_jiffy = ppc_tb_freq / HZ;
         tb_ticks_per_sec = ppc_tb_freq;
         tb_ticks_per_usec = ppc_tb_freq / 1000000;
-        tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
         calc_cputime_factors();
         setup_cputime_one_jiffy();

-        /*
-         * Calculate the length of each tick in ns.  It will not be
-         * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
-         * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
-         * rounded up.
-         */
-        x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
-        do_div(x, ppc_tb_freq);
-        tick_nsec = x;
-        last_tick_len = x << TICKLEN_SCALE;
-
-        /*
-         * Compute ticklen_to_xs, which is a factor which gets multiplied
-         * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
-         * It is computed as:
-         *   ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
-         * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
-         * which turns out to be N = 51 - SHIFT_HZ.
-         * This gives the result as a 0.64 fixed-point fraction.
-         * That value is reduced by an offset amounting to 1 xsec per
-         * 2^31 timebase ticks to avoid problems with time going backwards
-         * by 1 xsec when we do timer_recalc_offset due to losing the
-         * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
-         * since there are 2^20 xsec in a second.
-         */
-        div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
-                     tb_ticks_per_jiffy << SHIFT_HZ, &res);
-        div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
-        ticklen_to_xs = res.result_low;
-
-        /* Compute tb_to_xs from tick_nsec */
-        tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
-
         /*
          * Compute scale factor for sched_clock.
          * The calibrate_decr() function has set tb_ticks_per_sec,
@@ -1094,21 +1004,14 @@ void __init time_init(void)
         /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
         boot_tb = get_tb_or_rtc();

-        write_seqlock_irqsave(&xtime_lock, flags);
-
         /* If platform provided a timezone (pmac), we correct the time */
         if (timezone_offset) {
                 sys_tz.tz_minuteswest = -timezone_offset / 60;
                 sys_tz.tz_dsttime = 0;
         }

-        vdso_data->tb_orig_stamp = tb_last_jiffy;
         vdso_data->tb_update_count = 0;
         vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-        vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
-        vdso_data->tb_to_xs = tb_to_xs;
-
-        write_sequnlock_irqrestore(&xtime_lock, flags);

         /* Start the decrementer on CPUs that have manual control
          * such as BookE
@@ -1202,39 +1105,6 @@ void to_tm(int tim, struct rtc_time * tm)
         GregorianDay(tm);
 }

-/* Auxiliary function to compute scaling factors */
-/* Actually the choice of a timebase running at 1/4 the of the bus
- * frequency giving resolution of a few tens of nanoseconds is quite nice.
- * It makes this computation very precise (27-28 bits typically) which
- * is optimistic considering the stability of most processor clock
- * oscillators and the precision with which the timebase frequency
- * is measured but does not harm.
- */
-unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
-{
-        unsigned mlt=0, tmp, err;
-        /* No concern for performance, it's done once: use a stupid
-         * but safe and compact method to find the multiplier.
-         */
-
-        for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
-                if (mulhwu(inscale, mlt|tmp) < outscale)
-                        mlt |= tmp;
-        }
-
-        /* We might still be off by 1 for the best approximation.
-         * A side effect of this is that if outscale is too large
-         * the returned value will be zero.
-         * Many corner cases have been checked and seem to work,
-         * some might have been forgotten in the test however.
-         */
-
-        err = inscale * (mlt+1);
-        if (err <= inscale/2)
-                mlt++;
-        return mlt;
-}
-
 /*
  * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
  * result.
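
Incidentally, the "N = 51 - SHIFT_HZ" claim in the comment removed from time_init() follows directly from the two #defines deleted at the top of the file, with TICKLEN_SCALE cancelling out:

        N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
          = 64 + 20 - TICKLEN_SCALE - (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
          = 51 - SHIFT_HZ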

arch/powerpc/platforms/52xx/lite5200_pm.c

Lines changed: 0 additions & 3 deletions
@@ -216,9 +216,6 @@ static int lite5200_pm_enter(suspend_state_t state)

         lite5200_restore_regs();

-        /* restart jiffies */
-        wakeup_decrementer();
-
         iounmap(mbar);
         return 0;
 }

arch/powerpc/platforms/52xx/mpc52xx_pm.c

Lines changed: 0 additions & 3 deletions
@@ -171,9 +171,6 @@ int mpc52xx_pm_enter(suspend_state_t state)
         /* restore SRAM */
         memcpy(sram, saved_sram, sram_size);

-        /* restart jiffies */
-        wakeup_decrementer();
-
         /* reenable interrupts in PIC */
         out_be32(&intr->main_mask, intr_main_mask);

arch/powerpc/platforms/powermac/cpufreq_32.c

Lines changed: 6 additions & 2 deletions
@@ -310,8 +310,12 @@ static int pmu_set_cpu_speed(int low_speed)
         /* Restore low level PMU operations */
         pmu_unlock();

-        /* Restore decrementer */
-        wakeup_decrementer();
+        /*
+         * Restore decrementer; we'll take a decrementer interrupt
+         * as soon as interrupts are re-enabled and the generic
+         * clockevents code will reprogram it with the right value.
+         */
+        set_dec(1);

         /* Restore interrupts */
         mpic_cpu_set_priority(pic_prio);
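
A note on why set_dec(1) suffices here: once interrupts are restored, the pending decrementer exception runs timer_interrupt(), which hands off to the generic clockevents/tick code; that code then programs the real next event through the decrementer clockevent's set_next_event hook, which on these kernels boils down to a set_dec() of the requested delta. A minimal sketch of that hook follows (simplified; the in-tree decrementer_set_next_event() in time.c also records the next timebase value per CPU, and the real declarations come from <linux/clockchips.h> and <asm/time.h>):

static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev)
{
        set_dec(evt);        /* next decrementer interrupt in 'evt' timebase ticks */
        return 0;
}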
