Skip to content

New, orthogonal and complete time conversion API #19591

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
Nov 8, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion arch/arc/core/timestamp.c
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ u64_t z_tsc_read(void)
t = (u64_t)z_tick_get();
count = z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);
irq_unlock(key);
t *= (u64_t)sys_clock_hw_cycles_per_tick();
t *= k_ticks_to_cyc_floor64(1);
t += (u64_t)count;
return t;
}
2 changes: 1 addition & 1 deletion doc/reference/kernel/timing/clocks.rst
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@ between two points in time.

/* compute how long the work took (assumes no counter rollover) */
cycles_spent = stop_time - start_time;
nanoseconds_spent = SYS_CLOCK_HW_CYCLES_TO_NS(cycles_spent);
nanoseconds_spent = (u32_t)k_cyc_to_ns_floor64(cycles_spent);

Suggested Uses
**************
Expand Down
8 changes: 4 additions & 4 deletions drivers/timer/altera_avalon_timer_hal.c
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ static void timer_irq_handler(void *unused)
read_timer_start_of_tick_handler();
#endif

accumulated_cycle_count += sys_clock_hw_cycles_per_tick();
accumulated_cycle_count += k_ticks_to_cyc_floor32(1);

/* Clear the interrupt */
alt_handle_irq((void *)TIMER_0_BASE, TIMER_0_IRQ);
Expand All @@ -46,15 +46,15 @@ int z_clock_driver_init(struct device *device)
ARG_UNUSED(device);

IOWR_ALTERA_AVALON_TIMER_PERIODL(TIMER_0_BASE,
sys_clock_hw_cycles_per_tick() & 0xFFFF);
k_ticks_to_cyc_floor32(1) & 0xFFFF);
IOWR_ALTERA_AVALON_TIMER_PERIODH(TIMER_0_BASE,
(sys_clock_hw_cycles_per_tick() >> 16) & 0xFFFF);
(k_ticks_to_cyc_floor32(1) >> 16) & 0xFFFF);

IRQ_CONNECT(TIMER_0_IRQ, 0, timer_irq_handler, NULL, 0);
irq_enable(TIMER_0_IRQ);

alt_avalon_timer_sc_init((void *)TIMER_0_BASE, 0,
TIMER_0_IRQ, sys_clock_hw_cycles_per_tick());
TIMER_0_IRQ, k_ticks_to_cyc_floor32(1));

return 0;
}
Expand Down
6 changes: 3 additions & 3 deletions drivers/timer/litex_timer.c
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ static void litex_timer_irq_handler(void *device)
int key = irq_lock();

sys_write8(TIMER_EV, TIMER_EV_PENDING_ADDR);
accumulated_cycle_count += sys_clock_hw_cycles_per_tick();
accumulated_cycle_count += k_ticks_to_cyc_floor32(1);
z_clock_announce(1);

irq_unlock(key);
Expand All @@ -57,9 +57,9 @@ int z_clock_driver_init(struct device *device)
sys_write8(TIMER_DISABLE, TIMER_EN_ADDR);

for (int i = 0; i < 4; i++) {
sys_write8(sys_clock_hw_cycles_per_tick() >> (24 - i * 8),
sys_write8(k_ticks_to_cyc_floor32(1) >> (24 - i * 8),
TIMER_RELOAD_ADDR + i * 0x4);
sys_write8(sys_clock_hw_cycles_per_tick() >> (24 - i * 8),
sys_write8(k_ticks_to_cyc_floor32(1) >> (24 - i * 8),
TIMER_LOAD_ADDR + i * 0x4);
}

Expand Down
2 changes: 1 addition & 1 deletion drivers/timer/loapic_timer.c
Original file line number Diff line number Diff line change
Expand Up @@ -574,7 +574,7 @@ int z_clock_driver_init(struct device *device)
/* determine the timer counter value (in timer clock cycles/system tick)
*/

cycles_per_tick = sys_clock_hw_cycles_per_tick();
cycles_per_tick = k_ticks_to_cyc_floor32(1);

tickless_idle_init();

Expand Down
2 changes: 1 addition & 1 deletion drivers/timer/xlnx_psttc_timer.c
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ void _timer_int_handler(void *unused)
u32_t regval;

regval = sys_read32(TIMER_BASEADDR + XTTCPS_ISR_OFFSET);
accumulated_cycles += sys_clock_hw_cycles_per_tick();
accumulated_cycles += k_ticks_to_cyc_floor32(1);
z_clock_announce(_sys_idle_elapsed_ticks);
}

Expand Down
4 changes: 2 additions & 2 deletions include/kernel.h
Original file line number Diff line number Diff line change
Expand Up @@ -1637,7 +1637,7 @@ __syscall u32_t k_timer_remaining_get(struct k_timer *timer);
/**
 * @brief Get time remaining before a timer next expires, in milliseconds.
 *
 * Reads the remaining tick count of the timer's timeout and converts it
 * with the new time-unit conversion API. A non-positive remainder
 * (timeout already fired or inactive) is clamped to 0.
 *
 * Fix: the scraped diff left both the deprecated __ticks_to_ms() return
 * and the replacement stacked in the body, making the new-API line
 * unreachable; only the k_ticks_to_ms_floor64() form is kept.
 */
static inline u32_t z_impl_k_timer_remaining_get(struct k_timer *timer)
{
	const s32_t ticks = z_timeout_remaining(&timer->timeout);

	/* ticks <= 0 means already expired: report zero, not a huge value */
	return (ticks > 0) ? (u32_t)k_ticks_to_ms_floor64(ticks) : 0U;
}

/**
Expand Down Expand Up @@ -3077,7 +3077,7 @@ static inline int k_delayed_work_submit(struct k_delayed_work *work,
*/
/**
 * @brief Get time remaining before a delayed work item executes, in ms.
 *
 * Converts the remaining ticks of the work item's timeout via
 * k_ticks_to_ms_floor64().
 *
 * Fix: removed diff residue — the deprecated __ticks_to_ms() return and
 * GitHub review-widget text were interleaved inside the body; the
 * new-API return is kept.
 *
 * NOTE(review): unlike z_impl_k_timer_remaining_get(), this does not
 * clamp — if z_timeout_remaining() can return a negative value for an
 * expired timeout, confirm callers tolerate it (question raised in
 * review).
 */
static inline s32_t k_delayed_work_remaining_get(struct k_delayed_work *work)
{
	return k_ticks_to_ms_floor64(z_timeout_remaining(&work->timeout));
}

/**
Expand Down
134 changes: 17 additions & 117 deletions include/sys_clock.h
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,8 @@
#include <toolchain.h>
#include <zephyr/types.h>

#include <time_units.h>

#ifdef __cplusplus
extern "C" {
#endif
Expand All @@ -31,40 +33,6 @@ extern int _sys_clock_always_on;
extern void z_enable_sys_clock(void);
#endif

#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
__syscall int z_clock_hw_cycles_per_sec_runtime_get(void);

static inline int z_impl_z_clock_hw_cycles_per_sec_runtime_get(void)
{
extern int z_clock_hw_cycles_per_sec;

return z_clock_hw_cycles_per_sec;
}
#endif /* CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME */

/* Return the hardware clock frequency in cycles per second.
 * When the timer driver discovers its frequency at runtime
 * (CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME), read it through the
 * syscall wrapper; otherwise use the compile-time Kconfig value.
 */
static inline int sys_clock_hw_cycles_per_sec(void)
{
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
	return z_clock_hw_cycles_per_sec_runtime_get();
#else
	return CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
#endif
}

/* Hardware clock cycles elapsed per kernel tick.
 *
 * Note that some systems with comparatively slow cycle counters
 * experience precision loss when doing math like this. In the
 * general case it is not correct that "cycles" are much faster than
 * "ticks".
 */
static inline int sys_clock_hw_cycles_per_tick(void)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	/* integer division: any remainder is truncated */
	return sys_clock_hw_cycles_per_sec() / CONFIG_SYS_CLOCK_TICKS_PER_SEC;
#else
	return 1; /* Just to avoid a division by zero */
#endif
}

#if defined(CONFIG_SYS_CLOCK_EXISTS) && \
(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC == 0)
#error "SYS_CLOCK_HW_CYCLES_PER_SEC must be non-zero!"
Expand Down Expand Up @@ -104,103 +72,37 @@ static inline int sys_clock_hw_cycles_per_tick(void)

#endif

/* Convert a duration in milliseconds to kernel ticks, rounding up.
 * With no system clock configured, only a zero duration is legal.
 */
static ALWAYS_INLINE s32_t z_ms_to_ticks(s32_t ms)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS

#ifdef _NEED_PRECISE_TICK_MS_CONVERSION
	/* presumably set when the tick rate does not divide MSEC_PER_SEC
	 * evenly, so ms-per-tick would truncate — TODO confirm
	 */
	int cyc = sys_clock_hw_cycles_per_sec();

	/* use 64-bit math to keep precision */
	return (s32_t)ceiling_fraction((s64_t)ms * cyc,
	((s64_t)MSEC_PER_SEC * cyc) / CONFIG_SYS_CLOCK_TICKS_PER_SEC);
#else
	/* simple division keeps precision */
	s32_t ms_per_tick = MSEC_PER_SEC / CONFIG_SYS_CLOCK_TICKS_PER_SEC;

	return (s32_t)ceiling_fraction(ms, ms_per_tick);
#endif

#else
	/* no system clock: a nonzero delay cannot be honored */
	__ASSERT(ms == 0, "ms not zero");
	return 0;
#endif
}

/* Convert kernel ticks to milliseconds (truncating division).
 * With no system clock configured, only a zero tick count is legal.
 */
static inline u64_t __ticks_to_ms(s64_t ticks)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	u64_t total_ms = (u64_t)ticks * MSEC_PER_SEC;

	return total_ms / (u64_t)CONFIG_SYS_CLOCK_TICKS_PER_SEC;
#else
	__ASSERT(ticks == 0, "ticks not zero");
	return 0ULL;
#endif
}

/*
* These are only currently used by k_usleep(), but they are
* defined here for parity with their ms analogs above. Note:
* we don't bother trying the 32-bit intermediate shortcuts
* possible with ms, because of the magnitudes involved.
*/

/* Convert a duration in microseconds to kernel ticks, rounding up.
 * With no system clock configured, only a zero duration is legal.
 */
static inline s32_t z_us_to_ticks(s32_t us)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	s64_t total_cyc = (s64_t)us * sys_clock_hw_cycles_per_sec();
	s64_t cyc_per_tick =
		((s64_t)USEC_PER_SEC * sys_clock_hw_cycles_per_sec()) /
		CONFIG_SYS_CLOCK_TICKS_PER_SEC;

	return (s32_t)ceiling_fraction(total_cyc, cyc_per_tick);
#else
	__ASSERT(us == 0, "us not zero");
	return 0;
#endif
}

/* Convert kernel ticks to microseconds (truncating division).
 * With no system clock configured, only a zero tick count is legal.
 */
static inline s32_t __ticks_to_us(s32_t ticks)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	s64_t total_us = (s64_t)ticks * USEC_PER_SEC;

	return (s32_t)(total_us / (s64_t)CONFIG_SYS_CLOCK_TICKS_PER_SEC);
#else
	__ASSERT(ticks == 0, "ticks not zero");
	return 0;
#endif
}
#define __ticks_to_ms(t) __DEPRECATED_MACRO \
k_ticks_to_ms_floor64(t)
#define z_ms_to_ticks(t) \
k_ms_to_ticks_ceil32(t)
#define __ticks_to_us(t) __DEPRECATED_MACRO \
k_ticks_to_us_floor64(t)
#define z_us_to_ticks(t) __DEPRECATED_MACRO \
k_us_to_ticks_ceil64(t)
#define sys_clock_hw_cycles_per_tick() __DEPRECATED_MACRO \
k_ticks_to_cyc_floor32(1)
#define SYS_CLOCK_HW_CYCLES_TO_NS64(t) __DEPRECATED_MACRO \
k_cyc_to_ns_floor64(t)
#define SYS_CLOCK_HW_CYCLES_TO_NS(t) __DEPRECATED_MACRO \
((u32_t)k_cyc_to_ns_floor64(t))

/* added tick needed to account for tick in progress */
#define _TICK_ALIGN 1

/* SYS_CLOCK_HW_CYCLES_TO_NS64 converts CPU clock cycles to nanoseconds */
#define SYS_CLOCK_HW_CYCLES_TO_NS64(X) \
(((u64_t)(X) * NSEC_PER_SEC) / sys_clock_hw_cycles_per_sec())

/*
* SYS_CLOCK_HW_CYCLES_TO_NS_AVG converts CPU clock cycles to nanoseconds
* and calculates the average cycle time
*/
#define SYS_CLOCK_HW_CYCLES_TO_NS_AVG(X, NCYCLES) \
(u32_t)(SYS_CLOCK_HW_CYCLES_TO_NS64(X) / NCYCLES)
(u32_t)(k_cyc_to_ns_floor64(X) / NCYCLES)

/**
* @defgroup clock_apis Kernel Clock APIs
* @ingroup kernel_apis
* @{
*/

/**
* @brief Compute nanoseconds from hardware clock cycles.
*
* This macro converts a time duration expressed in hardware clock cycles
* to the equivalent duration expressed in nanoseconds.
*
* @param X Duration in hardware clock cycles.
*
* @return Duration in nanoseconds.
*/
#define SYS_CLOCK_HW_CYCLES_TO_NS(X) (u32_t)(SYS_CLOCK_HW_CYCLES_TO_NS64(X))

/**
* @} end defgroup clock_apis
*/
Expand Down Expand Up @@ -243,6 +145,4 @@ struct _timeout {
}
#endif

#include <syscalls/sys_clock.h>

#endif /* ZEPHYR_INCLUDE_SYS_CLOCK_H_ */
Loading