
add __always_inline to trivial-super-low-level inline functions #379


Merged
merged 1 commit on May 5, 2021
@@ -84,7 +84,7 @@ typedef ioptr const const_ioptr;
* \param addr Address of writable register
* \param mask Bit-mask specifying bits to set
*/
inline static void hw_set_bits(io_rw_32 *addr, uint32_t mask) {
__force_inline static void hw_set_bits(io_rw_32 *addr, uint32_t mask) {
*(io_rw_32 *) hw_set_alias_untyped((volatile void *) addr) = mask;
}

@@ -94,7 +94,7 @@ inline static void hw_set_bits(io_rw_32 *addr, uint32_t mask) {
* \param addr Address of writable register
* \param mask Bit-mask specifying bits to clear
*/
inline static void hw_clear_bits(io_rw_32 *addr, uint32_t mask) {
__force_inline static void hw_clear_bits(io_rw_32 *addr, uint32_t mask) {
*(io_rw_32 *) hw_clear_alias_untyped((volatile void *) addr) = mask;
}

@@ -104,7 +104,7 @@ inline static void hw_clear_bits(io_rw_32 *addr, uint32_t mask) {
* \param addr Address of writable register
* \param mask Bit-mask specifying bits to invert
*/
inline static void hw_xor_bits(io_rw_32 *addr, uint32_t mask) {
__force_inline static void hw_xor_bits(io_rw_32 *addr, uint32_t mask) {
*(io_rw_32 *) hw_xor_alias_untyped((volatile void *) addr) = mask;
}

@@ -120,7 +120,7 @@ inline static void hw_xor_bits(io_rw_32 *addr, uint32_t mask) {
* \param values Bits values
* \param write_mask Mask of bits to change
*/
inline static void hw_write_masked(io_rw_32 *addr, uint32_t values, uint32_t write_mask) {
__force_inline static void hw_write_masked(io_rw_32 *addr, uint32_t values, uint32_t write_mask) {
hw_xor_bits(addr, (*addr ^ values) & write_mask);
}
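
For context, a minimal usage sketch of the alias helpers above (not part of this diff; io_rw_32, hw_set_bits, hw_clear_bits and hw_write_masked come from the header shown, while the address macros are hypothetical placeholders):

static void example_update_ctrl(void) {
    io_rw_32 *reg = (io_rw_32 *) (SOME_PERIPH_BASE + SOME_CTRL_OFFSET); // placeholder address
    hw_set_bits(reg, 1u << 3);              // set bit 3 with a single aliased write
    hw_clear_bits(reg, 1u << 4);            // clear bit 4 without a read-modify-write race
    hw_write_masked(reg, 2u << 6, 7u << 6); // change only bits 6..8, leave the rest untouched
}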

34 changes: 17 additions & 17 deletions src/rp2_common/hardware_sync/include/hardware/sync.h
@@ -80,7 +80,7 @@ typedef volatile uint32_t spin_lock_t;

* The SEV (send event) instruction sends an event to both cores.
*/
inline static void __sev(void) {
__force_inline static void __sev(void) {
__asm volatile ("sev");
}

@@ -90,7 +90,7 @@ inline static void __sev(void) {
* The WFE (wait for event) instruction waits until one of a number of
* events occurs, including events signalled by the SEV instruction on either core.
*/
inline static void __wfe(void) {
__force_inline static void __wfe(void) {
__asm volatile ("wfe");
}

@@ -99,7 +99,7 @@ inline static void __wfe(void) {
*
* The WFI (wait for interrupt) instruction waits for an interrupt to wake up the core.
*/
inline static void __wfi(void) {
__force_inline static void __wfi(void) {
__asm volatile ("wfi");
}

@@ -109,7 +109,7 @@ inline static void __wfi(void) {
* The DMB (data memory barrier) acts as a memory barrier; all memory accesses prior to this
* instruction will be observed before any explicit access after the instruction.
*/
inline static void __dmb(void) {
__force_inline static void __dmb(void) {
__asm volatile ("dmb" : : : "memory");
}

@@ -120,7 +120,7 @@ inline static void __dmb(void) {
* memory barrier (DMB). The DSB operation completes when all explicit memory
* accesses before this instruction complete.
*/
inline static void __dsb(void) {
__force_inline static void __dsb(void) {
__asm volatile ("dsb" : : : "memory");
}

@@ -131,14 +131,14 @@ inline static void __dsb(void) {
* so that all instructions following the ISB are fetched from cache or memory again, after
* the ISB instruction has been completed.
*/
inline static void __isb(void) {
__force_inline static void __isb(void) {
__asm volatile ("isb");
}

/*! \brief Acquire a memory fence
* \ingroup hardware_sync
*/
inline static void __mem_fence_acquire(void) {
__force_inline static void __mem_fence_acquire(void) {
// the original code below makes it hard for us to be included from C++ via a header
// which itself is in an extern "C", so just use __dmb instead, which is what
// is required on Cortex M0+
@@ -154,7 +154,7 @@ inline static void __mem_fence_acquire(void) {
* \ingroup hardware_sync
*
*/
inline static void __mem_fence_release(void) {
__force_inline static void __mem_fence_release(void) {
// the original code below makes it hard for us to be included from C++ via a header
// which itself is in an extern "C", so just use __dmb instead, which is what
// is required on Cortex M0+
@@ -171,7 +171,7 @@ inline static void __mem_fence_release(void) {
*
* \return The prior interrupt enable status for restoration later via restore_interrupts()
*/
inline static uint32_t save_and_disable_interrupts(void) {
__force_inline static uint32_t save_and_disable_interrupts(void) {
uint32_t status;
__asm volatile ("mrs %0, PRIMASK" : "=r" (status)::);
__asm volatile ("cpsid i");
@@ -183,7 +183,7 @@ inline static uint32_t save_and_disable_interrupts(void) {
*
* \param status Previous interrupt status from save_and_disable_interrupts()
*/
inline static void restore_interrupts(uint32_t status) {
__force_inline static void restore_interrupts(uint32_t status) {
__asm volatile ("msr PRIMASK,%0"::"r" (status) : );
}
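
As a quick illustration (not part of this diff), the intended pairing of the two calls above:

static void example_with_irqs_masked(void) {
    uint32_t status = save_and_disable_interrupts(); // PRIMASK saved, then interrupts masked
    // ... short critical section ...
    restore_interrupts(status);                      // previous PRIMASK state restored
}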

@@ -193,7 +193,7 @@ inline static void restore_interrupts(uint32_t status) {
* \param lock_num Spinlock ID
* \return The spinlock instance
*/
inline static spin_lock_t *spin_lock_instance(uint lock_num) {
__force_inline static spin_lock_t *spin_lock_instance(uint lock_num) {
invalid_params_if(SYNC, lock_num >= NUM_SPIN_LOCKS);
return (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET + lock_num * 4);
}
@@ -204,7 +204,7 @@ inline static spin_lock_t *spin_lock_instance(uint lock_num) {
* \param lock The Spinlock instance
* \return The Spinlock ID
*/
inline static uint spin_lock_get_num(spin_lock_t *lock) {
__force_inline static uint spin_lock_get_num(spin_lock_t *lock) {
invalid_params_if(SYNC, (uint) lock < SIO_BASE + SIO_SPINLOCK0_OFFSET ||
(uint) lock >= NUM_SPIN_LOCKS * sizeof(spin_lock_t) + SIO_BASE + SIO_SPINLOCK0_OFFSET ||
((uint) lock - SIO_BASE + SIO_SPINLOCK0_OFFSET) % sizeof(spin_lock_t) != 0);
@@ -216,7 +216,7 @@ inline static uint spin_lock_get_num(spin_lock_t *lock) {
*
* \param lock Spinlock instance
*/
inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
__force_inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
// Note we don't do a wfe or anything, because by convention these spin_locks are VERY SHORT LIVED and NEVER BLOCK and run
// with INTERRUPTS disabled (to ensure that)... therefore nothing on our core could be blocking us, so we just need to wait on another core
// anyway which should be finished soon
@@ -229,7 +229,7 @@ inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
*
* \param lock Spinlock instance
*/
inline static void spin_unlock_unsafe(spin_lock_t *lock) {
__force_inline static void spin_unlock_unsafe(spin_lock_t *lock) {
__mem_fence_release();
*lock = 0;
}
@@ -242,7 +242,7 @@ inline static void spin_unlock_unsafe(spin_lock_t *lock) {
* \param lock Spinlock instance
* \return interrupt status to be used when unlocking, to restore to original state
*/
inline static uint32_t spin_lock_blocking(spin_lock_t *lock) {
__force_inline static uint32_t spin_lock_blocking(spin_lock_t *lock) {
uint32_t save = save_and_disable_interrupts();
spin_lock_unsafe_blocking(lock);
return save;
@@ -270,7 +270,7 @@ inline static bool is_spin_locked(spin_lock_t *lock) {
*
* \sa spin_lock_blocking()
*/
inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
__force_inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
spin_unlock_unsafe(lock);
restore_interrupts(saved_irq);
}
@@ -280,7 +280,7 @@ inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
*
* \return The core number the call was made from
*/
static inline uint get_core_num(void) {
__force_inline static uint get_core_num(void) {
return (*(uint32_t *) (SIO_BASE + SIO_CPUID_OFFSET));
}
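
A minimal sketch of the spin-lock API shown above (not part of this diff; lock number 0 is an arbitrary example, and a real program must pick a lock it has claimed):

#include "hardware/sync.h"

static void example_critical_section(void) {
    spin_lock_t *lock = spin_lock_instance(0); // hypothetical choice of spinlock 0
    uint32_t irq = spin_lock_blocking(lock);   // disables interrupts, then spins until acquired
    // ... very short critical section ...
    spin_unlock(lock, irq);                    // releases the lock, restores the saved interrupt state
}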

17 changes: 11 additions & 6 deletions src/rp2_common/pico_platform/include/pico/platform.h
@@ -45,10 +45,15 @@ extern "C" {
* Decorates a function name, such that the function will execute from RAM, explicitly marking it as
* noinline to prevent it being inlined into a flash function by the compiler
*/
#define __no_inline_not_in_flash_func(func_name) __attribute__((noinline)) __not_in_flash_func(func_name)
#define __no_inline_not_in_flash_func(func_name) __noinline __not_in_flash_func(func_name)

#define __packed_aligned __packed __aligned(4)

#if defined(__GNUC__) && __GNUC__ < 7
#define __force_inline inline __always_inline
#else
#define __force_inline __always_inline
#endif
#ifndef count_of
#define count_of(a) (sizeof(a)/sizeof((a)[0]))
#endif
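
As a brief illustration (not part of this diff), how the new macro is meant to be applied; the helper below is hypothetical, and the reason for the GCC < 7 branch is presumably that older GCC wants the plain inline keyword alongside the always_inline attribute:

// expands to `inline __always_inline` on GCC older than 7, plain __always_inline otherwise (see the #if above)
__force_inline static uint32_t hypothetical_read(volatile uint32_t *reg) {
    return *reg;
}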
@@ -71,7 +76,7 @@ static inline void __breakpoint(void) {
/**
* Ensure that the compiler does not move memory access across this method call
*/
static inline void __compiler_memory_barrier(void) {
__force_inline static void __compiler_memory_barrier(void) {
__asm__ volatile ("" : : : "memory");
}

@@ -140,9 +145,9 @@ static inline void tight_loop_contents(void) {}
* \param b the second operand
* \return a * b
*/
inline static int32_t __mul_instruction(int32_t a, int32_t b) {
asm ("mul %0, %1" : "+l" (a) : "l" (b) : );
return a;
__force_inline static int32_t __mul_instruction(int32_t a, int32_t b) {
asm ("mul %0, %1" : "+l" (a) : "l" (b) : );
return a;
}
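
A one-line usage sketch (not part of this diff; sample and gain are hypothetical int32_t operands):

static int32_t example_scale(int32_t sample, int32_t gain) {
    return __mul_instruction(sample, gain); // forces a single hardware MUL for the product
}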

/**
@@ -167,7 +172,7 @@
* Get the current exception level on this core
* \return the exception number if the CPU is handling an exception, or 0 otherwise
*/
extern uint __get_current_exception(void);
uint __get_current_exception(void);

#ifdef __cplusplus
}