  * modified.
  */
 
+#include <zephyr/toolchain.h>
 #include <zephyr/kernel.h>
 #include <kernel_internal.h>
 #include <zephyr/linker/linker-defs.h>
 #include <pmp.h>
 #include <zephyr/arch/arch_interface.h>
 #include <zephyr/arch/riscv/csr.h>
+#include <zephyr/dt-bindings/memory-attr/memory-attr-riscv.h>
+#include <zephyr/mem_mgmt/mem_attr.h>
 
 #define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
 #include <zephyr/logging/log.h>
@@ -383,6 +386,58 @@ static void write_pmp_entries(unsigned int start, unsigned int end,
 			   pmp_addr, pmp_cfg);
 }
 
+#ifdef CONFIG_MEM_ATTR
+/**
+ * @brief Install PMP entries from devicetree mem-attr regions.
+ *
+ * Iterates over the devicetree-provided memory-attribute regions and programs
+ * the PMP via set_pmp_entry(). Ordering matters because the PMP checks
+ * entries from lowest to highest index and uses the first entry that matches
+ * the address.
+ *
+ * @param index_p Location of the current PMP slot index to use. This index
+ *                is updated according to the number of slots used.
+ * @param pmp_addr Array of pmpaddr values (starting at entry 0).
+ * @param pmp_cfg Array of pmpcfg values (starting at entry 0).
+ * @param index_limit Index value representing the size of the provided arrays.
+ * @return Number of PMP slots consumed by the installed mem-attr regions.
+ *
+ * @note Limitation of DT_MEM_RISCV_TYPE_IO_X:
+ *       Since these PMP entries are non-locked, the eXecute (X) restriction
+ *       applied by DT_MEM_RISCV_TYPE_IO_X does not prevent execution in
+ *       higher privilege modes (M-mode/kernel). This is because the
+ *       mstatus.MPRV bit only affects M-mode load/store operations, not
+ *       instruction fetches. The execute restriction still applies to user
+ *       mode because the PMP is always enforced for lower privilege modes.
+ */
+static unsigned int set_pmp_mem_attr(unsigned int *index_p,
+				     unsigned long *pmp_addr, unsigned long *pmp_cfg,
+				     unsigned int index_limit)
+{
+	const struct mem_attr_region_t *region;
+	unsigned int entry_cnt = *index_p;
+	size_t num_regions;
+
+	num_regions = mem_attr_get_regions(&region);
+
+	for (size_t idx = 0; idx < num_regions; idx++) {
+		uint8_t perm = DT_MEM_RISCV_TO_PMP_PERM(region[idx].dt_attr);
+
+		if (perm || (region[idx].dt_attr & DT_MEM_RISCV_TYPE_EMPTY)) {
+			set_pmp_entry(index_p, perm,
+				      (uintptr_t)region[idx].dt_addr,
+				      (size_t)region[idx].dt_size,
+				      pmp_addr, pmp_cfg, index_limit);
+		}
+	}
+
+	entry_cnt = *index_p - entry_cnt;
+
+	return entry_cnt;
+}
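+
+/*
+ * Illustrative only: a devicetree region like the hypothetical node below
+ * would be reported by mem_attr_get_regions() and installed by
+ * set_pmp_mem_attr() as a read/write, non-executable PMP entry. The node
+ * name, addresses and the DT_MEM_RISCV_TYPE_IO attribute value are
+ * assumptions for the example, not requirements of this patch.
+ *
+ *	soc_io: memory@a0000000 {
+ *		compatible = "zephyr,memory-region";
+ *		reg = <0xa0000000 0x1000>;
+ *		zephyr,memory-region = "SOC_IO";
+ *		zephyr,memory-attr = <( DT_MEM_RISCV_TYPE_IO )>;
+ *	};
+ */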
+#endif /* CONFIG_MEM_ATTR */
+
 /**
  * @brief Abstract the last 3 arguments to set_pmp_entry() and
  *        write_pmp_entries() for m-mode.
@@ -427,14 +482,22 @@ static unsigned long global_pmp_last_addr[MODE_TOTAL];
 /* End of global PMP entry range for each mode (M or U). */
 static unsigned int global_pmp_end_index[MODE_TOTAL];
 
+#if defined(CONFIG_MEM_ATTR) && defined(CONFIG_USERSPACE)
+/* Stores the initial pmpaddr values for the memory attribute regions. */
+static unsigned long mem_attr_pmp_addr[CONFIG_PMP_SLOTS];
+#endif
+
 /**
  * @brief Initialize the PMP with global entries on each CPU
  */
 void z_riscv_pmp_init(void)
 {
 	unsigned long pmp_addr[CONFIG_PMP_SLOTS];
-	unsigned long pmp_cfg[CONFIG_PMP_SLOTS / PMPCFG_STRIDE];
+	unsigned long pmp_cfg[CONFIG_PMP_SLOTS / PMPCFG_STRIDE] = { 0 };
 	unsigned int index = 0;
+	unsigned int attr_cnt = 0;
+
+	ARG_UNUSED(attr_cnt);
 
 #ifdef CONFIG_NULL_POINTER_EXCEPTION_DETECTION_PMP
 	/*
@@ -464,23 +527,6 @@ void z_riscv_pmp_init(void)
 		      (uintptr_t)z_interrupt_stacks[_current_cpu->id],
 		      Z_RISCV_STACK_GUARD_SIZE,
 		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
-
-	/*
-	 * This early, the kernel init code uses the IRQ stack and we want to
-	 * safeguard it as soon as possible. But we need a temporary default
-	 * "catch all" PMP entry for MPRV to work. Later on, this entry will
-	 * be set for each thread by z_riscv_pmp_kernelmode_prepare().
-	 */
-	set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
-
-	/* Write those entries to PMP regs. */
-	write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
-
-	/* Activate our non-locked PMP entries for m-mode */
-	csr_set(mstatus, MSTATUS_MPRV);
-
-	/* And forget about that last entry as we won't need it later */
-	index--;
 #else
 	/* Without multithreading, set up stack guards for the IRQ and main stacks */
 	set_pmp_entry(&index, PMP_NONE | PMP_L,
@@ -493,14 +539,39 @@ void z_riscv_pmp_init(void)
 		      Z_RISCV_STACK_GUARD_SIZE,
 		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
 
-	/* Write those entries to PMP regs. */
-	write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
 #endif /* CONFIG_MULTITHREADING */
 #ifdef CONFIG_SMP
 	unsigned int irq_index = index;
 #endif /* CONFIG_SMP */
+#endif /* CONFIG_PMP_STACK_GUARD */
+
+#ifdef CONFIG_MEM_ATTR
+	/*
+	 * Install the memory attribute regions as temporary PMP entries for
+	 * early kernel initialization. This provides essential protection
+	 * before the per-thread kernel-mode PMP configuration becomes
+	 * operational.
+	 */
+	attr_cnt = set_pmp_mem_attr(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
+
+	/*
+	 * This early, we want the non-locked PMP entries to take effect as
+	 * soon as possible, but we need a temporary default "catch all" PMP
+	 * entry for MPRV to work. Later on, this entry will be set for each
+	 * thread by z_riscv_pmp_kernelmode_prepare().
+	 */
+	set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
+
+	/* Write those entries to PMP regs. */
+	write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
+
+	/* Activate our non-locked PMP entries for m-mode */
+	csr_clear(mstatus, MSTATUS_MPP);
+	csr_set(mstatus, MSTATUS_MPRV);
+
+	/* And forget about that last entry as we won't need it later */
+	index--;
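+
+	/*
+	 * Illustrative PMP layout at this point, assuming one mem-attr
+	 * region and a stack-guard build (slot numbers are an example
+	 * only, not a guaranteed assignment):
+	 *
+	 *   slot 0: IRQ stack guard  (locked, no access)
+	 *   slot 1: mem-attr region  (non-locked, e.g. RW for an IO range)
+	 *   slot 2: MPRV catch-all   (temporary; dropped by index-- above)
+	 */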
 #else
 	/* Write those entries to PMP regs. */
 	write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
 #endif
 
@@ -528,10 +599,20 @@ void z_riscv_pmp_init(void)
 	global_pmp_end_index[M_MODE] = index;
 
 #ifdef CONFIG_USERSPACE
-	global_pmp_last_addr[U_MODE] = pmp_addr[index - 1];
-	global_pmp_end_index[U_MODE] = index;
+	global_pmp_last_addr[U_MODE] = pmp_addr[index - attr_cnt - 1];
+	global_pmp_end_index[U_MODE] = index - attr_cnt;
 #endif /* CONFIG_USERSPACE */
 
+#if defined(CONFIG_MEM_ATTR) && defined(CONFIG_USERSPACE)
+	/*
+	 * Copy the memory attribute pmpaddr entries to the global buffer.
+	 * These kernel-mode pmpaddr entries are saved for restoration when
+	 * switching back from user mode.
+	 */
+	memcpy(mem_attr_pmp_addr, &pmp_addr[global_pmp_end_index[U_MODE]],
+	       attr_cnt * sizeof(pmp_addr[0]));
+#endif
+
 	if (PMP_DEBUG_DUMP) {
 		dump_pmp_regs("initial register dump");
 	}
@@ -560,6 +641,19 @@ static inline unsigned int z_riscv_pmp_thread_init(enum pmp_mode mode,
 
 	pmp_addr[pmp_end_index - 1] = global_pmp_last_addr[mode];
 
+#if defined(CONFIG_MEM_ATTR) && defined(CONFIG_USERSPACE)
+	/*
+	 * Restore the PMP entries used for memory attributes (saved in
+	 * mem_attr_pmp_addr) that were overwritten while in user mode.
+	 * This only applies when preparing the M_MODE PMP view.
+	 */
+	if (mode == M_MODE) {
+		memcpy(&pmp_addr[global_pmp_end_index[U_MODE]], mem_attr_pmp_addr,
+		       (global_pmp_end_index[M_MODE] - global_pmp_end_index[U_MODE]) *
+		       sizeof(pmp_addr[0]));
+	}
+#endif
+
 	return pmp_end_index;
 }
 #endif
@@ -585,7 +679,7 @@ void z_riscv_pmp_kernelmode_prepare(struct k_thread *thread)
 	} else if (z_stack_is_user_capable(thread->stack_obj)) {
 		stack_bottom = thread->stack_info.start - K_THREAD_STACK_RESERVED;
 	}
-#endif
+#endif /* CONFIG_USERSPACE */
 	set_pmp_entry(&index, PMP_NONE,
 		      stack_bottom, Z_RISCV_STACK_GUARD_SIZE,
 		      PMP_M_MODE(thread));
@@ -614,10 +708,17 @@ void z_riscv_pmp_kernelmode_enable(struct k_thread *thread)
 	csr_clear(mstatus, MSTATUS_MPRV | MSTATUS_MPP);
 
 	/* Write our m-mode PMP entries */
+#ifdef CONFIG_USERSPACE
+	write_pmp_entries(global_pmp_end_index[U_MODE],
+			  thread->arch.m_mode_pmp_end_index,
+			  false /* no need to clear to the end */,
+			  PMP_M_MODE(thread));
+#else
 	write_pmp_entries(global_pmp_end_index[M_MODE],
 			  thread->arch.m_mode_pmp_end_index,
 			  false /* no need to clear to the end */,
 			  PMP_M_MODE(thread));
+#endif /* CONFIG_USERSPACE */
 
 	if (PMP_DEBUG_DUMP) {
 		dump_pmp_regs("m-mode register dump");
@@ -723,8 +824,52 @@ static void resync_pmp_domain(struct k_thread *thread,
 			continue;
 		}
 
-		ok = set_pmp_entry(&index, part->attr.pmp_attr,
-				   part->start, part->size, PMP_U_MODE(thread));
+#ifdef CONFIG_MEM_ATTR
+		/*
+		 * Determine whether the partition is covered by a memory
+		 * attribute region.
+		 *
+		 * Constraint due to the limited number of PMP entries:
+		 * the logic asserts against any case that would require
+		 * splitting a partition into multiple permissions, such as
+		 * a partial overlap, or the partition fully containing the
+		 * memory attribute region without matching it exactly.
+		 *
+		 * Supported cases:
+		 * 1. Partition excludes all memory attribute regions:
+		 *    the partition's permission is applied directly.
+		 * 2. Partition is contained in a memory attribute region:
+		 *    the partition's permission is masked with the memory
+		 *    attribute.
+		 */
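+		/*
+		 * Worked example for case 2 (addresses are hypothetical):
+		 * a 4 KB partition at 0x80001000 lying inside a mem-attr
+		 * region 0x80000000..0x8000ffff whose attribute maps to
+		 * PMP_R | PMP_W has its pmp_attr masked down to read/write,
+		 * stripping eXecute.
+		 */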
+		const struct mem_attr_region_t *region;
+		uint8_t attr_mask = PMP_R | PMP_W | PMP_X;
+		size_t num_regions = mem_attr_get_regions(&region);
+
+		for (size_t idx = 0; idx < num_regions; idx++) {
+			uintptr_t dt_start = (uintptr_t)region[idx].dt_addr;
+			uintptr_t dt_end = dt_start + (size_t)region[idx].dt_size;
+			bool covered = false;
+
+			/* No overlap at all, skip this memory region */
+			if ((part->start + part->size) <= dt_start || part->start >= dt_end) {
+				continue;
+			}
+
+			/* Check if the partition is contained in the memory attribute region. */
+			covered = part->start >= dt_start && (part->start + part->size) <= dt_end;
+			__ASSERT(covered, "partitions must not partially overlap a memory attribute region");
+
+			attr_mask = DT_MEM_RISCV_TO_PMP_PERM(region[idx].dt_attr);
+			break;
+		}
+
+		ok = set_pmp_entry(&index, part->attr.pmp_attr & attr_mask,
+				   part->start, part->size, PMP_U_MODE(thread));
+#else
+		ok = set_pmp_entry(&index, part->attr.pmp_attr,
+				   part->start, part->size, PMP_U_MODE(thread));
+#endif /* CONFIG_MEM_ATTR */
+
 		__ASSERT(ok,
 			 "no PMP slot left for %d remaining partitions in domain %p",
 			 remaining_partitions + 1, domain);
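
Not part of the patch: a minimal usage sketch showing how a user-mode
partition could pick up a mem-attr mask through resync_pmp_domain(). The
partition name and addresses are hypothetical; K_MEM_PARTITION_DEFINE,
k_mem_domain_init() and k_mem_domain_add_thread() are the standard Zephyr
memory-domain APIs.

	#include <zephyr/kernel.h>

	/* Hypothetical 4 KB partition inside a devicetree mem-attr region. */
	K_MEM_PARTITION_DEFINE(io_part, 0x80001000, 0x1000,
			       K_MEM_PARTITION_P_RW_U_RW);

	static struct k_mem_domain io_dom;

	void attach_io_domain(void)
	{
		struct k_mem_partition *parts[] = { &io_part };

		k_mem_domain_init(&io_dom, ARRAY_SIZE(parts), parts);
		/*
		 * The partition's PMP permission is intersected with the
		 * region's attr_mask the next time this thread's PMP domain
		 * is resynced.
		 */
		k_mem_domain_add_thread(&io_dom, k_current_get());
	}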