Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: mm: add affinity page allocator and debugging improvements #9740

Open
wants to merge 4 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion components/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -40,9 +40,12 @@ rsource "utilities/Kconfig"
rsource "vbus/Kconfig"
endif

if ARCH_MM_MMU
rsource "mm/Kconfig"
endif

if RT_USING_SMART
rsource "lwp/Kconfig"
rsource "mm/Kconfig"
endif

rsource "legacy/Kconfig"
Expand Down
1 change: 1 addition & 0 deletions components/dfs/dfs_v2/include/dfs_file.h
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,7 @@ struct dfs_mmap2_args
int prot;
int flags;
off_t pgoffset;
size_t min_align_size;

struct rt_lwp *lwp;
void *ret;
Expand Down
1 change: 1 addition & 0 deletions components/dfs/dfs_v2/src/dfs_file_mmap.c
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,7 @@ static void *_map_data_to_uspace(struct dfs_mmap2_args *mmap2, void *data, rt_er
map_vaddr = (void *)((size_t)map_vaddr & ~ARCH_PAGE_MASK);

k_flags = lwp_user_mm_flag_to_kernel(mmap2->flags);
k_flags = MMF_CREATE(k_flags, mmap2->min_align_size);
k_attr = lwp_user_mm_attr_to_kernel(mmap2->prot);

map_vaddr = _do_mmap(lwp, map_vaddr, map_size, k_attr, k_flags, mmap2->pgoffset, data, code);
Expand Down
9 changes: 5 additions & 4 deletions components/dfs/dfs_v2/src/dfs_pcache.c
Original file line number Diff line number Diff line change
Expand Up @@ -694,14 +694,15 @@ static int dfs_page_unmap(struct dfs_page *page)
return 0;
}

static struct dfs_page *dfs_page_create(void)
static struct dfs_page *dfs_page_create(off_t pos)
{
struct dfs_page *page = RT_NULL;
int affid = RT_PAGE_PICK_AFFID(pos);

page = rt_calloc(1, sizeof(struct dfs_page));
if (page)
{
page->page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
page->page = rt_pages_alloc_tagged(0, affid, PAGE_ANY_AVAILABLE);
if (page->page)
{
//memset(page->page, 0x00, ARCH_PAGE_SIZE);
Expand Down Expand Up @@ -992,12 +993,12 @@ static struct dfs_page *dfs_aspace_load_page(struct dfs_file *file, off_t pos)
struct dfs_vnode *vnode = file->vnode;
struct dfs_aspace *aspace = vnode->aspace;

page = dfs_page_create();
page = dfs_page_create(pos);
if (page)
{
page->aspace = aspace;
page->size = ARCH_PAGE_SIZE;
page->fpos = pos / ARCH_PAGE_SIZE * ARCH_PAGE_SIZE;
page->fpos = RT_ALIGN_DOWN(pos, ARCH_PAGE_SIZE);
aspace->ops->read(file, page);
page->ref_count ++;

Expand Down
2 changes: 1 addition & 1 deletion components/lwp/lwp_elf.c
Original file line number Diff line number Diff line change
Expand Up @@ -576,7 +576,7 @@ static int elf_aux_fill(elf_load_info_t *load_info)
elf_addr_t *aux_info;
uint32_t random_value = rt_tick_get();
size_t prot = PROT_READ | PROT_WRITE;
size_t flags = MAP_PRIVATE;
size_t flags = MAP_FIXED | MAP_PRIVATE;
rt_lwp_t lwp = load_info->lwp;
void *va;

Expand Down
95 changes: 87 additions & 8 deletions components/lwp/lwp_user_mm.c
Original file line number Diff line number Diff line change
Expand Up @@ -503,29 +503,99 @@ void *lwp_user_memory_remap_to_kernel(rt_lwp_t lwp, void *uaddr, size_t length)

return kaddr;
}
#include <dfs_dentry.h>
#define _AFFBLK_PGOFFSET (RT_PAGE_AFFINITY_BLOCK_SIZE >> MM_PAGE_SHIFT)

/**
 * @brief Align a weak (non-MAP_FIXED) mapping request to the affinity block.
 *
 * When the requested page offset does not sit on an affinity-block boundary,
 * the request is expanded downwards so the mapped region starts on an aligned
 * offset; the caller adds the returned byte delta back onto the address it
 * hands to userspace.
 *
 * @param ppgoff in/out: page offset of the request; rewritten to the
 *               affinity-block-aligned page offset.
 * @param plen   in/out: mapping length in bytes; grown by the alignment slack
 *               whenever the offset had to be moved down.
 * @param palign in/out: minimum alignment for address selection; raised to
 *               RT_PAGE_AFFINITY_BLOCK_SIZE for weak mappings.
 * @return byte distance between the requested and the aligned page offset
 *         (0 when the request was already aligned).
 */
static rt_base_t _aligned_for_weak_mapping(off_t *ppgoff, rt_size_t *plen, rt_size_t *palign)
{
    off_t aligned_pgoffset, pgoffset = *ppgoff;
    rt_size_t length = *plen;
    rt_size_t min_align_size = *palign;
    rt_base_t aligned_size = 0;

    if (pgoffset >= 0)
    {
        /* force an alignment onto the affinity block boundary
         * (use the shared helper macro instead of re-spelling the shift) */
        aligned_pgoffset = RT_ALIGN_DOWN(pgoffset, _AFFBLK_PGOFFSET);
        aligned_size = (pgoffset - aligned_pgoffset) << MM_PAGE_SHIFT;

        if (aligned_pgoffset != pgoffset)
        {
            /**
             * If requested pgoffset is not sitting on an aligned page offset,
             * expand the request mapping to force an alignment.
             */
            length += aligned_size;
            pgoffset = aligned_pgoffset;
        }

        /**
         * As this is a weak mapping, we can pick any reasonable address for our
         * requirement.
         */
        min_align_size = RT_PAGE_AFFINITY_BLOCK_SIZE;
    }
    else
    {
        /* negative page offsets cannot be aligned meaningfully */
        RT_ASSERT(0 && "Unexpected input");
    }

    *ppgoff = pgoffset;
    *plen = length;
    *palign = min_align_size;

    return aligned_size;
}

void *lwp_mmap2(struct rt_lwp *lwp, void *addr, size_t length, int prot,
int flags, int fd, off_t pgoffset)
{
rt_err_t rc;
rt_size_t k_attr;
rt_size_t k_flags;
rt_size_t k_offset;
rt_size_t k_attr, k_flags, k_offset, aligned_size = 0;
rt_size_t min_align_size = 1 << MM_PAGE_SHIFT;
rt_aspace_t uspace;
rt_mem_obj_t mem_obj;
void *ret = 0;
LOG_D("%s(addr=0x%lx,length=%ld,fd=%d)", __func__, addr, length, fd);
LOG_D("%s(addr=0x%lx,length=0x%lx,fd=%d,pgoff=0x%lx)", __func__, addr, length, fd, pgoffset);

/* alignment for affinity page block */
if (flags & MAP_FIXED)
{
if (fd != -1)
{
/* requested mapping address */
rt_base_t va_affid = RT_PAGE_PICK_AFFID(addr);
rt_base_t pgoff_affid = RT_PAGE_PICK_AFFID(pgoffset << MM_PAGE_SHIFT);

/* filter illegal align address */
if (va_affid != pgoff_affid)
{
LOG_W("Unaligned mapping address %p(pgoff=0x%lx) from fd=%d",
addr, pgoffset, fd);
}
}
else
{
/* anonymous mapping can always aligned */
}
}
else
{
/* weak address selection */
aligned_size = _aligned_for_weak_mapping(&pgoffset, &length, &min_align_size);
}

if (fd == -1)
{
/**
* todo: add threshold
*/
#ifdef RT_DEBUGGING_PAGE_THRESHOLD
if (!_memory_threshold_ok())
return (void *)-ENOMEM;
#endif /* RT_DEBUGGING_PAGE_THRESHOLD */

k_offset = MM_PA_TO_OFF(addr);
k_flags = lwp_user_mm_flag_to_kernel(flags) | MMF_MAP_PRIVATE;
k_flags = MMF_CREATE(lwp_user_mm_flag_to_kernel(flags) | MMF_MAP_PRIVATE,
min_align_size);
k_attr = lwp_user_mm_attr_to_kernel(prot);

uspace = lwp->aspace;
Expand Down Expand Up @@ -553,6 +623,7 @@ void *lwp_mmap2(struct rt_lwp *lwp, void *addr, size_t length, int prot,

mmap2.addr = addr;
mmap2.length = length;
mmap2.min_align_size = min_align_size;
mmap2.prot = prot;
mmap2.flags = flags;
mmap2.pgoffset = pgoffset;
Expand All @@ -572,7 +643,15 @@ void *lwp_mmap2(struct rt_lwp *lwp, void *addr, size_t length, int prot,
}

if ((long)ret <= 0)
{
LOG_D("%s() => %ld", __func__, ret);
}
else
{
ret = (char *)ret + aligned_size;
LOG_D("%s() => 0x%lx", __func__, ret);
}

return ret;
}

Expand Down
33 changes: 33 additions & 0 deletions components/mm/Kconfig
Original file line number Diff line number Diff line change
@@ -1,5 +1,24 @@
menu "Memory management"

config RT_PAGE_AFFINITY_BLOCK_SIZE
hex "Affinity block size in bytes for page management"
default 0x1000
help
Page affinity block can be used to resolve the VIPT aliasing problem.
It should be set to `1ul << ((index + block) - page_offset)` in this case.
You can also use this as a tuning knob for cache coloring.

config RT_PAGE_MAX_ORDER
int "Max order of pages allocatable by page allocator"
default 11
depends on ARCH_MM_MMU
help
For example, a value of 11 means the maximum chunk of contiguous memory
allocatable by the page system is 2^(11 + ARCH_PAGE_BITS - 1) bytes.
A large memory requirement can consume all system resources; consider
using reserved memory instead to improve system endurance.
The max order should be at least large enough to satisfy huge-page usage.

config RT_USING_MEMBLOCK
bool "Using memblock"
default n
Expand All @@ -16,4 +35,18 @@ config RT_INIT_MEMORY_REGIONS
memory into different types of regions. This variable specifies
the maximum number of regions supported by the system.

menu "Debugging"
config RT_DEBUGGING_ALIASING
bool "Using aliasing paging debugger"
default n

config RT_DEBUGING_PAGE_LEAK
bool "Using page leaking tracer"
default n

config RT_DEBUGGING_PAGE_POISON
bool "Using page poisoner to detect illegal usage"
default n
endmenu

endmenu
10 changes: 6 additions & 4 deletions components/mm/ioremap.c
Original file line number Diff line number Diff line change
Expand Up @@ -38,13 +38,15 @@ static void *_ioremap_type(void *paddr, size_t size, enum ioremap_type type)
size_t attr;
size_t lo_off;
int err;
size_t pa_off = (rt_ubase_t)paddr & ~(RT_PAGE_AFFINITY_BLOCK_SIZE - 1);

lo_off = (rt_ubase_t)paddr & ARCH_PAGE_MASK;
lo_off = (rt_ubase_t)paddr - pa_off;
pa_off = MM_PA_TO_OFF(pa_off);

struct rt_mm_va_hint hint = {
.prefer = RT_NULL,
.map_size = RT_ALIGN(size + lo_off, ARCH_PAGE_SIZE),
.flags = 0,
.map_size = RT_ALIGN(size + lo_off, RT_PAGE_AFFINITY_BLOCK_SIZE),
.flags = MMF_CREATE(0, RT_PAGE_AFFINITY_BLOCK_SIZE),
.limit_start = rt_ioremap_start,
.limit_range_size = rt_ioremap_size,
};
Expand All @@ -63,7 +65,7 @@ static void *_ioremap_type(void *paddr, size_t size, enum ioremap_type type)
default:
return v_addr;
}
err = rt_aspace_map_phy(&rt_kernel_space, &hint, attr, MM_PA_TO_OFF(paddr), (void **)&v_addr);
err = rt_aspace_map_phy(&rt_kernel_space, &hint, attr, pa_off, (void **)&v_addr);

if (err)
{
Expand Down
2 changes: 1 addition & 1 deletion components/mm/mm_anon.c
Original file line number Diff line number Diff line change
Expand Up @@ -581,7 +581,7 @@ int rt_varea_fix_private_locked(rt_varea_t ex_varea, void *pa,
}
else if (ex_obj->page_read)
{
page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
page = rt_pages_alloc_tagged(0, RT_PAGE_PICK_AFFID(fault_vaddr), PAGE_ANY_AVAILABLE);
if (page)
{
/** setup message & fetch the data from source object */
Expand Down
Loading
Loading