nommu: there is no mlock() for NOMMU, so don't provide the bits
The mlock() facility does not exist for NOMMU since all mappings are
effectively locked anyway, so we don't make the bits available when
they're not useful.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Greg Ungerer <gerg@snapgear.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Enrik Berkhan <Enrik.Berkhan@ge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
dhowells authored and torvalds committed Apr 1, 2009
1 parent 7ca43e7 commit 33925b2
Showing 3 changed files with 26 additions and 10 deletions.
20 changes: 13 additions & 7 deletions include/linux/page-flags.h
@@ -96,6 +96,8 @@ enum pageflags {
 	PG_swapbacked,		/* Page is backed by RAM/swap */
 #ifdef CONFIG_UNEVICTABLE_LRU
 	PG_unevictable,		/* Page is "unevictable" */
+#endif
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 	PG_mlocked,		/* Page is vma mlocked */
 #endif
 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
@@ -234,20 +236,20 @@ PAGEFLAG_FALSE(SwapCache)
 #ifdef CONFIG_UNEVICTABLE_LRU
 PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
 	TESTCLEARFLAG(Unevictable, unevictable)
+#else
+PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
+	SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
+	__CLEARPAGEFLAG_NOOP(Unevictable)
+#endif
 
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 #define MLOCK_PAGES 1
 PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
 	TESTSCFLAG(Mlocked, mlocked)
-
 #else
-
 #define MLOCK_PAGES 0
 PAGEFLAG_FALSE(Mlocked)
 	SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked)
-
-PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
-	SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
-	__CLEARPAGEFLAG_NOOP(Unevictable)
 #endif
 
 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
@@ -367,9 +369,13 @@ static inline void __ClearPageTail(struct page *page)
 
 #ifdef CONFIG_UNEVICTABLE_LRU
 #define __PG_UNEVICTABLE	(1 << PG_unevictable)
-#define __PG_MLOCKED		(1 << PG_mlocked)
 #else
 #define __PG_UNEVICTABLE	0
+#endif
+
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#define __PG_MLOCKED		(1 << PG_mlocked)
+#else
 #define __PG_MLOCKED		0
 #endif
 
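A note on the macro layer above: when CONFIG_HAVE_MLOCKED_PAGE_BIT is unset, the FALSE/NOOP variants emit inline stubs instead of real bit operations, so callers can test and set PG_mlocked unconditionally. Below is a minimal sketch of roughly what PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked) expands to; it is simplified, not the kernel's verbatim token-pasting macro bodies from include/linux/page-flags.h.

/* Simplified sketch, not the kernel's verbatim macro expansions. */
struct page;	/* opaque here; fully defined in include/linux/mm_types.h */

static inline int PageMlocked(struct page *page) { return 0; }		/* PAGEFLAG_FALSE */
static inline void SetPageMlocked(struct page *page) { }		/* SETPAGEFLAG_NOOP */
static inline int TestClearPageMlocked(struct page *page) { return 0; }	/* TESTCLEARFLAG_FALSE */

Because each stub returns a compile-time constant, a branch such as if (PageMlocked(page)) { ... } is eliminated entirely on builds without the bit.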
8 changes: 8 additions & 0 deletions mm/Kconfig
@@ -214,5 +214,13 @@ config UNEVICTABLE_LRU
 	  will use one page flag and increase the code size a little,
 	  say Y unless you know what you are doing.
 
+config HAVE_MLOCK
+	bool
+	default y if MMU=y
+
+config HAVE_MLOCKED_PAGE_BIT
+	bool
+	default y if HAVE_MLOCK=y && UNEVICTABLE_LRU=y
+
 config MMU_NOTIFIER
 	bool
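Neither new symbol has a prompt, so neither shows up in menuconfig; they only derive facts from existing options: HAVE_MLOCK tracks MMU, and HAVE_MLOCKED_PAGE_BIT additionally requires UNEVICTABLE_LRU. As an illustration, the resulting invariant could be asserted at compile time; this check is hypothetical and not part of this commit.

/* Hypothetical sanity check, not in the kernel: HAVE_MLOCKED_PAGE_BIT
 * can only default on when HAVE_MLOCK=y, so this branch is unreachable. */
#if defined(CONFIG_HAVE_MLOCKED_PAGE_BIT) && !defined(CONFIG_HAVE_MLOCK)
#error "CONFIG_HAVE_MLOCKED_PAGE_BIT set without CONFIG_HAVE_MLOCK"
#endif

On a NOMMU build (MMU=n), HAVE_MLOCK defaults off, so HAVE_MLOCKED_PAGE_BIT stays off regardless of UNEVICTABLE_LRU, which is what lets page-flags.h drop PG_mlocked.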
8 changes: 5 additions & 3 deletions mm/internal.h
@@ -63,6 +63,7 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
+#ifdef CONFIG_HAVE_MLOCK
 extern long mlock_vma_pages_range(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end);
 extern void munlock_vma_pages_range(struct vm_area_struct *vma,
@@ -71,6 +72,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 {
 	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
 }
+#endif
 
 #ifdef CONFIG_UNEVICTABLE_LRU
 /*
@@ -90,7 +92,7 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 /*
  * Called only in fault path via page_evictable() for a new page
  * to determine if it's being mapped into a LOCKED vma.
@@ -165,7 +167,7 @@ static inline void free_page_mlock(struct page *page)
 	}
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
+#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
 	return 0;
@@ -175,7 +177,7 @@ static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
 static inline void free_page_mlock(struct page *page) { }
 
-#endif /* CONFIG_UNEVICTABLE_LRU */
+#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 
 /*
  * Return the mem_map entry representing the 'offset' subpage within
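The payoff of the pattern in mm/internal.h is that generic mm code stays free of #ifdefs: when CONFIG_HAVE_MLOCKED_PAGE_BIT is off, the empty inline stubs compile away at every call site. A hedged sketch of that shape follows; the stub matches the diff, while the caller is hypothetical, standing in for a real free path such as the one in mm/page_alloc.c.

struct page;	/* opaque for this sketch */

/* Stub as in the !CONFIG_HAVE_MLOCKED_PAGE_BIT branch of mm/internal.h. */
static inline void free_page_mlock(struct page *page) { }

/* Hypothetical caller: no #ifdef needed, the call compiles to nothing. */
static void example_free_one_page(struct page *page)
{
	free_page_mlock(page);
	/* ... remainder of the page-free path ... */
}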
