
Commit c852023

Hugh Dickins authored and torvalds committed
huge tmpfs: move shmem_huge_enabled() upwards
shmem_huge_enabled() is about to be enhanced into shmem_is_huge(), so that
it can be used more widely throughout: before making functional changes,
shift it to its final position (to avoid forward declaration).

Link: https://lkml.kernel.org/r/16fec7b7-5c84-415a-8586-69d8bf6a6685@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent b9e2faa commit c852023
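To make the "avoid forward declaration" rationale concrete, here is a minimal hypothetical sketch (not part of this commit): had the definition stayed near the bottom of mm/shmem.c, callers added earlier in the file by the follow-up shmem_is_huge() work would have needed a declaration above them, along these lines:

/*
 * Hypothetical alternative, NOT what this commit does: leave the definition
 * near the bottom of mm/shmem.c and add a file-local forward declaration
 * near the top, so earlier code can call the function before the compiler
 * has seen its body.
 */
bool shmem_huge_enabled(struct vm_area_struct *vma);

Moving the definition up next to shmem_huge avoids carrying such a declaration once later patches begin calling the function from earlier in the file.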

File tree

1 file changed: +35 -37 lines

mm/shmem.c

Lines changed: 35 additions & 37 deletions
@@ -473,6 +473,41 @@ static bool shmem_confirm_swap(struct address_space *mapping,
 
 static int shmem_huge __read_mostly;
 
+bool shmem_huge_enabled(struct vm_area_struct *vma)
+{
+        struct inode *inode = file_inode(vma->vm_file);
+        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+        loff_t i_size;
+        pgoff_t off;
+
+        if ((vma->vm_flags & VM_NOHUGEPAGE) ||
+            test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+                return false;
+        if (shmem_huge == SHMEM_HUGE_FORCE)
+                return true;
+        if (shmem_huge == SHMEM_HUGE_DENY)
+                return false;
+        switch (sbinfo->huge) {
+        case SHMEM_HUGE_NEVER:
+                return false;
+        case SHMEM_HUGE_ALWAYS:
+                return true;
+        case SHMEM_HUGE_WITHIN_SIZE:
+                off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
+                i_size = round_up(i_size_read(inode), PAGE_SIZE);
+                if (i_size >= HPAGE_PMD_SIZE &&
+                    i_size >> PAGE_SHIFT >= off)
+                        return true;
+                fallthrough;
+        case SHMEM_HUGE_ADVISE:
+                /* TODO: implement fadvise() hints */
+                return (vma->vm_flags & VM_HUGEPAGE);
+        default:
+                VM_BUG_ON(1);
+                return false;
+        }
+}
+
 #if defined(CONFIG_SYSFS)
 static int shmem_parse_huge(const char *str)
 {
@@ -3979,43 +4014,6 @@ struct kobj_attribute shmem_enabled_attr =
         __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-bool shmem_huge_enabled(struct vm_area_struct *vma)
-{
-        struct inode *inode = file_inode(vma->vm_file);
-        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
-        loff_t i_size;
-        pgoff_t off;
-
-        if ((vma->vm_flags & VM_NOHUGEPAGE) ||
-            test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
-                return false;
-        if (shmem_huge == SHMEM_HUGE_FORCE)
-                return true;
-        if (shmem_huge == SHMEM_HUGE_DENY)
-                return false;
-        switch (sbinfo->huge) {
-        case SHMEM_HUGE_NEVER:
-                return false;
-        case SHMEM_HUGE_ALWAYS:
-                return true;
-        case SHMEM_HUGE_WITHIN_SIZE:
-                off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
-                i_size = round_up(i_size_read(inode), PAGE_SIZE);
-                if (i_size >= HPAGE_PMD_SIZE &&
-                    i_size >> PAGE_SHIFT >= off)
-                        return true;
-                fallthrough;
-        case SHMEM_HUGE_ADVISE:
-                /* TODO: implement fadvise() hints */
-                return (vma->vm_flags & VM_HUGEPAGE);
-        default:
-                VM_BUG_ON(1);
-                return false;
-        }
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
 #else /* !CONFIG_SHMEM */
 
 /*
