Merge tag 'usercopy-v4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull hardened usercopy whitelisting from Kees Cook:
 "Currently, hardened usercopy performs dynamic bounds checking on slab
  cache objects. This is good, but still leaves a lot of kernel memory
  available to be copied to/from userspace in the face of bugs.

  To further restrict what memory is available for copying, this creates
  a way to whitelist specific areas of a given slab cache object for
  copying to/from userspace, allowing much finer granularity of access
  control.

  Slab caches that are never exposed to userspace can declare no
  whitelist for their objects, thereby keeping them unavailable to
  userspace via dynamic copy operations. (Note, an implicit form of
  whitelisting is the use of constant sizes in usercopy operations and
  get_user()/put_user(); these bypass all hardened usercopy checks since
  these sizes cannot change at runtime.)

  This new check is WARN-by-default, so any mistakes can be found over
  the next several releases without breaking anyone's system.

  The series has roughly the following sections:
   - remove %p and improve reporting with offset
   - prepare infrastructure and whitelist kmalloc
   - update VFS subsystem with whitelists
   - update SCSI subsystem with whitelists
   - update network subsystem with whitelists
   - update process memory with whitelists
   - update per-architecture thread_struct with whitelists
   - update KVM with whitelists and fix ioctl bug
   - mark all other allocations as not whitelisted
   - update lkdtm for more sensible test coverage"

* tag 'usercopy-v4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (38 commits)
  lkdtm: Update usercopy tests for whitelisting
  usercopy: Restrict non-usercopy caches to size 0
  kvm: x86: fix KVM_XEN_HVM_CONFIG ioctl
  kvm: whitelist struct kvm_vcpu_arch
  arm: Implement thread_struct whitelist for hardened usercopy
  arm64: Implement thread_struct whitelist for hardened usercopy
  x86: Implement thread_struct whitelist for hardened usercopy
  fork: Provide usercopy whitelisting for task_struct
  fork: Define usercopy region in thread_stack slab caches
  fork: Define usercopy region in mm_struct slab caches
  net: Restrict unwhitelisted proto caches to size 0
  sctp: Copy struct sctp_sock.autoclose to userspace using put_user()
  sctp: Define usercopy region in SCTP proto slab cache
  caif: Define usercopy region in caif proto slab cache
  ip: Define usercopy region in IP proto slab cache
  net: Define usercopy region in struct proto slab cache
  scsi: Define usercopy region in scsi_sense_cache slab cache
  cifs: Define usercopy region in cifs_request slab cache
  vxfs: Define usercopy region in vxfs_inode slab cache
  ufs: Define usercopy region in ufs_inode_cache slab cache
  ...
torvalds committed Feb 4, 2018
2 parents 0771ad4 + e47e311 commit 617aebe
Showing 45 changed files with 515 additions and 215 deletions.
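
Before the per-file hunks, a sketch of the new allocator entry point may help orient readers. The struct and function names below are invented for illustration; kmem_cache_create_usercopy(), offsetof(), and sizeof_field() are the real interfaces this series uses (see the befs and scsi hunks below), with the two new parameters bounding the whitelisted region:

#include <linux/slab.h>
#include <linux/stddef.h>

/* Illustrative object: only one field is ever exchanged with userspace. */
struct example_obj {
	void *private_state;	/* kernel-only; usercopy here should WARN */
	char user_visible[64];	/* the whitelisted window */
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	/*
	 * The useroffset/usersize arguments bound what copy_to_user() and
	 * copy_from_user() may touch in objects from this cache; anything
	 * outside [useroffset, useroffset + usersize) trips the hardened
	 * usercopy check.
	 */
	example_cachep = kmem_cache_create_usercopy("example_cache",
				sizeof(struct example_obj), 0, 0,
				offsetof(struct example_obj, user_visible),
				sizeof_field(struct example_obj, user_visible),
				NULL);
	return example_cachep ? 0 : -ENOMEM;
}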
11 changes: 11 additions & 0 deletions arch/Kconfig
@@ -245,6 +245,17 @@ config ARCH_TASK_STRUCT_ON_STACK
 config ARCH_TASK_STRUCT_ALLOCATOR
 	bool
 
+config HAVE_ARCH_THREAD_STRUCT_WHITELIST
+	bool
+	depends on !ARCH_TASK_STRUCT_ALLOCATOR
+	help
+	  An architecture should select this to provide hardened usercopy
+	  knowledge about what region of the thread_struct should be
+	  whitelisted for copying to userspace. Normally this is only the
+	  FPU registers. Specifically, arch_thread_struct_whitelist()
+	  should be implemented. Without this, the entire thread_struct
+	  field in task_struct will be left whitelisted.
+
 # Select if arch has its private alloc_thread_stack() function
 config ARCH_THREAD_STACK_ALLOCATOR
 	bool
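
When an architecture does not select HAVE_ARCH_THREAD_STRUCT_WHITELIST, the series falls back to whitelisting the entire thread_struct, as the last sentence of the help text says. The fallback provided by the fork patch later in the series looks roughly like this (a sketch, not quoted verbatim):

#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
/* No arch whitelist: leave all of thread_struct copyable, the old behavior. */
#define arch_thread_struct_whitelist(offset, size) \
	do { (offset) = 0; (size) = sizeof(struct thread_struct); } while (0)
#endif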
1 change: 1 addition & 0 deletions arch/arm/Kconfig
@@ -51,6 +51,7 @@ config ARM
 	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
 	select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
+	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARM_SMCCC if CPU_V7
 	select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32
10 changes: 10 additions & 0 deletions arch/arm/include/asm/processor.h
@@ -45,6 +45,16 @@ struct thread_struct {
 	struct debug_info	debug;
 };
 
+/*
+ * Everything usercopied to/from thread_struct is statically-sized, so
+ * no hardened usercopy whitelist is needed.
+ */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+						unsigned long *size)
+{
+	*offset = *size = 0;
+}
+
 #define INIT_THREAD  { }
 
 #define start_thread(regs,pc,sp)					\
1 change: 1 addition & 0 deletions arch/arm64/Kconfig
@@ -91,6 +91,7 @@ config ARM64
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
 	select HAVE_ARCH_SECCOMP_FILTER
+	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select HAVE_ARCH_VMAP_STACK
10 changes: 10 additions & 0 deletions arch/arm64/include/asm/processor.h
@@ -113,6 +113,16 @@ struct thread_struct {
 	struct debug_info	debug;		/* debugging */
 };
 
+/*
+ * Everything usercopied to/from thread_struct is statically-sized, so
+ * no hardened usercopy whitelist is needed.
+ */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+						unsigned long *size)
+{
+	*offset = *size = 0;
+}
+
 #ifdef CONFIG_COMPAT
 #define task_user_tls(t)						\
 ({									\
1 change: 1 addition & 0 deletions arch/x86/Kconfig
@@ -116,6 +116,7 @@ config X86
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if MMU && COMPAT
 	select HAVE_ARCH_COMPAT_MMAP_BASES	if MMU && COMPAT
 	select HAVE_ARCH_SECCOMP_FILTER
+	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
8 changes: 8 additions & 0 deletions arch/x86/include/asm/processor.h
@@ -507,6 +507,14 @@ struct thread_struct {
 	 */
 };
 
+/* Whitelist the FPU state from the task_struct for hardened usercopy. */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+						unsigned long *size)
+{
+	*offset = offsetof(struct thread_struct, fpu.state);
+	*size = fpu_kernel_xstate_size;
+}
+
 /*
  * Thread-synchronous status.
  *
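
These arch helpers report offsets relative to thread_struct, but the slab object is the enclosing task_struct, so the fork-side consumer has to rebase the offset before creating the cache. A simplified sketch of that consumer (condensed from the "fork: Provide usercopy whitelisting for task_struct" patch in this series; the alignment computation is simplified here):

void __init fork_init(void)	/* simplified: only the cache setup is shown */
{
	unsigned long useroffset, usersize;

	/* Ask the architecture which slice of thread_struct is copyable. */
	arch_thread_struct_whitelist(&useroffset, &usersize);
	if (usersize == 0)
		useroffset = 0;	/* empty whitelist */
	else
		/* Rebase from thread_struct-relative to task_struct-relative. */
		useroffset += offsetof(struct task_struct, thread);

	task_struct_cachep = kmem_cache_create_usercopy("task_struct",
			arch_task_struct_size, L1_CACHE_BYTES,
			SLAB_PANIC | SLAB_ACCOUNT,
			useroffset, usersize, NULL);
}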
7 changes: 4 additions & 3 deletions arch/x86/kvm/x86.c
@@ -4237,13 +4237,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		mutex_unlock(&kvm->lock);
 		break;
 	case KVM_XEN_HVM_CONFIG: {
+		struct kvm_xen_hvm_config xhc;
 		r = -EFAULT;
-		if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
-				   sizeof(struct kvm_xen_hvm_config)))
+		if (copy_from_user(&xhc, argp, sizeof(xhc)))
 			goto out;
 		r = -EINVAL;
-		if (kvm->arch.xen_hvm_config.flags)
+		if (xhc.flags)
 			goto out;
+		memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
 		r = 0;
 		break;
 	}
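
Worth noting: this hunk fixes a real bug independent of whitelisting. The old code copied userspace data straight into the live kvm->arch.xen_hvm_config, so a request that then failed the flags check (or a partial copy) still left the persistent structure clobbered. The stage-validate-commit pattern it switches to looks roughly like this in isolation (hypothetical function name, distilled from the diff above):

/* Hypothetical distillation of the stage-validate-commit pattern. */
static long example_set_config(struct kvm_xen_hvm_config *dst,
			       const void __user *argp)
{
	struct kvm_xen_hvm_config xhc;

	if (copy_from_user(&xhc, argp, sizeof(xhc)))
		return -EFAULT;		/* *dst untouched on failure */
	if (xhc.flags)
		return -EINVAL;		/* rejected before publishing */
	memcpy(dst, &xhc, sizeof(xhc));	/* commit only validated data */
	return 0;
}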
4 changes: 2 additions & 2 deletions drivers/misc/lkdtm.h
@@ -76,8 +76,8 @@ void __init lkdtm_usercopy_init(void);
 void __exit lkdtm_usercopy_exit(void);
 void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
 void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
-void lkdtm_USERCOPY_HEAP_FLAG_TO(void);
-void lkdtm_USERCOPY_HEAP_FLAG_FROM(void);
+void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void);
+void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void);
 void lkdtm_USERCOPY_STACK_FRAME_TO(void);
 void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
 void lkdtm_USERCOPY_STACK_BEYOND(void);
4 changes: 2 additions & 2 deletions drivers/misc/lkdtm_core.c
@@ -177,8 +177,8 @@ static const struct crashtype crashtypes[] = {
 	CRASHTYPE(ATOMIC_TIMING),
 	CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
 	CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
-	CRASHTYPE(USERCOPY_HEAP_FLAG_TO),
-	CRASHTYPE(USERCOPY_HEAP_FLAG_FROM),
+	CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO),
+	CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM),
 	CRASHTYPE(USERCOPY_STACK_FRAME_TO),
 	CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
 	CRASHTYPE(USERCOPY_STACK_BEYOND),
101 changes: 58 additions & 43 deletions drivers/misc/lkdtm_usercopy.c
@@ -20,7 +20,7 @@
  */
 static volatile size_t unconst = 0;
 static volatile size_t cache_size = 1024;
-static struct kmem_cache *bad_cache;
+static struct kmem_cache *whitelist_cache;
 
 static const unsigned char test_text[] = "This is a test.\n";
 
@@ -115,10 +115,16 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
 	vm_munmap(user_addr, PAGE_SIZE);
 }
 
+/*
+ * This checks for whole-object size validation with hardened usercopy,
+ * with or without usercopy whitelisting.
+ */
 static void do_usercopy_heap_size(bool to_user)
 {
 	unsigned long user_addr;
 	unsigned char *one, *two;
+	void __user *test_user_addr;
+	void *test_kern_addr;
 	size_t size = unconst + 1024;
 
 	one = kmalloc(size, GFP_KERNEL);
@@ -139,27 +145,30 @@ static void do_usercopy_heap_size(bool to_user)
 	memset(one, 'A', size);
 	memset(two, 'B', size);
 
+	test_user_addr = (void __user *)(user_addr + 16);
+	test_kern_addr = one + 16;
+
 	if (to_user) {
 		pr_info("attempting good copy_to_user of correct size\n");
-		if (copy_to_user((void __user *)user_addr, one, size)) {
+		if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
 			pr_warn("copy_to_user failed unexpectedly?!\n");
 			goto free_user;
 		}
 
 		pr_info("attempting bad copy_to_user of too large size\n");
-		if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
+		if (copy_to_user(test_user_addr, test_kern_addr, size)) {
 			pr_warn("copy_to_user failed, but lacked Oops\n");
 			goto free_user;
 		}
 	} else {
 		pr_info("attempting good copy_from_user of correct size\n");
-		if (copy_from_user(one, (void __user *)user_addr, size)) {
+		if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
 			pr_warn("copy_from_user failed unexpectedly?!\n");
 			goto free_user;
 		}
 
 		pr_info("attempting bad copy_from_user of too large size\n");
-		if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
+		if (copy_from_user(test_kern_addr, test_user_addr, size)) {
 			pr_warn("copy_from_user failed, but lacked Oops\n");
 			goto free_user;
 		}
@@ -172,77 +181,79 @@ static void do_usercopy_heap_size(bool to_user)
 	kfree(two);
 }
 
-static void do_usercopy_heap_flag(bool to_user)
+/*
+ * This checks for the specific whitelist window within an object. If this
+ * test passes, then do_usercopy_heap_size() tests will pass too.
+ */
+static void do_usercopy_heap_whitelist(bool to_user)
 {
-	unsigned long user_addr;
-	unsigned char *good_buf = NULL;
-	unsigned char *bad_buf = NULL;
+	unsigned long user_alloc;
+	unsigned char *buf = NULL;
+	unsigned char __user *user_addr;
+	size_t offset, size;
 
 	/* Make sure cache was prepared. */
-	if (!bad_cache) {
+	if (!whitelist_cache) {
 		pr_warn("Failed to allocate kernel cache\n");
 		return;
 	}
 
 	/*
-	 * Allocate one buffer from each cache (kmalloc will have the
-	 * SLAB_USERCOPY flag already, but "bad_cache" won't).
+	 * Allocate a buffer with a whitelisted window in the buffer.
	 */
-	good_buf = kmalloc(cache_size, GFP_KERNEL);
-	bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
-	if (!good_buf || !bad_buf) {
-		pr_warn("Failed to allocate buffers from caches\n");
+	buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
+	if (!buf) {
+		pr_warn("Failed to allocate buffer from whitelist cache\n");
 		goto free_alloc;
 	}
 
 	/* Allocate user memory we'll poke at. */
-	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+	user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
 			    PROT_READ | PROT_WRITE | PROT_EXEC,
 			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
-	if (user_addr >= TASK_SIZE) {
+	if (user_alloc >= TASK_SIZE) {
 		pr_warn("Failed to allocate user memory\n");
 		goto free_alloc;
 	}
+	user_addr = (void __user *)user_alloc;
 
-	memset(good_buf, 'A', cache_size);
-	memset(bad_buf, 'B', cache_size);
+	memset(buf, 'B', cache_size);
+
+	/* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
+	offset = (cache_size / 4) + unconst;
+	size = (cache_size / 16) + unconst;
 
 	if (to_user) {
-		pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
-		if (copy_to_user((void __user *)user_addr, good_buf,
-				 cache_size)) {
+		pr_info("attempting good copy_to_user inside whitelist\n");
+		if (copy_to_user(user_addr, buf + offset, size)) {
 			pr_warn("copy_to_user failed unexpectedly?!\n");
 			goto free_user;
 		}
 
-		pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
-		if (copy_to_user((void __user *)user_addr, bad_buf,
-				 cache_size)) {
+		pr_info("attempting bad copy_to_user outside whitelist\n");
+		if (copy_to_user(user_addr, buf + offset - 1, size)) {
 			pr_warn("copy_to_user failed, but lacked Oops\n");
 			goto free_user;
 		}
 	} else {
-		pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
-		if (copy_from_user(good_buf, (void __user *)user_addr,
-				   cache_size)) {
+		pr_info("attempting good copy_from_user inside whitelist\n");
+		if (copy_from_user(buf + offset, user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
 			goto free_user;
 		}
 
-		pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
-		if (copy_from_user(bad_buf, (void __user *)user_addr,
-				   cache_size)) {
+		pr_info("attempting bad copy_from_user outside whitelist\n");
+		if (copy_from_user(buf + offset - 1, user_addr, size)) {
 			pr_warn("copy_from_user failed, but lacked Oops\n");
 			goto free_user;
 		}
 	}
 
 free_user:
-	vm_munmap(user_addr, PAGE_SIZE);
+	vm_munmap(user_alloc, PAGE_SIZE);
 free_alloc:
-	if (bad_buf)
-		kmem_cache_free(bad_cache, bad_buf);
-	kfree(good_buf);
+	if (buf)
+		kmem_cache_free(whitelist_cache, buf);
 }
 
 /* Callable tests. */
@@ -256,14 +267,14 @@ void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
 	do_usercopy_heap_size(false);
 }
 
-void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
+void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
 {
-	do_usercopy_heap_flag(true);
+	do_usercopy_heap_whitelist(true);
 }
 
-void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
+void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
 {
-	do_usercopy_heap_flag(false);
+	do_usercopy_heap_whitelist(false);
 }
 
 void lkdtm_USERCOPY_STACK_FRAME_TO(void)
@@ -314,11 +325,15 @@ void lkdtm_USERCOPY_KERNEL(void)
 void __init lkdtm_usercopy_init(void)
 {
 	/* Prepare cache that lacks SLAB_USERCOPY flag. */
-	bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
-				      0, NULL);
+	whitelist_cache =
+		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
+					   0, 0,
+					   cache_size / 4,
+					   cache_size / 16,
+					   NULL);
 }
 
 void __exit lkdtm_usercopy_exit(void)
 {
-	kmem_cache_destroy(bad_cache);
+	kmem_cache_destroy(whitelist_cache);
 }
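
A quick worked example of the window the new cache declares: with the default cache_size of 1024 and unconst of 0 (both visible in the first hunk), useroffset is 1024 / 4 = 256 and usersize is 1024 / 16 = 64, so bytes [256, 320) of each object are whitelisted. The "good" tests copy exactly that window; the "bad" tests start one byte earlier (buf + offset - 1) with the same length, so the copy crosses the window's lower bound and should trigger the hardened usercopy report.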
9 changes: 5 additions & 4 deletions drivers/scsi/scsi_lib.c
@@ -79,14 +79,15 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
 	if (shost->unchecked_isa_dma) {
 		scsi_sense_isadma_cache =
 			kmem_cache_create("scsi_sense_cache(DMA)",
-			SCSI_SENSE_BUFFERSIZE, 0,
-			SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
+				SCSI_SENSE_BUFFERSIZE, 0,
+				SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
 		if (!scsi_sense_isadma_cache)
 			ret = -ENOMEM;
 	} else {
 		scsi_sense_cache =
-			kmem_cache_create("scsi_sense_cache",
-			SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
+			kmem_cache_create_usercopy("scsi_sense_cache",
+				SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
+				0, SCSI_SENSE_BUFFERSIZE, NULL);
 		if (!scsi_sense_cache)
 			ret = -ENOMEM;
 	}
14 changes: 9 additions & 5 deletions fs/befs/linuxvfs.c
@@ -444,11 +444,15 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
 static int __init
 befs_init_inodecache(void)
 {
-	befs_inode_cachep = kmem_cache_create("befs_inode_cache",
-					      sizeof (struct befs_inode_info),
-					      0, (SLAB_RECLAIM_ACCOUNT|
-						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
-					      init_once);
+	befs_inode_cachep = kmem_cache_create_usercopy("befs_inode_cache",
+				sizeof(struct befs_inode_info), 0,
+				(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
+					SLAB_ACCOUNT),
+				offsetof(struct befs_inode_info,
+					i_data.symlink),
+				sizeof_field(struct befs_inode_info,
+					i_data.symlink),
+				init_once);
 	if (befs_inode_cachep == NULL)
 		return -ENOMEM;
 