linux-brain/include/linux/thread_info.h
commit f5509cc18d ("mm: Hardened usercopy") by Kees Cook
This is the start of porting PAX_USERCOPY into the mainline kernel. This
is the first set of features, controlled by CONFIG_HARDENED_USERCOPY. The
work is based on code by PaX Team and Brad Spengler, and an earlier port
from Casey Schaufler. Additional non-slab page tests are from Rik van Riel.

This patch contains the logic for validating several conditions when
performing copy_to_user() and copy_from_user() on the kernel object
being copied to/from (a simplified sketch of these checks follows below):
- address range doesn't wrap around
- address range isn't NULL or zero-allocated (with a non-zero copy size)
- if on the slab allocator:
  - copy size must be less than or equal to object size (when the check
    is implemented in the allocator, which appears in subsequent patches)
- otherwise, object must not span page allocations (excepting Reserved
  and CMA ranges)
- if on the stack
  - object must not extend before/after the current process stack
  - object must be contained by a valid stack frame (when there is
    arch/build support for identifying stack frames)
- object must not overlap with kernel text

Signed-off-by: Kees Cook <keescook@chromium.org>
Tested-by: Valdis Kletnieks <valdis.kletnieks@vt.edu>
Tested-by: Michael Ellerman <mpe@ellerman.id.au>
2016-07-26 14:41:47 -07:00
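
A minimal, self-contained sketch of the check order the list above describes;
it uses plain C with made-up names (sketch_check_object() and the stub query
helpers), stubs out the slab, page, stack and kernel-text lookups, and is an
illustration rather than the mainline mm/usercopy.c implementation:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Stubs standing in for the real slab, page, stack and kernel-text queries. */
	static bool exceeds_slab_object(const void *ptr, size_t n) { (void)ptr; (void)n; return false; }
	static bool spans_page_allocations(uintptr_t start, uintptr_t end) { (void)start; (void)end; return false; }
	static bool outside_current_stack(uintptr_t start, uintptr_t end) { (void)start; (void)end; return false; }
	static bool overlaps_kernel_text(uintptr_t start, uintptr_t end) { (void)start; (void)end; return false; }

	/*
	 * Returns NULL when the copy looks safe, otherwise a short reason string.
	 * The order mirrors the checks listed in the commit message above.
	 */
	static const char *sketch_check_object(const void *ptr, size_t n)
	{
		uintptr_t start = (uintptr_t)ptr;
		uintptr_t end = start + n;

		if (n == 0)
			return NULL;			/* nothing to copy, nothing to check */
		if (ptr == NULL)
			return "NULL object";		/* NULL with a non-zero copy size */
		if (end < start)
			return "address wrap-around";	/* range wraps past the top of memory */
		if (exceeds_slab_object(ptr, n))
			return "copy exceeds slab object size";
		if (spans_page_allocations(start, end))
			return "object spans page allocations";
		if (outside_current_stack(start, end))
			return "object extends beyond process stack";
		if (overlaps_kernel_text(start, end))
			return "object overlaps kernel text";
		return NULL;
	}

	int main(void)
	{
		char buf[16];
		const char *why;

		why = sketch_check_object(buf, sizeof(buf));
		printf("buf:  %s\n", why ? why : "ok");

		/* Deliberately wrap the address range to trigger a rejection. */
		why = sketch_check_object((void *)UINTPTR_MAX, 2);
		printf("wrap: %s\n", why ? why : "ok");
		return 0;
	}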


/* thread_info.h: common low-level thread information accessors
 *
 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds
 */

#ifndef _LINUX_THREAD_INFO_H
#define _LINUX_THREAD_INFO_H

#include <linux/types.h>
#include <linux/bug.h>

struct timespec;
struct compat_timespec;

/*
 * System call restart block.
 */
struct restart_block {
	long (*fn)(struct restart_block *);
	union {
		/* For futex_wait and futex_wait_requeue_pi */
		struct {
			u32 __user *uaddr;
			u32 val;
			u32 flags;
			u32 bitset;
			u64 time;
			u32 __user *uaddr2;
		} futex;
		/* For nanosleep */
		struct {
			clockid_t clockid;
			struct timespec __user *rmtp;
#ifdef CONFIG_COMPAT
			struct compat_timespec __user *compat_rmtp;
#endif
			u64 expires;
		} nanosleep;
		/* For poll */
		struct {
			struct pollfd __user *ufds;
			int nfds;
			int has_timeout;
			unsigned long tv_sec;
			unsigned long tv_nsec;
		} poll;
	};
};
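
/*
 * Illustrative use (condensed, roughly following the hrtimer nanosleep
 * path; not part of the original header): an interrupted syscall records
 * how it should be resumed and returns -ERESTART_RESTARTBLOCK so the
 * signal code can re-enter it through ->fn:
 *
 *	struct restart_block *restart = &current->restart_block;
 *
 *	restart->fn = hrtimer_nanosleep_restart;
 *	restart->nanosleep.clockid = clockid;
 *	restart->nanosleep.rmtp = rmtp;
 *	restart->nanosleep.expires = expires;
 *	return -ERESTART_RESTARTBLOCK;
 */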

extern long do_no_restart_syscall(struct restart_block *parm);

#include <linux/bitops.h>
#include <asm/thread_info.h>

#ifdef __KERNEL__

#ifdef CONFIG_DEBUG_STACK_USAGE
# define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
				 __GFP_ZERO)
#else
# define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_NOTRACK)
#endif

/*
 * flag set/clear/test wrappers
 * - pass TIF_xxxx constants to these functions
 */

static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
{
	set_bit(flag, (unsigned long *)&ti->flags);
}

static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	clear_bit(flag, (unsigned long *)&ti->flags);
}

static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_set_bit(flag, (unsigned long *)&ti->flags);
}

static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
}

static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_bit(flag, (unsigned long *)&ti->flags);
}

#define set_thread_flag(flag) \
	set_ti_thread_flag(current_thread_info(), flag)
#define clear_thread_flag(flag) \
	clear_ti_thread_flag(current_thread_info(), flag)
#define test_and_set_thread_flag(flag) \
	test_and_set_ti_thread_flag(current_thread_info(), flag)
#define test_and_clear_thread_flag(flag) \
	test_and_clear_ti_thread_flag(current_thread_info(), flag)
#define test_thread_flag(flag) \
	test_ti_thread_flag(current_thread_info(), flag)

#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
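
/*
 * Illustrative use (not part of the original header): callers pass
 * TIF_xxxx constants and usually act on the current task, e.g.
 *
 *	if (test_thread_flag(TIF_NEED_RESCHED))
 *		schedule();
 *
 * The *_ti_thread_flag() variants take an explicit thread_info, for
 * operating on a task other than current.
 */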

#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
/*
 * An arch can define its own version of set_restore_sigmask() to get the
 * job done however works, with or without TIF_RESTORE_SIGMASK.
 */
#define HAVE_SET_RESTORE_SIGMASK	1

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag. For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce. We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
	set_thread_flag(TIF_RESTORE_SIGMASK);
	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
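
/*
 * Typical caller pattern (condensed, roughly following the sigsuspend()
 * helper in kernel/signal.c; not part of the original header): save the
 * old mask, block on the new one, then ask for the saved mask to be
 * restored on return to user mode:
 *
 *	current->saved_sigmask = current->blocked;
 *	set_current_blocked(&newset);
 *	while (!signal_pending(current)) {
 *		__set_current_state(TASK_INTERRUPTIBLE);
 *		schedule();
 *	}
 *	set_restore_sigmask();
 *	return -ERESTARTNOHAND;
 */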

static inline void clear_restore_sigmask(void)
{
	clear_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_restore_sigmask(void)
{
	return test_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_and_clear_restore_sigmask(void)
{
	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}
#endif	/* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */

#ifndef HAVE_SET_RESTORE_SIGMASK
#error "no set_restore_sigmask() provided and default one won't work"
#endif

#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
/*
 * Fallback for architectures that cannot identify stack frames: the
 * per-frame check is skipped and only whole-stack bounds apply.
 */
static inline int arch_within_stack_frames(const void * const stack,
					   const void * const stackend,
					   const void *obj, unsigned long len)
{
	return 0;
}
#endif

/*
 * check_object_size() validates the bounds of a kernel object before it
 * is copied to or from userspace; it compiles to a no-op when
 * CONFIG_HARDENED_USERCOPY is not enabled.
 */
#ifdef CONFIG_HARDENED_USERCOPY
extern void __check_object_size(const void *ptr, unsigned long n,
					bool to_user);

static inline void check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{
	__check_object_size(ptr, n, to_user);
}
#else
static inline void check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{ }
#endif /* CONFIG_HARDENED_USERCOPY */

#endif	/* __KERNEL__ */

#endif /* _LINUX_THREAD_INFO_H */
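
For context on how this header is consumed: each architecture's uaccess
wrappers call check_object_size() on the kernel-side buffer before doing the
raw copy. The following is a simplified, illustrative sketch of such a wrapper
(not any particular architecture's real uaccess.h; example_copy_from_user() is
a made-up name, and access_ok()/__copy_from_user() appear in their 2016-era
forms):

	/* Illustrative only: a copy_from_user()-style wrapper with the hardened check. */
	static inline unsigned long
	example_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		/* Validate the kernel destination object (to_user == false). */
		check_object_size(to, n, false);

		if (access_ok(VERIFY_READ, from, n))
			return __copy_from_user(to, from, n);

		/* Convention: return the number of bytes that were not copied. */
		return n;
	}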