@@ -5,16 +5,23 @@
# ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
# define __kernel /* default address space */
# define __kernel __attribute__((address_space(0)))
# define __safe __attribute__((safe))
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
# define __iomem __attribute__((noderef, address_space(2)))
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x) __attribute__((context(x,0,1)))
# define __releases(x) __attribute__((context(x,1,0)))
# define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu __attribute__((noderef, address_space(3)))
# ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu __attribute__((noderef, address_space(4)))
# else
# define __rcu
# endif
extern void __chk_user_ptr ( const volatile void __user * ) ;
extern void __chk_io_ptr ( const volatile void __iomem * ) ;
# else
@@ -27,13 +34,20 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# endif
/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
# define ___PASTE(a,b) a##b
# define __PASTE(a,b) ___PASTE(a,b)
# ifdef __KERNEL__
# ifdef __GNUC__
@@ -49,6 +63,13 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# include <linux/compiler-intel.h>
# endif
/* Clang compiler defines __GNUC__. So we will overwrite implementations
* coming from above header files here
*/
# ifdef __clang__
# include <linux/compiler-clang.h>
# endif
/*
 * Generic compiler-dependent macros required for kernel
 * build go below this comment. Actual compiler/compiler version
@@ -156,6 +177,15 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
( typeof ( ptr ) ) ( __ptr + ( off ) ) ; } )
# endif
# ifndef OPTIMIZER_HIDE_VAR
# define OPTIMIZER_HIDE_VAR(var) barrier()
# endif
/* Not-quite-unique ID. */
# ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
# endif
# endif /* __KERNEL__ */
# endif /* __ASSEMBLY__ */
@@ -228,7 +258,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
/*
* Rather then using noinline to prevent stack consumption , use
* noinline_for_stack instead . For documentai ton reasons .
 * noinline_for_stack instead. For documentation reasons.
*/
# define noinline_for_stack noinline
@@ -270,11 +300,20 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define __section(S) __attribute__ ((__section__(#S)))
# endif
# ifndef __visible
# define __visible
# endif
/* Are two types/vars the same type (ignoring qualifiers)? */
# ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
# endif
/* Is this type a native word size -- useful for atomic operations */
# ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
# endif
/* Compile time object size, -1 for unknown */
# ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
@@ -284,8 +323,49 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# endif
# ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
# define __compiletime_error_fallback(condition) \
do { ( ( void ) sizeof ( char [ 1 - 2 * condition ] ) ) ; } while ( 0 )
# endif
# endif
# ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
# endif
# define __compiletime_assert(condition, msg, prefix, suffix) \
do { \
bool __cond = ! ( condition ) ; \
extern void prefix # # suffix ( void ) __compiletime_error ( msg ) ; \
if ( __cond ) \
prefix # # suffix ( ) ; \
__compiletime_error_fallback ( __cond ) ; \
} while ( 0 )
# define _compiletime_assert(condition, msg, prefix, suffix) \
__compiletime_assert ( condition , msg , prefix , suffix )
/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
# define compiletime_assert(condition, msg) \
_compiletime_assert ( condition , msg , __compiletime_assert_ , __LINE__ )
# define compiletime_assert_atomic_type(t) \
compiletime_assert ( __native_word ( t ) , \
" Need native word sized stores/loads for atomicity. " )
/*
* Prevent the compiler from merging or refetching accesses . The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE ( ) ,
@@ -300,4 +380,12 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
*/
# define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
# ifdef CONFIG_KPROBES
# define __kprobes __attribute__((__section__(".kprobes.text")))
# define nokprobe_inline __always_inline
# else
# define __kprobes
# define nokprobe_inline inline
# endif
# endif /* __LINUX_COMPILER_H */