/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 * 	Cleaned up and restructured to ease the addition of alternative
 * 	implementations of SLAB allocators.
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free(),
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * See also the comment on struct slab_rcu in mm/slab.c.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
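/*
 * A minimal sketch (hypothetical names) of creating a cache with this
 * flag, using the kmem_cache_create() prototype declared below:
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_DESTROY_BY_RCU, NULL);
 *
 * Objects freed with kmem_cache_free() may then be reused immediately,
 * but the backing pages survive until an RCU grace period has elapsed.
 */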
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent the debug-object checks from running on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif

/* The following flags affect how the page allocator groups pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can, however, be passed to kfree in the same way that
 * NULL can; both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
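/*
 * For illustration (a sketch of the expected behavior, not kernel code):
 *
 *	void *p = kmalloc(0, GFP_KERNEL);	// p == ZERO_SIZE_PTR
 *
 *	ZERO_OR_NULL_PTR(p);			// true
 *	kfree(p);				// no-op, as with NULL
 */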


struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
struct kmem_cache *
kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
			unsigned long, void (*)(void *), struct kmem_cache *);
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and, optionally, flags from the list above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
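/*
 * Typical use (a sketch; struct foo and foo_cache are hypothetical):
 *
 *	struct foo {
 *		int bar;
 *	} ____cacheline_aligned_in_smp;
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = KMEM_CACHE(foo, SLAB_PANIC);
 *
 * Objects then come from kmem_cache_alloc(foo_cache, GFP_KERNEL) and go
 * back with kmem_cache_free(foo_cache, obj).
 */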

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all of these
 * fields in its own definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate definitions of these fields in the kmem_cache structures
 * of SLAB and SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size  */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25 bytes), or the largest size the maximum
 * allocatable page order permits if that is less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators
 * have to do various tricks to work around compiler limitations in
 * order to ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB serves requests of up to two pages (order 1) directly from its
 * kmalloc caches and otherwise passes the request to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests of page size and larger to the page
 * allocator. No kmalloc array is necessary since objects of different
 * sizes can be allocated from the same page.
 */
#define KMALLOC_SHIFT_MAX	30
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n bytes
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <=  16 * 1024 * 1024) return 24;
	if (size <=  32 * 1024 * 1024) return 25;
	if (size <=  64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
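/*
 * A worked example of the mapping (assuming KMALLOC_MIN_SIZE <= 8):
 *
 *	kmalloc_index(8)   == 3	  // the 8-byte cache
 *	kmalloc_index(100) == 7	  // rounded up to the 128-byte cache
 *	kmalloc_index(192) == 2	  // the special 192-byte cache
 */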
#endif /* !CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#ifdef CONFIG_SLOB
#include <linux/slob_def.h>
#endif

/*
 * Determine the size used for the nth kmalloc cache.
 * Return the size, or 0 if a kmalloc cache for that
 * index does not exist.
 */
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1 << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}
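/*
 * Combined with kmalloc_index() above, for example:
 *
 *	kmalloc_size(kmalloc_index(100)) == 128
 *	kmalloc_size(2) == 192	  // when KMALLOC_MIN_SIZE <= 64
 */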

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * struct kmem_cache will hold a pointer to it, so the memory cost while
 * disabled is a single pointer. The runtime cost while enabled is higher
 * than it would be if this were bundled into kmem_cache, since an extra
 * pointer chase is needed. But the trade-off clearly lies in favor of not
 * penalizing non-users.
 *
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system.
 *
 * Child caches will hold extra metadata needed for their operation. Fields are:
 *
 * @memcg: pointer to the memcg this cache belongs to
 * @list: list_head for the list of all caches in this memcg
 * @root_cache: pointer to the global, root cache this cache was derived from
 * @dead: set to true after the memcg dies; the cache may still be around.
 * @nr_pages: number of pages that belong to this cache.
 * @destroy: worker to be called whenever we are ready, or believe we may be
 *           ready, to destroy this cache.
 */
struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct kmem_cache *memcg_caches[0];
		struct {
			struct mem_cgroup *memcg;
			struct list_head list;
			struct kmem_cache *root_cache;
			bool dead;
			atomic_t nr_pages;
			struct work_struct destroy;
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

struct seq_file;
int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel RAM.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * Additionally, it is possible to OR in one or more of the
 * following @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 *
 * kmalloc is the normal method of allocating memory
 * in the kernel.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags);
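/*
 * The common pattern (a sketch; buf and len are hypothetical):
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 */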

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;	/* n * size would overflow */
	return __kmalloc(n * size, flags);
}
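/*
 * Use this rather than open-coding kmalloc(n * size, ...) so the
 * multiplication is overflow-checked, e.g. (hypothetical names):
 *
 *	entries = kmalloc_array(nr_entries, sizeof(*entries), GFP_KERNEL);
 *	if (!entries)
 *		return -ENOMEM;
 */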

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
/**
 * kmalloc_node - allocate memory from a specific node
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: node to allocate from.
 *
 * kmalloc() for non-local nodes, used to allocate from a specific node
 * if available. Equivalent to kmalloc() in the non-NUMA single-node
 * case.
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc(size, flags);
}

static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);

static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
					gfp_t flags, int node)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif /* !CONFIG_NUMA && !CONFIG_SLOB */
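/*
 * A short sketch (ring and dev are hypothetical) of allocating near the
 * node a device is attached to:
 *
 *	ring = kmalloc_node(sizeof(*ring), GFP_KERNEL, dev_to_node(dev));
 *
 * On !CONFIG_NUMA kernels this degenerates to plain kmalloc(), per the
 * fallback above.
 */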

/*
 * kmalloc_track_caller is a special version of kmalloc that records,
 * for slab leak tracking, the caller of the routine that invokes it
 * rather than that routine itself (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif /* DEBUG_SLAB */
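/*
 * A minimal sketch (hypothetical wrapper): leak reports for allocations
 * made through the wrapper point at the wrapper's caller rather than at
 * the wrapper itself:
 *
 *	void *my_subsys_alloc(size_t len)
 *	{
 *		return kmalloc_track_caller(len, GFP_KERNEL);
 *	}
 */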

#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records, for slab leak tracking, the caller of the routine that
 * invokes it rather than that routine itself (confusing, eh?).
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}
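/*
 * The usual idiom (a sketch), sizing the allocation from the pointer so
 * the type can change without touching this line:
 *
 *	struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 */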

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

/*
 * Determine the size of a slab object
 */
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}

void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */