#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/sched.h>

#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;

8struct thread_info *alloc_thread_info(struct task_struct *tsk)
9{
10 struct thread_info *ti;
11
12 ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
13 if (unlikely(ti == NULL))
14 return NULL;
15#ifdef CONFIG_DEBUG_STACK_USAGE
16 memset(ti, 0, THREAD_SIZE);
17#endif
18 return ti;
19}
/* Release a thread_info obtained from the slab-backed allocator above. */
void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}
/*
 * Create the slab cache backing thread_info allocations; called once
 * during early boot.  SLAB_PANIC means there is no error return to
 * check -- the kernel panics if the cache cannot be created.  The
 * align argument is THREAD_SIZE, keeping each stack naturally aligned
 * within the cache.
 */
void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, SLAB_PANIC, NULL);
}
#else
32struct thread_info *alloc_thread_info(struct task_struct *tsk)
33{
34#ifdef CONFIG_DEBUG_STACK_USAGE
35 gfp_t mask = GFP_KERNEL | __GFP_ZERO;
36#else
37 gfp_t mask = GFP_KERNEL;
38#endif
39 return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
40}
/* Free the THREAD_SIZE_ORDER pages backing @ti's stack. */
void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */