/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        long remove_sequence;   /* Least-recently added (next to run) */
        long insert_sequence;   /* Next to add */

        struct list_head worklist;
        wait_queue_head_t more_work;
        wait_queue_head_t work_done;

        struct workqueue_struct *wq;
        struct task_struct *thread;

        int run_depth;          /* Detect run_workqueue() recursion depth */

        int freezeable;         /* Freeze the thread during suspend */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        const char *name;
        struct list_head list;  /* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return list_empty(&wq->list);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work, void *wq)
{
        unsigned long new;

        BUG_ON(!work_pending(work));

        new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
}

static inline void *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
{
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        /*
         * We need to re-validate the work info after we've gotten
         * the cpu_workqueue lock. We can run the work now iff:
         *
         *  - the wq_data still matches the cpu_workqueue_struct
         *  - AND the work is still marked pending
         *  - AND the work is still on a list (which will be this
         *    workqueue_struct list)
         *
         * All these conditions are important, because we
         * need to protect against the work being run right
         * now on another CPU (all but the last one might be
         * true if it's currently running and has not been
         * released yet, for example).
         */
        if (get_wq_data(work) == cwq
            && work_pending(work)
            && !list_empty(&work->entry)) {
                work_func_t f = work->func;
                list_del_init(&work->entry);
                spin_unlock_irqrestore(&cwq->lock, flags);

                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                        work_release(work);
                f(work);

                spin_lock_irqsave(&cwq->lock, flags);
                cwq->remove_sequence++;
                wake_up(&cwq->work_done);
                ret = 1;
        }
        spin_unlock_irqrestore(&cwq->lock, flags);
        return ret;
}

/**
 * run_scheduled_work - run scheduled work synchronously
 * @work: work to run
 *
 * This checks if the work was pending, and runs it
 * synchronously if so.  It returns a boolean to indicate
 * whether it had any scheduled work to run or not.
 *
 * NOTE! This _only_ works for normal work_structs. You
 * CANNOT use this for delayed work, because the wq data
 * for delayed work will not point properly to the per-
 * CPU workqueue struct, but will change!
 */
int fastcall run_scheduled_work(struct work_struct *work)
{
        for (;;) {
                struct cpu_workqueue_struct *cwq;

                if (!work_pending(work))
                        return 0;
                if (list_empty(&work->entry))
                        return 0;
                /* NOTE! This depends intimately on __queue_work! */
                cwq = get_wq_data(work);
                if (!cwq)
                        return 0;
                if (__run_work(cwq, work))
                        return 1;
        }
}
EXPORT_SYMBOL(run_scheduled_work);

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        set_wq_data(work, cwq);
        list_add_tail(&work->entry, &cwq->worklist);
        cwq->insert_sequence++;
        wake_up(&cwq->more_work);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0, cpu = get_cpu();

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
                __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
                ret = 1;
        }
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

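/*
 * Example usage (illustrative sketch; my_dev, my_wq and my_work_handler are
 * hypothetical names, not symbols from this file).  A caller typically embeds
 * a work_struct in its own state and recovers it with container_of() in the
 * handler, which runs in process context and may sleep:
 *
 *      struct my_dev {
 *              struct workqueue_struct *my_wq;
 *              struct work_struct my_work;
 *      };
 *
 *      static void my_work_handler(struct work_struct *work)
 *      {
 *              struct my_dev *dev = container_of(work, struct my_dev, my_work);
 *              ...sleepable processing of dev...
 *      }
 *
 *      INIT_WORK(&dev->my_work, my_work_handler);
 *      queue_work(dev->my_wq, &dev->my_work);
 */
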
void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct workqueue_struct *wq = get_wq_data(&dwork->work);
        int cpu = smp_processor_id();

        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;

        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        timer_stats_timer_set_start_info(timer);
        if (delay == 0)
                return queue_work(wq, work);

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                set_wq_data(work, wq);
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

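/*
 * Example usage (illustrative sketch; my_dwork, my_timeout_fn and wq are
 * hypothetical names).  A delayed_work bundles a work_struct with a timer,
 * so the item is only queued once @delay jiffies have elapsed:
 *
 *      static void my_timeout_fn(struct work_struct *work);
 *      static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);
 *
 *      queue_delayed_work(wq, &my_dwork, HZ);  (queue roughly one second out)
 *      cancel_delayed_work(&my_dwork);         (kill it again while pending)
 */
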
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                set_wq_data(work, wq);
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer_on(timer, cpu);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        unsigned long flags;

        /*
         * Keep taking off work from the queue until
         * done.
         */
        spin_lock_irqsave(&cwq->lock, flags);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;

                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);

                BUG_ON(get_wq_data(work) != cwq);
                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                        work_release(work);
                f(work);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        current->pid);
                        printk(KERN_ERR "    last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                        dump_stack();
                }

                spin_lock_irqsave(&cwq->lock, flags);
                cwq->remove_sequence++;
                wake_up(&cwq->work_done);
        }
        cwq->run_depth--;
        spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DECLARE_WAITQUEUE(wait, current);
        struct k_sigaction sa;
        sigset_t blocked;

        if (!cwq->freezeable)
                current->flags |= PF_NOFREEZE;

        set_user_nice(current, -5);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /*
         * We inherited MPOL_INTERLEAVE from the booting kernel.
         * Set MPOL_DEFAULT to ensure node local allocations.
         */
        numa_default_policy();

        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                if (cwq->freezeable)
                        try_to_freeze();

                add_wait_queue(&cwq->more_work, &wait);
                if (list_empty(&cwq->worklist))
                        schedule();
                else
                        __set_current_state(TASK_RUNNING);
                remove_wait_queue(&cwq->more_work, &wait);

                if (!list_empty(&cwq->worklist))
                        run_workqueue(cwq);
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
        } else {
                DEFINE_WAIT(wait);
                long sequence_needed;

                spin_lock_irq(&cwq->lock);
                sequence_needed = cwq->insert_sequence;

                while (sequence_needed - cwq->remove_sequence > 0) {
                        prepare_to_wait(&cwq->work_done, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        spin_unlock_irq(&cwq->lock);
                        schedule();
                        spin_lock_irq(&cwq->lock);
                }
                finish_wait(&cwq->work_done, &wait);
                spin_unlock_irq(&cwq->lock);
        }
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence number and
 * will sleep until the head sequence is greater than or equal to that.  This
 * means that we sleep until all works which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        might_sleep();

        if (is_single_threaded(wq)) {
                /* Always use first cpu's area. */
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
        } else {
                int cpu;

                mutex_lock(&workqueue_mutex);
                for_each_online_cpu(cpu)
                        flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
                mutex_unlock(&workqueue_mutex);
        }
}
EXPORT_SYMBOL_GPL(flush_workqueue);

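/*
 * Example usage (illustrative sketch; dev, dev->wq and dev->poll_dwork are
 * hypothetical names).  A typical shutdown path first stops new submissions,
 * then flushes so nothing still references the objects about to be freed:
 *
 *      cancel_delayed_work(&dev->poll_dwork);
 *      flush_workqueue(dev->wq);
 *      (only now is it safe to tear down dev)
 */
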
static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
                                                   int cpu, int freezeable)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct task_struct *p;

        spin_lock_init(&cwq->lock);
        cwq->wq = wq;
        cwq->thread = NULL;
        cwq->insert_sequence = 0;
        cwq->remove_sequence = 0;
        cwq->freezeable = freezeable;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);
        init_waitqueue_head(&cwq->work_done);

        if (is_single_threaded(wq))
                p = kthread_create(worker_thread, cwq, "%s", wq->name);
        else
                p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
        if (IS_ERR(p))
                return NULL;
        cwq->thread = p;
        return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread, int freezeable)
{
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
        struct task_struct *p;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        mutex_lock(&workqueue_mutex);
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
                p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
                if (!p)
                        destroy = 1;
                else
                        wake_up_process(p);
        } else {
                list_add(&wq->list, &workqueues);
                for_each_online_cpu(cpu) {
                        p = create_workqueue_thread(wq, cpu, freezeable);
                        if (p) {
                                kthread_bind(p, cpu);
                                wake_up_process(p);
                        } else
                                destroy = 1;
                }
        }
        mutex_unlock(&workqueue_mutex);

        /*
         * Was there any error during startup? If yes then clean up:
         */
        if (destroy) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

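/*
 * Example usage (illustrative sketch; "mydrv" is a hypothetical name).
 * Callers normally go through the create_workqueue() and
 * create_singlethread_workqueue() wrappers from <linux/workqueue.h>, which
 * expand to __create_workqueue(), and pair them with destroy_workqueue():
 *
 *      struct workqueue_struct *wq;
 *
 *      wq = create_singlethread_workqueue("mydrv");
 *      if (!wq)
 *              return -ENOMEM;
 *      ...
 *      destroy_workqueue(wq);
 */
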
static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq;
        unsigned long flags;
        struct task_struct *p;

        cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        spin_lock_irqsave(&cwq->lock, flags);
        p = cwq->thread;
        cwq->thread = NULL;
        spin_unlock_irqrestore(&cwq->lock, flags);
        if (p)
                kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        int cpu;

        flush_workqueue(wq);

        /* We don't need the distraction of CPUs appearing and vanishing. */
        mutex_lock(&workqueue_mutex);
        if (is_single_threaded(wq))
                cleanup_workqueue_thread(wq, singlethread_cpu);
        else {
                for_each_online_cpu(cpu)
                        cleanup_workqueue_thread(wq, cpu);
                list_del(&wq->list);
        }
        mutex_unlock(&workqueue_mutex);
        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

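/*
 * Example usage (illustrative sketch; my_bh_work, my_bh_func and
 * my_irq_handler are hypothetical names).  Interrupt handlers commonly use
 * schedule_work() to push the sleepable part of their processing to keventd:
 *
 *      static void my_bh_func(struct work_struct *work);
 *      static DECLARE_WORK(my_bh_work, my_bh_func);
 *
 *      static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *      {
 *              schedule_work(&my_bh_work);
 *              return IRQ_HANDLED;
 *      }
 */
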
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
{
        timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        mutex_lock(&workqueue_mutex);
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
        mutex_unlock(&workqueue_mutex);
        flush_workqueue(keventd_wq);
        free_percpu(works);
        return 0;
}

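/*
 * Example usage (illustrative sketch; drain_local_caches is a hypothetical
 * function).  The handler runs once on every online CPU, in keventd context,
 * and the call only returns after all of them have completed:
 *
 *      static void drain_local_caches(struct work_struct *unused)
 *      {
 *              ...touch only this CPU's per-cpu data...
 *      }
 *
 *      schedule_on_each_cpu(drain_local_caches);
 */
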
void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
 * @wq: the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                       struct delayed_work *dwork)
{
        while (!cancel_delayed_work(dwork))
                flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
        cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

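/*
 * Example usage (illustrative sketch; dev->poll_dwork and dev->wq are
 * hypothetical fields).  When a handler requeues itself, a bare
 * cancel_delayed_work() can lose the race with the rearm, so teardown paths
 * use the rearming-aware helpers instead:
 *
 *      cancel_rearming_delayed_work(&dev->poll_dwork);                 (keventd)
 *      cancel_rearming_delayed_workqueue(dev->wq, &dev->poll_dwork);   (private wq)
 */
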
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:         the function to execute
 * @ew:         guaranteed storage for the execute work structure (must
 *              be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

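/*
 * Example usage (illustrative sketch; my_release and my_release_ew are
 * hypothetical names).  A path that may run in either process or interrupt
 * context hands the function plus caller-provided storage to
 * execute_in_process_context(); it runs immediately when possible and is
 * otherwise deferred to keventd:
 *
 *      static struct execute_work my_release_ew;
 *
 *      static void my_release(struct work_struct *work)
 *      {
 *              ...may sleep here...
 *      }
 *
 *      execute_in_process_context(my_release, &my_release_ew);
 */
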
int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;

}

/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct list_head list;
        struct work_struct *work;

        spin_lock_irq(&cwq->lock);
        list_replace_init(&cwq->worklist, &list);

        while (!list_empty(&list)) {
                printk("Taking work for %s\n", wq->name);
                work = list_entry(list.next, struct work_struct, entry);
                list_del(&work->entry);
                __queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
        }
        spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        unsigned int hotcpu = (unsigned long)hcpu;
        struct workqueue_struct *wq;

        switch (action) {
        case CPU_UP_PREPARE:
                mutex_lock(&workqueue_mutex);
                /* Create a new workqueue thread for it. */
                list_for_each_entry(wq, &workqueues, list) {
                        if (!create_workqueue_thread(wq, hotcpu, 0)) {
                                printk("workqueue for %i failed\n", hotcpu);
                                return NOTIFY_BAD;
                        }
                }
                break;

        case CPU_ONLINE:
                /* Kick off worker threads. */
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq;

                        cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
                        kthread_bind(cwq->thread, hotcpu);
                        wake_up_process(cwq->thread);
                }
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_UP_CANCELED:
                list_for_each_entry(wq, &workqueues, list) {
                        if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
                                continue;
                        /* Unbind so it can run. */
                        kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
                                     any_online_cpu(cpu_online_map));
                        cleanup_workqueue_thread(wq, hotcpu);
                }
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_DOWN_PREPARE:
                mutex_lock(&workqueue_mutex);
                break;

        case CPU_DOWN_FAILED:
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_DEAD:
                list_for_each_entry(wq, &workqueues, list)
                        cleanup_workqueue_thread(wq, hotcpu);
                list_for_each_entry(wq, &workqueues, list)
                        take_over_work(wq, hotcpu);
                mutex_unlock(&workqueue_mutex);
                break;
        }

        return NOTIFY_OK;
}

void init_workqueues(void)
{
        singlethread_cpu = first_cpu(cpu_possible_map);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}