A bottom-half task is represented by the following structure:
***************** struct work_struct ***************************
struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;	/* the bottom-half handler to run */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

typedef void (*work_func_t)(struct work_struct *work);

1. Instantiate a work item:
struct work_struct work;
INIT_WORK(&work, func);	/* func: the bottom-half handler */

#define INIT_WORK(_work, _func)				\
	do {						\
		__INIT_WORK((_work), (_func), 0);	\
	} while (0)
//INIT_WORK() initializes the work item _work and binds the handler _func to it. The handler later receives a pointer to this work_struct; any private data the handler needs is normally embedded in the same containing structure and recovered with container_of().
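A minimal usage sketch putting these pieces together (my_dev, my_handler and my_init are illustrative names, not from the original): the work item is embedded in a driver structure, and the handler recovers the private data with container_of().

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	int pending_events;		/* driver-private data */
	struct work_struct work;	/* embedded work item */
};

static struct my_dev dev;

/* the handler only receives the work_struct pointer */
static void my_handler(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev, work);

	pr_info("pending_events = %d\n", d->pending_events);
}

static int __init my_init(void)
{
	INIT_WORK(&dev.work, my_handler);	/* bind the handler to the work item */
	return 0;
}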

2. In the top half of the interrupt handler, call the following function to queue the bottom-half work on the kernel-global workqueue; a kernel worker thread will run it later.
schedule_work(&work);

//int schedule_work(struct work_struct *work)

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
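The return value can be useful: as the comment above says, schedule_work() returns non-zero when the work was actually queued now and zero when it was already pending. A small sketch (my_isr and already_pending are placeholder names) that counts how often the interrupt fired again before the bottom half had run:

static unsigned long already_pending;

static irqreturn_t my_isr(int irq, void *dev_id)
{
	if (!schedule_work(&work))	/* 0: the work item was already queued */
		already_pending++;

	return IRQ_HANDLED;
}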


3. At the driver module exit, call the following function to wait for the work to finish:

flush_work(&work);

/**
 * flush_work - wait for a work to finish executing the last queueing
 * instance
 * @work: the work to flush
 *
 * Wait until @work has finished execution. This function considers
 * only the last queueing instance of @work. If @work has been
 * enqueued across different CPUs on a non-reentrant workqueue or on
 * multiple workqueues, @work might still be executing on return on
 * some of the CPUs from earlier queueing.
 *
 * If @work was queued only on a non-reentrant, ordered or unbound
 * workqueue, @work is guaranteed to be idle on return if it hasn't
 * been requeued since flush started.
 *
 * RETURNS:
 * %true if flush_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_work(struct work_struct *work)
{
	struct wq_barrier barr;

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	if (start_flush_work(work, &barr, true)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
		return true;
	} else
		return false;
}
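Note that flush_work() only waits; it does not stop the work from being queued again afterwards. A sketch of one possible exit order (irq, dev and my_exit are placeholders, for illustration only): free the interrupt first so the top half can no longer requeue the work, then wait for the last instance; cancel_work_sync() could be used instead if a still-pending instance should not run at all.

static void __exit my_exit(void)
{
	free_irq(irq, &dev);		/* no new schedule_work() after this point */
	flush_work(&work);		/* wait for a queued/running instance to finish */
	/* cancel_work_sync(&work);	   alternative: also cancels a pending instance */
}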

----------------------------------------------------------------

Makefile

obj-m    := demo.o

KERNEL   := /linux-3.5

all:
	make -C $(KERNEL) M=`pwd`
clean:
	make -C $(KERNEL) M=`pwd` clean

demo.c

/* head file */
#include <linux/init.h>
#include <linux/module.h>
#include <plat/irqs.h>

#include <linux/interrupt.h>

struct mill_key{
	int irqnum;
	char *name;
	u32 cnt;
}millkeys[] = {
	{IRQ_EINT(26), "key1", 0},
	{IRQ_EINT(27), "key2", 0},
	{IRQ_EINT(28), "key3", 0},
	{IRQ_EINT(29), "key4", 0},
};

struct mywork{	/* the bottom-half handler cannot see the cookie passed to the top half,
		   so both are embedded in one structure to tie them together */
	struct mill_key *ptr;		/* pointer into the key array: the data we care about */
	struct work_struct work;	/* the bottom-half work item */
}mywork;

static void mill_unregister_irqkey(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(millkeys); ++i) {
		free_irq(millkeys[i].irqnum, &millkeys[i]);
	}

}

/* irq bottom half */
static void do_bh_handler (struct work_struct *work)
{
	struct mill_key *ptr;
	struct mywork *tmp = container_of(work, struct mywork, work);	/* recover the enclosing struct mywork */
	ptr = tmp->ptr;

	if (in_interrupt()) {
		printk("%s In interrupt ...\n", __func__);
	} else {
		printk("%s In process ...\n", __func__);
	}

	ptr->cnt++;
	printk("%s is %s!\n", ptr->name, (ptr->cnt%2)?"down":"up");
}

/* irq top half */
static irqreturn_t do_handler(int irqnum, void *dev)
{
	mywork.ptr = dev;	/* dev is the cookie passed as the last argument of request_irq() */

	schedule_work(&mywork.work);	/* hand the bottom half to a kernel worker thread */

	return IRQ_HANDLED;
}

static int mill_register_irqkey(void)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(millkeys); ++i) {
		ret = request_irq(
			millkeys[i].irqnum, do_handler,
			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			millkeys[i].name, &millkeys[i]);	/* the last argument is the cookie */
		if (ret < 0) {
			goto error0;
		}
	}

	return 0;
error0:
	while (i--) {
		free_irq(millkeys[i].irqnum, &millkeys[i]);
	}

	return ret;
}

/* driver module entry */
static int __init demo_init(void)
{
	INIT_WORK(&mywork.work, do_bh_handler);	/* bind do_bh_handler to the work item */

	return mill_register_irqkey();
}

module_init(demo_init);


/* driver module exit */
static void __exit demo_exit(void)
{
	flush_work(&mywork.work);	/* wait for the bottom-half work to finish */

	mill_unregister_irqkey();
}
module_exit(demo_exit);

/* driver module description */
MODULE_LICENSE("GPL");

MODULE_AUTHOR("millet9527");
MODULE_VERSION("millet plus 18");
MODULE_DESCRIPTION("example for driver module arch");

=================================================================

tasklet:
runs in interrupt (softirq) context; fast response; the handler must not sleep

workqueue:
runs in process context; slower; the work function may sleep
=====================================================================
Like a tasklet, a workqueue lets kernel code request that some function be called at a future time (paraphrasing LDD3).
Each workqueue is backed by a kernel process (worker thread).
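Besides the global queue that schedule_work() uses, a driver can create its own workqueue with a dedicated worker thread; a minimal sketch, with wq, my_work, my_wq_handler and the module functions as placeholder names:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq;
static struct work_struct my_work;

static void my_wq_handler(struct work_struct *work)
{
	pr_info("running in the driver's own worker thread\n");
}

static int __init my_wq_init(void)
{
	wq = create_singlethread_workqueue("my_wq");	/* dedicated kernel thread */
	if (!wq)
		return -ENOMEM;

	INIT_WORK(&my_work, my_wq_handler);
	queue_work(wq, &my_work);			/* queue on our own workqueue */
	return 0;
}

static void __exit my_wq_exit(void)
{
	flush_workqueue(wq);		/* wait for everything queued on wq */
	destroy_workqueue(wq);
}

module_init(my_wq_init);
module_exit(my_wq_exit);
MODULE_LICENSE("GPL");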

Differences between workqueues and tasklets:
1. A tasklet is implemented on top of softirqs and runs in softirq context, so tasklet code must be atomic.
A workqueue is implemented with kernel threads and has no such restriction; best of all, a work function may sleep.

PS: my own driver module once called a function that may sleep from a kernel timer callback, which produced a "scheduling
while atomic" warning;
kernel timers are also implemented on top of softirqs.

2. A tasklet always runs on the same CPU on which it was originally submitted; a work item does not necessarily.
3. A tasklet cannot be given a guaranteed delay (even a short one); a workqueue can be scheduled with a delay.
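A sketch contrasting the two interfaces on points 1 and 3 (my_tasklet_fn, my_dwork_fn and some_isr are illustrative names): the tasklet runs in softirq context as soon as softirqs are processed, while delayed work runs in process context and can be queued with an explicit delay.

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void my_tasklet_fn(unsigned long data)
{
	/* softirq context: must not sleep, no controllable delay */
}
static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

static void my_dwork_fn(struct work_struct *work)
{
	/* process context: may sleep */
}
static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);

static irqreturn_t some_isr(int irq, void *dev)
{
	tasklet_schedule(&my_tasklet);					/* runs "soon", no delay control */
	schedule_delayed_work(&my_dwork, msecs_to_jiffies(100));	/* runs ~100 ms later */
	return IRQ_HANDLED;
}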


Copyright notice: this is an original article by crmn, released under the CC 4.0 BY-SA license; please include a link to the original and this notice when reposting.
Original link: https://www.cnblogs.com/crmn/articles/6602008.html