×
嵌入式 > 嵌入式开发 > 详情

Linux学习-等待队列

发布时间:2020-10-09
|
由于学习linux驱动编程,学习到了堵塞型IO读写,等待队列的操作比较的有意思,拿来分析分析,其中的一些代码还是蛮有意思的,感受到了linux的美,体会到了艺术家和一般程序员的差别。
我就简要的分析分析等待队列的一些问题,就相当于自己的总结吧。边学驱动,边学内核,还是蛮有意思的。
1、等待队列的定义,包括两个,等待队列头,节点。
/* Head of a wait queue: a spinlock protecting a doubly linked list of waiters. */
struct __wait_queue_head {
spinlock_t lock; /* spinlock protecting task_list */
struct list_head task_list; /* list head linking the queued waiters */
};
typedef struct __wait_queue_head wait_queue_head_t;
...
/* One waiter node: the task to wake, the wake-up callback, and list linkage. */
struct __wait_queue {
unsigned int flags; /* WQ_FLAG_* bits */
#define WQ_FLAG_EXCLUSIVE 0x01
void *private; /* set to `current` by DEFINE_WAIT: the waiting task */
wait_queue_func_t func; /* wake-up callback, e.g. autoremove_wake_function */
struct list_head task_list; /* linkage into the queue head's task_list */
};
/*关于等待队列的操作主要是初始化操作*/
/* Define and statically initialize a wait queue head in a single statement.
 * NOTE: the '\' line continuations were lost in the article's transcription;
 * restored here so the multi-line macro is valid C. */
#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
/*就是初始化两个元素*/
/* Static initializer: an unlocked spinlock plus an empty (self-pointing) list.
 * Line continuations restored — without them the macro would not compile. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }
/*
 * Runtime initializer for a wait queue head.  The static lock_class_key
 * gives each call site its own lockdep class.  ('\' continuations restored.)
 */
#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
		__init_waitqueue_head((q), &__key);	\
	} while (0)
/* Runtime initialization of a wait queue head: lock, lockdep class, empty list. */
void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key)
{
spin_lock_init(&q->lock);
lockdep_set_class(&q->lock, key); /* register the lock under the caller's lockdep class */
INIT_LIST_HEAD(&q->task_list); /* empty list: head points to itself */
}
从上面的定义可知,实质上等待队列头很简单,主要就是一个链表头,而等待队列的节点主要包含了一个函数指针和对应的参数,以及链表。
我们在驱动过程中主要使用的函数主要包括wait_event(),wait_event_interruptible(),wait_event_killable(),以及唤醒过程中的wake_up(),wake_up_interruptible()。
基本的流程就是:
/*
 * Sleep (uninterruptibly) until @condition becomes true.  The condition is
 * tested once up front so the already-true case never touches the queue.
 * ('\' line continuations restored from the transcription.)
 */
#define wait_event(wq, condition)			\
do {							\
	/* already satisfied: skip the slow path */	\
	if (condition)					\
		break;					\
	/* queue ourselves and sleep */			\
	__wait_event(wq, condition);			\
} while (0)
/*
 * Slow path of wait_event(): put the current task on @wq and sleep until
 * @condition holds.  ('\' line continuations restored.)
 */
#define __wait_event(wq, condition)					\
do {									\
	/* on-stack wait queue node for the current task */		\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		/* (re)queue the node and mark the task sleeping */	\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		/*							\
		 * Give up the CPU.  The task sleeps inside		\
		 * schedule() until wake_up() on this queue makes it	\
		 * runnable again; schedule() then returns and the	\
		 * loop re-tests the condition.  Only a wake-up with	\
		 * the condition true exits the loop - a spurious	\
		 * wake-up simply goes back to sleep.			\
		 */							\
		schedule();						\
	}								\
	/* dequeue the node and restore TASK_RUNNING */			\
	finish_wait(&wq, &__wait);					\
} while (0)
/* On-stack waiter for the current task using the default wake function. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

/* On-stack waiter with a caller-chosen wake function.
 * ('\' line continuations restored from the transcription.) */
#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}
/*
 * Queue @wait on @q (non-exclusive) and set the caller's task state.
 * Called every trip around the __wait_event() loop before re-testing the
 * condition, so it must tolerate @wait already being on the list.
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
unsigned long flags;
/* clear the exclusive flag: this waiter is woken by any wake_up */
wait->flags &= ~WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&q->lock, flags);
/* only link the node if it is not already queued; repeated calls from
   the wait loop must not re-add it and corrupt the list */
if (list_empty(&wait->task_list))
__add_wait_queue(q, wait);
/* mark the current task as sleeping (e.g. TASK_UNINTERRUPTIBLE) */
set_current_state(state);
spin_unlock_irqrestore(&q->lock, flags);
}
/* Store the new task state via set_mb() (store plus barrier) so the state
 * change is visible to a concurrent waker.  ('\' continuation restored.) */
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))
/* Link @new at the head of @head's list; caller must hold the queue lock. */
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
/* plain list insertion; all locking is the caller's responsibility */
list_add(&new->task_list, &head->task_list);
}
/* Locked, non-exclusive variant of __add_wait_queue() for external callers. */
void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;
wait->flags &= ~WQ_FLAG_EXCLUSIVE; /* non-exclusive: any wake_up wakes it */
spin_lock_irqsave(&q->lock, flags);
__add_wait_queue(q, wait);
spin_unlock_irqrestore(&q->lock, flags);
}
/* Undo prepare_to_wait(): restore TASK_RUNNING and unlink @wait if queued. */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;
/* set the task back to TASK_RUNNING so it stays schedulable */
__set_current_state(TASK_RUNNING);
/*
 * We can check for list emptiness outside the lock
 * IFF:
 * - we use the "careful" check that verifies both
 * the next and prev pointers, so that there cannot
 * be any half-pending updates in progress on other
 * CPUs that we haven't seen yet (and that might
 * still change the stack area).
 * and
 * - all other users take the lock (ie we can only
 * have _one_ other CPU that looks at or modifies
 * the list).
 */
/* unlink the node from the queue (list_del_init leaves it self-pointing) */
if (!list_empty_careful(&wait->task_list)) {
spin_lock_irqsave(&q->lock, flags);
list_del_init(&wait->task_list);
spin_unlock_irqrestore(&q->lock, flags);
}
}
/*
 * Core scheduler entry point: pick the next runnable task and context-switch
 * to it.  A task that set itself to a sleeping state (as prepare_to_wait()
 * does) is dequeued here and will not run again until woken.
 */
asmlinkage void __sched schedule(void)
{
struct task_struct *prev, *next;
unsigned long *switch_count;
struct rq *rq;
int cpu;
need_resched:
preempt_disable();
cpu = smp_processor_id();
rq = cpu_rq(cpu); /* this CPU's runqueue */
rcu_note_context_switch(cpu);
prev = rq->curr;
release_kernel_lock(prev);
need_resched_nonpreemptible:
schedule_debug(prev);
if (sched_feat(HRTICK))
hrtick_clear(rq);
raw_spin_lock_irq(&rq->lock);
switch_count = &prev->nivcsw; /* default: count as involuntary switch */
/* prev is in a sleeping state and this is not a mid-preemption pass */
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely(signal_pending_state(prev->state, prev))) {
/* a pending signal cancels the sleep for signal-sensitive states */
prev->state = TASK_RUNNING;
} else {
/*
 * If a worker is going to sleep, notify and
 * ask workqueue whether it wants to wake up a
 * task to maintain concurrency. If so, wake
 * up the task.
 */
if (prev->flags & PF_WQ_WORKER) {
struct task_struct *to_wakeup;
to_wakeup = wq_worker_sleeping(prev, cpu);
if (to_wakeup)
try_to_wake_up_local(to_wakeup);
}
/* remove the sleeping task from the runqueue */
deactivate_task(rq, prev, DEQUEUE_SLEEP);
}
switch_count = &prev->nvcsw; /* count as voluntary switch instead */
}
pre_schedule(rq, prev);
if (unlikely(!rq->nr_running))
idle_balance(cpu, rq); /* try to pull work from other CPUs before idling */
put_prev_task(rq, prev);
next = pick_next_task(rq); /* ask the scheduling classes what runs next */
clear_tsk_need_resched(prev);
rq->skip_clock_update = 0;
if (likely(prev != next)) {
sched_info_switch(prev, next);
perf_event_task_sched_out(prev, next);
rq->nr_switches++;
rq->curr = next;
++*switch_count;
context_switch(rq, prev, next); /* unlocks the rq */
/*
 * The context switch have flipped the stack from under us
 * and restored the local variables which were saved when
 * this task called schedule() in the past. prev == current
 * is still correct, but it can be moved to another cpu/rq.
 */
cpu = smp_processor_id();
rq = cpu_rq(cpu);
} else
raw_spin_unlock_irq(&rq->lock);
post_schedule(rq);
if (unlikely(reacquire_kernel_lock(prev)))
goto need_resched_nonpreemptible;
preempt_enable_no_resched();
if (need_resched())
goto need_resched; /* something became runnable meanwhile: reschedule */
}
根据上面的各个函数,宏定义可知,在wait_event函数中完成了大部分的事情,其中包括等待队列节点的定义,添加,当前进程运行状态的改变,等待条件的满足,跳出等待,函数返回之前需要完成的任务是修改当前进程的状态为TASK_RUNNING,删除链表,释放一些空间。
其他的函数wait_event_interruptible以及wait_event_killable具有相似的操作,只是对前期修改进程状态存在差别。wait_event_interruptible则不一定只能在条件满足时唤醒,也可以被信号唤醒,而wait_event则在条件满足时被唤醒。


『本文转载自网络,版权归原作者所有,如有侵权请联系删除』

热门文章 更多
CC-Link现场总线及应用实例