20
1 Wait Queue & Wait Queue Head task_struct ... wait_queue_head wait_queue flag func task next prev wait_queue flag func task next prev wait_queue flag func task next prev lock next prev task_struct task_struct autoremove_wake_function() default_wake_function() Other_callback_function

Wait queue

  • Upload
    roy-lee

  • View
    1.886

  • Download
    4

Embed Size (px)

DESCRIPTION

outdated

Citation preview

1

Wait Queue & Wait Queue Head

task_struct

...

wait_queue_head wait_queue

flag func

task

next prev

wait_queue

flag func

task

next prev

wait_queue

flag func

task

next prev

lock

next prev

task_struct task_struct

autoremove_wake_function() default_wake_function()

Other_callback_function

2

wait_queue_head_t

#define DECLARE_WAIT_QUEUE_HEAD(name) \

wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \

.lock = SPIN_LOCK_UNLOCKED, \

.task_list = { &(name).task_list, &(name).task_list } }

/*
 * A wait-queue head: a spinlock protecting a circular doubly-linked list of
 * waiter entries (struct __wait_queue, linked through their task_list field).
 */
struct __wait_queue_head {

/* guards task_list; taken with irqsave in prepare_to_wait()/__wake_up() below */
spinlock_t lock;

/* list head of queued waiters; self-linked when empty */
struct list_head task_list;

};

/* NOTE(review): the trailing "wait_queue_head" after the semicolon is
 * slide-extraction residue (a diagram label), not code. */
typedef struct __wait_queue_head wait_queue_head_t; wait_queue_head

lock

next prev

3

wait_queue_t

/*
 * DECLARE_WAITQUEUE(name, tsk): define a wait-queue entry on behalf of task
 * tsk, using default_wake_function as its wake callback.  The entry's
 * task_list starts as {NULL, NULL}, i.e. not yet linked into any queue.
 * (Contrast with DEFINE_WAIT(), which uses `current` and
 * autoremove_wake_function and self-links the entry.)
 *
 * NOTE(review): blank lines between the backslash continuations are
 * extraction residue; the original macro lines are adjacent.
 */
#define DECLARE_WAITQUEUE(name, tsk) \

wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAITQUEUE_INITIALIZER(name, tsk) { \

.task = tsk, \

.func = default_wake_function, \

.task_list = { NULL, NULL } }

/*
 * One entry on a wait queue: the sleeping task, the callback invoked to wake
 * it, and the list linkage into the owning wait_queue_head's task_list.
 */
struct __wait_queue {

/* flag bits; only WQ_FLAG_EXCLUSIVE is defined here */
unsigned int flags;

/* exclusive waiters are counted against nr_exclusive in __wake_up_common() */
#define WQ_FLAG_EXCLUSIVE 0x01

/* the task to be woken */
struct task_struct * task;

/* wake callback, e.g. default_wake_function or autoremove_wake_function */
wait_queue_func_t func;

/* linkage into wait_queue_head_t.task_list */
struct list_head task_list;

};

typedef struct __wait_queue wait_queue_t;

wait_queue

flag func

task

next prev

4

Wake Up Control Flow

wake_up(x)

__wake_up()

__wake_up_common()

try_to_wake_up()

curr->func()

resched_task

[include/linux/wait.h]

[kernel/sched.c]

__wait_event()

DEFINE_WAIT()

prepare_to_wait()

schedule()

finish_wait

if (condition)?

__add_wait_queue()

no / yes

assign a handler

autoremove_wake_function()

loop

wait_event()

5

wait_event() & __wait_event()

/*
 * __wait_event(wq, condition): sleep uninterruptibly on wq until condition
 * becomes true.  prepare_to_wait() is called before each condition test so
 * that a wakeup arriving between the test and schedule() is not lost — the
 * task state is already set, so schedule() will return promptly.
 *
 * NOTE(review): blank lines between the backslash continuations are
 * extraction residue; the original macro lines are adjacent.
 */
#define __wait_event(wq, condition) \

do { \

DEFINE_WAIT(__wait); \

\

for (;;) { \

prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);\

if (condition) \

break; \

schedule(); \

} \

finish_wait(&wq, &__wait); \

} while (0)

/*
 * wait_event(wq, condition): fast-path wrapper — if the condition already
 * holds, skip queuing entirely; otherwise fall into __wait_event().
 */
#define wait_event(wq, condition) \

do { \

if (condition) \

break; \

__wait_event(wq, condition); \

} while (0)

6

DEFINE_WAIT() &

autoremove_wake_function()

/*
 * DEFINE_WAIT(name): define a wait-queue entry for the current task whose
 * wake callback (autoremove_wake_function) removes the entry from the queue
 * on successful wakeup.  task_list is initialized self-linked, which is what
 * lets prepare_to_wait()'s list_empty() test detect "not yet queued".
 *
 * NOTE(review): blank lines between the backslash continuations are
 * extraction residue; the original macro lines are adjacent.
 */
#define DEFINE_WAIT(name) \

wait_queue_t name = { \

.task = current, \

.func = autoremove_wake_function, \

.task_list = LIST_HEAD_INIT((name).task_list), \

}

/*
 * Wake callback that dequeues itself: delegate to default_wake_function(),
 * and if the task was actually woken (ret != 0), unlink the entry from the
 * wait queue.  list_del_init() leaves task_list self-linked, so subsequent
 * list_empty()/list_empty_careful() checks see the entry as detached.
 * Returns the value of default_wake_function().
 */
int autoremove_wake_function(wait_queue_t *wait,

unsigned mode, int sync, void *key)

{

int ret = default_wake_function(wait, mode, sync, key);

if (ret)

/* woken: remove from queue and re-init so the entry reads as unqueued */
list_del_init(&wait->task_list);

return ret;

}

7

prepare_to_wait()

/*
 * prepare_to_wait(): queue `wait` on `q` (non-exclusively) and move the
 * current task into `state`, all under q->lock with interrupts disabled.
 * Safe to call repeatedly in a loop: the entry is only added if it is not
 * already linked (list_empty on a self-linked entry — see DEFINE_WAIT /
 * list_del_init).  Setting the task state inside the same critical section
 * as the enqueue is what closes the lost-wakeup window for callers that
 * test their condition after this returns.
 */
void fastcall

prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)

{

unsigned long flags;

/* this entry point always waits non-exclusively */
wait->flags &= ~WQ_FLAG_EXCLUSIVE;

spin_lock_irqsave(&q->lock, flags);

if (list_empty(&wait->task_list))

__add_wait_queue(q, wait);

/*

* don't alter the task state if this is just going to

* queue an async wait queue callback

*/

if (is_sync_wait(wait))

set_current_state(state);

spin_unlock_irqrestore(&q->lock, flags);

}

is_sync_wait() is used to distinguish between synchronous and asynchronous I/O wait contexts

wait_queue

flag func

task

next prev

8

finish_wait()

/*
 * finish_wait(): the tail of a wait loop.  Put the task back to
 * TASK_RUNNING, then unlink `wait` from the queue if it is still linked.
 * list_empty_careful() is the lock-free pre-check: when the wake callback
 * already removed the entry (autoremove_wake_function), the spinlock is
 * never taken here.
 */
void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)

{

unsigned long flags;

__set_current_state(TASK_RUNNING);

if (!list_empty_careful(&wait->task_list)) {

spin_lock_irqsave(&q->lock, flags);

/* leave the entry self-linked so it reads as unqueued afterwards */
list_del_init(&wait->task_list);

spin_unlock_irqrestore(&q->lock, flags);

}

}

9

wait_event()

__wait_event()

__wait_event_timeout()

wait_event_timeout()

__wait_event_interruptible()

wait_event_interruptible()

__wait_event_interruptible_timeout()

wait_event_interruptible_timeout()

... ...

......

__wait_event_interruptible_exclusive()

wait_event_interruptible_exclusive()

...

__wait_event_exclusive()

wait_event_exclusive()

...

wait_event*() [include/linux/wait.h]

10

__wait_event_interruptible()

/*
 * __wait_event_interruptible(wq, condition, ret): like __wait_event() but
 * sleeps in TASK_INTERRUPTIBLE; if a signal is pending, abort the wait and
 * store -ERESTARTSYS into `ret` (an lvalue supplied by the caller).
 *
 * NOTE(review): blank lines between the backslash continuations are
 * extraction residue; the original macro lines are adjacent.
 */
#define __wait_event_interruptible(wq, condition, ret) \

do { \

DEFINE_WAIT(__wait); \

\

for (;;) { \

prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \

if (condition) \

break; \

if (!signal_pending(current)) { \

schedule(); \

continue; \

} \

/* interrupted by a signal: report restartable-syscall error */ \

ret = -ERESTARTSYS; \

break; \

} \

finish_wait(&wq, &__wait); \

} while (0)

/*
 * wait_event_interruptible(wq, condition): statement-expression wrapper.
 * Evaluates to 0 when the condition was (or became) true, or -ERESTARTSYS
 * when the wait was interrupted by a signal.
 */
#define wait_event_interruptible(wq, condition) \

({ \

int __ret = 0; \

if (!(condition)) \

__wait_event_interruptible(wq, condition, __ret); \

__ret; \

})

11

__wait_event_timeout()

/*
 * __wait_event_timeout(wq, condition, ret): uninterruptible wait bounded by
 * a timeout.  `ret` carries the remaining jiffies: schedule_timeout() is
 * given the current remainder and returns the new one, so the loop exits
 * either when the condition holds or when the budget reaches 0.
 *
 * NOTE(review): blank lines between the backslash continuations are
 * extraction residue; the original macro lines are adjacent.
 */
#define __wait_event_timeout(wq, condition, ret) \

do { \

DEFINE_WAIT(__wait); \

\

for (;;) { \

prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);\

if (condition) \

break; \

/* ret is both the budget passed in and the remainder coming out */ \

ret = schedule_timeout(ret); \

if (!ret) \

break; \

} \

finish_wait(&wq, &__wait); \

} while (0)

/*
 * wait_event_timeout(wq, condition, timeout): evaluates to the remaining
 * jiffies (nonzero if the condition came true in time, 0 on timeout).  If
 * the condition already holds, the full timeout is returned untouched.
 */
#define wait_event_timeout(wq, condition, timeout) \

({ \

long __ret = timeout; \

if (!(condition)) \

__wait_event_timeout(wq, condition, __ret); \

__ret; \

})

12

__wait_event_interruptible_exclusive()

/*
 * __wait_event_interruptible_exclusive(wq, condition, ret): identical to
 * __wait_event_interruptible() except that the entry is queued via
 * prepare_to_wait_exclusive(), i.e. with WQ_FLAG_EXCLUSIVE set, so a
 * wake_up() that passes nr_exclusive==1 wakes at most one such waiter
 * (see the nr_exclusive accounting in __wake_up_common()).
 *
 * NOTE(review): blank lines between the backslash continuations are
 * extraction residue; the original macro lines are adjacent.
 */
#define __wait_event_interruptible_exclusive(wq, condition, ret) \

do { \

DEFINE_WAIT(__wait); \

\

for (;;) { \

prepare_to_wait_exclusive(&wq, &__wait, \

TASK_INTERRUPTIBLE); \

if (condition) \

break; \

if (!signal_pending(current)) { \

schedule(); \

continue; \

} \

ret = -ERESTARTSYS; \

break; \

} \

finish_wait(&wq, &__wait); \

} while (0)

/*
 * NOTE(review): slide mismatch — the wrapper below is
 * wait_event_interruptible_timeout(), which calls
 * __wait_event_interruptible_timeout() (not shown on this slide), rather
 * than the exclusive variant documented above.  Evaluates to the remaining
 * jiffies, or a negative error from the inner macro.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout) \

({ \

long __ret = timeout; \

if (!(condition)) \

__wait_event_interruptible_timeout(wq, condition, __ret); \

__ret; \

})

13

wake_up*()

/*
 * The wake_up*() macro family: thin wrappers around __wake_up() and friends
 * that fix the state mask (which task states may be woken) and the
 * nr_exclusive limit.  nr_exclusive==1 wakes at most one exclusive waiter;
 * nr_exclusive==0 wakes them all (the !--nr_exclusive test in
 * __wake_up_common() never hits zero when starting from 0).
 *
 * NOTE(review): blank lines between the backslash continuations are
 * extraction residue; the original macro lines are adjacent.
 */
#define wake_up(x) \

__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)

#define wake_up_nr(x, nr) \

__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)

#define wake_up_all(x) \

__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)

#define wake_up_interruptible(x) \

__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)

#define wake_up_interruptible_nr(x, nr) \

__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)

#define wake_up_interruptible_all(x)\

__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)

/* caller already holds q->lock */
#define wake_up_locked(x) \

__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)

/* "sync" variant: the waker expects to yield the CPU soon itself */
#define wake_up_interruptible_sync(x) \

__wake_up_sync((x),TASK_INTERRUPTIBLE, 1)

[include/linux/wait.h]

[kernel/sched.c]

14

wake_up*()

wake_up(x)

__wake_up()

__wake_up_common(q,…)

default_wake_function()

init_waitqueue_entry()

wake_up_all(x)

wake_up_locked(x)

wake_up_nr(x, nr) wake_up_interruptible_sync(x)

__wake_up_locked() __wake_up_sync()

try_to_wake_up()

q->func() assign

[include/linux/wait.h]

[kernel/sched.c]

15

__wake_up()

/*
 * __wake_up(): lock the wait-queue head (irq-safe) and walk its waiters.
 * mode is the mask of task states eligible for wakeup, nr_exclusive limits
 * how many WQ_FLAG_EXCLUSIVE waiters are woken (0 = no limit), and key is
 * passed through to each entry's wake callback.  sync is hard-wired to 0
 * here — the non-synchronous wakeup path.
 */
void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,

int nr_exclusive, void *key)

{

unsigned long flags;

spin_lock_irqsave(&q->lock, flags);

__wake_up_common(q, mode, nr_exclusive, 0, key);

spin_unlock_irqrestore(&q->lock, flags);

}

16

__wake_up_common()

/*
 * __wake_up_common(): core wakeup loop; caller holds q->lock.  Iterates the
 * waiter list with list_for_each_safe() because a wake callback may unlink
 * its own entry (see autoremove_wake_function).  For the same reason,
 * curr->flags is sampled before invoking the callback.  The walk stops once
 * nr_exclusive successfully-woken WQ_FLAG_EXCLUSIVE waiters have been
 * counted down to zero; non-exclusive waiters never decrement the count, so
 * they are all woken.
 */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,

int nr_exclusive, int sync, void *key)

{

struct list_head *tmp, *next;

list_for_each_safe(tmp, next, &q->task_list) {

wait_queue_t *curr;

unsigned flags;

curr = list_entry(tmp, wait_queue_t, task_list);

/* sample before the callback: the entry may remove itself inside func */
flags = curr->flags;

if (curr->func(curr, mode, sync, key) &&

(flags & WQ_FLAG_EXCLUSIVE) &&

!--nr_exclusive)

break;

}

}

17

default_wake_function()

/*
 * default_wake_function(): the stock wake callback — simply forward the
 * entry's task to try_to_wake_up().  Returns try_to_wake_up()'s result
 * (nonzero if the task was actually moved to the runqueue).  `key` is
 * ignored here.
 */
int default_wake_function(wait_queue_t *curr,

unsigned mode, int sync, void *key)

{

task_t *p = curr->task;

return try_to_wake_up(p, mode, sync);

}

18

/*
 * try_to_wake_up(): move task p to the runqueue if its current state is
 * covered by the `state` mask.  Returns 1 if the task was activated here,
 * 0 otherwise (state mismatch, or already runnable).
 *
 * NOTE(review): this excerpt is split by slide captions ("SMP part
 * omitted", "try_to_wake_up() (1/2)", "19", "(2/2)") embedded mid-function
 * by the extraction — those lines are not code.  The SMP load-balancing
 * logic between the two halves is omitted on the slide.
 */
static int try_to_wake_up(task_t * p, unsigned int state, int sync)

{

int cpu, this_cpu, success = 0;

unsigned long flags;

long old_state;

runqueue_t *rq;

/* pin p to its runqueue and disable irqs for the duration */
rq = task_rq_lock(p, &flags);

old_state = p->state;

/* waker only wakes tasks in the states it asked for */
if (!(old_state & state))

goto out;

/* already on a runqueue: just make sure the state says running */
if (p->array)

goto out_running;

cpu = task_cpu(p);

this_cpu = smp_processor_id();

SMP part omitted

try_to_wake_up() (1/2)

19

try_to_wake_up() (2/2)

/* leaving uninterruptible sleep: fix the bookkeeping counter */
if (old_state == TASK_UNINTERRUPTIBLE) {

rq->nr_uninterruptible--;

p->activated = -1;

}

/* enqueue p; third arg tells activate_task whether it's a local wakeup */
activate_task(p, rq, cpu == this_cpu);

/*
 * sync wakeup on this CPU: the waker has promised to yield soon, so skip
 * the preemption check; otherwise preempt the current task if p outranks it
 */
if (!sync || cpu != this_cpu) {

if (TASK_PREEMPTS_CURR(p, rq))

resched_task(rq->curr);

}

success = 1;

out_running:

p->state = TASK_RUNNING;

out:

task_rq_unlock(rq, &flags);

return success;

}

SMP part omitted

The waker said “I will leave the cpu soon, so

you don’t have to trigger a preemption.”

Put it back into the runqueue

/*
 * TASK_PREEMPTS_CURR(p, rq): true when p's prio value is numerically lower
 * than the running task's (in this scheduler, a lower prio value wins —
 * TODO confirm against the surrounding kernel version's prio convention).
 *
 * NOTE(review): the blank line after the backslash continuation is
 * extraction residue.
 */
#define TASK_PREEMPTS_CURR(p, rq) \

((p)->prio < (rq)->curr->prio)

/* resched_task(): flag p as needing reschedule (uniprocessor version —
 * it only sets TIF_NEED_RESCHED; no cross-CPU notification here). */
static inline void resched_task(task_t *p)

{

set_tsk_need_resched(p);

}

20

list_head

/*
 * Generic circular doubly-linked list node.  It is embedded inside client
 * structures; an empty list is a node whose next and prev point at itself
 * (cf. __WAIT_QUEUE_HEAD_INITIALIZER above).
 */
struct list_head {
	struct list_head *next;   /* successor element */
	struct list_head *prev;   /* predecessor element */
};

list_add(new, head)

list_add_tail(new, head)

list_del(entry)

list_empty(head)

list_entry(head,t,member)

list_for_each(pos,head)

...