Arm Linux Kernel Hacks

rousalome.egloos.com

포토로그 Kernel Crash


통계 위젯 (화이트)

493
557
422263


[Kernel][Workqueue] flush_work(), 배리어 워크(barrier_work, wq_barrier) 7. 워크큐(Workqueue)

barrier work에 대해서 알아보겠습니다. 
flush_work 함수는 두 가지 상황에서 쓰입니다. 두 가지 경우에 barrier work가 어떻게 쓰이는지 알아볼게요.

1. 현재 다른 워커 쓰레드에서 동일한 워크가 실행 중에 동일한 work을 flush한 경우
           <...>-386   [001] ...1   143.380287: workqueue_execute_start: work struct e880e910: function sdhci_pm_qos_cpu_unvote_work
//...           
     mmc-cmdqd/0-339   [000] ...1   143.381065: flush_work <-__cancel_work_timer
     mmc-cmdqd/0-339   [000] ...1   143.381084: <stack trace>
 => flush_work
 => __cancel_work_timer
 => cancel_delayed_work_sync
 => sdhci_pm_qos_cpu_vote
 => cmdq_request
 => mmc_cmdq_start_req
 => mmc_blk_cmdq_issue_rq
 => mmc_cmdq_thread
 => kthread
 => ret_from_fork
     mmc-cmdqd/0-339   [000] d..1   143.381089: flush_work: [+++][Bret] start  [F:start_flush_work, L:2671] caller(__cancel_work_timer+0x130/0x1bc)
     mmc-cmdqd/0-339   [000] d..1   143.381092: flush_work: [+][Bret] work_struct->func: sdhci_pm_qos_cpu_unvote_work+0x0/0x34   [F:insert_wq_barrier, L:2344] caller(__cancel_work_timer+0x130/0x1bc)
     mmc-cmdqd/0-339   [000] d..3   143.381127: sched_switch: prev_comm=mmc-cmdqd/0 prev_pid=339 prev_prio=120 prev_state=D ==> next_comm=swapper/0 next_pid=0 next_prio=120
//...
           <...>-386   [001] ...1   143.382352: workqueue_execute_end: work struct e880e910
           <...>-386   [001] ...1   143.382356: workqueue_execute_start: work struct e8881cf8: function wq_barrier_func
           <...>-386   [001] ...1   143.382360: wq_barrier_func <-process_one_work
          <idle>-0     [000] d..3   143.382361: sched_switch: prev_comm=swapper/0 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=DispSync next_pid=596 next_prio=97
           <...>-386   [001] ...1   143.382380: <stack trace>
 => wq_barrier_func
 => process_one_work
 => process_scheduled_works
 => worker_thread
 => kthread
 => ret_from_fork
          <...>-386   [001] d..4   143.382400: sched_wakeup: comm=mmc-cmdqd/0 pid=339prio=120 success=1 target_cpu=000
          <...>-386   [001] ...1   143.382403: workqueue_execute_end: work struct e8881cf8
          <...>-596   [000] d..3   143.382468: sched_switch: prev_comm=DispSync prev_pid=596 prev_prio=97 prev_state=S ==> next_comm=mmc-cmdqd/0 next_pid=339 next_prio=120
     mmc-cmdqd/0-339   [000] ....   143.382476: flush_work.part.6: [-][Bret] barrier work wait complete  [F:flush_work, L:2716] caller(flush_work+0x1b8/0x1fc)

2. 다른 커널 쓰레드에서 워크를 실행하고 싶은 경우
       core_ctl/0-359   [006] ...1   148.104238: <stack trace>
  => flush_work
 => workqueue_cpu_down_callback
 => notifier_call_chain
 => __raw_notifier_call_chain
 => __cpu_notify
 => _cpu_down
 => cpu_down
 => cpu_subsys_offline
 => device_offline
 => core_ctl_offline_core
 => do_hotplug
 => try_hotplug
 => kthread
 => ret_from_fork

flush_work 함수 코드 리뷰
여기서 배리어 워크가 등장합니다. start_flush_work() 함수가 true를 리턴하면 wait_for_completion(&barr.done);에서 워커 쓰레드가 complete(&barr.done);를 호출할 때까지 기다립니다.
/*
 * flush_work() - block until @work finishes executing.
 * Queues an on-stack barrier work right after @work and sleeps on its
 * completion. Returns true if it waited, false if @work was idle
 * (neither pending nor running).
 */
2843bool flush_work(struct work_struct *work)
2844{
2845 struct wq_barrier barr; /* on-stack barrier work + completion */
2846
2847 lock_map_acquire(&work->lockdep_map); /* lockdep annotation only */
2848 lock_map_release(&work->lockdep_map);
2849
2850 if (start_flush_work(work, &barr)) { /* barrier queued behind @work? */
2851  wait_for_completion(&barr.done); /* released by wq_barrier_func() */
2852  destroy_work_on_stack(&barr.work);
2853  return true;
2854 } else {
2855  return false; /* nothing to flush */
2856 }
2857}
2858EXPORT_SYMBOL_GPL(flush_work);

start_flush_work 코드 리뷰
/*
 * start_flush_work() - try to queue barrier @barr behind @work.
 * Locates the pool/pwq that owns @work (pending case) or the worker
 * currently executing it (running case) and inserts the barrier there.
 * Returns false if @work is neither pending nor running.
 */
2781static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
2782{
2783 struct worker *worker = NULL;
2784 struct worker_pool *pool;
2785 struct pool_workqueue *pwq;
2786
2787 might_sleep();
2788
2789 local_irq_disable();
2790 pool = get_work_pool(work);
2791 if (!pool) { /* work has no pool: already finished, nothing to flush */
2792  local_irq_enable();
2793  return false;
2794 }
2795
2796 spin_lock(&pool->lock);
2797 /* see the comment in try_to_grab_pending() with the same code */
2798 pwq = get_work_pwq(work);
2799 if (pwq) { /* @work is still pending on a pwq */
2800  if (unlikely(pwq->pool != pool)) /* pool changed under us */
2801   goto already_gone;
2802 } else { /* not pending: maybe a worker is executing it right now */
2803  worker = find_worker_executing_work(pool, work);  //<<<--[1]
2804  if (!worker)
2805   goto already_gone;
2806  pwq = worker->current_pwq;
2807 }
2808
2809 check_flush_dependency(pwq->wq, work);
2810
2811 insert_wq_barrier(pwq, barr, work, worker);  //<<<--[2]
2812 spin_unlock_irq(&pool->lock);
2813
2814 /*
2815  * If @max_active is 1 or rescuer is in use, flushing another work
2816  * item on the same workqueue may lead to deadlock.  Make sure the
2817  * flusher is not running on the same workqueue by verifying write
2818  * access.
2819  */
2820 if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)
2821  lock_map_acquire(&pwq->wq->lockdep_map);
2822 else
2823  lock_map_acquire_read(&pwq->wq->lockdep_map);
2824 lock_map_release(&pwq->wq->lockdep_map);
2825
2826 return true;
2827already_gone:
2828 spin_unlock_irq(&pool->lock);
2829 return false;
2830}

[1]: 워커 풀과 work로 현재 실행 중인 worker를 가져옵니다.
[2]: 배리어 워크를 추가합니다.

find_worker_executing_work 코드 리뷰
/*
 * find_worker_executing_work() - look up the worker in @pool that is
 * currently executing @work, or NULL if none is.  Workers executing a
 * work item are hashed into pool->busy_hash keyed by the work address.
 */
1000static struct worker *find_worker_executing_work(struct worker_pool *pool,
1001       struct work_struct *work)
1002{
1003 struct worker *worker;
1004
1005 hash_for_each_possible(pool->busy_hash, worker, hentry,  //<<--[1]
1006          (unsigned long)work)
1007  if (worker->current_work == work &&
1008      worker->current_func == work->func) /* both pointer and func must match */
1009   return worker;
1010
1011 return NULL;
1012}

[1]: 실행 중인 워커는 struct worker_pool.busy_hash 해시 테이블에 등록됩니다. 이 해시 테이블을 순회하면서 워커를 찾습니다.
워커가 실행 중인 current_work와 current_func이 찾으려는 work 및 work->func과 모두 같을 때 해당 워커를 리턴합니다.

다음 ftrace log를 보면 해당 디버깅 정보를 확인할 수 있습니다.
     kworker/1:3-387   [001] ...1   166.380674: flush_work <-__cancel_work_timer
     kworker/1:3-387   [001] ...1   166.380725: <stack trace>
 => flush_work
 => __cancel_work_timer
 => cancel_delayed_work_sync
 => sdhci_ppp_bus_cancel_work_and_set_vote
 => sdhci_ppp_bus_voting
 => sdhci_ppp_set_clock
 => sdhci_do_set_ios
 => sdhci_set_ios
 => mmc_set_ios
 => mmc_gate_clock
 => mmc_host_clk_gate_delayed
 => mmc_host_clk_gate_work
 => process_one_work
 => worker_thread
 => kthread
 => ret_from_fork
mmc-cmdqd/0-340   [006] d..1   166.380882: flush_work: [+][Bret] work_struct->func: mmc_host_clk_gate_work+0x0/0x20   [F:insert_wq_barrier, L:2344] caller(__cancel_work_timer+0x130/0x1bc)

mmc_host_clk_gate_work 워크가 kworker/1:3-387 워커 쓰레드에 의해 실행 중인데, 
mmc_host_clk_gate_work 워크를 flush_work(&mmc_host_clk_gate_work)로 호출하는 경우입니다.
/*
 * insert_wq_barrier() - initialize barrier @barr (callback: wq_barrier_func)
 * and queue it so it runs right after @target.  If a @worker is executing
 * @target, the barrier goes on the worker's scheduled list; otherwise it is
 * linked directly after the pending @target.
 */
static void insert_wq_barrier(struct pool_workqueue *pwq,
2350         struct wq_barrier *barr,
2351         struct work_struct *target, struct worker *worker)
2352{
2353 struct list_head *head;
2354 unsigned int linked = 0;
2355
2356 /*
2357  * debugobject calls are safe here even with pool->lock locked
2358  * as we know for sure that this will not trigger any of the
2359  * checks and call back into the fixup functions where we
2360  * might deadlock.
2361  */
2362 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);  //<<--[1]
2363 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2364 init_completion(&barr->done); /* flush_work() sleeps on this */
2365
2366 /*
2367  * If @target is currently being executed, schedule the
2368  * barrier to the worker; otherwise, put it after @target.
2369  */
2370 if (worker)
2371  head = worker->scheduled.next;  //<<--[2]
2372 else {
2373  unsigned long *bits = work_data_bits(target);
2374
2375  head = target->entry.next;  //<<--[3]
2376  /* there can already be other linked works, inherit and set */
2377  linked = *bits & WORK_STRUCT_LINKED;
2378  __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2379 }
2380
2381 debug_work_activate(&barr->work);
2382 insert_work(pwq, &barr->work, head,  //<<--[4]
2383      work_color_to_flags(WORK_NO_COLOR) | linked);
2384}

[1]: 콜백 함수를 wq_barrier_func로 배리어 워크를 초기화합니다.
[2]: 만약 현재 구동 중인 워커 쓰레드에서 돌고 있는 워크를 flush_work로 호출한 경우 실행됩니다.
워커의 worker->scheduled.next 주소를 head로 가져옵니다.
[3]: 현재 워커 쓰레드가 아닌 다른 커널 쓰레드에서 flush_work가 호출됐을 때 실행됩니다.
현재 워크(struct work_struct)의 entry 주소를 head로 가져옵니다.
[4]: insert_work()를 호출해서 배리어 워크를 head 위치(대상 워크 바로 다음)에 추가합니다.

[2],[3]번 코드 조건에 따라 다음과 같이 링크됩니다.
&barr->work->entry = worker->scheduled.next
&barr->work->entry = target->entry.next

이번에는 insert_work() 함수 코드를 분석하겠습니다.
/*
 * insert_work() - link @work into the list just before @head with
 * @extra_flags, then wake a worker of the pool if one is needed.
 * Must be called with pool->lock held (callers hold it).
 */
1274static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1275   struct list_head *head, unsigned int extra_flags)
1276{
1277 struct worker_pool *pool = pwq->pool;
1278
1279 /* we own @work, set data and link */
1280 set_work_pwq(work, pwq, extra_flags);  
1281 list_add_tail(&work->entry, head);   //<<--[1] /* inserts @work just before @head */
1282 get_pwq(pwq);
1283
1284 /*
1285  * Ensure either wq_worker_sleeping() sees the above
1286  * list_add_tail() or we see zero nr_running to avoid workers lying
1287  * around lazily while there are works to be processed.
1288  */
1289 smp_mb();
1290
1291 if (__need_more_worker(pool))
1292  wake_up_worker(pool); //<<--[2]
1293}

[1]: &work->entry를 head 바로 앞(tail 위치)에 추가합니다.
이때 struct worker_pool->worklist에도 함께 연결됩니다. 이 이유는 다음에 조금 더 상세히 알아보겠습니다.
[2]: 워커를 깨웁니다.

start_flush_work 하부 루틴 코드 리뷰를 끝내고 flush_work 함수로 돌아왔습니다.
/* Same flush_work() excerpt as above; [1] marks the wait on the barrier. */
2843bool flush_work(struct work_struct *work)
2844{
2845 struct wq_barrier barr; /* on-stack barrier work + completion */
2846
2847 lock_map_acquire(&work->lockdep_map); /* lockdep annotation only */
2848 lock_map_release(&work->lockdep_map);
2849
2850 if (start_flush_work(work, &barr)) {
2851  wait_for_completion(&barr.done);  //<<--[1] /* woken by wq_barrier_func() */
2852  destroy_work_on_stack(&barr.work);
2853  return true;
2854 } else {
2855  return false;
2856 }
2857}
2858EXPORT_SYMBOL_GPL(flush_work);

[1]: flush_work의 가장 중요한 코드입니다. barrier work(배리어 워크)가 처리될 때 까지 기다립니다.
wait_for_completion는 complete 코드가 실행되면 빠져 나옵니다.

다음 워커 쓰레드가 깨어나서 배리어 워크가 실행되면 wq_barrier_func 함수가 호출됩니다.
complete(&barr->done); 코드를 눈여겨봅시다.
/*
 * wq_barrier_func() - barrier work callback; signals the completion the
 * flusher is sleeping on in flush_work().
 */
2319static void wq_barrier_func(struct work_struct *work)
2320{
2321 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2322 complete(&barr->done); /* release wait_for_completion(&barr.done) */
2323}

다음 워커 쓰레드 동작입니다.
/*
 * worker_thread() - main loop of a kworker (excerpt; parts elided).
 * Pulls work items off the pool's worklist and executes them, handling
 * linked (barrier) works via the worker's scheduled list.
 */
2119static int worker_thread(void *__worker)
2120{
2121 struct worker *worker = __worker;
2122 struct worker_pool *pool = worker->pool;
2123
2124 /* tell the scheduler that this is a workqueue worker */
2125 worker->task->flags |= PF_WQ_WORKER;
2126woke_up:
2127 spin_lock_irq(&pool->lock);
2128
2129 /* ... management / idle handling elided in this excerpt ... */
//.....
2166 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2167
2168 do {
2169  struct work_struct *work =
2170   list_first_entry(&pool->worklist,
2171      struct work_struct, entry);  //<<--[0]
2172
2173  if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {   //<<--[1]
2174   /* optimization path, not strictly necessary */
2175   process_one_work(worker, work);  //<<--[2]
2176   if (unlikely(!list_empty(&worker->scheduled))) /* barrier queued while running? */
2177    process_scheduled_works(worker); //<<--[3]
2178  } else {
2179   /* LINKED work: move it and its followers to worker->scheduled */
2180   move_linked_works(work, &worker->scheduled, NULL);
2181   process_scheduled_works(worker);  //<<--[4]
2182  }
2183 } while (keep_working(pool));
2184
2185 worker_set_flags(worker, WORKER_PREP);
2186sleep:

[0]: 워커풀에 등록된 &pool->worklist를 가져와 워크 변수에 입력합니다.
[1]: work_data_bits(work)에 WORK_STRUCT_LINKED 비트가 설정돼 있는지 검사합니다.
현재 실행 중인 워커 쓰레드에서 처리 중인 워크를 flush한 경우에는 이 비트가 설정되지 않으므로 [2] 경로를 탑니다.

[2]: 워크를 process_one_work에 대입하여 실행합니다.
[3]: &worker->scheduled 멤버가 비어 있지 않으면 배리어 워크가 등록됐다는 의미입니다.
process_scheduled_works(worker) 함수를 호출하여 배리어 워크를 실행합니다.
[4]: 배리어 워크를 실행합니다. 

현재 실행 중인 워커 쓰레드와 동일한 워크를 flush한 경우 ftrace log는 다음과 같습니다.
 => wq_barrier_func
 => process_one_work
 => process_scheduled_works
 => worker_thread
 => kthread
 => ret_from_fork

다음과 같은 패치를 반영하고 로그를 받아 봤습니다.
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ba8285b..58cf99e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2340,8 +2340,10 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
         * If @target is currently being executed, schedule the
         * barrier to the worker; otherwise, put it after @target.
         */
-       if (worker)
+       if (worker) {
+               trace_printk("[+][Bret] work_struct->func: %pF   [F:%s, L:%d] caller(%pS)\n", target->func, __func__,__LINE__,  (void *)__builtin_return_address(0));
                head = worker->scheduled.next;
+       }
        else {
                unsigned long *bits = work_data_bits(target);

@@ -2349,6 +2351,7 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
                /* there can already be other linked works, inherit and set */
                linked = *bits & WORK_STRUCT_LINKED;
                __set_bit(WORK_STRUCT_LINKED_BIT, bits);
+               trace_printk("[++][Bret] work_struct->func: %pF   [F:%s, L:%d] caller(%pS)\n", target->func, __func__,__LINE__,  (void *)__builtin_return_address(0));
        }

        debug_work_activate(&barr->work);
@@ -2647,6 +2650,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
        pool = get_work_pool(work);
        if (!pool) {
                local_irq_enable();
+               trace_printk("[0][Bret] no pool  [F:%s, L:%d] caller(%pS)\n", __func__,__LINE__,  (void *)__builtin_return_address(0));
                return false;
        }

@@ -2654,12 +2658,17 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
        /* see the comment in try_to_grab_pending() with the same code */
        pwq = get_work_pwq(work);
        if (pwq) {
-               if (unlikely(pwq->pool != pool))
+               if (unlikely(pwq->pool != pool)) {
+                       trace_printk("[+][Bret] goto already_gone  [F:%s, L:%d] caller(%pS)\n", __func__,__LINE__,  (void *)__builtin_return_address(0));
                        goto already_gone;
+               }
        } else {
                worker = find_worker_executing_work(pool, work);
-               if (!worker)
+               if (!worker) {
+                       trace_printk("[++][Bret] goto already_gone  [F:%s, L:%d] caller(%pS)\n", __func__,__LINE__,  (void *)__builtin_return_address(0));
                        goto already_gone;
+               }
+               trace_printk("[+++][Bret] start  [F:%s, L:%d] caller(%pS)\n", __func__,__LINE__,  (void *)__builtin_return_address(0));
                pwq = worker->current_pwq;
        }

@@ -2704,6 +2713,7 @@ bool flush_work(struct work_struct *work)

        if (start_flush_work(work, &barr)) {
                wait_for_completion(&barr.done);
+               trace_printk("[-][Bret] barrier work wait complete  [F:%s, L:%d] caller(%pS)\n", __func__,__LINE__,  (void *)__builtin_return_address(0));
                destroy_work_on_stack(&barr.work);
                return true;
        } else {

ftrace 설정 
adb shell "echo  > d/tracing/set_event"
adb shell "sleep 1"

adb shell "echo 0 > d/tracing/tracing_on"
adb shell "sleep 1"

:adb shell " echo 50000 > /d/tracing/buffer_size_kb"

adb shell "echo function > d/tracing/current_tracer"
adb shell "sleep 1"

adb shell "echo 1 > /d/tracing/events/sched/sched_switch/enable"
adb shell "echo 1 > /d/tracing/events/sched/sched_wakeup/enable"
adb shell "sleep 1"

adb shell "echo 1 > /d/tracing/events/workqueue/enable"
adb shell "sleep 1"

adb shell "echo wq_barrier_func flush_work   > d/tracing/set_ftrace_filter"
adb shell "sleep 1"

adb shell "echo 1 > d/tracing/options/func_stack_trace"
adb shell "sleep 1"

adb shell "echo 1 > d/tracing/tracing_on"
adb shell "sleep 1"

1. 현재 실행 중인 워커 쓰레드와 동일한 워크를 flush한 경우 ftrace log입니다.
kworker/7:1-345   [007] ...1   144.460351: workqueue_execute_start: work struct c3ff29f4: function sdhci_msm_pm_qos_cpu_unvote_work
     kworker/7:1-345   [007] ...1   144.460353: flush_work <-__cancel_work_timer
     kworker/7:1-345   [007] ...1   144.460373: <stack trace>
 => flush_work
 => __cancel_work_timer
 => cancel_delayed_work_sync
 => pm_qos_update_request
 => sdhci_msm_pm_qos_cpu_unvote_work
 => process_one_work
 => worker_thread
 => kthread
 => ret_from_fork
     mmc-cmdqd/0-340   [004] d..1   144.460461: flush_work: [+][Bret] work_struct->func: sdhci_msm_pm_qos_cpu_unvote_work+0x0/0x34   [F:insert_wq_barrier, L:2344] caller(__cancel_work_timer+0x130/0x1bc)
     kworker/7:1-345   [007] ...1   144.460485: workqueue_execute_end: work struct c3ff29f4
     kworker/7:1-345   [007] ...1   144.460489: workqueue_execute_start: work struct c3d37d00: function wq_barrier_func
     kworker/7:1-345   [007] ...1   144.460491: wq_barrier_func <-process_one_work
     kworker/7:1-345   [007] ...1   144.460511: <stack trace>
 => wq_barrier_func
 => process_one_work
 => process_scheduled_works
 => worker_thread
 => kthread
 => ret_from_fork
     kworker/7:1-345   [007] ...1   144.460532: workqueue_execute_end: work struct c3d37d00
     mmc-cmdqd/0-340   [007] ....   144.460551: flush_work.part.6: [-][Bret] barrier work wait complete  [F:flush_work, L:2710] caller(flush_work+0x150/0x188)

2. 현재 flush하려는 work을 수행 중인 worker가 없는 경우
<...>-357 [005] .... 123.236104: flush_work: [0][Bret] no pool [F:start_flush_work, L:2653] caller(__cancel_work_timer+0x130/0x1bc)
<...>-357 [005] d..2 123.236119: workqueue_queue_work: work struct=e8405db8 function=wq_unbind_fn workqueue=eac0dcc0 req_cpu=1 cpu=1
<...>-357 [005] d..2 123.236122: workqueue_activate_work: work struct e8405db8
<...>-357 [005] d..4 123.236144: sched_wakeup: comm=kworker/1:1H pid=379 prio=100 success=1 target_cpu=001

           <...>-357   [005] ...1   123.236239: <stack trace>
 => flush_work
 => workqueue_cpu_down_callback
 => notifier_call_chain
 => __raw_notifier_call_chain
 => __cpu_notify
 => _cpu_down
 => cpu_down
 => cpu_subsys_offline
 => device_offline
 => core_ctl_offline_core
 => do_hotplug
 => try_hotplug
 => kthread
 => ret_from_fork
           <...>-357   [005] d..1   123.236246: flush_work: [++][Bret] work_struct->func: wq_unbind_fn+0x0/0xd8   [F:insert_wq_barrier, L:2354] caller(workqueue_cpu_down_callback+0x90/0xac)
           <...>-357   [005] d..3   123.236269: sched_switch: prev_comm=core_ctl/0 prev_pid=357 prev_prio=0 prev_state=D ==> next_comm=kworker/u16:8 next_pid=833 next_prio=120
          <idle>-0     [001] d..3   123.236281: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=kworker/1:1H next_pid=379 next_prio=100
           <...>-5001  [000] d..4   123.236295: sched_wakeup: comm=CAM_AFD pid=5019 prio=120 success=1 target_cpu=000
           <...>-379   [001] ...1   123.236295: workqueue_execute_start: work struct e8405db8: function wq_unbind_fn
           <...>-379   [001] d..4   123.236319: sched_wakeup: comm=kworker/1:3 pid=386 prio=120 success=1 target_cpu=001
           <...>-5001  [000] d..4   123.236325: sched_wakeup: comm=CAM_ASD pid=5020 prio=120 success=1 target_cpu=000
           <...>-379   [001] d..3   123.236330: sched_switch: prev_comm=kworker/1:1H prev_pid=379 prev_prio=100 prev_state=R ==> next_comm=kworker/1:3 next_pid=386 next_prio=120
           <...>-386   [001] d..3   123.236346: sched_switch: prev_comm=kworker/1:3 prev_pid=386 prev_prio=120 prev_state=S ==> next_comm=kworker/1:1H next_pid=379 next_prio=100
           <...>-379   [001] d..4   123.236359: sched_wakeup: comm=kworker/1:0H pid=17 prio=100 success=1 target_cpu=001
           <...>-379   [001] ...1   123.236362: workqueue_execute_end: work struct e8405db8
           <...>-379   [001] ...1   123.236366: workqueue_execute_start: work struct e8405d58: function wq_barrier_func
           <...>-5001  [000] d..4   123.236368: sched_wakeup: comm=CAM_iface_hw pid=5342 prio=120 success=1 target_cpu=004
           <...>-379   [001] ...1   123.236372: wq_barrier_func <-process_one_work
   kworker/u16:8-833   [005] d..2   123.236372: workqueue_queue_work: work struct=e46a0c68 function=do_read_data workqueue=e46a1f80 req_cpu=8 cpu=4294967295
   kworker/u16:8-833   [005] d..2   123.236375: workqueue_activate_work: work struct e46a0c68
           <...>-5001  [000] d..3   123.236389: sched_switch: prev_comm=CAM_isp_parser prev_pid=5001 prev_prio=120 prev_state=S ==> next_comm=CAM_AECAWB next_pid=5016 next_prio=120
           <...>-379   [001] ...1   123.236395: <stack trace>
 => wq_barrier_func
 => process_one_work
 => process_scheduled_works
 => worker_thread
 => kthread
 => ret_from_fork
  kworker/u16:8-833   [005] d..4   123.236409: sched_wakeup: comm=kworker/u16:11 pid=4830 prio=120 success=1 target_cpu=000
           <...>-379   [001] d..4   123.236422: sched_wakeup: comm=core_ctl/0 pid=357 prio=0 success=1 target_cpu=005
           <...>-379   [001] ...1   123.236426: workqueue_execute_end: work struct e8405d58
           <...>-379   [001] d..3   123.236438: sched_switch: prev_comm=kworker/1:1H prev_pid=379 prev_prio=100 prev_state=S ==> next_comm=kworker/1:0H next_pid=17 next_prio=100
   kworker/u16:8-833   [005] d..3   123.236441: sched_switch: prev_comm=kworker/u16:8 prev_pid=833 prev_prio=120 prev_state=R+ ==> next_comm=core_ctl/0 next_pid=357 next_prio=0
           <...>-357   [005] ....   123.236452: flush_work.part.6: [-][Bret] barrier work wait complete  [F:flush_work, L:2716] caller(flush_work+0x1b8/0x1fc)


Reference(워크큐)

.

핑백

덧글

댓글 입력 영역