From 047f7617eba3653ff3bcfbe902986903fff2ed3b Mon Sep 17 00:00:00 2001
From: Bernard Pidoux
Date: Sun, 20 Apr 2008 15:58:07 -0700
Subject: [ROSE]: Fix soft lockup wrt. rose_node_list_lock

[ INFO: possible recursive locking detected ]
2.6.25 #3
---------------------------------------------
ax25ipd/3811 is trying to acquire lock:
 (rose_node_list_lock){-+..}, at: [] rose_get_neigh+0x1a/0xa0 [rose]

but task is already holding lock:
 (rose_node_list_lock){-+..}, at: [] rose_route_frame+0x4d/0x620 [rose]

other info that might help us debug this:
6 locks held by ax25ipd/3811:
 #0:  (&tty->atomic_write_lock){--..}, at: [] tty_write_lock+0x1c/0x50
 #1:  (rcu_read_lock){..--}, at: [] net_rx_action+0x96/0x230
 #2:  (rcu_read_lock){..--}, at: [] netif_receive_skb+0x100/0x2f0
 #3:  (rose_node_list_lock){-+..}, at: [] rose_route_frame+0x4d/0x620 [rose]
 #4:  (rose_neigh_list_lock){-+..}, at: [] rose_route_frame+0x57/0x620 [rose]
 #5:  (rose_route_list_lock){-+..}, at: [] rose_route_frame+0x61/0x620 [rose]

stack backtrace:
Pid: 3811, comm: ax25ipd Not tainted 2.6.25 #3
 [] print_deadlock_bug+0xc7/0xd0
 [] check_deadlock+0x9a/0xb0
 [] validate_chain+0x1e2/0x310
 [] ? validate_chain+0xa5/0x310
 [] ? native_sched_clock+0x88/0xc0
 [] __lock_acquire+0x1a1/0x750
 [] lock_acquire+0x81/0xa0
 [] ? rose_get_neigh+0x1a/0xa0 [rose]
 [] _spin_lock_bh+0x33/0x60
 [] ? rose_get_neigh+0x1a/0xa0 [rose]
 [] rose_get_neigh+0x1a/0xa0 [rose]
 [] rose_route_frame+0x464/0x620 [rose]
 [] ? _read_unlock+0x1d/0x20
 [] ? rose_route_frame+0x0/0x620 [rose]
 [] ax25_rx_iframe+0x66/0x3b0 [ax25]
 [] ? ax25_start_t3timer+0x1f/0x40 [ax25]
 [] ax25_std_frame_in+0x7fb/0x890 [ax25]
 [] ? _spin_unlock_bh+0x25/0x30
 [] ax25_kiss_rcv+0x2c6/0x800 [ax25]
 [] ? sock_def_readable+0x59/0x80
 [] ? __lock_release+0x47/0x70
 [] ? sock_def_readable+0x59/0x80
 [] ? _read_unlock+0x1d/0x20
 [] ? sock_def_readable+0x59/0x80
 [] ? sock_queue_rcv_skb+0x13a/0x1d0
 [] ? sock_queue_rcv_skb+0x45/0x1d0
 [] ? ax25_kiss_rcv+0x0/0x800 [ax25]
 [] netif_receive_skb+0x255/0x2f0
 [] ? netif_receive_skb+0x100/0x2f0
 [] process_backlog+0x7c/0xf0
 [] net_rx_action+0x16c/0x230
 [] ? net_rx_action+0x96/0x230
 [] __do_softirq+0x93/0x120
 [] ? mkiss_receive_buf+0x33a/0x3f0 [mkiss]
 [] do_softirq+0x57/0x60
 [] local_bh_enable_ip+0xa5/0xe0
 [] _spin_unlock_bh+0x25/0x30
 [] mkiss_receive_buf+0x33a/0x3f0 [mkiss]
 [] pty_write+0x47/0x60
 [] write_chan+0x1b0/0x220
 [] ? tty_write_lock+0x1c/0x50
 [] ? default_wake_function+0x0/0x10
 [] tty_write+0x12a/0x1c0
 [] ? write_chan+0x0/0x220
 [] vfs_write+0x96/0x130
 [] ? tty_write+0x0/0x1c0
 [] sys_write+0x3d/0x70
 [] sysenter_past_esp+0x5f/0xa5
 =======================

BUG: soft lockup - CPU#0 stuck for 61s! [ax25ipd:3811]

Pid: 3811, comm: ax25ipd Not tainted (2.6.25 #3)
EIP: 0060:[] EFLAGS: 00000246 CPU: 0
EIP is at native_read_tsc+0xb/0x20
EAX: b404aa2c EBX: b404a9c9 ECX: 017f1000 EDX: 0000076b
ESI: 00000001 EDI: 00000000 EBP: ecc83afc ESP: ecc83afc
 DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
CR0: 8005003b CR2: b7f5f000 CR3: 2cd8e000 CR4: 000006f0
DR0: 00000000 DR1: 00000000 DR2: 00000000 DR3: 00000000
DR6: ffff0ff0 DR7: 00000400
 [] delay_tsc+0x17/0x30
 [] __delay+0x9/0x10
 [] __spin_lock_debug+0x76/0xf0
 [] ? spin_bug+0x18/0x100
 [] ? __lock_contended+0xa3/0x110
 [] _raw_spin_lock+0x68/0x90
 [] _spin_lock_bh+0x4f/0x60
 [] ? rose_get_neigh+0x1a/0xa0 [rose]
 [] rose_get_neigh+0x1a/0xa0 [rose]
 [] rose_route_frame+0x464/0x620 [rose]
 [] ? _read_unlock+0x1d/0x20
 [] ? rose_route_frame+0x0/0x620 [rose]
 [] ax25_rx_iframe+0x66/0x3b0 [ax25]
 [] ? ax25_start_t3timer+0x1f/0x40 [ax25]
 [] ax25_std_frame_in+0x7fb/0x890 [ax25]
 [] ? _spin_unlock_bh+0x25/0x30
 [] ax25_kiss_rcv+0x2c6/0x800 [ax25]
 [] ? sock_def_readable+0x59/0x80
 [] ? __lock_release+0x47/0x70
 [] ? sock_def_readable+0x59/0x80
 [] ? _read_unlock+0x1d/0x20
 [] ? sock_def_readable+0x59/0x80
 [] ? sock_queue_rcv_skb+0x13a/0x1d0
 [] ? sock_queue_rcv_skb+0x45/0x1d0
 [] ? ax25_kiss_rcv+0x0/0x800 [ax25]
 [] netif_receive_skb+0x255/0x2f0
 [] ? netif_receive_skb+0x100/0x2f0
 [] process_backlog+0x7c/0xf0
 [] net_rx_action+0x16c/0x230
 [] ? net_rx_action+0x96/0x230
 [] __do_softirq+0x93/0x120
 [] ? mkiss_receive_buf+0x33a/0x3f0 [mkiss]
 [] do_softirq+0x57/0x60
 [] local_bh_enable_ip+0xa5/0xe0
 [] _spin_unlock_bh+0x25/0x30
 [] mkiss_receive_buf+0x33a/0x3f0 [mkiss]
 [] pty_write+0x47/0x60
 [] write_chan+0x1b0/0x220
 [] ? tty_write_lock+0x1c/0x50
 [] ? default_wake_function+0x0/0x10
 [] tty_write+0x12a/0x1c0
 [] ? write_chan+0x0/0x220
 [] vfs_write+0x96/0x130
 [] ? tty_write+0x0/0x1c0
 [] sys_write+0x3d/0x70
 [] sysenter_past_esp+0x5f/0xa5
 =======================

Since rose_route_frame() does not use rose_node_list, we can safely remove
the rose_node_list_lock spin lock here and leave it free for
rose_get_neigh().

Signed-off-by: Bernard Pidoux
Signed-off-by: David S. Miller
---
 net/rose/rose_route.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index fb9359fb2358..5053a53ba24f 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -857,7 +857,6 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
 	src_addr  = (rose_address *)(skb->data + 9);
 	dest_addr = (rose_address *)(skb->data + 4);
 
-	spin_lock_bh(&rose_node_list_lock);
 	spin_lock_bh(&rose_neigh_list_lock);
 	spin_lock_bh(&rose_route_list_lock);
 
@@ -1060,7 +1059,6 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
 out:
 	spin_unlock_bh(&rose_route_list_lock);
 	spin_unlock_bh(&rose_neigh_list_lock);
-	spin_unlock_bh(&rose_node_list_lock);
 
 	return res;
 }
--
cgit v1.2.3
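
Illustration (not part of the patch): the lockdep splat above is the classic
self-deadlock on a non-recursive spinlock. rose_route_frame() took
rose_node_list_lock and then called rose_get_neigh(), which tries to take the
same lock again, so the CPU spins forever and the soft-lockup watchdog fires.
Below is a minimal userspace sketch of that pattern using POSIX spinlocks;
route_frame() and get_neigh() are hypothetical stand-ins for the ROSE
functions, not the kernel code.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for rose_node_list_lock: a plain, non-recursive spinlock. */
static pthread_spinlock_t node_list_lock;

/* Analogue of rose_get_neigh(): it needs node_list_lock for itself.
 * trylock is used here so the demo reports the recursion instead of
 * spinning forever the way the real soft lockup did. */
static void get_neigh(void)
{
	if (pthread_spin_trylock(&node_list_lock) != 0) {
		printf("get_neigh: lock already held by caller -- would deadlock\n");
		return;
	}
	printf("get_neigh: got the lock, doing neighbour lookup\n");
	pthread_spin_unlock(&node_list_lock);
}

/* Analogue of rose_route_frame(): before the patch it took the lock it
 * never needed and then called into get_neigh(). */
static void route_frame(int take_outer_lock)
{
	if (take_outer_lock)
		pthread_spin_lock(&node_list_lock);

	get_neigh();

	if (take_outer_lock)
		pthread_spin_unlock(&node_list_lock);
}

int main(void)
{
	pthread_spin_init(&node_list_lock, PTHREAD_PROCESS_PRIVATE);

	route_frame(1);	/* pre-patch behaviour: recursive acquisition */
	route_frame(0);	/* post-patch behaviour: lock left free for get_neigh() */

	pthread_spin_destroy(&node_list_lock);
	return 0;
}

With take_outer_lock set (pre-patch behaviour) the inner acquisition fails
because the caller already holds the lock; with it cleared (post-patch
behaviour) get_neigh() takes the lock itself, which is exactly what dropping
the outer spin_lock_bh(&rose_node_list_lock) in rose_route_frame() achieves.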