Diffstat (limited to 'kernel/locking/osq_lock.c')
-rw-r--r--  kernel/locking/osq_lock.c  | 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 05a37857ab55..a3167941093b 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -21,6 +21,11 @@ static inline int encode_cpu(int cpu_nr)
 	return cpu_nr + 1;
 }
 
+static inline int node_cpu(struct optimistic_spin_node *node)
+{
+	return node->cpu - 1;
+}
+
 static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
 {
 	int cpu_nr = encoded_cpu_val - 1;
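The new node_cpu() helper is the inverse of encode_cpu(): CPU numbers are stored in the queue with a +1 bias so that the value 0 can mean "no CPU" (OSQ_UNLOCKED_VAL), and node_cpu() subtracts that bias again to recover the real CPU number that vcpu_is_preempted() expects. A minimal userspace sketch of the same bias, using illustrative stand-in names rather than the kernel's own definitions:

/* Sketch: CPU numbers are biased by +1 so 0 stays free to mean "unlocked". */
#include <assert.h>

#define UNLOCKED_VAL	0			/* stand-in for OSQ_UNLOCKED_VAL */

static inline int encode(int cpu_nr)  { return cpu_nr + 1; }	/* like encode_cpu() */
static inline int decode(int encoded) { return encoded - 1; }	/* like node_cpu()/decode_cpu() */

int main(void)
{
	int cpu = 0;				/* CPU 0 is a valid CPU number */
	int stored = encode(cpu);		/* stored as 1, never 0 */

	assert(stored != UNLOCKED_VAL);		/* 0 remains available to mean "no owner" */
	assert(decode(stored) == cpu);		/* the bias round-trips */
	return 0;
}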
@@ -75,7 +80,7 @@ osq_wait_next(struct optimistic_spin_queue *lock,
 				break;
 		}
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 
 	return next;
@@ -118,11 +123,13 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	while (!READ_ONCE(node->locked)) {
 		/*
 		 * If we need to reschedule bail... so we can block.
+		 * Use vcpu_is_preempted() to avoid waiting for a preempted
+		 * lock holder:
 		 */
-		if (need_resched())
+		if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
 			goto unqueue;
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 	return true;
 
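This hunk is the functional core of the change: while queued and spinning on node->locked, a waiter now also bails out when the CPU that owns the previous queue node (node_cpu(node->prev)) is running on a vCPU that the hypervisor has preempted, since spinning on a descheduled vCPU burns cycles without making progress. On bare metal, or on architectures without the paravirt hook, vcpu_is_preempted() reports false and the behaviour is unchanged. A rough userspace sketch of the same spin-then-bail pattern, with stub predicates standing in for the kernel's need_resched()/vcpu_is_preempted()/cpu_relax(); the memory ordering here is chosen for the standalone sketch and does not mirror the kernel's READ_ONCE()/smp_load_acquire() choices exactly:

#include <stdatomic.h>
#include <stdbool.h>

/* Stand-ins for the kernel helpers; the real ones are arch/scheduler specific. */
static bool need_resched_stub(void)		{ return false; }
static bool vcpu_is_preempted_stub(int cpu)	{ (void)cpu; return false; }
static void cpu_relax_stub(void)		{ /* e.g. a pause/yield hint */ }

/*
 * Spin until *locked becomes true.  Returns false if we bailed out because
 * we should reschedule or because the CPU we are waiting on is preempted,
 * mirroring the "goto unqueue" path in the hunk above.
 */
static bool spin_wait(atomic_bool *locked, int prev_cpu)
{
	while (!atomic_load_explicit(locked, memory_order_acquire)) {
		if (need_resched_stub() || vcpu_is_preempted_stub(prev_cpu))
			return false;

		cpu_relax_stub();
	}
	return true;
}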
@@ -148,7 +155,7 @@ unqueue:
 		if (smp_load_acquire(&node->locked))
 			return true;
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 
 		/*
 		 * Or we race against a concurrent unqueue()'s step-B, in which