Revert "rcu: Permit rt_mutex_unlock() with irqs disabled"

This reverts commit 5342e269b2.

The approach taken in this patch was deemed too abusive to mutexes,
and thus too likely to result in maintenance problems in the future.
Instead, we will disallow RCU read-side critical sections that partially
overlap with interrupt-disabled code segments.
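
For illustration, a minimal sketch (using the RCU and irq-flags APIs; this
code is not part of the commit) of the distinction: a read-side critical
section that straddles an irq-disabled boundary is now disallowed, while a
fully nested one remains legal:

	#include <linux/rcupdate.h>
	#include <linux/irqflags.h>

	static void partial_overlap(void)	/* now disallowed */
	{
		rcu_read_lock();	/* entered with irqs enabled... */
		local_irq_disable();
		rcu_read_unlock();	/* ...exited with irqs disabled: a
					 * deboost here would need
					 * rt_mutex_unlock() with irqs off */
		local_irq_enable();
	}

	static void fully_nested(void)		/* still legal */
	{
		local_irq_disable();	/* no preemption while irqs are off,
					 * so this section is never boosted */
		rcu_read_lock();
		rcu_read_unlock();
		local_irq_enable();
	}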

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
commit 70321d447a
parent d493011a37
Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Date:   2011-12-09 14:00:06 -08:00

 2 files changed, 13 deletions(-)

diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1165,8 +1165,6 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
-static struct lock_class_key rcu_boost_class;
-
 /*
  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
  * or ->boost_tasks, advancing the pointer to the next task in the
@@ -1229,9 +1227,6 @@ static int rcu_boost(struct rcu_node *rnp)
 	 */
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&mtx, t);
-	/* Avoid lockdep false positives.  This rt_mutex is its own thing. */
-	lockdep_set_class_and_name(&mtx.wait_lock, &rcu_boost_class,
-				   "rcu_boost_mutex");
 	t->rcu_boost_mutex = &mtx;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
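
For context, the boost path that survives this revert touches the rt_mutex
only with irqs enabled, which is why the special lockdep class can go away.
A condensed sketch of the surrounding rcu_boost() sequence (abridged from
the code shown above, not the complete function):

	rt_mutex_init_proxy_locked(&mtx, t);	/* reader t now owns mtx */
	t->rcu_boost_mutex = &mtx;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);	/* irqs back on */
	rt_mutex_lock(&mtx);	/* blocks, boosting t, until t's
				 * rcu_read_unlock() releases mtx */
	rt_mutex_unlock(&mtx);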

diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -579,7 +579,6 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		    struct rt_mutex_waiter *waiter)
 {
 	int ret = 0;
-	int was_disabled;
 
 	for (;;) {
 		/* Try to acquire the lock: */
@@ -602,17 +601,10 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
 		raw_spin_unlock(&lock->wait_lock);
 
-		was_disabled = irqs_disabled();
-		if (was_disabled)
-			local_irq_enable();
-
 		debug_rt_mutex_print_deadlock(waiter);
 
 		schedule_rt_mutex(lock);
 
-		if (was_disabled)
-			local_irq_disable();
-
 		raw_spin_lock(&lock->wait_lock);
 		set_current_state(state);
 	}
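
To see why the reverted enable/disable dance was considered too abusive to
mutexes, consider a hypothetical caller (illustration only; my_counter and
fragile_caller() are made-up names) that relies on irqs staying off across
the lock acquisition. Under the reverted code, the slowpath would silently
re-enable irqs whenever it had to sleep:

	static DEFINE_PER_CPU(int, my_counter);

	static void fragile_caller(struct rt_mutex *m)
	{
		local_irq_disable();
		__this_cpu_inc(my_counter);	/* assumes no irq intervenes
						 * before local_irq_enable() */
		rt_mutex_lock(m);	/* reverted slowpath re-enabled irqs
					 * here while waiting for the lock */
		rt_mutex_unlock(m);
		local_irq_enable();
	}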