Mirror of https://github.com/followmsi/android_kernel_google_msm.git
Synced 2024-11-06 23:17:41 +00:00
irq_flow_handler_t now returns bool
Alter the signature of irq_flow_handler_t to return true for those
interrupts whose handlers were invoked, and false otherwise. Also rework
the actual handlers, handle_.*_irq, to support the new signature.

Change-Id: I8a50410c477692bbcd39a0fefdac14253602d1f5
Signed-off-by: Iliyan Malchev <malchev@google.com>
This commit is contained in:
parent 13e2b3277d
commit 4dbec3e7db
5 changed files with 57 additions and 24 deletions
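Before the per-file diff, a minimal sketch (not part of this commit) of what the new contract asks of a flow handler: return true only if a child handler was actually invoked. The foo_* names and the pending-register stub are hypothetical; the shape mirrors the gic_handle_cascade_irq change below.

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <asm/mach/irq.h>	/* chained_irq_enter/exit on ARM in this tree */

/* hypothetical: ask the demux hardware which child IRQ is pending (0 = none) */
static unsigned int foo_read_pending_irq(void)
{
        return 0;	/* stub for the sketch */
}

static bool foo_demux_handler(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned int child_irq;
        bool handled = false;

        chained_irq_enter(chip, desc);

        child_irq = foo_read_pending_irq();
        if (child_irq)
                /* as in the gic change below: fold generic_handle_irq()'s
                 * int result into the handled flag */
                handled = generic_handle_irq(child_irq);

        chained_irq_exit(chip, desc);
        return handled;
}

A parent controller would still be wired up with irq_set_chained_handler(parent_irq, foo_demux_handler); only the typedef's return type changes, not registration.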
arch/arm/common/gic.c
@@ -460,12 +460,13 @@ asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
 	} while (1);
 }
 
-static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+static bool gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 {
 	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
 	struct irq_chip *chip = irq_get_chip(irq);
 	unsigned int cascade_irq, gic_irq;
 	unsigned long status;
+	int handled = false;
 
 	chained_irq_enter(chip, desc);
 
@@ -481,10 +482,11 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 	if (unlikely(gic_irq < 32 || gic_irq > 1020))
 		do_bad_IRQ(cascade_irq, desc);
 	else
-		generic_handle_irq(cascade_irq);
+		handled = generic_handle_irq(cascade_irq);
 
  out:
 	chained_irq_exit(chip, desc);
+	return handled == true;
 }
 
 static struct irq_chip gic_chip = {
include/linux/irq.h
@@ -32,7 +32,7 @@ struct seq_file;
 struct module;
 struct irq_desc;
 struct irq_data;
-typedef	void (*irq_flow_handler_t)(unsigned int irq,
+typedef	bool (*irq_flow_handler_t)(unsigned int irq,
 					    struct irq_desc *desc);
 typedef void (*irq_preflow_handler_t)(struct irq_data *data);
 
@@ -406,15 +406,15 @@ extern int no_irq_affinity;
  * Built-in IRQ handlers for various IRQ types,
  * callable via desc->handle_irq()
  */
-extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_nested_irq(unsigned int irq);
+extern bool handle_level_irq(unsigned int irq, struct irq_desc *desc);
+extern bool handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
+extern bool handle_edge_irq(unsigned int irq, struct irq_desc *desc);
+extern bool handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
+extern bool handle_simple_irq(unsigned int irq, struct irq_desc *desc);
+extern bool handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
+extern bool handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
+extern bool handle_bad_irq(unsigned int irq, struct irq_desc *desc);
+extern bool handle_nested_irq(unsigned int irq);
 
 /* Handling of unhandled and spurious interrupts: */
 extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
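Since only the return type of irq_flow_handler_t changes, call sites that install the built-in handlers compile unmodified. A sketch, with FOO_IRQ and foo_chip as illustrative stand-ins rather than anything from this tree:

#include <linux/irq.h>

#define FOO_IRQ 42			/* hypothetical Linux IRQ number */
static struct irq_chip foo_chip;	/* hypothetical controller chip */

static void foo_init(void)
{
        /* handle_level_irq is now bool-returning, matching the new typedef */
        irq_set_chip_and_handler(FOO_IRQ, &foo_chip, handle_level_irq);
}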
include/linux/irqdesc.h
@@ -108,9 +108,9 @@ static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
  * irqchip-style controller then we call the ->handle_irq() handler,
  * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
  */
-static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
+static inline bool generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
 {
-	desc->handle_irq(irq, desc);
+	return desc->handle_irq(irq, desc);
 }
 
 int generic_handle_irq(unsigned int irq);
kernel/irq/chip.c
@@ -262,12 +262,13 @@ void unmask_irq(struct irq_desc *desc)
  * handler. The handler function is called inside the calling
  * threads context.
  */
-void handle_nested_irq(unsigned int irq)
+bool handle_nested_irq(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action;
 	int mask_this_irq = 0;
 	irqreturn_t action_ret;
+	bool handled = false;
 
 	might_sleep();
 
@@ -291,6 +292,8 @@ void handle_nested_irq(unsigned int irq)
 	raw_spin_lock_irq(&desc->lock);
 	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 
+	handled = true;
+
 out_unlock:
 	raw_spin_unlock_irq(&desc->lock);
 	if (unlikely(mask_this_irq)) {
@@ -298,6 +301,8 @@ out_unlock:
 		mask_irq(desc);
 		chip_bus_sync_unlock(desc);
 	}
+
+	return handled;
 }
 EXPORT_SYMBOL_GPL(handle_nested_irq);
 
@@ -320,9 +325,11 @@ static bool irq_check_poll(struct irq_desc *desc)
  * Note: The caller is expected to handle the ack, clear, mask and
  * unmask issues if necessary.
  */
-void
+bool
 handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 {
+	bool handled = false;
+
 	raw_spin_lock(&desc->lock);
 
 	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
@@ -337,8 +344,11 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 
 	handle_irq_event(desc);
 
+	handled = true;
+
out_unlock:
 	raw_spin_unlock(&desc->lock);
+	return handled;
 }
 EXPORT_SYMBOL_GPL(handle_simple_irq);
 
@@ -370,9 +380,11 @@ static void cond_unmask_irq(struct irq_desc *desc)
  * it after the associated handler has acknowledged the device, so the
  * interrupt line is back to inactive.
  */
-void
+bool
 handle_level_irq(unsigned int irq, struct irq_desc *desc)
 {
+	bool handled = false;
+
 	raw_spin_lock(&desc->lock);
 	mask_ack_irq(desc);
 
@@ -394,8 +406,11 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 
 	cond_unmask_irq(desc);
 
+	handled = true;
+
out_unlock:
 	raw_spin_unlock(&desc->lock);
+	return handled;
 }
 EXPORT_SYMBOL_GPL(handle_level_irq);
 
@@ -419,9 +434,11 @@ static inline void preflow_handler(struct irq_desc *desc) { }
  * for modern forms of interrupt handlers, which handle the flow
  * details in hardware, transparently.
  */
-void
+bool
 handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 {
+	bool handled = false;
+
 	raw_spin_lock(&desc->lock);
 
 	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
@@ -451,11 +468,13 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 	if (desc->istate & IRQS_ONESHOT)
 		cond_unmask_irq(desc);
 
+	handled = true;
+
out_eoi:
 	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
 	raw_spin_unlock(&desc->lock);
-	return;
+	return handled;
out:
 	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
 		goto out_eoi;
@@ -478,9 +497,11 @@ out:
  * the handler was running. If all pending interrupts are handled, the
  * loop is left.
  */
-void
+bool
 handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 {
+	bool handled = false;
+
 	raw_spin_lock(&desc->lock);
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -520,12 +541,14 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 		}
 
 		handle_irq_event(desc);
+		handled = true;
 
 	} while ((desc->istate & IRQS_PENDING) &&
 		 !irqd_irq_disabled(&desc->irq_data));
 
out_unlock:
 	raw_spin_unlock(&desc->lock);
+	return handled;
 }
 EXPORT_SYMBOL(handle_edge_irq);
 
@@ -538,8 +561,9 @@ EXPORT_SYMBOL(handle_edge_irq);
  * Similar as the above handle_edge_irq, but using eoi and w/o the
  * mask/unmask logic.
  */
-void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
+bool handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
 {
+	bool handled = false;
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 
 	raw_spin_lock(&desc->lock);
@@ -564,6 +588,7 @@ void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
 			goto out_eoi;
 
 		handle_irq_event(desc);
+		handled = true;
 
 	} while ((desc->istate & IRQS_PENDING) &&
 		 !irqd_irq_disabled(&desc->irq_data));
@@ -571,6 +596,7 @@ void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
out_eoi:
 	chip->irq_eoi(&desc->irq_data);
 	raw_spin_unlock(&desc->lock);
+	return handled;
 }
 #endif
 
@@ -581,7 +607,7 @@ out_eoi:
  *
 *	Per CPU interrupts on SMP machines without locking requirements
  */
-void
+bool
 handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -595,6 +621,8 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 
 	if (chip->irq_eoi)
 		chip->irq_eoi(&desc->irq_data);
+
+	return true;
 }
 
 /**
@@ -609,7 +637,7 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 * contain the real device id for the cpu on which this handler is
 * called
 */
-void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
+bool handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct irqaction *action = desc->action;
@@ -627,6 +655,8 @@ void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
 
 	if (chip->irq_eoi)
 		chip->irq_eoi(&desc->irq_data);
+
+	return true;
 }
 
 void
kernel/irq/handle.c
@@ -27,11 +27,12 @@
  *
  * Handles spurious and unhandled IRQ's. It also prints a debugmessage.
  */
-void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
+bool handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 {
 	print_irq_desc(irq, desc);
 	kstat_incr_irqs_this_cpu(irq, desc);
 	ack_bad_irq(irq);
+	return true;
 }
 
 /*
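A sketch of what the propagated result enables at a dispatch site (this commit adds no such caller; foo_dispatch and the messages are illustrative): generic_handle_irq_desc() now returns the flow handler's verdict, so a caller can tell whether any action actually ran.

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/printk.h>

static void foo_dispatch(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc) {
                pr_warn("foo: no descriptor for irq %u\n", irq);
                return;
        }

        /* false means the flow handler ran but invoked no handler,
         * e.g. the IRQ was disabled or already in progress */
        if (!generic_handle_irq_desc(irq, desc))
                pr_debug("foo: irq %u was not handled\n", irq);
}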