mirror of https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git (synced 2024-11-01 02:21:16 +00:00)
1d5b600b50
PM QoS and other idle frameworks can do a better job of addressing power and performance requirements for a cpu when they know which IRQs are affine to that cpu. If a performance request is placed against serving an IRQ faster, and that IRQ is affine to a set of cpus, then setting the performance requirements only on those cpus helps save power on the rest of the cpus. The PM QoS framework is one such framework interested in knowing the smp_affinity of an IRQ and in being notified when it changes. QoS requests for the CPU_DMA_LATENCY constraint currently apply to all cpus, but when attached to an IRQ they can be applied only to the set of cpus the IRQ's smp_affinity is set to. This allows the other cpus to enter deeper sleep states to save power.

More than one framework/driver can be interested in such information. The current implementation allows only a single notification callback whenever the IRQ's SMP affinity is changed; adding a second notification punts the existing notifier function out of registration. Add a list of notifiers, allowing multiple clients to register for irq affinity notifications.

The kref object associated with struct irq_affinity_notify was used to prevent the notifier object from being released while a notification was pending. It was incremented before the work item was scheduled and decremented when the notification completed; if the kref count reached zero, the release function was called back, allowing the module to free the irq_affinity_notify memory. This works well for a single notifier, but when multiple clients are registered, no single kref object can be used. Hence, the work function, when scheduled, increments the kref count using kref_get_unless_zero(), so a module that had already unregistered its irq_affinity_notify object while the work function was pending will not be notified.

Change-Id: If2e38ce8d7c43459ba1604d5b4798d1bad966997
Signed-off-by: Lina Iyer <lina.iyer@linaro.org>
Patch-mainline: linux-pm @ Wed, 27 Aug 2014 13:18:28
https://lkml.org/lkml/2014/8/27/609
[mnalajal@codeaurora.org: resolve NON SMP target compilation issues]
Signed-off-by: Murali Nalajala <mnalajal@codeaurora.org>
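For illustration, here is a minimal sketch of the notification scheme the commit message describes. The work-function name (irq_affinity_notify_work) and the per-notifier list member (notify->list) are assumptions made for this example, not code quoted from the patch, and the real implementation must also serialize the list walk against unregistration (e.g. under the descriptor lock), which is omitted here:

#include <linux/irq.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/workqueue.h>

/*
 * Sketch only: walk the per-descriptor list of registered affinity
 * notifiers and invoke only those clients whose irq_affinity_notify
 * object is still alive.
 */
static void irq_affinity_notify_work(struct work_struct *work)
{
	struct irq_desc *desc = container_of(work, struct irq_desc,
					     affinity_work);
	struct irq_affinity_notify *notify;

	list_for_each_entry(notify, &desc->affinity_notify, list) {
		/*
		 * kref_get_unless_zero() fails if the client dropped its
		 * last reference (i.e. unregistered) while this work item
		 * was pending, so such a notifier is simply skipped.
		 */
		if (!kref_get_unless_zero(&notify->kref))
			continue;

		notify->notify(notify, desc->irq_data.affinity);
		kref_put(&notify->kref, notify->release);
	}
}

The point of kref_get_unless_zero() over a plain kref_get() is that taking the reference and checking liveness become one atomic step, so a notifier unregistered between scheduling and execution of the work item is never called back.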
189 lines
5.5 KiB
C
#ifndef _LINUX_IRQDESC_H
#define _LINUX_IRQDESC_H

/*
 * Core internal functions to deal with irq descriptors
 *
 * This include will move to kernel/irq once we cleaned up the tree.
 * For now it's included from <linux/irq.h>
 */

struct irq_affinity_notify;
struct proc_dir_entry;
struct module;
struct irq_desc;

/**
 * struct irq_desc - interrupt descriptor
 * @irq_data:		per irq and chip data passed down to chip functions
 * @kstat_irqs:		irq stats per cpu
 * @handle_irq:		highlevel irq-events handler
 * @preflow_handler:	handler called before the flow handler (currently used by sparc)
 * @action:		the irq action chain
 * @status:		status information
 * @core_internal_state__do_not_mess_with_it: core internal status information
 * @depth:		disable-depth, for nested irq_disable() calls
 * @wake_depth:		enable depth, for multiple irq_set_irq_wake() callers
 * @irq_count:		stats field to detect stalled irqs
 * @last_unhandled:	aging timer for unhandled count
 * @irqs_unhandled:	stats field for spurious unhandled interrupts
 * @threads_handled:	stats field for deferred spurious detection of threaded handlers
 * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
 * @lock:		locking for SMP
 * @affinity_hint:	hint to user space for preferred irq affinity
 * @affinity_notify:	list of notification clients for affinity changes
 * @affinity_work:	Work queue for handling affinity change notifications
 * @pending_mask:	pending rebalanced interrupts
 * @threads_oneshot:	bitfield to handle shared oneshot threads
 * @threads_active:	number of irqaction threads currently running
 * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
 * @dir:		/proc/irq/ procfs entry
 * @name:		flow handler name for /proc/interrupts output
 */
struct irq_desc {
	struct irq_data		irq_data;
	unsigned int __percpu	*kstat_irqs;
	irq_flow_handler_t	handle_irq;
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
	irq_preflow_handler_t	preflow_handler;
#endif
	struct irqaction	*action;	/* IRQ action list */
	unsigned int		status_use_accessors;
	unsigned int		core_internal_state__do_not_mess_with_it;
	unsigned int		depth;		/* nested irq disables */
	unsigned int		wake_depth;	/* nested wake enables */
	unsigned int		irq_count;	/* For detecting broken IRQs */
	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
	unsigned int		irqs_unhandled;
	atomic_t		threads_handled;
	int			threads_handled_last;
	raw_spinlock_t		lock;
	struct cpumask		*percpu_enabled;
#ifdef CONFIG_SMP
	const struct cpumask	*affinity_hint;
	struct list_head	affinity_notify;
	struct work_struct	affinity_work;
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_var_t		pending_mask;
#endif
#endif
	unsigned long		threads_oneshot;
	atomic_t		threads_active;
	wait_queue_head_t	wait_for_threads;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry	*dir;
#endif
	int			parent_irq;
	struct module		*owner;
	const char		*name;
} ____cacheline_internodealigned_in_smp;

#ifndef CONFIG_SPARSE_IRQ
extern struct irq_desc irq_desc[NR_IRQS];
#endif

static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
	return &desc->irq_data;
}

static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
{
	return desc->irq_data.chip;
}

static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
{
	return desc->irq_data.chip_data;
}

static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
{
	return desc->irq_data.handler_data;
}

static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
{
	return desc->irq_data.msi_desc;
}

/*
 * Architectures call this to let the generic IRQ layer
 * handle an interrupt. If the descriptor is attached to an
 * irqchip-style controller then we call the ->handle_irq() handler,
 * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
 */
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	desc->handle_irq(irq, desc);
}

int generic_handle_irq(unsigned int irq);

/* Test to see if a driver has successfully requested an irq */
static inline int irq_has_action(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc->action != NULL;
}

/* caller has locked the irq_desc and both params are valid */
static inline void __irq_set_handler_locked(unsigned int irq,
					    irq_flow_handler_t handler)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	desc->handle_irq = handler;
}

/* caller has locked the irq_desc and both params are valid */
static inline void
__irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip,
				   irq_flow_handler_t handler, const char *name)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	irq_desc_get_irq_data(desc)->chip = chip;
	desc->handle_irq = handler;
	desc->name = name;
}

static inline int irq_balancing_disabled(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
}

static inline int irq_is_percpu(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	return desc->status_use_accessors & IRQ_PER_CPU;
}

static inline void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc)
		lockdep_set_class(&desc->lock, class);
}

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void
__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	desc->preflow_handler = handler;
}
#endif

#endif