PM: wakeup_reason: correctly deduce wakeup interrupts

The wakeup_reason driver works by having a callback, log_wakeup_reason(),
called by the resume path once for each wakeup interrupt, with the IRQ number
as argument.  It then saves each such interrupt in an array, reports them on
request (via /sys/kernel/wakeup_reasons/last_resume_reason), and also prints
the information to kmsg.

This approach works, but it has the deficiency that the reported wakeup
interrupt, while correct, is often not the interrupt truly responsible for the
wakeup.  The reason is chained interrupt controllers (whether in hardware or
simulated in software).  It could be, for example, that the power button is
wired to a GPIO handled by a single interrupt shared by all GPIOs, whose
handler then determines the specific GPIO and maps it to a software interrupt.
Whether this is done in software or by chaining interrupt controllers, the end
result is that the wakeup reason shows not the interrupt associated with the
power button, but the base GPIO interrupt instead.
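To make the scenario concrete, here is a minimal sketch of such a chained
dispatch path.  All names here are hypothetical and for illustration only;
this code is not part of the patch:

    #include <linux/bitops.h>
    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/irq.h>

    /* Hypothetical GPIO-bank driver state; illustration only. */
    struct gpio_bank {
    	void __iomem *pending_reg;	/* pending-interrupt status register */
    	int virq_base;			/* first per-pin virtual IRQ */
    };

    /* One summary IRQ for the whole bank fans out to per-pin IRQs. */
    static irqreturn_t gpio_bank_summary_handler(int irq, void *dev_id)
    {
    	struct gpio_bank *bank = dev_id;
    	unsigned long pending = readl(bank->pending_reg);
    	int bit;

    	for_each_set_bit(bit, &pending, 32)
    		/* Re-enters the generic dispatch path; before this patch,
    		 * only @irq (the summary interrupt) would be logged as the
    		 * wakeup reason, not bank->virq_base + bit. */
    		generic_handle_irq(bank->virq_base + bit);

    	return IRQ_HANDLED;
    }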

This patch reworks the wakeup_reason driver such that it reports those final
interrupts we are interested in, and not the intermediate or base ones.  It
does so as follows:

-- The assumption is that generic_handle_irq() is called to dispatch all
   interrupts; due to this, chained interrupts result in recursive calls of
   generic_handle_irq().
-- We reconstruct the chains of interrupts that originate with the base wakeup
   interrupt and terminate with the interrupt we are interested in by tracing
   the calls to generic_handle_irq().
-- The tracing works by maintaining a per-cpu counter that is incremented with
   each call to generic_handle_irq(); that counter is reported to the
   wakeup_reason driver by a pair of functions, called
   log_possible_wakeup_reason_start() and log_possible_wakeup_reason_complete().
   The former is called before generic_handle_irq() handles the interrupt
   (thereby potentially calling itself recursively) and the latter afterward
   (see the sketch after this list).
-- The two functions mentioned above are complemented by log_base_wakeup_reason()
   (renamed from log_wakeup_reason()), which is used to report the base wakeup
   interrupts to the wakeup_reason driver.
-- The three functions work together to build a set of trees, one per base
   wakeup reason, whose leaves correspond to the interrupts we are interested
   in; these trees can be arbitrarily complex, though in reality they are most
   often a single node, or a chain of two nodes.  The complexity supports
   arbitrarily involved interrupt dispatch.
-- On resume, we build the trees; once a tree is complete, we walk it
   recursively, and print to kmsg the (more useful) list of wakeup sources;
   similarly, we walk the trees and print the leaves when
   /sys/kernel/wakeup_reasons/last_resume_reason is read.
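As an illustration of how the depth counter recovers the tree structure, here
is a minimal stand-alone mock (ordinary user-space C, not the kernel code; all
names and IRQ numbers are invented):

    #include <stdio.h>

    static unsigned depth;

    static void dispatch(int irq, void (*handler)(int));

    static void leaf_handler(int irq)
    {
    	(void)irq;	/* final consumer; nothing left to demux */
    }

    static void demux_handler(int irq)
    {
    	(void)irq;
    	/* A chained controller maps the base IRQ to the real source and
    	 * re-enters the dispatch path, one level deeper. */
    	dispatch(200 /* e.g. power-button IRQ */, leaf_handler);
    }

    static void dispatch(int irq, void (*handler)(int))
    {
    	unsigned d = depth++;	/* log_possible_wakeup_reason_start() */

    	printf("irq %d seen at depth %u\n", irq, d);
    	handler(irq);
    	--depth;		/* log_possible_wakeup_reason_complete() */
    }

    int main(void)
    {
    	dispatch(100 /* base GPIO-bank IRQ */, demux_handler);
    	return 0;
    }

Running it prints irq 100 at depth 0 and irq 200 at depth 1: the depth-0 entry
corresponds to the node reported by log_base_wakeup_reason(), and the deepest
entry is the leaf that ends up being reported to userspace.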

Signed-off-by: Iliyan Malchev <malchev@google.com>
Change-Id: If8acb2951b61d2c6bcf4d011fe04d7f91057d139
Author: Iliyan Malchev, 2015-03-01 13:13:33 -08:00; committed by Artem Borisov
parent 4dbec3e7db, commit 7e87a4dc87
5 changed files with 516 additions and 83 deletions

@@ -258,7 +258,7 @@ static void gic_show_resume_irq(struct gic_chip_data *gic)
 		i = find_next_bit(pending, gic->max_irq, i+1)) {
 		pr_warning("%s: %d triggered", __func__,
 			i + gic->irq_offset);
-		log_wakeup_reason(i + gic->irq_offset);
+		log_base_wakeup_reason(i + gic->irq_offset);
 	}
 }

@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/wakeup_reason.h>
+#include <linux/irq.h>
 
 static LIST_HEAD(syscore_ops_list);
 static DEFINE_MUTEX(syscore_ops_lock);

@@ -19,13 +19,71 @@
 #define _LINUX_WAKEUP_REASON_H
 
 #include <linux/types.h>
+#include <linux/completion.h>
 
 #define MAX_SUSPEND_ABORT_LEN 256
 
-void log_wakeup_reason(int irq);
+struct wakeup_irq_node {
+	/* @next links this node into the list of all leaf nodes in the
+	 * interrupt trees.
+	 */
+	struct list_head next;
+	/* @irq: IRQ number of this node.
+	 */
+	int irq;
+	struct irq_desc *desc;
+	/* @siblings contains the list of irq nodes at the same depth; at a
+	 * depth of zero, this is the list of base wakeup interrupts.
+	 */
+	struct list_head siblings;
+	/* @parent: only one node in a siblings list has a pointer to the
+	 * parent; that node is the head of the list of siblings.
+	 */
+	struct wakeup_irq_node *parent;
+	/* @child: any node can have one child.
+	 */
+	struct wakeup_irq_node *child;
+	/* @handled: this flag is set to true when the interrupt handler (one of
+	 * handle_.*_irq in kernel/irq/handle.c) for this node gets called; it is
+	 * set to false otherwise.  We use this flag to determine whether a
+	 * subtree rooted at a node has been handled.  When all trees rooted at
+	 * base-wakeup-interrupt nodes have been handled, we stop logging
+	 * potential wakeup interrupts, and construct the list of actual
+	 * wakeups from the leaves of these trees.
+	 */
+	bool handled;
+};
+
+/* Called in the resume path, with interrupts and nonboot cpus disabled; no
+ * need for a spinlock.
+ */
+static inline void start_logging_wakeup_reasons(void)
+{
+	extern bool log_wakeups;
+	extern struct completion wakeups_completion;
+	log_wakeups = true;
+	init_completion(&wakeups_completion);
+}
+
+static inline bool logging_wakeup_reasons(void)
+{
+	extern bool log_wakeups;
+	return ACCESS_ONCE(log_wakeups);
+}
+
+void log_base_wakeup_reason(int irq);
+
 void log_suspend_abort_reason(const char *fmt, ...);
+
+bool log_possible_wakeup_reason(int irq,
+				struct irq_desc *desc,
+				bool (*handler)(unsigned int, struct irq_desc *));
+
 int check_wakeup_reason(int irq);
-const int* get_wakeup_reasons(size_t *len);
+
+const struct list_head *
+get_wakeup_reasons(unsigned long timeout, struct list_head *unfinished);
+
 void clear_wakeup_reasons(void);
 
 #endif /* _LINUX_WAKEUP_REASON_H */
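For context, a consumer of this interface might look as follows.  This is a
sketch only; the caller, the timeout value, and the error handling are
assumptions for illustration, not part of this patch:

    /* Hypothetical resume-path consumer of the interface above. */
    #include <linux/jiffies.h>
    #include <linux/list.h>
    #include <linux/printk.h>
    #include <linux/wakeup_reason.h>

    static void report_wakeups(void)
    {
    	struct list_head unfinished;
    	const struct list_head *wakeups;
    	struct wakeup_irq_node *n;

    	/* Wait up to 100 ms for all interrupt trees to be completed. */
    	wakeups = get_wakeup_reasons(msecs_to_jiffies(100), &unfinished);
    	if (!wakeups)
    		return;	/* timed out; @unfinished holds unhandled IRQs */

    	list_for_each_entry(n, wakeups, next)
    		pr_info("woken up by IRQ %d\n", n->irq);

    	clear_wakeup_reasons();
    }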

@@ -14,6 +14,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/radix-tree.h>
 #include <linux/bitmap.h>
+#include <linux/wakeup_reason.h>
 
 #include "internals.h"
 
@@ -303,16 +304,25 @@ static int irq_expand_nr_irqs(unsigned int nr)
 /**
  * generic_handle_irq - Invoke the handler for a particular irq
  * @irq:	The irq number to handle
  *
+ * returns:
+ *	negative on error
+ *	0 when the interrupt handler was not called
+ *	1 when the interrupt handler was called
  */
 int generic_handle_irq(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
 	if (!desc)
 		return -EINVAL;
-	generic_handle_irq_desc(irq, desc);
-	return 0;
+
+	if (unlikely(logging_wakeup_reasons()))
+		return log_possible_wakeup_reason(irq,
+				desc,
+				generic_handle_irq_desc);
+
+	return generic_handle_irq_desc(irq, desc);
 }
 EXPORT_SYMBOL_GPL(generic_handle_irq);

@@ -26,42 +26,227 @@
 #include <linux/spinlock.h>
 #include <linux/notifier.h>
 #include <linux/suspend.h>
+#include <linux/slab.h>
 
-#define MAX_WAKEUP_REASON_IRQS 32
-static int irq_list[MAX_WAKEUP_REASON_IRQS];
-static int irq_count;
 static bool suspend_abort;
 static char abort_reason[MAX_SUSPEND_ABORT_LEN];
+
+static struct wakeup_irq_node *base_irq_nodes;
+static struct wakeup_irq_node *cur_irq_tree;
+static int cur_irq_tree_depth;
+static LIST_HEAD(wakeup_irqs);
+
+static struct kmem_cache *wakeup_irq_nodes_cache;
 static struct kobject *wakeup_reason;
-static DEFINE_SPINLOCK(resume_reason_lock);
+static spinlock_t resume_reason_lock;
+
+bool log_wakeups __read_mostly;
+struct completion wakeups_completion;
 
 static struct timespec last_xtime; /* wall time before last suspend */
 static struct timespec curr_xtime; /* wall time after last suspend */
 static struct timespec last_stime; /* total_sleep_time before last suspend */
 static struct timespec curr_stime; /* total_sleep_time after last suspend */
 
+static void init_wakeup_irq_node(struct wakeup_irq_node *p, int irq)
+{
+	p->irq = irq;
+	p->desc = irq_to_desc(irq);
+	p->child = NULL;
+	p->parent = NULL;
+	p->handled = false;
+	INIT_LIST_HEAD(&p->siblings);
+	INIT_LIST_HEAD(&p->next);
+}
+
+static struct wakeup_irq_node *alloc_irq_node(int irq)
+{
+	struct wakeup_irq_node *n;
+
+	n = kmem_cache_alloc(wakeup_irq_nodes_cache, GFP_ATOMIC);
+	if (!n) {
+		pr_warning("Failed to log chained wakeup IRQ %d\n",
+			irq);
+		return NULL;
+	}
+
+	init_wakeup_irq_node(n, irq);
+	return n;
+}
+
+static struct wakeup_irq_node *
+search_siblings(struct wakeup_irq_node *root, int irq)
+{
+	bool found = false;
+	struct wakeup_irq_node *n = NULL;
+	BUG_ON(!root);
+
+	if (root->irq == irq)
+		return root;
+
+	list_for_each_entry(n, &root->siblings, siblings) {
+		if (n->irq == irq) {
+			found = true;
+			break;
+		}
+	}
+
+	return found ? n : NULL;
+}
+
+static struct wakeup_irq_node *
+add_to_siblings(struct wakeup_irq_node *root, int irq)
+{
+	struct wakeup_irq_node *n;
+	if (root) {
+		n = search_siblings(root, irq);
+		if (n)
+			return n;
+	}
+	n = alloc_irq_node(irq);
+
+	if (n && root)
+		list_add(&n->siblings, &root->siblings);
+
+	return n;
+}
+
+static struct wakeup_irq_node *add_child(struct wakeup_irq_node *root, int irq)
+{
+	if (!root->child) {
+		root->child = alloc_irq_node(irq);
+		if (!root->child)
+			return NULL;
+
+		root->child->parent = root;
+		return root->child;
+	}
+
+	return add_to_siblings(root->child, irq);
+}
+
+static struct wakeup_irq_node *find_first_sibling(struct wakeup_irq_node *node)
+{
+	struct wakeup_irq_node *n;
+	if (node->parent)
+		return node;
+	list_for_each_entry(n, &node->siblings, siblings) {
+		if (n->parent)
+			return n;
+	}
+	return NULL;
+}
+
+static struct wakeup_irq_node *
+get_base_node(struct wakeup_irq_node *node, unsigned depth)
+{
+	if (!node)
+		return NULL;
+
+	while (depth) {
+		node = find_first_sibling(node);
+		BUG_ON(!node);
+		node = node->parent;
+		depth--;
+	}
+
+	return node;
+}
+
+static const struct list_head *get_wakeup_reasons_nosync(void);
+
+static void print_wakeup_sources(void)
+{
+	struct wakeup_irq_node *n;
+	const struct list_head *wakeups;
+
+	if (suspend_abort) {
+		pr_info("Abort: %s", abort_reason);
+		return;
+	}
+
+	wakeups = get_wakeup_reasons_nosync();
+	list_for_each_entry(n, wakeups, next) {
+		if (n->desc && n->desc->action && n->desc->action->name)
+			pr_info("Resume caused by IRQ %d, %s\n", n->irq,
+				n->desc->action->name);
+		else
+			pr_info("Resume caused by IRQ %d\n", n->irq);
+	}
+}
+
+static bool walk_irq_node_tree(struct wakeup_irq_node *root,
+			       bool (*visit)(struct wakeup_irq_node *, void *),
+			       void *cookie)
+{
+	struct wakeup_irq_node *n, *t;
+
+	if (!root)
+		return true;
+
+	list_for_each_entry_safe(n, t, &root->siblings, siblings) {
+		if (!walk_irq_node_tree(n->child, visit, cookie))
+			return false;
+		if (!visit(n, cookie))
+			return false;
+	}
+
+	if (!walk_irq_node_tree(root->child, visit, cookie))
+		return false;
+	return visit(root, cookie);
+}
+
+static bool is_node_handled(struct wakeup_irq_node *n, void *_p)
+{
+	return n->handled;
+}
+
+static bool base_irq_nodes_done(void)
+{
+	return walk_irq_node_tree(base_irq_nodes, is_node_handled, NULL);
+}
+
+struct buf_cookie {
+	char *buf;
+	int buf_offset;
+};
+
+static bool print_leaf_node(struct wakeup_irq_node *n, void *_p)
+{
+	struct buf_cookie *b = _p;
+	if (!n->child) {
+		if (n->desc && n->desc->action && n->desc->action->name)
+			b->buf_offset +=
+				snprintf(b->buf + b->buf_offset,
+					PAGE_SIZE - b->buf_offset,
+					"%d %s\n",
+					n->irq, n->desc->action->name);
+		else
+			b->buf_offset +=
+				snprintf(b->buf + b->buf_offset,
+					PAGE_SIZE - b->buf_offset,
+					"%d\n",
+					n->irq);
+	}
+	return true;
+}
+
-static ssize_t last_resume_reason_show(struct kobject *kobj, struct kobj_attribute *attr,
+static ssize_t last_resume_reason_show(struct kobject *kobj,
+		struct kobj_attribute *attr,
 		char *buf)
 {
-	int irq_no, buf_offset = 0;
-	struct irq_desc *desc;
-	spin_lock(&resume_reason_lock);
-	if (suspend_abort) {
-		buf_offset = sprintf(buf, "Abort: %s", abort_reason);
-	} else {
-		for (irq_no = 0; irq_no < irq_count; irq_no++) {
-			desc = irq_to_desc(irq_list[irq_no]);
-			if (desc && desc->action && desc->action->name)
-				buf_offset += sprintf(buf + buf_offset, "%d %s\n",
-						irq_list[irq_no], desc->action->name);
-			else
-				buf_offset += sprintf(buf + buf_offset, "%d\n",
-						irq_list[irq_no]);
-		}
-	}
-	spin_unlock(&resume_reason_lock);
-	return buf_offset;
+	unsigned long flags;
+	struct buf_cookie b = {
+		.buf = buf,
+		.buf_offset = 0
+	};
+
+	spin_lock_irqsave(&resume_reason_lock, flags);
+	if (suspend_abort)
+		b.buf_offset = snprintf(buf, PAGE_SIZE, "Abort: %s", abort_reason);
+	else
+		walk_irq_node_tree(base_irq_nodes, print_leaf_node, &b);
+	spin_unlock_irqrestore(&resume_reason_lock, flags);
+
+	return b.buf_offset;
 }
 
 static ssize_t last_suspend_time_show(struct kobject *kobj,
@@ -96,45 +281,128 @@ static struct attribute_group attr_group = {
 	.attrs = attrs,
 };
 
+static inline void stop_logging_wakeup_reasons(void)
+{
+	ACCESS_ONCE(log_wakeups) = false;
+}
+
 /*
- * logs all the wake up reasons to the kernel
- * stores the irqs to expose them to the userspace via sysfs
+ * stores the immediate wakeup irqs; these often aren't the ones seen by
+ * the drivers that registered them, due to chained interrupt controllers,
+ * and multiple-interrupt dispatch.
  */
-void log_wakeup_reason(int irq)
+void log_base_wakeup_reason(int irq)
 {
-	struct irq_desc *desc;
-	desc = irq_to_desc(irq);
-	if (desc && desc->action && desc->action->name)
-		printk(KERN_INFO "Resume caused by IRQ %d, %s\n", irq,
-				desc->action->name);
-	else
-		printk(KERN_INFO "Resume caused by IRQ %d\n", irq);
-
-	spin_lock(&resume_reason_lock);
-	if (irq_count == MAX_WAKEUP_REASON_IRQS) {
-		spin_unlock(&resume_reason_lock);
-		printk(KERN_WARNING "Resume caused by more than %d IRQs\n",
-				MAX_WAKEUP_REASON_IRQS);
-		return;
-	}
-
-	irq_list[irq_count++] = irq;
-	spin_unlock(&resume_reason_lock);
+	/* No locking is needed, since this function is called within
+	 * syscore_resume, with both nonboot CPUs and interrupts disabled.
+	 */
+	base_irq_nodes = add_to_siblings(base_irq_nodes, irq);
+	BUG_ON(!base_irq_nodes);
 }
 
+/* This function is called by generic_handle_irq, which may call itself
+ * recursively.  This happens with interrupts disabled.  Using
+ * log_possible_wakeup_reason, we build a tree of interrupts, tracing the call
+ * stack of generic_handle_irq, for each wakeup source containing the
+ * interrupts actually handled.
+ *
+ * Most of these "trees" would either have a single node (in the event that the
+ * wakeup source is the final interrupt), or consist of a list of two
+ * interrupts, with the wakeup source at the root, and the final dispatched
+ * interrupt at the leaf.
+ *
+ * When *all* wakeup sources have been thus spoken for, this function will
+ * clear the log_wakeups flag, and print the wakeup reasons.
+ *
+ * TODO: percpu
+ */
+struct wakeup_irq_node *
+log_possible_wakeup_reason_start(int irq, struct irq_desc *desc, unsigned depth)
+{
+	BUG_ON(!irqs_disabled() || !logging_wakeup_reasons());
+	BUG_ON((signed)depth < 0);
+
+	/* If suspend was aborted, the base IRQ nodes are missing, and we stop
+	 * logging interrupts immediately.
+	 */
+	if (!base_irq_nodes) {
+		stop_logging_wakeup_reasons();
+		return NULL;
+	}
+
+	/* We assume wakeup interrupts are handled only by the first core. */
+	/* TODO: relax this by having percpu versions of the irq tree */
+	if (smp_processor_id() != 0)
+		return NULL;
+
+	if (depth == 0) {
+		cur_irq_tree_depth = 0;
+		cur_irq_tree = search_siblings(base_irq_nodes, irq);
+	} else if (cur_irq_tree) {
+		if (depth > cur_irq_tree_depth) {
+			BUG_ON(depth - cur_irq_tree_depth > 1);
+			cur_irq_tree = add_child(cur_irq_tree, irq);
+			if (cur_irq_tree)
+				cur_irq_tree_depth++;
+		} else {
+			cur_irq_tree = get_base_node(cur_irq_tree,
+					cur_irq_tree_depth - depth);
+			cur_irq_tree_depth = depth;
+			cur_irq_tree = add_to_siblings(cur_irq_tree, irq);
+		}
+	}
+
+	return cur_irq_tree;
+}
+
+void log_possible_wakeup_reason_complete(struct wakeup_irq_node *n,
+					unsigned depth,
+					bool handled)
+{
+	if (!n)
+		return;
+	n->handled = handled;
+	if (depth == 0) {
+		if (base_irq_nodes_done()) {
+			stop_logging_wakeup_reasons();
+			complete(&wakeups_completion);
+			print_wakeup_sources();
+		}
+	}
+}
+
-int check_wakeup_reason(int irq)
+bool log_possible_wakeup_reason(int irq,
+				struct irq_desc *desc,
+				bool (*handler)(unsigned int, struct irq_desc *))
 {
-	int irq_no;
-	int ret = false;
-
-	spin_lock(&resume_reason_lock);
-	for (irq_no = 0; irq_no < irq_count; irq_no++)
-		if (irq_list[irq_no] == irq) {
-			ret = true;
-			break;
-		}
-	spin_unlock(&resume_reason_lock);
-	return ret;
+	static DEFINE_PER_CPU(unsigned int, depth);
+
+	struct wakeup_irq_node *n;
+	bool handled;
+	unsigned d;
+
+	d = get_cpu_var(depth)++;
+	put_cpu_var(depth);
+
+	n = log_possible_wakeup_reason_start(irq, desc, d);
+
+	handled = handler(irq, desc);
+
+	d = --get_cpu_var(depth);
+	put_cpu_var(depth);
+
+	if (!handled && desc && desc->action)
+		pr_debug("%s: irq %d action %pF not handled\n", __func__,
+			irq, desc->action->handler);
+
+	log_possible_wakeup_reason_complete(n, d, handled);
+
+	return handled;
 }
 
 void log_suspend_abort_reason(const char *fmt, ...)
@@ -153,21 +421,95 @@ void log_suspend_abort_reason(const char *fmt, ...)
 	va_start(args, fmt);
 	snprintf(abort_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);
 	va_end(args);
 	spin_unlock(&resume_reason_lock);
 }
 
-const int* get_wakeup_reasons(size_t *len)
+static bool match_node(struct wakeup_irq_node *n, void *_p)
 {
-	*len = irq_count;
-	return irq_list;
+	int irq = *((int *)_p);
+	return n->irq != irq;
 }
 
+int check_wakeup_reason(int irq)
+{
+	bool found;
+	spin_lock(&resume_reason_lock);
+	found = !walk_irq_node_tree(base_irq_nodes, match_node, &irq);
+	spin_unlock(&resume_reason_lock);
+	return found;
+}
+
+static bool build_leaf_nodes(struct wakeup_irq_node *n, void *_p)
+{
+	struct list_head *wakeups = _p;
+	if (!n->child)
+		list_add(&n->next, wakeups);
+	return true;
+}
+
+static const struct list_head *get_wakeup_reasons_nosync(void)
+{
+	BUG_ON(logging_wakeup_reasons());
+	INIT_LIST_HEAD(&wakeup_irqs);
+	walk_irq_node_tree(base_irq_nodes, build_leaf_nodes, &wakeup_irqs);
+	return &wakeup_irqs;
+}
+
+static bool build_unfinished_nodes(struct wakeup_irq_node *n, void *_p)
+{
+	struct list_head *unfinished = _p;
+	if (!n->handled) {
+		pr_warning("%s: wakeup irq %d was not handled\n",
+			__func__, n->irq);
+		list_add(&n->next, unfinished);
+	}
+	return true;
+}
+
+const struct list_head *get_wakeup_reasons(unsigned long timeout,
+					struct list_head *unfinished)
+{
+	INIT_LIST_HEAD(unfinished);
+
+	if (logging_wakeup_reasons()) {
+		unsigned long signalled = 0;
+		if (timeout)
+			signalled = wait_for_completion_timeout(&wakeups_completion, timeout);
+		if (WARN_ON(!signalled)) {
+			stop_logging_wakeup_reasons();
+			walk_irq_node_tree(base_irq_nodes, build_unfinished_nodes, unfinished);
+			return NULL;
+		}
+		pr_info("%s: waited for %u ms\n",
+				__func__,
+				jiffies_to_msecs(timeout - signalled));
+	}
+
+	return get_wakeup_reasons_nosync();
+}
+
+static bool delete_node(struct wakeup_irq_node *n, void *unused)
+{
+	list_del(&n->siblings);
+	kmem_cache_free(wakeup_irq_nodes_cache, n);
+	return true;
+}
+
 void clear_wakeup_reasons(void)
 {
-	spin_lock(&resume_reason_lock);
-	irq_count = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&resume_reason_lock, flags);
+
+	BUG_ON(logging_wakeup_reasons());
+	walk_irq_node_tree(base_irq_nodes, delete_node, NULL);
+	base_irq_nodes = NULL;
+	cur_irq_tree = NULL;
+	cur_irq_tree_depth = 0;
+	INIT_LIST_HEAD(&wakeup_irqs);
+
 	suspend_abort = false;
-	spin_unlock(&resume_reason_lock);
+	spin_unlock_irqrestore(&resume_reason_lock, flags);
 }
@@ -186,6 +528,13 @@ static int wakeup_reason_pm_event(struct notifier_block *notifier,
 	case PM_POST_SUSPEND:
 		get_xtime_and_monotonic_and_sleep_offset(&curr_xtime, &xtom,
 			&curr_stime);
+
+		/* log_wakeups should have been cleared by now. */
+		if (WARN_ON(logging_wakeup_reasons())) {
+			stop_logging_wakeup_reasons();
+			mb();
+			print_wakeup_sources();
+		}
 		break;
 	default:
 		break;
@@ -197,31 +546,46 @@ static struct notifier_block wakeup_reason_pm_notifier_block = {
 	.notifier_call = wakeup_reason_pm_event,
 };
 
 /* Initializes the sysfs parameter
  * registers the pm_event notifier
  */
 int __init wakeup_reason_init(void)
 {
-	int retval;
-	retval = register_pm_notifier(&wakeup_reason_pm_notifier_block);
-	if (retval)
-		printk(KERN_WARNING "[%s] failed to register PM notifier %d\n",
-				__func__, retval);
+	spin_lock_init(&resume_reason_lock);
+
+	if (register_pm_notifier(&wakeup_reason_pm_notifier_block)) {
+		pr_warning("[%s] failed to register PM notifier\n",
+			__func__);
+		goto fail;
+	}
 
 	wakeup_reason = kobject_create_and_add("wakeup_reasons", kernel_kobj);
 	if (!wakeup_reason) {
-		printk(KERN_WARNING "[%s] failed to create a sysfs kobject\n",
+		pr_warning("[%s] failed to create a sysfs kobject\n",
 			__func__);
-		return 1;
+		goto fail_unregister_pm_notifier;
 	}
 
-	retval = sysfs_create_group(wakeup_reason, &attr_group);
-	if (retval) {
-		kobject_put(wakeup_reason);
-		printk(KERN_WARNING "[%s] failed to create a sysfs group %d\n",
-				__func__, retval);
+	if (sysfs_create_group(wakeup_reason, &attr_group)) {
+		pr_warning("[%s] failed to create a sysfs group\n",
+			__func__);
+		goto fail_kobject_put;
 	}
 
+	wakeup_irq_nodes_cache =
+		kmem_cache_create("wakeup_irq_node_cache",
+					sizeof(struct wakeup_irq_node), 0,
+					0, NULL);
+	if (!wakeup_irq_nodes_cache)
+		goto fail_remove_group;
+
 	return 0;
+
+fail_remove_group:
+	sysfs_remove_group(wakeup_reason, &attr_group);
+fail_kobject_put:
+	kobject_put(wakeup_reason);
+fail_unregister_pm_notifier:
+	unregister_pm_notifier(&wakeup_reason_pm_notifier_block);
+fail:
+	return 1;
 }
 
 late_initcall(wakeup_reason_init);