mirror of
https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
synced 2024-11-01 02:21:16 +00:00
4855b811a1
akpm: Alex's ancient page-owner tracking code, resurrected yet again. Someone(tm) should mainline this. Please see Ingo's thoughts at https://lkml.org/lkml/2009/4/1/137. PAGE_OWNER tracks free pages by setting page->order to -1. However, it is set during __free_pages() which is not the only free path as __pagevec_free() and free_compound_page() do not go through __free_pages(). This leads to a situation where free pages are visible in page_owner which is confusing and might be interpreted as a memory leak. This patch sets page->owner when PageBuddy is set. It also prints a warning to the kernel log if a free page is found that does not appear free to PAGE_OWNER. This should be considered a fix to page-owner-tracking-leak-detector.patch. This only applies to -mm as PAGE_OWNER is not in mainline. [mel@csn.ul.ie: print out PAGE_OWNER statistics in relation to fragmentation avoidance] [mel.ul.ie: allow PAGE_OWNER to be set on any architecture] Signed-off-by: Mel Gorman <mel@csn.ul.ie> Acked-by: Andy Whitcroft <apw@shadowen.org> Signed-off-by: Mel Gorman <mel@csn.ul.ie> Cc: Christoph Lameter <cl@linux-foundation.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Laura Abbott <lauraa@codeaurora.org> From: Dave Hansen <dave@linux.vnet.ibm.com> Subject: debugging-keep-track-of-page-owners-fix Updated 12/4/2012 - should apply to 3.7 kernels. I did a quick sniff-test to make sure that this boots and produces some sane output, but it's not been exhaustively tested. * Moved file over to debugfs (no reason to keep polluting /proc) * Now using generic stack tracking infrastructure * Added check for MIGRATE_CMA pages to explicitly count them as movable. The new snprint_stack_trace() probably belongs in its own patch if this were to get merged, but it won't kill anyone as it stands. 
Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com> Cc: Mel Gorman <mel@csn.ul.ie> Cc: Andy Whitcroft <apw@shadowen.org> Cc: Mel Gorman <mel@csn.ul.ie> Cc: Christoph Lameter <cl@linux-foundation.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Laura Abbott <lauraa@codeaurora.org> From: Minchan Kim <minchan@kernel.org> Subject: Fix wrong EOF compare The C standard allows the character type char to be signed or unsigned, depending on the platform and compiler. Most systems use signed char, but those based on PowerPC and ARM processors typically use unsigned char. This can lead to unexpected results when the variable is compared with EOF(-1). It happened on my ARM system and this patch fixes it. Signed-off-by: Minchan Kim <minchan@kernel.org> Cc: Dave Hansen <dave@linux.vnet.ibm.com> Cc: Michal Nazarewicz <mina86@mina86.com> Cc: Randy Dunlap <rdunlap@infradead.org> From: Andrew Morton <akpm@linux-foundation.org> Subject: debugging-keep-track-of-page-owners-fix-2-fix Reduce scope of `val', fix coding style Cc: Minchan Kim <minchan@kernel.org> From: Minchan Kim <minchan@kernel.org> Subject: Enhance read_block of page_owner.c The read_block reads chars one by one until it meets two newlines. That is not good for performance, and the current code is not in good shape for readability. This patch enhances speed and cleans it up. Signed-off-by: Minchan Kim <minchan@kernel.org> Signed-off-by: Michal Nazarewicz <mina86@mina86.com> Cc: Dave Hansen <dave@linux.vnet.ibm.com> From: Andrew Morton <akpm@linux-foundation.org> Subject: debugging-keep-track-of-page-owner-now-depends-on-stacktrace_support-fix stomp sparse gfp_t warnings Cc: Dave Hansen <dave@linux.vnet.ibm.com> Cc: Fengguang Wu <fengguang.wu@intel.com> Cc: Johannes Weiner <hannes@cmpxchg.org> From: Dave Hansen <dave@linux.vnet.ibm.com> Subject: PAGE_OWNER now depends on STACKTRACE_SUPPORT One of the enhancements I made to the PAGE_OWNER code was to make it use the generic stack trace support.
However, there are some architectures that do not support it, like m68k. So, make PAGE_OWNER also depend on having STACKTRACE_SUPPORT. This isn't ideal since it restricts the number of places PAGE_OWNER runs now, but it at least hits all the major architectures. tree: git://git.cmpxchg.org/linux-mmotm.git master head: 83b324c5ff5cca85bbeb2ba913d465f108afe472 commit: 2a561c9d47c295ed91984c2b916a4dd450ee0279 [484/499] debugging-keep-track-of-page-owners-fix config: make ARCH=m68k allmodconfig All warnings: warning: (PAGE_OWNER && STACK_TRACER && BLK_DEV_IO_TRACE && KMEMCHECK) selects STACKTRACE which has unmet direct dependencies (STACKTRACE_SUPPORT) Change-Id: I8d9370733ead1c6a45bb034acc7aaf96e0901fea Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com> Reported-by: Fengguang Wu <fengguang.wu@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Git-commit: c6ca98b4acab6ae45cf0f9d93de9c717186e62cb Git-repo: http://git.cmpxchg.org/cgit/linux-mmotm.git/ Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
163 lines
3.9 KiB
C
163 lines
3.9 KiB
C
#include <linux/debugfs.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/hugetlb.h>
|
|
#include <linux/huge_mm.h>
|
|
#include <linux/mount.h>
|
|
#include <linux/seq_file.h>
|
|
#include <linux/highmem.h>
|
|
#include <linux/ptrace.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/pagemap.h>
|
|
#include <linux/mempolicy.h>
|
|
#include <linux/rmap.h>
|
|
#include <linux/swap.h>
|
|
#include <linux/swapops.h>
|
|
|
|
#include <asm/elf.h>
|
|
#include <asm/uaccess.h>
|
|
#include <asm/tlbflush.h>
|
|
#include "internal.h"
|
|
|
|
#include <linux/bootmem.h>
|
|
#include <linux/kallsyms.h>
|
|
|
|
static ssize_t
/*
 * read_page_owner - debugfs read handler for /sys/kernel/debug/page_owner.
 *
 * Each read() call emits the ownership record for exactly one allocated
 * page: its allocation order, gfp_mask, migratetype/mobility information,
 * a page-flag summary line, and the stack trace captured at allocation
 * time.  The file offset (*ppos) is used as a cursor in PFN space, offset
 * from min_low_pfn, so successive reads walk all of physical memory.
 *
 * Returns the number of bytes copied to @buf, 0 at end of memory (EOF),
 * or a negative errno (-ENOMEM if @count is too small for one record or
 * the bounce buffer cannot be allocated, -EFAULT on copy_to_user failure).
 *
 * NOTE(review): free pages are marked by page->order == -1 elsewhere in
 * this patch set, which is why "page->order >= 0" below is used as the
 * "page is allocated" test — confirm against the companion mm changes.
 */
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	char *kbuf;		/* kernel bounce buffer, copied to user at the end */
	int ret = 0;
	ssize_t num_written = 0;
	int blocktype = 0, pagetype = 0;

	page = NULL;
	/* *ppos is a PFN cursor relative to the first managed low PFN */
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			/* -1 because the for loop increments pfn again */
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);

		/* Catch situations where free pages have a bad ->order */
		if (page->order >= 0 && PageBuddy(page))
			printk(KERN_WARNING
				"PageOwner info inaccurate for PFN %lu\n",
				pfn);

		/* Stop search if page is allocated and has trace info */
		if (page->order >= 0 && page->trace.nr_entries) {
			break;
		}
	}

	if (!pfn_valid(pfn))
		return 0;
	/*
	 * If memory does not end at a SECTION_SIZE boundary, then
	 * we might have a pfn_valid() above max_pfn
	 */
	if (pfn >= max_pfn)
		return 0;

	/* Record the next PFN to read in the file offset */
	*ppos = (pfn - min_low_pfn) + 1;

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count, "Page allocated via order %d, mask 0x%x\n",
			page->order, page->gfp_mask);
	/* snprintf returns the would-be length; >= count means truncation */
	if (ret >= count) {
		ret = -ENOMEM;
		goto out;
	}

	/* Print information relevant to grouping pages by mobility */
	blocktype = get_pageblock_migratetype(page);
	pagetype = allocflags_to_migratetype(page->gfp_mask);
	ret += snprintf(kbuf+ret, count-ret,
		"PFN %lu Block %lu type %d %s "
		"Flags %s%s%s%s%s%s%s%s%s%s%s%s\n",
		pfn,
		pfn >> pageblock_order,
		blocktype,
		blocktype != pagetype ? "Fallback" : " ",
		PageLocked(page)	? "K" : " ",
		PageError(page)		? "E" : " ",
		PageReferenced(page)	? "R" : " ",
		PageUptodate(page)	? "U" : " ",
		PageDirty(page)		? "D" : " ",
		PageLRU(page)		? "L" : " ",
		PageActive(page)	? "A" : " ",
		PageSlab(page)		? "S" : " ",
		PageWriteback(page)	? "W" : " ",
		PageCompound(page)	? "C" : " ",
		PageSwapCache(page)	? "B" : " ",
		PageMappedToDisk(page)	? "M" : " ");
	if (ret >= count) {
		ret = -ENOMEM;
		goto out;
	}

	num_written = ret;

	/* Append the allocation-time stack trace captured in page->trace */
	ret = snprint_stack_trace(kbuf + num_written, count - num_written,
				&page->trace, 0);
	if (ret >= count - num_written) {
		ret = -ENOMEM;
		goto out;
	}
	num_written += ret;

	/* Blank line separates records so userspace can split on "\n\n" */
	ret = snprintf(kbuf + num_written, count - num_written, "\n");
	if (ret >= count - num_written) {
		ret = -ENOMEM;
		goto out;
	}

	num_written += ret;
	ret = num_written;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;
out:
	kfree(kbuf);
	return ret;
}
|
|
|
|
static struct file_operations proc_page_owner_operations = {
|
|
.read = read_page_owner,
|
|
};
|
|
|
|
/*
 * pageowner_init - create the page_owner debugfs file at boot.
 *
 * Returns 0 on success, -ENODEV when debugfs is not configured, and
 * -ENOMEM if the debugfs file could not be created.
 *
 * Bug fix: debugfs_create_file() returns NULL on ordinary failure and an
 * ERR_PTR (-ENODEV) only when debugfs support is compiled out.  The old
 * code checked IS_ERR() alone, so a NULL return (creation failure) was
 * silently reported as success.  IS_ERR_OR_NULL() covers both cases.
 */
static int __init pageowner_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
			NULL, &proc_page_owner_operations);
	if (IS_ERR_OR_NULL(dentry))
		return dentry ? PTR_ERR(dentry) : -ENOMEM;
	return 0;
}
module_init(pageowner_init);
|