[PATCH] zoned vm counters: conversion of nr_bounce to per zone counter

Conversion of nr_bounce to a per zone counter

nr_bounce is only used for /proc output, so it could be left as an event
counter.  However, event counters may not be accurate, and nr_bounce
categorizes a type of page within a zone, so it really needs to be a per
zone counter as well.

[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
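
For readers new to the zoned vm counter work, here is a simplified,
stand-alone user-space model of the idea (illustration only, not kernel
code; the zone ids and helper names below are made up).  Each zone keeps
its own NR_BOUNCE count, inc_zone_page_state()/dec_zone_page_state()
adjust the count of the zone that owns the page, and the value reported
in /proc is the sum over all zones, so it tracks the pages currently in
use rather than a raw count of allocation events.

#include <stdio.h>

#define NR_ZONES 3

enum zone_stat_item { NR_BOUNCE, NR_VM_ZONE_STAT_ITEMS };

struct zone {
	long vm_stat[NR_VM_ZONE_STAT_ITEMS];	/* per zone page counts */
};

static struct zone zones[NR_ZONES];

/* Models inc_zone_page_state(): bump the counter of the owning zone. */
static void inc_zone_state(int zone_id, enum zone_stat_item item)
{
	zones[zone_id].vm_stat[item]++;
}

/* Models dec_zone_page_state(): drop the counter of the owning zone. */
static void dec_zone_state(int zone_id, enum zone_stat_item item)
{
	zones[zone_id].vm_stat[item]--;
}

/* Models global_page_state(): sum the item over all zones. */
static long global_state(enum zone_stat_item item)
{
	long sum = 0;

	for (int i = 0; i < NR_ZONES; i++)
		sum += zones[i].vm_stat[item];
	return sum;
}

int main(void)
{
	inc_zone_state(0, NR_BOUNCE);	/* bounce page allocated in zone 0 */
	inc_zone_state(2, NR_BOUNCE);	/* another one in zone 2 */
	dec_zone_state(0, NR_BOUNCE);	/* the first one was freed */

	printf("bounce pages in use: %ld\n", global_state(NR_BOUNCE));
	return 0;
}

Built with any C99 compiler, this prints "bounce pages in use: 1".
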
Christoph Lameter, 2006-06-30 01:55:41 -07:00, committed by Linus Torvalds
commit d2c5e30c9a, parent fd39fc8561
6 changed files with 9 additions and 5 deletions
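
As a usage note (an illustration, not part of the patch): on a kernel
with this change applied, the new counter shows up as a "Bounce:" line
in /proc/meminfo and in each node's meminfo output, and can be read
like any other field.  A minimal sketch:

#include <stdio.h>

int main(void)
{
	char line[256];
	unsigned long kb;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f) {
		perror("/proc/meminfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* The patch below adds the "Bounce: %8lu kB" line. */
		if (sscanf(line, "Bounce: %lu kB", &kb) == 1) {
			printf("bounce buffer pages: %lu kB\n", kb);
			break;
		}
	}
	fclose(f);
	return 0;
}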


@@ -65,6 +65,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 "Node %d AnonPages: %8lu kB\n"
 "Node %d PageTables: %8lu kB\n"
 "Node %d NFS Unstable: %8lu kB\n"
+"Node %d Bounce: %8lu kB\n"
 "Node %d Slab: %8lu kB\n",
 nid, K(i.totalram),
 nid, K(i.freeram),
@@ -82,6 +83,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 nid, K(node_page_state(nid, NR_ANON_PAGES)),
 nid, K(node_page_state(nid, NR_PAGETABLE)),
 nid, K(node_page_state(nid, NR_UNSTABLE_NFS)),
+nid, K(node_page_state(nid, NR_BOUNCE)),
 nid, K(node_page_state(nid, NR_SLAB)));
 n += hugetlb_report_node_meminfo(nid, buf + n);
 return n;


@@ -171,6 +171,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 "Slab: %8lu kB\n"
 "PageTables: %8lu kB\n"
 "NFS Unstable: %8lu kB\n"
+"Bounce: %8lu kB\n"
 "CommitLimit: %8lu kB\n"
 "Committed_AS: %8lu kB\n"
 "VmallocTotal: %8lu kB\n"
@@ -196,6 +197,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 K(global_page_state(NR_SLAB)),
 K(global_page_state(NR_PAGETABLE)),
 K(global_page_state(NR_UNSTABLE_NFS)),
+K(global_page_state(NR_BOUNCE)),
 K(allowed),
 K(committed),
 (unsigned long)VMALLOC_TOTAL >> 10,


@@ -56,6 +56,7 @@ enum zone_stat_item {
 NR_FILE_DIRTY,
 NR_WRITEBACK,
 NR_UNSTABLE_NFS, /* NFS unstable pages */
+NR_BOUNCE,
 NR_VM_ZONE_STAT_ITEMS };
 
 struct per_cpu_pages {


@@ -67,7 +67,6 @@ struct page_state {
 unsigned long allocstall; /* direct reclaim calls */
 
 unsigned long pgrotated; /* pages rotated to tail of the LRU */
-unsigned long nr_bounce; /* pages for bounce buffers */
 };
 
 extern void get_full_page_state(struct page_state *ret);


@@ -315,8 +315,8 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
 if (bvec->bv_page == org_vec->bv_page)
 continue;
 
-mempool_free(bvec->bv_page, pool);
-dec_page_state(nr_bounce);
+dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
+mempool_free(bvec->bv_page, pool);
 }
 
 bio_endio(bio_orig, bio_orig->bi_size, err);
@@ -397,7 +397,7 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
 to->bv_page = mempool_alloc(pool, q->bounce_gfp);
 to->bv_len = from->bv_len;
 to->bv_offset = from->bv_offset;
-inc_page_state(nr_bounce);
+inc_zone_page_state(to->bv_page, NR_BOUNCE);
 
 if (rw == WRITE) {
 char *vto, *vfrom;


@@ -381,6 +381,7 @@ static char *vmstat_text[] = {
 "nr_dirty",
 "nr_writeback",
 "nr_unstable",
+"nr_bounce",
 
 /* Event counters */
 "pgpgin",
@@ -428,7 +429,6 @@ static char *vmstat_text[] = {
 "allocstall",
 
 "pgrotated",
-"nr_bounce",
 };
 
 /*