Mirror of https://github.com/followmsi/android_kernel_google_msm.git (synced 2024-11-06 23:17:41 +00:00)
list_lru: dynamically adjust node arrays
We currently use a compile-time constant to size the node array for the
list_lru structure. Due to this, we don't need to allocate any memory at
initialization time. But as a consequence, the structures that contain
embedded list_lru lists can become far too big (the superblock, for
instance, contains two of them). This patch ameliorates the situation by
dynamically allocating the node array, sized to the firmware-provided
nr_node_ids.

Change-Id: If8f8d671d505709d22918b023ed1935b12c06c89
Signed-off-by: Glauber Costa <glommer@openvz.org>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 9fda83a755
commit b21bebc0f8

2 changed files with 15 additions and 12 deletions
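The net effect of the patch, as a caller sees it: list_lru_init() now allocates the per-node array and can fail, and the allocation must be released with the new list_lru_destroy(). A minimal caller sketch follows; the embedding structure and function names are hypothetical, only list_lru_init()/list_lru_destroy() come from this patch:

#include <linux/list_lru.h>

/* Hypothetical container embedding two LRU lists, as the superblock does. */
struct my_cache {
	struct list_lru dentry_lru;
	struct list_lru inode_lru;
};

static int my_cache_setup(struct my_cache *c)
{
	int err;

	err = list_lru_init(&c->dentry_lru);	/* may now return -ENOMEM */
	if (err)
		return err;

	err = list_lru_init(&c->inode_lru);
	if (err) {
		list_lru_destroy(&c->dentry_lru);	/* unwind the first node array */
		return err;
	}
	return 0;
}

static void my_cache_teardown(struct my_cache *c)
{
	list_lru_destroy(&c->dentry_lru);
	list_lru_destroy(&c->inode_lru);
}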
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -27,20 +27,11 @@ struct list_lru_node {
 } ____cacheline_aligned_in_smp;
 
 struct list_lru {
-	/*
-	 * Because we use a fixed-size array, this struct can be very big if
-	 * MAX_NUMNODES is big. If this becomes a problem this is fixable by
-	 * turning this into a pointer and dynamically allocating this to
-	 * nr_node_ids. This quantity is firwmare-provided, and still would
-	 * provide room for all nodes at the cost of a pointer lookup and an
-	 * extra allocation. Because that allocation will most likely come from
-	 * a different slab cache than the main structure holding this
-	 * structure, we may very well fail.
-	 */
-	struct list_lru_node node[MAX_NUMNODES];
+	struct list_lru_node *node;
 	nodemask_t	active_nodes;
 };
 
+void list_lru_destroy(struct list_lru *lru);
 int list_lru_init(struct list_lru *lru);
 
 /**
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/list_lru.h>
+#include <linux/slab.h>
 
 bool list_lru_add(struct list_lru *lru, struct list_head *item)
 {
@@ -115,9 +116,14 @@ EXPORT_SYMBOL_GPL(list_lru_walk_node);
 int list_lru_init(struct list_lru *lru)
 {
 	int i;
+	size_t size = sizeof(*lru->node) * nr_node_ids;
+
+	lru->node = kzalloc(size, GFP_KERNEL);
+	if (!lru->node)
+		return -ENOMEM;
 
 	nodes_clear(lru->active_nodes);
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		spin_lock_init(&lru->node[i].lock);
 		INIT_LIST_HEAD(&lru->node[i].list);
 		lru->node[i].nr_items = 0;
@@ -125,3 +131,9 @@ int list_lru_init(struct list_lru *lru)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(list_lru_init);
+
+void list_lru_destroy(struct list_lru *lru)
+{
+	kfree(lru->node);
+}
+EXPORT_SYMBOL_GPL(list_lru_destroy);
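For a rough sense of the space this saves, a small illustrative sketch; the node size and MAX_NUMNODES values below are assumptions for a generic NUMA-enabled config, not taken from this tree:

/* Illustrative arithmetic only; real values depend on the kernel config. */
#include <stddef.h>

#define ASSUMED_NODE_SIZE	64	/* one cacheline per ____cacheline_aligned_in_smp node */
#define ASSUMED_MAX_NUMNODES	1024	/* e.g. CONFIG_NODES_SHIFT=10 */

/* Before: every embedded struct list_lru carried the full array. */
static const size_t embedded_cost = ASSUMED_NODE_SIZE * ASSUMED_MAX_NUMNODES;	/* 64 KiB */

/* After: a machine with nr_node_ids == 1 allocates a single node. */
static const size_t dynamic_cost = ASSUMED_NODE_SIZE * 1;	/* 64 bytes */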