sdcardfs: Backport and use some 3.10 hlist/hash macros

* Fixes NPD when accessing /config/sdcardfs/packages_gid.list

Change-Id: I4b628ffab5e8a83642439661f97f720946f31daf
Signed-off-by: Paul Keith <javelinanddart@gmail.com>
Paul Keith 2017-09-29 15:48:36 +02:00 committed by Artem Borisov
parent 07b074b39e
commit e675a50f40
4 changed files with 78 additions and 32 deletions
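For readers of this diff who are unfamiliar with the pre-3.9 iterator API used in this tree: the stock hash_for_each_rcu()/hash_for_each_possible_rcu() here take an extra struct hlist_node * cursor in addition to the entry pointer, and packages_list_show() reused a single cursor (h_t) for both its outer table walk and its inner per-bucket walk. That shared cursor is the likely path to the NULL pointer dereference (NPD) reported when reading packages_gid.list. The backported *_new macros derive the cursor from the entry pointer itself, so nested walks no longer share hidden state. Below is a minimal, purely illustrative sketch of the call-site difference; my_table, struct my_entry and my_lookup are hypothetical names, not part of this commit.

/* Illustrative sketch only, not part of the commit. */
#include <linux/hashtable.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>

struct my_entry {
    struct hlist_node hlist;
    int value;
};

static DEFINE_HASHTABLE(my_table, 4);

static void my_lookup(unsigned int hash)
{
    struct my_entry *cur;
    struct hlist_node *h_n;  /* extra cursor the pre-3.9 macros require */

    rcu_read_lock();
    /* Old form in this tree: the caller supplies the hlist_node cursor. */
    hash_for_each_possible_rcu(my_table, cur, h_n, hlist, hash)
        pr_info("old-style walk: %d\n", cur->value);

    /* Backported 3.10 form: the cursor is derived from 'cur' internally. */
    hash_for_each_possible_rcu_new(my_table, cur, hlist, hash)
        pr_info("new-style walk: %d\n", cur->value);
    rcu_read_unlock();
}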


@@ -73,12 +73,11 @@ static inline int qstr_copy(const struct qstr *src, struct qstr *dest)
static appid_t __get_appid(const struct qstr *key)
{
struct hashtable_entry *hash_cur;
struct hlist_node *h_n;
unsigned int hash = key->hash;
appid_t ret_id;
rcu_read_lock();
hash_for_each_possible_rcu(package_to_appid, hash_cur, h_n, hlist, hash) {
hash_for_each_possible_rcu_new(package_to_appid, hash_cur, hlist, hash) {
if (qstr_case_eq(key, &hash_cur->key)) {
ret_id = atomic_read(&hash_cur->value);
rcu_read_unlock();
@@ -100,12 +99,11 @@ appid_t get_appid(const char *key)
static appid_t __get_ext_gid(const struct qstr *key)
{
struct hashtable_entry *hash_cur;
struct hlist_node *h_n;
unsigned int hash = key->hash;
appid_t ret_id;
rcu_read_lock();
hash_for_each_possible_rcu(ext_to_groupid, hash_cur, h_n, hlist, hash) {
hash_for_each_possible_rcu_new(ext_to_groupid, hash_cur, hlist, hash) {
if (qstr_case_eq(key, &hash_cur->key)) {
ret_id = atomic_read(&hash_cur->value);
rcu_read_unlock();
@@ -127,11 +125,10 @@ appid_t get_ext_gid(const char *key)
static appid_t __is_excluded(const struct qstr *app_name, userid_t user)
{
struct hashtable_entry *hash_cur;
struct hlist_node *h_n;
unsigned int hash = app_name->hash;
rcu_read_lock();
hash_for_each_possible_rcu(package_to_userid, hash_cur, h_n, hlist, hash) {
hash_for_each_possible_rcu_new(package_to_userid, hash_cur, hlist, hash) {
if (atomic_read(&hash_cur->value) == user &&
qstr_case_eq(app_name, &hash_cur->key)) {
rcu_read_unlock();
@@ -202,10 +199,9 @@ static int insert_packagelist_appid_entry_locked(const struct qstr *key, appid_t
{
struct hashtable_entry *hash_cur;
struct hashtable_entry *new_entry;
struct hlist_node *h_n;
unsigned int hash = key->hash;
hash_for_each_possible_rcu(package_to_appid, hash_cur, h_n, hlist, hash) {
hash_for_each_possible_rcu_new(package_to_appid, hash_cur, hlist, hash) {
if (qstr_case_eq(key, &hash_cur->key)) {
atomic_set(&hash_cur->value, value);
return 0;
@@ -222,11 +218,10 @@ static int insert_ext_gid_entry_locked(const struct qstr *key, appid_t value)
{
struct hashtable_entry *hash_cur;
struct hashtable_entry *new_entry;
struct hlist_node *h_n;
unsigned int hash = key->hash;
/* An extension can only belong to one gid */
hash_for_each_possible_rcu(ext_to_groupid, hash_cur, h_n, hlist, hash) {
hash_for_each_possible_rcu_new(ext_to_groupid, hash_cur, hlist, hash) {
if (qstr_case_eq(key, &hash_cur->key))
return -EINVAL;
}
@@ -241,11 +236,10 @@ static int insert_userid_exclude_entry_locked(const struct qstr *key, userid_t v
{
struct hashtable_entry *hash_cur;
struct hashtable_entry *new_entry;
struct hlist_node *h_n;
unsigned int hash = key->hash;
/* Only insert if not already present */
hash_for_each_possible_rcu(package_to_userid, hash_cur, h_n, hlist, hash) {
hash_for_each_possible_rcu_new(package_to_userid, hash_cur, hlist, hash) {
if (atomic_read(&hash_cur->value) == value &&
qstr_case_eq(key, &hash_cur->key))
return 0;
@@ -343,18 +337,17 @@ static void free_hashtable_entry(struct hashtable_entry *entry)
static void remove_packagelist_entry_locked(const struct qstr *key)
{
struct hashtable_entry *hash_cur;
struct hlist_node *h_n;
unsigned int hash = key->hash;
struct hlist_node *h_t;
HLIST_HEAD(free_list);
hash_for_each_possible_rcu(package_to_userid, hash_cur, h_n, hlist, hash) {
hash_for_each_possible_rcu_new(package_to_userid, hash_cur, hlist, hash) {
if (qstr_case_eq(key, &hash_cur->key)) {
hash_del_rcu(&hash_cur->hlist);
hlist_add_head(&hash_cur->dlist, &free_list);
}
}
hash_for_each_possible_rcu(package_to_appid, hash_cur, h_n, hlist, hash) {
hash_for_each_possible_rcu_new(package_to_appid, hash_cur, hlist, hash) {
if (qstr_case_eq(key, &hash_cur->key)) {
hash_del_rcu(&hash_cur->hlist);
hlist_add_head(&hash_cur->dlist, &free_list);
@@ -362,7 +355,7 @@ static void remove_packagelist_entry_locked(const struct qstr *key)
}
}
synchronize_rcu();
hlist_for_each_entry_safe(hash_cur, h_t, h_n, &free_list, dlist)
hlist_for_each_entry_safe_new(hash_cur, h_t, &free_list, dlist)
free_hashtable_entry(hash_cur);
}
@@ -377,10 +370,9 @@ static void remove_packagelist_entry(const struct qstr *key)
static void remove_ext_gid_entry_locked(const struct qstr *key, gid_t group)
{
struct hashtable_entry *hash_cur;
struct hlist_node *h_n;
unsigned int hash = key->hash;
hash_for_each_possible_rcu(ext_to_groupid, hash_cur, h_n, hlist, hash) {
hash_for_each_possible_rcu_new(ext_to_groupid, hash_cur, hlist, hash) {
if (qstr_case_eq(key, &hash_cur->key) && atomic_read(&hash_cur->value) == group) {
hash_del_rcu(&hash_cur->hlist);
synchronize_rcu();
@@ -400,19 +392,18 @@ static void remove_ext_gid_entry(const struct qstr *key, gid_t group)
static void remove_userid_all_entry_locked(userid_t userid)
{
struct hashtable_entry *hash_cur;
struct hlist_node *h_n;
struct hlist_node *h_t;
HLIST_HEAD(free_list);
int i;
hash_for_each_rcu(package_to_userid, i, h_t, hash_cur, hlist) {
hash_for_each_rcu_new(package_to_userid, i, hash_cur, hlist) {
if (atomic_read(&hash_cur->value) == userid) {
hash_del_rcu(&hash_cur->hlist);
hlist_add_head(&hash_cur->dlist, &free_list);
}
}
synchronize_rcu();
hlist_for_each_entry_safe(hash_cur, h_t, h_n, &free_list, dlist) {
hlist_for_each_entry_safe_new(hash_cur, h_t, &free_list, dlist) {
free_hashtable_entry(hash_cur);
}
}
@@ -428,10 +419,9 @@ static void remove_userid_all_entry(userid_t userid)
static void remove_userid_exclude_entry_locked(const struct qstr *key, userid_t userid)
{
struct hashtable_entry *hash_cur;
struct hlist_node *h_n;
unsigned int hash = key->hash;
hash_for_each_possible_rcu(package_to_userid, hash_cur, h_n, hlist, hash) {
hash_for_each_possible_rcu_new(package_to_userid, hash_cur, hlist, hash) {
if (qstr_case_eq(key, &hash_cur->key) &&
atomic_read(&hash_cur->value) == userid) {
hash_del_rcu(&hash_cur->hlist);
@@ -453,22 +443,21 @@ static void remove_userid_exclude_entry(const struct qstr *key, userid_t userid)
static void packagelist_destroy(void)
{
struct hashtable_entry *hash_cur;
struct hlist_node *h_n;
struct hlist_node *h_t;
HLIST_HEAD(free_list);
int i;
mutex_lock(&sdcardfs_super_list_lock);
hash_for_each_rcu(package_to_appid, i, h_t, hash_cur, hlist) {
hash_for_each_rcu_new(package_to_appid, i, hash_cur, hlist) {
hash_del_rcu(&hash_cur->hlist);
hlist_add_head(&hash_cur->dlist, &free_list);
}
hash_for_each_rcu(package_to_userid, i, h_t, hash_cur, hlist) {
hash_for_each_rcu_new(package_to_userid, i, hash_cur, hlist) {
hash_del_rcu(&hash_cur->hlist);
hlist_add_head(&hash_cur->dlist, &free_list);
}
synchronize_rcu();
hlist_for_each_entry_safe(hash_cur, h_t, h_n, &free_list, dlist)
hlist_for_each_entry_safe_new(hash_cur, h_t, &free_list, dlist)
free_hashtable_entry(hash_cur);
mutex_unlock(&sdcardfs_super_list_lock);
pr_info("sdcardfs: destroyed packagelist pkgld\n");
@@ -517,12 +506,11 @@ static ssize_t package_details_excluded_userids_show(struct package_details *pac
char *page)
{
struct hashtable_entry *hash_cur;
struct hlist_node *h_n;
unsigned int hash = package_details->name.hash;
int count = 0;
rcu_read_lock();
hash_for_each_possible_rcu(package_to_userid, hash_cur, h_n, hlist, hash) {
hash_for_each_possible_rcu_new(package_to_userid, hash_cur, hlist, hash) {
if (qstr_case_eq(&package_details->name, &hash_cur->key))
count += scnprintf(page + count, PAGE_SIZE - count,
"%d ", atomic_read(&hash_cur->value));
@@ -765,18 +753,17 @@ static ssize_t packages_list_show(struct packages *packages,
{
struct hashtable_entry *hash_cur_app;
struct hashtable_entry *hash_cur_user;
struct hlist_node *h_t;
int i;
int count = 0, written = 0;
const char errormsg[] = "<truncated>\n";
unsigned int hash;
rcu_read_lock();
hash_for_each_rcu(package_to_appid, i, h_t, hash_cur_app, hlist) {
hash_for_each_rcu_new(package_to_appid, i, hash_cur_app, hlist) {
written = scnprintf(page + count, PAGE_SIZE - sizeof(errormsg) - count, "%s %d\n",
hash_cur_app->key.name, atomic_read(&hash_cur_app->value));
hash = hash_cur_app->key.hash;
hash_for_each_possible_rcu(package_to_userid, hash_cur_user, h_t, hlist, hash) {
hash_for_each_possible_rcu_new(package_to_userid, hash_cur_user, hlist, hash) {
if (qstr_case_eq(&hash_cur_app->key, &hash_cur_user->key)) {
written += scnprintf(page + count + written - 1,
PAGE_SIZE - sizeof(errormsg) - count - written + 1,


@@ -135,6 +135,18 @@ static inline void hash_del_rcu(struct hlist_node *node)
for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
hlist_for_each_entry_rcu(obj, node, &name[bkt], member)
/**
* hash_for_each_rcu_new - iterate over a rcu enabled hashtable
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each_rcu_new(name, bkt, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry_rcu_new(obj, &name[bkt], member)
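A minimal usage sketch for this backported whole-table iterator; pkg_table, struct pkg and pkg_dump are hypothetical names assumed for illustration, not part of the commit.

#include <linux/hashtable.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>

struct pkg {
    struct hlist_node hlist;
    int appid;
};

static DEFINE_HASHTABLE(pkg_table, 4);

static void pkg_dump(void)
{
    struct pkg *p;
    int bkt;

    rcu_read_lock();
    /* 'p' is both the loop cursor and the entry pointer; no hlist_node needed. */
    hash_for_each_rcu_new(pkg_table, bkt, p, hlist)
        pr_info("appid=%d\n", p->appid);
    rcu_read_unlock();
}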
/**
* hash_for_each_safe - iterate over a hashtable safe against removal of
* hash entry
@@ -174,6 +186,19 @@ static inline void hash_del_rcu(struct hlist_node *node)
#define hash_for_each_possible_rcu(name, obj, node, member, key) \
hlist_for_each_entry_rcu(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
/**
* hash_for_each_possible_rcu_new - iterate over all possible objects hashing to the
* same bucket in an rcu enabled hashtable
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible_rcu_new(name, obj, member, key) \
hlist_for_each_entry_rcu_new(obj, &name[hash_min(key, HASH_BITS(name))],\
member)
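A hedged sketch of a lookup helper built on this macro, mirroring how __get_appid() uses it in packagelist.c above; gid_table, struct gid_map and gid_lookup are hypothetical names, not part of the commit.

#include <linux/hashtable.h>
#include <linux/rcupdate.h>
#include <linux/string.h>

struct gid_map {
    struct hlist_node hlist;
    const char *name;
    int gid;
};

static DEFINE_HASHTABLE(gid_table, 4);

static int gid_lookup(const char *name, unsigned int hash)
{
    struct gid_map *m;
    int gid = -1;

    rcu_read_lock();
    /* Only the bucket selected by hash_min(hash, HASH_BITS(gid_table)) is walked. */
    hash_for_each_possible_rcu_new(gid_table, m, hlist, hash) {
        if (!strcmp(m->name, name)) {
            gid = m->gid;
            break;
        }
    }
    rcu_read_unlock();
    return gid;
}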
/**
* hash_for_each_possible_safe - iterate over all possible objects hashing to the
* same bucket safe against removals


@@ -659,6 +659,11 @@ static inline void hlist_move_list(struct hlist_head *old,
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_entry_safe(ptr, type, member) \
({ typeof(ptr) ____ptr = (ptr); \
____ptr ? hlist_entry(____ptr, type, member) : NULL; \
})
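hlist_entry_safe() is what lets the *_new iterators drop the separate node cursor: it maps a possibly-NULL hlist_node pointer to its containing entry and yields NULL at the end of a list instead of applying container_of() to NULL. A tiny illustrative sketch, with struct item and first_item as hypothetical names:

#include <linux/list.h>

struct item {
    struct hlist_node node;
    int val;
};

static struct item *first_item(struct hlist_head *head)
{
    /* An empty list (head->first == NULL) yields NULL, not a bogus pointer. */
    return hlist_entry_safe(head->first, struct item, node);
}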
#define hlist_for_each(pos, head) \
for (pos = (head)->first; pos ; pos = pos->next)
@@ -716,4 +721,16 @@ static inline void hlist_move_list(struct hlist_head *old,
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = n)
/**
* hlist_for_each_entry_safe_new - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop cursor.
* @n: another &struct hlist_node to use as temporary storage
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_safe_new(pos, n, head, member) \
for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
pos && ({ n = pos->member.next; 1; }); \
pos = hlist_entry_safe(n, typeof(*pos), member))
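A sketch of the deferred-free pattern packagelist.c pairs with this iterator: entries are first moved onto a private list, and after synchronize_rcu() they are freed while the iterator caches the next node so freeing the current entry is safe. struct item and release_all are hypothetical names, not part of the commit.

#include <linux/list.h>
#include <linux/slab.h>

struct item {
    struct hlist_node dlist;
    int val;
};

static void release_all(struct hlist_head *free_list)
{
    struct item *cur;
    struct hlist_node *tmp;

    /* 'tmp' already points at the next node, so kfree(cur) is safe here. */
    hlist_for_each_entry_safe_new(cur, tmp, free_list, dlist)
        kfree(cur);
}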
#endif


@@ -447,6 +447,23 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference_raw(hlist_next_rcu(pos)))
/**
* hlist_for_each_entry_rcu_new - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define hlist_for_each_entry_rcu_new(pos, head, member) \
for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
typeof(*(pos)), member); \
pos; \
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
&(pos)->member)), typeof(*(pos)), member))
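A minimal read-side sketch for this iterator on a bare RCU-protected hlist; struct item and reader_count are hypothetical, and writers are assumed to use hlist_add_head_rcu()/hlist_del_rcu() under their own lock.

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct item {
    struct hlist_node node;
    int val;
};

static int reader_count(struct hlist_head *head)
{
    struct item *it;
    int n = 0;

    rcu_read_lock();  /* guards the traversal against concurrent updates */
    hlist_for_each_entry_rcu_new(it, head, node)
        n++;
    rcu_read_unlock();
    return n;
}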
/**
* hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
* @tpos: the type * to use as a loop cursor.