hlist: drop the node parameter from iterators

The list for-each-entry iterators were conceived as:

        list_for_each_entry(pos, head, member)

I'm not sure why, but the hlist ones were greedy and wanted an extra parameter:

        hlist_for_each_entry(tpos, pos, head, member)

Why did they need the extra pos parameter? I'm not quite sure. Not only
do they not really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.
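
As a minimal before/after sketch (the struct foo type, the bucket head,
and the printk are hypothetical, not taken from the patch), the change
makes the hlist walk read exactly like its list counterpart:

        struct foo {
                int value;
                struct hlist_node member;
        };

        struct foo *obj;
        struct hlist_node *pos;         /* old API: throwaway cursor */

        /* Before: an extra cursor had to be declared and passed. */
        hlist_for_each_entry(obj, pos, &bucket, member)
                printk("%d\n", obj->value);

        /* After: same shape as list_for_each_entry(). */
        hlist_for_each_entry(obj, &bucket, member)
                printk("%d\n", obj->value);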

Besides the semantic patch, there was some manual work required:

 - Fix up the actual hlist iterators in linux/list.h
 - Fix up the declaration of other iterators based on the hlist ones.
 - A small number of places were using the 'node' parameter directly;
 these were modified to use 'obj->member' instead (see the sketch after
 this list).
 - Coccinelle didn't handle the hlist_for_each_entry_safe iterator
 properly, so those had to be fixed up manually.
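
For example, a loop that handed the iterator's node on to another hlist
primitive (a sketch with hypothetical 'obj', 'new', and 'key' names):

        hlist_for_each_entry(obj, node, head, member)
                if (obj->key == key)
                        hlist_add_before(&new->member, node);

became:

        hlist_for_each_entry(obj, head, member)
                if (obj->key == key)
                        hlist_add_before(&new->member, &obj->member);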

The semantic patch, which is mostly the work of Peter Senna Tschudin, follows:

@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host;

type T;
expression a,c,d,e;
identifier b;
statement S;
@@

-T b;
    <+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
    ...+>
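
For reference, a rule like this is typically applied tree-wide with
Coccinelle's spatch tool; a minimal sketch, assuming the rule above is
saved as drop-node.cocci:

        spatch --sp-file drop-node.cocci --in-place --dir .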

[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foundation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit b67bfe0d42 (parent 1e142b29e2)
Author: Sasha Levin, 2013-02-27 17:06:00 -08:00; committed by Linus Torvalds
218 changed files with 987 additions and 1494 deletions


@@ -395,7 +395,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
@@ -415,7 +415,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -442,7 +442,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 	kretprobe_hash_unlock(current, &flags);
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}


@@ -423,7 +423,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =
 		((struct fnptr *)kretprobe_trampoline)->ip;
@@ -444,7 +444,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -461,7 +461,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	regs->cr_iip = orig_ret_address;
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -487,7 +487,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}


@@ -598,7 +598,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
@@ -618,7 +618,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -645,7 +645,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}


@@ -310,7 +310,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
@@ -330,7 +330,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -357,7 +357,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}


@@ -124,7 +124,6 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hpte_cache *pte;
-	struct hlist_node *node;
 	int i;
 	rcu_read_lock();
@@ -132,7 +131,7 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
 	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
 		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
-		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
 			invalidate_pte(vcpu, pte);
 	}
@@ -143,7 +142,6 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 	/* Find the list of entries in the map */
@@ -152,7 +150,7 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
 	rcu_read_lock();
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_rcu(pte, node, list, list_pte)
+	hlist_for_each_entry_rcu(pte, list, list_pte)
 		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
 			invalidate_pte(vcpu, pte);
@@ -163,7 +161,6 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 	/* Find the list of entries in the map */
@@ -173,7 +170,7 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
 	rcu_read_lock();
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_rcu(pte, node, list, list_pte_long)
+	hlist_for_each_entry_rcu(pte, list, list_pte_long)
 		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
 			invalidate_pte(vcpu, pte);
@@ -207,7 +204,6 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 	u64 vp_mask = 0xfffffffffULL;
@@ -216,7 +212,7 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
 	rcu_read_lock();
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_rcu(pte, node, list, list_vpte)
+	hlist_for_each_entry_rcu(pte, list, list_vpte)
 		if ((pte->pte.vpage & vp_mask) == guest_vp)
 			invalidate_pte(vcpu, pte);
@@ -228,7 +224,6 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 	u64 vp_mask = 0xffffff000ULL;
@@ -238,7 +233,7 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
 	rcu_read_lock();
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+	hlist_for_each_entry_rcu(pte, list, list_vpte_long)
 		if ((pte->pte.vpage & vp_mask) == guest_vp)
 			invalidate_pte(vcpu, pte);
@@ -266,7 +261,6 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
 void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 	int i;
@@ -277,7 +271,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
 		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
-		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
 			if ((pte->pte.raddr >= pa_start) &&
 			    (pte->pte.raddr < pa_end))
 				invalidate_pte(vcpu, pte);


@@ -354,7 +354,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 {
 	struct kretprobe_instance *ri;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address;
 	unsigned long trampoline_address;
 	kprobe_opcode_t *correct_ret_addr;
@@ -379,7 +379,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	orig_ret_address = 0;
 	correct_ret_addr = NULL;
 	trampoline_address = (unsigned long) &kretprobe_trampoline;
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -398,7 +398,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 	correct_ret_addr = ri->ret_addr;
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -427,7 +427,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}


@@ -25,10 +25,9 @@ static DEFINE_SPINLOCK(msi_map_lock);
 struct msi_desc *__irq_get_msi_desc(unsigned int irq)
 {
-	struct hlist_node *entry;
 	struct msi_map *map;
-	hlist_for_each_entry_rcu(map, entry,
+	hlist_for_each_entry_rcu(map,
 			    &msi_hash[msi_hashfn(irq)], msi_chain)
 		if (map->irq == irq)
 			return map->msi;


@@ -310,7 +310,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
@@ -330,7 +330,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -360,7 +360,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	preempt_enable_no_resched();
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}


@@ -511,7 +511,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
@@ -531,7 +531,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -559,7 +559,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}


@@ -953,9 +953,8 @@ static HLIST_HEAD(ldc_channel_list);
 static int __ldc_channel_exists(unsigned long id)
 {
 	struct ldc_channel *lp;
-	struct hlist_node *n;
-	hlist_for_each_entry(lp, n, &ldc_channel_list, list) {
+	hlist_for_each_entry(lp, &ldc_channel_list, list) {
 		if (lp->id == id)
 			return 1;
 	}


@@ -652,7 +652,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 	kprobe_opcode_t *correct_ret_addr = NULL;
@@ -682,7 +682,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	 * will be the real return address, and all the rest will
 	 * point to kretprobe_trampoline.
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -701,7 +701,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 	correct_ret_addr = ri->ret_addr;
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -728,7 +728,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	kretprobe_hash_unlock(current, &flags);
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}


@@ -1644,13 +1644,13 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list);
-#define for_each_gfn_sp(kvm, sp, gfn, pos) \
-	hlist_for_each_entry(sp, pos, \
+#define for_each_gfn_sp(kvm, sp, gfn) \
+	hlist_for_each_entry(sp, \
 		&(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
 		if ((sp)->gfn != (gfn)) {} else
-#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos) \
-	hlist_for_each_entry(sp, pos, \
+#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn) \
+	hlist_for_each_entry(sp, \
 		&(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
 		if ((sp)->gfn != (gfn) || (sp)->role.direct || \
 			(sp)->role.invalid) {} else
@@ -1706,11 +1706,10 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	bool flush = false;
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (!s->unsync)
 			continue;
@@ -1848,7 +1847,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	union kvm_mmu_page_role role;
 	unsigned quadrant;
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	bool need_sync = false;
 	role = vcpu->arch.mmu.base_role;
@@ -1863,7 +1861,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
+	for_each_gfn_sp(vcpu->kvm, sp, gfn) {
 		if (!need_sync && sp->unsync)
 			need_sync = true;
@@ -2151,14 +2149,13 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	int r;
 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
 	r = 0;
 	spin_lock(&kvm->mmu_lock);
-	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
+	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
 			 sp->role.word);
 		r = 1;
@@ -2288,9 +2285,8 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (s->unsync)
 			continue;
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
@@ -2302,10 +2298,9 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 				  bool can_unsync)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 	bool need_unsync = false;
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (!can_unsync)
 			return 1;
@@ -3933,7 +3928,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	union kvm_mmu_page_role mask = { .word = 0 };
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry, *spte;
 	int npte;
@@ -3964,7 +3958,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		    detect_write_flooding(sp)) {
 			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,


@@ -357,7 +357,6 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
 {
 	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
 	struct blkcg_gq *blkg;
-	struct hlist_node *n;
 	int i;
 	mutex_lock(&blkcg_pol_mutex);
@@ -368,7 +367,7 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
 	 * stat updates. This is a debug feature which shouldn't exist
 	 * anyway. If you get hit by a race, retry.
 	 */
-	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 			struct blkcg_policy *pol = blkcg_policy[i];
@@ -415,11 +414,10 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 		       bool show_total)
 {
 	struct blkcg_gq *blkg;
-	struct hlist_node *n;
 	u64 total = 0;
 	spin_lock_irq(&blkcg->lock);
-	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
+	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node)
 		if (blkcg_policy_enabled(blkg->q, pol))
 			total += prfill(sf, blkg->pd[pol->plid], data);
 	spin_unlock_irq(&blkcg->lock);


@@ -164,7 +164,6 @@ EXPORT_SYMBOL(put_io_context);
  */
 void put_io_context_active(struct io_context *ioc)
 {
-	struct hlist_node *n;
 	unsigned long flags;
 	struct io_cq *icq;
@@ -180,7 +179,7 @@ void put_io_context_active(struct io_context *ioc)
 	 */
 retry:
 	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
-	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
+	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
 		if (icq->flags & ICQ_EXITED)
 			continue;
 		if (spin_trylock(icq->q->queue_lock)) {


@@ -800,11 +800,10 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
 {
 	struct bsg_device *bd;
-	struct hlist_node *entry;
 	mutex_lock(&bsg_mutex);
-	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
+	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
 		if (bd->queue == q) {
 			atomic_inc(&bd->ref_count);
 			goto found;


@@ -1435,7 +1435,6 @@ static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
 {
 	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 	struct blkcg_gq *blkg;
-	struct hlist_node *n;
 	if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
 		return -EINVAL;
@@ -1443,7 +1442,7 @@ static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
 	spin_lock_irq(&blkcg->lock);
 	blkcg->cfq_weight = (unsigned int)val;
-	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 		struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 		if (cfqg && !cfqg->dev_weight)


@@ -288,10 +288,10 @@ static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
 {
 	struct elevator_queue *e = q->elevator;
 	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
-	struct hlist_node *entry, *next;
+	struct hlist_node *next;
 	struct request *rq;
-	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
+	hlist_for_each_entry_safe(rq, next, hash_list, hash) {
 		BUG_ON(!ELV_ON_HASH(rq));
 		if (unlikely(!rq_mergeable(rq))) {


@@ -447,7 +447,7 @@ EXPORT_SYMBOL_GPL(crypto_register_template);
 void crypto_unregister_template(struct crypto_template *tmpl)
 {
 	struct crypto_instance *inst;
-	struct hlist_node *p, *n;
+	struct hlist_node *n;
 	struct hlist_head *list;
 	LIST_HEAD(users);
@@ -457,7 +457,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
 	list_del_init(&tmpl->list);
 	list = &tmpl->instances;
-	hlist_for_each_entry(inst, p, list, list) {
+	hlist_for_each_entry(inst, list, list) {
 		int err = crypto_remove_alg(&inst->alg, &users);
 		BUG_ON(err);
 	}
@@ -466,7 +466,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
 	up_write(&crypto_alg_sem);
-	hlist_for_each_entry_safe(inst, p, n, list, list) {
+	hlist_for_each_entry_safe(inst, n, list, list) {
 		BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
 		tmpl->free(inst);
 	}


@@ -157,7 +157,6 @@ static int atmtcp_v_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
 {
 	struct atm_cirange ci;
 	struct atm_vcc *vcc;
-	struct hlist_node *node;
 	struct sock *s;
 	int i;
@@ -171,7 +170,7 @@ static int atmtcp_v_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
 	for(i = 0; i < VCC_HTABLE_SIZE; ++i) {
 		struct hlist_head *head = &vcc_hash[i];
-		sk_for_each(s, node, head) {
+		sk_for_each(s, head) {
 			vcc = atm_sk(s);
 			if (vcc->dev != dev)
 				continue;
@@ -264,12 +263,11 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
 {
 	struct hlist_head *head;
 	struct atm_vcc *vcc;
-	struct hlist_node *node;
 	struct sock *s;
 	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
-	sk_for_each(s, node, head) {
+	sk_for_each(s, head) {
 		vcc = atm_sk(s);
 		if (vcc->dev == dev &&
 		    vcc->vci == vci && vcc->vpi == vpi &&


@@ -2093,7 +2093,6 @@ static unsigned char eni_phy_get(struct atm_dev *dev,unsigned long addr)
 static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
 {
-	struct hlist_node *node;
 	struct sock *s;
 	static const char *signal[] = { "LOST","unknown","okay" };
 	struct eni_dev *eni_dev = ENI_DEV(dev);
@@ -2171,7 +2170,7 @@ static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
 	for(i = 0; i < VCC_HTABLE_SIZE; ++i) {
 		struct hlist_head *head = &vcc_hash[i];
-		sk_for_each(s, node, head) {
+		sk_for_each(s, head) {
 			struct eni_vcc *eni_vcc;
 			int length;


@@ -329,7 +329,6 @@ __find_vcc(struct he_dev *he_dev, unsigned cid)
 {
 	struct hlist_head *head;
 	struct atm_vcc *vcc;
-	struct hlist_node *node;
 	struct sock *s;
 	short vpi;
 	int vci;
@@ -338,7 +337,7 @@ __find_vcc(struct he_dev *he_dev, unsigned cid)
 	vci = cid & ((1 << he_dev->vcibits) - 1);
 	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
-	sk_for_each(s, node, head) {
+	sk_for_each(s, head) {
 		vcc = atm_sk(s);
 		if (vcc->dev == he_dev->atm_dev &&
 		    vcc->vci == vci && vcc->vpi == vpi &&


@@ -896,12 +896,11 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
 {
 	struct hlist_head *head;
 	struct atm_vcc *vcc = NULL;
-	struct hlist_node *node;
 	struct sock *s;
 	read_lock(&vcc_sklist_lock);
 	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
-	sk_for_each(s, node, head) {
+	sk_for_each(s, head) {
 		vcc = atm_sk(s);
 		if (vcc->dev == dev && vcc->vci == vci &&
 		    vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE &&


@@ -52,31 +52,29 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
 				     int level)
 {
 	struct clk *child;
-	struct hlist_node *tmp;
 	if (!c)
 		return;
 	clk_summary_show_one(s, c, level);
-	hlist_for_each_entry(child, tmp, &c->children, child_node)
+	hlist_for_each_entry(child, &c->children, child_node)
 		clk_summary_show_subtree(s, child, level + 1);
 }
 static int clk_summary_show(struct seq_file *s, void *data)
 {
 	struct clk *c;
-	struct hlist_node *tmp;
 	seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
 	seq_printf(s, "---------------------------------------------------------------------\n");
 	mutex_lock(&prepare_lock);
-	hlist_for_each_entry(c, tmp, &clk_root_list, child_node)
+	hlist_for_each_entry(c, &clk_root_list, child_node)
 		clk_summary_show_subtree(s, c, 0);
-	hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node)
+	hlist_for_each_entry(c, &clk_orphan_list, child_node)
 		clk_summary_show_subtree(s, c, 0);
 	mutex_unlock(&prepare_lock);
@@ -111,14 +109,13 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
 static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
 {
 	struct clk *child;
-	struct hlist_node *tmp;
 	if (!c)
 		return;
 	clk_dump_one(s, c, level);
-	hlist_for_each_entry(child, tmp, &c->children, child_node) {
+	hlist_for_each_entry(child, &c->children, child_node) {
 		seq_printf(s, ",");
 		clk_dump_subtree(s, child, level + 1);
 	}
@@ -129,21 +126,20 @@ static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
 static int clk_dump(struct seq_file *s, void *data)
 {
 	struct clk *c;
-	struct hlist_node *tmp;
 	bool first_node = true;
 	seq_printf(s, "{");
 	mutex_lock(&prepare_lock);
-	hlist_for_each_entry(c, tmp, &clk_root_list, child_node) {
+	hlist_for_each_entry(c, &clk_root_list, child_node) {
 		if (!first_node)
 			seq_printf(s, ",");
 		first_node = false;
 		clk_dump_subtree(s, c, 0);
 	}
-	hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node) {
+	hlist_for_each_entry(c, &clk_orphan_list, child_node) {
 		seq_printf(s, ",");
 		clk_dump_subtree(s, c, 0);
 	}
@@ -222,7 +218,6 @@ out:
 static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
 {
 	struct clk *child;
-	struct hlist_node *tmp;
 	int ret = -EINVAL;;
 	if (!clk || !pdentry)
@@ -233,7 +228,7 @@ static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
 	if (ret)
 		goto out;
-	hlist_for_each_entry(child, tmp, &clk->children, child_node)
+	hlist_for_each_entry(child, &clk->children, child_node)
 		clk_debug_create_subtree(child, clk->dentry);
 	ret = 0;
@@ -299,7 +294,6 @@ out:
 static int __init clk_debug_init(void)
 {
 	struct clk *clk;
-	struct hlist_node *tmp;
 	struct dentry *d;
 	rootdir = debugfs_create_dir("clk", NULL);
@@ -324,10 +318,10 @@ static int __init clk_debug_init(void)
 	mutex_lock(&prepare_lock);
-	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
+	hlist_for_each_entry(clk, &clk_root_list, child_node)
 		clk_debug_create_subtree(clk, rootdir);
-	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
+	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
 		clk_debug_create_subtree(clk, orphandir);
 	inited = 1;
@@ -345,13 +339,12 @@ static inline int clk_debug_register(struct clk *clk) { return 0; }
 static void clk_disable_unused_subtree(struct clk *clk)
 {
 	struct clk *child;
-	struct hlist_node *tmp;
 	unsigned long flags;
 	if (!clk)
 		goto out;
-	hlist_for_each_entry(child, tmp, &clk->children, child_node)
+	hlist_for_each_entry(child, &clk->children, child_node)
 		clk_disable_unused_subtree(child);
 	spin_lock_irqsave(&enable_lock, flags);
@@ -384,14 +377,13 @@ out:
 static int clk_disable_unused(void)
 {
 	struct clk *clk;
-	struct hlist_node *tmp;
 	mutex_lock(&prepare_lock);
-	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
+	hlist_for_each_entry(clk, &clk_root_list, child_node)
 		clk_disable_unused_subtree(clk);
-	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
+	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
 		clk_disable_unused_subtree(clk);
 	mutex_unlock(&prepare_lock);
@@ -484,12 +476,11 @@ static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
 {
 	struct clk *child;
 	struct clk *ret;
-	struct hlist_node *tmp;
 	if (!strcmp(clk->name, name))
 		return clk;
-	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+	hlist_for_each_entry(child, &clk->children, child_node) {
 		ret = __clk_lookup_subtree(name, child);
 		if (ret)
 			return ret;
@@ -502,20 +493,19 @@ struct clk *__clk_lookup(const char *name)
 {
 	struct clk *root_clk;
 	struct clk *ret;
-	struct hlist_node *tmp;
 	if (!name)
 		return NULL;
 	/* search the 'proper' clk tree first */
-	hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
+	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
 		ret = __clk_lookup_subtree(name, root_clk);
 		if (ret)
 			return ret;
 	}
 	/* if not found, then search the orphan tree */
-	hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
+	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
 		ret = __clk_lookup_subtree(name, root_clk);
 		if (ret)
 			return ret;
@@ -812,7 +802,6 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
 {
 	unsigned long old_rate;
 	unsigned long parent_rate = 0;
-	struct hlist_node *tmp;
 	struct clk *child;
 	old_rate = clk->rate;
@@ -832,7 +821,7 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
 	if (clk->notifier_count && msg)
 		__clk_notify(clk, msg, old_rate, clk->rate);
-	hlist_for_each_entry(child, tmp, &clk->children, child_node)
+	hlist_for_each_entry(child, &clk->children, child_node)
 		__clk_recalc_rates(child, msg);
 }
@@ -878,7 +867,6 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
  */
 static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
 {
-	struct hlist_node *tmp;
 	struct clk *child;
 	unsigned long new_rate;
 	int ret = NOTIFY_DONE;
@@ -895,7 +883,7 @@ static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
 	if (ret == NOTIFY_BAD)
 		goto out;
-	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+	hlist_for_each_entry(child, &clk->children, child_node) {
 		ret = __clk_speculate_rates(child, new_rate);
 		if (ret == NOTIFY_BAD)
 			break;
@@ -908,11 +896,10 @@ out:
 static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
 {
 	struct clk *child;
-	struct hlist_node *tmp;
 	clk->new_rate = new_rate;
-	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+	hlist_for_each_entry(child, &clk->children, child_node) {
 		if (child->ops->recalc_rate)
 			child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
 		else
@@ -983,7 +970,6 @@ out:
  */
 static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
 {
-	struct hlist_node *tmp;
 	struct clk *child, *fail_clk = NULL;
 	int ret = NOTIFY_DONE;
@@ -996,7 +982,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
 		fail_clk = clk;
 	}
-	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+	hlist_for_each_entry(child, &clk->children, child_node) {
 		clk = clk_propagate_rate_change(child, event);
 		if (clk)
 			fail_clk = clk;
@@ -1014,7 +1000,6 @@ static void clk_change_rate(struct clk *clk)
 	struct clk *child;
 	unsigned long old_rate;
 	unsigned long best_parent_rate = 0;
-	struct hlist_node *tmp;
 	old_rate = clk->rate;
@@ -1032,7 +1017,7 @@ static void clk_change_rate(struct clk *clk)
 	if (clk->notifier_count && old_rate != clk->rate)
 		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
-	hlist_for_each_entry(child, tmp, &clk->children, child_node)
+	hlist_for_each_entry(child, &clk->children, child_node)
 		clk_change_rate(child);
 }
@@ -1348,7 +1333,7 @@ int __clk_init(struct device *dev, struct clk *clk)
 {
 	int i, ret = 0;
 	struct clk *orphan;
-	struct hlist_node *tmp, *tmp2;
+	struct hlist_node *tmp2;
 	if (!clk)
 		return -EINVAL;
@@ -1448,7 +1433,7 @@ int __clk_init(struct device *dev, struct clk *clk)
 	 * walk the list of orphan clocks and reparent any that are children of
 	 * this clock
 	 */
-	hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node) {
+	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
 		if (orphan->ops->get_parent) {
 			i = orphan->ops->get_parent(orphan->hw);
 			if (!strcmp(clk->name, orphan->parent_names[i]))


@@ -60,14 +60,13 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
 {
 	struct drm_hash_item *entry;
 	struct hlist_head *h_list;
-	struct hlist_node *list;
 	unsigned int hashed_key;
 	int count = 0;
 	hashed_key = hash_long(key, ht->order);
 	DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
 	h_list = &ht->table[hashed_key];
-	hlist_for_each_entry(entry, list, h_list, head)
+	hlist_for_each_entry(entry, h_list, head)
 		DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
 }
@@ -76,14 +75,13 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
 {
 	struct drm_hash_item *entry;
 	struct hlist_head *h_list;
-	struct hlist_node *list;
 	unsigned int hashed_key;
 	hashed_key = hash_long(key, ht->order);
 	h_list = &ht->table[hashed_key];
-	hlist_for_each_entry(entry, list, h_list, head) {
+	hlist_for_each_entry(entry, h_list, head) {
 		if (entry->key == key)
-			return list;
+			return &entry->head;
 		if (entry->key > key)
 			break;
 	}
@@ -95,14 +93,13 @@ static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
 {
 	struct drm_hash_item *entry;
 	struct hlist_head *h_list;
-	struct hlist_node *list;
 	unsigned int hashed_key;
 	hashed_key = hash_long(key, ht->order);
 	h_list = &ht->table[hashed_key];
-	hlist_for_each_entry_rcu(entry, list, h_list, head) {
+	hlist_for_each_entry_rcu(entry, h_list, head) {
 		if (entry->key == key)
-			return list;
+			return &entry->head;
 		if (entry->key > key)
 			break;
 	}
@@ -113,19 +110,19 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 {
 	struct drm_hash_item *entry;
 	struct hlist_head *h_list;
-	struct hlist_node *list, *parent;
+	struct hlist_node *parent;
 	unsigned int hashed_key;
 	unsigned long key = item->key;
 	hashed_key = hash_long(key, ht->order);
 	h_list = &ht->table[hashed_key];
 	parent = NULL;
-	hlist_for_each_entry(entry, list, h_list, head) {
+	hlist_for_each_entry(entry, h_list, head) {
 		if (entry->key == key)
 			return -EINVAL;
 		if (entry->key > key)
 			break;
-		parent = list;
+		parent = &entry->head;
 	}
 	if (parent) {
 		hlist_add_after_rcu(parent, &item->head);


@@ -2204,10 +2204,9 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
 {
 	struct rdma_id_private *cur_id;
 	struct sockaddr *addr, *cur_addr;
-	struct hlist_node *node;
 	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
-	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
+	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
 		if (id_priv == cur_id)
 			continue;


@@ -118,14 +118,13 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
 {
 	struct hlist_head *bucket;
 	struct ib_pool_fmr *fmr;
-	struct hlist_node *pos;
 	if (!pool->cache_bucket)
 		return NULL;
 	bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
-	hlist_for_each_entry(fmr, pos, bucket, cache_node)
+	hlist_for_each_entry(fmr, bucket, cache_node)
 		if (io_virtual_address == fmr->io_virtual_address &&
 		    page_list_len == fmr->page_list_len &&
 		    !memcmp(page_list, fmr->page_list,


@@ -483,7 +483,6 @@ data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 {
 	struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
 	struct sock *sk = sock->sk;
-	struct hlist_node *node;
 	struct sock *csk;
 	int err = 0;
@@ -508,7 +507,7 @@ data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 	if (sk->sk_protocol < ISDN_P_B_START) {
 		read_lock_bh(&data_sockets.lock);
-		sk_for_each(csk, node, &data_sockets.head) {
+		sk_for_each(csk, &data_sockets.head) {
 			if (sk == csk)
 				continue;
 			if (_pms(csk)->dev != _pms(sk)->dev)


@@ -64,12 +64,11 @@ unlock:
 static void
 send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
 {
-	struct hlist_node *node;
 	struct sock *sk;
 	struct sk_buff *cskb = NULL;
 	read_lock(&sl->lock);
-	sk_for_each(sk, node, &sl->head) {
+	sk_for_each(sk, &sl->head) {
 		if (sk->sk_state != MISDN_BOUND)
 			continue;
 		if (!cskb)


@@ -106,9 +106,8 @@ static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
 						  struct dm_cell_key *key)
 {
 	struct dm_bio_prison_cell *cell;
-	struct hlist_node *tmp;
-	hlist_for_each_entry(cell, tmp, bucket, list)
+	hlist_for_each_entry(cell, bucket, list)
 		if (keys_equal(&cell->key, key))
 			return cell;


@@ -859,9 +859,8 @@ static void __check_watermark(struct dm_bufio_client *c)
 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
 {
 	struct dm_buffer *b;
-	struct hlist_node *hn;
-	hlist_for_each_entry(b, hn, &c->cache_hash[DM_BUFIO_HASH(block)],
+	hlist_for_each_entry(b, &c->cache_hash[DM_BUFIO_HASH(block)],
 			     hash_list) {
 		dm_bufio_cond_resched();
 		if (b->block == block)


@@ -227,12 +227,11 @@ static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
 {
 	struct dm_snap_tracked_chunk *c;
-	struct hlist_node *hn;
 	int found = 0;
 	spin_lock_irq(&s->tracked_chunk_lock);
-	hlist_for_each_entry(c, hn,
+	hlist_for_each_entry(c,
 	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
 		if (c->chunk == chunk) {
 			found = 1;


@@ -46,10 +46,9 @@ static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
         int r = 0;
         unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
         struct shadow_info *si;
-        struct hlist_node *n;

         spin_lock(&tm->lock);
-        hlist_for_each_entry(si, n, tm->buckets + bucket, hlist)
+        hlist_for_each_entry(si, tm->buckets + bucket, hlist)
                 if (si->where == b) {
                         r = 1;
                         break;
@@ -81,14 +80,14 @@ static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
 static void wipe_shadow_table(struct dm_transaction_manager *tm)
 {
         struct shadow_info *si;
-        struct hlist_node *n, *tmp;
+        struct hlist_node *tmp;
         struct hlist_head *bucket;
         int i;

         spin_lock(&tm->lock);
         for (i = 0; i < DM_HASH_SIZE; i++) {
                 bucket = tm->buckets + i;
-                hlist_for_each_entry_safe(si, n, tmp, bucket, hlist)
+                hlist_for_each_entry_safe(si, tmp, bucket, hlist)
                         kfree(si);

                 INIT_HLIST_HEAD(bucket);
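
The _safe variants keep one lookahead node (here 'tmp') that is read before
the loop body runs, which is why wipe_shadow_table() may kfree() the current
entry mid-walk. A minimal usage sketch with the new two-cursor signature:

        struct shadow_info *si;
        struct hlist_node *tmp;

        hlist_for_each_entry_safe(si, tmp, bucket, hlist)
                kfree(si);      /* safe: tmp already holds si->hlist.next */
        INIT_HLIST_HEAD(bucket);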


@@ -365,10 +365,9 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
                                          short generation)
 {
         struct stripe_head *sh;
-        struct hlist_node *hn;

         pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
-        hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
+        hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
                 if (sh->sector == sector && sh->generation == generation)
                         return sh;
         pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);


@@ -280,11 +280,10 @@ static struct mmu_notifier *mmu_find_ops(struct mm_struct *mm,
                                          const struct mmu_notifier_ops *ops)
 {
         struct mmu_notifier *mn, *gru_mn = NULL;
-        struct hlist_node *n;

         if (mm->mmu_notifier_mm) {
                 rcu_read_lock();
-                hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list,
+                hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list,
                                          hlist)
                         if (mn->ops == ops) {
                                 gru_mn = mn;


@@ -127,9 +127,8 @@ static struct dbell_entry *dbell_index_table_find(u32 idx)
 {
         u32 bucket = VMCI_DOORBELL_HASH(idx);
         struct dbell_entry *dbell;
-        struct hlist_node *node;

-        hlist_for_each_entry(dbell, node, &vmci_doorbell_it.entries[bucket],
+        hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket],
                              node) {
                 if (idx == dbell->idx)
                         return dbell;
@@ -359,12 +358,10 @@ static void dbell_fire_entries(u32 notify_idx)
 {
         u32 bucket = VMCI_DOORBELL_HASH(notify_idx);
         struct dbell_entry *dbell;
-        struct hlist_node *node;

         spin_lock_bh(&vmci_doorbell_it.lock);

-        hlist_for_each_entry(dbell, node,
-                             &vmci_doorbell_it.entries[bucket], node) {
+        hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) {
                 if (dbell->idx == notify_idx &&
                     atomic_read(&dbell->active) == 1) {
                         if (dbell->run_delayed) {


@@ -46,11 +46,10 @@ static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
                                                   enum vmci_resource_type type)
 {
         struct vmci_resource *r, *resource = NULL;
-        struct hlist_node *node;
         unsigned int idx = vmci_resource_hash(handle);

         rcu_read_lock();
-        hlist_for_each_entry_rcu(r, node,
+        hlist_for_each_entry_rcu(r,
                                  &vmci_resource_table.entries[idx], node) {
                 u32 cid = r->handle.context;
                 u32 rid = r->handle.resource;
@@ -146,12 +145,11 @@ void vmci_resource_remove(struct vmci_resource *resource)
         struct vmci_handle handle = resource->handle;
         unsigned int idx = vmci_resource_hash(handle);
         struct vmci_resource *r;
-        struct hlist_node *node;

         /* Remove resource from hash table. */
         spin_lock(&vmci_resource_table.lock);
-        hlist_for_each_entry(r, node, &vmci_resource_table.entries[idx], node) {
+        hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
                 if (vmci_handle_is_equal(r->handle, resource->handle)) {
                         hlist_del_init_rcu(&r->node);
                         break;


@@ -2197,13 +2197,13 @@ static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
         union ixgbe_atr_input *mask = &adapter->fdir_mask;
         struct ethtool_rx_flow_spec *fsp =
                 (struct ethtool_rx_flow_spec *)&cmd->fs;
-        struct hlist_node *node, *node2;
+        struct hlist_node *node2;
         struct ixgbe_fdir_filter *rule = NULL;

         /* report total rule count */
         cmd->data = (1024 << adapter->fdir_pballoc) - 2;

-        hlist_for_each_entry_safe(rule, node, node2,
+        hlist_for_each_entry_safe(rule, node2,
                                   &adapter->fdir_filter_list, fdir_node) {
                 if (fsp->location <= rule->sw_idx)
                         break;
@@ -2264,14 +2264,14 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
                                       struct ethtool_rxnfc *cmd,
                                       u32 *rule_locs)
 {
-        struct hlist_node *node, *node2;
+        struct hlist_node *node2;
         struct ixgbe_fdir_filter *rule;
         int cnt = 0;

         /* report total rule count */
         cmd->data = (1024 << adapter->fdir_pballoc) - 2;

-        hlist_for_each_entry_safe(rule, node, node2,
+        hlist_for_each_entry_safe(rule, node2,
                                   &adapter->fdir_filter_list, fdir_node) {
                 if (cnt == cmd->rule_cnt)
                         return -EMSGSIZE;
@@ -2358,19 +2358,19 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
                                            u16 sw_idx)
 {
         struct ixgbe_hw *hw = &adapter->hw;
-        struct hlist_node *node, *node2, *parent;
-        struct ixgbe_fdir_filter *rule;
+        struct hlist_node *node2;
+        struct ixgbe_fdir_filter *rule, *parent;
         int err = -EINVAL;

         parent = NULL;
         rule = NULL;

-        hlist_for_each_entry_safe(rule, node, node2,
+        hlist_for_each_entry_safe(rule, node2,
                                   &adapter->fdir_filter_list, fdir_node) {
                 /* hash found, or no matching entry */
                 if (rule->sw_idx >= sw_idx)
                         break;
-                parent = node;
+                parent = rule;
         }

         /* if there is an old rule occupying our place remove it */
@@ -2399,7 +2399,7 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,

         /* add filter to the list */
         if (parent)
-                hlist_add_after(parent, &input->fdir_node);
+                hlist_add_after(&parent->fdir_node, &input->fdir_node);
         else
                 hlist_add_head(&input->fdir_node,
                                &adapter->fdir_filter_list);
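
This ixgbe hunk is one of the few callers that actually consumed the node
parameter: 'parent' used to be the raw struct hlist_node of the last filter
before the insertion point, handed straight to hlist_add_after(). As the
changelog says, such users are converted to 'obj->member' instead: 'parent'
becomes an entry pointer and the node is derived from it. The shape of the
rewrite, sketched with the names from the hunk above:

        struct ixgbe_fdir_filter *parent = NULL, *rule;
        struct hlist_node *node2;

        hlist_for_each_entry_safe(rule, node2,
                                  &adapter->fdir_filter_list, fdir_node) {
                if (rule->sw_idx >= sw_idx)
                        break;
                parent = rule;          /* remember the entry, not its node */
        }
        ...
        if (parent)
                hlist_add_after(&parent->fdir_node, &input->fdir_node);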


@@ -3891,7 +3891,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
 static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
 {
         struct ixgbe_hw *hw = &adapter->hw;
-        struct hlist_node *node, *node2;
+        struct hlist_node *node2;
         struct ixgbe_fdir_filter *filter;

         spin_lock(&adapter->fdir_perfect_lock);
@@ -3899,7 +3899,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
         if (!hlist_empty(&adapter->fdir_filter_list))
                 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);

-        hlist_for_each_entry_safe(filter, node, node2,
+        hlist_for_each_entry_safe(filter, node2,
                                   &adapter->fdir_filter_list, fdir_node) {
                 ixgbe_fdir_write_perfect_filter_82599(hw,
                                 &filter->filter,
@@ -4356,12 +4356,12 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
 static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
 {
-        struct hlist_node *node, *node2;
+        struct hlist_node *node2;
         struct ixgbe_fdir_filter *filter;

         spin_lock(&adapter->fdir_perfect_lock);

-        hlist_for_each_entry_safe(filter, node, node2,
+        hlist_for_each_entry_safe(filter, node2,
                                   &adapter->fdir_filter_list, fdir_node) {
                 hlist_del(&filter->fdir_node);
                 kfree(filter);


@@ -225,11 +225,10 @@ static inline struct mlx4_en_filter *
 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
                     __be16 src_port, __be16 dst_port)
 {
-        struct hlist_node *elem;
         struct mlx4_en_filter *filter;
         struct mlx4_en_filter *ret = NULL;

-        hlist_for_each_entry(filter, elem,
+        hlist_for_each_entry(filter,
                              filter_hash_bucket(priv, src_ip, dst_ip,
                                                 src_port, dst_port),
                              filter_chain) {
@@ -574,13 +573,13 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)

         if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
                 struct mlx4_mac_entry *entry;
-                struct hlist_node *n, *tmp;
+                struct hlist_node *tmp;
                 struct hlist_head *bucket;
                 unsigned int mac_hash;

                 mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
                 bucket = &priv->mac_hash[mac_hash];
-                hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+                hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
                         if (ether_addr_equal_64bits(entry->mac,
                                                     priv->dev->dev_addr)) {
                                 en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
@@ -609,11 +608,11 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
         struct hlist_head *bucket;
         unsigned int mac_hash;
         struct mlx4_mac_entry *entry;
-        struct hlist_node *n, *tmp;
+        struct hlist_node *tmp;
         u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);

         bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
-        hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+        hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
                 if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
                         mlx4_en_uc_steer_release(priv, entry->mac,
                                                  qpn, entry->reg_id);
@@ -1019,7 +1018,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
 {
         struct netdev_hw_addr *ha;
         struct mlx4_mac_entry *entry;
-        struct hlist_node *n, *tmp;
+        struct hlist_node *tmp;
         bool found;
         u64 mac;
         int err = 0;
@@ -1035,7 +1034,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
         /* find what to remove */
         for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
                 bucket = &priv->mac_hash[i];
-                hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+                hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
                         found = false;
                         netdev_for_each_uc_addr(ha, dev) {
                                 if (ether_addr_equal_64bits(entry->mac,
@@ -1078,7 +1077,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
         netdev_for_each_uc_addr(ha, dev) {
                 found = false;
                 bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
-                hlist_for_each_entry(entry, n, bucket, hlist) {
+                hlist_for_each_entry(entry, bucket, hlist) {
                         if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
                                 found = true;
                                 break;


@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/mlx4/qp.h>
 #include <linux/skbuff.h>
+#include <linux/rculist.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/vmalloc.h>
@@ -617,7 +618,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                 if (is_multicast_ether_addr(ethh->h_dest)) {
                         struct mlx4_mac_entry *entry;
-                        struct hlist_node *n;
                         struct hlist_head *bucket;
                         unsigned int mac_hash;

@@ -625,7 +625,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                         mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
                         bucket = &priv->mac_hash[mac_hash];
                         rcu_read_lock();
-                        hlist_for_each_entry_rcu(entry, n, bucket, hlist) {
+                        hlist_for_each_entry_rcu(entry, bucket, hlist) {
                                 if (ether_addr_equal_64bits(entry->mac,
                                                             ethh->h_source)) {
                                         rcu_read_unlock();
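
en_rx.c also gains an explicit #include <linux/rculist.h>: the RCU flavours of
these iterators (hlist_for_each_entry_rcu() and friends) are declared there
rather than in <linux/list.h>, and the file was presumably picking them up
indirectly before. The lookup above is the standard RCU read-side pattern,
sketched here with the new signature:

        rcu_read_lock();
        hlist_for_each_entry_rcu(entry, bucket, hlist) {
                if (ether_addr_equal_64bits(entry->mac, ethh->h_source))
                        break;  /* entry stays valid until rcu_read_unlock() */
        }
        rcu_read_unlock();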


@@ -576,7 +576,7 @@ void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
 void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
 {
         struct qlcnic_filter *tmp_fil;
-        struct hlist_node *tmp_hnode, *n;
+        struct hlist_node *n;
         struct hlist_head *head;
         int i;
         unsigned long time;
@@ -584,7 +584,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)

         for (i = 0; i < adapter->fhash.fbucket_size; i++) {
                 head = &(adapter->fhash.fhead[i]);
-                hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+                hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
                         cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
                                                  QLCNIC_MAC_DEL;
                         time = tmp_fil->ftime;
@@ -604,7 +604,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)

         for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) {
                 head = &(adapter->rx_fhash.fhead[i]);
-                hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
+                hlist_for_each_entry_safe(tmp_fil, n, head, fnode)
                 {
                         time = tmp_fil->ftime;
                         if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
@@ -621,14 +621,14 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
 void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
 {
         struct qlcnic_filter *tmp_fil;
-        struct hlist_node *tmp_hnode, *n;
+        struct hlist_node *n;
         struct hlist_head *head;
         int i;
         u8 cmd;

         for (i = 0; i < adapter->fhash.fbucket_size; i++) {
                 head = &(adapter->fhash.fhead[i]);
-                hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+                hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
                         cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
                                                  QLCNIC_MAC_DEL;
                         qlcnic_sre_macaddr_change(adapter,


@@ -162,7 +162,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
 {
         struct ethhdr *phdr = (struct ethhdr *)(skb->data);
         struct qlcnic_filter *fil, *tmp_fil;
-        struct hlist_node *tmp_hnode, *n;
+        struct hlist_node *n;
         struct hlist_head *head;
         unsigned long time;
         u64 src_addr = 0;
@@ -179,7 +179,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
                          (adapter->fhash.fbucket_size - 1);
                 head = &(adapter->rx_fhash.fhead[hindex]);

-                hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+                hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
                         if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
                             tmp_fil->vlan_id == vlan_id) {
                                 time = tmp_fil->ftime;
@@ -205,7 +205,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
                          (adapter->fhash.fbucket_size - 1);
                 head = &(adapter->rx_fhash.fhead[hindex]);
                 spin_lock(&adapter->rx_mac_learn_lock);
-                hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+                hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
                         if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
                             tmp_fil->vlan_id == vlan_id) {
                                 found = 1;
@@ -272,7 +272,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                                struct sk_buff *skb)
 {
         struct qlcnic_filter *fil, *tmp_fil;
-        struct hlist_node *tmp_hnode, *n;
+        struct hlist_node *n;
         struct hlist_head *head;
         struct net_device *netdev = adapter->netdev;
         struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -294,7 +294,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
         hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
         head = &(adapter->fhash.fhead[hindex]);

-        hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+        hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
                 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
                     tmp_fil->vlan_id == vlan_id) {
                         if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))


@@ -614,10 +614,9 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
 {
         unsigned int hash = vnet_hashfn(skb->data);
         struct hlist_head *hp = &vp->port_hash[hash];
-        struct hlist_node *n;
         struct vnet_port *port;

-        hlist_for_each_entry(port, n, hp, hash) {
+        hlist_for_each_entry(port, hp, hash) {
                 if (ether_addr_equal(port->raddr, skb->data))
                         return port;
         }


@@ -55,9 +55,8 @@ static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
                                                const unsigned char *addr)
 {
         struct macvlan_dev *vlan;
-        struct hlist_node *n;

-        hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[addr[5]], hlist) {
+        hlist_for_each_entry_rcu(vlan, &port->vlan_hash[addr[5]], hlist) {
                 if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr))
                         return vlan;
         }
@@ -149,7 +148,6 @@ static void macvlan_broadcast(struct sk_buff *skb,
 {
         const struct ethhdr *eth = eth_hdr(skb);
         const struct macvlan_dev *vlan;
-        struct hlist_node *n;
         struct sk_buff *nskb;
         unsigned int i;
         int err;
@@ -159,7 +157,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
                 return;

         for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
-                hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) {
+                hlist_for_each_entry_rcu(vlan, &port->vlan_hash[i], hlist) {
                         if (vlan->dev == src || !(vlan->mode & mode))
                                 continue;


@@ -197,9 +197,8 @@ static inline u32 tun_hashfn(u32 rxhash)
 static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
 {
         struct tun_flow_entry *e;
-        struct hlist_node *n;

-        hlist_for_each_entry_rcu(e, n, head, hash_link) {
+        hlist_for_each_entry_rcu(e, head, hash_link) {
                 if (e->rxhash == rxhash)
                         return e;
         }
@@ -241,9 +240,9 @@ static void tun_flow_flush(struct tun_struct *tun)
         spin_lock_bh(&tun->lock);
         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
                 struct tun_flow_entry *e;
-                struct hlist_node *h, *n;
+                struct hlist_node *n;

-                hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link)
+                hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
                         tun_flow_delete(tun, e);
         }
         spin_unlock_bh(&tun->lock);
@@ -256,9 +255,9 @@ static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
         spin_lock_bh(&tun->lock);
         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
                 struct tun_flow_entry *e;
-                struct hlist_node *h, *n;
+                struct hlist_node *n;

-                hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+                hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
                         if (e->queue_index == queue_index)
                                 tun_flow_delete(tun, e);
                 }
@@ -279,9 +278,9 @@ static void tun_flow_cleanup(unsigned long data)
         spin_lock_bh(&tun->lock);
         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
                 struct tun_flow_entry *e;
-                struct hlist_node *h, *n;
+                struct hlist_node *n;

-                hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+                hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
                         unsigned long this_timer;

                         count++;
                         this_timer = e->updated + delay;


@@ -145,9 +145,8 @@ static inline struct hlist_head *vni_head(struct net *net, u32 id)
 static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
 {
         struct vxlan_dev *vxlan;
-        struct hlist_node *node;

-        hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
+        hlist_for_each_entry_rcu(vxlan, vni_head(net, id), hlist) {
                 if (vxlan->vni == id)
                         return vxlan;
         }
@@ -292,9 +291,8 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
 {
         struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
         struct vxlan_fdb *f;
-        struct hlist_node *node;

-        hlist_for_each_entry_rcu(f, node, head, hlist) {
+        hlist_for_each_entry_rcu(f, head, hlist) {
                 if (compare_ether_addr(mac, f->eth_addr) == 0)
                         return f;
         }
@@ -422,10 +420,9 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,

         for (h = 0; h < FDB_HASH_SIZE; ++h) {
                 struct vxlan_fdb *f;
-                struct hlist_node *n;
                 int err;

-                hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
+                hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
                         if (idx < cb->args[0])
                                 goto skip;
@@ -483,11 +480,10 @@ static bool vxlan_group_used(struct vxlan_net *vn,
                              const struct vxlan_dev *this)
 {
         const struct vxlan_dev *vxlan;
-        struct hlist_node *node;
         unsigned h;

         for (h = 0; h < VNI_HASH_SIZE; ++h)
-                hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
+                hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) {
                         if (vxlan == this)
                                 continue;


@@ -309,7 +309,6 @@ static void zd1201_usbrx(struct urb *urb)
         if (data[urb->actual_length-1] == ZD1201_PACKET_RXDATA) {
                 int datalen = urb->actual_length-1;
                 unsigned short len, fc, seq;
-                struct hlist_node *node;

                 len = ntohs(*(__be16 *)&data[datalen-2]);
                 if (len>datalen)
@@ -362,7 +361,7 @@ static void zd1201_usbrx(struct urb *urb)
                         hlist_add_head(&frag->fnode, &zd->fraglist);
                         goto resubmit;
                 }
-                hlist_for_each_entry(frag, node, &zd->fraglist, fnode)
+                hlist_for_each_entry(frag, &zd->fraglist, fnode)
                         if (frag->seq == (seq&IEEE80211_SCTL_SEQ))
                                 break;
                 if (!frag)
@@ -1831,14 +1830,14 @@ err_zd:
 static void zd1201_disconnect(struct usb_interface *interface)
 {
         struct zd1201 *zd = usb_get_intfdata(interface);
-        struct hlist_node *node, *node2;
+        struct hlist_node *node2;
         struct zd1201_frag *frag;

         if (!zd)
                 return;
         usb_set_intfdata(interface, NULL);

-        hlist_for_each_entry_safe(frag, node, node2, &zd->fraglist, fnode) {
+        hlist_for_each_entry_safe(frag, node2, &zd->fraglist, fnode) {
                 hlist_del_init(&frag->fnode);
                 kfree_skb(frag->skb);
                 kfree(frag);


@@ -842,9 +842,8 @@ static struct pci_cap_saved_state *pci_find_saved_cap(
         struct pci_dev *pci_dev, char cap)
 {
         struct pci_cap_saved_state *tmp;
-        struct hlist_node *pos;

-        hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
+        hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
                 if (tmp->cap.cap_nr == cap)
                         return tmp;
         }
@@ -1041,7 +1040,6 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
         struct pci_saved_state *state;
         struct pci_cap_saved_state *tmp;
         struct pci_cap_saved_data *cap;
-        struct hlist_node *pos;
         size_t size;

         if (!dev->state_saved)
@@ -1049,7 +1047,7 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)

         size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

-        hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
+        hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
                 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

         state = kzalloc(size, GFP_KERNEL);
@@ -1060,7 +1058,7 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
                sizeof(state->config_space));

         cap = state->cap;
-        hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
+        hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
                 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
                 memcpy(cap, &tmp->cap, len);
                 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
@@ -2038,9 +2036,9 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
 void pci_free_cap_save_buffers(struct pci_dev *dev)
 {
         struct pci_cap_saved_state *tmp;
-        struct hlist_node *pos, *n;
+        struct hlist_node *n;

-        hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
+        hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
                 kfree(tmp);
 }


@@ -2880,7 +2880,6 @@ static int binder_release(struct inode *nodp, struct file *filp)

 static void binder_deferred_release(struct binder_proc *proc)
 {
-        struct hlist_node *pos;
         struct binder_transaction *t;
         struct rb_node *n;
         int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
@@ -2924,7 +2923,7 @@ static void binder_deferred_release(struct binder_proc *proc)
                         node->local_weak_refs = 0;
                         hlist_add_head(&node->dead_node, &binder_dead_nodes);

-                        hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
+                        hlist_for_each_entry(ref, &node->refs, node_entry) {
                                 incoming_refs++;
                                 if (ref->death) {
                                         death++;
@@ -3156,12 +3155,11 @@ static void print_binder_thread(struct seq_file *m,
 static void print_binder_node(struct seq_file *m, struct binder_node *node)
 {
         struct binder_ref *ref;
-        struct hlist_node *pos;
         struct binder_work *w;
         int count;

         count = 0;
-        hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+        hlist_for_each_entry(ref, &node->refs, node_entry)
                 count++;

         seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
@@ -3171,7 +3169,7 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
                    node->internal_strong_refs, count);
         if (count) {
                 seq_puts(m, " proc");
-                hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+                hlist_for_each_entry(ref, &node->refs, node_entry)
                         seq_printf(m, " %d", ref->proc->pid);
         }
         seq_puts(m, "\n");
@@ -3369,7 +3367,6 @@ static void print_binder_proc_stats(struct seq_file *m,
 static int binder_state_show(struct seq_file *m, void *unused)
 {
         struct binder_proc *proc;
-        struct hlist_node *pos;
         struct binder_node *node;
         int do_lock = !binder_debug_no_lock;

@@ -3380,10 +3377,10 @@ static int binder_state_show(struct seq_file *m, void *unused)

         if (!hlist_empty(&binder_dead_nodes))
                 seq_puts(m, "dead nodes:\n");
-        hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node)
+        hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
                 print_binder_node(m, node);

-        hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+        hlist_for_each_entry(proc, &binder_procs, proc_node)
                 print_binder_proc(m, proc, 1);
         if (do_lock)
                 binder_unlock(__func__);
@@ -3393,7 +3390,6 @@ static int binder_state_show(struct seq_file *m, void *unused)
 static int binder_stats_show(struct seq_file *m, void *unused)
 {
         struct binder_proc *proc;
-        struct hlist_node *pos;
         int do_lock = !binder_debug_no_lock;

         if (do_lock)
@@ -3403,7 +3399,7 @@ static int binder_stats_show(struct seq_file *m, void *unused)

         print_binder_stats(m, "", &binder_stats);

-        hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+        hlist_for_each_entry(proc, &binder_procs, proc_node)
                 print_binder_proc_stats(m, proc);
         if (do_lock)
                 binder_unlock(__func__);
@@ -3413,14 +3409,13 @@ static int binder_stats_show(struct seq_file *m, void *unused)
 static int binder_transactions_show(struct seq_file *m, void *unused)
 {
         struct binder_proc *proc;
-        struct hlist_node *pos;
         int do_lock = !binder_debug_no_lock;

         if (do_lock)
                 binder_lock(__func__);

         seq_puts(m, "binder transactions:\n");
-        hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+        hlist_for_each_entry(proc, &binder_procs, proc_node)
                 print_binder_proc(m, proc, 0);
         if (do_lock)
                 binder_unlock(__func__);


@@ -169,7 +169,6 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
 {
         struct ft_tport *tport;
         struct hlist_head *head;
-        struct hlist_node *pos;
         struct ft_sess *sess;

         rcu_read_lock();
@@ -178,7 +177,7 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
                 goto out;

         head = &tport->hash[ft_sess_hash(port_id)];
-        hlist_for_each_entry_rcu(sess, pos, head, hash) {
+        hlist_for_each_entry_rcu(sess, head, hash) {
                 if (sess->port_id == port_id) {
                         kref_get(&sess->kref);
                         rcu_read_unlock();
@@ -201,10 +200,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
 {
         struct ft_sess *sess;
         struct hlist_head *head;
-        struct hlist_node *pos;

         head = &tport->hash[ft_sess_hash(port_id)];
-        hlist_for_each_entry_rcu(sess, pos, head, hash)
+        hlist_for_each_entry_rcu(sess, head, hash)
                 if (sess->port_id == port_id)
                         return sess;
@@ -253,11 +251,10 @@ static void ft_sess_unhash(struct ft_sess *sess)
 static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
 {
         struct hlist_head *head;
-        struct hlist_node *pos;
         struct ft_sess *sess;

         head = &tport->hash[ft_sess_hash(port_id)];
-        hlist_for_each_entry_rcu(sess, pos, head, hash) {
+        hlist_for_each_entry_rcu(sess, head, hash) {
                 if (sess->port_id == port_id) {
                         ft_sess_unhash(sess);
                         return sess;
@@ -273,12 +270,11 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
 static void ft_sess_delete_all(struct ft_tport *tport)
 {
         struct hlist_head *head;
-        struct hlist_node *pos;
         struct ft_sess *sess;

         for (head = tport->hash;
              head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
-                hlist_for_each_entry_rcu(sess, pos, head, hash) {
+                hlist_for_each_entry_rcu(sess, head, hash) {
                         ft_sess_unhash(sess);
                         transport_deregister_session_configfs(sess->se_sess);
                         ft_sess_put(sess);      /* release from table */


@@ -125,9 +125,8 @@ static void
 affs_fix_dcache(struct inode *inode, u32 entry_ino)
 {
         struct dentry *dentry;
-        struct hlist_node *p;

         spin_lock(&inode->i_lock);
-        hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+        hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
                 if (entry_ino == (u32)(long)dentry->d_fsdata) {
                         dentry->d_fsdata = (void *)inode->i_ino;
                         break;


@@ -591,11 +591,10 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 {
         struct mm_struct *mm = current->mm;
         struct kioctx *ctx, *ret = NULL;
-        struct hlist_node *n;

         rcu_read_lock();

-        hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
+        hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
                 /*
                  * RCU protects us against accessing freed memory but
                  * we have to be careful not to get a reference when the


@@ -816,10 +816,9 @@ static bool
 inode_has_hashed_dentries(struct inode *inode)
 {
         struct dentry *dentry;
-        struct hlist_node *p;

         spin_lock(&inode->i_lock);
-        hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+        hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
                 if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
                         spin_unlock(&inode->i_lock);
                         return true;


@@ -675,11 +675,10 @@ EXPORT_SYMBOL(dget_parent);
 static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
 {
         struct dentry *alias, *discon_alias;
-        struct hlist_node *p;

 again:
         discon_alias = NULL;
-        hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
+        hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
                 spin_lock(&alias->d_lock);
                 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
                         if (IS_ROOT(alias) &&
@@ -730,10 +729,9 @@ EXPORT_SYMBOL(d_find_alias);
 void d_prune_aliases(struct inode *inode)
 {
         struct dentry *dentry;
-        struct hlist_node *p;
 restart:
         spin_lock(&inode->i_lock);
-        hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+        hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
                 spin_lock(&dentry->d_lock);
                 if (!dentry->d_count) {
                         __dget_dlock(dentry);
@@ -1443,14 +1441,13 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
         int len = entry->d_name.len;
         const char *name = entry->d_name.name;
         unsigned int hash = entry->d_name.hash;
-        struct hlist_node *p;

         if (!inode) {
                 __d_instantiate(entry, NULL);
                 return NULL;
         }

-        hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
+        hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
                 /*
                  * Don't need alias->d_lock here, because aliases with
                  * d_parent == entry->d_parent are not subject to name or


@@ -177,12 +177,11 @@ static inline int nodeid_hash(int nodeid)
 static struct connection *__find_con(int nodeid)
 {
         int r;
-        struct hlist_node *h;
         struct connection *con;

         r = nodeid_hash(nodeid);

-        hlist_for_each_entry(con, h, &connection_hash[r], list) {
+        hlist_for_each_entry(con, &connection_hash[r], list) {
                 if (con->nodeid == nodeid)
                         return con;
         }
@@ -232,13 +231,12 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
 static void foreach_conn(void (*conn_func)(struct connection *c))
 {
         int i;
-        struct hlist_node *h, *n;
+        struct hlist_node *n;
         struct connection *con;

         for (i = 0; i < CONN_HASH_SIZE; i++) {
-                hlist_for_each_entry_safe(con, h, n, &connection_hash[i], list){
+                hlist_for_each_entry_safe(con, n, &connection_hash[i], list)
                         conn_func(con);
-                }
         }
 }
@@ -257,13 +255,12 @@ static struct connection *nodeid2con(int nodeid, gfp_t allocation)
 static struct connection *assoc2con(int assoc_id)
 {
         int i;
-        struct hlist_node *h;
         struct connection *con;

         mutex_lock(&connections_lock);

         for (i = 0 ; i < CONN_HASH_SIZE; i++) {
-                hlist_for_each_entry(con, h, &connection_hash[i], list) {
+                hlist_for_each_entry(con, &connection_hash[i], list) {
                         if (con->sctp_assoc == assoc_id) {
                                 mutex_unlock(&connections_lock);
                                 return con;


@@ -115,10 +115,9 @@ void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx)
  */
 int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon)
 {
-        struct hlist_node *elem;
         int rc;

-        hlist_for_each_entry(*daemon, elem,
+        hlist_for_each_entry(*daemon,
                              &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()],
                              euid_chain) {
                 if (uid_eq((*daemon)->file->f_cred->euid, current_euid())) {
@@ -445,7 +444,6 @@ void ecryptfs_release_messaging(void)
                 mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
         }
         if (ecryptfs_daemon_hash) {
-                struct hlist_node *elem;
                 struct ecryptfs_daemon *daemon;
                 int i;

@@ -453,7 +451,7 @@ void ecryptfs_release_messaging(void)
                 for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
                         int rc;

-                        hlist_for_each_entry(daemon, elem,
+                        hlist_for_each_entry(daemon,
                                              &ecryptfs_daemon_hash[i],
                                              euid_chain) {
                                 rc = ecryptfs_exorcise_daemon(daemon);


@@ -44,14 +44,13 @@ find_acceptable_alias(struct dentry *result,
 {
         struct dentry *dentry, *toput = NULL;
         struct inode *inode;
-        struct hlist_node *p;

         if (acceptable(context, result))
                 return result;

         inode = result->d_inode;
         spin_lock(&inode->i_lock);
-        hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+        hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
                 dget(dentry);
                 spin_unlock(&inode->i_lock);
                 if (toput)


@@ -341,12 +341,11 @@ struct inode *fat_iget(struct super_block *sb, loff_t i_pos)
 {
         struct msdos_sb_info *sbi = MSDOS_SB(sb);
         struct hlist_head *head = sbi->inode_hashtable + fat_hash(i_pos);
-        struct hlist_node *_p;
         struct msdos_inode_info *i;
         struct inode *inode = NULL;

         spin_lock(&sbi->inode_hash_lock);
-        hlist_for_each_entry(i, _p, head, i_fat_hash) {
+        hlist_for_each_entry(i, head, i_fat_hash) {
                 BUG_ON(i->vfs_inode.i_sb != sb);
                 if (i->i_pos != i_pos)
                         continue;


@@ -21,13 +21,12 @@ static struct inode *fat_dget(struct super_block *sb, int i_logstart)
 {
         struct msdos_sb_info *sbi = MSDOS_SB(sb);
         struct hlist_head *head;
-        struct hlist_node *_p;
         struct msdos_inode_info *i;
         struct inode *inode = NULL;

         head = sbi->dir_hashtable + fat_dir_hash(i_logstart);
         spin_lock(&sbi->dir_hash_lock);
-        hlist_for_each_entry(i, _p, head, i_dir_hash) {
+        hlist_for_each_entry(i, head, i_dir_hash) {
                 BUG_ON(i->vfs_inode.i_sb != sb);
                 if (i->i_logstart != i_logstart)
                         continue;


@@ -237,13 +237,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
                                 struct fscache_cookie *cookie)
 {
         struct fscache_object *object;
-        struct hlist_node *_n;
         int ret;

         _enter("%p,%p{%s}", cache, cookie, cookie->def->name);

         spin_lock(&cookie->lock);
-        hlist_for_each_entry(object, _n, &cookie->backing_objects,
+        hlist_for_each_entry(object, &cookie->backing_objects,
                              cookie_link) {
                 if (object->cache == cache)
                         goto object_already_extant;
@@ -311,7 +310,6 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
 {
         struct fscache_object *p;
         struct fscache_cache *cache = object->cache;
-        struct hlist_node *_n;
         int ret;

         _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);
@@ -321,7 +319,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
         /* there may be multiple initial creations of this object, but we only
          * want one */
         ret = -EEXIST;
-        hlist_for_each_entry(p, _n, &cookie->backing_objects, cookie_link) {
+        hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
                 if (p->cache == object->cache) {
                         if (p->state >= FSCACHE_OBJECT_DYING)
                                 ret = -ENOBUFS;
@@ -331,7 +329,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,

         /* pin the parent object */
         spin_lock_nested(&cookie->parent->lock, 1);
-        hlist_for_each_entry(p, _n, &cookie->parent->backing_objects,
+        hlist_for_each_entry(p, &cookie->parent->backing_objects,
                              cookie_link) {
                 if (p->cache == object->cache) {
                         if (p->state >= FSCACHE_OBJECT_DYING) {
@@ -435,7 +433,6 @@ EXPORT_SYMBOL(__fscache_wait_on_invalidate);
 void __fscache_update_cookie(struct fscache_cookie *cookie)
 {
         struct fscache_object *object;
-        struct hlist_node *_p;

         fscache_stat(&fscache_n_updates);

@@ -452,7 +449,7 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
         spin_lock(&cookie->lock);

         /* update the index entry on disk in each cache backing this cookie */
-        hlist_for_each_entry(object, _p,
+        hlist_for_each_entry(object,
                              &cookie->backing_objects, cookie_link) {
                 fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
         }


@@ -798,11 +798,10 @@ static struct inode *find_inode(struct super_block *sb,
                                 int (*test)(struct inode *, void *),
                                 void *data)
 {
-        struct hlist_node *node;
         struct inode *inode = NULL;

 repeat:
-        hlist_for_each_entry(inode, node, head, i_hash) {
+        hlist_for_each_entry(inode, head, i_hash) {
                 spin_lock(&inode->i_lock);
                 if (inode->i_sb != sb) {
                         spin_unlock(&inode->i_lock);
@@ -830,11 +829,10 @@ repeat:
 static struct inode *find_inode_fast(struct super_block *sb,
                                 struct hlist_head *head, unsigned long ino)
 {
-        struct hlist_node *node;
         struct inode *inode = NULL;

 repeat:
-        hlist_for_each_entry(inode, node, head, i_hash) {
+        hlist_for_each_entry(inode, head, i_hash) {
                 spin_lock(&inode->i_lock);
                 if (inode->i_ino != ino) {
                         spin_unlock(&inode->i_lock);
@@ -1132,11 +1130,10 @@ EXPORT_SYMBOL(iget_locked);
 static int test_inode_iunique(struct super_block *sb, unsigned long ino)
 {
         struct hlist_head *b = inode_hashtable + hash(sb, ino);
-        struct hlist_node *node;
         struct inode *inode;

         spin_lock(&inode_hash_lock);
-        hlist_for_each_entry(inode, node, b, i_hash) {
+        hlist_for_each_entry(inode, b, i_hash) {
                 if (inode->i_ino == ino && inode->i_sb == sb) {
                         spin_unlock(&inode_hash_lock);
                         return 0;
@@ -1291,10 +1288,9 @@ int insert_inode_locked(struct inode *inode)
         struct hlist_head *head = inode_hashtable + hash(sb, ino);

         while (1) {
-                struct hlist_node *node;
                 struct inode *old = NULL;
                 spin_lock(&inode_hash_lock);
-                hlist_for_each_entry(old, node, head, i_hash) {
+                hlist_for_each_entry(old, head, i_hash) {
                         if (old->i_ino != ino)
                                 continue;
                         if (old->i_sb != sb)
@@ -1306,7 +1302,7 @@ int insert_inode_locked(struct inode *inode)
                         }
                         break;
                 }
-                if (likely(!node)) {
+                if (likely(!old)) {
                         spin_lock(&inode->i_lock);
                         inode->i_state |= I_NEW;
                         hlist_add_head(&inode->i_hash, head);
@@ -1334,11 +1330,10 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
         struct hlist_head *head = inode_hashtable + hash(sb, hashval);

         while (1) {
-                struct hlist_node *node;
                 struct inode *old = NULL;

                 spin_lock(&inode_hash_lock);
-                hlist_for_each_entry(old, node, head, i_hash) {
+                hlist_for_each_entry(old, head, i_hash) {
                         if (old->i_sb != sb)
                                 continue;
                         if (!test(old, data))
@@ -1350,7 +1345,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
                         }
                         break;
                 }
-                if (likely(!node)) {
+                if (likely(!old)) {
                         spin_lock(&inode->i_lock);
                         inode->i_state |= I_NEW;
                         hlist_add_head(&inode->i_hash, head);
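
The two insert_inode_locked() hunks show the other manual pattern: code that
tested the node pointer after the loop to detect "ran off the end". With the
node gone, the entry pointer itself is NULL once the iterator exhausts the
chain (courtesy of the hlist_entry_safe() construction sketched earlier), so
the post-loop test simply switches from 'node' to 'old'. The idiom, with
match() standing in for the real comparisons:

        struct inode *old = NULL;

        hlist_for_each_entry(old, head, i_hash) {
                if (match(old))
                        break;          /* old still points at the match */
        }
        if (likely(!old)) {
                /* no collision; insert the new inode */
                hlist_add_head(&inode->i_hash, head);
        }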


@@ -32,15 +32,15 @@
 static struct hlist_head	nlm_server_hosts[NLM_HOST_NRHASH];
 static struct hlist_head	nlm_client_hosts[NLM_HOST_NRHASH];

-#define for_each_host(host, pos, chain, table) \
+#define for_each_host(host, chain, table) \
 	for ((chain) = (table); \
 	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
-		hlist_for_each_entry((host), (pos), (chain), h_hash)
+		hlist_for_each_entry((host), (chain), h_hash)

-#define for_each_host_safe(host, pos, next, chain, table) \
+#define for_each_host_safe(host, next, chain, table) \
 	for ((chain) = (table); \
 	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
-		hlist_for_each_entry_safe((host), (pos), (next), \
+		hlist_for_each_entry_safe((host), (next), \
					  (chain), h_hash)

 static unsigned long		nrhosts;
@@ -225,7 +225,6 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
 		.net		= net,
 	};
 	struct hlist_head *chain;
-	struct hlist_node *pos;
 	struct nlm_host	*host;
 	struct nsm_handle *nsm = NULL;
 	struct lockd_net *ln = net_generic(net, lockd_net_id);
@@ -237,7 +236,7 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
 	mutex_lock(&nlm_host_mutex);

 	chain = &nlm_client_hosts[nlm_hash_address(sap)];
-	hlist_for_each_entry(host, pos, chain, h_hash) {
+	hlist_for_each_entry(host, chain, h_hash) {
 		if (host->net != net)
 			continue;
 		if (!rpc_cmp_addr(nlm_addr(host), sap))
@@ -322,7 +321,6 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
 				    const size_t hostname_len)
 {
 	struct hlist_head *chain;
-	struct hlist_node *pos;
 	struct nlm_host	*host = NULL;
 	struct nsm_handle *nsm = NULL;
 	struct sockaddr *src_sap = svc_daddr(rqstp);
@@ -350,7 +348,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
 		nlm_gc_hosts(net);

 	chain = &nlm_server_hosts[nlm_hash_address(ni.sap)];
-	hlist_for_each_entry(host, pos, chain, h_hash) {
+	hlist_for_each_entry(host, chain, h_hash) {
 		if (host->net != net)
 			continue;
 		if (!rpc_cmp_addr(nlm_addr(host), ni.sap))
@@ -515,10 +513,9 @@ static struct nlm_host *next_host_state(struct hlist_head *cache,
 {
 	struct nlm_host *host;
 	struct hlist_head *chain;
-	struct hlist_node *pos;

 	mutex_lock(&nlm_host_mutex);
-	for_each_host(host, pos, chain, cache) {
+	for_each_host(host, chain, cache) {
 		if (host->h_nsmhandle == nsm
 		    && host->h_nsmstate != info->state) {
 			host->h_nsmstate = info->state;
@@ -570,7 +567,6 @@ void nlm_host_rebooted(const struct nlm_reboot *info)
 static void nlm_complain_hosts(struct net *net)
 {
 	struct hlist_head *chain;
-	struct hlist_node *pos;
 	struct nlm_host	*host;

 	if (net) {
@@ -587,7 +583,7 @@ static void nlm_complain_hosts(struct net *net)
 		dprintk("lockd: %lu hosts left:\n", nrhosts);
 	}

-	for_each_host(host, pos, chain, nlm_server_hosts) {
+	for_each_host(host, chain, nlm_server_hosts) {
 		if (net && host->net != net)
 			continue;
 		dprintk("       %s (cnt %d use %d exp %ld net %p)\n",
@@ -600,14 +596,13 @@ void
 nlm_shutdown_hosts_net(struct net *net)
 {
 	struct hlist_head *chain;
-	struct hlist_node *pos;
 	struct nlm_host	*host;

 	mutex_lock(&nlm_host_mutex);

 	/* First, make all hosts eligible for gc */
 	dprintk("lockd: nuking all hosts in net %p...\n", net);
-	for_each_host(host, pos, chain, nlm_server_hosts) {
+	for_each_host(host, chain, nlm_server_hosts) {
 		if (net && host->net != net)
 			continue;
 		host->h_expires = jiffies - 1;
@@ -644,11 +639,11 @@ static void
 nlm_gc_hosts(struct net *net)
 {
 	struct hlist_head *chain;
-	struct hlist_node *pos, *next;
+	struct hlist_node *next;
 	struct nlm_host	*host;

 	dprintk("lockd: host garbage collection for net %p\n", net);
-	for_each_host(host, pos, chain, nlm_server_hosts) {
+	for_each_host(host, chain, nlm_server_hosts) {
 		if (net && host->net != net)
 			continue;
 		host->h_inuse = 0;
@@ -657,7 +652,7 @@ nlm_gc_hosts(struct net *net)

 	/* Mark all hosts that hold locks, blocks or shares */
 	nlmsvc_mark_resources(net);
-	for_each_host_safe(host, pos, next, chain, nlm_server_hosts) {
+	for_each_host_safe(host, next, chain, nlm_server_hosts) {
 		if (net && host->net != net)
 			continue;
 		if (atomic_read(&host->h_count) || host->h_inuse
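Wrapper macros such as for_each_host() shrink the same way as the
iterators they are built on: callers simply drop the node argument. A
sketch of the new calling convention (the loop body is invented for
illustration):

        struct hlist_head *chain;
        struct nlm_host *host;

        for_each_host(host, chain, nlm_server_hosts)
                dprintk("lockd: host %s\n", host->h_name);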


@@ -84,7 +84,6 @@ __be32
 nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
 					struct nfs_fh *f)
 {
-	struct hlist_node *pos;
 	struct nlm_file	*file;
 	unsigned int	hash;
 	__be32		nfserr;
@@ -96,7 +95,7 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
 	/* Lock file table */
 	mutex_lock(&nlm_file_mutex);

-	hlist_for_each_entry(file, pos, &nlm_files[hash], f_list)
+	hlist_for_each_entry(file, &nlm_files[hash], f_list)
 		if (!nfs_compare_fh(&file->f_handle, f))
 			goto found;
@@ -248,13 +247,13 @@ static int
 nlm_traverse_files(void *data, nlm_host_match_fn_t match,
 		   int (*is_failover_file)(void *data, struct nlm_file *file))
 {
-	struct hlist_node *pos, *next;
+	struct hlist_node *next;
 	struct nlm_file	*file;
 	int i, ret = 0;

 	mutex_lock(&nlm_file_mutex);
 	for (i = 0; i < FILE_NRHASH; i++) {
-		hlist_for_each_entry_safe(file, pos, next, &nlm_files[i], f_list) {
+		hlist_for_each_entry_safe(file, next, &nlm_files[i], f_list) {
 			if (is_failover_file && !is_failover_file(data, file))
 				continue;
 			file->f_count++;


@@ -77,9 +77,8 @@ _lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
 		 long hash)
 {
 	struct nfs4_deviceid_node *d;
-	struct hlist_node *n;

-	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
+	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
 		if (d->ld == ld && d->nfs_client == clp &&
 		    !memcmp(&d->deviceid, id, sizeof(*id))) {
 			if (atomic_read(&d->ref))
@@ -248,12 +247,11 @@ static void
 _deviceid_purge_client(const struct nfs_client *clp, long hash)
 {
 	struct nfs4_deviceid_node *d;
-	struct hlist_node *n;
 	HLIST_HEAD(tmp);

 	spin_lock(&nfs4_deviceid_lock);
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
+	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
 		if (d->nfs_client == clp && atomic_read(&d->ref)) {
 			hlist_del_init_rcu(&d->node);
 			hlist_add_head(&d->tmpnode, &tmp);
@@ -291,12 +289,11 @@ void
 nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
 {
 	struct nfs4_deviceid_node *d;
-	struct hlist_node *n;
 	int i;

 	rcu_read_lock();
 	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i ++){
-		hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[i], node)
+		hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
 			if (d->nfs_client == clp)
 				set_bit(NFS_DEVICEID_INVALID, &d->flags);
 	}


@@ -120,7 +120,6 @@ hash_refile(struct svc_cacherep *rp)
 int
 nfsd_cache_lookup(struct svc_rqst *rqstp)
 {
-	struct hlist_node	*hn;
 	struct hlist_head 	*rh;
 	struct svc_cacherep	*rp;
 	__be32			xid = rqstp->rq_xid;
@@ -141,7 +140,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 	rtn = RC_DOIT;
 	rh = &cache_hash[request_hash(xid)];
-	hlist_for_each_entry(rp, hn, rh, c_hash) {
+	hlist_for_each_entry(rp, rh, c_hash) {
 		if (rp->c_state != RC_UNUSED &&
 		    xid == rp->c_xid && proc == rp->c_proc &&
 		    proto == rp->c_prot && vers == rp->c_vers &&


@@ -52,7 +52,6 @@ void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
 void __fsnotify_update_child_dentry_flags(struct inode *inode)
 {
 	struct dentry *alias;
-	struct hlist_node *p;
 	int watched;

 	if (!S_ISDIR(inode->i_mode))
@@ -64,7 +63,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
 	spin_lock(&inode->i_lock);
 	/* run all of the dentries associated with this inode.  Since this is a
 	 * directory, there damn well better only be one item on this list */
-	hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
+	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
 		struct dentry *child;

 		/* run all of the children of the original inode and fix their


@@ -36,12 +36,11 @@
 static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
 {
 	struct fsnotify_mark *mark;
-	struct hlist_node *pos;
 	__u32 new_mask = 0;

 	assert_spin_locked(&inode->i_lock);

-	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list)
+	hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list)
 		new_mask |= mark->mask;
 	inode->i_fsnotify_mask = new_mask;
 }
@@ -87,11 +86,11 @@ void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
 void fsnotify_clear_marks_by_inode(struct inode *inode)
 {
 	struct fsnotify_mark *mark, *lmark;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 	LIST_HEAD(free_list);

 	spin_lock(&inode->i_lock);
-	hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) {
+	hlist_for_each_entry_safe(mark, n, &inode->i_fsnotify_marks, i.i_list) {
 		list_add(&mark->i.free_i_list, &free_list);
 		hlist_del_init_rcu(&mark->i.i_list);
 		fsnotify_get_mark(mark);
@@ -129,11 +128,10 @@ static struct fsnotify_mark *fsnotify_find_inode_mark_locked(
 		struct inode *inode)
 {
 	struct fsnotify_mark *mark;
-	struct hlist_node *pos;

 	assert_spin_locked(&inode->i_lock);

-	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) {
+	hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list) {
 		if (mark->group == group) {
 			fsnotify_get_mark(mark);
 			return mark;
@@ -194,8 +192,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
 			    struct fsnotify_group *group, struct inode *inode,
 			    int allow_dups)
 {
-	struct fsnotify_mark *lmark;
-	struct hlist_node *node, *last = NULL;
+	struct fsnotify_mark *lmark, *last = NULL;
 	int ret = 0;

 	mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
@@ -214,8 +211,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
 	}

 	/* should mark be in the middle of the current list? */
-	hlist_for_each_entry(lmark, node, &inode->i_fsnotify_marks, i.i_list) {
-		last = node;
+	hlist_for_each_entry(lmark, &inode->i_fsnotify_marks, i.i_list) {
+		last = lmark;

 		if ((lmark->group == group) && !allow_dups) {
 			ret = -EEXIST;
@@ -235,7 +232,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
 	BUG_ON(last == NULL);
 	/* mark should be the last entry.  last is the current last entry */
-	hlist_add_after_rcu(last, &mark->i.i_list);
+	hlist_add_after_rcu(&last->i.i_list, &mark->i.i_list);
 out:
 	fsnotify_recalc_inode_mask_locked(inode);
 	spin_unlock(&inode->i_lock);
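The hunks above show the fixup needed where the old node cursor was
used directly: 'last = node' becomes 'last = lmark', and the node that
hlist_add_after_rcu() needs is recovered from the entry as
&last->i.i_list. The general shape, sketched with hypothetical
prev_obj/new_obj/member names:

        /* sketch: &obj->member replaces the retired node cursor */
        hlist_add_after_rcu(&prev_obj->member, &new_obj->member);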


@@ -33,12 +33,12 @@
 void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
 {
 	struct fsnotify_mark *mark, *lmark;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 	struct mount *m = real_mount(mnt);
 	LIST_HEAD(free_list);

 	spin_lock(&mnt->mnt_root->d_lock);
-	hlist_for_each_entry_safe(mark, pos, n, &m->mnt_fsnotify_marks, m.m_list) {
+	hlist_for_each_entry_safe(mark, n, &m->mnt_fsnotify_marks, m.m_list) {
 		list_add(&mark->m.free_m_list, &free_list);
 		hlist_del_init_rcu(&mark->m.m_list);
 		fsnotify_get_mark(mark);
@@ -71,12 +71,11 @@ static void fsnotify_recalc_vfsmount_mask_locked(struct vfsmount *mnt)
 {
 	struct mount *m = real_mount(mnt);
 	struct fsnotify_mark *mark;
-	struct hlist_node *pos;
 	__u32 new_mask = 0;

 	assert_spin_locked(&mnt->mnt_root->d_lock);

-	hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list)
+	hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list)
 		new_mask |= mark->mask;
 	m->mnt_fsnotify_mask = new_mask;
 }
@@ -114,11 +113,10 @@ static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_
 {
 	struct mount *m = real_mount(mnt);
 	struct fsnotify_mark *mark;
-	struct hlist_node *pos;

 	assert_spin_locked(&mnt->mnt_root->d_lock);

-	hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list) {
+	hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list) {
 		if (mark->group == group) {
 			fsnotify_get_mark(mark);
 			return mark;
@@ -153,8 +151,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
 			       int allow_dups)
 {
 	struct mount *m = real_mount(mnt);
-	struct fsnotify_mark *lmark;
-	struct hlist_node *node, *last = NULL;
+	struct fsnotify_mark *lmark, *last = NULL;
 	int ret = 0;

 	mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
@@ -173,8 +170,8 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
 	}

 	/* should mark be in the middle of the current list? */
-	hlist_for_each_entry(lmark, node, &m->mnt_fsnotify_marks, m.m_list) {
-		last = node;
+	hlist_for_each_entry(lmark, &m->mnt_fsnotify_marks, m.m_list) {
+		last = lmark;

 		if ((lmark->group == group) && !allow_dups) {
 			ret = -EEXIST;
@@ -194,7 +191,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
 	BUG_ON(last == NULL);
 	/* mark should be the last entry.  last is the current last entry */
-	hlist_add_after_rcu(last, &mark->m.m_list);
+	hlist_add_after_rcu(&last->m.m_list, &mark->m.m_list);
 out:
 	fsnotify_recalc_vfsmount_mask_locked(mnt);
 	spin_unlock(&mnt->mnt_root->d_lock);


@@ -169,11 +169,10 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
 				      u64 parent_blkno,
 				      int skip_unhashed)
 {
-	struct hlist_node *p;
 	struct dentry *dentry;

 	spin_lock(&inode->i_lock);
-	hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 		spin_lock(&dentry->d_lock);
 		if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
 			trace_ocfs2_find_local_alias(dentry->d_name.len,


@@ -2083,7 +2083,6 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
 					      u8 dead_node, u8 new_master)
 {
 	int i;
-	struct hlist_node *hash_iter;
 	struct hlist_head *bucket;
 	struct dlm_lock_resource *res, *next;
@@ -2114,7 +2113,7 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
 	 * if necessary */
 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
 		bucket = dlm_lockres_hash(dlm, i);
-		hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
+		hlist_for_each_entry(res, bucket, hash_node) {
 			if (!(res->state & DLM_LOCK_RES_RECOVERING))
 				continue;
@@ -2273,7 +2272,6 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
 {
-	struct hlist_node *iter;
 	struct dlm_lock_resource *res;
 	int i;
 	struct hlist_head *bucket;
@@ -2299,7 +2297,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
 	 */
 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
 		bucket = dlm_lockres_hash(dlm, i);
-		hlist_for_each_entry(res, iter, bucket, hash_node) {
+		hlist_for_each_entry(res, bucket, hash_node) {
 			/* always prune any $RECOVERY entries for dead nodes,
 			 * otherwise hangs can occur during later recovery */
 			if (dlm_is_recovery_lock(res->lockname.name,


@@ -447,14 +447,13 @@ struct super_block *sget(struct file_system_type *type,
 			void *data)
 {
 	struct super_block *s = NULL;
-	struct hlist_node *node;
 	struct super_block *old;
 	int err;

 retry:
 	spin_lock(&sb_lock);
 	if (test) {
-		hlist_for_each_entry(old, node, &type->fs_supers, s_instances) {
+		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
 			if (!test(old, data))
 				continue;
 			if (!grab_super(old))
@@ -554,10 +553,9 @@ void iterate_supers_type(struct file_system_type *type,
 	void (*f)(struct super_block *, void *), void *arg)
 {
 	struct super_block *sb, *p = NULL;
-	struct hlist_node *node;

 	spin_lock(&sb_lock);
-	hlist_for_each_entry(sb, node, &type->fs_supers, s_instances) {
+	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
 		sb->s_count++;
 		spin_unlock(&sb_lock);


@@ -461,14 +461,13 @@ const struct file_operations bin_fops = {
 void unmap_bin_file(struct sysfs_dirent *attr_sd)
 {
 	struct bin_buffer *bb;
-	struct hlist_node *tmp;

 	if (sysfs_type(attr_sd) != SYSFS_KOBJ_BIN_ATTR)
 		return;

 	mutex_lock(&sysfs_bin_lock);
-	hlist_for_each_entry(bb, tmp, &attr_sd->s_bin_attr.buffers, list) {
+	hlist_for_each_entry(bb, &attr_sd->s_bin_attr.buffers, list) {
 		struct inode *inode = file_inode(bb->file);

 		unmap_mapping_range(inode->i_mapping, 0, 0, 1);


@@ -1442,9 +1442,8 @@ xlog_recover_find_tid(
 	xlog_tid_t		tid)
 {
 	xlog_recover_t		*trans;
-	struct hlist_node	*n;

-	hlist_for_each_entry(trans, n, head, r_list) {
+	hlist_for_each_entry(trans, head, r_list) {
 		if (trans->r_log_tid == tid)
 			return trans;
 	}


@@ -115,51 +115,50 @@ static inline void hash_del_rcu(struct hlist_node *node)
  * hash_for_each - iterate over a hashtable
  * @name: hashtable to iterate
  * @bkt: integer to use as bucket loop cursor
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @obj: the type * to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  */
-#define hash_for_each(name, bkt, node, obj, member) \
-	for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
-		hlist_for_each_entry(obj, node, &name[bkt], member)
+#define hash_for_each(name, bkt, obj, member) \
+	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+			(bkt)++)\
+		hlist_for_each_entry(obj, &name[bkt], member)

 /**
  * hash_for_each_rcu - iterate over a rcu enabled hashtable
  * @name: hashtable to iterate
  * @bkt: integer to use as bucket loop cursor
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @obj: the type * to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  */
-#define hash_for_each_rcu(name, bkt, node, obj, member) \
-	for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
-		hlist_for_each_entry_rcu(obj, node, &name[bkt], member)
+#define hash_for_each_rcu(name, bkt, obj, member) \
+	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+			(bkt)++)\
+		hlist_for_each_entry_rcu(obj, &name[bkt], member)

 /**
  * hash_for_each_safe - iterate over a hashtable safe against removal of
  * hash entry
  * @name: hashtable to iterate
  * @bkt: integer to use as bucket loop cursor
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @tmp: a &struct used for temporary storage
  * @obj: the type * to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  */
-#define hash_for_each_safe(name, bkt, node, tmp, obj, member) \
-	for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
-		hlist_for_each_entry_safe(obj, node, tmp, &name[bkt], member)
+#define hash_for_each_safe(name, bkt, tmp, obj, member) \
+	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+			(bkt)++)\
+		hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)

 /**
  * hash_for_each_possible - iterate over all possible objects hashing to the
  * same bucket
  * @name: hashtable to iterate
  * @obj: the type * to use as a loop cursor for each entry
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  * @key: the key of the objects to iterate over
  */
-#define hash_for_each_possible(name, obj, node, member, key) \
-	hlist_for_each_entry(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
+#define hash_for_each_possible(name, obj, member, key) \
+	hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)

 /**
  * hash_for_each_possible_rcu - iterate over all possible objects hashing to the
@@ -167,25 +166,24 @@ static inline void hash_del_rcu(struct hlist_node *node)
  * in a rcu enabled hashtable
  * @name: hashtable to iterate
  * @obj: the type * to use as a loop cursor for each entry
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  * @key: the key of the objects to iterate over
  */
-#define hash_for_each_possible_rcu(name, obj, node, member, key) \
-	hlist_for_each_entry_rcu(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
+#define hash_for_each_possible_rcu(name, obj, member, key) \
+	hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
+		member)

 /**
  * hash_for_each_possible_safe - iterate over all possible objects hashing to the
  * same bucket safe against removals
  * @name: hashtable to iterate
  * @obj: the type * to use as a loop cursor for each entry
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @tmp: a &struct used for temporary storage
  * @member: the name of the hlist_node within the struct
  * @key: the key of the objects to iterate over
  */
-#define hash_for_each_possible_safe(name, obj, node, tmp, member, key) \
-	hlist_for_each_entry_safe(obj, node, tmp, \
+#define hash_for_each_possible_safe(name, obj, tmp, member, key) \
+	hlist_for_each_entry_safe(obj, tmp,\
 		&name[hash_min(key, HASH_BITS(name))], member)
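Note how hash_for_each() now presses obj itself into service as the
bucket-advance sentinel that node used to provide. Callers end up
looking like list_for_each_entry() users; a sketch of a lookup against
a DEFINE_HASHTABLE() table, where the struct, table and key are all
invented for illustration:

        static DEFINE_HASHTABLE(obj_table, 4);

        struct object {
                int key;
                struct hlist_node node;
        };

        static struct object *obj_find(int key)
        {
                struct object *obj;

                hash_for_each_possible(obj_table, obj, node, key)
                        if (obj->key == key)
                                return obj;
                return NULL;
        }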


@@ -216,11 +216,10 @@ static inline struct hlist_head *team_port_index_hash(struct team *team,
 static inline struct team_port *team_get_port_by_index(struct team *team,
 							int port_index)
 {
-	struct hlist_node *p;
 	struct team_port *port;
 	struct hlist_head *head = team_port_index_hash(team, port_index);

-	hlist_for_each_entry(port, p, head, hlist)
+	hlist_for_each_entry(port, head, hlist)
 		if (port->index == port_index)
 			return port;
 	return NULL;
@@ -228,11 +227,10 @@ static inline struct team_port *team_get_port_by_index(struct team *team,
 static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
 							   int port_index)
 {
-	struct hlist_node *p;
 	struct team_port *port;
 	struct hlist_head *head = team_port_index_hash(team, port_index);

-	hlist_for_each_entry_rcu(port, p, head, hlist)
+	hlist_for_each_entry_rcu(port, head, hlist)
 		if (port->index == port_index)
 			return port;
 	return NULL;


@@ -666,54 +666,49 @@ static inline void hlist_move_list(struct hlist_head *old,
 	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
 	     pos = n)

+#define hlist_entry_safe(ptr, type, member) \
+	(ptr) ? hlist_entry(ptr, type, member) : NULL
+
 /**
  * hlist_for_each_entry	- iterate over list of given type
- * @tpos:	the type * to use as a loop cursor.
- * @pos:	the &struct hlist_node to use as a loop cursor.
+ * @pos:	the type * to use as a loop cursor.
  * @head:	the head for your list.
  * @member:	the name of the hlist_node within the struct.
 */
-#define hlist_for_each_entry(tpos, pos, head, member)			 \
-	for (pos = (head)->first;					 \
-	     pos &&							 \
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-	     pos = pos->next)
+#define hlist_for_each_entry(pos, head, member)				\
+	for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
+	     pos;							\
+	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

 /**
  * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
- * @tpos:	the type * to use as a loop cursor.
- * @pos:	the &struct hlist_node to use as a loop cursor.
+ * @pos:	the type * to use as a loop cursor.
  * @member:	the name of the hlist_node within the struct.
 */
-#define hlist_for_each_entry_continue(tpos, pos, member)		 \
-	for (pos = (pos)->next;						 \
-	     pos &&							 \
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-	     pos = pos->next)
+#define hlist_for_each_entry_continue(pos, member)			\
+	for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
+	     pos;							\
+	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

 /**
  * hlist_for_each_entry_from - iterate over a hlist continuing from current point
- * @tpos:	the type * to use as a loop cursor.
- * @pos:	the &struct hlist_node to use as a loop cursor.
+ * @pos:	the type * to use as a loop cursor.
  * @member:	the name of the hlist_node within the struct.
 */
-#define hlist_for_each_entry_from(tpos, pos, member)			 \
-	for (; pos &&							 \
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-	     pos = pos->next)
+#define hlist_for_each_entry_from(pos, member)				\
+	for (; pos;							\
+	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

 /**
  * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
- * @tpos:	the type * to use as a loop cursor.
- * @pos:	the &struct hlist_node to use as a loop cursor.
+ * @pos:	the type * to use as a loop cursor.
  * @n:		another &struct hlist_node to use as temporary storage
  * @head:	the head for your list.
  * @member:	the name of the hlist_node within the struct.
 */
-#define hlist_for_each_entry_safe(tpos, pos, n, head, member)		 \
-	for (pos = (head)->first;					 \
-	     pos && ({ n = pos->next; 1; }) &&				 \
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-	     pos = n)
+#define hlist_for_each_entry_safe(pos, n, head, member)			\
+	for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
+	     pos && ({ n = pos->member.next; 1; });			\
+	     pos = hlist_entry_safe(n, typeof(*pos), member))

 #endif
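hlist_entry_safe() is what lets the rewritten iterators drop the node
cursor: it turns a possibly-NULL node pointer into a possibly-NULL
entry pointer, so the loop condition can test pos directly. Before and
after at a call site, sketched with an invented struct and list:

        struct foo {
                int id;
                struct hlist_node link;
        };
        static HLIST_HEAD(foo_list);

        static void dump_foos(void)
        {
                struct foo *f;

                /* old: struct hlist_node *n;
                 *      hlist_for_each_entry(f, n, &foo_list, link) */
                hlist_for_each_entry(f, &foo_list, link)
                        pr_info("foo %d\n", f->id);
        }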


@@ -176,9 +176,8 @@ pid_t pid_vnr(struct pid *pid);

 #define do_each_pid_task(pid, type, task)				\
 	do {								\
-		struct hlist_node *pos___;				\
 		if ((pid) != NULL)					\
-			hlist_for_each_entry_rcu((task), pos___,	\
+			hlist_for_each_entry_rcu((task),		\
 				&(pid)->tasks[type], pids[type].node) {

 /*


@@ -445,8 +445,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,

 /**
  * hlist_for_each_entry_rcu - iterate over rcu list of given type
- * @tpos:	the type * to use as a loop cursor.
- * @pos:	the &struct hlist_node to use as a loop cursor.
+ * @pos:	the type * to use as a loop cursor.
  * @head:	the head for your list.
  * @member:	the name of the hlist_node within the struct.
  *
@@ -454,16 +453,16 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
  * as long as the traversal is guarded by rcu_read_lock().
 */
-#define hlist_for_each_entry_rcu(tpos, pos, head, member)		\
-	for (pos = rcu_dereference_raw(hlist_first_rcu(head));		\
-		pos &&							\
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
-		pos = rcu_dereference_raw(hlist_next_rcu(pos)))
+#define hlist_for_each_entry_rcu(pos, head, member)			\
+	for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
+			typeof(*(pos)), member);			\
+		pos;							\
+		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
+			&(pos)->member)), typeof(*(pos)), member))

 /**
  * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
- * @tpos:	the type * to use as a loop cursor.
- * @pos:	the &struct hlist_node to use as a loop cursor.
+ * @pos:	the type * to use as a loop cursor.
  * @head:	the head for your list.
  * @member:	the name of the hlist_node within the struct.
  *
@@ -471,35 +470,36 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
  * as long as the traversal is guarded by rcu_read_lock().
 */
-#define hlist_for_each_entry_rcu_bh(tpos, pos, head, member)		\
-	for (pos = rcu_dereference_bh((head)->first);			\
-		pos &&							\
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
-		pos = rcu_dereference_bh(pos->next))
+#define hlist_for_each_entry_rcu_bh(pos, head, member)			\
+	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
+			typeof(*(pos)), member);			\
+		pos;							\
+		pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
+			&(pos)->member)), typeof(*(pos)), member))

 /**
  * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
- * @tpos:	the type * to use as a loop cursor.
- * @pos:	the &struct hlist_node to use as a loop cursor.
+ * @pos:	the type * to use as a loop cursor.
  * @member:	the name of the hlist_node within the struct.
 */
-#define hlist_for_each_entry_continue_rcu(tpos, pos, member)		\
-	for (pos = rcu_dereference((pos)->next);			\
-		pos &&							\
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
-		pos = rcu_dereference(pos->next))
+#define hlist_for_each_entry_continue_rcu(pos, member)			\
+	for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
+			typeof(*(pos)), member);			\
+		pos;							\
+		pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
+			typeof(*(pos)), member))

 /**
  * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
- * @tpos:	the type * to use as a loop cursor.
- * @pos:	the &struct hlist_node to use as a loop cursor.
+ * @pos:	the type * to use as a loop cursor.
  * @member:	the name of the hlist_node within the struct.
 */
-#define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member)		\
-	for (pos = rcu_dereference_bh((pos)->next);			\
-		pos &&							\
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
-		pos = rcu_dereference_bh(pos->next))
+#define hlist_for_each_entry_continue_rcu_bh(pos, member)		\
+	for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
+			typeof(*(pos)), member);			\
+		pos;							\
+		pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
+			typeof(*(pos)), member))

 #endif	/* __KERNEL__ */
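The RCU variants keep their usual contract (traversal guarded by
rcu_read_lock(), mutation via the _rcu primitives); only the extra
cursor is gone. A reader-side lookup, sketched with invented names:

        struct object *obj;

        rcu_read_lock();
        hlist_for_each_entry_rcu(obj, &obj_hash[bucket], node)
                if (obj->key == key)
                        pr_info("found %d\n", obj->key);
        rcu_read_unlock();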


@@ -161,8 +161,8 @@ typedef struct ax25_uid_assoc {
 	ax25_address		call;
 } ax25_uid_assoc;

-#define ax25_uid_for_each(__ax25, node, list) \
-	hlist_for_each_entry(__ax25, node, list, uid_node)
+#define ax25_uid_for_each(__ax25, list) \
+	hlist_for_each_entry(__ax25, list, uid_node)

 #define ax25_uid_hold(ax25) \
 	atomic_inc(&((ax25)->refcount))
@@ -247,8 +247,8 @@ typedef struct ax25_cb {

 #define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo)

-#define ax25_for_each(__ax25, node, list) \
-	hlist_for_each_entry(__ax25, node, list, ax25_node)
+#define ax25_for_each(__ax25, list) \
+	hlist_for_each_entry(__ax25, list, ax25_node)

 #define ax25_cb_hold(__ax25) \
 	atomic_inc(&((__ax25)->refcount))


@@ -94,8 +94,8 @@ static inline struct net *ib_net(struct inet_bind_bucket *ib)
 	return read_pnet(&ib->ib_net);
 }

-#define inet_bind_bucket_for_each(tb, pos, head) \
-	hlist_for_each_entry(tb, pos, head, node)
+#define inet_bind_bucket_for_each(tb, head) \
+	hlist_for_each_entry(tb, head, node)

 struct inet_bind_hashbucket {
 	spinlock_t		lock;


@@ -178,11 +178,11 @@ static inline int inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
 #define inet_twsk_for_each(tw, node, head) \
 	hlist_nulls_for_each_entry(tw, node, head, tw_node)

-#define inet_twsk_for_each_inmate(tw, node, jail) \
-	hlist_for_each_entry(tw, node, jail, tw_death_node)
+#define inet_twsk_for_each_inmate(tw, jail) \
+	hlist_for_each_entry(tw, jail, tw_death_node)

-#define inet_twsk_for_each_inmate_safe(tw, node, safe, jail) \
-	hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node)
+#define inet_twsk_for_each_inmate_safe(tw, safe, jail) \
+	hlist_for_each_entry_safe(tw, safe, jail, tw_death_node)

 static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
 {


@@ -154,17 +154,17 @@ static __inline__ void nr_node_unlock(struct nr_node *nr_node)
 	nr_node_put(nr_node);
 }

-#define nr_neigh_for_each(__nr_neigh, node, list) \
-	hlist_for_each_entry(__nr_neigh, node, list, neigh_node)
+#define nr_neigh_for_each(__nr_neigh, list) \
+	hlist_for_each_entry(__nr_neigh, list, neigh_node)

-#define nr_neigh_for_each_safe(__nr_neigh, node, node2, list) \
-	hlist_for_each_entry_safe(__nr_neigh, node, node2, list, neigh_node)
+#define nr_neigh_for_each_safe(__nr_neigh, node2, list) \
+	hlist_for_each_entry_safe(__nr_neigh, node2, list, neigh_node)

-#define nr_node_for_each(__nr_node, node, list) \
-	hlist_for_each_entry(__nr_node, node, list, node_node)
+#define nr_node_for_each(__nr_node, list) \
+	hlist_for_each_entry(__nr_node, list, node_node)

-#define nr_node_for_each_safe(__nr_node, node, node2, list) \
-	hlist_for_each_entry_safe(__nr_node, node, node2, list, node_node)
+#define nr_node_for_each_safe(__nr_node, node2, list) \
+	hlist_for_each_entry_safe(__nr_node, node2, list, node_node)

 /*********************************************************************/


@@ -339,11 +339,10 @@ static inline struct Qdisc_class_common *
 qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
 {
 	struct Qdisc_class_common *cl;
-	struct hlist_node *n;
 	unsigned int h;

 	h = qdisc_class_hash(id, hash->hashmask);
-	hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
+	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
 		if (cl->classid == id)
 			return cl;
 	}


@@ -675,8 +675,8 @@ static inline int sctp_vtag_hashfn(__u16 lport, __u16 rport, __u32 vtag)
 	return h & (sctp_assoc_hashsize - 1);
 }

-#define sctp_for_each_hentry(epb, node, head) \
-	hlist_for_each_entry(epb, node, head, node)
+#define sctp_for_each_hentry(epb, head) \
+	hlist_for_each_entry(epb, head, node)

 /* Is a socket of this style? */
 #define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style))


@@ -606,24 +606,23 @@ static inline void sk_add_bind_node(struct sock *sk,
 	hlist_add_head(&sk->sk_bind_node, list);
 }

-#define sk_for_each(__sk, node, list) \
-	hlist_for_each_entry(__sk, node, list, sk_node)
+#define sk_for_each(__sk, list) \
+	hlist_for_each_entry(__sk, list, sk_node)
-#define sk_for_each_rcu(__sk, node, list) \
-	hlist_for_each_entry_rcu(__sk, node, list, sk_node)
+#define sk_for_each_rcu(__sk, list) \
+	hlist_for_each_entry_rcu(__sk, list, sk_node)
 #define sk_nulls_for_each(__sk, node, list) \
 	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
 #define sk_nulls_for_each_rcu(__sk, node, list) \
 	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
-#define sk_for_each_from(__sk, node) \
-	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
-		hlist_for_each_entry_from(__sk, node, sk_node)
+#define sk_for_each_from(__sk) \
+	hlist_for_each_entry_from(__sk, sk_node)
 #define sk_nulls_for_each_from(__sk, node) \
 	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
 		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
-#define sk_for_each_safe(__sk, node, tmp, list) \
-	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
+#define sk_for_each_safe(__sk, tmp, list) \
+	hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
-#define sk_for_each_bound(__sk, node, list) \
-	hlist_for_each_entry(__sk, node, list, sk_bind_node)
+#define sk_for_each_bound(__sk, list) \
+	hlist_for_each_entry(__sk, list, sk_bind_node)

 static inline struct user_namespace *sk_user_ns(struct sock *sk)
 {
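Only the plain hlist wrappers change here: the nulls variants
(sk_nulls_for_each() and friends) keep their node cursor, since an
hlist_nulls chain ends in an encoded marker value rather than NULL and
the termination test needs the raw pointer. A new-style caller,
sketched with an invented bucket head:

        struct sock *sk;
        int count = 0;

        sk_for_each(sk, &hashbucket)
                count++;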


@@ -554,7 +554,6 @@ static struct css_set *find_existing_css_set(
 {
 	int i;
 	struct cgroupfs_root *root = cgrp->root;
-	struct hlist_node *node;
 	struct css_set *cg;
 	unsigned long key;

@@ -577,7 +576,7 @@ static struct css_set *find_existing_css_set(
 	}

 	key = css_set_hash(template);
-	hash_for_each_possible(css_set_table, cg, node, hlist, key) {
+	hash_for_each_possible(css_set_table, cg, hlist, key) {
 		if (!compare_css_sets(cg, oldcg, cgrp, template))
 			continue;

@@ -1611,7 +1610,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
 		struct cgroupfs_root *existing_root;
 		const struct cred *cred;
 		int i;
-		struct hlist_node *node;
 		struct css_set *cg;

 		BUG_ON(sb->s_root != NULL);
@@ -1666,7 +1664,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
 		/* Link the top cgroup in this hierarchy into all
 		 * the css_set objects */
 		write_lock(&css_set_lock);
-		hash_for_each(css_set_table, i, node, cg, hlist)
+		hash_for_each(css_set_table, i, cg, hlist)
 			link_css_set(&tmp_cg_links, cg, root_cgrp);
 		write_unlock(&css_set_lock);

@@ -4493,7 +4491,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 {
 	struct cgroup_subsys_state *css;
 	int i, ret;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	struct css_set *cg;
 	unsigned long key;

@@ -4561,7 +4559,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 	 * this is all done under the css_set_lock.
 	 */
 	write_lock(&css_set_lock);
-	hash_for_each_safe(css_set_table, i, node, tmp, cg, hlist) {
+	hash_for_each_safe(css_set_table, i, tmp, cg, hlist) {
 		/* skip entries that we already rehashed */
 		if (cg->subsys[ss->subsys_id])
 			continue;
@@ -4571,7 +4569,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 		cg->subsys[ss->subsys_id] = css;
 		/* recompute hash and restore entry */
 		key = css_set_hash(cg->subsys);
-		hash_add(css_set_table, node, key);
+		hash_add(css_set_table, &cg->hlist, key);
 	}
 	write_unlock(&css_set_lock);
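hash_add() takes a struct hlist_node *, and with no loop cursor left to
reuse, the rehash above derives it from the entry itself as &cg->hlist.
The general shape, with a hypothetical object and DEFINE_HASHTABLE()
table:

        struct object {
                int key;
                struct hlist_node hlist;
        };

        /* the node lives inside the object, so &obj->hlist always works */
        hash_add(obj_table, &obj->hlist, obj->key);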


@@ -5126,7 +5126,6 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 {
 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
 	struct perf_event *event;
-	struct hlist_node *node;
 	struct hlist_head *head;

 	rcu_read_lock();
@@ -5134,7 +5133,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 	if (!head)
 		goto end;

-	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
+	hlist_for_each_entry_rcu(event, head, hlist_entry) {
 		if (perf_swevent_match(event, type, event_id, data, regs))
 			perf_swevent_event(event, nr, data, regs);
 	}
@@ -5419,7 +5418,6 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
 {
 	struct perf_sample_data data;
 	struct perf_event *event;
-	struct hlist_node *node;

 	struct perf_raw_record raw = {
 		.size = entry_size,
@@ -5429,7 +5427,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
 	perf_sample_data_init(&data, addr, 0);
 	data.raw = &raw;

-	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
+	hlist_for_each_entry_rcu(event, head, hlist_entry) {
 		if (perf_tp_event_match(event, &data, regs))
 			perf_swevent_event(event, count, &data, regs);
 	}


@@ -334,11 +334,10 @@ static inline void reset_kprobe_instance(void)
 struct kprobe __kprobes *get_kprobe(void *addr)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;

 	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
-	hlist_for_each_entry_rcu(p, node, head, hlist) {
+	hlist_for_each_entry_rcu(p, head, hlist) {
 		if (p->addr == addr)
 			return p;
 	}
@@ -799,7 +798,6 @@ out:
 static void __kprobes optimize_all_kprobes(void)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;

@@ -810,7 +808,7 @@ static void __kprobes optimize_all_kprobes(void)
 	kprobes_allow_optimization = true;
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist)
+		hlist_for_each_entry_rcu(p, head, hlist)
 			if (!kprobe_disabled(p))
 				optimize_kprobe(p);
 	}
@@ -821,7 +819,6 @@ static void __kprobes optimize_all_kprobes(void)
 static void __kprobes unoptimize_all_kprobes(void)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;

@@ -832,7 +829,7 @@ static void __kprobes unoptimize_all_kprobes(void)
 	kprobes_allow_optimization = false;
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist) {
+		hlist_for_each_entry_rcu(p, head, hlist) {
 			if (!kprobe_disabled(p))
 				unoptimize_kprobe(p, false);
 		}
@@ -1148,7 +1145,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 {
 	struct kretprobe_instance *ri;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long hash, flags = 0;

 	if (unlikely(!kprobes_initialized))
@@ -1159,12 +1156,12 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
 	head = &kretprobe_inst_table[hash];
 	kretprobe_table_lock(hash, &flags);
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task == tk)
 			recycle_rp_inst(ri, &empty_rp);
 	}
 	kretprobe_table_unlock(hash, &flags);
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
@@ -1173,9 +1170,9 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 static inline void free_rp_inst(struct kretprobe *rp)
 {
 	struct kretprobe_instance *ri;
-	struct hlist_node *pos, *next;
+	struct hlist_node *next;

-	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
+	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
@@ -1185,14 +1182,14 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
 {
 	unsigned long flags, hash;
 	struct kretprobe_instance *ri;
-	struct hlist_node *pos, *next;
+	struct hlist_node *next;
 	struct hlist_head *head;

 	/* No race here */
 	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
 		kretprobe_table_lock(hash, &flags);
 		head = &kretprobe_inst_table[hash];
-		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
+		hlist_for_each_entry_safe(ri, next, head, hlist) {
 			if (ri->rp == rp)
 				ri->rp = NULL;
 		}
@@ -2028,7 +2025,6 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
 {
 	struct module *mod = data;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;
 	int checkcore = (val == MODULE_STATE_GOING);
@@ -2045,7 +2041,7 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
 	mutex_lock(&kprobe_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist)
+		hlist_for_each_entry_rcu(p, head, hlist)
 			if (within_module_init((unsigned long)p->addr, mod) ||
 			    (checkcore &&
 			     within_module_core((unsigned long)p->addr, mod))) {
@@ -2192,7 +2188,6 @@ static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
 static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p, *kp;
 	const char *sym = NULL;
 	unsigned int i = *(loff_t *) v;
@@ -2201,7 +2196,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
 	head = &kprobe_table[i];
 	preempt_disable();
-	hlist_for_each_entry_rcu(p, node, head, hlist) {
+	hlist_for_each_entry_rcu(p, head, hlist) {
 		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
 					&offset, &modname, namebuf);
 		if (kprobe_aggrprobe(p)) {
@@ -2236,7 +2231,6 @@ static const struct file_operations debugfs_kprobes_operations = {
 static void __kprobes arm_all_kprobes(void)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;

@@ -2249,7 +2243,7 @@ static void __kprobes arm_all_kprobes(void)
 	/* Arming kprobes doesn't optimize kprobe itself */
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist)
+		hlist_for_each_entry_rcu(p, head, hlist)
 			if (!kprobe_disabled(p))
 				arm_kprobe(p);
 	}
@@ -2265,7 +2259,6 @@ already_enabled:
 static void __kprobes disarm_all_kprobes(void)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;

@@ -2282,7 +2275,7 @@ static void __kprobes disarm_all_kprobes(void)
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist) {
+		hlist_for_each_entry_rcu(p, head, hlist) {
 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
 				disarm_kprobe(p, false);
 		}
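The _safe conversions above show that only the walk cursor went away:
the lookahead node ('tmp'/'next') is still a bare struct hlist_node *,
because it has to survive deletion of the current entry.
Delete-while-iterating, sketched with invented names:

        struct object *obj;
        struct hlist_node *tmp;

        hlist_for_each_entry_safe(obj, tmp, &bucket, node) {
                hlist_del(&obj->node);
                kfree(obj);
        }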


@@ -350,10 +350,9 @@ void disable_pid_allocation(struct pid_namespace *ns)

 struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
 {
-	struct hlist_node *elem;
 	struct upid *pnr;

-	hlist_for_each_entry_rcu(pnr, elem,
+	hlist_for_each_entry_rcu(pnr,
 			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
 		if (pnr->nr == nr && pnr->ns == ns)
 			return container_of(pnr, struct pid,


@@ -1752,9 +1752,8 @@ EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
 static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
 {
 	struct preempt_notifier *notifier;
-	struct hlist_node *node;

-	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
+	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
 		notifier->ops->sched_in(notifier, raw_smp_processor_id());
 }

@@ -1763,9 +1762,8 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
 				 struct task_struct *next)
 {
 	struct preempt_notifier *notifier;
-	struct hlist_node *node;

-	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
+	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
 		notifier->ops->sched_out(notifier, next);
 }


@@ -131,7 +131,7 @@ static int smpboot_thread_fn(void *data)
 			continue;
 		}

-		BUG_ON(td->cpu != smp_processor_id());
+		//BUG_ON(td->cpu != smp_processor_id());

 		/* Check for state change setup */
 		switch (td->status) {


@@ -762,7 +762,6 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 {
         struct ftrace_profile *rec;
         struct hlist_head *hhd;
-        struct hlist_node *n;
         unsigned long key;
         key = hash_long(ip, ftrace_profile_bits);
@@ -771,7 +770,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
         if (hlist_empty(hhd))
                 return NULL;
-        hlist_for_each_entry_rcu(rec, n, hhd, node) {
+        hlist_for_each_entry_rcu(rec, hhd, node) {
                 if (rec->ip == ip)
                         return rec;
         }
@@ -1133,7 +1132,6 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
         unsigned long key;
         struct ftrace_func_entry *entry;
         struct hlist_head *hhd;
-        struct hlist_node *n;
         if (ftrace_hash_empty(hash))
                 return NULL;
@@ -1145,7 +1143,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
         hhd = &hash->buckets[key];
-        hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
+        hlist_for_each_entry_rcu(entry, hhd, hlist) {
                 if (entry->ip == ip)
                         return entry;
         }
@@ -1202,7 +1200,7 @@ remove_hash_entry(struct ftrace_hash *hash,
 static void ftrace_hash_clear(struct ftrace_hash *hash)
 {
         struct hlist_head *hhd;
-        struct hlist_node *tp, *tn;
+        struct hlist_node *tn;
         struct ftrace_func_entry *entry;
         int size = 1 << hash->size_bits;
         int i;
@@ -1212,7 +1210,7 @@ static void ftrace_hash_clear(struct ftrace_hash *hash)
         for (i = 0; i < size; i++) {
                 hhd = &hash->buckets[i];
-                hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
+                hlist_for_each_entry_safe(entry, tn, hhd, hlist)
                         free_hash_entry(hash, entry);
         }
         FTRACE_WARN_ON(hash->count);
@@ -1275,7 +1273,6 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 {
         struct ftrace_func_entry *entry;
         struct ftrace_hash *new_hash;
-        struct hlist_node *tp;
         int size;
         int ret;
         int i;
@@ -1290,7 +1287,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
         size = 1 << hash->size_bits;
         for (i = 0; i < size; i++) {
-                hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
+                hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
                         ret = add_hash_entry(new_hash, entry->ip);
                         if (ret < 0)
                                 goto free_hash;
@@ -1316,7 +1313,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
                  struct ftrace_hash **dst, struct ftrace_hash *src)
 {
         struct ftrace_func_entry *entry;
-        struct hlist_node *tp, *tn;
+        struct hlist_node *tn;
         struct hlist_head *hhd;
         struct ftrace_hash *old_hash;
         struct ftrace_hash *new_hash;
@@ -1362,7 +1359,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
         size = 1 << src->size_bits;
         for (i = 0; i < size; i++) {
                 hhd = &src->buckets[i];
-                hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
+                hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
                         if (bits > 0)
                                 key = hash_long(entry->ip, bits);
                         else
@@ -2901,7 +2898,6 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 {
         struct ftrace_func_probe *entry;
         struct hlist_head *hhd;
-        struct hlist_node *n;
         unsigned long key;
         key = hash_long(ip, FTRACE_HASH_BITS);
@@ -2917,7 +2913,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
          * on the hash. rcu_read_lock is too dangerous here.
          */
         preempt_disable_notrace();
-        hlist_for_each_entry_rcu(entry, n, hhd, node) {
+        hlist_for_each_entry_rcu(entry, hhd, node) {
                 if (entry->ip == ip)
                         entry->ops->func(ip, parent_ip, &entry->data);
         }
@@ -3068,7 +3064,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                   void *data, int flags)
 {
         struct ftrace_func_probe *entry;
-        struct hlist_node *n, *tmp;
+        struct hlist_node *tmp;
         char str[KSYM_SYMBOL_LEN];
         int type = MATCH_FULL;
         int i, len = 0;
@@ -3091,7 +3087,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
                 struct hlist_head *hhd = &ftrace_func_hash[i];
-                hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
+                hlist_for_each_entry_safe(entry, tmp, hhd, node) {
                         /* break up if statements for readability */
                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
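
Note why one node pointer survives in ftrace_hash_clear() and
ftrace_hash_move(): the _safe flavor must sample the next pointer before the
current entry is freed, so it keeps a single spare cursor even though the old
'pos' cursor is gone. A sketch of the converted shape (again not a verbatim
quote of linux/list.h):

        /* sketch of the post-conversion safe iterator: 'n' caches the
         * next node so 'pos' may be freed inside the loop body */
        #define hlist_for_each_entry_safe(pos, n, head, member)         \
                for (pos = hlist_entry_safe((head)->first,              \
                                typeof(*pos), member);                  \
                     pos && ({ n = pos->member.next; 1; });             \
                     pos = hlist_entry_safe(n, typeof(*pos), member))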


@@ -739,12 +739,11 @@ static int task_state_char(unsigned long state)
 struct trace_event *ftrace_find_event(int type)
 {
         struct trace_event *event;
-        struct hlist_node *n;
         unsigned key;
         key = type & (EVENT_HASHSIZE - 1);
-        hlist_for_each_entry(event, n, &event_hash[key], node) {
+        hlist_for_each_entry(event, &event_hash[key], node) {
                 if (event->type == type)
                         return event;
         }


@@ -192,12 +192,11 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
 static struct tracepoint_entry *get_tracepoint(const char *name)
 {
         struct hlist_head *head;
-        struct hlist_node *node;
         struct tracepoint_entry *e;
         u32 hash = jhash(name, strlen(name), 0);
         head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-        hlist_for_each_entry(e, node, head, hlist) {
+        hlist_for_each_entry(e, head, hlist) {
                 if (!strcmp(name, e->name))
                         return e;
         }
@@ -211,13 +210,12 @@ static struct tracepoint_entry *get_tracepoint(const char *name)
 static struct tracepoint_entry *add_tracepoint(const char *name)
 {
         struct hlist_head *head;
-        struct hlist_node *node;
         struct tracepoint_entry *e;
         size_t name_len = strlen(name) + 1;
         u32 hash = jhash(name, name_len-1, 0);
         head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-        hlist_for_each_entry(e, node, head, hlist) {
+        hlist_for_each_entry(e, head, hlist) {
                 if (!strcmp(name, e->name)) {
                         printk(KERN_NOTICE
                                "tracepoint %s busy\n", name);


@@ -34,11 +34,11 @@ EXPORT_SYMBOL_GPL(user_return_notifier_unregister);
 void fire_user_return_notifiers(void)
 {
         struct user_return_notifier *urn;
-        struct hlist_node *tmp1, *tmp2;
+        struct hlist_node *tmp2;
         struct hlist_head *head;
         head = &get_cpu_var(return_notifier_list);
-        hlist_for_each_entry_safe(urn, tmp1, tmp2, head, link)
+        hlist_for_each_entry_safe(urn, tmp2, head, link)
                 urn->on_user_return(urn);
         put_cpu_var(return_notifier_list);
 }


@@ -105,9 +105,8 @@ static void uid_hash_remove(struct user_struct *up)
 static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
 {
         struct user_struct *user;
-        struct hlist_node *h;
-        hlist_for_each_entry(user, h, hashent, uidhash_node) {
+        hlist_for_each_entry(user, hashent, uidhash_node) {
                 if (uid_eq(user->uid, uid)) {
                         atomic_inc(&user->__count);
                         return user;


@@ -251,8 +251,8 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
         for ((pool) = &std_worker_pools(cpu)[0];                        \
              (pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++)
-#define for_each_busy_worker(worker, i, pos, pool)                      \
-        hash_for_each(pool->busy_hash, i, pos, worker, hentry)
+#define for_each_busy_worker(worker, i, pool)                           \
+        hash_for_each(pool->busy_hash, i, worker, hentry)
 static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
                                 unsigned int sw)
@@ -909,9 +909,8 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
                                                  struct work_struct *work)
 {
         struct worker *worker;
-        struct hlist_node *tmp;
-        hash_for_each_possible(pool->busy_hash, worker, tmp, hentry,
+        hash_for_each_possible(pool->busy_hash, worker, hentry,
                                (unsigned long)work)
                 if (worker->current_work == work &&
                     worker->current_func == work->func)
@@ -1626,7 +1625,6 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 static void rebind_workers(struct worker_pool *pool)
 {
         struct worker *worker, *n;
-        struct hlist_node *pos;
         int i;
         lockdep_assert_held(&pool->assoc_mutex);
@@ -1648,7 +1646,7 @@ static void rebind_workers(struct worker_pool *pool)
         }
         /* rebind busy workers */
-        for_each_busy_worker(worker, i, pos, pool) {
+        for_each_busy_worker(worker, i, pool) {
                 struct work_struct *rebind_work = &worker->rebind_work;
                 struct workqueue_struct *wq;
@@ -3423,7 +3421,6 @@ static void wq_unbind_fn(struct work_struct *work)
         int cpu = smp_processor_id();
         struct worker_pool *pool;
         struct worker *worker;
-        struct hlist_node *pos;
         int i;
         for_each_std_worker_pool(pool, cpu) {
@@ -3442,7 +3439,7 @@ static void wq_unbind_fn(struct work_struct *work)
         list_for_each_entry(worker, &pool->idle_list, entry)
                 worker->flags |= WORKER_UNBOUND;
-        for_each_busy_worker(worker, i, pos, pool)
+        for_each_busy_worker(worker, i, pool)
                 worker->flags |= WORKER_UNBOUND;
         pool->flags |= POOL_DISASSOCIATED;
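
for_each_busy_worker() is a thin wrapper over the generic hashtable helpers,
so once hash_for_each() and hash_for_each_possible() drop the node parameter
the wrapper sheds it mechanically. A simplified model of the delegation (the
real linux/hashtable.h additionally threads an obj == NULL test through the
outer loop so that a break in the body exits both loops):

        /* simplified sketch: per-bucket walks delegate to the new
         * node-less hlist iterator */
        #define hash_for_each(name, bkt, obj, member)                   \
                for ((bkt) = 0; (bkt) < HASH_SIZE(name); (bkt)++)       \
                        hlist_for_each_entry(obj, &name[bkt], member)

        #define hash_for_each_possible(name, obj, member, key)          \
                hlist_for_each_entry(obj,                               \
                        &name[hash_min(key, HASH_BITS(name))], member)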


@@ -109,11 +109,10 @@ static void fill_pool(void)
  */
 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 {
-        struct hlist_node *node;
         struct debug_obj *obj;
         int cnt = 0;
-        hlist_for_each_entry(obj, node, &b->list, node) {
+        hlist_for_each_entry(obj, &b->list, node) {
                 cnt++;
                 if (obj->object == addr)
                         return obj;
@@ -213,7 +212,7 @@ static void free_object(struct debug_obj *obj)
 static void debug_objects_oom(void)
 {
         struct debug_bucket *db = obj_hash;
-        struct hlist_node *node, *tmp;
+        struct hlist_node *tmp;
         HLIST_HEAD(freelist);
         struct debug_obj *obj;
         unsigned long flags;
@@ -227,7 +226,7 @@ static void debug_objects_oom(void)
                 raw_spin_unlock_irqrestore(&db->lock, flags);
                 /* Now free them */
-                hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+                hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
                         hlist_del(&obj->node);
                         free_object(obj);
                 }
@@ -658,7 +657,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
         unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
-        struct hlist_node *node, *tmp;
+        struct hlist_node *tmp;
         HLIST_HEAD(freelist);
         struct debug_obj_descr *descr;
         enum debug_obj_state state;
@@ -678,7 +677,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 repeat:
         cnt = 0;
         raw_spin_lock_irqsave(&db->lock, flags);
-        hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+        hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
                 cnt++;
                 oaddr = (unsigned long) obj->object;
                 if (oaddr < saddr || oaddr >= eaddr)
@@ -702,7 +701,7 @@ repeat:
         raw_spin_unlock_irqrestore(&db->lock, flags);
         /* Now free them */
-        hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+        hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
                 hlist_del(&obj->node);
                 free_object(obj);
         }
@@ -1013,7 +1012,7 @@ void __init debug_objects_early_init(void)
 static int __init debug_objects_replace_static_objects(void)
 {
         struct debug_bucket *db = obj_hash;
-        struct hlist_node *node, *tmp;
+        struct hlist_node *tmp;
         struct debug_obj *obj, *new;
         HLIST_HEAD(objects);
         int i, cnt = 0;
@@ -1033,7 +1032,7 @@ static int __init debug_objects_replace_static_objects(void)
         local_irq_disable();
         /* Remove the statically allocated objects from the pool */
-        hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
+        hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
                 hlist_del(&obj->node);
         /* Move the allocated objects to the pool */
         hlist_move_list(&objects, &obj_pool);
@@ -1042,7 +1041,7 @@ static int __init debug_objects_replace_static_objects(void)
         for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                 hlist_move_list(&db->list, &objects);
-                hlist_for_each_entry(obj, node, &objects, node) {
+                hlist_for_each_entry(obj, &objects, node) {
                         new = hlist_entry(obj_pool.first, typeof(*obj), node);
                         hlist_del(&new->node);
                         /* copy object data */
@@ -1057,7 +1056,7 @@ static int __init debug_objects_replace_static_objects(void)
                  obj_pool_used);
         return 0;
 free:
-        hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
+        hlist_for_each_entry_safe(obj, tmp, &objects, node) {
                 hlist_del(&obj->node);
                 kmem_cache_free(obj_cache, obj);
         }


@@ -262,12 +262,11 @@ static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
 static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr,
                 bool include_changing)
 {
-        struct hlist_node *n;
         struct lc_element *e;
         BUG_ON(!lc);
         BUG_ON(!lc->nr_elements);
-        hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) {
+        hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) {
                 /* "about to be changed" elements, pending transaction commit,
                  * are hashed by their "new number". "Normal" elements have
                  * lc_number == lc_new_number. */


@@ -1906,9 +1906,8 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 {
         struct mm_slot *mm_slot;
-        struct hlist_node *node;
-        hash_for_each_possible(mm_slots_hash, mm_slot, node, hash, (unsigned long)mm)
+        hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
                 if (mm == mm_slot->mm)
                         return mm_slot;

Some files were not shown because too many files have changed in this diff.