sched/fair, cpumask: Export for_each_cpu_wrap()
commit c743f0a5c50f2fcbc628526279cfa24f3dabe182 upstream. More users for for_each_cpu_wrap() have appeared. Promote the construct to generic cpumask interface. The implementation is slightly modified to reduce arguments. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Lauro Ramos Venancio <lvenanci@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Mike Galbraith <efault@gmx.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rik van Riel <riel@redhat.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: lwang@redhat.com Link: http://lkml.kernel.org/r/20170414122005.o35me2h5nowqkxbv@hirez.programming.kicks-ass.net Signed-off-by: Ingo Molnar <mingo@kernel.org> [bwh: Backported to 3.16: there's no old version of the function to delete] Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
This commit is contained in:
parent
652ea8e9d4
commit
3cc608b4c0
|
@ -217,6 +217,23 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
|
||||||
(cpu) = cpumask_next_zero((cpu), (mask)), \
|
(cpu) = cpumask_next_zero((cpu), (mask)), \
|
||||||
(cpu) < nr_cpu_ids;)
|
(cpu) < nr_cpu_ids;)
|
||||||
|
|
||||||
|
extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);

/**
 * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 * @start: the start location
 *
 * The implementation does not assume any bit in @mask is set (including @start).
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_wrap(cpu, mask, start)					\
	for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);	\
	     (cpu) < nr_cpumask_bits;						\
	     (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* for_each_cpu_and - iterate over every cpu in both masks
|
* for_each_cpu_and - iterate over every cpu in both masks
|
||||||
* @cpu: the (optionally unsigned) integer iterator
|
* @cpu: the (optionally unsigned) integer iterator
|
||||||
|
|
|
@ -63,6 +63,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
|
||||||
return i;
|
return i;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* cpumask_next_wrap - helper to implement for_each_cpu_wrap
|
||||||
|
* @n: the cpu prior to the place to search
|
||||||
|
* @mask: the cpumask pointer
|
||||||
|
* @start: the start point of the iteration
|
||||||
|
* @wrap: assume @n crossing @start terminates the iteration
|
||||||
|
*
|
||||||
|
* Returns >= nr_cpu_ids on completion
|
||||||
|
*
|
||||||
|
* Note: the @wrap argument is required for the start condition when
|
||||||
|
* we cannot assume @start is set in @mask.
|
||||||
|
*/
|
||||||
|
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
|
||||||
|
{
|
||||||
|
int next;
|
||||||
|
|
||||||
|
again:
|
||||||
|
next = cpumask_next(n, mask);
|
||||||
|
|
||||||
|
if (wrap && n < start && next >= start) {
|
||||||
|
return nr_cpumask_bits;
|
||||||
|
|
||||||
|
} else if (next >= nr_cpumask_bits) {
|
||||||
|
wrap = true;
|
||||||
|
n = -1;
|
||||||
|
goto again;
|
||||||
|
}
|
||||||
|
|
||||||
|
return next;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(cpumask_next_wrap);
|
||||||
|
|
||||||
/* These are not inline because of header tangles. */
|
/* These are not inline because of header tangles. */
|
||||||
#ifdef CONFIG_CPUMASK_OFFSTACK
|
#ifdef CONFIG_CPUMASK_OFFSTACK
|
||||||
/**
|
/**
|
||||||
|
|
Loading…
Reference in New Issue