@@ -564,12 +564,36 @@ static inline unsigned int cpumask_check(unsigned int cpu)
 }
 
 #if NR_CPUS == 1
-/* Uniprocesor. */
-#define cpumask_first(src)		({ (void)(src); 0; })
-#define cpumask_next(n, src)		({ (void)(src); 1; })
-#define cpumask_next_zero(n, src)	({ (void)(src); 1; })
-#define cpumask_next_and(n, srcp, andp)	({ (void)(srcp), (void)(andp); 1; })
-#define cpumask_any_but(mask, cpu)	({ (void)(mask); (void)(cpu); 0; })
+/* Uniprocessor. Assume all masks are "1". */
+static inline unsigned int cpumask_first(const struct cpumask *srcp)
+{
+	return 0;
+}
+
+/* Valid inputs for n are -1 and 0. */
+static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+{
+	return n+1;
+}
+
+static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+{
+	return n+1;
+}
+
+static inline unsigned int cpumask_next_and(int n,
+					    const struct cpumask *srcp,
+					    const struct cpumask *andp)
+{
+	return n+1;
+}
+
+/* cpu must be a valid cpu, ie 0, so there's no other choice. */
+static inline unsigned int cpumask_any_but(const struct cpumask *mask,
+					   unsigned int cpu)
+{
+	return 1;
+}
 
 #define for_each_cpu(cpu, mask)			\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
@@ -620,10 +644,32 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
 int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
 
+/**
+ * for_each_cpu - iterate over every cpu in a mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
 #define for_each_cpu(cpu, mask)				\
 	for ((cpu) = -1;				\
 		(cpu) = cpumask_next((cpu), (mask)),	\
 		(cpu) < nr_cpu_ids;)
+
+/**
+ * for_each_cpu_and - iterate over every cpu in both masks
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the first cpumask pointer
+ * @and: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places. It is equivalent to:
+ *	struct cpumask tmp;
+ *	cpumask_and(&tmp, &mask, &and);
+ *	for_each_cpu(cpu, &tmp)
+ *		...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
 #define for_each_cpu_and(cpu, mask, and)		\
 	for ((cpu) = -1;				\
 		(cpu) = cpumask_next_and((cpu), (mask), (and)),	\
0 commit comments