@@ -28,21 +28,31 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s)
 	free_percpu(s->freelist);
 }
 
-static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
-					struct pcpu_freelist_node *node)
+static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
+					 struct pcpu_freelist_node *node)
 {
 	raw_spin_lock(&head->lock);
 	node->next = head->first;
 	head->first = node;
 	raw_spin_unlock(&head->lock);
 }
 
-void pcpu_freelist_push(struct pcpu_freelist *s,
+void __pcpu_freelist_push(struct pcpu_freelist *s,
 			struct pcpu_freelist_node *node)
 {
 	struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
 
-	__pcpu_freelist_push(head, node);
+	___pcpu_freelist_push(head, node);
+}
+
+void pcpu_freelist_push(struct pcpu_freelist *s,
+			struct pcpu_freelist_node *node)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__pcpu_freelist_push(s, node);
+	local_irq_restore(flags);
 }
 
 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
@@ -63,7 +73,7 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
 	for_each_possible_cpu(cpu) {
 again:
 		head = per_cpu_ptr(s->freelist, cpu);
-		__pcpu_freelist_push(head, buf);
+		___pcpu_freelist_push(head, buf);
 		i++;
 		buf += elem_size;
 		if (i == nr_elems)
@@ -74,31 +84,38 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
 	local_irq_restore(flags);
 }
 
-struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
 {
 	struct pcpu_freelist_head *head;
 	struct pcpu_freelist_node *node;
-	unsigned long flags;
 	int orig_cpu, cpu;
 
-	local_irq_save(flags);
 	orig_cpu = cpu = raw_smp_processor_id();
 	while (1) {
 		head = per_cpu_ptr(s->freelist, cpu);
 		raw_spin_lock(&head->lock);
 		node = head->first;
 		if (node) {
 			head->first = node->next;
-			raw_spin_unlock_irqrestore(&head->lock, flags);
+			raw_spin_unlock(&head->lock);
 			return node;
 		}
 		raw_spin_unlock(&head->lock);
 		cpu = cpumask_next(cpu, cpu_possible_mask);
 		if (cpu >= nr_cpu_ids)
 			cpu = 0;
-		if (cpu == orig_cpu) {
-			local_irq_restore(flags);
+		if (cpu == orig_cpu)
 			return NULL;
-		}
 	}
 }
+
+struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+{
+	struct pcpu_freelist_node *ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = __pcpu_freelist_pop(s);
+	local_irq_restore(flags);
+	return ret;
+}
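
Caller-side effect of the split, as an illustrative sketch rather than part of the commit: the plain pcpu_freelist_push()/pcpu_freelist_pop() now disable interrupts themselves, while the new __pcpu_freelist_push()/__pcpu_freelist_pop() leave the IRQ state alone, for callers that already run with interrupts off (for example while holding a raw spinlock taken with raw_spin_lock_irqsave()). The function my_update_elem() and its bucket_lock below are hypothetical, made up only to show the intended usage.

#include <linux/spinlock.h>
#include <linux/errno.h>
#include "percpu_freelist.h"

/* Hypothetical caller that already holds an irqsave'd raw spinlock
 * (e.g. a hash-table bucket lock), so interrupts are known to be off:
 * it uses the __ variants to avoid toggling IRQs a second time. */
static int my_update_elem(struct pcpu_freelist *freelist,
			  raw_spinlock_t *bucket_lock)
{
	struct pcpu_freelist_node *node;
	unsigned long flags;

	raw_spin_lock_irqsave(bucket_lock, flags);	/* IRQs disabled here */
	node = __pcpu_freelist_pop(freelist);		/* no local_irq_save() inside */
	if (node)
		__pcpu_freelist_push(freelist, node);	/* likewise IRQ-state neutral */
	raw_spin_unlock_irqrestore(bucket_lock, flags);

	/* Outside any lock, the plain wrappers handle IRQs themselves. */
	node = pcpu_freelist_pop(freelist);
	if (node)
		pcpu_freelist_push(freelist, node);
	return node ? 0 : -ENOENT;
}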