@@ -1639,9 +1639,10 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
16391639 * need to show a valid freepointer to check_object().
16401640 *
16411641 * Note that doing this for all caches (not just ctor
1642- * ones, which have s->offset != NULL)) causes a GPF,
1643- * due to KASAN poisoning and the way set_freepointer()
1644- * eventually dereferences the freepointer.
1642+ * ones, which have s->offset >= object_size) causes a
1643+ * GPF, due to KASAN poisoning and the way
1644+ * set_freepointer() eventually dereferences the
1645+ * freepointer.
16451646 */
16461647 set_freepointer (s , object , NULL );
16471648 }
@@ -2956,8 +2957,14 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
29562957 if (s -> ctor )
29572958 s -> ctor (object );
29582959 kasan_poison_object_data (s , object );
2959- } else if (unlikely (slab_want_init_on_alloc (gfpflags , s )) && object )
2960+ } else if (unlikely (slab_want_init_on_alloc (gfpflags , s )) && object ) {
29602961 memset (object , 0 , s -> object_size );
2962+ if (s -> ctor ) {
2963+ kasan_unpoison_object_data (s , object );
2964+ s -> ctor (object );
2965+ kasan_poison_object_data (s , object );
2966+ }
2967+ }
29612968
29622969 if (object ) {
29632970 check_canary (s , object , s -> random_inactive );
@@ -3415,8 +3422,14 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
34153422 } else if (unlikely (slab_want_init_on_alloc (flags , s ))) {
34163423 int j ;
34173424
3418- for (j = 0 ; j < i ; j ++ )
3425+ for (j = 0 ; j < i ; j ++ ) {
34193426 memset (p [j ], 0 , s -> object_size );
3427+ if (s -> ctor ) {
3428+ kasan_unpoison_object_data (s , p [j ]);
3429+ s -> ctor (p [j ]);
3430+ kasan_poison_object_data (s , p [j ]);
3431+ }
3432+ }
34203433 }
34213434
34223435 for (k = 0 ; k < i ; k ++ ) {
0 commit comments