@@ -140,6 +140,13 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value)
 {
         void *shadow_start, *shadow_end;
 
+        /*
+         * Perform shadow offset calculation based on untagged address, as
+         * some of the callers (e.g. kasan_poison_object_data) pass tagged
+         * addresses to this function.
+         */
+        address = reset_tag(address);
+
         shadow_start = kasan_mem_to_shadow(address);
         shadow_end = kasan_mem_to_shadow(address + size);
@@ -148,11 +155,24 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value)
 
 void kasan_unpoison_shadow(const void *address, size_t size)
 {
-        kasan_poison_shadow(address, size, 0);
+        u8 tag = get_tag(address);
+
+        /*
+         * Perform shadow offset calculation based on untagged address, as
+         * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
+         * addresses to this function.
+         */
+        address = reset_tag(address);
+
+        kasan_poison_shadow(address, size, tag);
 
         if (size & KASAN_SHADOW_MASK) {
                 u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
-                *shadow = size & KASAN_SHADOW_MASK;
+
+                if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+                        *shadow = tag;
+                else
+                        *shadow = size & KASAN_SHADOW_MASK;
         }
 }
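For readers new to the tag-based scheme, the hunks above hinge on two facts: the tag lives in the otherwise-unused top byte of the pointer, so the shadow offset must be computed from the untagged address, and the shadow byte of a partially accessible granule holds the tag rather than a byte count. The standalone sketch below models this in userspace; the model_* helpers are stand-ins, the constants (tag in bits 56..63, 8-byte granules as in generic mode) are assumptions of the sketch, and none of it is kernel code.

/* Illustrative userspace model of tagged pointers and shadow encoding. */
#include <stdint.h>
#include <stdio.h>

#define MODEL_SHADOW_SCALE_SHIFT 3              /* 8-byte granules, generic-mode value */
#define MODEL_SHADOW_MASK ((1UL << MODEL_SHADOW_SCALE_SHIFT) - 1)
#define MODEL_TAG_SHIFT 56                      /* tag occupies the top pointer byte */
#define MODEL_TAG_KERNEL 0xffULL                /* "no tag" value for kernel pointers */

static uint8_t model_get_tag(uint64_t addr)
{
        return (uint8_t)(addr >> MODEL_TAG_SHIFT);
}

static uint64_t model_reset_tag(uint64_t addr)
{
        return addr | (MODEL_TAG_KERNEL << MODEL_TAG_SHIFT);
}

int main(void)
{
        uint64_t p = 0x2bff800012345678ULL;     /* kernel-style pointer carrying tag 0x2b */
        size_t size = 13;                       /* one full granule plus 5 bytes */
        uint8_t tag = model_get_tag(p);

        /* The shadow offset has to be derived from the untagged address. */
        printf("tag=0x%02x untagged=0x%016llx\n",
               tag, (unsigned long long)model_reset_tag(p));
        /* Generic KASAN: last shadow byte records how many bytes are accessible. */
        printf("generic last shadow byte: %lu\n", size & MODEL_SHADOW_MASK);
        /* Tag-based KASAN: the granule's shadow byte simply holds the tag. */
        printf("tag-based last shadow byte: 0x%02x\n", tag);
        return 0;
}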
@@ -200,8 +220,9 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark)
 
 void kasan_alloc_pages(struct page *page, unsigned int order)
 {
-        if (likely(!PageHighMem(page)))
-                kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
+        if (unlikely(PageHighMem(page)))
+                return;
+        kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
 }
 
 void kasan_free_pages(struct page *page, unsigned int order)
@@ -218,6 +239,9 @@ void kasan_free_pages(struct page *page, unsigned int order)
  */
 static inline unsigned int optimal_redzone(unsigned int object_size)
 {
+        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+                return 0;
+
         return
                 object_size <= 64  - 16  ? 16 :
                 object_size <= 128 - 32  ? 32 :
@@ -232,6 +256,7 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                 unsigned long *flags)
 {
         unsigned int orig_size = *size;
+        unsigned int redzone_size;
         int redzone_adjust;
 
         /* Add alloc meta. */
@@ -239,20 +264,20 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
         *size += sizeof(struct kasan_alloc_meta);
 
         /* Add free meta. */
-        if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
-            cache->object_size < sizeof(struct kasan_free_meta)) {
+        if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+            (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
+             cache->object_size < sizeof(struct kasan_free_meta))) {
                 cache->kasan_info.free_meta_offset = *size;
                 *size += sizeof(struct kasan_free_meta);
         }
-        redzone_adjust = optimal_redzone(cache->object_size) -
-                (*size - cache->object_size);
 
+        redzone_size = optimal_redzone(cache->object_size);
+        redzone_adjust = redzone_size - (*size - cache->object_size);
         if (redzone_adjust > 0)
                 *size += redzone_adjust;
 
         *size = min_t(unsigned int, KMALLOC_MAX_SIZE,
-                        max(*size, cache->object_size +
-                                        optimal_redzone(cache->object_size)));
+                        max(*size, cache->object_size + redzone_size));
 
         /*
          * If the metadata doesn't fit, don't enable KASAN at all.
@@ -265,6 +290,8 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                 return;
         }
 
+        cache->align = round_up(cache->align, KASAN_SHADOW_SCALE_SIZE);
+
         *flags |= SLAB_KASAN;
 }
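The round_up of cache->align above matters because each shadow byte describes one whole granule: if two objects shared a granule, a single shadow byte would have to hold two different tags. A minimal sketch of the alignment arithmetic, assuming a 16-byte KASAN_SHADOW_SCALE_SIZE (an assumption of this sketch, not taken from the patch):

/* Userspace check of granule alignment, not kernel code. */
#include <assert.h>
#include <stddef.h>

#define MODEL_GRANULE 16        /* assumed shadow granule size */

static size_t model_align(size_t align)
{
        /* Same effect as round_up(align, MODEL_GRANULE). */
        return (align + MODEL_GRANULE - 1) / MODEL_GRANULE * MODEL_GRANULE;
}

int main(void)
{
        /* An 8-byte aligned cache gets bumped to granule alignment... */
        assert(model_align(8) == 16);
        /* ...while an already aligned one is left untouched. */
        assert(model_align(32) == 32);
        return 0;
}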
@@ -309,6 +336,32 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
                         KASAN_KMALLOC_REDZONE);
 }
 
+/*
+ * Since it's desirable to only call object constructors once during slab
+ * allocation, we preassign tags to all such objects. Also preassign tags for
+ * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports.
+ * For the SLAB allocator we can't preassign tags randomly since the freelist
+ * is stored as an array of indexes instead of a linked list. Assign tags
+ * based on object indexes, so that objects that are next to each other get
+ * different tags.
+ * After a tag is assigned, the object always gets allocated with the same tag.
+ * The reason is that we can't change tags for objects with constructors on
+ * reallocation (even for non-SLAB_TYPESAFE_BY_RCU), because the constructor
+ * code can save the pointer to the object somewhere (e.g. in the object
+ * itself). Then if we retag it, the old saved pointer will become invalid.
+ */
+static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new)
+{
+        if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
+                return new ? KASAN_TAG_KERNEL : random_tag();
+
+#ifdef CONFIG_SLAB
+        return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
+#else
+        return new ? random_tag() : get_tag(object);
+#endif
+}
+
 void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
 {
         struct kasan_alloc_meta *alloc_info;
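To make the SLAB branch of assign_tag() concrete: casting the object index to u8 gives neighbouring objects consecutive tags (wrapping after 256 objects), so a linear overflow from one object into the next trips the tag check. The sketch below is a userspace model; model_obj_to_index() and the slab layout are stand-ins for the kernel's obj_to_index() and are assumptions of the sketch.

/* Userspace model of index-based tag assignment (SLAB branch). */
#include <stdint.h>
#include <stdio.h>

static unsigned int model_obj_to_index(uintptr_t slab_base, size_t obj_size,
                                       uintptr_t obj)
{
        return (unsigned int)((obj - slab_base) / obj_size);
}

int main(void)
{
        uintptr_t slab_base = 0x1000;   /* assumed slab base for the model */
        size_t obj_size = 64;           /* assumed object size */

        /* Adjacent objects receive different (consecutive) tags, so an
         * overflow from one object into the next is caught by the tag check. */
        for (unsigned int i = 0; i < 4; i++) {
                uintptr_t obj = slab_base + i * obj_size;
                uint8_t tag = (uint8_t)model_obj_to_index(slab_base, obj_size, obj);

                printf("object %u -> tag 0x%02x\n", i, tag);
        }
        return 0;
}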
@@ -319,6 +372,9 @@ void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
         alloc_info = get_alloc_info(cache, object);
         __memset(alloc_info, 0, sizeof(*alloc_info));
 
+        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+                object = set_tag(object, assign_tag(cache, object, true));
+
         return (void *)object;
 }
@@ -327,15 +383,30 @@ void *kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
         return kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
+static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
+{
+        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+                return shadow_byte < 0 ||
+                        shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
+        else
+                return tag != (u8)shadow_byte;
+}
+
 static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                               unsigned long ip, bool quarantine)
 {
         s8 shadow_byte;
+        u8 tag;
+        void *tagged_object;
         unsigned long rounded_up_size;
 
+        tag = get_tag(object);
+        tagged_object = object;
+        object = reset_tag(object);
+
         if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
             object)) {
-                kasan_report_invalid_free(object, ip);
+                kasan_report_invalid_free(tagged_object, ip);
                 return true;
         }
@@ -344,20 +415,22 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                 return false;
 
         shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
-        if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
-                kasan_report_invalid_free(object, ip);
+        if (shadow_invalid(tag, shadow_byte)) {
+                kasan_report_invalid_free(tagged_object, ip);
                 return true;
         }
 
         rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
         kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
 
-        if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
+        if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
+                        unlikely(!(cache->flags & SLAB_KASAN)))
                 return false;
 
         set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
         quarantine_put(get_free_info(cache, object), cache);
-        return true;
+
+        return IS_ENABLED(CONFIG_KASAN_GENERIC);
 }
 
 bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
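The shadow_invalid() helper introduced above is the heart of the free-time check: generic KASAN treats any shadow byte outside 0..KASAN_SHADOW_SCALE_SIZE-1 as an invalid free, while the tag-based mode requires the shadow byte to equal the pointer's tag. Below is a userspace sketch of the same predicate; the granule size and the freed-object marker value are assumptions of the sketch.

/* Userspace model of the free-time shadow check, not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_SHADOW_SCALE_SIZE 8       /* assumed generic-mode granule size */
#define MODEL_KMALLOC_FREE 0xfb         /* assumed "freed object" shadow marker */

static bool model_shadow_invalid(bool generic, uint8_t tag, int8_t shadow_byte)
{
        if (generic)
                /* Generic mode: 0..SCALE_SIZE-1 means (partially) accessible. */
                return shadow_byte < 0 || shadow_byte >= MODEL_SHADOW_SCALE_SIZE;
        /* Tag-based mode: the shadow byte must hold the pointer's tag. */
        return tag != (uint8_t)shadow_byte;
}

int main(void)
{
        /* Tag-based double free: the shadow already holds the freed marker,
         * which can never equal a live pointer tag. */
        printf("double free invalid: %d\n",
               model_shadow_invalid(false, 0x2b, (int8_t)MODEL_KMALLOC_FREE));
        /* Tag-based valid free: pointer tag matches the shadow byte. */
        printf("valid free invalid:  %d\n",
               model_shadow_invalid(false, 0x2b, (int8_t)0x2b));
        /* Generic mode flags any negative (poisoned) shadow byte. */
        printf("generic poisoned:    %d\n",
               model_shadow_invalid(true, 0, (int8_t)MODEL_KMALLOC_FREE));
        return 0;
}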
@@ -370,6 +443,7 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 {
         unsigned long redzone_start;
         unsigned long redzone_end;
+        u8 tag;
 
         if (gfpflags_allow_blocking(flags))
                 quarantine_reduce();
@@ -382,14 +456,18 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
         redzone_end = round_up((unsigned long)object + cache->object_size,
                         KASAN_SHADOW_SCALE_SIZE);
 
-        kasan_unpoison_shadow(object, size);
+        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+                tag = assign_tag(cache, object, false);
+
+        /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
+        kasan_unpoison_shadow(set_tag(object, tag), size);
         kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                 KASAN_KMALLOC_REDZONE);
 
         if (cache->flags & SLAB_KASAN)
                 set_track(&get_alloc_info(cache, object)->alloc_track, flags);
 
-        return (void *)object;
+        return set_tag(object, tag);
 }
 EXPORT_SYMBOL(kasan_kmalloc);
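Because kasan_kmalloc() now returns set_tag(object, tag), every pointer handed back to slab callers carries a tag in its top byte; that is why the remaining hunks strip the tag with reset_tag() before comparing against page_address(). A small userspace model of that pitfall follows; the model_* helpers and constants are assumptions of the sketch, not kernel code.

/* Userspace model of why tagged pointers must be reset before comparison. */
#include <stdint.h>
#include <stdio.h>

#define MODEL_TAG_SHIFT 56
#define MODEL_TAG_KERNEL 0xff

static uint64_t model_set_tag(uint64_t addr, uint8_t tag)
{
        return (addr & ~(0xffULL << MODEL_TAG_SHIFT)) |
               ((uint64_t)tag << MODEL_TAG_SHIFT);
}

static uint64_t model_reset_tag(uint64_t addr)
{
        return model_set_tag(addr, MODEL_TAG_KERNEL);
}

int main(void)
{
        uint64_t page_addr = 0xffff800012340000ULL;     /* what page_address() would return */
        uint64_t tagged = model_set_tag(page_addr, 0x2b); /* what the allocator now hands out */

        /* A tagged pointer never compares equal to the untagged page address. */
        printf("naive compare:     %s\n", tagged == page_addr ? "equal" : "not equal");
        printf("reset_tag compare: %s\n",
               model_reset_tag(tagged) == page_addr ? "equal" : "not equal");
        return 0;
}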
@@ -439,7 +517,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
         page = virt_to_head_page(ptr);
 
         if (unlikely(!PageSlab(page))) {
-                if (ptr != page_address(page)) {
+                if (reset_tag(ptr) != page_address(page)) {
                         kasan_report_invalid_free(ptr, ip);
                         return;
                 }
@@ -452,7 +530,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 
 void kasan_kfree_large(void *ptr, unsigned long ip)
 {
-        if (ptr != page_address(virt_to_head_page(ptr)))
+        if (reset_tag(ptr) != page_address(virt_to_head_page(ptr)))
                 kasan_report_invalid_free(ptr, ip);
         /* The object will be poisoned by page_alloc. */
 }