diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 87be2b84d56a..26a27fa9d0bc 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1539,6 +1539,7 @@ static void __init map_lowmem(void) vm->flags |= VM_ARM_MTYPE(type); vm->caller = map_lowmem; add_static_vm_early(svm++); + mark_vmalloc_reserved_area(vm->addr, vm->size); } } diff --git a/include/linux/mm.h b/include/linux/mm.h index 9d22cd94b400..2002ea0d780b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -519,16 +519,16 @@ unsigned long vmalloc_to_pfn(const void *addr); * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there * is no special casing required. */ -static inline bool is_vmalloc_addr(const void *x) -{ -#ifdef CONFIG_MMU - unsigned long addr = (unsigned long)x; - return addr >= VMALLOC_START && addr < VMALLOC_END; +#ifdef CONFIG_MMU +extern int is_vmalloc_addr(const void *x); #else - return false; -#endif +static inline int is_vmalloc_addr(const void *x) +{ + return 0; } +#endif + #ifdef CONFIG_MMU extern int is_vmalloc_or_module_addr(const void *x); #else diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index ca0d7ab7b7dc..3eee06cb4157 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -177,6 +177,12 @@ extern struct list_head vmap_area_list; extern __init void vm_area_add_early(struct vm_struct *vm); extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); extern __init int vm_area_check_early(struct vm_struct *vm); +#ifdef CONFIG_ENABLE_VMALLOC_SAVING +extern void mark_vmalloc_reserved_area(void *addr, unsigned long size); +#else +static inline void mark_vmalloc_reserved_area(void *addr, unsigned long size) +{ } +#endif #ifdef CONFIG_SMP # ifdef CONFIG_MMU @@ -202,7 +208,12 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) #endif #ifdef CONFIG_MMU +#ifdef CONFIG_ENABLE_VMALLOC_SAVING +extern unsigned long total_vmalloc_size; +#define VMALLOC_TOTAL total_vmalloc_size +#else #define VMALLOC_TOTAL 
(VMALLOC_END - VMALLOC_START) +#endif #else #define VMALLOC_TOTAL 0UL #endif diff --git a/mm/vmalloc.c b/mm/vmalloc.c index b6fe5de985d3..0158498051a2 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -347,6 +347,57 @@ unsigned long vmalloc_nr_pages(void) return atomic_long_read(&nr_vmalloc_pages); } +#ifdef CONFIG_ENABLE_VMALLOC_SAVING +#define POSSIBLE_VMALLOC_START PAGE_OFFSET + +#define VMALLOC_BITMAP_SIZE ((VMALLOC_END - PAGE_OFFSET) >> \ + PAGE_SHIFT) +#define VMALLOC_TO_BIT(addr) (((addr) - PAGE_OFFSET) >> PAGE_SHIFT) +#define BIT_TO_VMALLOC(i) (PAGE_OFFSET + ((i) * PAGE_SIZE)) + +unsigned long total_vmalloc_size; +static unsigned long vmalloc_reserved; + +static DECLARE_BITMAP(possible_areas, VMALLOC_BITMAP_SIZE); + +void mark_vmalloc_reserved_area(void *x, unsigned long size) +{ + unsigned long addr = (unsigned long)x; + + bitmap_set(possible_areas, VMALLOC_TO_BIT(addr), size >> PAGE_SHIFT); + vmalloc_reserved += size; +} + +int is_vmalloc_addr(const void *x) +{ + unsigned long addr = (unsigned long)x; + + if (addr < POSSIBLE_VMALLOC_START || addr >= VMALLOC_END) + return 0; + + if (test_bit(VMALLOC_TO_BIT(addr), possible_areas)) + return 0; + + return 1; +} + +static void calc_total_vmalloc_size(void) +{ + total_vmalloc_size = VMALLOC_END - POSSIBLE_VMALLOC_START - + vmalloc_reserved; +} +#else +int is_vmalloc_addr(const void *x) +{ + unsigned long addr = (unsigned long)x; + + return addr >= VMALLOC_START && addr < VMALLOC_END; +} + +static void calc_total_vmalloc_size(void) { } +#endif +EXPORT_SYMBOL(is_vmalloc_addr); + static struct vmap_area *__find_vmap_area(unsigned long addr) { struct rb_node *n = vmap_area_root.rb_node; @@ -1318,7 +1369,7 @@ void __init vmalloc_init(void) } vmap_area_pcpu_hole = VMALLOC_END; - + calc_total_vmalloc_size(); vmap_initialized = true; } @@ -2790,6 +2841,9 @@ static int s_show(struct seq_file *m, void *p) if (is_vmalloc_addr(v->pages)) seq_puts(m, " vpages"); + if (v->flags & VM_LOWMEM) + seq_puts(m, " lowmem"); + 
show_numa_info(m, v); seq_putc(m, '\n'); return 0;