diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 5bf9443cfbaa..4aa04a7c904e 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -92,7 +92,8 @@ void __init add_static_vm_early(struct static_vm *svm)
 	void *vaddr;
 
 	vm = &svm->vm;
-	vm_area_add_early(vm);
+	if (!vm_area_check_early(vm))
+		vm_area_add_early(vm);
 	vaddr = vm->addr;
 
 	list_for_each_entry(curr_svm, &static_vmlist, list) {
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 58f0e52e3c12..87be2b84d56a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1447,12 +1447,21 @@ static void __init map_lowmem(void)
 	struct memblock_region *reg;
 	phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
 	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+	struct static_vm *svm;
+	phys_addr_t start;
+	phys_addr_t end;
+	unsigned long vaddr;
+	unsigned long pfn;
+	unsigned long length;
+	unsigned int type;
+	int nr = 0;
 
 	/* Map all the lowmem memory banks. */
 	for_each_memblock(memory, reg) {
-		phys_addr_t start = reg->base;
-		phys_addr_t end = start + reg->size;
 		struct map_desc map;
+		start = reg->base;
+		end = start + reg->size;
+		nr++;
 
 		if (memblock_is_nomap(reg))
 			continue;
@@ -1504,6 +1513,33 @@ static void __init map_lowmem(void)
 			}
 		}
 	}
+	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
+
+	for_each_memblock(memory, reg) {
+		struct vm_struct *vm;
+
+		start = reg->base;
+		end = start + reg->size;
+
+		if (end > arm_lowmem_limit)
+			end = arm_lowmem_limit;
+		if (start >= end)
+			break;
+
+		vm = &svm->vm;
+		pfn = __phys_to_pfn(start);
+		vaddr = __phys_to_virt(start);
+		length = end - start;
+		type = MT_MEMORY_RW;
+
+		vm->addr = (void *)(vaddr & PAGE_MASK);
+		vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
+		vm->phys_addr = __pfn_to_phys(pfn);
+		vm->flags = VM_LOWMEM;
+		vm->flags |= VM_ARM_MTYPE(type);
+		vm->caller = map_lowmem;
+		add_static_vm_early(svm++);
+	}
 }
 
 #ifdef CONFIG_ARM_PV_FIXUP
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 3c4ebcfbd172..ca0d7ab7b7dc 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -20,6 +20,8 @@ struct notifier_block;		/* in notifier.h */
 #define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
 #define VM_NO_GUARD		0x00000040	/* don't add guard page */
 #define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
+#define VM_LOWMEM		0x00000100	/* Tracking of direct mapped lowmem */
+
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
@@ -174,6 +176,7 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
 extern struct list_head vmap_area_list;
 extern __init void vm_area_add_early(struct vm_struct *vm);
 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
+extern __init int vm_area_check_early(struct vm_struct *vm);
 
 #ifdef CONFIG_SMP
 # ifdef CONFIG_MMU
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0f8988ff7672..b6fe5de985d3 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1211,6 +1211,33 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 EXPORT_SYMBOL(vm_map_ram);
 
 static struct vm_struct *vmlist __initdata;
+
+/**
+ * vm_area_check_early - check if vmap area is already mapped
+ * @vm: vm_struct to be checked
+ *
+ * This function is used to check if the vmap area has been
+ * mapped already. @vm->addr, @vm->size and @vm->flags should
+ * contain proper values.
+ *
+ */
+int __init vm_area_check_early(struct vm_struct *vm)
+{
+	struct vm_struct *tmp, **p;
+
+	BUG_ON(vmap_initialized);
+	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+		if (tmp->addr >= vm->addr) {
+			if (tmp->addr < vm->addr + vm->size)
+				return 1;
+		} else {
+			if (tmp->addr + tmp->size > vm->addr)
+				return 1;
+		}
+	}
+	return 0;
+}
+
 /**
  * vm_area_add_early - add vmap area early during boot
  * @vm: vm_struct to add