msm: Allow lowmem to be non-contiguous and mixed

Currently on 32-bit systems, the virtual address space above
PAGE_OFFSET is reserved for direct-mapped lowmem, while a separate
part of the virtual address space is reserved for vmalloc. We want
to have as much direct-mapped memory as possible, since there is a
penalty for mapping and unmapping highmem. However, we may have an
image that is expected to live for the lifetime of the system and
is reserved in a physical region that would otherwise be part of
direct-mapped lowmem. The physical memory reserved this way is
never used by Linux. This means that even though the system never
accesses the virtual memory corresponding to the reserved physical
memory, we still lose that portion of the direct-mapped lowmem
virtual space.

By allowing lowmem to be non-contiguous, we can give the unused
virtual address space of the reserved region back for use by
vmalloc.

Change-Id: I980b3dfafac71884dcdcb8cd2e4a6363cde5746a
Signed-off-by: Susheel Khiani <skhiani@codeaurora.org>
commit 03307a0339 (parent 4a2eeccfe9)
Author: Susheel Khiani
Committer: Zhenhua Huang
 arch/arm/mm/ioremap.c   |  3
 arch/arm/mm/mmu.c       | 40
 include/linux/vmalloc.h |  3
 mm/vmalloc.c            | 27

--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -92,7 +92,8 @@ void __init add_static_vm_early(struct static_vm *svm)
 	void *vaddr;
 
 	vm = &svm->vm;
-	vm_area_add_early(vm);
+	if (!vm_area_check_early(vm))
+		vm_area_add_early(vm);
 	vaddr = vm->addr;
 
 	list_for_each_entry(curr_svm, &static_vmlist, list) {

--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1447,12 +1447,21 @@ static void __init map_lowmem(void)
 	struct memblock_region *reg;
 	phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
 	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+	struct static_vm *svm;
+	phys_addr_t start;
+	phys_addr_t end;
+	unsigned long vaddr;
+	unsigned long pfn;
+	unsigned long length;
+	unsigned int type;
+	int nr = 0;
 
 	/* Map all the lowmem memory banks. */
 	for_each_memblock(memory, reg) {
-		phys_addr_t start = reg->base;
-		phys_addr_t end = start + reg->size;
 		struct map_desc map;
 
+		start = reg->base;
+		end = start + reg->size;
+		nr++;
 		if (memblock_is_nomap(reg))
 			continue;
@@ -1504,6 +1513,33 @@ static void __init map_lowmem(void)
 			}
 		}
 	}
+
+	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
+
+	for_each_memblock(memory, reg) {
+		struct vm_struct *vm;
+
+		start = reg->base;
+		end = start + reg->size;
+		if (end > arm_lowmem_limit)
+			end = arm_lowmem_limit;
+		if (start >= end)
+			break;
+
+		vm = &svm->vm;
+		pfn = __phys_to_pfn(start);
+		vaddr = __phys_to_virt(start);
+		length = end - start;
+		type = MT_MEMORY_RW;
+
+		vm->addr = (void *)(vaddr & PAGE_MASK);
+		vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
+		vm->phys_addr = __pfn_to_phys(pfn);
+		vm->flags = VM_LOWMEM;
+		vm->flags |= VM_ARM_MTYPE(type);
+		vm->caller = map_lowmem;
+		add_static_vm_early(svm++);
+	}
 }
 
 #ifdef CONFIG_ARM_PV_FIXUP
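
For reference, the address arithmetic in the new registration loop can be
sanity-checked in isolation. The following is a minimal user-space sketch,
not kernel code: PAGE_SIZE, PAGE_MASK and PAGE_ALIGN are redefined locally
for an assumed 4 KB page, and the bank start/length values are invented
purely for the demonstration.

/*
 * User-space sketch of the page rounding applied when map_lowmem()
 * records a lowmem bank as a static vm area: the start address is
 * rounded down to a page boundary and the size is padded so the
 * recorded area still covers the original [vaddr, vaddr + length) range.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long vaddr  = 0xc0a01200UL;	/* hypothetical, unaligned bank start */
	unsigned long length = 0x00300e00UL;	/* hypothetical bank length */

	unsigned long addr = vaddr & PAGE_MASK;
	unsigned long size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));

	printf("vm->addr = %#lx, vm->size = %#lx\n", addr, size);
	/* The recorded area must still cover the whole bank. */
	printf("covers bank: %s\n",
	       (addr <= vaddr && addr + size >= vaddr + length) ? "yes" : "no");
	return 0;
}

In practice lowmem banks are page aligned, so the offset term is normally
zero; the padding only matters for an unaligned start.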

--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -20,6 +20,8 @@ struct notifier_block;	/* in notifier.h */
 #define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
 #define VM_NO_GUARD		0x00000040	/* don't add guard page */
 #define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
+#define VM_LOWMEM		0x00000100	/* Tracking of direct mapped lowmem */
+
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
@@ -174,6 +176,7 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
 extern struct list_head vmap_area_list;
 extern __init void vm_area_add_early(struct vm_struct *vm);
 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
+extern __init int vm_area_check_early(struct vm_struct *vm);
 
 #ifdef CONFIG_SMP
 # ifdef CONFIG_MMU

--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1211,6 +1211,33 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 EXPORT_SYMBOL(vm_map_ram);
 
 static struct vm_struct *vmlist __initdata;
+
+/**
+ * vm_area_check_early - check if vmap area is already mapped
+ * @vm: vm_struct to be checked
+ *
+ * This function is used to check if the vmap area has been
+ * mapped already. @vm->addr, @vm->size and @vm->flags should
+ * contain proper values.
+ *
+ */
+int __init vm_area_check_early(struct vm_struct *vm)
+{
+	struct vm_struct *tmp, **p;
+
+	BUG_ON(vmap_initialized);
+	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+		if (tmp->addr >= vm->addr) {
+			if (tmp->addr < vm->addr + vm->size)
+				return 1;
+		} else {
+			if (tmp->addr + tmp->size > vm->addr)
+				return 1;
+		}
+	}
+	return 0;
+}
+
 /**
  * vm_area_add_early - add vmap area early during boot
  * @vm: vm_struct to add
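
For clarity, the interval check introduced above can be modelled outside
the kernel. The sketch below is a user-space approximation: fake_vm and
check_overlap are invented stand-ins for vm_struct and
vm_area_check_early, keeping only the fields and logic the overlap test
needs, and the list entries use made-up addresses.

/*
 * User-space model of the overlap test in vm_area_check_early():
 * returns 1 if the candidate area overlaps any entry already on the
 * early list, 0 otherwise.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_vm {
	struct fake_vm *next;
	unsigned long addr;
	unsigned long size;
};

static int check_overlap(struct fake_vm *vmlist, struct fake_vm *vm)
{
	struct fake_vm *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr >= vm->addr) {
			/* Entry starts at or above the candidate. */
			if (tmp->addr < vm->addr + vm->size)
				return 1;
		} else {
			/* Entry starts below the candidate. */
			if (tmp->addr + tmp->size > vm->addr)
				return 1;
		}
	}
	return 0;
}

int main(void)
{
	/* Two already-registered areas: [0xc0000000, +16M) and [0xc2000000, +8M). */
	struct fake_vm b = { NULL, 0xc2000000UL, 0x00800000UL };
	struct fake_vm a = { &b,   0xc0000000UL, 0x01000000UL };

	struct fake_vm overlapping = { NULL, 0xc0f00000UL, 0x00200000UL };
	struct fake_vm disjoint    = { NULL, 0xc1000000UL, 0x00400000UL };

	printf("overlapping: %d\n", check_overlap(&a, &overlapping)); /* prints 1 */
	printf("disjoint:    %d\n", check_overlap(&a, &disjoint));    /* prints 0 */
	return 0;
}

An entry either starts at or above the candidate (overlap if it starts
before the candidate ends) or starts below it (overlap if it extends past
the candidate's start); ranges that merely touch are not treated as
overlapping.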
