diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4f75eaf22371..5e51fba7a02a 100755
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2406,15 +2406,16 @@ static inline struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
 #endif
 
 /*
- * Obtain a specified number of elements from the buddy allocator, all under
- * a single hold of the lock, for efficiency. Add them to the supplied list.
- * Returns the number of new pages which were placed at *list.
+ * Obtain a specified number of elements from the buddy allocator, and relax the
+ * zone lock when needed. Add them to the supplied list. Returns the number of
+ * new pages which were placed at *list.
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
 			int migratetype, bool cold)
 {
-	int i, alloced = 0;
+	const bool can_resched = !preempt_count() && !irqs_disabled();
+	int i, alloced = 0, last_mod = 0;
 
 	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
@@ -2433,6 +2434,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		if (unlikely(page == NULL))
 			break;
 
+		/* Reschedule and ease the contention on the lock if needed */
+		if (i + 1 < count && ((can_resched && need_resched()) ||
+				      spin_needbreak(&zone->lock))) {
+			__mod_zone_page_state(zone, NR_FREE_PAGES,
+					      -((i + 1 - last_mod) << order));
+			last_mod = i + 1;
+			spin_unlock(&zone->lock);
+			if (can_resched)
+				cond_resched();
+			spin_lock(&zone->lock);
+		}
+
 		if (unlikely(check_pcp_refill(page)))
 			continue;
 
@@ -2462,7 +2475,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 	 * on i. Do not confuse with 'alloced' which is the number of
 	 * pages added to the pcp list.
 	 */
-	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
+	__mod_zone_page_state(zone, NR_FREE_PAGES, -((i - last_mod) << order));
 	spin_unlock(&zone->lock);
 	return alloced;
 }
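
For illustration only, not part of the patch: a minimal userspace sketch of the same pattern of doing a batch of work under one lock while periodically releasing it so contending threads are not starved. The pool type, batch_take(), and RELAX_INTERVAL below are hypothetical names chosen for this example, not kernel APIs.

/* Userspace analogue of the rmqueue_bulk() change above: take up to
 * 'count' items from a shared pool in one batch, but drop the lock at
 * intervals so that waiters can make progress. */
#include <pthread.h>
#include <sched.h>
#include <stddef.h>

#define RELAX_INTERVAL 32	/* hypothetical: how often to ease contention */

struct pool {
	pthread_mutex_t lock;
	int *items;
	size_t nr_free;
};

/* Returns the number of items actually taken into 'out'. */
static size_t batch_take(struct pool *p, int *out, size_t count)
{
	size_t i;

	pthread_mutex_lock(&p->lock);
	for (i = 0; i < count; i++) {
		if (p->nr_free == 0)
			break;
		out[i] = p->items[--p->nr_free];

		/* Periodically release the lock so waiters can get in,
		 * analogous to relaxing zone->lock in the patch. */
		if (i + 1 < count && (i + 1) % RELAX_INTERVAL == 0) {
			pthread_mutex_unlock(&p->lock);
			sched_yield();
			pthread_mutex_lock(&p->lock);
		}
	}
	pthread_mutex_unlock(&p->lock);
	return i;
}

Unlike this fixed-interval sketch, the patch breaks out of the critical section on demand, only when need_resched() or spin_needbreak() signals contention, and it brings NR_FREE_PAGES up to date (via last_mod) before every unlock so the zone's free-page accounting stays consistent while zone->lock is dropped.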