xarray: add the xa_lock to the radix_tree_root

This results in no change in structure size on 64-bit machines as it
fits in the padding between the gfp_t and the void *.  32-bit machines
will grow the structure from 8 to 12 bytes.  Almost all radix trees are
protected with (at least) a spinlock, so as they are converted from
radix trees to xarrays, the data structures will shrink again.
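
To make the size arithmetic explicit (a sketch assuming a typical
build without spinlock debugging, where spinlock_t is 4 bytes):

	64-bit before: gfp_t (4) + padding (4) + rnode (8) = 16 bytes
	64-bit after:  xa_lock (4) + gfp_t (4) + rnode (8) = 16 bytes
	32-bit before: gfp_t (4) + rnode (4)               =  8 bytes
	32-bit after:  xa_lock (4) + gfp_t (4) + rnode (4) = 12 bytes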

Initialising the spinlock requires a name for the benefit of lockdep, so
RADIX_TREE_INIT() now needs to know the name of the radix tree it's
initialising, and so do IDR_INIT() and IDA_INIT().
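
For example, a static definition such as DEFINE_IDR(foo_idr) (foo_idr
being a hypothetical name) now expands roughly to:

	struct idr foo_idr = {
		.idr_rt = {
			.xa_lock = __SPIN_LOCK_UNLOCKED(foo_idr.xa_lock),
			.gfp_mask = IDR_RT_MARKER,
			.rnode = NULL,
		}
	};

so lockdep reports the lock as "foo_idr.xa_lock" rather than as an
anonymous spinlock.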

Also add the xa_lock() and xa_unlock() family of wrappers to make it
easier to use the lock.  If we could rely on -fplan9-extensions in the
compiler, we could avoid all of this syntactic sugar, but that wasn't
added until gcc 4.6.
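
A minimal usage sketch (mytree, index and item are hypothetical, and
error handling is elided):

	RADIX_TREE(mytree, GFP_ATOMIC);

	xa_lock(&mytree);
	ret = radix_tree_insert(&mytree, index, item);
	xa_unlock(&mytree);

rather than open-coding spin_lock(&mytree.xa_lock); the _bh, _irq and
_irqsave/_irqrestore variants map to the matching spin_lock flavours.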

Link: http://lkml.kernel.org/r/20180313132639.17387-8-willy@infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

[@RealJohnGalt: adapt to 4.14]

Signed-off-by: Cyber Knight <cyberknight755@gmail.com>
Signed-off-by: Ruchit <ruchitmarathe@gmail.com>
---
 drivers/gpu/msm/kgsl_device.h  |  2 +-
 fs/f2fs/gc.c                   |  4 ++--
 include/linux/idr.h            | 12 ++++++------
 include/linux/radix-tree.h     |  7 +++++--
 include/linux/xarray.h         | 24 ++++++++++++++++++++++++
 tools/include/linux/spinlock.h |  1 +
 6 files changed, 39 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -351,7 +351,7 @@ struct kgsl_device {
 	.halt_gate = COMPLETION_INITIALIZER((_dev).halt_gate),\
 	.idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
 			kgsl_idle_check),\
-	.context_idr = IDR_INIT,\
+	.context_idr = IDR_INIT(_dev),\
 	.wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).wait_queue),\
 	.active_cnt_wq = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).active_cnt_wq),\
 	.mutex = __MUTEX_INITIALIZER((_dev).mutex),\

diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1311,7 +1311,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
 	unsigned int init_segno = segno;
 	struct gc_inode_list gc_list = {
 		.ilist = LIST_HEAD_INIT(gc_list.ilist),
-		.iroot = RADIX_TREE_INIT(GFP_NOFS),
+		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
 	};
 	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
 	unsigned long long first_skipped, gc_start_time = 0, gc_end_time = 0;
@@ -1475,7 +1475,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
 	for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
 		struct gc_inode_list gc_list = {
 			.ilist = LIST_HEAD_INIT(gc_list.ilist),
-			.iroot = RADIX_TREE_INIT(GFP_NOFS),
+			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
 		};
 
 		do_garbage_collect(sbi, segno, &gc_list, FG_GC);

diff --git a/include/linux/idr.h b/include/linux/idr.h
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -31,11 +31,11 @@ struct idr {
 #define IDR_RT_MARKER	(ROOT_IS_IDR | (__force gfp_t)			\
 				(1 << (ROOT_TAG_SHIFT + IDR_FREE)))
 
-#define IDR_INIT							\
+#define IDR_INIT(name)							\
 {									\
-	.idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER)			\
+	.idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER)			\
 }
-#define DEFINE_IDR(name)	struct idr name = IDR_INIT
+#define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)
 
 /**
  * idr_get_cursor - Return the current position of the cyclic allocator
@@ -256,10 +256,10 @@ struct ida {
 	struct radix_tree_root	ida_rt;
 };
 
-#define IDA_INIT	{						\
-	.ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT),		\
+#define IDA_INIT(name)	{						\
+	.ida_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER | GFP_NOWAIT),	\
 }
-#define DEFINE_IDA(name)	struct ida name = IDA_INIT
+#define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)
 
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);

diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -113,20 +113,23 @@ struct radix_tree_node {
 #define ROOT_TAG_SHIFT	(__GFP_BITS_SHIFT)
 
 struct radix_tree_root {
+	spinlock_t		xa_lock;
 	gfp_t			gfp_mask;
 	struct radix_tree_node	__rcu *rnode;
 };
 
-#define RADIX_TREE_INIT(mask)	{					\
+#define RADIX_TREE_INIT(name, mask)	{				\
+	.xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock),			\
 	.gfp_mask = (mask),						\
 	.rnode = NULL,							\
 }
 
 #define RADIX_TREE(name, mask) \
-	struct radix_tree_root name = RADIX_TREE_INIT(mask)
+	struct radix_tree_root name = RADIX_TREE_INIT(name, mask)
 
 #define INIT_RADIX_TREE(root, mask)					\
 do {									\
+	spin_lock_init(&(root)->xa_lock);				\
 	(root)->gfp_mask = (mask);					\
 	(root)->rnode = NULL;						\
 } while (0)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
new file mode 100644
--- /dev/null
+++ b/include/linux/xarray.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _LINUX_XARRAY_H
+#define _LINUX_XARRAY_H
+/*
+ * eXtensible Arrays
+ * Copyright (c) 2017 Microsoft Corporation
+ * Author: Matthew Wilcox <mawilcox@microsoft.com>
+ */
+
+#include <linux/spinlock.h>
+
+#define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
+#define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
+#define xa_unlock(xa)		spin_unlock(&(xa)->xa_lock)
+#define xa_lock_bh(xa)		spin_lock_bh(&(xa)->xa_lock)
+#define xa_unlock_bh(xa)	spin_unlock_bh(&(xa)->xa_lock)
+#define xa_lock_irq(xa)		spin_lock_irq(&(xa)->xa_lock)
+#define xa_unlock_irq(xa)	spin_unlock_irq(&(xa)->xa_lock)
+#define xa_lock_irqsave(xa, flags) \
+				spin_lock_irqsave(&(xa)->xa_lock, flags)
+#define xa_unlock_irqrestore(xa, flags) \
+				spin_unlock_irqrestore(&(xa)->xa_lock, flags)
+
+#endif /* _LINUX_XARRAY_H */

diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h
--- a/tools/include/linux/spinlock.h
+++ b/tools/include/linux/spinlock.h
@@ -7,6 +7,7 @@
 
 #define spinlock_t		pthread_mutex_t
 #define DEFINE_SPINLOCK(x)	pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER;
+#define __SPIN_LOCK_UNLOCKED(x)	(pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER
 
 #define spin_lock_irqsave(x, f)		(void)f, pthread_mutex_lock(x)
 #define spin_unlock_irqrestore(x, f)	(void)f, pthread_mutex_unlock(x)
