Reverting crypto patches

c57952b UPSTREAM: ubifs: wire up FS_IOC_GET_ENCRYPTION_NONCE
379237b UPSTREAM: f2fs: wire up FS_IOC_GET_ENCRYPTION_NONCE
10e5acf UPSTREAM: ext4: wire up FS_IOC_GET_ENCRYPTION_NONCE
63bf273 ANDROID: scsi: ufs: add ->map_sg_crypto() variant op
10d4512 FROMLIST: f2fs: Handle casefolding with Encryption
4efb7e2 ANDROID: fscrypt: fall back to filesystem-layer crypto when needed
a14fa7b ANDROID: block: require drivers to declare supported crypto key type(s)
5578bea ANDROID: block: make blk_crypto_start_using_mode() properly check for support
e9c80bd UPSTREAM: fscrypt: add FS_IOC_GET_ENCRYPTION_NONCE ioctl
9e469e7 UPSTREAM: fscrypt: don't evict dirty inodes after removing key
53f2446 fscrypt: don't evict dirty inodes after removing key
207be96 FROMLIST: fscrypt: Have filesystems handle their d_ops
06ab740 ANDROID: dm: Add wrapped key support in dm-default-key
23e670a ANDROID: dm: add support for passing through derive_raw_secret
166fda7 ANDROID: block: Prevent crypto fallback for wrapped keys
fe6e855 fscrypt: improve format of no-key names
216d8ca fscrypt: clarify what is meant by a per-file key
7e25032 fscrypt: derive dirhash key for casefolded directories
e16d849 fscrypt: don't allow v1 policies with casefolding
0bc68c1 fscrypt: add "fscrypt_" prefix to fname_encrypt()
85b9c3e fscrypt: don't print name of busy file when removing key
9c5c8c5 fscrypt: document gfp_flags for bounce page allocation
bee5bd5 fscrypt: optimize fscrypt_zeroout_range()
1c88eea fscrypt: remove redundant bi_status check
04f5184 fscrypt: Allow modular crypto algorithms
737ae90 fscrypt: include <linux/ioctl.h> in UAPI header
8842133 fscrypt: don't check for ENOKEY from fscrypt_get_encryption_info()
b21b79d fscrypt: remove fscrypt_is_direct_key_policy()
19b132b fscrypt: move fscrypt_valid_enc_modes() to policy.c
add6ac4 fscrypt: check for appropriate use of DIRECT_KEY flag earlier
2454b5b fscrypt: split up fscrypt_supported_policy() by policy version
bfa4ca6 fscrypt: introduce fscrypt_needs_contents_encryption()
3871977 fscrypt: move fscrypt_d_revalidate() to fname.c
39a0acc fscrypt: constify inode parameter to filename encryption functions
3942229 fscrypt: constify struct fscrypt_hkdf parameter to fscrypt_hkdf_expand()
a7b6398 fscrypt: verify that the crypto_skcipher has the correct ivsize
9c1b3af fscrypt: use crypto_skcipher_driver_name()
3529026 fscrypt: support passing a keyring key to FS_IOC_ADD_ENCRYPTION_KEY

Change-Id: Ib1abe832e16d5f40bfcc9e34bdccbb063b37dbbc
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
Branch: tirimbino
Author: Srinivasarao P
Parent: 975fd0628f
Commit: b73e822d12
47 changed files (lines changed per file):

 Documentation/filesystems/fscrypt.rst | 86
 block/blk-crypto-fallback.c | 50
 block/blk-crypto-internal.h | 9
 block/blk-crypto.c | 55
 block/keyslot-manager.c | 30
 drivers/md/dm-default-key.c | 29
 drivers/md/dm.c | 80
 drivers/scsi/ufs/ufshcd-crypto.c | 12
 drivers/scsi/ufs/ufshcd-crypto.h | 8
 drivers/scsi/ufs/ufshcd.c | 2
 drivers/scsi/ufs/ufshcd.h | 1
 fs/crypto/Kconfig | 22
 fs/crypto/bio.c | 177
 fs/crypto/crypto.c | 57
 fs/crypto/fname.c | 314
 fs/crypto/fscrypt_private.h | 87
 fs/crypto/hkdf.c | 2
 fs/crypto/hooks.c | 48
 fs/crypto/inline_crypt.c | 74
 fs/crypto/keyring.c | 160
 fs/crypto/keysetup.c | 141
 fs/crypto/keysetup_v1.c | 21
 fs/crypto/policy.c | 191
 fs/ext4/Kconfig | 1
 fs/ext4/dir.c | 9
 fs/ext4/ioctl.c | 6
 fs/ext4/namei.c | 1
 fs/ext4/super.c | 5
 fs/f2fs/Kconfig | 1
 fs/f2fs/dir.c | 74
 fs/f2fs/f2fs.h | 14
 fs/f2fs/file.c | 11
 fs/f2fs/hash.c | 25
 fs/f2fs/inline.c | 9
 fs/f2fs/namei.c | 1
 fs/f2fs/super.c | 7
 fs/inode.c | 3
 fs/libfs.c | 50
 fs/ubifs/Kconfig | 1
 fs/ubifs/dir.c | 20
 fs/ubifs/ioctl.c | 4
 include/linux/bio-crypt-ctx.h | 3
 include/linux/blk-crypto.h | 18
 include/linux/fs.h | 2
 include/linux/fscrypt.h | 134
 include/linux/keyslot-manager.h | 14
 include/uapi/linux/fscrypt.h | 15

@ -234,8 +234,8 @@ HKDF is more flexible, is nonreversible, and evenly distributes
entropy from the master key. HKDF is also standardized and widely
used by other software, whereas the AES-128-ECB based KDF is ad-hoc.
Per-file encryption keys
------------------------
Per-file keys
-------------
Since each master key can protect many files, it is necessary to
"tweak" the encryption of each file so that the same plaintext in two
@ -268,9 +268,9 @@ is greater than that of an AES-256-XTS key.
Therefore, to improve performance and save memory, for Adiantum a
"direct key" configuration is supported. When the user has enabled
this by setting FSCRYPT_POLICY_FLAG_DIRECT_KEY in the fscrypt policy,
per-file encryption keys are not used. Instead, whenever any data
(contents or filenames) is encrypted, the file's 16-byte nonce is
included in the IV. Moreover:
per-file keys are not used. Instead, whenever any data (contents or
filenames) is encrypted, the file's 16-byte nonce is included in the
IV. Moreover:
- For v1 encryption policies, the encryption is done directly with the
master key. Because of this, users **must not** use the same master
@ -302,16 +302,6 @@ For master keys used for v2 encryption policies, a unique 16-byte "key
identifier" is also derived using the KDF. This value is stored in
the clear, since it is needed to reliably identify the key itself.
Dirhash keys
------------
For directories that are indexed using a secret-keyed dirhash over the
plaintext filenames, the KDF is also used to derive a 128-bit
SipHash-2-4 key per directory in order to hash filenames. This works
just like deriving a per-file encryption key, except that a different
KDF context is used. Currently, only casefolded ("case-insensitive")
encrypted directories use this style of hashing.
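As an illustration of the dirhash use described above (a sketch only, based on the fscrypt_fname_siphash() code that appears later in this diff; the wrapper name is hypothetical): once the per-directory SipHash-2-4 key has been derived via the KDF, hashing a plaintext filename is just a SipHash call with that key.

	/* Hedged sketch: hash a plaintext filename with the directory's
	 * dirhash key, mirroring fscrypt_fname_siphash() in fname.c. */
	static u64 example_dirhash(const struct inode *dir,
				   const struct qstr *name)
	{
		const struct fscrypt_info *ci = dir->i_crypt_info;

		/* ci_dirhash_key is a siphash_key_t derived with
		 * HKDF_CONTEXT_DIRHASH_KEY (see fscrypt_private.h). */
		return siphash(name->name, name->len, &ci->ci_dirhash_key);
	}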
Encryption modes and usage
==========================
@ -335,11 +325,11 @@ used.
Adiantum is a (primarily) stream cipher-based mode that is fast even
on CPUs without dedicated crypto instructions. It's also a true
wide-block mode, unlike XTS. It can also eliminate the need to derive
per-file encryption keys. However, it depends on the security of two
primitives, XChaCha12 and AES-256, rather than just one. See the
paper "Adiantum: length-preserving encryption for entry-level
processors" (https://eprint.iacr.org/2018/720.pdf) for more details.
To use Adiantum, CONFIG_CRYPTO_ADIANTUM must be enabled. Also, fast
per-file keys. However, it depends on the security of two primitives,
XChaCha12 and AES-256, rather than just one. See the paper
"Adiantum: length-preserving encryption for entry-level processors"
(https://eprint.iacr.org/2018/720.pdf) for more details. To use
Adiantum, CONFIG_CRYPTO_ADIANTUM must be enabled. Also, fast
implementations of ChaCha and NHPoly1305 should be enabled, e.g.
CONFIG_CRYPTO_CHACHA20_NEON and CONFIG_CRYPTO_NHPOLY1305_NEON for ARM.
@ -523,9 +513,7 @@ FS_IOC_SET_ENCRYPTION_POLICY can fail with the following errors:
- ``EEXIST``: the file is already encrypted with an encryption policy
different from the one specified
- ``EINVAL``: an invalid encryption policy was specified (invalid
version, mode(s), or flags; or reserved bits were set); or a v1
encryption policy was specified but the directory has the casefold
flag enabled (casefolding is incompatible with v1 policies).
version, mode(s), or flags; or reserved bits were set)
- ``ENOKEY``: a v2 encryption policy was specified, but the key with
the specified ``master_key_identifier`` has not been added, nor does
the process have the CAP_FOWNER capability in the initial user
@ -633,17 +621,6 @@ from a passphrase or other low-entropy user credential.
FS_IOC_GET_ENCRYPTION_PWSALT is deprecated. Instead, prefer to
generate and manage any needed salt(s) in userspace.
Getting a file's encryption nonce
---------------------------------
Since Linux v5.7, the ioctl FS_IOC_GET_ENCRYPTION_NONCE is supported.
On encrypted files and directories it gets the inode's 16-byte nonce.
On unencrypted files and directories, it fails with ENODATA.
This ioctl can be useful for automated tests which verify that the
encryption is being done correctly. It is not needed for normal use
of fscrypt.
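For reference, a minimal user-space sketch of the nonce ioctl described above (illustrative test code, not part of this commit; it assumes a UAPI header that defines FS_IOC_GET_ENCRYPTION_NONCE):

	#include <sys/ioctl.h>
	#include <linux/fscrypt.h>

	/* Read the 16-byte nonce of an open encrypted file or directory. */
	static int get_nonce(int fd, __u8 nonce[16])
	{
		return ioctl(fd, FS_IOC_GET_ENCRYPTION_NONCE, nonce);
	}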
Adding keys
-----------
@ -661,8 +638,7 @@ follows::
struct fscrypt_add_key_arg {
struct fscrypt_key_specifier key_spec;
__u32 raw_size;
__u32 key_id;
__u32 __reserved[8];
__u32 __reserved[9];
__u8 raw[];
};
@ -679,12 +655,6 @@ follows::
} u;
};
struct fscrypt_provisioning_key_payload {
__u32 type;
__u32 __reserved;
__u8 raw[];
};
:c:type:`struct fscrypt_add_key_arg` must be zeroed, then initialized
as follows:
@ -707,26 +677,9 @@ as follows:
``Documentation/security/keys/core.rst``).
- ``raw_size`` must be the size of the ``raw`` key provided, in bytes.
Alternatively, if ``key_id`` is nonzero, this field must be 0, since
in that case the size is implied by the specified Linux keyring key.
- ``key_id`` is 0 if the raw key is given directly in the ``raw``
field. Otherwise ``key_id`` is the ID of a Linux keyring key of
type "fscrypt-provisioning" whose payload is a :c:type:`struct
fscrypt_provisioning_key_payload` whose ``raw`` field contains the
raw key and whose ``type`` field matches ``key_spec.type``. Since
``raw`` is variable-length, the total size of this key's payload
must be ``sizeof(struct fscrypt_provisioning_key_payload)`` plus the
raw key size. The process must have Search permission on this key.
Most users should leave this 0 and specify the raw key directly.
The support for specifying a Linux keyring key is intended mainly to
allow re-adding keys after a filesystem is unmounted and re-mounted,
without having to store the raw keys in userspace memory.
- ``raw`` is a variable-length field which must contain the actual
key, ``raw_size`` bytes long. Alternatively, if ``key_id`` is
nonzero, then this field is unused.
key, ``raw_size`` bytes long.
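A minimal user-space sketch of adding a v2 policy key with the structure above, as it looks after this revert (raw key only, no key_id path). The helper name and the caller-chosen key size are illustrative:

	#include <string.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/fscrypt.h>

	/* Add a raw key for v2 policies; on success the kernel fills in
	 * arg->key_spec.u.identifier with the computed key identifier. */
	static int add_key_example(int mnt_fd, const __u8 *raw, size_t raw_size)
	{
		struct fscrypt_add_key_arg *arg;
		int err;

		/* calloc() zeroes the struct, as required. */
		arg = calloc(1, sizeof(*arg) + raw_size);
		if (!arg)
			return -1;
		arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
		arg->raw_size = raw_size;
		memcpy(arg->raw, raw, raw_size);
		err = ioctl(mnt_fd, FS_IOC_ADD_ENCRYPTION_KEY, arg);
		free(arg);
		return err;
	}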
For v2 policy keys, the kernel keeps track of which user (identified
by effective user ID) added the key, and only allows the key to be
@ -748,16 +701,11 @@ FS_IOC_ADD_ENCRYPTION_KEY can fail with the following errors:
- ``EACCES``: FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR was specified, but the
caller does not have the CAP_SYS_ADMIN capability in the initial
user namespace; or the raw key was specified by Linux key ID but the
process lacks Search permission on the key.
user namespace
- ``EDQUOT``: the key quota for this user would be exceeded by adding
the key
- ``EINVAL``: invalid key size or key specifier type, or reserved bits
were set
- ``EKEYREJECTED``: the raw key was specified by Linux key ID, but the
key has the wrong type
- ``ENOKEY``: the raw key was specified by Linux key ID, but no key
exists with that ID
- ``ENOTTY``: this type of filesystem does not implement encryption
- ``EOPNOTSUPP``: the kernel was not configured with encryption
support for this filesystem, or the filesystem superblock has not
@ -1160,8 +1108,8 @@ The context structs contain the same information as the corresponding
policy structs (see `Setting an encryption policy`_), except that the
context structs also contain a nonce. The nonce is randomly generated
by the kernel and is used as KDF input or as a tweak to cause
different files to be encrypted differently; see `Per-file encryption
keys`_ and `DIRECT_KEY policies`_.
different files to be encrypted differently; see `Per-file keys`_ and
`DIRECT_KEY policies`_.
Data path changes
-----------------
@ -1213,7 +1161,7 @@ filesystem-specific hash(es) needed for directory lookups. This
allows the filesystem to still, with a high degree of confidence, map
the filename given in ->lookup() back to a particular directory entry
that was previously listed by readdir(). See :c:type:`struct
fscrypt_nokey_name` in the source for more details.
fscrypt_digested_name` in the source for more details.
Note that the precise way that filenames are presented to userspace
without the key is subject to change in the future. It is only meant

@ -487,13 +487,21 @@ out:
return false;
}
/*
* Prepare blk-crypto-fallback for the specified crypto mode.
* Returns -ENOPKG if the needed crypto API support is missing.
/**
* blk_crypto_start_using_mode() - Start using a crypto algorithm on a device
* @mode_num: the blk_crypto_mode we want to allocate ciphers for.
* @data_unit_size: the data unit size that will be used
* @q: the request queue for the device
*
* Upper layers must call this function to ensure that the crypto API fallback
* has transforms for this algorithm, if they become necessary.
*
* Return: 0 on success and -err on error.
*/
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num,
unsigned int data_unit_size,
struct request_queue *q)
{
const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
struct blk_crypto_keyslot *slotp;
unsigned int i;
int err = 0;
@ -506,20 +514,25 @@ int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
if (likely(smp_load_acquire(&tfms_inited[mode_num])))
return 0;
/*
* If the keyslot manager of the request queue supports this
* crypto mode, then we don't need to allocate this mode.
*/
if (keyslot_manager_crypto_mode_supported(q->ksm, mode_num,
data_unit_size))
return 0;
mutex_lock(&tfms_init_lock);
if (likely(tfms_inited[mode_num]))
goto out;
for (i = 0; i < blk_crypto_num_keyslots; i++) {
slotp = &blk_crypto_keyslots[i];
slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
slotp->tfms[mode_num] = crypto_alloc_skcipher(
blk_crypto_modes[mode_num].cipher_str,
0, 0);
if (IS_ERR(slotp->tfms[mode_num])) {
err = PTR_ERR(slotp->tfms[mode_num]);
if (err == -ENOENT) {
pr_warn_once("Missing crypto API support for \"%s\"\n",
cipher_str);
err = -ENOPKG;
}
slotp->tfms[mode_num] = NULL;
goto out_free_tfms;
}
@ -545,6 +558,7 @@ out:
mutex_unlock(&tfms_init_lock);
return err;
}
EXPORT_SYMBOL_GPL(blk_crypto_start_using_mode);
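A hedged sketch of how an upper layer would use the interface restored above (illustrative caller, not taken from this commit):

	/* Ensure AES-256-XTS with 4096-byte data units is usable on this
	 * queue, either via inline hardware or the crypto API fallback. */
	static int example_setup(struct request_queue *q)
	{
		return blk_crypto_start_using_mode(BLK_ENCRYPTION_MODE_AES_256_XTS,
						   4096, q);
	}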
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
@ -557,12 +571,6 @@ int blk_crypto_fallback_submit_bio(struct bio **bio_ptr)
struct bio_crypt_ctx *bc = bio->bi_crypt_context;
struct bio_fallback_crypt_ctx *f_ctx;
if (bc->bc_key->is_hw_wrapped) {
pr_warn_once("HW wrapped key cannot be used with fallback.\n");
bio->bi_status = BLK_STS_NOTSUPP;
return -EOPNOTSUPP;
}
if (!tfms_inited[bc->bc_key->crypto_mode]) {
bio->bi_status = BLK_STS_IOERR;
return -EIO;
@ -600,11 +608,9 @@ int __init blk_crypto_fallback_init(void)
crypto_mode_supported[i] = 0xFFFFFFFF;
crypto_mode_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
blk_crypto_ksm = keyslot_manager_create(
NULL, blk_crypto_num_keyslots,
&blk_crypto_ksm_ll_ops,
BLK_CRYPTO_FEATURE_STANDARD_KEYS,
crypto_mode_supported, NULL);
blk_crypto_ksm = keyslot_manager_create(NULL, blk_crypto_num_keyslots,
&blk_crypto_ksm_ll_ops,
crypto_mode_supported, NULL);
if (!blk_crypto_ksm)
return -ENOMEM;

@ -19,8 +19,6 @@ extern const struct blk_crypto_mode blk_crypto_modes[];
#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);
int blk_crypto_fallback_submit_bio(struct bio **bio_ptr);
bool blk_crypto_queue_decrypt_bio(struct bio *bio);
@ -31,13 +29,6 @@ bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc);
#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
pr_warn_once("crypto API fallback is disabled\n");
return -ENOPKG;
}
static inline bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc)
{
return false;

@ -109,8 +109,7 @@ int blk_crypto_submit_bio(struct bio **bio_ptr)
/* Get device keyslot if supported */
if (keyslot_manager_crypto_mode_supported(q->ksm,
bc->bc_key->crypto_mode,
bc->bc_key->data_unit_size,
bc->bc_key->is_hw_wrapped)) {
bc->bc_key->data_unit_size)) {
err = bio_crypt_ctx_acquire_keyslot(bc, q->ksm);
if (!err)
return 0;
@ -176,9 +175,7 @@ bool blk_crypto_endio(struct bio *bio)
* @raw_key_size: Size of raw key. Must be at least the required size for the
* chosen @crypto_mode; see blk_crypto_modes[]. (It's allowed
* to be longer than the mode's actual key size, in order to
* support inline encryption hardware that accepts wrapped keys.
* @is_hw_wrapped has to be set for such keys)
* @is_hw_wrapped: Denotes @raw_key is wrapped.
* support inline encryption hardware that accepts wrapped keys.)
* @crypto_mode: identifier for the encryption algorithm to use
* @data_unit_size: the data unit size to use for en/decryption
*
@ -187,7 +184,6 @@ bool blk_crypto_endio(struct bio *bio)
*/
int blk_crypto_init_key(struct blk_crypto_key *blk_key,
const u8 *raw_key, unsigned int raw_key_size,
bool is_hw_wrapped,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size)
{
@ -202,14 +198,9 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key,
BUILD_BUG_ON(BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE < BLK_CRYPTO_MAX_KEY_SIZE);
mode = &blk_crypto_modes[crypto_mode];
if (is_hw_wrapped) {
if (raw_key_size < mode->keysize ||
raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE)
return -EINVAL;
} else {
if (raw_key_size != mode->keysize)
return -EINVAL;
}
if (raw_key_size < mode->keysize ||
raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE)
return -EINVAL;
if (!is_power_of_2(data_unit_size))
return -EINVAL;
@ -218,7 +209,6 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key,
blk_key->data_unit_size = data_unit_size;
blk_key->data_unit_size_bits = ilog2(data_unit_size);
blk_key->size = raw_key_size;
blk_key->is_hw_wrapped = is_hw_wrapped;
memcpy(blk_key->raw, raw_key, raw_key_size);
/*
@ -233,38 +223,6 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key,
}
EXPORT_SYMBOL_GPL(blk_crypto_init_key);
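Similarly, a sketch of initializing a key with the post-revert blk_crypto_init_key() signature (caller and key material are hypothetical):

	/* Wrap a raw 64-byte (512-bit) key for AES-256-XTS with 4096-byte
	 * data units; the key is later attached to bios via a crypt context. */
	static int example_init_key(struct blk_crypto_key *bkey, const u8 *raw)
	{
		return blk_crypto_init_key(bkey, raw, 64,
					   BLK_ENCRYPTION_MODE_AES_256_XTS,
					   4096);
	}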
/**
* blk_crypto_start_using_mode() - Start using blk-crypto on a device
* @crypto_mode: the crypto mode that will be used
* @data_unit_size: the data unit size that will be used
* @is_hw_wrapped_key: whether the key will be hardware-wrapped
* @q: the request queue for the device
*
* Upper layers must call this function to ensure that either the hardware
* supports the needed crypto settings, or the crypto API fallback has
* transforms for the needed mode allocated and ready to go.
*
* Return: 0 on success; -ENOPKG if the hardware doesn't support the crypto
* settings and blk-crypto-fallback is either disabled or the needed
* algorithm is disabled in the crypto API; or another -errno code.
*/
int blk_crypto_start_using_mode(enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size,
bool is_hw_wrapped_key,
struct request_queue *q)
{
if (keyslot_manager_crypto_mode_supported(q->ksm, crypto_mode,
data_unit_size,
is_hw_wrapped_key))
return 0;
if (is_hw_wrapped_key) {
pr_warn_once("hardware doesn't support wrapped keys\n");
return -EOPNOTSUPP;
}
return blk_crypto_fallback_start_using_mode(crypto_mode);
}
EXPORT_SYMBOL_GPL(blk_crypto_start_using_mode);
/**
* blk_crypto_evict_key() - Evict a key from any inline encryption hardware
* it may have been programmed into
@ -285,8 +243,7 @@ int blk_crypto_evict_key(struct request_queue *q,
{
if (q->ksm &&
keyslot_manager_crypto_mode_supported(q->ksm, key->crypto_mode,
key->data_unit_size,
key->is_hw_wrapped))
key->data_unit_size))
return keyslot_manager_evict_key(q->ksm, key);
return blk_crypto_fallback_evict_key(key);

@ -44,7 +44,6 @@ struct keyslot {
struct keyslot_manager {
unsigned int num_slots;
struct keyslot_mgmt_ll_ops ksm_ll_ops;
unsigned int features;
unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX];
void *ll_priv_data;
@ -137,8 +136,6 @@ static inline void keyslot_manager_hw_exit(struct keyslot_manager *ksm)
* @ksm_ll_ops: The struct keyslot_mgmt_ll_ops for the device that this keyslot
* manager will use to perform operations like programming and
* evicting keys.
* @features: The supported features as a bitmask of BLK_CRYPTO_FEATURE_* flags.
* Most drivers should set BLK_CRYPTO_FEATURE_STANDARD_KEYS here.
* @crypto_mode_supported: Array of size BLK_ENCRYPTION_MODE_MAX of
* bitmasks that represents whether a crypto mode
* and data unit size are supported. The i'th bit
@ -158,7 +155,6 @@ struct keyslot_manager *keyslot_manager_create(
struct device *dev,
unsigned int num_slots,
const struct keyslot_mgmt_ll_ops *ksm_ll_ops,
unsigned int features,
const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX],
void *ll_priv_data)
{
@ -180,7 +176,6 @@ struct keyslot_manager *keyslot_manager_create(
ksm->num_slots = num_slots;
ksm->ksm_ll_ops = *ksm_ll_ops;
ksm->features = features;
memcpy(ksm->crypto_mode_supported, crypto_mode_supported,
sizeof(ksm->crypto_mode_supported));
ksm->ll_priv_data = ll_priv_data;
@ -387,24 +382,23 @@ void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot)
}
/**
* keyslot_manager_crypto_mode_supported() - Find out if a crypto_mode /
* data unit size / is_hw_wrapped_key
* combination is supported by a ksm.
* keyslot_manager_crypto_mode_supported() - Find out if a crypto_mode/data
* unit size combination is supported
* by a ksm.
* @ksm: The keyslot manager to check
* @crypto_mode: The crypto mode to check for.
* @data_unit_size: The data_unit_size for the mode.
* @is_hw_wrapped_key: Whether a hardware-wrapped key will be used.
*
* Calls and returns the result of the crypto_mode_supported function specified
* by the ksm.
*
* Context: Process context.
* Return: Whether or not this ksm supports the specified crypto settings.
* Return: Whether or not this ksm supports the specified crypto_mode/
* data_unit_size combo.
*/
bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size,
bool is_hw_wrapped_key)
unsigned int data_unit_size)
{
if (!ksm)
return false;
@ -412,13 +406,6 @@ bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm,
return false;
if (WARN_ON(!is_power_of_2(data_unit_size)))
return false;
if (is_hw_wrapped_key) {
if (!(ksm->features & BLK_CRYPTO_FEATURE_WRAPPED_KEYS))
return false;
} else {
if (!(ksm->features & BLK_CRYPTO_FEATURE_STANDARD_KEYS))
return false;
}
return ksm->crypto_mode_supported[crypto_mode] & data_unit_size;
}
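Note how the final check treats crypto_mode_supported[] as a bitmask of supported data unit sizes: since data_unit_size is a power of two, it selects exactly one bit. A hedged driver-side sketch of declaring support (array and function names are illustrative):

	/* Declare support for AES-256-XTS with 512- and 4096-byte data units;
	 * a real driver would fill this from its hardware capabilities. */
	static unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX];

	static void example_declare_modes(void)
	{
		crypto_modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] =
			512 | 4096;
	}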
@ -534,7 +521,6 @@ EXPORT_SYMBOL_GPL(keyslot_manager_destroy);
* keyslot_manager_create_passthrough() - Create a passthrough keyslot manager
* @dev: Device for runtime power management (NULL if none)
* @ksm_ll_ops: The struct keyslot_mgmt_ll_ops
* @features: Bitmask of BLK_CRYPTO_FEATURE_* flags
* @crypto_mode_supported: Bitmasks for supported encryption modes
* @ll_priv_data: Private data passed as is to the functions in ksm_ll_ops.
*
@ -552,7 +538,6 @@ EXPORT_SYMBOL_GPL(keyslot_manager_destroy);
struct keyslot_manager *keyslot_manager_create_passthrough(
struct device *dev,
const struct keyslot_mgmt_ll_ops *ksm_ll_ops,
unsigned int features,
const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX],
void *ll_priv_data)
{
@ -563,7 +548,6 @@ struct keyslot_manager *keyslot_manager_create_passthrough(
return NULL;
ksm->ksm_ll_ops = *ksm_ll_ops;
ksm->features = features;
memcpy(ksm->crypto_mode_supported, crypto_mode_supported,
sizeof(ksm->crypto_mode_supported));
ksm->ll_priv_data = ll_priv_data;
@ -592,13 +576,11 @@ void keyslot_manager_intersect_modes(struct keyslot_manager *parent,
if (child) {
unsigned int i;
parent->features &= child->features;
for (i = 0; i < ARRAY_SIZE(child->crypto_mode_supported); i++) {
parent->crypto_mode_supported[i] &=
child->crypto_mode_supported[i];
}
} else {
parent->features = 0;
memset(parent->crypto_mode_supported, 0,
sizeof(parent->crypto_mode_supported));
}

@ -9,7 +9,7 @@
#define DM_MSG_PREFIX "default-key"
#define DM_DEFAULT_KEY_MAX_WRAPPED_KEY_SIZE 128
#define DM_DEFAULT_KEY_MAX_KEY_SIZE 64
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
@ -49,7 +49,6 @@ struct default_key_c {
unsigned int sector_size;
unsigned int sector_bits;
struct blk_crypto_key key;
bool is_hw_wrapped;
};
static const struct dm_default_key_cipher *
@ -85,7 +84,7 @@ static int default_key_ctr_optional(struct dm_target *ti,
struct default_key_c *dkc = ti->private;
struct dm_arg_set as;
static const struct dm_arg _args[] = {
{0, 4, "Invalid number of feature args"},
{0, 3, "Invalid number of feature args"},
};
unsigned int opt_params;
const char *opt_string;
@ -118,8 +117,6 @@ static int default_key_ctr_optional(struct dm_target *ti,
}
} else if (!strcmp(opt_string, "iv_large_sectors")) {
iv_large_sectors = true;
} else if (!strcmp(opt_string, "wrappedkey_v0")) {
dkc->is_hw_wrapped = true;
} else {
ti->error = "Invalid feature arguments";
return -EINVAL;
@ -147,8 +144,7 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct default_key_c *dkc;
const struct dm_default_key_cipher *cipher;
u8 raw_key[DM_DEFAULT_KEY_MAX_WRAPPED_KEY_SIZE];
unsigned int raw_key_size;
u8 raw_key[DM_DEFAULT_KEY_MAX_KEY_SIZE];
unsigned long long tmpll;
char dummy;
int err;
@ -180,15 +176,12 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
/* <key> */
raw_key_size = strlen(argv[1]);
if (raw_key_size > 2 * DM_DEFAULT_KEY_MAX_WRAPPED_KEY_SIZE ||
raw_key_size % 2) {
ti->error = "Invalid keysize";
if (strlen(argv[1]) != 2 * cipher->key_size) {
ti->error = "Incorrect key size for cipher";
err = -EINVAL;
goto bad;
}
raw_key_size /= 2;
if (hex2bin(raw_key, argv[1], raw_key_size) != 0) {
if (hex2bin(raw_key, argv[1], cipher->key_size) != 0) {
ti->error = "Malformed key string";
err = -EINVAL;
goto bad;
@ -233,15 +226,13 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
err = blk_crypto_init_key(&dkc->key, raw_key, cipher->key_size,
dkc->is_hw_wrapped, cipher->mode_num,
dkc->sector_size);
cipher->mode_num, dkc->sector_size);
if (err) {
ti->error = "Error initializing blk-crypto key";
goto bad;
}
err = blk_crypto_start_using_mode(cipher->mode_num, dkc->sector_size,
dkc->is_hw_wrapped,
dkc->dev->bdev->bd_queue);
if (err) {
ti->error = "Error starting to use blk-crypto";
@ -328,8 +319,6 @@ static void default_key_status(struct dm_target *ti, status_type_t type,
num_feature_args += !!ti->num_discard_bios;
if (dkc->sector_size != SECTOR_SIZE)
num_feature_args += 2;
if (dkc->is_hw_wrapped)
num_feature_args += 1;
if (num_feature_args != 0) {
DMEMIT(" %d", num_feature_args);
if (ti->num_discard_bios)
@ -338,8 +327,6 @@ static void default_key_status(struct dm_target *ti, status_type_t type,
DMEMIT(" sector_size:%u", dkc->sector_size);
DMEMIT(" iv_large_sectors");
}
if (dkc->is_hw_wrapped)
DMEMIT(" wrappedkey_v0");
}
break;
}
@ -385,7 +372,7 @@ static void default_key_io_hints(struct dm_target *ti,
static struct target_type default_key_target = {
.name = "default-key",
.version = {2, 1, 0},
.version = {2, 0, 0},
.module = THIS_MODULE,
.ctr = default_key_ctr,
.dtr = default_key_dtr,

@ -2088,98 +2088,22 @@ static int dm_keyslot_evict(struct keyslot_manager *ksm,
return args.err;
}
struct dm_derive_raw_secret_args {
const u8 *wrapped_key;
unsigned int wrapped_key_size;
u8 *secret;
unsigned int secret_size;
int err;
};
static int dm_derive_raw_secret_callback(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
struct dm_derive_raw_secret_args *args = data;
struct request_queue *q = dev->bdev->bd_queue;
if (!args->err)
return 0;
if (!q->ksm) {
args->err = -EOPNOTSUPP;
return 0;
}
args->err = keyslot_manager_derive_raw_secret(q->ksm, args->wrapped_key,
args->wrapped_key_size,
args->secret,
args->secret_size);
/* Try another device in case this fails. */
return 0;
}
/*
* Retrieve the raw_secret from the underlying device. Given that
* only one raw_secret can exist for a particular wrapped key,
* retrieve it only from the first device that supports derive_raw_secret()
*/
static int dm_derive_raw_secret(struct keyslot_manager *ksm,
const u8 *wrapped_key,
unsigned int wrapped_key_size,
u8 *secret, unsigned int secret_size)
{
struct mapped_device *md = keyslot_manager_private(ksm);
struct dm_derive_raw_secret_args args = {
.wrapped_key = wrapped_key,
.wrapped_key_size = wrapped_key_size,
.secret = secret,
.secret_size = secret_size,
.err = -EOPNOTSUPP,
};
struct dm_table *t;
int srcu_idx;
int i;
struct dm_target *ti;
t = dm_get_live_table(md, &srcu_idx);
if (!t)
return -EOPNOTSUPP;
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
if (!ti->type->iterate_devices)
continue;
ti->type->iterate_devices(ti, dm_derive_raw_secret_callback,
&args);
if (!args.err)
break;
}
dm_put_live_table(md, srcu_idx);
return args.err;
}
static struct keyslot_mgmt_ll_ops dm_ksm_ll_ops = {
.keyslot_evict = dm_keyslot_evict,
.derive_raw_secret = dm_derive_raw_secret,
};
static int dm_init_inline_encryption(struct mapped_device *md)
{
unsigned int features;
unsigned int mode_masks[BLK_ENCRYPTION_MODE_MAX];
/*
* Initially declare support for all crypto settings. Anything
* unsupported by a child device will be removed later when calculating
* the device restrictions.
* Start out with all crypto mode support bits set. Any unsupported
* bits will be cleared later when calculating the device restrictions.
*/
features = BLK_CRYPTO_FEATURE_STANDARD_KEYS |
BLK_CRYPTO_FEATURE_WRAPPED_KEYS;
memset(mode_masks, 0xFF, sizeof(mode_masks));
md->queue->ksm = keyslot_manager_create_passthrough(NULL,
&dm_ksm_ll_ops,
features,
mode_masks, md);
if (!md->queue->ksm)
return -ENOMEM;

@ -336,9 +336,7 @@ int ufshcd_hba_init_crypto_spec(struct ufs_hba *hba,
ufshcd_clear_all_keyslots(hba);
hba->ksm = keyslot_manager_create(hba->dev, ufshcd_num_keyslots(hba),
ksm_ops,
BLK_CRYPTO_FEATURE_STANDARD_KEYS,
crypto_modes_supported, hba);
ksm_ops, crypto_modes_supported, hba);
if (!hba->ksm) {
err = -ENOMEM;
@ -458,14 +456,6 @@ int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba,
return ufshcd_prepare_lrbp_crypto_spec(hba, cmd, lrbp);
}
int ufshcd_map_sg_crypto(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
if (hba->crypto_vops && hba->crypto_vops->map_sg_crypto)
return hba->crypto_vops->map_sg_crypto(hba, lrbp);
return 0;
}
int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp)

@ -80,8 +80,6 @@ int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp);
int ufshcd_map_sg_crypto(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp);
@ -135,12 +133,6 @@ static inline int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba,
return 0;
}
static inline int ufshcd_map_sg_crypto(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp)
{
return 0;
}
static inline bool ufshcd_lrbp_crypto_enabled(struct ufshcd_lrb *lrbp)
{
return false;

@ -2023,7 +2023,7 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
lrbp->utr_descriptor_ptr->prd_table_length = 0;
}
return ufshcd_map_sg_crypto(hba, lrbp);
return 0;
}
/**

@ -354,7 +354,6 @@ struct ufs_hba_crypto_variant_ops {
int (*prepare_lrbp_crypto)(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp);
int (*map_sg_crypto)(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
int (*complete_lrbp_crypto)(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp);

@ -1,8 +1,13 @@
config FS_ENCRYPTION
bool "FS Encryption (Per-file encryption)"
select CRYPTO
select CRYPTO_HASH
select CRYPTO_BLKCIPHER
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_ECB
select CRYPTO_XTS
select CRYPTO_CTS
select CRYPTO_SHA512
select CRYPTO_HMAC
select KEYS
help
Enable encryption of files and directories. This
@ -11,19 +16,6 @@ config FS_ENCRYPTION
decrypted pages in the page cache. Currently Ext4,
F2FS and UBIFS make use of this feature.
# Filesystems supporting encryption must select this if FS_ENCRYPTION. This
# allows the algorithms to be built as modules when all the filesystems are.
config FS_ENCRYPTION_ALGS
tristate
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_CTS
select CRYPTO_ECB
select CRYPTO_HMAC
select CRYPTO_SHA256
select CRYPTO_SHA512
select CRYPTO_XTS
config FS_ENCRYPTION_INLINE_CRYPT
bool "Enable fscrypt to use inline crypto"
depends on FS_ENCRYPTION && BLK_INLINE_ENCRYPTION

@ -41,154 +41,63 @@ void fscrypt_decrypt_bio(struct bio *bio)
}
EXPORT_SYMBOL(fscrypt_decrypt_bio);
static int fscrypt_zeroout_range_inlinecrypt(const struct inode *inode,
pgoff_t lblk,
sector_t pblk, unsigned int len)
{
const unsigned int blockbits = inode->i_blkbits;
const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits;
const unsigned int blocks_per_page = 1 << blocks_per_page_bits;
unsigned int i;
struct bio *bio;
int ret, err;
/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
do {
bio_set_dev(bio, inode->i_sb->s_bdev);
bio->bi_iter.bi_sector = pblk << (blockbits - 9);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
i = 0;
do {
unsigned int blocks_this_page =
min(len, blocks_per_page);
unsigned int bytes_this_page =
blocks_this_page << blockbits;
ret = bio_add_page(bio, ZERO_PAGE(0),
bytes_this_page, 0);
if (WARN_ON(ret != bytes_this_page)) {
err = -EIO;
goto out;
}
lblk += blocks_this_page;
pblk += blocks_this_page;
len -= blocks_this_page;
} while (++i != BIO_MAX_PAGES && len != 0);
err = submit_bio_wait(bio);
if (err)
goto out;
bio_reset(bio);
} while (len != 0);
err = 0;
out:
bio_put(bio);
return err;
}
/**
* fscrypt_zeroout_range() - zero out a range of blocks in an encrypted file
* @inode: the file's inode
* @lblk: the first file logical block to zero out
* @pblk: the first filesystem physical block to zero out
* @len: number of blocks to zero out
*
* Zero out filesystem blocks in an encrypted regular file on-disk, i.e. write
* ciphertext blocks which decrypt to the all-zeroes block. The blocks must be
* both logically and physically contiguous. It's also assumed that the
* filesystem only uses a single block device, ->s_bdev.
*
* Note that since each block uses a different IV, this involves writing a
* different ciphertext to each block; we can't simply reuse the same one.
*
* Return: 0 on success; -errno on failure.
*/
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
sector_t pblk, unsigned int len)
{
const unsigned int blockbits = inode->i_blkbits;
const unsigned int blocksize = 1 << blockbits;
const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits;
const unsigned int blocks_per_page = 1 << blocks_per_page_bits;
struct page *pages[16]; /* write up to 16 pages at a time */
unsigned int nr_pages;
unsigned int i;
unsigned int offset;
const bool inlinecrypt = fscrypt_inode_uses_inline_crypto(inode);
struct page *ciphertext_page;
struct bio *bio;
int ret, err;
if (len == 0)
return 0;
if (fscrypt_inode_uses_inline_crypto(inode))
return fscrypt_zeroout_range_inlinecrypt(inode, lblk, pblk,
len);
BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_PAGES);
nr_pages = min_t(unsigned int, ARRAY_SIZE(pages),
(len + blocks_per_page - 1) >> blocks_per_page_bits);
/*
* We need at least one page for ciphertext. Allocate the first one
* from a mempool, with __GFP_DIRECT_RECLAIM set so that it can't fail.
*
* Any additional page allocations are allowed to fail, as they only
* help performance, and waiting on the mempool for them could deadlock.
*/
for (i = 0; i < nr_pages; i++) {
pages[i] = fscrypt_alloc_bounce_page(i == 0 ? GFP_NOFS :
GFP_NOWAIT | __GFP_NOWARN);
if (!pages[i])
break;
int ret, err = 0;
if (inlinecrypt) {
ciphertext_page = ZERO_PAGE(0);
} else {
ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT);
if (!ciphertext_page)
return -ENOMEM;
}
nr_pages = i;
if (WARN_ON(nr_pages <= 0))
return -EINVAL;
/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
bio = bio_alloc(GFP_NOFS, nr_pages);
while (len--) {
if (!inlinecrypt) {
err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
ZERO_PAGE(0), ciphertext_page,
blocksize, 0, GFP_NOFS);
if (err)
goto errout;
}
bio = bio_alloc(GFP_NOWAIT, 1);
if (!bio) {
err = -ENOMEM;
goto errout;
}
fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOIO);
do {
bio_set_dev(bio, inode->i_sb->s_bdev);
bio->bi_iter.bi_sector = pblk << (blockbits - 9);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
i = 0;
offset = 0;
do {
err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
ZERO_PAGE(0), pages[i],
blocksize, offset, GFP_NOFS);
if (err)
goto out;
lblk++;
pblk++;
len--;
offset += blocksize;
if (offset == PAGE_SIZE || len == 0) {
ret = bio_add_page(bio, pages[i++], offset, 0);
if (WARN_ON(ret != offset)) {
err = -EIO;
goto out;
}
offset = 0;
}
} while (i != nr_pages && len != 0);
ret = bio_add_page(bio, ciphertext_page, blocksize, 0);
if (WARN_ON(ret != blocksize)) {
/* should never happen! */
bio_put(bio);
err = -EIO;
goto errout;
}
err = submit_bio_wait(bio);
if (err == 0 && bio->bi_status)
err = -EIO;
bio_put(bio);
if (err)
goto out;
bio_reset(bio);
} while (len != 0);
goto errout;
lblk++;
pblk++;
}
err = 0;
out:
bio_put(bio);
for (i = 0; i < nr_pages; i++)
fscrypt_free_bounce_page(pages[i]);
errout:
if (!inlinecrypt)
fscrypt_free_bounce_page(ciphertext_page);
return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);
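A hedged sketch of the intended caller: a filesystem zeroing a contiguous block range of a regular file, dispatching to fscrypt for encrypted inodes (names are illustrative; ext4_issue_zeroout() has a similar call site):

	/* Zero 'len' blocks, writing ciphertext that decrypts to zeroes when
	 * the inode is encrypted, and plain zeroes otherwise. */
	static int example_zeroout(struct inode *inode, pgoff_t lblk,
				   sector_t pblk, unsigned int len)
	{
		if (IS_ENCRYPTED(inode))
			return fscrypt_zeroout_range(inode, lblk, pblk, len);
		return sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	}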

@ -24,6 +24,8 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"
@ -137,7 +139,7 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
* multiple of the filesystem's block size.
* @offs: Byte offset within @page of the first block to encrypt. Must be
* a multiple of the filesystem's block size.
* @gfp_flags: Memory allocation flags. See details below.
* @gfp_flags: Memory allocation flags
*
* A new bounce page is allocated, and the specified block(s) are encrypted into
* it. In the bounce page, the ciphertext block(s) will be located at the same
@ -147,11 +149,6 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
*
* This is for use by the filesystem's ->writepages() method.
*
* The bounce page allocation is mempool-backed, so it will always succeed when
* @gfp_flags includes __GFP_DIRECT_RECLAIM, e.g. when it's GFP_NOFS. However,
* only the first page of each bio can be allocated this way. To prevent
* deadlocks, for any additional pages a mask like GFP_NOWAIT must be used.
*
* Return: the new encrypted bounce page on success; an ERR_PTR() on failure
*/
struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
@ -288,6 +285,54 @@ int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
}
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);
/*
* Validate dentries in encrypted directories to make sure we aren't potentially
* caching stale dentries after a key has been added.
*/
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
struct dentry *dir;
int err;
int valid;
/*
* Plaintext names are always valid, since fscrypt doesn't support
* reverting to ciphertext names without evicting the directory's inode
* -- which implies eviction of the dentries in the directory.
*/
if (!(dentry->d_flags & DCACHE_ENCRYPTED_NAME))
return 1;
/*
* Ciphertext name; valid if the directory's key is still unavailable.
*
* Although fscrypt forbids rename() on ciphertext names, we still must
* use dget_parent() here rather than use ->d_parent directly. That's
* because a corrupted fs image may contain directory hard links, which
* the VFS handles by moving the directory's dentry tree in the dcache
* each time ->lookup() finds the directory and it already has a dentry
* elsewhere. Thus ->d_parent can be changing, and we must safely grab
* a reference to some ->d_parent to prevent it from being freed.
*/
if (flags & LOOKUP_RCU)
return -ECHILD;
dir = dget_parent(dentry);
err = fscrypt_get_encryption_info(d_inode(dir));
valid = !fscrypt_has_encryption_key(d_inode(dir));
dput(dir);
if (err < 0)
return err;
return valid;
}
const struct dentry_operations fscrypt_d_ops = {
.d_revalidate = fscrypt_d_revalidate,
};
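For context, in the scheme this revert returns to, fscrypt itself installs these dentry_operations on no-key dentries during lookup preparation. Roughly, and only as a sketch based on mainline kernels of this era rather than code in this diff, __fscrypt_prepare_lookup() does:

	/* Sketch: mark a dentry whose name is still ciphertext and attach
	 * fscrypt_d_ops so it gets revalidated once the key is added. */
	if (fname->is_ciphertext_name) {
		spin_lock(&dentry->d_lock);
		dentry->d_flags |= DCACHE_ENCRYPTED_NAME;
		spin_unlock(&dentry->d_lock);
		d_set_d_op(dentry, &fscrypt_d_ops);
	}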
/**
* fscrypt_initialize() - allocate major buffers for fs encryption.
* @cop_flags: fscrypt operations flags

@ -11,88 +11,10 @@
* This has not yet undergone a rigorous security audit.
*/
#include <linux/namei.h>
#include <linux/scatterlist.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"
/**
* struct fscrypt_nokey_name - identifier for directory entry when key is absent
*
* When userspace lists an encrypted directory without access to the key, the
* filesystem must present a unique "no-key name" for each filename that allows
* it to find the directory entry again if requested. Naively, that would just
* mean using the ciphertext filenames. However, since the ciphertext filenames
* can contain illegal characters ('\0' and '/'), they must be encoded in some
* way. We use base64. But that can cause names to exceed NAME_MAX (255
* bytes), so we also need to use a strong hash to abbreviate long names.
*
* The filesystem may also need another kind of hash, the "dirhash", to quickly
* find the directory entry. Since filesystems normally compute the dirhash
* over the on-disk filename (i.e. the ciphertext), it's not computable from
* no-key names that abbreviate the ciphertext using the strong hash to fit in
* NAME_MAX. It's also not computable if it's a keyed hash taken over the
* plaintext (but it may still be available in the on-disk directory entry);
* casefolded directories use this type of dirhash. At least in these cases,
* each no-key name must include the name's dirhash too.
*
* To meet all these requirements, we base64-encode the following
* variable-length structure. It contains the dirhash, or 0's if the filesystem
* didn't provide one; up to 149 bytes of the ciphertext name; and for
* ciphertexts longer than 149 bytes, also the SHA-256 of the remaining bytes.
*
* This ensures that each no-key name contains everything needed to find the
* directory entry again, contains only legal characters, doesn't exceed
* NAME_MAX, is unambiguous unless there's a SHA-256 collision, and that we only
* take the performance hit of SHA-256 on very long filenames (which are rare).
*/
struct fscrypt_nokey_name {
u32 dirhash[2];
u8 bytes[149];
u8 sha256[SHA256_DIGEST_SIZE];
}; /* 189 bytes => 252 bytes base64-encoded, which is <= NAME_MAX (255) */
/*
* Decoded size of max-size nokey name, i.e. a name that was abbreviated using
* the strong hash and thus includes the 'sha256' field. This isn't simply
* sizeof(struct fscrypt_nokey_name), as the padding at the end isn't included.
*/
#define FSCRYPT_NOKEY_NAME_MAX offsetofend(struct fscrypt_nokey_name, sha256)
static struct crypto_shash *sha256_hash_tfm;
static int fscrypt_do_sha256(const u8 *data, unsigned int data_len, u8 *result)
{
struct crypto_shash *tfm = READ_ONCE(sha256_hash_tfm);
if (unlikely(!tfm)) {
struct crypto_shash *prev_tfm;
tfm = crypto_alloc_shash("sha256", 0, 0);
if (IS_ERR(tfm)) {
fscrypt_err(NULL,
"Error allocating SHA-256 transform: %ld",
PTR_ERR(tfm));
return PTR_ERR(tfm);
}
prev_tfm = cmpxchg(&sha256_hash_tfm, NULL, tfm);
if (prev_tfm) {
crypto_free_shash(tfm);
tfm = prev_tfm;
}
}
{
SHASH_DESC_ON_STACK(desc, tfm);
desc->tfm = tfm;
desc->flags = 0;
return crypto_shash_digest(desc, data, data_len, result);
}
}
static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
{
if (str->len == 1 && str->name[0] == '.')
@ -105,19 +27,19 @@ static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
}
/**
* fscrypt_fname_encrypt() - encrypt a filename
* fname_encrypt() - encrypt a filename
*
* The output buffer must be at least as large as the input buffer.
* Any extra space is filled with NUL padding before encryption.
*
* Return: 0 on success, -errno on failure
*/
int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
u8 *out, unsigned int olen)
int fname_encrypt(struct inode *inode, const struct qstr *iname,
u8 *out, unsigned int olen)
{
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
const struct fscrypt_info *ci = inode->i_crypt_info;
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_key.tfm;
union fscrypt_iv iv;
struct scatterlist sg;
@ -163,14 +85,14 @@ int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
*
* Return: 0 on success, -errno on failure
*/
static int fname_decrypt(const struct inode *inode,
const struct fscrypt_str *iname,
struct fscrypt_str *oname)
static int fname_decrypt(struct inode *inode,
const struct fscrypt_str *iname,
struct fscrypt_str *oname)
{
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
struct scatterlist src_sg, dst_sg;
const struct fscrypt_info *ci = inode->i_crypt_info;
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_key.tfm;
union fscrypt_iv iv;
int res;
@ -284,7 +206,9 @@ int fscrypt_fname_alloc_buffer(const struct inode *inode,
u32 max_encrypted_len,
struct fscrypt_str *crypto_str)
{
const u32 max_encoded_len = BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX);
const u32 max_encoded_len =
max_t(u32, BASE64_CHARS(FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE),
1 + BASE64_CHARS(sizeof(struct fscrypt_digested_name)));
u32 max_presented_len;
max_presented_len = max(max_encoded_len, max_encrypted_len);
@ -317,21 +241,19 @@ EXPORT_SYMBOL(fscrypt_fname_free_buffer);
*
* The caller must have allocated sufficient memory for the @oname string.
*
* If the key is available, we'll decrypt the disk name. Otherwise, we'll
* encode it for presentation in fscrypt_nokey_name format.
* See struct fscrypt_nokey_name for details.
* If the key is available, we'll decrypt the disk name; otherwise, we'll encode
* it for presentation. Short names are directly base64-encoded, while long
* names are encoded in fscrypt_digested_name format.
*
* Return: 0 on success, -errno on failure
*/
int fscrypt_fname_disk_to_usr(const struct inode *inode,
u32 hash, u32 minor_hash,
const struct fscrypt_str *iname,
struct fscrypt_str *oname)
int fscrypt_fname_disk_to_usr(struct inode *inode,
u32 hash, u32 minor_hash,
const struct fscrypt_str *iname,
struct fscrypt_str *oname)
{
const struct qstr qname = FSTR_TO_QSTR(iname);
struct fscrypt_nokey_name nokey_name;
u32 size; /* size of the unencoded no-key name */
int err;
struct fscrypt_digested_name digested_name;
if (fscrypt_is_dot_dotdot(&qname)) {
oname->name[0] = '.';
@ -346,37 +268,24 @@ int fscrypt_fname_disk_to_usr(const struct inode *inode,
if (fscrypt_has_encryption_key(inode))
return fname_decrypt(inode, iname, oname);
/*
* Sanity check that struct fscrypt_nokey_name doesn't have padding
* between fields and that its encoded size never exceeds NAME_MAX.
*/
BUILD_BUG_ON(offsetofend(struct fscrypt_nokey_name, dirhash) !=
offsetof(struct fscrypt_nokey_name, bytes));
BUILD_BUG_ON(offsetofend(struct fscrypt_nokey_name, bytes) !=
offsetof(struct fscrypt_nokey_name, sha256));
BUILD_BUG_ON(BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX) > NAME_MAX);
if (hash) {
nokey_name.dirhash[0] = hash;
nokey_name.dirhash[1] = minor_hash;
} else {
nokey_name.dirhash[0] = 0;
nokey_name.dirhash[1] = 0;
if (iname->len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE) {
oname->len = base64_encode(iname->name, iname->len,
oname->name);
return 0;
}
if (iname->len <= sizeof(nokey_name.bytes)) {
memcpy(nokey_name.bytes, iname->name, iname->len);
size = offsetof(struct fscrypt_nokey_name, bytes[iname->len]);
if (hash) {
digested_name.hash = hash;
digested_name.minor_hash = minor_hash;
} else {
memcpy(nokey_name.bytes, iname->name, sizeof(nokey_name.bytes));
/* Compute strong hash of remaining part of name. */
err = fscrypt_do_sha256(&iname->name[sizeof(nokey_name.bytes)],
iname->len - sizeof(nokey_name.bytes),
nokey_name.sha256);
if (err)
return err;
size = FSCRYPT_NOKEY_NAME_MAX;
digested_name.hash = 0;
digested_name.minor_hash = 0;
}
oname->len = base64_encode((const u8 *)&nokey_name, size, oname->name);
memcpy(digested_name.digest,
FSCRYPT_FNAME_DIGEST(iname->name, iname->len),
FSCRYPT_FNAME_DIGEST_SIZE);
oname->name[0] = '_';
oname->len = 1 + base64_encode((const u8 *)&digested_name,
sizeof(digested_name), oname->name + 1);
return 0;
}
EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
@ -397,7 +306,8 @@ EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
* get the disk_name.
*
* Else, for keyless @lookup operations, @iname is the presented ciphertext, so
* we decode it to get the fscrypt_nokey_name. Non-@lookup operations will be
* we decode it to get either the ciphertext disk_name (for short names) or the
* fscrypt_digested_name (for long names). Non-@lookup operations will be
* impossible in this case, so we fail them with ENOKEY.
*
* If successful, fscrypt_free_filename() must be called later to clean up.
@ -407,8 +317,8 @@ EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
int lookup, struct fscrypt_name *fname)
{
struct fscrypt_nokey_name *nokey_name;
int ret;
int digested;
memset(fname, 0, sizeof(struct fscrypt_name));
fname->usr_fname = iname;
@ -432,8 +342,8 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
if (!fname->crypto_buf.name)
return -ENOMEM;
ret = fscrypt_fname_encrypt(dir, iname, fname->crypto_buf.name,
fname->crypto_buf.len);
ret = fname_encrypt(dir, iname, fname->crypto_buf.name,
fname->crypto_buf.len);
if (ret)
goto errout;
fname->disk_name.name = fname->crypto_buf.name;
@ -448,31 +358,40 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
* We don't have the key and we are doing a lookup; decode the
* user-supplied name
*/
if (iname->name[0] == '_') {
if (iname->len !=
1 + BASE64_CHARS(sizeof(struct fscrypt_digested_name)))
return -ENOENT;
digested = 1;
} else {
if (iname->len >
BASE64_CHARS(FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE))
return -ENOENT;
digested = 0;
}
if (iname->len > BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX))
return -ENOENT;
fname->crypto_buf.name = kmalloc(FSCRYPT_NOKEY_NAME_MAX, GFP_KERNEL);
fname->crypto_buf.name =
kmalloc(max_t(size_t, FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE,
sizeof(struct fscrypt_digested_name)),
GFP_KERNEL);
if (fname->crypto_buf.name == NULL)
return -ENOMEM;
ret = base64_decode(iname->name, iname->len, fname->crypto_buf.name);
if (ret < (int)offsetof(struct fscrypt_nokey_name, bytes[1]) ||
(ret > offsetof(struct fscrypt_nokey_name, sha256) &&
ret != FSCRYPT_NOKEY_NAME_MAX)) {
ret = base64_decode(iname->name + digested, iname->len - digested,
fname->crypto_buf.name);
if (ret < 0) {
ret = -ENOENT;
goto errout;
}
fname->crypto_buf.len = ret;
nokey_name = (void *)fname->crypto_buf.name;
fname->hash = nokey_name->dirhash[0];
fname->minor_hash = nokey_name->dirhash[1];
if (ret != FSCRYPT_NOKEY_NAME_MAX) {
/* The full ciphertext filename is available. */
fname->disk_name.name = nokey_name->bytes;
fname->disk_name.len =
ret - offsetof(struct fscrypt_nokey_name, bytes);
if (digested) {
const struct fscrypt_digested_name *n =
(const void *)fname->crypto_buf.name;
fname->hash = n->hash;
fname->minor_hash = n->minor_hash;
} else {
fname->disk_name.name = fname->crypto_buf.name;
fname->disk_name.len = fname->crypto_buf.len;
}
return 0;
@ -481,106 +400,3 @@ errout:
return ret;
}
EXPORT_SYMBOL(fscrypt_setup_filename);
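A hedged sketch of how a filesystem's lookup path consumes the result (a generic illustration; the real callers live in ext4, f2fs and ubifs and treat -ENOENT as "no such entry"):

	/* Look up a directory entry either by disk_name (key available, or a
	 * short no-key name) or by the hash carried in a digested name. */
	static int example_lookup(struct inode *dir, const struct qstr *name)
	{
		struct fscrypt_name fname;
		int err;

		err = fscrypt_setup_filename(dir, name, 1 /* lookup */, &fname);
		if (err)
			return err;

		if (fname.disk_name.name) {
			/* search the directory for an exact on-disk match */
		} else {
			/* search by fname.hash / fname.minor_hash and compare
			 * against the fscrypt_digested_name digest */
		}

		fscrypt_free_filename(&fname);
		return 0;
	}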
/**
* fscrypt_match_name() - test whether the given name matches a directory entry
* @fname: the name being searched for
* @de_name: the name from the directory entry
* @de_name_len: the length of @de_name in bytes
*
* Normally @fname->disk_name will be set, and in that case we simply compare
* that to the name stored in the directory entry. The only exception is that
* if we don't have the key for an encrypted directory and the name we're
* looking for is very long, then we won't have the full disk_name and instead
* we'll need to match against a fscrypt_nokey_name that includes a strong hash.
*
* Return: %true if the name matches, otherwise %false.
*/
bool fscrypt_match_name(const struct fscrypt_name *fname,
const u8 *de_name, u32 de_name_len)
{
const struct fscrypt_nokey_name *nokey_name =
(const void *)fname->crypto_buf.name;
u8 sha256[SHA256_DIGEST_SIZE];
if (likely(fname->disk_name.name)) {
if (de_name_len != fname->disk_name.len)
return false;
return !memcmp(de_name, fname->disk_name.name, de_name_len);
}
if (de_name_len <= sizeof(nokey_name->bytes))
return false;
if (memcmp(de_name, nokey_name->bytes, sizeof(nokey_name->bytes)))
return false;
if (fscrypt_do_sha256(&de_name[sizeof(nokey_name->bytes)],
de_name_len - sizeof(nokey_name->bytes), sha256))
return false;
return !memcmp(sha256, nokey_name->sha256, sizeof(sha256));
}
EXPORT_SYMBOL_GPL(fscrypt_match_name);
/**
* fscrypt_fname_siphash() - calculate the SipHash of a filename
* @dir: the parent directory
* @name: the filename to calculate the SipHash of
*
* Given a plaintext filename @name and a directory @dir which uses SipHash as
* its dirhash method and has had its fscrypt key set up, this function
* calculates the SipHash of that name using the directory's secret dirhash key.
*
* Return: the SipHash of @name using the hash key of @dir
*/
u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name)
{
const struct fscrypt_info *ci = dir->i_crypt_info;
WARN_ON(!ci->ci_dirhash_key_initialized);
return siphash(name->name, name->len, &ci->ci_dirhash_key);
}
EXPORT_SYMBOL_GPL(fscrypt_fname_siphash);
/*
* Validate dentries in encrypted directories to make sure we aren't potentially
* caching stale dentries after a key has been added.
*/
int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
struct dentry *dir;
int err;
int valid;
/*
* Plaintext names are always valid, since fscrypt doesn't support
* reverting to ciphertext names without evicting the directory's inode
* -- which implies eviction of the dentries in the directory.
*/
if (!(dentry->d_flags & DCACHE_ENCRYPTED_NAME))
return 1;
/*
* Ciphertext name; valid if the directory's key is still unavailable.
*
* Although fscrypt forbids rename() on ciphertext names, we still must
* use dget_parent() here rather than use ->d_parent directly. That's
* because a corrupted fs image may contain directory hard links, which
* the VFS handles by moving the directory's dentry tree in the dcache
* each time ->lookup() finds the directory and it already has a dentry
* elsewhere. Thus ->d_parent can be changing, and we must safely grab
* a reference to some ->d_parent to prevent it from being freed.
*/
if (flags & LOOKUP_RCU)
return -ECHILD;
dir = dget_parent(dentry);
err = fscrypt_get_encryption_info(d_inode(dir));
valid = !fscrypt_has_encryption_key(d_inode(dir));
dput(dir);
if (err < 0)
return err;
return valid;
}
EXPORT_SYMBOL(fscrypt_d_revalidate);

@ -12,7 +12,6 @@
#define _FSCRYPT_PRIVATE_H
#include <linux/fscrypt.h>
#include <linux/siphash.h>
#include <crypto/hash.h>
#include <linux/bio-crypt-ctx.h>
@ -78,26 +77,6 @@ static inline int fscrypt_context_size(const union fscrypt_context *ctx)
return 0;
}
/* Check whether an fscrypt_context has a recognized version number and size */
static inline bool fscrypt_context_is_valid(const union fscrypt_context *ctx,
int ctx_size)
{
return ctx_size >= 1 && ctx_size == fscrypt_context_size(ctx);
}
/* Retrieve the context's nonce, assuming the context was already validated */
static inline const u8 *fscrypt_context_nonce(const union fscrypt_context *ctx)
{
switch (ctx->version) {
case FSCRYPT_CONTEXT_V1:
return ctx->v1.nonce;
case FSCRYPT_CONTEXT_V2:
return ctx->v2.nonce;
}
WARN_ON(1);
return NULL;
}
#undef fscrypt_policy
union fscrypt_policy {
u8 version;
@ -159,6 +138,12 @@ fscrypt_policy_flags(const union fscrypt_policy *policy)
BUG();
}
static inline bool
fscrypt_is_direct_key_policy(const union fscrypt_policy *policy)
{
return fscrypt_policy_flags(policy) & FSCRYPT_POLICY_FLAG_DIRECT_KEY;
}
/**
* For encrypted symlinks, the ciphertext length is stored at the beginning
* of the string in little-endian format.
@ -233,14 +218,6 @@ struct fscrypt_info {
*/
struct fscrypt_direct_key *ci_direct_key;
/*
* This inode's hash key for filenames. This is a 128-bit SipHash-2-4
* key. This is only set for directories that use a keyed dirhash over
* the plaintext filenames -- currently just casefolded directories.
*/
siphash_key_t ci_dirhash_key;
bool ci_dirhash_key_initialized;
/* The encryption policy used by this inode */
union fscrypt_policy ci_policy;
@ -253,6 +230,24 @@ typedef enum {
FS_ENCRYPT,
} fscrypt_direction_t;
static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
u32 filenames_mode)
{
if (contents_mode == FSCRYPT_MODE_AES_128_CBC &&
filenames_mode == FSCRYPT_MODE_AES_128_CTS)
return true;
if (contents_mode == FSCRYPT_MODE_AES_256_XTS &&
filenames_mode == FSCRYPT_MODE_AES_256_CTS)
return true;
if (contents_mode == FSCRYPT_MODE_ADIANTUM &&
filenames_mode == FSCRYPT_MODE_ADIANTUM)
return true;
return false;
}
/* crypto.c */
extern struct kmem_cache *fscrypt_info_cachep;
extern int fscrypt_initialize(unsigned int cop_flags);
@ -262,6 +257,7 @@ extern int fscrypt_crypt_block(const struct inode *inode,
unsigned int len, unsigned int offs,
gfp_t gfp_flags);
extern struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags);
extern const struct dentry_operations fscrypt_d_ops;
extern void __printf(3, 4) __cold
fscrypt_msg(const struct inode *inode, const char *level, const char *fmt, ...);
@ -289,9 +285,8 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
const struct fscrypt_info *ci);
/* fname.c */
extern int fscrypt_fname_encrypt(const struct inode *inode,
const struct qstr *iname,
u8 *out, unsigned int olen);
extern int fname_encrypt(struct inode *inode, const struct qstr *iname,
u8 *out, unsigned int olen);
extern bool fscrypt_fname_encrypted_size(const struct inode *inode,
u32 orig_len, u32 max_len,
u32 *encrypted_len_ret);
@ -313,12 +308,11 @@ extern int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key,
* output doesn't reveal another.
*/
#define HKDF_CONTEXT_KEY_IDENTIFIER 1
#define HKDF_CONTEXT_PER_FILE_ENC_KEY 2
#define HKDF_CONTEXT_PER_FILE_KEY 2
#define HKDF_CONTEXT_DIRECT_KEY 3
#define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4
#define HKDF_CONTEXT_DIRHASH_KEY 5
extern int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context,
extern int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context,
const u8 *info, unsigned int infolen,
u8 *okm, unsigned int okmlen);
@ -326,8 +320,7 @@ extern void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf);
/* inline_crypt.c */
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
extern int fscrypt_select_encryption_impl(struct fscrypt_info *ci,
bool is_hw_wrapped_key);
extern void fscrypt_select_encryption_impl(struct fscrypt_info *ci);
static inline bool
fscrypt_using_inline_encryption(const struct fscrypt_info *ci)
@ -339,7 +332,6 @@ extern int fscrypt_prepare_inline_crypt_key(
struct fscrypt_prepared_key *prep_key,
const u8 *raw_key,
unsigned int raw_key_size,
bool is_hw_wrapped,
const struct fscrypt_info *ci);
extern void fscrypt_destroy_inline_crypt_key(
@ -371,10 +363,8 @@ fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
static inline int fscrypt_select_encryption_impl(struct fscrypt_info *ci,
bool is_hw_wrapped_key)
static inline void fscrypt_select_encryption_impl(struct fscrypt_info *ci)
{
return 0;
}
static inline bool fscrypt_using_inline_encryption(
@ -386,7 +376,6 @@ static inline bool fscrypt_using_inline_encryption(
static inline int
fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
const u8 *raw_key, unsigned int raw_key_size,
bool is_hw_wrapped,
const struct fscrypt_info *ci)
{
WARN_ON(1);
@ -579,18 +568,20 @@ struct fscrypt_mode {
extern struct fscrypt_mode fscrypt_modes[];
static inline bool
fscrypt_mode_supports_direct_key(const struct fscrypt_mode *mode)
{
return mode->ivsize >= offsetofend(union fscrypt_iv, nonce);
}
extern int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
const u8 *raw_key, unsigned int raw_key_size,
bool is_hw_wrapped,
const struct fscrypt_info *ci);
extern void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key);
extern int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci,
const u8 *raw_key);
extern int fscrypt_derive_dirhash_key(struct fscrypt_info *ci,
const struct fscrypt_master_key *mk);
extern int fscrypt_set_derived_key(struct fscrypt_info *ci,
const u8 *derived_key);
/* keysetup_v1.c */

@ -113,7 +113,7 @@ out:
* adds to its application-specific info strings to guarantee that it doesn't
* accidentally repeat an info string when using HKDF for different purposes.)
*/
int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context,
int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context,
const u8 *info, unsigned int infolen,
u8 *okm, unsigned int okmlen)
{

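To make the uniqueness guarantee described in the comment above concrete: the effective HKDF-Expand "info" is the caller's info prefixed with a fixed application string and the one-byte context. A conceptual sketch (the real helper in fs/crypto/hkdf.c hashes the pieces separately rather than building one buffer):

/* Conceptual sketch only.
 * Effective info = "fscrypt\0" || context byte || caller info, so e.g.
 * HKDF_CONTEXT_PER_FILE_KEY output can never collide with
 * HKDF_CONTEXT_DIRHASH_KEY output, even for the same nonce. */
u8 prefix[9];

memcpy(prefix, "fscrypt\0", 8);
prefix[8] = context;	/* one of the HKDF_CONTEXT_* values */
/* the HMAC is then updated with prefix, info, and a block counter */
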
@ -4,8 +4,6 @@
* Encryption hooks for higher-level filesystem operations.
*/
#include <linux/key.h>
#include "fscrypt_private.h"
/**
@ -117,53 +115,12 @@ int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
spin_lock(&dentry->d_lock);
dentry->d_flags |= DCACHE_ENCRYPTED_NAME;
spin_unlock(&dentry->d_lock);
d_set_d_op(dentry, &fscrypt_d_ops);
}
return err;
}
EXPORT_SYMBOL_GPL(__fscrypt_prepare_lookup);
/**
* fscrypt_prepare_setflags() - prepare to change flags with FS_IOC_SETFLAGS
* @inode: the inode on which flags are being changed
* @oldflags: the old flags
* @flags: the new flags
*
* The caller should be holding i_rwsem for write.
*
* Return: 0 on success; -errno if the flags change isn't allowed or if
* another error occurs.
*/
int fscrypt_prepare_setflags(struct inode *inode,
unsigned int oldflags, unsigned int flags)
{
struct fscrypt_info *ci;
struct fscrypt_master_key *mk;
int err;
/*
* When the CASEFOLD flag is set on an encrypted directory, we must
* derive the secret key needed for the dirhash. This is only possible
* if the directory uses a v2 encryption policy.
*/
if (IS_ENCRYPTED(inode) && (flags & ~oldflags & FS_CASEFOLD_FL)) {
err = fscrypt_require_key(inode);
if (err)
return err;
ci = inode->i_crypt_info;
if (ci->ci_policy.version != FSCRYPT_POLICY_V2)
return -EINVAL;
mk = ci->ci_master_key->payload.data[0];
down_read(&mk->mk_secret_sem);
if (is_master_key_secret_present(&mk->mk_secret))
err = fscrypt_derive_dirhash_key(ci, mk);
else
err = -ENOKEY;
up_read(&mk->mk_secret_sem);
return err;
}
return 0;
}
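
For orientation, the userspace operation that reaches this hook is an FS_IOC_SETFLAGS call that newly sets FS_CASEFOLD_FL on an encrypted directory. A rough sketch (illustrative only; error handling trimmed, and it only succeeds for v2-policy directories whose key is present):

/* Illustrative userspace snippet; not part of this change. */
#include <sys/ioctl.h>
#include <linux/fs.h>

int enable_casefold(int dirfd)
{
	int attr;

	if (ioctl(dirfd, FS_IOC_GETFLAGS, &attr) != 0)
		return -1;
	attr |= FS_CASEFOLD_FL;
	/* lands in fscrypt_prepare_setflags() above */
	return ioctl(dirfd, FS_IOC_SETFLAGS, &attr);
}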
int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
unsigned int max_len,
struct fscrypt_str *disk_link)
@ -230,8 +187,7 @@ int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
ciphertext_len = disk_link->len - sizeof(*sd);
sd->len = cpu_to_le16(ciphertext_len);
err = fscrypt_fname_encrypt(inode, &iname, sd->encrypted_path,
ciphertext_len);
err = fname_encrypt(inode, &iname, sd->encrypted_path, ciphertext_len);
if (err)
goto err_free_sd;

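For readability, the on-disk layout that sd->len and sd->encrypted_path in the hunk above refer to (ciphertext length stored first, little-endian, as noted earlier in this diff) is shown here for reference only, assuming it matches the fscrypt_symlink_data definition in <linux/fscrypt.h> in this tree:

/* Reference only; the real definition lives in include/linux/fscrypt.h. */
struct fscrypt_symlink_data {
	__le16 len;		/* ciphertext length, little-endian */
	char encrypted_path[1];	/* encrypted symlink target follows */
} __packed;
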
@ -26,94 +26,44 @@ struct fscrypt_blk_crypto_key {
struct request_queue *devs[];
};
static int fscrypt_get_num_devices(struct super_block *sb)
{
if (sb->s_cop->get_num_devices)
return sb->s_cop->get_num_devices(sb);
return 1;
}
static void fscrypt_get_devices(struct super_block *sb, int num_devs,
struct request_queue **devs)
{
if (num_devs == 1)
devs[0] = bdev_get_queue(sb->s_bdev);
else
sb->s_cop->get_devices(sb, devs);
}
/* Enable inline encryption for this file if supported. */
int fscrypt_select_encryption_impl(struct fscrypt_info *ci,
bool is_hw_wrapped_key)
void fscrypt_select_encryption_impl(struct fscrypt_info *ci)
{
const struct inode *inode = ci->ci_inode;
struct super_block *sb = inode->i_sb;
enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
struct request_queue **devs;
int num_devs;
int i;
/* The file must need contents encryption, not filenames encryption */
if (!S_ISREG(inode->i_mode))
return 0;
return;
/* blk-crypto must implement the needed encryption algorithm */
if (crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
return 0;
if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
return;
/* The filesystem must be mounted with -o inlinecrypt */
if (!sb->s_cop->inline_crypt_enabled ||
!sb->s_cop->inline_crypt_enabled(sb))
return 0;
/*
* The needed encryption settings must be supported either by
* blk-crypto-fallback, or by hardware on all the filesystem's devices.
*/
if (IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) &&
!is_hw_wrapped_key) {
ci->ci_inlinecrypt = true;
return 0;
}
num_devs = fscrypt_get_num_devices(sb);
devs = kmalloc_array(num_devs, sizeof(*devs), GFP_NOFS);
if (!devs)
return -ENOMEM;
fscrypt_get_devices(sb, num_devs, devs);
for (i = 0; i < num_devs; i++) {
if (!keyslot_manager_crypto_mode_supported(devs[i]->ksm,
crypto_mode,
sb->s_blocksize,
is_hw_wrapped_key))
goto out_free_devs;
}
return;
ci->ci_inlinecrypt = true;
out_free_devs:
kfree(devs);
return 0;
}
int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
const u8 *raw_key,
unsigned int raw_key_size,
bool is_hw_wrapped,
const struct fscrypt_info *ci)
{
const struct inode *inode = ci->ci_inode;
struct super_block *sb = inode->i_sb;
enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
int num_devs;
int num_devs = 1;
int queue_refs = 0;
struct fscrypt_blk_crypto_key *blk_key;
int err;
int i;
num_devs = fscrypt_get_num_devices(sb);
if (sb->s_cop->get_num_devices)
num_devs = sb->s_cop->get_num_devices(sb);
if (WARN_ON(num_devs < 1))
return -EINVAL;
@ -122,13 +72,16 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
return -ENOMEM;
blk_key->num_devs = num_devs;
fscrypt_get_devices(sb, num_devs, blk_key->devs);
if (num_devs == 1)
blk_key->devs[0] = bdev_get_queue(sb->s_bdev);
else
sb->s_cop->get_devices(sb, blk_key->devs);
BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE >
BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE);
err = blk_crypto_init_key(&blk_key->base, raw_key, raw_key_size,
is_hw_wrapped, crypto_mode, sb->s_blocksize);
crypto_mode, sb->s_blocksize);
if (err) {
fscrypt_err(inode, "error %d initializing blk-crypto key", err);
goto fail;
@ -150,7 +103,6 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
queue_refs++;
err = blk_crypto_start_using_mode(crypto_mode, sb->s_blocksize,
is_hw_wrapped,
blk_key->devs[i]);
if (err) {
fscrypt_err(inode,

@ -465,111 +465,6 @@ out_unlock:
return err;
}
static int fscrypt_provisioning_key_preparse(struct key_preparsed_payload *prep)
{
const struct fscrypt_provisioning_key_payload *payload = prep->data;
BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE < FSCRYPT_MAX_KEY_SIZE);
if (prep->datalen < sizeof(*payload) + FSCRYPT_MIN_KEY_SIZE ||
prep->datalen > sizeof(*payload) + FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE)
return -EINVAL;
if (payload->type != FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR &&
payload->type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER)
return -EINVAL;
if (payload->__reserved)
return -EINVAL;
prep->payload.data[0] = kmemdup(payload, prep->datalen, GFP_KERNEL);
if (!prep->payload.data[0])
return -ENOMEM;
prep->quotalen = prep->datalen;
return 0;
}
static void fscrypt_provisioning_key_free_preparse(
struct key_preparsed_payload *prep)
{
kzfree(prep->payload.data[0]);
}
static void fscrypt_provisioning_key_describe(const struct key *key,
struct seq_file *m)
{
seq_puts(m, key->description);
if (key_is_positive(key)) {
const struct fscrypt_provisioning_key_payload *payload =
key->payload.data[0];
seq_printf(m, ": %u [%u]", key->datalen, payload->type);
}
}
static void fscrypt_provisioning_key_destroy(struct key *key)
{
kzfree(key->payload.data[0]);
}
static struct key_type key_type_fscrypt_provisioning = {
.name = "fscrypt-provisioning",
.preparse = fscrypt_provisioning_key_preparse,
.free_preparse = fscrypt_provisioning_key_free_preparse,
.instantiate = generic_key_instantiate,
.describe = fscrypt_provisioning_key_describe,
.destroy = fscrypt_provisioning_key_destroy,
};
/*
* Retrieve the raw key from the Linux keyring key specified by 'key_id', and
* store it into 'secret'.
*
* The key must be of type "fscrypt-provisioning" and must have the field
* fscrypt_provisioning_key_payload::type set to 'type', indicating that it's
* only usable with fscrypt with the particular KDF version identified by
* 'type'. We don't use the "logon" key type because there's no way to
* completely restrict the use of such keys; they can be used by any kernel API
* that accepts "logon" keys and doesn't require a specific service prefix.
*
* The ability to specify the key via Linux keyring key is intended for cases
* where userspace needs to re-add keys after the filesystem is unmounted and
* re-mounted. Most users should just provide the raw key directly instead.
*/
static int get_keyring_key(u32 key_id, u32 type,
struct fscrypt_master_key_secret *secret)
{
key_ref_t ref;
struct key *key;
const struct fscrypt_provisioning_key_payload *payload;
int err;
ref = lookup_user_key(key_id, 0, KEY_NEED_SEARCH);
if (IS_ERR(ref))
return PTR_ERR(ref);
key = key_ref_to_ptr(ref);
if (key->type != &key_type_fscrypt_provisioning)
goto bad_key;
payload = key->payload.data[0];
/* Don't allow fscrypt v1 keys to be used as v2 keys and vice versa. */
if (payload->type != type)
goto bad_key;
secret->size = key->datalen - sizeof(*payload);
memcpy(secret->raw, payload->raw, secret->size);
err = 0;
goto out_put;
bad_key:
err = -EKEYREJECTED;
out_put:
key_ref_put(ref);
return err;
}
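
For context on what this hunk removes, a rough userspace sketch of the key_id flow (struct layouts are assumed to match the UAPI headers from this series; the key description and function names are illustrative, and error handling is omitted): the raw key is first loaded into an "fscrypt-provisioning" keyring key, and the resulting key ID is then passed to FS_IOC_ADD_ENCRYPTION_KEY in place of the raw bytes.

/* Illustrative userspace usage of the interface removed above. */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <keyutils.h>
#include <linux/fscrypt.h>

static key_serial_t provision_key(const unsigned char *raw, size_t raw_size)
{
	struct fscrypt_provisioning_key_payload *payload;
	key_serial_t id;

	payload = calloc(1, sizeof(*payload) + raw_size);
	payload->type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
	memcpy(payload->raw, raw, raw_size);
	id = add_key("fscrypt-provisioning", "example-desc", payload,
		     sizeof(*payload) + raw_size, KEY_SPEC_SESSION_KEYRING);
	free(payload);
	return id;
}

static int add_key_to_fs(int mnt_fd, key_serial_t id)
{
	struct fscrypt_add_key_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
	arg.key_id = id;	/* raw_size stays 0 when key_id is used */
	return ioctl(mnt_fd, FS_IOC_ADD_ENCRYPTION_KEY, &arg);
}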
/* Size of software "secret" derived from hardware-wrapped key */
#define RAW_SECRET_SIZE 32
@ -617,28 +512,20 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg)
if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved)))
return -EINVAL;
BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE <
FSCRYPT_MAX_KEY_SIZE);
if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE ||
arg.raw_size >
((arg.__flags & __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) ?
FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE : FSCRYPT_MAX_KEY_SIZE))
return -EINVAL;
memset(&secret, 0, sizeof(secret));
if (arg.key_id) {
if (arg.raw_size != 0)
return -EINVAL;
err = get_keyring_key(arg.key_id, arg.key_spec.type, &secret);
if (err)
goto out_wipe_secret;
err = -EINVAL;
if (!(arg.__flags & __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) &&
secret.size > FSCRYPT_MAX_KEY_SIZE)
goto out_wipe_secret;
} else {
if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE ||
arg.raw_size >
((arg.__flags & __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) ?
FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE : FSCRYPT_MAX_KEY_SIZE))
return -EINVAL;
secret.size = arg.raw_size;
err = -EFAULT;
if (copy_from_user(secret.raw, uarg->raw, secret.size))
goto out_wipe_secret;
}
secret.size = arg.raw_size;
err = -EFAULT;
if (copy_from_user(secret.raw, uarg->raw, secret.size))
goto out_wipe_secret;
switch (arg.key_spec.type) {
case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR:
@ -809,6 +696,9 @@ static int check_for_busy_inodes(struct super_block *sb,
struct list_head *pos;
size_t busy_count = 0;
unsigned long ino;
struct dentry *dentry;
char _path[256];
char *path = NULL;
spin_lock(&mk->mk_decrypted_inodes_lock);
@ -827,14 +717,22 @@ static int check_for_busy_inodes(struct super_block *sb,
struct fscrypt_info,
ci_master_key_link)->ci_inode;
ino = inode->i_ino;
dentry = d_find_alias(inode);
}
spin_unlock(&mk->mk_decrypted_inodes_lock);
if (dentry) {
path = dentry_path(dentry, _path, sizeof(_path));
dput(dentry);
}
if (IS_ERR_OR_NULL(path))
path = "(unknown)";
fscrypt_warn(NULL,
"%s: %zu inode(s) still busy after removing key with %s %*phN, including ino %lu",
"%s: %zu inode(s) still busy after removing key with %s %*phN, including ino %lu (%s)",
sb->s_id, busy_count, master_key_spec_type(&mk->mk_spec),
master_key_spec_len(&mk->mk_spec), (u8 *)&mk->mk_spec.u,
ino);
ino, path);
return -EBUSY;
}
@ -1132,14 +1030,8 @@ int __init fscrypt_init_keyring(void)
if (err)
goto err_unregister_fscrypt;
err = register_key_type(&key_type_fscrypt_provisioning);
if (err)
goto err_unregister_fscrypt_user;
return 0;
err_unregister_fscrypt_user:
unregister_key_type(&key_type_fscrypt_user);
err_unregister_fscrypt:
unregister_key_type(&key_type_fscrypt);
return err;

@ -92,11 +92,8 @@ fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key,
* first time a mode is used.
*/
pr_info("fscrypt: %s using implementation \"%s\"\n",
mode->friendly_name, crypto_skcipher_driver_name(tfm));
}
if (WARN_ON(crypto_skcipher_ivsize(tfm) != mode->ivsize)) {
err = -EINVAL;
goto err_free_tfm;
mode->friendly_name,
crypto_skcipher_alg(tfm)->base.cra_driver_name);
}
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
err = crypto_skcipher_setkey(tfm, raw_key, mode->keysize);
@ -117,15 +114,15 @@ err_free_tfm:
*/
int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
const u8 *raw_key, unsigned int raw_key_size,
bool is_hw_wrapped, const struct fscrypt_info *ci)
const struct fscrypt_info *ci)
{
struct crypto_skcipher *tfm;
if (fscrypt_using_inline_encryption(ci))
return fscrypt_prepare_inline_crypt_key(prep_key,
raw_key, raw_key_size, is_hw_wrapped, ci);
raw_key, raw_key_size, ci);
if (WARN_ON(is_hw_wrapped || raw_key_size != ci->ci_mode->keysize))
if (WARN_ON(raw_key_size != ci->ci_mode->keysize))
return -EINVAL;
tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode);
@ -146,18 +143,18 @@ void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key)
fscrypt_destroy_inline_crypt_key(prep_key);
}
/* Given a per-file encryption key, set up the file's crypto transform object */
int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key)
/* Given the per-file key, set up the file's crypto transform object */
int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key)
{
ci->ci_owns_key = true;
return fscrypt_prepare_key(&ci->ci_key, raw_key, ci->ci_mode->keysize,
false /*is_hw_wrapped*/, ci);
return fscrypt_prepare_key(&ci->ci_key, derived_key,
ci->ci_mode->keysize, ci);
}
static int setup_per_mode_enc_key(struct fscrypt_info *ci,
struct fscrypt_master_key *mk,
struct fscrypt_prepared_key *keys,
u8 hkdf_context, bool include_fs_uuid)
static int setup_per_mode_key(struct fscrypt_info *ci,
struct fscrypt_master_key *mk,
struct fscrypt_prepared_key *keys,
u8 hkdf_context, bool include_fs_uuid)
{
static DEFINE_MUTEX(mode_key_setup_mutex);
const struct inode *inode = ci->ci_inode;
@ -202,7 +199,7 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci,
}
}
err = fscrypt_prepare_key(prep_key, mk->mk_secret.raw,
mk->mk_secret.size, true, ci);
mk->mk_secret.size, ci);
if (err)
goto out_unlock;
} else {
@ -221,7 +218,7 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci,
if (err)
goto out_unlock;
err = fscrypt_prepare_key(prep_key, mode_key, mode->keysize,
false /*is_hw_wrapped*/, ci);
ci);
memzero_explicit(mode_key, mode->keysize);
if (err)
goto out_unlock;
@ -234,24 +231,10 @@ out_unlock:
return err;
}
int fscrypt_derive_dirhash_key(struct fscrypt_info *ci,
const struct fscrypt_master_key *mk)
{
int err;
err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, HKDF_CONTEXT_DIRHASH_KEY,
ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE,
(u8 *)&ci->ci_dirhash_key,
sizeof(ci->ci_dirhash_key));
if (err)
return err;
ci->ci_dirhash_key_initialized = true;
return 0;
}
static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
struct fscrypt_master_key *mk)
{
u8 derived_key[FSCRYPT_MAX_KEY_SIZE];
int err;
if (mk->mk_secret.is_hw_wrapped &&
@ -263,15 +246,21 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
/*
* DIRECT_KEY: instead of deriving per-file encryption keys, the
* per-file nonce will be included in all the IVs. But unlike
* v1 policies, for v2 policies in this case we don't encrypt
* with the master key directly but rather derive a per-mode
* encryption key. This ensures that the master key is
* consistently used only for HKDF, avoiding key reuse issues.
* DIRECT_KEY: instead of deriving per-file keys, the per-file
* nonce will be included in all the IVs. But unlike v1
* policies, for v2 policies in this case we don't encrypt with
* the master key directly but rather derive a per-mode key.
* This ensures that the master key is consistently used only
* for HKDF, avoiding key reuse issues.
*/
err = setup_per_mode_enc_key(ci, mk, mk->mk_direct_keys,
HKDF_CONTEXT_DIRECT_KEY, false);
if (!fscrypt_mode_supports_direct_key(ci->ci_mode)) {
fscrypt_warn(ci->ci_inode,
"Direct key flag not allowed with %s",
ci->ci_mode->friendly_name);
return -EINVAL;
}
return setup_per_mode_key(ci, mk, mk->mk_direct_keys,
HKDF_CONTEXT_DIRECT_KEY, false);
} else if (ci->ci_policy.v2.flags &
FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
/*
@ -280,34 +269,21 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
* the IVs. This format is optimized for use with inline
* encryption hardware compliant with the UFS or eMMC standards.
*/
err = setup_per_mode_enc_key(ci, mk, mk->mk_iv_ino_lblk_64_keys,
HKDF_CONTEXT_IV_INO_LBLK_64_KEY,
true);
} else {
u8 derived_key[FSCRYPT_MAX_KEY_SIZE];
err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
HKDF_CONTEXT_PER_FILE_ENC_KEY,
ci->ci_nonce,
FS_KEY_DERIVATION_NONCE_SIZE,
derived_key, ci->ci_mode->keysize);
if (err)
return err;
err = fscrypt_set_per_file_enc_key(ci, derived_key);
memzero_explicit(derived_key, ci->ci_mode->keysize);
return setup_per_mode_key(ci, mk, mk->mk_iv_ino_lblk_64_keys,
HKDF_CONTEXT_IV_INO_LBLK_64_KEY,
true);
}
err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
HKDF_CONTEXT_PER_FILE_KEY,
ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE,
derived_key, ci->ci_mode->keysize);
if (err)
return err;
/* Derive a secret dirhash key for directories that need it. */
if (S_ISDIR(ci->ci_inode->i_mode) && IS_CASEFOLDED(ci->ci_inode)) {
err = fscrypt_derive_dirhash_key(ci, mk);
if (err)
return err;
}
return 0;
err = fscrypt_set_derived_key(ci, derived_key);
memzero_explicit(derived_key, ci->ci_mode->keysize);
return err;
}
/*
@ -328,6 +304,8 @@ static int setup_file_encryption_key(struct fscrypt_info *ci,
struct fscrypt_key_specifier mk_spec;
int err;
fscrypt_select_encryption_impl(ci);
switch (ci->ci_policy.version) {
case FSCRYPT_POLICY_V1:
mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR;
@ -352,10 +330,6 @@ static int setup_file_encryption_key(struct fscrypt_info *ci,
ci->ci_policy.version != FSCRYPT_POLICY_V1)
return PTR_ERR(key);
err = fscrypt_select_encryption_impl(ci, false);
if (err)
return err;
/*
* As a legacy fallback for v1 policies, search for the key in
* the current task's subscribed keyrings too. Don't move this
@ -390,10 +364,6 @@ static int setup_file_encryption_key(struct fscrypt_info *ci,
goto out_release_key;
}
err = fscrypt_select_encryption_impl(ci, mk->mk_secret.is_hw_wrapped);
if (err)
goto out_release_key;
switch (ci->ci_policy.version) {
case FSCRYPT_POLICY_V1:
err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.raw);
@ -500,8 +470,20 @@ int fscrypt_get_encryption_info(struct inode *inode)
goto out;
}
memcpy(crypt_info->ci_nonce, fscrypt_context_nonce(&ctx),
FS_KEY_DERIVATION_NONCE_SIZE);
switch (ctx.version) {
case FSCRYPT_CONTEXT_V1:
memcpy(crypt_info->ci_nonce, ctx.v1.nonce,
FS_KEY_DERIVATION_NONCE_SIZE);
break;
case FSCRYPT_CONTEXT_V2:
memcpy(crypt_info->ci_nonce, ctx.v2.nonce,
FS_KEY_DERIVATION_NONCE_SIZE);
break;
default:
WARN_ON(1);
res = -EINVAL;
goto out;
}
if (!fscrypt_supported_policy(&crypt_info->ci_policy, inode)) {
res = -EINVAL;
@ -601,15 +583,6 @@ int fscrypt_drop_inode(struct inode *inode)
return 0;
mk = ci->ci_master_key->payload.data[0];
/*
* With proper, non-racy use of FS_IOC_REMOVE_ENCRYPTION_KEY, all inodes
* protected by the key were cleaned by sync_filesystem(). But if
* userspace is still using the files, inodes can be dirtied between
* then and now. We mustn't lose any writes, so skip dirty inodes here.
*/
if (inode->i_state & I_DIRTY_ALL)
return 0;
/*
* Note: since we aren't holding ->mk_secret_sem, the result here can
* immediately become outdated. But there's no correctness problem with

@ -9,7 +9,7 @@
* This file implements compatibility functions for the original encryption
* policy version ("v1"), including:
*
* - Deriving per-file encryption keys using the AES-128-ECB based KDF
* - Deriving per-file keys using the AES-128-ECB based KDF
* (rather than the new method of using HKDF-SHA512)
*
* - Retrieving fscrypt master keys from process-subscribed keyrings
@ -234,7 +234,7 @@ fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key)
refcount_set(&dk->dk_refcount, 1);
dk->dk_mode = ci->ci_mode;
err = fscrypt_prepare_key(&dk->dk_key, raw_key, ci->ci_mode->keysize,
false /*is_hw_wrapped*/, ci);
ci);
if (err)
goto err_free_dk;
memcpy(dk->dk_descriptor, ci->ci_policy.v1.master_key_descriptor,
@ -252,8 +252,23 @@ err_free_dk:
static int setup_v1_file_key_direct(struct fscrypt_info *ci,
const u8 *raw_master_key)
{
const struct fscrypt_mode *mode = ci->ci_mode;
struct fscrypt_direct_key *dk;
if (!fscrypt_mode_supports_direct_key(mode)) {
fscrypt_warn(ci->ci_inode,
"Direct key mode not allowed with %s",
mode->friendly_name);
return -EINVAL;
}
if (ci->ci_policy.v1.contents_encryption_mode !=
ci->ci_policy.v1.filenames_encryption_mode) {
fscrypt_warn(ci->ci_inode,
"Direct key mode not allowed with different contents and filenames modes");
return -EINVAL;
}
dk = fscrypt_get_direct_key(ci, raw_master_key);
if (IS_ERR(dk))
return PTR_ERR(dk);
@ -282,7 +297,7 @@ static int setup_v1_file_key_derived(struct fscrypt_info *ci,
if (err)
goto out;
err = fscrypt_set_per_file_enc_key(ci, derived_key);
err = fscrypt_set_derived_key(ci, derived_key);
out:
kzfree(derived_key);
return err;

@ -29,43 +29,6 @@ bool fscrypt_policies_equal(const union fscrypt_policy *policy1,
return !memcmp(policy1, policy2, fscrypt_policy_size(policy1));
}
static bool fscrypt_valid_enc_modes(u32 contents_mode, u32 filenames_mode)
{
if (contents_mode == FSCRYPT_MODE_AES_256_XTS &&
filenames_mode == FSCRYPT_MODE_AES_256_CTS)
return true;
if (contents_mode == FSCRYPT_MODE_AES_128_CBC &&
filenames_mode == FSCRYPT_MODE_AES_128_CTS)
return true;
if (contents_mode == FSCRYPT_MODE_ADIANTUM &&
filenames_mode == FSCRYPT_MODE_ADIANTUM)
return true;
return false;
}
static bool supported_direct_key_modes(const struct inode *inode,
u32 contents_mode, u32 filenames_mode)
{
const struct fscrypt_mode *mode;
if (contents_mode != filenames_mode) {
fscrypt_warn(inode,
"Direct key flag not allowed with different contents and filenames modes");
return false;
}
mode = &fscrypt_modes[contents_mode];
if (mode->ivsize < offsetofend(union fscrypt_iv, nonce)) {
fscrypt_warn(inode, "Direct key flag not allowed with %s",
mode->friendly_name);
return false;
}
return true;
}
static bool supported_iv_ino_lblk_64_policy(
const struct fscrypt_policy_v2 *policy,
const struct inode *inode)
@ -100,82 +63,13 @@ static bool supported_iv_ino_lblk_64_policy(
return true;
}
static bool fscrypt_supported_v1_policy(const struct fscrypt_policy_v1 *policy,
const struct inode *inode)
{
if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode,
policy->filenames_encryption_mode)) {
fscrypt_warn(inode,
"Unsupported encryption modes (contents %d, filenames %d)",
policy->contents_encryption_mode,
policy->filenames_encryption_mode);
return false;
}
if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK |
FSCRYPT_POLICY_FLAG_DIRECT_KEY)) {
fscrypt_warn(inode, "Unsupported encryption flags (0x%02x)",
policy->flags);
return false;
}
if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) &&
!supported_direct_key_modes(inode, policy->contents_encryption_mode,
policy->filenames_encryption_mode))
return false;
if (IS_CASEFOLDED(inode)) {
/* With v1, there's no way to derive dirhash keys. */
fscrypt_warn(inode,
"v1 policies can't be used on casefolded directories");
return false;
}
return true;
}
static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy,
const struct inode *inode)
{
if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode,
policy->filenames_encryption_mode)) {
fscrypt_warn(inode,
"Unsupported encryption modes (contents %d, filenames %d)",
policy->contents_encryption_mode,
policy->filenames_encryption_mode);
return false;
}
if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) {
fscrypt_warn(inode, "Unsupported encryption flags (0x%02x)",
policy->flags);
return false;
}
if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) &&
!supported_direct_key_modes(inode, policy->contents_encryption_mode,
policy->filenames_encryption_mode))
return false;
if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) &&
!supported_iv_ino_lblk_64_policy(policy, inode))
return false;
if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) {
fscrypt_warn(inode, "Reserved bits set in encryption policy");
return false;
}
return true;
}
/**
* fscrypt_supported_policy - check whether an encryption policy is supported
*
* Given an encryption policy, check whether all its encryption modes and other
* settings are supported by this kernel on the given inode. (But we don't
* currently check for crypto API support here, so attempting to use an
* algorithm not configured into the crypto API will still fail later.)
* settings are supported by this kernel. (But we don't currently check
* for crypto API support here, so attempting to use an algorithm not configured
* into the crypto API will still fail later.)
*
* Return: %true if supported, else %false
*/
@ -183,10 +77,60 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u,
const struct inode *inode)
{
switch (policy_u->version) {
case FSCRYPT_POLICY_V1:
return fscrypt_supported_v1_policy(&policy_u->v1, inode);
case FSCRYPT_POLICY_V2:
return fscrypt_supported_v2_policy(&policy_u->v2, inode);
case FSCRYPT_POLICY_V1: {
const struct fscrypt_policy_v1 *policy = &policy_u->v1;
if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode,
policy->filenames_encryption_mode)) {
fscrypt_warn(inode,
"Unsupported encryption modes (contents %d, filenames %d)",
policy->contents_encryption_mode,
policy->filenames_encryption_mode);
return false;
}
if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK |
FSCRYPT_POLICY_FLAG_DIRECT_KEY)) {
fscrypt_warn(inode,
"Unsupported encryption flags (0x%02x)",
policy->flags);
return false;
}
return true;
}
case FSCRYPT_POLICY_V2: {
const struct fscrypt_policy_v2 *policy = &policy_u->v2;
if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode,
policy->filenames_encryption_mode)) {
fscrypt_warn(inode,
"Unsupported encryption modes (contents %d, filenames %d)",
policy->contents_encryption_mode,
policy->filenames_encryption_mode);
return false;
}
if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) {
fscrypt_warn(inode,
"Unsupported encryption flags (0x%02x)",
policy->flags);
return false;
}
if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) &&
!supported_iv_ino_lblk_64_policy(policy, inode))
return false;
if (memchr_inv(policy->__reserved, 0,
sizeof(policy->__reserved))) {
fscrypt_warn(inode,
"Reserved bits set in encryption policy");
return false;
}
return true;
}
}
return false;
}
@ -258,7 +202,7 @@ int fscrypt_policy_from_context(union fscrypt_policy *policy_u,
{
memset(policy_u, 0, sizeof(*policy_u));
if (!fscrypt_context_is_valid(ctx_u, ctx_size))
if (ctx_size <= 0 || ctx_size != fscrypt_context_size(ctx_u))
return -EINVAL;
switch (ctx_u->version) {
@ -481,25 +425,6 @@ int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *uarg)
}
EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_policy_ex);
/* FS_IOC_GET_ENCRYPTION_NONCE: retrieve file's encryption nonce for testing */
int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg)
{
struct inode *inode = file_inode(filp);
union fscrypt_context ctx;
int ret;
ret = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
if (ret < 0)
return ret;
if (!fscrypt_context_is_valid(&ctx, ret))
return -EINVAL;
if (copy_to_user(arg, fscrypt_context_nonce(&ctx),
FS_KEY_DERIVATION_NONCE_SIZE))
return -EFAULT;
return 0;
}
EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_nonce);
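
For reference, the userspace side of the ioctl removed above is a single call; it reads the on-disk context directly, so it does not require the key to be present (the nonce is FS_KEY_DERIVATION_NONCE_SIZE, i.e. 16 bytes):

/* Illustrative userspace call for the nonce ioctl removed above. */
#include <sys/ioctl.h>
#include <linux/fscrypt.h>

int get_nonce(int fd, unsigned char nonce[16])
{
	return ioctl(fd, FS_IOC_GET_ENCRYPTION_NONCE, nonce);
}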
/**
* fscrypt_has_permitted_context() - is a file's encryption policy permitted
* within its directory?

@ -37,7 +37,6 @@ config EXT4_FS
select CRC16
select CRYPTO
select CRYPTO_CRC32C
select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
help
This is the next generation of the ext3 filesystem.

@ -117,7 +117,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
if (IS_ENCRYPTED(inode)) {
err = fscrypt_get_encryption_info(inode);
if (err)
if (err && err != -ENOKEY)
return err;
}
@ -664,3 +664,10 @@ const struct file_operations ext4_dir_operations = {
.open = ext4_dir_open,
.release = ext4_release_dir,
};
#ifdef CONFIG_UNICODE
const struct dentry_operations ext4_dentry_ops = {
.d_hash = generic_ci_d_hash,
.d_compare = generic_ci_d_compare,
};
#endif

@ -1100,11 +1100,6 @@ resizefs_out:
return -EOPNOTSUPP;
return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
case FS_IOC_GET_ENCRYPTION_NONCE:
if (!ext4_has_feature_encrypt(sb))
return -EOPNOTSUPP;
return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
case EXT4_IOC_FSGETXATTR:
{
struct fsxattr fa;
@ -1248,7 +1243,6 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case FS_IOC_REMOVE_ENCRYPTION_KEY:
case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
case FS_IOC_GET_ENCRYPTION_NONCE:
case EXT4_IOC_SHUTDOWN:
case FS_IOC_GETFSMAP:
case FS_IOC_ENABLE_VERITY:

@ -1608,7 +1608,6 @@ static struct buffer_head *ext4_lookup_entry(struct inode *dir,
struct buffer_head *bh;
err = ext4_fname_prepare_lookup(dir, dentry, &fname);
generic_set_encrypted_ci_d_ops(dir, dentry);
if (err == -ENOENT)
return NULL;
if (err)

@ -4490,6 +4490,11 @@ no_journal:
goto failed_mount4;
}
#ifdef CONFIG_UNICODE
if (sb->s_encoding)
sb->s_d_op = &ext4_dentry_ops;
#endif
sb->s_root = d_make_root(root);
if (!sb->s_root) {
ext4_msg(sb, KERN_ERR, "get root dentry failed");

@ -5,7 +5,6 @@ config F2FS_FS
select CRYPTO
select CRYPTO_CRC32
select F2FS_FS_XATTR if FS_ENCRYPTION
select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
help
F2FS is based on Log-structured File System (LFS), which supports
versatile "flash-friendly" features. The design has been focused on

@ -108,52 +108,34 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir,
* Test whether a case-insensitive directory entry matches the filename
* being searched for.
*
* Only called for encrypted names if the key is available.
*
* Returns: 0 if the directory entry matches, more than 0 if it
* doesn't match or less than zero on error.
*/
static int f2fs_ci_compare(const struct inode *parent, const struct qstr *name,
u8 *de_name, size_t de_name_len, bool quick)
int f2fs_ci_compare(const struct inode *parent, const struct qstr *name,
const struct qstr *entry, bool quick)
{
const struct super_block *sb = parent->i_sb;
const struct unicode_map *um = sb->s_encoding;
struct fscrypt_str decrypted_name = FSTR_INIT(NULL, de_name_len);
struct qstr entry = QSTR_INIT(de_name, de_name_len);
int ret;
if (IS_ENCRYPTED(parent)) {
const struct fscrypt_str encrypted_name =
FSTR_INIT(de_name, de_name_len);
decrypted_name.name = kmalloc(de_name_len, GFP_KERNEL);
if (!decrypted_name.name)
return -ENOMEM;
ret = fscrypt_fname_disk_to_usr(parent, 0, 0, &encrypted_name,
&decrypted_name);
if (ret < 0)
goto out;
entry.name = decrypted_name.name;
entry.len = decrypted_name.len;
}
if (quick)
ret = utf8_strncasecmp_folded(um, name, &entry);
ret = utf8_strncasecmp_folded(um, name, entry);
else
ret = utf8_strncasecmp(um, name, &entry);
ret = utf8_strncasecmp(um, name, entry);
if (ret < 0) {
/* Handle invalid character sequence as either an error
* or as an opaque byte sequence.
*/
if (sb_has_enc_strict_mode(sb))
ret = -EINVAL;
else if (name->len != entry.len)
ret = 1;
else
ret = !!memcmp(name->name, entry.name, entry.len);
return -EINVAL;
if (name->len != entry->len)
return 1;
return !!memcmp(name->name, entry->name, name->len);
}
out:
kfree(decrypted_name.name);
return ret;
}
@ -191,24 +173,24 @@ static inline bool f2fs_match_name(struct f2fs_dentry_ptr *d,
{
#ifdef CONFIG_UNICODE
struct inode *parent = d->inode;
u8 *name;
int len;
struct super_block *sb = parent->i_sb;
struct qstr entry;
#endif
if (de->hash_code != namehash)
return false;
#ifdef CONFIG_UNICODE
name = d->filename[bit_pos];
len = le16_to_cpu(de->name_len);
entry.name = d->filename[bit_pos];
entry.len = de->name_len;
if (needs_casefold(parent)) {
if (sb->s_encoding && IS_CASEFOLDED(parent)) {
if (cf_str->name) {
struct qstr cf = {.name = cf_str->name,
.len = cf_str->len};
return !f2fs_ci_compare(parent, &cf, name, len, true);
return !f2fs_ci_compare(parent, &cf, &entry, true);
}
return !f2fs_ci_compare(parent, fname->usr_fname, name, len,
return !f2fs_ci_compare(parent, fname->usr_fname, &entry,
false);
}
#endif
@ -632,13 +614,13 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
const struct qstr *orig_name,
f2fs_hash_t dentry_hash,
struct inode *inode, nid_t ino, umode_t mode)
{
unsigned int bit_pos;
unsigned int level;
unsigned int current_depth;
unsigned long bidx, block;
f2fs_hash_t dentry_hash;
unsigned int nbucket, nblock;
struct page *dentry_page = NULL;
struct f2fs_dentry_block *dentry_blk = NULL;
@ -648,6 +630,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
level = 0;
slots = GET_DENTRY_SLOTS(new_name->len);
dentry_hash = f2fs_dentry_hash(dir, new_name, NULL);
current_depth = F2FS_I(dir)->i_current_depth;
if (F2FS_I(dir)->chash == dentry_hash) {
@ -733,19 +716,17 @@ int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname,
struct inode *inode, nid_t ino, umode_t mode)
{
struct qstr new_name;
f2fs_hash_t dentry_hash;
int err = -EAGAIN;
new_name.name = fname_name(fname);
new_name.len = fname_len(fname);
if (f2fs_has_inline_dentry(dir))
err = f2fs_add_inline_entry(dir, &new_name, fname,
err = f2fs_add_inline_entry(dir, &new_name, fname->usr_fname,
inode, ino, mode);
dentry_hash = f2fs_dentry_hash(dir, &new_name, fname);
if (err == -EAGAIN)
err = f2fs_add_regular_entry(dir, &new_name, fname->usr_fname,
dentry_hash, inode, ino, mode);
inode, ino, mode);
f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
return err;
@ -1018,7 +999,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
if (IS_ENCRYPTED(inode)) {
err = fscrypt_get_encryption_info(inode);
if (err)
if (err && err != -ENOKEY)
goto out;
err = fscrypt_fname_alloc_buffer(inode, F2FS_NAME_LEN, &fstr);
@ -1094,3 +1075,10 @@ const struct file_operations f2fs_dir_operations = {
.compat_ioctl = f2fs_compat_ioctl,
#endif
};
#ifdef CONFIG_UNICODE
const struct dentry_operations f2fs_dentry_ops = {
.d_hash = generic_ci_d_hash,
.d_compare = generic_ci_d_compare,
};
#endif

@ -3137,6 +3137,11 @@ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
bool hot, bool set);
struct dentry *f2fs_get_parent(struct dentry *child);
extern int f2fs_ci_compare(const struct inode *parent,
const struct qstr *name,
const struct qstr *entry,
bool quick);
/*
* dir.c
*/
@ -3170,7 +3175,7 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
const struct qstr *name, f2fs_hash_t name_hash,
unsigned int bit_pos);
int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
const struct qstr *orig_name, f2fs_hash_t dentry_hash,
const struct qstr *orig_name,
struct inode *inode, nid_t ino, umode_t mode);
int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname,
struct inode *inode, nid_t ino, umode_t mode);
@ -3203,7 +3208,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
* hash.c
*/
f2fs_hash_t f2fs_dentry_hash(const struct inode *dir,
const struct qstr *name_info, const struct fscrypt_name *fname);
const struct qstr *name_info, struct fscrypt_name *fname);
/*
* node.c
@ -3683,6 +3688,9 @@ static inline void update_sit_info(struct f2fs_sb_info *sbi) {}
#endif
extern const struct file_operations f2fs_dir_operations;
#ifdef CONFIG_UNICODE
extern const struct dentry_operations f2fs_dentry_ops;
#endif
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
@ -3713,7 +3721,7 @@ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
const struct fscrypt_name *fname,
const struct qstr *orig_name,
struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
struct page *page, struct inode *dir,

@ -2444,14 +2444,6 @@ static int f2fs_ioc_get_encryption_key_status(struct file *filp,
return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
}
static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
{
if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
return -EOPNOTSUPP;
return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
}
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@ -3419,8 +3411,6 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
return f2fs_ioc_get_encryption_key_status(filp, arg);
case FS_IOC_GET_ENCRYPTION_NONCE:
return f2fs_ioc_get_encryption_nonce(filp, arg);
case F2FS_IOC_GARBAGE_COLLECT:
return f2fs_ioc_gc(filp, arg);
case F2FS_IOC_GARBAGE_COLLECT_RANGE:
@ -3600,7 +3590,6 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case FS_IOC_REMOVE_ENCRYPTION_KEY:
case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
case FS_IOC_GET_ENCRYPTION_NONCE:
case F2FS_IOC_GARBAGE_COLLECT:
case F2FS_IOC_GARBAGE_COLLECT_RANGE:
case F2FS_IOC_WRITE_CHECKPOINT:

@ -68,9 +68,8 @@ static void str2hashbuf(const unsigned char *msg, size_t len,
*buf++ = pad;
}
static f2fs_hash_t __f2fs_dentry_hash(const struct inode *dir,
const struct qstr *name_info,
const struct fscrypt_name *fname)
static f2fs_hash_t __f2fs_dentry_hash(const struct qstr *name_info,
struct fscrypt_name *fname)
{
__u32 hash;
f2fs_hash_t f2fs_hash;
@ -80,17 +79,12 @@ static f2fs_hash_t __f2fs_dentry_hash(const struct inode *dir,
size_t len = name_info->len;
/* encrypted bigname case */
if (fname && fname->is_ciphertext_name)
if (fname && !fname->disk_name.name)
return cpu_to_le32(fname->hash);
if (is_dot_dotdot(name_info))
return 0;
if (IS_CASEFOLDED(dir) && IS_ENCRYPTED(dir)) {
f2fs_hash = cpu_to_le32(fscrypt_fname_siphash(dir, name_info));
return f2fs_hash;
}
/* Initialize the default seed for the hash checksum functions */
buf[0] = 0x67452301;
buf[1] = 0xefcdab89;
@ -112,7 +106,7 @@ static f2fs_hash_t __f2fs_dentry_hash(const struct inode *dir,
}
f2fs_hash_t f2fs_dentry_hash(const struct inode *dir,
const struct qstr *name_info, const struct fscrypt_name *fname)
const struct qstr *name_info, struct fscrypt_name *fname)
{
#ifdef CONFIG_UNICODE
struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
@ -120,30 +114,27 @@ f2fs_hash_t f2fs_dentry_hash(const struct inode *dir,
int r, dlen;
unsigned char *buff;
struct qstr folded;
const struct qstr *name = fname ? fname->usr_fname : name_info;
if (!name_info->len || !IS_CASEFOLDED(dir))
goto opaque_seq;
if (IS_ENCRYPTED(dir) && !fscrypt_has_encryption_key(dir))
goto opaque_seq;
buff = f2fs_kzalloc(sbi, sizeof(char) * PATH_MAX, GFP_KERNEL);
if (!buff)
return -ENOMEM;
dlen = utf8_casefold(um, name, buff, PATH_MAX);
dlen = utf8_casefold(um, name_info, buff, PATH_MAX);
if (dlen < 0) {
kvfree(buff);
goto opaque_seq;
}
folded.name = buff;
folded.len = dlen;
r = __f2fs_dentry_hash(dir, &folded, fname);
r = __f2fs_dentry_hash(&folded, fname);
kvfree(buff);
return r;
opaque_seq:
#endif
return __f2fs_dentry_hash(dir, name_info, fname);
return __f2fs_dentry_hash(name_info, fname);
}

@ -483,8 +483,8 @@ static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry)
ino = le32_to_cpu(de->ino);
fake_mode = f2fs_get_de_type(de) << S_SHIFT;
err = f2fs_add_regular_entry(dir, &new_name, NULL,
de->hash_code, NULL, ino, fake_mode);
err = f2fs_add_regular_entry(dir, &new_name, NULL, NULL,
ino, fake_mode);
if (err)
goto punch_dentry_pages;
@ -596,7 +596,7 @@ out:
}
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
const struct fscrypt_name *fname,
const struct qstr *orig_name,
struct inode *inode, nid_t ino, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
@ -607,7 +607,6 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
struct f2fs_dentry_ptr d;
int slots = GET_DENTRY_SLOTS(new_name->len);
struct page *page = NULL;
const struct qstr *orig_name = fname->usr_fname;
int err = 0;
ipage = f2fs_get_node_page(sbi, dir->i_ino);
@ -638,7 +637,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
f2fs_wait_on_page_writeback(ipage, NODE, true, true);
name_hash = f2fs_dentry_hash(dir, new_name, fname);
name_hash = f2fs_dentry_hash(dir, new_name, NULL);
f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
set_page_dirty(ipage);

@ -492,7 +492,6 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
}
err = fscrypt_prepare_lookup(dir, dentry, &fname);
generic_set_encrypted_ci_d_ops(dir, dentry);
if (err == -ENOENT)
goto out_splice;
if (err)

@ -3309,6 +3309,12 @@ static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
struct unicode_map *encoding;
__u16 encoding_flags;
if (f2fs_sb_has_encrypt(sbi)) {
f2fs_err(sbi,
"Can't mount with encoding and encryption");
return -EINVAL;
}
if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
&encoding_flags)) {
f2fs_err(sbi,
@ -3331,6 +3337,7 @@ static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
sbi->sb->s_encoding = encoding;
sbi->sb->s_encoding_flags = encoding_flags;
sbi->sb->s_d_op = &f2fs_dentry_ops;
}
#else
if (f2fs_sb_has_casefold(sbi)) {

@ -11,7 +11,6 @@
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
@ -2167,7 +2166,7 @@ int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags,
!capable(CAP_LINUX_IMMUTABLE))
return -EPERM;
return fscrypt_prepare_setflags(inode, oldflags, flags);
return 0;
}
EXPORT_SYMBOL(vfs_ioc_setflags_prepare);

@ -1281,54 +1281,4 @@ err:
return ret;
}
EXPORT_SYMBOL(generic_ci_d_hash);
static const struct dentry_operations generic_ci_dentry_ops = {
.d_hash = generic_ci_d_hash,
.d_compare = generic_ci_d_compare,
};
#endif
#ifdef CONFIG_FS_ENCRYPTION
static const struct dentry_operations generic_encrypted_dentry_ops = {
.d_revalidate = fscrypt_d_revalidate,
};
#endif
#if IS_ENABLED(CONFIG_UNICODE) && IS_ENABLED(CONFIG_FS_ENCRYPTION)
static const struct dentry_operations generic_encrypted_ci_dentry_ops = {
.d_hash = generic_ci_d_hash,
.d_compare = generic_ci_d_compare,
.d_revalidate = fscrypt_d_revalidate,
};
#endif
/**
* generic_set_encrypted_ci_d_ops - helper for setting d_ops for given dentry
* @dir: parent of dentry whose ops to set
* @dentry: dentry to set ops on
*
* This function sets the dentry ops for the given dentry to handle both
* casefolding and encryption of the dentry name.
*/
void generic_set_encrypted_ci_d_ops(struct inode *dir, struct dentry *dentry)
{
#ifdef CONFIG_FS_ENCRYPTION
if (dentry->d_flags & DCACHE_ENCRYPTED_NAME) {
#ifdef CONFIG_UNICODE
if (dir->i_sb->s_encoding) {
d_set_d_op(dentry, &generic_encrypted_ci_dentry_ops);
return;
}
#endif
d_set_d_op(dentry, &generic_encrypted_dentry_ops);
return;
}
#endif
#ifdef CONFIG_UNICODE
if (dir->i_sb->s_encoding) {
d_set_d_op(dentry, &generic_ci_dentry_ops);
return;
}
#endif
}
EXPORT_SYMBOL(generic_set_encrypted_ci_d_ops);

@ -7,7 +7,6 @@ config UBIFS_FS
select CRYPTO if UBIFS_FS_ZLIB
select CRYPTO_LZO if UBIFS_FS_LZO
select CRYPTO_DEFLATE if UBIFS_FS_ZLIB
select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
depends on MTD_UBI
help
UBIFS is a file system for flash devices which works on top of UBI.

@ -208,7 +208,6 @@ static int dbg_check_name(const struct ubifs_info *c,
return 0;
}
static void ubifs_set_d_ops(struct inode *dir, struct dentry *dentry);
static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
@ -222,7 +221,6 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
dbg_gen("'%pd' in dir ino %lu", dentry, dir->i_ino);
err = fscrypt_prepare_lookup(dir, dentry, &nm);
ubifs_set_d_ops(dir, dentry);
if (err == -ENOENT)
return d_splice_alias(NULL, dentry);
if (err)
@ -539,7 +537,7 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
if (encrypted) {
err = fscrypt_get_encryption_info(dir);
if (err)
if (err && err != -ENOKEY)
return err;
err = fscrypt_fname_alloc_buffer(dir, UBIFS_MAX_NLEN, &fstr);
@ -1686,19 +1684,3 @@ const struct file_operations ubifs_dir_operations = {
.compat_ioctl = ubifs_compat_ioctl,
#endif
};
#ifdef CONFIG_FS_ENCRYPTION
static const struct dentry_operations ubifs_encrypted_dentry_ops = {
.d_revalidate = fscrypt_d_revalidate,
};
#endif
static void ubifs_set_d_ops(struct inode *dir, struct dentry *dentry)
{
#ifdef CONFIG_FS_ENCRYPTION
if (dentry->d_flags & DCACHE_ENCRYPTED_NAME) {
d_set_d_op(dentry, &ubifs_encrypted_dentry_ops);
return;
}
#endif
}

@ -229,9 +229,6 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
return fscrypt_ioctl_get_key_status(file, (void __user *)arg);
case FS_IOC_GET_ENCRYPTION_NONCE:
return fscrypt_ioctl_get_nonce(file, (void __user *)arg);
default:
return -ENOTTY;
}
@ -254,7 +251,6 @@ long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case FS_IOC_REMOVE_ENCRYPTION_KEY:
case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
case FS_IOC_GET_ENCRYPTION_NONCE:
break;
default:
return -ENOIOCTLCMD;

@ -31,8 +31,6 @@ enum blk_crypto_mode_num {
* @data_unit_size_bits: log2 of data_unit_size
* @size: size of this key in bytes (determined by @crypto_mode)
* @hash: hash of this key, for keyslot manager use only
* @is_hw_wrapped: @raw points to a wrapped key to be used by an inline
* encryption hardware that accepts wrapped keys.
* @raw: the raw bytes of this key. Only the first @size bytes are used.
*
* A blk_crypto_key is immutable once created, and many bios can reference it at
@ -44,7 +42,6 @@ struct blk_crypto_key {
unsigned int data_unit_size_bits;
unsigned int size;
unsigned int hash;
bool is_hw_wrapped;
u8 raw[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE];
};

@ -18,15 +18,9 @@ bool blk_crypto_endio(struct bio *bio);
int blk_crypto_init_key(struct blk_crypto_key *blk_key,
const u8 *raw_key, unsigned int raw_key_size,
bool is_hw_wrapped,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size);
int blk_crypto_start_using_mode(enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size,
bool is_hw_wrapped_key,
struct request_queue *q);
int blk_crypto_evict_key(struct request_queue *q,
const struct blk_crypto_key *key);
@ -46,10 +40,22 @@ static inline bool blk_crypto_endio(struct bio *bio)
#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK
int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num,
unsigned int data_unit_size,
struct request_queue *q);
int blk_crypto_fallback_init(void);
#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
static inline int
blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num,
unsigned int data_unit_size,
struct request_queue *q)
{
return 0;
}
static inline int blk_crypto_fallback_init(void)
{
return 0;

@ -3212,8 +3212,6 @@ static inline bool needs_casefold(const struct inode *dir)
return 0;
}
#endif
extern void generic_set_encrypted_ci_d_ops(struct inode *dir,
struct dentry *dentry);
#ifdef CONFIG_MIGRATION
extern int buffer_migrate_page(struct address_space *,

@ -76,21 +76,6 @@ static inline bool fscrypt_has_encryption_key(const struct inode *inode)
return READ_ONCE(inode->i_crypt_info) != NULL;
}
/**
* fscrypt_needs_contents_encryption() - check whether an inode needs
* contents encryption
*
* Return: %true iff the inode is an encrypted regular file and the kernel was
* built with fscrypt support.
*
* If you need to know whether the encrypt bit is set even when the kernel was
* built without fscrypt support, you must use IS_ENCRYPTED() directly instead.
*/
static inline bool fscrypt_needs_contents_encryption(const struct inode *inode)
{
return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}
static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
{
return inode->i_sb->s_cop->dummy_context &&
@ -138,13 +123,11 @@ static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
}
extern void fscrypt_free_bounce_page(struct page *bounce_page);
extern int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags);
/* policy.c */
extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
extern int fscrypt_ioctl_get_policy_ex(struct file *, void __user *);
extern int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg);
extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
extern int fscrypt_inherit_context(struct inode *, struct inode *,
void *, bool);
@ -176,14 +159,82 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname)
extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
struct fscrypt_str *);
extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
extern int fscrypt_fname_disk_to_usr(const struct inode *inode,
u32 hash, u32 minor_hash,
const struct fscrypt_str *iname,
struct fscrypt_str *oname);
extern bool fscrypt_match_name(const struct fscrypt_name *fname,
const u8 *de_name, u32 de_name_len);
extern u64 fscrypt_fname_siphash(const struct inode *dir,
const struct qstr *name);
extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
const struct fscrypt_str *, struct fscrypt_str *);
#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32
/* Extracts the second-to-last ciphertext block; see explanation below */
#define FSCRYPT_FNAME_DIGEST(name, len) \
((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \
FS_CRYPTO_BLOCK_SIZE))
#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE
/**
* fscrypt_digested_name - alternate identifier for an on-disk filename
*
* When userspace lists an encrypted directory without access to the key,
* filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE
* bytes are shown in this abbreviated form (base64-encoded) rather than as the
* full ciphertext (base64-encoded). This is necessary to allow supporting
* filenames up to NAME_MAX bytes, since base64 encoding expands the length.
*
* To make it possible for filesystems to still find the correct directory entry
* despite not knowing the full on-disk name, we encode any filesystem-specific
* 'hash' and/or 'minor_hash' which the filesystem may need for its lookups,
* followed by the second-to-last ciphertext block of the filename. Due to the
* use of the CBC-CTS encryption mode, the second-to-last ciphertext block
* depends on the full plaintext. (Note that ciphertext stealing causes the
* last two blocks to appear "flipped".) This makes accidental collisions very
* unlikely: just a 1 in 2^128 chance for two filenames to collide even if they
* share the same filesystem-specific hashes.
*
* However, this scheme isn't immune to intentional collisions, which can be
* created by anyone able to create arbitrary plaintext filenames and view them
* without the key. Making the "digest" be a real cryptographic hash like
* SHA-256 over the full ciphertext would prevent this, although it would be
* less efficient and harder to implement, especially since the filesystem would
* need to calculate it for each directory entry examined during a search.
*/
struct fscrypt_digested_name {
u32 hash;
u32 minor_hash;
u8 digest[FSCRYPT_FNAME_DIGEST_SIZE];
};
/**
* fscrypt_match_name() - test whether the given name matches a directory entry
* @fname: the name being searched for
* @de_name: the name from the directory entry
* @de_name_len: the length of @de_name in bytes
*
* Normally @fname->disk_name will be set, and in that case we simply compare
* that to the name stored in the directory entry. The only exception is that
* if we don't have the key for an encrypted directory and a filename in it is
* very long, then we won't have the full disk_name and we'll instead need to
* match against the fscrypt_digested_name.
*
* Return: %true if the name matches, otherwise %false.
*/
static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
const u8 *de_name, u32 de_name_len)
{
if (unlikely(!fname->disk_name.name)) {
const struct fscrypt_digested_name *n =
(const void *)fname->crypto_buf.name;
if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_'))
return false;
if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)
return false;
return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len),
n->digest, FSCRYPT_FNAME_DIGEST_SIZE);
}
if (de_name_len != fname->disk_name.len)
return false;
return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
}
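
A sketch of the intended call pattern (the dirent structure and function here are hypothetical; real filesystems use their own on-disk formats, as the ext4 and f2fs hunks elsewhere in this diff show):

/* Hypothetical example only; "myfs_dirent" is not a real structure. */
struct myfs_dirent {
	u8 name_len;
	char name[255];
};

static bool myfs_entry_matches(const struct fscrypt_name *fname,
			       const struct myfs_dirent *de)
{
	/* With the key, this compares fname->disk_name against the on-disk
	 * (encrypted) name; without it, long names fall back to the
	 * digested form described above. */
	return fscrypt_match_name(fname, (const u8 *)de->name, de->name_len);
}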
/* bio.c */
extern void fscrypt_decrypt_bio(struct bio *);
@ -201,8 +252,6 @@ extern int __fscrypt_prepare_rename(struct inode *old_dir,
unsigned int flags);
extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
struct fscrypt_name *fname);
extern int fscrypt_prepare_setflags(struct inode *inode,
unsigned int oldflags, unsigned int flags);
extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
unsigned int max_len,
struct fscrypt_str *disk_link);
@@ -219,11 +268,6 @@ static inline bool fscrypt_has_encryption_key(const struct inode *inode)
return false;
}
static inline bool fscrypt_needs_contents_encryption(const struct inode *inode)
{
return false;
}
static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
{
return false;
@@ -303,11 +347,6 @@ static inline int fscrypt_ioctl_get_policy_ex(struct file *filp,
return -EOPNOTSUPP;
}
static inline int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg)
{
return -EOPNOTSUPP;
}
static inline int fscrypt_has_permitted_context(struct inode *parent,
struct inode *child)
{
@@ -412,7 +451,7 @@ static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
return;
}
static inline int fscrypt_fname_disk_to_usr(const struct inode *inode,
static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
u32 hash, u32 minor_hash,
const struct fscrypt_str *iname,
struct fscrypt_str *oname)
@@ -429,13 +468,6 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
}
static inline u64 fscrypt_fname_siphash(const struct inode *dir,
const struct qstr *name)
{
WARN_ON_ONCE(1);
return 0;
}
/* bio.c */
static inline void fscrypt_decrypt_bio(struct bio *bio)
{
@@ -478,13 +510,6 @@ static inline int __fscrypt_prepare_lookup(struct inode *dir,
return -EOPNOTSUPP;
}
static inline int fscrypt_prepare_setflags(struct inode *inode,
unsigned int oldflags,
unsigned int flags)
{
return 0;
}
static inline int __fscrypt_prepare_symlink(struct inode *dir,
unsigned int len,
unsigned int max_len,
@@ -677,9 +702,8 @@ static inline int fscrypt_prepare_rename(struct inode *old_dir,
* filenames are presented in encrypted form. Therefore, we'll try to set up
* the directory's encryption key, but even without it the lookup can continue.
*
* After calling this function, a filesystem should ensure that its dentry
* operations contain fscrypt_d_revalidate if DCACHE_ENCRYPTED_NAME was set,
* so that the dentry can be invalidated if the key is later added.
* This also installs a custom ->d_revalidate() method which will invalidate the
* dentry if it was created without the key and the key is later added.
*
* Return: 0 on success; -ENOENT if key is unavailable but the filename isn't a
* correctly formed encoded ciphertext name, so a negative dentry should be

@@ -8,15 +8,6 @@
#include <linux/bio.h>
/* Inline crypto feature bits. Must set at least one. */
enum {
/* Support for standard software-specified keys */
BLK_CRYPTO_FEATURE_STANDARD_KEYS = BIT(0),
/* Support for hardware-wrapped keys */
BLK_CRYPTO_FEATURE_WRAPPED_KEYS = BIT(1),
};
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct keyslot_manager;
@@ -54,7 +45,6 @@ struct keyslot_manager *keyslot_manager_create(
struct device *dev,
unsigned int num_slots,
const struct keyslot_mgmt_ll_ops *ksm_ops,
unsigned int features,
const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX],
void *ll_priv_data);
@@ -67,8 +57,7 @@ void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot);
bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size,
bool is_hw_wrapped_key);
unsigned int data_unit_size);
int keyslot_manager_evict_key(struct keyslot_manager *ksm,
const struct blk_crypto_key *key);
@@ -82,7 +71,6 @@ void keyslot_manager_destroy(struct keyslot_manager *ksm);
struct keyslot_manager *keyslot_manager_create_passthrough(
struct device *dev,
const struct keyslot_mgmt_ll_ops *ksm_ops,
unsigned int features,
const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX],
void *ll_priv_data);
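For orientation, a minimal sketch of how a driver might call keyslot_manager_create() once this revert drops the 'features' argument; struct my_hba, MY_NUM_KEYSLOTS and the per-mode capability value are placeholders, and my_ksm_ops is assumed to be populated with the driver's real callbacks elsewhere.

/* Hypothetical driver glue; names prefixed my_/MY_ are placeholders. */
static const struct keyslot_mgmt_ll_ops my_ksm_ops;	/* callbacks filled in elsewhere */

static int my_init_keyslot_manager(struct my_hba *hba)
{
	unsigned int crypto_modes[BLK_ENCRYPTION_MODE_MAX] = {
		/* Advertise AES-256-XTS; exact capability encoding is driver-specific. */
		[BLK_ENCRYPTION_MODE_AES_256_XTS] = 4096,
	};

	hba->ksm = keyslot_manager_create(hba->dev, MY_NUM_KEYSLOTS,
					  &my_ksm_ops, crypto_modes, hba);
	return hba->ksm ? 0 : -ENOMEM;
}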

@@ -8,7 +8,6 @@
#ifndef _UAPI_LINUX_FSCRYPT_H
#define _UAPI_LINUX_FSCRYPT_H
#include <linux/ioctl.h>
#include <linux/types.h>
/* Encryption policy flags */
@@ -110,22 +109,11 @@ struct fscrypt_key_specifier {
} u;
};
/*
* Payload of Linux keyring key of type "fscrypt-provisioning", referenced by
* fscrypt_add_key_arg::key_id as an alternative to fscrypt_add_key_arg::raw.
*/
struct fscrypt_provisioning_key_payload {
__u32 type;
__u32 __reserved;
__u8 raw[];
};
/* Struct passed to FS_IOC_ADD_ENCRYPTION_KEY */
struct fscrypt_add_key_arg {
struct fscrypt_key_specifier key_spec;
__u32 raw_size;
__u32 key_id;
__u32 __reserved[7];
__u32 __reserved[8];
/* N.B.: "temporary" flag, not reserved upstream */
#define __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED 0x00000001
__u32 __flags;
@@ -166,7 +154,6 @@ #define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg)
#define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg)
#define FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS _IOWR('f', 25, struct fscrypt_remove_key_arg)
#define FS_IOC_GET_ENCRYPTION_KEY_STATUS _IOWR('f', 26, struct fscrypt_get_key_status_arg)
#define FS_IOC_GET_ENCRYPTION_NONCE _IOR('f', 27, __u8[16])
/**********************************************************************/
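As a reminder of the raw-key path that remains after the provisioning-key/key_id fields above are dropped, here is a hedged userspace sketch of FS_IOC_ADD_ENCRYPTION_KEY; it assumes UAPI headers providing these definitions, and the helper name, path handling, and key length are purely illustrative.

/* Hypothetical userspace helper; error handling is deliberately minimal. */
#include <fcntl.h>
#include <linux/fscrypt.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int add_raw_key(const char *mnt, const unsigned char *key, size_t key_len)
{
	struct fscrypt_add_key_arg *arg;
	int fd, ret;

	arg = calloc(1, sizeof(*arg) + key_len);
	if (!arg)
		return -1;
	arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
	arg->raw_size = key_len;
	memcpy(arg->raw, key, key_len);

	fd = open(mnt, O_RDONLY);	/* any file or directory on the filesystem */
	if (fd < 0) {
		free(arg);
		return -1;
	}
	ret = ioctl(fd, FS_IOC_ADD_ENCRYPTION_KEY, arg);
	/* On success the kernel writes the key identifier into
	 * arg->key_spec.u.identifier; a real caller would save it here. */
	close(fd);
	free(arg);
	return ret;
}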
