Add support for block disk encryption

With the new file encryption framework, the crypto vops should also support
crypto configuration for the block disk encryption feature.

Change-Id: I1504a023f91376b207d9af19ad097405a3a42c85
Signed-off-by: Neeraj Soni <neersoni@codeaurora.org>
tirimbino
Neeraj Soni 4 years ago
parent a445c3365f
commit b8f1b6a6df
  1. 1
      arch/arm64/configs/vendor/sm8150-perf_defconfig
  2. 1
      arch/arm64/configs/vendor/sm8150_defconfig
  3. 1
      arch/arm64/configs/vendor/trinket-perf_defconfig
  4. 1
      arch/arm64/configs/vendor/trinket_defconfig
  5. 10
      drivers/crypto/Kconfig
  6. 280
      drivers/crypto/msm/ice.c
  7. 37
      drivers/mmc/host/cmdq_hci-crypto-qti.c
  8. 65
      drivers/scsi/ufs/ufshcd-crypto-qti.c
  9. 3
      drivers/scsi/ufs/ufshcd-crypto-qti.h
  10. 7
      drivers/scsi/ufs/ufshcd-crypto.c
  11. 8
      include/crypto/ice.h
  12. 1
      include/linux/bio-crypt-ctx.h

@ -725,6 +725,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

@ -810,6 +810,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

@ -704,6 +704,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

@ -790,6 +790,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

@ -771,4 +771,14 @@ config CRYPTO_DEV_ARTPEC6
To compile this driver as a module, choose M here.
config CRYPTO_DEV_QCOM_ICE
tristate "Inline Crypto Module"
default n
depends on BLK_DEV_DM
help
This driver supports Inline Crypto Engine for QTI chipsets, MSM8994
and later, to accelerate crypto operations for storage needs.
To compile this driver as a module, choose M here: the
module will be called ice.
endif # CRYPTO_HW

@ -25,7 +25,6 @@
#include <soc/qcom/scm.h>
#include <soc/qcom/qseecomi.h>
#include "iceregs.h"
#include <linux/pfk.h>
#include <linux/atomic.h>
#include <linux/wait.h>
@ -68,7 +67,6 @@
#define ICE_CRYPTO_CXT_FBE 2
static int ice_fde_flag;
struct ice_clk_info {
struct list_head list;
struct clk *clk;
@ -120,24 +118,13 @@ struct ice_device {
wait_queue_head_t block_suspend_ice_queue;
};
static int qcom_ice_init(struct ice_device *ice_dev, void *host_controller_data,
ice_error_cb error_cb);
static int qti_ice_setting_config(struct request *req,
struct platform_device *pdev,
struct ice_crypto_setting *crypto_data,
struct ice_data_setting *setting, uint32_t cxt)
{
struct ice_device *ice_dev = platform_get_drvdata(pdev);
if (!ice_dev) {
pr_debug("%s no ICE device\n", __func__);
/* make the caller finish peacefully */
return 0;
}
if (ice_dev->is_ice_disable_fuse_blown) {
pr_err("%s ICE disabled fuse is blown\n", __func__);
return -EPERM;
}
if (!setting)
return -EINVAL;
@ -297,23 +284,6 @@ static int qcom_ice_get_vreg(struct ice_device *ice_dev)
return ret;
}
/*
 * qcom_ice_config_proc_ignore() - set bit 0x800 in the ICE ADVANCED_CONTROL
 * register, but only on ICE HW revision exactly 2.0.0.
 *
 * NOTE(review): presumably a hardware-errata workaround specific to that
 * stepping ("processor ignore" mode) — confirm against the ICE hardware
 * programming guide. The mb() ensures the register write is posted before
 * any subsequent ICE configuration.
 */
static void qcom_ice_config_proc_ignore(struct ice_device *ice_dev)
{
u32 regval;
/* Apply only to ICE major 2, minor 0, step 0. */
if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2 &&
ICE_REV(ice_dev->ice_hw_version, MINOR) == 0 &&
ICE_REV(ice_dev->ice_hw_version, STEP) == 0) {
regval = qcom_ice_readl(ice_dev,
QCOM_ICE_REGS_ADVANCED_CONTROL);
regval |= 0x800;
qcom_ice_writel(ice_dev, regval,
QCOM_ICE_REGS_ADVANCED_CONTROL);
/* Ensure register is updated */
mb();
}
}
static void qcom_ice_low_power_mode_enable(struct ice_device *ice_dev)
{
u32 regval;
@ -476,45 +446,6 @@ static int qcom_ice_enable(struct ice_device *ice_dev)
return 0;
}
/*
 * qcom_ice_verify_ice() - read the ICE VERSION register, reject hardware
 * whose major revision is newer than this driver supports, and cache the
 * raw revision word in ice_dev->ice_hw_version for later ICE_REV() checks.
 *
 * Returns 0 on success, -ENODEV for an unknown (too-new) major revision.
 */
static int qcom_ice_verify_ice(struct ice_device *ice_dev)
{
unsigned int rev;
unsigned int maj_rev, min_rev, step_rev;
rev = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_VERSION);
/* Unpack major/minor/step fields from the packed revision word. */
maj_rev = (rev & ICE_CORE_MAJOR_REV_MASK) >> ICE_CORE_MAJOR_REV;
min_rev = (rev & ICE_CORE_MINOR_REV_MASK) >> ICE_CORE_MINOR_REV;
step_rev = (rev & ICE_CORE_STEP_REV_MASK) >> ICE_CORE_STEP_REV;
if (maj_rev > ICE_CORE_CURRENT_MAJOR_VERSION) {
pr_err("%s: Unknown QC ICE device at %lu, rev %d.%d.%d\n",
__func__, (unsigned long)ice_dev->mmio,
maj_rev, min_rev, step_rev);
return -ENODEV;
}
/* Cache the raw register value; ICE_REV() decodes it elsewhere. */
ice_dev->ice_hw_version = rev;
dev_info(ice_dev->pdev, "QC ICE %d.%d.%d device found @0x%pK\n",
maj_rev, min_rev, step_rev,
ice_dev->mmio);
return 0;
}
/*
 * qcom_ice_enable_intr() - unmask the ICE non-secure interrupt sources by
 * clearing QCOM_ICE_NON_SEC_IRQ_MASK bits in the non-secure IRQ mask
 * register (bits set in the mask register disable the corresponding IRQ).
 */
static void qcom_ice_enable_intr(struct ice_device *ice_dev)
{
unsigned int reg;
reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
reg &= ~QCOM_ICE_NON_SEC_IRQ_MASK;
qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
/*
 * Ensure previous instructions was completed before issuing next
 * ICE initialization/optimization instruction
 */
mb();
}
static void qcom_ice_disable_intr(struct ice_device *ice_dev)
{
unsigned int reg;
@ -627,25 +558,13 @@ out:
}
static int qcom_ice_get_device_tree_data(struct platform_device *pdev,
struct ice_device *ice_dev)
struct ice_device *ice_dev)
{
struct device *dev = &pdev->dev;
int rc = -1;
int irq;
ice_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!ice_dev->res) {
pr_err("%s: No memory available for IORESOURCE\n", __func__);
return -ENOMEM;
}
ice_dev->mmio = devm_ioremap_resource(dev, ice_dev->res);
if (IS_ERR(ice_dev->mmio)) {
rc = PTR_ERR(ice_dev->mmio);
pr_err("%s: Error = %d mapping ICE io memory\n", __func__, rc);
goto out;
}
ice_dev->mmio = NULL;
if (!of_parse_phandle(pdev->dev.of_node, "vdd-hba-supply", 0)) {
pr_err("%s: No vdd-hba-supply regulator, assuming not needed\n",
__func__);
@ -688,7 +607,7 @@ static int qcom_ice_get_device_tree_data(struct platform_device *pdev,
err_dev:
if (rc && ice_dev->mmio)
devm_iounmap(dev, ice_dev->mmio);
out:
//out:
return rc;
}
@ -810,7 +729,12 @@ static int qcom_ice_probe(struct platform_device *pdev)
* We would enable ICE when first request for crypto
* operation arrives.
*/
ice_dev->is_ice_enabled = false;
rc = qcom_ice_init(ice_dev, NULL, NULL);
if (rc) {
pr_err("create character device failed.\n");
goto err_ice_dev;
}
ice_dev->is_ice_enabled = true;
platform_set_drvdata(pdev, ice_dev);
list_add_tail(&ice_dev->list, &ice_devices);
@ -999,31 +923,6 @@ out:
return ret;
}
/*
 * qcom_ice_secure_ice_init() - unmask ICE secure interrupt sources.
 *
 * The secure IRQ mask register lives in secure address space, so it is
 * accessed via SCM calls (scm_io_read/scm_io_write) rather than the
 * driver's normal MMIO helpers.
 *
 * NOTE(review): the error check below looks inverted — scm_io_write()
 * conventionally returns 0 on success, yet pr_err() fires when ret == 0
 * and the function then returns that value. Confirm the scm_io_write
 * return convention on this kernel branch.
 */
static int qcom_ice_secure_ice_init(struct ice_device *ice_dev)
{
/* We need to enable source for ICE secure interrupts */
int ret = 0;
u32 regval;
regval = scm_io_read((unsigned long)ice_dev->res +
QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK);
regval &= ~QCOM_ICE_SEC_IRQ_MASK;
ret = scm_io_write((unsigned long)ice_dev->res +
QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK, regval);
/*
 * Ensure previous instructions was completed before issuing next
 * ICE initialization/optimization instruction
 */
mb();
if (!ret)
pr_err("%s: failed(0x%x) to init secure ICE config\n",
__func__, ret);
return ret;
}
static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev)
{
int ret = 0, scm_ret = 0;
@ -1064,7 +963,6 @@ out:
static int qcom_ice_finish_init(struct ice_device *ice_dev)
{
unsigned int reg;
int err = 0;
if (!ice_dev) {
@ -1090,53 +988,12 @@ static int qcom_ice_finish_init(struct ice_device *ice_dev)
* configurations of host & ice. It is prudent to restore the config
*/
err = qcom_ice_update_sec_cfg(ice_dev);
if (err)
goto out;
err = qcom_ice_verify_ice(ice_dev);
if (err)
goto out;
/* if ICE_DISABLE_FUSE is blown, return immediately
* Currently, FORCE HW Keys are also disabled, since
* there is no use case for their usage neither in FDE
* nor in PFE
*/
reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_FUSE_SETTING);
reg &= (ICE_FUSE_SETTING_MASK |
ICE_FORCE_HW_KEY0_SETTING_MASK |
ICE_FORCE_HW_KEY1_SETTING_MASK);
if (reg) {
ice_dev->is_ice_disable_fuse_blown = true;
pr_err("%s: Error: ICE_ERROR_HW_DISABLE_FUSE_BLOWN\n",
__func__);
err = -EPERM;
goto out;
}
/* TZ side of ICE driver would handle secure init of ICE HW from v2 */
if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1 &&
!qcom_ice_secure_ice_init(ice_dev)) {
pr_err("%s: Error: ICE_ERROR_ICE_TZ_INIT_FAILED\n", __func__);
err = -EFAULT;
goto out;
}
init_waitqueue_head(&ice_dev->block_suspend_ice_queue);
qcom_ice_low_power_mode_enable(ice_dev);
qcom_ice_optimization_enable(ice_dev);
qcom_ice_config_proc_ignore(ice_dev);
qcom_ice_enable_test_bus_config(ice_dev);
qcom_ice_enable(ice_dev);
ice_dev->is_ice_enabled = true;
qcom_ice_enable_intr(ice_dev);
atomic_set(&ice_dev->is_ice_suspended, 0);
atomic_set(&ice_dev->is_ice_busy, 0);
out:
return err;
}
static int qcom_ice_init(struct platform_device *pdev,
static int qcom_ice_init(struct ice_device *ice_dev,
void *host_controller_data,
ice_error_cb error_cb)
{
@ -1147,13 +1004,6 @@ static int qcom_ice_init(struct platform_device *pdev,
* When any request for data transfer is received, it would enable
* the ICE for that particular request
*/
struct ice_device *ice_dev;
ice_dev = platform_get_drvdata(pdev);
if (!ice_dev) {
pr_err("%s: invalid device\n", __func__);
return -EINVAL;
}
ice_dev->error_cb = error_cb;
ice_dev->host_controller_data = host_controller_data;
@ -1201,12 +1051,6 @@ static int qcom_ice_finish_power_collapse(struct ice_device *ice_dev)
if (err)
goto out;
/*
* for PFE case, clear the cached ICE key table,
* this will force keys to be reconfigured
* per each next transaction
*/
pfk_clear_on_reset();
}
}
@ -1444,8 +1288,8 @@ static void qcom_ice_debug(struct platform_device *pdev)
qcom_ice_dump_test_bus(ice_dev);
pr_err("%s: ICE reset start time: %llu ICE reset done time: %llu\n",
ice_dev->ice_instance_type,
(unsigned long long)ice_dev->ice_reset_start_time.tv64,
(unsigned long long)ice_dev->ice_reset_complete_time.tv64);
(unsigned long long)ice_dev->ice_reset_start_time,
(unsigned long long)ice_dev->ice_reset_complete_time);
if (ktime_to_us(ktime_sub(ice_dev->ice_reset_complete_time,
ice_dev->ice_reset_start_time)) > 0)
@ -1473,28 +1317,15 @@ static int qcom_ice_reset(struct platform_device *pdev)
return qcom_ice_finish_power_collapse(ice_dev);
}
static int qcom_ice_config_start(struct platform_device *pdev,
struct request *req,
struct ice_data_setting *setting, bool async)
int qcom_ice_config_start(struct request *req, struct ice_data_setting *setting)
{
struct ice_crypto_setting pfk_crypto_data = {0};
struct ice_crypto_setting ice_data = {0};
int ret = 0;
bool is_pfe = false;
unsigned long sec_end = 0;
sector_t data_size;
struct ice_device *ice_dev;
if (!pdev || !req) {
if (!req) {
pr_err("%s: Invalid params passed\n", __func__);
return -EINVAL;
}
ice_dev = platform_get_drvdata(pdev);
if (!ice_dev) {
pr_err("%s: INVALID ice_dev\n", __func__);
return -EINVAL;
}
/*
* It is not an error to have a request with no bio
@ -1511,30 +1342,6 @@ static int qcom_ice_config_start(struct platform_device *pdev,
return 0;
}
if (atomic_read(&ice_dev->is_ice_suspended) == 1)
return -EINVAL;
if (async)
atomic_set(&ice_dev->is_ice_busy, 1);
ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async);
if (async) {
atomic_set(&ice_dev->is_ice_busy, 0);
wake_up_interruptible(&ice_dev->block_suspend_ice_queue);
}
if (is_pfe) {
if (ret) {
if (ret != -EBUSY && ret != -EAGAIN)
pr_err("%s error %d while configuring ice key for PFE\n",
__func__, ret);
return ret;
}
return qti_ice_setting_config(req, pdev,
&pfk_crypto_data, setting, ICE_CRYPTO_CXT_FBE);
}
if (ice_fde_flag && req->part && req->part->info
&& req->part->info->volname[0]) {
if (!strcmp(req->part->info->volname, "userdata")) {
@ -1559,7 +1366,7 @@ static int qcom_ice_config_start(struct platform_device *pdev,
if ((req->__sector + data_size) > sec_end)
return 0;
else
return qti_ice_setting_config(req, pdev,
return qti_ice_setting_config(req,
&ice_data, setting,
ICE_CRYPTO_CXT_FDE);
}
@ -1575,34 +1382,6 @@ static int qcom_ice_config_start(struct platform_device *pdev,
}
EXPORT_SYMBOL(qcom_ice_config_start);
/*
 * qcom_ice_config_end() - per-request teardown of the ICE key configuration
 * for the PFE (per-file encryption) path.
 *
 * Delegates to pfk_load_key_end(); if the request was not a PFE request
 * (is_pfe stays false) the result is ignored and 0 is returned. A request
 * with no bio is not an error.
 *
 * NOTE(review): this function is being removed by the commit this hunk
 * belongs to (the new variant-ops table has no .config_end).
 */
static int qcom_ice_config_end(struct request *req)
{
int ret = 0;
bool is_pfe = false;
if (!req) {
pr_err("%s: Invalid params passed\n", __func__);
return -EINVAL;
}
if (!req->bio) {
/* It is not an error to have a request with no bio */
return 0;
}
ret = pfk_load_key_end(req->bio, &is_pfe);
if (is_pfe) {
if (ret != 0)
pr_err("%s error %d while end configuring ice key for PFE\n",
__func__, ret);
return ret;
}
return 0;
}
EXPORT_SYMBOL(qcom_ice_config_end);
static int qcom_ice_status(struct platform_device *pdev)
{
struct ice_device *ice_dev;
@ -1628,18 +1407,6 @@ static int qcom_ice_status(struct platform_device *pdev)
}
/*
 * Old (pre-commit) variant-ops table: exposes init and config_end in
 * addition to the ops kept by the replacement table later in this commit.
 * This definition is being removed by the diff this hunk belongs to.
 */
struct qcom_ice_variant_ops qcom_ice_ops = {
.name = "qcom",
.init = qcom_ice_init,
.reset = qcom_ice_reset,
.resume = qcom_ice_resume,
.suspend = qcom_ice_suspend,
.config_start = qcom_ice_config_start,
.config_end = qcom_ice_config_end,
.status = qcom_ice_status,
.debug = qcom_ice_debug,
};
struct platform_device *qcom_ice_get_pdevice(struct device_node *node)
{
struct platform_device *ice_pdev = NULL;
@ -1805,13 +1572,22 @@ int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
if (!ice_dev || (ice_dev->is_ice_enabled == false))
return ret;
if (enable)
return enable_ice_setup(ice_dev);
else
return disable_ice_setup(ice_dev);
}
/*
 * New variant-ops table added by this commit: now static (file-local),
 * with .init and .config_end dropped — init moved into probe via
 * qcom_ice_init(), and config_start is now exported directly as
 * qcom_ice_config_start(req, setting).
 */
static struct qcom_ice_variant_ops qcom_ice_ops = {
.name = "qcom",
.reset = qcom_ice_reset,
.resume = qcom_ice_resume,
.suspend = qcom_ice_suspend,
.config_start = qcom_ice_config_start,
.status = qcom_ice_status,
.debug = qcom_ice_debug,
};
struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node)
{
return &qcom_ice_ops;

@ -21,6 +21,10 @@
#include <linux/crypto-qti-common.h>
#include <linux/pm_runtime.h>
#include <linux/atomic.h>
#if IS_ENABLED(CONFIG_CRYPTO_DEV_QCOM_ICE)
#include <crypto/ice.h>
#include <linux/blkdev.h>
#endif
#define RAW_SECRET_SIZE 32
#define MINIMUM_DUN_SIZE 512
@ -320,12 +324,34 @@ int cmdq_crypto_qti_prep_desc(struct cmdq_host *host, struct mmc_request *mrq,
{
struct bio_crypt_ctx *bc;
struct request *req = mrq->req;
int ret;
int ret = 0;
int val = 0;
if (!req->bio || !bio_crypt_should_process(req)) {
*ice_ctx = 0;
return 0;
#if IS_ENABLED(CONFIG_CRYPTO_DEV_QCOM_ICE)
struct ice_data_setting setting;
bool bypass = true;
short key_index = 0;
#endif
*ice_ctx = 0;
if (!req || !req->bio)
return ret;
if (!bio_crypt_should_process(req)) {
#if IS_ENABLED(CONFIG_CRYPTO_DEV_QCOM_ICE)
ret = qcom_ice_config_start(req, &setting);
if (!ret) {
key_index = setting.crypto_data.key_index;
bypass = (rq_data_dir(req) == WRITE) ?
setting.encr_bypass : setting.decr_bypass;
*ice_ctx = DATA_UNIT_NUM(req->__sector) |
CRYPTO_CONFIG_INDEX(key_index) |
CRYPTO_ENABLE(!bypass);
} else {
pr_err("%s crypto config failed err = %d\n", __func__,
ret);
}
#endif
return ret;
}
if (WARN_ON(!cmdq_is_crypto_enabled(host))) {
/*
@ -339,6 +365,7 @@ int cmdq_crypto_qti_prep_desc(struct cmdq_host *host, struct mmc_request *mrq,
if (!cmdq_keyslot_valid(host, bc->bc_keyslot))
return -EINVAL;
if (!(atomic_read(&keycache) & (1 << bc->bc_keyslot))) {
ret = cmdq_crypto_qti_keyslot_program(host->ksm, bc->bc_key,
bc->bc_keyslot);

@ -16,7 +16,10 @@
#include <crypto/algapi.h>
#include <linux/platform_device.h>
#include <linux/crypto-qti-common.h>
#if IS_ENABLED(CONFIG_CRYPTO_DEV_QCOM_ICE)
#include <crypto/ice.h>
#include <linux/blkdev.h>
#endif
#include "ufshcd-crypto-qti.h"
#define MINIMUM_DUN_SIZE 512
@ -30,6 +33,7 @@ static struct ufs_hba_crypto_variant_ops ufshcd_crypto_qti_variant_ops = {
.disable = ufshcd_crypto_qti_disable,
.resume = ufshcd_crypto_qti_resume,
.debug = ufshcd_crypto_qti_debug,
.prepare_lrbp_crypto = ufshcd_crypto_qti_prep_lrbp_crypto,
};
static uint8_t get_data_unit_size_mask(unsigned int data_unit_size)
@ -289,6 +293,65 @@ int ufshcd_crypto_qti_init_crypto(struct ufs_hba *hba,
return err;
}
/*
 * ufshcd_crypto_qti_prep_lrbp_crypto() - fill the UFS local reference block
 * (lrbp) crypto fields for a SCSI command.
 *
 * Two paths:
 *  - No bio crypt context (bio_crypt_should_process() false): fall back to
 *    the legacy ICE FDE path — qcom_ice_config_start() decides bypass vs.
 *    encrypt and supplies the key index; DUN is the sector number scaled to
 *    4 KB data units. Compiled only when CONFIG_CRYPTO_DEV_QCOM_ICE is on.
 *  - Bio crypt context present: program lrbp from the context's keyslot;
 *    for ext4 the DUN is derived from the 512-byte sector (>> 3 gives 4 KB
 *    units), otherwise bc_dun[0] is used directly.
 *
 * Returns 0 on success (including the benign "no bio" case), -EINVAL when
 * crypto is requested but not enabled or the keyslot is invalid, or the
 * error from qcom_ice_config_start().
 */
int ufshcd_crypto_qti_prep_lrbp_crypto(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp)
{
struct bio_crypt_ctx *bc;
int ret = 0;
#if IS_ENABLED(CONFIG_CRYPTO_DEV_QCOM_ICE)
struct ice_data_setting setting;
bool bypass = true;
short key_index = 0;
#endif
struct request *req;
/* Default: no inline crypto for this request. */
lrbp->crypto_enable = false;
req = cmd->request;
if (!req || !req->bio)
return ret;
if (!bio_crypt_should_process(req)) {
#if IS_ENABLED(CONFIG_CRYPTO_DEV_QCOM_ICE)
/* Legacy FDE path: ask ICE for the per-request key setting. */
ret = qcom_ice_config_start(req, &setting);
if (!ret) {
key_index = setting.crypto_data.key_index;
/* Direction selects which bypass flag applies. */
bypass = (rq_data_dir(req) == WRITE) ?
setting.encr_bypass : setting.decr_bypass;
lrbp->crypto_enable = !bypass;
lrbp->crypto_key_slot = key_index;
/* DUN in 4 KB data units. */
lrbp->data_unit_num = req->bio->bi_iter.bi_sector >>
ICE_CRYPTO_DATA_UNIT_4_KB;
} else {
pr_err("%s crypto config failed err = %d\n", __func__,
ret);
}
#endif
return ret;
}
bc = req->bio->bi_crypt_context;
if (WARN_ON(!ufshcd_is_crypto_enabled(hba))) {
/*
 * Upper layer asked us to do inline encryption
 * but that isn't enabled, so we fail this request.
 */
return -EINVAL;
}
if (!ufshcd_keyslot_valid(hba, bc->bc_keyslot))
return -EINVAL;
lrbp->crypto_enable = true;
lrbp->crypto_key_slot = bc->bc_keyslot;
if (bc->is_ext4) {
/* ext4: 512-byte sectors >> 3 gives 4 KB data-unit number. */
lrbp->data_unit_num = (u64)cmd->request->bio->bi_iter.bi_sector;
lrbp->data_unit_num >>= 3;
} else {
lrbp->data_unit_num = bc->bc_dun[0];
}
return 0;
}
int ufshcd_crypto_qti_debug(struct ufs_hba *hba)
{
return crypto_qti_debug(hba->crypto_vops->priv);

@ -41,6 +41,9 @@ int ufshcd_crypto_qti_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op);
int ufshcd_crypto_qti_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op);
int ufshcd_crypto_qti_prep_lrbp_crypto(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp);
#ifdef CONFIG_SCSI_UFS_CRYPTO_QTI
void ufshcd_crypto_qti_set_vops(struct ufs_hba *hba);
#else

@ -400,12 +400,7 @@ int ufshcd_prepare_lrbp_crypto_spec(struct ufs_hba *hba,
lrbp->crypto_enable = true;
lrbp->crypto_key_slot = bc->bc_keyslot;
if (bc->is_ext4) {
lrbp->data_unit_num = (u64)cmd->request->bio->bi_iter.bi_sector;
lrbp->data_unit_num >>= 3;
} else {
lrbp->data_unit_num = bc->bc_dun[0];
}
lrbp->data_unit_num = bc->bc_dun[0];
return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_prepare_lrbp_crypto_spec);

@ -1,4 +1,4 @@
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -69,6 +69,8 @@ struct platform_device *qcom_ice_get_pdevice(struct device_node *node);
#ifdef CONFIG_CRYPTO_DEV_QCOM_ICE
int qcom_ice_setup_ice_hw(const char *storage_type, int enable);
void qcom_ice_set_fde_flag(int flag);
int qcom_ice_config_start(struct request *req,
struct ice_data_setting *setting);
#else
static inline int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
{
@ -79,12 +81,10 @@ static inline void qcom_ice_set_fde_flag(int flag) {}
struct qcom_ice_variant_ops {
const char *name;
int (*init)(struct platform_device *, void *, ice_error_cb);
int (*reset)(struct platform_device *);
int (*resume)(struct platform_device *);
int (*suspend)(struct platform_device *);
int (*config_start)(struct platform_device *, struct request *,
struct ice_data_setting *, bool);
int (*config_start)(struct request *, struct ice_data_setting *);
int (*config_end)(struct request *);
int (*status)(struct platform_device *);
void (*debug)(struct platform_device *);

@ -133,6 +133,7 @@ static inline void bio_crypt_set_ctx(struct bio *bio,
memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));
bc->bc_ksm = NULL;
bc->bc_keyslot = -1;
bc->is_ext4 = 0;
bio->bi_crypt_context = bc;
}

Loading…
Cancel
Save