scsi: ufs: Add simple IRQ-affined PM QoS operations

Qualcomm's PM QoS solution suffers from a number of issues: it
applies PM QoS to all CPUs, wastes CPU cycles in convoluted spaghetti
code, and keeps PM QoS applied for 10 ms after all requests finish
processing.

This implements a simple IRQ-affined PM QoS mechanism for each UFS
adapter. It uses atomics to elide locking on the hot path and queues
a worker to apply PM QoS to the CPU servicing the adapter's IRQ as
soon as a command is issued.
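
In short, each adapter ends up with one IRQ-affine CPU DMA latency
request whose vote is toggled between 100 us and the default. A
condensed sketch of the calls involved (same pm_qos API the patch
uses; error handling omitted):

    /* at init: bind the request to the CPU servicing the UFS IRQ */
    hba->pm_qos.req.type = PM_QOS_REQ_AFFINE_IRQ;
    hba->pm_qos.req.irq = irq;
    pm_qos_add_request(&hba->pm_qos.req, PM_QOS_CPU_DMA_LATENCY,
                       PM_QOS_DEFAULT_VALUE);

    /* first in-flight command: cap that CPU's wakeup latency */
    pm_qos_update_request(&hba->pm_qos.req, 100);

    /* last command completed: drop the vote again */
    pm_qos_update_request(&hba->pm_qos.req, PM_QOS_DEFAULT_VALUE);

Only the 0 -> 1 and 1 -> 0 transitions of the atomic in-flight
counter queue a worker on system_unbound_wq, so the issue and
completion paths each pay a single atomic op; the workers re-check
the counter under pm_qos.lock so a racing get/put pair cannot leave
a stale vote applied.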

Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
Signed-off-by: Alexander Winkowski <dereference23@outlook.com>
Change-Id: I634873deb7baf0916208809c025eb0d3ccdb0fa3
parent cba4ee39c9
commit 02a1d651cd

 drivers/scsi/ufs/ufshcd.c | 86
 drivers/scsi/ufs/ufshcd.h | 10

--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4266,6 +4266,48 @@ static inline void ufshcd_put_read_lock(struct ufs_hba *hba)
 	up_read(&hba->lock);
 }
 
+static void ufshcd_pm_qos_get_worker(struct work_struct *work)
+{
+	struct ufs_hba *hba = container_of(work, typeof(*hba), pm_qos.get_work);
+
+	if (!atomic_read(&hba->pm_qos.count))
+		return;
+
+	mutex_lock(&hba->pm_qos.lock);
+	if (atomic_read(&hba->pm_qos.count) && !hba->pm_qos.active) {
+		pm_qos_update_request(&hba->pm_qos.req, 100);
+		hba->pm_qos.active = true;
+	}
+	mutex_unlock(&hba->pm_qos.lock);
+}
+
+static void ufshcd_pm_qos_put_worker(struct work_struct *work)
+{
+	struct ufs_hba *hba = container_of(work, typeof(*hba), pm_qos.put_work);
+
+	if (atomic_read(&hba->pm_qos.count))
+		return;
+
+	mutex_lock(&hba->pm_qos.lock);
+	if (!atomic_read(&hba->pm_qos.count) && hba->pm_qos.active) {
+		pm_qos_update_request(&hba->pm_qos.req, PM_QOS_DEFAULT_VALUE);
+		hba->pm_qos.active = false;
+	}
+	mutex_unlock(&hba->pm_qos.lock);
+}
+
+static void ufshcd_pm_qos_get(struct ufs_hba *hba)
+{
+	if (atomic_inc_return(&hba->pm_qos.count) == 1)
+		queue_work(system_unbound_wq, &hba->pm_qos.get_work);
+}
+
+static void ufshcd_pm_qos_put(struct ufs_hba *hba)
+{
+	if (atomic_dec_return(&hba->pm_qos.count) == 0)
+		queue_work(system_unbound_wq, &hba->pm_qos.put_work);
+}
+
 /**
  * ufshcd_queuecommand - main entry point for SCSI requests
  * @cmd: command from SCSI Midlayer
@@ -4281,12 +4323,16 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	int tag;
 	int err = 0;
 	bool has_read_lock = false;
+	bool cmd_sent = false;
 
 	hba = shost_priv(host);
 	if (!cmd || !cmd->request || !hba)
 		return -EINVAL;
 
+	/* Wake the CPU managing the IRQ as soon as possible */
+	ufshcd_pm_qos_get(hba);
+
 	tag = cmd->request->tag;
 	if (!ufshcd_valid_tag(hba, tag)) {
 		dev_err(hba->dev,
@@ -4298,10 +4344,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	err = ufshcd_get_read_lock(hba, cmd->device->lun);
 	if (unlikely(err < 0)) {
 		if (err == -EPERM) {
-			return SCSI_MLQUEUE_HOST_BUSY;
+			err = SCSI_MLQUEUE_HOST_BUSY;
+			goto out_pm_qos;
 		}
-		if (err == -EAGAIN)
-			return SCSI_MLQUEUE_HOST_BUSY;
+		if (err == -EAGAIN) {
+			err = SCSI_MLQUEUE_HOST_BUSY;
+			goto out_pm_qos;
+		}
 	} else if (err == 1) {
 		has_read_lock = true;
 	}
@@ -4458,16 +4507,22 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 			if (has_read_lock)
 				ufshcd_put_read_lock(hba);
 			cmd->scsi_done(cmd);
-			return 0;
+			err = 0;
+			goto out_pm_qos;
 		}
 		goto out;
 	}
 
+	cmd_sent = true;
+
 out_unlock:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
 	if (has_read_lock)
 		ufshcd_put_read_lock(hba);
+out_pm_qos:
+	if (!cmd_sent)
+		ufshcd_pm_qos_put(hba);
 	return err;
 }
@@ -7502,6 +7557,9 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 			/* Mark completed command as NULL in LRB */
 			lrbp->cmd = NULL;
 			hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
+			if (cmd->request) {
+				ufshcd_pm_qos_put(hba);
+			}
 			req = cmd->request;
 			if (req) {
@@ -7577,6 +7635,14 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
 			update_req_stats(hba, lrbp);
 			/* Mark completed command as NULL in LRB */
 			lrbp->cmd = NULL;
+			if (cmd->request) {
+				/*
+				 * As we are accessing the "request" structure,
+				 * this must be called before calling
+				 * ->scsi_done() callback.
+				 */
+				ufshcd_pm_qos_put(hba);
+			}
 			clear_bit_unlock(index, &hba->lrb_in_use);
 			ufshcd_release_all(hba);
@@ -12733,6 +12799,9 @@ void ufshcd_remove(struct ufs_hba *hba)
 	/* disable interrupts */
 	ufshcd_disable_intr(hba, hba->intr_mask);
 	ufshcd_hba_stop(hba, true);
 
+	cancel_work_sync(&hba->pm_qos.put_work);
+	cancel_work_sync(&hba->pm_qos.get_work);
+	pm_qos_remove_request(&hba->pm_qos.req);
 	ufshcd_exit_clk_gating(hba);
 	ufshcd_exit_hibern8_on_idle(hba);
@@ -13007,6 +13076,14 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	 */
 	mb();
 
+	mutex_init(&hba->pm_qos.lock);
+	INIT_WORK(&hba->pm_qos.get_work, ufshcd_pm_qos_get_worker);
+	INIT_WORK(&hba->pm_qos.put_work, ufshcd_pm_qos_put_worker);
+	hba->pm_qos.req.type = PM_QOS_REQ_AFFINE_IRQ;
+	hba->pm_qos.req.irq = irq;
+	pm_qos_add_request(&hba->pm_qos.req, PM_QOS_CPU_DMA_LATENCY,
+			   PM_QOS_DEFAULT_VALUE);
+
 	/* IRQ registration */
 	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED,
 			       dev_name(dev), hba);
@@ -13113,6 +13190,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 out_remove_scsi_host:
 	scsi_remove_host(hba->host);
 exit_gating:
+	pm_qos_remove_request(&hba->pm_qos.req);
 	ufshcd_exit_clk_gating(hba);
 	ufshcd_exit_latency_hist(hba);
 out_disable:

--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -58,6 +58,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
 #include <linux/extcon.h>
+#include <linux/pm_qos.h>
 #include "unipro.h"
 
 #include <asm/irq.h>
@@ -1238,6 +1239,15 @@ struct ufs_hba {
 #if defined(CONFIG_UFS_DATA_LOG)
 	atomic_t log_count;
 #endif
+
+	struct {
+		struct pm_qos_request req;
+		struct work_struct get_work;
+		struct work_struct put_work;
+		struct mutex lock;
+		atomic_t count;
+		bool active;
+	} pm_qos;
 };
 
 static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
