[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <DM6PR04MB6575A58446F1EB9ABDFBB7A6FC6C9@DM6PR04MB6575.namprd04.prod.outlook.com>
Date: Mon, 15 Mar 2021 09:20:55 +0000
From: Avri Altman <Avri.Altman@....com>
To: Can Guo <cang@...eaurora.org>
CC: "James E . J . Bottomley" <jejb@...ux.vnet.ibm.com>,
"Martin K . Petersen" <martin.petersen@...cle.com>,
"linux-scsi@...r.kernel.org" <linux-scsi@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"gregkh@...uxfoundation.org" <gregkh@...uxfoundation.org>,
Bart Van Assche <bvanassche@....org>,
yongmyung lee <ymhungry.lee@...sung.com>,
Daejun Park <daejun7.park@...sung.com>,
"alim.akhtar@...sung.com" <alim.akhtar@...sung.com>,
"asutoshd@...eaurora.org" <asutoshd@...eaurora.org>,
Zang Leigang <zangleigang@...ilicon.com>,
Avi Shchislowski <Avi.Shchislowski@....com>,
Bean Huo <beanhuo@...ron.com>,
"stanley.chu@...iatek.com" <stanley.chu@...iatek.com>
Subject: RE: [PATCH v5 03/10] scsi: ufshpb: Add region's reads counter
> > +
> > + if (hpb->is_hcm) {
> > + spin_lock_irqsave(&rgn->rgn_lock, flags);
>
> rgn_lock is never used in IRQ contexts, so no need of irqsave and
> irqrestore everywhere, which can impact performance. Please correct
> me if I am wrong.
Thanks. Will do.
>
> Meanwhile, have you ever initialized the rgn_lock before using it?
Yep - forgot to do that here (but not in gs20 and mi10). Thanks.
Thanks,
Avri
>
> Thanks,
> Can Guo.
>
> > + rgn->reads = 0;
> > + spin_unlock_irqrestore(&rgn->rgn_lock, flags);
> > + }
> > +
> > return 0;
> > }
> >
> > if (!ufshpb_is_support_chunk(hpb, transfer_len))
> > return 0;
> >
> > + if (hpb->is_hcm) {
> > + bool activate = false;
> > + /*
> > + * in host control mode, reads are the main source for
> > + * activation trials.
> > + */
> > + spin_lock_irqsave(&rgn->rgn_lock, flags);
> > + rgn->reads++;
> > + if (rgn->reads == ACTIVATION_THRESHOLD)
> > + activate = true;
> > + spin_unlock_irqrestore(&rgn->rgn_lock, flags);
> > + if (activate) {
> > + spin_lock_irqsave(&hpb->rsp_list_lock, flags);
> > + ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
> > + hpb->stats.rb_active_cnt++;
> > + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
> > + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
> > + "activate region %d-%d\n", rgn_idx, srgn_idx);
> > + }
> > +
> > + /* keep those counters normalized */
> > + if (rgn->reads > hpb->entries_per_srgn)
> > + schedule_work(&hpb->ufshpb_normalization_work);
> > + }
> > +
> > spin_lock_irqsave(&hpb->rgn_state_lock, flags);
> > if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
> > transfer_len)) {
> > @@ -745,21 +794,6 @@ static int ufshpb_clear_dirty_bitmap(struct
> > ufshpb_lu *hpb,
> > return 0;
> > }
> >
> > -static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int
> > rgn_idx,
> > - int srgn_idx)
> > -{
> > - struct ufshpb_region *rgn;
> > - struct ufshpb_subregion *srgn;
> > -
> > - rgn = hpb->rgn_tbl + rgn_idx;
> > - srgn = rgn->srgn_tbl + srgn_idx;
> > -
> > - list_del_init(&rgn->list_inact_rgn);
> > -
> > - if (list_empty(&srgn->list_act_srgn))
> > - list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
> > -}
> > -
> > static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int
> > rgn_idx)
> > {
> > struct ufshpb_region *rgn;
> > @@ -1079,6 +1113,14 @@ static void __ufshpb_evict_region(struct
> > ufshpb_lu *hpb,
> >
> > ufshpb_cleanup_lru_info(lru_info, rgn);
> >
> > + if (hpb->is_hcm) {
> > + unsigned long flags;
> > +
> > + spin_lock_irqsave(&rgn->rgn_lock, flags);
> > + rgn->reads = 0;
> > + spin_unlock_irqrestore(&rgn->rgn_lock, flags);
> > + }
> > +
> > for_each_sub_region(rgn, srgn_idx, srgn)
> > ufshpb_purge_active_subregion(hpb, srgn);
> > }
> > @@ -1523,6 +1565,31 @@ static void
> > ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
> > spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
> > }
> >
> > +static void ufshpb_normalization_work_handler(struct work_struct
> > *work)
> > +{
> > + struct ufshpb_lu *hpb;
> > + int rgn_idx;
> > + unsigned long flags;
> > +
> > + hpb = container_of(work, struct ufshpb_lu,
> > ufshpb_normalization_work);
> > +
> > + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
> > + struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
> > +
> > + spin_lock_irqsave(&rgn->rgn_lock, flags);
> > + rgn->reads = (rgn->reads >> 1);
> > + spin_unlock_irqrestore(&rgn->rgn_lock, flags);
> > +
> > + if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
> > + continue;
> > +
> > + /* if region is active but has no reads - inactivate it */
> > + spin_lock(&hpb->rsp_list_lock);
> > + ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
> > + spin_unlock(&hpb->rsp_list_lock);
> > + }
> > +}
> > +
> > static void ufshpb_map_work_handler(struct work_struct *work)
> > {
> > struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
> > map_work);
> > @@ -1913,6 +1980,9 @@ static int ufshpb_lu_hpb_init(struct ufs_hba
> > *hba, struct ufshpb_lu *hpb)
> > INIT_LIST_HEAD(&hpb->list_hpb_lu);
> >
> > INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
> > + if (hpb->is_hcm)
> > + INIT_WORK(&hpb->ufshpb_normalization_work,
> > + ufshpb_normalization_work_handler);
> >
> > hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
> > sizeof(struct ufshpb_req), 0, 0, NULL);
> > @@ -2012,6 +2082,8 @@ static void ufshpb_discard_rsp_lists(struct
> > ufshpb_lu *hpb)
> >
> > static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
> > {
> > + if (hpb->is_hcm)
> > + cancel_work_sync(&hpb->ufshpb_normalization_work);
> > cancel_work_sync(&hpb->map_work);
> > }
> >
> > diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h
> > index 8119b1a3d1e5..bd4308010466 100644
> > --- a/drivers/scsi/ufs/ufshpb.h
> > +++ b/drivers/scsi/ufs/ufshpb.h
> > @@ -121,6 +121,10 @@ struct ufshpb_region {
> > struct list_head list_lru_rgn;
> > unsigned long rgn_flags;
> > #define RGN_FLAG_DIRTY 0
> > +
> > + /* region reads - for host mode */
> > + spinlock_t rgn_lock;
> > + unsigned int reads;
> > };
> >
> > #define for_each_sub_region(rgn, i, srgn) \
> > @@ -211,6 +215,7 @@ struct ufshpb_lu {
> >
> > /* for selecting victim */
> > struct victim_select_info lru_info;
> > + struct work_struct ufshpb_normalization_work;
> >
> > /* pinned region information */
> > u32 lu_pinned_start;
Powered by blists - more mailing lists