Message-Id: <20201117063030.12004-1-carl.yin@quectel.com>
Date: Tue, 17 Nov 2020 14:30:30 +0800
From: carl.yin@...ctel.com
To: manivannan.sadhasivam@...aro.org, hemantk@...eaurora.org,
sfr@...b.auug.org.au
Cc: linux-arm-msm@...r.kernel.org, linux-kernel@...r.kernel.org,
carl.yin@...ctel.com, naveen.kumar@...ctel.com
Subject: [PATCH] bus: mhi: core: manage all mhi ctrl data in one segment
From: Carl Yin <carl.yin@...ctel.com>
I tested SDX24 and SDX55 modems on a Dell OptiPlex 7060 with 16GB of
memory. When I set dma_data_width in mhi/pci_generic.c to 37,
I get the following errors:
[ 538.338317] mhi 0000:03:00.0: Requested to power ON
[ 538.338441] mhi 0000:03:00.0: Power on setup success
[ 538.338519] mhi 0000:03:00.0: Handling state transition: PBL
[ 543.383661] mhi 0000:03:00.0: Device in READY State
[ 543.383667] mhi 0000:03:00.0: Initializing MHI registers
[ 545.612647] mhi 0000:03:00.0: local ee:AMSS device ee:PASS THRU dev_state:READY
[ 545.646114] mhi 0000:03:00.0: Unhandled event type: 0
[ 545.646150] mhi 0000:03:00.0: tre: 0, 0, 0
[ 545.656697] mhi 0000:03:00.0: Unhandled event type: 0
[ 545.656733] mhi 0000:03:00.0: tre: 0, 0, 0
Referring to the Qualcomm Windows MHI driver, this patch manages all
mhi ctrl data in one segment, which resolves the errors above.
Signed-off-by: Carl Yin <carl.yin@...ctel.com>
---
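For illustration, here is a simplified sketch of the new
mhi_alloc_dev_ctxt() added below. Names follow the patch; error
handling is omitted, and for_each_ring() is only shorthand for the
three loops over transfer, event and command rings:

	size_t off = 0;

	/* reserve the channel/event/command context arrays up front */
	mhi_ctxt->chan_ctxt_addr = off;
	off += sizeof(*mhi_ctxt->chan_ctxt) * mhi_cntrl->max_chan;
	mhi_ctxt->er_ctxt_addr = off;
	off += sizeof(*mhi_ctxt->er_ctxt) * mhi_cntrl->total_ev_rings;
	mhi_ctxt->cmd_ctxt_addr = off;
	off += sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS;

	/* MHI requires each ring to start aligned to its own length */
	for_each_ring(ring) {
		ring->len = ring->el_size * ring->elements;
		off = ALIGN(off, ring->len);
		ring->iommu_base = off;	/* offset for now, rebased later */
		off += ring->len;
	}

	/* one coherent allocation backs all ctrl data */
	mhi_ctxt->ctrl_seg_len = off;
	mhi_ctxt->ctrl_seg = mhi_alloc_coherent(mhi_cntrl, off,
						&mhi_ctxt->ctrl_seg_addr,
						GFP_KERNEL);

MHICTRLBASE/MHICTRLLIMIT are then programmed with ctrl_seg_addr and
ctrl_seg_addr + ctrl_seg_len instead of iova_start/iova_stop, so the
ctrl segment the device sees is exactly this one allocation.
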
drivers/bus/mhi/core/init.c | 251 +++++++++++++++-----------------
drivers/bus/mhi/core/internal.h | 6 +-
2 files changed, 123 insertions(+), 134 deletions(-)
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index 655d539c6808..996b0f61920b 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -112,23 +112,6 @@ static struct attribute *mhi_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(mhi_dev);
-/* MHI protocol requires the transfer ring to be aligned with ring length */
-static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
- struct mhi_ring *ring,
- u64 len)
-{
- ring->alloc_size = len + (len - 1);
- ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
- &ring->dma_handle, GFP_KERNEL);
- if (!ring->pre_aligned)
- return -ENOMEM;
-
- ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
- ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);
-
- return 0;
-}
-
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
int i;
@@ -205,40 +188,136 @@ void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
mhi_cmd = mhi_cntrl->mhi_cmd;
for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
ring = &mhi_cmd->ring;
- mhi_free_coherent(mhi_cntrl, ring->alloc_size,
- ring->pre_aligned, ring->dma_handle);
ring->base = NULL;
ring->iommu_base = 0;
}
- mhi_free_coherent(mhi_cntrl,
- sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
- mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
-
mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
if (mhi_event->offload_ev)
continue;
ring = &mhi_event->ring;
- mhi_free_coherent(mhi_cntrl, ring->alloc_size,
- ring->pre_aligned, ring->dma_handle);
ring->base = NULL;
ring->iommu_base = 0;
}
- mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
- mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
- mhi_ctxt->er_ctxt_addr);
-
- mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
- mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
- mhi_ctxt->chan_ctxt_addr);
+ mhi_free_coherent(mhi_cntrl,
+ mhi_ctxt->ctrl_seg_len,
+ mhi_ctxt->ctrl_seg,
+ mhi_ctxt->ctrl_seg_addr);
kfree(mhi_ctxt);
mhi_cntrl->mhi_ctxt = NULL;
}
+static struct mhi_ctxt *mhi_alloc_dev_ctxt(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_ctxt *mhi_ctxt;
+ struct mhi_chan *mhi_chan;
+ struct mhi_event *mhi_event;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_ring *ring;
+ int i;
+
+ mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
+ if (!mhi_ctxt)
+ return NULL;
+
+ mhi_ctxt->chan_ctxt_addr = mhi_ctxt->ctrl_seg_len;
+ mhi_ctxt->ctrl_seg_len += (sizeof(*mhi_ctxt->chan_ctxt) * mhi_cntrl->max_chan);
+
+ mhi_ctxt->er_ctxt_addr = mhi_ctxt->ctrl_seg_len;
+ mhi_ctxt->ctrl_seg_len += (sizeof(*mhi_ctxt->er_ctxt) * mhi_cntrl->total_ev_rings);
+
+ mhi_ctxt->cmd_ctxt_addr = mhi_ctxt->ctrl_seg_len;
+ mhi_ctxt->ctrl_seg_len += (sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS);
+
+/* MHI protocol requires the transfer ring to be aligned with ring length */
+#define mhi_aligned_ring(mhi_ctxt, ring) \
+ do { \
+ ring->el_size = sizeof(struct mhi_tre); \
+ ring->len = ring->el_size * ring->elements; \
+ mhi_ctxt->ctrl_seg_len = ALIGN(mhi_ctxt->ctrl_seg_len, ring->len); \
+ ring->iommu_base = mhi_ctxt->ctrl_seg_len; \
+ mhi_ctxt->ctrl_seg_len += ring->len; \
+ } while (0)
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ /* Skip if it is an invalid or offload channel */
+ if (!mhi_chan->name || mhi_chan->offload_ch)
+ continue;
+
+ ring = &mhi_chan->tre_ring;
+ mhi_aligned_ring(mhi_ctxt, ring);
+ }
+
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ /* Skip if it is an offload event */
+ if (mhi_event->offload_ev)
+ continue;
+
+ ring = &mhi_event->ring;
+ mhi_aligned_ring(mhi_ctxt, ring);
+ }
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
+ ring = &mhi_cmd->ring;
+ ring->elements = CMD_EL_PER_RING;
+ mhi_aligned_ring(mhi_ctxt, ring);
+ }
+
+ mhi_ctxt->ctrl_seg = mhi_alloc_coherent(mhi_cntrl,
+ mhi_ctxt->ctrl_seg_len,
+ &mhi_ctxt->ctrl_seg_addr,
+ GFP_KERNEL);
+ if (!mhi_ctxt->ctrl_seg) {
+ kfree(mhi_ctxt);
+ return NULL;
+ }
+
+ mhi_ctxt->chan_ctxt = mhi_ctxt->ctrl_seg + mhi_ctxt->chan_ctxt_addr;
+ mhi_ctxt->chan_ctxt_addr += mhi_ctxt->ctrl_seg_addr;
+ mhi_ctxt->er_ctxt = mhi_ctxt->ctrl_seg + mhi_ctxt->er_ctxt_addr;
+ mhi_ctxt->er_ctxt_addr += mhi_ctxt->ctrl_seg_addr;
+ mhi_ctxt->cmd_ctxt = mhi_ctxt->ctrl_seg + mhi_ctxt->cmd_ctxt_addr;
+ mhi_ctxt->cmd_ctxt_addr += mhi_ctxt->ctrl_seg_addr;
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ /* Skip if it is an invalid or offload channel */
+ if (!mhi_chan->name || mhi_chan->offload_ch)
+ continue;
+
+ ring = &mhi_chan->tre_ring;
+ ring->base = mhi_ctxt->ctrl_seg + ring->iommu_base;
+ ring->iommu_base += mhi_ctxt->ctrl_seg_addr;
+ }
+
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ /* Skip if it is an offload event */
+ if (mhi_event->offload_ev)
+ continue;
+
+ ring = &mhi_event->ring;
+ ring->base = mhi_ctxt->ctrl_seg + ring->iommu_base;
+ ring->iommu_base += mhi_ctxt->ctrl_seg_addr;
+ }
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
+ ring = &mhi_cmd->ring;
+ ring->base = mhi_ctxt->ctrl_seg + ring->iommu_base;
+ ring->iommu_base += mhi_ctxt->ctrl_seg_addr;
+ }
+
+ return mhi_ctxt;
+}
+
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
struct mhi_ctxt *mhi_ctxt;
@@ -249,24 +328,16 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
struct mhi_event *mhi_event;
struct mhi_cmd *mhi_cmd;
u32 tmp;
- int ret = -ENOMEM, i;
+ int i;
atomic_set(&mhi_cntrl->dev_wake, 0);
atomic_set(&mhi_cntrl->pending_pkts, 0);
- mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
+ mhi_ctxt = mhi_alloc_dev_ctxt(mhi_cntrl);
if (!mhi_ctxt)
return -ENOMEM;
/* Setup channel ctxt */
- mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
- sizeof(*mhi_ctxt->chan_ctxt) *
- mhi_cntrl->max_chan,
- &mhi_ctxt->chan_ctxt_addr,
- GFP_KERNEL);
- if (!mhi_ctxt->chan_ctxt)
- goto error_alloc_chan_ctxt;
-
mhi_chan = mhi_cntrl->mhi_chan;
chan_ctxt = mhi_ctxt->chan_ctxt;
for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
@@ -291,14 +362,6 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
}
/* Setup event context */
- mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
- sizeof(*mhi_ctxt->er_ctxt) *
- mhi_cntrl->total_ev_rings,
- &mhi_ctxt->er_ctxt_addr,
- GFP_KERNEL);
- if (!mhi_ctxt->er_ctxt)
- goto error_alloc_er_ctxt;
-
er_ctxt = mhi_ctxt->er_ctxt;
mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
@@ -319,12 +382,6 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
er_ctxt->msivec = mhi_event->irq;
mhi_event->db_cfg.db_mode = true;
- ring->el_size = sizeof(struct mhi_tre);
- ring->len = ring->el_size * ring->elements;
- ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
- if (ret)
- goto error_alloc_er;
-
/*
* If the read pointer equals to the write pointer, then the
* ring is empty
@@ -337,27 +394,11 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
}
/* Setup cmd context */
- ret = -ENOMEM;
- mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
- sizeof(*mhi_ctxt->cmd_ctxt) *
- NR_OF_CMD_RINGS,
- &mhi_ctxt->cmd_ctxt_addr,
- GFP_KERNEL);
- if (!mhi_ctxt->cmd_ctxt)
- goto error_alloc_er;
-
mhi_cmd = mhi_cntrl->mhi_cmd;
cmd_ctxt = mhi_ctxt->cmd_ctxt;
for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
struct mhi_ring *ring = &mhi_cmd->ring;
- ring->el_size = sizeof(struct mhi_tre);
- ring->elements = CMD_EL_PER_RING;
- ring->len = ring->el_size * ring->elements;
- ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
- if (ret)
- goto error_alloc_cmd;
-
ring->rp = ring->wp = ring->base;
cmd_ctxt->rbase = ring->iommu_base;
cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
@@ -368,43 +409,6 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
mhi_cntrl->mhi_ctxt = mhi_ctxt;
return 0;
-
-error_alloc_cmd:
- for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
- struct mhi_ring *ring = &mhi_cmd->ring;
-
- mhi_free_coherent(mhi_cntrl, ring->alloc_size,
- ring->pre_aligned, ring->dma_handle);
- }
- mhi_free_coherent(mhi_cntrl,
- sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
- mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
- i = mhi_cntrl->total_ev_rings;
- mhi_event = mhi_cntrl->mhi_event + i;
-
-error_alloc_er:
- for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
- struct mhi_ring *ring = &mhi_event->ring;
-
- if (mhi_event->offload_ev)
- continue;
-
- mhi_free_coherent(mhi_cntrl, ring->alloc_size,
- ring->pre_aligned, ring->dma_handle);
- }
- mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
- mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
- mhi_ctxt->er_ctxt_addr);
-
-error_alloc_er_ctxt:
- mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
- mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
- mhi_ctxt->chan_ctxt_addr);
-
-error_alloc_chan_ctxt:
- kfree(mhi_ctxt);
-
- return ret;
}
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
@@ -455,11 +459,11 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
},
{
MHICTRLBASE_HIGHER, U32_MAX, 0,
- upper_32_bits(mhi_cntrl->iova_start),
+ upper_32_bits(mhi_cntrl->mhi_ctxt->ctrl_seg_addr),
},
{
MHICTRLBASE_LOWER, U32_MAX, 0,
- lower_32_bits(mhi_cntrl->iova_start),
+ lower_32_bits(mhi_cntrl->mhi_ctxt->ctrl_seg_addr),
},
{
MHIDATABASE_HIGHER, U32_MAX, 0,
@@ -471,11 +475,13 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
},
{
MHICTRLLIMIT_HIGHER, U32_MAX, 0,
- upper_32_bits(mhi_cntrl->iova_stop),
+ upper_32_bits(mhi_cntrl->mhi_ctxt->ctrl_seg_addr
+ + mhi_cntrl->mhi_ctxt->ctrl_seg_len),
},
{
MHICTRLLIMIT_LOWER, U32_MAX, 0,
- lower_32_bits(mhi_cntrl->iova_stop),
+ lower_32_bits(mhi_cntrl->mhi_ctxt->ctrl_seg_addr
+ + mhi_cntrl->mhi_ctxt->ctrl_seg_len),
},
{
MHIDATALIMIT_HIGHER, U32_MAX, 0,
@@ -542,19 +548,10 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan)
{
struct mhi_ring *buf_ring;
- struct mhi_ring *tre_ring;
- struct mhi_chan_ctxt *chan_ctxt;
buf_ring = &mhi_chan->buf_ring;
- tre_ring = &mhi_chan->tre_ring;
- chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
-
- mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
- tre_ring->pre_aligned, tre_ring->dma_handle);
vfree(buf_ring->base);
-
- buf_ring->base = tre_ring->base = NULL;
- chan_ctxt->rbase = 0;
+ buf_ring->base = NULL;
}
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
@@ -564,24 +561,16 @@ int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
struct mhi_ring *tre_ring;
struct mhi_chan_ctxt *chan_ctxt;
u32 tmp;
- int ret;
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
- tre_ring->el_size = sizeof(struct mhi_tre);
- tre_ring->len = tre_ring->el_size * tre_ring->elements;
chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
- ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
- if (ret)
- return -ENOMEM;
buf_ring->el_size = sizeof(struct mhi_buf_info);
buf_ring->len = buf_ring->el_size * buf_ring->elements;
buf_ring->base = vzalloc(buf_ring->len);
if (!buf_ring->base) {
- mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
- tre_ring->pre_aligned, tre_ring->dma_handle);
return -ENOMEM;
}
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
index 6f80ec30c0cd..546997d1a390 100644
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/core/internal.h
@@ -255,6 +255,9 @@ struct mhi_ctxt {
dma_addr_t er_ctxt_addr;
dma_addr_t chan_ctxt_addr;
dma_addr_t cmd_ctxt_addr;
+ void *ctrl_seg;
+ dma_addr_t ctrl_seg_addr;
+ size_t ctrl_seg_len;
};
struct mhi_tre {
@@ -483,17 +486,14 @@ struct state_transition {
};
struct mhi_ring {
- dma_addr_t dma_handle;
dma_addr_t iommu_base;
u64 *ctxt_wp; /* point to ctxt wp */
- void *pre_aligned;
void *base;
void *rp;
void *wp;
size_t el_size;
size_t len;
size_t elements;
- size_t alloc_size;
void __iomem *db_addr;
};
--
2.25.1