Message-ID: <aXzQkqUuT/c/Ypwc@lizhi-Precision-Tower-5810>
Date: Fri, 30 Jan 2026 10:38:58 -0500
From: Frank Li <Frank.li@....com>
To: Sai Sree Kartheek Adivi <s-adivi@...com>
Cc: peter.ujfalusi@...il.com, vkoul@...nel.org, robh@...nel.org,
krzk+dt@...nel.org, conor+dt@...nel.org, nm@...com,
ssantosh@...nel.org, dmaengine@...r.kernel.org,
devicetree@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org, vigneshr@...com,
r-sharma3@...com, gehariprasath@...com
Subject: Re: [PATCH v4 03/19] dmaengine: ti: k3-udma: move static inline
helper functions to header file
On Fri, Jan 30, 2026 at 04:31:43PM +0530, Sai Sree Kartheek Adivi wrote:
> Move the static inline helper functions from k3-udma.c to the k3-udma.h
> header file for better separation and reuse.
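Once these live in the header, any translation unit that includes k3-udma.h can
call them directly. A minimal sketch of such a caller (illustrative only; the
file and the pause helper below are hypothetical and not part of this patch):

	/* hypothetical user of the shared helpers from k3-udma.h */
	#include "k3-udma.h"

	static void example_pause_tx(struct udma_chan *uc)
	{
		/* safe no-op when uc->tchan is NULL; the accessor checks it */
		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
					 UDMA_CHAN_RT_CTL_PAUSE,
					 UDMA_CHAN_RT_CTL_PAUSE);
	}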
>
> Signed-off-by: Sai Sree Kartheek Adivi <s-adivi@...com>
> ---
Reviewed-by: Frank Li <Frank.Li@....com>
> drivers/dma/ti/k3-udma.c | 108 --------------------------------------
> drivers/dma/ti/k3-udma.h | 109 +++++++++++++++++++++++++++++++++++++++
> 2 files changed, 109 insertions(+), 108 deletions(-)
>
> diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
> index e0684d83f9791..4adcd679c6997 100644
> --- a/drivers/dma/ti/k3-udma.c
> +++ b/drivers/dma/ti/k3-udma.c
> @@ -40,91 +40,6 @@ static const char * const mmr_names[] = {
> [MMR_TCHANRT] = "tchanrt",
> };
>
> -static inline struct udma_dev *to_udma_dev(struct dma_device *d)
> -{
> - return container_of(d, struct udma_dev, ddev);
> -}
> -
> -static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
> -{
> - return container_of(c, struct udma_chan, vc.chan);
> -}
> -
> -static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
> -{
> - return container_of(t, struct udma_desc, vd.tx);
> -}
> -
> -/* Generic register access functions */
> -static inline u32 udma_read(void __iomem *base, int reg)
> -{
> - return readl(base + reg);
> -}
> -
> -static inline void udma_write(void __iomem *base, int reg, u32 val)
> -{
> - writel(val, base + reg);
> -}
> -
> -static inline void udma_update_bits(void __iomem *base, int reg,
> - u32 mask, u32 val)
> -{
> - u32 tmp, orig;
> -
> - orig = readl(base + reg);
> - tmp = orig & ~mask;
> - tmp |= (val & mask);
> -
> - if (tmp != orig)
> - writel(tmp, base + reg);
> -}
> -
> -/* TCHANRT */
> -static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
> -{
> - if (!uc->tchan)
> - return 0;
> - return udma_read(uc->tchan->reg_rt, reg);
> -}
> -
> -static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
> -{
> - if (!uc->tchan)
> - return;
> - udma_write(uc->tchan->reg_rt, reg, val);
> -}
> -
> -static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
> - u32 mask, u32 val)
> -{
> - if (!uc->tchan)
> - return;
> - udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
> -}
> -
> -/* RCHANRT */
> -static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
> -{
> - if (!uc->rchan)
> - return 0;
> - return udma_read(uc->rchan->reg_rt, reg);
> -}
> -
> -static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
> -{
> - if (!uc->rchan)
> - return;
> - udma_write(uc->rchan->reg_rt, reg, val);
> -}
> -
> -static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
> - u32 mask, u32 val)
> -{
> - if (!uc->rchan)
> - return;
> - udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
> -}
> -
> static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
> {
> struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
> @@ -216,17 +131,6 @@ static void udma_dump_chan_stdata(struct udma_chan *uc)
> }
> }
>
> -static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
> - int idx)
> -{
> - return d->hwdesc[idx].cppi5_desc_paddr;
> -}
> -
> -static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
> -{
> - return d->hwdesc[idx].cppi5_desc_vaddr;
> -}
> -
> static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
> dma_addr_t paddr)
> {
> @@ -369,11 +273,6 @@ static bool udma_is_chan_paused(struct udma_chan *uc)
> return false;
> }
>
> -static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
> -{
> - return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
> -}
> -
> static int udma_push_to_ring(struct udma_chan *uc, int idx)
> {
> struct udma_desc *d = uc->desc;
> @@ -775,13 +674,6 @@ static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
> d->desc_idx = (d->desc_idx + 1) % d->sglen;
> }
>
> -static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
> -{
> - struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
> -
> - memcpy(d->metadata, h_desc->epib, d->metadata_size);
> -}
> -
> static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
> {
> u32 peer_bcnt, bcnt;
> diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h
> index 37aa9ba5b4d18..3a786b3eddc67 100644
> --- a/drivers/dma/ti/k3-udma.h
> +++ b/drivers/dma/ti/k3-udma.h
> @@ -447,6 +447,115 @@ struct udma_chan {
> u32 id;
> };
>
> +/* K3 UDMA helper functions */
> +static inline struct udma_dev *to_udma_dev(struct dma_device *d)
> +{
> + return container_of(d, struct udma_dev, ddev);
> +}
> +
> +static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
> +{
> + return container_of(c, struct udma_chan, vc.chan);
> +}
> +
> +static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
> +{
> + return container_of(t, struct udma_desc, vd.tx);
> +}
> +
> +/* Generic register access functions */
> +static inline u32 udma_read(void __iomem *base, int reg)
> +{
> + return readl(base + reg);
> +}
> +
> +static inline void udma_write(void __iomem *base, int reg, u32 val)
> +{
> + writel(val, base + reg);
> +}
> +
> +static inline void udma_update_bits(void __iomem *base, int reg,
> + u32 mask, u32 val)
> +{
> + u32 tmp, orig;
> +
> + orig = readl(base + reg);
> + tmp = orig & ~mask;
> + tmp |= (val & mask);
> +
> + if (tmp != orig)
> + writel(tmp, base + reg);
> +}
> +
> +/* TCHANRT */
> +static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
> +{
> + if (!uc->tchan)
> + return 0;
> + return udma_read(uc->tchan->reg_rt, reg);
> +}
> +
> +static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
> +{
> + if (!uc->tchan)
> + return;
> + udma_write(uc->tchan->reg_rt, reg, val);
> +}
> +
> +static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
> + u32 mask, u32 val)
> +{
> + if (!uc->tchan)
> + return;
> + udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
> +}
> +
> +/* RCHANRT */
> +static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
> +{
> + if (!uc->rchan)
> + return 0;
> + return udma_read(uc->rchan->reg_rt, reg);
> +}
> +
> +static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
> +{
> + if (!uc->rchan)
> + return;
> + udma_write(uc->rchan->reg_rt, reg, val);
> +}
> +
> +static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
> + u32 mask, u32 val)
> +{
> + if (!uc->rchan)
> + return;
> + udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
> +}
> +
> +static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
> + int idx)
> +{
> + return d->hwdesc[idx].cppi5_desc_paddr;
> +}
> +
> +static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
> +{
> + return d->hwdesc[idx].cppi5_desc_vaddr;
> +}
> +
> +static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
> +{
> + return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
> +}
> +
> +static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
> +{
> + struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
> +
> + memcpy(d->metadata, h_desc->epib, d->metadata_size);
> +}
> +
> /* Direct access to UDMA low level resources for the glue layer */
> int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread);
> int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
> --
> 2.34.1
>