Message-ID: <be53a6d3-9cac-0bc2-c659-68bea6034073@embeddedor.com>
Date: Thu, 17 Aug 2023 19:25:24 -0600
From: "Gustavo A. R. Silva" <gustavo@...eddedor.com>
To: Kees Cook <keescook@...omium.org>, Vinod Koul <vkoul@...nel.org>
Cc: Maxime Coquelin <mcoquelin.stm32@...il.com>,
Alexandre Torgue <alexandre.torgue@...s.st.com>,
dmaengine@...r.kernel.org,
linux-stm32@...md-mailman.stormreply.com,
linux-arm-kernel@...ts.infradead.org,
Hector Martin <marcan@...can.st>,
Sven Peter <sven@...npeter.dev>,
Alyssa Rosenzweig <alyssa@...enzweig.io>,
Ludovic Desroches <ludovic.desroches@...rochip.com>,
Tudor Ambarus <tudor.ambarus@...aro.org>,
Lars-Peter Clausen <lars@...afoo.de>,
Zhou Wang <wangzhou1@...ilicon.com>,
Jie Hai <haijie1@...wei.com>, Andy Gross <agross@...nel.org>,
Bjorn Andersson <andersson@...nel.org>,
Konrad Dybcio <konrad.dybcio@...aro.org>,
Green Wan <green.wan@...ive.com>,
Orson Zhai <orsonzhai@...il.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Chunyan Zhang <zhang.lyra@...il.com>,
Patrice Chotard <patrice.chotard@...s.st.com>,
Laxman Dewangan <ldewangan@...dia.com>,
Jon Hunter <jonathanh@...dia.com>,
Thierry Reding <thierry.reding@...il.com>,
Peter Ujfalusi <peter.ujfalusi@...il.com>,
Kunihiko Hayashi <hayashi.kunihiko@...ionext.com>,
Masami Hiramatsu <mhiramat@...nel.org>,
Yu Kuai <yukuai3@...wei.com>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Jordy Zomer <jordy@...ing.systems>,
Jernej Skrabec <jernej.skrabec@...il.com>,
Nathan Chancellor <nathan@...nel.org>,
Nick Desaulniers <ndesaulniers@...gle.com>,
Tom Rix <trix@...hat.com>, linux-kernel@...r.kernel.org,
asahi@...ts.linux.dev, linux-arm-msm@...r.kernel.org,
linux-tegra@...r.kernel.org, llvm@...ts.linux.dev,
linux-hardening@...r.kernel.org
Subject: Re: [PATCH 12/21] dmaengine: stm32-dma: Annotate struct
stm32_dma_desc with __counted_by
On 8/17/23 17:58, Kees Cook wrote:
> Prepare for the coming implementation by GCC and Clang of the __counted_by
> attribute. Flexible array members annotated with __counted_by can have
> their accesses bounds-checked at run-time via CONFIG_UBSAN_BOUNDS
> (for array indexing) and CONFIG_FORTIFY_SOURCE (for strcpy/memcpy-family
> functions).
>
> As found with Coccinelle[1], add __counted_by for struct stm32_dma_desc.
> Additionally, since the element count member must be set before accessing
> the annotated flexible array member, move its initialization earlier.
>
> [1] https://github.com/kees/kernel-tools/blob/trunk/coccinelle/examples/counted_by.cocci
>
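
For readers unfamiliar with the pattern, the general shape of the change looks
roughly like the sketch below (hypothetical example_* names, not the stm32-dma
structures themselves): the counter named in __counted_by() must be assigned
before the flexible array is indexed, so that CONFIG_UBSAN_BOUNDS and
CONFIG_FORTIFY_SOURCE have a valid bound to check against.

	struct example_item {
		u32 len;
	};

	struct example_desc {
		u32 num_items;
		struct example_item items[] __counted_by(num_items);
	};

	static struct example_desc *example_alloc(u32 n)
	{
		struct example_desc *d;
		u32 i;

		d = kzalloc(struct_size(d, items, n), GFP_NOWAIT);
		if (!d)
			return NULL;

		/* Set the counter first ... */
		d->num_items = n;

		/* ... then accesses to items[0..n-1] can be bounds-checked. */
		for (i = 0; i < n; i++)
			d->items[i].len = 0;

		return d;
	}
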
> Cc: Vinod Koul <vkoul@...nel.org>
> Cc: Maxime Coquelin <mcoquelin.stm32@...il.com>
> Cc: Alexandre Torgue <alexandre.torgue@...s.st.com>
> Cc: dmaengine@...r.kernel.org
> Cc: linux-stm32@...md-mailman.stormreply.com
> Cc: linux-arm-kernel@...ts.infradead.org
> Signed-off-by: Kees Cook <keescook@...omium.org>
Reviewed-by: Gustavo A. R. Silva <gustavoars@...nel.org>
Thanks
--
Gustavo
> ---
> drivers/dma/stm32-dma.c | 11 ++++-------
> 1 file changed, 4 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
> index 5c36811aa134..a732b3807b11 100644
> --- a/drivers/dma/stm32-dma.c
> +++ b/drivers/dma/stm32-dma.c
> @@ -191,7 +191,7 @@ struct stm32_dma_desc {
> struct virt_dma_desc vdesc;
> bool cyclic;
> u32 num_sgs;
> - struct stm32_dma_sg_req sg_req[];
> + struct stm32_dma_sg_req sg_req[] __counted_by(num_sgs);
> };
>
> /**
> @@ -1105,6 +1105,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
> desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
> if (!desc)
> return NULL;
> + desc->num_sgs = sg_len;
>
> /* Set peripheral flow controller */
> if (chan->dma_sconfig.device_fc)
> @@ -1141,8 +1142,6 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
> desc->sg_req[i].chan_reg.dma_sm1ar += sg_dma_len(sg);
> desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
> }
> -
> - desc->num_sgs = sg_len;
> desc->cyclic = false;
>
> return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
> @@ -1216,6 +1215,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
> desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
> if (!desc)
> return NULL;
> + desc->num_sgs = num_periods;
>
> for (i = 0; i < num_periods; i++) {
> desc->sg_req[i].len = period_len;
> @@ -1232,8 +1232,6 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
> if (!chan->trig_mdma)
> buf_addr += period_len;
> }
> -
> - desc->num_sgs = num_periods;
> desc->cyclic = true;
>
> return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
> @@ -1254,6 +1252,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
> desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
> if (!desc)
> return NULL;
> + desc->num_sgs = num_sgs;
>
> threshold = chan->threshold;
>
> @@ -1283,8 +1282,6 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
> desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
> desc->sg_req[i].len = xfer_count;
> }
> -
> - desc->num_sgs = num_sgs;
> desc->cyclic = false;
>
> return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);