[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAG99D9Jss=h5aVLDq0tkDjfZgGUbrNV1gqwcw631RbwCiPVqNg@mail.gmail.com>
Date: Fri, 6 Sep 2024 14:20:09 +0530
From: Mukul Sikka <mukul.sikka@...adcom.com>
To: Alex Deucher <alexdeucher@...il.com>
Cc: stable@...r.kernel.org, gregkh@...uxfoundation.org, evan.quan@....com,
alexander.deucher@....com, christian.koenig@....com, airlied@...ux.ie,
daniel@...ll.ch, Jun.Ma2@....com, kevinyang.wang@....com, sashal@...nel.org,
amd-gfx@...ts.freedesktop.org, dri-devel@...ts.freedesktop.org,
linux-kernel@...r.kernel.org, ajay.kaher@...adcom.com,
alexey.makhalov@...adcom.com, vasavi.sirnapalli@...adcom.com,
Bob Zhou <bob.zhou@....com>, Tim Huang <Tim.Huang@....com>
Subject: Re: [PATCH v5.15-v5.10] drm/amd/pm: Fix the null pointer dereference
for vega10_hwmgr
On Fri, Sep 6, 2024 at 12:05 AM Alex Deucher <alexdeucher@...il.com> wrote:
>
> On Tue, Sep 3, 2024 at 5:53 AM sikkamukul <mukul.sikka@...adcom.com> wrote:
> >
> > From: Bob Zhou <bob.zhou@....com>
> >
> > [ Upstream commit 50151b7f1c79a09117837eb95b76c2de76841dab ]
> >
> > Check return value and conduct null pointer handling to avoid null pointer dereference.
> >
> > Signed-off-by: Bob Zhou <bob.zhou@....com>
> > Reviewed-by: Tim Huang <Tim.Huang@....com>
> > Signed-off-by: Alex Deucher <alexander.deucher@....com>
> > Signed-off-by: Sasha Levin <sashal@...nel.org>
> > Signed-off-by: Mukul Sikka <mukul.sikka@...adcom.com>
>
> Just out of curiosity, are you actually seeing an issue? This and a
> lot of the other recent NULL check patches are just static checker
> fixes. They don't actually fix a known issue.
>
No, we are not seeing an actual issue ourselves. Based on the description
of this patch and CVE-2024-43905, it appears to be applicable to the LTS kernels.
- Mukul
> > ---
> > .../drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c | 30 ++++++++++++++++---
> > 1 file changed, 26 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
> > index 10678b519..304874cba 100644
> > --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
> > +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
> > @@ -3391,13 +3391,17 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
> > const struct vega10_power_state *vega10_ps =
> > cast_const_phw_vega10_power_state(states->pnew_state);
> > struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
> > - uint32_t sclk = vega10_ps->performance_levels
> > - [vega10_ps->performance_level_count - 1].gfx_clock;
> > struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
> > - uint32_t mclk = vega10_ps->performance_levels
> > - [vega10_ps->performance_level_count - 1].mem_clock;
> > + uint32_t sclk, mclk;
> > uint32_t i;
> >
> > + if (vega10_ps == NULL)
> > + return -EINVAL;
> > + sclk = vega10_ps->performance_levels
> > + [vega10_ps->performance_level_count - 1].gfx_clock;
> > + mclk = vega10_ps->performance_levels
> > + [vega10_ps->performance_level_count - 1].mem_clock;
> > +
> > for (i = 0; i < sclk_table->count; i++) {
> > if (sclk == sclk_table->dpm_levels[i].value)
> > break;
> > @@ -3704,6 +3708,9 @@ static int vega10_generate_dpm_level_enable_mask(
> > cast_const_phw_vega10_power_state(states->pnew_state);
> > int i;
> >
> > + if (vega10_ps == NULL)
> > + return -EINVAL;
> > +
> > PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
> > "Attempt to Trim DPM States Failed!",
> > return -1);
> > @@ -4828,6 +4835,9 @@ static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
> >
> > psa = cast_const_phw_vega10_power_state(pstate1);
> > psb = cast_const_phw_vega10_power_state(pstate2);
> > + if (psa == NULL || psb == NULL)
> > + return -EINVAL;
> > +
> > /* If the two states don't even have the same number of performance levels they cannot be the same state. */
> > if (psa->performance_level_count != psb->performance_level_count) {
> > *equal = false;
> > @@ -4953,6 +4963,8 @@ static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
> > return -EINVAL;
> >
> > vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
> > + if (vega10_ps == NULL)
> > + return -EINVAL;
> >
> > vega10_ps->performance_levels
> > [vega10_ps->performance_level_count - 1].gfx_clock =
> > @@ -5004,6 +5016,8 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
> > return -EINVAL;
> >
> > vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
> > + if (vega10_ps == NULL)
> > + return -EINVAL;
> >
> > vega10_ps->performance_levels
> > [vega10_ps->performance_level_count - 1].mem_clock =
> > @@ -5239,6 +5253,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
> > return;
> >
> > vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
> > + if (vega10_ps == NULL)
> > + return;
> > +
> > max_level = vega10_ps->performance_level_count - 1;
> >
> > if (vega10_ps->performance_levels[max_level].gfx_clock !=
> > @@ -5261,6 +5278,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
> >
> > ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1));
> > vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
> > + if (vega10_ps == NULL)
> > + return;
> > +
> > max_level = vega10_ps->performance_level_count - 1;
> >
> > if (vega10_ps->performance_levels[max_level].gfx_clock !=
> > @@ -5451,6 +5471,8 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_
> > return -EINVAL;
> >
> > ps = cast_const_phw_vega10_power_state(state);
> > + if (ps == NULL)
> > + return -EINVAL;
> >
> > i = index > ps->performance_level_count - 1 ?
> > ps->performance_level_count - 1 : index;
> > --
> > 2.39.4
> >
Powered by blists - more mailing lists