Message-ID: <CADnq5_NLSmco8_zs7pt9KSr7hia-2HcjpLR6PEN6YLZs3Nf=7Q@mail.gmail.com>
Date:   Mon, 7 Aug 2023 12:25:35 -0400
From:   Alex Deucher <alexdeucher@...il.com>
To:     Ran Sun <sunran001@...suo.com>
Cc:     alexander.deucher@....com, airlied@...il.com, daniel@...ll.ch,
        dri-devel@...ts.freedesktop.org, amd-gfx@...ts.freedesktop.org,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH] drm/amd/pm: Clean up errors in vega12_hwmgr.c

Applied.  Thanks!

On Mon, Jul 31, 2023 at 10:57 PM Ran Sun <sunran001@...suo.com> wrote:
>
> Fix the following errors reported by checkpatch:
>
> ERROR: need consistent spacing around '/' (ctx:WxV)
> ERROR: code indent should use tabs where possible
>
> Signed-off-by: Ran Sun <sunran001@...suo.com>
> ---
>  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c | 10 +++++-----
>  1 file changed, 5 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
> index 1937be1cf5b4..4bd573d815ff 100644
> --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
> @@ -1623,13 +1623,13 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
>
>         if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
>                 clock_req.clock_type = amd_pp_dcef_clock;
> -               clock_req.clock_freq_in_khz = min_clocks.dcefClock/10;
> +               clock_req.clock_freq_in_khz = min_clocks.dcefClock / 10;
>                 if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) {
>                         if (data->smu_features[GNLD_DS_DCEFCLK].supported)
>                                 PP_ASSERT_WITH_CODE(
>                                         !smum_send_msg_to_smc_with_parameter(
>                                         hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
> -                                       min_clocks.dcefClockInSR /100,
> +                                       min_clocks.dcefClockInSR / 100,
>                                         NULL),
>                                         "Attempt to set divider for DCEFCLK Failed!",
>                                         return -1);
> @@ -2354,8 +2354,8 @@ static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
>         uint32_t i, latency;
>
>         disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
> -                                 !hwmgr->display_config->multi_monitor_in_sync) ||
> -                                 vblank_too_short;
> +                               !hwmgr->display_config->multi_monitor_in_sync) ||
> +                               vblank_too_short;
>         latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
>
>         /* gfxclk */
> @@ -2522,7 +2522,7 @@ static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
>                 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
>                 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
>                                 PPSMC_MSG_SetHardMinByFreq,
> -                               (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
> +                               (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
>                                 NULL)),
>                                 "[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
>                                 return ret);
> --
> 2.17.1
>

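For reference, the warnings fixed above come from the kernel's own checkpatch script. A minimal way to reproduce them (a sketch; it assumes you are running from the top of a kernel source tree, and the patch filename shown is only illustrative of what git format-patch would generate):

    # Check the whole source file for style issues, including the
    # spacing-around-'/' and tabs-vs-spaces errors addressed by this patch:
    ./scripts/checkpatch.pl --strict -f drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c

    # Or run it on the patch itself before sending:
    ./scripts/checkpatch.pl 0001-drm-amd-pm-Clean-up-errors-in-vega12_hwmgr.c.patch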