lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Date:   Tue, 24 Nov 2020 13:39:29 +0100
From:   Vlastimil Babka <vbabka@...e.cz>
To:     Hui Su <sh_def@....com>, akpm@...ux-foundation.org,
        nigupta@...dia.com, bhe@...hat.com, mateusznosek0@...il.com,
        iamjoonsoo.kim@....com, linux-kernel@...r.kernel.org,
        linux-mm@...ck.org
Subject: Re: [PATCH] mm/compaction: make defer_compaction and
 compaction_deferred static

On 11/23/20 6:08 PM, Hui Su wrote:
> defer_compaction(), compaction_deferred() and
> compaction_restarting() in mm/compaction.c won't
> be used in other files, so make them static, and
> remove the declarations in the header file.
> 
> Take the chance to fix a typo.
> 
> Signed-off-by: Hui Su <sh_def@....com>

Acked-by: Vlastimil Babka <vbabka@...e.cz>

> ---
>   include/linux/compaction.h | 12 ------------
>   mm/compaction.c            |  8 ++++----
>   2 files changed, 4 insertions(+), 16 deletions(-)
> 
> diff --git a/include/linux/compaction.h b/include/linux/compaction.h
> index 1de5a1151ee7..ed4070ed41ef 100644
> --- a/include/linux/compaction.h
> +++ b/include/linux/compaction.h
> @@ -98,11 +98,8 @@ extern void reset_isolation_suitable(pg_data_t *pgdat);
>   extern enum compact_result compaction_suitable(struct zone *zone, int order,
>   		unsigned int alloc_flags, int highest_zoneidx);
>   
> -extern void defer_compaction(struct zone *zone, int order);
> -extern bool compaction_deferred(struct zone *zone, int order);
>   extern void compaction_defer_reset(struct zone *zone, int order,
>   				bool alloc_success);
> -extern bool compaction_restarting(struct zone *zone, int order);
>   
>   /* Compaction has made some progress and retrying makes sense */
>   static inline bool compaction_made_progress(enum compact_result result)
> @@ -194,15 +191,6 @@ static inline enum compact_result compaction_suitable(struct zone *zone, int ord
>   	return COMPACT_SKIPPED;
>   }
>   
> -static inline void defer_compaction(struct zone *zone, int order)
> -{
> -}
> -
> -static inline bool compaction_deferred(struct zone *zone, int order)
> -{
> -	return true;
> -}
> -
>   static inline bool compaction_made_progress(enum compact_result result)
>   {
>   	return false;
> diff --git a/mm/compaction.c b/mm/compaction.c
> index 13cb7a961b31..60135a820b55 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -157,7 +157,7 @@ EXPORT_SYMBOL(__ClearPageMovable);
>    * allocation success. 1 << compact_defer_shift, compactions are skipped up
>    * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
>    */
> -void defer_compaction(struct zone *zone, int order)
> +static void defer_compaction(struct zone *zone, int order)
>   {
>   	zone->compact_considered = 0;
>   	zone->compact_defer_shift++;
> @@ -172,7 +172,7 @@ void defer_compaction(struct zone *zone, int order)
>   }
>   
>   /* Returns true if compaction should be skipped this time */
> -bool compaction_deferred(struct zone *zone, int order)
> +static bool compaction_deferred(struct zone *zone, int order)
>   {
>   	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
>   
> @@ -209,7 +209,7 @@ void compaction_defer_reset(struct zone *zone, int order,
>   }
>   
>   /* Returns true if restarting compaction after many failures */
> -bool compaction_restarting(struct zone *zone, int order)
> +static bool compaction_restarting(struct zone *zone, int order)
>   {
>   	if (order < zone->compact_order_failed)
>   		return false;
> @@ -237,7 +237,7 @@ static void reset_cached_positions(struct zone *zone)
>   }
>   
>   /*
> - * Compound pages of >= pageblock_order should consistenly be skipped until
> + * Compound pages of >= pageblock_order should consistently be skipped until
>    * released. It is always pointless to compact pages of such order (if they are
>    * migratable), and the pageblocks they occupy cannot contain any free pages.
>    */
> 

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ