[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <YLXz76e5bXR1d/cN@dhcp22.suse.cz>
Date: Tue, 1 Jun 2021 10:46:39 +0200
From: Michal Hocko <mhocko@...e.com>
To: Feng Tang <feng.tang@...el.com>
Cc: linux-mm@...ck.org, Andrew Morton <akpm@...ux-foundation.org>,
David Rientjes <rientjes@...gle.com>,
Dave Hansen <dave.hansen@...el.com>,
Ben Widawsky <ben.widawsky@...el.com>,
linux-kernel@...r.kernel.org,
Andrea Arcangeli <aarcange@...hat.com>,
Mel Gorman <mgorman@...hsingularity.net>,
Mike Kravetz <mike.kravetz@...cle.com>,
Randy Dunlap <rdunlap@...radead.org>,
Vlastimil Babka <vbabka@...e.cz>,
Andi Kleen <ak@...ux.intel.com>,
Dan Williams <dan.j.williams@...el.com>, ying.huang@...el.com
Subject: Re: [v3 PATCH 3/3] mm/mempolicy: unify the parameter sanity check
for mbind and set_mempolicy
On Mon 31-05-21 22:05:56, Feng Tang wrote:
> Currently the kernel_mbind() and kernel_set_mempolicy() do almost
> the same operation for parameter sanity check.
>
> Add a helper function to unify the code to reduce the redundancy,
> and make it easier for changing the pre-processing code in future.
>
> [thanks to David Rientjes for suggesting using helper function
> instead of macro]
>
> Signed-off-by: Feng Tang <feng.tang@...el.com>
sanitize_mpol_flags would benefit from some high-level comments
explaining those modifications, but this can be done on top. This looks
like a useful cleanup on its own.
Acked-by: Michal Hocko <mhocko@...e.com>
Thanks!
> ---
> mm/mempolicy.c | 47 +++++++++++++++++++++++++++++------------------
> 1 file changed, 29 insertions(+), 18 deletions(-)
>
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index c337bd7..85ef512 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -1444,26 +1444,37 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
> return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
> }
>
> +static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
> +{
> + *flags = *mode & MPOL_MODE_FLAGS;
> + *mode &= ~MPOL_MODE_FLAGS;
> + if ((unsigned int)(*mode) >= MPOL_MAX)
> + return -EINVAL;
> + if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
> + return -EINVAL;
> +
> + return 0;
> +}
> +
> static long kernel_mbind(unsigned long start, unsigned long len,
> unsigned long mode, const unsigned long __user *nmask,
> unsigned long maxnode, unsigned int flags)
> {
> + unsigned short mode_flags;
> nodemask_t nodes;
> + int lmode = mode;
> int err;
> - unsigned short mode_flags;
>
> start = untagged_addr(start);
> - mode_flags = mode & MPOL_MODE_FLAGS;
> - mode &= ~MPOL_MODE_FLAGS;
> - if (mode >= MPOL_MAX)
> - return -EINVAL;
> - if ((mode_flags & MPOL_F_STATIC_NODES) &&
> - (mode_flags & MPOL_F_RELATIVE_NODES))
> - return -EINVAL;
> + err = sanitize_mpol_flags(&lmode, &mode_flags);
> + if (err)
> + return err;
> +
> err = get_nodes(&nodes, nmask, maxnode);
> if (err)
> return err;
> - return do_mbind(start, len, mode, mode_flags, &nodes, flags);
> +
> + return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
> }
>
> SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
> @@ -1477,20 +1488,20 @@ SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
> static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
> unsigned long maxnode)
> {
> - int err;
> + unsigned short mode_flags;
> nodemask_t nodes;
> - unsigned short flags;
> + int lmode = mode;
> + int err;
> +
> + err = sanitize_mpol_flags(&lmode, &mode_flags);
> + if (err)
> + return err;
>
> - flags = mode & MPOL_MODE_FLAGS;
> - mode &= ~MPOL_MODE_FLAGS;
> - if ((unsigned int)mode >= MPOL_MAX)
> - return -EINVAL;
> - if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
> - return -EINVAL;
> err = get_nodes(&nodes, nmask, maxnode);
> if (err)
> return err;
> - return do_set_mempolicy(mode, flags, &nodes);
> +
> + return do_set_mempolicy(lmode, mode_flags, &nodes);
> }
>
> SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
> --
> 2.7.4
--
Michal Hocko
SUSE Labs
Powered by blists - more mailing lists