Message-ID: <20171023123728.p7cpnlyflqjf6nqg@gmail.com>
Date:   Mon, 23 Oct 2017 14:37:30 +0200
From:   Christian Brauner <christian.brauner@...onical.com>
To:     Tycho Andersen <tycho@...ho.ws>
Cc:     Christian Brauner <christian.brauner@...ntu.com>,
        linux-kernel@...r.kernel.org, serge@...lyn.com,
        stgraber@...ntu.com, Eric Biederman <ebiederm@...ssion.com>
Subject: Re: [PATCH 2/2 v5] user namespaces: bump idmap limits to 340

On Mon, Oct 23, 2017 at 10:02:23AM +0200, Tycho Andersen wrote:
> On Sun, Oct 22, 2017 at 08:40:36PM +0200, Christian Brauner wrote:
> > +/**
> > + * insert_extent - Safely insert a new idmap extent into struct uid_gid_map.
> > + * Takes care to allocate a 4K block of memory if the number of mappings exceeds
> > + * UID_GID_MAP_MAX_BASE_EXTENTS.
> > + */
> > +static int insert_extent(struct uid_gid_map *map, struct uid_gid_extent *extent)
> > +{
> > +	if (map->nr_extents < UID_GID_MAP_MAX_BASE_EXTENTS) {
> > +		map->extent[map->nr_extents].first = extent->first;
> > +		map->extent[map->nr_extents].lower_first = extent->lower_first;
> > +		map->extent[map->nr_extents].count = extent->count;
> > +		return 0;
> > +	}
> > +
> > +	if (map->nr_extents == UID_GID_MAP_MAX_BASE_EXTENTS) {
> > +		struct uid_gid_extent *forward;
> > +
> > +		/* Allocate memory for 340 mappings. */
> > +		forward = kmalloc(sizeof(struct uid_gid_extent) *
> > +				 UID_GID_MAP_MAX_EXTENTS, GFP_KERNEL);
> > +		if (IS_ERR(forward))
> 
> I think you have the same issue here as in v4, kmalloc doesn't return
> -ENOMEM, it returns NULL on failure.

Ah, I was somehow convinced that only kmemdup() returned plain NULL on failure.
Thanks, Tycho.
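
For v6 I'll switch to a plain NULL check, roughly along these lines (untested
sketch, same names as in the hunk above):

	/* kmalloc() returns NULL on failure, never an ERR_PTR(). */
	forward = kmalloc(sizeof(struct uid_gid_extent) *
			  UID_GID_MAP_MAX_EXTENTS, GFP_KERNEL);
	if (!forward)
		return -ENOMEM;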

> 
> >  static ssize_t map_write(struct file *file, const char __user *buf,
> >  			 size_t count, loff_t *ppos,
> >  			 int cap_setid,
> > @@ -648,7 +911,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
> >  	struct user_namespace *ns = seq->private;
> >  	struct uid_gid_map new_map;
> >  	unsigned idx;
> > -	struct uid_gid_extent *extent = NULL;
> > +	struct uid_gid_extent extent;
> 
> struct uid_gid_extent = {}; maybe? Then you can drop the memset().

I don't think this is right. First, I don't care about struct uid_gid_extent
being zeroed. What I care about is struct uid_gid_map new_map being zeroed. This
needs to happen because otherwise, when the condition if (map->nr_extents != 0)
further down below is true, the cleanup code at the end would operate on
uninitialized memory. Also, any modification of struct uid_gid_map new_map needs
to be done while holding the mutex lock. So I think keeping the memset() here is
ok.
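
To make that concrete, the ordering I have in mind is roughly this (sketch,
matching the hunk quoted below):

	mutex_lock(&userns_state_mutex);

	/*
	 * Zero new_map only after taking the lock so that every write to it
	 * happens under userns_state_mutex and the cleanup at out: never
	 * sees uninitialized nr_extents/forward/reverse.
	 */
	memset(&new_map, 0, sizeof(struct uid_gid_map));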

> 
> >  	char *kbuf = NULL, *pos, *next_line;
> >  	ssize_t ret = -EINVAL;
> >  
> > @@ -673,6 +936,8 @@ static ssize_t map_write(struct file *file, const char __user *buf,
> >  	 */
> >  	mutex_lock(&userns_state_mutex);
> >  
> > +	memset(&new_map, 0, sizeof(struct uid_gid_map));
> > +
> >  	ret = -EPERM;
> >  	/* Only allow one successful write to the map */
> >  	if (map->nr_extents != 0)
> > @@ -700,9 +965,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
> >  	/* Parse the user data */
> >  	ret = -EINVAL;
> >  	pos = kbuf;
> > -	new_map.nr_extents = 0;
> >  	for (; pos; pos = next_line) {
> > -		extent = &new_map.extent[new_map.nr_extents];
> >  
> >  		/* Find the end of line and ensure I don't look past it */
> >  		next_line = strchr(pos, '\n');
> > @@ -714,17 +977,17 @@ static ssize_t map_write(struct file *file, const char __user *buf,
> >  		}
> >  
> >  		pos = skip_spaces(pos);
> > -		extent->first = simple_strtoul(pos, &pos, 10);
> > +		extent.first = simple_strtoul(pos, &pos, 10);
> >  		if (!isspace(*pos))
> >  			goto out;
> >  
> >  		pos = skip_spaces(pos);
> > -		extent->lower_first = simple_strtoul(pos, &pos, 10);
> > +		extent.lower_first = simple_strtoul(pos, &pos, 10);
> >  		if (!isspace(*pos))
> >  			goto out;
> >  
> >  		pos = skip_spaces(pos);
> > -		extent->count = simple_strtoul(pos, &pos, 10);
> > +		extent.count = simple_strtoul(pos, &pos, 10);
> >  		if (*pos && !isspace(*pos))
> >  			goto out;
> >  
> > @@ -734,29 +997,33 @@ static ssize_t map_write(struct file *file, const char __user *buf,
> >  			goto out;
> >  
> >  		/* Verify we have been given valid starting values */
> > -		if ((extent->first == (u32) -1) ||
> > -		    (extent->lower_first == (u32) -1))
> > +		if ((extent.first == (u32) -1) ||
> > +		    (extent.lower_first == (u32) -1))
> >  			goto out;
> >  
> >  		/* Verify count is not zero and does not cause the
> >  		 * extent to wrap
> >  		 */
> > -		if ((extent->first + extent->count) <= extent->first)
> > +		if ((extent.first + extent.count) <= extent.first)
> >  			goto out;
> > -		if ((extent->lower_first + extent->count) <=
> > -		     extent->lower_first)
> > +		if ((extent.lower_first + extent.count) <=
> > +		     extent.lower_first)
> >  			goto out;
> >  
> >  		/* Do the ranges in extent overlap any previous extents? */
> > -		if (mappings_overlap(&new_map, extent))
> > +		if (mappings_overlap(&new_map, &extent))
> >  			goto out;
> >  
> > -		new_map.nr_extents++;
> > -
> > -		/* Fail if the file contains too many extents */
> > -		if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) &&
> > +		if ((new_map.nr_extents + 1) == UID_GID_MAP_MAX_EXTENTS &&
> >  		    (next_line != NULL))
> >  			goto out;
> > +
> > +		ret = insert_extent(&new_map, &extent);
> > +		if (ret < 0)
> > +			goto out;
> > +		ret = -EINVAL;
> > +
> > +		new_map.nr_extents++;
> >  	}
> >  	/* Be very certaint the new map actually exists */
> >  	if (new_map.nr_extents == 0)
> > @@ -767,16 +1034,26 @@ static ssize_t map_write(struct file *file, const char __user *buf,
> >  	if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
> >  		goto out;
> >  
> > +	ret = sort_idmaps(&new_map);
> > +	if (ret < 0)
> > +		goto out;
> > +
> > +	ret = -EPERM;
> >  	/* Map the lower ids from the parent user namespace to the
> >  	 * kernel global id space.
> >  	 */
> >  	for (idx = 0; idx < new_map.nr_extents; idx++) {
> > +		struct uid_gid_extent *e;
> >  		u32 lower_first;
> > -		extent = &new_map.extent[idx];
> > +
> > +		if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
> > +			e = &new_map.extent[idx];
> > +		else
> > +			e = &new_map.forward[idx];
> >  
> >  		lower_first = map_id_range_down(parent_map,
> > -						extent->lower_first,
> > -						extent->count);
> > +						e->lower_first,
> > +						e->count);
> >  
> >  		/* Fail if we can not map the specified extent to
> >  		 * the kernel global id space.
> > @@ -784,18 +1061,34 @@ static ssize_t map_write(struct file *file, const char __user *buf,
> >  		if (lower_first == (u32) -1)
> >  			goto out;
> >  
> > -		extent->lower_first = lower_first;
> > +		e->lower_first = lower_first;
> >  	}
> >  
> >  	/* Install the map */
> > -	memcpy(map->extent, new_map.extent,
> > -		new_map.nr_extents*sizeof(new_map.extent[0]));
> > +	if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
> > +		memcpy(map->extent, new_map.extent,
> > +		       new_map.nr_extents * sizeof(new_map.extent[0]));
> > +	} else {
> > +		map->forward = new_map.forward;
> > +		map->reverse = new_map.reverse;
> > +	}
> >  	smp_wmb();
> >  	map->nr_extents = new_map.nr_extents;
> >  
> >  	*ppos = count;
> >  	ret = count;
> >  out:
> > +	if (ret < 0 && new_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
> > +		if (new_map.forward)
> > +			kfree(new_map.forward);
> 
> It's safe to pass null to kfree(), so I think you can just do,
> 
> kfree(new_map.forward);
> 
> and in the other places in this function.

Yeah, that sounds right. Thanks!
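
So for v6 the cleanup at out: can drop the NULL checks, something like this
(sketch of the quoted hunk only):

	if (ret < 0 && new_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
		/* kfree(NULL) is a no-op, so no need to test the pointer. */
		kfree(new_map.forward);
		/* ... and likewise for the other kfree() calls you mention. */
	}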

> 
> Tycho
