Date:	Tue, 13 Jul 2010 09:00:44 -0500
From:	Brian King <brking@...ux.vnet.ibm.com>
To:	Nathan Fontenot <nfont@...tin.ibm.com>
CC:	linux-kernel@...r.kernel.org, linuxppc-dev@...abs.org
Subject: Re: [PATCH 1/7] Split the memory_block structure

On 07/12/2010 10:42 AM, Nathan Fontenot wrote:
> @@ -123,13 +130,20 @@
>  static ssize_t show_mem_removable(struct sys_device *dev,
>  			struct sysdev_attribute *attr, char *buf)
>  {
> -	unsigned long start_pfn;
> -	int ret;
> -	struct memory_block *mem =
> -		container_of(dev, struct memory_block, sysdev);
> +	struct list_head *pos, *tmp;
> +	struct memory_block *mem;
> +	int ret = 1;
> +
> +	mem = container_of(dev, struct memory_block, sysdev);
> +	list_for_each_safe(pos, tmp, &mem->sections) {
> +		struct memory_block_section *mbs;
> +		unsigned long start_pfn;
> +
> +		mbs = list_entry(pos, struct memory_block_section, next);
> +		start_pfn = section_nr_to_pfn(mbs->phys_index);
> +		ret &= is_mem_section_removable(start_pfn, PAGES_PER_SECTION);
> +	}

I don't see you deleting anything from the list in this loop. Why do you
need to use list_for_each_safe()? Note that it won't protect you if someone
else is modifying the list concurrently.
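
Something like this (untested sketch, reusing the names from your patch)
reads more naturally with the plain list_for_each_entry() iterator:

	/* Nothing is removed inside the loop, so no _safe variant is
	 * needed; note this still offers no protection against
	 * concurrent writers.
	 */
	list_for_each_entry(mbs, &mem->sections, next) {
		unsigned long start_pfn;

		start_pfn = section_nr_to_pfn(mbs->phys_index);
		ret &= is_mem_section_removable(start_pfn, PAGES_PER_SECTION);
	}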

> 
> -	start_pfn = section_nr_to_pfn(mem->phys_index);
> -	ret = is_mem_section_removable(start_pfn, PAGES_PER_SECTION);
>  	return sprintf(buf, "%d\n", ret);
>  }
> 


> @@ -238,19 +252,40 @@
>  static int memory_block_change_state(struct memory_block *mem,
>  		unsigned long to_state, unsigned long from_state_req)
>  {
> +	struct memory_block_section *mbs;
> +	struct list_head *pos;
>  	int ret = 0;
> +
>  	mutex_lock(&mem->state_mutex);
> 
> -	if (mem->state != from_state_req) {
> -		ret = -EINVAL;
> -		goto out;
> +	list_for_each(pos, &mem->sections) {
> +		mbs = list_entry(pos, struct memory_block_section, next);
> +
> +		if (mbs->state != from_state_req)
> +			continue;
> +
> +		ret = memory_block_action(mbs, to_state);
> +		if (ret)
> +			break;
> +	}

Would it be better here to loop through all the sections and ensure they
are in the proper state first, before starting to change the state of any
of them? Then you could simply return -EINVAL if one or more is in the
incorrect state and wouldn't need the recovery code below.
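
Roughly (untested; this re-adds the "out:" label your patch removes):

	/* Pass 1: bail out early unless every section is in the
	 * requested source state.
	 */
	list_for_each(pos, &mem->sections) {
		mbs = list_entry(pos, struct memory_block_section, next);
		if (mbs->state != from_state_req) {
			ret = -EINVAL;
			goto out;
		}
	}

	/* Pass 2: the recovery loop is then only needed when
	 * memory_block_action() itself fails partway through.
	 */
	list_for_each(pos, &mem->sections) {
		mbs = list_entry(pos, struct memory_block_section, next);
		ret = memory_block_action(mbs, to_state);
		if (ret)
			break;
	}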

> +	if (ret) {
> +		list_for_each(pos, &mem->sections) {
> +			mbs = list_entry(pos, struct memory_block_section,
> +					 next);
> +
> +			if (mbs->state == from_state_req)
> +				continue;
> +
> +			if (memory_block_action(mbs, to_state))
> +				printk(KERN_ERR "Could not re-enable memory "
> +				       "section %lx\n", mbs->phys_index);
> +		}
>  	}
> 
> -	ret = memory_block_action(mem, to_state);
>  	if (!ret)
>  		mem->state = to_state;
> 
> -out:
>  	mutex_unlock(&mem->state_mutex);
>  	return ret;
>  }


> @@ -498,19 +496,97 @@
> 
>  	return mem;
>  }
> +static int add_mem_block_section(struct memory_block *mem,
> +				 int section_nr, unsigned long state)
> +{
> +	struct memory_block_section *mbs;
> +
> +	mbs = kzalloc(sizeof(*mbs), GFP_KERNEL);
> +	if (!mbs)
> +		return -ENOMEM;
> +
> +	mbs->phys_index = section_nr;
> +	mbs->state = state;
> +
> +	list_add(&mbs->next, &mem->sections);

I don't think there is sufficient protection for this list. Don't we
need to be holding a lock of some sort when adding/deleting/iterating
through this list? 
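
If the existing state_mutex is intended to cover the section list as
well, then something like this (untested) at the insertion point:

	/* Assumption: mem->state_mutex also guards mem->sections; the
	 * readers above would then need to take it too.
	 */
	mutex_lock(&mem->state_mutex);
	list_add(&mbs->next, &mem->sections);
	mutex_unlock(&mem->state_mutex);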

> +	return 0;
> +}

-- 
Brian King
Linux on Power Virtualization
IBM Linux Technology Center


