Date:	Mon, 02 Feb 2015 15:47:20 -0500
From:	Paul Moore <paul@...l-moore.com>
To:	Imre Palik <imrep.amz@...il.com>
Cc:	linux-audit@...hat.com, Eric Paris <eparis@...hat.com>,
	linux-kernel@...r.kernel.org, "Palik, Imre" <imrep@...zon.de>,
	Matt Wilson <msw@...zon.com>
Subject: Re: [RFC PATCH v4] audit: move the tree pruning to a dedicated thread

On Friday, January 30, 2015 03:05:59 PM Imre Palik wrote:
> From: "Palik, Imre" <imrep@...zon.de>
> 
> When file auditing is enabled, during a low memory situation, a memory
> allocation with __GFP_FS can lead to pruning the inode cache, which can,
> in turn, lead to audit_tree_freeing_mark() being called.  This can call
> audit_schedule_prune(), which tries to fork a pruning thread and waits
> until the thread is created.  But forking needs memory, and the memory
> allocations there are done with __GFP_FS.
> 
> So we are waiting merrily for some __GFP_FS memory allocations to complete,
> while holding some filesystem locks.  This can take a while ...
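> 
> Roughly, the problematic chain looks like this (a simplified sketch of
> the call path, not the exact code):
> 
>   kmalloc(..., GFP_KERNEL)                /* __GFP_FS is set */
>     -> shrinker prunes the inode cache    /* fs locks are held */
>       -> audit_tree_freeing_mark()
>         -> audit_schedule_prune()
>           -> kthread_run(...)             /* fork allocates with __GFP_FS */
>              /* caller waits for a thread whose creation can block
>                 on the very locks we are holding */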
> 
> This patch creates a single, persistent thread for pruning the tree,
> launched from audit_add_tree_rule(), and thus avoids the deadlock that
> on-demand thread creation can cause.
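> 
> In other words, the allocating call (kthread_create()) moves to rule
> insertion time, where blocking on an allocation is safe, and the
> shrinker path is left with wake_up_process() alone, which does not
> allocate.  A minimal sketch of the resulting pattern (simplified from
> the patch below):
> 
>   /* once, at rule-insertion time: may safely allocate */
>   prune_thread = kthread_create(prune_tree_thread, NULL,
>                                 "audit_prune_tree");
>   wake_up_process(prune_thread);
> 
>   /* later, from the shrinker path: no allocation involved */
>   wake_up_process(prune_thread);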
> 
> Reported-by: Matt Wilson <msw@...zon.com>
> Cc: Matt Wilson <msw@...zon.com>
> Signed-off-by: Imre Palik <imrep@...zon.de>

Thanks for your persistence on this patch; I know it can be frustrating at
times.  I'm happy with this revision, but considering that we are only one
week away from the merge window opening, I'm going to queue this up for the
*next* merge window; I'll move it to the audit#next branch as soon as the
v3.20 merge window closes.

> diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
> index 2e0c974..71fd1f2 100644
> --- a/kernel/audit_tree.c
> +++ b/kernel/audit_tree.c
> @@ -37,6 +37,7 @@ struct audit_chunk {
> 
>  static LIST_HEAD(tree_list);
>  static LIST_HEAD(prune_list);
> +static struct task_struct *prune_thread;
> 
>  /*
>   * One struct chunk is attached to each inode of interest.
> @@ -651,6 +652,57 @@ static int tag_mount(struct vfsmount *mnt, void *arg)
>  	return tag_chunk(mnt->mnt_root->d_inode, arg);
>  }
> 
> +/*
> + * That gets run when evict_chunk() ends up needing to kill audit_tree.
> + * Runs from a separate thread.
> + */
> +static int prune_tree_thread(void *unused)
> +{
> +	for (;;) {
> +		set_current_state(TASK_INTERRUPTIBLE);
> +		if (list_empty(&prune_list))
> +			schedule();
> +		__set_current_state(TASK_RUNNING);
> +
> +		mutex_lock(&audit_cmd_mutex);
> +		mutex_lock(&audit_filter_mutex);
> +
> +		while (!list_empty(&prune_list)) {
> +			struct audit_tree *victim;
> +
> +			victim = list_entry(prune_list.next,
> +					struct audit_tree, list);
> +			list_del_init(&victim->list);
> +
> +			mutex_unlock(&audit_filter_mutex);
> +
> +			prune_one(victim);
> +
> +			mutex_lock(&audit_filter_mutex);
> +		}
> +
> +		mutex_unlock(&audit_filter_mutex);
> +		mutex_unlock(&audit_cmd_mutex);
> +	}
> +	return 0;
> +}
> +
> +static int audit_launch_prune(void)
> +{
> +	if (prune_thread)
> +		return 0;
> +	prune_thread = kthread_create(prune_tree_thread, NULL,
> +				"audit_prune_tree");
> +	if (IS_ERR(prune_thread)) {
> +		pr_err("cannot start thread audit_prune_tree\n");
> +		prune_thread = NULL;
> +		return -ENOMEM;
> +	} else {
> +		wake_up_process(prune_thread);
> +		return 0;
> +	}
> +}
> +
>  /* called with audit_filter_mutex */
>  int audit_add_tree_rule(struct audit_krule *rule)
>  {
> @@ -674,6 +726,12 @@ int audit_add_tree_rule(struct audit_krule *rule)
>  	/* do not set rule->tree yet */
>  	mutex_unlock(&audit_filter_mutex);
> 
> +	if (unlikely(!prune_thread)) {
> +		err = audit_launch_prune();
> +		if (err)
> +			goto Err;
> +	}
> +
>  	err = kern_path(tree->pathname, 0, &path);
>  	if (err)
>  		goto Err;
> @@ -811,36 +869,10 @@ int audit_tag_tree(char *old, char *new)
>  	return failed;
>  }
> 
> -/*
> - * That gets run when evict_chunk() ends up needing to kill audit_tree.
> - * Runs from a separate thread.
> - */
> -static int prune_tree_thread(void *unused)
> -{
> -	mutex_lock(&audit_cmd_mutex);
> -	mutex_lock(&audit_filter_mutex);
> -
> -	while (!list_empty(&prune_list)) {
> -		struct audit_tree *victim;
> -
> -		victim = list_entry(prune_list.next, struct audit_tree, list);
> -		list_del_init(&victim->list);
> -
> -		mutex_unlock(&audit_filter_mutex);
> -
> -		prune_one(victim);
> -
> -		mutex_lock(&audit_filter_mutex);
> -	}
> -
> -	mutex_unlock(&audit_filter_mutex);
> -	mutex_unlock(&audit_cmd_mutex);
> -	return 0;
> -}
> 
>  static void audit_schedule_prune(void)
>  {
> -	kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
> +	wake_up_process(prune_thread);
>  }
> 
>  /*
> @@ -907,9 +939,9 @@ static void evict_chunk(struct audit_chunk *chunk)
>  	for (n = 0; n < chunk->count; n++)
>  		list_del_init(&chunk->owners[n].list);
>  	spin_unlock(&hash_lock);
> +	mutex_unlock(&audit_filter_mutex);
>  	if (need_prune)
>  		audit_schedule_prune();
> -	mutex_unlock(&audit_filter_mutex);
>  }
> 
>  static int audit_tree_handle_event(struct fsnotify_group *group,

-- 
paul moore
www.paul-moore.com
