Message-Id: <20130301114923.056dab07563c70bb2802e972@canb.auug.org.au>
Date:	Fri, 1 Mar 2013 11:49:23 +1100
From:	Stephen Rothwell <sfr@...b.auug.org.au>
To:	"J. Bruce Fields" <bfields@...ldses.org>
Cc:	linux-next@...r.kernel.org, linux-kernel@...r.kernel.org,
	Sasha Levin <sasha.levin@...cle.com>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Jeff Layton <jlayton@...hat.com>
Subject: linux-next: manual merge of the nfsd tree with Linus' tree

Hi all,

Today's linux-next merge of the nfsd tree got a conflict in
fs/nfsd/nfscache.c between commit b67bfe0d42ca ("hlist: drop the node
parameter from iterators") from Linus' tree and commit a4a3ec329124
("nfsd: break out hashtable search into separate function") from the nfsd
tree.

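For context, commit b67bfe0d42ca changes the hlist iterator API so that callers no longer pass a separate struct hlist_node cursor, which is exactly what the nfsd-side nfsd_cache_search() helper was using. Roughly (a sketch of the API change only, not the exact tree contents):

/* Before b67bfe0d42ca: the iterator takes an explicit hlist_node cursor */
struct hlist_node *hn;

hlist_for_each_entry(rp, hn, rh, c_hash) {
	/* examine rp ... */
}

/* After b67bfe0d42ca: the cursor argument is dropped */
hlist_for_each_entry(rp, rh, c_hash) {
	/* examine rp ... */
}

The resolution below therefore just drops the hn declaration and argument from the new nfsd_cache_search() helper.
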
I fixed it up (see below) and can carry the fix as necessary (no action
is required).

-- 
Cheers,
Stephen Rothwell                    sfr@...b.auug.org.au

diff --cc fs/nfsd/nfscache.c
index da3dbd0,ca43664..0000000
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@@ -112,10 -182,130 +182,129 @@@ hash_refile(struct svc_cacherep *rp
  	hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
  }
  
+ static inline bool
+ nfsd_cache_entry_expired(struct svc_cacherep *rp)
+ {
+ 	return rp->c_state != RC_INPROG &&
+ 	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
+ }
+ 
+ /*
+  * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
+  * Also prune the oldest ones when the total exceeds the max number of entries.
+  */
+ static void
+ prune_cache_entries(void)
+ {
+ 	struct svc_cacherep *rp, *tmp;
+ 
+ 	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
+ 		if (!nfsd_cache_entry_expired(rp) &&
+ 		    num_drc_entries <= max_drc_entries)
+ 			break;
+ 		nfsd_reply_cache_free_locked(rp);
+ 	}
+ 
+ 	/*
+ 	 * Conditionally rearm the job. If we cleaned out the list, then
+ 	 * cancel any pending run (since there won't be any work to do).
+ 	 * Otherwise, we rearm the job or modify the existing one to run in
+ 	 * RC_EXPIRE since we just ran the pruner.
+ 	 */
+ 	if (list_empty(&lru_head))
+ 		cancel_delayed_work(&cache_cleaner);
+ 	else
+ 		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
+ }
+ 
+ static void
+ cache_cleaner_func(struct work_struct *unused)
+ {
+ 	spin_lock(&cache_lock);
+ 	prune_cache_entries();
+ 	spin_unlock(&cache_lock);
+ }
+ 
+ static int
+ nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
+ {
+ 	unsigned int num;
+ 
+ 	spin_lock(&cache_lock);
+ 	if (sc->nr_to_scan)
+ 		prune_cache_entries();
+ 	num = num_drc_entries;
+ 	spin_unlock(&cache_lock);
+ 
+ 	return num;
+ }
+ 
+ /*
+  * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
+  */
+ static __wsum
+ nfsd_cache_csum(struct svc_rqst *rqstp)
+ {
+ 	int idx;
+ 	unsigned int base;
+ 	__wsum csum;
+ 	struct xdr_buf *buf = &rqstp->rq_arg;
+ 	const unsigned char *p = buf->head[0].iov_base;
+ 	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
+ 				RC_CSUMLEN);
+ 	size_t len = min(buf->head[0].iov_len, csum_len);
+ 
+ 	/* rq_arg.head first */
+ 	csum = csum_partial(p, len, 0);
+ 	csum_len -= len;
+ 
+ 	/* Continue into page array */
+ 	idx = buf->page_base / PAGE_SIZE;
+ 	base = buf->page_base & ~PAGE_MASK;
+ 	while (csum_len) {
+ 		p = page_address(buf->pages[idx]) + base;
+ 		len = min_t(size_t, PAGE_SIZE - base, csum_len);
+ 		csum = csum_partial(p, len, csum);
+ 		csum_len -= len;
+ 		base = 0;
+ 		++idx;
+ 	}
+ 	return csum;
+ }
+ 
+ /*
+  * Search the request hash for an entry that matches the given rqstp.
+  * Must be called with cache_lock held. Returns the found entry or
+  * NULL on failure.
+  */
+ static struct svc_cacherep *
+ nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
+ {
+ 	struct svc_cacherep	*rp;
 -	struct hlist_node	*hn;
+ 	struct hlist_head 	*rh;
+ 	__be32			xid = rqstp->rq_xid;
+ 	u32			proto =  rqstp->rq_prot,
+ 				vers = rqstp->rq_vers,
+ 				proc = rqstp->rq_proc;
+ 
+ 	rh = &cache_hash[request_hash(xid)];
 -	hlist_for_each_entry(rp, hn, rh, c_hash) {
++	hlist_for_each_entry(rp, rh, c_hash) {
+ 		if (xid == rp->c_xid && proc == rp->c_proc &&
+ 		    proto == rp->c_prot && vers == rp->c_vers &&
+ 		    rqstp->rq_arg.len == rp->c_len && csum == rp->c_csum &&
+ 		    rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) &&
+ 		    rpc_get_port(svc_addr(rqstp)) == rpc_get_port((struct sockaddr *)&rp->c_addr))
+ 			return rp;
+ 	}
+ 	return NULL;
+ }
+ 
  /*
   * Try to find an entry matching the current call in the cache. When none
-  * is found, we grab the oldest unlocked entry off the LRU list.
-  * Note that no operation within the loop may sleep.
+  * is found, we try to grab the oldest expired entry off the LRU list. If
+  * a suitable one isn't there, then drop the cache_lock and allocate a
+  * new one, then search again in case one got inserted while this thread
+  * didn't hold the lock.
   */
  int
  nfsd_cache_lookup(struct svc_rqst *rqstp)
