Message-ID: <159ce32b-a7c4-08be-9283-9e38a7c27848@konsulko.com>
Date:   Wed, 9 Dec 2020 00:26:42 +0100
From:   Vitaly Wool <vitaly.wool@...sulko.com>
To:     Mike Galbraith <efault@....de>
Cc:     Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
        Oleksandr Natalenko <oleksandr@...alenko.name>,
        LKML <linux-kernel@...r.kernel.org>,
        Linux-MM <linux-mm@...ck.org>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Steven Rostedt <rostedt@...dmis.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        linux-rt-users@...r.kernel.org
Subject: Re: scheduling while atomic in z3fold

Hi Mike,

On 2020-12-07 16:41, Mike Galbraith wrote:
> On Mon, 2020-12-07 at 16:21 +0100, Vitaly Wool wrote:
>> On Mon, Dec 7, 2020 at 1:34 PM Mike Galbraith <efault@....de> wrote:
>>>
>>
>>> Unfortunately, that made zero difference.
>>
>> Okay, I suggest that you submit the patch that changes read_lock() to
>> write_lock() in __release_z3fold_page() and I'll ack it then.
>> I would like to rewrite the code so that write_lock is not necessary
>> there but I don't want to hold you back and it isn't likely that I'll
>> complete this today.
> 
> Nah, I'm in no rush... especially not to sign off on "Because the
> little voices in my head said this bit should look like that bit over
> yonder, and testing _seems_ to indicate they're right about that" :)
> 
> 	-Mike
> 

Okay, thanks. Would this make things better:

diff --git a/mm/z3fold.c b/mm/z3fold.c
index 18feaa0bc537..340c38a5ffac 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -303,10 +303,9 @@ static inline void put_z3fold_header(struct z3fold_header *zhdr)
 		z3fold_page_unlock(zhdr);
 }
 
-static inline void free_handle(unsigned long handle)
+static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
 {
 	struct z3fold_buddy_slots *slots;
-	struct z3fold_header *zhdr;
 	int i;
 	bool is_free;
 
@@ -316,22 +315,13 @@ static inline void free_handle(unsigned long handle)
 	if (WARN_ON(*(unsigned long *)handle == 0))
 		return;
 
-	zhdr = handle_to_z3fold_header(handle);
 	slots = handle_to_slots(handle);
 	write_lock(&slots->lock);
 	*(unsigned long *)handle = 0;
-	if (zhdr->slots == slots) {
-		write_unlock(&slots->lock);
-		return; /* simple case, nothing else to do */
-	}
+	if (zhdr->slots != slots)
+		zhdr->foreign_handles--;
 
-	/* we are freeing a foreign handle if we are here */
-	zhdr->foreign_handles--;
 	is_free = true;
-	if (!test_bit(HANDLES_ORPHANED, &slots->pool)) {
-		write_unlock(&slots->lock);
-		return;
-	}
 	for (i = 0; i <= BUDDY_MASK; i++) {
 		if (slots->slot[i]) {
 			is_free = false;
@@ -343,6 +333,8 @@ static inline void free_handle(unsigned long handle)
 	if (is_free) {
 		struct z3fold_pool *pool = slots_to_pool(slots);
 
+		if (zhdr->slots == slots)
+			zhdr->slots = NULL;
 		kmem_cache_free(pool->c_handle, slots);
 	}
 }
@@ -525,8 +517,6 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
 {
 	struct page *page = virt_to_page(zhdr);
 	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
-	bool is_free = true;
-	int i;
 
 	WARN_ON(!list_empty(&zhdr->buddy));
 	set_bit(PAGE_STALE, &page->private);
@@ -536,21 +526,6 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
 		list_del_init(&page->lru);
 	spin_unlock(&pool->lock);
 
-	/* If there are no foreign handles, free the handles array */
-	read_lock(&zhdr->slots->lock);
-	for (i = 0; i <= BUDDY_MASK; i++) {
-		if (zhdr->slots->slot[i]) {
-			is_free = false;
-			break;
-		}
-	}
-	if (!is_free)
-		set_bit(HANDLES_ORPHANED, &zhdr->slots->pool);
-	read_unlock(&zhdr->slots->lock);
-
-	if (is_free)
-		kmem_cache_free(pool->c_handle, zhdr->slots);
-
 	if (locked)
 		z3fold_page_unlock(zhdr);
 
@@ -973,6 +948,9 @@ static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
 		}
 	}
 
+	if (zhdr && !zhdr->slots)
+		zhdr->slots = alloc_slots(pool,
+					can_sleep ? GFP_NOIO : GFP_ATOMIC);
 	return zhdr;
 }
 
@@ -1270,7 +1248,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 	}
 
 	if (!page_claimed)
-		free_handle(handle);
+		free_handle(handle, zhdr);
 	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
 		atomic64_dec(&pool->pages_nr);
 		return;
@@ -1429,19 +1407,19 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 			ret = pool->ops->evict(pool, middle_handle);
 			if (ret)
 				goto next;
-			free_handle(middle_handle);
+			free_handle(middle_handle, zhdr);
 		}
 		if (first_handle) {
 			ret = pool->ops->evict(pool, first_handle);
 			if (ret)
 				goto next;
-			free_handle(first_handle);
+			free_handle(first_handle, zhdr);
 		}
 		if (last_handle) {
 			ret = pool->ops->evict(pool, last_handle);
 			if (ret)
 				goto next;
-			free_handle(last_handle);
+			free_handle(last_handle, zhdr);
 		}
 next:
 		if (test_bit(PAGE_HEADLESS, &page->private)) {
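
For readability, this is roughly how free_handle() reads with the patch
applied. It is reconstructed from the hunks above, so anything elided
between them (notably the unlock after the slot scan and the checks at
the top of the function) is assumed rather than quoted:

/* Sketch of free_handle() after the patch; reconstructed from the hunks
 * above, not copied from the resulting file.
 */
static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	/* ... earlier checks in the function, unchanged by the patch ... */
	if (WARN_ON(*(unsigned long *)handle == 0))
		return;

	slots = handle_to_slots(handle);
	write_lock(&slots->lock);
	*(unsigned long *)handle = 0;
	if (zhdr->slots != slots)
		/* the handle lives in another page's slots array */
		zhdr->foreign_handles--;

	/* free the slots array once no slot in it is still in use */
	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	write_unlock(&slots->lock);	/* assumed placement, from elided context */

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		if (zhdr->slots == slots)
			zhdr->slots = NULL;
		kmem_cache_free(pool->c_handle, slots);
	}
}

The net effect is that the slots array is now freed from free_handle()
only, __release_z3fold_page() no longer touches the slots lock at all,
and __z3fold_alloc() re-attaches a fresh slots array if it picks up a
header whose own slots have already been freed.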

--

Best regards,
    Vitaly
