Message-ID: <84144f020908241033l4af09e7h9caac47d8d9b7841@mail.gmail.com>
Date:	Mon, 24 Aug 2009 20:33:25 +0300
From:	Pekka Enberg <penberg@...helsinki.fi>
To:	ngupta@...are.org
Cc:	akpm@...ux-foundation.org, linux-kernel@...r.kernel.org,
	linux-mm@...ck.org, linux-mm-cc@...top.org
Subject: Re: [PATCH 1/4] compcache: xvmalloc memory allocator

Hi Nitin,

[ Nit: the name xmalloc() is usually reserved for non-failing allocators in
  user-space, which is why xvmalloc() looks so confusing to me. Can we
  get a better name for the thing? Also, I'm not sure why xvmalloc is a
  separate module. Can't you just build it into the kernel proper or
  compile it into the ramzswap module? ]
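
For reference, the user-space convention I mean is roughly this (a generic
sketch, not taken from any particular codebase):

	#include <stdio.h>
	#include <stdlib.h>

	/* Classic user-space xmalloc(): never returns NULL, dies instead. */
	void *xmalloc(size_t size)
	{
		void *ptr = malloc(size);

		if (!ptr) {
			fprintf(stderr, "xmalloc: out of memory\n");
			exit(EXIT_FAILURE);
		}
		return ptr;
	}

xv_malloc() below, by contrast, can fail with -ENOMEM, so the "x" prefix
suggests the opposite of what the function actually does.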

On Mon, Aug 24, 2009 at 7:37 AM, Nitin Gupta <ngupta@...are.org> wrote:
> +/**
> + * xv_malloc - Allocate block of given size from pool.
> + * @pool: pool to allocate from
> + * @size: size of block to allocate
> + * @pagenum: page no. that holds the object
> + * @offset: location of object within pagenum
> + *
> + * On success, <pagenum, offset> identifies block allocated
> + * and 0 is returned. On failure, <pagenum, offset> is set to
> + * 0 and -ENOMEM is returned.
> + *
> + * Allocation requests with size > XV_MAX_ALLOC_SIZE will fail.
> + */
> +int xv_malloc(struct xv_pool *pool, u32 size, u32 *pagenum, u32 *offset,
> +                                                       gfp_t flags)
> +{
> +       int error;
> +       u32 index, tmpsize, origsize, tmpoffset;
> +       struct block_header *block, *tmpblock;
> +
> +       *pagenum = 0;
> +       *offset = 0;
> +       origsize = size;
> +
> +       if (unlikely(!size || size > XV_MAX_ALLOC_SIZE))
> +               return -ENOMEM;
> +
> +       size = ALIGN(size, XV_ALIGN);
> +
> +       spin_lock(&pool->lock);
> +
> +       index = find_block(pool, size, pagenum, offset);
> +
> +       if (!*pagenum) {
> +               spin_unlock(&pool->lock);
> +               if (flags & GFP_NOWAIT)
> +                       return -ENOMEM;
> +               error = grow_pool(pool, flags);
> +               if (unlikely(error))
> +                       return -ENOMEM;
> +
> +               spin_lock(&pool->lock);
> +               index = find_block(pool, size, pagenum, offset);
> +       }
> +
> +       if (!*pagenum) {
> +               spin_unlock(&pool->lock);
> +               return -ENOMEM;
> +       }
> +
> +       block = get_ptr_atomic(*pagenum, *offset, KM_USER0);
> +
> +       remove_block_head(pool, block, index);
> +
> +       /* Split the block if required */
> +       tmpoffset = *offset + size + XV_ALIGN;
> +       tmpsize = block->size - size;
> +       tmpblock = (struct block_header *)((char *)block + size + XV_ALIGN);
> +       if (tmpsize) {
> +               tmpblock->size = tmpsize - XV_ALIGN;
> +               set_flag(tmpblock, BLOCK_FREE);
> +               clear_flag(tmpblock, PREV_FREE);
> +
> +               set_blockprev(tmpblock, *offset);
> +               if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
> +                       insert_block(pool, *pagenum, tmpoffset, tmpblock);
> +
> +               if (tmpoffset + XV_ALIGN + tmpblock->size != PAGE_SIZE) {
> +                       tmpblock = BLOCK_NEXT(tmpblock);
> +                       set_blockprev(tmpblock, tmpoffset);
> +               }
> +       } else {
> +               /* This block is exact fit */
> +               if (tmpoffset != PAGE_SIZE)
> +                       clear_flag(tmpblock, PREV_FREE);
> +       }
> +
> +       block->size = origsize;
> +       clear_flag(block, BLOCK_FREE);
> +
> +       put_ptr_atomic(block, KM_USER0);
> +       spin_unlock(&pool->lock);
> +
> +       *offset += XV_ALIGN;
> +
> +       return 0;
> +}
> +EXPORT_SYMBOL_GPL(xv_malloc);
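
From the kernel-doc above, a caller ends up doing something like this
(hypothetical sketch, based only on the interface as posted):

	u32 pagenum, offset;
	void *obj;
	int err;

	err = xv_malloc(pool, len, &pagenum, &offset, GFP_NOIO);
	if (err)
		return err;

	/* PFN -> struct page -> kernel mapping, just to reach the object */
	obj = kmap_atomic(pfn_to_page(pagenum), KM_USER0) + offset;
	/* ... use the object ... */
	kunmap_atomic(obj - offset, KM_USER0);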

What's the purpose of passing PFNs around? There's quite a lot of PFN
to struct page conversion going on because of it. Wouldn't it make
more sense to return (and pass) a pointer to struct page instead?
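
Something along these lines, perhaps (untested sketch, just to illustrate
the suggested interface):

	int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
		      u32 *offset, gfp_t flags);

	err = xv_malloc(pool, len, &page, &offset, GFP_NOIO);
	if (!err)
		obj = kmap_atomic(page, KM_USER0) + offset;

That would keep the pfn_to_page() conversions inside the allocator, or get
rid of them entirely.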

                        Pekka
