lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1238705969.10479.8.camel@nigel-laptop>
Date:	Fri, 03 Apr 2009 07:59:29 +1100
From:	Nigel Cunningham <ncunningham@...a.org.au>
To:	Andreas Robinson <andr345@...il.com>
Cc:	Arjan van de Ven <arjan@...ux.intel.com>,
	"H. Peter Anvin" <hpa@...or.com>, Alain Knaff <alain@...ff.lu>,
	linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/2] lib: add fast lzo decompressor

Hi.

On Thu, 2009-04-02 at 14:30 +0200, Andreas Robinson wrote:
> On Thu, 2009-04-02 at 10:40 +1100, Nigel Cunningham wrote:
> > Sorry to jump in with a tangential issue, but I just noticed the
> > thread
> > and it reminded me of an issue :)
> > 
> > Should the lzo code used via cryptoapi (I believe it's the same stuff)
> > be SMP safe? I've tried to use it with TuxOnIce and get image corruption
> > (test kernel is 64 bit). The same code works fine if I tell it to use
> > LZF (which comes with TuxOnIce), no compression or, IIRC, work single
> > threaded.
> 
> Do you compress or decompress data through the crypto API?
> 
> Neither function modifies the input data and there are no static or
> global variables in use, so there shouldn't be a problem there.
> 
> They do however modify the destination length argument.
> 
> But most importantly, each compressor thread needs a private work buffer
> allocated through lzo_init() in crypto/lzo.c. So, if you use the crypto
> API to compress and share the crypto_tfm struct among threads, that
> would explain your breakage.

I use cryptoapi, and have per-cpu struct crypto_comps and per-cpu
buffers for output. As I said, it works fine if I use LZF (all I'm doing
is changing the name of the algorithm passed to crypto_alloc_comp). Here
are the relevant data structures, the initialisation routine and the
routines for reading and writing one page.

Regards,

Nigel

/*
 * Per-CPU (de)compression state.  Each CPU gets its own crypto transform
 * and its own scratch page, so compressor threads never share a work
 * buffer (see toi_compress_crypto_prepare() for initialisation).
 */
struct cpu_context {
        u8 *page_buffer;                /* one-page scratch area for (de)compressed data */
        struct crypto_comp *transform;  /* per-CPU transform; NULL means pass-through */
        unsigned int len;               /* output length from the last compress call */
        char *buffer_start;             /* kmap()ed address of the page being compressed */
};

/* One context per possible CPU; indexed via per_cpu(contexts, cpu). */
static DEFINE_PER_CPU(struct cpu_context, contexts);

/*
 * toi_compress_crypto_prepare - set up per-CPU compression state.
 *
 * Allocates a compression transform and a zeroed one-page scratch buffer
 * for every online CPU, using the compressor named by toi_compressor_name.
 *
 * Returns 0 on success, 1 when no compressor name is configured or the
 * transform cannot be allocated, and -ENOMEM when the scratch page
 * allocation fails.
 *
 * NOTE(review): on partial failure, transforms and pages already
 * allocated for earlier CPUs are not released here -- presumably the
 * caller's cleanup path frees them; verify against the module's
 * teardown routine.
 */
static int toi_compress_crypto_prepare(void)
{
        int cpu;

        if (!*toi_compressor_name) {
                printk(KERN_INFO "TuxOnIce: Compression enabled but no "
                                "compressor name set.\n");
                return 1;
        }

        for_each_online_cpu(cpu) {
                struct cpu_context *this = &per_cpu(contexts, cpu);

                this->transform = crypto_alloc_comp(toi_compressor_name, 0, 0);
                if (IS_ERR(this->transform)) {
                        printk(KERN_INFO "TuxOnIce: Failed to initialise the "
                                        "%s compression transform.\n",
                                        toi_compressor_name);
                        this->transform = NULL;
                        return 1;
                }

                /*
                 * page_buffer is declared u8 *, so cast to the matching
                 * type (the old (char *) cast mismatched the member).
                 */
                this->page_buffer =
                        (u8 *) toi_get_zeroed_page(16, TOI_ATOMIC_GFP);

                if (!this->page_buffer) {
                        printk(KERN_ERR
                          "Failed to allocate a page buffer for TuxOnIce "
                          "encryption driver.\n");
                        return -ENOMEM;
                }
        }

        return 0;
}

/*
 * toi_compress_write_page - compress one page and pass it down the chain.
 *
 * Compresses @buffer_page into this CPU's scratch buffer and writes the
 * smaller of the two representations via next_driver.  The reader relies
 * on a stored length of PAGE_SIZE meaning "uncompressed" (see
 * toi_compress_read_page), so data that does not shrink is written as-is.
 *
 * Returns whatever next_driver->write_page() returns, or the error from
 * crypto_comp_compress() on compression failure.
 *
 * NOTE(review): smp_processor_id() assumes preemption is disabled here
 * (or the caller pins the task to one CPU); otherwise the task could
 * migrate and race another writer on the same per-cpu context -- confirm
 * against the caller.
 */
static int toi_compress_write_page(unsigned long index,
                struct page *buffer_page, unsigned int buf_size)
{
        int ret, cpu = smp_processor_id();
        struct cpu_context *ctx = &per_cpu(contexts, cpu);

        /* No transform allocated: pass the page through uncompressed. */
        if (!ctx->transform)
                return next_driver->write_page(index, buffer_page, buf_size);

        ctx->buffer_start = kmap(buffer_page);

        /* In: capacity hint.  Out: crypto_comp_compress() stores the
         * actual compressed length back into ctx->len. */
        ctx->len = buf_size;

        ret = crypto_comp_compress(ctx->transform,
                        ctx->buffer_start, buf_size,
                        ctx->page_buffer, &ctx->len);

        kunmap(buffer_page);

        if (ret) {
                printk(KERN_INFO "Compression failed.\n");
                return ret;
        }

        /* Shared counters: serialise updates across CPUs. */
        mutex_lock(&stats_lock);
        toi_compress_bytes_in += buf_size;
        toi_compress_bytes_out += ctx->len;
        mutex_unlock(&stats_lock);

        /* Only store the compressed copy when it actually shrank;
         * an equal-or-larger result is written uncompressed so the
         * reader can distinguish the two cases by length. */
        if (ctx->len < buf_size) /* some compression */
                return next_driver->write_page(index,
                                virt_to_page(ctx->page_buffer),
                                ctx->len);
        else
                return next_driver->write_page(index, buffer_page, buf_size);
}

/*
 * toi_compress_read_page - read one page, decompressing if needed.
 *
 * Reads a page via next_driver into @buffer_page.  A stored length of
 * PAGE_SIZE means the writer stored the page uncompressed and it is
 * returned as-is; anything shorter is decompressed in place through this
 * CPU's scratch buffer.
 *
 * Returns 0 on success, the error from next_driver->read_page(), or
 * -EIO when decompression does not yield a full page.
 */
static int toi_compress_read_page(unsigned long *index,
                struct page *buffer_page, unsigned int *buf_size)
{
        int ret, cpu = smp_processor_id();
        unsigned int len;
        unsigned int outlen = PAGE_SIZE;
        char *buffer_start;
        struct cpu_context *ctx = &per_cpu(contexts, cpu);

        if (!ctx->transform)
                return next_driver->read_page(index, buffer_page, buf_size);

        /*
         * All our reads must be synchronous - we can't decompress
         * data that hasn't been read yet.
         */

        *buf_size = PAGE_SIZE;

        ret = next_driver->read_page(index, buffer_page, &len);

        /* Error or uncompressed data (writer stores full pages as-is). */
        if (ret || len == PAGE_SIZE)
                return ret;

        buffer_start = kmap(buffer_page);
        /* Stage the compressed bytes so we can decompress back into the
         * mapped page itself. */
        memcpy(ctx->page_buffer, buffer_start, len);
        ret = crypto_comp_decompress(
                        ctx->transform,
                        ctx->page_buffer,
                        len, buffer_start, &outlen);
        if (ret)
                abort_hibernate(TOI_FAILED_IO,
                        "Compress_read returned %d.\n", ret);
        else if (outlen != PAGE_SIZE) {
                /*
                 * Format fix: outlen is unsigned int (%u) and PAGE_SIZE
                 * is unsigned long (%lu); the previous %d/%ld specifiers
                 * mismatched the argument types.
                 */
                abort_hibernate(TOI_FAILED_IO,
                        "Decompression yielded %u bytes instead of %lu.\n",
                        outlen, PAGE_SIZE);
                printk(KERN_ERR "Decompression yielded %u bytes instead of "
                                "%lu.\n", outlen, PAGE_SIZE);
                ret = -EIO;
                *buf_size = outlen;
        }
        kunmap(buffer_page);
        return ret;
}


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ