Message-ID: <CAN9tCgTPdK1jya5br4k1GhJ1KDVqarV4gLAbZL=NyMN5xHnuYg@mail.gmail.com>
Date:	Sat, 6 Sep 2014 19:36:14 +0200
From:	Mircea Gherzan <mgherzan@...il.com>
To:	Daniel Borkmann <dborkman@...hat.com>
Cc:	davem@...emloft.net, ast@...mgrid.com, netdev@...r.kernel.org
Subject: Re: [PATCH net-next 2/3] net: bpf: arm: address randomize and write
 protect JIT code

2014-09-06 11:42 GMT+02:00 Daniel Borkmann <dborkman@...hat.com>:
> This is the ARM variant for 314beb9bcab ("x86: bpf_jit_comp: secure bpf
> jit against spraying attacks").
>
> It is now possible to implement it due to commits 75374ad47c64 ("ARM: mm:
> Define set_memory_* functions for ARM") and dca9aa92fc7c ("ARM: add
> DEBUG_SET_MODULE_RONX option to Kconfig") which added infrastructure for
> this facility.
>
> Thus, this patch makes sure the BPF-generated JIT code is marked RO, like
> other kernel text sections, and also lets the generated JIT code start
> at a pseudo-random offset instead of on a page boundary. The holes are filled
> with illegal instructions.
>
> JIT tested on armv7hl with BPF test suite.
>
> Reference: http://mainisusuallyafunction.blogspot.com/2012/11/attacking-hardened-linux-systems-with.html
> Signed-off-by: Daniel Borkmann <dborkman@...hat.com>
> Signed-off-by: Alexei Starovoitov <ast@...mgrid.com>
> Cc: Mircea Gherzan <mgherzan@...il.com>
> ---
>  arch/arm/net/bpf_jit_32.c | 32 ++++++++++++++++++++++++++------
>  1 file changed, 26 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
> index a76623b..2d1a5b9 100644
> --- a/arch/arm/net/bpf_jit_32.c
> +++ b/arch/arm/net/bpf_jit_32.c
> @@ -12,7 +12,6 @@
>  #include <linux/compiler.h>
>  #include <linux/errno.h>
>  #include <linux/filter.h>
> -#include <linux/moduleloader.h>
>  #include <linux/netdevice.h>
>  #include <linux/string.h>
>  #include <linux/slab.h>
> @@ -174,6 +173,15 @@ static inline bool is_load_to_a(u16 inst)
>         }
>  }
>
> +static void jit_fill_hole(void *area, unsigned int size)
> +{
> +       /* Insert illegal UND instructions. */
> +       u32 *ptr, fill_ins = 0xe7ffffff;
> +       /* We are guaranteed to have aligned memory. */
> +       for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
> +               *ptr++ = fill_ins;
> +}
> +
>  static void build_prologue(struct jit_ctx *ctx)
>  {
>         u16 reg_set = saved_regs(ctx);
> @@ -859,9 +867,11 @@ b_epilogue:
>
>  void bpf_jit_compile(struct bpf_prog *fp)
>  {
> +       struct bpf_binary_header *header;
>         struct jit_ctx ctx;
>         unsigned tmp_idx;
>         unsigned alloc_size;
> +       u8 *target_ptr;
>
>         if (!bpf_jit_enable)
>                 return;
> @@ -897,13 +907,15 @@ void bpf_jit_compile(struct bpf_prog *fp)
>         /* there's nothing after the epilogue on ARMv7 */
>         build_epilogue(&ctx);
>  #endif
> -
>         alloc_size = 4 * ctx.idx;
> -       ctx.target = module_alloc(alloc_size);
> -       if (unlikely(ctx.target == NULL))
> +       header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
> +                                     4, jit_fill_hole);
> +       if (header == NULL)
>                 goto out;
>
> +       ctx.target = (u32 *) target_ptr;
>         ctx.idx = 0;
> +
>         build_prologue(&ctx);
>         build_body(&ctx);
>         build_epilogue(&ctx);
> @@ -919,6 +931,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
>                 /* there are 2 passes here */
>                 bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
>
> +       set_memory_ro((unsigned long)header, header->pages);
>         fp->bpf_func = (void *)ctx.target;
>         fp->jited = 1;
>  out:
> @@ -928,8 +941,15 @@ out:
>
>  void bpf_jit_free(struct bpf_prog *fp)
>  {
> -       if (fp->jited)
> -               module_free(NULL, fp->bpf_func);
> +       unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
> +       struct bpf_binary_header *header = (void *)addr;
> +
> +       if (!fp->jited)
> +               goto free_filter;
> +
> +       set_memory_rw(addr, header->pages);
> +       bpf_jit_binary_free(header);
>
> +free_filter:
>         bpf_prog_unlock_free(fp);
>  }

Acked-by: Mircea Gherzan <mgherzan@...il.com>
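
For readers new to the scheme, below is a minimal user-space sketch (not part
of the patch) of the idea: fill the whole JIT allocation with illegal
instructions, copy the generated image to a pseudo-random word-aligned offset,
and recover the allocation base by masking, as bpf_jit_free() does with
PAGE_MASK. The 0xe7ffffff filler mirrors the UND encoding used in the patch;
the 4 KiB size, the dummy image and the rand() source are assumptions made
only for the demo.

/*
 * Illustrative sketch of randomized JIT image placement and hole filling.
 * Assumptions: 4 KiB "page", dummy two-word image, rand() as entropy source.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define IMAGE_PAGE_SIZE 4096u
#define FILL_INSN       0xe7ffffffu           /* ARM UND encoding used as filler */

int main(void)
{
	/* Page-aligned allocation standing in for bpf_jit_binary_alloc(). */
	uint32_t *page = aligned_alloc(IMAGE_PAGE_SIZE, IMAGE_PAGE_SIZE);
	const uint32_t prog[] = { 0xe3a00000, 0xe12fff1e };  /* dummy "JIT output" */
	size_t nwords = IMAGE_PAGE_SIZE / sizeof(uint32_t);
	size_t slack, start;

	if (!page)
		return 1;

	/* jit_fill_hole(): every word starts out as an illegal instruction. */
	for (size_t i = 0; i < nwords; i++)
		page[i] = FILL_INSN;

	/* Pick a pseudo-random, word-aligned start offset inside the slack. */
	srand((unsigned)time(NULL));
	slack = nwords - sizeof(prog) / sizeof(uint32_t);
	start = (size_t)rand() % slack;
	memcpy(&page[start], prog, sizeof(prog));

	/* bpf_jit_free(): the allocation base is recoverable from any pointer
	 * into the image by masking off the low page bits (PAGE_MASK). */
	uintptr_t image = (uintptr_t)&page[start];
	uintptr_t base  = image & ~((uintptr_t)IMAGE_PAGE_SIZE - 1);

	printf("image at byte offset %zu, recovered base %s the allocation\n",
	       start * sizeof(uint32_t),
	       base == (uintptr_t)page ? "matches" : "does not match");
	free(page);
	return 0;
}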