Message-ID: <20190130210529.GI2278@hirez.programming.kicks-ass.net>
Date: Wed, 30 Jan 2019 22:05:29 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: Alexei Starovoitov <ast@...nel.org>
Cc: davem@...emloft.net, daniel@...earbox.net, jannh@...gle.com,
paulmck@...ux.ibm.com, will.deacon@....com, mingo@...hat.com,
netdev@...r.kernel.org, kernel-team@...com
Subject: Re: [PATCH v5 bpf-next 1/9] bpf: introduce bpf_spin_lock

On Sun, Jan 27, 2019 at 06:50:02PM -0800, Alexei Starovoitov wrote:
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index a74972b07e74..e1d6aefbab50 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -221,6 +221,63 @@ const struct bpf_func_proto bpf_get_current_comm_proto = {
> .arg2_type = ARG_CONST_SIZE,
> };
>
> +#ifndef CONFIG_QUEUED_SPINLOCKS
> +struct dumb_spin_lock {
> + atomic_t val;
> +};
> +#endif
> +
> +notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
> +{
> +#if defined(CONFIG_SMP)
> +#ifdef CONFIG_QUEUED_SPINLOCKS
> + struct qspinlock *qlock = (void *)lock;
> +
> + BUILD_BUG_ON(sizeof(*qlock) != sizeof(*lock));
> + queued_spin_lock(qlock);
> +#else
> + struct dumb_spin_lock *qlock = (void *)lock;
> +
> + BUILD_BUG_ON(sizeof(*qlock) != sizeof(*lock));
> + do {
> + while (atomic_read(&qlock->val) != 0)
> + cpu_relax();
> + } while (atomic_cmpxchg(&qlock->val, 0, 1) != 0);
> +#endif
> +#endif
> + return 0;
> +}
> +
> +const struct bpf_func_proto bpf_spin_lock_proto = {
> + .func = bpf_spin_lock,
> + .gpl_only = false,
> + .ret_type = RET_VOID,
> + .arg1_type = ARG_PTR_TO_SPIN_LOCK,
> +};
> +
> +notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
> +{
> +#if defined(CONFIG_SMP)
> +#ifdef CONFIG_QUEUED_SPINLOCKS
> + struct qspinlock *qlock = (void *)lock;
> +
> + queued_spin_unlock(qlock);
> +#else
> + struct dumb_spin_lock *qlock = (void *)lock;
> +
> + atomic_set_release(&qlock->val, 0);
> +#endif
> +#endif
> + return 0;
> +}
> +
> +const struct bpf_func_proto bpf_spin_unlock_proto = {
> + .func = bpf_spin_unlock,
> + .gpl_only = false,
> + .ret_type = RET_VOID,
> + .arg1_type = ARG_PTR_TO_SPIN_LOCK,
> +};
> +
> #ifdef CONFIG_CGROUPS
> BPF_CALL_0(bpf_get_current_cgroup_id)
> {

Would something like the below work for you instead?
I find it easier to read, and the additional CONFIG symbol would give
architectures (say ARM) an easy way to force the issue.
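For illustration, the opt-in could then be as simple as a Kconfig select --
entirely hypothetical sketch below; only the BPF_ARCH_SPINLOCK name comes
from the diff, the file locations and the selecting architecture are made
up:

	# kernel/bpf/Kconfig (hypothetical home for the new symbol)
	config BPF_ARCH_SPINLOCK
		bool

	# arch/arm/Kconfig (hypothetical: an arch whose arch_spinlock_t
	# fits in 32 bits and wants the arch_spin_lock() path)
	config ARM
		...
		select BPF_ARCH_SPINLOCK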
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -221,6 +221,72 @@ const struct bpf_func_proto bpf_get_curr
.arg2_type = ARG_CONST_SIZE,
};

+#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)
+
+static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
+{
+ arch_spinlock_t *l = (void *)lock;
+ BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
+ if (1) {
+ union {
+ __u32 val;
+ arch_spinlock_t lock;
+ } u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
+ compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
+ }
+ arch_spin_lock(l);
+}
+
+static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
+{
+ arch_spinlock_t *l = (void *)lock;
+ arch_spin_unlock(l);
+}
+
+#else
+
+static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
+{
+ atomic_t *l = (void *)lock;
+ do {
+ atomic_cond_read_relaxed(l, !VAL);
+ } while (atomic_xchg(l, 1));
+}
+
+static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
+{
+ atomic_t *l = (void *)lock;
+ atomic_set_release(l, 0);
+}
+
+#endif
+
+notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
+{
+ __bpf_spin_lock(lock);
+ return 0;
+}
+
+const struct bpf_func_proto bpf_spin_lock_proto = {
+ .func = bpf_spin_lock,
+ .gpl_only = false,
+ .ret_type = RET_VOID,
+ .arg1_type = ARG_PTR_TO_SPIN_LOCK,
+};
+
+notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
+{
+ __bpf_spin_unlock(lock);
+ return 0;
+}
+
+const struct bpf_func_proto bpf_spin_unlock_proto = {
+ .func = bpf_spin_unlock,
+ .gpl_only = false,
+ .ret_type = RET_VOID,
+ .arg1_type = ARG_PTR_TO_SPIN_LOCK,
+};
+
#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
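
For reference, a standalone C11 approximation of the fallback
(!CONFIG_QUEUED_SPINLOCKS && !CONFIG_BPF_ARCH_SPINLOCK) path above, i.e. a
test-and-test-and-set lock. <stdatomic.h> stands in for the kernel's
atomic_t API here (atomic_cond_read_relaxed() becomes a relaxed polling
loop, atomic_xchg() an acquire exchange, atomic_set_release() a release
store), so read it as a sketch of the intended semantics, not the kernel
code itself:

	#include <stdatomic.h>

	struct sketch_lock {
		atomic_uint val;	/* 0 == unlocked, 1 == locked */
	};

	static void sketch_spin_lock(struct sketch_lock *l)
	{
		do {
			/* read-only spin until the lock looks free;
			 * kernel: atomic_cond_read_relaxed(l, !VAL) */
			while (atomic_load_explicit(&l->val,
						    memory_order_relaxed))
				;
			/* kernel: atomic_xchg(), a fully ordered RmW;
			 * acquire is the ordering the lock relies on */
		} while (atomic_exchange_explicit(&l->val, 1,
						  memory_order_acquire));
	}

	static void sketch_spin_unlock(struct sketch_lock *l)
	{
		/* kernel: atomic_set_release() */
		atomic_store_explicit(&l->val, 0, memory_order_release);
	}

Worth noting what the compiletime_assert() in the arch_spinlock_t branch
guards: BPF map values are zero-initialized, so the union check verifies
that an all-zeroes __ARCH_SPIN_LOCK_UNLOCKED really is the unlocked state,
i.e. that a freshly created map element starts out unlocked.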