Message-Id: <20161026191810.12275-14-dh.herrmann@gmail.com>
Date: Wed, 26 Oct 2016 21:18:09 +0200
From: David Herrmann <dh.herrmann@...il.com>
To: linux-kernel@...r.kernel.org
Cc: Andy Lutomirski <luto@...capital.net>,
Jiri Kosina <jikos@...nel.org>, Greg KH <greg@...ah.com>,
Hannes Reinecke <hare@...e.com>,
Steven Rostedt <rostedt@...dmis.org>,
Arnd Bergmann <arnd@...db.de>, Tom Gundersen <teg@...m.no>,
David Herrmann <dh.herrmann@...il.com>,
Josh Triplett <josh@...htriplett.org>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Andrew Morton <akpm@...ux-foundation.org>
Subject: [RFC v1 13/14] bus1: limit and protect resources
From: Tom Gundersen <teg@...m.no>
This adds resource counters to peers and users. They limit the number
of objects that a peer can operate on. The per-user root limits can be
configured via module options; all other limits are derived from them.
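For example, loading the module with user_slices_max=4096 lowers the
per-user slice quota, and every peer created afterwards inherits its
initial limits from its user (see bus1_user_limits_init() below).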
This also adds LSM hooks. They are not integrated with ./security/ yet;
for now this only provides the stub hooks as discussed with the SELinux
maintainers. Since the operations on bus1 are very similar in nature to
Binder, the hooks mirror the Binder hooks (the LSM people seemed happy
with those, anyway).
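For comparison, the Binder hooks in include/linux/security.h have
roughly this shape (as of v4.8):

	int security_binder_transaction(struct task_struct *from,
					struct task_struct *to);
	int security_binder_transfer_binder(struct task_struct *from,
					    struct task_struct *to);
	int security_binder_transfer_file(struct task_struct *from,
					  struct task_struct *to,
					  struct file *file);

The bus1 stubs below follow the same pattern, with bus1_peer taking the
place of task_struct.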
Signed-off-by: Tom Gundersen <teg@...m.no>
Signed-off-by: David Herrmann <dh.herrmann@...il.com>
---
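Notes for reviewers:

The charge paths below rely on bus1_atomic_add_if_ge() from util.h,
introduced earlier in this series. A minimal sketch of the semantics
assumed here (not the authoritative implementation): apply @add only if
the current value is at least @t, and return the value observed before
the operation:

	static inline int bus1_atomic_add_if_ge(atomic_t *a, int add, int t)
	{
		int v, v1;

		/* cmpxchg-loop: bail out once the counter drops below @t */
		for (v = atomic_read(a); v >= t; v = v1) {
			v1 = atomic_cmpxchg(a, v, v + add);
			if (likely(v1 == v))
				return v;
		}

		return v;
	}

Hence, bus1_user_charge() below subtracts @charge only if at least
@charge units remain on both counters, and rolls back the global charge
if the local one fails.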
ipc/bus1/message.c | 47 +++++-
ipc/bus1/peer.c | 95 ++++++++++-
ipc/bus1/peer.h | 2 +
ipc/bus1/security.h | 45 +++++
ipc/bus1/user.c | 475 ++++++++++++++++++++++++++++++++++++++++++++++++++++
ipc/bus1/user.h | 75 ++++++++-
6 files changed, 728 insertions(+), 11 deletions(-)
create mode 100644 ipc/bus1/security.h
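The per-uid quota applied in bus1_user_charge_one() reserves the current
share plus twice the requested charge, so a caller can never take more
than half of what remains. A hypothetical userspace mirror of that
arithmetic (quota_ok() is illustrative only, not part of the patch):

	#include <stdio.h>

	/* A charge succeeds only if the pool that remains afterwards is
	 * still at least as large as share + charge. */
	static int quota_ok(int remaining, int share, int charge)
	{
		return remaining >= share + charge * 2;
	}

	int main(void)
	{
		printf("%d\n", quota_ok(1024, 0, 300));   /* 1: 600 <= 1024 */
		printf("%d\n", quota_ok(1024, 0, 600));   /* 0: 1200 > 1024 */
		printf("%d\n", quota_ok(1024, 400, 312)); /* 1: 1024 <= 1024 */
		return 0;
	}

An actor already holding 400 of 1024 remaining slices can thus charge at
most 312 more in one call.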
diff --git a/ipc/bus1/message.c b/ipc/bus1/message.c
index 4c5c905..6145d5f 100644
--- a/ipc/bus1/message.c
+++ b/ipc/bus1/message.c
@@ -26,6 +26,7 @@
#include "handle.h"
#include "message.h"
#include "peer.h"
+#include "security.h"
#include "tx.h"
#include "user.h"
#include "util.h"
@@ -242,9 +243,16 @@ int bus1_factory_seal(struct bus1_factory *f)
struct bus1_handle *h;
struct bus1_flist *e;
size_t i;
+ int r;
lockdep_assert_held(&f->peer->local.lock);
+ r = bus1_user_charge(&f->peer->user->limits.n_handles,
+ &f->peer->data.limits.n_handles,
+ f->n_handles_charge);
+ if (r < 0)
+ return r;
+
for (i = 0, e = f->handles;
i < f->n_handles;
e = bus1_flist_next(e, &i)) {
@@ -291,11 +299,29 @@ struct bus1_message *bus1_factory_instantiate(struct bus1_factory *f,
transmit_secctx = f->has_secctx &&
(READ_ONCE(peer->flags) & BUS1_PEER_FLAG_WANT_SECCTX);
+ r = bus1_user_charge(&peer->user->limits.n_slices,
+ &peer->data.limits.n_slices, 1);
+ if (r < 0)
+ return ERR_PTR(r);
+
+ r = bus1_user_charge(&peer->user->limits.n_handles,
+ &peer->data.limits.n_handles, f->n_handles);
+ if (r < 0) {
+ bus1_user_discharge(&peer->user->limits.n_slices,
+ &peer->data.limits.n_slices, 1);
+ return ERR_PTR(r);
+ }
+
size = sizeof(*m) + bus1_flist_inline_size(f->n_handles) +
f->n_files * sizeof(struct file *);
m = kmalloc(size, GFP_KERNEL);
- if (!m)
+ if (!m) {
+ bus1_user_discharge(&peer->user->limits.n_handles,
+ &peer->data.limits.n_handles, f->n_handles);
+ bus1_user_discharge(&peer->user->limits.n_slices,
+ &peer->data.limits.n_slices, 1);
return ERR_PTR(-ENOMEM);
+ }
/* set to default first, so the destructor can be called anytime */
kref_init(&m->ref);
@@ -329,6 +355,8 @@ struct bus1_message *bus1_factory_instantiate(struct bus1_factory *f,
m->slice = bus1_pool_alloc(&peer->data.pool, size);
mutex_unlock(&peer->data.lock);
if (IS_ERR(m->slice)) {
+ bus1_user_discharge(&peer->user->limits.n_slices,
+ &peer->data.limits.n_slices, 1);
r = PTR_ERR(m->slice);
m->slice = NULL;
goto error;
@@ -376,6 +404,11 @@ struct bus1_message *bus1_factory_instantiate(struct bus1_factory *f,
/* import files */
while (m->n_files < f->n_files) {
+ r = security_bus1_transfer_file(f->peer, peer,
+ f->files[m->n_files]);
+ if (r < 0)
+ goto error;
+
m->files[m->n_files] = get_file(f->files[m->n_files]);
++m->n_files;
}
@@ -436,10 +469,15 @@ void bus1_message_free(struct kref *k)
bus1_handle_unref(e->ptr);
}
}
+ bus1_user_discharge(&peer->user->limits.n_handles,
+ &peer->data.limits.n_handles, m->n_handles_charge);
bus1_flist_deinit(m->handles, m->n_handles);
if (m->slice) {
mutex_lock(&peer->data.lock);
+ if (!bus1_pool_slice_is_public(m->slice))
+ bus1_user_discharge(&peer->user->limits.n_slices,
+ &peer->data.limits.n_slices, 1);
bus1_pool_release_kernel(&peer->data.pool, m->slice);
mutex_unlock(&peer->data.lock);
}
@@ -575,7 +613,12 @@ int bus1_message_install(struct bus1_message *m, struct bus1_cmd_recv *param)
}
/* charge resources */
- if (!peek) {
+ if (peek) {
+ r = bus1_user_charge(&peer->user->limits.n_handles,
+ &peer->data.limits.n_handles, n_handles);
+ if (r < 0)
+ goto exit;
+ } else {
WARN_ON(n_handles < m->n_handles_charge);
m->n_handles_charge -= n_handles;
}
diff --git a/ipc/bus1/peer.c b/ipc/bus1/peer.c
index f0da4a7..db29a69 100644
--- a/ipc/bus1/peer.c
+++ b/ipc/bus1/peer.c
@@ -114,6 +114,7 @@ struct bus1_peer *bus1_peer_new(void)
mutex_init(&peer->data.lock);
peer->data.pool = BUS1_POOL_NULL;
bus1_queue_init(&peer->data.queue);
+ bus1_user_limits_init(&peer->data.limits, peer->user);
/* initialize peer-private section */
mutex_init(&peer->local.lock);
@@ -201,6 +202,8 @@ static void bus1_peer_flush(struct bus1_peer *peer, u64 flags)
rb_to_peer) {
n = atomic_xchg(&h->n_user, 0);
bus1_handle_forget_keep(h);
+ bus1_user_discharge(&peer->user->limits.n_handles,
+ &peer->data.limits.n_handles, n);
if (bus1_handle_is_anchor(h)) {
if (n > 1)
@@ -218,6 +221,9 @@ static void bus1_peer_flush(struct bus1_peer *peer, u64 flags)
bus1_pool_flush(&peer->data.pool, &n_slices);
mutex_unlock(&peer->data.lock);
+ bus1_user_discharge(&peer->user->limits.n_slices,
+ &peer->data.limits.n_slices, n_slices);
+
while ((qnode = qlist)) {
qlist = qnode->next;
qnode->next = NULL;
@@ -281,6 +287,7 @@ struct bus1_peer *bus1_peer_free(struct bus1_peer *peer)
mutex_destroy(&peer->local.lock);
/* deinitialize data section */
+ bus1_user_limits_deinit(&peer->data.limits);
bus1_queue_deinit(&peer->data.queue);
bus1_pool_deinit(&peer->data.pool);
mutex_destroy(&peer->data.lock);
@@ -311,10 +318,10 @@ static int bus1_peer_ioctl_peer_query(struct bus1_peer *peer,
mutex_lock(&peer->local.lock);
param.peer_flags = peer->flags & BUS1_PEER_FLAG_WANT_SECCTX;
- param.max_slices = -1;
- param.max_handles = -1;
- param.max_inflight_bytes = -1;
- param.max_inflight_fds = -1;
+ param.max_slices = peer->data.limits.max_slices;
+ param.max_handles = peer->data.limits.max_handles;
+ param.max_inflight_bytes = peer->data.limits.max_inflight_bytes;
+ param.max_inflight_fds = peer->data.limits.max_inflight_fds;
mutex_unlock(&peer->local.lock);
return copy_to_user(uparam, &param, sizeof(param)) ? -EFAULT : 0;
@@ -336,10 +343,14 @@ static int bus1_peer_ioctl_peer_reset(struct bus1_peer *peer,
if (unlikely(param.peer_flags != -1 &&
(param.peer_flags & ~BUS1_PEER_FLAG_WANT_SECCTX)))
return -EINVAL;
- if (unlikely(param.max_slices != -1 ||
- param.max_handles != -1 ||
- param.max_inflight_bytes != -1 ||
- param.max_inflight_fds != -1))
+ if (unlikely((param.max_slices != -1 &&
+ param.max_slices > INT_MAX) ||
+ (param.max_handles != -1 &&
+ param.max_handles > INT_MAX) ||
+ (param.max_inflight_bytes != -1 &&
+ param.max_inflight_bytes > INT_MAX) ||
+ (param.max_inflight_fds != -1 &&
+ param.max_inflight_fds > INT_MAX)))
return -EINVAL;
mutex_lock(&peer->local.lock);
@@ -347,6 +358,34 @@ static int bus1_peer_ioctl_peer_reset(struct bus1_peer *peer,
if (param.peer_flags != -1)
peer->flags = param.peer_flags;
+ if (param.max_slices != -1) {
+ atomic_add((int)param.max_slices -
+ (int)peer->data.limits.max_slices,
+ &peer->data.limits.n_slices);
+ peer->data.limits.max_slices = param.max_slices;
+ }
+
+ if (param.max_handles != -1) {
+ atomic_add((int)param.max_handles -
+ (int)peer->data.limits.max_handles,
+ &peer->data.limits.n_handles);
+ peer->data.limits.max_handles = param.max_handles;
+ }
+
+ if (param.max_inflight_bytes != -1) {
+ atomic_add((int)param.max_inflight_bytes -
+ (int)peer->data.limits.max_inflight_bytes,
+ &peer->data.limits.n_inflight_bytes);
+ peer->data.limits.max_inflight_bytes = param.max_inflight_bytes;
+ }
+
+ if (param.max_inflight_fds != -1) {
+ atomic_add((int)param.max_inflight_fds -
+ (int)peer->data.limits.max_inflight_fds,
+ &peer->data.limits.n_inflight_fds);
+ peer->data.limits.max_inflight_fds = param.max_inflight_fds;
+ }
+
bus1_peer_flush(peer, param.flags);
mutex_unlock(&peer->local.lock);
@@ -403,6 +442,8 @@ static int bus1_peer_ioctl_handle_release(struct bus1_peer *peer,
WARN_ON(atomic_dec_return(&h->n_user) < 0);
bus1_handle_forget(h);
+ bus1_user_discharge(&peer->user->limits.n_handles,
+ &peer->data.limits.n_handles, 1);
bus1_handle_release(h, strong);
r = 0;
@@ -458,7 +499,20 @@ static int bus1_peer_transfer(struct bus1_peer *src,
}
}
+ r = bus1_user_charge(&dst->user->limits.n_handles,
+ &dst->data.limits.n_handles, 1);
+ if (r < 0)
+ goto exit;
+
if (is_new) {
+ r = bus1_user_charge(&src->user->limits.n_handles,
+ &src->data.limits.n_handles, 1);
+ if (r < 0) {
+ bus1_user_discharge(&dst->user->limits.n_handles,
+ &dst->data.limits.n_handles, 1);
+ goto exit;
+ }
+
WARN_ON(src_h != bus1_handle_acquire(src_h, false));
WARN_ON(atomic_inc_return(&src_h->n_user) != 1);
}
@@ -543,6 +597,16 @@ static int bus1_peer_ioctl_nodes_destroy(struct bus1_peer *peer,
bus1_tx_init(&tx, peer);
ptr_nodes = (const u64 __user *)(unsigned long)param.ptr_nodes;
+ /*
+ * We must limit the work that user-space can dispatch in one go. We
+ * use the maximum number of handles as a natural limit. It cannot be
+ * exceeded, anyway, except by calls that would fail without it as well.
+ */
+ if (unlikely(param.n_nodes > peer->user->limits.max_handles)) {
+ r = -EINVAL;
+ goto exit;
+ }
+
for (i = 0; i < param.n_nodes; ++i) {
if (get_user(id, ptr_nodes + i)) {
r = -EFAULT;
@@ -578,6 +642,11 @@ static int bus1_peer_ioctl_nodes_destroy(struct bus1_peer *peer,
++n_charge;
}
+ r = bus1_user_charge(&peer->user->limits.n_handles,
+ &peer->data.limits.n_handles, n_charge);
+ if (r < 0)
+ goto exit;
+
/* nothing below this point can fail, anymore */
mutex_lock(&peer->data.lock);
@@ -611,6 +680,9 @@ static int bus1_peer_ioctl_nodes_destroy(struct bus1_peer *peer,
bus1_handle_unref(h);
}
+ bus1_user_discharge(&peer->user->limits.n_handles,
+ &peer->data.limits.n_handles, n_discharge);
+
r = 0;
exit:
@@ -642,6 +714,8 @@ static int bus1_peer_ioctl_slice_release(struct bus1_peer *peer,
mutex_lock(&peer->data.lock);
r = bus1_pool_release_user(&peer->data.pool, offset, &n_slices);
mutex_unlock(&peer->data.lock);
+ bus1_user_discharge(&peer->user->limits.n_slices,
+ &peer->data.limits.n_slices, n_slices);
return r;
}
@@ -747,6 +821,11 @@ static int bus1_peer_ioctl_send(struct bus1_peer *peer,
ptr_destinations =
(const u64 __user *)(unsigned long)param.ptr_destinations;
+ if (unlikely(param.n_destinations > peer->user->limits.max_handles)) {
+ r = -EINVAL;
+ goto exit;
+ }
+
factory = bus1_factory_new(peer, &param, stack, sizeof(stack));
if (IS_ERR(factory)) {
r = PTR_ERR(factory);
diff --git a/ipc/bus1/peer.h b/ipc/bus1/peer.h
index 26c051f..f601b8e 100644
--- a/ipc/bus1/peer.h
+++ b/ipc/bus1/peer.h
@@ -77,6 +77,7 @@ struct pid_namespace;
* @data.lock: data lock
* @data.pool: data pool
* @data.queue: message queue
+ * @data.limits: resource limit counter
* @local.lock: local peer runtime lock
* @local.seed: pinned seed message
* @local.map_handles: map of owned handles (by handle ID)
@@ -97,6 +98,7 @@ struct bus1_peer {
struct mutex lock;
struct bus1_pool pool;
struct bus1_queue queue;
+ struct bus1_user_limits limits;
} data;
struct {
diff --git a/ipc/bus1/security.h b/ipc/bus1/security.h
new file mode 100644
index 0000000..5addf09
--- /dev/null
+++ b/ipc/bus1/security.h
@@ -0,0 +1,45 @@
+#ifndef __BUS1_SECURITY_H
+#define __BUS1_SECURITY_H
+
+/*
+ * Copyright (C) 2013-2016 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published by the
+ * Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ */
+
+/**
+ * DOC: Security
+ *
+ * This implements LSM hooks for bus1. Out-of-tree modules cannot provide their
+ * own hooks, so we just provide stubs that are to be converted into real LSM
+ * hooks once this is no longer out-of-tree.
+ */
+
+struct bus1_handle;
+struct bus1_peer;
+struct file;
+
+static inline int security_bus1_transfer_message(struct bus1_peer *from,
+ struct bus1_peer *to)
+{
+ return 0;
+}
+
+static inline int security_bus1_transfer_handle(struct bus1_peer *from,
+ struct bus1_peer *to,
+ struct bus1_handle *node)
+{
+ return 0;
+}
+
+static inline int security_bus1_transfer_file(struct bus1_peer *from,
+ struct bus1_peer *to,
+ struct file *what)
+{
+ return 0;
+}
+
+#endif /* __BUS1_SECURITY_H */
diff --git a/ipc/bus1/user.c b/ipc/bus1/user.c
index 0498ab4..9db5ffd 100644
--- a/ipc/bus1/user.c
+++ b/ipc/bus1/user.c
@@ -23,6 +23,28 @@
static DEFINE_MUTEX(bus1_user_lock);
static DEFINE_IDR(bus1_user_idr);
+static unsigned int bus1_user_max_slices = 16384;
+static unsigned int bus1_user_max_handles = 65536;
+static unsigned int bus1_user_max_inflight_bytes = 16 * 1024 * 1024;
+static unsigned int bus1_user_max_inflight_fds = 4096;
+
+module_param_named(user_slices_max, bus1_user_max_slices,
+ uint, 0644);
+module_param_named(user_handles_max, bus1_user_max_handles,
+ uint, 0644);
+module_param_named(user_inflight_bytes_max, bus1_user_max_inflight_bytes,
+ uint, 0644);
+module_param_named(user_inflight_fds_max, bus1_user_max_inflight_fds,
+ uint, 0644);
MODULE_PARM_DESC(user_slices_max,
"Max number of slices for each user.");
MODULE_PARM_DESC(user_handles_max,
"Max number of handles for each user.");
MODULE_PARM_DESC(user_inflight_bytes_max,
"Max number of inflight bytes for each user.");
MODULE_PARM_DESC(user_inflight_fds_max,
"Max number of inflight fds for each user.");
+
/**
* bus1_user_modexit() - clean up global resources of user accounting
*
@@ -40,6 +62,113 @@ void bus1_user_modexit(void)
idr_init(&bus1_user_idr);
}
+static struct bus1_user_usage *bus1_user_usage_new(void)
+{
+ struct bus1_user_usage *usage;
+
+ usage = kzalloc(sizeof(*usage), GFP_KERNEL);
+ if (!usage)
+ return ERR_PTR(-ENOMEM);
+
+ return usage;
+}
+
+static struct bus1_user_usage *
+bus1_user_usage_free(struct bus1_user_usage *usage)
+{
+ if (usage) {
+ WARN_ON(atomic_read(&usage->n_slices));
+ WARN_ON(atomic_read(&usage->n_handles));
+ WARN_ON(atomic_read(&usage->n_bytes));
+ WARN_ON(atomic_read(&usage->n_fds));
+ kfree(usage);
+ }
+
+ return NULL;
+}
+
+/**
+ * bus1_user_limits_init() - initialize resource limit counter
+ * @limits: object to initialize
+ * @source: source to initialize from, or NULL
+ *
+ * This initializes the resource-limit counter @limits. The initial limits are
+ * taken from @source, if given. If NULL, the global default limits are taken.
+ */
+void bus1_user_limits_init(struct bus1_user_limits *limits,
+ struct bus1_user *source)
+{
+ if (source) {
+ limits->max_slices = source->limits.max_slices;
+ limits->max_handles = source->limits.max_handles;
+ limits->max_inflight_bytes = source->limits.max_inflight_bytes;
+ limits->max_inflight_fds = source->limits.max_inflight_fds;
+ } else {
+ limits->max_slices = bus1_user_max_slices;
+ limits->max_handles = bus1_user_max_handles;
+ limits->max_inflight_bytes = bus1_user_max_inflight_bytes;
+ limits->max_inflight_fds = bus1_user_max_inflight_fds;
+ }
+
+ atomic_set(&limits->n_slices, limits->max_slices);
+ atomic_set(&limits->n_handles, limits->max_handles);
+ atomic_set(&limits->n_inflight_bytes, limits->max_inflight_bytes);
+ atomic_set(&limits->n_inflight_fds, limits->max_inflight_fds);
+
+ idr_init(&limits->usages);
+}
+
+/**
+ * bus1_user_limits_deinit() - deinitialize resource limit counter
+ * @limits: object to deinitialize
+ *
+ * This should be called on destruction of @limits. It verifies the correctness
+ * of the limits and emits warnings if something went wrong.
+ */
+void bus1_user_limits_deinit(struct bus1_user_limits *limits)
+{
+ struct bus1_user_usage *usage;
+ int i;
+
+ idr_for_each_entry(&limits->usages, usage, i)
+ bus1_user_usage_free(usage);
+
+ idr_destroy(&limits->usages);
+
+ WARN_ON(atomic_read(&limits->n_slices) !=
+ limits->max_slices);
+ WARN_ON(atomic_read(&limits->n_handles) !=
+ limits->max_handles);
+ WARN_ON(atomic_read(&limits->n_inflight_bytes) !=
+ limits->max_inflight_bytes);
+ WARN_ON(atomic_read(&limits->n_inflight_fds) !=
+ limits->max_inflight_fds);
+}
+
+static struct bus1_user_usage *
+bus1_user_limits_map(struct bus1_user_limits *limits, struct bus1_user *actor)
+{
+ struct bus1_user_usage *usage;
+ int r;
+
+ usage = idr_find(&limits->usages, __kuid_val(actor->uid));
+ if (usage)
+ return usage;
+
+ usage = bus1_user_usage_new();
+ if (IS_ERR(usage))
+ return ERR_CAST(usage);
+
+ r = idr_alloc(&limits->usages, usage, __kuid_val(actor->uid),
+ __kuid_val(actor->uid) + 1, GFP_KERNEL);
+ if (r < 0) {
+ bus1_user_usage_free(usage);
+ return ERR_PTR(r);
+ }
+
+ return usage;
+}
+
static struct bus1_user *bus1_user_new(void)
{
struct bus1_user *user;
@@ -51,6 +180,7 @@ static struct bus1_user *bus1_user_new(void)
kref_init(&user->ref);
user->uid = INVALID_UID;
mutex_init(&user->lock);
+ bus1_user_limits_init(&user->limits, NULL);
return user;
}
@@ -63,6 +193,7 @@ static void bus1_user_free(struct kref *ref)
if (likely(uid_valid(user->uid)))
idr_remove(&bus1_user_idr, __kuid_val(user->uid));
+ bus1_user_limits_deinit(&user->limits);
mutex_destroy(&user->lock);
kfree_rcu(user, rcu);
}
@@ -151,3 +282,347 @@ struct bus1_user *bus1_user_unref(struct bus1_user *user)
return NULL;
}
+
+/**
+ * bus1_user_charge() - charge a user resource
+ * @global: global resource to charge on
+ * @local: local resource to charge on
+ * @charge: charge to apply
+ *
+ * This charges @charge on two resource counters. It succeeds only if both
+ * charges can be applied. It is an error to call this with negative charges.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int bus1_user_charge(atomic_t *global, atomic_t *local, int charge)
+{
+ int v;
+
+ WARN_ON(charge < 0);
+
+ if (!charge)
+ return 0;
+
+ v = bus1_atomic_add_if_ge(global, -charge, charge);
+ if (v < charge)
+ return -EDQUOT;
+
+ v = bus1_atomic_add_if_ge(local, -charge, charge);
+ if (v < charge) {
+ atomic_add(charge, global);
+ return -EDQUOT;
+ }
+
+ return 0;
+}
+
+/**
+ * bus1_user_discharge() - discharge a user resource
+ * @global: global resource to discharge on
+ * @local: local resource to discharge on
+ * @charge: charge to apply
+ *
+ * This discharges @charge on two resource counters. This always succeeds. It
+ * is an error to call this with a negative charge.
+ */
+void bus1_user_discharge(atomic_t *global, atomic_t *local, int charge)
+{
+ WARN_ON(charge < 0);
+ atomic_add(charge, local);
+ atomic_add(charge, global);
+}
+
+static int bus1_user_charge_one(atomic_t *global_remaining,
+ atomic_t *local_remaining,
+ int global_share,
+ int local_share,
+ int charge)
+{
+ int v, global_reserved, local_reserved;
+
+ WARN_ON(charge < 0);
+
+ /*
+ * Try charging a single resource type. If limits are exceeded, return
+ * an error-code, otherwise apply charges.
+ *
+ * @remaining: per-user atomic that counts all instances of this
+ * resource for this single user. It is initially set to the
+ * limit for this user. For each accounted resource, we
+ * decrement it. Thus, it must not drop below 0, or you
+ * exceeded your quota.
+ * @share: current amount of resources that the acting task has in
+ * the local peer.
+ * @charge: number of resources to charge with this operation
+ *
+ * We try charging @charge on @remaining. The applied logic is: the
+ * caller is not allowed to account for more than half of the remaining
+ * space (including its current share). That is, if 'n' free resources
+ * remain, then after charging @charge the counter must not drop below
+ * @share + @charge. In other words, the resources remaining after the
+ * charge are still at least as big as what the caller has charged in
+ * total.
+ */
+
+ if (charge > charge * 2)
+ return -EDQUOT;
+
+ global_reserved = global_share + charge * 2;
+
+ if (global_share > global_reserved || charge * 2 > global_reserved)
+ return -EDQUOT;
+
+ v = bus1_atomic_add_if_ge(global_remaining, -charge, global_reserved);
+ if (v < global_reserved)
+ return -EDQUOT;
+
+ local_reserved = local_share + charge * 2;
+
+ if (local_share > local_reserved || charge * 2 > local_reserved)
+ return -EDQUOT;
+
+ v = bus1_atomic_add_if_ge(local_remaining, -charge, local_reserved);
+ if (v < local_reserved) {
+ atomic_add(charge, global_remaining);
+ return -EDQUOT;
+ }
+
+ return 0;
+}
+
+static int bus1_user_charge_quota_locked(struct bus1_user_usage *q_global,
+ struct bus1_user_usage *q_local,
+ struct bus1_user_limits *l_global,
+ struct bus1_user_limits *l_local,
+ int n_slices,
+ int n_handles,
+ int n_bytes,
+ int n_fds)
+{
+ int r;
+
+ r = bus1_user_charge_one(&l_global->n_slices, &l_local->n_slices,
+ atomic_read(&q_global->n_slices),
+ atomic_read(&q_local->n_slices),
+ n_slices);
+ if (r < 0)
+ return r;
+
+ r = bus1_user_charge_one(&l_global->n_handles, &l_local->n_handles,
+ atomic_read(&q_global->n_handles),
+ atomic_read(&q_local->n_handles),
+ n_handles);
+ if (r < 0)
+ goto revert_slices;
+
+ r = bus1_user_charge_one(&l_global->n_inflight_bytes,
+ &l_local->n_inflight_bytes,
+ atomic_read(&q_global->n_bytes),
+ atomic_read(&q_local->n_bytes),
+ n_bytes);
+ if (r < 0)
+ goto revert_handles;
+
+ r = bus1_user_charge_one(&l_global->n_inflight_fds,
+ &l_local->n_inflight_fds,
+ atomic_read(&q_global->n_fds),
+ atomic_read(&q_local->n_fds),
+ n_fds);
+ if (r < 0)
+ goto revert_bytes;
+
+ atomic_add(n_slices, &q_global->n_slices);
+ atomic_add(n_handles, &q_global->n_handles);
+ atomic_add(n_bytes, &q_global->n_bytes);
+ atomic_add(n_fds, &q_global->n_fds);
+
+ atomic_add(n_slices, &q_local->n_slices);
+ atomic_add(n_handles, &q_local->n_handles);
+ atomic_add(n_bytes, &q_local->n_bytes);
+ atomic_add(n_fds, &q_local->n_fds);
+
+ return 0;
+
+revert_bytes:
+ atomic_add(n_bytes, &l_local->n_inflight_bytes);
+ atomic_add(n_bytes, &l_global->n_inflight_bytes);
+revert_handles:
+ atomic_add(n_handles, &l_local->n_handles);
+ atomic_add(n_handles, &l_global->n_handles);
+revert_slices:
+ atomic_add(n_slices, &l_local->n_slices);
+ atomic_add(n_slices, &l_global->n_slices);
+ return r;
+}
+
+/**
+ * bus1_user_charge_quota() - charge quota resources
+ * @user: user to charge on
+ * @actor: user to charge as
+ * @limits: local limits to charge on
+ * @n_slices: number of slices to charge
+ * @n_handles: number of handles to charge
+ * @n_bytes: number of bytes to charge
+ * @n_fds: number of FDs to charge
+ *
+ * This charges the given resources on @user and @limits. It does both local
+ * and remote charges. All charges are accounted for user @actor.
+ *
+ * Charges must be non-negative. A single call is atomic: either all charges
+ * are applied, or none is. A partial failure reverts any charge that was
+ * already applied within the call.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int bus1_user_charge_quota(struct bus1_user *user,
+ struct bus1_user *actor,
+ struct bus1_user_limits *limits,
+ int n_slices,
+ int n_handles,
+ int n_bytes,
+ int n_fds)
+{
+ struct bus1_user_usage *u_usage, *usage;
+ int r;
+
+ WARN_ON(n_slices < 0 || n_handles < 0 || n_bytes < 0 || n_fds < 0);
+
+ mutex_lock(&user->lock);
+
+ usage = bus1_user_limits_map(limits, actor);
+ if (IS_ERR(usage)) {
+ r = PTR_ERR(usage);
+ goto exit;
+ }
+
+ u_usage = bus1_user_limits_map(&user->limits, actor);
+ if (IS_ERR(u_usage)) {
+ r = PTR_ERR(u_usage);
+ goto exit;
+ }
+
+ r = bus1_user_charge_quota_locked(u_usage, usage, &user->limits,
+ limits, n_slices, n_handles,
+ n_bytes, n_fds);
+
+exit:
+ mutex_unlock(&user->lock);
+ return r;
+}
+
+/**
+ * bus1_user_discharge_quota() - discharge quota resources
+ * @user: user to charge on
+ * @actor: user to charge as
+ * @l_local: local limits to charge on
+ * @n_slices: number of slices to charge
+ * @n_handles: number of handles to charge
+ * @n_bytes: number of bytes to charge
+ * @n_fds: number of FDs to charge
+ *
+ * This discharges the given resources on @user and @l_local. It does both
+ * local and remote discharges, all accounted for user @actor.
+ */
+void bus1_user_discharge_quota(struct bus1_user *user,
+ struct bus1_user *actor,
+ struct bus1_user_limits *l_local,
+ int n_slices,
+ int n_handles,
+ int n_bytes,
+ int n_fds)
+{
+ struct bus1_user_usage *q_global, *q_local;
+ struct bus1_user_limits *l_global = &user->limits;
+
+ WARN_ON(n_slices < 0 || n_handles < 0 || n_bytes < 0 || n_fds < 0);
+
+ mutex_lock(&user->lock);
+
+ q_local = bus1_user_limits_map(l_local, actor);
+ if (WARN_ON(IS_ERR(q_local)))
+ goto exit;
+
+ q_global = bus1_user_limits_map(&user->limits, actor);
+ if (WARN_ON(IS_ERR(q_global)))
+ goto exit;
+
+ atomic_sub(n_slices, &q_global->n_slices);
+ atomic_sub(n_handles, &q_global->n_handles);
+ atomic_sub(n_bytes, &q_global->n_bytes);
+ atomic_sub(n_fds, &q_global->n_fds);
+
+ atomic_sub(n_slices, &q_local->n_slices);
+ atomic_sub(n_handles, &q_local->n_handles);
+ atomic_sub(n_bytes, &q_local->n_bytes);
+ atomic_sub(n_fds, &q_local->n_fds);
+
+ atomic_add(n_slices, &l_global->n_slices);
+ atomic_add(n_handles, &l_global->n_handles);
+ atomic_add(n_bytes, &l_global->n_inflight_bytes);
+ atomic_add(n_fds, &l_global->n_inflight_fds);
+
+ atomic_add(n_slices, &l_local->n_slices);
+ atomic_add(n_handles, &l_local->n_handles);
+ atomic_add(n_bytes, &l_local->n_inflight_bytes);
+ atomic_add(n_fds, &l_local->n_inflight_fds);
+
+exit:
+ mutex_unlock(&user->lock);
+}
+
+/**
+ * bus1_user_commit_quota() - commit quota resources
+ * @user: user to charge on
+ * @actor: user to charge as
+ * @l_local: local limits to charge on
+ * @n_slices: number of slices to charge
+ * @n_handles: number of handles to charge
+ * @n_bytes: number of bytes to charge
+ * @n_fds: number of FDs to charge
+ *
+ * This commits the given resources on @user and @l_local. Committing a quota
+ * means discharging the usage objects but leaving the limits untouched.
+ */
+void bus1_user_commit_quota(struct bus1_user *user,
+ struct bus1_user *actor,
+ struct bus1_user_limits *l_local,
+ int n_slices,
+ int n_handles,
+ int n_bytes,
+ int n_fds)
+{
+ struct bus1_user_usage *q_global, *q_local;
+ struct bus1_user_limits *l_global = &user->limits;
+
+ WARN_ON(n_slices < 0 || n_handles < 0 || n_bytes < 0 || n_fds < 0);
+
+ mutex_lock(&user->lock);
+
+ q_local = bus1_user_limits_map(l_local, actor);
+ if (WARN_ON(IS_ERR(q_local)))
+ goto exit;
+
+ q_global = bus1_user_limits_map(&user->limits, actor);
+ if (WARN_ON(IS_ERR(q_global)))
+ goto exit;
+
+ atomic_sub(n_slices, &q_global->n_slices);
+ atomic_sub(n_handles, &q_global->n_handles);
+ atomic_sub(n_bytes, &q_global->n_bytes);
+ atomic_sub(n_fds, &q_global->n_fds);
+
+ atomic_sub(n_slices, &q_local->n_slices);
+ atomic_sub(n_handles, &q_local->n_handles);
+ atomic_sub(n_bytes, &q_local->n_bytes);
+ atomic_sub(n_fds, &q_local->n_fds);
+
+ atomic_add(n_bytes, &l_global->n_inflight_bytes);
+ atomic_add(n_fds, &l_global->n_inflight_fds);
+
+ atomic_add(n_bytes, &l_local->n_inflight_bytes);
+ atomic_add(n_fds, &l_local->n_inflight_fds);
+
+exit:
+ mutex_unlock(&user->lock);
+}
diff --git a/ipc/bus1/user.h b/ipc/bus1/user.h
index 6cdc264..48f987c8 100644
--- a/ipc/bus1/user.h
+++ b/ipc/bus1/user.h
@@ -41,6 +41,45 @@
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/uidgid.h>
+#include "util.h"
+
+/**
+ * struct bus1_user_usage - usage counters
+ * @n_slices: number of used slices
+ * @n_handles: number of used handles
+ * @n_bytes: number of used bytes
+ * @n_fds: number of used fds
+ */
+struct bus1_user_usage {
+ atomic_t n_slices;
+ atomic_t n_handles;
+ atomic_t n_bytes;
+ atomic_t n_fds;
+};
+
+/**
+ * struct bus1_user_limits - resource limit counters
+ * @n_slices: number of remaining quota for owned slices
+ * @n_handles: number of remaining quota for owned handles
+ * @n_inflight_bytes: number of remaining quota for inflight bytes
+ * @n_inflight_fds: number of remaining quota for inflight FDs
+ * @max_slices: maximum number of owned slices
+ * @max_handles: maximum number of owned handles
+ * @max_inflight_bytes: maximum number of inflight bytes
+ * @max_inflight_fds: maximum number of inflight FDs
+ * @usages: idr of usage entries per uid
+ */
+struct bus1_user_limits {
+ atomic_t n_slices;
+ atomic_t n_handles;
+ atomic_t n_inflight_bytes;
+ atomic_t n_inflight_fds;
+ unsigned int max_slices;
+ unsigned int max_handles;
+ unsigned int max_inflight_bytes;
+ unsigned int max_inflight_fds;
+ struct idr usages;
+};
/**
* struct bus1_user - resource accounting for users
@@ -48,20 +87,54 @@
* @uid: UID of the user
* @lock: object lock
* @rcu: rcu
+ * @limits: resource limit counters
*/
struct bus1_user {
struct kref ref;
kuid_t uid;
struct mutex lock;
- struct rcu_head rcu;
+ union {
+ struct rcu_head rcu;
+ struct bus1_user_limits limits;
+ };
};
/* module cleanup */
void bus1_user_modexit(void);
+/* limits */
+void bus1_user_limits_init(struct bus1_user_limits *limits,
+ struct bus1_user *source);
+void bus1_user_limits_deinit(struct bus1_user_limits *limits);
+
/* users */
struct bus1_user *bus1_user_ref_by_uid(kuid_t uid);
struct bus1_user *bus1_user_ref(struct bus1_user *user);
struct bus1_user *bus1_user_unref(struct bus1_user *user);
+/* charges */
+int bus1_user_charge(atomic_t *global, atomic_t *local, int charge);
+void bus1_user_discharge(atomic_t *global, atomic_t *local, int charge);
+int bus1_user_charge_quota(struct bus1_user *user,
+ struct bus1_user *actor,
+ struct bus1_user_limits *limits,
+ int n_slices,
+ int n_handles,
+ int n_bytes,
+ int n_fds);
+void bus1_user_discharge_quota(struct bus1_user *user,
+ struct bus1_user *actor,
+ struct bus1_user_limits *l_local,
+ int n_slices,
+ int n_handles,
+ int n_bytes,
+ int n_fds);
+void bus1_user_commit_quota(struct bus1_user *user,
+ struct bus1_user *actor,
+ struct bus1_user_limits *l_local,
+ int n_slices,
+ int n_handles,
+ int n_bytes,
+ int n_fds);
+
#endif /* __BUS1_USER_H */
--
2.10.1