Message-ID: <1436486405.27924.32.camel@stgolabs.net>
Date: Thu, 09 Jul 2015 17:00:05 -0700
From: Davidlohr Bueso <dave@...olabs.net>
To: Doug Ledford <dledford@...hat.com>
Cc: mtk.manpages@...il.com, Marcus Gelderie <redmnic@...il.com>,
lkml <linux-kernel@...r.kernel.org>,
David Howells <dhowells@...hat.com>,
Alexander Viro <viro@...iv.linux.org.uk>,
John Duffy <jb_duffy@...nternet.com>,
Arto Bendiken <arto@...diken.net>,
Linux API <linux-api@...r.kernel.org>,
Andrew Morton <akpm@...ux-foundation.org>
Subject: Re: [PATCH v3] ipc: Modify message queue accounting to not take
kernel data structures into account
While on the subject, after taking another look at where and how we
calculate the treesize, I think we can remove the following check from
attribute validation (mq_attr_ok() -- which itself could use some simplifying):
	mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);
	total_size = attr->mq_maxmsg * attr->mq_msgsize;
	if (total_size + mq_treesize < total_size)
		return -EOVERFLOW;
	^^^ this condition is never true, so we never actually return -EOVERFLOW here.
	return 0;
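For a rough sense of the magnitudes involved, the sketch below redoes the
same arithmetic in userspace for typical default attributes (mq_maxmsg = 10,
mq_msgsize = 8192). The two struct sizes are made-up placeholders, not the
kernel's real sizeof(struct msg_msg) / sizeof(struct posix_msg_tree_node)
values, which vary by kernel and arch:

/* Userspace sketch of the mqueue accounting arithmetic.
 * MSG_MSG_SIZE and TREE_NODE_SIZE are illustrative assumptions only.
 */
#include <stdio.h>

#define MQ_PRIO_MAX	32768	/* include/uapi/linux/mqueue.h */
#define MSG_MSG_SIZE	48UL	/* assumed */
#define TREE_NODE_SIZE	40UL	/* assumed */

int main(void)
{
	unsigned long mq_maxmsg = 10, mq_msgsize = 8192;	/* typical defaults */
	unsigned long nodes = mq_maxmsg < MQ_PRIO_MAX ? mq_maxmsg : MQ_PRIO_MAX;
	unsigned long mq_treesize = mq_maxmsg * MSG_MSG_SIZE +
				    nodes * TREE_NODE_SIZE;
	unsigned long total_size = mq_maxmsg * mq_msgsize;

	printf("treesize=%lu payload=%lu accounted=%lu bytes\n",
	       mq_treesize, total_size, mq_treesize + total_size);
	return 0;
}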
The patch below gets rid of this bogus check and refactors the duplicated
size calculations, introducing a mqueue_sizeof() helper to do the job of
computing the total mqueue overhead.
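For context, these accounted bytes matter because they are charged against
the caller's RLIMIT_MSGQUEUE at queue-creation time. A minimal userspace
sketch (queue name and attribute values here are arbitrary; link with -lrt):

/* Creating a queue charges the accounted bytes against RLIMIT_MSGQUEUE;
 * exceeding the limit makes mq_open() fail, typically with EMFILE.
 */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <mqueue.h>
#include <sys/resource.h>
#include <sys/stat.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 8192 };
	struct rlimit rl;
	mqd_t mq;

	getrlimit(RLIMIT_MSGQUEUE, &rl);
	printf("RLIMIT_MSGQUEUE: %lu bytes\n", (unsigned long)rl.rlim_cur);

	mq = mq_open("/mq-acct-demo", O_CREAT | O_RDWR, 0600, &attr);
	if (mq == (mqd_t)-1) {
		printf("mq_open: %s\n", strerror(errno));
		return 1;
	}
	mq_close(mq);
	mq_unlink("/mq-acct-demo");
	return 0;
}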
Thoughts?
Thanks,
Davidlohr
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 161a180..a5d0c9e 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -209,6 +209,31 @@ try_again:
 	return msg;
 }
 
+/*
+ * We used to allocate a static array of pointers and account
+ * the size of that array as well as one msg_msg struct per
+ * possible message into the queue size. That's no longer
+ * accurate as the queue is now an rbtree and will grow and
+ * shrink depending on usage patterns. We can, however, still
+ * account one msg_msg struct per message, but the nodes are
+ * allocated depending on priority usage, and most programs
+ * only use one, or a handful, of priorities. However, since
+ * this is pinned memory, we need to assume worst case, so
+ * that means the min(mq_maxmsg, max_priorities) * struct
+ * posix_msg_tree_node.
+ */
+static inline unsigned long mqueue_sizeof(struct mqueue_inode_info *info)
+{
+	unsigned long mq_treesize, mq_max_msgsize;
+
+	mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
+		min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
+		sizeof(struct posix_msg_tree_node);
+
+	mq_max_msgsize = info->attr.mq_maxmsg * info->attr.mq_msgsize;
+	return mq_treesize + mq_max_msgsize; /* bytes */
+}
+
 static struct inode *mqueue_get_inode(struct super_block *sb,
 		struct ipc_namespace *ipc_ns, umode_t mode,
 		struct mq_attr *attr)
@@ -229,7 +254,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
 
 	if (S_ISREG(mode)) {
 		struct mqueue_inode_info *info;
-		unsigned long mq_bytes, mq_treesize;
+		unsigned long mq_bytes;
 
 		inode->i_fop = &mqueue_file_operations;
 		inode->i_size = FILENT_SIZE;
@@ -254,25 +279,8 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
 			info->attr.mq_maxmsg = attr->mq_maxmsg;
 			info->attr.mq_msgsize = attr->mq_msgsize;
 		}
-		/*
-		 * We used to allocate a static array of pointers and account
-		 * the size of that array as well as one msg_msg struct per
-		 * possible message into the queue size. That's no longer
-		 * accurate as the queue is now an rbtree and will grow and
-		 * shrink depending on usage patterns. We can, however, still
-		 * account one msg_msg struct per message, but the nodes are
-		 * allocated depending on priority usage, and most programs
-		 * only use one, or a handful, of priorities. However, since
-		 * this is pinned memory, we need to assume worst case, so
-		 * that means the min(mq_maxmsg, max_priorities) * struct
-		 * posix_msg_tree_node.
-		 */
-		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
-			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
-			sizeof(struct posix_msg_tree_node);
-
-		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
-					  info->attr.mq_msgsize);
+		mq_bytes = mqueue_sizeof(info);
 
 		spin_lock(&mq_lock);
 		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
@@ -371,7 +379,7 @@ static void mqueue_evict_inode(struct inode *inode)
 {
 	struct mqueue_inode_info *info;
 	struct user_struct *user;
-	unsigned long mq_bytes, mq_treesize;
+	unsigned long mq_bytes;
 	struct ipc_namespace *ipc_ns;
 	struct msg_msg *msg;
 
@@ -388,13 +396,7 @@ static void mqueue_evict_inode(struct inode *inode)
 	kfree(info->node_cache);
 	spin_unlock(&info->lock);
 
-	/* Total amount of bytes accounted for the mqueue */
-	mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
-		min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
-		sizeof(struct posix_msg_tree_node);
-
-	mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
-				  info->attr.mq_msgsize);
+	mq_bytes = mqueue_sizeof(info);
 
 	user = info->user;
 	if (user) {
@@ -692,9 +694,6 @@ static void remove_notification(struct mqueue_inode_info *info)
 
 static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
 {
-	int mq_treesize;
-	unsigned long total_size;
-
 	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
 		return -EINVAL;
 	if (capable(CAP_SYS_RESOURCE)) {
@@ -709,12 +708,6 @@ static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
 	/* check for overflow */
 	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
 		return -EOVERFLOW;
-	mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
-		min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
-		sizeof(struct posix_msg_tree_node);
-	total_size = attr->mq_maxmsg * attr->mq_msgsize;
-	if (total_size + mq_treesize < total_size)
-		return -EOVERFLOW;
 	return 0;
 }
--