Date:   Wed, 26 Oct 2016 21:17:57 +0200
From:   David Herrmann <dh.herrmann@...il.com>
To:     linux-kernel@...r.kernel.org
Cc:     Andy Lutomirski <luto@...capital.net>,
        Jiri Kosina <jikos@...nel.org>, Greg KH <greg@...ah.com>,
        Hannes Reinecke <hare@...e.com>,
        Steven Rostedt <rostedt@...dmis.org>,
        Arnd Bergmann <arnd@...db.de>, Tom Gundersen <teg@...m.no>,
        David Herrmann <dh.herrmann@...il.com>,
        Josh Triplett <josh@...htriplett.org>,
        Linus Torvalds <torvalds@...ux-foundation.org>,
        Andrew Morton <akpm@...ux-foundation.org>
Subject: [RFC v1 01/14] bus1: add bus1(7) man-page

From: Tom Gundersen <teg@...m.no>

Add new directory ./Documentation/bus1/ and include DocBook scripts to
build the bus1(7) man-page. This documents the bus1 character-device,
including all the file-operations you can perform on it.

Furthermore, the man-page introduces the core bus1 concepts and
explains how they work.

Build the bus1 documentation via:

    $ make -C Documentation/bus1/ mandocs
    $ make -C Documentation/bus1/ htmldocs

Signed-off-by: Tom Gundersen <teg@...m.no>
Signed-off-by: David Herrmann <dh.herrmann@...il.com>
---
 Documentation/bus1/.gitignore     |   2 +
 Documentation/bus1/Makefile       |  41 ++
 Documentation/bus1/bus1.xml       | 833 ++++++++++++++++++++++++++++++++++++++
 Documentation/bus1/stylesheet.xsl |  16 +
 4 files changed, 892 insertions(+)
 create mode 100644 Documentation/bus1/.gitignore
 create mode 100644 Documentation/bus1/Makefile
 create mode 100644 Documentation/bus1/bus1.xml
 create mode 100644 Documentation/bus1/stylesheet.xsl

diff --git a/Documentation/bus1/.gitignore b/Documentation/bus1/.gitignore
new file mode 100644
index 0000000..b4a77cc
--- /dev/null
+++ b/Documentation/bus1/.gitignore
@@ -0,0 +1,2 @@
+*.7
+*.html
diff --git a/Documentation/bus1/Makefile b/Documentation/bus1/Makefile
new file mode 100644
index 0000000..d2b9e61
--- /dev/null
+++ b/Documentation/bus1/Makefile
@@ -0,0 +1,41 @@
+cmd = $(cmd_$(1))
+srctree = $(shell pwd)
+src = .
+obj = $(srctree)/$(src)
+
+DOCS := \
+	bus1.xml
+
+XMLFILES := $(addprefix $(obj)/,$(DOCS))
+MANFILES := $(patsubst %.xml, %.7, $(XMLFILES))
+HTMLFILES := $(patsubst %.xml, %.html, $(XMLFILES))
+
+XMLTO_ARGS := \
+	-m $(srctree)/$(src)/stylesheet.xsl \
+	--skip-validation \
+	--stringparam funcsynopsis.style=ansi \
+	--stringparam man.output.quietly=1 \
+	--stringparam man.authors.section.enabled=0 \
+	--stringparam man.copyright.section.enabled=0
+
+quiet_cmd_db2man = MAN     $@
+      cmd_db2man = xmlto man $(XMLTO_ARGS) -o $(obj) $<
+%.7: %.xml
+	@(which xmlto > /dev/null 2>&1) || \
+	 (echo "*** You need to install xmlto ***"; \
+	  exit 1)
+	$(call cmd,db2man)
+
+quiet_cmd_db2html = HTML    $@
+      cmd_db2html = xmlto html-nochunks $(XMLTO_ARGS) -o $(obj) $<
+%.html: %.xml
+	@(which xmlto > /dev/null 2>&1) || \
+	 (echo "*** You need to install xmlto ***"; \
+	  exit 1)
+	$(call cmd,db2html)
+
+mandocs: $(MANFILES)
+
+htmldocs: $(HTMLFILES)
+
+clean-files := $(MANFILES) $(HTMLFILES)
diff --git a/Documentation/bus1/bus1.xml b/Documentation/bus1/bus1.xml
new file mode 100644
index 0000000..40b55c0
--- /dev/null
+++ b/Documentation/bus1/bus1.xml
@@ -0,0 +1,833 @@
+<?xml version='1.0'?> <!--*-nxml-*-->
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
+
+<refentry id="bus1">
+
+  <refentryinfo>
+    <title>bus1</title>
+    <productname>bus1</productname>
+
+    <authorgroup>
+      <author>
+        <contrib>Documentation</contrib>
+        <firstname>David</firstname>
+        <surname>Herrmann</surname>
+      </author>
+      <author>
+        <contrib>Documentation</contrib>
+        <firstname>Tom</firstname>
+        <surname>Gundersen</surname>
+      </author>
+    </authorgroup>
+  </refentryinfo>
+
+  <refmeta>
+    <refentrytitle>bus1</refentrytitle>
+    <manvolnum>7</manvolnum>
+  </refmeta>
+
+  <refnamediv>
+    <refname>bus1</refname>
+    <refpurpose>Kernel Message Bus</refpurpose>
+  </refnamediv>
+
+  <refsynopsisdiv>
+    <funcsynopsis>
+      <funcsynopsisinfo>#include &lt;linux/bus1.h&gt;</funcsynopsisinfo>
+    </funcsynopsis>
+  </refsynopsisdiv>
+
+  <refsect1> <!-- DESCRIPTION -->
+    <title>Description</title>
+    <para>
+The bus1 Kernel Message Bus defines and implements a distributed object model.
+It allows local processes to send messages to objects owned by remote processes,
+as well as share their own objects with others. Object ownership is static and
+cannot be transferred. Access to remote objects is prohibited, unless it was
+explicitly granted. Processes can transmit messages to a remote object via the
+message bus, transferring a data payload, object access rights, file
+descriptors, or other auxiliary data.
+    </para>
+    <para>
+To participate on the message bus, a peer context must be created. Peer contexts
+are kernel objects, identified by a file descriptor. They are not bound to any
+process, but can be shared freely. The peer context provides a message queue to
+store all incoming messages, a registry for all locally owned objects, and
+tracks access rights to remote objects. A peer context never serves as a
+routing entity, but merely as an anchor for peer-owned resources. Any message on
+the bus is always destined for an object, and the bus takes care to transfer a
+message into the message queue of the peer context that owns this object.
+    </para>
+    <para>
+The message bus manages object access using capabilities. That is, by default
+only the owner of an object is granted access rights. No other peer can access
+the object, nor are they aware of the existence of the object. However, access
+rights can be transmitted as auxiliary data with any message, effectively
+granting them to the receiver of the message. This even works transitively, that
+is, any peer that was granted access to an object can pass on those rights, even
+if they do not own the object. Note, however, that access rights can never be
+revoked, except by the owner destroying the object.
+    </para>
+
+    <refsect2>
+      <title>Nodes and Handles</title>
+      <para>
+Each peer context comes with a registry of owned objects, which in bus1
+parlance are called <emphasis>nodes</emphasis>. A peer is always the exclusive
+owner of all nodes it has created. Ownership cannot be transferred. The message
+bus manages access rights to nodes as a set of <emphasis>handles</emphasis> held
+by each peer. For each node a peer has access to, whether it is local or remote,
+the message bus keeps a handle on the peer. Initially, when a node is created, the
+node owner is the only peer with a handle to the newly created node. Handles are
+local to each peer, but can be transmitted as auxiliary data with any message,
+effectively allocating a new handle to the same node in the destination peer.
+This works transitively, and each peer that holds a handle can pass it on
+further, or deliberately drop it. As long as a peer has a handle to a node it
+can send messages to it. However, a node owner can, at any time, decide to
+destroy a node. This causes all further message transactions to this node to
+fail, although messages that have already been queued for the node are still
+delivered. When a node is destroyed, all peers that hold handles to the node are
+notified of the destruction. Moreover, if the owner of a node that has been
+destroyed releases all its handles to the node, no further messages or
+notifications destined for the node are delivered.
+      </para>
+      <para>
+Handles are the only way to refer to both local and remote nodes. For each
+handle allocated on a peer, a 64-bit ID is assigned to identify that particular
+handle on that particular peer. The ID is only valid locally on that peer; it
+cannot be used by remote peers to address the handle (in other words, the ID
+namespace is tied to each peer and does not define global entities). When
+creating a new node, userspace freely selects the ID except that the
+<constant>BUS1_HANDLE_FLAG_MANAGED</constant> bit must be cleared, and when
+receiving a handle from a remote peer the kernel assigns the ID, which always
+has the <constant>BUS1_HANDLE_FLAG_MANAGED</constant> flag set. Additionally, the
+<constant>BUS1_HANDLE_FLAG_REMOTE</constant> flag tells whether a specific ID
+refers to a remote handle (if set), or to an owner handle (if unset). An ID
+assigned by the
+kernel is never reused, even after a handle has been dropped. The kernel keeps a
+user-reference count for each handle. Every time a handle is exposed to a peer,
+the user-reference count of that handle is incremented by one. This is never
+done asynchronously, but only synchronously when an ioctl is called by the
+holding peer. Therefore, a peer can reliably deduce the current user-reference
+count of all its handles, regardless of any ongoing message transaction.
+References can be explicitly dropped by a peer. Once the counter of a handle
+hits zero, it is destroyed, its ID becomes invalid, and if it was assigned by
+the kernel, it will not be reused again. Note that a peer can never have
+multiple different handles to the same node; rather, the kernel always coalesces
+them into a single handle, using the user-reference counter to track it.
+However, if a handle is fully released, but the peer later acquires a handle to
+the same remote node again, its ID will be different, as IDs are never reused.
+      </para>
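+      <para>
+As an illustration of the ID layout described above, a peer could classify any
+of its handle IDs purely from the two flag bits (a sketch; the helper name is
+arbitrary and not part of the bus1 API):
+      </para>
+<programlisting>
+#include &lt;linux/bus1.h&gt;
+
+/* Sketch: decode the documented flag bits of a handle ID. */
+static const char *handle_kind(__u64 id)
+{
+        if (!(id &amp; BUS1_HANDLE_FLAG_MANAGED))
+                return "owner handle, ID chosen by userspace";
+        if (id &amp; BUS1_HANDLE_FLAG_REMOTE)
+                return "handle to a remote node, ID assigned by the kernel";
+        return "owner handle, ID assigned by the kernel";
+}
+</programlisting>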
+      <para>
+New nodes are allocated on-demand by passing the desired ID to the kernel in any
+ioctl that accepts a handle ID. When allocating a new node, the node owner
+implicitly also gets a handle to that node. As long as the node is valid, the
+kernel will pin a single user-reference to the owner's handle. This guarantees
+that a node owner always retains access to their node, until they explicitly
+destroy it (which will make it possible for userspace to release the handle like
+any other). Once all the handles to a local node have been released, no more
+messages destined for the node will be received. Otherwise, a handle to a local
+node behaves just like any other handle, that is, user-references are acquired
+and released according to its use. However, whenever the overall sum of all
+user-references on all handles to a node drops to one (which implies that only
+the pinned reference of the owner is left), a release-notification is queued on
+the node owner. If the counter is incremented again, any such notification is
+dropped, if not already dequeued.
+      </para>
+    </refsect2>
+
+    <refsect2>
+      <title>Message Transactions</title>
+      <para>
+A message transaction atomically transfers a message to any number of
+destinations. Unless requested otherwise, the message transaction fully succeeds
+or fully fails.
+      </para>
+      <para>
+To receive message payloads, each peer has an associated shmem-backed
+<emphasis>pool</emphasis> which may be mapped read-only by the receiving peer.
+The kernel copies the message payload directly from the sending peer to each of
+the receivers' pools without an intermediary kernel buffer. The pool is divided
+into <emphasis>slices</emphasis> to hold each message. When a message is
+received, its <emphasis>offset</emphasis> into the pool in bytes is returned to
+userspace, and userspace has to explicitly release the slice once it has
+finished with it.
+      </para>
+      <para>
+The kernel amends all data messages with the <varname>uid</varname>,
+<varname>gid</varname>, <varname>pid</varname>, <varname>tid</varname>, and
+optionally the security context of the sending peer. The information is
+collected from the sending peer when the message is sent and translated into the
+namespaces of the receiving peer's file-descriptor.
+      </para>
+    </refsect2>
+
+    <refsect2>
+      <title>Seed Message</title>
+      <para>
+Every peer may pin a special <emphasis>seed</emphasis> message. Only the peer
+itself may set and retrieve the seed, and at most one seed message may be pinned
+at any given time. The seed typically describes the peer itself and pins any
+nodes and handles necessary to bootstrap the peer.
+      </para>
+    </refsect2>
+
+    <refsect2>
+      <title>Resource quotas</title>
+      <para>
+Each user has a fixed amount of available resources. The limits are static, but
+may be overridden by module parameters. Limits are placed on the amount of
+memory a user's pools may consume, the number of handles a user may hold,
+the number of inflight messages that may be destined for a user, and the number
+of file descriptors that may be inflight to a user. All inflight resources are accounted
+on the receiving peer.
+      </para>
+      <para>
+As resources are accounted on the receiver, a quota mechanism is in place in
+order to avoid intentional or unintentional resource exhaustion by a malicious
+or broken sending user. At the time of a message transaction, the sending user
+may consume in total (including what is consumed by previous transactions) half
+of the total resources of the receiving user that have not been consumed by
+another user. When a message is dequeued, its resource consumption is deducted
+from the sending user's quota.
+      </para>
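+      <para>
+As a purely illustrative example of this rule (the numbers are hypothetical):
+if a receiving user may hold at most 1024 inflight messages, and 256 of them
+are already consumed by other sending users, then a given sending user may, at
+the time of a transaction, account at most half of the remaining 768 messages,
+that is 384, against that receiving user.
+      </para>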
+      <para>
+If a receiving peer does not dequeue any of its incoming messages, it would be
+possible for a user's quota to be fully consumed by one peer, making it
+impossible to communicate with other functioning peers owned by the same user. A
+second quota is therefore enforced per peer, ensuring that at the time of a
+message transaction the receiving peer may consume in total (including what
+is consumed by previous transactions) half of the total resources available to
+the sending user that have not been consumed by another peer.
+      </para>
+    </refsect2>
+
+    <refsect2>
+      <title>Global Ordering</title>
+      <para>
+Despite there being no global synchronization, all events on the bus, such as
+sending or receiving of messages, release of handles or destruction of nodes,
+behave as if they were globally ordered. That is, for any two events it is
+always possible to consider one to have happened before the other in such a way
+that it is consistent with all the effects observed on the bus.
+      </para>
+      <para>
+For instance, if two events occur on one peer (say the sending of a message,
+and the destruction of a node), and they are observed on another peer (by
+receiving the message and receiving a destruction notification for the node), we
+are guaranteed that the order the events occurred in and the order they were
+observed in are the same.
+      </para>
+      <para>
+One could consider a further example involving three peers: if a message is sent
+from one peer to two others, and after receiving the message the first recipient
+sends a further message to the second recipient, it is guaranteed that the
+original message is received before the subsequent one.
+      </para>
+      <para>
+This principle of causality is also respected in the presence of side-channel
+communication. That is, if one event may have triggered another, even if on
+different, disconnected, peers, we are guaranteed that the events are ordered
+accordingly. To be precise, if one event (such as receiving a message) completed
+before another (such as sending a message) was started, then they are ordered
+accordingly.
+      </para>
+      <para>
+Even where there can be no causal relationship, we are still guaranteed a
+global order. If two events happened concurrently, there can never be any
+inconsistency in which one occurred before the other. By way of example, if
+two peers each send one message to two different peers, we are guaranteed
+that both the recipient peers receive the two messages in the same order, even
+though the order may be arbitrary.
+      </para>
+    </refsect2>
+
+    <refsect2>
+      <title>Operating on a bus1 file descriptor</title>
+      <para>
+The bus1 peer file descriptor supports the following operations:
+      </para>
+      <variablelist>
+        <varlistentry> <!-- FOPS OPEN -->
+          <term>
+            <citerefentry>
+              <refentrytitle>open</refentrytitle>
+              <manvolnum>2</manvolnum>
+            </citerefentry>
+          </term>
+          <listitem>
+            <para>
+A call to
+<citerefentry>
+  <refentrytitle>open</refentrytitle><manvolnum>2</manvolnum>
+</citerefentry>
+on the bus1 character device (usually <filename>/dev/bus1</filename>) creates a
+new peer context identified by the returned file descriptor.
+            </para>
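+            <para>
+For example (a sketch; the open flags and error handling are choices of this
+example, not requirements of bus1):
+            </para>
+<programlisting>
+#include &lt;fcntl.h&gt;
+
+/* Sketch: create a new peer context by opening the bus1 character device. */
+static int peer_new(void)
+{
+        return open("/dev/bus1", O_RDWR | O_CLOEXEC);
+}
+</programlisting>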
+          </listitem>
+        </varlistentry> <!-- FOPS OPEN -->
+
+        <varlistentry> <!-- FOPS POLL -->
+          <term>
+            <citerefentry>
+              <refentrytitle>poll</refentrytitle>
+              <manvolnum>2</manvolnum>
+            </citerefentry>
+          </term>
+          <term>
+            <citerefentry>
+              <refentrytitle>select</refentrytitle>
+              <manvolnum>2</manvolnum>
+            </citerefentry>
+          </term>
+          <term>(and similar)</term>
+          <listitem>
+            <para>
+The file descriptor supports
+<citerefentry>
+  <refentrytitle>poll</refentrytitle><manvolnum>2</manvolnum>
+</citerefentry>
+(and analogously
+<citerefentry>
+  <refentrytitle>epoll</refentrytitle><manvolnum>7</manvolnum>
+</citerefentry>) and
+<citerefentry>
+  <refentrytitle>select</refentrytitle><manvolnum>2</manvolnum>
+</citerefentry>, as follows:
+            </para>
+
+            <itemizedlist>
+              <listitem>
+                <para>
+The file descriptor is readable (the <varname>readfds</varname> argument of
+<citerefentry>
+  <refentrytitle>select</refentrytitle><manvolnum>2</manvolnum>
+</citerefentry>;
+the <constant>POLLIN</constant> flag of
+<citerefentry>
+  <refentrytitle>poll</refentrytitle><manvolnum>2</manvolnum>
+</citerefentry>)
+if one or more messages are ready to be dequeued.
+                </para>
+              </listitem>
+
+              <listitem>
+                <para>
+The file descriptor is writable (the <varname>writefds</varname> argument of
+<citerefentry>
+  <refentrytitle>select</refentrytitle><manvolnum>2</manvolnum>
+</citerefentry>;
+the <constant>POLLOUT</constant> flag of
+<citerefentry>
+  <refentrytitle>poll</refentrytitle><manvolnum>2</manvolnum>
+</citerefentry>)
+if the peer has not been shut down yet (i.e., the peer can still be used to send
+messages).
+                </para>
+              </listitem>
+
+              <listitem>
+                <para>
+The file descriptor signals a hang-up (overloaded on the
+<varname>readfds</varname> argument of
+<citerefentry>
+  <refentrytitle>select</refentrytitle><manvolnum>2</manvolnum>
+</citerefentry>;
+the <constant>POLLHUP</constant> flag of
+<citerefentry>
+  <refentrytitle>poll</refentrytitle><manvolnum>2</manvolnum>
+</citerefentry>)
+if the peer has been shut down.
+                </para>
+              </listitem>
+            </itemizedlist>
+
+            <para>
+The bus1 peer file descriptor also supports the other file descriptor
+multiplexing APIs:
+<citerefentry>
+  <refentrytitle>pselect</refentrytitle><manvolnum>2</manvolnum>
+</citerefentry>, and
+<citerefentry>
+  <refentrytitle>ppoll</refentrytitle><manvolnum>2</manvolnum>
+</citerefentry>.
+            </para>
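+            <para>
+The following sketch waits for activity on a peer file descriptor, using only
+the semantics listed above (the helper name is arbitrary):
+            </para>
+<programlisting>
+#include &lt;poll.h&gt;
+
+/* Sketch: block until a message can be dequeued or the peer is shut down. */
+static int peer_wait(int fd)
+{
+        struct pollfd pfd = { .fd = fd, .events = POLLIN };
+
+        if (poll(&amp;pfd, 1, -1) &lt; 0)
+                return -1;              /* poll() failed, see errno */
+        if (pfd.revents &amp; POLLHUP)
+                return 0;               /* the peer has been shut down */
+        return 1;                       /* a message is ready to be dequeued */
+}
+</programlisting>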
+          </listitem>
+        </varlistentry> <!-- FOPS POLL -->
+
+        <varlistentry> <!-- FOPS MMAP -->
+          <term>
+            <citerefentry>
+              <refentrytitle>mmap</refentrytitle>
+              <manvolnum>2</manvolnum>
+            </citerefentry>
+          </term>
+          <listitem>
+            <para>
+A call to
+<citerefentry>
+  <refentrytitle>mmap</refentrytitle><manvolnum>2</manvolnum>
+</citerefentry>
+installs a memory mapping of the peer's message pool into the caller's
+address-space. No writable mappings are allowed. Furthermore, the pool has no
+fixed size, but grows dynamically with the demands of the peer.
+            </para>
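+            <para>
+For example, the pool could be mapped as sketched below; the mapping size and
+the use of <constant>MAP_SHARED</constant> are choices of this example:
+            </para>
+<programlisting>
+#include &lt;stddef.h&gt;
+#include &lt;sys/mman.h&gt;
+
+/* Sketch: map 'size' bytes of the peer's message pool, read-only. */
+static void *peer_map_pool(int fd, size_t size)
+{
+        return mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
+}
+</programlisting>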
+          </listitem>
+        </varlistentry> <!-- FOPS MMAP -->
+
+        <varlistentry> <!-- FOPS IOCTL -->
+          <term>
+            <citerefentry>
+              <refentrytitle>ioctl</refentrytitle>
+              <manvolnum>2</manvolnum>
+            </citerefentry>
+          </term>
+          <listitem>
+            <para>
+The following bus1-specific commands are supported:
+            </para>
+            <variablelist>
+              <varlistentry>
+                <term><constant>BUS1_CMD_PEER_DISCONNECT</constant></term>
+                <listitem>
+                  <para>
+This command disconnects a peer. It takes no argument. All slices,
+handles, nodes and queued messages are released and destroyed and all future
+operations on the peer will fail with <constant>-ESHUTDOWN</constant>.
+                  </para>
+                </listitem>
+              </varlistentry>
+
+              <varlistentry>
+                <term><constant>BUS1_CMD_PEER_QUERY</constant></term>
+                <listitem>
+                  <para>
+This command queries the state of a peer context. It takes the following
+structure as argument:
+<programlisting>
+struct bus1_cmd_peer_reset {
+        __u64 flags;
+        __u64 peer_flags;
+        __u64 max_slices;
+        __u64 max_handles;
+        __u64 max_inflight_bytes;
+        __u64 max_inflight_fds;
+};
+</programlisting>
+<varname>flags</varname> must always be set to 0. The state as set via
+<constant>BUS1_CMD_PEER_RESET</constant>, or the default state if it was never
+reset, is returned.
+                  </para>
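+                  <para>
+For instance, the current state could be read back as sketched below; this
+assumes that the <constant>BUS1_CMD_*</constant> constants are the ioctl
+request codes provided by <filename>linux/bus1.h</filename>:
+                  </para>
+<programlisting>
+#include &lt;string.h&gt;
+#include &lt;sys/ioctl.h&gt;
+#include &lt;linux/bus1.h&gt;
+
+/* Sketch: query the current per-peer state and limits. */
+static int peer_query(int fd, struct bus1_cmd_peer_reset *state)
+{
+        memset(state, 0, sizeof(*state));       /* flags must be 0 */
+        return ioctl(fd, BUS1_CMD_PEER_QUERY, state);
+}
+</programlisting>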
+                </listitem>
+              </varlistentry>
+
+              <varlistentry>
+                <term><constant>BUS1_CMD_PEER_RESET</constant></term>
+                <listitem>
+                  <para>
+This command resets a peer context. It takes the following structure as
+argument:
+<programlisting>
+struct bus1_cmd_peer_reset {
+        __u64 flags;
+        __u64 peer_flags;
+        __u64 max_slices;
+        __u64 max_handles;
+        __u64 max_inflight_bytes;
+        __u64 max_inflight_fds;
+};
+</programlisting>
+If <varname>peer_flags</varname> has
+<constant>BUS1_PEER_FLAG_WANT_SECCTX</constant> set, the security context of the
+sending task is attached to each message received by this peer.
+<varname>max_slices</varname>, <varname>max_handles</varname>,
+<varname>max_inflight_bytes</varname>, and <varname>max_inflight_fds</varname>
+are the resource limits for this peer. Note that these are merely maximum values;
+resource usage is additionally limited per user.
+                  </para>
+                  <para>
+If <varname>flags</varname> has
+<constant>BUS1_CMD_PEER_RESET_FLAG_FLUSH_SEED</constant> set, the seed message
+is dropped, and if <constant>BUS1_CMD_PEER_RESET_FLAG_FLUSH</constant> is set,
+all slices and handles are released, all messages are dropped from the queue and
+all nodes that are not pinned by the seed message are destroyed.
+                  </para>
+                </listitem>
+              </varlistentry>
+
+              <varlistentry>
+                <term><constant>BUS1_CMD_HANDLE_TRANSFER</constant></term>
+                <listitem>
+                  <para>
+This command transfers a handle from one peer context to another. It takes the
+following structure as argument:
+<programlisting>
+struct bus1_cmd_handle_transfer {
+        __u64 flags;
+        __u64 src_handle;
+        __u64 dst_fd;
+        __u64 dst_handle;
+};
+</programlisting>
+<varname>flags</varname> must always be set to 0, <varname>src_handle</varname>
+is the handle ID of the handle being transferred in the source context,
+<varname>dst_fd</varname> is the file descriptor representing the destination
+peer context and <varname>dst_handle</varname> must be
+<constant>BUS1_HANDLE_INVALID</constant> and is set to the new handle ID in the
+destination context on return.
+                  </para>
+                  <para>
+If <varname>dst_fd</varname> is set to <constant>-1</constant> the source
+context is also used as the destination.
+                  </para>
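+                  <para>
+A sketch of duplicating a handle into another peer context, using the structure
+above (the helper name is arbitrary; the ioctl request code is assumed to be
+<constant>BUS1_CMD_HANDLE_TRANSFER</constant> itself):
+                  </para>
+<programlisting>
+#include &lt;sys/ioctl.h&gt;
+#include &lt;linux/bus1.h&gt;
+
+/* Sketch: give the peer behind 'dst_fd' a handle to the node that 'handle'
+ * refers to on 'src_fd'; on success, the ID valid in the destination context
+ * is stored in 'dst_id'. */
+static int handle_transfer(int src_fd, __u64 handle, int dst_fd, __u64 *dst_id)
+{
+        struct bus1_cmd_handle_transfer cmd = {
+                .flags = 0,
+                .src_handle = handle,
+                .dst_fd = dst_fd,
+                .dst_handle = BUS1_HANDLE_INVALID,
+        };
+        int r;
+
+        r = ioctl(src_fd, BUS1_CMD_HANDLE_TRANSFER, &amp;cmd);
+        if (r &lt; 0)
+                return r;
+
+        *dst_id = cmd.dst_handle;
+        return 0;
+}
+</programlisting>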
+                </listitem>
+              </varlistentry>
+
+              <varlistentry>
+                <term><constant>BUS1_CMD_HANDLE_RELEASE</constant></term>
+                <listitem>
+                  <para>
+This command releases one user reference to a handle. It takes a handle ID as
+argument.
+                  </para>
+                </listitem>
+              </varlistentry>
+
+              <varlistentry>
+                <term><constant>BUS1_CMD_NODE_DESTROY</constant></term>
+                <listitem>
+                  <para>
+This command destroys a set of nodes. It takes the following structure as
+argument:
+<programlisting>
+struct bus1_cmd_node_destroy {
+        __u64 flags;
+        __u64 ptr_nodes;
+        __u64 n_nodes;
+};
+</programlisting>
+<varname>flags</varname> must always be set to 0, <varname>ptr_nodes</varname>
+must be a pointer to an array of handle IDs of owner handles of local nodes, and
+<varname>n_nodes</varname> must be the size of the array.
+                  </para>
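+                  <para>
+For example (a sketch, with the same assumption about the ioctl request code):
+                  </para>
+<programlisting>
+#include &lt;sys/ioctl.h&gt;
+#include &lt;linux/bus1.h&gt;
+
+/* Sketch: destroy the local nodes whose owner-handle IDs are listed in 'ids'. */
+static int nodes_destroy(int fd, const __u64 *ids, __u64 n_ids)
+{
+        struct bus1_cmd_node_destroy cmd = {
+                .flags = 0,
+                .ptr_nodes = (__u64)(unsigned long)ids,
+                .n_nodes = n_ids,
+        };
+
+        return ioctl(fd, BUS1_CMD_NODE_DESTROY, &amp;cmd);
+}
+</programlisting>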
+                </listitem>
+              </varlistentry>
+
+              <varlistentry>
+                <term><constant>BUS1_CMD_SLICE_RELEASE</constant></term>
+                <listitem>
+                  <para>
+This command releases one slice from the local pool. It takes a pool offset to
+the start of the slice to be released.
+                  </para>
+                </listitem>
+              </varlistentry>
+
+              <varlistentry>
+                <term><constant>BUS1_CMD_SEND</constant></term>
+                <listitem>
+                  <para>
+This command sends a message. It takes the following structure as argument:
+<programlisting>
+struct bus1_cmd_send {
+        __u64 flags;
+        __u64 ptr_destinations;
+        __u64 ptr_errors;
+        __u64 n_destinations;
+        __u64 ptr_vecs;
+        __u64 n_vecs;
+        __u64 ptr_handles;
+        __u64 n_handles;
+        __u64 ptr_fds;
+        __u64 n_fds;
+};
+</programlisting>
+                  </para>
+                  <para>
+<varname>flags</varname> may be set to at most one of
+<constant>BUS1_SEND_FLAG_CONTINUE</constant> and
+<constant>BUS1_SEND_FLAG_SEED</constant>. If
+<constant>BUS1_SEND_FLAG_CONTINUE</constant> is set, any message that cannot
+be delivered due to errors on the remote peer does not make the whole transaction
+fail, but merely sets the corresponding entry in the error code array.
+If <constant>BUS1_SEND_FLAG_SEED</constant> is set, the message
+replaces the seed message on the local peer. In this case,
+<varname>n_destinations</varname> must be 0.
+                  </para>
+                  <para>
+<varname>ptr_destinations</varname> is a pointer to an array of handle IDs,
+<varname>ptr_errors</varname> is a pointer to an array of corresponding
+errno codes, and <varname>n_destinations</varname> is the length of the arrays.
+The message being sent is delivered to the peer context owning the nodes pointed
+to by each of the handles in the array.
+                  </para>
+                  <para>
+<varname>ptr_vecs</varname> is a pointer to an array of iovecs and
+<varname>n_vecs</varname> is the length of the array. The iovecs represent the
+payload of the message which is delivered to each destination.
+                  </para>
+                  <para>
+<varname>ptr_handles</varname> is a pointer to an array of handle IDs and
+<varname>n_handles</varname> is the length of the array. Each of the handles in
+this array is installed in each destination peer context at receive time. If the
+underlying node has been destroyed at the time the message is delivered (the
+message would be ordered after the node's destruction notification) then
+<constant>BUS1_HANDLE_INVALID</constant> will be delivered instead.
+                  </para>
+                  <para>
+<varname>ptr_fds</varname> is a pointer to an integer array of file descriptors
+and <varname>n_fds</varname> is the length of the array. Each of the file
+descriptors in this array may be installed in the destination peer context at
+receive time (see below).
+                  </para>
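+                  <para>
+Putting the pieces together, a single data message with one payload blob and no
+attached handles or file descriptors could be sent to one destination handle as
+sketched below. This assumes <constant>BUS1_CMD_SEND</constant> is the ioctl
+request code; whether the error array may be left unset in this case is also an
+assumption of the sketch:
+                  </para>
+<programlisting>
+#include &lt;stddef.h&gt;
+#include &lt;sys/ioctl.h&gt;
+#include &lt;sys/uio.h&gt;
+#include &lt;linux/bus1.h&gt;
+
+/* Sketch: send one payload blob to a single destination handle. */
+static int peer_send(int fd, __u64 destination, const void *data, size_t n_bytes)
+{
+        struct iovec vec = {
+                .iov_base = (void *)data,
+                .iov_len = n_bytes,
+        };
+        struct bus1_cmd_send cmd = {
+                .flags = 0,
+                .ptr_destinations = (__u64)(unsigned long)&amp;destination,
+                .ptr_errors = 0,        /* error array omitted in this sketch */
+                .n_destinations = 1,
+                .ptr_vecs = (__u64)(unsigned long)&amp;vec,
+                .n_vecs = 1,
+                /* no handles and no file descriptors attached */
+                .ptr_handles = 0,
+                .n_handles = 0,
+                .ptr_fds = 0,
+                .n_fds = 0,
+        };
+
+        return ioctl(fd, BUS1_CMD_SEND, &amp;cmd);
+}
+</programlisting>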
+                </listitem>
+              </varlistentry>
+
+              <varlistentry>
+                <term><constant>BUS1_CMD_RECV</constant></term>
+                <listitem>
+                  <para>
+This command receives a message. It takes the following structure as argument:
+<programlisting>
+struct bus1_cmd_recv {
+        __u64 flags;
+        __u64 max_offset;
+        struct {
+                __u64 type;
+                __u64 flags;
+                __u64 destination;
+                __u32 uid;
+                __u32 gid;
+                __u32 pid;
+                __u32 tid;
+                __u64 offset;
+                __u64 n_bytes;
+                __u64 n_handles;
+                __u64 n_fds;
+                __u64 n_secctx;
+        } msg;
+};
+</programlisting>
+If <constant>BUS1_RECV_FLAG_PEEK</constant> is set in <varname>flags</varname>,
+the received message is not dropped from the queue. If
+<constant>BUS1_RECV_FLAG_SEED</constant> is set, the peer's seed is received
+rather than a message from the queue. If
+<constant>BUS1_RECV_FLAG_INSTALL_FDS</constant> is set, the file descriptors attached to
+the received message are installed in the receiving process. Care must be taken
+when using this flag from more than one process on the same message, as file
+descriptor numbers are per-process and not per-peer.
+                  </para>
+                  <para>
+<varname>max_offset</varname> indicates the maximum offset into the pool the
+receiving peer is able to read. If a message slice would exceed this offset,
+the call fails with <constant>-ERANGE</constant>.
+                  </para>
+                  <para>
+<varname>msg.type</varname> indicates the type of message.
+<constant>BUS1_MSG_NONE</constant> is never returned.
+<constant>BUS1_MSG_DATA</constant> indicates a regular message sent from another
+peer, possibly containing a payload, as well as attached handles and
+file descriptors. <constant>BUS1_MSG_NODE_DESTROY</constant> indicates that the
+node referenced by the handle in <varname>msg.destination</varname> was
+destroyed by its owner. <constant>BUS1_MSG_NODE_RELEASE</constant> indicates
+that all the references to handles referencing the node in
+<varname>msg.destination</varname> have been released.
+                  </para>
+                  <para>
+<varname>msg.flags</varname> indicates additional flags of the message.
+<constant>BUS1_MSG_FLAG_HAS_SECCTX</constant> indicates that a security context
+was attached to the message (to distinguish an empty <varname>n_secctx</varname>
+from an invalid one).
+<constant>BUS1_MSG_FLAG_CONTINUE</constant> indicates that there are more
+messages queued which belong to the same message transaction.
+                  </para>
+                  <para>
+<varname>msg.destination</varname> is the ID of the destination node or handle
+of the message.
+                  </para>
+                  <para>
+<varname>msg.uid</varname>, <varname>msg.gid</varname>,
+<varname>msg.pid</varname>, and <varname>msg.tid</varname> are the user, group,
+process and thread ID of the process that created the sending peer context.
+                  </para>
+                  <para>
+<varname>msg.offset</varname> is the offset, in bytes, into the pool of the
+payload and <varname>msg.n_bytes</varname> is its length.
+                  </para>
+                  <para>
+<varname>msg.n_handles</varname> is the number of handles attached to the
+message. The handle IDs are stored in the pool following the payload (and
+possibly padding to make the array 8-byte aligned).
+                  </para>
+                  <para>
+<varname>msg.n_fds</varname> is the number of file descriptors attached to the
+message, or 0 if <constant>BUS1_RECV_FLAG_INSTALL_FDS</constant> was not set.
+The file descriptor numbers are stored in the pool following the handle array
+(and possibly padding to make the array 8-byte aligned).
+                  </para>
+                  <para>
+<varname>msg.n_secctx</varname> is the number of bytes attached to the message,
+which contain the security context of the sender. The security context is
+stored in the pool following the payload (and possibly padding to make it
+8-byte aligned).
+                  </para>
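+                  <para>
+A minimal receive step, tying the structure above to the pool semantics
+described earlier, might look as follows. This is a sketch: it assumes the
+<constant>BUS1_CMD_*</constant> constants are the ioctl request codes, that the
+pool has already been mapped as shown for mmap(2), and that
+<constant>BUS1_CMD_SLICE_RELEASE</constant> takes a pointer to the pool offset:
+                  </para>
+<programlisting>
+#include &lt;string.h&gt;
+#include &lt;sys/ioctl.h&gt;
+#include &lt;linux/bus1.h&gt;
+
+/* Sketch: dequeue one message and inspect its payload in the mapped pool. */
+static int peer_recv_one(int fd, const void *pool, __u64 pool_size)
+{
+        struct bus1_cmd_recv cmd;
+        int r;
+
+        memset(&amp;cmd, 0, sizeof(cmd));
+        cmd.max_offset = pool_size;
+
+        r = ioctl(fd, BUS1_CMD_RECV, &amp;cmd);
+        if (r &lt; 0)
+                return r;               /* e.g., no message ready to be dequeued */
+
+        if (cmd.msg.type == BUS1_MSG_DATA) {
+                const char *payload = (const char *)pool + cmd.msg.offset;
+
+                /* ... interpret cmd.msg.n_bytes bytes of payload ... */
+                (void)payload;
+
+                /* Release the slice once it is no longer needed; passing a
+                 * pointer to the pool offset is an assumption of this sketch. */
+                r = ioctl(fd, BUS1_CMD_SLICE_RELEASE, &amp;cmd.msg.offset);
+        }
+
+        return r;
+}
+</programlisting>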
+                </listitem>
+              </varlistentry>
+            </variablelist>
+          </listitem>
+        </varlistentry> <!-- FOPS IOCTL -->
+
+        <varlistentry> <!-- FOPS CLOSE -->
+          <term>
+            <citerefentry>
+              <refentrytitle>close</refentrytitle>
+              <manvolnum>2</manvolnum>
+            </citerefentry>
+          </term>
+          <listitem>
+            <para>
+A call to
+<citerefentry>
+  <refentrytitle>close</refentrytitle><manvolnum>2</manvolnum>
+</citerefentry>
+releases the passed file descriptor. When all file descriptors associated with
+the same peer context have been closed, the peer is shut down. This destroys all
+nodes of that peer, releases all handles, flushes its queue and pool, and
+deallocates all related resources. Messages that have been sent by the peer and
+are still queued on destination queues are unaffected by this.
+            </para>
+          </listitem>
+        </varlistentry> <!-- FOPS CLOSE -->
+      </variablelist>
+    </refsect2>
+  </refsect1> <!-- DESCRIPTION -->
+
+  <refsect1> <!-- RETURN VALUE -->
+    <title>Return value</title>
+    <para>
+All bus1 operations return zero on success. On failure, a negative error code is
+returned.
+    </para>
+  </refsect1> <!-- RETURN VALUE -->
+
+  <refsect1> <!-- ERRORS -->
+    <title>Errors</title>
+    <para>
+These are all standard errors generated by the bus layer. See the description
+of each ioctl for details on their occurrence.
+    </para>
+    <variablelist>
+      <varlistentry>
+        <term><constant>EAGAIN</constant></term>
+        <listitem><para>
+No messages ready to be read.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><constant>EBADF</constant></term>
+        <listitem><para>
+Invalid file descriptor.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><constant>EDQUOT</constant></term>
+        <listitem><para>
+Resource quota exceeded.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><constant>EFAULT</constant></term>
+        <listitem><para>
+Cannot read or write ioctl parameters.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><constant>EHOSTUNREACH</constant></term>
+        <listitem><para>
+The destination object is no longer available.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><constant>EINVAL</constant></term>
+        <listitem><para>
+Invalid ioctl parameters.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><constant>EMSGSIZE</constant></term>
+        <listitem><para>
+The message to be sent exceeds its allowed resource limits.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><constant>ENOMEM</constant></term>
+        <listitem><para>
+Out of kernel memory.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><constant>ENOTTY</constant></term>
+        <listitem><para>
+Unknown ioctl.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><constant>ENXIO</constant></term>
+        <listitem><para>
+Unknown object.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><constant>EOPNOTSUPP</constant></term>
+        <listitem><para>
+Operation not supported.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><constant>EPERM</constant></term>
+        <listitem><para>
+Permission denied.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><constant>ERANGE</constant></term>
+        <listitem><para>
+The message to be received would exceed the maximal offset.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><constant>ESHUTDOWN</constant></term>
+        <listitem><para>
+Local peer was already disconnected.
+        </para></listitem>
+      </varlistentry>
+    </variablelist>
+  </refsect1> <!-- ERRORS -->
+
+  <refsect1> <!-- SEE ALSO -->
+    <title>See Also</title>
+    <simplelist type="inline">
+      <member>
+        <citerefentry>
+          <refentrytitle>bus1.pool</refentrytitle>
+          <manvolnum>7</manvolnum>
+        </citerefentry>
+      </member>
+    </simplelist>
+  </refsect1> <!-- SEE ALSO -->
+
+</refentry>
diff --git a/Documentation/bus1/stylesheet.xsl b/Documentation/bus1/stylesheet.xsl
new file mode 100644
index 0000000..52565ea
--- /dev/null
+++ b/Documentation/bus1/stylesheet.xsl
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<stylesheet xmlns="http://www.w3.org/1999/XSL/Transform" version="1.0">
+	<param name="chunk.quietly">1</param>
+	<param name="funcsynopsis.style">ansi</param>
+	<param name="funcsynopsis.tabular.threshold">80</param>
+	<param name="callout.graphics">0</param>
+	<param name="paper.type">A4</param>
+	<param name="generate.section.toc.level">2</param>
+	<param name="use.id.as.filename">1</param>
+	<param name="citerefentry.link">1</param>
+	<strip-space elements="*"/>
+	<template name="generate.citerefentry.link">
+		<value-of select="refentrytitle"/>
+		<text>.html</text>
+	</template>
+</stylesheet>
-- 
2.10.1
