Message-Id: <1237823287-12734-6-git-send-email-philipp.reisner@linbit.com>
Date:	Mon, 23 Mar 2009 16:48:00 +0100
From:	Philipp Reisner <philipp.reisner@...bit.com>
To:	linux-kernel@...r.kernel.org
Cc:	philipp.reisner@...bit.com, gregkh@...e.de
Subject: [PATCH 05/12] DRBD: userspace_interface

DRBD uses netlink via connector. The packets are composed of extensible tag
lists, so the interface can be extended over time without breaking old
userspace programs.
The nice part of the userspace interface is drbd.h; the ugly part is
certainly drbd_tag_magic.h. I realize that macros are generally frowned
upon, but this way it is easier to maintain. The code that gets generated
by repeatedly including drbd_nl.h would be hard to maintain over time if it
were open coded. (BTW, did you know that the samba 4 people are proud to
have more than 50% of their code auto-generated? :)
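
To illustrate the wire format (a userspace sketch only, not part of the
patch): each tag is a 16 bit tag word (13 bit tag number, 2 bit type,
1 bit "mandatory" flag), followed by a 16 bit length and the payload; the
list is terminated by TT_END. Composing e.g. a "primary" request could
look roughly like this:

	unsigned char buf[sizeof(struct drbd_nl_cfg_req) + 16];
	struct drbd_nl_cfg_req *req = (struct drbd_nl_cfg_req *)buf;
	unsigned short *tags = req->tag_list;

	req->packet_type = P_primary;	/* = 1, from drbd_tag_magic.h */
	req->drbd_minor = 0;
	req->flags = 0;

	*tags++ = T_overwrite_peer;	/* 1 | TT_BIT | T_MAY_IGNORE */
	*tags++ = sizeof(char);		/* payload length */
	*(char *)tags = 1;		/* the bit's value */
	tags = (unsigned short *)((char *)tags + sizeof(char));
	*tags++ = TT_END;		/* terminate the tag list */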

---
diff -uNrp linux-2.6.29-rc8/include/linux/drbd.h linux-2.6.29-rc8-drbd/include/linux/drbd.h
--- linux-2.6.29-rc8/include/linux/drbd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.29-rc8-drbd/include/linux/drbd.h	2009-03-23 11:25:50.987214000 +0100
@@ -0,0 +1,372 @@
+/*
+  drbd.h
+  Kernel module for 2.6.x Kernels
+
+  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
+
+  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
+  Copyright (C) 2001-2008, Philipp Reisner <philipp.reisner@...bit.com>.
+  Copyright (C) 2001-2008, Lars Ellenberg <lars.ellenberg@...bit.com>.
+
+  drbd is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2, or (at your option)
+  any later version.
+
+  drbd is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with drbd; see the file COPYING.  If not, write to
+  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+#ifndef DRBD_H
+#define DRBD_H
+#include <linux/drbd_config.h>
+#include <linux/connector.h>
+
+#include <asm/types.h>
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#else
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <limits.h>
+
+/* Although the Linux source code distinguishes between
+   generic endianness and the bitfields' endianness, there is no
+   architecture as of Linux-2.6.24-rc4 where the bitfields' endianness
+   does not match the generic endianness. */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN_BITFIELD
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#define __BIG_ENDIAN_BITFIELD
+#else
+# error "sorry, weird endianness on this box"
+#endif
+
+#endif
+
+
+enum io_error_handler {
+	PassOn, /* FIXME should this better be named "Ignore"? */
+	CallIOEHelper,
+	Detach
+};
+
+enum fencing_policy {
+	DontCare,
+	Resource,
+	Stonith
+};
+
+enum disconnect_handler {
+	Reconnect,
+	DropNetConf,
+	FreezeIO
+};
+
+enum after_sb_handler {
+	Disconnect,
+	DiscardYoungerPri,
+	DiscardOlderPri,
+	DiscardZeroChg,
+	DiscardLeastChg,
+	DiscardLocal,
+	DiscardRemote,
+	Consensus,
+	DiscardSecondary,
+	CallHelper,
+	Violently
+};
+
+/* KEEP the order, do not delete or insert!
+ * Or change the API_VERSION, too. */
+enum ret_codes {
+	RetCodeBase = 100,
+	NoError,         /* 101 ... */
+	LAAlreadyInUse,
+	OAAlreadyInUse,
+	LDNameInvalid,
+	MDNameInvalid,
+	LDAlreadyInUse,
+	LDNoBlockDev,
+	MDNoBlockDev,
+	LDOpenFailed,
+	MDOpenFailed,
+	LDDeviceTooSmall,
+	MDDeviceTooSmall,
+	LDNoConfig,
+	LDMounted,
+	MDMounted,
+	LDMDInvalid,
+	LDDeviceTooLarge,
+	MDIOError,
+	MDInvalid,
+	CRAMAlgNotAvail,
+	CRAMAlgNotDigest,
+	KMallocFailed,
+	DiscardNotAllowed,
+	HaveDiskConfig,
+	HaveNetConfig,
+	UnknownMandatoryTag,
+	MinorNotKnown,
+	StateNotAllowed,
+	GotSignal, /* EINTR */
+	NoResizeDuringResync,
+	APrimaryNodeNeeded,
+	SyncAfterInvalid,
+	SyncAfterCycle,
+	PauseFlagAlreadySet,
+	PauseFlagAlreadyClear,
+	DiskLowerThanOutdated, /* obsolete, now SS_LowerThanOutdated */
+	UnknownNetLinkPacket,
+	HaveNoDiskConfig,
+	ProtocolCRequired,
+	VMallocFailed,
+	IntegrityAlgNotAvail,
+	IntegrityAlgNotDigest,
+	CPUMaskParseFailed,
+	CSUMSAlgNotAvail,
+	CSUMSAlgNotDigest,
+	VERIFYAlgNotAvail,
+	VERIFYAlgNotDigest,
+	CSUMSResyncRunning,
+	VERIFYIsRunning,
+	DataOfWrongCurrent,
+	MayNotBeConnected,
+
+	/* insert new ones above this line */
+	AfterLastRetCode,
+};
+
+#define DRBD_PROT_A   1
+#define DRBD_PROT_B   2
+#define DRBD_PROT_C   3
+
+enum drbd_role {
+	Unknown = 0,
+	Primary = 1,     /* role */
+	Secondary = 2,   /* role */
+	role_mask = 3,
+};
+
+/* The order of these constants is important.
+ * The lower ones (<WFReportParams) indicate
+ * that there is no socket!
+ * >=WFReportParams ==> There is a socket
+ */
+enum drbd_conns {
+	StandAlone,
+	Disconnecting,  /* Transient state on the way to StandAlone. */
+	Unconnected,    /* >= Unconnected -> inc_net() succeeds */
+
+	/* These transient states are all used on the way
+	 * from >= Connected to Unconnected.
+	 * They are the 'disconnect reason' states;
+	 * transitions between them are not allowed. */
+	Timeout,
+	BrokenPipe,
+	NetworkFailure,
+	ProtocolError,
+	TearDown,
+
+	WFConnection,
+	WFReportParams, /* we have a socket */
+	Connected,      /* we have introduced each other */
+	StartingSyncS,  /* starting full sync by IOCTL. */
+	StartingSyncT,  /* starting full sync by IOCTL. */
+	WFBitMapS,
+	WFBitMapT,
+	WFSyncUUID,
+
+	/* All SyncStates are tested with this comparison
+	 * xx >= SyncSource && xx <= PausedSyncT */
+	SyncSource,
+	SyncTarget,
+	VerifyS,
+	VerifyT,
+	PausedSyncS,
+	PausedSyncT,
+	conn_mask = 31
+};
+
+enum drbd_disk_state {
+	Diskless,
+	Attaching,      /* In the process of reading the meta-data */
+	Failed,         /* Becomes Diskless as soon as we have told the peer */
+			/* when >= Failed it is legal to access mdev->bc */
+	Negotiating,    /* Late attaching state, we need to talk to the peer */
+	Inconsistent,
+	Outdated,
+	DUnknown,       /* Only used for the peer, never for myself */
+	Consistent,     /* Might be Outdated, might be UpToDate ... */
+	UpToDate,       /* Only this disk state allows applications' IO ! */
+	disk_mask = 15
+};
+
+union drbd_state_t {
+/* According to gcc's docs, "the order of allocation of bit-fields within
+ * a unit" is determined by the ABI (C90 6.5.2.1, C99 6.7.2.1).
+ * Pointed out by Maxim Uvarov <muvarov@...mvista.com>:
+ * even though we transmit as "cpu_to_be32(state)",
+ * the offsets of the bitfields still need to be swapped
+ * on different endianness.
+ */
+	struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+		unsigned role:2 ;   /* 3/4	 primary/secondary/unknown */
+		unsigned peer:2 ;   /* 3/4	 primary/secondary/unknown */
+		unsigned conn:5 ;   /* 17/32	 cstates */
+		unsigned disk:4 ;   /* 8/16	 from Diskless to UpToDate */
+		unsigned pdsk:4 ;   /* 8/16	 from Diskless to UpToDate */
+		unsigned susp:1 ;   /* 2/2	 IO suspended  no/yes */
+		unsigned aftr_isp:1 ; /* isp .. imposed sync pause */
+		unsigned peer_isp:1 ;
+		unsigned user_isp:1 ;
+		unsigned _pad:11;   /* 0	 unused */
+#elif defined(__BIG_ENDIAN_BITFIELD)
+		unsigned _pad:11;   /* 0	 unused */
+		unsigned user_isp:1 ;
+		unsigned peer_isp:1 ;
+		unsigned aftr_isp:1 ; /* isp .. imposed sync pause */
+		unsigned susp:1 ;   /* 2/2	 IO suspended  no/yes */
+		unsigned pdsk:4 ;   /* 8/16	 from Diskless to UpToDate */
+		unsigned disk:4 ;   /* 8/16	 from Diskless to UpToDate */
+		unsigned conn:5 ;   /* 17/32	 cstates */
+		unsigned peer:2 ;   /* 3/4	 primary/secondary/unknown */
+		unsigned role:2 ;   /* 3/4	 primary/secondary/unknown */
+#else
+# error "this endianness is not supported"
+#endif
+#ifndef DRBD_DEBUG_STATE_CHANGES
+#define DRBD_DEBUG_STATE_CHANGES 0
+#endif
+#if DRBD_DEBUG_STATE_CHANGES
+		unsigned int line;
+		const char *func;
+#endif
+	};
+	unsigned int i;
+};
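+
+/* Example (on little-endian-bitfield machines): Primary/Connected/UpToDate
+ * with all other fields zero packs into .i as
+ * 1 | (Connected << 4) | (UpToDate << 9). */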
+
+enum set_st_err {
+	SS_CW_NoNeed = 4,
+	SS_CW_Success = 3,
+	SS_NothingToDo = 2,
+	SS_Success = 1,
+	SS_UnknownError = 0, /* Used to sleep longer in _drbd_request_state */
+	SS_TwoPrimaries = -1,
+	SS_NoUpToDateDisk = -2,
+	SS_BothInconsistent = -4,
+	SS_SyncingDiskless = -5,
+	SS_ConnectedOutdates = -6,
+	SS_PrimaryNOP = -7,
+	SS_ResyncRunning = -8,
+	SS_AlreadyStandAlone = -9,
+	SS_CW_FailedByPeer = -10,
+	SS_IsDiskLess = -11,
+	SS_DeviceInUse = -12,
+	SS_NoNetConfig = -13,
+	SS_NoVerifyAlg = -14,       /* drbd-8.2 only */
+	SS_NeedConnection = -15,    /* drbd-8.2 only */
+	SS_LowerThanOutdated = -16,
+	SS_NotSupported = -17,      /* drbd-8.2 only */
+	SS_InTransientState = -18,  /* Retry after the next state change */
+	SS_ConcurrentStChg = -19,   /* Concurrent cluster side state change! */
+	SS_AfterLastError = -20,    /* Keep this at bottom */
+};
+
+/* from drbd_strings.c */
+extern const char *conns_to_name(enum drbd_conns);
+extern const char *roles_to_name(enum drbd_role);
+extern const char *disks_to_name(enum drbd_disk_state);
+extern const char *set_st_err_name(enum set_st_err);
+
+#ifndef BDEVNAME_SIZE
+# define BDEVNAME_SIZE 32
+#endif
+
+#define SHARED_SECRET_MAX 64
+
+enum MetaDataFlags {
+	__MDF_Consistent,
+	__MDF_PrimaryInd,
+	__MDF_ConnectedInd,
+	__MDF_FullSync,
+	__MDF_WasUpToDate,
+	__MDF_PeerOutDated, /* or worse (e.g. invalid). */
+	__MDF_CrashedPrimary,
+};
+#define MDF_Consistent      (1<<__MDF_Consistent)
+#define MDF_PrimaryInd      (1<<__MDF_PrimaryInd)
+#define MDF_ConnectedInd    (1<<__MDF_ConnectedInd)
+#define MDF_FullSync        (1<<__MDF_FullSync)
+#define MDF_WasUpToDate     (1<<__MDF_WasUpToDate)
+#define MDF_PeerOutDated    (1<<__MDF_PeerOutDated)
+#define MDF_CrashedPrimary  (1<<__MDF_CrashedPrimary)
+
+enum UuidIndex {
+	Current,
+	Bitmap,
+	History_start,
+	History_end,
+	UUID_SIZE,      /* nl-packet: number of dirty bits */
+	UUID_FLAGS,     /* nl-packet: flags */
+	EXT_UUID_SIZE   /* Everything. */
+};
+
+enum UseTimeout {
+	UT_Default      = 0,
+	UT_Degraded     = 1,
+	UT_PeerOutdated = 2,
+};
+
+#define UUID_JUST_CREATED ((__u64)4)
+
+#define DRBD_MAGIC 0x83740267
+#define BE_DRBD_MAGIC __constant_cpu_to_be32(DRBD_MAGIC)
+
+/* these are of type "int" */
+#define DRBD_MD_INDEX_INTERNAL -1
+#define DRBD_MD_INDEX_FLEX_EXT -2
+#define DRBD_MD_INDEX_FLEX_INT -3
+
+/* Start of the new netlink/connector stuff */
+
+#define DRBD_NL_CREATE_DEVICE 0x01
+#define DRBD_NL_SET_DEFAULTS  0x02
+
+/* The following line should be moved over to linux/connector.h
+ * when the time comes */
+#ifndef CN_IDX_DRBD
+# define CN_IDX_DRBD			0x4
+/* Ubuntu "intrepid ibex" release defined CN_IDX_DRBD as 0x6 */
+#endif
+#define CN_VAL_DRBD			0x1
+
+/* For searching a vacant cn_idx value */
+#define CN_IDX_STEP			6977
+
+struct drbd_nl_cfg_req {
+	int packet_type;
+	unsigned int drbd_minor;
+	int flags;
+	unsigned short tag_list[];
+};
+
+struct drbd_nl_cfg_reply {
+	int packet_type;
+	unsigned int minor;
+	int ret_code; /* enum ret_code or set_st_err_t */
+	unsigned short tag_list[]; /* only used with get_* calls */
+};
+
+#endif
diff -uNrp linux-2.6.29-rc8/include/linux/drbd_config.h linux-2.6.29-rc8-drbd/include/linux/drbd_config.h
--- linux-2.6.29-rc8/include/linux/drbd_config.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.29-rc8-drbd/include/linux/drbd_config.h	2009-03-23 11:25:50.987214000 +0100
@@ -0,0 +1,44 @@
+/*
+  drbd_config.h
+  DRBD's compile time configuration.
+
+  drbd is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2, or (at your option)
+  any later version.
+
+  drbd is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with drbd; see the file COPYING.  If not, write to
+  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef DRBD_CONFIG_H
+#define DRBD_CONFIG_H
+
+extern const char *drbd_buildtag(void);
+
+#define REL_VERSION "8.3.1rc1"
+#define API_VERSION 88
+#define PRO_VERSION_MIN 86
+#define PRO_VERSION_MAX 89
+
+#ifndef __CHECKER__   /* for a sparse run, we need all STATICs */
+#define DBG_ALL_SYMBOLS /* no static functs, improves quality of OOPS traces */
+#endif
+
+
+/* Define this to enable dynamic tracing controlled by module parameters
+ * at run time. This enables ALL use of dynamic tracing including packet
+ * and bio dumping, etc */
+#define ENABLE_DYNAMIC_TRACE
+
+/* Enable fault insertion code */
+#define DRBD_ENABLE_FAULTS
+
+
+#endif
diff -uNrp linux-2.6.29-rc8/include/linux/drbd_limits.h linux-2.6.29-rc8-drbd/include/linux/drbd_limits.h
--- linux-2.6.29-rc8/include/linux/drbd_limits.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.29-rc8-drbd/include/linux/drbd_limits.h	2009-03-18 12:19:56.775194000 +0100
@@ -0,0 +1,133 @@
+/*
+  drbd_limits.h
+  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
+*/
+
+/*
+ * Our current limitations.
+ * Some of them are hard limits,
+ * some of them are arbitrary range limits that make it easier to provide
+ * feedback about nonsense settings for certain configurable values.
+ */
+
+#ifndef DRBD_LIMITS_H
+#define DRBD_LIMITS_H 1
+
+#define DEBUG_RANGE_CHECK 0
+
+#define DRBD_MINOR_COUNT_MIN 1
+#define DRBD_MINOR_COUNT_MAX 255
+
+#define DRBD_DIALOG_REFRESH_MIN 0
+#define DRBD_DIALOG_REFRESH_MAX 600
+
+/* valid port number */
+#define DRBD_PORT_MIN 1
+#define DRBD_PORT_MAX 0xffff
+
+/* startup { */
+  /* if you want more than 3.4 days, disable */
+#define DRBD_WFC_TIMEOUT_MIN 0
+#define DRBD_WFC_TIMEOUT_MAX 300000
+#define DRBD_WFC_TIMEOUT_DEF 0
+
+#define DRBD_DEGR_WFC_TIMEOUT_MIN 0
+#define DRBD_DEGR_WFC_TIMEOUT_MAX 300000
+#define DRBD_DEGR_WFC_TIMEOUT_DEF 0
+
+#define DRBD_OUTDATED_WFC_TIMEOUT_MIN 0
+#define DRBD_OUTDATED_WFC_TIMEOUT_MAX 300000
+#define DRBD_OUTDATED_WFC_TIMEOUT_DEF 0
+/* }*/
+
+/* net { */
+  /* timeout, unit: centiseconds
+   * a timeout of more than one minute is not useful */
+#define DRBD_TIMEOUT_MIN 1
+#define DRBD_TIMEOUT_MAX 600
+#define DRBD_TIMEOUT_DEF 60       /* 6 seconds */
+
+  /* active connection retries when WFConnection */
+#define DRBD_CONNECT_INT_MIN 1
+#define DRBD_CONNECT_INT_MAX 120
+#define DRBD_CONNECT_INT_DEF 10   /* seconds */
+
+  /* keep-alive probes when idle */
+#define DRBD_PING_INT_MIN 1
+#define DRBD_PING_INT_MAX 120
+#define DRBD_PING_INT_DEF 10
+
+ /* timeout for the ping packets.*/
+#define DRBD_PING_TIMEO_MIN  1
+#define DRBD_PING_TIMEO_MAX  100
+#define DRBD_PING_TIMEO_DEF  5
+
+  /* max number of write requests between write barriers */
+#define DRBD_MAX_EPOCH_SIZE_MIN 1
+#define DRBD_MAX_EPOCH_SIZE_MAX 20000
+#define DRBD_MAX_EPOCH_SIZE_DEF 2048
+
+  /* I don't think that a tcp send buffer of more than 10M is useful */
+#define DRBD_SNDBUF_SIZE_MIN  0
+#define DRBD_SNDBUF_SIZE_MAX  (10<<20)
+#define DRBD_SNDBUF_SIZE_DEF  (2*65535)
+
+  /* @4k PageSize -> 128kB - 512MB */
+#define DRBD_MAX_BUFFERS_MIN  32
+#define DRBD_MAX_BUFFERS_MAX  131072
+#define DRBD_MAX_BUFFERS_DEF  2048
+
+  /* @4k PageSize -> 4kB - 512MB */
+#define DRBD_UNPLUG_WATERMARK_MIN  1
+#define DRBD_UNPLUG_WATERMARK_MAX  131072
+#define DRBD_UNPLUG_WATERMARK_DEF (DRBD_MAX_BUFFERS_DEF/16)
+
+  /* 0 is disabled.
+   * 200 should be more than enough even for very short timeouts */
+#define DRBD_KO_COUNT_MIN  0
+#define DRBD_KO_COUNT_MAX  200
+#define DRBD_KO_COUNT_DEF  0
+/* } */
+
+/* syncer { */
+  /* FIXME allow rate to be zero? */
+#define DRBD_RATE_MIN 1
+/* channel bonding 10 GbE, or other hardware */
+#define DRBD_RATE_MAX (4 << 20)
+#define DRBD_RATE_DEF 250  /* kb/second */
+
+  /* less than 7 would hit performance unnecessarily.
+   * 3833 is the largest prime that still fits
+   * into 64 sectors of activity log */
+#define DRBD_AL_EXTENTS_MIN  7
+#define DRBD_AL_EXTENTS_MAX  3833
+#define DRBD_AL_EXTENTS_DEF  127
+
+#define DRBD_AFTER_MIN  -1
+#define DRBD_AFTER_MAX  255
+#define DRBD_AFTER_DEF  -1
+
+/* } */
+
+/* drbdsetup XY resize -d Z
+ * you are free to reduce the device size to nothing, if you want to.
+ * the upper limit with 64bit kernel, enough ram and flexible meta data
+ * is 16 TB, currently. */
+/* DRBD_MAX_SECTORS */
+#define DRBD_DISK_SIZE_SECT_MIN  0
+#define DRBD_DISK_SIZE_SECT_MAX  (16 * (2LLU << 30))
+#define DRBD_DISK_SIZE_SECT_DEF  0 /* = disabled = no user size... */
+
+#define DRBD_ON_IO_ERROR_DEF PassOn
+#define DRBD_FENCING_DEF DontCare
+#define DRBD_AFTER_SB_0P_DEF Disconnect
+#define DRBD_AFTER_SB_1P_DEF Disconnect
+#define DRBD_AFTER_SB_2P_DEF Disconnect
+#define DRBD_RR_CONFLICT_DEF Disconnect
+
+#define DRBD_MAX_BIO_BVECS_MIN 0
+#define DRBD_MAX_BIO_BVECS_MAX 128
+#define DRBD_MAX_BIO_BVECS_DEF 0
+
+#undef RANGE
+#endif
diff -uNrp linux-2.6.29-rc8/include/linux/drbd_nl.h linux-2.6.29-rc8-drbd/include/linux/drbd_nl.h
--- linux-2.6.29-rc8/include/linux/drbd_nl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.29-rc8-drbd/include/linux/drbd_nl.h	2009-03-18 12:19:56.779296000 +0100
@@ -0,0 +1,134 @@
+/*
+   PACKET( name,
+	  TYPE ( pn, pr, member )
+	  ...
+   )
+
+   You may never reuse one of the pn arguments
+*/
+
+#if !defined(NL_PACKET) || !defined(NL_STRING) || !defined(NL_INTEGER) || !defined(NL_BIT) || !defined(NL_INT64)
+#error "The macros NL_PACKET, NL_STRING, NL_INTEGER, NL_INT64 and NL_BIT need to be defined"
+#endif
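+
+/* Note: this file is included several times with different definitions of
+ * the NL_* macros; see drbd_tag_magic.h and drbd_nl.c for what gets
+ * generated from the packet descriptions below. */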
+
+NL_PACKET(primary, 1,
+       NL_BIT(		1,	T_MAY_IGNORE,	overwrite_peer)
+)
+
+NL_PACKET(secondary, 2, )
+
+NL_PACKET(disk_conf, 3,
+	NL_INT64(	2,	T_MAY_IGNORE,	disk_size)
+	NL_STRING(	3,	T_MANDATORY,	backing_dev,	128)
+	NL_STRING(	4,	T_MANDATORY,	meta_dev,	128)
+	NL_INTEGER(	5,	T_MANDATORY,	meta_dev_idx)
+	NL_INTEGER(	6,	T_MAY_IGNORE,	on_io_error)
+	NL_INTEGER(	7,	T_MAY_IGNORE,	fencing)
+	NL_BIT(		37,	T_MAY_IGNORE,	use_bmbv)
+	NL_BIT(		53,	T_MAY_IGNORE,	no_disk_flush)
+	NL_BIT(		54,	T_MAY_IGNORE,	no_md_flush)
+	  /*  55 max_bio_size was available in 8.2.6rc2 */
+	NL_INTEGER(	56,	T_MAY_IGNORE,	max_bio_bvecs)
+	NL_BIT(		57,	T_MAY_IGNORE,	no_disk_barrier)
+	NL_BIT(		58,	T_MAY_IGNORE,	no_disk_drain)
+)
+
+NL_PACKET(detach, 4, )
+
+NL_PACKET(net_conf, 5,
+	NL_STRING(	8,	T_MANDATORY,	my_addr,	128)
+	NL_STRING(	9,	T_MANDATORY,	peer_addr,	128)
+	NL_STRING(	10,	T_MAY_IGNORE,	shared_secret,	SHARED_SECRET_MAX)
+	NL_STRING(	11,	T_MAY_IGNORE,	cram_hmac_alg,	SHARED_SECRET_MAX)
+	NL_STRING(	44,	T_MAY_IGNORE,	integrity_alg,	SHARED_SECRET_MAX)
+	NL_INTEGER(	14,	T_MAY_IGNORE,	timeout)
+	NL_INTEGER(	15,	T_MANDATORY,	wire_protocol)
+	NL_INTEGER(	16,	T_MAY_IGNORE,	try_connect_int)
+	NL_INTEGER(	17,	T_MAY_IGNORE,	ping_int)
+	NL_INTEGER(	18,	T_MAY_IGNORE,	max_epoch_size)
+	NL_INTEGER(	19,	T_MAY_IGNORE,	max_buffers)
+	NL_INTEGER(	20,	T_MAY_IGNORE,	unplug_watermark)
+	NL_INTEGER(	21,	T_MAY_IGNORE,	sndbuf_size)
+	NL_INTEGER(	22,	T_MAY_IGNORE,	ko_count)
+	NL_INTEGER(	24,	T_MAY_IGNORE,	after_sb_0p)
+	NL_INTEGER(	25,	T_MAY_IGNORE,	after_sb_1p)
+	NL_INTEGER(	26,	T_MAY_IGNORE,	after_sb_2p)
+	NL_INTEGER(	39,	T_MAY_IGNORE,	rr_conflict)
+	NL_INTEGER(	40,	T_MAY_IGNORE,	ping_timeo)
+	  /* 59 addr_family was available in GIT, never released */
+	NL_BIT(		60,	T_MANDATORY,	mind_af)
+	NL_BIT(		27,	T_MAY_IGNORE,	want_lose)
+	NL_BIT(		28,	T_MAY_IGNORE,	two_primaries)
+	NL_BIT(		41,	T_MAY_IGNORE,	always_asbp)
+	NL_BIT(		61,	T_MAY_IGNORE,	no_cork)
+	NL_BIT(		62,	T_MANDATORY,	auto_sndbuf_size)
+)
+
+NL_PACKET(disconnect, 6, )
+
+NL_PACKET(resize, 7,
+	NL_INT64(		29,	T_MAY_IGNORE,	resize_size)
+)
+
+NL_PACKET(syncer_conf, 8,
+	NL_INTEGER(	30,	T_MAY_IGNORE,	rate)
+	NL_INTEGER(	31,	T_MAY_IGNORE,	after)
+	NL_INTEGER(	32,	T_MAY_IGNORE,	al_extents)
+	NL_STRING(      52,     T_MAY_IGNORE,   verify_alg,     SHARED_SECRET_MAX)
+	NL_STRING(      51,     T_MAY_IGNORE,   cpu_mask,       32)
+	NL_STRING(	64,	T_MAY_IGNORE,	csums_alg,	SHARED_SECRET_MAX)
+)
+
+NL_PACKET(invalidate, 9, )
+NL_PACKET(invalidate_peer, 10, )
+NL_PACKET(pause_sync, 11, )
+NL_PACKET(resume_sync, 12, )
+NL_PACKET(suspend_io, 13, )
+NL_PACKET(resume_io, 14, )
+NL_PACKET(outdate, 15, )
+NL_PACKET(get_config, 16, )
+NL_PACKET(get_state, 17,
+	NL_INTEGER(	33,	T_MAY_IGNORE,	state_i)
+)
+
+NL_PACKET(get_uuids, 18,
+	NL_STRING(	34,	T_MAY_IGNORE,	uuids,	(UUID_SIZE*sizeof(__u64)))
+	NL_INTEGER(	35,	T_MAY_IGNORE,	uuids_flags)
+)
+
+NL_PACKET(get_timeout_flag, 19,
+	NL_BIT(		36,	T_MAY_IGNORE,	use_degraded)
+)
+
+NL_PACKET(call_helper, 20,
+	NL_STRING(	38,	T_MAY_IGNORE,	helper,		32)
+)
+
+/* Tag nr 42 already allocated in drbd-8.1 development. */
+
+NL_PACKET(sync_progress, 23,
+	NL_INTEGER(	43,	T_MAY_IGNORE,	sync_progress)
+)
+
+NL_PACKET(dump_ee, 24,
+	NL_STRING(	45,	T_MAY_IGNORE,	dump_ee_reason, 32)
+	NL_STRING(	46,	T_MAY_IGNORE,	seen_digest, SHARED_SECRET_MAX)
+	NL_STRING(	47,	T_MAY_IGNORE,	calc_digest, SHARED_SECRET_MAX)
+	NL_INT64(	48,	T_MAY_IGNORE,	ee_sector)
+	NL_INT64(	49,	T_MAY_IGNORE,	ee_block_id)
+	NL_STRING(	50,	T_MAY_IGNORE,	ee_data,	32 << 10)
+)
+
+NL_PACKET(start_ov, 25,
+)
+
+NL_PACKET(new_c_uuid, 26,
+       NL_BIT(		63,	T_MANDATORY,	clear_bm)
+)
+
+#undef NL_PACKET
+#undef NL_INTEGER
+#undef NL_INT64
+#undef NL_BIT
+#undef NL_STRING
+
diff -uNrp linux-2.6.29-rc8/include/linux/drbd_tag_magic.h linux-2.6.29-rc8-drbd/include/linux/drbd_tag_magic.h
--- linux-2.6.29-rc8/include/linux/drbd_tag_magic.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.29-rc8-drbd/include/linux/drbd_tag_magic.h	2008-11-24 11:43:33.148988000 +0100
@@ -0,0 +1,83 @@
+#ifndef DRBD_TAG_MAGIC_H
+#define DRBD_TAG_MAGIC_H
+
+#define TT_END     0
+#define TT_REMOVED 0xE000
+
+/* declare packet_type enums */
+enum packet_types {
+#define NL_PACKET(name, number, fields) P_ ## name = number,
+#define NL_INTEGER(pn, pr, member)
+#define NL_INT64(pn, pr, member)
+#define NL_BIT(pn, pr, member)
+#define NL_STRING(pn, pr, member, len)
+#include "drbd_nl.h"
+	P_nl_after_last_packet,
+};
+
+/* These structs are used to deduce the size of the tag lists: */
+#define NL_PACKET(name, number, fields)	\
+	struct name ## _tag_len_struct { fields };
+#define NL_INTEGER(pn, pr, member)		\
+	int member; int tag_and_len ## member;
+#define NL_INT64(pn, pr, member)		\
+	__u64 member; int tag_and_len ## member;
+#define NL_BIT(pn, pr, member)		\
+	unsigned char member:1; int tag_and_len ## member;
+#define NL_STRING(pn, pr, member, len)	\
+	unsigned char member[len]; int member ## _len; \
+	int tag_and_len ## member;
+#include "linux/drbd_nl.h"
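+
+/* For illustration: NL_PACKET(primary, 1, NL_BIT(1, T_MAY_IGNORE, overwrite_peer))
+ * expands under the definitions above to roughly
+ *	struct primary_tag_len_struct {
+ *		unsigned char overwrite_peer:1;
+ *		int tag_and_lenoverwrite_peer;
+ *	};
+ */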
+
+/* declare tag-list sizes */
+static const int tag_list_sizes[] = {
+#define NL_PACKET(name, number, fields) 2 fields ,
+#define NL_INTEGER(pn, pr, member)      + 4 + 4
+#define NL_INT64(pn, pr, member)        + 4 + 8
+#define NL_BIT(pn, pr, member)          + 4 + 1
+#define NL_STRING(pn, pr, member, len)  + 4 + (len)
+#include "drbd_nl.h"
+};
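+
+/* e.g. the entry generated for primary is "2 + 4 + 1,": a 4 byte tag/length
+ * header plus 1 byte of payload for its single NL_BIT field; the leading 2
+ * presumably accounts for the terminating TT_END word. */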
+
+/* The two highest bits are used for the tag type */
+#define TT_MASK      0xC000
+#define TT_INTEGER   0x0000
+#define TT_INT64     0x4000
+#define TT_BIT       0x8000
+#define TT_STRING    0xC000
+/* The next bit indicates if processing of the tag is mandatory */
+#define T_MANDATORY  0x2000
+#define T_MAY_IGNORE 0x0000
+#define TN_MASK      0x1fff
+/* The remaining 13 bits are used to enumerate the tags */
+
+#define tag_type(T)   ((T) & TT_MASK)
+#define tag_number(T) ((T) & TN_MASK)
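+
+/* Example: T_overwrite_peer (declared below) becomes 1 | TT_BIT | T_MAY_IGNORE,
+ * i.e. 0x8001; tag_number() recovers 1 and tag_type() yields TT_BIT. */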
+
+/* declare tag enums */
+#define NL_PACKET(name, number, fields) fields
+enum drbd_tags {
+#define NL_INTEGER(pn, pr, member)     T_ ## member = pn | TT_INTEGER | pr ,
+#define NL_INT64(pn, pr, member)       T_ ## member = pn | TT_INT64   | pr ,
+#define NL_BIT(pn, pr, member)         T_ ## member = pn | TT_BIT     | pr ,
+#define NL_STRING(pn, pr, member, len) T_ ## member = pn | TT_STRING  | pr ,
+#include "drbd_nl.h"
+};
+
+struct tag {
+	const char *name;
+	int type_n_flags;
+	int max_len;
+};
+
+/* declare tag names */
+#define NL_PACKET(name, number, fields) fields
+static const struct tag tag_descriptions[] = {
+#define NL_INTEGER(pn, pr, member)     [ pn ] = { #member, TT_INTEGER | pr, sizeof(int)   },
+#define NL_INT64(pn, pr, member)       [ pn ] = { #member, TT_INT64   | pr, sizeof(__u64) },
+#define NL_BIT(pn, pr, member)         [ pn ] = { #member, TT_BIT     | pr, sizeof(int)   },
+#define NL_STRING(pn, pr, member, len) [ pn ] = { #member, TT_STRING  | pr, (len)         },
+#include "drbd_nl.h"
+};
+
+#endif
diff -uNrp linux-2.6.29-rc8/drivers/block/drbd/drbd_nl.c linux-2.6.29-rc8-drbd/drivers/block/drbd/drbd_nl.c
--- linux-2.6.29-rc8/drivers/block/drbd/drbd_nl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.29-rc8-drbd/drivers/block/drbd/drbd_nl.c	2009-03-23 11:25:50.356133000 +0100
@@ -0,0 +1,2420 @@
+/*
+-*- linux-c -*-
+   drbd_nl.c
+   Kernel module for 2.6.x Kernels
+
+   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
+
+   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
+   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@...bit.com>.
+   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@...bit.com>.
+
+   drbd is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+
+   drbd is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with drbd; see the file COPYING.  If not, write to
+   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ */
+
+#include <linux/autoconf.h>
+#include <linux/module.h>
+#include <linux/in.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h> /* for fsync_bdev */
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/connector.h>
+#include <linux/drbd.h>
+#include <linux/blkpg.h>
+#include <linux/cpumask.h>
+
+#include "drbd_int.h"
+#include "drbd_wrappers.h"
+#include <linux/drbd_tag_magic.h>
+#include <linux/drbd_limits.h>
+
+/* see get_sb_bdev and bd_claim */
+static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
+
+/* Generate the tag_list to struct functions */
+#define NL_PACKET(name, number, fields) \
+STATIC int name ## _from_tags(struct drbd_conf *mdev, \
+	unsigned short *tags, struct name *arg) \
+{ \
+	int tag; \
+	int dlen; \
+	\
+	while ((tag = *tags++) != TT_END) { \
+		dlen = *tags++; \
+		switch (tag_number(tag)) { \
+		fields \
+		default: \
+			if (tag & T_MANDATORY) { \
+				ERR("Unknown tag: %d\n", tag_number(tag)); \
+				return 0; \
+			} \
+		} \
+		tags = (unsigned short *)((char *)tags + dlen); \
+	} \
+	return 1; \
+}
+#define NL_INTEGER(pn, pr, member) \
+	case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
+		 arg->member = *(int *)(tags); \
+		 break;
+#define NL_INT64(pn, pr, member) \
+	case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
+		 arg->member = *(u64 *)(tags); \
+		 break;
+#define NL_BIT(pn, pr, member) \
+	case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
+		 arg->member = *(char *)(tags) ? 1 : 0; \
+		 break;
+#define NL_STRING(pn, pr, member, len) \
+	case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
+		if (dlen > len) { \
+			ERR("arg too long: %s (%u wanted, max len: %u bytes)\n", \
+				#member, dlen, (unsigned int)len); \
+			return 0; \
+		} \
+		 arg->member ## _len = dlen; \
+		 memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
+		 break;
+#include "linux/drbd_nl.h"
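+
+/* Illustration: for the primary packet the above generates
+ *	STATIC int primary_from_tags(struct drbd_conf *mdev,
+ *		unsigned short *tags, struct primary *arg)
+ * which walks the tag list and fills in arg->overwrite_peer. */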
+
+/* Generate the struct to tag_list functions */
+#define NL_PACKET(name, number, fields) \
+STATIC unsigned short* \
+name ## _to_tags(struct drbd_conf *mdev, \
+	struct name *arg, unsigned short *tags) \
+{ \
+	fields \
+	return tags; \
+}
+
+#define NL_INTEGER(pn, pr, member) \
+	*tags++ = pn | pr | TT_INTEGER; \
+	*tags++ = sizeof(int); \
+	*(int *)tags = arg->member; \
+	tags = (unsigned short *)((char *)tags+sizeof(int));
+#define NL_INT64(pn, pr, member) \
+	*tags++ = pn | pr | TT_INT64; \
+	*tags++ = sizeof(u64); \
+	*(u64 *)tags = arg->member; \
+	tags = (unsigned short *)((char *)tags+sizeof(u64));
+#define NL_BIT(pn, pr, member) \
+	*tags++ = pn | pr | TT_BIT; \
+	*tags++ = sizeof(char); \
+	*(char *)tags = arg->member; \
+	tags = (unsigned short *)((char *)tags+sizeof(char));
+#define NL_STRING(pn, pr, member, len) \
+	*tags++ = pn | pr | TT_STRING; \
+	*tags++ = arg->member ## _len; \
+	memcpy(tags, arg->member, arg->member ## _len); \
+	tags = (unsigned short *)((char *)tags + arg->member ## _len);
+#include "linux/drbd_nl.h"
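+
+/* Illustration: for the single NL_BIT field of primary, primary_to_tags()
+ * emits the 16 bit tag word 1 | T_MAY_IGNORE | TT_BIT, a 16 bit length of
+ * sizeof(char), and the one byte value, then returns the advanced pointer. */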
+
+void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
+void drbd_nl_send_reply(struct cn_msg *, int);
+
+STATIC char *nl_packet_name(int packet_type)
+{
+/* Generate packet type strings */
+#define NL_PACKET(name, number, fields) \
+	[P_ ## name] = # name,
+#define NL_INTEGER Argh!
+#define NL_BIT Argh!
+#define NL_INT64 Argh!
+#define NL_STRING Argh!
+
+	static char *nl_tag_name[P_nl_after_last_packet] = {
+#include "linux/drbd_nl.h"
+	};
+
+	return (packet_type < sizeof(nl_tag_name)/sizeof(nl_tag_name[0])) ?
+	    nl_tag_name[packet_type] : "*Unknown*";
+}
+
+STATIC void nl_trace_packet(void *data)
+{
+	struct cn_msg *req = data;
+	struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
+
+	printk(KERN_INFO "drbd%d: "
+	       "Netlink: << %s (%d) - seq: %x, ack: %x, len: %x\n",
+	       nlp->drbd_minor,
+	       nl_packet_name(nlp->packet_type),
+	       nlp->packet_type,
+	       req->seq, req->ack, req->len);
+}
+
+STATIC void nl_trace_reply(void *data)
+{
+	struct cn_msg *req = data;
+	struct drbd_nl_cfg_reply *nlp = (struct drbd_nl_cfg_reply *)req->data;
+
+	printk(KERN_INFO "drbd%d: "
+	       "Netlink: >> %s (%d) - seq: %x, ack: %x, len: %x\n",
+	       nlp->minor,
+	       nlp->packet_type == P_nl_after_last_packet ?
+		   "Empty-Reply" : nl_packet_name(nlp->packet_type),
+	       nlp->packet_type,
+	       req->seq, req->ack, req->len);
+}
+
+int drbd_khelper(struct drbd_conf *mdev, char *cmd)
+{
+	char mb[12];
+	char *argv[] = {usermode_helper, cmd, mb, NULL };
+	int ret;
+	static char *envp[] = { "HOME=/",
+				"TERM=linux",
+				"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+				NULL };
+
+	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
+
+	INFO("helper command: %s %s %s\n", usermode_helper, cmd, mb);
+
+	drbd_bcast_ev_helper(mdev, cmd);
+	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
+	if (ret)
+		drbd_WARN("helper command: %s %s %s exit code %u (0x%x)\n",
+				usermode_helper, cmd, mb,
+				(ret >> 8) & 0xff, ret);
+	else
+		INFO("helper command: %s %s %s exit code %u (0x%x)\n",
+				usermode_helper, cmd, mb,
+				(ret >> 8) & 0xff, ret);
+
+	if (ret < 0) /* Ignore any ERRNOs we got. */
+		ret = 0;
+
+	return ret;
+}
+
+enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
+{
+	char *ex_to_string;
+	int r;
+	enum drbd_disk_state nps;
+	enum fencing_policy fp;
+
+	D_ASSERT(mdev->state.pdsk == DUnknown);
+
+	if (inc_local_if_state(mdev, Consistent)) {
+		fp = mdev->bc->dc.fencing;
+		dec_local(mdev);
+	} else {
+		drbd_WARN("Not fencing peer, I'm not even Consistent myself.\n");
+		return mdev->state.pdsk;
+	}
+
+	if (fp == Stonith)
+		_drbd_request_state(mdev, NS(susp, 1), ChgWaitComplete);
+
+	r = drbd_khelper(mdev, "fence-peer");
+
+	switch ((r>>8) & 0xff) {
+	case 3: /* peer is inconsistent */
+		ex_to_string = "peer is inconsistent or worse";
+		nps = Inconsistent;
+		break;
+	case 4:
+		ex_to_string = "peer is outdated";
+		nps = Outdated;
+		break;
+	case 5: /* peer was down, we will(have) create(d) a new UUID anyway... */
+		/* If we were more strict, we would return DUnknown here. */
+		ex_to_string = "peer is unreachable, assumed to be dead";
+		nps = Outdated;
+		break;
+	case 6: /* Peer is primary, voluntarily outdate myself.
+		 * This is useful when an unconnected Secondary is asked to
+		 * become Primary, but finds the other peer being active. */
+		ex_to_string = "peer is active";
+		drbd_WARN("Peer is primary, outdating myself.\n");
+		nps = DUnknown;
+		_drbd_request_state(mdev, NS(disk, Outdated), ChgWaitComplete);
+		break;
+	case 7:
+		if (fp != Stonith)
+			ERR("fence-peer() = 7 && fencing != Stonith !!!\n");
+		ex_to_string = "peer was stonithed";
+		nps = Outdated;
+		break;
+	default:
+		/* The script is broken ... */
+		nps = DUnknown;
+		ERR("fence-peer helper broken, returned %d\n", (r>>8)&0xff);
+		return nps;
+	}
+
+	INFO("fence-peer helper returned %d (%s)\n",
+			(r>>8) & 0xff, ex_to_string);
+	return nps;
+}
+
+
+int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
+{
+	const int max_tries = 4;
+	int r = 0;
+	int try = 0;
+	int forced = 0;
+	union drbd_state_t mask, val;
+	enum drbd_disk_state nps;
+
+	if (new_role == Primary)
+		request_ping(mdev); /* Detect a dead peer ASAP */
+
+	mutex_lock(&mdev->state_mutex);
+
+	mask.i = 0; mask.role = role_mask;
+	val.i  = 0; val.role  = new_role;
+
+	while (try++ < max_tries) {
+		r = _drbd_request_state(mdev, mask, val, ChgWaitComplete);
+
+		/* in case we first succeeded to outdate,
+		 * but now suddenly could establish a connection */
+		if (r == SS_CW_FailedByPeer && mask.pdsk != 0) {
+			val.pdsk = 0;
+			mask.pdsk = 0;
+			continue;
+		}
+
+		if (r == SS_NoUpToDateDisk && force &&
+		    (mdev->state.disk == Inconsistent ||
+		     mdev->state.disk == Outdated)) {
+			mask.disk = disk_mask;
+			val.disk  = UpToDate;
+			forced = 1;
+			continue;
+		}
+
+		if (r == SS_NoUpToDateDisk &&
+		    mdev->state.disk == Consistent) {
+			D_ASSERT(mdev->state.pdsk == DUnknown);
+			nps = drbd_try_outdate_peer(mdev);
+
+			if (nps == Outdated) {
+				val.disk = UpToDate;
+				mask.disk = disk_mask;
+			}
+
+			val.pdsk = nps;
+			mask.pdsk = disk_mask;
+
+			continue;
+		}
+
+		if (r == SS_NothingToDo)
+			goto fail;
+		if (r == SS_PrimaryNOP) {
+			nps = drbd_try_outdate_peer(mdev);
+
+			if (force && nps > Outdated) {
+				drbd_WARN("Forced into split brain situation!\n");
+				nps = Outdated;
+			}
+
+			mask.pdsk = disk_mask;
+			val.pdsk  = nps;
+
+			continue;
+		}
+		if (r == SS_TwoPrimaries) {
+			/* Maybe the peer is detected as dead very soon...
+			   retry at most once more in this case. */
+			__set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10);
+			if (try < max_tries)
+				try = max_tries - 1;
+			continue;
+		}
+		if (r < SS_Success) {
+			r = _drbd_request_state(mdev, mask, val,
+						ChgStateVerbose + ChgWaitComplete);
+			if (r < SS_Success)
+				goto fail;
+		}
+		break;
+	}
+
+	if (forced)
+		drbd_WARN("Forced to consider local data as UpToDate!\n");
+
+	fsync_bdev(mdev->this_bdev);
+
+	/* Wait until nothing is on the fly :) */
+	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
+
+	if (new_role == Secondary) {
+		set_disk_ro(mdev->vdisk, TRUE);
+		if (inc_local(mdev)) {
+			mdev->bc->md.uuid[Current] &= ~(u64)1;
+			dec_local(mdev);
+		}
+	} else {
+		if (inc_net(mdev)) {
+			mdev->net_conf->want_lose = 0;
+			dec_net(mdev);
+		}
+		set_disk_ro(mdev->vdisk, FALSE);
+		if (inc_local(mdev)) {
+			if (((mdev->state.conn < Connected ||
+			       mdev->state.pdsk <= Failed)
+			      && mdev->bc->md.uuid[Bitmap] == 0) || forced)
+				drbd_uuid_new_current(mdev);
+
+			mdev->bc->md.uuid[Current] |=  (u64)1;
+			dec_local(mdev);
+		}
+	}
+
+	if ((new_role == Secondary) && inc_local(mdev)) {
+		drbd_al_to_on_disk_bm(mdev);
+		dec_local(mdev);
+	}
+
+	if (mdev->state.conn >= WFReportParams) {
+		/* if this was forced, we should consider sync */
+		if (forced)
+			drbd_send_uuids(mdev);
+		drbd_send_state(mdev);
+	}
+
+	drbd_md_sync(mdev);
+
+ fail:
+	mutex_unlock(&mdev->state_mutex);
+	return r;
+}
+
+
+STATIC int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			   struct drbd_nl_cfg_reply *reply)
+{
+	struct primary primary_args;
+
+	memset(&primary_args, 0, sizeof(struct primary));
+	if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
+		reply->ret_code = UnknownMandatoryTag;
+		return 0;
+	}
+
+	reply->ret_code =
+		drbd_set_role(mdev, Primary, primary_args.overwrite_peer);
+
+	return 0;
+}
+
+STATIC int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			     struct drbd_nl_cfg_reply *reply)
+{
+	reply->ret_code = drbd_set_role(mdev, Secondary, 0);
+
+	return 0;
+}
+
+/* initializes the md.*_offset members, so we are able to find
+ * the on disk meta data */
+STATIC void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
+				       struct drbd_backing_dev *bdev)
+{
+	sector_t md_size_sect = 0;
+	switch (bdev->dc.meta_dev_idx) {
+	default:
+		/* v07 style fixed size indexed meta data */
+		bdev->md.md_size_sect = MD_RESERVED_SECT;
+		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
+		bdev->md.al_offset = MD_AL_OFFSET;
+		bdev->md.bm_offset = MD_BM_OFFSET;
+		break;
+	case DRBD_MD_INDEX_FLEX_EXT:
+		/* just occupy the full device; unit: sectors */
+		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
+		bdev->md.md_offset = 0;
+		bdev->md.al_offset = MD_AL_OFFSET;
+		bdev->md.bm_offset = MD_BM_OFFSET;
+		break;
+	case DRBD_MD_INDEX_INTERNAL:
+	case DRBD_MD_INDEX_FLEX_INT:
+		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
+		/* al size is still fixed */
+		bdev->md.al_offset = -MD_AL_MAX_SIZE;
+		/* we need (slightly less than) ~ this many bitmap sectors: */
+		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
+		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
+		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
+		md_size_sect = ALIGN(md_size_sect, 8);
+
+		/* plus the "drbd meta data super block",
+		 * and the activity log; */
+		md_size_sect += MD_BM_OFFSET;
+
+		bdev->md.md_size_sect = md_size_sect;
+		/* bitmap offset is adjusted by 'super' block size */
+		bdev->md.bm_offset   = -md_size_sect + MD_AL_OFFSET;
+		break;
+	}
+}
+
+char *ppsize(char *buf, unsigned long long size)
+{
+	/* Needs 9 bytes at max. */
+	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
+	int base = 0;
+	while (size >= 10000) {
+		/* shift + round */
+		size = (size >> 10) + !!(size & (1<<9));
+		base++;
+	}
+	sprintf(buf, "%lu %cB", (long)size, units[base]);
+
+	return buf;
+}
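+
+/* e.g. ppsize(buf, 1048576) yields "1024 MB": the input is in KB, and each
+ * shift by 10 (with rounding) moves to the next unit. */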
+
+/* there is still a theoretical deadlock when called from receiver
+ * on an Inconsistent Primary:
+ *  remote READ does inc_ap_bio, receiver would need to receive answer
+ *  packet from remote to dec_ap_bio again.
+ *  receiver receive_sizes(), comes here,
+ *  waits for ap_bio_cnt == 0. -> deadlock.
+ * but this cannot happen, actually, because:
+ *  Primary Inconsistent, and peer's disk is unreachable
+ *  (not connected, or bad/no disk on peer):
+ *  see drbd_fail_request_early, ap_bio_cnt is zero.
+ *  Primary Inconsistent, and SyncTarget:
+ *  peer may not initiate a resize.
+ */
+void drbd_suspend_io(struct drbd_conf *mdev)
+{
+	int in_flight;
+	set_bit(SUSPEND_IO, &mdev->flags);
+	in_flight = atomic_read(&mdev->ap_bio_cnt);
+	if (in_flight)
+		wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
+}
+
+void drbd_resume_io(struct drbd_conf *mdev)
+{
+	clear_bit(SUSPEND_IO, &mdev->flags);
+	wake_up(&mdev->misc_wait);
+}
+
+/**
+ * drbd_determin_dev_size:
+ * Evaluates all constraints and sets our correct device size.
+ * Negative return values indicate errors. 0 and positive values
+ * indicate success.
+ * You should call drbd_md_sync() after calling this function.
+ */
+enum determin_dev_size_enum drbd_determin_dev_size(struct drbd_conf *mdev) __must_hold(local)
+{
+	sector_t prev_first_sect, prev_size; /* previous meta location */
+	sector_t la_size;
+	sector_t size;
+	char ppb[10];
+
+	int md_moved, la_size_changed;
+	enum determin_dev_size_enum rv = unchanged;
+
+	/* race:
+	 * application request passes inc_ap_bio,
+	 * but then cannot get an AL-reference.
+	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
+	 *
+	 * to avoid that:
+	 * Suspend IO right here.
+	 * still lock the act_log to not trigger ASSERTs there.
+	 */
+	drbd_suspend_io(mdev);
+
+	/* no wait necessary anymore, actually we could assert that */
+	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
+
+	prev_first_sect = drbd_md_first_sector(mdev->bc);
+	prev_size = mdev->bc->md.md_size_sect;
+	la_size = mdev->bc->md.la_size_sect;
+
+	/* TODO: there should only be some assert here, not a (re)init... */
+	drbd_md_set_sector_offsets(mdev, mdev->bc);
+
+	size = drbd_new_dev_size(mdev, mdev->bc);
+
+	if (drbd_get_capacity(mdev->this_bdev) != size ||
+	    drbd_bm_capacity(mdev) != size) {
+		int err;
+		err = drbd_bm_resize(mdev, size);
+		if (unlikely(err)) {
+			/* currently there is only one error: ENOMEM! */
+			size = drbd_bm_capacity(mdev)>>1;
+			if (size == 0) {
+				ERR("OUT OF MEMORY! "
+				    "Could not allocate bitmap!\n");
+			} else {
+				ERR("BM resizing failed. "
+				    "Leaving size unchanged at size = %lu KB\n",
+				    (unsigned long)size);
+			}
+			rv = dev_size_error;
+		}
+		/* racy, see comments above. */
+		drbd_set_my_capacity(mdev, size);
+		mdev->bc->md.la_size_sect = size;
+		INFO("size = %s (%llu KB)\n", ppsize(ppb, size>>1),
+		     (unsigned long long)size>>1);
+	}
+	if (rv == dev_size_error)
+		goto out;
+
+	la_size_changed = (la_size != mdev->bc->md.la_size_sect);
+
+	md_moved = prev_first_sect != drbd_md_first_sector(mdev->bc)
+		|| prev_size	   != mdev->bc->md.md_size_sect;
+
+	if (md_moved) {
+		drbd_WARN("Moving meta-data.\n");
+		/* assert: (flexible) internal meta data */
+	}
+
+	if (la_size_changed || md_moved) {
+		drbd_al_shrink(mdev); /* All extents inactive. */
+		INFO("Writing the whole bitmap, size changed\n");
+		rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed");
+		drbd_md_mark_dirty(mdev);
+	}
+
+	if (size > la_size)
+		rv = grew;
+	if (size < la_size)
+		rv = shrunk;
+out:
+	lc_unlock(mdev->act_log);
+	wake_up(&mdev->al_wait);
+	drbd_resume_io(mdev);
+
+	return rv;
+}
+
+sector_t
+drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+{
+	sector_t p_size = mdev->p_size;   /* partner's disk size. */
+	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
+	sector_t m_size; /* my size */
+	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
+	sector_t size = 0;
+
+	m_size = drbd_get_max_capacity(bdev);
+
+	if (p_size && m_size) {
+		size = min_t(sector_t, p_size, m_size);
+	} else {
+		if (la_size) {
+			size = la_size;
+			if (m_size && m_size < size)
+				size = m_size;
+			if (p_size && p_size < size)
+				size = p_size;
+		} else {
+			if (m_size)
+				size = m_size;
+			if (p_size)
+				size = p_size;
+		}
+	}
+
+	if (size == 0)
+		ERR("Both nodes diskless!\n");
+
+	if (u_size) {
+		if (u_size > size)
+			ERR("Requested disk size is too big (%lu > %lu)\n",
+			    (unsigned long)u_size>>1, (unsigned long)size>>1);
+		else
+			size = u_size;
+	}
+
+	return size;
+}
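+
+/* e.g. with a peer disk of 100G and a local disk of 80G the result is 80G;
+ * an explicitly configured disk_size may only reduce it further. */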
+
+/**
+ * drbd_check_al_size:
+ * checks that the al lru is of requested size, and if necessary tries to
+ * allocate a new one. returns -EBUSY if the current al lru is still used,
+ * -ENOMEM when allocation failed, and 0 on success. You should call
+ * drbd_md_sync() after calling this function.
+ */
+STATIC int drbd_check_al_size(struct drbd_conf *mdev)
+{
+	struct lru_cache *n, *t;
+	struct lc_element *e;
+	unsigned int in_use;
+	int i;
+
+	ERR_IF(mdev->sync_conf.al_extents < 7)
+		mdev->sync_conf.al_extents = 127;
+
+	if (mdev->act_log &&
+	    mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
+		return 0;
+
+	in_use = 0;
+	t = mdev->act_log;
+	n = lc_alloc("act_log", mdev->sync_conf.al_extents,
+		     sizeof(struct lc_element), mdev);
+
+	if (n == NULL) {
+		ERR("Cannot allocate act_log lru!\n");
+		return -ENOMEM;
+	}
+	spin_lock_irq(&mdev->al_lock);
+	if (t) {
+		for (i = 0; i < t->nr_elements; i++) {
+			e = lc_entry(t, i);
+			if (e->refcnt)
+				ERR("refcnt(%d)==%d\n",
+				    e->lc_number, e->refcnt);
+			in_use += e->refcnt;
+		}
+	}
+	if (!in_use)
+		mdev->act_log = n;
+	spin_unlock_irq(&mdev->al_lock);
+	if (in_use) {
+		ERR("Activity log still in use!\n");
+		lc_free(n);
+		return -EBUSY;
+	} else {
+		if (t)
+			lc_free(t);
+	}
+	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
+	return 0;
+}
+
+void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local)
+{
+	struct request_queue * const q = mdev->rq_queue;
+	struct request_queue * const b = mdev->bc->backing_bdev->bd_disk->queue;
+	/* unsigned int old_max_seg_s = q->max_segment_size; */
+	int max_segments = mdev->bc->dc.max_bio_bvecs;
+
+	if (b->merge_bvec_fn && !mdev->bc->dc.use_bmbv)
+		max_seg_s = PAGE_SIZE;
+
+	max_seg_s = min(b->max_sectors * b->hardsect_size, max_seg_s);
+
+	MTRACE(TraceTypeRq, TraceLvlSummary,
+	       DUMPI(b->max_sectors);
+	       DUMPI(b->max_phys_segments);
+	       DUMPI(b->max_hw_segments);
+	       DUMPI(b->max_segment_size);
+	       DUMPI(b->hardsect_size);
+	       DUMPI(b->seg_boundary_mask);
+	       );
+
+	q->max_sectors	     = max_seg_s >> 9;
+	if (max_segments) {
+		q->max_phys_segments = max_segments;
+		q->max_hw_segments   = max_segments;
+	} else {
+		q->max_phys_segments = MAX_PHYS_SEGMENTS;
+		q->max_hw_segments   = MAX_HW_SEGMENTS;
+	}
+	q->max_segment_size  = max_seg_s;
+	q->hardsect_size     = 512;
+	q->seg_boundary_mask = PAGE_SIZE-1;
+	blk_queue_stack_limits(q, b);
+
+	/* KERNEL BUG. in ll_rw_blk.c ??
+	 * t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
+	 * should be
+	 * t->max_segment_size = min_not_zero(...,...)
+	 * workaround here: */
+	if (q->max_segment_size == 0)
+		q->max_segment_size = max_seg_s;
+
+	MTRACE(TraceTypeRq, TraceLvlSummary,
+	       DUMPI(q->max_sectors);
+	       DUMPI(q->max_phys_segments);
+	       DUMPI(q->max_hw_segments);
+	       DUMPI(q->max_segment_size);
+	       DUMPI(q->hardsect_size);
+	       DUMPI(q->seg_boundary_mask);
+	       );
+
+	if (b->merge_bvec_fn)
+		drbd_WARN("Backing device's merge_bvec_fn() = %p\n",
+		     b->merge_bvec_fn);
+	INFO("max_segment_size ( = BIO size ) = %u\n", q->max_segment_size);
+
+	if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
+		INFO("Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
+		     q->backing_dev_info.ra_pages,
+		     b->backing_dev_info.ra_pages);
+		q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
+	}
+}
+
+/* always returns 0;
+ * the interesting return code is in reply->ret_code */
+STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			     struct drbd_nl_cfg_reply *reply)
+{
+	enum ret_codes retcode;
+	enum determin_dev_size_enum dd;
+	sector_t max_possible_sectors;
+	sector_t min_md_device_sectors;
+	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
+	struct inode *inode, *inode2;
+	struct lru_cache *resync_lru = NULL;
+	union drbd_state_t ns, os;
+	int rv, ntries = 0;
+	int cp_discovered = 0;
+
+	/* if you want to reconfigure, please tear down first */
+	if (mdev->state.disk > Diskless) {
+		retcode = HaveDiskConfig;
+		goto fail;
+	}
+
+	/*
+	 * We may have gotten here very quickly from a detach. Wait for a bit,
+	 * then fail.
+	 */
+	while (1) {
+		__no_warn(local, nbc = mdev->bc;);
+		if (nbc == NULL)
+			break;
+		if (ntries++ >= 5) {
+			drbd_WARN("drbd_nl_disk_conf: mdev->bc not NULL.\n");
+			retcode = HaveDiskConfig;
+			goto fail;
+		}
+		__set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(HZ/10);
+	}
+
+	nbc = kmalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
+	if (!nbc) {
+		retcode = KMallocFailed;
+		goto fail;
+	}
+
+	memset(&nbc->md, 0, sizeof(struct drbd_md));
+
+	if (!(nlp->flags & DRBD_NL_SET_DEFAULTS) && inc_local(mdev)) {
+		memcpy(&nbc->dc, &mdev->bc->dc, sizeof(struct disk_conf));
+		dec_local(mdev);
+	} else {
+		memset(&nbc->dc, 0, sizeof(struct disk_conf));
+		nbc->dc.disk_size   = DRBD_DISK_SIZE_SECT_DEF;
+		nbc->dc.on_io_error = DRBD_ON_IO_ERROR_DEF;
+		nbc->dc.fencing     = DRBD_FENCING_DEF;
+		nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;
+	}
+
+	if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
+		retcode = UnknownMandatoryTag;
+		goto fail;
+	}
+
+	nbc->lo_file = NULL;
+	nbc->md_file = NULL;
+
+	if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
+		retcode = LDMDInvalid;
+		goto fail;
+	}
+
+	nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0);
+	if (IS_ERR(nbc->lo_file)) {
+		ERR("open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
+		    PTR_ERR(nbc->lo_file));
+		nbc->lo_file = NULL;
+		retcode = LDNameInvalid;
+		goto fail;
+	}
+
+	inode = nbc->lo_file->f_dentry->d_inode;
+
+	if (!S_ISBLK(inode->i_mode)) {
+		retcode = LDNoBlockDev;
+		goto fail;
+	}
+
+	nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0);
+	if (IS_ERR(nbc->md_file)) {
+		ERR("open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
+		    PTR_ERR(nbc->md_file));
+		nbc->md_file = NULL;
+		retcode = MDNameInvalid;
+		goto fail;
+	}
+
+	inode2 = nbc->md_file->f_dentry->d_inode;
+
+	if (!S_ISBLK(inode2->i_mode)) {
+		retcode = MDNoBlockDev;
+		goto fail;
+	}
+
+	nbc->backing_bdev = inode->i_bdev;
+	if (bd_claim(nbc->backing_bdev, mdev)) {
+		printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n",
+		       nbc->backing_bdev, mdev,
+		       nbc->backing_bdev->bd_holder,
+		       nbc->backing_bdev->bd_contains->bd_holder,
+		       nbc->backing_bdev->bd_holders);
+		retcode = LDMounted;
+		goto fail;
+	}
+
+	resync_lru = lc_alloc("resync", 61, sizeof(struct bm_extent), mdev);
+	if (!resync_lru) {
+		retcode = KMallocFailed;
+		goto fail;
+	}
+
+	if (!mdev->bitmap) {
+		if (drbd_bm_init(mdev)) {
+			retcode = KMallocFailed;
+			goto fail;
+		}
+	}
+
+	nbc->md_bdev = inode2->i_bdev;
+	if (bd_claim(nbc->md_bdev,
+		     (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
+		      nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT) ?
+		     (void *)mdev : (void *) drbd_m_holder)) {
+		retcode = MDMounted;
+		goto release_bdev_fail;
+	}
+
+	if ((nbc->backing_bdev == nbc->md_bdev) !=
+	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
+	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
+		retcode = LDMDInvalid;
+		goto release_bdev2_fail;
+	}
+
+	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
+	drbd_md_set_sector_offsets(mdev, nbc);
+
+	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
+		ERR("max capacity %llu smaller than disk size %llu\n",
+			(unsigned long long) drbd_get_max_capacity(nbc),
+			(unsigned long long) nbc->dc.disk_size);
+		retcode = LDDeviceTooSmall;
+		goto release_bdev2_fail;
+	}
+
+	if (nbc->dc.meta_dev_idx < 0) {
+		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
+		/* at least one MB, otherwise it does not make sense */
+		min_md_device_sectors = (2<<10);
+	} else {
+		max_possible_sectors = DRBD_MAX_SECTORS;
+		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
+	}
+
+	if (drbd_get_capacity(nbc->md_bdev) > max_possible_sectors)
+		drbd_WARN("truncating very big lower level device "
+		     "to the currently maximum possible %llu sectors\n",
+		     (unsigned long long) max_possible_sectors);
+
+	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
+		retcode = MDDeviceTooSmall;
+		drbd_WARN("refusing attach: md-device too small, "
+		     "at least %llu sectors needed for this meta-disk type\n",
+		     (unsigned long long) min_md_device_sectors);
+		goto release_bdev2_fail;
+	}
+
+	/* Make sure the new disk is big enough
+	 * (we may currently be Primary with no local disk...) */
+	if (drbd_get_max_capacity(nbc) <
+	    drbd_get_capacity(mdev->this_bdev)) {
+		retcode = LDDeviceTooSmall;
+		goto release_bdev2_fail;
+	}
+
+	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
+
+	drbd_suspend_io(mdev);
+	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt));
+	retcode = _drbd_request_state(mdev, NS(disk, Attaching), ChgStateVerbose);
+	drbd_resume_io(mdev);
+	if (retcode < SS_Success)
+		goto release_bdev2_fail;
+
+	if (!inc_local_if_state(mdev, Attaching))
+		goto force_diskless;
+
+	drbd_thread_start(&mdev->worker);
+	drbd_md_set_sector_offsets(mdev, nbc);
+
+	retcode = drbd_md_read(mdev, nbc);
+	if (retcode != NoError)
+		goto force_diskless_dec;
+
+	if (mdev->state.conn < Connected &&
+	    mdev->state.role == Primary &&
+	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[Current] & ~((u64)1))) {
+		ERR("Can only attach to data with current UUID=%016llX\n",
+		    (unsigned long long)mdev->ed_uuid);
+		retcode = DataOfWrongCurrent;
+		goto force_diskless_dec;
+	}
+
+	/* Since we are diskless, fix the AL first... */
+	if (drbd_check_al_size(mdev)) {
+		retcode = KMallocFailed;
+		goto force_diskless_dec;
+	}
+
+	/* Prevent shrinking of consistent devices ! */
+	if (drbd_md_test_flag(nbc, MDF_Consistent) &&
+	   drbd_new_dev_size(mdev, nbc) < nbc->md.la_size_sect) {
+		drbd_WARN("refusing to truncate a consistent device\n");
+		retcode = LDDeviceTooSmall;
+		goto force_diskless_dec;
+	}
+
+	if (!drbd_al_read_log(mdev, nbc)) {
+		retcode = MDIOError;
+		goto force_diskless_dec;
+	}
+
+	/* Reset the "barriers don't work" bits here, then force meta data to
+	 * be written, to ensure we determine if barriers are supported. */
+	if (nbc->dc.no_md_flush)
+		set_bit(MD_NO_BARRIER, &mdev->flags);
+	else
+		clear_bit(MD_NO_BARRIER, &mdev->flags);
+
+	/* Point of no return reached.
+	 * Devices and memory are no longer released by error cleanup below.
+	 * Now mdev takes over responsibility, and the state engine should
+	 * clean it up somewhere.  */
+	D_ASSERT(mdev->bc == NULL);
+	mdev->bc = nbc;
+	mdev->resync = resync_lru;
+	nbc = NULL;
+	resync_lru = NULL;
+
+	mdev->write_ordering = WO_bio_barrier;
+	drbd_bump_write_ordering(mdev, WO_bio_barrier);
+
+	if (drbd_md_test_flag(mdev->bc, MDF_CrashedPrimary))
+		set_bit(CRASHED_PRIMARY, &mdev->flags);
+	else
+		clear_bit(CRASHED_PRIMARY, &mdev->flags);
+
+	if (drbd_md_test_flag(mdev->bc, MDF_PrimaryInd)) {
+		set_bit(CRASHED_PRIMARY, &mdev->flags);
+		cp_discovered = 1;
+	}
+
+	mdev->send_cnt = 0;
+	mdev->recv_cnt = 0;
+	mdev->read_cnt = 0;
+	mdev->writ_cnt = 0;
+
+	drbd_setup_queue_param(mdev, DRBD_MAX_SEGMENT_SIZE);
+
+	/* If I am currently not Primary,
+	 * but meta data primary indicator is set,
+	 * I just now recover from a hard crash,
+	 * and have been Primary before that crash.
+	 *
+	 * Now, if I had no connection before that crash
+	 * (have been degraded Primary), chances are that
+	 * I won't find my peer now either.
+	 *
+	 * In that case, and _only_ in that case,
+	 * we use the degr-wfc-timeout instead of the default,
+	 * so we can automatically recover from a crash of a
+	 * degraded but active "cluster" after a certain timeout.
+	 */
+	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
+	if (mdev->state.role != Primary &&
+	     drbd_md_test_flag(mdev->bc, MDF_PrimaryInd) &&
+	    !drbd_md_test_flag(mdev->bc, MDF_ConnectedInd))
+		set_bit(USE_DEGR_WFC_T, &mdev->flags);
+
+	dd = drbd_determin_dev_size(mdev);
+	if (dd == dev_size_error) {
+		retcode = VMallocFailed;
+		goto force_diskless_dec;
+	} else if (dd == grew)
+		set_bit(RESYNC_AFTER_NEG, &mdev->flags);
+
+	if (drbd_md_test_flag(mdev->bc, MDF_FullSync)) {
+		INFO("Assuming that all blocks are out of sync "
+		     "(aka FullSync)\n");
+		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) {
+			retcode = MDIOError;
+			goto force_diskless_dec;
+		}
+	} else {
+		if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) {
+			retcode = MDIOError;
+			goto force_diskless_dec;
+		}
+	}
+
+	if (cp_discovered) {
+		drbd_al_apply_to_bm(mdev);
+		drbd_al_to_on_disk_bm(mdev);
+	}
+
+	spin_lock_irq(&mdev->req_lock);
+	os = mdev->state;
+	ns.i = os.i;
+	/* If MDF_Consistent is not set go into inconsistent state,
+	   otherwise investigate MDF_WasUpToDate...
+	   If MDF_WasUpToDate is not set go into Outdated disk state,
+	   otherwise into Consistent state.
+	*/
+	if (drbd_md_test_flag(mdev->bc, MDF_Consistent)) {
+		if (drbd_md_test_flag(mdev->bc, MDF_WasUpToDate))
+			ns.disk = Consistent;
+		else
+			ns.disk = Outdated;
+	} else {
+		ns.disk = Inconsistent;
+	}
+
+	if (drbd_md_test_flag(mdev->bc, MDF_PeerOutDated))
+		ns.pdsk = Outdated;
+
+	if ( ns.disk == Consistent &&
+	    (ns.pdsk == Outdated || mdev->bc->dc.fencing == DontCare))
+		ns.disk = UpToDate;
+
+	/* All tests on MDF_PrimaryInd, MDF_ConnectedInd,
+	   MDF_Consistent and MDF_WasUpToDate must happen before
+	   this point, because drbd_request_state() modifies these
+	   flags. */
+
+	/* In case we are Connected, postpone any decision on the new disk
+	   state until after the negotiation phase. */
+	if (mdev->state.conn == Connected) {
+		mdev->new_state_tmp.i = ns.i;
+		ns.i = os.i;
+		ns.disk = Negotiating;
+	}
+
+	rv = _drbd_set_state(mdev, ns, ChgStateVerbose, NULL);
+	ns = mdev->state;
+	spin_unlock_irq(&mdev->req_lock);
+
+	if (rv < SS_Success)
+		goto force_diskless_dec;
+
+	if (mdev->state.role == Primary)
+		mdev->bc->md.uuid[Current] |=  (u64)1;
+	else
+		mdev->bc->md.uuid[Current] &= ~(u64)1;
+
+	drbd_md_mark_dirty(mdev);
+	drbd_md_sync(mdev);
+
+	dec_local(mdev);
+	reply->ret_code = retcode;
+	return 0;
+
+ force_diskless_dec:
+	dec_local(mdev);
+ force_diskless:
+	drbd_force_state(mdev, NS(disk, Diskless));
+	drbd_md_sync(mdev);
+ release_bdev2_fail:
+	if (nbc)
+		bd_release(nbc->md_bdev);
+ release_bdev_fail:
+	if (nbc)
+		bd_release(nbc->backing_bdev);
+ fail:
+	if (nbc) {
+		if (nbc->lo_file)
+			fput(nbc->lo_file);
+		if (nbc->md_file)
+			fput(nbc->md_file);
+		kfree(nbc);
+	}
+	if (resync_lru)
+		lc_free(resync_lru);
+
+	reply->ret_code = retcode;
+	return 0;
+}
+
+STATIC int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			  struct drbd_nl_cfg_reply *reply)
+{
+	fsync_bdev(mdev->this_bdev);
+	reply->ret_code = drbd_request_state(mdev, NS(disk, Diskless));
+
+	__set_current_state(TASK_INTERRUPTIBLE);
+	schedule_timeout(HZ/20); /* 50ms; Time for worker to finally terminate */
+
+	return 0;
+}
+
+#define HMAC_NAME_L 20
+
+STATIC int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			    struct drbd_nl_cfg_reply *reply)
+{
+	int i, ns;
+	enum ret_codes retcode;
+	struct net_conf *new_conf = NULL;
+	struct crypto_hash *tfm = NULL;
+	struct crypto_hash *integrity_w_tfm = NULL;
+	struct crypto_hash *integrity_r_tfm = NULL;
+	struct hlist_head *new_tl_hash = NULL;
+	struct hlist_head *new_ee_hash = NULL;
+	struct drbd_conf *odev;
+	char hmac_name[HMAC_NAME_L];
+	void *int_dig_out = NULL;
+	void *int_dig_in = NULL;
+	void *int_dig_vv = NULL;
+
+	if (mdev->state.conn > StandAlone) {
+		retcode = HaveNetConfig;
+		goto fail;
+	}
+
+	new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
+	if (!new_conf) {
+		retcode = KMallocFailed;
+		goto fail;
+	}
+
+	if (!(nlp->flags & DRBD_NL_SET_DEFAULTS) && inc_net(mdev)) {
+		memcpy(new_conf, mdev->net_conf, sizeof(struct net_conf));
+		dec_net(mdev);
+	} else {
+		memset(new_conf, 0, sizeof(struct net_conf));
+		new_conf->timeout	   = DRBD_TIMEOUT_DEF;
+		new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
+		new_conf->ping_int	   = DRBD_PING_INT_DEF;
+		new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
+		new_conf->max_buffers	   = DRBD_MAX_BUFFERS_DEF;
+		new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
+		new_conf->sndbuf_size	   = DRBD_SNDBUF_SIZE_DEF;
+		new_conf->ko_count	   = DRBD_KO_COUNT_DEF;
+		new_conf->after_sb_0p	   = DRBD_AFTER_SB_0P_DEF;
+		new_conf->after_sb_1p	   = DRBD_AFTER_SB_1P_DEF;
+		new_conf->after_sb_2p	   = DRBD_AFTER_SB_2P_DEF;
+		new_conf->want_lose	   = 0;
+		new_conf->two_primaries    = 0;
+		new_conf->wire_protocol    = DRBD_PROT_C;
+		new_conf->ping_timeo	   = DRBD_PING_TIMEO_DEF;
+		new_conf->rr_conflict	   = DRBD_RR_CONFLICT_DEF;
+	}
+
+	if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
+		retcode = UnknownMandatoryTag;
+		goto fail;
+	}
+
+	if (new_conf->two_primaries &&
+	    new_conf->wire_protocol != DRBD_PROT_C) {
+		retcode = ProtocolCRequired;
+		goto fail;
+	}
+
+	if (mdev->state.role == Primary && new_conf->want_lose) {
+		retcode = DiscardNotAllowed;
+		goto fail;
+	}
+
+#define M_ADDR(A) (((struct sockaddr_in *)&A->my_addr)->sin_addr.s_addr)
+#define M_PORT(A) (((struct sockaddr_in *)&A->my_addr)->sin_port)
+#define O_ADDR(A) (((struct sockaddr_in *)&A->peer_addr)->sin_addr.s_addr)
+#define O_PORT(A) (((struct sockaddr_in *)&A->peer_addr)->sin_port)
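+	/* Make sure that neither our local address nor our peer address
+	 * (IPv4 address/port pair) is already in use by another device. */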
+	retcode = NoError;
+	for (i = 0; i < minor_count; i++) {
+		odev = minor_to_mdev(i);
+		if (!odev || odev == mdev)
+			continue;
+		if (inc_net(odev)) {
+			if (M_ADDR(new_conf) == M_ADDR(odev->net_conf) &&
+			    M_PORT(new_conf) == M_PORT(odev->net_conf))
+				retcode = LAAlreadyInUse;
+
+			if (O_ADDR(new_conf) == O_ADDR(odev->net_conf) &&
+			    O_PORT(new_conf) == O_PORT(odev->net_conf))
+				retcode = OAAlreadyInUse;
+
+			dec_net(odev);
+			if (retcode != NoError)
+				goto fail;
+		}
+	}
+#undef M_ADDR
+#undef M_PORT
+#undef O_ADDR
+#undef O_PORT
+
+	if (new_conf->cram_hmac_alg[0] != 0) {
+		snprintf(hmac_name, HMAC_NAME_L, "hmac(%s)",
+			new_conf->cram_hmac_alg);
+		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
+		if (IS_ERR(tfm)) {
+			tfm = NULL;
+			retcode = CRAMAlgNotAvail;
+			goto fail;
+		}
+
+		if (crypto_tfm_alg_type(crypto_hash_tfm(tfm))
+						!= CRYPTO_ALG_TYPE_HASH) {
+			retcode = CRAMAlgNotDigest;
+			goto fail;
+		}
+	}
+
+	if (new_conf->integrity_alg[0]) {
+		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
+		if (IS_ERR(integrity_w_tfm)) {
+			integrity_w_tfm = NULL;
+			retcode = IntegrityAlgNotAvail;
+			goto fail;
+		}
+
+		if (crypto_tfm_alg_type(crypto_hash_tfm(integrity_w_tfm)) != CRYPTO_ALG_TYPE_DIGEST) {
+			retcode = IntegrityAlgNotDigest;
+			goto fail;
+		}
+
+		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
+		if (IS_ERR(integrity_r_tfm)) {
+			integrity_r_tfm = NULL;
+			retcode = IntegrityAlgNotAvail;
+			goto fail;
+		}
+	}
+
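+	/* The transfer log hash is sized after max_epoch_size, the ee hash
+	 * after max_buffers; reallocate only if the bucket count changes. */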
+	ns = new_conf->max_epoch_size/8;
+	if (mdev->tl_hash_s != ns) {
+		new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
+		if (!new_tl_hash) {
+			retcode = KMallocFailed;
+			goto fail;
+		}
+	}
+
+	ns = new_conf->max_buffers/8;
+	if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
+		new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
+		if (!new_ee_hash) {
+			retcode = KMallocFailed;
+			goto fail;
+		}
+	}
+
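+	/* Force NUL termination, in case userspace handed us an
+	 * unterminated secret. */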
+	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
+
+#if 0
+	/* for the connection loss logic in drbd_recv
+	 * I _need_ the resulting timeo in jiffies to be
+	 * non-zero and different
+	 *
+	 * XXX maybe rather store the value scaled to jiffies?
+	 * Note: MAX_SCHEDULE_TIMEOUT/HZ*HZ != MAX_SCHEDULE_TIMEOUT
+	 *	 and HZ > 10; which is unlikely to change...
+	 *	 Thus, if interrupted by a signal,
+	 *	 sock_{send,recv}msg returns -EINTR,
+	 *	 if the timeout expires, -EAGAIN.
+	 */
+	/* unlikely: someone disabled the timeouts ...
+	 * just put some huge values in there. */
+	if (!new_conf->ping_int)
+		new_conf->ping_int = MAX_SCHEDULE_TIMEOUT/HZ;
+	if (!new_conf->timeout)
+		new_conf->timeout = MAX_SCHEDULE_TIMEOUT/HZ*10;
+	if (new_conf->ping_int*10 < new_conf->timeout)
+		new_conf->timeout = new_conf->ping_int*10/6;
+	if (new_conf->ping_int*10 == new_conf->timeout)
+		new_conf->ping_int = new_conf->ping_int+1;
+#endif
+
+	if (integrity_w_tfm) {
+		i = crypto_hash_digestsize(integrity_w_tfm);
+		int_dig_out = kmalloc(i, GFP_KERNEL);
+		if (!int_dig_out) {
+			retcode = KMallocFailed;
+			goto fail;
+		}
+		int_dig_in = kmalloc(i, GFP_KERNEL);
+		if (!int_dig_in) {
+			retcode = KMallocFailed;
+			goto fail;
+		}
+		int_dig_vv = kmalloc(i, GFP_KERNEL);
+		if (!int_dig_vv) {
+			retcode = KMallocFailed;
+			goto fail;
+		}
+	}
+
+	if (!mdev->bitmap) {
+		if (drbd_bm_init(mdev)) {
+			retcode = KMallocFailed;
+			goto fail;
+		}
+	}
+
+	D_ASSERT(mdev->net_conf == NULL);
+	mdev->net_conf = new_conf;
+
+	mdev->send_cnt = 0;
+	mdev->recv_cnt = 0;
+
+	if (new_tl_hash) {
+		kfree(mdev->tl_hash);
+		mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
+		mdev->tl_hash = new_tl_hash;
+	}
+
+	if (new_ee_hash) {
+		kfree(mdev->ee_hash);
+		mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
+		mdev->ee_hash = new_ee_hash;
+	}
+
+	crypto_free_hash(mdev->cram_hmac_tfm);
+	mdev->cram_hmac_tfm = tfm;
+
+	crypto_free_hash(mdev->integrity_w_tfm);
+	mdev->integrity_w_tfm = integrity_w_tfm;
+
+	crypto_free_hash(mdev->integrity_r_tfm);
+	mdev->integrity_r_tfm = integrity_r_tfm;
+
+	kfree(mdev->int_dig_out);
+	kfree(mdev->int_dig_in);
+	kfree(mdev->int_dig_vv);
+	mdev->int_dig_out = int_dig_out;
+	mdev->int_dig_in = int_dig_in;
+	mdev->int_dig_vv = int_dig_vv;
+
+	retcode = _drbd_request_state(mdev, NS(conn, Unconnected), ChgStateVerbose);
+	if (retcode >= SS_Success)
+		drbd_thread_start(&mdev->worker);
+
+	reply->ret_code = retcode;
+	return 0;
+
+fail:
+	kfree(int_dig_out);
+	kfree(int_dig_in);
+	kfree(int_dig_vv);
+	crypto_free_hash(tfm);
+	crypto_free_hash(integrity_w_tfm);
+	crypto_free_hash(integrity_r_tfm);
+	kfree(new_tl_hash);
+	kfree(new_ee_hash);
+	kfree(new_conf);
+
+	reply->ret_code = retcode;
+	return 0;
+}
+
+STATIC int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			      struct drbd_nl_cfg_reply *reply)
+{
+	int retcode;
+
+	retcode = _drbd_request_state(mdev, NS(conn, Disconnecting), ChgOrdered);
+
+	if (retcode == SS_NothingToDo)
+		goto done;
+	else if (retcode == SS_AlreadyStandAlone)
+		goto done;
+	else if (retcode == SS_PrimaryNOP) {
+		/* Our state checking code wants to see the peer outdated. */
+		retcode = drbd_request_state(mdev, NS2(conn, Disconnecting,
+						      pdsk, Outdated));
+	} else if (retcode == SS_CW_FailedByPeer) {
+		/* The peer probably wants to see us outdated. */
+		retcode = _drbd_request_state(mdev, NS2(conn, Disconnecting,
+							disk, Outdated),
+					      ChgOrdered);
+		if (retcode == SS_IsDiskLess || retcode == SS_LowerThanOutdated) {
+			drbd_force_state(mdev, NS(conn, Disconnecting));
+			retcode = SS_Success;
+		}
+	}
+
+	if (retcode < SS_Success)
+		goto fail;
+
+	if (wait_event_interruptible(mdev->state_wait,
+				     mdev->state.conn != Disconnecting)) {
+		/* Do not test for mdev->state.conn == StandAlone, since
+		   someone else might connect us in the meantime! */
+		retcode = GotSignal;
+		goto fail;
+	}
+
+ done:
+	retcode = NoError;
+ fail:
+	drbd_md_sync(mdev);
+	reply->ret_code = retcode;
+	return 0;
+}
+
+void resync_after_online_grow(struct drbd_conf *mdev)
+{
+	int iass; /* I am sync source */
+
+	INFO("Resync of new storage after online grow\n");
+	if (mdev->state.role != mdev->state.peer)
+		iass = (mdev->state.role == Primary);
+	else
+		iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+
+	if (iass)
+		drbd_start_resync(mdev, SyncSource);
+	else
+		_drbd_request_state(mdev, NS(conn, WFSyncUUID), ChgStateVerbose + ChgSerialize);
+}
+
+STATIC int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			  struct drbd_nl_cfg_reply *reply)
+{
+	struct resize rs;
+	int retcode = NoError;
+	int ldsc = 0; /* local disk size changed */
+	enum determin_dev_size_enum dd;
+
+	memset(&rs, 0, sizeof(struct resize));
+	if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
+		retcode = UnknownMandatoryTag;
+		goto fail;
+	}
+
+	if (mdev->state.conn > Connected) {
+		retcode = NoResizeDuringResync;
+		goto fail;
+	}
+
+	if (mdev->state.role == Secondary &&
+	    mdev->state.peer == Secondary) {
+		retcode = APrimaryNodeNeeded;
+		goto fail;
+	}
+
+	if (!inc_local(mdev)) {
+		retcode = HaveNoDiskConfig;
+		goto fail;
+	}
+
+	if (mdev->bc->known_size != drbd_get_capacity(mdev->bc->backing_bdev)) {
+		mdev->bc->known_size = drbd_get_capacity(mdev->bc->backing_bdev);
+		ldsc = 1;
+	}
+
+	mdev->bc->dc.disk_size = (sector_t)rs.resize_size;
+	dd = drbd_determin_dev_size(mdev);
+	drbd_md_sync(mdev);
+	dec_local(mdev);
+	if (dd == dev_size_error) {
+		retcode = VMallocFailed;
+		goto fail;
+	}
+
+	if (mdev->state.conn == Connected && (dd != unchanged || ldsc)) {
+		drbd_send_uuids(mdev);
+		drbd_send_sizes(mdev);
+		if (dd == grew)
+			resync_after_online_grow(mdev);
+	}
+
+ fail:
+	reply->ret_code = retcode;
+	return 0;
+}
+
+STATIC int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			       struct drbd_nl_cfg_reply *reply)
+{
+	int retcode = NoError;
+	int err;
+	int ovr; /* online verify running */
+	int rsr; /* re-sync running */
+	struct drbd_conf *odev;
+	struct crypto_hash *verify_tfm = NULL;
+	struct crypto_hash *csums_tfm = NULL;
+	struct syncer_conf sc;
+	cpumask_t n_cpu_mask = CPU_MASK_NONE;
+
+	memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
+
+	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
+		memset(&sc, 0, sizeof(struct syncer_conf));
+		sc.rate       = DRBD_RATE_DEF;
+		sc.after      = DRBD_AFTER_DEF;
+		sc.al_extents = DRBD_AL_EXTENTS_DEF;
+	}
+
+	if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
+		retcode = UnknownMandatoryTag;
+		goto fail;
+	}
+
+	if (sc.after != -1) {
+		if (sc.after < -1 || minor_to_mdev(sc.after) == NULL) {
+			retcode = SyncAfterInvalid;
+			goto fail;
+		}
+		odev = minor_to_mdev(sc.after); /* check for cycles in the sync-after chain */
+		while (1) {
+			if (odev == mdev) {
+				retcode = SyncAfterCycle;
+				goto fail;
+			}
+			if (odev->sync_conf.after == -1)
+				break; /* no cycles. */
+			odev = minor_to_mdev(odev->sync_conf.after);
+		}
+	}
+
+	/* re-sync running */
+	rsr = (mdev->state.conn == SyncSource ||
+	       mdev->state.conn == SyncTarget ||
+	       mdev->state.conn == PausedSyncS ||
+	       mdev->state.conn == PausedSyncT);
+
+	if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
+		retcode = CSUMSResyncRunning;
+		goto fail;
+	}
+
+	if (!rsr && sc.csums_alg[0]) {
+		csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
+		if (IS_ERR(csums_tfm)) {
+			csums_tfm = NULL;
+			retcode = CSUMSAlgNotAvail;
+			goto fail;
+		}
+
+		if (crypto_tfm_alg_type(crypto_hash_tfm(csums_tfm)) != CRYPTO_ALG_TYPE_DIGEST) {
+			retcode = CSUMSAlgNotDigest;
+			goto fail;
+		}
+	}
+
+	/* online verify running */
+	ovr = (mdev->state.conn == VerifyS || mdev->state.conn == VerifyT);
+
+	if (ovr) {
+		if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
+			retcode = VERIFYIsRunning;
+			goto fail;
+		}
+	}
+
+	if (!ovr && sc.verify_alg[0]) {
+		verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
+		if (IS_ERR(verify_tfm)) {
+			verify_tfm = NULL;
+			retcode = VERIFYAlgNotAvail;
+			goto fail;
+		}
+
+		if (crypto_tfm_alg_type(crypto_hash_tfm(verify_tfm)) != CRYPTO_ALG_TYPE_DIGEST) {
+			retcode = VERIFYAlgNotDigest;
+			goto fail;
+		}
+	}
+
+	if (sc.cpu_mask[0] != 0) {
+		err = __bitmap_parse(sc.cpu_mask, 32, 0, (unsigned long *)&n_cpu_mask, NR_CPUS);
+		if (err) {
+			drbd_WARN("__bitmap_parse() failed with %d\n", err);
+			retcode = CPUMaskParseFailed;
+			goto fail;
+		}
+	}
+
+	ERR_IF (sc.rate < 1) sc.rate = 1;
+	ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* the AL needs at least 7 extents; fall back to the default of 127 */
+#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
+	if (sc.al_extents > AL_MAX) {
+		ERR("sc.al_extents > %d\n", AL_MAX);
+		sc.al_extents = AL_MAX;
+	}
+#undef AL_MAX
+
+	spin_lock(&mdev->peer_seq_lock);
+	/* lock against receive_SyncParam() */
+	mdev->sync_conf = sc;
+
+	if (!rsr) {
+		crypto_free_hash(mdev->csums_tfm);
+		mdev->csums_tfm = csums_tfm;
+		csums_tfm = NULL;
+	}
+
+	if (!ovr) {
+		crypto_free_hash(mdev->verify_tfm);
+		mdev->verify_tfm = verify_tfm;
+		verify_tfm = NULL;
+	}
+	spin_unlock(&mdev->peer_seq_lock);
+
+	if (inc_local(mdev)) {
+		wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
+		drbd_al_shrink(mdev);
+		err = drbd_check_al_size(mdev);
+		lc_unlock(mdev->act_log);
+		wake_up(&mdev->al_wait);
+
+		dec_local(mdev);
+		drbd_md_sync(mdev);
+
+		if (err) {
+			retcode = KMallocFailed;
+			goto fail;
+		}
+	}
+
+	if (mdev->state.conn >= Connected)
+		drbd_send_sync_param(mdev, &sc);
+
+	drbd_alter_sa(mdev, sc.after);
+
+	if (!cpus_equal(mdev->cpu_mask, n_cpu_mask)) {
+		mdev->cpu_mask = n_cpu_mask;
+		mdev->cpu_mask = drbd_calc_cpu_mask(mdev);
+		mdev->receiver.reset_cpu_mask = 1;
+		mdev->asender.reset_cpu_mask = 1;
+		mdev->worker.reset_cpu_mask = 1;
+	}
+
+fail:
+	crypto_free_hash(csums_tfm);
+	crypto_free_hash(verify_tfm);
+	reply->ret_code = retcode;
+	return 0;
+}
+
+STATIC int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			      struct drbd_nl_cfg_reply *reply)
+{
+	int retcode;
+
+	retcode = _drbd_request_state(mdev, NS(conn, StartingSyncT), ChgOrdered);
+
+	if (retcode < SS_Success && retcode != SS_NeedConnection)
+		retcode = drbd_request_state(mdev, NS(conn, StartingSyncT));
+
+	while (retcode == SS_NeedConnection) {
+		spin_lock_irq(&mdev->req_lock);
+		if (mdev->state.conn < Connected)
+			retcode = _drbd_set_state(_NS(mdev, disk, Inconsistent), ChgStateVerbose, NULL);
+		spin_unlock_irq(&mdev->req_lock);
+
+		if (retcode != SS_NeedConnection)
+			break;
+
+		retcode = drbd_request_state(mdev, NS(conn, StartingSyncT));
+	}
+
+	reply->ret_code = retcode;
+	return 0;
+}
+
+STATIC int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+				   struct drbd_nl_cfg_reply *reply)
+{
+	reply->ret_code = drbd_request_state(mdev, NS(conn, StartingSyncS));
+
+	return 0;
+}
+
+STATIC int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			      struct drbd_nl_cfg_reply *reply)
+{
+	int retcode = NoError;
+
+	if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NothingToDo)
+		retcode = PauseFlagAlreadySet;
+
+	reply->ret_code = retcode;
+	return 0;
+}
+
+STATIC int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			       struct drbd_nl_cfg_reply *reply)
+{
+	int retcode = NoError;
+
+	if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NothingToDo)
+		retcode = PauseFlagAlreadyClear;
+
+	reply->ret_code = retcode;
+	return 0;
+}
+
+STATIC int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			      struct drbd_nl_cfg_reply *reply)
+{
+	reply->ret_code = drbd_request_state(mdev, NS(susp, 1));
+
+	return 0;
+}
+
+STATIC int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			     struct drbd_nl_cfg_reply *reply)
+{
+	reply->ret_code = drbd_request_state(mdev, NS(susp, 0));
+	return 0;
+}
+
+STATIC int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			   struct drbd_nl_cfg_reply *reply)
+{
+	reply->ret_code = drbd_request_state(mdev, NS(disk, Outdated));
+	return 0;
+}
+
+STATIC int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			   struct drbd_nl_cfg_reply *reply)
+{
+	unsigned short *tl;
+
+	tl = reply->tag_list;
+
+	if (inc_local(mdev)) {
+		tl = disk_conf_to_tags(mdev, &mdev->bc->dc, tl);
+		dec_local(mdev);
+	}
+
+	if (inc_net(mdev)) {
+		tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
+		dec_net(mdev);
+	}
+	tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);
+
+	*tl++ = TT_END; /* Close the tag list */
+
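+	/* Return the tag list length in bytes; drbd_connector_callback()
+	 * adds it to cn_reply->len. */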
+	return (int)((char *)tl - (char *)reply->tag_list);
+}
+
+STATIC int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			     struct drbd_nl_cfg_reply *reply)
+{
+	unsigned short *tl = reply->tag_list;
+	union drbd_state_t s = mdev->state;
+	unsigned long rs_left;
+	unsigned int res;
+
+	tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);
+
+	/* no local ref, no bitmap, no syncer progress. */
+	if (s.conn >= SyncSource && s.conn <= PausedSyncT) {
+		if (inc_local(mdev)) {
+			drbd_get_syncer_progress(mdev, &rs_left, &res);
+			*tl++ = T_sync_progress;
+			*tl++ = sizeof(int);
+			memcpy(tl, &res, sizeof(int));
+			tl = (unsigned short *)((char *)tl + sizeof(int));
+			dec_local(mdev);
+		}
+	}
+	*tl++ = TT_END; /* Close the tag list */
+
+	return (int)((char *)tl - (char *)reply->tag_list);
+}
+
+STATIC int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			     struct drbd_nl_cfg_reply *reply)
+{
+	unsigned short *tl;
+
+	tl = reply->tag_list;
+
+	if (inc_local(mdev)) {
+		/* This is a hand crafted add tag ;) */
+		*tl++ = T_uuids;
+		*tl++ = UUID_SIZE*sizeof(u64);
+		memcpy(tl, mdev->bc->md.uuid, UUID_SIZE*sizeof(u64));
+		tl = (unsigned short *)((char *)tl + UUID_SIZE*sizeof(u64));
+		*tl++ = T_uuids_flags;
+		*tl++ = sizeof(int);
+		memcpy(tl, &mdev->bc->md.flags, sizeof(int));
+		tl = (unsigned short *)((char *)tl + sizeof(int));
+		dec_local(mdev);
+	}
+	*tl++ = TT_END; /* Close the tag list */
+
+	return (int)((char *)tl - (char *)reply->tag_list);
+}
+
+/**
+ * drbd_nl_get_timeout_flag:
+ * Used by drbdsetup to find out which timeout value to use.
+ */
+STATIC int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+				    struct drbd_nl_cfg_reply *reply)
+{
+	unsigned short *tl;
+	char rv;
+
+	tl = reply->tag_list;
+
+	rv = mdev->state.pdsk == Outdated        ? UT_PeerOutdated :
+	  test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_Degraded : UT_Default;
+
+	/* This is a hand crafted add tag ;) */
+	*tl++ = T_use_degraded;
+	*tl++ = sizeof(char);
+	*((char *)tl) = rv;
+	tl = (unsigned short *)((char *)tl + sizeof(char));
+	*tl++ = TT_END;
+
+	return (int)((char *)tl - (char *)reply->tag_list);
+}
+
+STATIC int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+				    struct drbd_nl_cfg_reply *reply)
+{
+	reply->ret_code = drbd_request_state(mdev, NS(conn, VerifyS));
+
+	return 0;
+}
+
+
+STATIC int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+			      struct drbd_nl_cfg_reply *reply)
+{
+	int retcode = NoError;
+	int err;
+
+	struct new_c_uuid args;
+
+	memset(&args, 0, sizeof(struct new_c_uuid));
+	if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
+		reply->ret_code = UnknownMandatoryTag;
+		return 0;
+	}
+
+	mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */
+
+	if (mdev->state.conn >= Connected) {
+		retcode = MayNotBeConnected;
+		goto out;
+	}
+
+	if (!inc_local(mdev)) {
+		retcode = HaveNoDiskConfig;
+		goto out;
+	}
+
+	drbd_uuid_set(mdev, Bitmap, 0); /* Rotate Bitmap to History 1, etc... */
+	drbd_uuid_new_current(mdev); /* New current, previous to Bitmap */
+
+	if (args.clear_bm) {
+		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid");
+		if (err) {
+			ERR("Writing bitmap failed with %d\n",err);
+			retcode = MDIOError;
+		}
+	}
+
+	drbd_md_sync(mdev);
+	dec_local(mdev);
+out:
+	mutex_unlock(&mdev->state_mutex);
+
+	reply->ret_code = retcode;
+	return 0;
+}
+
+STATIC struct drbd_conf *ensure_mdev(struct drbd_nl_cfg_req *nlp)
+{
+	struct drbd_conf *mdev;
+
+	if (nlp->drbd_minor >= minor_count)
+		return NULL;
+
+	mdev = minor_to_mdev(nlp->drbd_minor);
+
+	if (!mdev && (nlp->flags & DRBD_NL_CREATE_DEVICE)) {
+		struct gendisk *disk = NULL;
+		mdev = drbd_new_device(nlp->drbd_minor);
+
+		spin_lock_irq(&drbd_pp_lock);
+		if (minor_table[nlp->drbd_minor] == NULL) {
+			minor_table[nlp->drbd_minor] = mdev;
+			disk = mdev->vdisk;
+			mdev = NULL;
+		} /* else: we lost the race */
+		spin_unlock_irq(&drbd_pp_lock);
+
+		if (disk) /* we won the race above */
+			/* in case we ever add a drbd_delete_device(),
+			 * don't forget the del_gendisk! */
+			add_disk(disk);
+		else /* we lost the race above */
+			drbd_free_mdev(mdev);
+
+		mdev = minor_to_mdev(nlp->drbd_minor);
+	}
+
+	return mdev;
+}
+
+struct cn_handler_struct {
+	int (*function)(struct drbd_conf *,
+			 struct drbd_nl_cfg_req *,
+			 struct drbd_nl_cfg_reply *);
+	int reply_body_size;
+};
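+
+/* reply_body_size is the worst case tag list payload a handler can
+ * produce on top of the fixed size reply; drbd_connector_callback()
+ * uses it to size the reply buffer. */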
+
+static struct cn_handler_struct cnd_table[] = {
+	[ P_primary ]		= { &drbd_nl_primary,		0 },
+	[ P_secondary ]		= { &drbd_nl_secondary,		0 },
+	[ P_disk_conf ]		= { &drbd_nl_disk_conf,		0 },
+	[ P_detach ]		= { &drbd_nl_detach,		0 },
+	[ P_net_conf ]		= { &drbd_nl_net_conf,		0 },
+	[ P_disconnect ]	= { &drbd_nl_disconnect,	0 },
+	[ P_resize ]		= { &drbd_nl_resize,		0 },
+	[ P_syncer_conf ]	= { &drbd_nl_syncer_conf,	0 },
+	[ P_invalidate ]	= { &drbd_nl_invalidate,	0 },
+	[ P_invalidate_peer ]	= { &drbd_nl_invalidate_peer,	0 },
+	[ P_pause_sync ]	= { &drbd_nl_pause_sync,	0 },
+	[ P_resume_sync ]	= { &drbd_nl_resume_sync,	0 },
+	[ P_suspend_io ]	= { &drbd_nl_suspend_io,	0 },
+	[ P_resume_io ]		= { &drbd_nl_resume_io,		0 },
+	[ P_outdate ]		= { &drbd_nl_outdate,		0 },
+	[ P_get_config ]	= { &drbd_nl_get_config,
+				    sizeof(struct syncer_conf_tag_len_struct) +
+				    sizeof(struct disk_conf_tag_len_struct) +
+				    sizeof(struct net_conf_tag_len_struct) },
+	[ P_get_state ]		= { &drbd_nl_get_state,
+				    sizeof(struct get_state_tag_len_struct) +
+				    sizeof(struct sync_progress_tag_len_struct)	},
+	[ P_get_uuids ]		= { &drbd_nl_get_uuids,
+				    sizeof(struct get_uuids_tag_len_struct) },
+	[ P_get_timeout_flag ]	= { &drbd_nl_get_timeout_flag,
+				    sizeof(struct get_timeout_flag_tag_len_struct)},
+	[ P_start_ov ]		= { &drbd_nl_start_ov,		0 },
+	[ P_new_c_uuid ]	= { &drbd_nl_new_c_uuid,	0 },
+};
+
+STATIC void drbd_connector_callback(void *data)
+{
+	struct cn_msg *req = data;
+	struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
+	struct cn_handler_struct *cm;
+	struct cn_msg *cn_reply;
+	struct drbd_nl_cfg_reply *reply;
+	struct drbd_conf *mdev;
+	int retcode, rr;
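+	/* base reply size: connector header, the fixed part of the reply,
+	 * and the trailing TT_END tag; handler specific payload is added
+	 * below via cm->reply_body_size */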
+	int reply_size = sizeof(struct cn_msg)
+		+ sizeof(struct drbd_nl_cfg_reply)
+		+ sizeof(short int);
+
+	if (!try_module_get(THIS_MODULE)) {
+		printk(KERN_ERR "drbd: try_module_get() failed!\n");
+		return;
+	}
+
+	mdev = ensure_mdev(nlp);
+	if (!mdev) {
+		retcode = MinorNotKnown;
+		goto fail;
+	}
+
+	TRACE(TraceTypeNl, TraceLvlSummary, nl_trace_packet(data););
+
+	if (nlp->packet_type >= P_nl_after_last_packet) {
+		retcode = UnknownNetLinkPacket;
+		goto fail;
+	}
+
+	cm = cnd_table + nlp->packet_type;
+
+	/* This may happen if the packet type is 0 (no handler assigned): */
+	if (cm->function == NULL) {
+		retcode = UnknownNetLinkPacket;
+		goto fail;
+	}
+
+	reply_size += cm->reply_body_size;
+
+	cn_reply = kmalloc(reply_size, GFP_KERNEL);
+	if (!cn_reply) {
+		retcode = KMallocFailed;
+		goto fail;
+	}
+	reply = (struct drbd_nl_cfg_reply *) cn_reply->data;
+
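+	/* Handlers that produce no tag payload answer with
+	 * P_nl_after_last_packet, so userspace knows that only the
+	 * return code is valid. */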
+	reply->packet_type =
+		cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
+	reply->minor = nlp->drbd_minor;
+	reply->ret_code = NoError; /* Might be modified by cm->function. */
+	/* reply->tag_list; might be modified by cm->function. */
+
+	rr = cm->function(mdev, nlp, reply);
+
+	cn_reply->id = req->id;
+	cn_reply->seq = req->seq;
+	cn_reply->ack = req->ack + 1;
+	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
+	cn_reply->flags = 0;
+
+	TRACE(TraceTypeNl, TraceLvlSummary, nl_trace_reply(cn_reply););
+
+	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
+	if (rr && rr != -ESRCH)
+		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
+
+	kfree(cn_reply);
+	module_put(THIS_MODULE);
+	return;
+ fail:
+	drbd_nl_send_reply(req, retcode);
+	module_put(THIS_MODULE);
+}
+
+static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
+
+static inline unsigned short *
+__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
+	int len, int nul_terminated)
+{
+	/* clamp to the tag's declared max_len, so an oversized blob
+	 * cannot overrun the reply buffer */
+	int l = tag_descriptions[tag_number(tag)].max_len;
+	l = (len < l) ? len : l;
+	*tl++ = tag;
+	*tl++ = l;
+	memcpy(tl, data, l);
+	/* TODO
+	 * maybe we need to add some padding to the data stream.
+	 * otherwise we may get strange effects on architectures
+	 * that require certain data types to be strictly aligned,
+	 * because now the next "unsigned short" may be misaligned. */
+	tl = (unsigned short *)((char *)tl + l);
+	if (nul_terminated)
+		*((char *)tl - 1) = 0;
+	return tl;
+}
+
+static inline unsigned short *
+tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
+{
+	return __tl_add_blob(tl, tag, data, len, 0);
+}
+
+static inline unsigned short *
+tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
+{
+	return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
+}
+
+static inline unsigned short *
+tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
+{
+	switch (tag_type(tag)) {
+	case TT_INTEGER:
+		*tl++ = tag;
+		*tl++ = sizeof(int);
+		*(int *)tl = *(int *)val;
+		tl = (unsigned short *)((char *)tl + sizeof(int));
+		break;
+	case TT_INT64:
+		*tl++ = tag;
+		*tl++ = sizeof(u64);
+		*(u64 *)tl = *(u64 *)val;
+		tl = (unsigned short *)((char *)tl + sizeof(u64));
+		break;
+	default:
+		/* someone did something stupid. */
+		;
+	}
+	return tl;
+}
+
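+/*
+ * Illustrative sketch, not part of this patch: a userspace consumer
+ * would walk a tag list built with the helpers above like this,
+ * assuming the same tag/length/payload layout and TT_END terminator
+ * (handle_tag() is a hypothetical dispatcher):
+ *
+ *	unsigned short *tl = reply->tag_list;
+ *	while (*tl != TT_END) {
+ *		unsigned short tag = *tl++;
+ *		unsigned short len = *tl++;
+ *		handle_tag(tag_number(tag), tl, len);
+ *		tl = (unsigned short *)((char *)tl + len);
+ *	}
+ */
+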
+void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state_t state)
+{
+	char buffer[sizeof(struct cn_msg)+
+		    sizeof(struct drbd_nl_cfg_reply)+
+		    sizeof(struct get_state_tag_len_struct)+
+		    sizeof(short int)];
+	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
+	struct drbd_nl_cfg_reply *reply =
+		(struct drbd_nl_cfg_reply *)cn_reply->data;
+	unsigned short *tl = reply->tag_list;
+
+	/* drbd_WARN("drbd_bcast_state() got called\n"); */
+
+	tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);
+	*tl++ = TT_END; /* Close the tag list */
+
+	cn_reply->id.idx = CN_IDX_DRBD;
+	cn_reply->id.val = CN_VAL_DRBD;
+
+	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
+	cn_reply->ack = 0; /* not used here. */
+	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
+		(int)((char *)tl - (char *)reply->tag_list);
+	cn_reply->flags = 0;
+
+	reply->packet_type = P_get_state;
+	reply->minor = mdev_to_minor(mdev);
+	reply->ret_code = NoError;
+
+	TRACE(TraceTypeNl, TraceLvlSummary, nl_trace_reply(cn_reply););
+
+	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
+}
+
+void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
+{
+	char buffer[sizeof(struct cn_msg)+
+		    sizeof(struct drbd_nl_cfg_reply)+
+		    sizeof(struct call_helper_tag_len_struct)+
+		    sizeof(short int)];
+	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
+	struct drbd_nl_cfg_reply *reply =
+		(struct drbd_nl_cfg_reply *)cn_reply->data;
+	unsigned short *tl = reply->tag_list;
+	int str_len;
+
+	/* drbd_WARN("drbd_bcast_ev_helper() got called\n"); */
+
+	str_len = strlen(helper_name)+1;
+	*tl++ = T_helper;
+	*tl++ = str_len;
+	memcpy(tl, helper_name, str_len);
+	tl = (unsigned short *)((char *)tl + str_len);
+	*tl++ = TT_END; /* Close the tag list */
+
+	cn_reply->id.idx = CN_IDX_DRBD;
+	cn_reply->id.val = CN_VAL_DRBD;
+
+	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
+	cn_reply->ack = 0; /* not used here. */
+	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
+		(int)((char *)tl - (char *)reply->tag_list);
+	cn_reply->flags = 0;
+
+	reply->packet_type = P_call_helper;
+	reply->minor = mdev_to_minor(mdev);
+	reply->ret_code = NoError;
+
+	TRACE(TraceTypeNl, TraceLvlSummary, nl_trace_reply(cn_reply););
+
+	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
+}
+
+void drbd_bcast_ee(struct drbd_conf *mdev,
+		const char *reason, const int dgs,
+		const char* seen_hash, const char* calc_hash,
+		const struct Tl_epoch_entry* e)
+{
+	struct cn_msg *cn_reply;
+	struct drbd_nl_cfg_reply *reply;
+	struct bio_vec *bvec;
+	unsigned short *tl;
+	int i;
+
+	if (!e)
+		return;
+	if (!reason || !reason[0])
+		return;
+
+	/* apparently we have to memcpy twice, first to prepare the data for the
+	 * struct cn_msg, then within cn_netlink_send from the cn_msg to the
+	 * netlink skb. */
+	cn_reply = kmalloc(
+		sizeof(struct cn_msg)+
+		sizeof(struct drbd_nl_cfg_reply)+
+		sizeof(struct dump_ee_tag_len_struct)+
+		sizeof(short int)
+		, GFP_KERNEL);
+
+	if (!cn_reply) {
+		ERR("could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
+				(unsigned long long)e->sector, e->size);
+		return;
+	}
+
+	reply = (struct drbd_nl_cfg_reply*)cn_reply->data;
+	tl = reply->tag_list;
+
+	tl = tl_add_str(tl, T_dump_ee_reason, reason);
+	tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
+	tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
+	tl = tl_add_int(tl, T_ee_sector, &e->sector);
+	tl = tl_add_int(tl, T_ee_block_id, &e->block_id);
+
+	*tl++ = T_ee_data;
+	*tl++ = e->size;
+
+	__bio_for_each_segment(bvec, e->private_bio, i, 0) {
+		void *d = kmap(bvec->bv_page);
+		memcpy(tl, d + bvec->bv_offset, bvec->bv_len);
+		kunmap(bvec->bv_page);
+		tl = (unsigned short *)((char *)tl + bvec->bv_len);
+	}
+	*tl++ = TT_END; /* Close the tag list */
+
+	cn_reply->id.idx = CN_IDX_DRBD;
+	cn_reply->id.val = CN_VAL_DRBD;
+
+	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
+	cn_reply->ack = 0; /* not used here. */
+	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
+		(int)((char *)tl - (char *)reply->tag_list);
+	cn_reply->flags = 0;
+
+	reply->packet_type = P_dump_ee;
+	reply->minor = mdev_to_minor(mdev);
+	reply->ret_code = NoError;
+
+	TRACE(TraceTypeNl, TraceLvlSummary, nl_trace_reply(cn_reply););
+
+	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
+	kfree(cn_reply);
+}
+
+void drbd_bcast_sync_progress(struct drbd_conf *mdev)
+{
+	char buffer[sizeof(struct cn_msg)+
+		    sizeof(struct drbd_nl_cfg_reply)+
+		    sizeof(struct sync_progress_tag_len_struct)+
+		    sizeof(short int)];
+	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
+	struct drbd_nl_cfg_reply *reply =
+		(struct drbd_nl_cfg_reply *)cn_reply->data;
+	unsigned short *tl = reply->tag_list;
+	unsigned long rs_left;
+	unsigned int res;
+
+	/* no local ref, no bitmap, no syncer progress, no broadcast. */
+	if (!inc_local(mdev))
+		return;
+	drbd_get_syncer_progress(mdev, &rs_left, &res);
+	dec_local(mdev);
+
+	*tl++ = T_sync_progress;
+	*tl++ = sizeof(int);
+	memcpy(tl, &res, sizeof(int));
+	tl = (unsigned short *)((char *)tl + sizeof(int));
+	*tl++ = TT_END; /* Close the tag list */
+
+	cn_reply->id.idx = CN_IDX_DRBD;
+	cn_reply->id.val = CN_VAL_DRBD;
+
+	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
+	cn_reply->ack = 0; /* not used here. */
+	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
+		(int)((char *)tl - (char *)reply->tag_list);
+	cn_reply->flags = 0;
+
+	reply->packet_type = P_sync_progress;
+	reply->minor = mdev_to_minor(mdev);
+	reply->ret_code = NoError;
+
+	TRACE(TraceTypeNl, TraceLvlSummary, nl_trace_reply(cn_reply););
+
+	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
+}
+
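+/* NETLINK_ROUTE6 only exists on kernels before 2.6.16; there the
+ * connector is not initialized for us, so drbd_nl_init() and
+ * drbd_nl_cleanup() have to call cn_init()/cn_fini() themselves. */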
+#ifdef NETLINK_ROUTE6
+int __init cn_init(void);
+void __exit cn_fini(void);
+#endif
+
+int __init drbd_nl_init(void)
+{
+	static struct cb_id cn_id_drbd;
+	int err, try = 10;
+
+#ifdef NETLINK_ROUTE6
+	/* pre 2.6.16 */
+	err = cn_init();
+	if (err)
+		return err;
+#endif
+	cn_id_drbd.val = CN_VAL_DRBD;
+	do {
+		cn_id_drbd.idx = cn_idx;
+		err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
+		if (!err)
+			break;
+		cn_idx += CN_IDX_STEP;
+	} while (try--);
+
+	if (err) {
+		printk(KERN_ERR "drbd: cn_drbd failed to register\n");
+		return err;
+	}
+
+	return 0;
+}
+
+void drbd_nl_cleanup(void)
+{
+	static struct cb_id cn_id_drbd;
+
+	cn_id_drbd.idx = cn_idx;
+	cn_id_drbd.val = CN_VAL_DRBD;
+
+	cn_del_callback(&cn_id_drbd);
+
+#ifdef NETLINK_ROUTE6
+	/* pre 2.6.16 */
+	cn_fini();
+#endif
+}
+
+void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
+{
+	char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
+	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
+	struct drbd_nl_cfg_reply *reply =
+		(struct drbd_nl_cfg_reply *)cn_reply->data;
+	int rr;
+
+	cn_reply->id = req->id;
+
+	cn_reply->seq = req->seq;
+	cn_reply->ack = req->ack + 1;
+	cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
+	cn_reply->flags = 0;
+
+	reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
+	reply->ret_code = ret_code;
+
+	TRACE(TraceTypeNl, TraceLvlSummary, nl_trace_reply(cn_reply););
+
+	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
+	if (rr && rr != -ESRCH)
+		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
+}
+
--